/*	$NetBSD: asm.h,v 1.76 2025/01/04 19:29:44 martin Exp $	*/

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Ralph Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)machAsmDefs.h	8.1 (Berkeley) 6/10/93
 */

/*
 * machAsmDefs.h --
 *
 *	Macros used when writing assembler programs.
 *
 *	Copyright (C) 1989 Digital Equipment Corporation.
 *	Permission to use, copy, modify, and distribute this software and
 *	its documentation for any purpose and without fee is hereby granted,
 *	provided that the above copyright notice appears in all copies.
 *	Digital Equipment Corporation makes no representations about the
 *	suitability of this software for any purpose.  It is provided "as is"
 *	without express or implied warranty.
 *
 * from: Header: /sprite/src/kernel/mach/ds3100.md/RCS/machAsmDefs.h,
 *	v 1.2 89/08/15 18:28:24 rab Exp SPRITE (DECWRL)
 */

#ifndef _MIPS_ASM_H
#define	_MIPS_ASM_H

#include <sys/cdefs.h>		/* for API selection */
#include <mips/regdef.h>

#if defined(_KERNEL_OPT)
#include "opt_gprof.h"
#include "opt_multiprocessor.h"
#endif

#ifdef __ASSEMBLER__
#define	__BIT(n)	(1 << (n))
#define	__BITS(hi,lo)	((~((~0)<<((hi)+1)))&((~0)<<(lo)))

#define	__LOWEST_SET_BIT(__mask) ((((__mask) - 1) & (__mask)) ^ (__mask))
#define	__SHIFTOUT(__x, __mask)	(((__x) & (__mask)) / __LOWEST_SET_BIT(__mask))
#define	__SHIFTIN(__x, __mask)	((__x) * __LOWEST_SET_BIT(__mask))
#endif	/* __ASSEMBLER__ */

#ifndef GPROF
#define	_MIPS_ASM_MCOUNT(x)
#else
/*
 * Define -pg profile entry code.
 * Must always be noreorder; must never use a macro instruction.
 */
#if defined(__mips_o32)		/* Old 32-bit ABI */
/*
 * The old-ABI version must also pop two fewer words off the stack,
 * and the final addiu to t9 must always equal the size of this
 * _MIPS_ASM_MCOUNT.
 */
#define	_MIPS_ASM_MCOUNT(x)			\
	.set	push;				\
	.set	noreorder;			\
	.set	noat;				\
	subu	sp,16;				\
	sw	t9,12(sp);			\
	move	AT,ra;				\
	lui	t9,%hi(_mcount);		\
	addiu	t9,t9,%lo(_mcount);		\
	jalr	t9;				\
	nop;					\
	lw	t9,4(sp);			\
	addiu	sp,8;				\
	addiu	t9,40;				\
	.set	pop;
#elif defined(__mips_o64)	/* Old 64-bit ABI */
# error yeahnah
#else /* New (n32/n64) ABI */
/*
 * The new ABI version just needs to put the return address in AT and
 * call _mcount().  For the no abicalls case, skip the reloc dance.
 */
#ifdef __mips_abicalls
#if defined(__mips_n32)		/* n32 */
#define	_MIPS_ASM_MCOUNT(x)			\
	.set	push;				\
	.set	noreorder;			\
	.set	noat;				\
	subu	sp,16;				\
	sw	t9,8(sp);			\
	move	AT,ra;				\
	lui	t9,%hi(_mcount);		\
	addiu	t9,t9,%lo(_mcount);		\
	jalr	t9;				\
	nop;					\
	lw	t9,8(sp);			\
	addiu	sp,16;				\
	.set	pop;
#else				/* n64 */
#define	_MIPS_ASM_MCOUNT(x)			\
	.set	push;				\
	.set	noreorder;			\
	.set	noat;				\
	dsubu	sp,16;				\
	sd	gp,0(sp);			\
	sd	t9,8(sp);			\
	move	AT,ra;				\
	lui	gp,%hi(%neg(%gp_rel(x)));	\
	daddiu	gp,%lo(%neg(%gp_rel(x)));	\
	daddu	gp,gp,t9;			\
	ld	t9,%call16(_mcount)(gp);	\
	jalr	t9;				\
	nop;					\
	ld	gp,0(sp);			\
	ld	t9,8(sp);			\
	daddiu	sp,16;				\
	.set	pop;
#endif
#else /* !__mips_abicalls */
#define	_MIPS_ASM_MCOUNT(x)			\
	.set	push;				\
	.set	noreorder;			\
	.set	noat;				\
	move	AT,ra;				\
	jal	_mcount;			\
	nop;					\
	.set	pop;
#endif /* !__mips_abicalls */
#endif /* n32/n64 */
#endif /* GPROF */

#ifdef USE_AENT
#define	AENT(x)		\
	.aent	x, 0
#else
#define	AENT(x)
#endif

/*
 * WEAK_ALIAS: create a weak alias.
 */
#define	WEAK_ALIAS(alias,sym)	\
	.weak alias;		\
	alias = sym
/*
 * STRONG_ALIAS: create a strong alias.
 */
#define	STRONG_ALIAS(alias,sym)	\
	.globl alias;		\
	alias = sym
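
/*
 * Illustrative sketch (assumed usage, symbol names are hypothetical):
 * aliases are normally placed next to the routine they alias, e.g. a
 * libc strlen.S might provide
 *
 *	WEAK_ALIAS(strlen, _strlen)
 *	STRONG_ALIAS(_libc_strlen, _strlen)
 *
 * so strlen() can be interposed while _libc_strlen always binds to the
 * local _strlen implementation.
 */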

/*
 * WARN_REFERENCES: create a warning if the specified symbol is referenced.
 */
#define	WARN_REFERENCES(sym,msg)			\
	.pushsection __CONCAT(.gnu.warning.,sym);	\
	.ascii msg;					\
	.popsection
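
/*
 * Illustrative sketch (assumed usage, not from the original source):
 *
 *	WARN_REFERENCES(gets, "warning: this program uses gets(), which is unsafe.")
 *
 * puts the message into the object's .gnu.warning.gets section, and the
 * linker prints it whenever something references gets.
 */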

/*
 * STATIC_LEAF_NOPROFILE
 *	Declare a local (static) leaf routine that is not profiled.
 */
#define	STATIC_LEAF_NOPROFILE(x)		\
	.ent	_C_LABEL(x);			\
_C_LABEL(x): ;					\
	.frame sp, 0, ra

/*
 * LEAF_NOPROFILE
 *	Declare a global leaf routine that is not profiled.
 */
#define	LEAF_NOPROFILE(x)			\
	.globl	_C_LABEL(x);			\
	STATIC_LEAF_NOPROFILE(x)

/*
 * STATIC_LEAF
 *	Declare a local (static) leaf routine.
 */
#define	STATIC_LEAF(x)				\
	STATIC_LEAF_NOPROFILE(x);		\
	_MIPS_ASM_MCOUNT(x)

/*
 * LEAF
 *	A leaf routine
 *	- calls no other functions,
 *	- never uses any callee-saved register (s0-s8), and
 *	- uses no local stack storage.
 */
#define	LEAF(x)					\
	LEAF_NOPROFILE(x);			\
	_MIPS_ASM_MCOUNT(x)

/*
 * STATIC_XLEAF
 *	Declare an alternate entry to a static leaf routine.
 */
#define	STATIC_XLEAF(x)				\
	AENT (_C_LABEL(x));			\
_C_LABEL(x):

/*
 * XLEAF
 *	Declare an alternate entry to a leaf routine.
 */
#define	XLEAF(x)				\
	.globl	_C_LABEL(x);			\
	STATIC_XLEAF(x)

/*
 * STATIC_NESTED_NOPROFILE
 *	Declare a local (static) nested routine that is not profiled.
 */
#define	STATIC_NESTED_NOPROFILE(x, fsize, retpc)	\
	.ent	_C_LABEL(x);				\
	.type	_C_LABEL(x), @function;			\
_C_LABEL(x): ;						\
	.frame	sp, fsize, retpc

/*
 * NESTED_NOPROFILE
 *	Declare a global nested routine that is not profiled.
 */
#define	NESTED_NOPROFILE(x, fsize, retpc)	\
	.globl	_C_LABEL(x);			\
	STATIC_NESTED_NOPROFILE(x, fsize, retpc)

/*
 * NESTED
 *	A function that calls other functions and therefore needs
 *	stack space to save/restore registers.
 */
#define	NESTED(x, fsize, retpc)			\
	NESTED_NOPROFILE(x, fsize, retpc);	\
	_MIPS_ASM_MCOUNT(x)

/*
 * STATIC_NESTED
 *	Declare a local (static) nested routine.
 */
#define	STATIC_NESTED(x, fsize, retpc)			\
	STATIC_NESTED_NOPROFILE(x, fsize, retpc);	\
	_MIPS_ASM_MCOUNT(x)

/*
 * XNESTED
 *	Declare an alternate entry point to a nested routine.
 */
#define	XNESTED(x)				\
	.globl	_C_LABEL(x);			\
	AENT (_C_LABEL(x));			\
_C_LABEL(x):

/*
 * END
 *	Mark end of a procedure.
 */
#define	END(x)					\
	.end _C_LABEL(x);			\
	.size _C_LABEL(x), . - _C_LABEL(x)
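
/*
 * Illustrative sketch only (example_nop is a hypothetical symbol, not
 * part of this header): a leaf routine is bracketed by LEAF/END so the
 * assembler sees matching .ent/.end and .size directives:
 *
 *	LEAF(example_nop)
 *		jr	ra
 *		 nop
 *	END(example_nop)
 */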

/*
 * IMPORT -- import external symbol
 */
#define	IMPORT(sym, size)			\
	.extern _C_LABEL(sym),size

/*
 * EXPORT -- export definition of symbol
 */
#define	EXPORT(x)				\
	.globl	_C_LABEL(x);			\
_C_LABEL(x):

/*
 * EXPORT_OBJECT -- export definition of a symbol, with ELF symbol
 *	type "object", visible to ksyms(4) address search.
 */
#define	EXPORT_OBJECT(x)			\
	EXPORT(x);				\
	.type	_C_LABEL(x), @object;
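
/*
 * Illustrative sketch (hypothetical symbol, not from the original
 * source): data symbols use EXPORT_OBJECT so ksyms(4) can attribute
 * addresses to them, e.g.
 *
 *	EXPORT_OBJECT(example_table)
 *		.word	0, 0, 0, 0
 */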

/*
 * VECTOR
 *	exception vector entrypoint
 *	XXX: regmask should be used to generate .mask
 */
#define	VECTOR(x, regmask)			\
	.ent	_C_LABEL(x);			\
	EXPORT(x);				\

#define	VECTOR_END(x)				\
	EXPORT(__CONCAT(x,_end));		\
	END(x);					\
	.org _C_LABEL(x) + 0x80

/*
 * Macros to panic and printf from assembly language.
 */
#define	PANIC(msg)				\
	PTR_LA	a0, 9f;				\
	jal	_C_LABEL(panic);		\
	nop;					\
	MSG(msg)

#define	PRINTF(msg)				\
	PTR_LA	a0, 9f;				\
	jal	_C_LABEL(printf);		\
	nop;					\
	MSG(msg)

#define	MSG(msg)				\
	.rdata;					\
9:	.asciz	msg;				\
	.text
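
/*
 * Illustrative sketch (assumed usage, hypothetical message): MSG places
 * the string in .rdata under local label 9, and PANIC loads it into a0
 * before jumping to panic(9), so a fatal consistency check might read
 *
 *	bnez	t0, 1f
 *	 nop
 *	PANIC("example: lost interrupt state")
 * 1:
 */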

#define	ASMSTR(str)				\
	.asciz str;				\
	.align	3

#ifdef _NETBSD_REVISIONID
#define	RCSID(x)	.pushsection ".ident","MS",@progbits,1;	\
			.asciz "$" "NetBSD: " __FILE__		\
			    " " _NETBSD_REVISIONID " $";	\
			.popsection
#else
#define	RCSID(x)	.pushsection ".ident","MS",@progbits,1;	\
			.asciz x;				\
			.popsection
#endif
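
/*
 * Illustrative sketch (assumed usage): assembly sources normally invoke
 * RCSID once near the top of the file with their own ident string, e.g.
 *
 *	RCSID("$NetBSD$")
 *
 * which lands in the .ident section (or is replaced by the
 * _NETBSD_REVISIONID-based string when that macro is defined).
 */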

/*
 * XXX retain dialects XXX
 */
#define	ALEAF(x)			XLEAF(x)
#define	NLEAF(x)			LEAF_NOPROFILE(x)
#define	NON_LEAF(x, fsize, retpc)	NESTED(x, fsize, retpc)
#define	NNON_LEAF(x, fsize, retpc)	NESTED_NOPROFILE(x, fsize, retpc)

#if defined(__mips_o32)
#define	SZREG	4
#else
#define	SZREG	8
#endif

#if defined(__mips_o32) || defined(__mips_o64)
#define	ALSK	7		/* stack alignment */
#define	ALMASK	-7		/* stack alignment */
#define	SZFPREG	4
#define	FP_L	lwc1
#define	FP_S	swc1
#else
#define	ALSK	15		/* stack alignment */
#define	ALMASK	-15		/* stack alignment */
#define	SZFPREG	8
#define	FP_L	ldc1
#define	FP_S	sdc1
#endif

/*
 * standard callframe {
 *	register_t cf_args[4];		arg0 - arg3 (only on o32 and o64)
 *	register_t cf_pad[N];		o32/o64 (N=0), n32 (N=1), n64 (N=1)
 *	register_t cf_gp;		global pointer (only on n32 and n64)
 *	register_t cf_sp;		frame pointer
 *	register_t cf_ra;		return address
 * };
 */
#if defined(__mips_o32) || defined(__mips_o64)
#define	CALLFRAME_SIZ	(SZREG * (4 + 2))
#define	CALLFRAME_S0	0
#elif defined(__mips_n32) || defined(__mips_n64)
#define	CALLFRAME_SIZ	(SZREG * 4)
#define	CALLFRAME_S0	(CALLFRAME_SIZ - 4 * SZREG)
#endif
#ifndef _KERNEL
#define	CALLFRAME_GP	(CALLFRAME_SIZ - 3 * SZREG)
#endif
#define	CALLFRAME_SP	(CALLFRAME_SIZ - 2 * SZREG)
#define	CALLFRAME_RA	(CALLFRAME_SIZ - 1 * SZREG)
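
/*
 * Illustrative sketch only (example_fn and example_helper are
 * hypothetical symbols): a nested routine typically reserves a
 * standard callframe and saves ra at CALLFRAME_RA so it can call out
 * and still return:
 *
 *	NESTED(example_fn, CALLFRAME_SIZ, ra)
 *		PTR_SUBU sp, CALLFRAME_SIZ
 *		REG_S	ra, CALLFRAME_RA(sp)
 *		jal	_C_LABEL(example_helper)
 *		 nop
 *		REG_L	ra, CALLFRAME_RA(sp)
 *		jr	ra
 *		 PTR_ADDU sp, CALLFRAME_SIZ
 *	END(example_fn)
 */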

/*
 * While it would be nice to be compatible with the SGI REG_L and
 * REG_S macros, they take no parameters, which makes them impossible
 * to use with the _MIPS_SIM_ABIX32 (n32) model.
 *
 * These macros hide the use of mips3 instructions from the
 * assembler to prevent the assembler from generating 64-bit style
 * ABI calls.
 */
#ifdef __mips_o32
#define	PTR_ADD		add
#define	PTR_ADDI	addi
#define	PTR_ADDU	addu
#define	PTR_ADDIU	addiu
#define	PTR_SUB		subu
#define	PTR_SUBI	subi
#define	PTR_SUBU	subu
#define	PTR_SUBIU	subu
#define	PTR_L		lw
#define	PTR_LA		la
#define	PTR_S		sw
#define	PTR_SLL		sll
#define	PTR_SLLV	sllv
#define	PTR_SRL		srl
#define	PTR_SRLV	srlv
#define	PTR_SRA		sra
#define	PTR_SRAV	srav
#define	PTR_LL		ll
#define	PTR_SC		sc
#define	PTR_WORD	.word
#define	PTR_SCALESHIFT	2
#else /* _MIPS_SZPTR == 64 */
#define	PTR_ADD		dadd
#define	PTR_ADDI	daddi
#define	PTR_ADDU	daddu
#define	PTR_ADDIU	daddiu
#define	PTR_SUB		dsubu
#define	PTR_SUBI	dsubi
#define	PTR_SUBU	dsubu
#define	PTR_SUBIU	dsubu
#ifdef __mips_n32
#define	PTR_L		lw
#define	PTR_LL		ll
#define	PTR_SC		sc
#define	PTR_S		sw
#define	PTR_SCALESHIFT	2
#define	PTR_WORD	.word
#else
#define	PTR_L		ld
#define	PTR_LL		lld
#define	PTR_SC		scd
#define	PTR_S		sd
#define	PTR_SCALESHIFT	3
#define	PTR_WORD	.dword
#endif
#define	PTR_LA		dla
#define	PTR_SLL		dsll
#define	PTR_SLLV	dsllv
#define	PTR_SRL		dsrl
#define	PTR_SRLV	dsrlv
#define	PTR_SRA		dsra
#define	PTR_SRAV	dsrav
#endif /* _MIPS_SZPTR == 64 */
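
/*
 * Illustrative sketch (hypothetical symbol names): these size-sensitive
 * mnemonics let the same source index a table of pointers on any ABI,
 * e.g.
 *
 *	PTR_LA	t0, _C_LABEL(example_table)
 *	PTR_SLL	t1, a0, PTR_SCALESHIFT	# index -> byte offset
 *	PTR_ADDU t0, t0, t1
 *	PTR_L	v0, 0(t0)		# load the a0'th pointer
 */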

#if _MIPS_SZINT == 32
#define	INT_ADD		add
#define	INT_ADDI	addi
#define	INT_ADDU	addu
#define	INT_ADDIU	addiu
#define	INT_SUB		subu
#define	INT_SUBI	subi
#define	INT_SUBU	subu
#define	INT_SUBIU	subu
#define	INT_L		lw
#define	INT_LA		la
#define	INT_S		sw
#define	INT_SLL		sll
#define	INT_SLLV	sllv
#define	INT_SRL		srl
#define	INT_SRLV	srlv
#define	INT_SRA		sra
#define	INT_SRAV	srav
#define	INT_LL		ll
#define	INT_SC		sc
#define	INT_WORD	.word
#define	INT_SCALESHIFT	2
#else
#define	INT_ADD		dadd
#define	INT_ADDI	daddi
#define	INT_ADDU	daddu
#define	INT_ADDIU	daddiu
#define	INT_SUB		dsubu
#define	INT_SUBI	dsubi
#define	INT_SUBU	dsubu
#define	INT_SUBIU	dsubu
#define	INT_L		ld
#define	INT_LA		dla
#define	INT_S		sd
#define	INT_SLL		dsll
#define	INT_SLLV	dsllv
#define	INT_SRL		dsrl
#define	INT_SRLV	dsrlv
#define	INT_SRA		dsra
#define	INT_SRAV	dsrav
#define	INT_LL		lld
#define	INT_SC		scd
#define	INT_WORD	.dword
#define	INT_SCALESHIFT	3
#endif

#if _MIPS_SZLONG == 32
#define	LONG_ADD	add
#define	LONG_ADDI	addi
#define	LONG_ADDU	addu
#define	LONG_ADDIU	addiu
#define	LONG_SUB	subu
#define	LONG_SUBI	subi
#define	LONG_SUBU	subu
#define	LONG_SUBIU	subu
#define	LONG_L		lw
#define	LONG_LA		la
#define	LONG_S		sw
#define	LONG_SLL	sll
#define	LONG_SLLV	sllv
#define	LONG_SRL	srl
#define	LONG_SRLV	srlv
#define	LONG_SRA	sra
#define	LONG_SRAV	srav
#define	LONG_LL		ll
#define	LONG_SC		sc
#define	LONG_WORD	.word
#define	LONG_SCALESHIFT	2
#else
#define	LONG_ADD	dadd
#define	LONG_ADDI	daddi
#define	LONG_ADDU	daddu
#define	LONG_ADDIU	daddiu
#define	LONG_SUB	dsubu
#define	LONG_SUBI	dsubi
#define	LONG_SUBU	dsubu
#define	LONG_SUBIU	dsubu
#define	LONG_L		ld
#define	LONG_LA		dla
#define	LONG_S		sd
#define	LONG_SLL	dsll
#define	LONG_SLLV	dsllv
#define	LONG_SRL	dsrl
#define	LONG_SRLV	dsrlv
#define	LONG_SRA	dsra
#define	LONG_SRAV	dsrav
#define	LONG_LL		lld
#define	LONG_SC		scd
#define	LONG_WORD	.dword
#define	LONG_SCALESHIFT	3
#endif

#if SZREG == 4
#define	REG_L		lw
#define	REG_S		sw
#define	REG_LI		li
#define	REG_ADDU	addu
#define	REG_SLL		sll
#define	REG_SLLV	sllv
#define	REG_SRL		srl
#define	REG_SRLV	srlv
#define	REG_SRA		sra
#define	REG_SRAV	srav
#define	REG_LL		ll
#define	REG_SC		sc
#define	REG_SCALESHIFT	2
#else
#define	REG_L		ld
#define	REG_S		sd
#define	REG_LI		dli
#define	REG_ADDU	daddu
#define	REG_SLL		dsll
#define	REG_SLLV	dsllv
#define	REG_SRL		dsrl
#define	REG_SRLV	dsrlv
#define	REG_SRA		dsra
#define	REG_SRAV	dsrav
#define	REG_LL		lld
#define	REG_SC		scd
#define	REG_SCALESHIFT	3
#endif

#if (MIPS1 + MIPS2) > 0
#define	NOP_L		nop
#else
#define	NOP_L		/* nothing */
#endif

/* __OCTEON__ below is a compiler-provided define */
#if defined(MULTIPROCESSOR) && defined(__OCTEON__)
/*
 * See common/lib/libc/arch/mips/atomic/membar_ops.S for notes on
 * Octeon memory ordering guarantees and barriers.
 *
 * cnMIPS also has a quirk where the store buffer can get clogged and
 * we need to apply a plunger to it _after_ releasing a lock or else
 * other CPUs may spin for hundreds of thousands of cycles before they
 * see the lock is released.  So we also have the quirky SYNC_PLUNGER
 * barrier as syncw.  See the note in the SYNCW instruction description
 * on p. 2168 of Cavium OCTEON III CN78XX Hardware Reference Manual,
 * CN78XX-HM-0.99E, September 2014:
 *
 *	Core A (writer)
 *
 *	SW R1, DATA#	change shared DATA value
 *	LI R1, 1
 *	SYNCW# (or SYNCWS) Perform DATA store before performing FLAG store
 *	SW R2, FLAG#	say that the shared DATA value is valid
 *	SYNCW# (or SYNCWS) Force the FLAG store soon (CN78XX-specific)
 *
 *	...
 *
 *	The second SYNCW instruction executed by core A is not
 *	necessary for correctness, but has very important performance
 *	effects on the CN78XX.  Without it, the store to FLAG may
 *	linger in core A's write buffer before it becomes visible to
 *	any other cores.  (If core A is not performing many stores,
 *	this may add hundreds of thousands of cycles to the flag
 *	release time since the CN78XX core nominally retains stores to
 *	attempt to merge them before sending the store on the CMI.)
 *	Applications should include this second SYNCW instruction after
 *	flag or lock release.
 */
#define	LLSCSYNC	/* nothing */
#define	BDSYNC		sync
#define	BDSYNC_ACQ	nop
#define	SYNC_ACQ	/* nothing */
#define	SYNC_REL	sync 4
#define	BDSYNC_PLUNGER	sync 4
#define	SYNC_PLUNGER	sync 4
#elif defined(MULTIPROCESSOR) && (__mips >= 3 || !defined(__mips_o32))
#define	LLSCSYNC	/* nothing */
#define	BDSYNC		sync
#define	BDSYNC_ACQ	sync
#define	SYNC_ACQ	sync
#define	SYNC_REL	sync
#define	BDSYNC_PLUNGER	nop
#define	SYNC_PLUNGER	/* nothing */
#else
#define	LLSCSYNC	/* nothing */
#define	BDSYNC		nop
#define	BDSYNC_ACQ	nop
#define	SYNC_ACQ	/* nothing */
#define	SYNC_REL	/* nothing */
#define	BDSYNC_PLUNGER	nop
#define	SYNC_PLUNGER	/* nothing */
#endif

/*
 * Store-before-load barrier.  Do not use this unless you know what
 * you're doing.
 */
#ifdef MULTIPROCESSOR
#define	SYNC_DEKKER	sync
#else
#define	SYNC_DEKKER	/* nothing */
#endif

/*
 * Store-before-store and load-before-load barriers.  These could be
 * made weaker than release (load/store-before-store) and acquire
 * (load-before-load/store) barriers, and newer MIPS does have
 * instruction encodings for finer-grained barriers like this, but I
 * dunno how to appropriately conditionalize their use or get the
 * assembler to be happy with them, so we'll use these definitions for
 * now.
 */
#define	SYNC_PRODUCER	SYNC_REL
#define	SYNC_CONSUMER	SYNC_ACQ

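/*
 * Illustrative sketch (hypothetical lock word, not from the original
 * source): a simple spin-lock release orders its prior stores with
 * SYNC_REL before clearing the lock word, then applies SYNC_PLUNGER so
 * cnMIPS pushes the store out promptly:
 *
 *	SYNC_REL
 *	INT_S	zero, 0(a0)		# a0 points at the lock word
 *	SYNC_PLUNGER
 */
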
/* CPU dependent hook for cp0 load delays */
#if defined(MIPS1) || defined(MIPS2) || defined(MIPS3)
#define	MFC0_HAZARD	sll $0,$0,1	/* super scalar nop */
#else
#define	MFC0_HAZARD	/* nothing */
#endif
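
/*
 * Illustrative sketch (assuming MIPS_COP_0_STATUS from
 * <mips/cpuregs.h>): a cp0 read is followed by MFC0_HAZARD before the
 * result is used, so older pipelines see the value after the load
 * delay:
 *
 *	mfc0	v0, MIPS_COP_0_STATUS
 *	MFC0_HAZARD
 *	and	v0, v0, t0
 */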

#if _MIPS_ISA == _MIPS_ISA_MIPS1 || _MIPS_ISA == _MIPS_ISA_MIPS2 || \
    _MIPS_ISA == _MIPS_ISA_MIPS32
#define	MFC0		mfc0
#define	MTC0		mtc0
#endif
#if _MIPS_ISA == _MIPS_ISA_MIPS3 || _MIPS_ISA == _MIPS_ISA_MIPS4 || \
    _MIPS_ISA == _MIPS_ISA_MIPS64
#define	MFC0		dmfc0
#define	MTC0		dmtc0
#endif

#if defined(__mips_o32) || defined(__mips_o64)

#ifdef __mips_abicalls
#define	CPRESTORE(r)	.cprestore r
#define	CPLOAD(r)	.cpload r
#else
#define	CPRESTORE(r)	/* not needed */
#define	CPLOAD(r)	/* not needed */
#endif

#define	SETUP_GP	\
	.set push;	\
	.set noreorder;	\
	.cpload	t9;	\
	.set pop
#define	SETUP_GPX(r)				\
	.set push;				\
	.set noreorder;				\
	move	r,ra;	/* save old ra */	\
	bal	7f;				\
	nop;					\
7:	.cpload	ra;				\
	move	ra,r;				\
	.set pop
#define	SETUP_GPX_L(r,lbl)			\
	.set push;				\
	.set noreorder;				\
	move	r,ra;	/* save old ra */	\
	bal	lbl;				\
	nop;					\
lbl:	.cpload	ra;				\
	move	ra,r;				\
	.set pop
#define	SAVE_GP(x)	.cprestore x

#define	SETUP_GP64(a,b)		/* n32/n64 specific */
#define	SETUP_GP64_R(a,b)	/* n32/n64 specific */
#define	SETUP_GPX64(a,b)	/* n32/n64 specific */
#define	SETUP_GPX64_L(a,b,c)	/* n32/n64 specific */
#define	RESTORE_GP64		/* n32/n64 specific */
#define	USE_ALT_CP(a)		/* n32/n64 specific */
#endif /* __mips_o32 || __mips_o64 */

#if defined(__mips_o32) || defined(__mips_o64)
#define	REG_PROLOGUE	.set push
#define	REG_EPILOGUE	.set pop
#endif
#if defined(__mips_n32) || defined(__mips_n64)
#define	REG_PROLOGUE	.set push ; .set mips3
#define	REG_EPILOGUE	.set pop
#endif

#if defined(__mips_n32) || defined(__mips_n64)
#define	SETUP_GP		/* o32 specific */
#define	SETUP_GPX(r)		/* o32 specific */
#define	SETUP_GPX_L(r,lbl)	/* o32 specific */
#define	SAVE_GP(x)		/* o32 specific */
#define	SETUP_GP64(a,b)		.cpsetup t9, a, b
#define	SETUP_GPX64(a,b)	\
	.set push;		\
	move	b,ra;		\
	.set noreorder;		\
	bal	7f;		\
	nop;			\
7:	.set pop;		\
	.cpsetup ra, a, 7b;	\
	move	ra,b
#define	SETUP_GPX64_L(a,b,c)	\
	.set push;		\
	move	b,ra;		\
	.set noreorder;		\
	bal	c;		\
	nop;			\
c:	.set pop;		\
	.cpsetup ra, a, c;	\
	move	ra,b
#define	RESTORE_GP64		.cpreturn
#define	USE_ALT_CP(a)		.cplocal a
#endif /* __mips_n32 || __mips_n64 */
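
/*
 * Illustrative sketch (hypothetical routine, assumed usage): an
 * n32/n64 PIC routine establishes gp with SETUP_GP64, using a
 * callframe slot to hold the old value, and undoes it with
 * RESTORE_GP64 before returning:
 *
 *	NESTED(example_pic_fn, CALLFRAME_SIZ, ra)
 *		PTR_SUBU sp, CALLFRAME_SIZ
 *		SETUP_GP64(CALLFRAME_GP, example_pic_fn)
 *		REG_S	ra, CALLFRAME_RA(sp)
 *		...
 *		REG_L	ra, CALLFRAME_RA(sp)
 *		RESTORE_GP64
 *		jr	ra
 *		 PTR_ADDU sp, CALLFRAME_SIZ
 *	END(example_pic_fn)
 */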

/*
 * The DYNAMIC_STATUS_MASK option adds an additional masking operation
 * when updating the hardware interrupt mask in the status register.
 *
 * This is useful for platforms that need to mask interrupts at run
 * time based on motherboard configuration, or to handle slowly
 * clearing interrupts.
 *
 * XXX this is only currently implemented for mips3.
 */
#ifdef MIPS_DYNAMIC_STATUS_MASK
#define	DYNAMIC_STATUS_MASK(sr,scratch)			\
	lw	scratch, mips_dynamic_status_mask;	\
	and	sr, sr, scratch

#define	DYNAMIC_STATUS_MASK_TOUSER(sr,scratch1)		\
	ori	sr, (MIPS_INT_MASK | MIPS_SR_INT_IE);	\
	DYNAMIC_STATUS_MASK(sr,scratch1)
#else
#define	DYNAMIC_STATUS_MASK(sr,scratch)
#define	DYNAMIC_STATUS_MASK_TOUSER(sr,scratch1)
#endif
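
/*
 * Illustrative sketch (assumed usage; MIPS_COP_0_STATUS is taken from
 * <mips/cpuregs.h>): code that writes a new interrupt mask into the
 * status register passes the value through DYNAMIC_STATUS_MASK first,
 * so the platform's run-time mask is always applied:
 *
 *	DYNAMIC_STATUS_MASK(v0, t0)	# v0 holds the new SR value
 *	mtc0	v0, MIPS_COP_0_STATUS
 */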

/* See lock_stubs.S. */
#define	LOG2_MIPS_LOCK_RAS_SIZE	8
#define	MIPS_LOCK_RAS_SIZE	256	/* 16 bytes left over */

#define	CPUVAR(off) _C_LABEL(cpu_info_store)+__CONCAT(CPU_INFO_,off)

#endif /* _MIPS_ASM_H */