/*	$NetBSD: asm.h,v 1.72 2023/02/13 12:00:18 riastradh Exp $	*/

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Ralph Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)machAsmDefs.h	8.1 (Berkeley) 6/10/93
 */

/*
 * machAsmDefs.h --
 *
 *	Macros used when writing assembler programs.
 *
 *	Copyright (C) 1989 Digital Equipment Corporation.
 *	Permission to use, copy, modify, and distribute this software and
 *	its documentation for any purpose and without fee is hereby granted,
 *	provided that the above copyright notice appears in all copies.
 *	Digital Equipment Corporation makes no representations about the
 *	suitability of this software for any purpose.  It is provided "as is"
 *	without express or implied warranty.
 *
 * from: Header: /sprite/src/kernel/mach/ds3100.md/RCS/machAsmDefs.h,
 *	v 1.2 89/08/15 18:28:24 rab Exp SPRITE (DECWRL)
 */

#ifndef _MIPS_ASM_H
#define	_MIPS_ASM_H

#include <sys/cdefs.h>		/* for API selection */
#include <mips/regdef.h>

#if defined(_KERNEL_OPT)
#include "opt_gprof.h"
#endif

#ifdef __ASSEMBLER__
#define	__BIT(n)	(1 << (n))
#define	__BITS(hi,lo)	((~((~0)<<((hi)+1)))&((~0)<<(lo)))

#define	__LOWEST_SET_BIT(__mask) ((((__mask) - 1) & (__mask)) ^ (__mask))
#define	__SHIFTOUT(__x, __mask)	(((__x) & (__mask)) / __LOWEST_SET_BIT(__mask))
#define	__SHIFTIN(__x, __mask)	((__x) * __LOWEST_SET_BIT(__mask))
#endif	/* __ASSEMBLER__ */
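
/*
 * Illustration only (the field layout below is made up): __BITS builds
 * a constant mask and __SHIFTIN/__SHIFTOUT scale a value into/out of
 * that field, all at assembly time, e.g.
 *
 *	li	t0, __SHIFTIN(3, __BITS(11,8))	# assembles to li t0, 0x300
 *	andi	t1, t2, __BITS(7,0)		# mask off the low byte
 */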

/*
 * Define -pg profile entry code.
 * Must always be noreorder, must never use a macro instruction.
 */
#if defined(__mips_o32)		/* Old 32-bit ABI */
/*
 * The old ABI version must also pop two fewer words off the stack,
 * and the final addiu to t9 must always equal the size of this
 * _MIPS_ASM_MCOUNT.
 */
#define	_MIPS_ASM_MCOUNT				\
	.set	push;					\
	.set	noreorder;				\
	.set	noat;					\
	subu	sp,16;					\
	sw	t9,12(sp);				\
	move	AT,ra;					\
	lui	t9,%hi(_mcount);			\
	addiu	t9,t9,%lo(_mcount);			\
	jalr	t9;					\
	nop;						\
	lw	t9,4(sp);				\
	addiu	sp,8;					\
	addiu	t9,40;					\
	.set	pop;
#elif defined(__mips_o64)	/* Old 64-bit ABI */
# error yeahnah
#else				/* New (n32/n64) ABI */
/*
 * The new ABI version just needs to put the return address in AT and
 * call _mcount().  For the no abicalls case, skip the reloc dance.
 */
#ifdef __mips_abicalls
#define	_MIPS_ASM_MCOUNT				\
	.set	push;					\
	.set	noreorder;				\
	.set	noat;					\
	subu	sp,16;					\
	sw	t9,8(sp);				\
	move	AT,ra;					\
	lui	t9,%hi(_mcount);			\
	addiu	t9,t9,%lo(_mcount);			\
	jalr	t9;					\
	nop;						\
	lw	t9,8(sp);				\
	addiu	sp,16;					\
	.set	pop;
#else /* !__mips_abicalls */
#define	_MIPS_ASM_MCOUNT				\
	.set	push;					\
	.set	noreorder;				\
	.set	noat;					\
	move	AT,ra;					\
	jal	_mcount;				\
	nop;						\
	.set	pop;
#endif /* !__mips_abicalls */
#endif /* n32/n64 */

#ifdef GPROF
#define	MCOUNT	_MIPS_ASM_MCOUNT
#else
#define	MCOUNT
#endif

#ifdef USE_AENT
#define	AENT(x)				\
	.aent	x, 0
#else
#define	AENT(x)
#endif

/*
 * WEAK_ALIAS: create a weak alias.
 */
#define	WEAK_ALIAS(alias,sym)		\
	.weak alias;			\
	alias = sym
/*
 * STRONG_ALIAS: create a strong alias.
 */
#define	STRONG_ALIAS(alias,sym)		\
	.globl alias;			\
	alias = sym
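
/*
 * For example (symbol names are hypothetical), libc-style stubs might use:
 *
 *	WEAK_ALIAS(fsync, _fsync)	# fsync may be overridden at link time
 *	STRONG_ALIAS(_myfunc, myfunc)	# _myfunc is always the same as myfunc
 */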

/*
 * WARN_REFERENCES: create a warning if the specified symbol is referenced.
 */
#define	WARN_REFERENCES(sym,msg)				\
	.pushsection __CONCAT(.gnu.warning.,sym);		\
	.ascii msg;						\
	.popsection
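
/*
 * E.g. (illustrative):
 *	WARN_REFERENCES(gets, "warning: this program uses gets(), which is unsafe.")
 */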

/*
 * STATIC_LEAF_NOPROFILE
 *	Declare a local leaf routine that is not profiled.
 */
#define	STATIC_LEAF_NOPROFILE(x)	\
	.ent	_C_LABEL(x);		\
_C_LABEL(x): ;				\
	.frame sp, 0, ra

/*
 * LEAF_NOPROFILE
 *	Declare a global leaf routine that is not profiled.
 */
#define	LEAF_NOPROFILE(x)		\
	.globl	_C_LABEL(x);		\
	STATIC_LEAF_NOPROFILE(x)

/*
 * STATIC_LEAF
 *	Declare a local leaf function.
 */
#define	STATIC_LEAF(x)			\
	STATIC_LEAF_NOPROFILE(x);	\
	MCOUNT

/*
 * LEAF
 *	A leaf routine does
 *	- call no other function,
 *	- never use any register that is callee-saved (S0-S8), and
 *	- not use any local stack storage.
 */
#define	LEAF(x)				\
	LEAF_NOPROFILE(x);		\
	MCOUNT
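
/*
 * A minimal sketch of a leaf routine written with these macros (the
 * function name is made up):
 *
 * LEAF(asm_identity)			# int asm_identity(int x) { return x; }
 *	move	v0, a0
 *	jr	ra
 * END(asm_identity)
 */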

/*
 * STATIC_XLEAF
 *	declare alternate entry to a static leaf routine
 */
#define	STATIC_XLEAF(x)			\
	AENT (_C_LABEL(x));		\
_C_LABEL(x):

/*
 * XLEAF
 *	declare alternate entry to leaf routine
 */
#define	XLEAF(x)			\
	.globl	_C_LABEL(x);		\
	STATIC_XLEAF(x)

/*
 * STATIC_NESTED_NOPROFILE
 *	Declare a local nested routine that is not profiled.
 */
#define	STATIC_NESTED_NOPROFILE(x, fsize, retpc)	\
	.ent	_C_LABEL(x);				\
	.type	_C_LABEL(x), @function;			\
_C_LABEL(x): ;						\
	.frame	sp, fsize, retpc

/*
 * NESTED_NOPROFILE
 *	Declare a global nested routine that is not profiled.
 */
#define	NESTED_NOPROFILE(x, fsize, retpc)	\
	.globl	_C_LABEL(x);			\
	STATIC_NESTED_NOPROFILE(x, fsize, retpc)

/*
 * NESTED
 *	A function that calls other functions and therefore needs
 *	stack space to save/restore registers.
 */
#define	NESTED(x, fsize, retpc)			\
	NESTED_NOPROFILE(x, fsize, retpc);	\
	MCOUNT

/*
 * STATIC_NESTED
 *	Declare a local nested function.
 */
#define	STATIC_NESTED(x, fsize, retpc)			\
	STATIC_NESTED_NOPROFILE(x, fsize, retpc);	\
	MCOUNT

/*
 * XNESTED
 *	declare alternate entry point to nested routine.
 */
#define	XNESTED(x)			\
	.globl	_C_LABEL(x);		\
	AENT (_C_LABEL(x));		\
_C_LABEL(x):

/*
 * END
 *	Mark end of a procedure.
 */
#define	END(x)				\
	.end _C_LABEL(x);		\
	.size _C_LABEL(x), . - _C_LABEL(x)

/*
 * IMPORT -- import external symbol
 */
#define	IMPORT(sym, size)		\
	.extern _C_LABEL(sym),size

/*
 * EXPORT -- export definition of symbol
 */
#define	EXPORT(x)			\
	.globl	_C_LABEL(x);		\
_C_LABEL(x):

/*
 * EXPORT_OBJECT -- export definition of a symbol of type object,
 *	visible to ksyms(4) address search.
 */
#define	EXPORT_OBJECT(x)		\
	EXPORT(x);			\
	.type	_C_LABEL(x), @object;

/*
 * VECTOR
 *	exception vector entrypoint
 *	XXX: regmask should be used to generate .mask
 */
#define	VECTOR(x, regmask)		\
	.ent	_C_LABEL(x);		\
	EXPORT(x);			\

#define	VECTOR_END(x)			\
	EXPORT(__CONCAT(x,_end));	\
	END(x);				\
	.org _C_LABEL(x) + 0x80

/*
 * Macros to panic and printf from assembly language.
 */
#define	PANIC(msg)			\
	PTR_LA	a0, 9f;			\
	jal	_C_LABEL(panic);	\
	nop;				\
	MSG(msg)

#define	PRINTF(msg)			\
	PTR_LA	a0, 9f;			\
	jal	_C_LABEL(printf);	\
	nop;				\
	MSG(msg)

#define	MSG(msg)			\
	.rdata;				\
9:	.asciz	msg;			\
	.text
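
/*
 * For example, PANIC("trap: unexpected exception") (message text is
 * illustrative) loads the address of the string into a0 and calls
 * panic(); PRINTF() works the same way with printf().
 */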

#define	ASMSTR(str)			\
	.asciz str;			\
	.align	3

#define	RCSID(x)	.pushsection ".ident","MS",@progbits,1;	\
			.asciz x;				\
			.popsection

/*
 * XXX retain dialects XXX
 */
#define	ALEAF(x)			XLEAF(x)
#define	NLEAF(x)			LEAF_NOPROFILE(x)
#define	NON_LEAF(x, fsize, retpc)	NESTED(x, fsize, retpc)
#define	NNON_LEAF(x, fsize, retpc)	NESTED_NOPROFILE(x, fsize, retpc)

#if defined(__mips_o32)
#define	SZREG	4
#else
#define	SZREG	8
#endif

#if defined(__mips_o32) || defined(__mips_o64)
#define	ALSK	7		/* stack alignment */
#define	ALMASK	-7		/* stack alignment */
#define	SZFPREG	4
#define	FP_L	lwc1
#define	FP_S	swc1
#else
#define	ALSK	15		/* stack alignment */
#define	ALMASK	-15		/* stack alignment */
#define	SZFPREG	8
#define	FP_L	ldc1
#define	FP_S	sdc1
#endif

/*
 * standard callframe {
 *	register_t cf_args[4];		arg0 - arg3 (only on o32 and o64)
 *	register_t cf_pad[N];		o32/o64 (N=0), n32 (N=1), n64 (N=1)
 *	register_t cf_gp;		global pointer (only on n32 and n64)
 *	register_t cf_sp;		frame pointer
 *	register_t cf_ra;		return address
 * };
 */
#if defined(__mips_o32) || defined(__mips_o64)
#define	CALLFRAME_SIZ	(SZREG * (4 + 2))
#define	CALLFRAME_S0	0
#elif defined(__mips_n32) || defined(__mips_n64)
#define	CALLFRAME_SIZ	(SZREG * 4)
#define	CALLFRAME_S0	(CALLFRAME_SIZ - 4 * SZREG)
#endif
#ifndef _KERNEL
#define	CALLFRAME_GP	(CALLFRAME_SIZ - 3 * SZREG)
#endif
#define	CALLFRAME_SP	(CALLFRAME_SIZ - 2 * SZREG)
#define	CALLFRAME_RA	(CALLFRAME_SIZ - 1 * SZREG)
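
/*
 * A minimal sketch of a non-leaf routine built on these offsets and on
 * the REG_ and PTR_ macros defined below (function names are made up):
 *
 * NESTED(call_helper, CALLFRAME_SIZ, ra)
 *	PTR_SUBU sp, sp, CALLFRAME_SIZ
 *	REG_S	ra, CALLFRAME_RA(sp)
 *	jal	_C_LABEL(helper)
 *	...
 *	REG_L	ra, CALLFRAME_RA(sp)
 *	PTR_ADDU sp, sp, CALLFRAME_SIZ
 *	jr	ra
 * END(call_helper)
 */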

/*
 * While it would be nice to be compatible with the SGI
 * REG_L and REG_S macros, because they do not take parameters, it
 * is impossible to use them with the _MIPS_SIM_ABIX32 model.
 *
 * These macros hide the use of mips3 instructions from the
 * assembler to prevent the assembler from generating 64-bit style
 * ABI calls.
 */
#ifdef __mips_o32
#define	PTR_ADD		add
#define	PTR_ADDI	addi
#define	PTR_ADDU	addu
#define	PTR_ADDIU	addiu
#define	PTR_SUB		subu
#define	PTR_SUBI	subi
#define	PTR_SUBU	subu
#define	PTR_SUBIU	subu
#define	PTR_L		lw
#define	PTR_LA		la
#define	PTR_S		sw
#define	PTR_SLL		sll
#define	PTR_SLLV	sllv
#define	PTR_SRL		srl
#define	PTR_SRLV	srlv
#define	PTR_SRA		sra
#define	PTR_SRAV	srav
#define	PTR_LL		ll
#define	PTR_SC		sc
#define	PTR_WORD	.word
#define	PTR_SCALESHIFT	2
#else /* _MIPS_SZPTR == 64 */
#define	PTR_ADD		dadd
#define	PTR_ADDI	daddi
#define	PTR_ADDU	daddu
#define	PTR_ADDIU	daddiu
#define	PTR_SUB		dsubu
#define	PTR_SUBI	dsubi
#define	PTR_SUBU	dsubu
#define	PTR_SUBIU	dsubu
#ifdef __mips_n32
#define	PTR_L		lw
#define	PTR_LL		ll
#define	PTR_SC		sc
#define	PTR_S		sw
#define	PTR_SCALESHIFT	2
#define	PTR_WORD	.word
#else
#define	PTR_L		ld
#define	PTR_LL		lld
#define	PTR_SC		scd
#define	PTR_S		sd
#define	PTR_SCALESHIFT	3
#define	PTR_WORD	.dword
#endif
#define	PTR_LA		dla
#define	PTR_SLL		dsll
#define	PTR_SLLV	dsllv
#define	PTR_SRL		dsrl
#define	PTR_SRLV	dsrlv
#define	PTR_SRA		dsra
#define	PTR_SRAV	dsrav
#endif /* _MIPS_SZPTR == 64 */
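
/*
 * For instance, indexing a table of pointers whose base is in t0 by
 * the value in t1 can be written ABI-independently as (illustrative):
 *
 *	PTR_SLL	t2, t1, PTR_SCALESHIFT	# t2 = t1 * sizeof(void *)
 *	PTR_ADDU t2, t2, t0
 *	PTR_L	t3, 0(t2)		# load the pointer
 */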

#if _MIPS_SZINT == 32
#define	INT_ADD		add
#define	INT_ADDI	addi
#define	INT_ADDU	addu
#define	INT_ADDIU	addiu
#define	INT_SUB		subu
#define	INT_SUBI	subi
#define	INT_SUBU	subu
#define	INT_SUBIU	subu
#define	INT_L		lw
#define	INT_LA		la
#define	INT_S		sw
#define	INT_SLL		sll
#define	INT_SLLV	sllv
#define	INT_SRL		srl
#define	INT_SRLV	srlv
#define	INT_SRA		sra
#define	INT_SRAV	srav
#define	INT_LL		ll
#define	INT_SC		sc
#define	INT_WORD	.word
#define	INT_SCALESHIFT	2
#else
#define	INT_ADD		dadd
#define	INT_ADDI	daddi
#define	INT_ADDU	daddu
#define	INT_ADDIU	daddiu
#define	INT_SUB		dsubu
#define	INT_SUBI	dsubi
#define	INT_SUBU	dsubu
#define	INT_SUBIU	dsubu
#define	INT_L		ld
#define	INT_LA		dla
#define	INT_S		sd
#define	INT_SLL		dsll
#define	INT_SLLV	dsllv
#define	INT_SRL		dsrl
#define	INT_SRLV	dsrlv
#define	INT_SRA		dsra
#define	INT_SRAV	dsrav
#define	INT_LL		lld
#define	INT_SC		scd
#define	INT_WORD	.dword
#define	INT_SCALESHIFT	3
#endif

#if _MIPS_SZLONG == 32
#define	LONG_ADD	add
#define	LONG_ADDI	addi
#define	LONG_ADDU	addu
#define	LONG_ADDIU	addiu
#define	LONG_SUB	subu
#define	LONG_SUBI	subi
#define	LONG_SUBU	subu
#define	LONG_SUBIU	subu
#define	LONG_L		lw
#define	LONG_LA		la
#define	LONG_S		sw
#define	LONG_SLL	sll
#define	LONG_SLLV	sllv
#define	LONG_SRL	srl
#define	LONG_SRLV	srlv
#define	LONG_SRA	sra
#define	LONG_SRAV	srav
#define	LONG_LL		ll
#define	LONG_SC		sc
#define	LONG_WORD	.word
#define	LONG_SCALESHIFT	2
#else
#define	LONG_ADD	dadd
#define	LONG_ADDI	daddi
#define	LONG_ADDU	daddu
#define	LONG_ADDIU	daddiu
#define	LONG_SUB	dsubu
#define	LONG_SUBI	dsubi
#define	LONG_SUBU	dsubu
#define	LONG_SUBIU	dsubu
#define	LONG_L		ld
#define	LONG_LA		dla
#define	LONG_S		sd
#define	LONG_SLL	dsll
#define	LONG_SLLV	dsllv
#define	LONG_SRL	dsrl
#define	LONG_SRLV	dsrlv
#define	LONG_SRA	dsra
#define	LONG_SRAV	dsrav
#define	LONG_LL		lld
#define	LONG_SC		scd
#define	LONG_WORD	.dword
#define	LONG_SCALESHIFT	3
#endif

#if SZREG == 4
#define	REG_L		lw
#define	REG_S		sw
#define	REG_LI		li
#define	REG_ADDU	addu
#define	REG_SLL		sll
#define	REG_SLLV	sllv
#define	REG_SRL		srl
#define	REG_SRLV	srlv
#define	REG_SRA		sra
#define	REG_SRAV	srav
#define	REG_LL		ll
#define	REG_SC		sc
#define	REG_SCALESHIFT	2
#else
#define	REG_L		ld
#define	REG_S		sd
#define	REG_LI		dli
#define	REG_ADDU	daddu
#define	REG_SLL		dsll
#define	REG_SLLV	dsllv
#define	REG_SRL		dsrl
#define	REG_SRLV	dsrlv
#define	REG_SRA		dsra
#define	REG_SRAV	dsrav
#define	REG_LL		lld
#define	REG_SC		scd
#define	REG_SCALESHIFT	3
#endif

#if (MIPS1 + MIPS2) > 0
#define	NOP_L		nop
#else
#define	NOP_L		/* nothing */
#endif

/* compiler define */
#if defined(__OCTEON__)
/*
 * See common/lib/libc/arch/mips/atomic/membar_ops.S for notes on
 * Octeon memory ordering guarantees and barriers.
 *
 * cnMIPS also has a quirk where the store buffer can get clogged and
 * we need to apply a plunger to it _after_ releasing a lock or else
 * other CPUs may spin for hundreds of thousands of cycles before they
 * see the lock is released.  So we also have the quirky SYNC_PLUNGER
 * barrier as syncw.  See the note in the SYNCW instruction description
 * on p. 2168 of Cavium OCTEON III CN78XX Hardware Reference Manual,
 * CN78XX-HM-0.99E, September 2014:
 *
 *	Core A (writer)
 *
 *	SW R1, DATA#	change shared DATA value
 *	LI R1, 1
 *	SYNCW#		(or SYNCWS) Perform DATA store before performing FLAG store
 *	SW R2, FLAG#	say that the shared DATA value is valid
 *	SYNCW#		(or SYNCWS) Force the FLAG store soon (CN78XX-specific)
 *
 *	...
 *
 *	The second SYNCW instruction executed by core A is not
 *	necessary for correctness, but has very important performance
 *	effects on the CN78XX.  Without it, the store to FLAG may
 *	linger in core A's write buffer before it becomes visible to
 *	any other cores.  (If core A is not performing many stores,
 *	this may add hundreds of thousands of cycles to the flag
 *	release time since the CN78XX core nominally retains stores to
 *	attempt to merge them before sending the store on the CMI.)
 *	Applications should include this second SYNCW instruction after
 *	flag or lock release.
 */
#define	LLSCSYNC	/* nothing */
#define	BDSYNC		sync
#define	BDSYNC_ACQ	nop
#define	SYNC_ACQ	/* nothing */
#define	SYNC_REL	sync 4
#define	BDSYNC_PLUNGER	sync 4
#define	SYNC_PLUNGER	sync 4
#elif __mips >= 3 || !defined(__mips_o32)
#define	LLSCSYNC	/* nothing */
#define	BDSYNC		sync
#define	BDSYNC_ACQ	sync
#define	SYNC_ACQ	sync
#define	SYNC_REL	sync
#define	BDSYNC_PLUNGER	nop
#define	SYNC_PLUNGER	/* nothing */
#else
#define	LLSCSYNC	/* nothing */
#define	BDSYNC		nop
#define	BDSYNC_ACQ	nop
#define	SYNC_ACQ	/* nothing */
#define	SYNC_REL	/* nothing */
#define	BDSYNC_PLUNGER	nop
#define	SYNC_PLUNGER	/* nothing */
#endif
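
/*
 * A rough sketch of an acquire-style test-and-set built on these
 * barriers and on INT_LL/INT_SC (illustrative only; the real sequences
 * live in the atomic and lock stubs):
 *
 *	LLSCSYNC
 * 1:	INT_LL	t0, 0(a0)		# load linked
 *	bnez	t0, 1b			# already held, spin
 *	li	t0, 1
 *	INT_SC	t0, 0(a0)		# try to claim it
 *	beqz	t0, 1b			# lost the race, retry
 *	SYNC_ACQ			# acquire barrier before critical section
 */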

/* CPU-dependent hook for cp0 load delays */
#if defined(MIPS1) || defined(MIPS2) || defined(MIPS3)
#define	MFC0_HAZARD	sll $0,$0,1	/* superscalar nop */
#else
#define	MFC0_HAZARD	/* nothing */
#endif
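
/*
 * E.g. (illustrative; the Status register index comes from
 * <mips/cpuregs.h>):
 *
 *	mfc0	t0, MIPS_COP_0_STATUS
 *	MFC0_HAZARD			# wait out the cp0 read latency
 *	and	t0, t0, t1
 */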

#if _MIPS_ISA == _MIPS_ISA_MIPS1 || _MIPS_ISA == _MIPS_ISA_MIPS2 || \
    _MIPS_ISA == _MIPS_ISA_MIPS32
#define	MFC0		mfc0
#define	MTC0		mtc0
#endif
#if _MIPS_ISA == _MIPS_ISA_MIPS3 || _MIPS_ISA == _MIPS_ISA_MIPS4 || \
    _MIPS_ISA == _MIPS_ISA_MIPS64
#define	MFC0		dmfc0
#define	MTC0		dmtc0
#endif

#if defined(__mips_o32) || defined(__mips_o64)

#ifdef __mips_abicalls
#define	CPRESTORE(r)	.cprestore r
#define	CPLOAD(r)	.cpload r
#else
#define	CPRESTORE(r)	/* not needed */
#define	CPLOAD(r)	/* not needed */
#endif

#define	SETUP_GP	\
	.set push;	\
	.set noreorder;	\
	.cpload	t9;	\
	.set pop
#define	SETUP_GPX(r)	\
	.set push;	\
	.set noreorder;	\
	move	r,ra;	/* save old ra */	\
	bal	7f;	\
	nop;		\
7:	.cpload	ra;	\
	move	ra,r;	\
	.set pop
#define	SETUP_GPX_L(r,lbl)	\
	.set push;	\
	.set noreorder;	\
	move	r,ra;	/* save old ra */	\
	bal	lbl;	\
	nop;		\
lbl:	.cpload	ra;	\
	move	ra,r;	\
	.set pop
#define	SAVE_GP(x)	.cprestore x

#define	SETUP_GP64(a,b)		/* n32/n64 specific */
#define	SETUP_GP64_R(a,b)	/* n32/n64 specific */
#define	SETUP_GPX64(a,b)	/* n32/n64 specific */
#define	SETUP_GPX64_L(a,b,c)	/* n32/n64 specific */
#define	RESTORE_GP64		/* n32/n64 specific */
#define	USE_ALT_CP(a)		/* n32/n64 specific */
#endif /* __mips_o32 || __mips_o64 */
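
/*
 * In PIC userland code an o32 routine conventionally issues SETUP_GP
 * before touching the stack and records the gp save slot with SAVE_GP
 * once the frame is allocated; a rough sketch (function name made up):
 *
 * NESTED(pic_func, CALLFRAME_SIZ, ra)
 *	SETUP_GP
 *	PTR_SUBU sp, sp, CALLFRAME_SIZ
 *	SAVE_GP(CALLFRAME_GP)
 *	REG_S	ra, CALLFRAME_RA(sp)
 *	...
 */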

#if defined(__mips_o32) || defined(__mips_o64)
#define	REG_PROLOGUE	.set push
#define	REG_EPILOGUE	.set pop
#endif
#if defined(__mips_n32) || defined(__mips_n64)
#define	REG_PROLOGUE	.set push ; .set mips3
#define	REG_EPILOGUE	.set pop
#endif

#if defined(__mips_n32) || defined(__mips_n64)
#define	SETUP_GP		/* o32 specific */
#define	SETUP_GPX(r)		/* o32 specific */
#define	SETUP_GPX_L(r,lbl)	/* o32 specific */
#define	SAVE_GP(x)		/* o32 specific */
#define	SETUP_GP64(a,b)		.cpsetup t9, a, b
#define	SETUP_GPX64(a,b)	\
	.set push;		\
	move	b,ra;		\
	.set noreorder;		\
	bal	7f;		\
	nop;			\
7:	.set pop;		\
	.cpsetup ra, a, 7b;	\
	move	ra,b
#define	SETUP_GPX64_L(a,b,c)	\
	.set push;		\
	move	b,ra;		\
	.set noreorder;		\
	bal	c;		\
	nop;			\
c:	.set pop;		\
	.cpsetup ra, a, c;	\
	move	ra,b
#define	RESTORE_GP64		.cpreturn
#define	USE_ALT_CP(a)		.cplocal a
#endif /* __mips_n32 || __mips_n64 */
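
/*
 * On n32/n64 the analogous sketch uses .cpsetup via SETUP_GP64 and
 * undoes it with RESTORE_GP64 before returning (illustrative only; the
 * gp save slot may also be a register):
 *
 * NESTED(pic_func64, CALLFRAME_SIZ, ra)
 *	PTR_SUBU sp, sp, CALLFRAME_SIZ
 *	SETUP_GP64(CALLFRAME_GP, pic_func64)
 *	REG_S	ra, CALLFRAME_RA(sp)
 *	...
 *	RESTORE_GP64
 *	REG_L	ra, CALLFRAME_RA(sp)
 *	PTR_ADDU sp, sp, CALLFRAME_SIZ
 *	jr	ra
 * END(pic_func64)
 */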

/*
 * The DYNAMIC_STATUS_MASK option adds an additional masking operation
 * when updating the hardware interrupt mask in the status register.
 *
 * This is useful for platforms that need to mask interrupts at
 * run time based on motherboard configuration or to handle
 * slowly clearing interrupts.
 *
 * XXX this is only currently implemented for mips3.
 */
#ifdef MIPS_DYNAMIC_STATUS_MASK
#define	DYNAMIC_STATUS_MASK(sr,scratch)			\
	lw	scratch, mips_dynamic_status_mask;	\
	and	sr, sr, scratch

#define	DYNAMIC_STATUS_MASK_TOUSER(sr,scratch1)		\
	ori	sr, (MIPS_INT_MASK | MIPS_SR_INT_IE);	\
	DYNAMIC_STATUS_MASK(sr,scratch1)
#else
#define	DYNAMIC_STATUS_MASK(sr,scratch)
#define	DYNAMIC_STATUS_MASK_TOUSER(sr,scratch1)
#endif

/* See lock_stubs.S. */
#define	LOG2_MIPS_LOCK_RAS_SIZE	8
#define	MIPS_LOCK_RAS_SIZE	256	/* 16 bytes left over */

#define	CPUVAR(off) _C_LABEL(cpu_info_store)+__CONCAT(CPU_INFO_,off)
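
/*
 * E.g. (illustrative; the CPU_INFO_* offsets come from assym.h):
 *	PTR_L	t0, CPUVAR(CURLWP)
 */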

#endif /* _MIPS_ASM_H */