/*	$NetBSD: asm.h,v 1.73 2023/02/20 13:30:47 riastradh Exp $	*/

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Ralph Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)machAsmDefs.h	8.1 (Berkeley) 6/10/93
 */

/*
 * machAsmDefs.h --
 *
 *	Macros used when writing assembler programs.
 *
 *	Copyright (C) 1989 Digital Equipment Corporation.
 *	Permission to use, copy, modify, and distribute this software and
 *	its documentation for any purpose and without fee is hereby granted,
 *	provided that the above copyright notice appears in all copies.
 *	Digital Equipment Corporation makes no representations about the
 *	suitability of this software for any purpose.  It is provided "as is"
 *	without express or implied warranty.
 *
 * from: Header: /sprite/src/kernel/mach/ds3100.md/RCS/machAsmDefs.h,
 *	v 1.2 89/08/15 18:28:24 rab Exp  SPRITE (DECWRL)
 */

#ifndef _MIPS_ASM_H
#define	_MIPS_ASM_H

#include <sys/cdefs.h>		/* for API selection */
#include <mips/regdef.h>

#if defined(_KERNEL_OPT)
#include "opt_gprof.h"
#include "opt_multiprocessor.h"
#endif

#ifdef __ASSEMBLER__
#define	__BIT(n)	(1 << (n))
#define	__BITS(hi,lo)	((~((~0)<<((hi)+1)))&((~0)<<(lo)))

#define	__LOWEST_SET_BIT(__mask) ((((__mask) - 1) & (__mask)) ^ (__mask))
#define	__SHIFTOUT(__x, __mask)	(((__x) & (__mask)) / __LOWEST_SET_BIT(__mask))
#define	__SHIFTIN(__x, __mask)	((__x) * __LOWEST_SET_BIT(__mask))
#endif	/* __ASSEMBLER__ */
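
/*
 * Illustrative sketch: __SHIFTIN and __SHIFTOUT move a value into and
 * out of the field described by a bit mask, so assembly code can build
 * and decode register values symbolically.  The field below is a
 * hypothetical example, not a real hardware field.
 *
 *	#define	EXAMPLE_FIELD	__BITS(7,4)
 *	li	t0, __SHIFTIN(0x5, EXAMPLE_FIELD)	# t0 = 0x50
 *	li	t1, __SHIFTOUT(0x50, EXAMPLE_FIELD)	# t1 = 0x5
 */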

/*
 * Define -pg profile entry code.
 * Must always be noreorder, must never use a macro instruction.
 */
#if defined(__mips_o32)	/* Old 32-bit ABI */
/*
 * The old ABI version must also pop two fewer words off the
 * stack, and the final addiu to t9 must always equal the size of this
 * _MIPS_ASM_MCOUNT.
 */
#define	_MIPS_ASM_MCOUNT					\
	.set	push;						\
	.set	noreorder;					\
	.set	noat;						\
	subu	sp,16;						\
	sw	t9,12(sp);					\
	move	AT,ra;						\
	lui	t9,%hi(_mcount);				\
	addiu	t9,t9,%lo(_mcount);				\
	jalr	t9;						\
	nop;							\
	lw	t9,4(sp);					\
	addiu	sp,8;						\
	addiu	t9,40;						\
	.set	pop;
#elif defined(__mips_o64)	/* Old 64-bit ABI */
# error yeahnah
#else /* New (n32/n64) ABI */
/*
 * The new ABI version just needs to put the return address in AT and
 * call _mcount().  For the no abicalls case, skip the reloc dance.
 */
#ifdef __mips_abicalls
#define	_MIPS_ASM_MCOUNT					\
	.set	push;						\
	.set	noreorder;					\
	.set	noat;						\
	subu	sp,16;						\
	sw	t9,8(sp);					\
	move	AT,ra;						\
	lui	t9,%hi(_mcount);				\
	addiu	t9,t9,%lo(_mcount);				\
	jalr	t9;						\
	nop;							\
	lw	t9,8(sp);					\
	addiu	sp,16;						\
	.set	pop;
#else /* !__mips_abicalls */
#define	_MIPS_ASM_MCOUNT					\
	.set	push;						\
	.set	noreorder;					\
	.set	noat;						\
	move	AT,ra;						\
	jal	_mcount;					\
	nop;							\
	.set	pop;
#endif /* !__mips_abicalls */
#endif /* n32/n64 */

#ifdef GPROF
#define	MCOUNT	_MIPS_ASM_MCOUNT
#else
#define	MCOUNT
#endif

#ifdef USE_AENT
#define	AENT(x)				\
	.aent	x, 0
#else
#define	AENT(x)
#endif

/*
 * WEAK_ALIAS: create a weak alias.
 */
#define	WEAK_ALIAS(alias,sym)		\
	.weak alias;			\
	alias = sym
/*
 * STRONG_ALIAS: create a strong alias.
 */
#define	STRONG_ALIAS(alias,sym)		\
	.globl alias;			\
	alias = sym
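
/*
 * Illustrative usage: the alias (first argument) resolves to the
 * strong symbol (second argument).  The names here are hypothetical.
 *
 *	WEAK_ALIAS(example_func, _example_func)
 *	STRONG_ALIAS(_example_func2, example_func2)
 */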

/*
 * WARN_REFERENCES: create a warning if the specified symbol is referenced.
 */
#define	WARN_REFERENCES(sym,msg)					\
	.pushsection __CONCAT(.gnu.warning.,sym);			\
	.ascii msg;							\
	.popsection
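
/*
 * Illustrative usage: emit a link-time warning whenever the named
 * symbol is referenced, e.g. for a deprecated or unsafe routine.
 *
 *	WARN_REFERENCES(gets, "warning: reference to unsafe gets()")
 */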

/*
 * STATIC_LEAF_NOPROFILE
 *	No profilable local leaf routine.
 */
#define	STATIC_LEAF_NOPROFILE(x)	\
	.ent	_C_LABEL(x);		\
_C_LABEL(x): ;				\
	.frame	sp, 0, ra

/*
 * LEAF_NOPROFILE
 *	No profilable leaf routine.
 */
#define	LEAF_NOPROFILE(x)		\
	.globl	_C_LABEL(x);		\
	STATIC_LEAF_NOPROFILE(x)

/*
 * STATIC_LEAF
 *	Declare a local leaf function.
 */
#define	STATIC_LEAF(x)			\
	STATIC_LEAF_NOPROFILE(x);	\
	MCOUNT

/*
 * LEAF
 *	A leaf routine does
 *	- call no other function,
 *	- never use any callee-saved register (S0-S8), and
 *	- not use any local stack storage.
 */
#define	LEAF(x)				\
	LEAF_NOPROFILE(x);		\
	MCOUNT

/*
 * STATIC_XLEAF
 *	declare alternate entry to a static leaf routine
 */
#define	STATIC_XLEAF(x)			\
	AENT (_C_LABEL(x));		\
_C_LABEL(x):

/*
 * XLEAF
 *	declare alternate entry to leaf routine
 */
#define	XLEAF(x)			\
	.globl	_C_LABEL(x);		\
	STATIC_XLEAF(x)

/*
 * STATIC_NESTED_NOPROFILE
 *	No profilable local nested routine.
 */
#define	STATIC_NESTED_NOPROFILE(x, fsize, retpc)	\
	.ent	_C_LABEL(x);				\
	.type	_C_LABEL(x), @function;			\
_C_LABEL(x): ;						\
	.frame	sp, fsize, retpc

/*
 * NESTED_NOPROFILE
 *	No profilable nested routine.
 */
#define	NESTED_NOPROFILE(x, fsize, retpc)	\
	.globl	_C_LABEL(x);			\
	STATIC_NESTED_NOPROFILE(x, fsize, retpc)

/*
 * NESTED
 *	A function that calls other functions and therefore
 *	needs stack space to save/restore registers.
 */
#define	NESTED(x, fsize, retpc)			\
	NESTED_NOPROFILE(x, fsize, retpc);	\
	MCOUNT

/*
 * STATIC_NESTED
 *	Declare a local nested routine.
 */
#define	STATIC_NESTED(x, fsize, retpc)			\
	STATIC_NESTED_NOPROFILE(x, fsize, retpc);	\
	MCOUNT

/*
 * XNESTED
 *	declare alternate entry point to nested routine.
 */
#define	XNESTED(x)			\
	.globl	_C_LABEL(x);		\
	AENT (_C_LABEL(x));		\
_C_LABEL(x):

/*
 * END
 *	Mark end of a procedure.
 */
#define	END(x)				\
	.end _C_LABEL(x);		\
	.size _C_LABEL(x), . - _C_LABEL(x)
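
/*
 * Illustrative sketch: a minimal leaf routine bracketed by LEAF/END.
 * The function name is hypothetical; in the default reorder mode the
 * assembler fills the branch delay slot.
 *
 *	LEAF(example_add)		# int example_add(int a, int b)
 *		addu	v0, a0, a1
 *		jr	ra
 *	END(example_add)
 */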

/*
 * IMPORT -- import external symbol
 */
#define	IMPORT(sym, size)		\
	.extern _C_LABEL(sym),size

/*
 * EXPORT -- export definition of symbol
 */
#define	EXPORT(x)			\
	.globl	_C_LABEL(x);		\
_C_LABEL(x):

/*
 * EXPORT_OBJECT -- export definition of a symbol of type Object,
 *	visible to ksyms(4) address search.
 */
#define	EXPORT_OBJECT(x)		\
	EXPORT(x);			\
	.type	_C_LABEL(x), @object;

/*
 * VECTOR
 *	exception vector entrypoint
 *	XXX: regmask should be used to generate .mask
 */
#define	VECTOR(x, regmask)		\
	.ent	_C_LABEL(x);		\
	EXPORT(x);			\

#define	VECTOR_END(x)			\
	EXPORT(__CONCAT(x,_end));	\
	END(x);				\
	.org _C_LABEL(x) + 0x80

/*
 * Macros to panic and printf from assembly language.
 */
#define	PANIC(msg)			\
	PTR_LA	a0, 9f;			\
	jal	_C_LABEL(panic);	\
	nop;				\
	MSG(msg)

#define	PRINTF(msg)			\
	PTR_LA	a0, 9f;			\
	jal	_C_LABEL(printf);	\
	nop;				\
	MSG(msg)

#define	MSG(msg)			\
	.rdata;				\
9:	.asciz	msg;			\
	.text
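
/*
 * Illustrative usage: PANIC stashes the message in .rdata and calls
 * panic(), which does not return.  The message text is hypothetical.
 *
 *	PANIC("example: unexpected exception")
 */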

#define	ASMSTR(str)			\
	.asciz str;			\
	.align	3

#define	RCSID(x)	.pushsection ".ident","MS",@progbits,1;		\
			.asciz x;					\
			.popsection

/*
 * XXX retain dialects XXX
 */
#define	ALEAF(x)			XLEAF(x)
#define	NLEAF(x)			LEAF_NOPROFILE(x)
#define	NON_LEAF(x, fsize, retpc)	NESTED(x, fsize, retpc)
#define	NNON_LEAF(x, fsize, retpc)	NESTED_NOPROFILE(x, fsize, retpc)

#if defined(__mips_o32)
#define	SZREG	4
#else
#define	SZREG	8
#endif

#if defined(__mips_o32) || defined(__mips_o64)
#define	ALSK	7		/* stack alignment */
#define	ALMASK	-7		/* stack alignment */
#define	SZFPREG	4
#define	FP_L	lwc1
#define	FP_S	swc1
#else
#define	ALSK	15		/* stack alignment */
#define	ALMASK	-15		/* stack alignment */
#define	SZFPREG	8
#define	FP_L	ldc1
#define	FP_S	sdc1
#endif

/*
 * standard callframe {
 *	register_t cf_args[4];		arg0 - arg3 (only on o32 and o64)
 *	register_t cf_pad[N];		o32/64 (N=0), n32 (N=1) n64 (N=1)
 *	register_t cf_gp;		global pointer (only on n32 and n64)
 *	register_t cf_sp;		frame pointer
 *	register_t cf_ra;		return address
 * };
 */
#if defined(__mips_o32) || defined(__mips_o64)
#define	CALLFRAME_SIZ	(SZREG * (4 + 2))
#define	CALLFRAME_S0	0
#elif defined(__mips_n32) || defined(__mips_n64)
#define	CALLFRAME_SIZ	(SZREG * 4)
#define	CALLFRAME_S0	(CALLFRAME_SIZ - 4 * SZREG)
#endif
#ifndef _KERNEL
#define	CALLFRAME_GP	(CALLFRAME_SIZ - 3 * SZREG)
#endif
#define	CALLFRAME_SP	(CALLFRAME_SIZ - 2 * SZREG)
#define	CALLFRAME_RA	(CALLFRAME_SIZ - 1 * SZREG)
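
/*
 * Illustrative sketch: a nested routine that allocates a standard
 * callframe and saves/restores ra at the offsets above.  The function
 * names are hypothetical, and the PTR_ and REG_ macros used here are
 * defined further below.
 *
 *	NESTED(example_caller, CALLFRAME_SIZ, ra)
 *		PTR_SUBU sp, sp, CALLFRAME_SIZ
 *		REG_S	ra, CALLFRAME_RA(sp)
 *		jal	_C_LABEL(example_helper)
 *		REG_L	ra, CALLFRAME_RA(sp)
 *		PTR_ADDU sp, sp, CALLFRAME_SIZ
 *		jr	ra
 *	END(example_caller)
 */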

/*
 * While it would be nice to be compatible with the SGI
 * REG_L and REG_S macros, because they do not take parameters, it
 * is impossible to use them with the _MIPS_SIM_ABIX32 model.
 *
 * These macros hide the use of mips3 instructions from the
 * assembler to prevent the assembler from generating 64-bit style
 * ABI calls.
 */
#ifdef __mips_o32
#define	PTR_ADD		add
#define	PTR_ADDI	addi
#define	PTR_ADDU	addu
#define	PTR_ADDIU	addiu
#define	PTR_SUB		subu
#define	PTR_SUBI	subi
#define	PTR_SUBU	subu
#define	PTR_SUBIU	subu
#define	PTR_L		lw
#define	PTR_LA		la
#define	PTR_S		sw
#define	PTR_SLL		sll
#define	PTR_SLLV	sllv
#define	PTR_SRL		srl
#define	PTR_SRLV	srlv
#define	PTR_SRA		sra
#define	PTR_SRAV	srav
#define	PTR_LL		ll
#define	PTR_SC		sc
#define	PTR_WORD	.word
#define	PTR_SCALESHIFT	2
#else /* _MIPS_SZPTR == 64 */
#define	PTR_ADD		dadd
#define	PTR_ADDI	daddi
#define	PTR_ADDU	daddu
#define	PTR_ADDIU	daddiu
#define	PTR_SUB		dsubu
#define	PTR_SUBI	dsubi
#define	PTR_SUBU	dsubu
#define	PTR_SUBIU	dsubu
#ifdef __mips_n32
#define	PTR_L		lw
#define	PTR_LL		ll
#define	PTR_SC		sc
#define	PTR_S		sw
#define	PTR_SCALESHIFT	2
#define	PTR_WORD	.word
#else
#define	PTR_L		ld
#define	PTR_LL		lld
#define	PTR_SC		scd
#define	PTR_S		sd
#define	PTR_SCALESHIFT	3
#define	PTR_WORD	.dword
#endif
#define	PTR_LA		dla
#define	PTR_SLL		dsll
#define	PTR_SLLV	dsllv
#define	PTR_SRL		dsrl
#define	PTR_SRLV	dsrlv
#define	PTR_SRA		dsra
#define	PTR_SRAV	dsrav
#endif /* _MIPS_SZPTR == 64 */
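
/*
 * Illustrative usage: the PTR_ macros let the same source index a
 * table of pointers on any ABI.  The table name is hypothetical.
 *
 *	PTR_LA	t0, _C_LABEL(example_table)	# &example_table[0]
 *	PTR_SLL	t1, a0, PTR_SCALESHIFT		# index -> byte offset
 *	PTR_ADDU t0, t0, t1
 *	PTR_L	v0, 0(t0)			# example_table[a0]
 */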

#if _MIPS_SZINT == 32
#define	INT_ADD		add
#define	INT_ADDI	addi
#define	INT_ADDU	addu
#define	INT_ADDIU	addiu
#define	INT_SUB		subu
#define	INT_SUBI	subi
#define	INT_SUBU	subu
#define	INT_SUBIU	subu
#define	INT_L		lw
#define	INT_LA		la
#define	INT_S		sw
#define	INT_SLL		sll
#define	INT_SLLV	sllv
#define	INT_SRL		srl
#define	INT_SRLV	srlv
#define	INT_SRA		sra
#define	INT_SRAV	srav
#define	INT_LL		ll
#define	INT_SC		sc
#define	INT_WORD	.word
#define	INT_SCALESHIFT	2
#else
#define	INT_ADD		dadd
#define	INT_ADDI	daddi
#define	INT_ADDU	daddu
#define	INT_ADDIU	daddiu
#define	INT_SUB		dsubu
#define	INT_SUBI	dsubi
#define	INT_SUBU	dsubu
#define	INT_SUBIU	dsubu
#define	INT_L		ld
#define	INT_LA		dla
#define	INT_S		sd
#define	INT_SLL		dsll
#define	INT_SLLV	dsllv
#define	INT_SRL		dsrl
#define	INT_SRLV	dsrlv
#define	INT_SRA		dsra
#define	INT_SRAV	dsrav
#define	INT_LL		lld
#define	INT_SC		scd
#define	INT_WORD	.dword
#define	INT_SCALESHIFT	3
#endif

#if _MIPS_SZLONG == 32
#define	LONG_ADD	add
#define	LONG_ADDI	addi
#define	LONG_ADDU	addu
#define	LONG_ADDIU	addiu
#define	LONG_SUB	subu
#define	LONG_SUBI	subi
#define	LONG_SUBU	subu
#define	LONG_SUBIU	subu
#define	LONG_L		lw
#define	LONG_LA		la
#define	LONG_S		sw
#define	LONG_SLL	sll
#define	LONG_SLLV	sllv
#define	LONG_SRL	srl
#define	LONG_SRLV	srlv
#define	LONG_SRA	sra
#define	LONG_SRAV	srav
#define	LONG_LL		ll
#define	LONG_SC		sc
#define	LONG_WORD	.word
#define	LONG_SCALESHIFT	2
#else
#define	LONG_ADD	dadd
#define	LONG_ADDI	daddi
#define	LONG_ADDU	daddu
#define	LONG_ADDIU	daddiu
#define	LONG_SUB	dsubu
#define	LONG_SUBI	dsubi
#define	LONG_SUBU	dsubu
#define	LONG_SUBIU	dsubu
#define	LONG_L		ld
#define	LONG_LA		dla
#define	LONG_S		sd
#define	LONG_SLL	dsll
#define	LONG_SLLV	dsllv
#define	LONG_SRL	dsrl
#define	LONG_SRLV	dsrlv
#define	LONG_SRA	dsra
#define	LONG_SRAV	dsrav
#define	LONG_LL		lld
#define	LONG_SC		scd
#define	LONG_WORD	.dword
#define	LONG_SCALESHIFT	3
#endif

#if SZREG == 4
#define	REG_L		lw
#define	REG_S		sw
#define	REG_LI		li
#define	REG_ADDU	addu
#define	REG_SLL		sll
#define	REG_SLLV	sllv
#define	REG_SRL		srl
#define	REG_SRLV	srlv
#define	REG_SRA		sra
#define	REG_SRAV	srav
#define	REG_LL		ll
#define	REG_SC		sc
#define	REG_SCALESHIFT	2
#else
#define	REG_L		ld
#define	REG_S		sd
#define	REG_LI		dli
#define	REG_ADDU	daddu
#define	REG_SLL		dsll
#define	REG_SLLV	dsllv
#define	REG_SRL		dsrl
#define	REG_SRLV	dsrlv
#define	REG_SRA		dsra
#define	REG_SRAV	dsrav
#define	REG_LL		lld
#define	REG_SC		scd
#define	REG_SCALESHIFT	3
#endif

#if (MIPS1 + MIPS2) > 0
#define	NOP_L		nop
#else
#define	NOP_L		/* nothing */
#endif

/* compiler define */
#if defined(MULTIPROCESSOR) && defined(__OCTEON__)
/*
 * See common/lib/libc/arch/mips/atomic/membar_ops.S for notes on
 * Octeon memory ordering guarantees and barriers.
 *
 * cnMIPS also has a quirk where the store buffer can get clogged and
 * we need to apply a plunger to it _after_ releasing a lock or else
 * other CPUs may spin for hundreds of thousands of cycles before they
 * see the lock is released.  So we also have the quirky SYNC_PLUNGER
 * barrier as syncw.  See the note in the SYNCW instruction description
 * on p. 2168 of Cavium OCTEON III CN78XX Hardware Reference Manual,
 * CN78XX-HM-0.99E, September 2014:
 *
 *	Core A (writer)
 *
 *	SW R1, DATA#	change shared DATA value
 *	LI R1, 1
 *	SYNCW#		(or SYNCWS) Perform DATA store before performing FLAG store
 *	SW R2, FLAG#	say that the shared DATA value is valid
 *	SYNCW#		(or SYNCWS) Force the FLAG store soon (CN78XX-specific)
 *
 *	...
 *
 *	The second SYNCW instruction executed by core A is not
 *	necessary for correctness, but has very important performance
 *	effects on the CN78XX.  Without it, the store to FLAG may
 *	linger in core A's write buffer before it becomes visible to
 *	any other cores.  (If core A is not performing many stores,
 *	this may add hundreds of thousands of cycles to the flag
 *	release time since the CN78XX core nominally retains stores to
 *	attempt to merge them before sending the store on the CMI.)
 *	Applications should include this second SYNCW instruction after
 *	flag or lock release.
 */
#define	LLSCSYNC	/* nothing */
#define	BDSYNC		sync
#define	BDSYNC_ACQ	nop
#define	SYNC_ACQ	/* nothing */
#define	SYNC_REL	sync 4
#define	BDSYNC_PLUNGER	sync 4
#define	SYNC_PLUNGER	sync 4
#elif defined(MULTIPROCESSOR) && (__mips >= 3 || !defined(__mips_o32))
#define	LLSCSYNC	/* nothing */
#define	BDSYNC		sync
#define	BDSYNC_ACQ	sync
#define	SYNC_ACQ	sync
#define	SYNC_REL	sync
#define	BDSYNC_PLUNGER	nop
#define	SYNC_PLUNGER	/* nothing */
#else
#define	LLSCSYNC	/* nothing */
#define	BDSYNC		nop
#define	BDSYNC_ACQ	nop
#define	SYNC_ACQ	/* nothing */
#define	SYNC_REL	/* nothing */
#define	BDSYNC_PLUNGER	nop
#define	SYNC_PLUNGER	/* nothing */
#endif
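
/*
 * Illustrative sketch of how the barriers above are meant to bracket
 * an LL/SC lock sequence, loosely modelled on the atomic stubs in
 * common/lib/libc/arch/mips/atomic.  The lock-word layout, register
 * use, and labels are hypothetical.
 *
 *		LLSCSYNC			# before the LL/SC sequence
 *	1:	INT_LL	t0, 0(a0)		# load-linked the lock word
 *		bnez	t0, 1b			# spin while it is held
 *		li	t1, 1
 *		INT_SC	t1, 0(a0)		# try to claim it
 *		beqz	t1, 1b			# retry if the SC failed
 *		SYNC_ACQ			# acquire barrier
 *		...				# critical section
 *		SYNC_REL			# release barrier
 *		INT_S	zero, 0(a0)		# release the lock
 *		SYNC_PLUNGER			# push the store out (cnMIPS)
 */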

/* CPU dependent hook for cp0 load delays */
#if defined(MIPS1) || defined(MIPS2) || defined(MIPS3)
#define	MFC0_HAZARD	sll $0,$0,1	/* super scalar nop */
#else
#define	MFC0_HAZARD	/* nothing */
#endif

#if _MIPS_ISA == _MIPS_ISA_MIPS1 || _MIPS_ISA == _MIPS_ISA_MIPS2 || \
    _MIPS_ISA == _MIPS_ISA_MIPS32
#define	MFC0		mfc0
#define	MTC0		mtc0
#endif
#if _MIPS_ISA == _MIPS_ISA_MIPS3 || _MIPS_ISA == _MIPS_ISA_MIPS4 || \
    _MIPS_ISA == _MIPS_ISA_MIPS64
#define	MFC0		dmfc0
#define	MTC0		dmtc0
#endif
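
/*
 * Illustrative usage: read a cp0 register and wait out the load delay
 * before using the result.  MIPS_COP_0_STATUS is assumed to come from
 * <mips/cpuregs.h>.
 *
 *	mfc0	t0, MIPS_COP_0_STATUS
 *	MFC0_HAZARD			# cp0 read hazard, if any
 *	and	t0, t0, t1
 */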

#if defined(__mips_o32) || defined(__mips_o64)

#ifdef __mips_abicalls
#define	CPRESTORE(r)	.cprestore r
#define	CPLOAD(r)	.cpload r
#else
#define	CPRESTORE(r)	/* not needed */
#define	CPLOAD(r)	/* not needed */
#endif

#define	SETUP_GP	\
	.set push;	\
	.set noreorder;	\
	.cpload	t9;	\
	.set pop
#define	SETUP_GPX(r)	\
	.set push;	\
	.set noreorder;	\
	move	r,ra;	/* save old ra */	\
	bal	7f;	\
	nop;		\
7:	.cpload	ra;	\
	move	ra,r;	\
	.set pop
#define	SETUP_GPX_L(r,lbl)	\
	.set push;		\
	.set noreorder;		\
	move	r,ra;		/* save old ra */	\
	bal	lbl;		\
	nop;			\
lbl:	.cpload	ra;		\
	move	ra,r;		\
	.set pop
#define	SAVE_GP(x)	.cprestore x

#define	SETUP_GP64(a,b)		/* n32/n64 specific */
#define	SETUP_GP64_R(a,b)	/* n32/n64 specific */
#define	SETUP_GPX64(a,b)	/* n32/n64 specific */
#define	SETUP_GPX64_L(a,b,c)	/* n32/n64 specific */
#define	RESTORE_GP64		/* n32/n64 specific */
#define	USE_ALT_CP(a)		/* n32/n64 specific */
#endif	/* __mips_o32 || __mips_o64 */

#if defined(__mips_o32) || defined(__mips_o64)
#define	REG_PROLOGUE	.set push
#define	REG_EPILOGUE	.set pop
#endif
#if defined(__mips_n32) || defined(__mips_n64)
#define	REG_PROLOGUE	.set push ; .set mips3
#define	REG_EPILOGUE	.set pop
#endif

#if defined(__mips_n32) || defined(__mips_n64)
#define	SETUP_GP		/* o32 specific */
#define	SETUP_GPX(r)		/* o32 specific */
#define	SETUP_GPX_L(r,lbl)	/* o32 specific */
#define	SAVE_GP(x)		/* o32 specific */
#define	SETUP_GP64(a,b)		.cpsetup t9, a, b
#define	SETUP_GPX64(a,b)	\
	.set push;		\
	move	b,ra;		\
	.set noreorder;		\
	bal	7f;		\
	nop;			\
7:	.set pop;		\
	.cpsetup ra, a, 7b;	\
	move	ra,b
#define	SETUP_GPX64_L(a,b,c)	\
	.set push;		\
	move	b,ra;		\
	.set noreorder;		\
	bal	c;		\
	nop;			\
c:	.set pop;		\
	.cpsetup ra, a, c;	\
	move	ra,b
#define	RESTORE_GP64		.cpreturn
#define	USE_ALT_CP(a)		.cplocal a
#endif	/* __mips_n32 || __mips_n64 */
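
/*
 * Illustrative sketch: on n32/n64, a PIC routine that makes calls
 * typically saves gp with SETUP_GP64 and restores it with
 * RESTORE_GP64.  The function names are hypothetical, and CALLFRAME_GP
 * is only provided for userland (!_KERNEL).
 *
 *	NESTED(example_pic_call, CALLFRAME_SIZ, ra)
 *		PTR_SUBU sp, sp, CALLFRAME_SIZ
 *		SETUP_GP64(CALLFRAME_GP, example_pic_call)
 *		REG_S	ra, CALLFRAME_RA(sp)
 *		PTR_LA	t9, _C_LABEL(example_helper)
 *		jalr	t9
 *		REG_L	ra, CALLFRAME_RA(sp)
 *		RESTORE_GP64
 *		PTR_ADDU sp, sp, CALLFRAME_SIZ
 *		jr	ra
 *	END(example_pic_call)
 */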

/*
 * The DYNAMIC_STATUS_MASK option adds an additional masking operation
 * when updating the hardware interrupt mask in the status register.
 *
 * This is useful for platforms that need to mask interrupts at
 * run time based on motherboard configuration or to handle
 * slowly clearing interrupts.
 *
 * XXX this is only currently implemented for mips3.
 */
#ifdef MIPS_DYNAMIC_STATUS_MASK
#define	DYNAMIC_STATUS_MASK(sr,scratch)			\
	lw	scratch, mips_dynamic_status_mask;	\
	and	sr, sr, scratch

#define	DYNAMIC_STATUS_MASK_TOUSER(sr,scratch1)		\
	ori	sr, (MIPS_INT_MASK | MIPS_SR_INT_IE);	\
	DYNAMIC_STATUS_MASK(sr,scratch1)
#else
#define	DYNAMIC_STATUS_MASK(sr,scratch)
#define	DYNAMIC_STATUS_MASK_TOUSER(sr,scratch1)
#endif
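
/*
 * Illustrative usage: filter a new status-register value through the
 * run-time mask before writing it back.  MIPS_COP_0_STATUS and
 * MIPS_SR_INT_IE are assumed to come from <mips/cpuregs.h>.
 *
 *	mfc0	t0, MIPS_COP_0_STATUS
 *	ori	t0, MIPS_SR_INT_IE
 *	DYNAMIC_STATUS_MASK(t0, t1)	# no-op unless the option is enabled
 *	mtc0	t0, MIPS_COP_0_STATUS
 */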

/* See lock_stubs.S. */
#define	LOG2_MIPS_LOCK_RAS_SIZE	8
#define	MIPS_LOCK_RAS_SIZE	256	/* 16 bytes left over */

#define	CPUVAR(off) _C_LABEL(cpu_info_store)+__CONCAT(CPU_INFO_,off)

#endif	/* _MIPS_ASM_H */