/*	$NetBSD: asm.h,v 1.74 2023/02/23 14:56:00 riastradh Exp $	*/

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Ralph Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)machAsmDefs.h	8.1 (Berkeley) 6/10/93
 */

/*
 * machAsmDefs.h --
 *
 *	Macros used when writing assembler programs.
 *
 *	Copyright (C) 1989 Digital Equipment Corporation.
 *	Permission to use, copy, modify, and distribute this software and
 *	its documentation for any purpose and without fee is hereby granted,
 *	provided that the above copyright notice appears in all copies.
 *	Digital Equipment Corporation makes no representations about the
 *	suitability of this software for any purpose.  It is provided "as is"
 *	without express or implied warranty.
 *
 * from: Header: /sprite/src/kernel/mach/ds3100.md/RCS/machAsmDefs.h,
 *	v 1.2 89/08/15 18:28:24 rab Exp SPRITE (DECWRL)
 */
54 #ifndef _MIPS_ASM_H
55 #define _MIPS_ASM_H
56
57 #include <sys/cdefs.h> /* for API selection */
58 #include <mips/regdef.h>
59
60 #if defined(_KERNEL_OPT)
61 #include "opt_gprof.h"
62 #include "opt_multiprocessor.h"
63 #endif
64
/*
 * Assembler-only clones of the <sys/cdefs.h> bit-field helpers:
 * __BIT(n) is a single set bit, __BITS(hi,lo) an inclusive mask, and
 * __SHIFTIN/__SHIFTOUT move a value into/out of a mask's position
 * using only +,-,*,/,&,^ so the assembler's expression evaluator
 * can handle them.
 */
#ifdef __ASSEMBLER__
#define	__BIT(n)	(1 << (n))
#define	__BITS(hi,lo)	((~((~0)<<((hi)+1)))&((~0)<<(lo)))

#define	__LOWEST_SET_BIT(__mask) ((((__mask) - 1) & (__mask)) ^ (__mask))
#define	__SHIFTOUT(__x, __mask)	(((__x) & (__mask)) / __LOWEST_SET_BIT(__mask))
#define	__SHIFTIN(__x, __mask)	((__x) * __LOWEST_SET_BIT(__mask))
#endif	/* __ASSEMBLER__ */
73
/*
 * Define -pg profile entry code.
 * Must always be noreorder, must never use a macro instruction.
 */
#if defined(__mips_o32)		/* Old 32-bit ABI */
/*
 * The old ABI version must also decrement two less words off the
 * stack and the final addiu to t9 must always equal the size of this
 * _MIPS_ASM_MCOUNT.
 */
#define	_MIPS_ASM_MCOUNT					\
	.set	push;						\
	.set	noreorder;					\
	.set	noat;						\
	subu	sp,16;						\
	sw	t9,12(sp);					\
	move	AT,ra;						\
	lui	t9,%hi(_mcount);				\
	addiu	t9,t9,%lo(_mcount);				\
	jalr	t9;						\
	nop;							\
	lw	t9,4(sp);					\
	addiu	sp,8;						\
	addiu	t9,40;						\
	.set	pop;
#elif defined(__mips_o64)	/* Old 64-bit ABI */
# error yeahnah
#else				/* New (n32/n64) ABI */
/*
 * The new ABI version just needs to put the return address in AT and
 * call _mcount().  For the no abicalls case, skip the reloc dance.
 */
#ifdef __mips_abicalls
#define	_MIPS_ASM_MCOUNT					\
	.set	push;						\
	.set	noreorder;					\
	.set	noat;						\
	subu	sp,16;						\
	sw	t9,8(sp);					\
	move	AT,ra;						\
	lui	t9,%hi(_mcount);				\
	addiu	t9,t9,%lo(_mcount);				\
	jalr	t9;						\
	nop;							\
	lw	t9,8(sp);					\
	addiu	sp,16;						\
	.set	pop;
#else /* !__mips_abicalls */
#define	_MIPS_ASM_MCOUNT					\
	.set	push;						\
	.set	noreorder;					\
	.set	noat;						\
	move	AT,ra;						\
	jal	_mcount;					\
	nop;							\
	.set	pop;
#endif /* !__mips_abicalls */
#endif /* n32/n64 */

/* MCOUNT expands to the profiling prologue only under -pg kernels. */
#ifdef GPROF
#define	MCOUNT	_MIPS_ASM_MCOUNT
#else
#define	MCOUNT
#endif
138
/*
 * AENT -- emit an .aent (alternate entry) directive for x when the
 * build opts in with USE_AENT; otherwise expands to nothing.
 */
#ifdef USE_AENT
#define	AENT(x)				\
	.aent	x, 0
#else
#define	AENT(x)
#endif
145
/*
 * WEAK_ALIAS: create a weak alias.
 */
#define	WEAK_ALIAS(alias,sym)		\
	.weak alias;			\
	alias = sym
/*
 * STRONG_ALIAS: create a strong alias.
 */
#define	STRONG_ALIAS(alias,sym)		\
	.globl alias;			\
	alias = sym

/*
 * WARN_REFERENCES: create a warning if the specified symbol is referenced
 * (the linker reports msg for any reference to sym via the
 * .gnu.warning.<sym> section convention).
 */
#define	WARN_REFERENCES(sym,msg)				\
	.pushsection __CONCAT(.gnu.warning.,sym);		\
	.ascii msg;						\
	.popsection
166
/*
 * STATIC_LEAF_NOPROFILE
 *	No profilable local leaf routine.
 */
#define	STATIC_LEAF_NOPROFILE(x)	\
	.ent	_C_LABEL(x);		\
_C_LABEL(x): ;				\
	.frame sp, 0, ra

/*
 * LEAF_NOPROFILE
 *	No profilable leaf routine.
 */
#define	LEAF_NOPROFILE(x)		\
	.globl	_C_LABEL(x);		\
	STATIC_LEAF_NOPROFILE(x)

/*
 * STATIC_LEAF
 *	Declare a local leaf function.
 */
#define	STATIC_LEAF(x)			\
	STATIC_LEAF_NOPROFILE(x);	\
	MCOUNT

/*
 * LEAF
 *	A leaf routine does
 *	- call no other function,
 *	- never use any register that callee-saved (S0-S8), and
 *	- not use any local stack storage.
 */
#define	LEAF(x)				\
	LEAF_NOPROFILE(x);		\
	MCOUNT

/*
 * STATIC_XLEAF
 *	declare alternate entry to a static leaf routine
 */
#define	STATIC_XLEAF(x)			\
	AENT (_C_LABEL(x));		\
_C_LABEL(x):

/*
 * XLEAF
 *	declare alternate entry to leaf routine
 */
#define	XLEAF(x)			\
	.globl	_C_LABEL(x);		\
	STATIC_XLEAF(x)
218
/*
 * STATIC_NESTED_NOPROFILE
 *	No profilable local nested routine.
 */
#define	STATIC_NESTED_NOPROFILE(x, fsize, retpc)	\
	.ent	_C_LABEL(x);				\
	.type	_C_LABEL(x), @function;			\
_C_LABEL(x): ;						\
	.frame	sp, fsize, retpc

/*
 * NESTED_NOPROFILE
 *	No profilable nested routine.
 */
#define	NESTED_NOPROFILE(x, fsize, retpc)	\
	.globl	_C_LABEL(x);			\
	STATIC_NESTED_NOPROFILE(x, fsize, retpc)

/*
 * NESTED
 *	A function calls other functions and needs
 *	therefore stack space to save/restore registers.
 */
#define	NESTED(x, fsize, retpc)			\
	NESTED_NOPROFILE(x, fsize, retpc);	\
	MCOUNT

/*
 * STATIC_NESTED
 *	No profilable local nested routine.
 */
#define	STATIC_NESTED(x, fsize, retpc)			\
	STATIC_NESTED_NOPROFILE(x, fsize, retpc);	\
	MCOUNT

/*
 * XNESTED
 *	declare alternate entry point to nested routine.
 */
#define	XNESTED(x)		\
	.globl	_C_LABEL(x);	\
	AENT (_C_LABEL(x));	\
_C_LABEL(x):
262
/*
 * END
 *	Mark end of a procedure.
 */
#define	END(x)					\
	.end _C_LABEL(x);			\
	.size _C_LABEL(x), . - _C_LABEL(x)

/*
 * IMPORT -- import external symbol
 */
#define	IMPORT(sym, size)		\
	.extern _C_LABEL(sym),size

/*
 * EXPORT -- export definition of symbol
 */
#define	EXPORT(x)			\
	.globl	_C_LABEL(x);		\
_C_LABEL(x):

/*
 * EXPORT_OBJECT -- export definition of symbol of symbol
 * type Object, visible to ksyms(4) address search.
 */
#define	EXPORT_OBJECT(x)		\
	EXPORT(x);			\
	.type	_C_LABEL(x), @object;

/*
 * VECTOR
 *	exception vector entrypoint
 *	XXX: regmask should be used to generate .mask
 */
#define	VECTOR(x, regmask)		\
	.ent	_C_LABEL(x);		\
	EXPORT(x);			\

/* VECTOR_END also pads the vector out to its full 0x80-byte slot. */
#define	VECTOR_END(x)			\
	EXPORT(__CONCAT(x,_end));	\
	END(x);				\
	.org _C_LABEL(x) + 0x80
305
/*
 * Macros to panic and printf from assembly language.
 * The message text itself is emitted into .rdata by MSG, with the
 * local label 9: shared between the PTR_LA and the .asciz.
 */
#define	PANIC(msg)			\
	PTR_LA	a0, 9f;		\
	jal	_C_LABEL(panic);	\
	nop;				\
	MSG(msg)

#define	PRINTF(msg)			\
	PTR_LA	a0, 9f;		\
	jal	_C_LABEL(printf);	\
	nop;				\
	MSG(msg)

#define	MSG(msg)			\
	.rdata;				\
9:	.asciz	msg;			\
	.text

#define	ASMSTR(str)			\
	.asciz str;			\
	.align	3

/* Embed an RCS/ident string in the mergeable .ident section. */
#define	RCSID(x)	.pushsection ".ident","MS",@progbits,1;		\
			.asciz x;					\
			.popsection
333
/*
 * XXX retain dialects XXX
 */
#define	ALEAF(x)			XLEAF(x)
#define	NLEAF(x)			LEAF_NOPROFILE(x)
#define	NON_LEAF(x, fsize, retpc)	NESTED(x, fsize, retpc)
#define	NNON_LEAF(x, fsize, retpc)	NESTED_NOPROFILE(x, fsize, retpc)

/* SZREG: size in bytes of one general-purpose register save slot. */
#if defined(__mips_o32)
#define	SZREG	4
#else
#define	SZREG	8
#endif

/*
 * Stack alignment and floating-point register load/store width for the
 * old (o32/o64) versus new (n32/n64) ABIs.
 */
#if defined(__mips_o32) || defined(__mips_o64)
#define	ALSK	7		/* stack alignment */
#define	ALMASK	-7		/* stack alignment */
#define	SZFPREG	4
#define	FP_L	lwc1
#define	FP_S	swc1
#else
#define	ALSK	15		/* stack alignment */
#define	ALMASK	-15		/* stack alignment */
#define	SZFPREG	8
#define	FP_L	ldc1
#define	FP_S	sdc1
#endif
361
/*
 * standard callframe {
 *	register_t cf_args[4];		arg0 - arg3 (only on o32 and o64)
 *	register_t cf_pad[N];		o32/64 (N=0), n32 (N=1) n64 (N=1)
 *	register_t cf_gp;		global pointer (only on n32 and n64)
 *	register_t cf_sp;		frame pointer
 *	register_t cf_ra;		return address
 * };
 */
#if defined(__mips_o32) || defined(__mips_o64)
#define	CALLFRAME_SIZ	(SZREG * (4 + 2))
#define	CALLFRAME_S0	0
#elif defined(__mips_n32) || defined(__mips_n64)
#define	CALLFRAME_SIZ	(SZREG * 4)
#define	CALLFRAME_S0	(CALLFRAME_SIZ - 4 * SZREG)
#endif
#ifndef _KERNEL
#define	CALLFRAME_GP	(CALLFRAME_SIZ - 3 * SZREG)
#endif
#define	CALLFRAME_SP	(CALLFRAME_SIZ - 2 * SZREG)
#define	CALLFRAME_RA	(CALLFRAME_SIZ - 1 * SZREG)
383
/*
 * While it would be nice to be compatible with the SGI
 * REG_L and REG_S macros, because they do not take parameters, it
 * is impossible to use them with the _MIPS_SIM_ABIX32 model.
 *
 * These macros hide the use of mips3 instructions from the
 * assembler to prevent the assembler from generating 64-bit style
 * ABI calls.
 */
#ifdef __mips_o32
#define	PTR_ADD		add
#define	PTR_ADDI	addi
#define	PTR_ADDU	addu
#define	PTR_ADDIU	addiu
#define	PTR_SUB		subu
#define	PTR_SUBI	subi
#define	PTR_SUBU	subu
#define	PTR_SUBIU	subu
#define	PTR_L		lw
#define	PTR_LA		la
#define	PTR_S		sw
#define	PTR_SLL		sll
#define	PTR_SLLV	sllv
#define	PTR_SRL		srl
#define	PTR_SRLV	srlv
#define	PTR_SRA		sra
#define	PTR_SRAV	srav
#define	PTR_LL		ll
#define	PTR_SC		sc
#define	PTR_WORD	.word
#define	PTR_SCALESHIFT	2
#else /* _MIPS_SZPTR == 64 */
#define	PTR_ADD		dadd
#define	PTR_ADDI	daddi
#define	PTR_ADDU	daddu
#define	PTR_ADDIU	daddiu
#define	PTR_SUB		dsubu
#define	PTR_SUBI	dsubi
#define	PTR_SUBU	dsubu
#define	PTR_SUBIU	dsubu
/* n32 keeps 32-bit pointers in memory even though registers are 64-bit. */
#ifdef __mips_n32
#define	PTR_L		lw
#define	PTR_LL		ll
#define	PTR_SC		sc
#define	PTR_S		sw
#define	PTR_SCALESHIFT	2
#define	PTR_WORD	.word
#else
#define	PTR_L		ld
#define	PTR_LL		lld
#define	PTR_SC		scd
#define	PTR_S		sd
#define	PTR_SCALESHIFT	3
#define	PTR_WORD	.dword
#endif
#define	PTR_LA		dla
#define	PTR_SLL		dsll
#define	PTR_SLLV	dsllv
#define	PTR_SRL		dsrl
#define	PTR_SRLV	dsrlv
#define	PTR_SRA		dsra
#define	PTR_SRAV	dsrav
#endif /* _MIPS_SZPTR == 64 */
447
/* INT_* -- width-correct instructions for operating on C `int` values. */
#if _MIPS_SZINT == 32
#define	INT_ADD		add
#define	INT_ADDI	addi
#define	INT_ADDU	addu
#define	INT_ADDIU	addiu
#define	INT_SUB		subu
#define	INT_SUBI	subi
#define	INT_SUBU	subu
#define	INT_SUBIU	subu
#define	INT_L		lw
#define	INT_LA		la
#define	INT_S		sw
#define	INT_SLL		sll
#define	INT_SLLV	sllv
#define	INT_SRL		srl
#define	INT_SRLV	srlv
#define	INT_SRA		sra
#define	INT_SRAV	srav
#define	INT_LL		ll
#define	INT_SC		sc
#define	INT_WORD	.word
#define	INT_SCALESHIFT	2
#else
#define	INT_ADD		dadd
#define	INT_ADDI	daddi
#define	INT_ADDU	daddu
#define	INT_ADDIU	daddiu
#define	INT_SUB		dsubu
#define	INT_SUBI	dsubi
#define	INT_SUBU	dsubu
#define	INT_SUBIU	dsubu
#define	INT_L		ld
#define	INT_LA		dla
#define	INT_S		sd
#define	INT_SLL		dsll
#define	INT_SLLV	dsllv
#define	INT_SRL		dsrl
#define	INT_SRLV	dsrlv
#define	INT_SRA		dsra
#define	INT_SRAV	dsrav
#define	INT_LL		lld
#define	INT_SC		scd
#define	INT_WORD	.dword
#define	INT_SCALESHIFT	3
#endif
493
/* LONG_* -- width-correct instructions for operating on C `long` values. */
#if _MIPS_SZLONG == 32
#define	LONG_ADD	add
#define	LONG_ADDI	addi
#define	LONG_ADDU	addu
#define	LONG_ADDIU	addiu
#define	LONG_SUB	subu
#define	LONG_SUBI	subi
#define	LONG_SUBU	subu
#define	LONG_SUBIU	subu
#define	LONG_L		lw
#define	LONG_LA		la
#define	LONG_S		sw
#define	LONG_SLL	sll
#define	LONG_SLLV	sllv
#define	LONG_SRL	srl
#define	LONG_SRLV	srlv
#define	LONG_SRA	sra
#define	LONG_SRAV	srav
#define	LONG_LL		ll
#define	LONG_SC		sc
#define	LONG_WORD	.word
#define	LONG_SCALESHIFT	2
#else
#define	LONG_ADD	dadd
#define	LONG_ADDI	daddi
#define	LONG_ADDU	daddu
#define	LONG_ADDIU	daddiu
#define	LONG_SUB	dsubu
#define	LONG_SUBI	dsubi
#define	LONG_SUBU	dsubu
#define	LONG_SUBIU	dsubu
#define	LONG_L		ld
#define	LONG_LA		dla
#define	LONG_S		sd
#define	LONG_SLL	dsll
#define	LONG_SLLV	dsllv
#define	LONG_SRL	dsrl
#define	LONG_SRLV	dsrlv
#define	LONG_SRA	dsra
#define	LONG_SRAV	dsrav
#define	LONG_LL		lld
#define	LONG_SC		scd
#define	LONG_WORD	.dword
#define	LONG_SCALESHIFT	3
#endif
539
/* REG_* -- instructions sized to a full save/restore register slot. */
#if SZREG == 4
#define	REG_L		lw
#define	REG_S		sw
#define	REG_LI		li
#define	REG_ADDU	addu
#define	REG_SLL		sll
#define	REG_SLLV	sllv
#define	REG_SRL		srl
#define	REG_SRLV	srlv
#define	REG_SRA		sra
#define	REG_SRAV	srav
#define	REG_LL		ll
#define	REG_SC		sc
#define	REG_SCALESHIFT	2
#else
#define	REG_L		ld
#define	REG_S		sd
#define	REG_LI		dli
#define	REG_ADDU	daddu
#define	REG_SLL		dsll
#define	REG_SLLV	dsllv
#define	REG_SRL		dsrl
#define	REG_SRLV	dsrlv
#define	REG_SRA		dsra
#define	REG_SRAV	dsrav
#define	REG_LL		lld
#define	REG_SC		scd
#define	REG_SCALESHIFT	3
#endif
569
/* NOP_L -- load-delay-slot nop, needed only on MIPS1/MIPS2 pipelines. */
#if (MIPS1 + MIPS2) > 0
#define	NOP_L		nop
#else
#define	NOP_L		/* nothing */
#endif
575
/* compiler define */
#if defined(MULTIPROCESSOR) && defined(__OCTEON__)
/*
 * See common/lib/libc/arch/mips/atomic/membar_ops.S for notes on
 * Octeon memory ordering guarantees and barriers.
 *
 * cnMIPS also has a quirk where the store buffer can get clogged and
 * we need to apply a plunger to it _after_ releasing a lock or else
 * other CPUs may spin for hundreds of thousands of cycles before they
 * see the lock is released.  So we also have the quirky SYNC_PLUNGER
 * barrier as syncw.  See the note in the SYNCW instruction description
 * on p. 2168 of Cavium OCTEON III CN78XX Hardware Reference Manual,
 * CN78XX-HM-0.99E, September 2014:
 *
 *	Core A (writer)
 *
 *	SW R1, DATA#	change shared DATA value
 *	LI R1, 1
 *	SYNCW#		(or SYNCWS) Perform DATA store before performing FLAG store
 *	SW R2, FLAG#	say that the shared DATA value is valid
 *	SYNCW#		(or SYNCWS) Force the FLAG store soon (CN78XX-specific)
 *
 *	...
 *
 *	The second SYNCW instruction executed by core A is not
 *	necessary for correctness, but has very important performance
 *	effects on the CN78XX.  Without it, the store to FLAG may
 *	linger in core A's write buffer before it becomes visible to
 *	any other cores.  (If core A is not performing many stores,
 *	this may add hundreds of thousands of cycles to the flag
 *	release time since the CN78XX core nominally retains stores to
 *	attempt to merge them before sending the store on the CMI.)
 *	Applications should include this second SYNCW instruction after
 *	flag or lock release.
 */
#define	LLSCSYNC	/* nothing */
#define	BDSYNC		sync
#define	BDSYNC_ACQ	nop
#define	SYNC_ACQ	/* nothing */
#define	SYNC_REL	sync 4
#define	BDSYNC_PLUNGER	sync 4
#define	SYNC_PLUNGER	sync 4
#elif defined(MULTIPROCESSOR) && (__mips >= 3 || !defined(__mips_o32))
#define	LLSCSYNC	/* nothing */
#define	BDSYNC		sync
#define	BDSYNC_ACQ	sync
#define	SYNC_ACQ	sync
#define	SYNC_REL	sync
#define	BDSYNC_PLUNGER	nop
#define	SYNC_PLUNGER	/* nothing */
#else
/* Uniprocessor, or ISA without sync: barriers degrade to nops/nothing. */
#define	LLSCSYNC	/* nothing */
#define	BDSYNC		nop
#define	BDSYNC_ACQ	nop
#define	SYNC_ACQ	/* nothing */
#define	SYNC_REL	/* nothing */
#define	BDSYNC_PLUNGER	nop
#define	SYNC_PLUNGER	/* nothing */
#endif
635
/*
 * Store-before-load barrier.  Do not use this unless you know what
 * you're doing.
 */
#ifdef MULTIPROCESSOR
#define	SYNC_DEKKER	sync
#else
#define	SYNC_DEKKER	/* nothing */
#endif

/*
 * Store-before-store and load-before-load barriers.  These could be
 * made weaker than release (load/store-before-store) and acquire
 * (load-before-load/store) barriers, and newer MIPS does have
 * instruction encodings for finer-grained barriers like this, but I
 * dunno how to appropriately conditionalize their use or get the
 * assembler to be happy with them, so we'll use these definitions for
 * now.
 */
#define	SYNC_PRODUCER	SYNC_REL
#define	SYNC_CONSUMER	SYNC_ACQ
657
/* CPU dependent hook for cp0 load delays */
#if defined(MIPS1) || defined(MIPS2) || defined(MIPS3)
#define	MFC0_HAZARD	sll $0,$0,1	/* super scalar nop */
#else
#define	MFC0_HAZARD	/* nothing */
#endif

/* Select 32- or 64-bit coprocessor-0 move instructions by ISA level. */
#if	_MIPS_ISA == _MIPS_ISA_MIPS1 || _MIPS_ISA == _MIPS_ISA_MIPS2 || \
	_MIPS_ISA == _MIPS_ISA_MIPS32
#define	MFC0		mfc0
#define	MTC0		mtc0
#endif
#if	_MIPS_ISA == _MIPS_ISA_MIPS3 || _MIPS_ISA == _MIPS_ISA_MIPS4 || \
	_MIPS_ISA == _MIPS_ISA_MIPS64
#define	MFC0		dmfc0
#define	MTC0		dmtc0
#endif
675
#if defined(__mips_o32) || defined(__mips_o64)

/* PIC (abicalls) gp save/load; no-ops for non-PIC builds. */
#ifdef __mips_abicalls
#define	CPRESTORE(r)	.cprestore r
#define	CPLOAD(r)	.cpload r
#else
#define	CPRESTORE(r)	/* not needed */
#define	CPLOAD(r)	/* not needed */
#endif

#define	SETUP_GP	\
	.set push;	\
	.set noreorder;	\
	.cpload	t9;	\
	.set pop
/* SETUP_GPX: derive gp from the pc via bal when t9 is not valid; r scratches ra. */
#define	SETUP_GPX(r)	\
	.set push;	\
	.set noreorder;	\
	move	r,ra;	/* save old ra */	\
	bal	7f;	\
	nop;		\
7:	.cpload	ra;	\
	move	ra,r;	\
	.set pop
#define	SETUP_GPX_L(r,lbl)	\
	.set push;		\
	.set noreorder;		\
	move	r,ra;		/* save old ra */	\
	bal	lbl;		\
	nop;			\
lbl:	.cpload	ra;		\
	move	ra,r;		\
	.set pop
#define	SAVE_GP(x)	.cprestore x

#define	SETUP_GP64(a,b)		/* n32/n64 specific */
#define	SETUP_GP64_R(a,b)	/* n32/n64 specific */
#define	SETUP_GPX64(a,b)	/* n32/n64 specific */
#define	SETUP_GPX64_L(a,b,c)	/* n32/n64 specific */
#define	RESTORE_GP64		/* n32/n64 specific */
#define	USE_ALT_CP(a)		/* n32/n64 specific */
#endif /* __mips_o32 || __mips_o64 */
718
/* REG_PROLOGUE/EPILOGUE bracket code that needs mips3 register width. */
#if defined(__mips_o32) || defined(__mips_o64)
#define	REG_PROLOGUE	.set push
#define	REG_EPILOGUE	.set pop
#endif
#if defined(__mips_n32) || defined(__mips_n64)
#define	REG_PROLOGUE	.set push ; .set mips3
#define	REG_EPILOGUE	.set pop
#endif

#if defined(__mips_n32) || defined(__mips_n64)
#define	SETUP_GP		/* o32 specific */
#define	SETUP_GPX(r)		/* o32 specific */
#define	SETUP_GPX_L(r,lbl)	/* o32 specific */
#define	SAVE_GP(x)		/* o32 specific */
#define	SETUP_GP64(a,b)		.cpsetup t9, a, b
/* SETUP_GPX64: derive gp from the pc via bal; b scratches ra. */
#define	SETUP_GPX64(a,b)	\
	.set push;		\
	move	b,ra;		\
	.set noreorder;		\
	bal	7f;		\
	nop;			\
7:	.set pop;		\
	.cpsetup ra, a, 7b;	\
	move	ra,b
#define	SETUP_GPX64_L(a,b,c)	\
	.set push;		\
	move	b,ra;		\
	.set noreorder;		\
	bal	c;		\
	nop;			\
c:	.set pop;		\
	.cpsetup ra, a, c;	\
	move	ra,b
#define	RESTORE_GP64		.cpreturn
#define	USE_ALT_CP(a)		.cplocal a
#endif /* __mips_n32 || __mips_n64 */
755
756 /*
757 * The DYNAMIC_STATUS_MASK option adds an additional masking operation
758 * when updating the hardware interrupt mask in the status register.
759 *
760 * This is useful for platforms that need to at run-time mask
761 * interrupts based on motherboard configuration or to handle
762 * slowly clearing interrupts.
763 *
764 * XXX this is only currently implemented for mips3.
765 */
766 #ifdef MIPS_DYNAMIC_STATUS_MASK
767 #define DYNAMIC_STATUS_MASK(sr,scratch) \
768 lw scratch, mips_dynamic_status_mask; \
769 and sr, sr, scratch
770
771 #define DYNAMIC_STATUS_MASK_TOUSER(sr,scratch1) \
772 ori sr, (MIPS_INT_MASK | MIPS_SR_INT_IE); \
773 DYNAMIC_STATUS_MASK(sr,scratch1)
774 #else
775 #define DYNAMIC_STATUS_MASK(sr,scratch)
776 #define DYNAMIC_STATUS_MASK_TOUSER(sr,scratch1)
777 #endif
778
779 /* See lock_stubs.S. */
780 #define LOG2_MIPS_LOCK_RAS_SIZE 8
781 #define MIPS_LOCK_RAS_SIZE 256 /* 16 bytes left over */
782
783 #define CPUVAR(off) _C_LABEL(cpu_info_store)+__CONCAT(CPU_INFO_,off)
784
785 #endif /* _MIPS_ASM_H */
786