/*	$NetBSD: asm.h,v 1.77 2025/01/06 10:46:43 martin Exp $	*/

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Ralph Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)machAsmDefs.h	8.1 (Berkeley) 6/10/93
 */

/*
 * machAsmDefs.h --
 *
 *	Macros used when writing assembler programs.
 *
 *	Copyright (C) 1989 Digital Equipment Corporation.
 *	Permission to use, copy, modify, and distribute this software and
 *	its documentation for any purpose and without fee is hereby granted,
 *	provided that the above copyright notice appears in all copies.
 *	Digital Equipment Corporation makes no representations about the
 *	suitability of this software for any purpose.  It is provided "as is"
 *	without express or implied warranty.
 *
 * from: Header: /sprite/src/kernel/mach/ds3100.md/RCS/machAsmDefs.h,
 *	v 1.2 89/08/15 18:28:24 rab Exp  SPRITE (DECWRL)
 */

#ifndef _MIPS_ASM_H
#define	_MIPS_ASM_H

#include <sys/cdefs.h>		/* for API selection */
#include <mips/regdef.h>

#if defined(_KERNEL_OPT)
#include "opt_gprof.h"
#include "opt_multiprocessor.h"
#endif

#ifdef __ASSEMBLER__
#define	__BIT(n)	(1 << (n))
#define	__BITS(hi,lo)	((~((~0)<<((hi)+1)))&((~0)<<(lo)))

#define	__LOWEST_SET_BIT(__mask) ((((__mask) - 1) & (__mask)) ^ (__mask))
#define	__SHIFTOUT(__x, __mask)	(((__x) & (__mask)) / __LOWEST_SET_BIT(__mask))
#define	__SHIFTIN(__x, __mask)	((__x) * __LOWEST_SET_BIT(__mask))
#endif	/* __ASSEMBLER__ */
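
/*
 * Usage sketch (not a definition from this header): __BITS, __SHIFTIN
 * and __SHIFTOUT are assembly-time constant expressions, typically used
 * to build immediates from a field mask.  For a hypothetical 8-bit
 * field occupying bits 15..8 of a register:
 *
 *	li	t0, __BITS(15,8)		# t0 = 0xff00, the field mask
 *	li	t1, __SHIFTIN(5, __BITS(15,8))	# t1 = 5 << 8 = 0x500
 */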

#ifndef GPROF
#define	_MIPS_ASM_MCOUNT(x)
#else
/*
 * Define -pg profile entry code.
 * Must always be noreorder, must never use a macro instruction.
 */
#if defined(__mips_o32)		/* Old 32-bit ABI */
/*
 * The old ABI version must also decrement two less words off the
 * stack and the final addiu to t9 must always equal the size of this
 * _MIPS_ASM_MCOUNT.
 */
#define	_MIPS_ASM_MCOUNT(x)			\
	.set	push;				\
	.set	noreorder;			\
	.set	noat;				\
	subu	sp,16;				\
	sw	t9,12(sp);			\
	move	AT,ra;				\
	lui	t9,%hi(_mcount);		\
	addiu	t9,t9,%lo(_mcount);		\
	jalr	t9;				\
	nop;					\
	lw	t9,4(sp);			\
	addiu	sp,8;				\
	addiu	t9,40;				\
	.set	pop;
#elif defined(__mips_o64)	/* Old 64-bit ABI */
# error yeahnah
#else				/* New (n32/n64) ABI */
/*
 * The new ABI version just needs to put the return address in AT and
 * call _mcount().  For the no abicalls case, skip the reloc dance.
 */
#ifdef __mips_abicalls
#if defined(__mips_n32)		/* n32 */
#define	_MIPS_ASM_MCOUNT(x)			\
	.set	push;				\
	.set	noreorder;			\
	.set	noat;				\
	subu	sp,16;				\
	sw	t9,8(sp);			\
	move	AT,ra;				\
	lui	t9,%hi(_mcount);		\
	addiu	t9,t9,%lo(_mcount);		\
	jalr	t9;				\
	nop;					\
	lw	t9,8(sp);			\
	addiu	sp,16;				\
	.set	pop;
#else				/* n64 */
#define	_MIPS_ASM_MCOUNT(x)			\
	.set	push;				\
	.set	noreorder;			\
	.set	noat;				\
	dsubu	sp,16;				\
	sd	gp,0(sp);			\
	sd	t9,8(sp);			\
	move	AT,ra;				\
	lui	gp,%hi(%neg(%gp_rel(x)));	\
	daddiu	gp,%lo(%neg(%gp_rel(x)));	\
	daddu	gp,gp,t9;			\
	ld	t9,%call16(_mcount)(gp);	\
	jalr	t9;				\
	nop;					\
	ld	gp,0(sp);			\
	ld	t9,8(sp);			\
	daddiu	sp,16;				\
	.set	pop;
#endif
#else	/* !__mips_abicalls */
#define	_MIPS_ASM_MCOUNT(x)			\
	.set	push;				\
	.set	noreorder;			\
	.set	noat;				\
	move	AT,ra;				\
	jal	_mcount;			\
	nop;					\
	.set	pop;
#endif	/* !__mips_abicalls */
#endif	/* n32/n64 */
#endif	/* GPROF */

#ifdef USE_AENT
#define	AENT(x)		\
	.aent	x, 0
#else
#define	AENT(x)
#endif

/*
 * WEAK_ALIAS: create a weak alias.
 */
#define	WEAK_ALIAS(alias,sym)	\
	.weak alias;		\
	alias = sym
/*
 * STRONG_ALIAS: create a strong alias.
 */
#define	STRONG_ALIAS(alias,sym)	\
	.globl alias;		\
	alias = sym
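
/*
 * Usage sketch (hypothetical symbol names): a library commonly defines
 * its real entry point under a private name and publishes the public
 * name as a weak alias, e.g.
 *
 *	WEAK_ALIAS(example_func, _example_func)
 *	STRONG_ALIAS(_example_func_alt, _example_func)
 */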

/*
 * WARN_REFERENCES: create a warning if the specified symbol is referenced.
 */
#define	WARN_REFERENCES(sym,msg)			\
	.pushsection __CONCAT(.gnu.warning.,sym);	\
	.ascii msg;					\
	.popsection

/*
 * STATIC_LEAF_NOPROFILE
 *	Unprofiled local (static) leaf routine.
 */
#define	STATIC_LEAF_NOPROFILE(x)	\
	.ent	_C_LABEL(x);		\
_C_LABEL(x): ;				\
	.frame sp, 0, ra

/*
 * LEAF_NOPROFILE
 *	Unprofiled leaf routine.
 */
#define	LEAF_NOPROFILE(x)	\
	.globl	_C_LABEL(x);	\
	STATIC_LEAF_NOPROFILE(x)

/*
 * STATIC_LEAF
 *	Declare a local leaf function.
 */
#define	STATIC_LEAF(x)			\
	STATIC_LEAF_NOPROFILE(x);	\
	_MIPS_ASM_MCOUNT(x)

/*
 * LEAF
 *	A leaf routine does
 *	- call no other function,
 *	- never use any callee-saved register (S0-S8), and
 *	- not use any local stack storage.
 */
#define	LEAF(x)			\
	LEAF_NOPROFILE(x);	\
	_MIPS_ASM_MCOUNT(x)

/*
 * STATIC_XLEAF
 *	declare alternate entry to a static leaf routine
 */
#define	STATIC_XLEAF(x)		\
	AENT (_C_LABEL(x));	\
_C_LABEL(x):

/*
 * XLEAF
 *	declare alternate entry to a leaf routine
 */
#define	XLEAF(x)		\
	.globl	_C_LABEL(x);	\
	STATIC_XLEAF(x)

/*
 * STATIC_NESTED_NOPROFILE
 *	Unprofiled local (static) nested routine.
 */
#define	STATIC_NESTED_NOPROFILE(x, fsize, retpc)	\
	.ent	_C_LABEL(x);				\
	.type	_C_LABEL(x), @function;			\
_C_LABEL(x): ;						\
	.frame	sp, fsize, retpc

/*
 * NESTED_NOPROFILE
 *	Unprofiled nested routine.
 */
#define	NESTED_NOPROFILE(x, fsize, retpc)	\
	.globl	_C_LABEL(x);			\
	STATIC_NESTED_NOPROFILE(x, fsize, retpc)

/*
 * NESTED
 *	A nested routine calls other functions and therefore needs
 *	stack space to save/restore registers.
 */
#define	NESTED(x, fsize, retpc)			\
	NESTED_NOPROFILE(x, fsize, retpc);	\
	_MIPS_ASM_MCOUNT(x)

/*
 * STATIC_NESTED
 *	Declare a local (static) nested routine.
 */
#define	STATIC_NESTED(x, fsize, retpc)			\
	STATIC_NESTED_NOPROFILE(x, fsize, retpc);	\
	_MIPS_ASM_MCOUNT(x)

/*
 * XNESTED
 *	declare alternate entry point to a nested routine.
 */
#define	XNESTED(x)		\
	.globl	_C_LABEL(x);	\
	AENT (_C_LABEL(x));	\
_C_LABEL(x):

/*
 * END
 *	Mark end of a procedure.
 */
#define	END(x)					\
	.end _C_LABEL(x);			\
	.size _C_LABEL(x), . - _C_LABEL(x)
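
/*
 * Usage sketch (hypothetical routine name): a minimal leaf function
 * written with the macros above; delay-slot scheduling is left to the
 * assembler.
 *
 *	LEAF(example_identity)
 *		move	v0, a0		# return the first argument unchanged
 *		jr	ra
 *	END(example_identity)
 */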

/*
 * IMPORT -- import external symbol
 */
#define	IMPORT(sym, size)	\
	.extern _C_LABEL(sym),size

/*
 * EXPORT -- export definition of symbol
 */
#define	EXPORT(x)		\
	.globl	_C_LABEL(x);	\
_C_LABEL(x):

/*
 * EXPORT_OBJECT -- export definition of a symbol with symbol type
 *	Object, visible to ksyms(4) address search.
 */
#define	EXPORT_OBJECT(x)	\
	EXPORT(x);		\
	.type	_C_LABEL(x), @object;

/*
 * VECTOR
 *	exception vector entrypoint
 *	XXX: regmask should be used to generate .mask
 */
#define	VECTOR(x, regmask)	\
	.ent	_C_LABEL(x);	\
	EXPORT(x);		\

#define	VECTOR_END(x)			\
	EXPORT(__CONCAT(x,_end));	\
	END(x);				\
	.org _C_LABEL(x) + 0x80

/*
 * Macros to panic and printf from assembly language.
 */
#define	PANIC(msg)			\
	PTR_LA	a0, 9f;			\
	jal	_C_LABEL(panic);	\
	nop;				\
	MSG(msg)

#define	PRINTF(msg)			\
	PTR_LA	a0, 9f;			\
	jal	_C_LABEL(printf);	\
	nop;				\
	MSG(msg)

#define	MSG(msg)		\
	.rdata;			\
9:	.asciz	msg;		\
	.text

#define	ASMSTR(str)		\
	.asciz str;		\
	.align	3

#ifdef _NETBSD_REVISIONID
#define	RCSID(x)	.pushsection ".ident","MS",@progbits,1;		\
			.asciz x;					\
			.ascii "$"; .ascii "NetBSD: "; .ascii __FILE__;	\
			.ascii " "; .ascii _NETBSD_REVISIONID;		\
			.asciz " $";					\
			.popsection
#else
#define	RCSID(x)	.pushsection ".ident","MS",@progbits,1;		\
			.asciz x;					\
			.popsection
#endif

/*
 * XXX retain dialects XXX
 */
#define	ALEAF(x)			XLEAF(x)
#define	NLEAF(x)			LEAF_NOPROFILE(x)
#define	NON_LEAF(x, fsize, retpc)	NESTED(x, fsize, retpc)
#define	NNON_LEAF(x, fsize, retpc)	NESTED_NOPROFILE(x, fsize, retpc)

#if defined(__mips_o32)
#define	SZREG	4
#else
#define	SZREG	8
#endif

#if defined(__mips_o32) || defined(__mips_o64)
#define	ALSK	7		/* stack alignment */
#define	ALMASK	-7		/* stack alignment */
#define	SZFPREG	4
#define	FP_L	lwc1
#define	FP_S	swc1
#else
#define	ALSK	15		/* stack alignment */
#define	ALMASK	-15		/* stack alignment */
#define	SZFPREG	8
#define	FP_L	ldc1
#define	FP_S	sdc1
#endif

/*
 * standard callframe {
 *	register_t cf_args[4];		arg0 - arg3 (only on o32 and o64)
 *	register_t cf_pad[N];		o32/o64 (N=0), n32 (N=1), n64 (N=1)
 *	register_t cf_gp;		global pointer (only on n32 and n64)
 *	register_t cf_sp;		frame pointer
 *	register_t cf_ra;		return address
 * };
 */
#if defined(__mips_o32) || defined(__mips_o64)
#define	CALLFRAME_SIZ	(SZREG * (4 + 2))
#define	CALLFRAME_S0	0
#elif defined(__mips_n32) || defined(__mips_n64)
#define	CALLFRAME_SIZ	(SZREG * 4)
#define	CALLFRAME_S0	(CALLFRAME_SIZ - 4 * SZREG)
#endif
#ifndef _KERNEL
#define	CALLFRAME_GP	(CALLFRAME_SIZ - 3 * SZREG)
#endif
#define	CALLFRAME_SP	(CALLFRAME_SIZ - 2 * SZREG)
#define	CALLFRAME_RA	(CALLFRAME_SIZ - 1 * SZREG)
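
/*
 * Usage sketch (hypothetical routines, non-PIC): a nested function that
 * allocates a standard call frame, written with the width-neutral
 * REG_* and PTR_* aliases defined further below.
 *
 *	NESTED(example_caller, CALLFRAME_SIZ, ra)
 *		PTR_SUBU sp, sp, CALLFRAME_SIZ	# allocate the frame
 *		REG_S	ra, CALLFRAME_RA(sp)	# save the return address
 *		jal	_C_LABEL(example_callee)
 *		REG_L	ra, CALLFRAME_RA(sp)	# restore the return address
 *		PTR_ADDU sp, sp, CALLFRAME_SIZ	# release the frame
 *		jr	ra
 *	END(example_caller)
 */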

/*
 * While it would be nice to be compatible with the SGI
 * REG_L and REG_S macros, they do not take parameters, so it
 * is impossible to use them with the _MIPS_SIM_ABIX32 model.
 *
 * These macros hide the use of mips3 instructions from the
 * assembler to prevent the assembler from generating 64-bit style
 * ABI calls.
 */
#ifdef __mips_o32
#define	PTR_ADD		add
#define	PTR_ADDI	addi
#define	PTR_ADDU	addu
#define	PTR_ADDIU	addiu
#define	PTR_SUB		subu
#define	PTR_SUBI	subi
#define	PTR_SUBU	subu
#define	PTR_SUBIU	subu
#define	PTR_L		lw
#define	PTR_LA		la
#define	PTR_S		sw
#define	PTR_SLL		sll
#define	PTR_SLLV	sllv
#define	PTR_SRL		srl
#define	PTR_SRLV	srlv
#define	PTR_SRA		sra
#define	PTR_SRAV	srav
#define	PTR_LL		ll
#define	PTR_SC		sc
#define	PTR_WORD	.word
#define	PTR_SCALESHIFT	2
#else	/* _MIPS_SZPTR == 64 */
#define	PTR_ADD		dadd
#define	PTR_ADDI	daddi
#define	PTR_ADDU	daddu
#define	PTR_ADDIU	daddiu
#define	PTR_SUB		dsubu
#define	PTR_SUBI	dsubi
#define	PTR_SUBU	dsubu
#define	PTR_SUBIU	dsubu
#ifdef __mips_n32
#define	PTR_L		lw
#define	PTR_LL		ll
#define	PTR_SC		sc
#define	PTR_S		sw
#define	PTR_SCALESHIFT	2
#define	PTR_WORD	.word
#else
#define	PTR_L		ld
#define	PTR_LL		lld
#define	PTR_SC		scd
#define	PTR_S		sd
#define	PTR_SCALESHIFT	3
#define	PTR_WORD	.dword
#endif
#define	PTR_LA		dla
#define	PTR_SLL		dsll
#define	PTR_SLLV	dsllv
#define	PTR_SRL		dsrl
#define	PTR_SRLV	dsrlv
#define	PTR_SRA		dsra
#define	PTR_SRAV	dsrav
#endif	/* _MIPS_SZPTR == 64 */
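
/*
 * Usage sketch (hypothetical table): indexing into a table of pointers
 * in an ABI-neutral way with the PTR_* aliases, where a0 holds the
 * index:
 *
 *	PTR_LA	t0, _C_LABEL(example_table)
 *	PTR_SLL	t1, a0, PTR_SCALESHIFT		# index -> byte offset
 *	PTR_ADDU t0, t0, t1
 *	PTR_L	v0, 0(t0)			# load one pointer-sized entry
 */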

#if _MIPS_SZINT == 32
#define	INT_ADD		add
#define	INT_ADDI	addi
#define	INT_ADDU	addu
#define	INT_ADDIU	addiu
#define	INT_SUB		subu
#define	INT_SUBI	subi
#define	INT_SUBU	subu
#define	INT_SUBIU	subu
#define	INT_L		lw
#define	INT_LA		la
#define	INT_S		sw
#define	INT_SLL		sll
#define	INT_SLLV	sllv
#define	INT_SRL		srl
#define	INT_SRLV	srlv
#define	INT_SRA		sra
#define	INT_SRAV	srav
#define	INT_LL		ll
#define	INT_SC		sc
#define	INT_WORD	.word
#define	INT_SCALESHIFT	2
#else
#define	INT_ADD		dadd
#define	INT_ADDI	daddi
#define	INT_ADDU	daddu
#define	INT_ADDIU	daddiu
#define	INT_SUB		dsubu
#define	INT_SUBI	dsubi
#define	INT_SUBU	dsubu
#define	INT_SUBIU	dsubu
#define	INT_L		ld
#define	INT_LA		dla
#define	INT_S		sd
#define	INT_SLL		dsll
#define	INT_SLLV	dsllv
#define	INT_SRL		dsrl
#define	INT_SRLV	dsrlv
#define	INT_SRA		dsra
#define	INT_SRAV	dsrav
#define	INT_LL		lld
#define	INT_SC		scd
#define	INT_WORD	.dword
#define	INT_SCALESHIFT	3
#endif

#if _MIPS_SZLONG == 32
#define	LONG_ADD	add
#define	LONG_ADDI	addi
#define	LONG_ADDU	addu
#define	LONG_ADDIU	addiu
#define	LONG_SUB	subu
#define	LONG_SUBI	subi
#define	LONG_SUBU	subu
#define	LONG_SUBIU	subu
#define	LONG_L		lw
#define	LONG_LA		la
#define	LONG_S		sw
#define	LONG_SLL	sll
#define	LONG_SLLV	sllv
#define	LONG_SRL	srl
#define	LONG_SRLV	srlv
#define	LONG_SRA	sra
#define	LONG_SRAV	srav
#define	LONG_LL		ll
#define	LONG_SC		sc
#define	LONG_WORD	.word
#define	LONG_SCALESHIFT	2
#else
#define	LONG_ADD	dadd
#define	LONG_ADDI	daddi
#define	LONG_ADDU	daddu
#define	LONG_ADDIU	daddiu
#define	LONG_SUB	dsubu
#define	LONG_SUBI	dsubi
#define	LONG_SUBU	dsubu
#define	LONG_SUBIU	dsubu
#define	LONG_L		ld
#define	LONG_LA		dla
#define	LONG_S		sd
#define	LONG_SLL	dsll
#define	LONG_SLLV	dsllv
#define	LONG_SRL	dsrl
#define	LONG_SRLV	dsrlv
#define	LONG_SRA	dsra
#define	LONG_SRAV	dsrav
#define	LONG_LL		lld
#define	LONG_SC		scd
#define	LONG_WORD	.dword
#define	LONG_SCALESHIFT	3
#endif

#if SZREG == 4
#define	REG_L		lw
#define	REG_S		sw
#define	REG_LI		li
#define	REG_ADDU	addu
#define	REG_SLL		sll
#define	REG_SLLV	sllv
#define	REG_SRL		srl
#define	REG_SRLV	srlv
#define	REG_SRA		sra
#define	REG_SRAV	srav
#define	REG_LL		ll
#define	REG_SC		sc
#define	REG_SCALESHIFT	2
#else
#define	REG_L		ld
#define	REG_S		sd
#define	REG_LI		dli
#define	REG_ADDU	daddu
#define	REG_SLL		dsll
#define	REG_SLLV	dsllv
#define	REG_SRL		dsrl
#define	REG_SRLV	dsrlv
#define	REG_SRA		dsra
#define	REG_SRAV	dsrav
#define	REG_LL		lld
#define	REG_SC		scd
#define	REG_SCALESHIFT	3
#endif

#if (MIPS1 + MIPS2) > 0
#define	NOP_L		nop
#else
#define	NOP_L		/* nothing */
#endif
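
/*
 * Usage sketch: NOP_L expands to a nop only when MIPS1/MIPS2 CPU
 * support is configured.  Under ".set noreorder" it is placed after a
 * load whose result is consumed by the very next instruction, to cover
 * the load-delay slot of those early CPUs:
 *
 *	lw	t0, 0(a0)
 *	NOP_L				# load-delay slot on old CPUs only
 *	addu	v0, t0, a1
 */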

/* compiler define */
#if defined(MULTIPROCESSOR) && defined(__OCTEON__)
/*
 * See common/lib/libc/arch/mips/atomic/membar_ops.S for notes on
 * Octeon memory ordering guarantees and barriers.
 *
 * cnMIPS also has a quirk where the store buffer can get clogged and
 * we need to apply a plunger to it _after_ releasing a lock or else
 * other CPUs may spin for hundreds of thousands of cycles before they
 * see the lock is released.  So we also have the quirky SYNC_PLUNGER
 * barrier as syncw.  See the note in the SYNCW instruction description
 * on p. 2168 of Cavium OCTEON III CN78XX Hardware Reference Manual,
 * CN78XX-HM-0.99E, September 2014:
 *
 *	Core A (writer)
 *
 *	SW R1, DATA#	change shared DATA value
 *	LI R1, 1
 *	SYNCW#		(or SYNCWS) Perform DATA store before performing FLAG store
 *	SW R2, FLAG#	say that the shared DATA value is valid
 *	SYNCW#		(or SYNCWS) Force the FLAG store soon (CN78XX-specific)
 *
 *	...
 *
 *	The second SYNCW instruction executed by core A is not
 *	necessary for correctness, but has very important performance
 *	effects on the CN78XX.  Without it, the store to FLAG may
 *	linger in core A's write buffer before it becomes visible to
 *	any other cores.  (If core A is not performing many stores,
 *	this may add hundreds of thousands of cycles to the flag
 *	release time since the CN78XX core nominally retains stores to
 *	attempt to merge them before sending the store on the CMI.)
 *	Applications should include this second SYNCW instruction after
 *	flag or lock release.
 */
#define	LLSCSYNC	/* nothing */
#define	BDSYNC		sync
#define	BDSYNC_ACQ	nop
#define	SYNC_ACQ	/* nothing */
#define	SYNC_REL	sync 4
#define	BDSYNC_PLUNGER	sync 4
#define	SYNC_PLUNGER	sync 4
#elif defined(MULTIPROCESSOR) && (__mips >= 3 || !defined(__mips_o32))
#define	LLSCSYNC	/* nothing */
#define	BDSYNC		sync
#define	BDSYNC_ACQ	sync
#define	SYNC_ACQ	sync
#define	SYNC_REL	sync
#define	BDSYNC_PLUNGER	nop
#define	SYNC_PLUNGER	/* nothing */
#else
#define	LLSCSYNC	/* nothing */
#define	BDSYNC		nop
#define	BDSYNC_ACQ	nop
#define	SYNC_ACQ	/* nothing */
#define	SYNC_REL	/* nothing */
#define	BDSYNC_PLUNGER	nop
#define	SYNC_PLUNGER	/* nothing */
#endif

/*
 * Store-before-load barrier.  Do not use this unless you know what
 * you're doing.
 */
#ifdef MULTIPROCESSOR
#define	SYNC_DEKKER	sync
#else
#define	SYNC_DEKKER	/* nothing */
#endif

/*
 * Store-before-store and load-before-load barriers.  These could be
 * made weaker than release (load/store-before-store) and acquire
 * (load-before-load/store) barriers, and newer MIPS does have
 * instruction encodings for finer-grained barriers like this, but I
 * dunno how to appropriately conditionalize their use or get the
 * assembler to be happy with them, so we'll use these definitions for
 * now.
 */
#define	SYNC_PRODUCER	SYNC_REL
#define	SYNC_CONSUMER	SYNC_ACQ
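
/*
 * Usage sketch (not a real lock implementation): releasing a simple
 * spin lock held in the word at 0(a0) might be written as
 *
 *	SYNC_REL		# order earlier stores before the release store
 *	INT_S	zero, 0(a0)	# clear the lock word
 *	SYNC_PLUNGER		# cnMIPS: push the store out of the write buffer
 */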

/* CPU dependent hook for cp0 load delays */
#if defined(MIPS1) || defined(MIPS2) || defined(MIPS3)
#define	MFC0_HAZARD	sll $0,$0,1	/* super scalar nop */
#else
#define	MFC0_HAZARD	/* nothing */
#endif

#if _MIPS_ISA == _MIPS_ISA_MIPS1 || _MIPS_ISA == _MIPS_ISA_MIPS2 || \
    _MIPS_ISA == _MIPS_ISA_MIPS32
#define	MFC0		mfc0
#define	MTC0		mtc0
#endif
#if _MIPS_ISA == _MIPS_ISA_MIPS3 || _MIPS_ISA == _MIPS_ISA_MIPS4 || \
    _MIPS_ISA == _MIPS_ISA_MIPS64
#define	MFC0		dmfc0
#define	MTC0		dmtc0
#endif
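
/*
 * Usage sketch: reading a coprocessor 0 register and using the result,
 * with the hazard hook covering the CP0 read latency where needed:
 *
 *	mfc0	t0, $12			# CP0 status register
 *	MFC0_HAZARD			# wait out the CP0 hazard
 *	and	t0, t0, a0
 */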

#if defined(__mips_o32) || defined(__mips_o64)

#ifdef __mips_abicalls
#define	CPRESTORE(r)	.cprestore r
#define	CPLOAD(r)	.cpload r
#else
#define	CPRESTORE(r)	/* not needed */
#define	CPLOAD(r)	/* not needed */
#endif

#define	SETUP_GP	\
	.set push;	\
	.set noreorder;	\
	.cpload	t9;	\
	.set pop
#define	SETUP_GPX(r)		\
	.set push;		\
	.set noreorder;		\
	move	r,ra;		/* save old ra */	\
	bal	7f;		\
	nop;			\
7:	.cpload	ra;		\
	move	ra,r;		\
	.set pop
#define	SETUP_GPX_L(r,lbl)	\
	.set push;		\
	.set noreorder;		\
	move	r,ra;		/* save old ra */	\
	bal	lbl;		\
	nop;			\
lbl:	.cpload	ra;		\
	move	ra,r;		\
	.set pop
#define	SAVE_GP(x)	.cprestore x

#define	SETUP_GP64(a,b)		/* n32/n64 specific */
#define	SETUP_GP64_R(a,b)	/* n32/n64 specific */
#define	SETUP_GPX64(a,b)	/* n32/n64 specific */
#define	SETUP_GPX64_L(a,b,c)	/* n32/n64 specific */
#define	RESTORE_GP64		/* n32/n64 specific */
#define	USE_ALT_CP(a)		/* n32/n64 specific */
#endif	/* __mips_o32 || __mips_o64 */

#if defined(__mips_o32) || defined(__mips_o64)
#define	REG_PROLOGUE	.set push
#define	REG_EPILOGUE	.set pop
#endif
#if defined(__mips_n32) || defined(__mips_n64)
#define	REG_PROLOGUE	.set push ; .set mips3
#define	REG_EPILOGUE	.set pop
#endif

#if defined(__mips_n32) || defined(__mips_n64)
#define	SETUP_GP		/* o32 specific */
#define	SETUP_GPX(r)		/* o32 specific */
#define	SETUP_GPX_L(r,lbl)	/* o32 specific */
#define	SAVE_GP(x)		/* o32 specific */
#define	SETUP_GP64(a,b)		.cpsetup t9, a, b
#define	SETUP_GPX64(a,b)	\
	.set	push;		\
	move	b,ra;		\
	.set	noreorder;	\
	bal	7f;		\
	nop;			\
7:	.set	pop;		\
	.cpsetup ra, a, 7b;	\
	move	ra,b
#define	SETUP_GPX64_L(a,b,c)	\
	.set	push;		\
	move	b,ra;		\
	.set	noreorder;	\
	bal	c;		\
	nop;			\
c:	.set	pop;		\
	.cpsetup ra, a, c;	\
	move	ra,b
#define	RESTORE_GP64		.cpreturn
#define	USE_ALT_CP(a)		.cplocal a
#endif	/* __mips_n32 || __mips_n64 */

/*
 * The DYNAMIC_STATUS_MASK option adds an additional masking operation
 * when updating the hardware interrupt mask in the status register.
 *
 * This is useful for platforms that need to mask interrupts at run
 * time based on motherboard configuration, or to handle slowly
 * clearing interrupts.
 *
 * XXX this is only currently implemented for mips3.
 */
#ifdef MIPS_DYNAMIC_STATUS_MASK
#define	DYNAMIC_STATUS_MASK(sr,scratch)			\
	lw	scratch, mips_dynamic_status_mask;	\
	and	sr, sr, scratch

#define	DYNAMIC_STATUS_MASK_TOUSER(sr,scratch1)		\
	ori	sr, (MIPS_INT_MASK | MIPS_SR_INT_IE);	\
	DYNAMIC_STATUS_MASK(sr,scratch1)
#else
#define	DYNAMIC_STATUS_MASK(sr,scratch)
#define	DYNAMIC_STATUS_MASK_TOUSER(sr,scratch1)
#endif

/* See lock_stubs.S. */
#define	LOG2_MIPS_LOCK_RAS_SIZE	8
#define	MIPS_LOCK_RAS_SIZE	256	/* 16 bytes left over */

#define	CPUVAR(off) _C_LABEL(cpu_info_store)+__CONCAT(CPU_INFO_,off)

#endif	/* _MIPS_ASM_H */