/* sys/arch/mips/mips/mips_fixup.c (NetBSD) */
      1 /*	$NetBSD: mips_fixup.c,v 1.24 2025/05/03 02:00:46 riastradh Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2010 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Matt Thomas of 3am Software Foundry.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 #include <sys/cdefs.h>
     33 __KERNEL_RCSID(0, "$NetBSD: mips_fixup.c,v 1.24 2025/05/03 02:00:46 riastradh Exp $");
     34 
     35 #include "opt_cputype.h"
     36 #include "opt_mips3_wired.h"
     37 #include "opt_multiprocessor.h"
     38 
     39 #include <sys/param.h>
     40 
     41 #include <uvm/uvm_extern.h>
     42 
     43 #include <mips/locore.h>
     44 #include <mips/cache.h>
     45 #include <mips/mips3_pte.h>
     46 #include <mips/regnum.h>
     47 #include <mips/mips_opcode.h>
     48 
/*
 * mips_fixup_exceptions:
 *
 *	Scan the copied-down exception vectors for LUI + load/store pairs
 *	that reference cpu_info_store and let `callback' rewrite each pair
 *	in place (e.g. to use a wired mapping instead of the absolute
 *	address).  If anything was patched, resync the instruction cache.
 *
 *	callback	given the referenced address and the two words
 *			(LUI first), may rewrite them and return true to
 *			have them written back.
 *	arg		opaque cookie passed through to `callback'.
 *
 *	Returns true iff at least one pair was patched.
 */
bool
mips_fixup_exceptions(mips_fixup_callback_t callback, void *arg)
{
#if (MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) > 0
	/*
	 * MIPS32/64 CPUs may have relocated their exception base (EBASE).
	 * If EBASE looks unprogrammed (reads back as the CPU id) or its top
	 * two bits are not 10 (i.e. not a KSEG0/KSEG1 address), fall back
	 * to the classic KSEG0 vector base.
	 */
	int32_t ebase = mipsNN_cp0_ebase_read();
	uint32_t *start;
	if (ebase == mips_options.mips_cpu_id
	    || (ebase & __BITS(31,30)) != __BIT(31)) {
		start = (uint32_t *)MIPS_KSEG0_START;
	} else {
		start = (uint32_t *)(intptr_t)(ebase & ~MIPS_EBASE_CPUNUM);
	}
#else
	uint32_t * const start = (uint32_t *)MIPS_KSEG0_START;
#endif
	/* Five 128-byte exception vectors' worth of instructions. */
	uint32_t * const end = start + (5 * 128) / sizeof(uint32_t);
	const int32_t addr = (intptr_t)&cpu_info_store;
	const size_t size = sizeof(cpu_info_store);
	uint32_t new_insns[2];
	uint32_t *lui_insnp = NULL;	/* LUI word of the candidate pair */
	int32_t lui_offset = 0;
	bool fixed = false;
	size_t lui_reg = 0;
#ifdef DEBUG_VERBOSE
	printf("%s: fixing %p..%p\n", __func__, start, end);
#endif
	/*
	 * If this was allocated so that bit 15 of the value/address is 1, then
	 * %hi will add 1 to the immediate (or 0x10000 to the value loaded)
	 * to compensate for using a negative offset for the lower half of
	 * the value.
	 */
	const int32_t upper_start = (addr + 32768) & ~0xffff;
	const int32_t upper_end = (addr + size - 1 + 32768) & ~0xffff;

#ifndef MIPS64_OCTEON
	/* cpu_info_store must not straddle a 4KB boundary. */
	KASSERT((addr & ~0xfff) == ((addr + size - 1) & ~0xfff));
#endif

	uint32_t lui_insn = 0;
	for (uint32_t *insnp = start; insnp < end; insnp++) {
		const uint32_t insn = *insnp;
		if (INSN_LUI_P(insn)) {
			/* Remember a LUI that loads %hi(cpu_info_store). */
			const int32_t offset = insn << 16;
			lui_reg = (insn >> 16) & 31;
#ifdef DEBUG_VERBOSE
			printf("%s: %#x: insn %08x: lui r%zu, %%hi(%#x)",
			    __func__, (int32_t)(intptr_t)insnp,
			    insn, lui_reg, offset);
#endif
			KASSERT(lui_reg == _R_K0 || lui_reg == _R_K1);
			if (upper_start == offset || upper_end == offset) {
				lui_insnp = insnp;
				lui_insn = insn;
				lui_offset = offset;
#ifdef DEBUG_VERBOSE
				printf(" (maybe)");
#endif
			} else {
				lui_insnp = NULL;
				lui_insn = 0;
				lui_offset = 0;
			}
#ifdef DEBUG_VERBOSE
			printf("\n");
#endif
		} else if (lui_insn != 0
			   && (INSN_LOAD_P(insn) || INSN_STORE_P(insn))) {
			/* A load/store completing a remembered LUI. */
			size_t base = (insn >> 21) & 31;
#if defined(DIAGNOSTIC) || defined(DEBUG_VERBOSE)
			size_t rt = (insn >> 16) & 31;
#endif
			int32_t load_addr = lui_offset + (int16_t)insn;
			if (addr <= load_addr
			    && load_addr < addr + size
			    && base == lui_reg) {
#if defined(DIAGNOSTIC) || defined(DEBUG_VERBOSE)
				KASSERT(rt == _R_K0 || rt == _R_K1);
#ifdef DEBUG_VERBOSE
				printf("%s: %#x: insn %08x: %s r%zu, %%lo(%08x)(r%zu)\n",
				    __func__, (int32_t)(intptr_t)insnp,
				    insn,
				    INSN_LOAD_P(insn)
					? INSN_LW_P(insn) ? "lw" : "ld"
					: INSN_SW_P(insn) ? "sw" : "sd",
				    rt, load_addr, base);
#endif
#endif
				/*
				 * Hand the pair to the callback; if it
				 * rewrote them, patch the vector.  A zero
				 * second word means the pair collapsed to
				 * a single instruction.
				 */
				new_insns[0] = lui_insn;
				new_insns[1] = *insnp;
				if ((callback)(load_addr, new_insns, arg)) {
					if (lui_insnp) {
						*lui_insnp = new_insns[0];
						*insnp = new_insns[1];
					} else if (new_insns[1] == 0) {
						*insnp = new_insns[0];
					} else {
						*insnp = new_insns[1];
					}
					fixed = true;
				}
				lui_insnp = NULL;
			}
		} else if (INSN_LOAD_P(insn)) {
			/*
			 * If we are loading the register used in the LUI,
			 * then that LUI is meaningless now.
			 */
			size_t rt = (insn >> 16) & 31;
			if (lui_reg == rt)
				lui_insn = 0;
		}
	}

	/* We changed instructions: make the I-cache see them. */
	if (fixed)
		mips_icache_sync_range((intptr_t)start,
		   sizeof(start[0]) * (end - start));

	return fixed;
}
    169 
    170 #ifdef MIPS3_PLUS
    171 bool
    172 mips_fixup_zero_relative(int32_t load_addr, uint32_t new_insns[2], void *arg)
    173 {
    174 	struct cpu_info * const ci = curcpu();
    175 	struct pmap_tlb_info * const ti = ci->ci_tlb_info;
    176 
    177 	KASSERT(MIPS_KSEG0_P(load_addr));
    178 	KASSERT(!MIPS_CACHE_VIRTUAL_ALIAS);
    179 #ifdef MULTIPROCESSOR
    180 	KASSERT(CPU_IS_PRIMARY(ci));
    181 #endif
    182 	KASSERT((intptr_t)ci <= load_addr);
    183 	KASSERT(load_addr < (intptr_t)(ci + 1));
    184 	KASSERT(MIPS_HAS_R4K_MMU);
    185 
    186 	/*
    187 	 * Use the load instruction as a prototype and it make use $0
    188 	 * as base and the new negative offset.  The second instruction
    189 	 * is a NOP.
    190 	 */
    191 	new_insns[0] =
    192 	    (new_insns[1] & (0xfc1f0000|PAGE_MASK)) | (0xffff & ~PAGE_MASK);
    193 	new_insns[1] = 0;
    194 #ifdef DEBUG_VERBOSE
    195 	printf("%s: %08x: insn#1 %08x: %s r%u, %d(r%u)\n",
    196 	    __func__, (int32_t)load_addr, new_insns[0],
    197 	    INSN_LOAD_P(new_insns[0])
    198 		? INSN_LW_P(new_insns[0]) ? "lw" : "ld"
    199 		: INSN_LW_P(new_insns[0]) ? "sw" : "sd",
    200 	    (new_insns[0] >> 16) & 31,
    201 	    (int16_t)new_insns[0],
    202 	    (new_insns[0] >> 21) & 31);
    203 #endif
    204 	/*
    205 	 * Construct the TLB_LO entry needed to map cpu_info_store.
    206 	 */
    207 
    208 	/*
    209 	 * Now allocate a TLB entry in the primary TLB for the mapping and
    210 	 * enter the mapping into the TLB.
    211 	 */
    212 	TLBINFO_LOCK(ti);
    213 	if (ci->ci_tlb_slot < 0) {
    214 		uint32_t tlb_lo = MIPS3_PG_G|MIPS3_PG_V|MIPS3_PG_D
    215 		    | mips3_paddr_to_tlbpfn(MIPS_KSEG0_TO_PHYS(trunc_page(load_addr)));
    216 		struct tlbmask tlbmask = {
    217 			.tlb_hi = -PAGE_SIZE | KERNEL_PID,
    218 #if PGSHIFT & 1
    219 			.tlb_lo1 = tlb_lo,
    220 			.tlb_lo1 = tlb_lo + MIPS3_PG_NEXT,
    221 #else
    222 			.tlb_lo0 = 0,
    223 			.tlb_lo1 = tlb_lo,
    224 #endif
    225 			.tlb_mask = -1,
    226 		};
    227 		ci->ci_tlb_slot = ti->ti_wired++;
    228 		mips3_cp0_wired_write(ti->ti_wired);
    229 		tlb_invalidate_addr(-PAGE_SIZE, KERNEL_PID);
    230 		tlb_write_entry(ci->ci_tlb_slot, &tlbmask);
    231 	}
    232 	TLBINFO_UNLOCK(ti);
    233 
    234 	return true;
    235 }
    236 #endif /* MIPS3_PLUS */
    237 
    238 #define OPCODE_J		002
    239 #define OPCODE_JAL		003
    240 
    241 static inline void
    242 fixup_mips_jump(uint32_t *insnp, const struct mips_jump_fixup_info *jfi)
    243 {
    244 	uint32_t insn = *insnp;
    245 
    246 	KASSERT((insn >> (26+1)) == (OPCODE_J >> 1));
    247 	KASSERT((insn << 6) == (jfi->jfi_stub << 6));
    248 
    249 	insn ^= (jfi->jfi_stub ^ jfi->jfi_real);
    250 
    251 	KASSERT((insn << 6) == (jfi->jfi_real << 6));
    252 
    253 #ifdef DEBUG
    254 #if 0
    255 	int32_t va = ((intptr_t) insnp >> 26) << 26;
    256 	printf("%s: %08x: [%08x] %s %08x -> [%08x] %s %08x\n",
    257 	    __func__, (int32_t)(intptr_t)insnp,
    258 	    insn, opcode == OPCODE_J ? "j" : "jal",
    259 	    va | (jfi->jfo_stub << 2),
    260 	    *insnp, opcode == OPCODE_J ? "j" : "jal",
    261 	    va | (jfi->jfi_real << 2));
    262 #endif
    263 #endif
    264 	*insnp = insn;
    265 }
    266 
    267 intptr_t
    268 mips_fixup_addr(const uint32_t *stubp)
    269 {
    270 	/*
    271 	 * Stubs typically look like:
    272 	 *	lui	v0, %hi(sym)
    273 	 *	lX	t9, %lo(sym)(v0)
    274 	 *	[nop]
    275 	 *	jr	t9
    276 	 *	nop
    277 	 *
    278 	 * Or for loongson2 (
    279 	 *	lui	v0, %hi(sym)
    280 	 *	lX	t9, %lo(sym)(v0)
    281 	 *	lui	at,0xcfff
    282 	 *	ori	at,at,0xffff
    283 	 *	and	t9,t9,at
    284 	 *	jr	t9
    285 	 *	move	at,at
    286 	 *   or:
    287 	 *	lui	v0, %hi(sym)
    288 	 *	lX	t9, %lo(sym)(v0)
    289 	 *	li	at, 0x3
    290 	 *	dmtc0	at, $22
    291 	 *	jr	t9
    292 	 *	nop
    293 	 *
    294 	 * A profiled n32/n64 stub will start with:
    295 	 *	move	ta, ra
    296 	 *	jal	_mcount
    297 	 *	 nop
    298 	 */
    299 	mips_reg_t regs[32];
    300 	uint32_t used = 1 |__BIT(_R_A0)|__BIT(_R_A1)|__BIT(_R_A2)|__BIT(_R_A3);
    301 	size_t n;
    302 	const char *errstr = "mips";
    303 
    304 #ifdef GPROF
    305 	static uint32_t mcount_addr = 0;
    306 	extern void _mcount(u_long, u_long);	/* XXX decl */
    307 
    308 	if (mcount_addr == 0)
    309 		mcount_addr = (uint32_t)(uintptr_t)_mcount & 0x0fffffff;
    310 #endif /* GPROF */
    311 
    312 	/*
    313 	 * This is basically a small MIPS emulator for those instructions
    314 	 * that might be in a stub routine.
    315 	 */
    316 	for (n = 0; n < 16; n++) {
    317 		const InstFmt insn = { .word = stubp[n] };
    318 		switch (insn.IType.op) {
    319 		case OP_LUI:
    320 			regs[insn.IType.rt] = (int16_t)insn.IType.imm << 16;
    321 			used |= (1 << insn.IType.rt);
    322 			break;
    323 #ifdef _LP64
    324 		case OP_LD:
    325 			if ((used & (1 << insn.IType.rs)) == 0) {
    326 				errstr = "LD";
    327 				goto out;
    328 			}
    329 			regs[insn.IType.rt] = *(const int64_t *)
    330 			    (regs[insn.IType.rs] + (int16_t)insn.IType.imm);
    331 			used |= (1 << insn.IType.rt);
    332 			break;
    333 		case OP_SD:
    334 			if (insn.IType.rt != _R_RA || insn.IType.rs != _R_SP) {
    335 				errstr = "SD";
    336 				goto out;
    337 			}
    338 			break;
    339 #else
    340 		case OP_LW:
    341 			if ((used & (1 << insn.IType.rs)) == 0) {
    342 				errstr = "LW";
    343 				goto out;
    344 			}
    345 			regs[insn.IType.rt] = *(const int32_t *)
    346 			    ((intptr_t)regs[insn.IType.rs]
    347 			    + (int16_t)insn.IType.imm);
    348 			used |= (1 << insn.IType.rt);
    349 			break;
    350 		case OP_SW:
    351 			if (insn.IType.rt != _R_RA || insn.IType.rs != _R_SP) {
    352 				errstr = "SW";
    353 				goto out;
    354 			}
    355 			break;
    356 #endif
    357 		case OP_ORI:
    358 			if ((used & (1 << insn.IType.rs)) == 0) {
    359 				errstr = "ORI";
    360 				goto out;
    361 			}
    362 			regs[insn.IType.rt] |= insn.IType.imm;
    363 			used |= (1 << insn.IType.rt);
    364 			break;
    365 		case OP_COP0:
    366 			switch (insn.RType.rs) {
    367 			case OP_DMT:
    368 				if (insn.RType.rd != 22) {
    369 					errstr = "dmtc0 dst";
    370 					goto out;
    371 				}
    372 				if ((used & (1 << insn.RType.rt)) == 0) {
    373 					errstr = "dmtc0 src";
    374 					goto out;
    375 				}
    376 				break;
    377 			default:
    378 				errstr = "COP0";
    379 				goto out;
    380 			}
    381 			break;
    382 #ifdef GPROF
    383 		case OP_JAL:
    384 			if (insn.JType.target << 2 != mcount_addr) {
    385 				errstr = "JAL-non-_mcount";
    386 				goto out;
    387 			}
    388 			break;
    389 #endif /* GPROF */
    390 		case OP_SPECIAL:
    391 			switch (insn.RType.func) {
    392 			case OP_JALR:
    393 			case OP_JR:
    394 				if ((used & (1 << insn.RType.rs)) == 0) {
    395 					errstr = "JR";
    396 					goto out;
    397 				}
    398 				if (stubp[n+1] != 0
    399 				    && (stubp[n+1] & 0xfff0003c) != 0x0000003c
    400 				    && stubp[n+1] != 0x00200825) {
    401 					n++;
    402 					errstr = "delay slot";
    403 					goto out;
    404 				}
    405 				return regs[insn.RType.rs];
    406 			case OP_AND:
    407 				if ((used & (1 << insn.RType.rs)) == 0
    408 				    || (used & (1 << insn.RType.rt)) == 0) {
    409 					errstr = "AND";
    410 					goto out;
    411 				}
    412 				regs[insn.RType.rd] =
    413 				    regs[insn.RType.rs] & regs[insn.RType.rt];
    414 				used |= (1 << insn.RType.rd);
    415 				break;
    416 #if !defined(__mips_o32)
    417 			case OP_DSLL32:	/* force to 32-bits */
    418 			case OP_DSRA32:	/* force to 32-bits */
    419 				if (regs[insn.RType.rd] != regs[insn.RType.rt]
    420 				    || (used & (1 << insn.RType.rt)) == 0
    421 				    || regs[insn.RType.shamt] != 0) {
    422 					errstr = "AND";
    423 					goto out;
    424 				}
    425 				break;
    426 #endif
    427 			case OP_SLL:	/* nop */
    428 				if (insn.RType.rd != _R_ZERO) {
    429 					errstr = "NOP";
    430 					goto out;
    431 				}
    432 				break;
    433 #ifdef GPROF
    434 			case OP_OR:
    435 				if (insn.RType.rt != 0) {
    436 					errstr = "NON-MOVE OR";
    437 					goto out;
    438 				}
    439 				if (insn.RType.rd != 1 ||
    440 				    insn.RType.rs != 31) {
    441 					errstr = "NON at,ra MOVE";
    442 					goto out;
    443 				}
    444 				break;
    445 #endif /* GPROF */
    446 			case OP_DSLL:
    447 			default:
    448 				errstr = "SPECIAL";
    449 				goto out;
    450 			}
    451 			break;
    452 		default:
    453 			errstr = "mips";
    454 			goto out;
    455 		}
    456 	}
    457 
    458   out:
    459 	printf("%s: unexpected %s insn %#x at %p\n",
    460 	    __func__, errstr,
    461 	    stubp[n], &stubp[n]);
    462 	return 0;
    463 }
    464 
/*
 * mips_fixup_stubs:
 *
 *	Scan kernel text in [start, end) for J/JAL instructions whose
 *	target lies in the .stub section, resolve each stub to its real
 *	destination with mips_fixup_addr(), and patch the jump to go
 *	there directly.  Finally resync the instruction cache.
 */
void
mips_fixup_stubs(uint32_t *start, uint32_t *end)
{
#ifdef DEBUG
	size_t fixups_done = 0;
	/* Time the pass with the CP0 count register where available. */
	uint32_t cycles =
#if (MIPS3 + MIPS4 + MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) > 0
	    (CPUISMIPS3 ? mips3_cp0_count_read() : 0);
#else
	    0;
#endif
#endif
	extern uint32_t __stub_start[], __stub_end[];

	KASSERT(MIPS_KSEG0_P(start));
	KASSERT(MIPS_KSEG0_P(end));
	KASSERT(MIPS_KSEG0_START == (((intptr_t)start >> 28) << 28));

	/* Never rewrite the stubs themselves. */
	if (end > __stub_start)
		end = __stub_start;

	for (uint32_t *insnp = start; insnp < end; insnp++) {
		uint32_t insn = *insnp;
		uint32_t offset = insn & 0x03ffffff;
		uint32_t opcode = insn >> 26;
		/* A J/JAL target lies in the jump's own 256MB region. */
		const uint32_t * const stubp =
		    &((uint32_t *)(((intptr_t)insnp >> 28) << 28))[offset];

		/*
		 * First we check to see if this is a jump and whether it is
		 * within the range we are interested in.
		 */
		if ((opcode != OPCODE_J && opcode != OPCODE_JAL)
		    || stubp < __stub_start || __stub_end <= stubp)
			continue;

		const intptr_t real_addr = mips_fixup_addr(stubp);

		/*
		 * If the stub's real destination couldn't be determined,
		 * leave this jump alone.
		 */
		if (real_addr == 0) {
			continue;
		}
		/*
		 * Verify the real destination is in the same 256MB
		 * as the location of the jump instruction.
		 */
		KASSERT((real_addr >> 28) == ((intptr_t)insnp >> 28));

		/*
		 * Now fix it up.  Replace the old displacement to the stub
		 * with the real displacement.
		 */
		struct mips_jump_fixup_info fixup = {
		    .jfi_stub = fixup_addr2offset(stubp),
		    .jfi_real = fixup_addr2offset(real_addr),
		};

		fixup_mips_jump(insnp, &fixup);
#ifdef DEBUG
		fixups_done++;
#endif
	}

	/* Patched text: resync the I-cache (all of it if the range is big). */
	if (sizeof(uint32_t [end - start]) > mips_cache_info.mci_picache_size)
		mips_icache_sync_all();
	else
		mips_icache_sync_range((intptr_t)start,
		    sizeof(uint32_t [end - start]));

#ifdef DEBUG
#if (MIPS3 + MIPS4 + MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) > 0
	if (CPUISMIPS3)
		cycles = mips3_cp0_count_read() - cycles;
#endif
	printf("%s: %zu fixup%s done in %u cycles\n", __func__,
	    fixups_done, fixups_done == 1 ? "" : "s",
	    cycles);
#endif
}
    546 
    547 #define	__stub		__section(".stub")
    548 
    549 void	mips_cpu_switch_resume(struct lwp *)		__stub;
    550 tlb_asid_t
    551 	tlb_get_asid(void)				__stub;
    552 void	tlb_set_asid(uint32_t, struct pmap *)		__stub;
    553 void	tlb_invalidate_all(void)			__stub;
    554 void	tlb_invalidate_globals(void)			__stub;
    555 void	tlb_invalidate_asids(uint32_t, uint32_t)	__stub;
    556 void	tlb_invalidate_addr(vaddr_t, tlb_asid_t)	__stub;
    557 u_int	tlb_record_asids(u_long *, uint32_t)		__stub;
    558 bool	tlb_update_addr(vaddr_t, tlb_asid_t, pt_entry_t, bool)
    559 							__stub;
    560 void	tlb_read_entry(size_t, struct tlbmask *)	__stub;
    561 void	tlb_write_entry(size_t, const struct tlbmask *) __stub;
    562 
    563 /*
    564  * wbflush isn't a stub since it gets overridden quite late
    565  * (after mips_vector_init returns).
    566  */
    567 void	wbflush(void)					/*__stub*/;
    568 
    569 void
    570 mips_cpu_switch_resume(struct lwp *l)
    571 {
    572 	(*mips_locore_jumpvec.ljv_cpu_switch_resume)(l);
    573 }
    574 
    575 tlb_asid_t
    576 tlb_get_asid(void)
    577 {
    578 	return (*mips_locore_jumpvec.ljv_tlb_get_asid)();
    579 }
    580 
    581 void
    582 tlb_set_asid(uint32_t asid, struct pmap *pm)
    583 {
    584 	(*mips_locore_jumpvec.ljv_tlb_set_asid)(asid);
    585 }
    586 
    587 void
    588 tlb_invalidate_all(void)
    589 {
    590 	(*mips_locore_jumpvec.ljv_tlb_invalidate_all)();
    591 }
    592 
    593 void
    594 tlb_invalidate_addr(vaddr_t va, tlb_asid_t asid)
    595 {
    596 	(*mips_locore_jumpvec.ljv_tlb_invalidate_addr)(va, asid);
    597 }
    598 
    599 void
    600 tlb_invalidate_globals(void)
    601 {
    602 	(*mips_locore_jumpvec.ljv_tlb_invalidate_globals)();
    603 }
    604 
    605 void
    606 tlb_invalidate_asids(uint32_t asid_lo, uint32_t asid_hi)
    607 {
    608 	(*mips_locore_jumpvec.ljv_tlb_invalidate_asids)(asid_lo, asid_hi);
    609 }
    610 
    611 u_int
    612 tlb_record_asids(u_long *bitmap, tlb_asid_t asid_max)
    613 {
    614 	return (*mips_locore_jumpvec.ljv_tlb_record_asids)(bitmap, asid_max);
    615 }
    616 
#if 0
/*
 * Compiled out.  NOTE(review): tlb_update_addr is still declared __stub
 * above, so a definition presumably comes from elsewhere — confirm
 * before re-enabling this C fallback.
 */
bool
tlb_update_addr(vaddr_t va, tlb_asid_t asid, pt_entry_t pte, bool insert)
{
	return (*mips_locore_jumpvec.ljv_tlb_update_addr)(va, asid, pte, insert);
}
#endif
    624 
    625 void
    626 tlb_read_entry(size_t tlbno, struct tlbmask *tlb)
    627 {
    628 	(*mips_locore_jumpvec.ljv_tlb_read_entry)(tlbno, tlb);
    629 }
    630 
    631 void
    632 tlb_write_entry(size_t tlbno, const struct tlbmask *tlb)
    633 {
    634 	(*mips_locore_jumpvec.ljv_tlb_write_entry)(tlbno, tlb);
    635 }
    636 
    637 void
    638 wbflush(void)
    639 {
    640 	(*mips_locoresw.lsw_wbflush)();
    641 }
    642 
    643 #ifndef LOCKDEBUG
    644 void mutex_enter(kmutex_t *mtx)				__stub;
    645 void mutex_exit(kmutex_t *mtx)				__stub;
    646 void mutex_spin_enter(kmutex_t *mtx)			__stub;
    647 void mutex_spin_exit(kmutex_t *mtx)			__stub;
    648 
    649 void
    650 mutex_enter(kmutex_t *mtx)
    651 {
    652 
    653 	(*mips_locore_atomicvec.lav_mutex_enter)(mtx);
    654 }
    655 
    656 void
    657 mutex_exit(kmutex_t *mtx)
    658 {
    659 
    660 	(*mips_locore_atomicvec.lav_mutex_exit)(mtx);
    661 }
    662 
    663 void
    664 mutex_spin_enter(kmutex_t *mtx)
    665 {
    666 
    667 	(*mips_locore_atomicvec.lav_mutex_spin_enter)(mtx);
    668 }
    669 
    670 void
    671 mutex_spin_exit(kmutex_t *mtx)
    672 {
    673 
    674 	(*mips_locore_atomicvec.lav_mutex_spin_exit)(mtx);
    675 }
    676 #endif	/* !LOCKDEBUG */
    677 
    678 u_int _atomic_cas_uint(volatile u_int *, u_int, u_int)		__stub;
    679 u_long _atomic_cas_ulong(volatile u_long *, u_long, u_long)	__stub;
    680 
    681 u_int
    682 _atomic_cas_uint(volatile u_int *ptr, u_int old, u_int new)
    683 {
    684 
    685 	return (*mips_locore_atomicvec.lav_atomic_cas_uint)(ptr, old, new);
    686 }
    687 
    688 u_long
    689 _atomic_cas_ulong(volatile u_long *ptr, u_long old, u_long new)
    690 {
    691 
    692 	return (*mips_locore_atomicvec.lav_atomic_cas_ulong)(ptr, old, new);
    693 }
    694 
    695 __strong_alias(atomic_cas_uint, _atomic_cas_uint)
    696 __strong_alias(atomic_cas_uint_ni, _atomic_cas_uint)
    697 __strong_alias(_atomic_cas_32, _atomic_cas_uint)
    698 __strong_alias(_atomic_cas_32_ni, _atomic_cas_uint)
    699 __strong_alias(atomic_cas_32, _atomic_cas_uint)
    700 __strong_alias(atomic_cas_32_ni, _atomic_cas_uint)
    701 __strong_alias(atomic_cas_ptr, _atomic_cas_ulong)
    702 __strong_alias(atomic_cas_ptr_ni, _atomic_cas_ulong)
    703 __strong_alias(atomic_cas_ulong, _atomic_cas_ulong)
    704 __strong_alias(atomic_cas_ulong_ni, _atomic_cas_ulong)
    705 #ifdef _LP64
    706 __strong_alias(atomic_cas_64, _atomic_cas_ulong)
    707 __strong_alias(atomic_cas_64_ni, _atomic_cas_ulong)
    708 __strong_alias(_atomic_cas_64, _atomic_cas_ulong)
    709 __strong_alias(_atomic_cas_64_ni, _atomic_cas_ulong)
    710 #endif
    711 
    712 int	__ucas_32(volatile uint32_t *, uint32_t, uint32_t, uint32_t *) __stub;
    713 int
    714 __ucas_32(volatile uint32_t *ptr, uint32_t old, uint32_t new, uint32_t *retp)
    715 {
    716 
    717 	return (*mips_locore_atomicvec.lav_ucas_32)(ptr, old, new, retp);
    718 }
    719 __strong_alias(_ucas_32,__ucas_32);
    720 
    721 #ifdef _LP64
    722 int	__ucas_64(volatile uint64_t *, uint64_t, uint64_t, uint64_t *) __stub;
    723 int
    724 __ucas_64(volatile uint64_t *ptr, uint64_t old, uint64_t new, uint64_t *retp)
    725 {
    726 
    727 	return (*mips_locore_atomicvec.lav_ucas_64)(ptr, old, new, retp);
    728 }
    729 __strong_alias(_ucas_64,__ucas_64);
    730 #endif /* _LP64 */
    731