      1 /*	$NetBSD: cpu_subr.c,v 1.66 2025/09/06 12:42:16 riastradh Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2010, 2019, 2023 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Matt Thomas of 3am Software Foundry.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 #include <sys/cdefs.h>
     33 __KERNEL_RCSID(0, "$NetBSD: cpu_subr.c,v 1.66 2025/09/06 12:42:16 riastradh Exp $");
     34 
     35 #include "opt_cputype.h"
     36 #include "opt_ddb.h"
     37 #include "opt_modular.h"
     38 #include "opt_multiprocessor.h"
     39 
     40 #include <sys/param.h>
     41 #include <sys/atomic.h>
     42 #include <sys/bitops.h>
     43 #include <sys/cpu.h>
     44 #include <sys/device.h>
     45 #include <sys/device_impl.h>	/* XXX autoconf abuse */
     46 #include <sys/idle.h>
     47 #include <sys/intr.h>
     48 #include <sys/ipi.h>
     49 #include <sys/kernel.h>
     50 #include <sys/lwp.h>
     51 #include <sys/module.h>
     52 #include <sys/paravirt_membar.h>
     53 #include <sys/proc.h>
     54 #include <sys/ras.h>
     55 #include <sys/reboot.h>
     56 #include <sys/xcall.h>
     57 
     58 #include <uvm/uvm.h>
     59 
     60 #include <mips/locore.h>
     61 #include <mips/regnum.h>
     62 #include <mips/pcb.h>
     63 #include <mips/cache.h>
     64 #include <mips/frame.h>
     65 #include <mips/userret.h>
     66 #include <mips/pte.h>
     67 
     68 #if defined(DDB) || defined(KGDB)
     69 #ifdef DDB
     70 #include <mips/db_machdep.h>
     71 #include <ddb/db_command.h>
     72 #include <ddb/db_output.h>
     73 #endif
     74 #endif
     75 
     76 #ifdef MIPS64_OCTEON
     77 #include <mips/cavium/octeonvar.h>
     78 extern struct cpu_softc octeon_cpu_softc[];
     79 #endif
     80 
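         /*
          * cpu_info for the primary (boot) CPU.  On non-Octeon MP kernels
          * it lives in .data1 and is aligned to the smallest power of two
          * that can hold a struct cpu_info (1 << ilog2(2*n - 1) rounds n
          * up to the next power of two).
          */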
     81 struct cpu_info cpu_info_store
     82 #if defined(MULTIPROCESSOR) && !defined(MIPS64_OCTEON)
     83 	__section(".data1")
     84 	__aligned(1LU << ilog2((2*sizeof(struct cpu_info)-1)))
     85 #endif
     86     = {
     87 	.ci_curlwp = &lwp0,
     88 	.ci_tlb_info = &pmap_tlb0_info,
     89 	.ci_pmap_kern_segtab = &pmap_kern_segtab,
     90 	.ci_pmap_user_segtab = NULL,
     91 #ifdef _LP64
     92 	.ci_pmap_user_seg0tab = NULL,
     93 #endif
     94 	.ci_cpl = IPL_HIGH,
     95 	.ci_tlb_slot = -1,
     96 #ifdef MULTIPROCESSOR
     97 	.ci_flags = CPUF_PRIMARY|CPUF_PRESENT|CPUF_RUNNING,
     98 #endif
     99 #ifdef MIPS64_OCTEON
    100 	.ci_softc = &octeon_cpu_softc[0],
    101 #endif
    102 };
    103 
    104 const pcu_ops_t * const pcu_ops_md_defs[PCU_UNIT_COUNT] = {
    105 	[PCU_FPU] = &mips_fpu_ops,
    106 #if (MIPS32R2 + MIPS64R2) > 0
    107 	[PCU_DSP] = &mips_dsp_ops,
    108 #endif
    109 };
    110 
    111 #ifdef MULTIPROCESSOR
    112 struct cpu_info * cpuid_infos[MAXCPUS] = {
    113 	[0] = &cpu_info_store,
    114 };
    115 
    116 kcpuset_t *cpus_halted;
    117 kcpuset_t *cpus_hatched;
    118 kcpuset_t *cpus_paused;
    119 kcpuset_t *cpus_resumed;
    120 kcpuset_t *cpus_running;
    121 
    122 static void cpu_ipi_wait(const char *, const kcpuset_t *, const kcpuset_t *);
    123 
    124 struct cpu_info *
    125 cpu_info_alloc(struct pmap_tlb_info *ti, cpuid_t cpu_id, cpuid_t cpu_package_id,
    126 	cpuid_t cpu_core_id, cpuid_t cpu_smt_id)
    127 {
    128 
    129 	KASSERT(cpu_id < MAXCPUS);
    130 
    131 #ifdef MIPS64_OCTEON
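         	/*
         	 * On Octeon each CPU has its own exc_step-sized exception
         	 * vector block starting at the UTLB miss vector.  This CPU's
         	 * cpu_info (and, if the caller did not supply one, its
         	 * pmap_tlb_info) are carved out of the top of that block,
         	 * above the vectors themselves; the CTASSERT below checks
         	 * that both fit.
         	 */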
    132 	const int exc_step = 1 << MIPS_EBASE_EXC_BASE_SHIFT;
    133 	vaddr_t exc_page = MIPS_UTLB_MISS_EXC_VEC + exc_step * cpu_id;
    134 	__CTASSERT(sizeof(struct cpu_info) + sizeof(struct pmap_tlb_info)
    135 	    <= exc_step - 0x280);
    136 
    137 	struct cpu_info * const ci = ((struct cpu_info *)(exc_page + exc_step)) - 1;
    138 	memset((void *)exc_page, 0, exc_step);
    139 
    140 	if (ti == NULL) {
    141 		ti = ((struct pmap_tlb_info *)ci) - 1;
    142 		pmap_tlb_info_init(ti);
    143 	}
    144 #else
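         	/*
         	 * Keep the same page offset for cpu_info on every CPU: this
         	 * CPU's cpu_info is placed at the offset that cpu_info_store
         	 * occupies within its own page, presumably so the exception
         	 * vector code can use one fixed offset regardless of CPU.
         	 */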
    145 	const vaddr_t cpu_info_offset = (vaddr_t)&cpu_info_store & PAGE_MASK;
    146 	struct pglist pglist;
    147 	int error;
    148 
    149 	/*
     150 	 * Grab a page from the first 512MB (mappable by KSEG0) to hold the
     151 	 * exception vectors and cpu_info for this cpu.
     152 	 */
    153 	error = uvm_pglistalloc(PAGE_SIZE,
    154 	    0, MIPS_KSEG1_START - MIPS_KSEG0_START,
    155 	    PAGE_SIZE, PAGE_SIZE, &pglist, 1, false);
    156 	if (error)
    157 		return NULL;
    158 
    159 	const paddr_t pa = VM_PAGE_TO_PHYS(TAILQ_FIRST(&pglist));
    160 	const vaddr_t va = MIPS_PHYS_TO_KSEG0(pa);
    161 	struct cpu_info * const ci = (void *) (va + cpu_info_offset);
    162 	memset((void *)va, 0, PAGE_SIZE);
    163 
    164 	/*
    165 	 * If we weren't passed a pmap_tlb_info to use, the caller wants us
    166 	 * to take care of that for him.  Since we have room left over in the
    167 	 * page we just allocated, just use a piece of that for it.
    168 	 */
    169 	if (ti == NULL) {
    170 		if (cpu_info_offset >= sizeof(*ti)) {
    171 			ti = (void *) va;
    172 		} else {
     173 			KASSERT(PAGE_SIZE - cpu_info_offset - sizeof(*ci) >= sizeof(*ti));
    174 			ti = (struct pmap_tlb_info *)(va + PAGE_SIZE) - 1;
    175 		}
    176 		pmap_tlb_info_init(ti);
    177 	}
    178 
    179 	/*
    180 	 * Attach its TLB info (which must be direct-mapped)
    181 	 */
    182 #ifdef _LP64
    183 	KASSERT(MIPS_KSEG0_P(ti) || MIPS_XKPHYS_P(ti));
    184 #else
    185 	KASSERT(MIPS_KSEG0_P(ti));
    186 #endif
    187 #endif /* MIPS64_OCTEON */
    188 
    189 	KASSERT(cpu_id != 0);
    190 	ci->ci_cpuid = cpu_id;
     191 	ci->ci_pmap_kern_segtab = &pmap_kern_segtab;
    192 	ci->ci_cpu_freq = cpu_info_store.ci_cpu_freq;
    193 	ci->ci_cctr_freq = cpu_info_store.ci_cctr_freq;
    194 	ci->ci_cycles_per_hz = cpu_info_store.ci_cycles_per_hz;
    195 	ci->ci_divisor_delay = cpu_info_store.ci_divisor_delay;
    196 	ci->ci_divisor_recip = cpu_info_store.ci_divisor_recip;
    197 	ci->ci_cpuwatch_count = cpu_info_store.ci_cpuwatch_count;
    198 
    199 	cpu_topology_set(ci, cpu_package_id, cpu_core_id, cpu_smt_id, 0);
    200 
    201 	pmap_md_alloc_ephemeral_address_space(ci);
    202 
    203 	mi_cpu_attach(ci);
    204 
    205 	pmap_tlb_info_attach(ti, ci);
    206 
    207 	return ci;
    208 }
    209 #endif /* MULTIPROCESSOR */
    210 
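         /*
          * On MIPS{32,64}R2 CPUs, enable user-mode RDHWR access to the
          * CPUNum, SYNCI step, cycle counter and (when implemented)
          * UserLocal hardware registers.  UserLocal is loaded with
          * curlwp->l_private so userland can fetch its TLS base with a
          * single rdhwr instruction.
          */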
    211 static void
    212 cpu_hwrena_setup(void)
    213 {
    214 #if (MIPS32R2 + MIPS64R2) > 0
    215 	const int cp0flags = mips_options.mips_cpu->cpu_cp0flags;
    216 
    217 	if ((cp0flags & MIPS_CP0FL_USE) == 0)
    218 		return;
    219 
    220 	if (CPUISMIPSNNR2) {
    221 		mipsNN_cp0_hwrena_write(
    222 		    (MIPS_HAS_USERLOCAL ? MIPS_HWRENA_ULR : 0)
    223 		    | MIPS_HWRENA_CCRES
    224 		    | MIPS_HWRENA_CC
    225 		    | MIPS_HWRENA_SYNCI_STEP
    226 		    | MIPS_HWRENA_CPUNUM);
    227 		if (MIPS_HAS_USERLOCAL) {
    228 			mipsNN_cp0_userlocal_write(curlwp->l_private);
    229 		}
    230 	}
    231 #endif
    232 }
    233 
    234 void
    235 cpu_attach_common(device_t self, struct cpu_info *ci)
    236 {
    237 	const char * const xname = device_xname(self);
    238 
    239 	/*
    240 	 * Cross link cpu_info and its device together
    241 	 *
    242 	 * XXX autoconf abuse: Can't use device_set_private here
    243 	 * because some callers already do so -- and some callers
    244 	 * (sbmips cpu_attach) already have a softc allocated by
    245 	 * autoconf.
    246 	 */
    247 	ci->ci_dev = self;
    248 	self->dv_private = ci;
    249 	KASSERT(ci->ci_idepth == 0);
    250 
    251 	evcnt_attach_dynamic(&ci->ci_ev_count_compare,
    252 		EVCNT_TYPE_INTR, NULL, xname,
    253 		"int 5 (clock)");
    254 	evcnt_attach_dynamic(&ci->ci_ev_count_compare_missed,
    255 		EVCNT_TYPE_INTR, NULL, xname,
    256 		"int 5 (clock) missed");
    257 	evcnt_attach_dynamic(&ci->ci_ev_fpu_loads,
    258 		EVCNT_TYPE_MISC, NULL, xname,
    259 		"fpu loads");
    260 	evcnt_attach_dynamic(&ci->ci_ev_fpu_saves,
    261 		EVCNT_TYPE_MISC, NULL, xname,
    262 		"fpu saves");
    263 	evcnt_attach_dynamic(&ci->ci_ev_dsp_loads,
    264 		EVCNT_TYPE_MISC, NULL, xname,
    265 		"dsp loads");
    266 	evcnt_attach_dynamic(&ci->ci_ev_dsp_saves,
    267 		EVCNT_TYPE_MISC, NULL, xname,
    268 		"dsp saves");
    269 	evcnt_attach_dynamic(&ci->ci_ev_tlbmisses,
    270 		EVCNT_TYPE_TRAP, NULL, xname,
    271 		"tlb misses");
    272 
    273 #ifdef MULTIPROCESSOR
    274 	if (ci != &cpu_info_store) {
    275 		/*
    276 		 * Tail insert this onto the list of cpu_info's.
    277 		 * atomic_store_release matches PTR_L/SYNC_ACQ in
    278 		 * locore_octeon.S (XXX what about non-Octeon?).
    279 		 */
    280 		KASSERT(cpuid_infos[ci->ci_cpuid] == NULL);
    281 		atomic_store_release(&cpuid_infos[ci->ci_cpuid], ci);
    282 		membar_producer(); /* Cavium sync plunger */
    283 	}
    284 	KASSERT(cpuid_infos[ci->ci_cpuid] != NULL);
    285 	evcnt_attach_dynamic(&ci->ci_evcnt_synci_activate_rqst,
    286 	    EVCNT_TYPE_MISC, NULL, xname,
    287 	    "syncicache activate request");
    288 	evcnt_attach_dynamic(&ci->ci_evcnt_synci_deferred_rqst,
    289 	    EVCNT_TYPE_MISC, NULL, xname,
    290 	    "syncicache deferred request");
    291 	evcnt_attach_dynamic(&ci->ci_evcnt_synci_ipi_rqst,
    292 	    EVCNT_TYPE_MISC, NULL, xname,
    293 	    "syncicache ipi request");
    294 	evcnt_attach_dynamic(&ci->ci_evcnt_synci_onproc_rqst,
    295 	    EVCNT_TYPE_MISC, NULL, xname,
    296 	    "syncicache onproc request");
    297 
    298 	/*
    299 	 * Initialize IPI framework for this cpu instance
    300 	 */
    301 	ipi_init(ci);
    302 
    303 	kcpuset_create(&ci->ci_shootdowncpus, true);
    304 	kcpuset_create(&ci->ci_multicastcpus, true);
    305 	kcpuset_create(&ci->ci_watchcpus, true);
    306 	kcpuset_create(&ci->ci_ddbcpus, true);
    307 #endif
    308 }
    309 
    310 void
    311 cpu_startup_common(void)
    312 {
    313 	vaddr_t minaddr, maxaddr;
    314 	char pbuf[9];	/* "99999 MB" */
    315 
    316 	pmap_tlb_info_evcnt_attach(&pmap_tlb0_info);
    317 
    318 #ifdef MULTIPROCESSOR
     319 	kcpuset_create(&cpus_halted, true);
     320 	KASSERT(cpus_halted != NULL);
     321 	kcpuset_create(&cpus_hatched, true);
     322 	KASSERT(cpus_hatched != NULL);
     323 	kcpuset_create(&cpus_paused, true);
     324 	KASSERT(cpus_paused != NULL);
     325 	kcpuset_create(&cpus_resumed, true);
     326 	KASSERT(cpus_resumed != NULL);
     327 	kcpuset_create(&cpus_running, true);
     328 	KASSERT(cpus_running != NULL);
    329 	kcpuset_set(cpus_hatched, cpu_number());
    330 	kcpuset_set(cpus_running, cpu_number());
    331 #endif
    332 
    333 	cpu_hwrena_setup();
    334 
    335 	/*
    336 	 * Good {morning,afternoon,evening,night}.
    337 	 */
    338 	printf("%s%s", copyright, version);
    339 	printf("%s\n", cpu_getmodel());
    340 	format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
    341 	printf("total memory = %s\n", pbuf);
    342 
    343 	minaddr = 0;
    344 	/*
    345 	 * Allocate a submap for physio.
    346 	 */
    347 	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
    348 				    VM_PHYS_SIZE, 0, FALSE, NULL);
    349 
    350 	/*
    351 	 * (No need to allocate an mbuf cluster submap.  Mbuf clusters
    352 	 * are allocated via the pool allocator, and we use KSEG/XKPHYS to
    353 	 * map those pages.)
    354 	 */
    355 
    356 	format_bytes(pbuf, sizeof(pbuf), ptoa(uvm_availmem(false)));
    357 	printf("avail memory = %s\n", pbuf);
    358 
    359 #if defined(__mips_n32)
    360 	module_machine = "mips-n32";
    361 #endif
    362 }
    363 
    364 void
    365 cpu_getmcontext(struct lwp *l, mcontext_t *mcp, unsigned int *flags)
    366 {
    367 	const struct trapframe *tf = l->l_md.md_utf;
    368 	__greg_t *gr = mcp->__gregs;
    369 	__greg_t ras_pc;
    370 
     371 	/* Save register context.  Don't copy R0 - it is always 0. */
    372 	memcpy(&gr[_REG_AT], &tf->tf_regs[_R_AST], sizeof(mips_reg_t) * 31);
    373 
    374 	gr[_REG_MDLO]  = tf->tf_regs[_R_MULLO];
    375 	gr[_REG_MDHI]  = tf->tf_regs[_R_MULHI];
    376 	gr[_REG_CAUSE] = tf->tf_regs[_R_CAUSE];
    377 	gr[_REG_EPC]   = tf->tf_regs[_R_PC];
    378 	gr[_REG_SR]    = tf->tf_regs[_R_SR];
    379 	mcp->_mc_tlsbase = (intptr_t)l->l_private;
    380 
    381 	if ((ras_pc = (intptr_t)ras_lookup(l->l_proc,
    382 	    (void *) (intptr_t)gr[_REG_EPC])) != -1)
    383 		gr[_REG_EPC] = ras_pc;
    384 
    385 	*flags |= _UC_CPU | _UC_TLSBASE;
    386 
    387 	/* Save floating point register context, if any. */
    388 	KASSERT(l == curlwp);
    389 	if (fpu_used_p(l)) {
    390 		size_t fplen;
    391 		/*
    392 		 * If this process is the current FP owner, dump its
    393 		 * context to the PCB first.
    394 		 */
    395 		fpu_save(l);
    396 
    397 		/*
    398 		 * The PCB FP regs struct includes the FP CSR, so use the
    399 		 * size of __fpregs.__fp_r when copying.
    400 		 */
    401 #if !defined(__mips_o32)
    402 		if (_MIPS_SIM_NEWABI_P(l->l_proc->p_md.md_abi)) {
    403 #endif
    404 			fplen = sizeof(struct fpreg);
    405 #if !defined(__mips_o32)
    406 		} else {
    407 			fplen = sizeof(struct fpreg_oabi);
    408 		}
    409 #endif
    410 		struct pcb * const pcb = lwp_getpcb(l);
    411 		memcpy(&mcp->__fpregs, &pcb->pcb_fpregs, fplen);
    412 		*flags |= _UC_FPU;
    413 	}
    414 }
    415 
    416 int
    417 cpu_mcontext_validate(struct lwp *l, const mcontext_t *mcp)
    418 {
    419 
    420 	/* XXX:  Do we validate the addresses?? */
    421 	return 0;
    422 }
    423 
    424 int
    425 cpu_setmcontext(struct lwp *l, const mcontext_t *mcp, unsigned int flags)
    426 {
    427 	struct trapframe *tf = l->l_md.md_utf;
    428 	struct proc *p = l->l_proc;
    429 	const __greg_t *gr = mcp->__gregs;
    430 	int error;
    431 
    432 	/* Restore register context, if any. */
    433 	if (flags & _UC_CPU) {
    434 		error = cpu_mcontext_validate(l, mcp);
    435 		if (error)
    436 			return error;
    437 
     438 		/* Restore register context. */
    439 
    440 #ifdef __mips_n32
    441 		CTASSERT(_R_AST == _REG_AT);
    442 		if (__predict_false(p->p_md.md_abi == _MIPS_BSD_API_O32)) {
    443 			const mcontext_o32_t *mcp32 = (const mcontext_o32_t *)mcp;
    444 			const __greg32_t *gr32 = mcp32->__gregs;
    445 			for (size_t i = _R_AST; i < 32; i++) {
    446 				tf->tf_regs[i] = gr32[i];
    447 			}
    448 		} else
    449 #endif
    450 		memcpy(&tf->tf_regs[_R_AST], &gr[_REG_AT],
    451 		       sizeof(mips_reg_t) * 31);
    452 
    453 		tf->tf_regs[_R_MULLO] = gr[_REG_MDLO];
    454 		tf->tf_regs[_R_MULHI] = gr[_REG_MDHI];
    455 		tf->tf_regs[_R_CAUSE] = gr[_REG_CAUSE];
    456 		tf->tf_regs[_R_PC]    = gr[_REG_EPC];
    457 		/* Do not restore SR. */
    458 	}
    459 
    460 	/* Restore the private thread context */
    461 	if (flags & _UC_TLSBASE) {
    462 		lwp_setprivate(l, (void *)(intptr_t)mcp->_mc_tlsbase);
    463 	}
    464 
    465 	/* Restore floating point register context, if any. */
    466 	if (flags & _UC_FPU) {
    467 		size_t fplen;
    468 
     469 		/* Discard the FPU contents. */
    470 		fpu_discard(l);
    471 
    472 #if !defined(__mips_o32)
    473 		if (_MIPS_SIM_NEWABI_P(l->l_proc->p_md.md_abi)) {
    474 #endif
    475 			fplen = sizeof(struct fpreg);
    476 #if !defined(__mips_o32)
    477 		} else {
    478 			fplen = sizeof(struct fpreg_oabi);
    479 		}
    480 #endif
    481 		/*
    482 		 * The PCB FP regs struct includes the FP CSR, so use the
    483 		 * proper size of fpreg when copying.
    484 		 */
    485 		struct pcb * const pcb = lwp_getpcb(l);
    486 		memcpy(&pcb->pcb_fpregs, &mcp->__fpregs, fplen);
    487 	}
    488 
    489 	mutex_enter(p->p_lock);
    490 	if (flags & _UC_SETSTACK)
    491 		l->l_sigstk.ss_flags |= SS_ONSTACK;
    492 	if (flags & _UC_CLRSTACK)
    493 		l->l_sigstk.ss_flags &= ~SS_ONSTACK;
    494 	mutex_exit(p->p_lock);
    495 
    496 	return (0);
    497 }
    498 
    499 void
    500 cpu_need_resched(struct cpu_info *ci, struct lwp *l, int flags)
    501 {
    502 
    503 	KASSERT(kpreempt_disabled());
    504 
    505 	if ((flags & RESCHED_KPREEMPT) != 0) {
    506 #ifdef __HAVE_PREEMPTION
    507 		if ((flags & RESCHED_REMOTE) != 0) {
    508 			cpu_send_ipi(ci, IPI_KPREEMPT);
    509 		} else {
    510 			softint_trigger(SOFTINT_KPREEMPT);
    511 		}
    512 #endif
    513 		return;
    514 	}
    515 	if ((flags & RESCHED_REMOTE) != 0) {
    516 #ifdef MULTIPROCESSOR
    517 		cpu_send_ipi(ci, IPI_AST);
    518 #endif
    519 	} else {
    520 		l->l_md.md_astpending = 1;		/* force call to ast() */
    521 	}
    522 }
    523 
    524 uint32_t
    525 cpu_clkf_usermode_mask(void)
    526 {
    527 
    528 	return CPUISMIPS3 ? MIPS_SR_KSU_USER : MIPS_SR_KU_PREV;
    529 }
    530 
    531 void
    532 cpu_signotify(struct lwp *l)
    533 {
    534 
    535 	KASSERT(kpreempt_disabled());
    536 #ifdef __HAVE_FAST_SOFTINTS
    537 	KASSERT(lwp_locked(l, NULL));
    538 #endif
    539 
    540 	if (l->l_cpu != curcpu()) {
    541 #ifdef MULTIPROCESSOR
    542 		cpu_send_ipi(l->l_cpu, IPI_AST);
    543 #endif
    544 	} else {
    545 		l->l_md.md_astpending = 1; 	/* force call to ast() */
    546 	}
    547 }
    548 
    549 void
    550 cpu_need_proftick(struct lwp *l)
    551 {
    552 
    553 	KASSERT(kpreempt_disabled());
    554 	KASSERT(l->l_cpu == curcpu());
    555 
    556 	l->l_pflag |= LP_OWEUPC;
    557 	l->l_md.md_astpending = 1;		/* force call to ast() */
    558 }
    559 
    560 #ifdef __HAVE_PREEMPTION
    561 bool
    562 cpu_kpreempt_enter(uintptr_t where, int s)
    563 {
    564 
    565 	KASSERT(kpreempt_disabled());
    566 
    567 #if 0
    568 	if (where == (intptr_t)-2) {
    569 		KASSERT(curcpu()->ci_mtx_count == 0);
    570 		/*
    571 		 * We must be called via kern_intr (which already checks for
     572 		 * IPL_NONE so of course we can be preempted).
    573 		 */
    574 		return true;
    575 	}
    576 	/*
    577 	 * We are called from KPREEMPT_ENABLE().  If we are at IPL_NONE,
    578 	 * of course we can be preempted.  If we aren't, ask for a
    579 	 * softint so that kern_intr can call kpreempt.
    580 	 */
    581 	if (s == IPL_NONE) {
    582 		KASSERT(curcpu()->ci_mtx_count == 0);
    583 		return true;
    584 	}
    585 	softint_trigger(SOFTINT_KPREEMPT);
    586 #endif
    587 	return false;
    588 }
    589 
    590 void
    591 cpu_kpreempt_exit(uintptr_t where)
    592 {
    593 
    594 	/* do nothing */
    595 }
    596 
    597 /*
    598  * Return true if preemption is disabled for MD reasons.  Must be called
    599  * with preemption disabled, and thus is only for diagnostic checks.
    600  */
    601 bool
    602 cpu_kpreempt_disabled(void)
    603 {
    604 
    605 	/*
    606 	 * Any elevated IPL disables preemption.
    607 	 */
    608 	return curcpu()->ci_cpl > IPL_NONE;
    609 }
    610 #endif /* __HAVE_PREEMPTION */
    611 
    612 void
    613 cpu_idle(void)
    614 {
    615 	void (*const mach_idle)(void) = mips_locoresw.lsw_cpu_idle;
    616 	struct cpu_info * const ci = curcpu();
    617 
    618 	while (!ci->ci_want_resched) {
    619 #ifdef __HAVE_FAST_SOFTINTS
    620 		KASSERT(ci->ci_data.cpu_softints == 0);
    621 #endif
    622 		(*mach_idle)();
    623 	}
    624 }
    625 
    626 bool
    627 cpu_intr_p(void)
    628 {
    629 	int idepth;
    630 	long pctr;
    631 	lwp_t *l;
    632 
    633 	l = curlwp;
    634 	do {
    635 		pctr = lwp_pctr();
    636 		idepth = l->l_cpu->ci_idepth;
    637 	} while (__predict_false(pctr != lwp_pctr()));
    638 
    639 	return idepth != 0;
    640 }
    641 
    642 #ifdef MULTIPROCESSOR
    643 
    644 void
    645 cpu_broadcast_ipi(int tag)
    646 {
    647 
    648 	// No reason to remove ourselves since multicast_ipi will do that for us
    649 	cpu_multicast_ipi(cpus_running, tag);
    650 }
    651 
    652 void
    653 cpu_multicast_ipi(const kcpuset_t *kcp, int tag)
    654 {
    655 	struct cpu_info * const ci = curcpu();
    656 	kcpuset_t *kcp2 = ci->ci_multicastcpus;
    657 
    658 	if (kcpuset_match(cpus_running, ci->ci_kcpuset))
    659 		return;
    660 
    661 	kcpuset_copy(kcp2, kcp);
    662 	kcpuset_remove(kcp2, ci->ci_kcpuset);
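         	/* kcpuset_ffs() is 1-based; 0 means the set is empty, hence --cii. */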
    663 	for (cpuid_t cii; (cii = kcpuset_ffs(kcp2)) != 0; ) {
    664 		kcpuset_clear(kcp2, --cii);
    665 		(void)cpu_send_ipi(cpu_lookup(cii), tag);
    666 	}
    667 }
    668 
    669 int
    670 cpu_send_ipi(struct cpu_info *ci, int tag)
    671 {
    672 
    673 	return (*mips_locoresw.lsw_send_ipi)(ci, tag);
    674 }
    675 
    676 static void
    677 cpu_ipi_wait(const char *s, const kcpuset_t *watchset, const kcpuset_t *wanted)
    678 {
    679 	bool done = false;
    680 	struct cpu_info * const ci = curcpu();
    681 	kcpuset_t *kcp = ci->ci_watchcpus;
    682 
     683 	/* Spin for a bounded time: roughly ci_cpu_freq/10 loop iterations. */
    684 
    685 	for (u_long limit = curcpu()->ci_cpu_freq/10; !done && limit--; ) {
    686 		kcpuset_copy(kcp, watchset);
    687 		kcpuset_intersect(kcp, wanted);
    688 		done = kcpuset_match(kcp, wanted);
    689 	}
    690 
    691 	if (!done) {
    692 		cpuid_t cii;
    693 		kcpuset_copy(kcp, wanted);
    694 		kcpuset_remove(kcp, watchset);
    695 		if ((cii = kcpuset_ffs(kcp)) != 0) {
    696 			printf("Failed to %s:", s);
    697 			do {
    698 				kcpuset_clear(kcp, --cii);
    699 				printf(" cpu%lu", cii);
    700 			} while ((cii = kcpuset_ffs(kcp)) != 0);
    701 			printf("\n");
    702 		}
    703 	}
    704 }
    705 
    706 /*
    707  * Halt this cpu
    708  */
    709 void
    710 cpu_halt(void)
    711 {
    712 	cpuid_t cii = cpu_index(curcpu());
    713 
    714 	printf("cpu%lu: shutting down\n", cii);
    715 	kcpuset_atomic_set(cpus_halted, cii);
    716 	spl0();		/* allow interrupts e.g. further ipi ? */
    717 	for (;;) ;	/* spin */
    718 
    719 	/* NOTREACHED */
    720 }
    721 
    722 /*
    723  * Halt all running cpus, excluding current cpu.
    724  */
    725 void
    726 cpu_halt_others(void)
    727 {
    728 	kcpuset_t *kcp;
    729 
    730 	// If we are the only CPU running, there's nothing to do.
    731 	if (kcpuset_match(cpus_running, curcpu()->ci_kcpuset))
    732 		return;
    733 
    734 	// Get all running CPUs
    735 	kcpuset_clone(&kcp, cpus_running);
    736 	// Remove ourself
    737 	kcpuset_remove(kcp, curcpu()->ci_kcpuset);
    738 	// Remove any halted CPUs
    739 	kcpuset_remove(kcp, cpus_halted);
    740 	// If there are CPUs left, send the IPIs
    741 	if (!kcpuset_iszero(kcp)) {
    742 		cpu_multicast_ipi(kcp, IPI_HALT);
    743 		cpu_ipi_wait("halt", cpus_halted, kcp);
    744 	}
    745 	kcpuset_destroy(kcp);
    746 
    747 	/*
    748 	 * TBD
    749 	 * Depending on available firmware methods, other cpus will
    750 	 * either shut down themselves, or spin and wait for us to
    751 	 * stop them.
    752 	 */
    753 }
    754 
    755 /*
    756  * Pause this cpu
    757  */
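         /*
          * Handshake: a paused CPU marks itself in cpus_paused and spins
          * until cpu_resume() clears that bit, then acknowledges by setting
          * its bit in cpus_resumed (and may drop into DDB while paused).
          */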
    758 void
    759 cpu_pause(struct reg *regsp)
    760 {
    761 	int s = splhigh();
    762 	cpuid_t cii = cpu_index(curcpu());
    763 
    764 	if (__predict_false(cold)) {
    765 		splx(s);
    766 		return;
    767 	}
    768 
    769 	do {
    770 		kcpuset_atomic_set(cpus_paused, cii);
    771 		do {
    772 			;
    773 		} while (kcpuset_isset(cpus_paused, cii));
    774 		kcpuset_atomic_set(cpus_resumed, cii);
    775 #if defined(DDB)
    776 		if (ddb_running_on_this_cpu_p())
    777 			cpu_Debugger();
    778 		if (ddb_running_on_any_cpu_p())
    779 			continue;
    780 #endif
    781 	} while (false);
    782 
    783 	splx(s);
    784 }
    785 
    786 /*
    787  * Pause all running cpus, excluding current cpu.
    788  */
    789 void
    790 cpu_pause_others(void)
    791 {
    792 	struct cpu_info * const ci = curcpu();
    793 
    794 	if (cold || kcpuset_match(cpus_running, ci->ci_kcpuset))
    795 		return;
    796 
    797 	kcpuset_t *kcp = ci->ci_ddbcpus;
    798 
    799 	kcpuset_copy(kcp, cpus_running);
    800 	kcpuset_remove(kcp, ci->ci_kcpuset);
    801 	kcpuset_remove(kcp, cpus_paused);
    802 
    803 	cpu_broadcast_ipi(IPI_SUSPEND);
    804 	cpu_ipi_wait("pause", cpus_paused, kcp);
    805 }
    806 
    807 /*
    808  * Resume a single cpu
    809  */
    810 void
    811 cpu_resume(cpuid_t cii)
    812 {
    813 
    814 	if (__predict_false(cold))
    815 		return;
    816 
    817 	struct cpu_info * const ci = curcpu();
    818 	kcpuset_t *kcp = ci->ci_ddbcpus;
    819 
    820 	kcpuset_set(kcp, cii);
    821 	kcpuset_atomicly_remove(cpus_resumed, cpus_resumed);
    822 	kcpuset_atomic_clear(cpus_paused, cii);
    823 
    824 	cpu_ipi_wait("resume", cpus_resumed, kcp);
    825 }
    826 
    827 /*
    828  * Resume all paused cpus.
    829  */
    830 void
    831 cpu_resume_others(void)
    832 {
    833 
    834 	if (__predict_false(cold))
    835 		return;
    836 
    837 	struct cpu_info * const ci = curcpu();
    838 	kcpuset_t *kcp = ci->ci_ddbcpus;
    839 
    840 	kcpuset_atomicly_remove(cpus_resumed, cpus_resumed);
    841 	kcpuset_copy(kcp, cpus_paused);
    842 	kcpuset_atomicly_remove(cpus_paused, cpus_paused);
    843 
    844 	/* CPUs awake on cpus_paused clear */
    845 	cpu_ipi_wait("resume", cpus_resumed, kcp);
    846 }
    847 
    848 bool
    849 cpu_is_paused(cpuid_t cii)
    850 {
    851 
    852 	return !cold && kcpuset_isset(cpus_paused, cii);
    853 }
    854 
    855 #ifdef DDB
    856 void
    857 cpu_debug_dump(void)
    858 {
    859 	CPU_INFO_ITERATOR cii;
    860 	struct cpu_info *ci;
    861 	char running, hatched, paused, resumed, halted;
    862 	db_printf("CPU CPUID STATE CPUINFO            CPL INT MTX IPIS(A/R)\n");
    863 	for (CPU_INFO_FOREACH(cii, ci)) {
    864 		hatched = (kcpuset_isset(cpus_hatched, cpu_index(ci)) ? 'H' : '-');
    865 		running = (kcpuset_isset(cpus_running, cpu_index(ci)) ? 'R' : '-');
    866 		paused  = (kcpuset_isset(cpus_paused,  cpu_index(ci)) ? 'P' : '-');
    867 		resumed = (kcpuset_isset(cpus_resumed, cpu_index(ci)) ? 'r' : '-');
    868 		halted  = (kcpuset_isset(cpus_halted,  cpu_index(ci)) ? 'h' : '-');
    869 		db_printf("%3d 0x%03lx %c%c%c%c%c %p "
    870 			"%3d %3d %3d "
    871 			"0x%02" PRIx64 "/0x%02" PRIx64 "\n",
    872 			cpu_index(ci), ci->ci_cpuid,
    873 			running, hatched, paused, resumed, halted,
    874 			ci, ci->ci_cpl, ci->ci_idepth, ci->ci_mtx_count,
    875 			ci->ci_active_ipis, ci->ci_request_ipis);
    876 	}
    877 }
    878 #endif
    879 
    880 void
    881 cpu_hatch(struct cpu_info *ci)
    882 {
    883 	struct pmap_tlb_info * const ti = ci->ci_tlb_info;
    884 
    885 	/*
     886 	 * Invalidate all the TLB entries (even wired ones) and then reserve
    887 	 * space for the wired TLB entries.
    888 	 */
    889 	mips3_cp0_wired_write(0);
    890 	tlb_invalidate_all();
    891 	mips3_cp0_wired_write(ti->ti_wired);
    892 
    893 	/*
    894 	 * Setup HWRENA and USERLOCAL COP0 registers (MIPSxxR2).
    895 	 */
    896 	cpu_hwrena_setup();
    897 
    898 	/*
    899 	 * If we are using register zero relative addressing to access cpu_info
    900 	 * in the exception vectors, enter that mapping into TLB now.
    901 	 */
    902 	if (ci->ci_tlb_slot >= 0) {
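         		/*
         		 * Wire the last virtual page (tlb_hi = -PAGE_SIZE) to the
         		 * page holding this cpu_info: small negative offsets from
         		 * register $zero sign-extend to the top of the address
         		 * space, so exception handlers can load cpu_info fields
         		 * without first materializing a base address.  With an odd
         		 * PGSHIFT a software page covers both halves of the TLB
         		 * pair, so both lo0 and lo1 are filled; otherwise the last
         		 * page is the odd half and only lo1 is used.
         		 */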
    903 		const uint32_t tlb_lo = MIPS3_PG_G|MIPS3_PG_V
    904 		    | mips3_paddr_to_tlbpfn((vaddr_t)ci);
    905 		const struct tlbmask tlbmask = {
    906 			.tlb_hi = -PAGE_SIZE | KERNEL_PID,
    907 #if (PGSHIFT & 1)
    908 			.tlb_lo0 = tlb_lo,
    909 			.tlb_lo1 = tlb_lo + MIPS3_PG_NEXT,
    910 #else
    911 			.tlb_lo0 = 0,
    912 			.tlb_lo1 = tlb_lo,
    913 #endif
    914 			.tlb_mask = -1,
    915 		};
    916 
    917 		tlb_invalidate_addr(tlbmask.tlb_hi, KERNEL_PID);
    918 		tlb_write_entry(ci->ci_tlb_slot, &tlbmask);
    919 	}
    920 
    921 	/*
     922 	 * Flush the icache just to be sure.
    923 	 */
    924 	mips_icache_sync_all();
    925 
    926 	/*
    927 	 * Let this CPU do its own initialization (for things that have to be
    928 	 * done on the local CPU).
    929 	 */
    930 	(*mips_locoresw.lsw_cpu_init)(ci);
    931 
    932 	// Show this CPU as present.
    933 	atomic_or_ulong(&ci->ci_flags, CPUF_PRESENT);
    934 
    935 	/*
    936 	 * Announce we are hatched
    937 	 */
    938 	kcpuset_atomic_set(cpus_hatched, cpu_index(ci));
    939 
    940 	/*
    941 	 * Now wait to be set free!
    942 	 */
    943 	while (! kcpuset_isset(cpus_running, cpu_index(ci))) {
    944 		/* spin, spin, spin */
    945 	}
    946 
    947 	/*
    948 	 * initialize the MIPS count/compare clock
    949 	 */
    950 	mips3_cp0_count_write(ci->ci_data.cpu_cc_skew);
    951 	KASSERT(ci->ci_cycles_per_hz != 0);
    952 	ci->ci_next_cp0_clk_intr = ci->ci_data.cpu_cc_skew + ci->ci_cycles_per_hz;
    953 	mips3_cp0_compare_write(ci->ci_next_cp0_clk_intr);
    954 	ci->ci_data.cpu_cc_skew = 0;
    955 
    956 	/*
    957 	 * Let this CPU do its own post-running initialization
    958 	 * (for things that have to be done on the local CPU).
    959 	 */
    960 	(*mips_locoresw.lsw_cpu_run)(ci);
    961 
    962 	/*
    963 	 * Now turn on interrupts (and verify they are on).
    964 	 */
    965 	spl0();
    966 	KASSERTMSG(ci->ci_cpl == IPL_NONE, "cpl %d", ci->ci_cpl);
    967 	KASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);
    968 
    969 	kcpuset_atomic_set(pmap_kernel()->pm_onproc, cpu_index(ci));
    970 	kcpuset_atomic_set(pmap_kernel()->pm_active, cpu_index(ci));
    971 
    972 	/*
    973 	 * And do a tail call to idle_loop
    974 	 */
    975 	idle_loop(NULL);
    976 }
    977 
    978 void
    979 cpu_boot_secondary_processors(void)
    980 {
    981 	CPU_INFO_ITERATOR cii;
    982 	struct cpu_info *ci;
    983 
    984 	if ((boothowto & RB_MD1) != 0)
    985 		return;
    986 
    987 	for (CPU_INFO_FOREACH(cii, ci)) {
    988 		if (CPU_IS_PRIMARY(ci))
    989 			continue;
    990 		KASSERT(ci->ci_data.cpu_idlelwp);
    991 
    992 		/*
    993 		 * Skip this CPU if it didn't successfully hatch.
    994 		 */
    995 		if (!kcpuset_isset(cpus_hatched, cpu_index(ci)))
    996 			continue;
    997 
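         		/*
         		 * Hand the secondary our current cycle counter value so
         		 * that cpu_hatch() can start its count/compare clock
         		 * roughly in step with the primary's.
         		 */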
    998 		ci->ci_data.cpu_cc_skew = mips3_cp0_count_read();
    999 		atomic_or_ulong(&ci->ci_flags, CPUF_RUNNING);
   1000 		kcpuset_set(cpus_running, cpu_index(ci));
   1001 		// Spin until the cpu calls idle_loop
   1002 		for (u_int i = 0; i < 10000; i++) {
   1003 			if (kcpuset_isset(kcpuset_running, cpu_index(ci)))
   1004 				break;
   1005 			delay(1000);
   1006 		}
   1007 	}
   1008 }
   1009 
   1010 void
   1011 xc_send_ipi(struct cpu_info *ci)
   1012 {
   1013 
   1014 	(*mips_locoresw.lsw_send_ipi)(ci, IPI_XCALL);
   1015 }
   1016 
   1017 void
   1018 cpu_ipi(struct cpu_info *ci)
   1019 {
   1020 
   1021 	(*mips_locoresw.lsw_send_ipi)(ci, IPI_GENERIC);
   1022 }
   1023 
   1024 #endif /* MULTIPROCESSOR */
   1025 
   1026 void
   1027 cpu_offline_md(void)
   1028 {
   1029 
   1030 	(*mips_locoresw.lsw_cpu_offline_md)();
   1031 }
   1032 
   1033 #ifdef _LP64
   1034 void
   1035 cpu_vmspace_exec(lwp_t *l, vaddr_t start, vaddr_t end)
   1036 {
   1037 	/*
    1038 	 * We need to turn UX on or off here so that copyout/copyin work
    1039 	 * correctly; they are used well before setregs() gets called.
   1040 	 */
   1041 	uint32_t sr = mips_cp0_status_read();
   1042 
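         	/*
         	 * UX (64-bit user addressing) is needed only when the new
         	 * image's address space extends beyond 4GB, i.e. when "end"
         	 * does not fit in 32 bits.
         	 */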
   1043 	if (end != (uint32_t) end) {
   1044 		mips_cp0_status_write(sr | MIPS3_SR_UX);
   1045 	} else {
   1046 		mips_cp0_status_write(sr & ~MIPS3_SR_UX);
   1047 	}
   1048 }
   1049 #endif
   1050 
   1051 int
   1052 cpu_lwp_setprivate(lwp_t *l, void *v)
   1053 {
   1054 
   1055 #if (MIPS32R2 + MIPS64R2) > 0
   1056 	if (l == curlwp && MIPS_HAS_USERLOCAL) {
   1057 		mipsNN_cp0_userlocal_write(v);
   1058 	}
   1059 #endif
   1060 	return 0;
   1061 }
   1062 
   1063 
   1064 #if (MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) > 0
   1065 
   1066 #if (CPUWATCH_MAX != 8)
   1067 # error CPUWATCH_MAX
   1068 #endif
   1069 
   1070 /*
   1071  * cpuwatch_discover - determine how many COP0 watchpoints this CPU supports
   1072  */
   1073 u_int
   1074 cpuwatch_discover(void)
   1075 {
   1076 	int i;
   1077 
   1078 	for (i=0; i < CPUWATCH_MAX; i++) {
   1079 		uint32_t watchhi = mipsNN_cp0_watchhi_read(i);
   1080 		if ((watchhi & __BIT(31)) == 0)	/* test 'M' bit */
   1081 			break;
   1082 	}
   1083 	return i + 1;
   1084 }
   1085 
   1086 void
   1087 cpuwatch_free(cpu_watchpoint_t *cwp)
   1088 {
   1089 #ifdef DIAGNOSTIC
   1090 	struct cpu_info * const ci = curcpu();
   1091 
   1092 	KASSERT(cwp >= &ci->ci_cpuwatch_tab[0] &&
   1093 		cwp <= &ci->ci_cpuwatch_tab[ci->ci_cpuwatch_count-1]);
   1094 #endif
   1095 	cwp->cw_mode = 0;
   1096 	cwp->cw_asid = 0;
   1097 	cwp->cw_addr = 0;
   1098 	cpuwatch_clr(cwp);
   1099 }
   1100 
   1101 /*
   1102  * cpuwatch_alloc
   1103  * 	find an empty slot
   1104  *	no locking for the table since it is CPU private
   1105  */
   1106 cpu_watchpoint_t *
   1107 cpuwatch_alloc(void)
   1108 {
   1109 	struct cpu_info * const ci = curcpu();
   1110 	cpu_watchpoint_t *cwp;
   1111 
   1112 	for (int i=0; i < ci->ci_cpuwatch_count; i++) {
   1113 		cwp = &ci->ci_cpuwatch_tab[i];
   1114 		if ((cwp->cw_mode & CPUWATCH_RWX) == 0)
   1115 			return cwp;
   1116 	}
   1117 	return NULL;
   1118 }
   1119 
   1120 
   1121 void
   1122 cpuwatch_set_all(void)
   1123 {
   1124 	struct cpu_info * const ci = curcpu();
   1125 	cpu_watchpoint_t *cwp;
   1126 	int i;
   1127 
   1128 	for (i=0; i < ci->ci_cpuwatch_count; i++) {
   1129 		cwp = &ci->ci_cpuwatch_tab[i];
   1130 		if ((cwp->cw_mode & CPUWATCH_RWX) != 0)
   1131 			cpuwatch_set(cwp);
   1132 	}
   1133 }
   1134 
   1135 void
   1136 cpuwatch_clr_all(void)
   1137 {
   1138 	struct cpu_info * const ci = curcpu();
   1139 	cpu_watchpoint_t *cwp;
   1140 	int i;
   1141 
   1142 	for (i=0; i < ci->ci_cpuwatch_count; i++) {
   1143 		cwp = &ci->ci_cpuwatch_tab[i];
   1144 		if ((cwp->cw_mode & CPUWATCH_RWX) != 0)
   1145 			cpuwatch_clr(cwp);
   1146 	}
   1147 }
   1148 
   1149 /*
   1150  * cpuwatch_set - establish a MIPS COP0 watchpoint
   1151  */
   1152 void
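         /*
          * Register layout assumed below, per the MIPS32/64 Privileged
          * Resource Architecture: WatchLo holds the address with the W/R/I
          * enables in bits 0..2; WatchHi holds M (bit 31), G (bit 30), the
          * ASID in bits 23..16 and the address mask in bits 11..3.
          */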
   1153 cpuwatch_set(cpu_watchpoint_t *cwp)
   1154 {
   1155 	struct cpu_info * const ci = curcpu();
   1156 	uint32_t watchhi;
   1157 	register_t watchlo;
   1158 	int cwnum = cwp - &ci->ci_cpuwatch_tab[0];
   1159 
   1160 	KASSERT(cwp >= &ci->ci_cpuwatch_tab[0] &&
   1161 		cwp <= &ci->ci_cpuwatch_tab[ci->ci_cpuwatch_count-1]);
   1162 
   1163 	watchlo = cwp->cw_addr;
   1164 	if (cwp->cw_mode & CPUWATCH_WRITE)
   1165 		watchlo |= __BIT(0);
   1166 	if (cwp->cw_mode & CPUWATCH_READ)
   1167 		watchlo |= __BIT(1);
   1168 	if (cwp->cw_mode & CPUWATCH_EXEC)
   1169 		watchlo |= __BIT(2);
   1170 
   1171 	if (cwp->cw_mode & CPUWATCH_ASID)
   1172 		watchhi = cwp->cw_asid << 16;	/* addr qualified by asid */
   1173 	else
   1174 		watchhi = __BIT(30);		/* addr not qual. by asid (Global) */
   1175 	if (cwp->cw_mode & CPUWATCH_MASK)
    1176 		watchhi |= cwp->cw_mask;	/* set "don't care" addr match bits */
   1177 
   1178 	mipsNN_cp0_watchhi_write(cwnum, watchhi);
   1179 	mipsNN_cp0_watchlo_write(cwnum, watchlo);
   1180 }
   1181 
   1182 /*
   1183  * cpuwatch_clr - disestablish a MIPS COP0 watchpoint
   1184  */
   1185 void
   1186 cpuwatch_clr(cpu_watchpoint_t *cwp)
   1187 {
   1188 	struct cpu_info * const ci = curcpu();
   1189 	int cwnum = cwp - &ci->ci_cpuwatch_tab[0];
   1190 
   1191 	KASSERT(cwp >= &ci->ci_cpuwatch_tab[0] &&
   1192 		cwp <= &ci->ci_cpuwatch_tab[ci->ci_cpuwatch_count-1]);
   1193 
   1194 	mipsNN_cp0_watchhi_write(cwnum, 0);
   1195 	mipsNN_cp0_watchlo_write(cwnum, 0);
   1196 }
   1197 
   1198 #endif	/* (MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) > 0 */
   1199 
   1200 #if (MIPS2 + MIPS3 + MIPS4 + MIPS5 + MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) > 0
   1201 void
   1202 paravirt_membar_sync(void)
   1203 {
   1204 
   1205 	/*
   1206 	 * Store-before-load ordering with respect to matching logic
   1207 	 * on the hypervisor side.
   1208 	 *
   1209 	 * This is the same as membar_sync, but guaranteed never to be
   1210 	 * conditionalized or hotpatched away even on uniprocessor
   1211 	 * builds and boots -- because under virtualization, we still
   1212 	 * have to coordinate with a `device' backed by a hypervisor
   1213 	 * that is potentially on another physical CPU even if we
   1214 	 * observe only one virtual CPU as the guest.
   1215 	 *
   1216 	 * Prior to MIPS-II, there was no SYNC instruction.[1]  CPUs
   1217 	 * with only MIPS-I presumably don't exist in multiprocessor
   1218 	 * configurations.  But what if we're running a _kernel_ built
   1219 	 * for a uniprocessor MIPS-I CPU, as a virtual machine guest of
   1220 	 * a _host_ with a newer multiprocessor CPU?  How do we enforce
   1221 	 * store-before-load ordering for a paravirtualized device
   1222 	 * driver, coordinating with host software `device' potentially
   1223 	 * on another CPU?  You'll have to answer that before you can
   1224 	 * use virtio drivers!
   1225 	 *
   1226 	 * [1] MIPS32 Architecture For Programmers, Volume II: The
   1227 	 *     MIPS32 Instruction Set, Document Number: MD00086,
   1228 	 *     Revision 0.95, March 12, 2001, MIPS Technologies, p. 215
   1229 	 */
   1230 	__asm volatile(
   1231 	    ".set push"			"\n\t"
   1232 	    ".set mips2"		"\n\t"
   1233 	    "sync"			"\n\t"
   1234 	    ".set pop");
   1235 }
   1236 #endif	/* !MIPS1 */
   1237