cpu_subr.c revision 1.32
      1 /*	$NetBSD: cpu_subr.c,v 1.31 2017/03/16 16:13:20 chs Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2010 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Matt Thomas of 3am Software Foundry.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 #include <sys/cdefs.h>
     33 __KERNEL_RCSID(0, "$NetBSD: cpu_subr.c,v 1.31 2017/03/16 16:13:20 chs Exp $");
     34 
     35 #include "opt_cputype.h"
     36 #include "opt_ddb.h"
     37 #include "opt_modular.h"
     38 #include "opt_multiprocessor.h"
     39 
     40 #include <sys/param.h>
     41 #include <sys/cpu.h>
     42 #include <sys/intr.h>
     43 #include <sys/atomic.h>
     44 #include <sys/device.h>
     45 #include <sys/lwp.h>
     46 #include <sys/proc.h>
     47 #include <sys/ras.h>
     48 #include <sys/module.h>
     49 #include <sys/bitops.h>
     50 #include <sys/idle.h>
     51 #include <sys/xcall.h>
     52 #include <sys/kernel.h>
     53 #include <sys/ipi.h>
     54 
     55 #include <uvm/uvm.h>
     56 
     57 #include <mips/locore.h>
     58 #include <mips/regnum.h>
     59 #include <mips/pcb.h>
     60 #include <mips/cache.h>
     61 #include <mips/frame.h>
     62 #include <mips/userret.h>
     63 #include <mips/pte.h>
     64 
     65 #if defined(DDB) || defined(KGDB)
     66 #ifdef DDB
     67 #include <mips/db_machdep.h>
     68 #include <ddb/db_command.h>
     69 #include <ddb/db_output.h>
     70 #endif
     71 #endif
     72 
     73 #ifdef MIPS64_OCTEON
     74 extern struct cpu_softc octeon_cpu0_softc;
     75 #endif
     76 
     77 struct cpu_info cpu_info_store
     78 #if defined(MULTIPROCESSOR) && !defined(MIPS64_OCTEON)
     79 	__section(".data1")
     80 	__aligned(1LU << ilog2((2*sizeof(struct cpu_info)-1)))
     81 #endif
     82     = {
     83 	.ci_curlwp = &lwp0,
     84 	.ci_tlb_info = &pmap_tlb0_info,
     85 	.ci_pmap_kern_segtab = &pmap_kern_segtab,
     86 	.ci_pmap_user_segtab = NULL,
     87 #ifdef _LP64
     88 	.ci_pmap_user_seg0tab = NULL,
     89 #endif
     90 	.ci_cpl = IPL_HIGH,
     91 	.ci_tlb_slot = -1,
     92 #ifdef MULTIPROCESSOR
     93 	.ci_flags = CPUF_PRIMARY|CPUF_PRESENT|CPUF_RUNNING,
     94 #endif
     95 #ifdef MIPS64_OCTEON
     96 	.ci_softc = &octeon_cpu0_softc,
     97 #endif
     98 };
     99 
    100 const pcu_ops_t * const pcu_ops_md_defs[PCU_UNIT_COUNT] = {
    101 	[PCU_FPU] = &mips_fpu_ops,
    102 #if (MIPS32R2 + MIPS64R2) > 0
    103 	[PCU_DSP] = &mips_dsp_ops,
    104 #endif
    105 };
    106 
    107 #ifdef MULTIPROCESSOR
    108 struct cpu_info * cpuid_infos[MAXCPUS] = {
    109 	[0] = &cpu_info_store,
    110 };
    111 
    112 kcpuset_t *cpus_halted;
    113 kcpuset_t *cpus_hatched;
    114 kcpuset_t *cpus_paused;
    115 kcpuset_t *cpus_resumed;
    116 kcpuset_t *cpus_running;
    117 
    118 static void cpu_ipi_wait(const char *, const kcpuset_t *, const kcpuset_t *);
    119 
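         /*
          * cpu_info_alloc - set up the cpu_info for a secondary CPU in a
          *	direct-mapped (KSEG0) page that also holds that CPU's exception
          *	vectors and, if the caller didn't supply one, its pmap_tlb_info.
          */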
    120 struct cpu_info *
    121 cpu_info_alloc(struct pmap_tlb_info *ti, cpuid_t cpu_id, cpuid_t cpu_package_id,
    122 	cpuid_t cpu_core_id, cpuid_t cpu_smt_id)
    123 {
    124 	KASSERT(cpu_id < MAXCPUS);
    125 
    126 #ifdef MIPS64_OCTEON
    127 	vaddr_t exc_page = MIPS_UTLB_MISS_EXC_VEC + 0x1000*cpu_id;
    128 	__CTASSERT(sizeof(struct cpu_info) + sizeof(struct pmap_tlb_info) <= 0x1000 - 0x280);
    129 
    130 	struct cpu_info * const ci = ((struct cpu_info *)(exc_page + 0x1000)) - 1;
    131 	memset((void *)exc_page, 0, PAGE_SIZE);
    132 
    133 	if (ti == NULL) {
    134 		ti = ((struct pmap_tlb_info *)ci) - 1;
    135 		pmap_tlb_info_init(ti);
    136 	}
    137 #else
    138 	const vaddr_t cpu_info_offset = (vaddr_t)&cpu_info_store & PAGE_MASK;
    139 	struct pglist pglist;
    140 	int error;
    141 
     142 	/*
     143 	 * Grab a page from the first 512MB (mappable by KSEG0) to store the
     144 	 * exception vectors and cpu_info for this cpu.
     145 	 */
    146 	error = uvm_pglistalloc(PAGE_SIZE,
    147 	    0, MIPS_KSEG1_START - MIPS_KSEG0_START,
    148 	    PAGE_SIZE, PAGE_SIZE, &pglist, 1, false);
    149 	if (error)
    150 		return NULL;
    151 
    152 	const paddr_t pa = VM_PAGE_TO_PHYS(TAILQ_FIRST(&pglist));
    153 	const vaddr_t va = MIPS_PHYS_TO_KSEG0(pa);
    154 	struct cpu_info * const ci = (void *) (va + cpu_info_offset);
    155 	memset((void *)va, 0, PAGE_SIZE);
    156 
    157 	/*
    158 	 * If we weren't passed a pmap_tlb_info to use, the caller wants us
     159 	 * to take care of that here.  Since we have room left over in the
     160 	 * page we just allocated, use a piece of it for that.
    161 	 */
    162 	if (ti == NULL) {
    163 		if (cpu_info_offset >= sizeof(*ti)) {
    164 			ti = (void *) va;
    165 		} else {
    166 			KASSERT(PAGE_SIZE - cpu_info_offset + sizeof(*ci) >= sizeof(*ti));
    167 			ti = (struct pmap_tlb_info *)(va + PAGE_SIZE) - 1;
    168 		}
    169 		pmap_tlb_info_init(ti);
    170 	}
    171 
    172 	/*
    173 	 * Attach its TLB info (which must be direct-mapped)
    174 	 */
    175 #ifdef _LP64
    176 	KASSERT(MIPS_KSEG0_P(ti) || MIPS_XKPHYS_P(ti));
    177 #else
    178 	KASSERT(MIPS_KSEG0_P(ti));
    179 #endif
    180 #endif /* MIPS64_OCTEON */
    181 
    182 	KASSERT(cpu_id != 0);
    183 	ci->ci_cpuid = cpu_id;
     184 	ci->ci_pmap_kern_segtab = &pmap_kern_segtab;
    185 	ci->ci_data.cpu_package_id = cpu_package_id;
    186 	ci->ci_data.cpu_core_id = cpu_core_id;
    187 	ci->ci_data.cpu_smt_id = cpu_smt_id;
    188 	ci->ci_cpu_freq = cpu_info_store.ci_cpu_freq;
    189 	ci->ci_cctr_freq = cpu_info_store.ci_cctr_freq;
    190 	ci->ci_cycles_per_hz = cpu_info_store.ci_cycles_per_hz;
    191 	ci->ci_divisor_delay = cpu_info_store.ci_divisor_delay;
    192 	ci->ci_divisor_recip = cpu_info_store.ci_divisor_recip;
    193 	ci->ci_cpuwatch_count = cpu_info_store.ci_cpuwatch_count;
    194 
    195 	pmap_md_alloc_ephemeral_address_space(ci);
    196 
    197 	mi_cpu_attach(ci);
    198 
    199 	pmap_tlb_info_attach(ti, ci);
    200 
    201 	return ci;
    202 }
    203 #endif /* MULTIPROCESSOR */
    204 
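         /*
          * cpu_hwrena_setup - on MIPS32R2/MIPS64R2 CPUs that support it, let
          *	userland use RDHWR to read the cycle counter and related hardware
          *	registers, and point the UserLocal register at curlwp's TLS base.
          */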
    205 static void
    206 cpu_hwrena_setup(void)
    207 {
    208 #if (MIPS32R2 + MIPS64R2) > 0
    209 	const int cp0flags = mips_options.mips_cpu->cpu_cp0flags;
    210 	if ((cp0flags & MIPS_CP0FL_USE) == 0)
    211 		return;
    212 
    213 	if (cp0flags & MIPS_CP0FL_HWRENA) {
    214 		mipsNN_cp0_hwrena_write(
    215 		    MIPS_HWRENA_UL
    216 		    |MIPS_HWRENA_CCRES
    217 		    |MIPS_HWRENA_CC
    218 		    |MIPS_HWRENA_SYNCI_STEP
    219 		    |MIPS_HWRENA_CPUNUM);
    220 		if (cp0flags & MIPS_CP0FL_USERLOCAL) {
    221 			mipsNN_cp0_userlocal_write(curlwp->l_private);
    222 		}
    223 	}
    224 #endif
    225 }
    226 
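         /*
          * cpu_attach_common - attach glue shared by all MIPS ports: cross-link
          *	the device and cpu_info, attach event counters and, on
          *	MULTIPROCESSOR kernels, register the CPU and its IPI state.
          */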
    227 void
    228 cpu_attach_common(device_t self, struct cpu_info *ci)
    229 {
    230 	const char * const xname = device_xname(self);
    231 
    232 	/*
    233 	 * Cross link cpu_info and its device together
    234 	 */
    235 	ci->ci_dev = self;
    236 	self->dv_private = ci;
    237 	KASSERT(ci->ci_idepth == 0);
    238 
    239 	evcnt_attach_dynamic(&ci->ci_ev_count_compare,
    240 		EVCNT_TYPE_INTR, NULL, xname,
    241 		"int 5 (clock)");
    242 	evcnt_attach_dynamic(&ci->ci_ev_count_compare_missed,
    243 		EVCNT_TYPE_INTR, NULL, xname,
    244 		"int 5 (clock) missed");
    245 	evcnt_attach_dynamic(&ci->ci_ev_fpu_loads,
    246 		EVCNT_TYPE_MISC, NULL, xname,
    247 		"fpu loads");
    248 	evcnt_attach_dynamic(&ci->ci_ev_fpu_saves,
    249 		EVCNT_TYPE_MISC, NULL, xname,
    250 		"fpu saves");
    251 	evcnt_attach_dynamic(&ci->ci_ev_dsp_loads,
    252 		EVCNT_TYPE_MISC, NULL, xname,
    253 		"dsp loads");
    254 	evcnt_attach_dynamic(&ci->ci_ev_dsp_saves,
    255 		EVCNT_TYPE_MISC, NULL, xname,
    256 		"dsp saves");
    257 	evcnt_attach_dynamic(&ci->ci_ev_tlbmisses,
    258 		EVCNT_TYPE_TRAP, NULL, xname,
    259 		"tlb misses");
    260 
    261 #ifdef MULTIPROCESSOR
    262 	if (ci != &cpu_info_store) {
    263 		/*
     264 		 * Record this cpu_info in the cpuid_infos[] array.
    265 		 */
    266 		KASSERT(cpuid_infos[ci->ci_cpuid] == NULL);
    267 		cpuid_infos[ci->ci_cpuid] = ci;
    268 		membar_producer();
    269 	}
    270 	KASSERT(cpuid_infos[ci->ci_cpuid] != NULL);
    271 	evcnt_attach_dynamic(&ci->ci_evcnt_synci_activate_rqst,
    272 	    EVCNT_TYPE_MISC, NULL, xname,
    273 	    "syncicache activate request");
    274 	evcnt_attach_dynamic(&ci->ci_evcnt_synci_deferred_rqst,
    275 	    EVCNT_TYPE_MISC, NULL, xname,
    276 	    "syncicache deferred request");
    277 	evcnt_attach_dynamic(&ci->ci_evcnt_synci_ipi_rqst,
    278 	    EVCNT_TYPE_MISC, NULL, xname,
    279 	    "syncicache ipi request");
    280 	evcnt_attach_dynamic(&ci->ci_evcnt_synci_onproc_rqst,
    281 	    EVCNT_TYPE_MISC, NULL, xname,
    282 	    "syncicache onproc request");
    283 
    284 	/*
    285 	 * Initialize IPI framework for this cpu instance
    286 	 */
    287 	ipi_init(ci);
    288 
    289 	kcpuset_create(&ci->ci_multicastcpus, true);
    290 	kcpuset_create(&ci->ci_watchcpus, true);
    291 	kcpuset_create(&ci->ci_ddbcpus, true);
    292 #endif
    293 }
    294 
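         /*
          * cpu_startup_common - startup code shared by all MIPS ports: create
          *	the MP coordination cpusets, set up HWRENA, print the copyright
          *	and memory banners, and allocate the physio submap.
          */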
    295 void
    296 cpu_startup_common(void)
    297 {
    298 	vaddr_t minaddr, maxaddr;
    299 	char pbuf[9];	/* "99999 MB" */
    300 
    301 	pmap_tlb_info_evcnt_attach(&pmap_tlb0_info);
    302 
    303 #ifdef MULTIPROCESSOR
     304 	kcpuset_create(&cpus_halted, true);
     305 	KASSERT(cpus_halted != NULL);
     306 	kcpuset_create(&cpus_hatched, true);
     307 	KASSERT(cpus_hatched != NULL);
     308 	kcpuset_create(&cpus_paused, true);
     309 	KASSERT(cpus_paused != NULL);
     310 	kcpuset_create(&cpus_resumed, true);
     311 	KASSERT(cpus_resumed != NULL);
     312 	kcpuset_create(&cpus_running, true);
     313 	KASSERT(cpus_running != NULL);
    314 	kcpuset_set(cpus_hatched, cpu_number());
    315 	kcpuset_set(cpus_running, cpu_number());
    316 #endif
    317 
    318 	cpu_hwrena_setup();
    319 
    320 	/*
    321 	 * Good {morning,afternoon,evening,night}.
    322 	 */
    323 	printf("%s%s", copyright, version);
    324 	printf("%s\n", cpu_getmodel());
    325 	format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
    326 	printf("total memory = %s\n", pbuf);
    327 
    328 	minaddr = 0;
    329 	/*
    330 	 * Allocate a submap for physio.
    331 	 */
    332 	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
    333 				    VM_PHYS_SIZE, 0, FALSE, NULL);
    334 
    335 	/*
    336 	 * (No need to allocate an mbuf cluster submap.  Mbuf clusters
    337 	 * are allocated via the pool allocator, and we use KSEG/XKPHYS to
    338 	 * map those pages.)
    339 	 */
    340 
    341 	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
    342 	printf("avail memory = %s\n", pbuf);
    343 
    344 #if defined(__mips_n32)
    345 	module_machine = "mips-n32";
    346 #endif
    347 }
    348 
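         /*
          * cpu_getmcontext - capture the LWP's user register state (plus FP
          *	state, if the FPU has been used) into an mcontext.
          */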
    349 void
    350 cpu_getmcontext(struct lwp *l, mcontext_t *mcp, unsigned int *flags)
    351 {
    352 	const struct trapframe *tf = l->l_md.md_utf;
    353 	__greg_t *gr = mcp->__gregs;
    354 	__greg_t ras_pc;
    355 
     356 	/* Save register context. Don't copy R0 - it is always 0 */
    357 	memcpy(&gr[_REG_AT], &tf->tf_regs[_R_AST], sizeof(mips_reg_t) * 31);
    358 
    359 	gr[_REG_MDLO]  = tf->tf_regs[_R_MULLO];
    360 	gr[_REG_MDHI]  = tf->tf_regs[_R_MULHI];
    361 	gr[_REG_CAUSE] = tf->tf_regs[_R_CAUSE];
    362 	gr[_REG_EPC]   = tf->tf_regs[_R_PC];
    363 	gr[_REG_SR]    = tf->tf_regs[_R_SR];
    364 	mcp->_mc_tlsbase = (intptr_t)l->l_private;
    365 
    366 	if ((ras_pc = (intptr_t)ras_lookup(l->l_proc,
    367 	    (void *) (intptr_t)gr[_REG_EPC])) != -1)
    368 		gr[_REG_EPC] = ras_pc;
    369 
    370 	*flags |= _UC_CPU | _UC_TLSBASE;
    371 
    372 	/* Save floating point register context, if any. */
    373 	KASSERT(l == curlwp);
    374 	if (fpu_used_p(l)) {
    375 		size_t fplen;
    376 		/*
    377 		 * If this process is the current FP owner, dump its
    378 		 * context to the PCB first.
    379 		 */
    380 		fpu_save(l);
    381 
    382 		/*
    383 		 * The PCB FP regs struct includes the FP CSR, so use the
    384 		 * size of __fpregs.__fp_r when copying.
    385 		 */
    386 #if !defined(__mips_o32)
    387 		if (_MIPS_SIM_NEWABI_P(l->l_proc->p_md.md_abi)) {
    388 #endif
    389 			fplen = sizeof(struct fpreg);
    390 #if !defined(__mips_o32)
    391 		} else {
    392 			fplen = sizeof(struct fpreg_oabi);
    393 		}
    394 #endif
    395 		struct pcb * const pcb = lwp_getpcb(l);
    396 		memcpy(&mcp->__fpregs, &pcb->pcb_fpregs, fplen);
    397 		*flags |= _UC_FPU;
    398 	}
    399 }
    400 
    401 int
    402 cpu_mcontext_validate(struct lwp *l, const mcontext_t *mcp)
    403 {
    404 	/* XXX:  Do we validate the addresses?? */
    405 	return 0;
    406 }
    407 
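         /*
          * cpu_setmcontext - install register, TLS and FP state from an mcontext
          *	into the LWP's trapframe and PCB; SR is deliberately not restored.
          */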
    408 int
    409 cpu_setmcontext(struct lwp *l, const mcontext_t *mcp, unsigned int flags)
    410 {
    411 	struct trapframe *tf = l->l_md.md_utf;
    412 	struct proc *p = l->l_proc;
    413 	const __greg_t *gr = mcp->__gregs;
    414 	int error;
    415 
    416 	/* Restore register context, if any. */
    417 	if (flags & _UC_CPU) {
    418 		error = cpu_mcontext_validate(l, mcp);
    419 		if (error)
    420 			return error;
    421 
    422 		/* Save register context. */
    423 
    424 #ifdef __mips_n32
    425 		CTASSERT(_R_AST == _REG_AT);
    426 		if (__predict_false(p->p_md.md_abi == _MIPS_BSD_API_O32)) {
    427 			const mcontext_o32_t *mcp32 = (const mcontext_o32_t *)mcp;
    428 			const __greg32_t *gr32 = mcp32->__gregs;
    429 			for (size_t i = _R_AST; i < 32; i++) {
    430 				tf->tf_regs[i] = gr32[i];
    431 			}
    432 		} else
    433 #endif
    434 		memcpy(&tf->tf_regs[_R_AST], &gr[_REG_AT],
    435 		       sizeof(mips_reg_t) * 31);
    436 
    437 		tf->tf_regs[_R_MULLO] = gr[_REG_MDLO];
    438 		tf->tf_regs[_R_MULHI] = gr[_REG_MDHI];
    439 		tf->tf_regs[_R_CAUSE] = gr[_REG_CAUSE];
    440 		tf->tf_regs[_R_PC]    = gr[_REG_EPC];
    441 		/* Do not restore SR. */
    442 	}
    443 
    444 	/* Restore the private thread context */
    445 	if (flags & _UC_TLSBASE) {
    446 		lwp_setprivate(l, (void *)(intptr_t)mcp->_mc_tlsbase);
    447 	}
    448 
    449 	/* Restore floating point register context, if any. */
    450 	if (flags & _UC_FPU) {
    451 		size_t fplen;
    452 
    453 		/* Disable the FPU contents. */
    454 		fpu_discard(l);
    455 
    456 #if !defined(__mips_o32)
    457 		if (_MIPS_SIM_NEWABI_P(l->l_proc->p_md.md_abi)) {
    458 #endif
    459 			fplen = sizeof(struct fpreg);
    460 #if !defined(__mips_o32)
    461 		} else {
    462 			fplen = sizeof(struct fpreg_oabi);
    463 		}
    464 #endif
    465 		/*
    466 		 * The PCB FP regs struct includes the FP CSR, so use the
    467 		 * proper size of fpreg when copying.
    468 		 */
    469 		struct pcb * const pcb = lwp_getpcb(l);
    470 		memcpy(&pcb->pcb_fpregs, &mcp->__fpregs, fplen);
    471 	}
    472 
    473 	mutex_enter(p->p_lock);
    474 	if (flags & _UC_SETSTACK)
    475 		l->l_sigstk.ss_flags |= SS_ONSTACK;
    476 	if (flags & _UC_CLRSTACK)
    477 		l->l_sigstk.ss_flags &= ~SS_ONSTACK;
    478 	mutex_exit(p->p_lock);
    479 
    480 	return (0);
    481 }
    482 
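         /*
          * cpu_need_resched - note that the given CPU should reschedule, posting
          *	an AST, a kernel preemption request or an IPI as appropriate.
          */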
    483 void
    484 cpu_need_resched(struct cpu_info *ci, int flags)
    485 {
    486 	struct lwp * const l = ci->ci_data.cpu_onproc;
    487 #ifdef MULTIPROCESSOR
    488 	struct cpu_info * const cur_ci = curcpu();
    489 #endif
    490 
    491 	KASSERT(kpreempt_disabled());
    492 
    493 	ci->ci_want_resched |= flags;
    494 
    495 	if (__predict_false((l->l_pflag & LP_INTR) != 0)) {
    496 		/*
    497 		 * No point doing anything, it will switch soon.
    498 		 * Also here to prevent an assertion failure in
    499 		 * kpreempt() due to preemption being set on a
    500 		 * soft interrupt LWP.
    501 		 */
    502 		return;
    503 	}
    504 
    505 	if (__predict_false(l == ci->ci_data.cpu_idlelwp)) {
    506 #ifdef MULTIPROCESSOR
    507 		/*
    508 		 * If the other CPU is idling, it must be waiting for an
    509 		 * interrupt.  So give it one.
    510 		 */
    511 		if (__predict_false(ci != cur_ci))
    512 			cpu_send_ipi(ci, IPI_NOP);
    513 #endif
    514 		return;
    515 	}
    516 
    517 #ifdef MULTIPROCESSOR
    518 	atomic_or_uint(&ci->ci_want_resched, flags);
    519 #else
    520 	ci->ci_want_resched |= flags;
    521 #endif
    522 
    523 	if (flags & RESCHED_KPREEMPT) {
    524 #ifdef __HAVE_PREEMPTION
    525 		atomic_or_uint(&l->l_dopreempt, DOPREEMPT_ACTIVE);
    526 		if (ci == cur_ci) {
    527 			softint_trigger(SOFTINT_KPREEMPT);
    528 		} else {
    529 			cpu_send_ipi(ci, IPI_KPREEMPT);
    530 		}
    531 #endif
    532 		return;
    533 	}
    534 	l->l_md.md_astpending = 1;		/* force call to ast() */
    535 #ifdef MULTIPROCESSOR
    536 	if (ci != cur_ci && (flags & RESCHED_IMMED)) {
    537 		cpu_send_ipi(ci, IPI_AST);
    538 	}
    539 #endif
    540 }
    541 
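         /*
          * cpu_clkf_usermode_mask - status register mask used to tell whether a
          *	clock interrupt came from user mode (KSU field on MIPS3 and later,
          *	the previous KU bit on older CPUs).
          */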
    542 uint32_t
    543 cpu_clkf_usermode_mask(void)
    544 {
    545 	return CPUISMIPS3 ? MIPS_SR_KSU_USER : MIPS_SR_KU_PREV;
    546 }
    547 
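         /*
          * cpu_signotify - tell an LWP it has a pending signal by forcing it
          *	through ast() on its way back to userland.
          */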
    548 void
    549 cpu_signotify(struct lwp *l)
    550 {
    551 	KASSERT(kpreempt_disabled());
    552 #ifdef __HAVE_FAST_SOFTINTS
    553 	KASSERT(lwp_locked(l, NULL));
    554 #endif
    555 	KASSERT(l->l_stat == LSONPROC || l->l_stat == LSRUN || l->l_stat == LSSTOP);
    556 
    557 	l->l_md.md_astpending = 1; 		/* force call to ast() */
    558 }
    559 
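         /*
          * cpu_need_proftick - arrange for the LWP to account a profiling tick
          *	the next time it returns to userland.
          */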
    560 void
    561 cpu_need_proftick(struct lwp *l)
    562 {
    563 	KASSERT(kpreempt_disabled());
    564 	KASSERT(l->l_cpu == curcpu());
    565 
    566 	l->l_pflag |= LP_OWEUPC;
    567 	l->l_md.md_astpending = 1;		/* force call to ast() */
    568 }
    569 
    570 void
    571 cpu_set_curpri(int pri)
    572 {
    573 	kpreempt_disable();
    574 	curcpu()->ci_schedstate.spc_curpriority = pri;
    575 	kpreempt_enable();
    576 }
    577 
    578 
    579 #ifdef __HAVE_PREEMPTION
    580 bool
    581 cpu_kpreempt_enter(uintptr_t where, int s)
    582 {
    583 	KASSERT(kpreempt_disabled());
    584 
    585 #if 0
    586 	if (where == (intptr_t)-2) {
    587 		KASSERT(curcpu()->ci_mtx_count == 0);
    588 		/*
    589 		 * We must be called via kern_intr (which already checks for
    590 		 * IPL_NONE so of course we call be preempted).
    591 		 */
    592 		return true;
    593 	}
    594 	/*
    595 	 * We are called from KPREEMPT_ENABLE().  If we are at IPL_NONE,
    596 	 * of course we can be preempted.  If we aren't, ask for a
    597 	 * softint so that kern_intr can call kpreempt.
    598 	 */
    599 	if (s == IPL_NONE) {
    600 		KASSERT(curcpu()->ci_mtx_count == 0);
    601 		return true;
    602 	}
    603 	softint_trigger(SOFTINT_KPREEMPT);
    604 #endif
    605 	return false;
    606 }
    607 
    608 void
    609 cpu_kpreempt_exit(uintptr_t where)
    610 {
    611 
    612 	/* do nothing */
    613 }
    614 
    615 /*
    616  * Return true if preemption is disabled for MD reasons.  Must be called
    617  * with preemption disabled, and thus is only for diagnostic checks.
    618  */
    619 bool
    620 cpu_kpreempt_disabled(void)
    621 {
    622 	/*
    623 	 * Any elevated IPL disables preemption.
    624 	 */
    625 	return curcpu()->ci_cpl > IPL_NONE;
    626 }
    627 #endif /* __HAVE_PREEMPTION */
    628 
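         /*
          * cpu_idle - MD idle loop body: call the platform idle hook until a
          *	reschedule is requested.
          */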
    629 void
    630 cpu_idle(void)
    631 {
    632 	void (*const mach_idle)(void) = mips_locoresw.lsw_cpu_idle;
    633 	struct cpu_info * const ci = curcpu();
    634 
    635 	while (!ci->ci_want_resched) {
    636 #ifdef __HAVE_FAST_SOFTINTS
    637 		KASSERT(ci->ci_data.cpu_softints == 0);
    638 #endif
    639 		(*mach_idle)();
    640 	}
    641 }
    642 
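         /*
          * cpu_intr_p - return true if called from (hard) interrupt context.
          */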
    643 bool
    644 cpu_intr_p(void)
    645 {
    646 	bool rv;
    647 	kpreempt_disable();
    648 	rv = (curcpu()->ci_idepth != 0);
    649 	kpreempt_enable();
    650 	return rv;
    651 }
    652 
    653 #ifdef MULTIPROCESSOR
    654 
    655 void
    656 cpu_broadcast_ipi(int tag)
    657 {
    658 	// No reason to remove ourselves since multicast_ipi will do that for us
    659 	cpu_multicast_ipi(cpus_running, tag);
    660 }
    661 
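         /*
          * cpu_multicast_ipi - send an IPI to every CPU in the given set except
          *	the current CPU.
          */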
    662 void
    663 cpu_multicast_ipi(const kcpuset_t *kcp, int tag)
    664 {
    665 	struct cpu_info * const ci = curcpu();
    666 	kcpuset_t *kcp2 = ci->ci_multicastcpus;
    667 
    668 	if (kcpuset_match(cpus_running, ci->ci_data.cpu_kcpuset))
    669 		return;
    670 
    671 	kcpuset_copy(kcp2, kcp);
    672 	kcpuset_remove(kcp2, ci->ci_data.cpu_kcpuset);
    673 	for (cpuid_t cii; (cii = kcpuset_ffs(kcp2)) != 0; ) {
    674 		kcpuset_clear(kcp2, --cii);
    675 		(void)cpu_send_ipi(cpu_lookup(cii), tag);
    676 	}
    677 }
    678 
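         /*
          * cpu_send_ipi - send a single IPI to the given CPU via the port's
          *	MD send-ipi hook.
          */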
    679 int
    680 cpu_send_ipi(struct cpu_info *ci, int tag)
    681 {
    682 
    683 	return (*mips_locoresw.lsw_send_ipi)(ci, tag);
    684 }
    685 
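         /*
          * cpu_ipi_wait - spin for a bounded time until the CPUs in "wanted"
          *	show up in "watchset"; report any that never do.
          */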
    686 static void
    687 cpu_ipi_wait(const char *s, const kcpuset_t *watchset, const kcpuset_t *wanted)
    688 {
    689 	bool done = false;
    690 	struct cpu_info * const ci = curcpu();
    691 	kcpuset_t *kcp = ci->ci_watchcpus;
    692 
     693 	/* Wait some finite amount of time for the CPUs to check in. */
    694 
    695 	for (u_long limit = curcpu()->ci_cpu_freq/10; !done && limit--; ) {
    696 		kcpuset_copy(kcp, watchset);
    697 		kcpuset_intersect(kcp, wanted);
    698 		done = kcpuset_match(kcp, wanted);
    699 	}
    700 
    701 	if (!done) {
    702 		cpuid_t cii;
    703 		kcpuset_copy(kcp, wanted);
    704 		kcpuset_remove(kcp, watchset);
    705 		if ((cii = kcpuset_ffs(kcp)) != 0) {
    706 			printf("Failed to %s:", s);
    707 			do {
    708 				kcpuset_clear(kcp, --cii);
    709 				printf(" cpu%lu", cii);
    710 			} while ((cii = kcpuset_ffs(kcp)) != 0);
    711 			printf("\n");
    712 		}
    713 	}
    714 }
    715 
    716 /*
    717  * Halt this cpu
    718  */
    719 void
    720 cpu_halt(void)
    721 {
    722 	cpuid_t cii = cpu_index(curcpu());
    723 
    724 	printf("cpu%lu: shutting down\n", cii);
    725 	kcpuset_atomic_set(cpus_halted, cii);
     726 	spl0();		/* allow interrupts, e.g. a further IPI? */
    727 	for (;;) ;	/* spin */
    728 
    729 	/* NOTREACHED */
    730 }
    731 
    732 /*
    733  * Halt all running cpus, excluding current cpu.
    734  */
    735 void
    736 cpu_halt_others(void)
    737 {
    738 	kcpuset_t *kcp;
    739 
    740 	// If we are the only CPU running, there's nothing to do.
    741 	if (kcpuset_match(cpus_running, curcpu()->ci_data.cpu_kcpuset))
    742 		return;
    743 
    744 	// Get all running CPUs
    745 	kcpuset_clone(&kcp, cpus_running);
    746 	// Remove ourself
    747 	kcpuset_remove(kcp, curcpu()->ci_data.cpu_kcpuset);
    748 	// Remove any halted CPUs
    749 	kcpuset_remove(kcp, cpus_halted);
    750 	// If there are CPUs left, send the IPIs
    751 	if (!kcpuset_iszero(kcp)) {
    752 		cpu_multicast_ipi(kcp, IPI_HALT);
    753 		cpu_ipi_wait("halt", cpus_halted, kcp);
    754 	}
    755 	kcpuset_destroy(kcp);
    756 
    757 	/*
    758 	 * TBD
    759 	 * Depending on available firmware methods, other cpus will
    760 	 * either shut down themselves, or spin and wait for us to
    761 	 * stop them.
    762 	 */
    763 }
    764 
    765 /*
    766  * Pause this cpu
    767  */
    768 void
    769 cpu_pause(struct reg *regsp)
    770 {
    771 	int s = splhigh();
    772 	cpuid_t cii = cpu_index(curcpu());
    773 
    774 	if (__predict_false(cold))
    775 		return;
    776 
    777 	do {
    778 		kcpuset_atomic_set(cpus_paused, cii);
    779 		do {
    780 			;
    781 		} while (kcpuset_isset(cpus_paused, cii));
    782 		kcpuset_atomic_set(cpus_resumed, cii);
    783 #if defined(DDB)
    784 		if (ddb_running_on_this_cpu_p())
    785 			cpu_Debugger();
    786 		if (ddb_running_on_any_cpu_p())
    787 			continue;
    788 #endif
    789 	} while (false);
    790 
    791 	splx(s);
    792 }
    793 
    794 /*
    795  * Pause all running cpus, excluding current cpu.
    796  */
    797 void
    798 cpu_pause_others(void)
    799 {
    800 	struct cpu_info * const ci = curcpu();
    801 	if (cold || kcpuset_match(cpus_running, ci->ci_data.cpu_kcpuset))
    802 		return;
    803 
    804 	kcpuset_t *kcp = ci->ci_ddbcpus;
    805 
    806 	kcpuset_copy(kcp, cpus_running);
    807 	kcpuset_remove(kcp, ci->ci_data.cpu_kcpuset);
    808 	kcpuset_remove(kcp, cpus_paused);
    809 
    810 	cpu_broadcast_ipi(IPI_SUSPEND);
    811 	cpu_ipi_wait("pause", cpus_paused, kcp);
    812 }
    813 
    814 /*
    815  * Resume a single cpu
    816  */
    817 void
    818 cpu_resume(cpuid_t cii)
    819 {
    820 	if (__predict_false(cold))
    821 		return;
    822 
    823 	struct cpu_info * const ci = curcpu();
    824 	kcpuset_t *kcp = ci->ci_ddbcpus;
    825 
    826 	kcpuset_set(kcp, cii);
    827 	kcpuset_atomicly_remove(cpus_resumed, cpus_resumed);
    828 	kcpuset_atomic_clear(cpus_paused, cii);
    829 
    830 	cpu_ipi_wait("resume", cpus_resumed, kcp);
    831 }
    832 
    833 /*
    834  * Resume all paused cpus.
    835  */
    836 void
    837 cpu_resume_others(void)
    838 {
    839 	if (__predict_false(cold))
    840 		return;
    841 
    842 	struct cpu_info * const ci = curcpu();
    843 	kcpuset_t *kcp = ci->ci_ddbcpus;
    844 
    845 	kcpuset_atomicly_remove(cpus_resumed, cpus_resumed);
    846 	kcpuset_copy(kcp, cpus_paused);
    847 	kcpuset_atomicly_remove(cpus_paused, cpus_paused);
    848 
     849 	/* The paused CPUs wake up once their bit in cpus_paused is cleared. */
    850 	cpu_ipi_wait("resume", cpus_resumed, kcp);
    851 }
    852 
    853 bool
    854 cpu_is_paused(cpuid_t cii)
    855 {
    856 
    857 	return !cold && kcpuset_isset(cpus_paused, cii);
    858 }
    859 
    860 #ifdef DDB
    861 void
    862 cpu_debug_dump(void)
    863 {
    864 	CPU_INFO_ITERATOR cii;
    865 	struct cpu_info *ci;
    866 	char running, hatched, paused, resumed, halted;
    867 
    868 	db_printf("CPU CPUID STATE CPUINFO            CPL INT MTX IPIS\n");
    869 	for (CPU_INFO_FOREACH(cii, ci)) {
    870 		hatched = (kcpuset_isset(cpus_hatched, cpu_index(ci)) ? 'H' : '-');
    871 		running = (kcpuset_isset(cpus_running, cpu_index(ci)) ? 'R' : '-');
    872 		paused  = (kcpuset_isset(cpus_paused,  cpu_index(ci)) ? 'P' : '-');
    873 		resumed = (kcpuset_isset(cpus_resumed, cpu_index(ci)) ? 'r' : '-');
    874 		halted  = (kcpuset_isset(cpus_halted,  cpu_index(ci)) ? 'h' : '-');
    875 		db_printf("%3d 0x%03lx %c%c%c%c%c %p "
    876 			"%3d %3d %3d "
    877 			"0x%02" PRIx64 "/0x%02" PRIx64 "\n",
    878 			cpu_index(ci), ci->ci_cpuid,
    879 			running, hatched, paused, resumed, halted,
    880 			ci, ci->ci_cpl, ci->ci_idepth, ci->ci_mtx_count,
    881 			ci->ci_active_ipis, ci->ci_request_ipis);
    882 	}
    883 }
    884 #endif
    885 
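         /*
          * cpu_hatch - first kernel code run on a secondary CPU: set up its TLB,
          *	COP0 state and clock, announce itself as hatched, wait to be set
          *	running, then enter the idle loop.
          */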
    886 void
    887 cpu_hatch(struct cpu_info *ci)
    888 {
    889 	struct pmap_tlb_info * const ti = ci->ci_tlb_info;
    890 
    891 	/*
     892 	 * Invalidate all the TLB entries (even wired ones) and then reserve
    893 	 * space for the wired TLB entries.
    894 	 */
    895 	mips3_cp0_wired_write(0);
    896 	tlb_invalidate_all();
    897 	mips3_cp0_wired_write(ti->ti_wired);
    898 
    899 	/*
     900 	 * Set up HWRENA and USERLOCAL COP0 registers (MIPSxxR2).
    901 	 */
    902 	cpu_hwrena_setup();
    903 
    904 	/*
    905 	 * If we are using register zero relative addressing to access cpu_info
     906 	 * in the exception vectors, enter that mapping into the TLB now.
    907 	 */
    908 	if (ci->ci_tlb_slot >= 0) {
    909 		const uint32_t tlb_lo = MIPS3_PG_G|MIPS3_PG_V
    910 		    | mips3_paddr_to_tlbpfn((vaddr_t)ci);
    911 		const struct tlbmask tlbmask = {
    912 			.tlb_hi = -PAGE_SIZE | KERNEL_PID,
    913 #if (PGSHIFT & 1)
    914 			.tlb_lo0 = tlb_lo,
    915 			.tlb_lo1 = tlb_lo + MIPS3_PG_NEXT,
    916 #else
    917 			.tlb_lo0 = 0,
    918 			.tlb_lo1 = tlb_lo,
    919 #endif
    920 			.tlb_mask = -1,
    921 		};
    922 
    923 		tlb_invalidate_addr(tlbmask.tlb_hi, KERNEL_PID);
    924 		tlb_write_entry(ci->ci_tlb_slot, &tlbmask);
    925 	}
    926 
    927 	/*
     928 	 * Flush the icache just to be sure.
    929 	 */
    930 	mips_icache_sync_all();
    931 
    932 	/*
    933 	 * Let this CPU do its own initialization (for things that have to be
    934 	 * done on the local CPU).
    935 	 */
    936 	(*mips_locoresw.lsw_cpu_init)(ci);
    937 
    938 	// Show this CPU as present.
    939 	atomic_or_ulong(&ci->ci_flags, CPUF_PRESENT);
    940 
    941 	/*
    942 	 * Announce we are hatched
    943 	 */
    944 	kcpuset_atomic_set(cpus_hatched, cpu_index(ci));
    945 
    946 	/*
    947 	 * Now wait to be set free!
    948 	 */
    949 	while (! kcpuset_isset(cpus_running, cpu_index(ci))) {
    950 		/* spin, spin, spin */
    951 	}
    952 
    953 	/*
    954 	 * initialize the MIPS count/compare clock
    955 	 */
    956 	mips3_cp0_count_write(ci->ci_data.cpu_cc_skew);
    957 	KASSERT(ci->ci_cycles_per_hz != 0);
    958 	ci->ci_next_cp0_clk_intr = ci->ci_data.cpu_cc_skew + ci->ci_cycles_per_hz;
    959 	mips3_cp0_compare_write(ci->ci_next_cp0_clk_intr);
    960 	ci->ci_data.cpu_cc_skew = 0;
    961 
    962 	/*
    963 	 * Let this CPU do its own post-running initialization
    964 	 * (for things that have to be done on the local CPU).
    965 	 */
    966 	(*mips_locoresw.lsw_cpu_run)(ci);
    967 
    968 	/*
    969 	 * Now turn on interrupts (and verify they are on).
    970 	 */
    971 	spl0();
    972 	KASSERTMSG(ci->ci_cpl == IPL_NONE, "cpl %d", ci->ci_cpl);
    973 	KASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);
    974 
    975 	kcpuset_atomic_set(pmap_kernel()->pm_onproc, cpu_index(ci));
    976 	kcpuset_atomic_set(pmap_kernel()->pm_active, cpu_index(ci));
    977 
    978 	/*
    979 	 * And do a tail call to idle_loop
    980 	 */
    981 	idle_loop(NULL);
    982 }
    983 
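         /*
          * cpu_boot_secondary_processors - release every successfully hatched
          *	secondary CPU so that it can start scheduling work.
          */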
    984 void
    985 cpu_boot_secondary_processors(void)
    986 {
    987 	CPU_INFO_ITERATOR cii;
    988 	struct cpu_info *ci;
    989 	for (CPU_INFO_FOREACH(cii, ci)) {
    990 		if (CPU_IS_PRIMARY(ci))
    991 			continue;
    992 		KASSERT(ci->ci_data.cpu_idlelwp);
    993 
    994 		/*
     995 		 * Skip this CPU if it didn't successfully hatch.
    996 		 */
    997 		if (!kcpuset_isset(cpus_hatched, cpu_index(ci)))
    998 			continue;
    999 
   1000 		ci->ci_data.cpu_cc_skew = mips3_cp0_count_read();
   1001 		atomic_or_ulong(&ci->ci_flags, CPUF_RUNNING);
   1002 		kcpuset_set(cpus_running, cpu_index(ci));
   1003 		// Spin until the cpu calls idle_loop
   1004 		for (u_int i = 0; i < 100; i++) {
   1005 			if (kcpuset_isset(cpus_running, cpu_index(ci)))
   1006 				break;
   1007 			delay(1000);
   1008 		}
   1009 	}
   1010 }
   1011 
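         /*
          * xc_send_ipi - send the cross-call IPI to another CPU.
          */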
   1012 void
   1013 xc_send_ipi(struct cpu_info *ci)
   1014 {
   1015 
   1016 	(*mips_locoresw.lsw_send_ipi)(ci, IPI_XCALL);
   1017 }
   1018 
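         /*
          * cpu_ipi - send the generic IPI to another CPU.
          */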
   1019 void
   1020 cpu_ipi(struct cpu_info *ci)
   1021 {
   1022 	(*mips_locoresw.lsw_send_ipi)(ci, IPI_GENERIC);
   1023 }
   1024 
   1025 #endif /* MULTIPROCESSOR */
   1026 
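         /*
          * cpu_offline_md - MD hook run when a CPU is taken offline.
          */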
   1027 void
   1028 cpu_offline_md(void)
   1029 {
   1030 
   1031 	(*mips_locoresw.lsw_cpu_offline_md)();
   1032 }
   1033 
   1034 #ifdef _LP64
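         /*
          * cpu_vmspace_exec - enable or disable 64-bit user addressing (UX) to
          *	match the address space of the program being exec'd.
          */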
   1035 void
   1036 cpu_vmspace_exec(lwp_t *l, vaddr_t start, vaddr_t end)
   1037 {
   1038 	/*
    1039 	 * We need to turn UX on/off so that copyout/copyin work correctly,
    1040 	 * well before setreg gets called.
   1041 	 */
   1042 	uint32_t sr = mips_cp0_status_read();
   1043 	if (end != (uint32_t) end) {
   1044 		mips_cp0_status_write(sr | MIPS3_SR_UX);
   1045 	} else {
   1046 		mips_cp0_status_write(sr & ~MIPS3_SR_UX);
   1047 	}
   1048 }
   1049 #endif
   1050 
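         /*
          * cpu_lwp_setprivate - MD hook for setting an LWP's TLS base; if the
          *	LWP is running here and the CPU has UserLocal, update it now.
          */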
   1051 int
   1052 cpu_lwp_setprivate(lwp_t *l, void *v)
   1053 {
   1054 #if (MIPS32R2 + MIPS64R2) > 0
   1055 	if (l == curlwp
   1056 	    && (mips_options.mips_cpu->cpu_cp0flags & MIPS_CP0FL_USERLOCAL)) {
   1057 		mipsNN_cp0_userlocal_write(v);
   1058 	}
   1059 #endif
   1060 	return 0;
   1061 }
   1062 
   1063 
   1064 #if (MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) > 0
   1065 
   1066 #if (CPUWATCH_MAX != 8)
   1067 # error CPUWATCH_MAX
   1068 #endif
   1069 
   1070 /*
   1071  * cpuwatch_discover - determine how many COP0 watchpoints this CPU supports
   1072  */
   1073 u_int
   1074 cpuwatch_discover(void)
   1075 {
   1076 	int i;
   1077 
   1078 	for (i=0; i < CPUWATCH_MAX; i++) {
   1079 		uint32_t watchhi = mipsNN_cp0_watchhi_read(i);
   1080 		if ((watchhi & __BIT(31)) == 0)	/* test 'M' bit */
   1081 			break;
   1082 	}
   1083 	return i + 1;
   1084 }
   1085 
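         /*
          * cpuwatch_free - release a watchpoint slot and clear it in hardware
          */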
   1086 void
   1087 cpuwatch_free(cpu_watchpoint_t *cwp)
   1088 {
   1089 #ifdef DIAGNOSTIC
   1090 	struct cpu_info * const ci = curcpu();
   1091 	KASSERT(cwp >= &ci->ci_cpuwatch_tab[0] &&
   1092 		cwp <= &ci->ci_cpuwatch_tab[ci->ci_cpuwatch_count-1]);
   1093 #endif
   1094 	cwp->cw_mode = 0;
   1095 	cwp->cw_asid = 0;
   1096 	cwp->cw_addr = 0;
   1097 	cpuwatch_clr(cwp);
   1098 }
   1099 
   1100 /*
   1101  * cpuwatch_alloc
   1102  * 	find an empty slot
   1103  *	no locking for the table since it is CPU private
   1104  */
   1105 cpu_watchpoint_t *
   1106 cpuwatch_alloc(void)
   1107 {
   1108 	struct cpu_info * const ci = curcpu();
   1109 	cpu_watchpoint_t *cwp;
   1110 
   1111 	for (int i=0; i < ci->ci_cpuwatch_count; i++) {
   1112 		cwp = &ci->ci_cpuwatch_tab[i];
   1113 		if ((cwp->cw_mode & CPUWATCH_RWX) == 0)
   1114 			return cwp;
   1115 	}
   1116 	return NULL;
   1117 }
   1118 
   1119 
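         /*
          * cpuwatch_set_all - program every in-use watchpoint into the COP0
          *	watch registers
          */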
   1120 void
   1121 cpuwatch_set_all(void)
   1122 {
   1123 	struct cpu_info * const ci = curcpu();
   1124 	cpu_watchpoint_t *cwp;
   1125 	int i;
   1126 
   1127 	for (i=0; i < ci->ci_cpuwatch_count; i++) {
   1128 		cwp = &ci->ci_cpuwatch_tab[i];
   1129 		if ((cwp->cw_mode & CPUWATCH_RWX) != 0)
   1130 			cpuwatch_set(cwp);
   1131 	}
   1132 }
   1133 
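         /*
          * cpuwatch_clr_all - clear every in-use watchpoint from the COP0
          *	watch registers
          */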
   1134 void
   1135 cpuwatch_clr_all(void)
   1136 {
   1137 	struct cpu_info * const ci = curcpu();
   1138 	cpu_watchpoint_t *cwp;
   1139 	int i;
   1140 
   1141 	for (i=0; i < ci->ci_cpuwatch_count; i++) {
   1142 		cwp = &ci->ci_cpuwatch_tab[i];
   1143 		if ((cwp->cw_mode & CPUWATCH_RWX) != 0)
   1144 			cpuwatch_clr(cwp);
   1145 	}
   1146 }
   1147 
   1148 /*
   1149  * cpuwatch_set - establish a MIPS COP0 watchpoint
   1150  */
   1151 void
   1152 cpuwatch_set(cpu_watchpoint_t *cwp)
   1153 {
   1154 	struct cpu_info * const ci = curcpu();
   1155 	uint32_t watchhi;
   1156 	register_t watchlo;
   1157 	int cwnum = cwp - &ci->ci_cpuwatch_tab[0];
   1158 
   1159 	KASSERT(cwp >= &ci->ci_cpuwatch_tab[0] &&
   1160 		cwp <= &ci->ci_cpuwatch_tab[ci->ci_cpuwatch_count-1]);
   1161 
   1162 	watchlo = cwp->cw_addr;
   1163 	if (cwp->cw_mode & CPUWATCH_WRITE)
   1164 		watchlo |= __BIT(0);
   1165 	if (cwp->cw_mode & CPUWATCH_READ)
   1166 		watchlo |= __BIT(1);
   1167 	if (cwp->cw_mode & CPUWATCH_EXEC)
   1168 		watchlo |= __BIT(2);
   1169 
   1170 	if (cwp->cw_mode & CPUWATCH_ASID)
   1171 		watchhi = cwp->cw_asid << 16;	/* addr qualified by asid */
   1172 	else
   1173 		watchhi = __BIT(30);		/* addr not qual. by asid (Global) */
   1174 	if (cwp->cw_mode & CPUWATCH_MASK)
    1175 		watchhi |= cwp->cw_mask;	/* set "don't care" addr match bits */
   1176 
   1177 	mipsNN_cp0_watchhi_write(cwnum, watchhi);
   1178 	mipsNN_cp0_watchlo_write(cwnum, watchlo);
   1179 }
   1180 
   1181 /*
   1182  * cpuwatch_clr - disestablish a MIPS COP0 watchpoint
   1183  */
   1184 void
   1185 cpuwatch_clr(cpu_watchpoint_t *cwp)
   1186 {
   1187 	struct cpu_info * const ci = curcpu();
   1188 	int cwnum = cwp - &ci->ci_cpuwatch_tab[0];
   1189 
   1190 	KASSERT(cwp >= &ci->ci_cpuwatch_tab[0] &&
   1191 		cwp <= &ci->ci_cpuwatch_tab[ci->ci_cpuwatch_count-1]);
   1192 
   1193 	mipsNN_cp0_watchhi_write(cwnum, 0);
   1194 	mipsNN_cp0_watchlo_write(cwnum, 0);
   1195 }
   1196 
   1197 #endif	/* (MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) > 0 */
   1198