rmixl_cpu.c revision 1.3
11.3Scliff/*	$NetBSD: rmixl_cpu.c,v 1.3 2011/04/14 05:12:58 cliff Exp $	*/
21.2Smatt
31.2Smatt/*
41.2Smatt * Copyright 2002 Wasabi Systems, Inc.
51.2Smatt * All rights reserved.
61.2Smatt *
71.2Smatt * Written by Simon Burge for Wasabi Systems, Inc.
81.2Smatt *
91.2Smatt * Redistribution and use in source and binary forms, with or without
101.2Smatt * modification, are permitted provided that the following conditions
111.2Smatt * are met:
121.2Smatt * 1. Redistributions of source code must retain the above copyright
131.2Smatt *    notice, this list of conditions and the following disclaimer.
141.2Smatt * 2. Redistributions in binary form must reproduce the above copyright
151.2Smatt *    notice, this list of conditions and the following disclaimer in the
161.2Smatt *    documentation and/or other materials provided with the distribution.
171.2Smatt * 3. All advertising materials mentioning features or use of this software
181.2Smatt *    must display the following acknowledgement:
191.2Smatt *      This product includes software developed for the NetBSD Project by
201.2Smatt *      Wasabi Systems, Inc.
211.2Smatt * 4. The name of Wasabi Systems, Inc. may not be used to endorse
221.2Smatt *    or promote products derived from this software without specific prior
231.2Smatt *    written permission.
241.2Smatt *
251.2Smatt * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
261.2Smatt * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
271.2Smatt * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
281.2Smatt * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
291.2Smatt * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
301.2Smatt * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
311.2Smatt * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
321.2Smatt * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
331.2Smatt * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
341.2Smatt * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
351.2Smatt * POSSIBILITY OF SUCH DAMAGE.
361.2Smatt */
371.2Smatt
381.2Smatt#include "locators.h"
391.2Smatt
401.2Smatt#include <sys/cdefs.h>
411.3Scliff__KERNEL_RCSID(0, "$NetBSD: rmixl_cpu.c,v 1.3 2011/04/14 05:12:58 cliff Exp $");
421.2Smatt
431.2Smatt#include "opt_multiprocessor.h"
441.2Smatt#include "opt_ddb.h"
451.2Smatt
461.2Smatt#include "opt_multiprocessor.h"
471.2Smatt
481.2Smatt#include <sys/param.h>
491.2Smatt#include <sys/device.h>
501.2Smatt#include <sys/systm.h>
511.2Smatt#include <sys/cpu.h>
521.2Smatt#include <sys/lock.h>
531.2Smatt#include <sys/lwp.h>
541.2Smatt#include <sys/cpu.h>
551.2Smatt#include <sys/malloc.h>
561.2Smatt#include <uvm/uvm_pglist.h>
571.2Smatt#include <uvm/uvm_extern.h>
581.2Smatt#include <mips/regnum.h>
591.2Smatt#include <mips/asm.h>
601.2Smatt#include <mips/pmap.h>
611.2Smatt#include <mips/rmi/rmixlreg.h>
621.2Smatt#include <mips/rmi/rmixlvar.h>
631.2Smatt#include <mips/rmi/rmixl_cpucorevar.h>
641.2Smatt#include <mips/rmi/rmixl_cpuvar.h>
651.2Smatt#include <mips/rmi/rmixl_intr.h>
661.2Smatt#include <mips/rmi/rmixl_fmnvar.h>
671.2Smatt#ifdef DDB
681.2Smatt#include <mips/db_machdep.h>
691.2Smatt#endif
701.2Smatt
711.2Smatt
721.2Smattstatic int	cpu_rmixl_match(device_t, cfdata_t, void *);
731.2Smattstatic void	cpu_rmixl_attach(device_t, device_t, void *);
741.2Smattstatic void	cpu_rmixl_attach_primary(struct rmixl_cpu_softc * const);
751.2Smatt#ifdef NOTYET
761.2Smattstatic int	cpu_fmn_intr(void *, rmixl_fmn_rxmsg_t *);
771.2Smatt#endif
781.2Smatt
791.2Smatt#ifdef MULTIPROCESSOR
801.2Smattvoid		cpu_rmixl_hatch(struct cpu_info *);
811.3Scliffvoid		cpu_rmixl_run(struct cpu_info *);
821.2Smatt#if 0
831.2Smattstatic void	cpu_setup_trampoline_ipi(struct device *, struct cpu_info *);
841.2Smatt#endif
851.2Smattstatic int	cpu_setup_trampoline_common(struct cpu_info *, struct rmixl_cpu_trampoline_args *);
861.2Smattstatic void	cpu_setup_trampoline_callback(struct cpu_info *);
871.2Smatt#endif	/* MULTIPROCESSOR */
881.2Smatt
891.2Smatt#ifdef DEBUG
901.2Smattvoid		rmixl_cpu_data_print(struct cpu_data *);
911.2Smattstruct cpu_info *
921.2Smatt		rmixl_cpuinfo_print(u_int);
931.2Smatt#endif	/* DEBUG */
941.2Smatt
951.2SmattCFATTACH_DECL_NEW(cpu_rmixl, sizeof(struct rmixl_cpu_softc),
961.2Smatt	cpu_rmixl_match, cpu_rmixl_attach, NULL, NULL);
971.2Smatt
981.2Smatt#ifdef MULTIPROCESSOR
991.2Smattstatic struct rmixl_cpu_trampoline_args rmixl_cpu_trampoline_args;
1001.2Smatt#endif
1011.2Smatt
1021.2Smatt/*
1031.2Smatt * cpu_rmixl_db_watch_init - initialize COP0 watchpoint stuff
1041.2Smatt *
1051.2Smatt * clear IEU_DEFEATURE[DBE] to ensure T_WATCH on watchpoint exception
1061.2Smatt * set COP0 watchhi and watchlo
1071.3Scliff *
1081.3Scliff * disable all watchpoints
1091.2Smatt */
1101.2Smattstatic void
1111.2Smattcpu_rmixl_db_watch_init(void)
1121.2Smatt{
1131.3Scliff	uint32_t r;
1141.3Scliff
1151.3Scliff	r = rmixl_mfcr(RMIXL_PCR_IEU_DEFEATURE);
1161.3Scliff	r &= ~__BIT(7);		/* DBE */
1171.3Scliff	rmixl_mtcr(RMIXL_PCR_IEU_DEFEATURE, r);
1181.3Scliff
1191.3Scliff	cpuwatch_clr_all();
1201.2Smatt}
1211.2Smatt
1221.2Smatt/*
1231.2Smatt * cpu_xls616_erratum
1241.2Smatt *
1251.2Smatt * on the XLS616, COUNT/COMPARE clock regs seem to interact between
1261.2Smatt * threads on a core
1271.2Smatt *
1281.2Smatt * the symptom of the error is retarded clock interrupts
1291.2Smatt * and very slow apparent system performance
1301.2Smatt *
1311.2Smatt * other XLS chips may have the same problem.
1321.2Smatt * we may need to add other PID checks.
1331.2Smatt */
1341.2Smattstatic inline bool
1351.2Smattcpu_xls616_erratum(device_t parent, struct cpucore_attach_args *ca)
1361.2Smatt{
1371.2Smatt#if 0
1381.2Smatt	if (mips_options.mips_cpu->cpu_pid == MIPS_XLS616) {
1391.2Smatt		if (ca->ca_thread > 0) {
1401.2Smatt			aprint_error_dev(parent, "XLS616 CLOCK ERRATUM: "
1411.2Smatt				"deconfigure cpu%d\n", ca->ca_thread);
1421.2Smatt			return true;
1431.2Smatt		}
1441.2Smatt	}
1451.2Smatt#endif
1461.2Smatt	return false;
1471.2Smatt}
1481.2Smatt
1491.2Smattstatic bool
1501.2Smattcpu_rmixl_erratum(device_t parent, struct cpucore_attach_args *ca)
1511.2Smatt{
1521.2Smatt	return cpu_xls616_erratum(parent, ca);
1531.2Smatt}
1541.2Smatt
1551.2Smattstatic int
1561.2Smattcpu_rmixl_match(device_t parent, cfdata_t cf, void *aux)
1571.2Smatt{
1581.2Smatt	struct cpucore_attach_args *ca = aux;
1591.2Smatt	int thread = cf->cf_loc[CPUCORECF_THREAD];
1601.2Smatt
1611.2Smatt	if (!cpu_rmixl(mips_options.mips_cpu))
1621.2Smatt		return 0;
1631.2Smatt
1641.2Smatt	if (strncmp(ca->ca_name, cf->cf_name, strlen(cf->cf_name)) == 0
1651.2Smatt#ifndef MULTIPROCESSOR
1661.2Smatt	    && ca->ca_thread == 0
1671.2Smatt#endif
1681.2Smatt	    && (thread == CPUCORECF_THREAD_DEFAULT || thread == ca->ca_thread)
1691.2Smatt	    && (!cpu_rmixl_erratum(parent, ca)))
1701.2Smatt			return 1;
1711.2Smatt
1721.2Smatt	return 0;
1731.2Smatt}
1741.2Smatt
/*
 * cpu_rmixl_attach
 *
 * The first instance to attach is taken to be the already-running
 * primary CPU; later instances are secondary hardware threads that
 * are woken through the firmware trampoline (MULTIPROCESSOR only —
 * note the 'else' branch below exists only under that option; without
 * it, cpu_rmixl_match() limits attachment to thread 0).
 */
static void
cpu_rmixl_attach(device_t parent, device_t self, void *aux)
{
	struct rmixl_cpu_softc * const sc = device_private(self);
	struct cpu_info *ci = NULL;
	static bool once = false;
	extern void rmixl_spl_init_cpu(void);

	if (once == false) {
		/* first attach is the primary cpu */
		once = true;
		ci = curcpu();
		sc->sc_dev = self;
		sc->sc_ci = ci;
		ci->ci_softc = (void *)sc;

		rmixl_spl_init_cpu();	/* spl initialization for CPU#0 */
		cpu_rmixl_attach_primary(sc);

#ifdef MULTIPROCESSOR
		/* hook chip-specific init/run callbacks into the locore switch */
		mips_locoresw.lsw_cpu_init = cpu_rmixl_hatch;
		mips_locoresw.lsw_cpu_run = cpu_rmixl_run;
	} else {
		/* secondary CPU: allocate its cpu_info and wake it up */
		struct cpucore_attach_args *ca = aux;
		struct cpucore_softc * const ccsc = device_private(parent);
		rmixlfw_psb_type_t psb_type = rmixl_configuration.rc_psb_type;
		cpuid_t cpuid;

		KASSERT(ca->ca_core < 8);
		KASSERT(ca->ca_thread < 4);
		/* cpuid encodes core and thread: (core << 2) | thread */
		cpuid = (ca->ca_core << 2) | ca->ca_thread;
		ci = cpu_info_alloc(ccsc->sc_tlbinfo, cpuid,
		    /* XXX */ 0, ca->ca_core, ca->ca_thread);
		KASSERT(ci != NULL);
		/* first thread of a core donates its TLB info to siblings */
		if (ccsc->sc_tlbinfo == NULL)
			ccsc->sc_tlbinfo = ci->ci_tlb_info;
		sc->sc_dev = self;
		sc->sc_ci = ci;
		ci->ci_softc = (void *)sc;

		/* only firmware types with a known wakeup entry are supported */
		switch (psb_type) {
		case PSB_TYPE_RMI:
		case PSB_TYPE_DELL:
			cpu_setup_trampoline_callback(ci);
			break;
		default:
			aprint_error(": psb type=%s cpu_wakeup unsupported\n",
				rmixlfw_psb_type_name(psb_type));
			return;
		}

		/* poll (10000 * 100us) for the CPU to mark itself hatched */
		const u_long cpu_mask = 1L << cpu_index(ci);
		for (size_t i=0; i < 10000; i++) {
			if ((cpus_hatched & cpu_mask) != 0)
				 break;
			DELAY(100);
		}
		if ((cpus_hatched & cpu_mask) == 0) {
			aprint_error(": failed to hatch\n");
			return;
		}
#endif	/* MULTIPROCESSOR */
	}

	/*
	 * do per-cpu interrupt initialization
	 */
	rmixl_intr_init_cpu(ci);

	aprint_normal("\n");

        cpu_attach_common(self, ci);
}
2481.2Smatt
2491.2Smatt/*
2501.2Smatt * attach the primary processor
2511.2Smatt */
/*
 * cpu_rmixl_attach_primary
 *
 * Finish attachment of the already-running primary CPU: read its
 * cpuid from CP0 EBase, initialize COP0 watchpoints, the fast
 * messaging network, and clock (plus, under MULTIPROCESSOR, IPI)
 * interrupts.
 */
static void
cpu_rmixl_attach_primary(struct rmixl_cpu_softc * const sc)
{
	struct cpu_info *ci = sc->sc_ci;
	uint32_t ebase;

	KASSERT(CPU_IS_PRIMARY(ci));

	/*
	 * obtain and set cpuid of the primary processor:
	 * the low 10 bits of CP0 EBase (reg 15, select 1) are the CPU number
	 */
	asm volatile("dmfc0 %0, $15, 1;" : "=r"(ebase));
	ci->ci_cpuid = ebase & __BITS(9,0);

	cpu_rmixl_db_watch_init();

	rmixl_fmn_init();

	rmixl_intr_init_clk();
#ifdef MULTIPROCESSOR
	rmixl_intr_init_ipi();
#endif

#ifdef NOTYET
	/* establish the FMN receive interrupt for this core */
	void *ih = rmixl_fmn_intr_establish(RMIXL_FMN_STID_CORE0,
		cpu_fmn_intr, ci);
	if (ih == NULL)
		panic("%s: rmixl_fmn_intr_establish failed",
			__func__);
	sc->sc_ih_fmn = ih;
#endif

}
2851.2Smatt
2861.2Smatt#ifdef NOTYET
2871.2Smattstatic int
2881.2Smattcpu_fmn_intr(void *arg, rmixl_fmn_rxmsg_t *rxmsg)
2891.2Smatt{
2901.2Smatt	if (CPU_IS_PRIMARY(curcpu())) {
2911.2Smatt		printf("%s: cpu%ld: rxsid=%#x, code=%d, size=%d\n",
2921.2Smatt			__func__, cpu_number(),
2931.2Smatt			rxmsg->rxsid, rxmsg->code, rxmsg->size);
2941.2Smatt		for (int i=0; i < rxmsg->size; i++)
2951.2Smatt			printf("\t%#"PRIx64"\n", rxmsg->msg.data[i]);
2961.2Smatt	}
2971.2Smatt
2981.2Smatt	return 1;
2991.2Smatt}
3001.2Smatt#endif
3011.2Smatt
3021.2Smatt#ifdef MULTIPROCESSOR
3031.2Smatt/*
3041.3Scliff * cpu_rmixl_run
3051.3Scliff *
3061.3Scliff * - chip-specific post-running code called from cpu_hatch via lsw_cpu_run
3071.3Scliff */
3081.3Scliffvoid
3091.3Scliffcpu_rmixl_run(struct cpu_info *ci)
3101.3Scliff{
3111.3Scliff	struct rmixl_cpu_softc * const sc = (void *)ci->ci_softc;
3121.3Scliff	cpucore_rmixl_run(device_parent(sc->sc_dev));
3131.3Scliff}
3141.3Scliff
3151.3Scliff/*
3161.2Smatt * cpu_rmixl_hatch
3171.2Smatt *
3181.2Smatt * - chip-specific hatch code called from cpu_hatch via lsw_cpu_init
3191.2Smatt */
/*
 * cpu_rmixl_hatch
 *
 * - chip-specific hatch code called from cpu_hatch via lsw_cpu_init;
 *   runs on the hatching (secondary) CPU itself
 */
void
cpu_rmixl_hatch(struct cpu_info *ci)
{
	struct rmixl_cpu_softc * const sc = (void *)ci->ci_softc;
	extern void rmixl_spl_init_cpu(void);

	rmixl_spl_init_cpu();	/* spl initialization for this CPU */

	(void)splhigh();	/* raise ipl for the rest of the hatch */

#ifdef DEBUG
	/* verify the CP0 EBase CPU number matches what attach assigned */
	uint32_t ebase;
	asm volatile("dmfc0 %0, $15, 1;" : "=r"(ebase));
	KASSERT((ebase & __BITS(9,0)) == ci->ci_cpuid);
	KASSERT(curcpu() == ci);
#endif

	cpucore_rmixl_hatch(device_parent(sc->sc_dev));

	cpu_rmixl_db_watch_init();	/* disable all COP0 watchpoints */
}
3411.2Smatt
/*
 * cpu_setup_trampoline_common
 *
 * Marshal the initial stack pointer, idle lwp, and cpu_info of 'ci'
 * into the trampoline argument block 'ta' that rmixl_cpu_trampoline
 * reads when the firmware starts the CPU.  Always returns 0.
 */
static int
cpu_setup_trampoline_common(struct cpu_info *ci, struct rmixl_cpu_trampoline_args *ta)
{
	struct lwp *l = ci->ci_data.cpu_idlelwp;
	uintptr_t stacktop;

#ifdef DIAGNOSTIC
	/* Ensure our current stack can be used by the firmware */
	uint64_t sp;
	__asm__ volatile("move	%0, $sp\n" : "=r"(sp));
#ifdef _LP64
	/* can be made into a KSEG0 addr */
	KASSERT(MIPS_XKPHYS_P(sp));
	KASSERT((MIPS_XKPHYS_TO_PHYS(sp) >> 32) == 0);
#else
	/* is a KSEG0 addr */
	KASSERT(MIPS_KSEG0_P(sp));
#endif	/* _LP64 */
#endif	/* DIAGNOSTIC */

#ifndef _LP64
	/*
	 * Ensure 'ci' is a KSEG0 address for trampoline args
	 * to avoid TLB fault in cpu_trampoline() when loading ci_idlelwp
	 */
	KASSERT(MIPS_KSEG0_P(ci));
#endif

	/*
	 * Ensure 'ta' is a KSEG0 address for trampoline args
	 * to avoid TLB fault in trampoline when loading args.
	 *
	 * Note:
	 *   RMI firmware only passes the lower 32-bit half of 'ta'
	 *   to rmixl_cpu_trampoline (the upper half is clear)
	 *   so rmixl_cpu_trampoline must reconstruct the missing upper half
	 *   rmixl_cpu_trampoline "knows" 'ta' is a KSEG0 address
	 *   and sign-extends to make an LP64 KSEG0 address.
	 */
	KASSERT(MIPS_KSEG0_P(ta));

	/*
	 * marshal args for rmixl_cpu_trampoline;
	 * note for non-LP64 kernel, use of intptr_t
	 * forces sign extension of 32 bit pointers
	 */
	stacktop = (uintptr_t)l->l_md.md_utf - CALLFRAME_SIZ;
	ta->ta_sp = (uint64_t)(intptr_t)stacktop;
	ta->ta_lwp = (uint64_t)(intptr_t)l;
	ta->ta_cpuinfo = (uint64_t)(intptr_t)ci;

	return 0;
}
3951.2Smatt
3961.2Smattstatic void
3971.2Smattcpu_setup_trampoline_callback(struct cpu_info *ci)
3981.2Smatt{
3991.2Smatt	void (*wakeup_cpu)(void *, void *, unsigned int);
4001.2Smatt	struct rmixl_cpu_trampoline_args *ta = &rmixl_cpu_trampoline_args;
4011.2Smatt	extern void rmixl_cpu_trampoline(void *);
4021.2Smatt	extern void rmixlfw_wakeup_cpu(void *, void *, u_int64_t, void *);
4031.2Smatt
4041.2Smatt	cpu_setup_trampoline_common(ci, ta);
4051.2Smatt
4061.2Smatt#if _LP64
4071.2Smatt	wakeup_cpu = (void *)rmixl_configuration.rc_psb_info.wakeup;
4081.2Smatt#else
4091.2Smatt	wakeup_cpu = (void *)(intptr_t)
4101.2Smatt		(rmixl_configuration.rc_psb_info.wakeup & 0xffffffff);
4111.2Smatt#endif
4121.2Smatt
4131.2Smatt	rmixlfw_wakeup_cpu(rmixl_cpu_trampoline, (void *)ta,
4141.2Smatt		(uint64_t)1 << ci->ci_cpuid, wakeup_cpu);
4151.2Smatt}
4161.2Smatt#endif	/* MULTIPROCESSOR */
4171.2Smatt
4181.2Smatt
4191.2Smatt#ifdef DEBUG
/*
 * rmixl_cpu_data_print
 *
 * DEBUG helper: dump each field of a struct cpu_data to the console,
 * one "name value" line per field.
 */
void
rmixl_cpu_data_print(struct cpu_data *dp)
{
	printf("cpu_biglock_wanted %p\n", dp->cpu_biglock_wanted);
	printf("cpu_callout %p\n", dp->cpu_callout);
	printf("cpu_unused1 %p\n", dp->cpu_unused1);
	printf("cpu_unused2 %d\n", dp->cpu_unused2);
	printf("&cpu_schedstate %p\n", &dp->cpu_schedstate);	/* TBD */
	printf("&cpu_xcall %p\n", &dp->cpu_xcall);		/* TBD */
	printf("cpu_xcall_pending %d\n", dp->cpu_xcall_pending);
	printf("cpu_onproc %p\n", dp->cpu_onproc);
	printf("&cpu_qchain %p\n", &dp->cpu_qchain);		/* TBD */
	printf("cpu_idlelwp %p\n", dp->cpu_idlelwp);
	printf("cpu_lockstat %p\n", dp->cpu_lockstat);
	printf("cpu_index %d\n", dp->cpu_index);
	printf("cpu_biglock_count %d\n", dp->cpu_biglock_count);
	printf("cpu_spin_locks %d\n", dp->cpu_spin_locks);
	printf("cpu_simple_locks %d\n", dp->cpu_simple_locks);
	printf("cpu_spin_locks2 %d\n", dp->cpu_spin_locks2);
	printf("cpu_lkdebug_recurse %d\n", dp->cpu_lkdebug_recurse);
	printf("cpu_softints %d\n", dp->cpu_softints);
	printf("cpu_nsyscall %"PRIu64"\n", dp->cpu_nsyscall);
	printf("cpu_ntrap %"PRIu64"\n", dp->cpu_ntrap);
	printf("cpu_nfault %"PRIu64"\n", dp->cpu_nfault);
	printf("cpu_nintr %"PRIu64"\n", dp->cpu_nintr);
	printf("cpu_nsoft %"PRIu64"\n", dp->cpu_nsoft);
	printf("cpu_nswtch %"PRIu64"\n", dp->cpu_nswtch);
	printf("cpu_uvm %p\n", dp->cpu_uvm);
	printf("cpu_softcpu %p\n", dp->cpu_softcpu);
	printf("&cpu_biodone %p\n", &dp->cpu_biodone);		/* TBD */
	printf("&cpu_percpu %p\n", &dp->cpu_percpu);		/* TBD */
	printf("cpu_selcluster %p\n", dp->cpu_selcluster);
	printf("cpu_nch %p\n", dp->cpu_nch);
	printf("&cpu_ld_locks %p\n", &dp->cpu_ld_locks);	/* TBD */
	printf("&cpu_ld_lock %p\n", &dp->cpu_ld_lock);		/* TBD */
	printf("cpu_cc_freq %#"PRIx64"\n", dp->cpu_cc_freq);
	printf("cpu_cc_skew %#"PRIx64"\n", dp->cpu_cc_skew);
}
4581.2Smatt
/*
 * rmixl_cpuinfo_print
 *
 * DEBUG helper: look up the cpu_info for 'cpuindex' and, if it exists,
 * dump its fields (including its embedded cpu_data) to the console.
 * Returns the cpu_info pointer, or NULL if there is no such cpu.
 */
struct cpu_info *
rmixl_cpuinfo_print(u_int cpuindex)
{
	struct cpu_info * const ci = cpu_lookup(cpuindex);

	if (ci != NULL) {
		rmixl_cpu_data_print(&ci->ci_data);
		printf("ci_dev %p\n", ci->ci_dev);
		printf("ci_cpuid %ld\n", ci->ci_cpuid);
		printf("ci_cctr_freq %ld\n", ci->ci_cctr_freq);
		printf("ci_cpu_freq %ld\n", ci->ci_cpu_freq);
		printf("ci_cycles_per_hz %ld\n", ci->ci_cycles_per_hz);
		printf("ci_divisor_delay %ld\n", ci->ci_divisor_delay);
		printf("ci_divisor_recip %ld\n", ci->ci_divisor_recip);
		printf("ci_curlwp %p\n", ci->ci_curlwp);
		printf("ci_want_resched %d\n", ci->ci_want_resched);
		printf("ci_mtx_count %d\n", ci->ci_mtx_count);
		printf("ci_mtx_oldspl %d\n", ci->ci_mtx_oldspl);
		printf("ci_idepth %d\n", ci->ci_idepth);
		printf("ci_cpl %d\n", ci->ci_cpl);
		printf("&ci_cpl %p\n", &ci->ci_cpl);	/* XXX */
		printf("ci_next_cp0_clk_intr %#x\n", ci->ci_next_cp0_clk_intr);
		for (int i=0; i < SOFTINT_COUNT; i++)
			printf("ci_softlwps[%d] %p\n", i, ci->ci_softlwps[i]);
		printf("ci_tlb_slot %d\n", ci->ci_tlb_slot);
		printf("ci_pmap_asid_cur %d\n", ci->ci_pmap_asid_cur);
		printf("ci_tlb_info %p\n", ci->ci_tlb_info);
		printf("ci_pmap_seg0tab %p\n", ci->ci_pmap_seg0tab);
#ifdef _LP64
		printf("ci_pmap_segtab %p\n", ci->ci_pmap_segtab);
#else
		printf("ci_pmap_srcbase %#"PRIxVADDR"\n", ci->ci_pmap_srcbase);
		printf("ci_pmap_dstbase %#"PRIxVADDR"\n", ci->ci_pmap_dstbase);
#endif
#ifdef MULTIPROCESSOR
		printf("ci_flags %#lx\n", ci->ci_flags);
		printf("ci_request_ipis %#"PRIx64"\n", ci->ci_request_ipis);
		printf("ci_active_ipis %#"PRIx64"\n", ci->ci_active_ipis);
		printf("ci_ksp_tlb_slot %d\n", ci->ci_ksp_tlb_slot);
#endif
	}

	return ci;
}
5031.2Smatt#endif	/* DEBUG */
504