      1 /*	$NetBSD: mips_machdep.c,v 1.308 2025/04/24 23:55:18 riastradh Exp $	*/
      2 
      3 /*
      4  * Copyright 2002 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Simon Burge for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*
     39  * Copyright 2000, 2001
     40  * Broadcom Corporation. All rights reserved.
     41  *
     42  * This software is furnished under license and may be used and copied only
     43  * in accordance with the following terms and conditions.  Subject to these
     44  * conditions, you may download, copy, install, use, modify and distribute
     45  * modified or unmodified copies of this software in source and/or binary
     46  * form. No title or ownership is transferred hereby.
     47  *
     48  * 1) Any source code used, modified or distributed must reproduce and
     49  *    retain this copyright notice and list of conditions as they appear in
     50  *    the source file.
     51  *
     52  * 2) No right is granted to use any trade name, trademark, or logo of
     53  *    Broadcom Corporation.  The "Broadcom Corporation" name may not be
     54  *    used to endorse or promote products derived from this software
     55  *    without the prior written permission of Broadcom Corporation.
     56  *
     57  * 3) THIS SOFTWARE IS PROVIDED "AS-IS" AND ANY EXPRESS OR IMPLIED
     58  *    WARRANTIES, INCLUDING BUT NOT LIMITED TO, ANY IMPLIED WARRANTIES OF
     59  *    MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR
     60  *    NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM BE LIABLE
     61  *    FOR ANY DAMAGES WHATSOEVER, AND IN PARTICULAR, BROADCOM SHALL NOT BE
     62  *    LIABLE FOR DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     63  *    CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     64  *    SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
     65  *    BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
     66  *    WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
     67  *    OR OTHERWISE), EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     68  */
     69 
     70 /*-
     71  * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
     72  * All rights reserved.
     73  *
     74  * This code is derived from software contributed to The NetBSD Foundation
     75  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
     76  * NASA Ames Research Center and by Chris Demetriou.
     77  *
     78  * Redistribution and use in source and binary forms, with or without
     79  * modification, are permitted provided that the following conditions
     80  * are met:
     81  * 1. Redistributions of source code must retain the above copyright
     82  *    notice, this list of conditions and the following disclaimer.
     83  * 2. Redistributions in binary form must reproduce the above copyright
     84  *    notice, this list of conditions and the following disclaimer in the
     85  *    documentation and/or other materials provided with the distribution.
     86  *
     87  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     88  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     89  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     90  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     91  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     92  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     93  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     94  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     95  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     96  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     97  * POSSIBILITY OF SUCH DAMAGE.
     98  */
     99 
    100 /*
    101  * Copyright 1996 The Board of Trustees of The Leland Stanford
    102  * Junior University. All Rights Reserved.
    103  *
    104  * Permission to use, copy, modify, and distribute this
    105  * software and its documentation for any purpose and without
    106  * fee is hereby granted, provided that the above copyright
    107  * notice appear in all copies.  Stanford University
    108  * makes no representations about the suitability of this
    109  * software for any purpose.  It is provided "as is" without
    110  * express or implied warranty.
    111  */
    112 
    113 #include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */
    114 __KERNEL_RCSID(0, "$NetBSD: mips_machdep.c,v 1.308 2025/04/24 23:55:18 riastradh Exp $");
    115 
    116 #define __INTR_PRIVATE
    117 #include "opt_cputype.h"
    118 #include "opt_compat_netbsd32.h"
    119 #include "opt_multiprocessor.h"
    120 
    121 #include <sys/param.h>
    122 #include <sys/systm.h>
    123 #include <sys/proc.h>
    124 #include <sys/intr.h>
    125 #include <sys/exec.h>
    126 #include <sys/reboot.h>
    127 #include <sys/module.h>
    128 #include <sys/mount.h>			/* fsid_t for syscallargs */
    129 #include <sys/lwp.h>
    130 #include <sys/sysctl.h>
    131 #include <sys/msgbuf.h>
    132 #include <sys/conf.h>
    133 #include <sys/core.h>
    134 #include <sys/device.h>
    135 #include <sys/kcore.h>
    136 #include <sys/kmem.h>
    137 #include <sys/ras.h>
    138 #include <sys/cpu.h>
    139 #include <sys/atomic.h>
    140 #include <sys/ucontext.h>
    141 #include <sys/bitops.h>
    142 
    143 #include <mips/kcore.h>
    144 
    145 #ifdef COMPAT_NETBSD32
    146 #include <compat/netbsd32/netbsd32.h>
    147 #endif
    148 
    149 #include <uvm/uvm.h>
    150 #include <uvm/uvm_physseg.h>
    151 
    152 #include <dev/cons.h>
    153 #include <dev/mm.h>
    154 
    155 #include <mips/pcb.h>
    156 #include <mips/cache.h>
    157 #include <mips/frame.h>
    158 #include <mips/regnum.h>
    159 #include <mips/mips_opcode.h>
    160 
    161 #include <mips/cpu.h>
    162 #include <mips/locore.h>
    163 #include <mips/psl.h>
    164 #include <mips/pte.h>
    165 #include <mips/userret.h>
    166 
    167 #ifdef __HAVE_BOOTINFO_H
    168 #include <machine/bootinfo.h>
    169 #endif
    170 
    171 #ifdef MIPS64_OCTEON
    172 #include <mips/cavium/octeonvar.h>
    173 #endif
    174 
    175 #if (MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) > 0
    176 #include <mips/mipsNN.h>		/* MIPS32/MIPS64 registers */
    177 
    178 #define	_MKINSN(a,b,c,d,e) ((uint32_t)(((a) << 26)|((b) << 21)|((c) << 16)|((d) << 11)|(e)))
    179 
    180 #ifdef _LP64
    181 #define	_LOAD_V0_L_PRIVATE_A0	_MKINSN(OP_LD, _R_A0, _R_V0, 0, offsetof(lwp_t, l_private))
    182 #define	_MTC0_V0_USERLOCAL	_MKINSN(OP_COP0, OP_DMT, _R_V0, MIPS_COP_0_TLB_CONTEXT, 2)
    183 #else
    184 #define	_LOAD_V0_L_PRIVATE_A0	_MKINSN(OP_LW, _R_A0, _R_V0, 0, offsetof(lwp_t, l_private))
    185 #define	_MTC0_V0_USERLOCAL	_MKINSN(OP_COP0, OP_MT, _R_V0, MIPS_COP_0_TLB_CONTEXT, 2)
    186 #endif
    187 #define	JR_RA			_MKINSN(OP_SPECIAL, _R_RA, 0, 0, OP_JR)
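         /*
          * Illustrative sanity check: with OP_SPECIAL == 0, _R_RA == 31 and
          * OP_JR == 0x08, the JR_RA template above encodes to
          * (31 << 21) | 0x08 == 0x03e00008, the canonical "jr ra"
          * instruction used below to patch cpu_switch_resume.  This assumes
          * __CTASSERT from <sys/cdefs.h> is usable at file scope here.
          */
         __CTASSERT(JR_RA == 0x03e00008);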
    188 
    189 #endif
    190 
    191 /* Internal routines. */
    192 int	cpu_dumpsize(void);
    193 u_long	cpu_dump_mempagecnt(void);
    194 int	cpu_dump(void);
    195 
    196 #if (MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) > 0
    197 static void mips_watchpoint_init(void);
    198 #endif
    199 
    200 #if defined(_LP64) && defined(ENABLE_MIPS_16KB_PAGE)
    201 vaddr_t mips_vm_maxuser_address = MIPS_VM_MAXUSER_ADDRESS;
    202 #endif
    203 
    204 #if defined(MIPS3_PLUS)
    205 uint32_t mips3_cp0_tlb_page_mask_probe(void);
    206 uint64_t mips3_cp0_tlb_entry_hi_probe(void);
    207 uint64_t mips3_cp0_tlb_entry_lo_probe(void);
    208 
    209 static void mips3_tlb_probe(void);
    210 #endif
    211 
    212 #if defined(MIPS1)
    213 static void	mips1_vector_init(const struct splsw *);
    214 extern const struct locoresw mips1_locoresw;
    215 extern const mips_locore_jumpvec_t mips1_locore_vec;
    216 #endif
    217 
    218 #if defined(MIPS3)
    219 static void	mips3_vector_init(const struct splsw *);
    220 extern const struct locoresw mips3_locoresw;
    221 extern const mips_locore_jumpvec_t mips3_locore_vec;
    222 #endif
    223 
    224 #if defined(MIPS3_LOONGSON2)
    225 static void	loongson2_vector_init(const struct splsw *);
    226 extern const struct locoresw loongson2_locoresw;
    227 extern const mips_locore_jumpvec_t loongson2_locore_vec;
    228 #endif
    229 
    230 #if defined(MIPS32)
    231 static void	mips32_vector_init(const struct splsw *);
    232 extern const struct locoresw mips32_locoresw;
    233 extern const mips_locore_jumpvec_t mips32_locore_vec;
    234 #endif
    235 
    236 #if defined(MIPS32R2)
    237 static void	mips32r2_vector_init(const struct splsw *);
    238 extern const struct locoresw mips32r2_locoresw;
    239 extern const mips_locore_jumpvec_t mips32r2_locore_vec;
    240 #endif
    241 
    242 #if defined(MIPS64)
    243 static void	mips64_vector_init(const struct splsw *);
    244 extern const struct locoresw mips64_locoresw;
    245 extern const mips_locore_jumpvec_t mips64_locore_vec;
    246 #endif
    247 
    248 #if defined(MIPS64R2)
    249 extern const struct locoresw mips64r2_locoresw;
    250 extern const mips_locore_jumpvec_t mips64r2_locore_vec;
    251 #endif
    252 
    253 #if defined(PARANOIA)
    254 void std_splsw_test(void);
    255 #endif
    256 
    257 mips_locore_jumpvec_t mips_locore_jumpvec;
    258 
    259 struct locoresw mips_locoresw;
    260 
    261 extern const struct splsw std_splsw;
    262 struct splsw mips_splsw;
    263 
    264 struct mips_options mips_options = {
    265 	.mips_cpu_id = 0xffffffff,
    266 	.mips_fpu_id = 0xffffffff,
    267 };
    268 
    269 void *	msgbufaddr;
    270 
    271 /* the following is used by DDB to reset the system */
    272 void	(*cpu_reset_address)(void);
    273 
    274 /* the following is used externally (sysctl_hw) */
    275 char	machine[] = MACHINE;		/* from <machine/param.h> */
    276 char	machine_arch[] = MACHINE_ARCH;	/* from <machine/param.h> */
    277 
    278 /*
    279  * Assumptions:
     280  *  - All MIPS3+ have an r4k-style MMU.  Much of the mips code makes
     281  *    _many_ assumptions about this, including the overloaded usage of
     282  *    MIPS3_PLUS.
     283  *  - All MIPS3+ use the same exception model (cp0 status, cause bits,
     284  *    etc).  Much of the mips code makes _many_ assumptions about this,
     285  *    including the overloaded usage of MIPS3_PLUS.
    286  *  - All MIPS3+ have a count register.  MIPS_HAS_CLOCK in <mips/cpu.h>
    287  *    will need to be revised if this is false.
    288  */
    289 #define	MIPS32_FLAGS	CPU_MIPS_R4K_MMU | CPU_MIPS_CAUSE_IV | CPU_MIPS_USE_WAIT
    290 #define	MIPS64_FLAGS	MIPS32_FLAGS	/* same as MIPS32 flags (for now) */
    291 
    292 static const struct pridtab cputab[] = {
    293 	{ 0, MIPS_R2000, -1, -1,		CPU_ARCH_MIPS1, 64,
    294 	  CPU_MIPS_NO_LLSC, 0, 0,		"MIPS R2000 CPU"	},
    295 	{ 0, MIPS_R3000, MIPS_REV_R2000A, -1,	CPU_ARCH_MIPS1, 64,
    296 	  CPU_MIPS_NO_LLSC, 0, 0,		"MIPS R2000A CPU"	},
    297 	{ 0, MIPS_R3000, MIPS_REV_R3000, -1,	CPU_ARCH_MIPS1, 64,
    298 	  CPU_MIPS_NO_LLSC, 0, 0,		"MIPS R3000 CPU"	},
    299 	{ 0, MIPS_R3000, MIPS_REV_R3000A, -1,	CPU_ARCH_MIPS1, 64,
    300 	  CPU_MIPS_NO_LLSC, 0, 0,		"MIPS R3000A CPU"	},
    301 	{ 0, MIPS_R6000, -1, -1,		CPU_ARCH_MIPS2, 32,
    302 	  MIPS_NOT_SUPP, 0, 0,			"MIPS R6000 CPU"	},
    303 
    304 	/*
     305 	 * Revs 0x00, 0x22 and 0x30 are R4000; 0x40, 0x50 and 0x60 are R4400.
     306 	 * Should we allow ranges and use 0x00-0x3f for R4000 and
     307 	 * 0x40-0xff for R4400?
    308 	 */
    309 	{ 0, MIPS_R4000, MIPS_REV_R4000_A, -1,	CPU_ARCH_MIPS3, 48,
    310 	  CPU_MIPS_R4K_MMU | CPU_MIPS_DOUBLE_COUNT, 0, 0,
    311 						"MIPS R4000 CPU"	},
    312 	{ 0, MIPS_R4000, MIPS_REV_R4000_B, -1,	CPU_ARCH_MIPS3, 48,
    313 	  CPU_MIPS_R4K_MMU | CPU_MIPS_DOUBLE_COUNT, 0, 0,
    314 						"MIPS R4000 CPU"	},
    315 	{ 0, MIPS_R4000, MIPS_REV_R4000_C, -1,	CPU_ARCH_MIPS3, 48,
    316 	  CPU_MIPS_R4K_MMU | CPU_MIPS_DOUBLE_COUNT, 0, 0,
    317 						"MIPS R4000 CPU"	},
    318 	{ 0, MIPS_R4000, MIPS_REV_R4400_A, -1,	CPU_ARCH_MIPS3, 48,
    319 	  CPU_MIPS_R4K_MMU | CPU_MIPS_DOUBLE_COUNT, 0, 0,
    320 						"MIPS R4400 CPU"	},
    321 	{ 0, MIPS_R4000, MIPS_REV_R4400_B, -1,	CPU_ARCH_MIPS3, 48,
    322 	  CPU_MIPS_R4K_MMU | CPU_MIPS_DOUBLE_COUNT, 0, 0,
    323 						"MIPS R4400 CPU"	},
    324 	{ 0, MIPS_R4000, MIPS_REV_R4400_C, -1,	CPU_ARCH_MIPS3, 48,
    325 	  CPU_MIPS_R4K_MMU | CPU_MIPS_DOUBLE_COUNT, 0, 0,
    326 						"MIPS R4400 CPU"	},
    327 
    328 	{ 0, MIPS_R3LSI, -1, -1,		CPU_ARCH_MIPS1, -1,
    329 	  MIPS_NOT_SUPP, 0, 0,			"LSI Logic R3000 derivative" },
    330 	{ 0, MIPS_R6000A, -1, -1,		CPU_ARCH_MIPS2, 32,
    331 	  MIPS_NOT_SUPP, 0, 0,			"MIPS R6000A CPU"	},
    332 	{ 0, MIPS_R3IDT, -1, -1,		CPU_ARCH_MIPS1, -1,
    333 	  MIPS_NOT_SUPP, 0, 0,			"IDT R3041 or RC36100 CPU" },
    334 	{ 0, MIPS_R4100, -1, -1,		CPU_ARCH_MIPS3, 32,
    335 	  CPU_MIPS_R4K_MMU | CPU_MIPS_NO_LLSC, 0, 0,
    336 						"NEC VR4100 CPU"	},
    337 	{ 0, MIPS_R4200, -1, -1,		CPU_ARCH_MIPS3, -1,
    338 	  MIPS_NOT_SUPP | CPU_MIPS_R4K_MMU, 0, 0,
    339 						"NEC VR4200 CPU"	},
    340 	{ 0, MIPS_R4300, -1, -1,		CPU_ARCH_MIPS3, 32,
    341 	  CPU_MIPS_R4K_MMU, 0, 0,		"NEC VR4300 CPU"	},
    342 	{ 0, MIPS_R4600, -1, -1,		CPU_ARCH_MIPS3, 48,
    343 	  CPU_MIPS_R4K_MMU | CPU_MIPS_DOUBLE_COUNT, 0, 0,
    344 						"QED R4600 Orion CPU"	},
    345 	{ 0, MIPS_R4700, -1, -1,		CPU_ARCH_MIPS3, 48,
    346 	  CPU_MIPS_R4K_MMU, 0, 0,		"QED R4700 Orion CPU"	},
    347 
    348 	{ 0, MIPS_R8000, -1, -1,		CPU_ARCH_MIPS4, 384,
    349 	  MIPS_NOT_SUPP | CPU_MIPS_R4K_MMU, 0, 0,
    350 					 "MIPS R8000 Blackbird/TFP CPU" },
    351 	{ 0, MIPS_R10000, -1, -1,		CPU_ARCH_MIPS4, 64,
    352 	  CPU_MIPS_R4K_MMU | CPU_MIPS_DOUBLE_COUNT, 0, 0,
    353 						"MIPS R10000 CPU"	},
    354 	{ 0, MIPS_R12000, -1, -1,		CPU_ARCH_MIPS4, 64,
    355 	  CPU_MIPS_R4K_MMU | CPU_MIPS_DOUBLE_COUNT, 0, 0,
    356 						"MIPS R12000 CPU"	},
    357 	{ 0, MIPS_R14000, -1, -1,		CPU_ARCH_MIPS4, 64,
    358 	  CPU_MIPS_R4K_MMU | CPU_MIPS_DOUBLE_COUNT, 0, 0,
    359 						"MIPS R14000 CPU"	},
    360 
    361 	/* XXX
    362 	 * If the Processor Revision ID of the 4650 isn't 0, the following
    363 	 * entry needs to be adjusted.  Can't use a wildcard match because
    364 	 * the TX39 series processors share the same Processor ID value.
    365 	 * Or maybe put TX39 CPUs first if the revid doesn't overlap with
    366 	 * the 4650...
    367 	 */
    368 	{ 0, MIPS_R4650, 0, -1,			CPU_ARCH_MIPS3, -1,
    369 	  MIPS_NOT_SUPP /* no MMU! */, 0, 0,	"QED R4650 CPU"	},
    370 	{ 0, MIPS_TX3900, MIPS_REV_TX3912, -1,	CPU_ARCH_MIPS1, 32,
    371 	  CPU_MIPS_NO_LLSC, 0, 0,		"Toshiba TX3912 CPU"	},
    372 	{ 0, MIPS_TX3900, MIPS_REV_TX3922, -1,	CPU_ARCH_MIPS1, 64,
    373 	  CPU_MIPS_NO_LLSC, 0, 0,		"Toshiba TX3922 CPU"	},
    374 	{ 0, MIPS_TX3900, MIPS_REV_TX3927, -1,	CPU_ARCH_MIPS1, 64,
    375 	  CPU_MIPS_NO_LLSC, 0, 0,		"Toshiba TX3927 CPU"	},
    376 	{ 0, MIPS_R5000, -1, -1,		CPU_ARCH_MIPS4, 48,
    377 	  CPU_MIPS_R4K_MMU | CPU_MIPS_DOUBLE_COUNT, 0, 0,
    378 						"MIPS R5000 CPU"	},
    379 	{ 0, MIPS_RM5200, -1, -1,		CPU_ARCH_MIPS4, 48,
    380 	  CPU_MIPS_R4K_MMU | CPU_MIPS_CAUSE_IV | CPU_MIPS_DOUBLE_COUNT |
    381 	  CPU_MIPS_USE_WAIT, 0, 0,		"QED RM5200 CPU"	},
    382 
    383 	/* XXX
     384 	 * The RM7000 rev 2.0 can have 64 TLB entries and has 6 extra interrupts.  See
    385 	 *    "Migrating to the RM7000 from other MIPS Microprocessors"
    386 	 * for more details.
    387 	 */
    388 	{ 0, MIPS_RM7000, -1, -1,		CPU_ARCH_MIPS4, 48,
    389 	  MIPS_NOT_SUPP | CPU_MIPS_CAUSE_IV | CPU_MIPS_DOUBLE_COUNT |
    390 	  CPU_MIPS_USE_WAIT, 0, 0,		"QED RM7000 CPU"	},
    391 
    392 	/*
     393 	 * The IDT RC32300 core is a 32-bit MIPS2 processor with
     394 	 * MIPS3/MIPS4 extensions.  It has an R4000-style TLB,
     395 	 * but all registers are 32 bits and 64-bit instructions
     396 	 * such as ld/sd/dmfc0/dmtc0 are not allowed.
    397 	 *
     398 	 * Note that the Config register has a non-standard base
     399 	 * for IC and DC (2^9 instead of 2^12).
    400 	 *
    401 	 */
    402 	{ 0, MIPS_RC32300, -1, -1,		CPU_ARCH_MIPS3, 16,
    403 	  MIPS_NOT_SUPP | CPU_MIPS_R4K_MMU, 0, 0,
    404 						"IDT RC32300 CPU"	},
    405 	{ 0, MIPS_RC32364, -1, -1,		CPU_ARCH_MIPS3, 16,
    406 	  MIPS_NOT_SUPP | CPU_MIPS_R4K_MMU, 0, 0,
    407 						"IDT RC32364 CPU"	},
    408 	{ 0, MIPS_RC64470, -1, -1,		CPU_ARCH_MIPSx, -1,
    409 	  MIPS_NOT_SUPP | CPU_MIPS_R4K_MMU, 0, 0,
    410 						"IDT RC64474/RC64475 CPU" },
    411 
    412 	{ 0, MIPS_R5400, -1, -1,		CPU_ARCH_MIPSx, -1,
    413 	  MIPS_NOT_SUPP | CPU_MIPS_R4K_MMU, 0, 0,
    414 						"NEC VR5400 CPU"	},
    415 	{ 0, MIPS_R5900, -1, -1,		CPU_ARCH_MIPS3, 48,
    416 	  CPU_MIPS_NO_LLSC | CPU_MIPS_R4K_MMU, 0, 0,
    417 						"Toshiba R5900 CPU"	},
    418 
    419 	{ 0, MIPS_TX4900, MIPS_REV_TX4927, -1,	CPU_ARCH_MIPS3, 48,
    420 	  CPU_MIPS_R4K_MMU | CPU_MIPS_DOUBLE_COUNT, 0, 0,
    421 						"Toshiba TX4927 CPU"	},
    422 
    423 	{ 0, MIPS_TX4900, -1, -1,		CPU_ARCH_MIPS3, 48,
    424 	  CPU_MIPS_R4K_MMU | CPU_MIPS_DOUBLE_COUNT, 0, 0,
    425 						"Toshiba TX4900 CPU"	},
    426 
    427 	/*
    428 	 * ICT Loongson2 is a MIPS64 CPU with a few quirks.  For some reason
     429 	 * the virtual aliases present with 4KB pages make the caches misbehave,
     430 	 * so we make all accesses uncached.  With 16KB pages, no virtual
     431 	 * aliases are possible, so we can use caching.
    432 	 */
    433 #ifdef ENABLE_MIPS_16KB_PAGE
    434 #define	MIPS_LOONGSON2_CCA	0
    435 #else
    436 #define	MIPS_LOONGSON2_CCA	(CPU_MIPS_HAVE_SPECIAL_CCA | \
    437 				(2 << CPU_MIPS_CACHED_CCA_SHIFT))
    438 #endif
    439 	{ 0, MIPS_LOONGSON2, MIPS_REV_LOONGSON2E, -1, CPU_ARCH_MIPS3, 64,
    440 	  CPU_MIPS_R4K_MMU | CPU_MIPS_DOUBLE_COUNT | CPU_MIPS_LOONGSON2
    441 	  | MIPS_LOONGSON2_CCA, 0, 0, "ICT Loongson 2E CPU"	},
    442 	{ 0, MIPS_LOONGSON2, MIPS_REV_LOONGSON2F, -1, CPU_ARCH_MIPS3, 64,
    443 	  CPU_MIPS_R4K_MMU | CPU_MIPS_DOUBLE_COUNT | CPU_MIPS_LOONGSON2
    444 	  | MIPS_LOONGSON2_CCA, 0, 0, "ICT Loongson 2F CPU"	},
    445 
    446 #if 0 /* ID collisions : can we use a CU1 test or similar? */
    447 	{ 0, MIPS_R3SONY, -1, -1,		CPU_ARCH_MIPS1, -1,
    448 	  MIPS_NOT_SUPP, 0, 0,			"SONY R3000 derivative"	},	/* 0x21; crash R4700? */
    449 	{ 0, MIPS_R3NKK, -1, -1,		CPU_ARCH_MIPS1, -1,
    450 	  MIPS_NOT_SUPP, 0, 0,			"NKK R3000 derivative"	},	/* 0x23; crash R5000? */
    451 #endif
    452 
    453 	{ MIPS_PRID_CID_MTI, MIPS_4Kc, -1, -1,	-1, 0,
    454 	  MIPS32_FLAGS | CPU_MIPS_DOUBLE_COUNT, 0, 0, "4Kc"		},
    455 	{ MIPS_PRID_CID_MTI, MIPS_4KEc, -1, -1,	-1, 0,
    456 	  MIPS32_FLAGS | CPU_MIPS_DOUBLE_COUNT, 0, 0, "4KEc"		},
    457 	{ MIPS_PRID_CID_MTI, MIPS_4KEc_R2, -1, -1, -1, 0,
    458 	  MIPS32_FLAGS | CPU_MIPS_DOUBLE_COUNT, 0, 0, "4KEc (Rev 2)"	},
    459 	{ MIPS_PRID_CID_MTI, MIPS_4KSc, -1, -1,	-1, 0,
    460 	  MIPS32_FLAGS | CPU_MIPS_DOUBLE_COUNT, 0, 0, "4KSc"		},
    461 	{ MIPS_PRID_CID_MTI, MIPS_5Kc, -1, -1,	-1, 0,
    462 	  MIPS64_FLAGS | CPU_MIPS_DOUBLE_COUNT, 0, 0, "5Kc"		},
    463 	{ MIPS_PRID_CID_MTI, MIPS_20Kc, -1, -1,	-1, 0,
    464 	  MIPS64_FLAGS,				0, 0, "20Kc"		},
    465 	{ MIPS_PRID_CID_MTI, MIPS_25Kf, -1, -1,	-1, 0,
    466 	  MIPS64_FLAGS,				0, 0, "25Kf"		},
    467 	{ MIPS_PRID_CID_MTI, MIPS_24K, -1, -1,	-1, 0,
    468 	  MIPS32_FLAGS | CPU_MIPS_DOUBLE_COUNT,
    469 	  MIPS_CP0FL_USE |
    470 	  MIPS_CP0FL_EBASE |
    471 	  MIPS_CP0FL_CONFIG | MIPS_CP0FL_CONFIG1 | MIPS_CP0FL_CONFIG2 |
    472 	  MIPS_CP0FL_CONFIG3 | MIPS_CP0FL_CONFIG7,
    473 	  0, "24K" },
    474 	{ MIPS_PRID_CID_MTI, MIPS_24KE, -1, -1,	-1, 0,
    475 	  MIPS32_FLAGS | CPU_MIPS_DOUBLE_COUNT,
    476 	  MIPS_CP0FL_USE |
    477 	  MIPS_CP0FL_EBASE |
    478 	  MIPS_CP0FL_CONFIG | MIPS_CP0FL_CONFIG1 | MIPS_CP0FL_CONFIG2 |
    479 	  MIPS_CP0FL_CONFIG3 | MIPS_CP0FL_CONFIG7,
    480 	  0, "24KE" },
    481 	{ MIPS_PRID_CID_MTI, MIPS_34K, -1, -1,	-1, 0,
    482 	  MIPS32_FLAGS | CPU_MIPS_DOUBLE_COUNT,
    483 	  MIPS_CP0FL_USE |
    484 	  MIPS_CP0FL_EBASE |
    485 	  MIPS_CP0FL_CONFIG | MIPS_CP0FL_CONFIG1 | MIPS_CP0FL_CONFIG2 |
    486 	  MIPS_CP0FL_CONFIG3 | MIPS_CP0FL_CONFIG7,
    487 	  0, "34K" },
    488 	{ MIPS_PRID_CID_MTI, MIPS_74K, -1, -1,	-1, 0,
    489 	  CPU_MIPS_HAVE_SPECIAL_CCA | (0 << CPU_MIPS_CACHED_CCA_SHIFT) |
    490 	  MIPS32_FLAGS | CPU_MIPS_DOUBLE_COUNT,
    491 	  MIPS_CP0FL_USE |
    492 	  MIPS_CP0FL_EBASE |
    493 	  MIPS_CP0FL_CONFIG | MIPS_CP0FL_CONFIG1 | MIPS_CP0FL_CONFIG2 |
    494 	  MIPS_CP0FL_CONFIG3 | MIPS_CP0FL_CONFIG6 | MIPS_CP0FL_CONFIG7,
    495 	  0, "74K" },
    496 	{ MIPS_PRID_CID_MTI, MIPS_1004K, -1, -1,	-1, 0,
    497 	  MIPS32_FLAGS | CPU_MIPS_DOUBLE_COUNT,
    498 	  MIPS_CP0FL_USE |
    499 	  MIPS_CP0FL_EBASE |
    500 	  MIPS_CP0FL_CONFIG | MIPS_CP0FL_CONFIG1 | MIPS_CP0FL_CONFIG2 |
    501 	  MIPS_CP0FL_CONFIG3 | MIPS_CP0FL_CONFIG6 | MIPS_CP0FL_CONFIG7,
    502 	  0, "1004K" },
    503 	{ MIPS_PRID_CID_MTI, MIPS_1074K, -1, -1,	-1, 0,
    504 	  MIPS32_FLAGS | CPU_MIPS_DOUBLE_COUNT,
    505 	  MIPS_CP0FL_USE |
    506 	  MIPS_CP0FL_EBASE |
    507 	  MIPS_CP0FL_CONFIG | MIPS_CP0FL_CONFIG1 | MIPS_CP0FL_CONFIG2 |
    508 	  MIPS_CP0FL_CONFIG3 | MIPS_CP0FL_CONFIG6 | MIPS_CP0FL_CONFIG7,
    509 	  0, "1074K" },
    510 
    511 	{ MIPS_PRID_CID_BROADCOM, MIPS_BCM3302, -1, -1, -1, 0,
    512 	  MIPS32_FLAGS | CPU_MIPS_DOUBLE_COUNT, 0, 0, "BCM3302"	},
    513 
    514 	{ MIPS_PRID_CID_ALCHEMY, MIPS_AU_REV1, -1, MIPS_AU1000, -1, 0,
    515 	  MIPS32_FLAGS | CPU_MIPS_NO_WAIT | CPU_MIPS_I_D_CACHE_COHERENT, 0, 0,
    516 						"Au1000 (Rev 1 core)"	},
    517 	{ MIPS_PRID_CID_ALCHEMY, MIPS_AU_REV2, -1, MIPS_AU1000, -1, 0,
    518 	  MIPS32_FLAGS | CPU_MIPS_NO_WAIT | CPU_MIPS_I_D_CACHE_COHERENT, 0, 0,
    519 						"Au1000 (Rev 2 core)" 	},
    520 
    521 	{ MIPS_PRID_CID_ALCHEMY, MIPS_AU_REV1, -1, MIPS_AU1100, -1, 0,
    522 	  MIPS32_FLAGS | CPU_MIPS_NO_WAIT | CPU_MIPS_I_D_CACHE_COHERENT, 0, 0,
    523 						"Au1100 (Rev 1 core)"	},
    524 	{ MIPS_PRID_CID_ALCHEMY, MIPS_AU_REV2, -1, MIPS_AU1100, -1, 0,
    525 	  MIPS32_FLAGS | CPU_MIPS_NO_WAIT | CPU_MIPS_I_D_CACHE_COHERENT, 0, 0,
    526 						"Au1100 (Rev 2 core)" 	},
    527 
    528 	{ MIPS_PRID_CID_ALCHEMY, MIPS_AU_REV1, -1, MIPS_AU1500, -1, 0,
    529 	  MIPS32_FLAGS | CPU_MIPS_NO_WAIT | CPU_MIPS_I_D_CACHE_COHERENT, 0, 0,
    530 						"Au1500 (Rev 1 core)"	},
    531 	{ MIPS_PRID_CID_ALCHEMY, MIPS_AU_REV2, -1, MIPS_AU1500, -1, 0,
    532 	  MIPS32_FLAGS | CPU_MIPS_NO_WAIT | CPU_MIPS_I_D_CACHE_COHERENT, 0, 0,
    533 						"Au1500 (Rev 2 core)" 	},
    534 
    535 	{ MIPS_PRID_CID_ALCHEMY, MIPS_AU_REV2, -1, MIPS_AU1550, -1, 0,
    536 	  MIPS32_FLAGS | CPU_MIPS_NO_WAIT | CPU_MIPS_I_D_CACHE_COHERENT, 0, 0,
    537 						"Au1550 (Rev 2 core)" 	},
    538 
    539 	/* The SB-1 CPU uses a CCA of 5 - "Cacheable Coherent Shareable" */
    540 	{ MIPS_PRID_CID_SIBYTE, MIPS_SB1, -1,	-1, -1, 0,
    541 	  MIPS64_FLAGS | CPU_MIPS_D_CACHE_COHERENT |
    542 	  CPU_MIPS_HAVE_SPECIAL_CCA |
    543 	  (CCA_SB_CACHEABLE_COHERENT << CPU_MIPS_CACHED_CCA_SHIFT), 0, 0,
    544 						"SB-1"			},
    545 	{ MIPS_PRID_CID_SIBYTE, MIPS_SB1_11, -1,	-1, -1, 0,
    546 	  MIPS64_FLAGS | CPU_MIPS_D_CACHE_COHERENT |
    547 	  CPU_MIPS_HAVE_SPECIAL_CCA |
    548 	  (CCA_SB_CACHEABLE_COHERENT << CPU_MIPS_CACHED_CCA_SHIFT), 0, 0,
    549 						"SB-1 (0x11)"		},
    550 
    551 	{ MIPS_PRID_CID_RMI, MIPS_XLR732B, -1,	-1, -1, 0,
    552 	  MIPS64_FLAGS | CPU_MIPS_D_CACHE_COHERENT | CPU_MIPS_NO_LLADDR |
    553 	  CPU_MIPS_I_D_CACHE_COHERENT | CPU_MIPS_HAVE_MxCR,
    554 	  MIPS_CP0FL_USE |
    555 	  MIPS_CP0FL_EIRR | MIPS_CP0FL_EIMR | MIPS_CP0FL_EBASE |
    556 	  MIPS_CP0FL_CONFIG | MIPS_CP0FL_CONFIG1 | MIPS_CP0FL_CONFIG7,
    557 	  CIDFL_RMI_TYPE_XLR|MIPS_CIDFL_RMI_CPUS(8,4)|MIPS_CIDFL_RMI_L2(2MB),
    558 	  "XLR732B"		},
    559 
    560 	{ MIPS_PRID_CID_RMI, MIPS_XLR732C, -1,	-1, -1, 0,
    561 	  MIPS64_FLAGS | CPU_MIPS_D_CACHE_COHERENT | CPU_MIPS_NO_LLADDR |
    562 	  CPU_MIPS_I_D_CACHE_COHERENT | CPU_MIPS_HAVE_MxCR,
    563 	  MIPS_CP0FL_USE |
    564 	  MIPS_CP0FL_EIRR | MIPS_CP0FL_EIMR | MIPS_CP0FL_EBASE |
    565 	  MIPS_CP0FL_CONFIG | MIPS_CP0FL_CONFIG1 | MIPS_CP0FL_CONFIG7,
    566 	  CIDFL_RMI_TYPE_XLR|MIPS_CIDFL_RMI_CPUS(8,4)|MIPS_CIDFL_RMI_L2(2MB),
    567 	  "XLR732C"		},
    568 
    569 	{ MIPS_PRID_CID_RMI, MIPS_XLS616, -1,	-1, -1, 0,
    570 	  MIPS64_FLAGS | CPU_MIPS_D_CACHE_COHERENT | CPU_MIPS_NO_LLADDR |
    571 	  CPU_MIPS_I_D_CACHE_COHERENT | CPU_MIPS_HAVE_MxCR,
    572 	  MIPS_CP0FL_USE |
    573 	  MIPS_CP0FL_EIRR | MIPS_CP0FL_EIMR | MIPS_CP0FL_EBASE |
    574 	  MIPS_CP0FL_CONFIG | MIPS_CP0FL_CONFIG1 | MIPS_CP0FL_CONFIG7,
    575 	  CIDFL_RMI_TYPE_XLS|MIPS_CIDFL_RMI_CPUS(4,4)|MIPS_CIDFL_RMI_L2(1MB),
    576 	  "XLS616"		},
    577 
    578 	{ MIPS_PRID_CID_RMI, MIPS_XLS416, -1,	-1, -1, 0,
    579 	  MIPS64_FLAGS | CPU_MIPS_D_CACHE_COHERENT | CPU_MIPS_NO_LLADDR |
    580 	  CPU_MIPS_I_D_CACHE_COHERENT | CPU_MIPS_HAVE_MxCR,
    581 	  MIPS_CP0FL_USE |
    582 	  MIPS_CP0FL_EIRR | MIPS_CP0FL_EIMR | MIPS_CP0FL_EBASE |
    583 	  MIPS_CP0FL_CONFIG | MIPS_CP0FL_CONFIG1 | MIPS_CP0FL_CONFIG7,
    584 	  CIDFL_RMI_TYPE_XLS|MIPS_CIDFL_RMI_CPUS(4,4)|MIPS_CIDFL_RMI_L2(1MB),
    585 	  "XLS416"		},
    586 
    587 	{ MIPS_PRID_CID_RMI, MIPS_XLS408, -1,	-1, -1, 0,
    588 	  MIPS64_FLAGS | CPU_MIPS_D_CACHE_COHERENT | CPU_MIPS_NO_LLADDR |
    589 	  CPU_MIPS_I_D_CACHE_COHERENT | CPU_MIPS_HAVE_MxCR,
    590 	  MIPS_CP0FL_USE |
    591 	  MIPS_CP0FL_EIRR | MIPS_CP0FL_EIMR | MIPS_CP0FL_EBASE |
    592 	  MIPS_CP0FL_CONFIG | MIPS_CP0FL_CONFIG1 | MIPS_CP0FL_CONFIG7,
    593 	  CIDFL_RMI_TYPE_XLS|MIPS_CIDFL_RMI_CPUS(2,4)|MIPS_CIDFL_RMI_L2(1MB),
    594 	  "XLS408"		},
    595 
    596 	{ MIPS_PRID_CID_RMI, MIPS_XLS408LITE, -1, -1, -1, 0,
    597 	  MIPS64_FLAGS | CPU_MIPS_D_CACHE_COHERENT | CPU_MIPS_NO_LLADDR |
    598 	  CPU_MIPS_I_D_CACHE_COHERENT | CPU_MIPS_HAVE_MxCR,
    599 	  MIPS_CP0FL_USE |
    600 	  MIPS_CP0FL_EIRR | MIPS_CP0FL_EIMR | MIPS_CP0FL_EBASE |
    601 	  MIPS_CP0FL_CONFIG | MIPS_CP0FL_CONFIG1 | MIPS_CP0FL_CONFIG7,
    602 	  CIDFL_RMI_TYPE_XLS|MIPS_CIDFL_RMI_CPUS(2,4)|MIPS_CIDFL_RMI_L2(1MB),
    603 	  "XLS408lite"		},
    604 
    605 	{ MIPS_PRID_CID_RMI, MIPS_XLS404LITE, -1, -1, -1, 0,
    606 	  MIPS64_FLAGS | CPU_MIPS_D_CACHE_COHERENT | CPU_MIPS_NO_LLADDR |
    607 	  CPU_MIPS_I_D_CACHE_COHERENT | CPU_MIPS_HAVE_MxCR,
    608 	  MIPS_CP0FL_USE |
    609 	  MIPS_CP0FL_EIRR | MIPS_CP0FL_EIMR | MIPS_CP0FL_EBASE |
    610 	  MIPS_CP0FL_CONFIG | MIPS_CP0FL_CONFIG1 | MIPS_CP0FL_CONFIG7,
    611 	  CIDFL_RMI_TYPE_XLS|MIPS_CIDFL_RMI_CPUS(1,4)|MIPS_CIDFL_RMI_L2(512KB),
    612 	  "XLS404lite"		},
    613 
    614 	{ MIPS_PRID_CID_RMI, MIPS_XLS208, -1,	-1, -1, 0,
    615 	  MIPS64_FLAGS | CPU_MIPS_D_CACHE_COHERENT | CPU_MIPS_NO_LLADDR |
    616 	  CPU_MIPS_I_D_CACHE_COHERENT | CPU_MIPS_HAVE_MxCR,
    617 	  MIPS_CP0FL_USE |
    618 	  MIPS_CP0FL_EIRR | MIPS_CP0FL_EIMR | MIPS_CP0FL_EBASE |
    619 	  MIPS_CP0FL_CONFIG | MIPS_CP0FL_CONFIG1 | MIPS_CP0FL_CONFIG7,
    620 	  CIDFL_RMI_TYPE_XLS|MIPS_CIDFL_RMI_CPUS(2,4)|MIPS_CIDFL_RMI_L2(512KB),
    621 	  "XLS208"		},
    622 
    623 	{ MIPS_PRID_CID_RMI, MIPS_XLS204, -1,	-1, -1, 0,
    624 	  MIPS64_FLAGS | CPU_MIPS_D_CACHE_COHERENT | CPU_MIPS_NO_LLADDR |
    625 	  CPU_MIPS_I_D_CACHE_COHERENT | CPU_MIPS_HAVE_MxCR,
    626 	  MIPS_CP0FL_USE |
    627 	  MIPS_CP0FL_EIRR | MIPS_CP0FL_EIMR | MIPS_CP0FL_EBASE |
    628 	  MIPS_CP0FL_CONFIG | MIPS_CP0FL_CONFIG1 | MIPS_CP0FL_CONFIG7,
    629 	  CIDFL_RMI_TYPE_XLS|MIPS_CIDFL_RMI_CPUS(1,4)|MIPS_CIDFL_RMI_L2(256KB),
    630 	  "XLS204"		},
    631 
    632 	{ MIPS_PRID_CID_RMI, MIPS_XLS108, -1,	-1, -1, 0,
    633 	  MIPS64_FLAGS | CPU_MIPS_D_CACHE_COHERENT | CPU_MIPS_NO_LLADDR |
    634 	  CPU_MIPS_I_D_CACHE_COHERENT | CPU_MIPS_HAVE_MxCR,
    635 	  MIPS_CP0FL_USE |
    636 	  MIPS_CP0FL_EIRR | MIPS_CP0FL_EIMR | MIPS_CP0FL_EBASE |
    637 	  MIPS_CP0FL_CONFIG | MIPS_CP0FL_CONFIG1 | MIPS_CP0FL_CONFIG7,
    638 	  CIDFL_RMI_TYPE_XLS|MIPS_CIDFL_RMI_CPUS(2,4)|MIPS_CIDFL_RMI_L2(512KB),
    639 	  "XLS108"		},
    640 
    641 	{ MIPS_PRID_CID_RMI, MIPS_XLS104, -1,	-1, -1, 0,
    642 	  MIPS64_FLAGS | CPU_MIPS_D_CACHE_COHERENT | CPU_MIPS_NO_LLADDR |
    643 	  CPU_MIPS_I_D_CACHE_COHERENT | CPU_MIPS_HAVE_MxCR,
    644 	  MIPS_CP0FL_USE |
    645 	  MIPS_CP0FL_EIRR | MIPS_CP0FL_EIMR | MIPS_CP0FL_EBASE |
    646 	  MIPS_CP0FL_CONFIG | MIPS_CP0FL_CONFIG1 | MIPS_CP0FL_CONFIG7,
    647 	  CIDFL_RMI_TYPE_XLS|MIPS_CIDFL_RMI_CPUS(1,4)|MIPS_CIDFL_RMI_L2(256KB),
    648 	  "XLS104"		},
    649 
    650 	{ MIPS_PRID_CID_CAVIUM, MIPS_CN31XX, -1, -1, -1, 0,
    651 	  MIPS64_FLAGS | CPU_MIPS_D_CACHE_COHERENT | CPU_MIPS_NO_LLADDR,
    652 	  MIPS_CP0FL_USE |
    653 	  MIPS_CP0FL_EBASE | MIPS_CP0FL_CONFIG |
    654 	  MIPS_CP0FL_CONFIG1 | MIPS_CP0FL_CONFIG2 | MIPS_CP0FL_CONFIG3,
    655 	  0,
    656 	  "CN31xx"		},
    657 
    658 	{ MIPS_PRID_CID_CAVIUM, MIPS_CN30XX, -1, -1, -1, 0,
    659 	  MIPS64_FLAGS | CPU_MIPS_D_CACHE_COHERENT | CPU_MIPS_NO_LLADDR,
    660 	  MIPS_CP0FL_USE |
    661 	  MIPS_CP0FL_EBASE | MIPS_CP0FL_CONFIG |
    662 	  MIPS_CP0FL_CONFIG1 | MIPS_CP0FL_CONFIG2 | MIPS_CP0FL_CONFIG3,
    663 	  0,
    664 	  "CN30xx"		},
    665 
    666 	{ MIPS_PRID_CID_CAVIUM, MIPS_CN50XX, -1, -1, -1, 0,
    667 	  MIPS64_FLAGS | CPU_MIPS_D_CACHE_COHERENT | CPU_MIPS_NO_LLADDR,
    668 	  MIPS_CP0FL_USE |
    669 	  MIPS_CP0FL_EBASE | MIPS_CP0FL_CONFIG |
    670 	  MIPS_CP0FL_CONFIG1 | MIPS_CP0FL_CONFIG2 | MIPS_CP0FL_CONFIG3,
    671 	  0,
    672 	  "CN50xx"		},
    673 
    674 	{ MIPS_PRID_CID_CAVIUM, MIPS_CN68XX, -1, -1, -1, 0,
    675 	  MIPS64_FLAGS | CPU_MIPS_D_CACHE_COHERENT | CPU_MIPS_NO_LLADDR,
    676 	  MIPS_CP0FL_USE |
    677 	  MIPS_CP0FL_CONFIG  | MIPS_CP0FL_CONFIG1 | MIPS_CP0FL_CONFIG2 |
    678 	  MIPS_CP0FL_CONFIG3 | MIPS_CP0FL_CONFIG4,
    679 	  0,
    680 	  "CN68xx"		},
    681 
    682 	{ MIPS_PRID_CID_CAVIUM, MIPS_CN70XX, -1, -1, -1, 0,
    683 	  MIPS64_FLAGS | CPU_MIPS_D_CACHE_COHERENT | CPU_MIPS_NO_LLADDR,
    684 	  MIPS_CP0FL_USE | MIPS_CP0FL_EBASE |
    685 	  MIPS_CP0FL_CONFIG  | MIPS_CP0FL_CONFIG1 | MIPS_CP0FL_CONFIG2 |
    686 	  MIPS_CP0FL_CONFIG3 | MIPS_CP0FL_CONFIG4 | MIPS_CP0FL_CONFIG5 |
    687 	  MIPS_CP0FL_CONFIG6 | MIPS_CP0FL_CONFIG7,
    688 	  0,
    689 	  "CN70xx/CN71xx"	},
    690 
     691 	/* Microsoft Research's extensible MIPS */
    692 	{ MIPS_PRID_CID_MICROSOFT, MIPS_eMIPS, 1, -1, CPU_ARCH_MIPS1, 64,
    693 	  CPU_MIPS_NO_WAIT, 0, 0,		"eMIPS CPU"		},
    694 
    695 	/* Ingenic XBurst */
    696 	{ MIPS_PRID_CID_INGENIC, MIPS_XBURST,  -1, -1,	-1, 0,
    697 	  MIPS32_FLAGS | CPU_MIPS_D_CACHE_COHERENT | CPU_MIPS_DOUBLE_COUNT,
    698 	  0, 0, "XBurst"		},
    699 
    700 	{ 0, 0, 0,				0, 0, 0,
    701 	  0, 0, 0,				NULL			}
    702 };
    703 
    704 static const struct pridtab fputab[] = {
    705     { 0, MIPS_SOFT,  -1, 0, 0, 0, 0, 0, 0, "software emulated floating point" },
    706     { 0, MIPS_R2360, -1, 0, 0, 0, 0, 0, 0, "MIPS R2360 Floating Point Board" },
    707     { 0, MIPS_R2010, -1, 0, 0, 0, 0, 0, 0, "MIPS R2010 FPC" },
    708     { 0, MIPS_R3010, -1, 0, 0, 0, 0, 0, 0, "MIPS R3010 FPC" },
    709     { 0, MIPS_R6010, -1, 0, 0, 0, 0, 0, 0, "MIPS R6010 FPC" },
    710     { 0, MIPS_R4010, -1, 0, 0, 0, 0, 0, 0, "MIPS R4010 FPC" },
    711 };
    712 
    713 /*
     714  * Company IDs are not sparse (yet); this array is indexed directly
    715  * by pridtab->cpu_cid.
    716  */
    717 static const char * const cidnames[] = {
    718 	"Prehistoric",
    719 	"MIPS",		/* or "MIPS Technologies, Inc.	*/
    720 	"Broadcom",	/* or "Broadcom Corp."		*/
    721 	"Alchemy",	/* or "Alchemy Semiconductor"	*/
    722 	"SiByte",	/* or "Broadcom Corp. (SiByte)"	*/
    723 	"SandCraft",
    724 	"Phillips",
    725 	"Toshiba or Microsoft",
    726 	"LSI",
    727 	"(unannounced)",
    728 	"(unannounced)",
    729 	"Lexra",
    730 	"RMI",
    731 	"Cavium",
    732 };
    733 #define	ncidnames __arraycount(cidnames)
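         /*
          * Illustrative lookup (a sketch; the consumer is assumed to
          * bounds-check the company ID before indexing):
          *
          *	const u_int cid = MIPS_PRID_CID(cpu_id);
          *	const char *vendor = cid < ncidnames ? cidnames[cid] : "Unknown";
          */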
    734 
    735 #if defined(MIPS1)
    736 /*
    737  * MIPS-I locore function vector
    738  */
    739 
    740 static void
    741 mips1_vector_init(const struct splsw *splsw)
    742 {
    743 	extern char mips1_utlb_miss[], mips1_utlb_miss_end[];
    744 	extern char mips1_exception[], mips1_exception_end[];
    745 
    746 	/*
    747 	 * Copy down exception vector code.
    748 	 */
    749 	if (mips1_utlb_miss_end - mips1_utlb_miss > 0x80)
    750 		panic("startup: UTLB vector code too large");
    751 	if (mips1_exception_end - mips1_exception > 0x80)
    752 		panic("startup: general exception vector code too large");
    753 	memcpy((void *)MIPS_UTLB_MISS_EXC_VEC, mips1_utlb_miss,
    754 		mips1_exception_end - mips1_utlb_miss);
    755 
    756 	/*
    757 	 * Copy locore-function vector.
    758 	 */
    759 	mips_locore_jumpvec = mips1_locore_vec;
    760 
    761 	/*
    762 	 * Clear out the I and D caches.
    763 	 */
    764 	mips_icache_sync_all();
    765 	mips_dcache_wbinv_all();
    766 }
    767 #endif /* MIPS1 */
    768 
    769 #if defined(MIPS3)
    770 static void
    771 mips3_vector_init(const struct splsw *splsw)
    772 {
    773 	/* r4000 exception handler address and end */
    774 	extern char mips3_exception[], mips3_exception_end[];
    775 
    776 	/* TLB miss handler address and end */
    777 	extern char mips3_tlb_miss[];
    778 	extern char mips3_xtlb_miss[];
    779 
    780 	/* Cache error handler */
    781 	extern char mips3_cache[];
    782 	/*
    783 	 * Copy down exception vector code.
    784 	 */
    785 
    786 	if (mips3_xtlb_miss - mips3_tlb_miss != 0x80)
    787 		panic("startup: %s vector code not 128 bytes in length",
    788 		    "UTLB");
    789 	if (mips3_cache - mips3_xtlb_miss != 0x80)
    790 		panic("startup: %s vector code not 128 bytes in length",
    791 		    "XTLB");
    792 	if (mips3_exception - mips3_cache != 0x80)
    793 		panic("startup: %s vector code not 128 bytes in length",
    794 		    "Cache error");
    795 	if (mips3_exception_end - mips3_exception > 0x80)
    796 		panic("startup: %s vector code too large",
    797 		    "General exception");
    798 
    799 	memcpy((void *)MIPS_UTLB_MISS_EXC_VEC, mips3_tlb_miss,
    800 	      mips3_exception_end - mips3_tlb_miss);
    801 
    802 	/*
    803 	 * Copy locore-function vector.
    804 	 */
    805 	mips_locore_jumpvec = mips3_locore_vec;
    806 
    807 	mips_icache_sync_all();
    808 	mips_dcache_wbinv_all();
    809 
    810 	/* Clear BEV in SR so we start handling our own exceptions */
    811 	mips_cp0_status_write(mips_cp0_status_read() & ~MIPS_SR_BEV);
    812 }
    813 #endif /* MIPS3 */
    814 
    815 #if defined(MIPS3_LOONGSON2)
    816 static void
    817 loongson2_vector_init(const struct splsw *splsw)
    818 {
    819 	/* r4000 exception handler address and end */
    820 	extern char loongson2_exception[], loongson2_exception_end[];
    821 
    822 	/* TLB miss handler address and end */
    823 	extern char loongson2_tlb_miss[];
    824 	extern char loongson2_xtlb_miss[];
    825 
    826 	/* Cache error handler */
    827 	extern char loongson2_cache[];
    828 
    829 	/*
    830 	 * Copy down exception vector code.
    831 	 */
    832 
    833 	if (loongson2_xtlb_miss - loongson2_tlb_miss != 0x80)
    834 		panic("startup: %s vector code not 128 bytes in length",
    835 		    "UTLB");
    836 	if (loongson2_cache - loongson2_xtlb_miss != 0x80)
    837 		panic("startup: %s vector code not 128 bytes in length",
    838 		    "XTLB");
    839 	if (loongson2_exception - loongson2_cache != 0x80)
    840 		panic("startup: %s vector code not 128 bytes in length",
    841 		    "Cache error");
    842 	if (loongson2_exception_end - loongson2_exception > 0x80)
    843 		panic("startup: %s vector code too large",
    844 		    "General exception");
    845 
    846 	memcpy((void *)MIPS_UTLB_MISS_EXC_VEC, loongson2_tlb_miss,
    847 	      loongson2_exception_end - loongson2_tlb_miss);
    848 
    849 	/*
    850 	 * Copy locore-function vector.
    851 	 */
    852 	mips_locore_jumpvec = loongson2_locore_vec;
    853 
    854 	mips_icache_sync_all();
    855 	mips_dcache_wbinv_all();
    856 
    857 	/* Clear BEV in SR so we start handling our own exceptions */
    858 	mips_cp0_status_write(mips_cp0_status_read() & ~MIPS_SR_BEV);
    859 }
    860 #endif /* MIPS3_LOONGSON2 */
    861 
    862 #if defined(MIPS32)
    863 static void
    864 mips32_vector_init(const struct splsw *splsw)
    865 {
    866 	/* r4000 exception handler address */
    867 	extern char mips32_exception[];
    868 
    869 	/* TLB miss handler addresses */
    870 	extern char mips32_tlb_miss[];
    871 
    872 	/* Cache error handler */
    873 	extern char mips32_cache[];
    874 
    875 	/* MIPS32 interrupt exception handler */
    876 	extern char mips32_intr[], mips32_intr_end[];
    877 
    878 	/*
    879 	 * Copy down exception vector code.
    880 	 */
    881 
    882 	if (mips32_cache - mips32_tlb_miss != 0x100)
    883 		panic("startup: %s vector code not 128 bytes in length",
    884 		    "UTLB");
    885 	if (mips32_exception - mips32_cache != 0x80)
    886 		panic("startup: %s vector code not 128 bytes in length",
    887 		    "Cache error");
    888 	if (mips32_intr - mips32_exception != 0x80)
    889 		panic("startup: %s vector code not 128 bytes in length",
    890 		    "General exception");
    891 	if (mips32_intr_end - mips32_intr > 0x80)
    892 		panic("startup: %s vector code too large",
    893 		    "interrupt exception");
    894 
    895 	memcpy((void *)MIPS_UTLB_MISS_EXC_VEC, mips32_tlb_miss,
    896 	      mips32_intr_end - mips32_tlb_miss);
    897 
    898 	/*
    899 	 * Copy locore-function vector.
    900 	 */
    901 	mips_locore_jumpvec = mips32_locore_vec;
    902 
    903 	mips_icache_sync_all();
    904 	mips_dcache_wbinv_all();
    905 
    906 	/* Clear BEV in SR so we start handling our own exceptions */
    907 	mips_cp0_status_write(mips_cp0_status_read() & ~MIPS_SR_BEV);
    908 
    909 	mips_watchpoint_init();
    910 }
    911 #endif /* MIPS32 */
    912 
    913 #if defined(MIPS32R2)
    914 static void
    915 mips32r2_vector_init(const struct splsw *splsw)
    916 {
    917 	/* r4000 exception handler address */
    918 	extern char mips32r2_exception[];
    919 
    920 	/* TLB miss handler addresses */
    921 	extern char mips32r2_tlb_miss[];
    922 
    923 	/* Cache error handler */
    924 	extern char mips32r2_cache[];
    925 
    926 	/* MIPS32 interrupt exception handler */
    927 	extern char mips32r2_intr[], mips32r2_intr_end[];
    928 
    929 	/*
    930 	 * Copy down exception vector code.
    931 	 */
    932 	if (mips32r2_cache - mips32r2_tlb_miss != 0x100)
    933 		panic("startup: %s vector code not 128 bytes in length",
    934 		    "UTLB");
    935 	if (mips32r2_exception - mips32r2_cache != 0x80)
    936 		panic("startup: %s vector code not 128 bytes in length",
    937 		    "Cache error");
    938 	if (mips32r2_intr - mips32r2_exception != 0x80)
    939 		panic("startup: %s vector code not 128 bytes in length",
    940 		    "General exception");
    941 	if (mips32r2_intr_end - mips32r2_intr > 0x80)
    942 		panic("startup: %s vector code too large",
    943 		    "interrupt exception");
    944 
    945 	memcpy((void *)MIPS_UTLB_MISS_EXC_VEC, mips32r2_tlb_miss,
    946 	      mips32r2_intr_end - mips32r2_tlb_miss);
    947 
    948 	/*
    949 	 * Let's see if this cpu has USERLOCAL or DSP V2 ASE...
    950 	 */
    951 	if (mipsNN_cp0_config2_read() & MIPSNN_CFG2_M) {
    952 		const uint32_t cfg3 = mipsNN_cp0_config3_read();
    953 		if (cfg3 & MIPSNN_CFG3_ULRI) {
    954 			mips_options.mips_cpu_flags |= CPU_MIPS_HAVE_USERLOCAL;
    955 		}
    956 		if (cfg3 & MIPSNN_CFG3_DSP2P) {
    957 			mips_options.mips_cpu_flags |= CPU_MIPS_HAVE_DSP;
    958 		}
    959 	}
    960 
    961 	/*
     962 	 * If this CPU doesn't have a COP0 USERLOCAL register, overwrite the
     963 	 * instructions at the end of cpu_switch_resume which update it.
    964 	 */
    965 	if (!MIPS_HAS_USERLOCAL) {
    966 		extern uint32_t mips32r2_cpu_switch_resume[];
    967 		for (uint32_t *insnp = mips32r2_cpu_switch_resume;; insnp++) {
    968 			KASSERT(insnp[0] != JR_RA);
    969 			if (insnp[0] == _LOAD_V0_L_PRIVATE_A0
    970 			    && insnp[1] == _MTC0_V0_USERLOCAL) {
    971 				insnp[0] = JR_RA;
    972 				insnp[1] = 0;		/* NOP */
    973 				break;
    974 			}
    975 		}
    976 	}
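         	/*
         	 * Illustrative effect of the patch above: the loop locates the
         	 * adjacent pair
         	 *	lw/ld	v0, offsetof(lwp_t, l_private)(a0)
         	 *	mtc0/dmtc0 v0, $4, 2		# UserLocal
         	 * in mips32r2_cpu_switch_resume and rewrites it as
         	 *	jr	ra
         	 *	nop
         	 * so the routine returns before touching the register this CPU
         	 * lacks.  The mips64r2 variant below does the same thing.
         	 */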
    977 
    978 	/*
    979 	 * Copy locore-function vector.
    980 	 */
    981 	mips_locore_jumpvec = mips32r2_locore_vec;
    982 
    983 	mips_icache_sync_all();
    984 	mips_dcache_wbinv_all();
    985 
    986 	/* Clear BEV in SR so we start handling our own exceptions */
    987 	mips_cp0_status_write(mips_cp0_status_read() & ~MIPS_SR_BEV);
    988 
    989 	mips_watchpoint_init();
    990 }
    991 #endif /* MIPS32R2 */
    992 
    993 #if defined(MIPS64)
    994 static void
    995 mips64_vector_init(const struct splsw *splsw)
    996 {
    997 	/* r4000 exception handler address */
    998 	extern char mips64_exception[];
    999 
   1000 	/* TLB miss handler addresses */
   1001 	extern char mips64_tlb_miss[];
   1002 	extern char mips64_xtlb_miss[];
   1003 
   1004 	/* Cache error handler */
   1005 	extern char mips64_cache[];
   1006 
   1007 	/* MIPS64 interrupt exception handler */
   1008 	extern char mips64_intr[], mips64_intr_end[];
   1009 
   1010 	/*
   1011 	 * Copy down exception vector code.
   1012 	 */
   1013 
   1014 	if (mips64_xtlb_miss - mips64_tlb_miss != 0x80)
   1015 		panic("startup: %s vector code not 128 bytes in length",
   1016 		    "UTLB");
   1017 	if (mips64_cache - mips64_xtlb_miss != 0x80)
   1018 		panic("startup: %s vector code not 128 bytes in length",
   1019 		    "XTLB");
   1020 	if (mips64_exception - mips64_cache != 0x80)
   1021 		panic("startup: %s vector code not 128 bytes in length",
   1022 		    "Cache error");
   1023 	if (mips64_intr - mips64_exception != 0x80)
   1024 		panic("startup: %s vector code not 128 bytes in length",
   1025 		    "General exception");
   1026 	if (mips64_intr_end - mips64_intr > 0x80)
   1027 		panic("startup: %s vector code too large",
   1028 		    "interrupt exception");
   1029 
   1030 	memcpy((void *)MIPS_UTLB_MISS_EXC_VEC, mips64_tlb_miss,
   1031 	      mips64_intr_end - mips64_tlb_miss);
   1032 
   1033 	/*
   1034 	 * Copy locore-function vector.
   1035 	 */
   1036 	mips_locore_jumpvec = mips64_locore_vec;
   1037 
   1038 	mips_icache_sync_all();
   1039 	mips_dcache_wbinv_all();
   1040 
   1041 	/* Clear BEV in SR so we start handling our own exceptions */
   1042 	mips_cp0_status_write(mips_cp0_status_read() & ~MIPS_SR_BEV);
   1043 
   1044 	mips_watchpoint_init();
   1045 }
   1046 #endif /* MIPS64 */
   1047 
   1048 #if defined(MIPS64R2)
   1049 void
   1050 mips64r2_vector_init(const struct splsw *splsw)
   1051 {
   1052 	/* r4000 exception handler address */
   1053 	extern char mips64r2_exception[];
   1054 
   1055 	/* TLB miss handler addresses */
   1056 	extern char mips64r2_tlb_miss[];
   1057 	extern char mips64r2_xtlb_miss[];
   1058 
   1059 	/* Cache error handler */
   1060 	extern char mips64r2_cache[];
   1061 
   1062 	/* MIPS64 interrupt exception handler */
   1063 	extern char mips64r2_intr[], mips64r2_intr_end[];
   1064 
   1065 	/*
   1066 	 * Copy down exception vector code.
   1067 	 */
   1068 
   1069 	if (mips64r2_xtlb_miss - mips64r2_tlb_miss != 0x80)
   1070 		panic("startup: %s vector code not 128 bytes in length",
   1071 		    "UTLB");
   1072 	if (mips64r2_cache - mips64r2_xtlb_miss != 0x80)
   1073 		panic("startup: %s vector code not 128 bytes in length",
   1074 		    "XTLB");
   1075 	if (mips64r2_exception - mips64r2_cache != 0x80)
   1076 		panic("startup: %s vector code not 128 bytes in length",
   1077 		    "Cache error");
   1078 	if (mips64r2_intr - mips64r2_exception != 0x80)
   1079 		panic("startup: %s vector code not 128 bytes in length",
   1080 		    "General exception");
   1081 	if (mips64r2_intr_end - mips64r2_intr > 0x80)
   1082 		panic("startup: %s vector code too large",
   1083 		    "interrupt exception");
   1084 
   1085 	const intptr_t ebase = (intptr_t)mipsNN_cp0_ebase_read();
   1086 	const int cpunum = ebase & MIPS_EBASE_CPUNUM;
   1087 
    1088 	// This may need to run on CPUs other than CPU0, so use EBASE to
    1089 	// fetch the appropriate address for the exception code.  EBASE also
    1090 	// contains the cpunum, so mask that off.
   1091 	memcpy((void *)(intptr_t)(ebase & ~MIPS_EBASE_CPUNUM), mips64r2_tlb_miss,
   1092 	      mips64r2_intr_end - mips64r2_tlb_miss);
   1093 
   1094 	/*
   1095 	 * Let's see if this cpu has USERLOCAL or DSP V2 ASE...
   1096 	 */
   1097 	if (mipsNN_cp0_config2_read() & MIPSNN_CFG2_M) {
   1098 		const uint32_t cfg3 = mipsNN_cp0_config3_read();
   1099 		if (cfg3 & MIPSNN_CFG3_ULRI) {
   1100 			mips_options.mips_cpu_flags |= CPU_MIPS_HAVE_USERLOCAL;
   1101 		}
   1102 		if (cfg3 & MIPSNN_CFG3_DSP2P) {
   1103 			mips_options.mips_cpu_flags |= CPU_MIPS_HAVE_DSP;
   1104 		}
   1105 	}
   1106 
   1107 	/*
    1108 	 * If this CPU doesn't have a COP0 USERLOCAL register, overwrite the
    1109 	 * instructions at the end of cpu_switch_resume which update it.
   1110 	 */
   1111 	if (!MIPS_HAS_USERLOCAL && cpunum == 0) {
   1112 		extern uint32_t mips64r2_cpu_switch_resume[];
   1113 		for (uint32_t *insnp = mips64r2_cpu_switch_resume;; insnp++) {
   1114 			KASSERT(insnp[0] != JR_RA);
   1115 			if (insnp[0] == _LOAD_V0_L_PRIVATE_A0
   1116 			    && insnp[1] == _MTC0_V0_USERLOCAL) {
   1117 				insnp[0] = JR_RA;
   1118 				insnp[1] = 0;		/* NOP */
   1119 				break;
   1120 			}
   1121 		}
   1122 	}
   1123 
   1124 	/*
   1125 	 * Copy locore-function vector.
   1126 	 */
   1127 	if (cpunum == 0)
   1128 		mips_locore_jumpvec = mips64r2_locore_vec;
   1129 
   1130 	mips_icache_sync_all();
   1131 	mips_dcache_wbinv_all();
   1132 
   1133 	/* Clear BEV in SR so we start handling our own exceptions */
   1134 	mips_cp0_status_write(mips_cp0_status_read() & ~MIPS_SR_BEV);
   1135 
   1136 	mips_watchpoint_init();
   1137 }
   1138 #endif /* MIPS64R2 */
   1139 
   1140 /*
   1141  * Do all the stuff that locore normally does before calling main(),
   1142  * that is common to all mips-CPU NetBSD ports.
   1143  *
   1144  * The principal purpose of this function is to examine the
   1145  * variable cpu_id, into which the kernel locore start code
   1146  * writes the CPU ID register, and to then copy appropriate
   1147  * code into the CPU exception-vector entries and the jump tables
   1148  * used to hide the differences in cache and TLB handling in
   1149  * different MIPS CPUs.
   1150  *
   1151  * This should be the very first thing called by each port's
   1152  * init_main() function.
   1153  */
   1154 
   1155 /*
   1156  * Initialize the hardware exception vectors, and the jump table used to
   1157  * call locore cache and TLB management functions, based on the kind
   1158  * of CPU the kernel is running on.
   1159  */
   1160 void
   1161 mips_vector_init(const struct splsw *splsw, bool multicpu_p)
   1162 {
   1163 	struct mips_options * const opts = &mips_options;
   1164 	const struct pridtab *ct;
   1165 	const mips_prid_t cpu_id = opts->mips_cpu_id;
   1166 
   1167 	for (ct = cputab; ct->cpu_name != NULL; ct++) {
   1168 		if (MIPS_PRID_CID(cpu_id) != ct->cpu_cid ||
   1169 		    MIPS_PRID_IMPL(cpu_id) != ct->cpu_pid)
   1170 			continue;
   1171 		if (ct->cpu_rev >= 0 &&
   1172 		    MIPS_PRID_REV(cpu_id) != ct->cpu_rev)
   1173 			continue;
   1174 		if (ct->cpu_copts >= 0 &&
   1175 		    MIPS_PRID_COPTS(cpu_id) != ct->cpu_copts)
   1176 			continue;
   1177 
   1178 		opts->mips_cpu = ct;
   1179 		opts->mips_cpu_arch = ct->cpu_isa;
   1180 		opts->mips_num_tlb_entries = ct->cpu_ntlb;
   1181 		break;
   1182 	}
   1183 
   1184 	if (opts->mips_cpu == NULL)
   1185 		panic("CPU type (0x%x) not supported", cpu_id);
   1186 
   1187 #if (MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) > 0
   1188 	if (MIPS_PRID_CID(cpu_id) != 0) {
   1189 		/* MIPS32/MIPS64, use coprocessor 0 config registers */
   1190 		uint32_t cfg, cfg1, cfg4;
   1191 
   1192 		cfg = mips3_cp0_config_read();
   1193 		cfg1 = mipsNN_cp0_config1_read();
   1194 		if (opts->mips_cpu->cpu_cp0flags & MIPS_CP0FL_CONFIG4)
   1195 			cfg4 = mipsNN_cp0_config4_read();
   1196 		else
   1197 			cfg4 = 0;
   1198 
   1199 		/* pick CPU type */
   1200 		switch (MIPSNN_GET(CFG_AT, cfg)) {
   1201 		case MIPSNN_CFG_AT_MIPS32:
   1202 			opts->mips_cpu_arch = CPU_ARCH_MIPS32;
   1203 			break;
   1204 		case MIPSNN_CFG_AT_MIPS64:
   1205 			opts->mips_cpu_arch = CPU_ARCH_MIPS64;
   1206 			break;
   1207 		case MIPSNN_CFG_AT_MIPS64S:
   1208 		default:
   1209 			panic("MIPS32/64 architecture type %d not supported",
   1210 			    MIPSNN_GET(CFG_AT, cfg));
   1211 		}
   1212 
   1213 		switch (MIPSNN_GET(CFG_AR, cfg)) {
   1214 		case MIPSNN_CFG_AR_REV1:
   1215 			break;
   1216 		case MIPSNN_CFG_AR_REV2:
   1217 			switch (opts->mips_cpu_arch) {
   1218 			case CPU_ARCH_MIPS32:
   1219 				opts->mips_cpu_arch = CPU_ARCH_MIPS32R2;
   1220 				break;
   1221 			case CPU_ARCH_MIPS64:
   1222 				opts->mips_cpu_arch = CPU_ARCH_MIPS64R2;
   1223 				break;
   1224 			default:
   1225 				printf("WARNING: MIPS32/64 arch %d revision %d "
   1226 				    "unknown!\n", opts->mips_cpu_arch,
   1227 				    MIPSNN_GET(CFG_AR, cfg));
   1228 				break;
   1229 			}
   1230 			break;
   1231 		default:
   1232 			printf("WARNING: MIPS32/64 arch revision %d "
   1233 			    "unknown!\n", MIPSNN_GET(CFG_AR, cfg));
   1234 			break;
   1235 		}
   1236 
   1237 		/* figure out MMU type (and number of TLB entries) */
   1238 		switch (MIPSNN_GET(CFG_MT, cfg)) {
   1239 		case MIPSNN_CFG_MT_TLB:
   1240 			/*
    1241 			 * Config1[MMUSize-1] holds the number of TLB
    1242 			 * entries minus 1, allowing up to 64 entries to
    1243 			 * be encoded.  On MIPS32R2/MIPS64R2 and later,
    1244 			 * if the Config4[MMUExtDef] field is 1, then the
    1245 			 * Config4[MMUSizeExt] field extends the
    1246 			 * Config1[MMUSize-1] field.
   1247 			 */
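         			/*
         			 * Worked example: Config1[MMUSize-1] == 0x3f
         			 * yields 64 entries; if Config4[MMUExtDef] == 1
         			 * and Config4[MMUSizeExt] == 1, a further
         			 * 1 << 6 == 64 entries are added, for 128 in
         			 * total.  This assumes MIPSNN_CFG1_MS() already
         			 * returns the field value plus one, as the
         			 * direct assignment below implies.
         			 */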
   1248 			opts->mips_num_tlb_entries = MIPSNN_CFG1_MS(cfg1);
   1249 			if (__SHIFTOUT(cfg4, MIPSNN_CFG4_MMU_EXT_DEF) ==
   1250 			    MIPSNN_CFG4_MMU_EXT_DEF_MMU) {
   1251 				opts->mips_num_tlb_entries +=
   1252 				__SHIFTOUT(cfg4, MIPSNN_CFG4_MMU_SIZE_EXT) <<
   1253 				    popcount(MIPSNN_CFG1_MS_MASK);
   1254 			}
   1255 			break;
   1256 		case MIPSNN_CFG_MT_NONE:
   1257 		case MIPSNN_CFG_MT_BAT:
   1258 		case MIPSNN_CFG_MT_FIXED:
   1259 		default:
   1260 			panic("MIPS32/64 MMU type %d not supported",
   1261 			    MIPSNN_GET(CFG_MT, cfg));
   1262 		}
   1263 	}
   1264 #endif /* (MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) > 0 */
   1265 
   1266 	if (opts->mips_cpu_arch < 1)
   1267 		panic("Unknown CPU ISA for CPU type 0x%x", cpu_id);
   1268 	if (opts->mips_num_tlb_entries < 1)
   1269 		panic("Unknown number of TLBs for CPU type 0x%x", cpu_id);
   1270 
   1271 	/*
   1272 	 * Check CPU-specific flags.
   1273 	 */
   1274 	opts->mips_cpu_flags = opts->mips_cpu->cpu_flags;
   1275 	opts->mips_has_r4k_mmu = (opts->mips_cpu_flags & CPU_MIPS_R4K_MMU) != 0;
   1276 	opts->mips_has_llsc = (opts->mips_cpu_flags & CPU_MIPS_NO_LLSC) == 0;
   1277 #if defined(MIPS3_4100)
   1278 	if (MIPS_PRID_IMPL(cpu_id) == MIPS_R4100)
   1279 		opts->mips3_pg_shift = MIPS3_4100_PG_SHIFT;
   1280 	else
   1281 #endif
   1282 		opts->mips3_pg_shift = MIPS3_DEFAULT_PG_SHIFT;
   1283 
   1284 	opts->mips3_cca_devmem = CCA_UNCACHED;
   1285 	if (opts->mips_cpu_flags & CPU_MIPS_HAVE_SPECIAL_CCA) {
   1286 		uint32_t cca;
   1287 
   1288 		cca = (opts->mips_cpu_flags & CPU_MIPS_CACHED_CCA_MASK) >>
   1289 		    CPU_MIPS_CACHED_CCA_SHIFT;
   1290 		opts->mips3_pg_cached = MIPS3_CCA_TO_PG(cca);
   1291 #ifndef __mips_o32
   1292 		opts->mips3_xkphys_cached = MIPS_PHYS_TO_XKPHYS(cca, 0);
   1293 #endif
   1294 	} else {
   1295 		opts->mips3_pg_cached = MIPS3_DEFAULT_PG_CACHED;
   1296 #ifndef __mips_o32
   1297 		opts->mips3_xkphys_cached = MIPS3_DEFAULT_XKPHYS_CACHED;
   1298 #endif
   1299 	}
   1300 
   1301 #ifdef __HAVE_MIPS_MACHDEP_CACHE_CONFIG
   1302 	mips_machdep_cache_config();
   1303 #endif
   1304 
   1305 	/*
    1306 	 * If 'splsw' is NULL, use the standard SPL code driven by the COP0
    1307 	 * status/cause registers; otherwise use the chip-specific splsw.
   1308 	 */
   1309 	if (splsw == NULL) {
   1310 		mips_splsw = std_splsw;
   1311 #ifdef PARANOIA
   1312 		std_splsw_test();	/* only works with std_splsw */
   1313 #endif
   1314 	} else {
   1315 		mips_splsw = *splsw;
   1316 	}
   1317 
   1318 	/*
   1319 	 * Determine cache configuration and initialize our cache
   1320 	 * frobbing routine function pointers.
   1321 	 */
   1322 	mips_config_cache();
   1323 
   1324 	/*
    1325 	 * We default to RAS atomic ops since they have the lowest overhead.
   1326 	 */
   1327 #ifdef MULTIPROCESSOR
   1328 	if (multicpu_p) {
   1329 		/*
   1330 		 * If we could have multiple CPUs active,
   1331 		 * use the ll/sc variants.
   1332 		 */
   1333 		mips_locore_atomicvec = mips_llsc_locore_atomicvec;
   1334 	}
   1335 #endif
   1336 	/*
   1337 	 * Now initialize our ISA-dependent function vector.
   1338 	 */
   1339 	switch (opts->mips_cpu_arch) {
   1340 #if defined(MIPS1)
   1341 	case CPU_ARCH_MIPS1:
   1342 		(*mips1_locore_vec.ljv_tlb_invalidate_all)();
   1343 		mips1_vector_init(splsw);
   1344 		mips_locoresw = mips1_locoresw;
   1345 		break;
   1346 #endif
   1347 #if defined(MIPS3)
   1348 	case CPU_ARCH_MIPS3:
   1349 	case CPU_ARCH_MIPS4:
   1350 		mips3_tlb_probe();
   1351 #if defined(MIPS3_4100)
   1352 		if (MIPS_PRID_IMPL(cpu_id) == MIPS_R4100)
   1353 			mips3_cp0_pg_mask_write(MIPS4100_PG_SIZE_TO_MASK(PAGE_SIZE));
   1354 		else
   1355 #endif
   1356 		mips3_cp0_pg_mask_write(MIPS3_PG_SIZE_TO_MASK(PAGE_SIZE));
   1357 		mips3_cp0_wired_write(0);
   1358 #if defined(MIPS3_LOONGSON2)
   1359 		if (opts->mips_cpu_flags & CPU_MIPS_LOONGSON2) {
   1360 			(*loongson2_locore_vec.ljv_tlb_invalidate_all)();
   1361 			mips3_cp0_wired_write(pmap_tlb0_info.ti_wired);
   1362 			loongson2_vector_init(splsw);
   1363 			mips_locoresw = loongson2_locoresw;
   1364 			opts->mips3_cca_devmem = CCA_ACCEL;
   1365 			break;
   1366 		}
   1367 #endif /* MIPS3_LOONGSON2 */
   1368 		(*mips3_locore_vec.ljv_tlb_invalidate_all)();
   1369 		mips3_cp0_wired_write(pmap_tlb0_info.ti_wired);
   1370 		mips3_vector_init(splsw);
   1371 		mips_locoresw = mips3_locoresw;
   1372 		break;
   1373 
   1374 #endif /* MIPS3 */
   1375 #if defined(MIPS32)
   1376 	case CPU_ARCH_MIPS32:
   1377 		mips3_tlb_probe();
   1378 		mips3_cp0_pg_mask_write(MIPS3_PG_SIZE_TO_MASK(PAGE_SIZE));
   1379 		mips3_cp0_wired_write(0);
   1380 		(*mips32_locore_vec.ljv_tlb_invalidate_all)();
   1381 		mips3_cp0_wired_write(pmap_tlb0_info.ti_wired);
   1382 		mips32_vector_init(splsw);
   1383 		mips_locoresw = mips32_locoresw;
   1384 		break;
   1385 #endif
   1386 #if defined(MIPS32R2)
   1387 	case CPU_ARCH_MIPS32R2:
   1388 		mips3_tlb_probe();
   1389 		mips3_cp0_pg_mask_write(MIPS3_PG_SIZE_TO_MASK(PAGE_SIZE));
   1390 		mips3_cp0_wired_write(0);
   1391 		(*mips32r2_locore_vec.ljv_tlb_invalidate_all)();
   1392 		mips3_cp0_wired_write(pmap_tlb0_info.ti_wired);
   1393 		mips32r2_vector_init(splsw);
   1394 		mips_locoresw = mips32r2_locoresw;
   1395 		break;
   1396 #endif
   1397 #if defined(MIPS64)
   1398 	case CPU_ARCH_MIPS64: {
   1399 		mips3_tlb_probe();
   1400 		mips3_cp0_pg_mask_write(MIPS3_PG_SIZE_TO_MASK(PAGE_SIZE));
   1401 		mips3_cp0_wired_write(0);
   1402 		(*mips64_locore_vec.ljv_tlb_invalidate_all)();
   1403 		mips3_cp0_wired_write(pmap_tlb0_info.ti_wired);
   1404 		mips64_vector_init(splsw);
   1405 		mips_locoresw = mips64_locoresw;
   1406 		break;
   1407 	}
   1408 #endif
   1409 #if defined(MIPS64R2)
   1410 	case CPU_ARCH_MIPS64R2: {
   1411 		mips3_tlb_probe();
   1412 		mips3_cp0_pg_mask_write(MIPS3_PG_SIZE_TO_MASK(PAGE_SIZE));
   1413 		mips3_cp0_wired_write(0);
   1414 		(*mips64r2_locore_vec.ljv_tlb_invalidate_all)();
   1415 		mips3_cp0_wired_write(pmap_tlb0_info.ti_wired);
   1416 		mips64r2_vector_init(splsw);
   1417 		mips_locoresw = mips64r2_locoresw;
   1418 		break;
   1419 	}
   1420 #endif
   1421 	default:
   1422 		printf("cpu_arch 0x%x: not supported\n", opts->mips_cpu_arch);
   1423 		cpu_reboot(RB_HALT, NULL);
   1424 	}
   1425 
   1426 	/*
   1427 	 * Now that the splsw and locoresw have been filled in, fix up the
   1428 	 * jumps to any stubs so that they jump to the real routines.
   1429 	 */
   1430 	extern uint32_t _ftext[];
   1431 	extern uint32_t _etext[];
   1432 	mips_fixup_stubs(_ftext, _etext);
   1433 
   1434 #if (MIPS3 + MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) > 0
   1435 	/*
   1436 	 * Install power-saving idle routines.
   1437 	 */
   1438 	if ((opts->mips_cpu_flags & CPU_MIPS_USE_WAIT) &&
   1439 	    !(opts->mips_cpu_flags & CPU_MIPS_NO_WAIT))
   1440 		mips_locoresw.lsw_cpu_idle = mips_wait_idle;
   1441 #endif /* (MIPS3 + MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) > 0 */
   1442 }
   1443 
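        /*
         * Install the platform-specific write-buffer flush routine and
         * invoke it once to drain any pending writes.
         */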
   1444 void
   1445 mips_set_wbflush(void (*flush_fn)(void))
   1446 {
   1447 	mips_locoresw.lsw_wbflush = flush_fn;
   1448 	(*flush_fn)();
   1449 }
   1450 
   1451 #if defined(MIPS3_PLUS)
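        /*
         * Probe the TLB for the supported page mask and, on 64-bit CPUs,
         * for the implemented VPN and PFN bits; record the results in
         * mips_options.
         */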
   1452 static void
   1453 mips3_tlb_probe(void)
   1454 {
   1455 	struct mips_options * const opts = &mips_options;
   1456 	opts->mips3_tlb_pg_mask = mips3_cp0_tlb_page_mask_probe();
   1457 	if (CPUIS64BITS) {
   1458 		opts->mips3_tlb_vpn_mask = mips3_cp0_tlb_entry_hi_probe();
   1459 		opts->mips3_tlb_vpn_mask |= PAGE_MASK;
   1460 		opts->mips3_tlb_vpn_mask <<= 2;
   1461 		opts->mips3_tlb_vpn_mask >>= 2;
   1462 		opts->mips3_tlb_pfn_mask = mips3_cp0_tlb_entry_lo_probe();
   1463 #if defined(_LP64) && defined(ENABLE_MIPS_16KB_PAGE)
   1464 		/*
   1465 		 * With 16KB pages our page tables could address a larger
   1466 		 * address space than the chip actually supports, so limit
   1467 		 * the user address space to what the chip can really
   1468 		 * address.
   1469 		 */
   1470 		if (mips_vm_maxuser_address > opts->mips3_tlb_vpn_mask + 1)
   1471 			mips_vm_maxuser_address = opts->mips3_tlb_vpn_mask + 1;
   1472 #endif
   1473 	}
   1474 }
   1475 #endif
   1476 
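        /*
         * Return a human-readable description of a cache's associativity
         * (0 ways is taken to mean fully set-associative).
         */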
   1477 static const char *
   1478 wayname(int ways)
   1479 {
   1480 	static char buf[sizeof("xxx-way set-associative")];
   1481 
   1482 #ifdef DIAGNOSTIC
   1483 	if (ways > 999)
   1484 		panic("mips cache - too many ways (%d)", ways);
   1485 #endif
   1486 
   1487 	switch (ways) {
   1488 	case 0:
   1489 		return "fully set-associative";
   1490 	case 1:
   1491 		return "direct-mapped";
   1492 	default:
   1493 		snprintf(buf, sizeof(buf), "%d-way set-associative", ways);
   1494 		return buf;
   1495 	}
   1496 }
   1497 
   1498 /*
   1499  * Identify product revision IDs of CPU and FPU.
   1500  */
   1501 void
   1502 cpu_identify(device_t dev)
   1503 {
   1504 	const struct mips_options * const opts = &mips_options;
   1505 	const struct mips_cache_info * const mci = &mips_cache_info;
   1506 	const mips_prid_t cpu_id = opts->mips_cpu_id;
   1507 	const mips_prid_t fpu_id = opts->mips_fpu_id;
   1508 	static const char * const wtnames[] = {
   1509 		"write-back",
   1510 		"write-through",
   1511 	};
   1512 	const char *cpuname, *fpuname;
   1513 	int i;
   1514 
   1515 	cpuname = opts->mips_cpu->cpu_name;
   1516 #ifdef MIPS64_OCTEON
   1517 	if (MIPS_PRID_CID(cpu_id) == MIPS_PRID_CID_CAVIUM) {
   1518 		cpuname = octeon_cpu_model(cpu_id);
   1519 	}
   1520 #endif
   1521 
   1522 	fpuname = NULL;
   1523 	for (i = 0; i < sizeof(fputab)/sizeof(fputab[0]); i++) {
   1524 		if (MIPS_PRID_CID(fpu_id) == fputab[i].cpu_cid &&
   1525 		    MIPS_PRID_IMPL(fpu_id) == fputab[i].cpu_pid) {
   1526 			fpuname = fputab[i].cpu_name;
   1527 			break;
   1528 		}
   1529 	}
   1530 	if (fpuname == NULL && MIPS_PRID_IMPL(fpu_id) == MIPS_PRID_IMPL(cpu_id))
   1531 		fpuname = "built-in FPU";
   1532 	if (MIPS_PRID_IMPL(cpu_id) == MIPS_R4700)	/* FPU PRid is 0x20 */
   1533 		fpuname = "built-in FPU";
   1534 	if (MIPS_PRID_IMPL(cpu_id) == MIPS_RC64470)	/* FPU PRid is 0x21 */
   1535 		fpuname = "built-in FPU";
   1536 #ifdef MIPSNN
   1537 	if (CPUISMIPSNN) {
   1538 		uint32_t cfg1;
   1539 
   1540 		switch (MIPS_PRID_CID(cpu_id)) {
   1541 		/*
   1542 		 * CPUs from the following companies have a built-in
   1543 		 * FPU if Config1[FP] is set.
   1544 		 */
   1545 		case MIPS_PRID_CID_SIBYTE:
   1546 		case MIPS_PRID_CID_CAVIUM:
   1547 			cfg1 = mipsNN_cp0_config1_read();
   1548 			if (cfg1 & MIPSNN_CFG1_FP)
   1549 				fpuname = "built-in FPU";
   1550 			break;
   1551 		}
   1552 	}
   1553 #endif
   1554 
   1555 	if (opts->mips_cpu->cpu_cid != 0) {
   1556 		if (opts->mips_cpu->cpu_cid <= ncidnames)
   1557 			aprint_normal("%s ", cidnames[opts->mips_cpu->cpu_cid]);
   1558 		else if (opts->mips_cpu->cpu_cid == MIPS_PRID_CID_INGENIC) {
   1559 			aprint_normal("Ingenic ");
   1560 		} else {
   1561 			aprint_normal("Unknown Company ID - 0x%x",
   1562 			    opts->mips_cpu->cpu_cid);
   1563 			aprint_normal_dev(dev, "");
   1564 		}
   1565 	}
   1566 	if (cpuname != NULL)
   1567 		aprint_normal("%s (0x%x)", cpuname, cpu_id);
   1568 	else
   1569 		aprint_normal("unknown CPU type (0x%x)", cpu_id);
   1570 	if (MIPS_PRID_CID(cpu_id) == MIPS_PRID_CID_PREHISTORIC)
   1571 		aprint_normal(" Rev. %d.%d", MIPS_PRID_REV_MAJ(cpu_id),
   1572 		    MIPS_PRID_REV_MIN(cpu_id));
   1573 	else
   1574 		aprint_normal(" Rev. %d", MIPS_PRID_REV(cpu_id));
   1575 
   1576 	if (fpuname != NULL)
   1577 		aprint_normal(" with %s", fpuname);
   1578 	else
   1579 		aprint_normal(" with unknown FPC type (0x%x)", fpu_id);
   1580 	if (opts->mips_fpu_id != 0) {
   1581 		if (MIPS_PRID_CID(cpu_id) == MIPS_PRID_CID_PREHISTORIC)
   1582 			aprint_normal(" Rev. %d.%d", MIPS_PRID_REV_MAJ(fpu_id),
   1583 			    MIPS_PRID_REV_MIN(fpu_id));
   1584 		else
   1585 			aprint_normal(" Rev. %d", MIPS_PRID_REV(fpu_id));
   1586 	}
   1587 	if (opts->mips_cpu_flags & MIPS_HAS_DSP) {
   1588 		aprint_normal(" and DSPv2");
   1589 	}
   1590 	aprint_normal("\n");
   1591 
   1592 	if (MIPS_PRID_CID(cpu_id) == MIPS_PRID_CID_PREHISTORIC &&
   1593 	    MIPS_PRID_RSVD(cpu_id) != 0) {
   1594 		aprint_normal_dev(dev,
   1595 		    "NOTE: top 8 bits of prehistoric PRID not 0!\n");
   1596 		aprint_normal_dev(dev, "Please mail port-mips@NetBSD.org "
   1597 		    "with %s dmesg lines.\n", device_xname(dev));
   1598 	}
   1599 
   1600 	switch (opts->mips_cpu_arch) {
   1601 #if defined(MIPS1)
   1602 	case CPU_ARCH_MIPS1:
   1603 		if (mci->mci_picache_size)
   1604 			aprint_normal_dev(dev, "%dKB/%dB %s Instruction cache, "
   1605 			    "%d TLB entries\n", mci->mci_picache_size / 1024,
   1606 			    mci->mci_picache_line_size,
   1607 			    wayname(mci->mci_picache_ways),
   1608 			    opts->mips_num_tlb_entries);
   1609 		else
   1610 			aprint_normal_dev(dev, "%d TLB entries\n",
   1611 			    opts->mips_num_tlb_entries);
   1612 		if (mci->mci_pdcache_size)
   1613 			aprint_normal_dev(dev, "%dKB/%dB %s %s Data cache\n",
   1614 			    mci->mci_pdcache_size / 1024,
   1615 			    mci->mci_pdcache_line_size,
   1616 			    wayname(mci->mci_pdcache_ways),
   1617 			    wtnames[mci->mci_pdcache_write_through]);
   1618 		break;
   1619 #endif /* MIPS1 */
   1620 #if (MIPS3 + MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) > 0
   1621 	case CPU_ARCH_MIPS3:
   1622 	case CPU_ARCH_MIPS4:
   1623 	case CPU_ARCH_MIPS32:
   1624 	case CPU_ARCH_MIPS32R2:
   1625 	case CPU_ARCH_MIPS64:
   1626 	case CPU_ARCH_MIPS64R2: {
   1627 		const char *sufx = "KMGTPE";
   1628 		uint32_t pg_mask;
   1629 		aprint_normal_dev(dev, "%d TLB entries",
   1630 		    opts->mips_num_tlb_entries);
   1631 #if !defined(__mips_o32)
   1632 		if (CPUIS64BITS) {
   1633 			int64_t pfn_mask;
   1634 			i = ffs(~(opts->mips3_tlb_vpn_mask >> 31)) + 30;
   1635 			aprint_normal(", %d%cB (%d-bit) VAs",
   1636 			    1 << (i % 10), sufx[(i / 10) - 1], i);
   1637 			for (i = 64, pfn_mask = opts->mips3_tlb_pfn_mask << 6;
   1638 			     pfn_mask > 0; i--, pfn_mask <<= 1)
   1639 				;
   1640 			aprint_normal(", %d%cB (%d-bit) PAs",
   1641 			      1 << (i % 10), sufx[(i / 10) - 1], i);
   1642 		}
   1643 #endif
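        		/*
        		 * Decode the probed PageMask: each additional pair of
        		 * set bits quadruples the maximum page size
        		 * (4KB, 16KB, 64KB, ...).
        		 */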
   1644 		for (i = 4, pg_mask = opts->mips3_tlb_pg_mask >> 13;
   1645 		     pg_mask != 0; ) {
   1646 			if ((pg_mask & 3) != 3)
   1647 				break;
   1648 			pg_mask >>= 2;
   1649 			i *= 4;
   1650 			if (i == 1024) {
   1651 				i = 1;
   1652 				sufx++;
   1653 			}
   1654 		}
   1655 		aprint_normal(", %d%cB max page size\n", i, sufx[0]);
   1656 		if (mci->mci_picache_size)
   1657 			aprint_normal_dev(dev,
   1658 			    "%dKB/%dB %s L1 instruction cache\n",
   1659 			    mci->mci_picache_size / 1024,
   1660 			    mci->mci_picache_line_size,
   1661 			    wayname(mci->mci_picache_ways));
   1662 		if (mci->mci_pdcache_size)
   1663 			aprint_normal_dev(dev,
   1664 			    "%dKB/%dB %s %s %sL1 data cache\n",
   1665 			    mci->mci_pdcache_size / 1024,
   1666 			    mci->mci_pdcache_line_size,
   1667 			    wayname(mci->mci_pdcache_ways),
   1668 			    wtnames[mci->mci_pdcache_write_through],
   1669 			    ((opts->mips_cpu_flags & CPU_MIPS_D_CACHE_COHERENT)
   1670 				? "coherent " : ""));
   1671 		if (mci->mci_sdcache_line_size)
   1672 			aprint_normal_dev(dev,
   1673 			    "%dKB/%dB %s %s L2 %s cache\n",
   1674 			    mci->mci_sdcache_size / 1024,
   1675 			    mci->mci_sdcache_line_size,
   1676 			    wayname(mci->mci_sdcache_ways),
   1677 			    wtnames[mci->mci_sdcache_write_through],
   1678 			    mci->mci_scache_unified ? "unified" : "data");
   1679 		break;
   1680 	}
   1681 #endif /* (MIPS3 + MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) > 0 */
   1682 	default:
   1683 		panic("cpu_identify: impossible");
   1684 	}
   1685 }
   1686 
   1687 /*
   1688  * Set registers on exec.
   1689  * Clear all registers except sp, pc, and t9.
   1690  * $sp is set to the stack pointer passed in.  $pc is set to the entry
   1691  * point given by the exec_package passed in, as is $t9 (used for PIC
   1692  * code by the MIPS ELF ABI).
   1693  */
   1694 void
   1695 setregs(struct lwp *l, struct exec_package *pack, vaddr_t stack)
   1696 {
   1697 	struct trapframe * const tf = l->l_md.md_utf;
   1698 	struct proc * const p = l->l_proc;
   1699 
   1700 	KASSERTMSG((stack & STACK_ALIGNBYTES) == 0, "stack=%"PRIxVADDR, stack);
   1701 
   1702 	memset(tf, 0, sizeof(*tf));
   1703 	tf->tf_regs[_R_SP] = (intptr_t)stack & ~STACK_ALIGNBYTES;
   1704 	tf->tf_regs[_R_PC] = (intptr_t)pack->ep_entry & ~3;
   1705 	tf->tf_regs[_R_T9] = (intptr_t)pack->ep_entry & ~3; /* abicall requirement */
   1706 	tf->tf_regs[_R_SR] = PSL_USERSET;
   1707 #if !defined(__mips_o32)
   1708 	/*
   1709 	 * Allow 64-bit operations in userland for non-O32 ABIs.
   1710 	 */
   1711 	if (p->p_md.md_abi == _MIPS_BSD_API_N32
   1712 	    && (CPUISMIPS64 || CPUISMIPS64R2)) {
   1713 		tf->tf_regs[_R_SR] |= MIPS_SR_PX;
   1714 	} else if (p->p_md.md_abi != _MIPS_BSD_API_O32) {
   1715 		tf->tf_regs[_R_SR] |= MIPS_SR_UX;
   1716 	}
   1717 	if (_MIPS_SIM_NEWABI_P(p->p_md.md_abi))
   1718 		tf->tf_regs[_R_SR] |= MIPS3_SR_FR;
   1719 #endif
   1720 #ifdef _LP64
   1721 	/*
   1722 	 * If we are using a 32-bit ABI on a 64-bit kernel, mark the process
   1723 	 * that way.  If we aren't, clear it.
   1724 	 */
   1725 	if (p->p_md.md_abi == _MIPS_BSD_API_N32
   1726 	    || p->p_md.md_abi == _MIPS_BSD_API_O32) {
   1727 		p->p_flag |= PK_32;
   1728 	} else {
   1729 		p->p_flag &= ~PK_32;
   1730 	}
   1731 #endif
   1732 	/*
   1733 	 * Set up arguments for _start():
   1734 	 *	_start(stack, obj, cleanup, ps_strings);
   1735 	 *
   1736 	 * Notes:
   1737 	 *	- obj and cleanup are the auxiliary and termination
   1738 	 *	  vectors.  They are fixed up by ld.elf_so.
   1739 	 *	- ps_strings is a NetBSD extension.
   1740 	 */
   1741 	tf->tf_regs[_R_A0] = (intptr_t)stack;
   1742 	tf->tf_regs[_R_A1] = 0;
   1743 	tf->tf_regs[_R_A2] = 0;
   1744 	tf->tf_regs[_R_A3] = p->p_psstrp;
   1745 
   1746 	l->l_md.md_ss_addr = 0;
   1747 }
   1748 
   1749 #ifdef __HAVE_BOOTINFO_H
   1750 /*
   1751  * Machine dependent system variables.
   1752  */
   1753 static int
   1754 sysctl_machdep_booted_kernel(SYSCTLFN_ARGS)
   1755 {
   1756 	struct btinfo_bootpath *bibp;
   1757 	struct sysctlnode node;
   1758 
   1759 	bibp = lookup_bootinfo(BTINFO_BOOTPATH);
   1760 	if (bibp == NULL)
   1761 		return (ENOENT);	/* ??? */
   1762 
   1763 	node = *rnode;
   1764 	node.sysctl_data = bibp->bootpath;
   1765 	node.sysctl_size = sizeof(bibp->bootpath);
   1766 	return (sysctl_lookup(SYSCTLFN_CALL(&node)));
   1767 }
   1768 #endif
   1769 
   1770 SYSCTL_SETUP(sysctl_machdep_setup, "sysctl machdep subtree setup")
   1771 {
   1772 
   1773 	sysctl_createv(clog, 0, NULL, NULL,
   1774 		       CTLFLAG_PERMANENT,
   1775 		       CTLTYPE_NODE, "machdep", NULL,
   1776 		       NULL, 0, NULL, 0,
   1777 		       CTL_MACHDEP, CTL_EOL);
   1778 
   1779 	sysctl_createv(clog, 0, NULL, NULL,
   1780 		       CTLFLAG_PERMANENT,
   1781 		       CTLTYPE_STRUCT, "console_device", NULL,
   1782 		       sysctl_consdev, 0, NULL, sizeof(dev_t),
   1783 		       CTL_MACHDEP, CPU_CONSDEV, CTL_EOL);
   1784 #ifdef __HAVE_BOOTINFO_H
   1785 	sysctl_createv(clog, 0, NULL, NULL,
   1786 		       CTLFLAG_PERMANENT,
   1787 		       CTLTYPE_STRING, "booted_kernel", NULL,
   1788 		       sysctl_machdep_booted_kernel, 0, NULL, 0,
   1789 		       CTL_MACHDEP, CPU_BOOTED_KERNEL, CTL_EOL);
   1790 #endif
   1791 	sysctl_createv(clog, 0, NULL, NULL,
   1792 		       CTLFLAG_PERMANENT,
   1793 		       CTLTYPE_STRING, "root_device", NULL,
   1794 		       sysctl_root_device, 0, NULL, 0,
   1795 		       CTL_MACHDEP, CPU_ROOT_DEVICE, CTL_EOL);
   1796 	sysctl_createv(clog, 0, NULL, NULL,
   1797 		       CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
   1798 		       CTLTYPE_INT, "llsc", NULL,
   1799 		       NULL, MIPS_HAS_LLSC, NULL, 0,
   1800 		       CTL_MACHDEP, CPU_LLSC, CTL_EOL);
   1801 #ifdef MIPS3_LOONGSON2
   1802 	sysctl_createv(clog, 0, NULL, NULL,
   1803 		       CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
   1804 		       CTLTYPE_INT, "loongson-mmi", NULL,
   1805 		       NULL, MIPS_HAS_LMMI, NULL, 0,
   1806 		       CTL_MACHDEP, CPU_LMMI, CTL_EOL);
   1807 #endif
   1808 	sysctl_createv(clog, 0, NULL, NULL,
   1809 		       CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
   1810 		       CTLTYPE_INT, "fpu_present", NULL,
   1811 		       NULL,
   1812 #ifdef NOFPU
   1813 		       0,
   1814 #else
   1815 		       1,
   1816 #endif
   1817 		       NULL, 0, CTL_MACHDEP, CTL_CREATE, CTL_EOL);
   1818 }
   1819 
   1820 /*
   1821  * These are imported from platform-specific code.
   1822  * XXX Should be declared in a header file.
   1823  */
   1824 extern phys_ram_seg_t mem_clusters[];
   1825 extern int mem_cluster_cnt;
   1826 
   1827 /*
   1828  * These variables are needed by /sbin/savecore.
   1829  */
   1830 u_int32_t dumpmag = 0x8fca0101;	/* magic number */
   1831 int	dumpsize = 0;		/* pages */
   1832 long	dumplo = 0;		/* blocks */
   1833 
   1834 struct pcb dumppcb;
   1835 
   1836 /*
   1837  * cpu_dumpsize: calculate size of machine-dependent kernel core dump headers.
   1838  */
   1839 int
   1840 cpu_dumpsize(void)
   1841 {
   1842 	int size;
   1843 
   1844 	size = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t)) +
   1845 	    ALIGN(mem_cluster_cnt * sizeof(phys_ram_seg_t));
   1846 	if (roundup(size, dbtob(1)) != dbtob(1))
   1847 		return (-1);
   1848 
   1849 	return (1);
   1850 }
   1851 
   1852 /*
   1853  * cpu_dump_mempagecnt: calculate size of RAM (in pages) to be dumped.
   1854  */
   1855 u_long
   1856 cpu_dump_mempagecnt(void)
   1857 {
   1858 	u_long i, n;
   1859 
   1860 	n = 0;
   1861 	for (i = 0; i < mem_cluster_cnt; i++)
   1862 		n += atop(mem_clusters[i].size);
   1863 	return (n);
   1864 }
   1865 
   1866 /*
   1867  * cpu_dump: dump machine-dependent kernel core dump headers.
   1868  */
   1869 int
   1870 cpu_dump(void)
   1871 {
   1872 	int (*dump)(dev_t, daddr_t, void *, size_t);
   1873 	char buf[dbtob(1)];
   1874 	kcore_seg_t *segp;
   1875 	cpu_kcore_hdr_t *cpuhdrp;
   1876 	phys_ram_seg_t *memsegp;
   1877 	const struct bdevsw *bdev;
   1878 	int i;
   1879 
   1880 	bdev = bdevsw_lookup(dumpdev);
   1881 	if (bdev == NULL)
   1882 		return (ENXIO);
   1883 
   1884 	dump = bdev->d_dump;
   1885 
   1886 	memset(buf, 0, sizeof buf);
   1887 	segp = (kcore_seg_t *)buf;
   1888 	cpuhdrp = (cpu_kcore_hdr_t *)&buf[ALIGN(sizeof(*segp))];
   1889 	memsegp = (phys_ram_seg_t *)&buf[ ALIGN(sizeof(*segp)) +
   1890 	    ALIGN(sizeof(*cpuhdrp))];
   1891 
   1892 	/*
   1893 	 * Generate a segment header.
   1894 	 */
   1895 	CORE_SETMAGIC(*segp, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
   1896 	segp->c_size = dbtob(1) - ALIGN(sizeof(*segp));
   1897 
   1898 	/*
   1899 	 * Add the machine-dependent header info.
   1900 	 */
   1901 	if (MIPS_HAS_R4K_MMU) {
   1902 		cpuhdrp->archlevel = 3;
   1903 		cpuhdrp->pg_shift  = MIPS3_PG_SHIFT;
   1904 		cpuhdrp->pg_frame  = MIPS3_PG_FRAME;
   1905 		cpuhdrp->pg_v      = MIPS3_PG_V;
   1906 	} else {
   1907 		cpuhdrp->archlevel = 1;
   1908 		cpuhdrp->pg_shift  = MIPS1_PG_SHIFT;
   1909 		cpuhdrp->pg_frame  = MIPS1_PG_FRAME;
   1910 		cpuhdrp->pg_v      = MIPS1_PG_V;
   1911 	}
   1912 	cpuhdrp->sysmappa   = MIPS_KSEG0_TO_PHYS(curcpu()->ci_pmap_kern_segtab);
   1913 	cpuhdrp->nmemsegs   = mem_cluster_cnt;
   1914 
   1915 	/*
   1916 	 * Fill in the memory segment descriptors.
   1917 	 */
   1918 	for (i = 0; i < mem_cluster_cnt; i++) {
   1919 		memsegp[i].start = mem_clusters[i].start;
   1920 		memsegp[i].size = mem_clusters[i].size;
   1921 	}
   1922 
   1923 	return (dump(dumpdev, dumplo, (void *)buf, dbtob(1)));
   1924 }
   1925 
   1926 /*
   1927  * This is called by main to set dumplo and dumpsize.
   1928  * Dumps always skip the first ctod(1) blocks of disk space
   1929  * in case there might be a disk label stored there.
   1930  * If there is extra space, put dump at the end to
   1931  * reduce the chance that swapping trashes it.
   1932  */
   1933 void
   1934 cpu_dumpconf(void)
   1935 {
   1936 	int nblks, dumpblks;	/* size of dump area */
   1937 
   1938 	if (dumpdev == NODEV)
   1939 		goto bad;
   1940 	nblks = bdev_size(dumpdev);
   1941 	if (nblks <= ctod(1))
   1942 		goto bad;
   1943 
   1944 	dumpblks = cpu_dumpsize();
   1945 	if (dumpblks < 0)
   1946 		goto bad;
   1947 	dumpblks += ctod(cpu_dump_mempagecnt());
   1948 
   1949 	/* If dump won't fit (incl. room for possible label), punt. */
   1950 	if (dumpblks > (nblks - ctod(1)))
   1951 		goto bad;
   1952 
   1953 	/* Put dump at end of partition */
   1954 	dumplo = nblks - dumpblks;
   1955 
   1956 	/* dumpsize is in page units, and doesn't include headers. */
   1957 	dumpsize = cpu_dump_mempagecnt();
   1958 	return;
   1959 
   1960  bad:
   1961 	dumpsize = 0;
   1962 }
   1963 
   1964 /*
   1965  * Dump the kernel's image to the swap partition.
   1966  */
   1967 #define	BYTES_PER_DUMP	PAGE_SIZE
   1968 
   1969 void
   1970 dumpsys(void)
   1971 {
   1972 	u_long totalbytesleft, bytes, i, n, memcl;
   1973 	u_long maddr;
   1974 	int psize;
   1975 	daddr_t blkno;
   1976 	const struct bdevsw *bdev;
   1977 	int (*dump)(dev_t, daddr_t, void *, size_t);
   1978 	int error;
   1979 
   1980 	/* Save registers. */
   1981 	savectx(&dumppcb);
   1982 
   1983 	if (dumpdev == NODEV)
   1984 		return;
   1985 	bdev = bdevsw_lookup(dumpdev);
   1986 	if (bdev == NULL || bdev->d_psize == NULL)
   1987 		return;
   1988 
   1989 	/*
   1990 	 * For dumps during autoconfiguration, the dump device may already
   1991 	 * be configured but cpu_dumpconf() not yet run; do it now if so.
   1992 	 */
   1993 	if (dumpsize == 0)
   1994 		cpu_dumpconf();
   1995 	if (dumplo <= 0) {
   1996 		printf("\ndump to dev %u,%u not possible\n", major(dumpdev),
   1997 		    minor(dumpdev));
   1998 		return;
   1999 	}
   2000 	printf("\ndumping to dev %u,%u offset %ld\n", major(dumpdev),
   2001 	    minor(dumpdev), dumplo);
   2002 
   2003 	psize = bdev_size(dumpdev);
   2004 	printf("dump ");
   2005 	if (psize == -1) {
   2006 		printf("area unavailable\n");
   2007 		return;
   2008 	}
   2009 
   2010 	/* XXX should purge all outstanding keystrokes. */
   2011 
   2012 	if ((error = cpu_dump()) != 0)
   2013 		goto err;
   2014 
   2015 	totalbytesleft = ptoa(cpu_dump_mempagecnt());
   2016 	blkno = dumplo + cpu_dumpsize();
   2017 	dump = bdev->d_dump;
   2018 	error = 0;
   2019 
   2020 	for (memcl = 0; memcl < mem_cluster_cnt; memcl++) {
   2021 		maddr = mem_clusters[memcl].start;
   2022 		bytes = mem_clusters[memcl].size;
   2023 
   2024 		for (i = 0; i < bytes; i += n, totalbytesleft -= n) {
   2025 			void *maddr_va;
   2026 
   2027 			/* Print out how many MBs we have left to go. */
   2028 			if ((totalbytesleft % (1024*1024)) == 0)
   2029 				printf_nolog("%ld ",
   2030 				    totalbytesleft / (1024 * 1024));
   2031 
   2032 			/* Limit size for next transfer. */
   2033 			n = bytes - i;
   2034 			if (n > BYTES_PER_DUMP)
   2035 				n = BYTES_PER_DUMP;
   2036 
   2037 #ifdef _LP64
   2038 			maddr_va = (void *)MIPS_PHYS_TO_XKPHYS_CACHED(maddr);
   2039 #else
   2040 			maddr_va = (void *)MIPS_PHYS_TO_KSEG0(maddr);
   2041 #endif
   2042 			error = (*dump)(dumpdev, blkno, maddr_va, n);
   2043 			if (error)
   2044 				goto err;
   2045 			maddr += n;
   2046 			blkno += btodb(n);		/* XXX? */
   2047 
   2048 			/* XXX should look for keystrokes, to cancel. */
   2049 		}
   2050 	}
   2051 
   2052  err:
   2053 	switch (error) {
   2054 
   2055 	case ENXIO:
   2056 		printf("device bad\n");
   2057 		break;
   2058 
   2059 	case EFAULT:
   2060 		printf("device not ready\n");
   2061 		break;
   2062 
   2063 	case EINVAL:
   2064 		printf("area improper\n");
   2065 		break;
   2066 
   2067 	case EIO:
   2068 		printf("i/o error\n");
   2069 		break;
   2070 
   2071 	case EINTR:
   2072 		printf("aborted from console\n");
   2073 		break;
   2074 
   2075 	case 0:
   2076 		printf("succeeded\n");
   2077 		break;
   2078 
   2079 	default:
   2080 		printf("error %d\n", error);
   2081 		break;
   2082 	}
   2083 	printf("\n\n");
   2084 	delay(5000000);		/* 5 seconds */
   2085 }
   2086 
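        /*
         * Carve the kernel message buffer out of the end of the last
         * usable physical segment (on 32-bit kernels, one that can be
         * mapped through KSEG0), shrinking the buffer if necessary.
         */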
   2087 void
   2088 mips_init_msgbuf(void)
   2089 {
   2090 	vsize_t sz = (vsize_t)round_page(MSGBUFSIZE);
   2091 	vsize_t reqsz = sz;
   2092 	uvm_physseg_t bank = uvm_physseg_get_last();
   2093 #ifndef _LP64
   2094 	/*
   2095 	 * First, find a physical segment that can be mapped via KSEG0.
   2096 	 */
   2097 	for (; uvm_physseg_valid_p(bank); bank = uvm_physseg_get_prev(bank)) {
   2098 		if (uvm_physseg_get_avail_start(bank) + atop(sz) <= atop(MIPS_PHYS_MASK))
   2099 			break;
   2100 	}
   2101 #endif
   2102 
   2103 	paddr_t start = uvm_physseg_get_start(bank);
   2104 	paddr_t end = uvm_physseg_get_end(bank);
   2105 
   2106 	/* shrink so that it'll fit in the last segment */
   2107 	if ((end - start) < atop(sz))
   2108 		sz = ptoa(end - start);
   2109 
   2110 	end -= atop(sz);
   2111 	uvm_physseg_unplug(end, atop(sz));
   2112 
   2113 #ifdef _LP64
   2114 	msgbufaddr = (void *) MIPS_PHYS_TO_XKPHYS_CACHED(ptoa(end));
   2115 #else
   2116 	msgbufaddr = (void *) MIPS_PHYS_TO_KSEG0(ptoa(end));
   2117 #endif
   2118 	initmsgbuf(msgbufaddr, sz);
   2119 
   2120 	/* warn if the message buffer had to be shrunk */
   2121 	if (sz != reqsz)
   2122 		printf("WARNING: %"PRIdVSIZE" bytes not available for msgbuf "
   2123 		    "in last cluster (%"PRIdVSIZE" used)\n", reqsz, sz);
   2124 }
   2125 
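        /*
         * Allocate (if not already provided) and initialize lwp0's u-area,
         * trapframe and PCB.
         */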
   2126 void
   2127 mips_init_lwp0_uarea(void)
   2128 {
   2129 	struct lwp * const l = &lwp0;
   2130 	vaddr_t v;
   2131 
   2132 	if (l->l_addr == NULL) {
   2133 		v = uvm_pageboot_alloc(USPACE);
   2134 		uvm_lwp_setuarea(&lwp0, v);
   2135 	} else {
   2136 		v = (vaddr_t)l->l_addr;
   2137 	}
   2138 
   2139 	l->l_md.md_utf = (struct trapframe *)(v + USPACE) - 1;
   2140 	struct pcb * const pcb = lwp_getpcb(l);
   2141 	/*
   2142 	 * Now zero out the only two areas of the uarea that we care about.
   2143 	 */
   2144 	memset(l->l_md.md_utf, 0, sizeof(*l->l_md.md_utf));
   2145 	memset(pcb, 0, sizeof(*pcb));
   2146 
   2147 	pcb->pcb_context.val[_L_SR] = MIPS_SR_INT_IE
   2148 	    | (ipl_sr_map.sr_bits[IPL_SCHED] ^ MIPS_INT_MASK);
   2149 #ifdef __mips_n32
   2150 	pcb->pcb_context.val[_L_SR] |= MIPS_SR_KX;
   2151 	l->l_md.md_utf->tf_regs[_R_SR] = MIPS_SR_KX;
   2152 #endif
   2153 #ifdef _LP64
   2154 	pcb->pcb_context.val[_L_SR] |= MIPS_SR_KX | MIPS_SR_UX;
   2155 	l->l_md.md_utf->tf_regs[_R_SR] = MIPS_SR_KX | MIPS_SR_UX;
   2156 #endif
   2157 }
   2158 
   2159 int mips_poolpage_vmfreelist = VM_FREELIST_DEFAULT;
   2160 
   2161 #define	HALFGIG		((paddr_t)512 * 1024 * 1024)
   2162 #define	FOURGIG		((paddr_t)4 * 1024 * 1024 * 1024)
   2163 
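        /*
         * Hand the physical memory segments to uvm, assigning each chunk to
         * an appropriate freelist and excluding page 0 (exception vectors)
         * and the pages occupied by the kernel itself.
         */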
   2164 void
   2165 mips_page_physload(vaddr_t vkernstart, vaddr_t vkernend,
   2166 	const phys_ram_seg_t *segs, size_t nseg,
   2167 	const struct mips_vmfreelist *flp, size_t nfl)
   2168 {
   2169 	const paddr_t kernstart = MIPS_KSEG0_TO_PHYS(trunc_page(vkernstart));
   2170 	const paddr_t kernend = MIPS_KSEG0_TO_PHYS(round_page(vkernend));
   2171 #if defined(VM_FREELIST_FIRST4G) || defined(VM_FREELIST_FIRST512M)
   2172 #ifdef VM_FREELIST_FIRST512M
   2173 	bool need512m = false;
   2174 #endif
   2175 #ifdef VM_FREELIST_FIRST4G
   2176 	bool need4g = false;
   2177 #endif
   2178 
   2179 	/*
   2180 	 * Do a first pass to see what ranges of memory we have to deal with.
   2181 	 */
   2182 	for (size_t i = 0; i < nseg; i++) {
   2183 #ifdef VM_FREELIST_FIRST4G
   2184 		if (round_page(segs[i].start + segs[i].size) > FOURGIG) {
   2185 			need4g = true;
   2186 		}
   2187 #endif
   2188 #ifdef VM_FREELIST_FIRST512M
   2189 		if (round_page(segs[i].start + segs[i].size) > HALFGIG) {
   2190 			need512m = true;
   2191 #if !defined(_LP64)
   2192 			mips_poolpage_vmfreelist = VM_FREELIST_FIRST512M;
   2193 #endif
   2194 		}
   2195 #endif
   2196 	}
   2197 #endif /* VM_FREELIST_FIRST512M || VM_FREELIST_FIRST4G */
   2198 
   2199 	for (; nseg-- > 0; segs++) {
   2200 		/*
   2201 		 * Make sure everything is in page units.
   2202 		 */
   2203 		paddr_t segstart = round_page(segs->start);
   2204 		const paddr_t segfinish = trunc_page(segs->start + segs->size);
   2205 
   2206 		if (segstart >= segfinish) {
   2207 			/*
   2208 			 * This is purely cosmetic, to avoid output like
   2209 			 *    phys segment: 0xffffffffffffe000 @ 0xffb6000
   2210 			 * when a segment starts and finishes in the same page.
   2211 			 */
   2212 			printf("phys segment: %#"PRIxPADDR" @ %#"PRIxPADDR
   2213 			    " (short)\n", (paddr_t)segs->size, segstart);
   2214 			continue;
   2215 		}
   2216 
   2217 		printf("phys segment: %#"PRIxPADDR" @ %#"PRIxPADDR"\n",
   2218 		    segfinish - segstart, segstart);
   2219 
   2220 		/*
   2221 		 * Page 0 is reserved for exception vectors.
   2222 		 */
   2223 		if (segstart == 0) {
   2224 			segstart = PAGE_SIZE;
   2225 		}
   2226 		while (segstart < segfinish) {
   2227 			int freelist = -1;	/* unknown freelist */
   2228 			paddr_t segend = segfinish;
   2229 			for (size_t i = 0; i < nfl; i++) {
   2230 				/*
   2231 				 * If this segment doesn't overlap the freelist
   2232 				 * at all, skip it.
   2233 				 */
   2234 				if (segstart >= flp[i].fl_end
   2235 				    || segend <= flp[i].fl_start)
   2236 					continue;
   2237 				/*
   2238 				 * If this segment starts before the start of
   2239 				 * the freelist, then limit the segment to be
   2240 				 * loaded to the part that precedes this
   2241 				 * freelist and fall back to normal freelist
   2242 				 * matching.
   2243 				 */
   2244 				if (segstart < flp[i].fl_start) {
   2245 					segend = flp[i].fl_start;
   2246 					break;
   2247 				}
   2248 
   2249 				/*
   2250 				 * We've matched this freelist so remember it.
   2251 				 */
   2252 				freelist = flp[i].fl_freelist;
   2253 
   2254 				/*
   2255 				 * If this segment extends past the end of this
   2256 				 * freelist, bound the segment to the freelist.
   2257 				 */
   2258 				if (segend > flp[i].fl_end)
   2259 					segend = flp[i].fl_end;
   2260 				break;
   2261 			}
   2262 			/*
   2263 			 * If we didn't match one of the port dependent
   2264 			 * freelists, let's try the common ones.
   2265 			 */
   2266 			if (freelist == -1) {
   2267 #ifdef VM_FREELIST_FIRST512M
   2268 				if (need512m && segstart < HALFGIG) {
   2269 					freelist = VM_FREELIST_FIRST512M;
   2270 					if (segend > HALFGIG)
   2271 						segend = HALFGIG;
   2272 				} else
   2273 #endif
   2274 #ifdef VM_FREELIST_FIRST4G
   2275 				if (need4g && segstart < FOURGIG) {
   2276 					freelist = VM_FREELIST_FIRST4G;
   2277 					if (segend > FOURGIG)
   2278 						segend = FOURGIG;
   2279 				} else
   2280 #endif
   2281 					freelist = VM_FREELIST_DEFAULT;
   2282 			}
   2283 
   2284 			/*
   2285 			 * Make sure the memory we provide to uvm doesn't
   2286 			 * include the kernel.
   2287 			 */
   2288 			if (segstart < kernend && segend > kernstart) {
   2289 				if (segstart < kernstart) {
   2290 					/*
   2291 					 * Only add the memory before the
   2292 					 * kernel.
   2293 					 */
   2294 					segend = kernstart;
   2295 				} else if (segend > kernend) {
   2296 					/*
   2297 					 * Only add the memory after the
   2298 					 * kernel.
   2299 					 */
   2300 					segstart = kernend;
   2301 				} else {
   2302 					/*
   2303 					 * Just skip the segment entirely since
   2304 					 * it's completely inside the kernel.
   2305 					 */
   2306 					printf("skipping %#"PRIxPADDR" @ %#"PRIxPADDR" (kernel)\n",
   2307 					    segend - segstart, segstart);
   2308 					break;
   2309 				}
   2310 			}
   2311 
   2312 			/*
   2313 			 * Now we give this segment to uvm.
   2314 			 */
   2315 			printf("adding %#"PRIxPADDR" @ %#"PRIxPADDR" to freelist %d\n",
   2316 			    segend - segstart, segstart, freelist);
   2317 			paddr_t first = atop(segstart);
   2318 			paddr_t last = atop(segend);
   2319 			uvm_page_physload(first, last, first, last, freelist);
   2320 
   2321 			/*
   2322 			 * Start where we finished.
   2323 			 */
   2324 			segstart = segend;
   2325 		}
   2326 	}
   2327 }
   2328 
   2329 /*
   2330  * Start a new LWP
   2331  */
   2332 void
   2333 startlwp(void *arg)
   2334 {
   2335 	ucontext_t * const uc = arg;
   2336 	lwp_t * const l = curlwp;
   2337 	int error __diagused;
   2338 
   2339 	error = cpu_setmcontext(l, &uc->uc_mcontext, uc->uc_flags);
   2340 	KASSERT(error == 0);
   2341 
   2342 	kmem_free(uc, sizeof(ucontext_t));
   2343 	userret(l);
   2344 }
   2345 
   2346 #ifdef COMPAT_NETBSD32
   2347 /*
   2348  * Start a new LWP
   2349  */
   2350 void
   2351 startlwp32(void *arg)
   2352 {
   2353 	ucontext32_t * const uc = arg;
   2354 	lwp_t * const l = curlwp;
   2355 	int error __diagused;
   2356 
   2357 	error = cpu_setmcontext32(l, &uc->uc_mcontext, uc->uc_flags);
   2358 	KASSERT(error == 0);
   2359 
   2360 	/* Note: we are freeing ucontext_t, not ucontext32_t. */
   2361 	kmem_free(uc, sizeof(ucontext_t));
   2362 	userret(l);
   2363 }
   2364 #endif /* COMPAT_NETBSD32 */
   2365 
   2366 #ifdef PARANOIA
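        /*
         * Sanity-check the standard splsw: walk through the IPLs and verify
         * that the COP0 interrupt mask and ci_cpl agree with ipl_sr_map at
         * each step.
         */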
   2367 void
   2368 std_splsw_test(void)
   2369 {
   2370 	struct cpu_info * const ci = curcpu();
   2371 	const uint32_t * const sr_map = ipl_sr_map.sr_bits;
   2372 	uint32_t status = mips_cp0_status_read();
   2373 	uint32_t sr_bits;
   2374 	int s;
   2375 
   2376 	KASSERT((status & MIPS_SR_INT_IE) == 0);
   2377 
   2378 	sr_bits = sr_map[IPL_NONE];
   2379 
   2380 	splx(IPL_NONE);
   2381 	status = mips_cp0_status_read() & MIPS_INT_MASK;
   2382 	KASSERT(status == MIPS_INT_MASK);
   2383 	KASSERT(ci->ci_cpl == IPL_NONE);
   2384 
   2385 	s = splsoftclock();
   2386 	status = mips_cp0_status_read() & MIPS_INT_MASK;
   2387 	KASSERT((status ^ sr_map[IPL_SOFTCLOCK]) == MIPS_INT_MASK);
   2388 	KASSERT(ci->ci_cpl == IPL_SOFTCLOCK);
   2389 	KASSERT(s == IPL_NONE);
   2390 
   2391 	s = splsoftbio();
   2392 	status = mips_cp0_status_read() & MIPS_INT_MASK;
   2393 	KASSERT((status ^ sr_map[IPL_SOFTBIO]) == MIPS_INT_MASK);
   2394 	KASSERT(ci->ci_cpl == IPL_SOFTBIO);
   2395 	KASSERT(s == IPL_SOFTCLOCK);
   2396 
   2397 	s = splsoftnet();
   2398 	status = mips_cp0_status_read() & MIPS_INT_MASK;
   2399 	KASSERT((status ^ sr_map[IPL_SOFTNET]) == MIPS_INT_MASK);
   2400 	KASSERT(ci->ci_cpl == IPL_SOFTNET);
   2401 	KASSERT(s == IPL_SOFTBIO);
   2402 
   2403 	s = splsoftserial();
   2404 	status = mips_cp0_status_read() & MIPS_INT_MASK;
   2405 	KASSERT((status ^ sr_map[IPL_SOFTSERIAL]) == MIPS_INT_MASK);
   2406 	KASSERT(ci->ci_cpl == IPL_SOFTSERIAL);
   2407 	KASSERT(s == IPL_SOFTNET);
   2408 
   2409 	s = splvm();
   2410 	status = mips_cp0_status_read() & MIPS_INT_MASK;
   2411 	KASSERT((status ^ sr_map[IPL_VM]) == MIPS_INT_MASK);
   2412 	KASSERT(ci->ci_cpl == IPL_VM);
   2413 	KASSERT(s == IPL_SOFTSERIAL);
   2414 
   2415 	s = splsched();
   2416 	status = mips_cp0_status_read() & MIPS_INT_MASK;
   2417 	KASSERT((status ^ sr_map[IPL_SCHED]) == MIPS_INT_MASK);
   2418 	KASSERT(ci->ci_cpl == IPL_SCHED);
   2419 	KASSERT(s == IPL_VM);
   2420 
   2421 	s = splhigh();
   2422 	status = mips_cp0_status_read() & MIPS_INT_MASK;
   2423 	KASSERT((status ^ sr_map[IPL_HIGH]) == MIPS_INT_MASK);
   2424 	KASSERT(ci->ci_cpl == IPL_HIGH);
   2425 	KASSERT(s == IPL_SCHED);
   2426 
   2427 	splx(IPL_NONE);
   2428 	status = mips_cp0_status_read() & MIPS_INT_MASK;
   2429 	KASSERT(status == MIPS_INT_MASK);
   2430 	KASSERT(ci->ci_cpl == IPL_NONE);
   2431 
   2432 	for (int r = IPL_SOFTCLOCK; r <= IPL_HIGH; r++) {
   2433 		/*
   2434 		 * As IPL increases, more intrs may be masked but no intrs
   2435 		 * may become unmasked.
   2436 		 */
   2437 		KASSERT((sr_map[r] & sr_bits) == sr_bits);
   2438 		sr_bits |= sr_map[r];
   2439 		s = splraise(r);
   2440 		KASSERT(s == IPL_NONE);
   2441 
   2442 		for (int t = r; t <= IPL_HIGH; t++) {
   2443 			int o = splraise(t);
   2444 			status = mips_cp0_status_read() & MIPS_INT_MASK;
   2445 			KASSERT((status ^ sr_map[t]) == MIPS_INT_MASK);
   2446 			KASSERT(ci->ci_cpl == t);
   2447 			KASSERT(o == r);
   2448 
   2449 			splx(o);
   2450 			status = mips_cp0_status_read() & MIPS_INT_MASK;
   2451 			KASSERT((status ^ sr_map[r]) == MIPS_INT_MASK);
   2452 			KASSERT(ci->ci_cpl == r);
   2453 		}
   2454 
   2455 		splx(s);
   2456 		status = mips_cp0_status_read() & MIPS_INT_MASK;
   2457 		KASSERT((status ^ sr_map[s]) == MIPS_INT_MASK);
   2458 		KASSERT(ci->ci_cpl == s);
   2459 	}
   2460 
   2461 	status = mips_cp0_status_read() & MIPS_INT_MASK;
   2462 	KASSERT(status == MIPS_INT_MASK);
   2463 	KASSERT(ci->ci_cpl == IPL_NONE);
   2464 }
   2465 
   2466 #endif /* PARANOIA */
   2467 
   2468 #ifdef MODULAR
   2469 /*
   2470  * Push any modules loaded by the boot loader.
   2471  */
   2472 void
   2473 module_init_md(void)
   2474 {
   2475 
   2476 	/* XXX Do something board/machine specific here one day... */
   2477 }
   2478 #endif /* MODULAR */
   2479 
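        /*
         * Return the direct-mapped (XKPHYS or KSEG0) virtual address for a
         * physical address, if it has one.
         */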
   2480 bool
   2481 mm_md_direct_mapped_phys(paddr_t pa, vaddr_t *vap)
   2482 {
   2483 #ifdef _LP64
   2484 	if (MIPS_XKSEG_P(pa)) {
   2485 		*vap = MIPS_PHYS_TO_XKPHYS_CACHED(pa);
   2486 		return true;
   2487 	}
   2488 #endif
   2489 	if (MIPS_KSEG0_P(pa)) {
   2490 		*vap = MIPS_PHYS_TO_KSEG0(pa);
   2491 		return true;
   2492 	}
   2493 	return false;
   2494 }
   2495 
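        /*
         * Report the page's virtual-cache color; returns false if the page
         * currently has a conflicting virtual alias.
         */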
   2496 bool
   2497 mm_md_page_color(paddr_t pa, int *colorp)
   2498 {
   2499 	if (MIPS_CACHE_VIRTUAL_ALIAS) {
   2500 		struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
   2501 		KASSERT(pg != NULL);
   2502 		struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
   2503 		*colorp = atop(mdpg->mdpg_first.pv_va);
   2504 		return !mips_cache_badalias(pa, mdpg->mdpg_first.pv_va);
   2505 	}
   2506 	*colorp = 0;
   2507 	return true;
   2508 }
   2509 
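        /*
         * Check whether a physical address is within the range of physical
         * memory (used by the mm(4) driver).
         */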
   2510 int
   2511 mm_md_physacc(paddr_t pa, vm_prot_t prot)
   2512 {
   2513 
   2514 	return (pa < ctob(physmem)) ? 0 : EFAULT;
   2515 }
   2516 
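        /*
         * Decide whether a kernel virtual address is accessible for the
         * mm(4) driver; *handled is set when the answer is definitive and
         * no further uvm_kernacc() check is needed.
         */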
   2517 int
   2518 mm_md_kernacc(void *ptr, vm_prot_t prot, bool *handled)
   2519 {
   2520 	const vaddr_t v = (vaddr_t)ptr;
   2521 
   2522 #ifdef _LP64
   2523 	extern char end[];
   2524 
   2525 	/* For any address < XKPHYS cached address 0, fault */
   2526 	if (v < MIPS_PHYS_TO_XKPHYS_CACHED(0)) {
   2527 		return EFAULT;
   2528 	}
   2529 
   2530 	/* If address < XKPHYS(end of message buffer), good! */
   2531 	if (v < MIPS_PHYS_TO_XKPHYS_CACHED(pmap_limits.avail_end +
   2532 	    mips_round_page(MSGBUFSIZE))) {
   2533 		/* XXX holes in RAM (eg, EdgeRouter 4) */
   2534 		*handled = true;
   2535 		return 0;
   2536 	}
   2537 
   2538 	/* If the address is in KSEG0 and before the end of the kernel, good! */
   2539 	if (MIPS_KSEG0_P(v) && v < (vaddr_t)end) {
   2540 		*handled = true;
   2541 		return 0;
   2542 	}
   2543 
   2544 	/* Otherwise, fall back to the uvm_kernacc() check. */
   2545 #else
   2546 	if (v < MIPS_KSEG0_START) {
   2547 		return EFAULT;
   2548 	}
   2549 	if (v < MIPS_PHYS_TO_KSEG0(pmap_limits.avail_end +
   2550 	    mips_round_page(MSGBUFSIZE))) {
   2551 		*handled = true;
   2552 		return 0;
   2553 	}
   2554 	if (v < MIPS_KSEG2_START) {
   2555 		return EFAULT;
   2556 	}
   2557 #endif
   2558 	*handled = false;
   2559 	return 0;
   2560 }
   2561 
   2562 #if (MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) > 0
   2563 static void
   2564 mips_watchpoint_init(void)
   2565 {
   2566 	/*
   2567 	 * Determine the number of CPU watchpoints supported.
   2568 	 */
   2569 	curcpu()->ci_cpuwatch_count = cpuwatch_discover();
   2570 }
   2571 #endif
   2572 
   2573 
   2574 /*
   2575  * Process the tail end of a posix_spawn() for the child.
   2576  */
   2577 void
   2578 cpu_spawn_return(struct lwp *l)
   2579 {
   2580 	userret(l);
   2581 }
   2582