/*
 * Web-viewer residue (not part of the original source file):
 * Home | History | Annotate | Line # | Download | only in arm
 * cpufunc.c revision 1.156
 */
      1 /*	$NetBSD: cpufunc.c,v 1.155 2015/06/03 02:30:11 hsuenaga Exp $	*/
      2 
      3 /*
      4  * arm7tdmi support code Copyright (c) 2001 John Fremlin
      5  * arm8 support code Copyright (c) 1997 ARM Limited
      6  * arm8 support code Copyright (c) 1997 Causality Limited
      7  * arm9 support code Copyright (C) 2001 ARM Ltd
      8  * arm11 support code Copyright (c) 2007 Microsoft
      9  * cortexa8 support code Copyright (c) 2008 3am Software Foundry
     10  * cortexa8 improvements Copyright (c) Goeran Weinholt
     11  * Copyright (c) 1997 Mark Brinicombe.
     12  * Copyright (c) 1997 Causality Limited
     13  * All rights reserved.
     14  *
     15  * Redistribution and use in source and binary forms, with or without
     16  * modification, are permitted provided that the following conditions
     17  * are met:
     18  * 1. Redistributions of source code must retain the above copyright
     19  *    notice, this list of conditions and the following disclaimer.
     20  * 2. Redistributions in binary form must reproduce the above copyright
     21  *    notice, this list of conditions and the following disclaimer in the
     22  *    documentation and/or other materials provided with the distribution.
     23  * 3. All advertising materials mentioning features or use of this software
     24  *    must display the following acknowledgement:
     25  *	This product includes software developed by Causality Limited.
     26  * 4. The name of Causality Limited may not be used to endorse or promote
     27  *    products derived from this software without specific prior written
     28  *    permission.
     29  *
     30  * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
     31  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
     32  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
     33  * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
     34  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
     35  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
     36  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     37  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     38  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     39  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     40  * SUCH DAMAGE.
     41  *
     42  * RiscBSD kernel project
     43  *
     44  * cpufuncs.c
     45  *
     46  * C functions for supporting CPU / MMU / TLB specific operations.
     47  *
     48  * Created	: 30/01/97
     49  */
     50 
     51 #include <sys/cdefs.h>
     52 __KERNEL_RCSID(0, "$NetBSD: cpufunc.c,v 1.155 2015/06/03 02:30:11 hsuenaga Exp $");
     53 
     54 #include "opt_compat_netbsd.h"
     55 #include "opt_cpuoptions.h"
     56 #include "opt_perfctrs.h"
     57 
     58 #include <sys/types.h>
     59 #include <sys/param.h>
     60 #include <sys/pmc.h>
     61 #include <sys/systm.h>
     62 #include <machine/cpu.h>
     63 #include <machine/bootconfig.h>
     64 #include <arch/arm/arm/disassem.h>
     65 
     66 #include <uvm/uvm.h>
     67 
     68 #include <arm/cpufunc_proto.h>
     69 #include <arm/cpuconf.h>
     70 #include <arm/locore.h>
     71 
     72 #ifdef CPU_XSCALE_80200
     73 #include <arm/xscale/i80200reg.h>
     74 #include <arm/xscale/i80200var.h>
     75 #endif
     76 
     77 #ifdef CPU_XSCALE_80321
     78 #include <arm/xscale/i80321reg.h>
     79 #include <arm/xscale/i80321var.h>
     80 #endif
     81 
     82 #ifdef CPU_XSCALE_IXP425
     83 #include <arm/xscale/ixp425reg.h>
     84 #include <arm/xscale/ixp425var.h>
     85 #endif
     86 
     87 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321)
     88 #include <arm/xscale/xscalereg.h>
     89 #endif
     90 
     91 #if defined(CPU_PJ4B)
     92 #include "opt_cputypes.h"
     93 #include "opt_mvsoc.h"
     94 #include <machine/bus_defs.h>
     95 #if defined(ARMADAXP)
     96 #include <arm/marvell/armadaxpreg.h>
     97 #include <arm/marvell/armadaxpvar.h>
     98 #endif
     99 #endif
    100 
    101 #if defined(PERFCTRS)
    102 struct arm_pmc_funcs *arm_pmc;
    103 #endif
    104 
    105 #if defined(CPU_ARMV7) && (defined(CPU_ARMV6) || defined(CPU_PRE_ARMV6))
    106 bool cpu_armv7_p;
    107 #endif
    108 
    109 #if defined(CPU_ARMV6) && (defined(CPU_ARMV7) || defined(CPU_PRE_ARMV6))
    110 bool cpu_armv6_p;
    111 #endif
    112 
    113 
    114 /* PRIMARY CACHE VARIABLES */
    115 #if (ARM_MMU_V6 + ARM_MMU_V7) != 0
    116 u_int	arm_cache_prefer_mask;
    117 #endif
    118 struct	arm_cache_info arm_pcache;
    119 struct	arm_cache_info arm_scache;
    120 
    121 u_int	arm_dcache_align;
    122 u_int	arm_dcache_align_mask;
    123 
    124 /* 1 == use cpu_sleep(), 0 == don't */
    125 int cpu_do_powersave;
    126 
    127 #ifdef CPU_ARM2
    128 struct cpu_functions arm2_cpufuncs = {
    129 	/* CPU functions */
    130 
    131 	.cf_id			= arm2_id,
    132 	.cf_cpwait		= cpufunc_nullop,
    133 
    134 	/* MMU functions */
    135 
    136 	.cf_control		= (void *)cpufunc_nullop,
    137 
    138 	/* TLB functions */
    139 
    140 	.cf_tlb_flushID		= cpufunc_nullop,
    141 	.cf_tlb_flushID_SE	= (void *)cpufunc_nullop,
    142 	.cf_tlb_flushI		= cpufunc_nullop,
    143 	.cf_tlb_flushI_SE	= (void *)cpufunc_nullop,
    144 	.cf_tlb_flushD		= cpufunc_nullop,
    145 	.cf_tlb_flushD_SE	= (void *)cpufunc_nullop,
    146 
    147 	/* Cache operations */
    148 
    149 	.cf_icache_sync_all	= cpufunc_nullop,
    150 	.cf_icache_sync_range	= (void *) cpufunc_nullop,
    151 
    152 	.cf_dcache_wbinv_all	= arm3_cache_flush,
    153 	.cf_dcache_wbinv_range	= (void *)cpufunc_nullop,
    154 	.cf_dcache_inv_range	= (void *)cpufunc_nullop,
    155 	.cf_dcache_wb_range	= (void *)cpufunc_nullop,
    156 
    157 	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
    158 	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
    159 	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,
    160 
    161 	.cf_idcache_wbinv_all	= cpufunc_nullop,
    162 	.cf_idcache_wbinv_range	= (void *)cpufunc_nullop,
    163 
    164 	/* Other functions */
    165 
    166 	.cf_flush_prefetchbuf	= cpufunc_nullop,
    167 	.cf_drain_writebuf	= cpufunc_nullop,
    168 	.cf_flush_brnchtgt_C	= cpufunc_nullop,
    169 	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,
    170 
    171 	.cf_sleep		= (void *)cpufunc_nullop,
    172 
    173 	/* Soft functions */
    174 
    175 	.cf_dataabt_fixup	= early_abort_fixup,
    176 	.cf_prefetchabt_fixup	= cpufunc_null_fixup,
    177 
    178 	.cf_setup		= (void *)cpufunc_nullop
    179 
    180 };
    181 #endif	/* CPU_ARM2 */
    182 
    183 #ifdef CPU_ARM250
    184 struct cpu_functions arm250_cpufuncs = {
    185 	/* CPU functions */
    186 
    187 	.cf_id			= arm250_id,
    188 	.cf_cpwait		= cpufunc_nullop,
    189 
    190 	/* MMU functions */
    191 
    192 	.cf_control		= (void *)cpufunc_nullop,
    193 
    194 	/* TLB functions */
    195 
    196 	.cf_tlb_flushID		= cpufunc_nullop,
    197 	.cf_tlb_flushID_SE	= (void *)cpufunc_nullop,
    198 	.cf_tlb_flushI		= cpufunc_nullop,
    199 	.cf_tlb_flushI_SE	= (void *)cpufunc_nullop,
    200 	.cf_tlb_flushD		= cpufunc_nullop,
    201 	.cf_tlb_flushD_SE	= (void *)cpufunc_nullop,
    202 
    203 	/* Cache operations */
    204 
    205 	.cf_icache_sync_all	= cpufunc_nullop,
    206 	.cf_icache_sync_range	= (void *) cpufunc_nullop,
    207 
    208 	.cf_dcache_wbinv_all	= arm3_cache_flush,
    209 	.cf_dcache_wbinv_range	= (void *)cpufunc_nullop,
    210 	.cf_dcache_inv_range	= (void *)cpufunc_nullop,
    211 	.cf_dcache_wb_range	= (void *)cpufunc_nullop,
    212 
    213 	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
    214 	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
    215 	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,
    216 
    217 	.cf_idcache_wbinv_all	= cpufunc_nullop,
    218 	.cf_idcache_wbinv_range	= (void *)cpufunc_nullop,
    219 
    220 	/* Other functions */
    221 
    222 	.cf_flush_prefetchbuf	= cpufunc_nullop,
    223 	.cf_drain_writebuf	= cpufunc_nullop,
    224 	.cf_flush_brnchtgt_C	= cpufunc_nullop,
    225 	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,
    226 
    227 	.cf_sleep		= (void *)cpufunc_nullop,
    228 
    229 	/* Soft functions */
    230 
    231 	.cf_dataabt_fixup	= early_abort_fixup,
    232 	.cf_prefetchabt_fixup	= cpufunc_null_fixup,
    233 
    234 	.cf_setup		= (void *)cpufunc_nullop
    235 
    236 };
    237 #endif	/* CPU_ARM250 */
    238 
    239 #ifdef CPU_ARM3
    240 struct cpu_functions arm3_cpufuncs = {
    241 	/* CPU functions */
    242 
    243 	.cf_id			= cpufunc_id,
    244 	.cf_cpwait		= cpufunc_nullop,
    245 
    246 	/* MMU functions */
    247 
    248 	.cf_control		= arm3_control,
    249 
    250 	/* TLB functions */
    251 
    252 	.cf_tlb_flushID		= cpufunc_nullop,
    253 	.cf_tlb_flushID_SE	= (void *)cpufunc_nullop,
    254 	.cf_tlb_flushI		= cpufunc_nullop,
    255 	.cf_tlb_flushI_SE	= (void *)cpufunc_nullop,
    256 	.cf_tlb_flushD		= cpufunc_nullop,
    257 	.cf_tlb_flushD_SE	= (void *)cpufunc_nullop,
    258 
    259 	/* Cache operations */
    260 
    261 	.cf_icache_sync_all	= cpufunc_nullop,
    262 	.cf_icache_sync_range	= (void *) cpufunc_nullop,
    263 
    264 	.cf_dcache_wbinv_all	= arm3_cache_flush,
    265 	.cf_dcache_wbinv_range	= (void *)arm3_cache_flush,
    266 	.cf_dcache_inv_range	= (void *)arm3_cache_flush,
    267 	.cf_dcache_wb_range	= (void *)cpufunc_nullop,
    268 
    269 	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
    270 	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
    271 	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,
    272 
    273 	.cf_idcache_wbinv_all	= arm3_cache_flush,
    274 	.cf_idcache_wbinv_range	= (void *)arm3_cache_flush,
    275 
    276 	/* Other functions */
    277 
    278 	.cf_flush_prefetchbuf	= cpufunc_nullop,
    279 	.cf_drain_writebuf	= cpufunc_nullop,
    280 	.cf_flush_brnchtgt_C	= cpufunc_nullop,
    281 	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,
    282 
    283 	.cf_sleep		= (void *)cpufunc_nullop,
    284 
    285 	/* Soft functions */
    286 
    287 	.cf_dataabt_fixup	= early_abort_fixup,
    288 	.cf_prefetchabt_fixup	= cpufunc_null_fixup,
    289 
    290 	.cf_setup		= (void *)cpufunc_nullop
    291 
    292 };
    293 #endif	/* CPU_ARM3 */
    294 
    295 #ifdef CPU_ARM6
    296 struct cpu_functions arm6_cpufuncs = {
    297 	/* CPU functions */
    298 
    299 	.cf_id			= cpufunc_id,
    300 	.cf_cpwait		= cpufunc_nullop,
    301 
    302 	/* MMU functions */
    303 
    304 	.cf_control		= cpufunc_control,
    305 	.cf_domains		= cpufunc_domains,
    306 	.cf_setttb		= arm67_setttb,
    307 	.cf_faultstatus		= cpufunc_faultstatus,
    308 	.cf_faultaddress	= cpufunc_faultaddress,
    309 
    310 	/* TLB functions */
    311 
    312 	.cf_tlb_flushID		= arm67_tlb_flush,
    313 	.cf_tlb_flushID_SE	= arm67_tlb_purge,
    314 	.cf_tlb_flushI		= arm67_tlb_flush,
    315 	.cf_tlb_flushI_SE	= arm67_tlb_purge,
    316 	.cf_tlb_flushD		= arm67_tlb_flush,
    317 	.cf_tlb_flushD_SE	= arm67_tlb_purge,
    318 
    319 	/* Cache operations */
    320 
    321 	.cf_icache_sync_all	= cpufunc_nullop,
    322 	.cf_icache_sync_range	= (void *) cpufunc_nullop,
    323 
    324 	.cf_dcache_wbinv_all	= arm67_cache_flush,
    325 	.cf_dcache_wbinv_range	= (void *)arm67_cache_flush,
    326 	.cf_dcache_inv_range	= (void *)arm67_cache_flush,
    327 	.cf_dcache_wb_range	= (void *)cpufunc_nullop,
    328 
    329 	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
    330 	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
    331 	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,
    332 
    333 	.cf_idcache_wbinv_all	= arm67_cache_flush,
    334 	.cf_idcache_wbinv_range	= (void *)arm67_cache_flush,
    335 
    336 	/* Other functions */
    337 
    338 	.cf_flush_prefetchbuf	= cpufunc_nullop,
    339 	.cf_drain_writebuf	= cpufunc_nullop,
    340 	.cf_flush_brnchtgt_C	= cpufunc_nullop,
    341 	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,
    342 
    343 	.cf_sleep		= (void *)cpufunc_nullop,
    344 
    345 	/* Soft functions */
    346 
    347 #ifdef ARM6_LATE_ABORT
    348 	.cf_dataabt_fixup	= late_abort_fixup,
    349 #else
    350 	.cf_dataabt_fixup	= early_abort_fixup,
    351 #endif
    352 	.cf_prefetchabt_fixup	= cpufunc_null_fixup,
    353 
    354 	.cf_context_switch	= arm67_context_switch,
    355 
    356 	.cf_setup		= arm6_setup
    357 
    358 };
    359 #endif	/* CPU_ARM6 */
    360 
    361 #ifdef CPU_ARM7
    362 struct cpu_functions arm7_cpufuncs = {
    363 	/* CPU functions */
    364 
    365 	.cf_id			= cpufunc_id,
    366 	.cf_cpwait		= cpufunc_nullop,
    367 
    368 	/* MMU functions */
    369 
    370 	.cf_control		= cpufunc_control,
    371 	.cf_domains		= cpufunc_domains,
    372 	.cf_setttb		= arm67_setttb,
    373 	.cf_faultstatus		= cpufunc_faultstatus,
    374 	.cf_faultaddress	= cpufunc_faultaddress,
    375 
    376 	/* TLB functions */
    377 
    378 	.cf_tlb_flushID		= arm67_tlb_flush,
    379 	.cf_tlb_flushID_SE	= arm67_tlb_purge,
    380 	.cf_tlb_flushI		= arm67_tlb_flush,
    381 	.cf_tlb_flushI_SE	= arm67_tlb_purge,
    382 	.cf_tlb_flushD		= arm67_tlb_flush,
    383 	.cf_tlb_flushD_SE	= arm67_tlb_purge,
    384 
    385 	/* Cache operations */
    386 
    387 	.cf_icache_sync_all	= cpufunc_nullop,
    388 	.cf_icache_sync_range	= (void *)cpufunc_nullop,
    389 
    390 	.cf_dcache_wbinv_all	= arm67_cache_flush,
    391 	.cf_dcache_wbinv_range	= (void *)arm67_cache_flush,
    392 	.cf_dcache_inv_range	= (void *)arm67_cache_flush,
    393 	.cf_dcache_wb_range	= (void *)cpufunc_nullop,
    394 
    395 	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
    396 	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
    397 	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,
    398 
    399 	.cf_idcache_wbinv_all	= arm67_cache_flush,
    400 	.cf_idcache_wbinv_range	= (void *)arm67_cache_flush,
    401 
    402 	/* Other functions */
    403 
    404 	.cf_flush_prefetchbuf	= cpufunc_nullop,
    405 	.cf_drain_writebuf	= cpufunc_nullop,
    406 	.cf_flush_brnchtgt_C	= cpufunc_nullop,
    407 	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,
    408 
    409 	.cf_sleep		= (void *)cpufunc_nullop,
    410 
    411 	/* Soft functions */
    412 
    413 	.cf_dataabt_fixup	= late_abort_fixup,
    414 	.cf_prefetchabt_fixup	= cpufunc_null_fixup,
    415 
    416 	.cf_context_switch	= arm67_context_switch,
    417 
    418 	.cf_setup		= arm7_setup
    419 
    420 };
    421 #endif	/* CPU_ARM7 */
    422 
    423 #ifdef CPU_ARM7TDMI
    424 struct cpu_functions arm7tdmi_cpufuncs = {
    425 	/* CPU functions */
    426 
    427 	.cf_id			= cpufunc_id,
    428 	.cf_cpwait		= cpufunc_nullop,
    429 
    430 	/* MMU functions */
    431 
    432 	.cf_control		= cpufunc_control,
    433 	.cf_domains		= cpufunc_domains,
    434 	.cf_setttb		= arm7tdmi_setttb,
    435 	.cf_faultstatus		= cpufunc_faultstatus,
    436 	.cf_faultaddress	= cpufunc_faultaddress,
    437 
    438 	/* TLB functions */
    439 
    440 	.cf_tlb_flushID		= arm7tdmi_tlb_flushID,
    441 	.cf_tlb_flushID_SE	= arm7tdmi_tlb_flushID_SE,
    442 	.cf_tlb_flushI		= arm7tdmi_tlb_flushID,
    443 	.cf_tlb_flushI_SE	= arm7tdmi_tlb_flushID_SE,
    444 	.cf_tlb_flushD		= arm7tdmi_tlb_flushID,
    445 	.cf_tlb_flushD_SE	= arm7tdmi_tlb_flushID_SE,
    446 
    447 	/* Cache operations */
    448 
    449 	.cf_icache_sync_all	= cpufunc_nullop,
    450 	.cf_icache_sync_range	= (void *)cpufunc_nullop,
    451 
    452 	.cf_dcache_wbinv_all	= arm7tdmi_cache_flushID,
    453 	.cf_dcache_wbinv_range	= (void *)arm7tdmi_cache_flushID,
    454 	.cf_dcache_inv_range	= (void *)arm7tdmi_cache_flushID,
    455 	.cf_dcache_wb_range	= (void *)cpufunc_nullop,
    456 
    457 	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
    458 	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
    459 	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,
    460 
    461 	.cf_idcache_wbinv_all	= arm7tdmi_cache_flushID,
    462 	.cf_idcache_wbinv_range	= (void *)arm7tdmi_cache_flushID,
    463 
    464 	/* Other functions */
    465 
    466 	.cf_flush_prefetchbuf	= cpufunc_nullop,
    467 	.cf_drain_writebuf	= cpufunc_nullop,
    468 	.cf_flush_brnchtgt_C	= cpufunc_nullop,
    469 	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,
    470 
    471 	.cf_sleep		= (void *)cpufunc_nullop,
    472 
    473 	/* Soft functions */
    474 
    475 	.cf_dataabt_fixup	= late_abort_fixup,
    476 	.cf_prefetchabt_fixup	= cpufunc_null_fixup,
    477 
    478 	.cf_context_switch	= arm7tdmi_context_switch,
    479 
    480 	.cf_setup		= arm7tdmi_setup
    481 
    482 };
    483 #endif	/* CPU_ARM7TDMI */
    484 
    485 #ifdef CPU_ARM8
    486 struct cpu_functions arm8_cpufuncs = {
    487 	/* CPU functions */
    488 
    489 	.cf_id			= cpufunc_id,
    490 	.cf_cpwait		= cpufunc_nullop,
    491 
    492 	/* MMU functions */
    493 
    494 	.cf_control		= cpufunc_control,
    495 	.cf_domains		= cpufunc_domains,
    496 	.cf_setttb		= arm8_setttb,
    497 	.cf_faultstatus		= cpufunc_faultstatus,
    498 	.cf_faultaddress	= cpufunc_faultaddress,
    499 
    500 	/* TLB functions */
    501 
    502 	.cf_tlb_flushID		= arm8_tlb_flushID,
    503 	.cf_tlb_flushID_SE	= arm8_tlb_flushID_SE,
    504 	.cf_tlb_flushI		= arm8_tlb_flushID,
    505 	.cf_tlb_flushI_SE	= arm8_tlb_flushID_SE,
    506 	.cf_tlb_flushD		= arm8_tlb_flushID,
    507 	.cf_tlb_flushD_SE	= arm8_tlb_flushID_SE,
    508 
    509 	/* Cache operations */
    510 
    511 	.cf_icache_sync_all	= cpufunc_nullop,
    512 	.cf_icache_sync_range	= (void *)cpufunc_nullop,
    513 
    514 	.cf_dcache_wbinv_all	= arm8_cache_purgeID,
    515 	.cf_dcache_wbinv_range	= (void *)arm8_cache_purgeID,
    516 /*XXX*/	.cf_dcache_inv_range	= (void *)arm8_cache_purgeID,
    517 	.cf_dcache_wb_range	= (void *)arm8_cache_cleanID,
    518 
    519 	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
    520 	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
    521 	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,
    522 
    523 	.cf_idcache_wbinv_all	= arm8_cache_purgeID,
    524 	.cf_idcache_wbinv_range = (void *)arm8_cache_purgeID,
    525 
    526 	/* Other functions */
    527 
    528 	.cf_flush_prefetchbuf	= cpufunc_nullop,
    529 	.cf_drain_writebuf	= cpufunc_nullop,
    530 	.cf_flush_brnchtgt_C	= cpufunc_nullop,
    531 	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,
    532 
    533 	.cf_sleep		= (void *)cpufunc_nullop,
    534 
    535 	/* Soft functions */
    536 
    537 	.cf_dataabt_fixup	= cpufunc_null_fixup,
    538 	.cf_prefetchabt_fixup	= cpufunc_null_fixup,
    539 
    540 	.cf_context_switch	= arm8_context_switch,
    541 
    542 	.cf_setup		= arm8_setup
    543 };
    544 #endif	/* CPU_ARM8 */
    545 
    546 #ifdef CPU_ARM9
    547 struct cpu_functions arm9_cpufuncs = {
    548 	/* CPU functions */
    549 
    550 	.cf_id			= cpufunc_id,
    551 	.cf_cpwait		= cpufunc_nullop,
    552 
    553 	/* MMU functions */
    554 
    555 	.cf_control		= cpufunc_control,
    556 	.cf_domains		= cpufunc_domains,
    557 	.cf_setttb		= arm9_setttb,
    558 	.cf_faultstatus		= cpufunc_faultstatus,
    559 	.cf_faultaddress	= cpufunc_faultaddress,
    560 
    561 	/* TLB functions */
    562 
    563 	.cf_tlb_flushID		= armv4_tlb_flushID,
    564 	.cf_tlb_flushID_SE	= arm9_tlb_flushID_SE,
    565 	.cf_tlb_flushI		= armv4_tlb_flushI,
    566 	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
    567 	.cf_tlb_flushD		= armv4_tlb_flushD,
    568 	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,
    569 
    570 	/* Cache operations */
    571 
    572 	.cf_icache_sync_all	= arm9_icache_sync_all,
    573 	.cf_icache_sync_range	= arm9_icache_sync_range,
    574 
    575 	.cf_dcache_wbinv_all	= arm9_dcache_wbinv_all,
    576 	.cf_dcache_wbinv_range	= arm9_dcache_wbinv_range,
    577 /*XXX*/	.cf_dcache_inv_range	= arm9_dcache_wbinv_range,
    578 	.cf_dcache_wb_range	= arm9_dcache_wb_range,
    579 
    580 	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
    581 	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
    582 	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,
    583 
    584 	.cf_idcache_wbinv_all	= arm9_idcache_wbinv_all,
    585 	.cf_idcache_wbinv_range = arm9_idcache_wbinv_range,
    586 
    587 	/* Other functions */
    588 
    589 	.cf_flush_prefetchbuf	= cpufunc_nullop,
    590 	.cf_drain_writebuf	= armv4_drain_writebuf,
    591 	.cf_flush_brnchtgt_C	= cpufunc_nullop,
    592 	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,
    593 
    594 	.cf_sleep		= (void *)cpufunc_nullop,
    595 
    596 	/* Soft functions */
    597 
    598 	.cf_dataabt_fixup	= cpufunc_null_fixup,
    599 	.cf_prefetchabt_fixup	= cpufunc_null_fixup,
    600 
    601 	.cf_context_switch	= arm9_context_switch,
    602 
    603 	.cf_setup		= arm9_setup
    604 
    605 };
    606 #endif /* CPU_ARM9 */
    607 
    608 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
    609 struct cpu_functions armv5_ec_cpufuncs = {
    610 	/* CPU functions */
    611 
    612 	.cf_id			= cpufunc_id,
    613 	.cf_cpwait		= cpufunc_nullop,
    614 
    615 	/* MMU functions */
    616 
    617 	.cf_control		= cpufunc_control,
    618 	.cf_domains		= cpufunc_domains,
    619 	.cf_setttb		= armv5_ec_setttb,
    620 	.cf_faultstatus		= cpufunc_faultstatus,
    621 	.cf_faultaddress	= cpufunc_faultaddress,
    622 
    623 	/* TLB functions */
    624 
    625 	.cf_tlb_flushID		= armv4_tlb_flushID,
    626 	.cf_tlb_flushID_SE	= arm10_tlb_flushID_SE,
    627 	.cf_tlb_flushI		= armv4_tlb_flushI,
    628 	.cf_tlb_flushI_SE	= arm10_tlb_flushI_SE,
    629 	.cf_tlb_flushD		= armv4_tlb_flushD,
    630 	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,
    631 
    632 	/* Cache operations */
    633 
    634 	.cf_icache_sync_all	= armv5_ec_icache_sync_all,
    635 	.cf_icache_sync_range	= armv5_ec_icache_sync_range,
    636 
    637 	.cf_dcache_wbinv_all	= armv5_ec_dcache_wbinv_all,
    638 	.cf_dcache_wbinv_range	= armv5_ec_dcache_wbinv_range,
    639 /*XXX*/	.cf_dcache_inv_range	= armv5_ec_dcache_wbinv_range,
    640 	.cf_dcache_wb_range	= armv5_ec_dcache_wb_range,
    641 
    642 	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
    643 	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
    644 	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,
    645 
    646 	.cf_idcache_wbinv_all	= armv5_ec_idcache_wbinv_all,
    647 	.cf_idcache_wbinv_range = armv5_ec_idcache_wbinv_range,
    648 
    649 	/* Other functions */
    650 
    651 	.cf_flush_prefetchbuf	= cpufunc_nullop,
    652 	.cf_drain_writebuf	= armv4_drain_writebuf,
    653 	.cf_flush_brnchtgt_C	= cpufunc_nullop,
    654 	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,
    655 
    656 	.cf_sleep		= (void *)cpufunc_nullop,
    657 
    658 	/* Soft functions */
    659 
    660 	.cf_dataabt_fixup	= cpufunc_null_fixup,
    661 	.cf_prefetchabt_fixup	= cpufunc_null_fixup,
    662 
    663 	.cf_context_switch	= arm10_context_switch,
    664 
    665 	.cf_setup		= arm10_setup
    666 
    667 };
    668 #endif /* CPU_ARM9E || CPU_ARM10 */
    669 
    670 #ifdef CPU_ARM10
    671 struct cpu_functions arm10_cpufuncs = {
    672 	/* CPU functions */
    673 
    674 	.cf_id			= cpufunc_id,
    675 	.cf_cpwait		= cpufunc_nullop,
    676 
    677 	/* MMU functions */
    678 
    679 	.cf_control		= cpufunc_control,
    680 	.cf_domains		= cpufunc_domains,
    681 	.cf_setttb		= armv5_setttb,
    682 	.cf_faultstatus		= cpufunc_faultstatus,
    683 	.cf_faultaddress	= cpufunc_faultaddress,
    684 
    685 	/* TLB functions */
    686 
    687 	.cf_tlb_flushID		= armv4_tlb_flushID,
    688 	.cf_tlb_flushID_SE	= arm10_tlb_flushID_SE,
    689 	.cf_tlb_flushI		= armv4_tlb_flushI,
    690 	.cf_tlb_flushI_SE	= arm10_tlb_flushI_SE,
    691 	.cf_tlb_flushD		= armv4_tlb_flushD,
    692 	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,
    693 
    694 	/* Cache operations */
    695 
    696 	.cf_icache_sync_all	= armv5_icache_sync_all,
    697 	.cf_icache_sync_range	= armv5_icache_sync_range,
    698 
    699 	.cf_dcache_wbinv_all	= armv5_dcache_wbinv_all,
    700 	.cf_dcache_wbinv_range	= armv5_dcache_wbinv_range,
    701 /*XXX*/	.cf_dcache_inv_range	= armv5_dcache_wbinv_range,
    702 	.cf_dcache_wb_range	= armv5_dcache_wb_range,
    703 
    704 	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
    705 	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
    706 	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,
    707 
    708 	.cf_idcache_wbinv_all	= armv5_idcache_wbinv_all,
    709 	.cf_idcache_wbinv_range = armv5_idcache_wbinv_range,
    710 
    711 	/* Other functions */
    712 
    713 	.cf_flush_prefetchbuf	= cpufunc_nullop,
    714 	.cf_drain_writebuf	= armv4_drain_writebuf,
    715 	.cf_flush_brnchtgt_C	= cpufunc_nullop,
    716 	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,
    717 
    718 	.cf_sleep		= (void *)cpufunc_nullop,
    719 
    720 	/* Soft functions */
    721 
    722 	.cf_dataabt_fixup	= cpufunc_null_fixup,
    723 	.cf_prefetchabt_fixup	= cpufunc_null_fixup,
    724 
    725 	.cf_context_switch	= arm10_context_switch,
    726 
    727 	.cf_setup		= arm10_setup
    728 
    729 };
    730 #endif /* CPU_ARM10 */
    731 
    732 #ifdef CPU_ARM11
    733 struct cpu_functions arm11_cpufuncs = {
    734 	/* CPU functions */
    735 
    736 	.cf_id			= cpufunc_id,
    737 	.cf_cpwait		= cpufunc_nullop,
    738 
    739 	/* MMU functions */
    740 
    741 	.cf_control		= cpufunc_control,
    742 	.cf_domains		= cpufunc_domains,
    743 	.cf_setttb		= arm11_setttb,
    744 	.cf_faultstatus		= cpufunc_faultstatus,
    745 	.cf_faultaddress	= cpufunc_faultaddress,
    746 
    747 	/* TLB functions */
    748 
    749 	.cf_tlb_flushID		= arm11_tlb_flushID,
    750 	.cf_tlb_flushID_SE	= arm11_tlb_flushID_SE,
    751 	.cf_tlb_flushI		= arm11_tlb_flushI,
    752 	.cf_tlb_flushI_SE	= arm11_tlb_flushI_SE,
    753 	.cf_tlb_flushD		= arm11_tlb_flushD,
    754 	.cf_tlb_flushD_SE	= arm11_tlb_flushD_SE,
    755 
    756 	/* Cache operations */
    757 
    758 	.cf_icache_sync_all	= armv6_icache_sync_all,
    759 	.cf_icache_sync_range	= armv6_icache_sync_range,
    760 
    761 	.cf_dcache_wbinv_all	= armv6_dcache_wbinv_all,
    762 	.cf_dcache_wbinv_range	= armv6_dcache_wbinv_range,
    763 	.cf_dcache_inv_range	= armv6_dcache_inv_range,
    764 	.cf_dcache_wb_range	= armv6_dcache_wb_range,
    765 
    766 	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
    767 	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
    768 	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,
    769 
    770 	.cf_idcache_wbinv_all	= armv6_idcache_wbinv_all,
    771 	.cf_idcache_wbinv_range = armv6_idcache_wbinv_range,
    772 
    773 	/* Other functions */
    774 
    775 	.cf_flush_prefetchbuf	= cpufunc_nullop,
    776 	.cf_drain_writebuf	= arm11_drain_writebuf,
    777 	.cf_flush_brnchtgt_C	= cpufunc_nullop,
    778 	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,
    779 
    780 	.cf_sleep		= arm11_sleep,
    781 
    782 	/* Soft functions */
    783 
    784 	.cf_dataabt_fixup	= cpufunc_null_fixup,
    785 	.cf_prefetchabt_fixup	= cpufunc_null_fixup,
    786 
    787 	.cf_context_switch	= arm11_context_switch,
    788 
    789 	.cf_setup		= arm11_setup
    790 
    791 };
    792 #endif /* CPU_ARM11 */
    793 
    794 #ifdef CPU_ARM1136
    795 struct cpu_functions arm1136_cpufuncs = {
    796 	/* CPU functions */
    797 
    798 	.cf_id			= cpufunc_id,
    799 	.cf_cpwait		= cpufunc_nullop,
    800 
    801 	/* MMU functions */
    802 
    803 	.cf_control		= cpufunc_control,
    804 	.cf_domains		= cpufunc_domains,
    805 	.cf_setttb		= arm11_setttb,
    806 	.cf_faultstatus		= cpufunc_faultstatus,
    807 	.cf_faultaddress	= cpufunc_faultaddress,
    808 
    809 	/* TLB functions */
    810 
    811 	.cf_tlb_flushID		= arm11_tlb_flushID,
    812 	.cf_tlb_flushID_SE	= arm11_tlb_flushID_SE,
    813 	.cf_tlb_flushI		= arm11_tlb_flushI,
    814 	.cf_tlb_flushI_SE	= arm11_tlb_flushI_SE,
    815 	.cf_tlb_flushD		= arm11_tlb_flushD,
    816 	.cf_tlb_flushD_SE	= arm11_tlb_flushD_SE,
    817 
    818 	/* Cache operations */
    819 
    820 	.cf_icache_sync_all	= arm11x6_icache_sync_all,	/* 411920 */
    821 	.cf_icache_sync_range	= arm11x6_icache_sync_range,	/* 371025 */
    822 
    823 	.cf_dcache_wbinv_all	= arm11x6_dcache_wbinv_all,	/* 411920 */
    824 	.cf_dcache_wbinv_range	= armv6_dcache_wbinv_range,
    825 	.cf_dcache_inv_range	= armv6_dcache_inv_range,
    826 	.cf_dcache_wb_range	= armv6_dcache_wb_range,
    827 
    828 	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
    829 	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
    830 	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,
    831 
    832 	.cf_idcache_wbinv_all	= arm11x6_idcache_wbinv_all,	/* 411920 */
    833 	.cf_idcache_wbinv_range = arm11x6_idcache_wbinv_range,	/* 371025 */
    834 
    835 	/* Other functions */
    836 
    837 	.cf_flush_prefetchbuf	= arm11x6_flush_prefetchbuf,
    838 	.cf_drain_writebuf	= arm11_drain_writebuf,
    839 	.cf_flush_brnchtgt_C	= cpufunc_nullop,
    840 	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,
    841 
    842 	.cf_sleep		= arm11_sleep,	/* arm1136_sleep_rev0 */
    843 
    844 	/* Soft functions */
    845 
    846 	.cf_dataabt_fixup	= cpufunc_null_fixup,
    847 	.cf_prefetchabt_fixup	= cpufunc_null_fixup,
    848 
    849 	.cf_context_switch	= arm11_context_switch,
    850 
    851 	.cf_setup		= arm11x6_setup
    852 
    853 };
    854 #endif /* CPU_ARM1136 */
    855 
    856 #ifdef CPU_ARM1176
    857 struct cpu_functions arm1176_cpufuncs = {
    858 	/* CPU functions */
    859 
    860 	.cf_id			= cpufunc_id,
    861 	.cf_cpwait		= cpufunc_nullop,
    862 
    863 	/* MMU functions */
    864 
    865 	.cf_control		= cpufunc_control,
    866 	.cf_domains		= cpufunc_domains,
    867 	.cf_setttb		= arm11_setttb,
    868 	.cf_faultstatus		= cpufunc_faultstatus,
    869 	.cf_faultaddress	= cpufunc_faultaddress,
    870 
    871 	/* TLB functions */
    872 
    873 	.cf_tlb_flushID		= arm11_tlb_flushID,
    874 	.cf_tlb_flushID_SE	= arm11_tlb_flushID_SE,
    875 	.cf_tlb_flushI		= arm11_tlb_flushI,
    876 	.cf_tlb_flushI_SE	= arm11_tlb_flushI_SE,
    877 	.cf_tlb_flushD		= arm11_tlb_flushD,
    878 	.cf_tlb_flushD_SE	= arm11_tlb_flushD_SE,
    879 
    880 	/* Cache operations */
    881 
    882 	.cf_icache_sync_all	= arm11x6_icache_sync_all,	/* 415045 */
    883 	.cf_icache_sync_range	= arm11x6_icache_sync_range,	/* 371367 */
    884 
    885 	.cf_dcache_wbinv_all	= arm11x6_dcache_wbinv_all,	/* 415045 */
    886 	.cf_dcache_wbinv_range	= armv6_dcache_wbinv_range,
    887 	.cf_dcache_inv_range	= armv6_dcache_inv_range,
    888 	.cf_dcache_wb_range	= armv6_dcache_wb_range,
    889 
    890 	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
    891 	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
    892 	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,
    893 
    894 	.cf_idcache_wbinv_all	= arm11x6_idcache_wbinv_all,	/* 415045 */
    895 	.cf_idcache_wbinv_range = arm11x6_idcache_wbinv_range,	/* 371367 */
    896 
    897 	/* Other functions */
    898 
    899 	.cf_flush_prefetchbuf	= arm11x6_flush_prefetchbuf,
    900 	.cf_drain_writebuf	= arm11_drain_writebuf,
    901 	.cf_flush_brnchtgt_C	= cpufunc_nullop,
    902 	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,
    903 
    904 	.cf_sleep		= arm11x6_sleep,		/* no ref. */
    905 
    906 	/* Soft functions */
    907 
    908 	.cf_dataabt_fixup	= cpufunc_null_fixup,
    909 	.cf_prefetchabt_fixup	= cpufunc_null_fixup,
    910 
    911 	.cf_context_switch	= arm11_context_switch,
    912 
    913 	.cf_setup		= arm11x6_setup
    914 
    915 };
    916 #endif /* CPU_ARM1176 */
    917 
    918 
    919 #ifdef CPU_ARM11MPCORE
/*
 * ARM11 MPCore dispatch table: ARMv6 whole-cache and ARM11 MMU/TLB
 * primitives combined with ARMv5-style ranged cache operations.
 * No outer (L2) cache is driven here, so the sdcache hooks are no-ops.
 */
struct cpu_functions arm11mpcore_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm11_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm11_tlb_flushID,
	.cf_tlb_flushID_SE	= arm11_tlb_flushID_SE,
	.cf_tlb_flushI		= arm11_tlb_flushI,
	.cf_tlb_flushI_SE	= arm11_tlb_flushI_SE,
	.cf_tlb_flushD		= arm11_tlb_flushD,
	.cf_tlb_flushD_SE	= arm11_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv6_icache_sync_all,
	.cf_icache_sync_range	= armv5_icache_sync_range,

	.cf_dcache_wbinv_all	= armv6_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= armv5_dcache_wbinv_range,
	.cf_dcache_inv_range	= armv5_dcache_inv_range,
	.cf_dcache_wb_range	= armv5_dcache_wb_range,

	/* The casts reuse the generic no-op despite its differing
	 * prototype; these slots are never meaningful without an L2. */
	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= armv6_idcache_wbinv_all,
	.cf_idcache_wbinv_range = armv5_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= arm11_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= arm11_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm11_context_switch,

	.cf_setup		= arm11mpcore_setup

};
    979 #endif /* CPU_ARM11MPCORE */
    980 
    981 #ifdef CPU_SA110
/*
 * SA-110 (StrongARM) dispatch table: ARMv4 TLB/write-buffer primitives
 * with the SA-1 family cache routines.
 */
struct cpu_functions sa110_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= sa1_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= sa1_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= sa1_cache_syncI,
	.cf_icache_sync_range	= sa1_cache_syncI_rng,

	.cf_dcache_wbinv_all	= sa1_cache_purgeD,
	.cf_dcache_wbinv_range	= sa1_cache_purgeD_rng,
	/* XXX-marked below: dcache invalidate is implemented with a
	 * purge (write-back + invalidate) rather than a pure invalidate. */
/*XXX*/	.cf_dcache_inv_range	= sa1_cache_purgeD_rng,
	.cf_dcache_wb_range	= sa1_cache_cleanD_rng,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= sa1_cache_purgeID,
	.cf_idcache_wbinv_range	= sa1_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= sa110_context_switch,

	.cf_setup		= sa110_setup
};
   1040 #endif	/* CPU_SA110 */
   1041 
   1042 #if defined(CPU_SA1100) || defined(CPU_SA1110)
/*
 * SA-1100/SA-1110 dispatch table: shares the SA-1 cache and ARMv4
 * TLB routines with the SA-110, but uses SA-11x0 specific read-buffer
 * drain, sleep, and context-switch entry points.
 */
struct cpu_functions sa11x0_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= sa1_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= sa1_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= sa1_cache_syncI,
	.cf_icache_sync_range	= sa1_cache_syncI_rng,

	.cf_dcache_wbinv_all	= sa1_cache_purgeD,
	.cf_dcache_wbinv_range	= sa1_cache_purgeD_rng,
	/* XXX-marked below: dcache invalidate is implemented with a
	 * purge (write-back + invalidate) rather than a pure invalidate. */
/*XXX*/	.cf_dcache_inv_range	= sa1_cache_purgeD_rng,
	.cf_dcache_wb_range	= sa1_cache_cleanD_rng,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= sa1_cache_purgeID,
	.cf_idcache_wbinv_range	= sa1_cache_purgeID_rng,

	/* Other functions */

	/* The prefetch-buffer flush slot drains the SA-11x0 read buffer. */
	.cf_flush_prefetchbuf	= sa11x0_drain_readbuf,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= sa11x0_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= sa11x0_context_switch,

	.cf_setup		= sa11x0_setup
};
   1101 #endif	/* CPU_SA1100 || CPU_SA1110 */
   1102 
   1103 #if defined(CPU_FA526)
/*
 * Faraday FA526 dispatch table: dedicated fa526_* routines for MMU,
 * cache, prefetch, branch target, and sleep handling, over the common
 * ARMv4 TLB/write-buffer primitives.
 */
struct cpu_functions fa526_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= fa526_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= fa526_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= fa526_tlb_flushI_SE,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= fa526_icache_sync_all,
	.cf_icache_sync_range	= fa526_icache_sync_range,

	.cf_dcache_wbinv_all	= fa526_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= fa526_dcache_wbinv_range,
	.cf_dcache_inv_range	= fa526_dcache_inv_range,
	.cf_dcache_wb_range	= fa526_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= fa526_idcache_wbinv_all,
	.cf_idcache_wbinv_range	= fa526_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= fa526_flush_prefetchbuf,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= fa526_flush_brnchtgt_E,

	.cf_sleep		= fa526_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= fa526_context_switch,

	.cf_setup		= fa526_setup
};
   1162 #endif	/* CPU_FA526 */
   1163 
   1164 #ifdef CPU_IXP12X0
/*
 * Intel IXP12x0 dispatch table: StrongARM-core part, so it reuses the
 * SA-1 cache routines and ARMv4 TLB primitives, with IXP12x0-specific
 * read-buffer drain and context switch.
 */
struct cpu_functions ixp12x0_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= sa1_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= sa1_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= sa1_cache_syncI,
	.cf_icache_sync_range	= sa1_cache_syncI_rng,

	.cf_dcache_wbinv_all	= sa1_cache_purgeD,
	.cf_dcache_wbinv_range	= sa1_cache_purgeD_rng,
	/* XXX-marked below: dcache invalidate is implemented with a
	 * purge (write-back + invalidate) rather than a pure invalidate. */
/*XXX*/	.cf_dcache_inv_range	= sa1_cache_purgeD_rng,
	.cf_dcache_wb_range	= sa1_cache_cleanD_rng,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= sa1_cache_purgeID,
	.cf_idcache_wbinv_range	= sa1_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf	= ixp12x0_drain_readbuf,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= ixp12x0_context_switch,

	.cf_setup		= ixp12x0_setup
};
   1223 #endif	/* CPU_IXP12X0 */
   1224 
   1225 #if defined(CPU_XSCALE)
/*
 * Intel XScale dispatch table: XScale-specific control, TTB, cache and
 * sleep routines over ARMv4 TLB primitives.  Unlike most tables here,
 * cf_cpwait is a real drain (xscale_cpwait) rather than a no-op.
 */
struct cpu_functions xscale_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= xscale_cpwait,

	/* MMU functions */

	.cf_control		= xscale_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= xscale_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= xscale_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= xscale_cache_syncI,
	.cf_icache_sync_range	= xscale_cache_syncI_rng,

	.cf_dcache_wbinv_all	= xscale_cache_purgeD,
	.cf_dcache_wbinv_range	= xscale_cache_purgeD_rng,
	.cf_dcache_inv_range	= xscale_cache_flushD_rng,
	.cf_dcache_wb_range	= xscale_cache_cleanD_rng,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= xscale_cache_purgeID,
	.cf_idcache_wbinv_range = xscale_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= xscale_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= xscale_context_switch,

	.cf_setup		= xscale_setup
};
   1284 #endif /* CPU_XSCALE */
   1285 
   1286 #if defined(CPU_ARMV7)
/*
 * Generic ARMv7 dispatch table: armv7_* routines throughout; the
 * sdcache hooks are no-ops here (SoC-level L2 handling, if any, is
 * configured elsewhere).
 */
struct cpu_functions armv7_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= armv7_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv7_tlb_flushID,
	.cf_tlb_flushID_SE	= armv7_tlb_flushID_SE,
	.cf_tlb_flushI		= armv7_tlb_flushI,
	.cf_tlb_flushI_SE	= armv7_tlb_flushI_SE,
	.cf_tlb_flushD		= armv7_tlb_flushD,
	.cf_tlb_flushD_SE	= armv7_tlb_flushD_SE,

	/* Cache operations (note: members are not in the declaration
	 * order used by the other tables in this file) */

	.cf_icache_sync_all	= armv7_icache_sync_all,
	.cf_dcache_wbinv_all	= armv7_dcache_wbinv_all,

	.cf_dcache_inv_range	= armv7_dcache_inv_range,
	.cf_dcache_wb_range	= armv7_dcache_wb_range,
	.cf_dcache_wbinv_range	= armv7_dcache_wbinv_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_icache_sync_range	= armv7_icache_sync_range,
	.cf_idcache_wbinv_range = armv7_idcache_wbinv_range,


	.cf_idcache_wbinv_all	= armv7_idcache_wbinv_all,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv7_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= armv7_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= armv7_context_switch,

	.cf_setup		= armv7_setup

};
   1348 #endif /* CPU_ARMV7 */
   1349 
   1350 #ifdef CPU_PJ4B
/*
 * Marvell PJ4B (ARMv7-compatible core) dispatch table.  Mostly the
 * generic armv7_* routines; cf_cpwait is a write-buffer drain, the
 * separate I/D TLB slots all map to the unified flushID variants, and
 * icache_sync_all is the combined I+D write-back/invalidate.
 */
struct cpu_functions pj4bv7_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= armv7_drain_writebuf,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= armv7_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv7_tlb_flushID,
	.cf_tlb_flushID_SE	= armv7_tlb_flushID_SE,
	.cf_tlb_flushI		= armv7_tlb_flushID,
	.cf_tlb_flushI_SE	= armv7_tlb_flushID_SE,
	.cf_tlb_flushD		= armv7_tlb_flushID,
	.cf_tlb_flushD_SE	= armv7_tlb_flushID_SE,

	/* Cache operations (see also pj4bv7_setup) */
	.cf_icache_sync_all	= armv7_idcache_wbinv_all,
	.cf_icache_sync_range	= armv7_icache_sync_range,

	.cf_dcache_wbinv_all	= armv7_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= armv7_dcache_wbinv_range,
	.cf_dcache_inv_range	= armv7_dcache_inv_range,
	.cf_dcache_wb_range	= armv7_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= armv7_idcache_wbinv_all,
	.cf_idcache_wbinv_range	= armv7_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv7_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= pj4b_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= armv7_context_switch,

	.cf_setup		= pj4bv7_setup
};
   1408 #endif /* CPU_PJ4B */
   1409 
   1410 #ifdef CPU_SHEEVA
/*
 * Marvell Sheeva (ARMv5TE "Feroceon"-class) dispatch table: ARMv5 EC
 * MMU/cache primitives, ARMv4/ARM10 TLB routines, and sheeva_*
 * ranged dcache operations.
 */
struct cpu_functions sheeva_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= armv5_ec_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= arm10_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= arm10_tlb_flushI_SE,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv5_ec_icache_sync_all,
	.cf_icache_sync_range	= armv5_ec_icache_sync_range,

	.cf_dcache_wbinv_all	= armv5_ec_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= sheeva_dcache_wbinv_range,
	.cf_dcache_inv_range	= sheeva_dcache_inv_range,
	.cf_dcache_wb_range	= sheeva_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= armv5_ec_idcache_wbinv_all,
	.cf_idcache_wbinv_range = sheeva_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)sheeva_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm10_context_switch,

	.cf_setup		= sheeva_setup
};
   1469 #endif /* CPU_SHEEVA */
   1470 
   1471 
   1472 /*
   1473  * Global constants also used by locore.s
   1474  */
   1475 
struct cpu_functions cpufuncs;	/* ops vector selected at boot */
u_int cputype;			/* masked CPU id; set in set_cpufuncs() */
   1478 
   1479 #if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) || \
   1480     defined(CPU_ARM9E) || defined(CPU_ARM10) || defined(CPU_FA526) || \
   1481     defined(CPU_SHEEVA) || \
   1482     defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
   1483     defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425) || \
   1484     defined(CPU_ARMV6) || defined(CPU_ARMV7)
static void get_cachetype_cp15(void);

/* Additional cache information local to this file.  Log2 of some of the
   above numbers, cached for the arm9 dcache index/set loop setup in
   set_cpufuncs().  */
static int	arm_dcache_log2_nsets;		/* log2(number of sets) */
static int	arm_dcache_log2_assoc;		/* log2(number of ways) */
static int	arm_dcache_log2_linesize;	/* log2(line size in bytes) */
   1492 
   1493 #if (ARM_MMU_V6 + ARM_MMU_V7) > 0
/*
 * Select a cache with CSSELR ('cssr' encodes level and, via
 * CPU_CSSR_InD, instruction vs. data) and return the corresponding
 * Cache Size ID register (CCSIDR) value.
 */
static inline u_int
get_cachesize_cp15(int cssr)
{
#if defined(CPU_ARMV7)
	__asm volatile(".arch\tarmv7a");

	armreg_csselr_write(cssr);
	arm_isb();			 /* sync to the new cssr */

#else
	/* Non-armv7 build: write CSSELR with a raw coprocessor move. */
	__asm volatile("mcr p15, 1, %0, c0, c0, 2" :: "r" (cssr) : "memory");
#endif
	return armreg_ccsidr_read();
}
   1508 #endif
   1509 
   1510 #if (ARM_MMU_V6 + ARM_MMU_V7) > 0
/*
 * Decode the geometry of one cache level from CCSIDR and fill in
 * 'info'.  'clidr' is the 3-bit per-level cache-type field extracted
 * from CLIDR by the caller; 'level' is 0-based (0 = L1, written to
 * CSSELR as level << 1).  For level 0 the file-local log2 dcache stats
 * and arm_cache_prefer_mask are updated as side effects.
 */
static void
get_cacheinfo_clidr(struct arm_cache_info *info, u_int level, u_int clidr)
{
	u_int csid;

	/* Bits 1-2 set means a data or unified cache is present. */
	if (clidr & 6) {
		csid = get_cachesize_cp15(level << 1); /* select dcache values */
		info->dcache_sets = CPU_CSID_NUMSETS(csid) + 1;
		info->dcache_ways = CPU_CSID_ASSOC(csid) + 1;
		info->dcache_line_size = 1U << (CPU_CSID_LEN(csid) + 4);
		/* one way spans all sets: way_size = line_size * sets */
		info->dcache_way_size =
		    info->dcache_line_size * info->dcache_sets;
		info->dcache_size = info->dcache_way_size * info->dcache_ways;

		if (level == 0) {
			arm_dcache_log2_assoc = CPU_CSID_ASSOC(csid) + 1;
			arm_dcache_log2_linesize = CPU_CSID_LEN(csid) + 4;
			/* round set count up to a power of two */
			arm_dcache_log2_nsets =
			    31 - __builtin_clz(info->dcache_sets*2-1);
		}
	}

	info->cache_unified = (clidr == 4);

	/* Non-L1 levels are physically indexed/tagged. */
	if (level > 0) {
		info->dcache_type = CACHE_TYPE_PIPT;
		info->icache_type = CACHE_TYPE_PIPT;
	}

	if (info->cache_unified) {
		/* unified: the icache figures mirror the dcache ones */
		info->icache_ways = info->dcache_ways;
		info->icache_line_size = info->dcache_line_size;
		info->icache_way_size = info->dcache_way_size;
		info->icache_size = info->dcache_size;
	} else {
		csid = get_cachesize_cp15((level << 1)|CPU_CSSR_InD); /* select icache values */
		info->icache_sets = CPU_CSID_NUMSETS(csid) + 1;
		info->icache_ways = CPU_CSID_ASSOC(csid) + 1;
		info->icache_line_size = 1U << (CPU_CSID_LEN(csid) + 4);
		info->icache_way_size = info->icache_line_size * info->icache_sets;
		info->icache_size = info->icache_way_size * info->icache_ways;
	}
	/*
	 * If each L1 way fits in a page, virtual indexing cannot alias
	 * and no page-colour preference is needed.
	 */
	if (level == 0
	    && info->dcache_way_size <= PAGE_SIZE
	    && info->icache_way_size <= PAGE_SIZE) {
		arm_cache_prefer_mask = 0;
	}
}
   1559 #endif /* (ARM_MMU_V6 + ARM_MMU_V7) > 0 */
   1560 
/*
 * Probe the cache geometry via the CP15 Cache Type Register (CTR) and
 * fill in arm_pcache (and, for the ARMv7 CLIDR format, arm_scache),
 * plus the file-local log2 dcache stats, arm_dcache_align[_mask], and
 * arm_cache_prefer_mask.  Handles both the classic pre-v7 CTR layout
 * and the ARMv7 format-4 layout (CLIDR/CCSIDR walk).
 */
static void
get_cachetype_cp15(void)
{
	u_int ctype, isize, dsize;
	u_int multiplier;

	ctype = armreg_ctr_read();

	/*
	 * ...and thus spake the ARM ARM:
	 *
	 * If an <opcode2> value corresponding to an unimplemented or
	 * reserved ID register is encountered, the System Control
	 * processor returns the value of the main ID register.
	 */
	if (ctype == cpu_id())
		goto out;

#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
	/* ARMv7-style CTR: geometry lives in CLIDR/CCSIDR instead. */
	if (CPU_CT_FORMAT(ctype) == 4) {
		u_int clidr = armreg_clidr_read();

		if (CPU_CT4_L1IPOLICY(ctype) == CPU_CT4_L1_PIPT) {
			arm_pcache.icache_type = CACHE_TYPE_PIPT;
		} else {
			arm_pcache.icache_type = CACHE_TYPE_VIPT;
			arm_cache_prefer_mask = PAGE_SIZE;
		}
#ifdef CPU_CORTEX
		if (CPU_ID_CORTEX_P(cpu_id())) {
			/* Cortex L1 dcaches are PIPT. */
			arm_pcache.dcache_type = CACHE_TYPE_PIPT;
		} else
#endif
		{
			arm_pcache.dcache_type = CACHE_TYPE_VIPT;
		}
		arm_pcache.cache_type = CPU_CT_CTYPE_WB14;

		/* L1 from CLIDR bits 2:0, then L2 from bits 5:3 if present. */
		get_cacheinfo_clidr(&arm_pcache, 0, clidr & 7);
		arm_dcache_align = arm_pcache.dcache_line_size;
		clidr >>= 3;
		if (clidr & 7) {
			get_cacheinfo_clidr(&arm_scache, 1, clidr & 7);
			/* use the smallest line size for DMA alignment */
			if (arm_scache.dcache_line_size < arm_dcache_align)
				arm_dcache_align = arm_scache.dcache_line_size;
		}
		/*
		 * The pmap cleans an entire way for an exec page so
		 * we don't care that it's VIPT anymore.
		 */
		if (arm_pcache.dcache_type == CACHE_TYPE_PIPT) {
			arm_cache_prefer_mask = 0;
		}
		goto out;
	}
#endif /* ARM_MMU_V6 + ARM_MMU_V7 > 0 */

	/* Classic CTR layout: S bit clear means a unified cache. */
	if ((ctype & CPU_CT_S) == 0)
		arm_pcache.cache_unified = 1;

	/*
	 * If you want to know how this code works, go read the ARM ARM.
	 */

	arm_pcache.cache_type = CPU_CT_CTYPE(ctype);

	if (arm_pcache.cache_unified == 0) {
		isize = CPU_CT_ISIZE(ctype);
		/* M bit selects the 3*2^N vs 2*2^N size/assoc multiplier */
		multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
		arm_pcache.icache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
		if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
			if (isize & CPU_CT_xSIZE_M)
				arm_pcache.icache_line_size = 0; /* not present */
			else
				arm_pcache.icache_ways = 1;
		} else {
			arm_pcache.icache_ways = multiplier <<
			    (CPU_CT_xSIZE_ASSOC(isize) - 1);
#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
			arm_pcache.icache_type = CACHE_TYPE_VIPT;
			/* P bit: page-colouring restriction applies */
			if (CPU_CT_xSIZE_P & isize)
				arm_cache_prefer_mask |=
				    __BIT(9 + CPU_CT_xSIZE_SIZE(isize)
					  - CPU_CT_xSIZE_ASSOC(isize))
				    - PAGE_SIZE;
#endif
		}
		arm_pcache.icache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
		arm_pcache.icache_way_size =
		    __BIT(9 + CPU_CT_xSIZE_SIZE(isize) - CPU_CT_xSIZE_ASSOC(isize));
	}

	dsize = CPU_CT_DSIZE(ctype);
	multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
	arm_pcache.dcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
	if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
		if (dsize & CPU_CT_xSIZE_M)
			arm_pcache.dcache_line_size = 0; /* not present */
		else
			arm_pcache.dcache_ways = 1;
	} else {
		arm_pcache.dcache_ways = multiplier <<
		    (CPU_CT_xSIZE_ASSOC(dsize) - 1);
#if (ARM_MMU_V6) > 0
		arm_pcache.dcache_type = CACHE_TYPE_VIPT;
		if ((CPU_CT_xSIZE_P & dsize)
		    && CPU_ID_ARM11_P(curcpu()->ci_arm_cpuid)) {
			arm_cache_prefer_mask |=
			    __BIT(9 + CPU_CT_xSIZE_SIZE(dsize)
				  - CPU_CT_xSIZE_ASSOC(dsize)) - PAGE_SIZE;
		}
#endif
	}
	arm_pcache.dcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
	arm_pcache.dcache_way_size =
	    __BIT(9 + CPU_CT_xSIZE_SIZE(dsize) - CPU_CT_xSIZE_ASSOC(dsize));

	arm_dcache_align = arm_pcache.dcache_line_size;

	arm_dcache_log2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
	arm_dcache_log2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
	arm_dcache_log2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
	    CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);

 out:
	KASSERTMSG(arm_dcache_align <= CACHE_LINE_SIZE,
	    "arm_dcache_align=%u CACHE_LINE_SIZE=%u",
	    arm_dcache_align, CACHE_LINE_SIZE);
	arm_dcache_align_mask = arm_dcache_align - 1;
}
#endif /* ARM7TDMI || ARM8 || ARM9 || ARM9E || ARM10 || FA526 || SHEEVA || XSCALE || ARMV6 || ARMV7 */
   1692 
   1693 #if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3) || \
   1694     defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_SA110) || \
   1695     defined(CPU_SA1100) || defined(CPU_SA1110) || defined(CPU_IXP12X0)
   1696 /* Cache information for CPUs without cache type registers. */
struct cachetab {
	uint32_t ct_cpuid;		/* CPU id, CPU_ID_CPU_MASK-masked */
	int	ct_pcache_type;		/* CPU_CT_CTYPE_* write policy */
	int	ct_pcache_unified;	/* nonzero if I and D are unified */
	int	ct_pdcache_size;	/* dcache total size (bytes) */
	int	ct_pdcache_line_size;	/* dcache line size (bytes) */
	int	ct_pdcache_ways;	/* dcache associativity */
	int	ct_picache_size;	/* icache total size (bytes) */
	int	ct_picache_line_size;	/* icache line size (bytes) */
	int	ct_picache_ways;	/* icache associativity */
};
   1708 
/* Table terminated by an all-zero sentinel entry (ct_cpuid == 0). */
struct cachetab cachetab[] = {
    /* cpuid,           cache type,       u,  dsiz, ls, wy,  isiz, ls, wy */
    { CPU_ID_ARM2,      0,                1,     0,  0,  0,     0,  0,  0 },
    { CPU_ID_ARM250,    0,                1,     0,  0,  0,     0,  0,  0 },
    { CPU_ID_ARM3,      CPU_CT_CTYPE_WT,  1,  4096, 16, 64,     0,  0,  0 },
    { CPU_ID_ARM610,	CPU_CT_CTYPE_WT,  1,  4096, 16, 64,     0,  0,  0 },
    { CPU_ID_ARM710,    CPU_CT_CTYPE_WT,  1,  8192, 32,  4,     0,  0,  0 },
    { CPU_ID_ARM7500,   CPU_CT_CTYPE_WT,  1,  4096, 16,  4,     0,  0,  0 },
    { CPU_ID_ARM710A,   CPU_CT_CTYPE_WT,  1,  8192, 16,  4,     0,  0,  0 },
    { CPU_ID_ARM7500FE, CPU_CT_CTYPE_WT,  1,  4096, 16,  4,     0,  0,  0 },
    /* XXX is this type right for SA-1? */
    { CPU_ID_SA110,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 },
    { CPU_ID_SA1100,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
    { CPU_ID_SA1110,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
    { CPU_ID_IXP1200,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 }, /* XXX */
    { 0, 0, 0, 0, 0, 0, 0, 0}
};
   1726 
   1727 static void get_cachetype_table(void);
   1728 
   1729 static void
   1730 get_cachetype_table(void)
   1731 {
   1732 	int i;
   1733 	uint32_t cpuid = cpu_id();
   1734 
   1735 	for (i = 0; cachetab[i].ct_cpuid != 0; i++) {
   1736 		if (cachetab[i].ct_cpuid == (cpuid & CPU_ID_CPU_MASK)) {
   1737 			arm_pcache.cache_type = cachetab[i].ct_pcache_type;
   1738 			arm_pcache.cache_unified = cachetab[i].ct_pcache_unified;
   1739 			arm_pcache.dcache_size = cachetab[i].ct_pdcache_size;
   1740 			arm_pcache.dcache_line_size =
   1741 			    cachetab[i].ct_pdcache_line_size;
   1742 			arm_pcache.dcache_ways = cachetab[i].ct_pdcache_ways;
   1743 			if (arm_pcache.dcache_ways) {
   1744 				arm_pcache.dcache_way_size =
   1745 				    arm_pcache.dcache_line_size
   1746 				    / arm_pcache.dcache_ways;
   1747 			}
   1748 			arm_pcache.icache_size = cachetab[i].ct_picache_size;
   1749 			arm_pcache.icache_line_size =
   1750 			    cachetab[i].ct_picache_line_size;
   1751 			arm_pcache.icache_ways = cachetab[i].ct_picache_ways;
   1752 			if (arm_pcache.icache_ways) {
   1753 				arm_pcache.icache_way_size =
   1754 				    arm_pcache.icache_line_size
   1755 				    / arm_pcache.icache_ways;
   1756 			}
   1757 		}
   1758 	}
   1759 
   1760 	arm_dcache_align = arm_pcache.dcache_line_size;
   1761 	arm_dcache_align_mask = arm_dcache_align - 1;
   1762 }
   1763 
#endif /* ARM2 || ARM250 || ARM3 || ARM6 || ARM7 || SA110 || SA1100 || SA1110 || IXP12X0 */
   1765 
   1766 /*
   1767  * Cannot panic here as we may not have a console yet ...
   1768  */
   1769 
   1770 int
   1771 set_cpufuncs(void)
   1772 {
   1773 	if (cputype == 0) {
   1774 		cputype = cpufunc_id();
   1775 		cputype &= CPU_ID_CPU_MASK;
   1776 	}
   1777 
   1778 	/*
   1779 	 * NOTE: cpu_do_powersave defaults to off.  If we encounter a
   1780 	 * CPU type where we want to use it by default, then we set it.
   1781 	 */
   1782 #ifdef CPU_ARM2
   1783 	if (cputype == CPU_ID_ARM2) {
   1784 		cpufuncs = arm2_cpufuncs;
   1785 		get_cachetype_table();
   1786 		return 0;
   1787 	}
   1788 #endif /* CPU_ARM2 */
   1789 #ifdef CPU_ARM250
   1790 	if (cputype == CPU_ID_ARM250) {
   1791 		cpufuncs = arm250_cpufuncs;
   1792 		get_cachetype_table();
   1793 		return 0;
   1794 	}
   1795 #endif
   1796 #ifdef CPU_ARM3
   1797 	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
   1798 	    (cputype & 0x00000f00) == 0x00000300) {
   1799 		cpufuncs = arm3_cpufuncs;
   1800 		get_cachetype_table();
   1801 		return 0;
   1802 	}
   1803 #endif	/* CPU_ARM3 */
   1804 #ifdef CPU_ARM6
   1805 	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
   1806 	    (cputype & 0x00000f00) == 0x00000600) {
   1807 		cpufuncs = arm6_cpufuncs;
   1808 		get_cachetype_table();
   1809 		pmap_pte_init_generic();
   1810 		return 0;
   1811 	}
   1812 #endif	/* CPU_ARM6 */
   1813 #ifdef CPU_ARM7
   1814 	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
   1815 	    CPU_ID_IS7(cputype) &&
   1816 	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V3) {
   1817 		cpufuncs = arm7_cpufuncs;
   1818 		get_cachetype_table();
   1819 		pmap_pte_init_generic();
   1820 		return 0;
   1821 	}
   1822 #endif	/* CPU_ARM7 */
   1823 #ifdef CPU_ARM7TDMI
   1824 	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
   1825 	    CPU_ID_IS7(cputype) &&
   1826 	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V4T) {
   1827 		cpufuncs = arm7tdmi_cpufuncs;
   1828 		get_cachetype_cp15();
   1829 		pmap_pte_init_generic();
   1830 		return 0;
   1831 	}
   1832 #endif
   1833 #ifdef CPU_ARM8
   1834 	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
   1835 	    (cputype & 0x0000f000) == 0x00008000) {
   1836 		cpufuncs = arm8_cpufuncs;
   1837 		get_cachetype_cp15();
   1838 		pmap_pte_init_arm8();
   1839 		return 0;
   1840 	}
   1841 #endif	/* CPU_ARM8 */
   1842 #ifdef CPU_ARM9
   1843 	if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
   1844 	     (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
   1845 	    (cputype & 0x0000f000) == 0x00009000) {
   1846 		cpufuncs = arm9_cpufuncs;
   1847 		get_cachetype_cp15();
   1848 		arm9_dcache_sets_inc = 1U << arm_dcache_log2_linesize;
   1849 		arm9_dcache_sets_max =
   1850 		    (1U << (arm_dcache_log2_linesize + arm_dcache_log2_nsets)) -
   1851 		    arm9_dcache_sets_inc;
   1852 		arm9_dcache_index_inc = 1U << (32 - arm_dcache_log2_assoc);
   1853 		arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
   1854 #ifdef	ARM9_CACHE_WRITE_THROUGH
   1855 		pmap_pte_init_arm9();
   1856 #else
   1857 		pmap_pte_init_generic();
   1858 #endif
   1859 		return 0;
   1860 	}
   1861 #endif /* CPU_ARM9 */
   1862 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
   1863 	if (cputype == CPU_ID_ARM926EJS ||
   1864 	    cputype == CPU_ID_ARM1026EJS) {
   1865 		cpufuncs = armv5_ec_cpufuncs;
   1866 		get_cachetype_cp15();
   1867 		pmap_pte_init_generic();
   1868 		return 0;
   1869 	}
   1870 #endif /* CPU_ARM9E || CPU_ARM10 */
   1871 #if defined(CPU_SHEEVA)
   1872 	if (cputype == CPU_ID_MV88SV131 ||
   1873 	    cputype == CPU_ID_MV88FR571_VD) {
   1874 		cpufuncs = sheeva_cpufuncs;
   1875 		get_cachetype_cp15();
   1876 		pmap_pte_init_generic();
   1877 		cpu_do_powersave = 1;			/* Enable powersave */
   1878 		return 0;
   1879 	}
   1880 #endif /* CPU_SHEEVA */
   1881 #ifdef CPU_ARM10
   1882 	if (/* cputype == CPU_ID_ARM1020T || */
   1883 	    cputype == CPU_ID_ARM1020E) {
   1884 		/*
   1885 		 * Select write-through cacheing (this isn't really an
   1886 		 * option on ARM1020T).
   1887 		 */
   1888 		cpufuncs = arm10_cpufuncs;
   1889 		get_cachetype_cp15();
   1890 		armv5_dcache_sets_inc = 1U << arm_dcache_log2_linesize;
   1891 		armv5_dcache_sets_max =
   1892 		    (1U << (arm_dcache_log2_linesize + arm_dcache_log2_nsets)) -
   1893 		    armv5_dcache_sets_inc;
   1894 		armv5_dcache_index_inc = 1U << (32 - arm_dcache_log2_assoc);
   1895 		armv5_dcache_index_max = 0U - armv5_dcache_index_inc;
   1896 		pmap_pte_init_generic();
   1897 		return 0;
   1898 	}
   1899 #endif /* CPU_ARM10 */
   1900 
   1901 
   1902 #if defined(CPU_ARM11MPCORE)
   1903 	if (cputype == CPU_ID_ARM11MPCORE) {
   1904 		cpufuncs = arm11mpcore_cpufuncs;
   1905 #if defined(CPU_ARMV7) || defined(CPU_PRE_ARMV6)
   1906 		cpu_armv6_p = true;
   1907 #endif
   1908 		get_cachetype_cp15();
   1909 		armv5_dcache_sets_inc = 1U << arm_dcache_log2_linesize;
   1910 		armv5_dcache_sets_max = (1U << (arm_dcache_log2_linesize +
   1911 			arm_dcache_log2_nsets)) - armv5_dcache_sets_inc;
   1912 		armv5_dcache_index_inc = 1U << (32 - arm_dcache_log2_assoc);
   1913 		armv5_dcache_index_max = 0U - armv5_dcache_index_inc;
   1914 		cpu_do_powersave = 1;			/* Enable powersave */
   1915 		pmap_pte_init_arm11mpcore();
   1916 		if (arm_cache_prefer_mask)
   1917 			uvmexp.ncolors = (arm_cache_prefer_mask >> PGSHIFT) + 1;
   1918 
   1919 		return 0;
   1920 
   1921 	}
   1922 #endif	/* CPU_ARM11MPCORE */
   1923 
   1924 #if defined(CPU_ARM11)
   1925 	if (cputype == CPU_ID_ARM1136JS ||
   1926 	    cputype == CPU_ID_ARM1136JSR1 ||
   1927 	    cputype == CPU_ID_ARM1176JZS) {
   1928 		cpufuncs = arm11_cpufuncs;
   1929 #if defined(CPU_ARM1136)
   1930 		if (cputype == CPU_ID_ARM1136JS &&
   1931 		    cputype == CPU_ID_ARM1136JSR1) {
   1932 			cpufuncs = arm1136_cpufuncs;
   1933 			if (cputype == CPU_ID_ARM1136JS)
   1934 				cpufuncs.cf_sleep = arm1136_sleep_rev0;
   1935 		}
   1936 #endif
   1937 #if defined(CPU_ARM1176)
   1938 		if (cputype == CPU_ID_ARM1176JZS) {
   1939 			cpufuncs = arm1176_cpufuncs;
   1940 		}
   1941 #endif
   1942 #if defined(CPU_ARMV7) || defined(CPU_PRE_ARMV6)
   1943 		cpu_armv6_p = true;
   1944 #endif
   1945 		cpu_do_powersave = 1;			/* Enable powersave */
   1946 		get_cachetype_cp15();
   1947 #ifdef ARM11_CACHE_WRITE_THROUGH
   1948 		pmap_pte_init_arm11();
   1949 #else
   1950 		pmap_pte_init_generic();
   1951 #endif
   1952 		if (arm_cache_prefer_mask)
   1953 			uvmexp.ncolors = (arm_cache_prefer_mask >> PGSHIFT) + 1;
   1954 
   1955 		/*
   1956 		 * Start and reset the PMC Cycle Counter.
   1957 		 */
   1958 		armreg_pmcrv6_write(ARM11_PMCCTL_E | ARM11_PMCCTL_P | ARM11_PMCCTL_C);
   1959 		return 0;
   1960 	}
   1961 #endif /* CPU_ARM11 */
   1962 #ifdef CPU_SA110
   1963 	if (cputype == CPU_ID_SA110) {
   1964 		cpufuncs = sa110_cpufuncs;
   1965 		get_cachetype_table();
   1966 		pmap_pte_init_sa1();
   1967 		return 0;
   1968 	}
   1969 #endif	/* CPU_SA110 */
   1970 #ifdef CPU_SA1100
   1971 	if (cputype == CPU_ID_SA1100) {
   1972 		cpufuncs = sa11x0_cpufuncs;
   1973 		get_cachetype_table();
   1974 		pmap_pte_init_sa1();
   1975 
   1976 		/* Use powersave on this CPU. */
   1977 		cpu_do_powersave = 1;
   1978 
   1979 		return 0;
   1980 	}
   1981 #endif	/* CPU_SA1100 */
   1982 #ifdef CPU_SA1110
   1983 	if (cputype == CPU_ID_SA1110) {
   1984 		cpufuncs = sa11x0_cpufuncs;
   1985 		get_cachetype_table();
   1986 		pmap_pte_init_sa1();
   1987 
   1988 		/* Use powersave on this CPU. */
   1989 		cpu_do_powersave = 1;
   1990 
   1991 		return 0;
   1992 	}
   1993 #endif	/* CPU_SA1110 */
   1994 #ifdef CPU_FA526
   1995 	if (cputype == CPU_ID_FA526) {
   1996 		cpufuncs = fa526_cpufuncs;
   1997 		get_cachetype_cp15();
   1998 		pmap_pte_init_generic();
   1999 
   2000 		/* Use powersave on this CPU. */
   2001 		cpu_do_powersave = 1;
   2002 
   2003 		return 0;
   2004 	}
   2005 #endif	/* CPU_FA526 */
   2006 #ifdef CPU_IXP12X0
   2007 	if (cputype == CPU_ID_IXP1200) {
   2008 		cpufuncs = ixp12x0_cpufuncs;
   2009 		get_cachetype_table();
   2010 		pmap_pte_init_sa1();
   2011 		return 0;
   2012 	}
   2013 #endif  /* CPU_IXP12X0 */
   2014 #ifdef CPU_XSCALE_80200
   2015 	if (cputype == CPU_ID_80200) {
   2016 		int rev = cpufunc_id() & CPU_ID_REVISION_MASK;
   2017 
   2018 		i80200_icu_init();
   2019 
   2020 		/*
   2021 		 * Reset the Performance Monitoring Unit to a
   2022 		 * pristine state:
   2023 		 *	- CCNT, PMN0, PMN1 reset to 0
   2024 		 *	- overflow indications cleared
   2025 		 *	- all counters disabled
   2026 		 */
   2027 		__asm volatile("mcr p14, 0, %0, c0, c0, 0"
   2028 			:
   2029 			: "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
   2030 			       PMNC_CC_IF));
   2031 
   2032 #if defined(XSCALE_CCLKCFG)
   2033 		/*
   2034 		 * Crank CCLKCFG to maximum legal value.
   2035 		 */
   2036 		__asm volatile ("mcr p14, 0, %0, c6, c0, 0"
   2037 			:
   2038 			: "r" (XSCALE_CCLKCFG));
   2039 #endif
   2040 
   2041 		/*
   2042 		 * XXX Disable ECC in the Bus Controller Unit; we
   2043 		 * don't really support it, yet.  Clear any pending
   2044 		 * error indications.
   2045 		 */
   2046 		__asm volatile("mcr p13, 0, %0, c0, c1, 0"
   2047 			:
   2048 			: "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));
   2049 
   2050 		cpufuncs = xscale_cpufuncs;
   2051 #if defined(PERFCTRS)
   2052 		xscale_pmu_init();
   2053 #endif
   2054 
   2055 		/*
   2056 		 * i80200 errata: Step-A0 and A1 have a bug where
   2057 		 * D$ dirty bits are not cleared on "invalidate by
   2058 		 * address".
   2059 		 *
   2060 		 * Workaround: Clean cache line before invalidating.
   2061 		 */
   2062 		if (rev == 0 || rev == 1)
   2063 			cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng;
   2064 
   2065 		get_cachetype_cp15();
   2066 		pmap_pte_init_xscale();
   2067 		return 0;
   2068 	}
   2069 #endif /* CPU_XSCALE_80200 */
   2070 #ifdef CPU_XSCALE_80321
   2071 	if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
   2072 	    cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0 ||
   2073 	    cputype == CPU_ID_80219_400 || cputype == CPU_ID_80219_600) {
   2074 		i80321_icu_init();
   2075 
   2076 		/*
   2077 		 * Reset the Performance Monitoring Unit to a
   2078 		 * pristine state:
   2079 		 *	- CCNT, PMN0, PMN1 reset to 0
   2080 		 *	- overflow indications cleared
   2081 		 *	- all counters disabled
   2082 		 */
   2083 		__asm volatile("mcr p14, 0, %0, c0, c0, 0"
   2084 			:
   2085 			: "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
   2086 			       PMNC_CC_IF));
   2087 
   2088 		cpufuncs = xscale_cpufuncs;
   2089 #if defined(PERFCTRS)
   2090 		xscale_pmu_init();
   2091 #endif
   2092 
   2093 		get_cachetype_cp15();
   2094 		pmap_pte_init_xscale();
   2095 		return 0;
   2096 	}
   2097 #endif /* CPU_XSCALE_80321 */
   2098 #ifdef __CPU_XSCALE_PXA2XX
   2099 	/* ignore core revision to test PXA2xx CPUs */
   2100 	if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X ||
   2101 	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
   2102 	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {
   2103 
   2104 		cpufuncs = xscale_cpufuncs;
   2105 #if defined(PERFCTRS)
   2106 		xscale_pmu_init();
   2107 #endif
   2108 
   2109 		get_cachetype_cp15();
   2110 		pmap_pte_init_xscale();
   2111 
   2112 		/* Use powersave on this CPU. */
   2113 		cpu_do_powersave = 1;
   2114 
   2115 		return 0;
   2116 	}
   2117 #endif /* __CPU_XSCALE_PXA2XX */
   2118 #ifdef CPU_XSCALE_IXP425
   2119 	if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
   2120 	    cputype == CPU_ID_IXP425_266) {
   2121 		ixp425_icu_init();
   2122 
   2123 		cpufuncs = xscale_cpufuncs;
   2124 #if defined(PERFCTRS)
   2125 		xscale_pmu_init();
   2126 #endif
   2127 
   2128 		get_cachetype_cp15();
   2129 		pmap_pte_init_xscale();
   2130 
   2131 		return 0;
   2132 	}
   2133 #endif /* CPU_XSCALE_IXP425 */
   2134 #if defined(CPU_CORTEX)
   2135 	if (CPU_ID_CORTEX_P(cputype)) {
   2136 		cpufuncs = armv7_cpufuncs;
   2137 		cpu_do_powersave = 1;			/* Enable powersave */
   2138 #if defined(CPU_ARMV6) || defined(CPU_PRE_ARMV6)
   2139 		cpu_armv7_p = true;
   2140 #endif
   2141 		get_cachetype_cp15();
   2142 		pmap_pte_init_armv7();
   2143 		if (arm_cache_prefer_mask)
   2144 			uvmexp.ncolors = (arm_cache_prefer_mask >> PGSHIFT) + 1;
   2145 		/*
   2146 		 * Start and reset the PMC Cycle Counter.
   2147 		 */
   2148 		armreg_pmcr_write(ARM11_PMCCTL_E | ARM11_PMCCTL_P | ARM11_PMCCTL_C);
   2149 		armreg_pmcntenset_write(CORTEX_CNTENS_C);
   2150 		return 0;
   2151 	}
   2152 #endif /* CPU_CORTEX */
   2153 
   2154 #if defined(CPU_PJ4B)
   2155 	if ((cputype == CPU_ID_MV88SV581X_V6 ||
   2156 	    cputype == CPU_ID_MV88SV581X_V7 ||
   2157 	    cputype == CPU_ID_MV88SV584X_V7 ||
   2158 	    cputype == CPU_ID_ARM_88SV581X_V6 ||
   2159 	    cputype == CPU_ID_ARM_88SV581X_V7) &&
   2160 	    (armreg_pfr0_read() & ARM_PFR0_THUMBEE_MASK)) {
   2161 			cpufuncs = pj4bv7_cpufuncs;
   2162 #if defined(CPU_ARMV6) || defined(CPU_PRE_ARMV6)
   2163 			cpu_armv7_p = true;
   2164 #endif
   2165 			get_cachetype_cp15();
   2166 			pmap_pte_init_armv7();
   2167 			return 0;
   2168 	}
   2169 #endif /* CPU_PJ4B */
   2170 
   2171 	/*
   2172 	 * Bzzzz. And the answer was ...
   2173 	 */
   2174 	panic("No support for this CPU type (%08x) in kernel", cputype);
   2175 	return(ARCHITECTURE_NOT_PRESENT);
   2176 }
   2177 
   2178 #ifdef CPU_ARM2
   2179 u_int arm2_id(void)
   2180 {
   2181 
   2182 	return CPU_ID_ARM2;
   2183 }
   2184 #endif /* CPU_ARM2 */
   2185 
   2186 #ifdef CPU_ARM250
   2187 u_int arm250_id(void)
   2188 {
   2189 
   2190 	return CPU_ID_ARM250;
   2191 }
   2192 #endif /* CPU_ARM250 */
   2193 
   2194 /*
   2195  * Fixup routines for data and prefetch aborts.
   2196  *
   2197  * Several compile time symbols are used
   2198  *
   2199  * DEBUG_FAULT_CORRECTION - Print debugging information during the
   2200  * correction of registers after a fault.
   2201  * ARM6_LATE_ABORT - ARM6 supports both early and late aborts
   2202  * when defined should use late aborts
   2203  */
   2204 
   2205 
   2206 /*
   2207  * Null abort fixup routine.
   2208  * For use when no fixup is required.
   2209  */
   2210 int
   2211 cpufunc_null_fixup(void *arg)
   2212 {
   2213 	return(ABORT_FIXUP_OK);
   2214 }
   2215 
   2216 
   2217 #if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3) || \
   2218     defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI)
   2219 
   2220 #ifdef DEBUG_FAULT_CORRECTION
   2221 #define DFC_PRINTF(x)		printf x
   2222 #define DFC_DISASSEMBLE(x)	disassemble(x)
   2223 #else
   2224 #define DFC_PRINTF(x)		/* nothing */
   2225 #define DFC_DISASSEMBLE(x)	/* nothing */
   2226 #endif
   2227 
   2228 /*
   2229  * "Early" data abort fixup.
   2230  *
   2231  * For ARM2, ARM2as, ARM3 and ARM6 (in early-abort mode).  Also used
   2232  * indirectly by ARM6 (in late-abort mode) and ARM7[TDMI].
   2233  *
   2234  * In early aborts, we may have to fix up LDM, STM, LDC and STC.
   2235  */
   2236 int
   2237 early_abort_fixup(void *arg)
   2238 {
   2239 	trapframe_t *frame = arg;
   2240 	u_int fault_pc;
   2241 	u_int fault_instruction;
   2242 	int saved_lr = 0;
   2243 
   2244 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
   2245 
   2246 		/* Ok an abort in SVC mode */
   2247 
   2248 		/*
   2249 		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
   2250 		 * as the fault happened in svc mode but we need it in the
   2251 		 * usr slot so we can treat the registers as an array of ints
   2252 		 * during fixing.
   2253 		 * NOTE: This PC is in the position but writeback is not
   2254 		 * allowed on r15.
   2255 		 * Doing it like this is more efficient than trapping this
   2256 		 * case in all possible locations in the following fixup code.
   2257 		 */
   2258 
   2259 		saved_lr = frame->tf_usr_lr;
   2260 		frame->tf_usr_lr = frame->tf_svc_lr;
   2261 
   2262 		/*
   2263 		 * Note the trapframe does not have the SVC r13 so a fault
   2264 		 * from an instruction with writeback to r13 in SVC mode is
   2265 		 * not allowed. This should not happen as the kstack is
   2266 		 * always valid.
   2267 		 */
   2268 	}
   2269 
   2270 	/* Get fault address and status from the CPU */
   2271 
   2272 	fault_pc = frame->tf_pc;
   2273 	fault_instruction = *((volatile unsigned int *)fault_pc);
   2274 
   2275 	/* Decode the fault instruction and fix the registers as needed */
   2276 
   2277 	if ((fault_instruction & 0x0e000000) == 0x08000000) {
   2278 		int base;
   2279 		int loop;
   2280 		int count;
   2281 		int *registers = &frame->tf_r0;
   2282 
   2283 		DFC_PRINTF(("LDM/STM\n"));
   2284 		DFC_DISASSEMBLE(fault_pc);
   2285 		if (fault_instruction & (1 << 21)) {
   2286 			DFC_PRINTF(("This instruction must be corrected\n"));
   2287 			base = (fault_instruction >> 16) & 0x0f;
   2288 			if (base == 15)
   2289 				return ABORT_FIXUP_FAILED;
   2290 			/* Count registers transferred */
   2291 			count = 0;
   2292 			for (loop = 0; loop < 16; ++loop) {
   2293 				if (fault_instruction & (1<<loop))
   2294 					++count;
   2295 			}
   2296 			DFC_PRINTF(("%d registers used\n", count));
   2297 			DFC_PRINTF(("Corrected r%d by %d bytes ",
   2298 				       base, count * 4));
   2299 			if (fault_instruction & (1 << 23)) {
   2300 				DFC_PRINTF(("down\n"));
   2301 				registers[base] -= count * 4;
   2302 			} else {
   2303 				DFC_PRINTF(("up\n"));
   2304 				registers[base] += count * 4;
   2305 			}
   2306 		}
   2307 	} else if ((fault_instruction & 0x0e000000) == 0x0c000000) {
   2308 		int base;
   2309 		int offset;
   2310 		int *registers = &frame->tf_r0;
   2311 
   2312 		/* REGISTER CORRECTION IS REQUIRED FOR THESE INSTRUCTIONS */
   2313 
   2314 		DFC_DISASSEMBLE(fault_pc);
   2315 
   2316 		/* Only need to fix registers if write back is turned on */
   2317 
   2318 		if ((fault_instruction & (1 << 21)) != 0) {
   2319 			base = (fault_instruction >> 16) & 0x0f;
   2320 			if (base == 13 &&
   2321 			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
   2322 				return ABORT_FIXUP_FAILED;
   2323 			if (base == 15)
   2324 				return ABORT_FIXUP_FAILED;
   2325 
   2326 			offset = (fault_instruction & 0xff) << 2;
   2327 			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
   2328 			if ((fault_instruction & (1 << 23)) != 0)
   2329 				offset = -offset;
   2330 			registers[base] += offset;
   2331 			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
   2332 		}
   2333 	} else if ((fault_instruction & 0x0e000000) == 0x0c000000)
   2334 		return ABORT_FIXUP_FAILED;
   2335 
   2336 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
   2337 
   2338 		/* Ok an abort in SVC mode */
   2339 
   2340 		/*
   2341 		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
   2342 		 * as the fault happened in svc mode but we need it in the
   2343 		 * usr slot so we can treat the registers as an array of ints
   2344 		 * during fixing.
   2345 		 * NOTE: This PC is in the position but writeback is not
   2346 		 * allowed on r15.
   2347 		 * Doing it like this is more efficient than trapping this
   2348 		 * case in all possible locations in the prior fixup code.
   2349 		 */
   2350 
   2351 		frame->tf_svc_lr = frame->tf_usr_lr;
   2352 		frame->tf_usr_lr = saved_lr;
   2353 
   2354 		/*
   2355 		 * Note the trapframe does not have the SVC r13 so a fault
   2356 		 * from an instruction with writeback to r13 in SVC mode is
   2357 		 * not allowed. This should not happen as the kstack is
   2358 		 * always valid.
   2359 		 */
   2360 	}
   2361 
   2362 	return(ABORT_FIXUP_OK);
   2363 }
   2364 #endif	/* CPU_ARM2/250/3/6/7 */
   2365 
   2366 
   2367 #if (defined(CPU_ARM6) && defined(ARM6_LATE_ABORT)) || defined(CPU_ARM7) || \
   2368 	defined(CPU_ARM7TDMI)
   2369 /*
   2370  * "Late" (base updated) data abort fixup
   2371  *
   2372  * For ARM6 (in late-abort mode) and ARM7.
   2373  *
   2374  * In this model, all data-transfer instructions need fixing up.  We defer
   2375  * LDM, STM, LDC and STC fixup to the early-abort handler.
   2376  */
int
late_abort_fixup(void *arg)
{
	trapframe_t *frame = arg;
	u_int fault_pc;
	u_int fault_instruction;
	int saved_lr = 0;

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
		 * as the fault happened in svc mode but we need it in the
		 * usr slot so we can treat the registers as an array of ints
		 * during fixing.
		 * NOTE: This PC is in the position but writeback is not
		 * allowed on r15.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the following fixup code.
		 */

		saved_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = frame->tf_svc_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed. This should not happen as the kstack is
		 * always valid.
		 */
	}

	/* Get fault address and status from the CPU */

	fault_pc = frame->tf_pc;
	fault_instruction = *((volatile unsigned int *)fault_pc);

	/* Decode the fault instruction and fix the registers as needed */

	/* Was it a swap instruction?  (SWP needs no register fixup.) */

	if ((fault_instruction & 0x0fb00ff0) == 0x01000090) {
		DFC_DISASSEMBLE(fault_pc);
	} else if ((fault_instruction & 0x0c000000) == 0x04000000) {

		/* Was it a ldr/str instruction? */
		/* This is for late abort only */

		int base;
		int offset;
		int *registers = &frame->tf_r0;

		DFC_DISASSEMBLE(fault_pc);

		/* This is for late abort only */

		if ((fault_instruction & (1 << 24)) == 0
		    || (fault_instruction & (1 << 21)) != 0) {
			/*
			 * Base register was updated: post-indexed
			 * (P bit clear) or pre-indexed with writeback
			 * (W bit set); undo the base modification.
			 */

			base = (fault_instruction >> 16) & 0x0f;
			if (base == 13 &&
			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
				return ABORT_FIXUP_FAILED;
			if (base == 15)
				return ABORT_FIXUP_FAILED;
			DFC_PRINTF(("late abt fix: r%d=%08x : ",
				       base, registers[base]));
			if ((fault_instruction & (1 << 25)) == 0) {
				/* Immediate offset - easy */

				offset = fault_instruction & 0xfff;
				/* U bit set: base was incremented, so
				 * subtract to undo. */
				if ((fault_instruction & (1 << 23)))
					offset = -offset;
				registers[base] += offset;
				DFC_PRINTF(("imm=%08x ", offset));
			} else {
				/* offset is a shifted register */
				int shift;

				offset = fault_instruction & 0x0f;
				if (offset == base)
					return ABORT_FIXUP_FAILED;

				/*
				 * Register offset - hard we have to
				 * cope with shifts !
				 */
				offset = registers[offset];

				if ((fault_instruction & (1 << 4)) == 0)
					/* shift with amount */
					shift = (fault_instruction >> 7) & 0x1f;
				else {
					/* shift with register */
					if ((fault_instruction & (1 << 7)) != 0)
						/* undefined for now so bail out */
						return ABORT_FIXUP_FAILED;
					shift = ((fault_instruction >> 8) & 0xf);
					if (base == shift)
						return ABORT_FIXUP_FAILED;
					DFC_PRINTF(("shift reg=%d ", shift));
					shift = registers[shift];
				}
				DFC_PRINTF(("shift=%08x ", shift));
				/* Apply the shift type (bits 6:5). */
				switch (((fault_instruction >> 5) & 0x3)) {
				case 0 : /* Logical left */
					offset = (int)(((u_int)offset) << shift);
					break;
				case 1 : /* Logical Right */
					if (shift == 0) shift = 32;
					offset = (int)(((u_int)offset) >> shift);
					break;
				case 2 : /* Arithmetic Right */
					if (shift == 0) shift = 32;
					offset = (int)(((int)offset) >> shift);
					break;
				case 3 : /* Rotate right (rol or rxx) */
					return ABORT_FIXUP_FAILED;
					break;
				}

				DFC_PRINTF(("abt: fixed LDR/STR with "
					       "register offset\n"));
				if ((fault_instruction & (1 << 23)))
					offset = -offset;
				DFC_PRINTF(("offset=%08x ", offset));
				registers[base] += offset;
			}
			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
		}
	}

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
		 * as the fault happened in svc mode but we need it in the
		 * usr slot so we can treat the registers as an array of ints
		 * during fixing.
		 * NOTE: This PC is in the position but writeback is not
		 * allowed on r15.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the prior fixup code.
		 */

		frame->tf_svc_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = saved_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed. This should not happen as the kstack is
		 * always valid.
		 */
	}

	/*
	 * Now let the early-abort fixup routine have a go, in case it
	 * was an LDM, STM, LDC or STC that faulted.
	 */

	return early_abort_fixup(arg);
}
   2545 #endif	/* CPU_ARM6(LATE)/7/7TDMI */
   2546 
   2547 /*
   2548  * CPU Setup code
   2549  */
   2550 
   2551 #if defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) || \
   2552 	defined(CPU_ARM8) || defined (CPU_ARM9) || defined (CPU_ARM9E) || \
   2553 	defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
   2554 	defined(CPU_FA526) || \
   2555 	defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
   2556 	defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425) || \
   2557 	defined(CPU_ARM10) || defined(CPU_SHEEVA) || \
   2558 	defined(CPU_ARMV6) || defined(CPU_ARMV7)
   2559 
#define IGN	0	/* leave the bit(s) untouched */
#define OR	1	/* OR co_value into the control register */
#define BIC	2	/* clear co_value from the control register */

/*
 * One boot-argument option: maps a boolean boot argument name to an
 * operation on the CPU control register image, applied by
 * parse_cpu_options().
 */
struct cpu_option {
	const char *co_name;	/* boot argument name (boolean) */
	int	co_falseop;	/* operation when the argument is false */
	int	co_trueop;	/* operation when the argument is true */
	int	co_value;	/* control-register bit(s) affected */
};

static u_int parse_cpu_options(char *, struct cpu_option *, u_int);
   2572 
   2573 static u_int
   2574 parse_cpu_options(char *args, struct cpu_option *optlist, u_int cpuctrl)
   2575 {
   2576 	int integer;
   2577 
   2578 	if (args == NULL)
   2579 		return(cpuctrl);
   2580 
   2581 	while (optlist->co_name) {
   2582 		if (get_bootconf_option(args, optlist->co_name,
   2583 		    BOOTOPT_TYPE_BOOLEAN, &integer)) {
   2584 			if (integer) {
   2585 				if (optlist->co_trueop == OR)
   2586 					cpuctrl |= optlist->co_value;
   2587 				else if (optlist->co_trueop == BIC)
   2588 					cpuctrl &= ~optlist->co_value;
   2589 			} else {
   2590 				if (optlist->co_falseop == OR)
   2591 					cpuctrl |= optlist->co_value;
   2592 				else if (optlist->co_falseop == BIC)
   2593 					cpuctrl &= ~optlist->co_value;
   2594 			}
   2595 		}
   2596 		++optlist;
   2597 	}
   2598 	return(cpuctrl);
   2599 }
   2600 #endif /* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 || CPU_SA110 */
   2601 
   2602 #if defined (CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) \
   2603 	|| defined(CPU_ARM8)
/*
 * Boot-argument options common to the ARM6/7/8 setup routines,
 * parsed by parse_cpu_options().
 */
struct cpu_option arm678_options[] = {
#ifdef COMPAT_12
	/* Legacy option names kept under COMPAT_12. */
	{ "nocache",		IGN, BIC, CPU_CONTROL_IDC_ENABLE },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "cpu.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }	/* terminator */
};
   2615 
   2616 #endif	/* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 */
   2617 
   2618 #ifdef CPU_ARM6
/* ARM6-specific boot-argument options (see parse_cpu_options()). */
struct cpu_option arm6_options[] = {
	{ "arm6.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm6.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm6.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm6.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }	/* terminator */
};
   2626 
   2627 void
   2628 arm6_setup(char *args)
   2629 {
   2630 
   2631 	/* Set up default control registers bits */
   2632 	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
   2633 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
   2634 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
   2635 #if 0
   2636 	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
   2637 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
   2638 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
   2639 		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE
   2640 		 | CPU_CONTROL_AFLT_ENABLE;
   2641 #endif
   2642 
   2643 #ifdef ARM6_LATE_ABORT
   2644 	cpuctrl |= CPU_CONTROL_LABT_ENABLE;
   2645 #endif	/* ARM6_LATE_ABORT */
   2646 
   2647 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
   2648 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
   2649 #endif
   2650 
   2651 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
   2652 	cpuctrl = parse_cpu_options(args, arm6_options, cpuctrl);
   2653 
   2654 #ifdef __ARMEB__
   2655 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
   2656 #endif
   2657 
   2658 	/* Clear out the cache */
   2659 	cpu_idcache_wbinv_all();
   2660 
   2661 	/* Set the control register */
   2662 	curcpu()->ci_ctrl = cpuctrl;
   2663 	cpu_control(0xffffffff, cpuctrl);
   2664 }
   2665 #endif	/* CPU_ARM6 */
   2666 
   2667 #ifdef CPU_ARM7
/* ARM7-specific boot-argument options (see parse_cpu_options()). */
struct cpu_option arm7_options[] = {
	{ "arm7.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm7.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm7.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm7.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
#ifdef COMPAT_12
	/* Legacy option name kept under COMPAT_12. */
	{ "fpaclk2",		BIC, OR,  CPU_CONTROL_CPCLK },
#endif	/* COMPAT_12 */
	{ "arm700.fpaclk",	BIC, OR,  CPU_CONTROL_CPCLK },
	{ NULL,			IGN, IGN, 0 }	/* terminator */
};
   2679 
   2680 void
   2681 arm7_setup(char *args)
   2682 {
   2683 
   2684 	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
   2685 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
   2686 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
   2687 #if 0
   2688 	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
   2689 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
   2690 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
   2691 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_LABT_ENABLE
   2692 		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE
   2693 		 | CPU_CONTROL_AFLT_ENABLE;
   2694 #endif
   2695 
   2696 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
   2697 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
   2698 #endif
   2699 
   2700 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
   2701 	cpuctrl = parse_cpu_options(args, arm7_options, cpuctrl);
   2702 
   2703 #ifdef __ARMEB__
   2704 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
   2705 #endif
   2706 
   2707 	/* Clear out the cache */
   2708 	cpu_idcache_wbinv_all();
   2709 
   2710 	/* Set the control register */
   2711 	curcpu()->ci_ctrl = cpuctrl;
   2712 	cpu_control(0xffffffff, cpuctrl);
   2713 }
   2714 #endif	/* CPU_ARM7 */
   2715 
   2716 #ifdef CPU_ARM7TDMI
/* ARM7TDMI boot-argument options (see parse_cpu_options()). */
struct cpu_option arm7tdmi_options[] = {
	{ "arm7.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm7.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm7.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm7.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
#ifdef COMPAT_12
	/* Legacy option name kept under COMPAT_12. */
	{ "fpaclk2",		BIC, OR,  CPU_CONTROL_CPCLK },
#endif	/* COMPAT_12 */
	{ "arm700.fpaclk",	BIC, OR,  CPU_CONTROL_CPCLK },
	{ NULL,			IGN, IGN, 0 }	/* terminator */
};
   2728 
/*
 * arm7tdmi_setup:
 *
 *	Compute and install the CPU control register value for
 *	ARM7TDMI cores, honouring boot-argument overrides from
 *	"args".
 */
void
arm7tdmi_setup(char *args)
{
	int cpuctrl;

	/* Default: MMU, 32-bit spaces, system protection, unified
	 * ID cache and write buffer enabled. */
	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;

	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
	cpuctrl = parse_cpu_options(args, arm7tdmi_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	curcpu()->ci_ctrl = cpuctrl;
	cpu_control(0xffffffff, cpuctrl);
}
   2752 #endif	/* CPU_ARM7TDMI */
   2753 
   2754 #ifdef CPU_ARM8
/*
 * Boot-argument options recognized by arm8_setup(); ARM8 adds
 * branch-prediction control to the usual cache/write-buffer knobs.
 */
struct cpu_option arm8_options[] = {
	{ "arm8.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm8.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm8.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm8.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
#ifdef COMPAT_12
	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "arm8.branchpredict",	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
   2767 
/*
 * arm8_setup:
 *
 *	Compute and install the CPU control register for ARM8 cores,
 *	and optionally program the ARM8-specific clock/test register
 *	from "arm8.clock.*" / "arm8.test" boot arguments.
 */
void
arm8_setup(char *args)
{
	int integer;
	int clocktest;		/* accumulated clock/test register value */
	int setclock = 0;	/* nonzero once any clock option was given */

	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
#if 0
	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
		 | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE;
#endif

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
	cpuctrl = parse_cpu_options(args, arm8_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Get clock configuration */
	clocktest = arm8_clock_config(0, 0) & 0x0f;

	/* Special ARM8 clock and test configuration */
	if (get_bootconf_option(args, "arm8.clock.reset", BOOTOPT_TYPE_BOOLEAN, &integer)) {
		clocktest = 0;
		setclock = 1;
	}
	if (get_bootconf_option(args, "arm8.clock.dynamic", BOOTOPT_TYPE_BOOLEAN, &integer)) {
		if (integer)
			clocktest |= 0x01;
		else
			clocktest &= ~(0x01);
		setclock = 1;
	}
	if (get_bootconf_option(args, "arm8.clock.sync", BOOTOPT_TYPE_BOOLEAN, &integer)) {
		if (integer)
			clocktest |= 0x02;
		else
			clocktest &= ~(0x02);
		setclock = 1;
	}
	/*
	 * NOTE(review): the mask ~0xc0 clears bits 7:6 while the new
	 * value is placed in bits 3:2 — confirm against the ARM8
	 * clock/test register documentation whether ~0x0c was intended.
	 */
	if (get_bootconf_option(args, "arm8.clock.fast", BOOTOPT_TYPE_BININT, &integer)) {
		clocktest = (clocktest & ~0xc0) | (integer & 3) << 2;
		setclock = 1;
	}
	if (get_bootconf_option(args, "arm8.test", BOOTOPT_TYPE_BININT, &integer)) {
		clocktest |= (integer & 7) << 5;
		setclock = 1;
	}

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	curcpu()->ci_ctrl = cpuctrl;
	cpu_control(0xffffffff, cpuctrl);

	/* Set the clock/test register */
	if (setclock)
		arm8_clock_config(0x7f, clocktest);
}
   2839 #endif	/* CPU_ARM8 */
   2840 
   2841 #ifdef CPU_ARM9
/*
 * Boot-argument options recognized by arm9_setup().  ARM9 has split
 * I/D caches, so icache/dcache can be controlled independently.
 */
struct cpu_option arm9_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm9.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm9.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "arm9.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "arm9.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
   2853 
/*
 * arm9_setup:
 *
 *	Compute and install the CPU control register for ARM9 cores.
 *	Unlike most of the other setup routines here, only the bits in
 *	cpuctrlmask are written (not the full register).
 */
void
arm9_setup(char *args)
{

	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE;
	/* Bits we are willing to modify in the control register. */
	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
		 | CPU_CONTROL_ROUNDROBIN;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

#ifndef ARM_HAS_VBAR
	/* Relocate the exception vectors to the high vector page. */
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	curcpu()->ci_ctrl = cpuctrl;
	cpu_control(cpuctrlmask, cpuctrl);

}
   2893 #endif	/* CPU_ARM9 */
   2894 
   2895 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
/*
 * Boot-argument options recognized by arm10_setup()
 * (shared by ARM9E and ARM10 cores).
 */
struct cpu_option arm10_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm10.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm10.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "arm10.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "arm10.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
   2907 
/*
 * arm10_setup:
 *
 *	Compute and install the CPU control register for ARM9E/ARM10
 *	cores.  Caches are written back and invalidated both before
 *	and after the control register change.
 */
void
arm10_setup(char *args)
{

	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
#if 0
	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
	    | CPU_CONTROL_BPRD_ENABLE
	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
#endif

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, arm10_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

#ifndef ARM_HAS_VBAR
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Now really make sure they are clean.  */
	/* CP15 c7,c7,0: invalidate both I and D caches. */
	__asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );

	/* Set the control register */
	curcpu()->ci_ctrl = cpuctrl;
	cpu_control(0xffffffff, cpuctrl);

	/* And again. */
	cpu_idcache_wbinv_all();
}
   2952 #endif	/* CPU_ARM9E || CPU_ARM10 */
   2953 
   2954 #if defined(CPU_ARM11)
/*
 * Boot-argument options recognized by arm11_setup(),
 * arm11mpcore_setup() and arm11x6_setup().
 */
struct cpu_option arm11_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm11.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm11.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "arm11.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "arm11.branchpredict", BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
   2965 
/*
 * arm11_setup:
 *
 *	Compute and install the CPU control register for ARM11 cores.
 *	Also opens up coprocessor access so later code can probe for
 *	a VFP unit.
 */
void
arm11_setup(char *args)
{

	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
#ifdef ARM_MMU_EXTENDED
	    /* Extended (v6/v7) page table format. */
	    | CPU_CONTROL_XP_ENABLE
#endif
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    /* | CPU_CONTROL_BPRD_ENABLE */;
	/* Only these bits will be written to the control register. */
	int cpuctrlmask = cpuctrl
	    | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BPRD_ENABLE
	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

#ifndef ARM_HAS_VBAR
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Now really make sure they are clean.  */
	/* CP15 c7,c7,0: invalidate both I and D caches. */
	__asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );

	/* Allow detection code to find the VFP if it's fitted.  */
	armreg_cpacr_write(0x0fffffff);

	/* Set the control register */
	curcpu()->ci_ctrl = cpuctrl;
	cpu_control(cpuctrlmask, cpuctrl);

	/* And again. */
	cpu_idcache_wbinv_all();
}
   3012 #endif	/* CPU_ARM11 */
   3013 
   3014 #if defined(CPU_ARM11MPCORE)
   3015 
/*
 * arm11mpcore_setup:
 *
 *	Compute and install the CPU control register for ARM11 MPCore.
 *	Note that the MMU enable bit is NOT in the default set here;
 *	ci_ctrl records the value actually returned by cpu_control().
 */
void
arm11mpcore_setup(char *args)
{

	int cpuctrl = CPU_CONTROL_IC_ENABLE
	    | CPU_CONTROL_DC_ENABLE
#ifdef ARM_MMU_EXTENDED
	    /* Extended (v6/v7) page table format. */
	    | CPU_CONTROL_XP_ENABLE
#endif
	    | CPU_CONTROL_BPRD_ENABLE ;
	int cpuctrlmask = cpuctrl
	    | CPU_CONTROL_AFLT_ENABLE
	    | CPU_CONTROL_VECRELOC;

#ifdef	ARM11MPCORE_MMU_COMPAT
	/* XXX: S and R? */
#endif

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);

#ifndef ARM_HAS_VBAR
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Now really make sure they are clean.  */
	/* CP15 c7,c7,0: invalidate both I and D caches. */
	__asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );

	/* Allow detection code to find the VFP if it's fitted.  */
	armreg_cpacr_write(0x0fffffff);

	/* Set the control register; remember the resulting value. */
	curcpu()->ci_ctrl = cpu_control(cpuctrlmask, cpuctrl);

	/* And again. */
	cpu_idcache_wbinv_all();
}
   3060 #endif	/* CPU_ARM11MPCORE */
   3061 
   3062 #ifdef CPU_PJ4B
   3063 void
   3064 pj4bv7_setup(char *args)
   3065 {
   3066 	int cpuctrl;
   3067 
   3068 	pj4b_config();
   3069 
   3070 	cpuctrl = CPU_CONTROL_MMU_ENABLE;
   3071 #ifdef ARM32_DISABLE_ALIGNMENT_FAULTS
   3072 	cpuctrl |= CPU_CONTROL_UNAL_ENABLE;
   3073 #else
   3074 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
   3075 #endif
   3076 	cpuctrl |= CPU_CONTROL_DC_ENABLE;
   3077 	cpuctrl |= CPU_CONTROL_IC_ENABLE;
   3078 	cpuctrl |= (0xf << 3);
   3079 	cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
   3080 	cpuctrl |= (0x5 << 16) | (1 < 22);
   3081 	cpuctrl |= CPU_CONTROL_XP_ENABLE;
   3082 
   3083 #ifndef ARM_HAS_VBAR
   3084 	if (vector_page == ARM_VECTORS_HIGH)
   3085 		cpuctrl |= CPU_CONTROL_VECRELOC;
   3086 #endif
   3087 
   3088 #ifdef L2CACHE_ENABLE
   3089 	/* Setup L2 cache */
   3090 	arm_scache.cache_type = CPU_CT_CTYPE_WT;
   3091 	arm_scache.cache_unified = 1;
   3092 	arm_scache.dcache_type = arm_scache.icache_type = CACHE_TYPE_PIPT;
   3093 	arm_scache.dcache_size = arm_scache.icache_size = ARMADAXP_L2_SIZE;
   3094 	arm_scache.dcache_ways = arm_scache.icache_ways = ARMADAXP_L2_WAYS;
   3095 	arm_scache.dcache_way_size = arm_scache.icache_way_size =
   3096 	    ARMADAXP_L2_WAY_SIZE;
   3097 	arm_scache.dcache_line_size = arm_scache.icache_line_size =
   3098 	    ARMADAXP_L2_LINE_SIZE;
   3099 	arm_scache.dcache_sets = arm_scache.icache_sets =
   3100 	    ARMADAXP_L2_SETS;
   3101 
   3102 	cpufuncs.cf_sdcache_wbinv_range	= armadaxp_sdcache_wbinv_range;
   3103 	cpufuncs.cf_sdcache_inv_range	= armadaxp_sdcache_inv_range;
   3104 	cpufuncs.cf_sdcache_wb_range	= armadaxp_sdcache_wb_range;
   3105 #endif
   3106 
   3107 #ifdef AURORA_IO_CACHE_COHERENCY
   3108 	/* use AMBA and I/O Coherency Fabric to maintain cache */
   3109 	cpufuncs.cf_dcache_wbinv_range	= pj4b_dcache_cfu_wbinv_range;
   3110 	cpufuncs.cf_dcache_inv_range	= pj4b_dcache_cfu_inv_range;
   3111 	cpufuncs.cf_dcache_wb_range	= pj4b_dcache_cfu_wb_range;
   3112 
   3113 	cpufuncs.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop;
   3114 	cpufuncs.cf_sdcache_inv_range	= (void *)cpufunc_nullop;
   3115 	cpufuncs.cf_sdcache_wb_range	= (void *)cpufunc_nullop;
   3116 #endif
   3117 
   3118 	/* Clear out the cache */
   3119 	cpu_idcache_wbinv_all();
   3120 
   3121 	/* Set the control register */
   3122 	cpu_control(0xffffffff, cpuctrl);
   3123 
   3124 	/* And again. */
   3125 	cpu_idcache_wbinv_all();
   3126 #ifdef L2CACHE_ENABLE
   3127 	armadaxp_sdcache_wbinv_all();
   3128 #endif
   3129 
   3130 	curcpu()->ci_ctrl = cpuctrl;
   3131 }
   3132 #endif /* CPU_PJ4B */
   3133 
   3134 #if defined(CPU_ARMV7)
/* Boot-argument options recognized by armv7_setup(). */
struct cpu_option armv7_options[] = {
    { "cpu.cache",      BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
    { "cpu.nocache",    OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
    { "armv7.cache",    BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
    { "armv7.icache",   BIC, OR,  CPU_CONTROL_IC_ENABLE },
    { "armv7.dcache",   BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ NULL, 			IGN, IGN, 0}
};
   3143 
   3144 void
   3145 armv7_setup(char *args)
   3146 {
   3147 
   3148 	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_IC_ENABLE
   3149 	    | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_BPRD_ENABLE
   3150 #ifdef __ARMEB__
   3151 	    | CPU_CONTROL_EX_BEND
   3152 #endif
   3153 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
   3154 	    | CPU_CONTROL_AFLT_ENABLE;
   3155 #endif
   3156 	    | CPU_CONTROL_UNAL_ENABLE;
   3157 
   3158 	int cpuctrlmask = cpuctrl | CPU_CONTROL_AFLT_ENABLE;
   3159 
   3160 
   3161 	cpuctrl = parse_cpu_options(args, armv7_options, cpuctrl);
   3162 
   3163 #ifndef ARM_HAS_VBAR
   3164 	if (vector_page == ARM_VECTORS_HIGH)
   3165 		cpuctrl |= CPU_CONTROL_VECRELOC;
   3166 #endif
   3167 
   3168 	/* Clear out the cache */
   3169 	cpu_idcache_wbinv_all();
   3170 
   3171 	/* Set the control register */
   3172 	curcpu()->ci_ctrl = cpuctrl;
   3173 	cpu_control(cpuctrlmask, cpuctrl);
   3174 }
   3175 #endif /* CPU_ARMV7 */
   3176 
   3177 
   3178 #if defined(CPU_ARM1136) || defined(CPU_ARM1176)
   3179 void
   3180 arm11x6_setup(char *args)
   3181 {
   3182 	int cpuctrl, cpuctrl_wax;
   3183 	uint32_t auxctrl;
   3184 	uint32_t sbz=0;
   3185 	uint32_t cpuid;
   3186 
   3187 	cpuid = cpu_id();
   3188 
   3189 	cpuctrl =
   3190 		CPU_CONTROL_MMU_ENABLE  |
   3191 		CPU_CONTROL_DC_ENABLE   |
   3192 		CPU_CONTROL_WBUF_ENABLE |
   3193 		CPU_CONTROL_32BP_ENABLE |
   3194 		CPU_CONTROL_32BD_ENABLE |
   3195 		CPU_CONTROL_LABT_ENABLE |
   3196 		CPU_CONTROL_UNAL_ENABLE |
   3197 #ifdef ARM_MMU_EXTENDED
   3198 		CPU_CONTROL_XP_ENABLE   |
   3199 #else
   3200 		CPU_CONTROL_SYST_ENABLE |
   3201 #endif
   3202 		CPU_CONTROL_IC_ENABLE;
   3203 
   3204 	/*
   3205 	 * "write as existing" bits
   3206 	 * inverse of this is mask
   3207 	 */
   3208 	cpuctrl_wax =
   3209 		(3 << 30) |
   3210 		(1 << 29) |
   3211 		(1 << 28) |
   3212 		(3 << 26) |
   3213 		(3 << 19) |
   3214 		(1 << 17);
   3215 
   3216 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
   3217 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
   3218 #endif
   3219 
   3220 	cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);
   3221 
   3222 #ifdef __ARMEB__
   3223 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
   3224 #endif
   3225 
   3226 #ifndef ARM_HAS_VBAR
   3227 	if (vector_page == ARM_VECTORS_HIGH)
   3228 		cpuctrl |= CPU_CONTROL_VECRELOC;
   3229 #endif
   3230 
   3231 	auxctrl = armreg_auxctl_read();
   3232 	/*
   3233 	 * This options enables the workaround for the 364296 ARM1136
   3234 	 * r0pX errata (possible cache data corruption with
   3235 	 * hit-under-miss enabled). It sets the undocumented bit 31 in
   3236 	 * the auxiliary control register and the FI bit in the control
   3237 	 * register, thus disabling hit-under-miss without putting the
   3238 	 * processor into full low interrupt latency mode. ARM11MPCore
   3239 	 * is not affected.
   3240 	 */
   3241 	if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1136JS) { /* ARM1136JSr0pX */
   3242 		cpuctrl |= CPU_CONTROL_FI_ENABLE;
   3243 		auxctrl |= ARM1136_AUXCTL_PFI;
   3244 	}
   3245 
   3246 	/*
   3247 	 * This enables the workaround for the following ARM1176 r0pX
   3248 	 * errata.
   3249 	 *
   3250 	 * 394601: In low interrupt latency configuration, interrupted clean
   3251 	 * and invalidate operation may not clean dirty data.
   3252 	 *
   3253 	 * 716151: Clean Data Cache line by MVA can corrupt subsequent
   3254 	 * stores to the same cache line.
   3255 	 *
   3256 	 * 714068: Prefetch Instruction Cache Line or Invalidate Instruction
   3257 	 * Cache Line by MVA can cause deadlock.
   3258 	 */
   3259 	if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */
   3260 		/* 394601 and 716151 */
   3261 		cpuctrl |= CPU_CONTROL_FI_ENABLE;
   3262 		auxctrl |= ARM1176_AUXCTL_FIO;
   3263 
   3264 		/* 714068 */
   3265 		auxctrl |= ARM1176_AUXCTL_PHD;
   3266 	}
   3267 
   3268 	/* Clear out the cache */
   3269 	cpu_idcache_wbinv_all();
   3270 
   3271 	/* Now really make sure they are clean.  */
   3272 	__asm volatile ("mcr\tp15, 0, %0, c7, c7, 0" : : "r"(sbz));
   3273 
   3274 	/* Allow detection code to find the VFP if it's fitted.  */
   3275 	armreg_cpacr_write(0x0fffffff);
   3276 
   3277 	/* Set the control register */
   3278 	curcpu()->ci_ctrl = cpuctrl;
   3279 	cpu_control(~cpuctrl_wax, cpuctrl);
   3280 
   3281 	/* Update auxctlr */
   3282 	armreg_auxctl_write(auxctrl);
   3283 
   3284 	/* And again. */
   3285 	cpu_idcache_wbinv_all();
   3286 }
   3287 #endif	/* CPU_ARM1136 || CPU_ARM1176 */
   3288 
   3289 #ifdef CPU_SA110
/* Boot-argument options recognized by sa110_setup(). */
struct cpu_option sa110_options[] = {
#ifdef COMPAT_12
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa110.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa110.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "sa110.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "sa110.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
   3305 
/*
 * sa110_setup:
 *
 *	Compute and install the CPU control register for the StrongARM
 *	SA-110, then enable the SA-110 clock-switching feature.
 */
void
sa110_setup(char *args)
{
	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE;
#if 0
	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
		 | CPU_CONTROL_CPCLK;
#endif

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, sa110_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

#ifndef ARM_HAS_VBAR
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	curcpu()->ci_ctrl = cpuctrl;
#if 0
	cpu_control(cpuctrlmask, cpuctrl);
#endif
	cpu_control(0xffffffff, cpuctrl);

	/*
	 * enable clockswitching, note that this doesn't read or write to r0,
	 * r0 is just to make it valid asm
	 */
	__asm volatile ("mcr p15, 0, r0, c15, c1, 2");
}
   3354 #endif	/* CPU_SA110 */
   3355 
   3356 #if defined(CPU_SA1100) || defined(CPU_SA1110)
/* Boot-argument options recognized by sa11x0_setup(). */
struct cpu_option sa11x0_options[] = {
#ifdef COMPAT_12
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa11x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa11x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "sa11x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "sa11x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
   3372 
/*
 * sa11x0_setup:
 *
 *	Compute and install the CPU control register for StrongARM
 *	SA-1100/SA-1110 cores.
 */
void
sa11x0_setup(char *args)
{

	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
#if 0
	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
#endif

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, sa11x0_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

#ifndef ARM_HAS_VBAR
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	curcpu()->ci_ctrl = cpuctrl;
	cpu_control(0xffffffff, cpuctrl);
}
   3413 #endif	/* CPU_SA1100 || CPU_SA1110 */
   3414 
   3415 #if defined(CPU_FA526)
/* Boot-argument options recognized by fa526_setup(). */
struct cpu_option fa526_options[] = {
#ifdef COMPAT_12
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
   3427 
/*
 * fa526_setup:
 *
 *	Compute and install the CPU control register for the Faraday
 *	FA526 core.
 */
void
fa526_setup(char *args)
{

	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
#if 0
	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
#endif

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, fa526_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

#ifndef ARM_HAS_VBAR
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	curcpu()->ci_ctrl = cpuctrl;
	cpu_control(0xffffffff, cpuctrl);
}
   3468 #endif	/* CPU_FA526 */
   3469 
   3470 #if defined(CPU_IXP12X0)
/* Boot-argument options recognized by ixp12x0_setup(). */
struct cpu_option ixp12x0_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "ixp12x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "ixp12x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "ixp12x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "ixp12x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
   3482 
/*
 * ixp12x0_setup:
 *
 *	Compute and install the CPU control register for the Intel
 *	IXP12x0.  Only the bits in cpuctrlmask are written.
 */
void
ixp12x0_setup(char *args)
{

	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE;

	/* Bits we are willing to modify in the control register. */
	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_IC_ENABLE
		 | CPU_CONTROL_VECRELOC;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, ixp12x0_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

#ifndef ARM_HAS_VBAR
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	curcpu()->ci_ctrl = cpuctrl;
	/* cpu_control(0xffffffff, cpuctrl); */
	cpu_control(cpuctrlmask, cpuctrl);
}
   3520 #endif /* CPU_IXP12X0 */
   3521 
   3522 #if defined(CPU_XSCALE)
/* Boot-argument options recognized by xscale_setup(). */
struct cpu_option xscale_options[] = {
#ifdef COMPAT_12
	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
#endif	/* COMPAT_12 */
	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "xscale.branchpredict", BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "xscale.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "xscale.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "xscale.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
   3537 
/*
 * xscale_setup:
 *
 *	Compute and install the CPU control register for Intel XScale
 *	cores, then configure write-coalescing in the auxiliary
 *	control register.
 */
void
xscale_setup(char *args)
{
	uint32_t auxctl;

	/*
	 * The XScale Write Buffer is always enabled.  Our option
	 * is to enable/disable coalescing.  Note that bits 6:3
	 * must always be enabled.
	 */

	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
		 | CPU_CONTROL_BPRD_ENABLE;
#if 0
	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
#endif

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

#ifndef ARM_HAS_VBAR
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/*
	 * Set the control register.  Note that bits 6:3 must always
	 * be set to 1.
	 */
	curcpu()->ci_ctrl = cpuctrl;
#if 0
	cpu_control(cpuctrlmask, cpuctrl);
#endif
	cpu_control(0xffffffff, cpuctrl);

	/* Make sure write coalescing is turned on */
	auxctl = armreg_auxctl_read();
#ifdef XSCALE_NO_COALESCE_WRITES
	auxctl |= XSCALE_AUXCTL_K;
#else
	auxctl &= ~XSCALE_AUXCTL_K;
#endif
	armreg_auxctl_write(auxctl);
}
   3601 #endif	/* CPU_XSCALE */
   3602 
   3603 #if defined(CPU_SHEEVA)
/*
 * Boot-argument option table for Marvell Sheeva cores, consumed by
 * parse_cpu_options() in sheeva_setup().  Each entry maps an option
 * name to the CPU control register bits it sets or clears; the second
 * and third fields are presumably the actions taken when the option is
 * enabled vs. disabled (OR = set bits, BIC = clear bits, IGN = ignore)
 * -- NOTE(review): confirm against parse_cpu_options(), which is
 * defined elsewhere in this file.  The NULL entry terminates the table.
 */
struct cpu_option sheeva_options[] = {
	/* Generic cache on/off knobs affect both I- and D-cache enables. */
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	/* CPU-specific variants allow independent I/D cache control. */
	{ "sheeva.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sheeva.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "sheeva.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	/* Write buffer enable/disable. */
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "sheeva.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }	/* table terminator */
};
   3615 
/*
 * sheeva_setup:
 *
 *	One-time CPU initialization for Marvell Sheeva cores: build the
 *	desired CP15 control register value (optionally modified by the
 *	boot-argument string `args'), program the Sheeva extra-control
 *	register, describe the L2 cache (when configured), and install
 *	the new control register value with the caches cleaned around
 *	the transition.
 *
 *	`args' is the kernel boot-argument string parsed against
 *	sheeva_options[]; it may adjust the cache/write-buffer enables.
 */
void
sheeva_setup(char *args)
{
	/* Baseline: MMU, system protection, I/D caches, write buffer,
	 * and branch prediction enabled. */
	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
#if 0
	/* Historical mask of the bits this routine would touch; unused
	 * because the final cpu_control() call below writes all bits. */
	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
	    | CPU_CONTROL_BPRD_ENABLE
	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
#endif

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	/* Trap unaligned accesses unless the kernel config opts out. */
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	/* Let boot arguments override the cache/write-buffer defaults. */
	cpuctrl = parse_cpu_options(args, sheeva_options, cpuctrl);

	/* Enable DCache Streaming Switch and Write Allocate */
	uint32_t sheeva_ext = armreg_sheeva_xctrl_read();

	sheeva_ext |= FC_DCACHE_STREAM_EN | FC_WR_ALLOC_EN;
#ifdef SHEEVA_L2_CACHE
	/* Turn on the L2 cache and its prefetcher (clear the
	 * prefetch-disable bit). */
	sheeva_ext |= FC_L2CACHE_EN;
	sheeva_ext &= ~FC_L2_PREF_DIS;
#endif

	armreg_sheeva_xctrl_write(sheeva_ext);

#ifdef SHEEVA_L2_CACHE
	/*
	 * Describe the secondary cache: unified, PIPT, 256 KB, 4-way,
	 * 32-byte lines.  Write-back unless SHEEVA_L2_CACHE_WT asks for
	 * write-through (the #elif avoids a redundant store when the
	 * write-through type code happens to be 0).
	 */
#ifndef SHEEVA_L2_CACHE_WT
	arm_scache.cache_type = CPU_CT_CTYPE_WB2;
#elif CPU_CT_CTYPE_WT != 0
	arm_scache.cache_type = CPU_CT_CTYPE_WT;
#endif
	arm_scache.cache_unified = 1;
	arm_scache.dcache_type = arm_scache.icache_type = CACHE_TYPE_PIPT;
	arm_scache.dcache_size = arm_scache.icache_size = 256*1024;
	arm_scache.dcache_ways = arm_scache.icache_ways = 4;
	arm_scache.dcache_way_size = arm_scache.icache_way_size =
	    arm_scache.dcache_size / arm_scache.dcache_ways;
	arm_scache.dcache_line_size = arm_scache.icache_line_size = 32;
	arm_scache.dcache_sets = arm_scache.icache_sets =
	    arm_scache.dcache_way_size / arm_scache.dcache_line_size;

	/* Hook up the Sheeva-specific L2 maintenance routines. */
	cpufuncs.cf_sdcache_wb_range = sheeva_sdcache_wb_range;
	cpufuncs.cf_sdcache_inv_range = sheeva_sdcache_inv_range;
	cpufuncs.cf_sdcache_wbinv_range = sheeva_sdcache_wbinv_range;
#endif /* SHEEVA_L2_CACHE */

#ifdef __ARMEB__
	/* Big-endian kernels need the BE bit set in the control reg. */
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

#ifndef ARM_HAS_VBAR
	/* Relocate exception vectors to high memory if requested. */
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/*
	 * Now really make sure they are clean.
	 * NOTE(review): CP15 c7/c7/0 is the "invalidate both I and D
	 * caches" operation; the Rd value (r0 here) should be ignored
	 * by the hardware -- confirm against the core's TRM.
	 */
	__asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );

	/* Set the control register */
	curcpu()->ci_ctrl = cpuctrl;	/* cache value for later readers */
	cpu_control(0xffffffff, cpuctrl);	/* write every bit */

	/* And again. */
	cpu_idcache_wbinv_all();
#ifdef SHEEVA_L2_CACHE
	sheeva_sdcache_wbinv_all();
#endif
}
   3694 #endif	/* CPU_SHEEVA */
   3695