/*	$NetBSD: arm32_machdep.c,v 1.111 2016/07/15 05:59:46 skrll Exp $	*/

/*
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe
 *	for the NetBSD Project.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Machine dependent functions for kernel setup
 *
 * Created      : 17/09/94
 * Updated	: 18/04/01 updated for new wscons
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: arm32_machdep.c,v 1.111 2016/07/15 05:59:46 skrll Exp $");

#include "opt_modular.h"
#include "opt_md.h"
#include "opt_pmap_debug.h"
#include "opt_multiprocessor.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/reboot.h>
#include <sys/proc.h>
#include <sys/kauth.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/mount.h>
#include <sys/buf.h>
#include <sys/msgbuf.h>
#include <sys/device.h>
#include <sys/sysctl.h>
#include <sys/cpu.h>
#include <sys/intr.h>
#include <sys/module.h>
#include <sys/atomic.h>
#include <sys/xcall.h>
#include <sys/ipi.h>

#include <uvm/uvm_extern.h>

#include <dev/cons.h>
#include <dev/mm.h>

#include <arm/locore.h>

#include <arm/arm32/machdep.h>

#include <machine/bootconfig.h>
#include <machine/pcb.h>

void (*cpu_reset_address)(void);	/* Used by locore */
paddr_t cpu_reset_address_paddr;	/* Used by locore */

struct vm_map *phys_map = NULL;

#if defined(MEMORY_DISK_HOOKS) && !defined(MEMORY_DISK_ROOT_SIZE)
extern size_t md_root_size;		/* Memory disc size */
#endif	/* MEMORY_DISK_HOOKS && !MEMORY_DISK_ROOT_SIZE */

pv_addr_t kernelstack;
pv_addr_t abtstack;
pv_addr_t fiqstack;
pv_addr_t irqstack;
pv_addr_t undstack;
pv_addr_t idlestack;

void *	msgbufaddr;
extern paddr_t msgbufphys;

int kernel_debug = 0;
int cpu_printfataltraps = 0;
int cpu_fpu_present;
int cpu_hwdiv_present;
int cpu_neon_present;
int cpu_simd_present;
int cpu_simdex_present;
int cpu_umull_present;
int cpu_synchprim_present;
int cpu_unaligned_sigbus;
const char *cpu_arch = "";

int cpu_instruction_set_attributes[6];
int cpu_memory_model_features[4];
int cpu_processor_features[2];
int cpu_media_and_vfp_features[2];

/* exported variable to be filled in by the bootloaders */
char *booted_kernel;

/* Prototypes */

void data_abort_handler(trapframe_t *frame);
void prefetch_abort_handler(trapframe_t *frame);
extern void configure(void);
/*
 * arm32_vector_init:
 *
 *	Initialize the vector page, and select whether or not to
 *	relocate the vectors.
 *
 *	NOTE: We expect the vector page to be mapped at its expected
 *	destination.
 */
void
arm32_vector_init(vaddr_t va, int which)
{
#if defined(CPU_ARMV7) || defined(CPU_ARM11) || defined(ARM_HAS_VBAR)
	/*
	 * If this processor has the security extension, don't bother
	 * to move/map the vector page.  Simply point VBAR to the copy
	 * that exists in the .text segment.
	 */
#ifndef ARM_HAS_VBAR
	if (va == ARM_VECTORS_LOW
	    && (armreg_pfr1_read() & ARM_PFR1_SEC_MASK) != 0) {
#endif
		extern const uint32_t page0rel[];
		vector_page = (vaddr_t)page0rel;
		KASSERT((vector_page & 0x1f) == 0);
		armreg_vbar_write(vector_page);
#ifdef VERBOSE_INIT_ARM
		printf(" vbar=%p", page0rel);
#endif
		cpu_control(CPU_CONTROL_VECRELOC, 0);
		return;
#ifndef ARM_HAS_VBAR
	}
#endif
#endif
#ifndef ARM_HAS_VBAR
	if (CPU_IS_PRIMARY(curcpu())) {
		extern unsigned int page0[], page0_data[];
		unsigned int *vectors = (int *) va;
		unsigned int *vectors_data = vectors + (page0_data - page0);
		int vec;

		/*
		 * Loop through the vectors we're taking over, and copy the
		 * vector's insn and data word.
		 */
		for (vec = 0; vec < ARM_NVEC; vec++) {
			if ((which & (1 << vec)) == 0) {
				/* Don't want to take over this vector. */
				continue;
			}
			vectors[vec] = page0[vec];
			vectors_data[vec] = page0_data[vec];
		}

		/* Now sync the vectors. */
		cpu_icache_sync_range(va, (ARM_NVEC * 2) * sizeof(u_int));

		vector_page = va;
	}

	if (va == ARM_VECTORS_HIGH) {
		/*
		 * Assume the MD caller knows what it's doing here, and
		 * really does want the vector page relocated.
		 *
		 * Note: This has to be done here (and not just in
		 * cpu_setup()) because the vector page needs to be
		 * accessible *before* cpu_startup() is called.
		 * Think ddb(9) ...
		 *
		 * NOTE: If the CPU control register is not readable,
		 * this will totally fail!  We'll just assume that
		 * any system that has high vector support has a
		 * readable CPU control register, for now.  If we
		 * ever encounter one that does not, we'll have to
		 * rethink this.
		 */
		cpu_control(CPU_CONTROL_VECRELOC, CPU_CONTROL_VECRELOC);
	}
#endif
}
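
/*
 * Usage sketch (an illustration with assumptions, not taken from this
 * file): a port's early MD start-up code normally maps the chosen vector
 * page and then takes over all of the vectors with something like
 *
 *	arm32_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);
 *
 * where ARM_VEC_ALL is assumed to be the "take over every vector" mask
 * from <arm/arm32/machdep.h>.
 */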

/*
 * Debug function just to park the CPU
 */

void
halt(void)
{
	while (1)
		cpu_sleep(0);
}


/* Sync the discs, unmount the filesystems, and adjust the todr */

void
bootsync(void)
{
	static bool bootsyncdone = false;

	if (bootsyncdone) return;

	bootsyncdone = true;

	/* Make sure we can still manage to do things */
	if (GetCPSR() & I32_bit) {
		/*
		 * If we get here then boot has been called without RB_NOSYNC
		 * and interrupts were disabled. This means the boot() call
		 * did not come from a user process, e.g. shutdown, but must
		 * have come from somewhere in the kernel.
		 */
		IRQenable;
		printf("Warning: IRQs disabled during boot()\n");
	}

	vfs_shutdown();

	resettodr();
}

/*
 * void cpu_startup(void)
 *
 * Machine dependent startup code.
 *
 */
void
cpu_startup(void)
{
	vaddr_t minaddr;
	vaddr_t maxaddr;
	char pbuf[9];

	/*
	 * Until we have better locking, we have to live under the kernel lock.
	 */
	//KERNEL_LOCK(1, NULL);

	/* Set the CPU control register */
	cpu_setup(boot_args);

#ifndef ARM_HAS_VBAR
	/* Lock down zero page */
	vector_page_setprot(VM_PROT_READ);
#endif

	/*
	 * Give pmap a chance to set up a few more things now that the vm
	 * is initialised
	 */
	pmap_postinit();

	/*
	 * Initialize error message buffer (at end of core).
	 */

	/* msgbufphys was setup during the secondary boot strap */
	if (!pmap_extract(pmap_kernel(), (vaddr_t)msgbufaddr, NULL)) {
		for (u_int loop = 0; loop < btoc(MSGBUFSIZE); ++loop) {
			pmap_kenter_pa((vaddr_t)msgbufaddr + loop * PAGE_SIZE,
			    msgbufphys + loop * PAGE_SIZE,
			    VM_PROT_READ|VM_PROT_WRITE, 0);
		}
	}
	pmap_update(pmap_kernel());
	initmsgbuf(msgbufaddr, round_page(MSGBUFSIZE));

	/*
	 * Identify ourselves for the msgbuf (everything printed earlier will
	 * not be buffered).
	 */
	printf("%s%s", copyright, version);

	format_bytes(pbuf, sizeof(pbuf), arm_ptob(physmem));
	printf("total memory = %s\n", pbuf);

	minaddr = 0;

	/*
	 * Allocate a submap for physio
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				   VM_PHYS_SIZE, 0, false, NULL);

	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
	printf("avail memory = %s\n", pbuf);

	struct lwp * const l = &lwp0;
	struct pcb * const pcb = lwp_getpcb(l);
	pcb->pcb_ksp = uvm_lwp_getuarea(l) + USPACE_SVC_STACK_TOP;
	lwp_settrapframe(l, (struct trapframe *)pcb->pcb_ksp - 1);
}

/*
 * machine dependent system variables.
 */
static int
sysctl_machdep_booted_device(SYSCTLFN_ARGS)
{
	struct sysctlnode node;

	if (booted_device == NULL)
		return (EOPNOTSUPP);

	node = *rnode;
	node.sysctl_data = __UNCONST(device_xname(booted_device));
	node.sysctl_size = strlen(device_xname(booted_device)) + 1;
	return (sysctl_lookup(SYSCTLFN_CALL(&node)));
}

static int
sysctl_machdep_booted_kernel(SYSCTLFN_ARGS)
{
	struct sysctlnode node;

	if (booted_kernel == NULL || booted_kernel[0] == '\0')
		return (EOPNOTSUPP);

	node = *rnode;
	node.sysctl_data = booted_kernel;
	node.sysctl_size = strlen(booted_kernel) + 1;
	return (sysctl_lookup(SYSCTLFN_CALL(&node)));
}

static int
sysctl_machdep_cpu_arch(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	node.sysctl_data = __UNCONST(cpu_arch);
	node.sysctl_size = strlen(cpu_arch) + 1;
	return sysctl_lookup(SYSCTLFN_CALL(&node));
}

static int
sysctl_machdep_powersave(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	int error, newval;

	newval = cpu_do_powersave;
	node.sysctl_data = &newval;
	if (cpufuncs.cf_sleep == (void *) cpufunc_nullop)
		node.sysctl_flags &= ~CTLFLAG_READWRITE;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL || newval == cpu_do_powersave)
		return (error);

	if (newval < 0 || newval > 1)
		return (EINVAL);
	cpu_do_powersave = newval;

	return (0);
}
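
/*
 * Example of the resulting knob (a usage sketch, not text from the original
 * file): the handler above backs "machdep.powersave", so
 *
 *	sysctl -w machdep.powersave=1
 *
 * stores 1 in cpu_do_powersave; values other than 0 or 1 return EINVAL, and
 * the node is forced read-only when the CPU has no real sleep function.
 */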

static int
sysctl_hw_machine_arch(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	node.sysctl_data = l->l_proc->p_md.md_march;
	node.sysctl_size = strlen(l->l_proc->p_md.md_march) + 1;
	return sysctl_lookup(SYSCTLFN_CALL(&node));
}

SYSCTL_SETUP(sysctl_machdep_setup, "sysctl machdep subtree setup")
{

	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "machdep", NULL,
		       NULL, 0, NULL, 0,
		       CTL_MACHDEP, CTL_EOL);

	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "debug", NULL,
		       NULL, 0, &kernel_debug, 0,
		       CTL_MACHDEP, CPU_DEBUG, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_STRING, "booted_device", NULL,
		       sysctl_machdep_booted_device, 0, NULL, 0,
		       CTL_MACHDEP, CPU_BOOTED_DEVICE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_STRING, "booted_kernel", NULL,
		       sysctl_machdep_booted_kernel, 0, NULL, 0,
		       CTL_MACHDEP, CPU_BOOTED_KERNEL, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_STRUCT, "console_device", NULL,
		       sysctl_consdev, 0, NULL, sizeof(dev_t),
		       CTL_MACHDEP, CPU_CONSDEV, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_STRING, "cpu_arch", NULL,
		       sysctl_machdep_cpu_arch, 0, NULL, 0,
		       CTL_MACHDEP, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "powersave", NULL,
		       sysctl_machdep_powersave, 0, &cpu_do_powersave, 0,
		       CTL_MACHDEP, CPU_POWERSAVE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
		       CTLTYPE_INT, "cpu_id", NULL,
		       NULL, curcpu()->ci_arm_cpuid, NULL, 0,
		       CTL_MACHDEP, CTL_CREATE, CTL_EOL);
#ifdef FPU_VFP
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		       CTLTYPE_INT, "fpu_id", NULL,
		       NULL, 0, &cpu_info_store.ci_vfp_id, 0,
		       CTL_MACHDEP, CTL_CREATE, CTL_EOL);
#endif
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		       CTLTYPE_INT, "fpu_present", NULL,
		       NULL, 0, &cpu_fpu_present, 0,
		       CTL_MACHDEP, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		       CTLTYPE_INT, "hwdiv_present", NULL,
		       NULL, 0, &cpu_hwdiv_present, 0,
		       CTL_MACHDEP, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		       CTLTYPE_INT, "neon_present", NULL,
		       NULL, 0, &cpu_neon_present, 0,
		       CTL_MACHDEP, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		       CTLTYPE_STRUCT, "id_isar", NULL,
		       NULL, 0,
		       cpu_instruction_set_attributes,
		       sizeof(cpu_instruction_set_attributes),
		       CTL_MACHDEP, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		       CTLTYPE_STRUCT, "id_mmfr", NULL,
		       NULL, 0,
		       cpu_memory_model_features,
		       sizeof(cpu_memory_model_features),
		       CTL_MACHDEP, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		       CTLTYPE_STRUCT, "id_pfr", NULL,
		       NULL, 0,
		       cpu_processor_features,
		       sizeof(cpu_processor_features),
		       CTL_MACHDEP, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		       CTLTYPE_STRUCT, "id_mvfr", NULL,
		       NULL, 0,
		       cpu_media_and_vfp_features,
		       sizeof(cpu_media_and_vfp_features),
		       CTL_MACHDEP, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		       CTLTYPE_INT, "simd_present", NULL,
		       NULL, 0, &cpu_simd_present, 0,
		       CTL_MACHDEP, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		       CTLTYPE_INT, "simdex_present", NULL,
		       NULL, 0, &cpu_simdex_present, 0,
		       CTL_MACHDEP, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		       CTLTYPE_INT, "synchprim_present", NULL,
		       NULL, 0, &cpu_synchprim_present, 0,
		       CTL_MACHDEP, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "printfataltraps", NULL,
		       NULL, 0, &cpu_printfataltraps, 0,
		       CTL_MACHDEP, CTL_CREATE, CTL_EOL);
	cpu_unaligned_sigbus = !CPU_IS_ARMV6_P() && !CPU_IS_ARMV7_P();
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		       CTLTYPE_INT, "unaligned_sigbus",
		       SYSCTL_DESCR("Do SIGBUS for fixed unaligned accesses"),
		       NULL, 0, &cpu_unaligned_sigbus, 0,
		       CTL_MACHDEP, CTL_CREATE, CTL_EOL);


	/*
	 * We need to override the usual CTL_HW HW_MACHINE_ARCH node so that
	 * we return the right machine_arch based on the running executable.
	 */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		       CTLTYPE_STRING, "machine_arch",
		       SYSCTL_DESCR("Machine CPU class"),
		       sysctl_hw_machine_arch, 0, NULL, 0,
		       CTL_HW, HW_MACHINE_ARCH, CTL_EOL);
}
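
/*
 * Example (typical usage, assumed rather than taken from this file): the
 * nodes created above appear under "machdep" and can be read from userland
 * with sysctl(8), e.g.
 *
 *	sysctl machdep.cpu_arch machdep.fpu_present machdep.neon_present
 *
 * while read-write nodes such as machdep.printfataltraps can be changed
 * with "sysctl -w".
 */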

void
parse_mi_bootargs(char *args)
{
	int integer;

	if (get_bootconf_option(args, "single", BOOTOPT_TYPE_BOOLEAN, &integer)
	    || get_bootconf_option(args, "-s", BOOTOPT_TYPE_BOOLEAN, &integer))
		if (integer)
			boothowto |= RB_SINGLE;
	if (get_bootconf_option(args, "kdb", BOOTOPT_TYPE_BOOLEAN, &integer)
	    || get_bootconf_option(args, "-k", BOOTOPT_TYPE_BOOLEAN, &integer)
	    || get_bootconf_option(args, "-d", BOOTOPT_TYPE_BOOLEAN, &integer))
		if (integer)
			boothowto |= RB_KDB;
	if (get_bootconf_option(args, "ask", BOOTOPT_TYPE_BOOLEAN, &integer)
	    || get_bootconf_option(args, "-a", BOOTOPT_TYPE_BOOLEAN, &integer))
		if (integer)
			boothowto |= RB_ASKNAME;

#ifdef PMAP_DEBUG
	if (get_bootconf_option(args, "pmapdebug", BOOTOPT_TYPE_INT, &integer)) {
		pmap_debug_level = integer;
		pmap_debug(pmap_debug_level);
	}
#endif	/* PMAP_DEBUG */

/*	if (get_bootconf_option(args, "nbuf", BOOTOPT_TYPE_INT, &integer))
		bufpages = integer;*/

#if defined(MEMORY_DISK_HOOKS) && !defined(MEMORY_DISK_ROOT_SIZE)
	if (get_bootconf_option(args, "memorydisc", BOOTOPT_TYPE_INT, &integer)
	    || get_bootconf_option(args, "memorydisk", BOOTOPT_TYPE_INT, &integer)) {
		md_root_size = integer;
		md_root_size *= 1024;
		if (md_root_size < 32*1024)
			md_root_size = 32*1024;
		if (md_root_size > 2048*1024)
			md_root_size = 2048*1024;
	}
#endif	/* MEMORY_DISK_HOOKS && !MEMORY_DISK_ROOT_SIZE */

	if (get_bootconf_option(args, "quiet", BOOTOPT_TYPE_BOOLEAN, &integer)
	    || get_bootconf_option(args, "-q", BOOTOPT_TYPE_BOOLEAN, &integer))
		if (integer)
			boothowto |= AB_QUIET;
	if (get_bootconf_option(args, "verbose", BOOTOPT_TYPE_BOOLEAN, &integer)
	    || get_bootconf_option(args, "-v", BOOTOPT_TYPE_BOOLEAN, &integer))
		if (integer)
			boothowto |= AB_VERBOSE;
	if (get_bootconf_option(args, "debug", BOOTOPT_TYPE_BOOLEAN, &integer)
	    || get_bootconf_option(args, "-x", BOOTOPT_TYPE_BOOLEAN, &integer))
		if (integer)
			boothowto |= AB_DEBUG;
}
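
/*
 * Illustrative example (assumed boot arguments, not from the original
 * file): a boot command line such as
 *
 *	"-s -v memorydisk=1024"
 *
 * would set RB_SINGLE and AB_VERBOSE in boothowto and, on a kernel built
 * with MEMORY_DISK_HOOKS, size the memory disk at 1024 KiB (the value is
 * clamped to the 32..2048 KiB range above).
 */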

#ifdef __HAVE_FAST_SOFTINTS
#if IPL_SOFTSERIAL != IPL_SOFTNET + 1
#error IPLs are screwed up
#elif IPL_SOFTNET != IPL_SOFTBIO + 1
#error IPLs are screwed up
#elif IPL_SOFTBIO != IPL_SOFTCLOCK + 1
#error IPLs are screwed up
#elif !(IPL_SOFTCLOCK > IPL_NONE)
#error IPLs are screwed up
#elif (IPL_NONE != 0)
#error IPLs are screwed up
#endif

#ifndef __HAVE_PIC_FAST_SOFTINTS
#define	SOFTINT2IPLMAP \
	(((IPL_SOFTSERIAL - IPL_SOFTCLOCK) << (SOFTINT_SERIAL * 4)) | \
	 ((IPL_SOFTNET    - IPL_SOFTCLOCK) << (SOFTINT_NET    * 4)) | \
	 ((IPL_SOFTBIO    - IPL_SOFTCLOCK) << (SOFTINT_BIO    * 4)) | \
	 ((IPL_SOFTCLOCK  - IPL_SOFTCLOCK) << (SOFTINT_CLOCK  * 4)))
#define	SOFTINT2IPL(l)	((SOFTINT2IPLMAP >> ((l) * 4)) & 0x0f)
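
/*
 * Worked example of the macros above: each softint level gets a 4-bit
 * field in SOFTINT2IPLMAP holding its IPL offset from IPL_SOFTCLOCK, so
 * SOFTINT2IPL(SOFTINT_NET) extracts the nibble stored for SOFTINT_NET and
 * evaluates to IPL_SOFTNET - IPL_SOFTCLOCK, i.e. 2 given the consecutive
 * IPL ordering asserted above.
 */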

/*
 * This returns a mask of the softint IPLs that may be dispatched at <ipl>:
 * SOFTIPLMASK(IPL_NONE)	= 0x0000000f
 * SOFTIPLMASK(IPL_SOFTCLOCK)	= 0x0000000e
 * SOFTIPLMASK(IPL_SOFTBIO)	= 0x0000000c
 * SOFTIPLMASK(IPL_SOFTNET)	= 0x00000008
 * SOFTIPLMASK(IPL_SOFTSERIAL)	= 0x00000000
 */
#define	SOFTIPLMASK(ipl) ((0x0f << (ipl)) & 0x0f)

void softint_switch(lwp_t *, int);

void
softint_trigger(uintptr_t mask)
{
	curcpu()->ci_softints |= mask;
}

void
softint_init_md(lwp_t *l, u_int level, uintptr_t *machdep)
{
	lwp_t ** lp = &l->l_cpu->ci_softlwps[level];
	KASSERT(*lp == NULL || *lp == l);
	*lp = l;
	*machdep = 1 << SOFTINT2IPL(level);
	KASSERT(level != SOFTINT_CLOCK || *machdep == (1 << (IPL_SOFTCLOCK - IPL_SOFTCLOCK)));
	KASSERT(level != SOFTINT_BIO || *machdep == (1 << (IPL_SOFTBIO - IPL_SOFTCLOCK)));
	KASSERT(level != SOFTINT_NET || *machdep == (1 << (IPL_SOFTNET - IPL_SOFTCLOCK)));
	KASSERT(level != SOFTINT_SERIAL || *machdep == (1 << (IPL_SOFTSERIAL - IPL_SOFTCLOCK)));
}

void
dosoftints(void)
{
	struct cpu_info * const ci = curcpu();
	const int opl = ci->ci_cpl;
	const uint32_t softiplmask = SOFTIPLMASK(opl);

	splhigh();
	for (;;) {
		u_int softints = ci->ci_softints & softiplmask;
		KASSERT((softints != 0) == ((ci->ci_softints >> opl) != 0));
		KASSERT(opl == IPL_NONE || (softints & (1 << (opl - IPL_SOFTCLOCK))) == 0);
		if (softints == 0) {
			splx(opl);
			return;
		}
#define	DOSOFTINT(n) \
		if (ci->ci_softints & (1 << (IPL_SOFT ## n - IPL_SOFTCLOCK))) { \
			ci->ci_softints &= \
			    ~(1 << (IPL_SOFT ## n - IPL_SOFTCLOCK)); \
			softint_switch(ci->ci_softlwps[SOFTINT_ ## n], \
			    IPL_SOFT ## n); \
			continue; \
		}
		DOSOFTINT(SERIAL);
		DOSOFTINT(NET);
		DOSOFTINT(BIO);
		DOSOFTINT(CLOCK);
		panic("dosoftints wtf (softints=%u?, ipl=%d)", softints, opl);
	}
}
#endif /* !__HAVE_PIC_FAST_SOFTINTS */
#endif /* __HAVE_FAST_SOFTINTS */
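
/*
 * Rough sketch of the intended flow (an assumption drawn from the code
 * above, not a statement from the original author): scheduling a softint
 * ends up in softint_trigger(), which only sets the per-CPU pending bit for
 * that softint IPL; the bit is later serviced by dosoftints(), typically
 * run when the IPL is being lowered, which hands off to the matching
 * softint lwp via softint_switch().
 */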

#ifdef MODULAR
/*
 * Push any modules loaded by the boot loader.
 */
void
module_init_md(void)
{
}
#endif /* MODULAR */

int
mm_md_physacc(paddr_t pa, vm_prot_t prot)
{
	if (pa >= physical_start && pa < physical_end)
		return 0;

	return kauth_authorize_machdep(kauth_cred_get(),
	    KAUTH_MACHDEP_UNMANAGEDMEM, NULL, NULL, NULL, NULL);
}
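
/*
 * Usage note (an assumption based on the MI <dev/mm.h> interface, not text
 * from the original file): this is the machine-dependent access check
 * consulted by the MI /dev/mem and /dev/kmem code.  Accesses that fall
 * inside RAM are always allowed; anything outside it additionally needs the
 * KAUTH_MACHDEP_UNMANAGEDMEM privilege.
 */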

#ifdef __HAVE_CPU_UAREA_ALLOC_IDLELWP
vaddr_t
cpu_uarea_alloc_idlelwp(struct cpu_info *ci)
{
	const vaddr_t va = idlestack.pv_va + ci->ci_cpuid * USPACE;
	// printf("%s: %s: va=%lx\n", __func__, ci->ci_data.cpu_name, va);
	return va;
}
#endif
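
/*
 * Worked example (hypothetical numbers, for illustration only): the idle
 * lwp uareas are carved consecutively out of idlestack, so assuming a
 * USPACE of 8 KiB, the idle uarea of cpu2 would start 16 KiB past
 * idlestack.pv_va.
 */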

#ifdef MULTIPROCESSOR
void
cpu_boot_secondary_processors(void)
{
#ifdef VERBOSE_INIT_ARM
	printf("%s: writing mbox with %#x\n", __func__, arm_cpu_hatched);
#endif
	arm_cpu_mbox = arm_cpu_hatched;
	membar_producer();
#ifdef _ARM_ARCH_7
	__asm __volatile("sev; sev; sev");
#endif
	while (arm_cpu_mbox) {
		__asm("wfe");
	}
}

void
xc_send_ipi(struct cpu_info *ci)
{
	KASSERT(kpreempt_disabled());
	KASSERT(curcpu() != ci);

	intr_ipi_send(ci != NULL ? ci->ci_kcpuset : NULL, IPI_XCALL);
}

void
cpu_ipi(struct cpu_info *ci)
{
	KASSERT(kpreempt_disabled());
	KASSERT(curcpu() != ci);

	intr_ipi_send(ci != NULL ? ci->ci_kcpuset : NULL, IPI_GENERIC);
}

#endif /* MULTIPROCESSOR */

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
bool
mm_md_direct_mapped_phys(paddr_t pa, vaddr_t *vap)
{
	bool rv;
	vaddr_t va = pmap_direct_mapped_phys(pa, &rv, 0);
	if (rv) {
		*vap = va;
	}
	return rv;
}
#endif

bool
mm_md_page_color(paddr_t pa, int *colorp)
{
	*colorp = atop(pa & arm_cache_prefer_mask);

	return arm_cache_prefer_mask ? false : true;
}
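
/*
 * Worked example (hypothetical numbers): on a VIPT-cached CPU with
 * arm_cache_prefer_mask == 0x7000 and 4 KiB pages, a physical address of
 * 0x00804000 gives atop(0x00804000 & 0x7000) == atop(0x4000) == 4, i.e.
 * page colour 4.  The function returns true only when
 * arm_cache_prefer_mask is zero, meaning any colour is acceptable.
 */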