/*	$NetBSD: cpu_subr.c,v 1.6 2025/09/06 02:53:22 riastradh Exp $	*/

/*-
 * Copyright (c) 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nick Hudson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_cputypes.h"
#include "opt_multiprocessor.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cpu_subr.c,v 1.6 2025/09/06 02:53:22 riastradh Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/paravirt_membar.h>
#include <sys/reboot.h>

#include <arm/cpufunc.h>

#ifdef VERBOSE_INIT_ARM
#define VPRINTF(...)	printf(__VA_ARGS__)
#else
#define VPRINTF(...)	__nothing
#endif

#ifdef MULTIPROCESSOR
#define NCPUINFO	MAXCPUS
#else
#define NCPUINFO	1
#endif /* MULTIPROCESSOR */

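/*
 * MPIDR of each possible CPU, indexed by cpu index.  The range
 * designator presets every slot to ~0 as a "no CPU known here"
 * sentinel; startup code records real values for the CPUs it
 * discovers.
 */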
mpidr_t cpu_mpidr[NCPUINFO] = {
	[0 ... NCPUINFO - 1] = ~0,
};

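/*
 * Per-CPU cpu_info pointers, indexed by cpu index.  Slot 0 refers
 * statically to the primary CPU's cpu_info_store[0]; the remaining
 * slots exist only on MULTIPROCESSOR kernels and are filled in as the
 * secondary CPUs come up.
 */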
struct cpu_info *cpu_info[NCPUINFO] __read_mostly = {
	[0] = &cpu_info_store[0]
};

#ifdef MULTIPROCESSOR

#define	CPUINDEX_DIVISOR	(sizeof(u_long) * NBBY)

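/*
 * One bit per cpu index, packed into arrays of u_long:
 * arm_cpu_hatched marks CPUs that have announced themselves via
 * cpu_set_hatched(), and arm_cpu_mbox carries the "go" bits that
 * release them into cpu_hatch().  CPUINDEX_DIVISOR is the number of
 * bits per array element, so cpu index i lives at word
 * i / CPUINDEX_DIVISOR, bit i % CPUINDEX_DIVISOR.
 */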
volatile u_long arm_cpu_hatched[howmany(MAXCPUS, CPUINDEX_DIVISOR)] __cacheline_aligned = { 0 };
volatile u_long arm_cpu_mbox[howmany(MAXCPUS, CPUINDEX_DIVISOR)] __cacheline_aligned = { 0 };
u_int arm_cpu_max = 1;

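/*
 * Release every hatched secondary CPU by posting its mbox bit, then
 * wait for each one to acknowledge by clearing that bit again in
 * cpu_clr_mbox() before adding it to the attached kcpuset.
 */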
void
cpu_boot_secondary_processors(void)
{
	u_int cpuno;

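	/* Boot flag RB_MD1 suppresses starting the secondary processors. */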
	if ((boothowto & RB_MD1) != 0)
		return;

	VPRINTF("%s: starting secondary processors\n", __func__);

	/* send mbox to have secondary processors do cpu_hatch() */
	dmb(ish);	/* store-release matches locore.S/armv6_start.S */
	for (size_t n = 0; n < __arraycount(arm_cpu_mbox); n++)
		atomic_or_ulong(&arm_cpu_mbox[n], arm_cpu_hatched[n]);

	dsb(ishst);
	sev();

	/* wait until all hatched CPUs have completed cpu_hatch() */
	for (cpuno = 1; cpuno < ncpu; cpuno++) {
		if (!cpu_hatched_p(cpuno))
			continue;

		const size_t off = cpuno / CPUINDEX_DIVISOR;
		const u_long bit = __BIT(cpuno % CPUINDEX_DIVISOR);

		/* load-acquire matches cpu_clr_mbox */
		while (atomic_load_acquire(&arm_cpu_mbox[off]) & bit) {
			__asm __volatile ("wfe");
		}
		/* Add processor to kcpuset */
		kcpuset_set(kcpuset_attached, cpuno);
	}

	VPRINTF("%s: secondary processors hatched\n", __func__);
}

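/*
 * Report whether the CPU with the given index has announced itself
 * via cpu_set_hatched().
 */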
bool
cpu_hatched_p(u_int cpuindex)
{
	const u_int off = cpuindex / CPUINDEX_DIVISOR;
	const u_int bit = cpuindex % CPUINDEX_DIVISOR;

	/* load-acquire matches cpu_set_hatched */
	return (atomic_load_acquire(&arm_cpu_hatched[off]) & __BIT(bit)) != 0;
}

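/*
 * Mark the CPU with the given index as hatched, making it visible to
 * cpu_hatched_p(), and wake any waiter sleeping in wfe.
 */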
void
cpu_set_hatched(int cpuindex)
{

	const size_t off = cpuindex / CPUINDEX_DIVISOR;
	const u_long bit = __BIT(cpuindex % CPUINDEX_DIVISOR);

	dmb(ish);		/* store-release matches cpu_hatched_p */
	atomic_or_ulong(&arm_cpu_hatched[off], bit);
	dsb(ishst);
	sev();
}

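/*
 * Clear the mbox bit for the CPU with the given index once it has
 * finished hatching, releasing cpu_boot_secondary_processors() from
 * its wfe loop.
 */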
void
cpu_clr_mbox(int cpuindex)
{

	const size_t off = cpuindex / CPUINDEX_DIVISOR;
	const u_long bit = __BIT(cpuindex % CPUINDEX_DIVISOR);

	/* Notify cpu_boot_secondary_processors that we're done */
	dmb(ish);		/* store-release */
	atomic_and_ulong(&arm_cpu_mbox[off], ~bit);
	dsb(ishst);
	sev();
}

#endif /* MULTIPROCESSOR */

#if defined _ARM_ARCH_6 || defined _ARM_ARCH_7 /* see below regarding armv<6 */
void
paravirt_membar_sync(void)
{

	/*
	 * Store-before-load ordering with respect to matching logic
	 * on the hypervisor side.
	 *
	 * This is the same as membar_sync, but guaranteed never to be
	 * conditionalized or hotpatched away even on uniprocessor
	 * builds and boots -- because under virtualization, we still
	 * have to coordinate with a `device' backed by a hypervisor
	 * that is potentially on another physical CPU even if we
	 * observe only one virtual CPU as the guest.
	 *
	 * Prior to armv6, there was no data memory barrier
	 * instruction.  Such CPUs presumably don't exist in
	 * multiprocessor configurations.  But what if we're running a
	 * _kernel_ built for a uniprocessor armv5 CPU, as a virtual
	 * machine guest of a _host_ with a newer multiprocessor CPU?
	 * How do we enforce store-before-load ordering for a
	 * paravirtualized device driver, coordinating with a host
	 * software `device' potentially on another CPU?  You'll have
	 * to answer that before you can use virtio drivers!
	 */
	dmb(ish);
}
#endif	/* defined _ARM_ARCH_6 || defined _ARM_ARCH_7 */
    179