/* $NetBSD: cpu_machdep.c,v 1.16 2024/12/30 19:13:48 jmcneill Exp $ */

/*-
 * Copyright (c) 2014, 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(1, "$NetBSD: cpu_machdep.c,v 1.16 2024/12/30 19:13:48 jmcneill Exp $");

#include "opt_multiprocessor.h"

#define _INTR_PRIVATE

#include <sys/param.h>
#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/intr.h>
#include <sys/kmem.h>
#include <sys/xcall.h>

#include <aarch64/armreg.h>
#include <aarch64/db_machdep.h>
#include <aarch64/frame.h>
#include <aarch64/machdep.h>
#include <aarch64/pcb.h>
#include <aarch64/userret.h>
#include <aarch64/cpufunc.h>

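/*
 * Idle method used by cpu_idle() below.  This defaults to a plain
 * wait-for-interrupt; it is a hook, presumably so that platform code
 * can substitute a different idle method where WFI is not the best
 * choice.
 */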
void (*arm_cpu_idle)(void) = aarch64_cpu_idle_wfi;

#ifdef __HAVE_FAST_SOFTINTS
#if IPL_VM != IPL_SOFTSERIAL + 1
#error IPLs are screwed up
#elif IPL_SOFTSERIAL != IPL_SOFTNET + 1
#error IPLs are screwed up
#elif IPL_SOFTNET != IPL_SOFTBIO + 1
#error IPLs are screwed up
#elif IPL_SOFTBIO != IPL_SOFTCLOCK + 1
#error IPLs are screwed up
#elif !(IPL_SOFTCLOCK > IPL_NONE)
#error IPLs are screwed up
#elif (IPL_NONE != 0)
#error IPLs are screwed up
#endif

#ifndef __HAVE_PIC_FAST_SOFTINTS
#define SOFTINT2IPLMAP \
	(((IPL_SOFTSERIAL - IPL_SOFTCLOCK) << (SOFTINT_SERIAL * 4)) | \
	 ((IPL_SOFTNET    - IPL_SOFTCLOCK) << (SOFTINT_NET    * 4)) | \
	 ((IPL_SOFTBIO    - IPL_SOFTCLOCK) << (SOFTINT_BIO    * 4)) | \
	 ((IPL_SOFTCLOCK  - IPL_SOFTCLOCK) << (SOFTINT_CLOCK  * 4)))
#define SOFTINT2IPL(l)	((SOFTINT2IPLMAP >> ((l) * 4)) & 0x0f)
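
/*
 * A compile-time sketch of the mapping: SOFTINT2IPLMAP packs, for each
 * SOFTINT_* level, the offset of its IPL from IPL_SOFTCLOCK into the
 * 4-bit nibble indexed by that level.  With the IPL ordering enforced
 * above and the usual level numbering (SOFTINT_CLOCK = 0 ...
 * SOFTINT_SERIAL = 3), the map evaluates to 0x3210, e.g.
 * SOFTINT2IPL(SOFTINT_NET) == (0x3210 >> (2 * 4)) & 0x0f == 2.
 */
CTASSERT(SOFTINT2IPL(SOFTINT_CLOCK)  == IPL_SOFTCLOCK  - IPL_SOFTCLOCK);
CTASSERT(SOFTINT2IPL(SOFTINT_BIO)    == IPL_SOFTBIO    - IPL_SOFTCLOCK);
CTASSERT(SOFTINT2IPL(SOFTINT_NET)    == IPL_SOFTNET    - IPL_SOFTCLOCK);
CTASSERT(SOFTINT2IPL(SOFTINT_SERIAL) == IPL_SOFTSERIAL - IPL_SOFTCLOCK);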

/*
 * This returns a mask of the softint levels that may be dispatched
 * while running at <ipl>.
 */
#define SOFTIPLMASK(ipl) ((0x0f << (ipl)) & 0x0f)
CTASSERT(SOFTIPLMASK(IPL_NONE)		== 0x0000000f);
CTASSERT(SOFTIPLMASK(IPL_SOFTCLOCK)	== 0x0000000e);
CTASSERT(SOFTIPLMASK(IPL_SOFTBIO)	== 0x0000000c);
CTASSERT(SOFTIPLMASK(IPL_SOFTNET)	== 0x00000008);
CTASSERT(SOFTIPLMASK(IPL_SOFTSERIAL)	== 0x00000000);

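/*
 * Mark a softint as pending on the current CPU.  <mask> is the MD
 * cookie computed by softint_init_md() below: a single bit indexed by
 * the softint's IPL offset from IPL_SOFTCLOCK.  The softint actually
 * runs from dosoftints() once the IPL drops below its level.
 */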
void
softint_trigger(uintptr_t mask)
{
	curcpu()->ci_softints |= mask;
}

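/*
 * Record the softint LWP for <level> on its CPU and hand back the MD
 * cookie that softint_trigger() will OR into ci_softints; the KASSERTs
 * below spell out the expected cookie for each level.
 */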
void
softint_init_md(lwp_t *l, u_int level, uintptr_t *machdep)
{
	lwp_t ** lp = &l->l_cpu->ci_softlwps[level];
	KASSERT(*lp == NULL || *lp == l);
	*lp = l;
	*machdep = 1 << SOFTINT2IPL(level);
	KASSERT(level != SOFTINT_CLOCK ||
	    *machdep == (1 << (IPL_SOFTCLOCK  - IPL_SOFTCLOCK)));
	KASSERT(level != SOFTINT_BIO ||
	    *machdep == (1 << (IPL_SOFTBIO    - IPL_SOFTCLOCK)));
	KASSERT(level != SOFTINT_NET ||
	    *machdep == (1 << (IPL_SOFTNET    - IPL_SOFTCLOCK)));
	KASSERT(level != SOFTINT_SERIAL ||
	    *machdep == (1 << (IPL_SOFTSERIAL - IPL_SOFTCLOCK)));
}

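/*
 * Dispatch every softint that is pending and unmasked at the current
 * IPL, highest level first, by switching to the matching softint LWP.
 * Called with kernel preemption disabled; a deferred kernel preemption
 * request is serviced on the way out.
 */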
void
dosoftints(void)
{
	struct cpu_info * const ci = curcpu();
	const int opl = ci->ci_cpl;
	const uint32_t softiplmask = SOFTIPLMASK(opl);
	int s;

	KDASSERT(kpreempt_disabled());

	s = splhigh();
	KASSERT(s == opl);
	for (;;) {
		u_int softints = ci->ci_softints & softiplmask;
		KASSERT((softints != 0) == ((ci->ci_softints >> opl) != 0));
		KASSERT(opl == IPL_NONE ||
		    (softints & (1 << (opl - IPL_SOFTCLOCK))) == 0);
		if (softints == 0) {
#ifdef __HAVE_PREEMPTION
			if (ci->ci_want_resched & RESCHED_KPREEMPT) {
				atomic_and_uint(&ci->ci_want_resched,
				    ~RESCHED_KPREEMPT);
				splsched();
				kpreempt(-2);
			}
#endif
			break;
		}
#define DOSOFTINT(n) \
		if (ci->ci_softints & (1 << (IPL_SOFT ## n - IPL_SOFTCLOCK))) {\
			ci->ci_softints &= \
			    ~(1 << (IPL_SOFT ## n - IPL_SOFTCLOCK)); \
			cpu_switchto_softint(ci->ci_softlwps[SOFTINT_ ## n], \
			    IPL_SOFT ## n); \
			continue; \
		}
		DOSOFTINT(SERIAL);
		DOSOFTINT(NET);
		DOSOFTINT(BIO);
		DOSOFTINT(CLOCK);
		panic("dosoftints wtf (softints=%u?, ipl=%d)", softints, opl);
	}
	splx(s);
}
#endif /* !__HAVE_PIC_FAST_SOFTINTS */
#endif /* __HAVE_FAST_SOFTINTS */

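/*
 * Sanity-check a user-supplied mcontext before it is installed: the
 * only SPSR bits userland may set are the NZCV condition flags, and
 * the PC must be 4-byte aligned.
 */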
int
cpu_mcontext_validate(struct lwp *l, const mcontext_t *mcp)
{
	/*
	 * We intentionally don't verify that _REG_SP is aligned to a
	 * 16-byte boundary because it can be legally misaligned as long
	 * as it's not used for accessing memory.
	 */
	if ((mcp->__gregs[_REG_SPSR] & ~SPSR_NZCV)
	    || (mcp->__gregs[_REG_PC] & 3))
		return EINVAL;

	return 0;
}

/*
 * Since the ucontext_t will be on the stack most of the time, make sure
 * it will keep the stack aligned.
 */
CTASSERT(sizeof(ucontext_t) % 16 == 0);

CTASSERT(sizeof(struct reg) == sizeof(__gregset_t));
CTASSERT(offsetof(struct reg, r_pc) == _REG_PC * sizeof(__greg_t));
CTASSERT(offsetof(struct reg, r_sp) == _REG_SP * sizeof(__greg_t));
CTASSERT(offsetof(struct reg, r_spsr) == _REG_SPSR * sizeof(__greg_t));
CTASSERT(offsetof(struct reg, r_tpidr) == _REG_TPIDR * sizeof(__greg_t));

CTASSERT(sizeof(struct fpreg) == sizeof(__fregset_t));
CTASSERT(offsetof(struct fpreg, fpcr) == offsetof(__fregset_t, __fpcr));
CTASSERT(offsetof(struct fpreg, fpsr) == offsetof(__fregset_t, __fpsr));

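/*
 * Capture the LWP's CPU state into <mcp>: the general registers from
 * the trapframe (with the BTYPE bits cleared from the saved SPSR), the
 * TLS base from l_private and, if the FPU has been used, the FP state
 * saved in the PCB.
 */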
void
cpu_getmcontext(struct lwp *l, mcontext_t *mcp, unsigned int *flagsp)
{
	const struct trapframe * const tf = lwp_trapframe(l);

	memcpy(mcp->__gregs, &tf->tf_regs, sizeof(mcp->__gregs));
	mcp->__gregs[_REG_TPIDR] = (uintptr_t)l->l_private;
	mcp->__gregs[_REG_SPSR] &= ~SPSR_A64_BTYPE;

	if (fpu_used_p(l)) {
		const struct pcb * const pcb = lwp_getpcb(l);
		fpu_save(l);
		*flagsp |= _UC_FPU;
		mcp->__fregs = *(const __fregset_t *) &pcb->pcb_fpregs;
	}
	*flagsp |= _UC_CPU|_UC_TLSBASE;
}

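/*
 * Install an mcontext into the LWP.  The general register set is
 * validated before it touches the trapframe, and any live FPU state is
 * discarded before the new FP registers are copied into the PCB.
 */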
int
cpu_setmcontext(struct lwp *l, const mcontext_t *mcp, unsigned int flags)
{
	struct proc * const p = l->l_proc;

	if (flags & _UC_CPU) {
		struct trapframe * const tf = lwp_trapframe(l);
		int error = cpu_mcontext_validate(l, mcp);
		if (error)
			return error;

		memcpy(&tf->tf_regs, mcp->__gregs, sizeof(tf->tf_regs));
	}

	if (flags & _UC_TLSBASE)
		l->l_private = (void *)mcp->__gregs[_REG_TPIDR];

	if (flags & _UC_FPU) {
		struct pcb * const pcb = lwp_getpcb(l);
		fpu_discard(l, true);
		pcb->pcb_fpregs = *(const struct fpreg *)&mcp->__fregs;
	}

	mutex_enter(p->p_lock);
	if (flags & _UC_SETSTACK)
		l->l_sigstk.ss_flags |= SS_ONSTACK;
	if (flags & _UC_CLRSTACK)
		l->l_sigstk.ss_flags &= ~SS_ONSTACK;
	mutex_exit(p->p_lock);

	return 0;
}

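/*
 * Entry point for a freshly created LWP: install the ucontext built by
 * the parent and return to user mode.  The context is expected to have
 * been validated already, hence the KASSERT rather than an error
 * return.
 */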
void
startlwp(void *arg)
{
	ucontext_t * const uc = arg;
	lwp_t * const l = curlwp;
	int error __diagused;

	error = cpu_setmcontext(l, &uc->uc_mcontext, uc->uc_flags);
	KASSERT(error == 0);

	kmem_free(uc, sizeof(*uc));
	userret(l);
}

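/*
 * Ask for a reschedule on <ci>.  For kernel preemption a remote CPU is
 * poked with an IPI and a local request needs nothing further here;
 * ordinary reschedules post an AST, by IPI if the LWP is running on
 * another CPU.
 */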
void
cpu_need_resched(struct cpu_info *ci, struct lwp *l, int flags)
{
	KASSERT(kpreempt_disabled());

	if ((flags & RESCHED_KPREEMPT) != 0) {
#ifdef __HAVE_PREEMPTION
		if ((flags & RESCHED_REMOTE) != 0) {
			intr_ipi_send(ci->ci_kcpuset, IPI_KPREEMPT);
		}
#endif
		return;
	}
	if ((flags & RESCHED_REMOTE) != 0) {
#ifdef MULTIPROCESSOR
		intr_ipi_send(ci->ci_kcpuset, IPI_AST);
#endif
	} else {
		l->l_md.md_astpending = 1;
	}
}

void
cpu_need_proftick(struct lwp *l)
{
	KASSERT(kpreempt_disabled());
	KASSERT(l->l_cpu == curcpu());

	l->l_pflag |= LP_OWEUPC;
	l->l_md.md_astpending = 1;
}

void
cpu_signotify(struct lwp *l)
{

	KASSERT(kpreempt_disabled());

	if (l->l_cpu != curcpu()) {
#ifdef MULTIPROCESSOR
		intr_ipi_send(l->l_cpu->ci_kcpuset, IPI_AST);
#endif
	} else {
		l->l_md.md_astpending = 1;
	}
}

#ifdef __HAVE_PREEMPTION
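/*
 * Decide whether the kernel may be preempted at this point.  The
 * intended logic is still disabled under #if 0, so for now this always
 * refuses preemption.
 */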
bool
cpu_kpreempt_enter(uintptr_t where, int s)
{
	KASSERT(kpreempt_disabled());

#if 0
	if (where == (intptr_t)-2) {
		KASSERT(curcpu()->ci_mtx_count == 0);
		/*
		 * We must be called via kern_intr (which already checks for
		 * IPL_NONE so of course we can be preempted).
		 */
		return true;
	}
	/*
	 * We are called from KPREEMPT_ENABLE().  If we are at IPL_NONE,
	 * of course we can be preempted.  If we aren't, ask for a
	 * softint so that kern_intr can call kpreempt.
	 */
	if (s == IPL_NONE) {
		KASSERT(curcpu()->ci_mtx_count == 0);
		return true;
	}
	atomic_or_uint(&curcpu()->ci_want_resched, RESCHED_KPREEMPT);
#endif
	return false;
}

void
cpu_kpreempt_exit(uintptr_t where)
{

	/* do nothing */
}

/*
 * Return true if preemption is disabled for MD reasons.  Must be called
 * with preemption disabled, and thus is only for diagnostic checks.
 */
bool
cpu_kpreempt_disabled(void)
{
	/*
	 * Any elevated IPL disables preemption.
	 */
	return curcpu()->ci_cpl > IPL_NONE;
}
#endif /* __HAVE_PREEMPTION */

#ifdef MULTIPROCESSOR
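/*
 * IPI hooks for the MI cross-call and generic-IPI layers.  A NULL <ci>
 * requests a broadcast; intr_ipi_send() is then handed a NULL cpuset,
 * which (per the MI contract for these hooks) targets every CPU except
 * the sender.
 */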
void
xc_send_ipi(struct cpu_info *ci)
{
	KASSERT(kpreempt_disabled());
	KASSERT(curcpu() != ci);

	intr_ipi_send(ci != NULL ? ci->ci_kcpuset : NULL, IPI_XCALL);
}

void
cpu_ipi(struct cpu_info *ci)
{
	KASSERT(kpreempt_disabled());
	KASSERT(curcpu() != ci);

	intr_ipi_send(ci != NULL ? ci->ci_kcpuset : NULL, IPI_GENERIC);
}

int
pic_ipi_shootdown(void *arg)
{
	/* may be populated in pmap.c */
	return 1;
}
#endif /* MULTIPROCESSOR */

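/*
 * Called from the idle loop when there is nothing to run; dispatches
 * through the arm_cpu_idle hook, which defaults to WFI (see the top of
 * this file).
 */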
void
cpu_idle(void)
{
	arm_cpu_idle();
}