/*	$NetBSD: cpuswitch.S,v 1.107 2023/03/01 08:17:53 riastradh Exp $	*/

/*
 * Copyright 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Steve C. Woodford for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Brini.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpuswitch.S
 *
 * cpu switching functions
 *
 * Created      : 15/10/94
 */

#include "opt_armfpe.h"
#include "opt_cpuoptions.h"
#include "opt_kasan.h"
#include "opt_lockdebug.h"
#include "opt_multiprocessor.h"

#include "assym.h"
#include <arm/asm.h>
#include <arm/locore.h>

	RCSID("$NetBSD: cpuswitch.S,v 1.107 2023/03/01 08:17:53 riastradh Exp $")

/* LINTSTUB: include <sys/param.h> */

#ifdef FPU_VFP
	.fpu vfpv2
#endif

#undef IRQdisable
#undef IRQenable

/*
 * Definitions of IRQdisable and IRQenable.  These mask IRQs only,
 * keeping FIQs enabled, since FIQs are special.
 */

#ifdef _ARM_ARCH_6
#define	IRQdisable	cpsid	i
#define	IRQenable	cpsie	i
#else
#define IRQdisable \
	mrs	r14, cpsr ; \
	orr	r14, r14, #(I32_bit) ; \
	msr	cpsr_c, r14

#define IRQenable \
	mrs	r14, cpsr ; \
	bic	r14, r14, #(I32_bit) ; \
	msr	cpsr_c, r14

#endif
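
/*
 * In rough C terms these correspond to the <arm/cpufunc.h>-style
 * helpers (a sketch, not a definitive mapping):
 *
 *	IRQdisable:	(void)disable_interrupts(I32_bit);
 *	IRQenable:	(void)enable_interrupts(I32_bit);
 */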
    121 
    122 	.text
    123 
    124 /*
    125  * struct lwp *
    126  * cpu_switchto(struct lwp *current, struct lwp *next)
    127  *
    128  * Switch to the specified next LWP
    129  * Arguments:
    130  *
    131  *	r0	'struct lwp *' of the current LWP
    132  *	r1	'struct lwp *' of the LWP to switch to
    133  *	r2	returning
    134  */
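/*
 * A rough C-level sketch of the body below, for orientation (pcb
 * fields named after the assym offsets used, e.g. PCB_KSP ~ pcb_ksp;
 * the barriers around the ci_curlwp store are discussed further down):
 *
 *	struct pcb *opcb = lwp_getpcb(oldlwp);
 *	// save r8-r13 and the user TLS register into opcb
 *	ci->ci_curlwp = newlwp;			// with barriers
 *	struct pcb *npcb = lwp_getpcb(newlwp);
 *	// switch sp to npcb->pcb_ksp, restore TLS/VFP state,
 *	// fix up any RAS, reload r8-r12
 *	return oldlwp;
 */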
ENTRY(cpu_switchto)
	mov	ip, sp
	push	{r4-r7, ip, lr}

	/* move lwps into callee saved registers */
	mov	r6, r1
	mov	r4, r0

#ifdef TPIDRPRW_IS_CURCPU
	GET_CURCPU(r5)
#else
	ldr	r5, [r6, #L_CPU]		/* get cpu from new lwp */
#endif

	/* rem: r4 = old lwp */
	/* rem: r5 = curcpu() */
	/* rem: r6 = new lwp */
	/* rem: interrupts are enabled */

	/* Save old context */

	/* Get the user structure for the old lwp. */
	ldr	r7, [r4, #(L_PCB)]

	/* Save all the registers in the old lwp's pcb */
#if defined(_ARM_ARCH_DWORD_OK)
	strd	r8, r9, [r7, #(PCB_R8)]
	strd	r10, r11, [r7, #(PCB_R10)]
	strd	r12, r13, [r7, #(PCB_R12)]
#else
	add	r0, r7, #(PCB_R8)
	stmia	r0, {r8-r13}
#endif

#ifdef _ARM_ARCH_6
	/*
	 * Save user read/write thread/process id register
	 */
	mrc	p15, 0, r0, c13, c0, 2
	str	r0, [r7, #(PCB_USER_PID_RW)]
#endif
	/*
	 * NOTE: We can now use r8-r13 until it is time to restore
	 * them for the new process.
	 */

	/* Restore saved context */

	/* rem: r4 = old lwp */
	/* rem: r5 = curcpu() */
	/* rem: r6 = new lwp */

	IRQdisable
#if defined(TPIDRPRW_IS_CURLWP)
	mcr	p15, 0, r6, c13, c0, 4		/* set current lwp */
#endif

	/*
	 * Issue barriers to coordinate mutex_exit on this CPU with
	 * mutex_vector_enter on another CPU.
	 *
	 * 1. Any prior mutex_exit by oldlwp must be visible to other
	 *    CPUs before we set ci_curlwp := newlwp on this one,
	 *    requiring a store-before-store barrier.
	 *
	 * 2. ci_curlwp := newlwp must be visible on all other CPUs
	 *    before any subsequent mutex_exit by newlwp can even test
	 *    whether there might be waiters, requiring a
	 *    store-before-load barrier.
	 *
	 * See kern_mutex.c for details -- this is necessary for
	 * adaptive mutexes to detect whether the lwp is on the CPU in
	 * order to safely block without requiring atomic r/m/w in
	 * mutex_exit.
	 */
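	/*
	 * In C, using the machine-independent membar_ops(3) names,
	 * the store below is bracketed roughly like this (a sketch):
	 *
	 *	membar_producer();	// store-before-store (1)
	 *	ci->ci_curlwp = newlwp;
	 *	membar_sync();		// store-before-load (2)
	 *
	 * On ARMv7 both sides are implemented with dmb, a full barrier.
	 */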

	/* We have a new curlwp now so make a note of it */
#ifdef _ARM_ARCH_7
	dmb				/* store-before-store */
#endif
	str	r6, [r5, #(CI_CURLWP)]
#ifdef _ARM_ARCH_7
	dmb				/* store-before-load */
#endif

	/* Get the new pcb */
	ldr	r7, [r6, #(L_PCB)]

	/* make sure we are using the new lwp's stack */
	ldr	sp, [r7, #(PCB_KSP)]

	/* At this point we can allow IRQs again. */
	IRQenable

	/* rem: r4 = old lwp */
	/* rem: r5 = curcpu() */
	/* rem: r6 = new lwp */
	/* rem: r7 = new pcb */
	/* rem: interrupts are enabled */

	/*
	 * If we are switching to a system lwp, don't bother restoring
	 * thread or vfp registers and skip the ras check.
	 */
	ldr	r0, [r6, #(L_FLAG)]
	tst	r0, #(LW_SYSTEM)
	bne	.Lswitch_do_restore

#ifdef _ARM_ARCH_6
	/*
	 * Restore user thread/process id registers
	 */
	ldr	r0, [r7, #(PCB_USER_PID_RW)]
	mcr	p15, 0, r0, c13, c0, 2
	ldr	r0, [r6, #(L_PRIVATE)]
	mcr	p15, 0, r0, c13, c0, 3
#endif

#ifdef FPU_VFP
	/*
	 * If we have a VFP, we need to load FPEXC.
	 */
	ldr	r0, [r5, #(CI_VFP_ID)]
	cmp	r0, #0
	ldrne	r0, [r7, #(PCB_VFP_FPEXC)]
	vmsrne	fpexc, r0
#endif

	/*
	 * Check for restartable atomic sequences (RAS).
	 */
	ldr	r0, [r6, #(L_PROC)]	/* fetch the proc for ras_lookup */
	ldr	r2, [r0, #(P_RASLIST)]
	cmp	r2, #0			/* p->p_raslist == NULL? */
	beq	.Lswitch_do_restore

	/* we can use r8 since we haven't restored saved registers yet. */
	ldr	r8, [r6, #(L_MD_TF)]	/* r8 = trapframe (used below) */
	ldr	r1, [r8, #(TF_PC)]	/* second ras_lookup() arg */
	bl	_C_LABEL(ras_lookup)
	cmn	r0, #1			/* -1 means "not in a RAS" */
	strne	r0, [r8, #(TF_PC)]
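
	/*
	 * The RAS fixup above is roughly (a sketch; per ras_lookup(9),
	 * (void *)-1 means the PC is not inside a registered sequence):
	 *
	 *	struct trapframe *tf = newlwp->l_md.md_tf;
	 *	void *pc = ras_lookup(newlwp->l_proc, (void *)tf->tf_pc);
	 *	if (pc != (void *)-1)
	 *		tf->tf_pc = (register_t)pc;
	 */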

	/* rem: r4 = old lwp */
	/* rem: r5 = curcpu() */
	/* rem: r6 = new lwp */
	/* rem: r7 = new pcb */

.Lswitch_do_restore:
	/* Restore all the saved registers */
#ifdef __XSCALE__
	ldr	r8, [r7, #(PCB_R8)]
	ldr	r9, [r7, #(PCB_R9)]
	ldr	r10, [r7, #(PCB_R10)]
	ldr	r11, [r7, #(PCB_R11)]
	ldr	r12, [r7, #(PCB_R12)]
#elif defined(_ARM_ARCH_DWORD_OK)
	ldrd	r8, r9, [r7, #(PCB_R8)]
	ldrd	r10, r11, [r7, #(PCB_R10)]
	ldr	r12, [r7, #(PCB_R12)]
#else
	add	r0, r7, #PCB_R8
	ldmia	r0, {r8-r12}
#endif

	/* Record the old lwp for pmap_activate()'s benefit */
#ifndef ARM_MMU_EXTENDED
	str	r4, [r5, #CI_LASTLWP]
#endif

	/* cpu_switchto returns the old lwp */
	mov	r0, r4
	/* lwp_trampoline expects new lwp as its second argument */
	mov	r1, r6

#ifdef _ARM_ARCH_7
	clrex				/* cause any subsequent STREX* to fail */
#endif

	/*
	 * Pull the registers that got pushed when cpu_switchto() was called,
	 * and return.
	 */
	pop	{r4-r7, ip, pc}

END(cpu_switchto)

ENTRY_NP(lwp_trampoline)
	/*
	 * cpu_switchto gives us:
	 *	arg0(r0) = old lwp
	 *	arg1(r1) = new lwp
	 * set up by cpu_lwp_fork:
	 *	r4 = func to call
	 *	r5 = arg to func
	 *	r6 = <unused>
	 *	r7 = spsr mode
	 */
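	/*
	 * In rough C terms (a sketch):
	 *
	 *	lwp_startup(oldlwp, newlwp);	// finish the switch
	 *	(*func)(arg);			// func in r4, arg in r5
	 *	// then drop to user mode through the trapframe below
	 */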
	bl	_C_LABEL(lwp_startup)

	mov	fp, #0			/* top stack frame */
	mov	r0, r5
	mov	r1, sp
#ifdef _ARM_ARCH_5
	blx	r4
#else
	mov	lr, pc
	mov	pc, r4
#endif

	GET_CPSR(r0)
	CPSID_I(r0, r0)			/* kill IRQs */

	/* for DO_AST */
	GET_CURX(r4, r5)		/* r4 = curcpu, r5 = curlwp */
	DO_AST_AND_RESTORE_ALIGNMENT_FAULTS
	PULLFRAME

	movs	pc, lr			/* Exit */
END(lwp_trampoline)

AST_ALIGNMENT_FAULT_LOCALS

#ifdef __HAVE_FAST_SOFTINTS
/*
 *	Called at IPL_HIGH
 *	r0 = new lwp
 *	r1 = ipl for softint_dispatch
 */
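/*
 * In outline (a sketch; softint_dispatch() in kern_softint.c is the
 * MI half of fast soft interrupts):
 *
 *	// save enough of the pinned (interrupted) lwp to resume it
 *	ci->ci_curlwp = softlwp;	// and switch to its stack
 *	softint_dispatch(pinnedlwp, ipl);
 *	ci->ci_curlwp = pinnedlwp;	// switch back and return
 */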
ENTRY_NP(softint_switch)
	push	{r4, r6, r7, lr}

	ldr	r7, [r0, #L_CPU]	/* get curcpu */
#if defined(TPIDRPRW_IS_CURLWP)
	mrc	p15, 0, r4, c13, c0, 4	/* get old lwp */
#else
	ldr	r4, [r7, #(CI_CURLWP)]	/* get old lwp */
#endif
	mrs	r6, cpsr		/* we need to save this */

	/*
	 * If the soft lwp blocks, it needs to return to softint_tramp
	 */
	mov	r2, sp			/* fills the ip slot of the switchframe */
	adr	r3, softint_tramp	/* fills the lr slot of the switchframe */
	push	{r2-r3}
	push	{r4-r7}

	mov	r5, r0			/* save new lwp */

	ldr	r2, [r4, #(L_PCB)]	/* get old lwp's pcb */

	/* Save all the registers into the old lwp's pcb */
#if defined(__XSCALE__) || defined(_ARM_ARCH_6)
	strd	r8, r9, [r2, #(PCB_R8)]
	strd	r10, r11, [r2, #(PCB_R10)]
	strd	r12, r13, [r2, #(PCB_R12)]
#else
	add	r3, r2, #(PCB_R8)
	stmia	r3, {r8-r13}
#endif

#ifdef _ARM_ARCH_6
	/*
	 * Save user read/write thread/process id register in case it was
	 * set in userland.
	 */
	mrc	p15, 0, r0, c13, c0, 2
	str	r0, [r2, #(PCB_USER_PID_RW)]
#endif

	/* this is an invariant so load before disabling intrs */
	ldr	r2, [r5, #(L_PCB)]	/* get new lwp's pcb */

	IRQdisable
	/*
	 * We're switching to a bound LWP so its l_cpu is already correct.
	 */
#if defined(TPIDRPRW_IS_CURLWP)
	mcr	p15, 0, r5, c13, c0, 4	/* set current lwp */
#endif
#ifdef _ARM_ARCH_7
	dmb				/* for mutex_enter; see cpu_switchto */
#endif
	str	r5, [r7, #(CI_CURLWP)]	/* save new lwp */
	/*
	 * No need for barrier after ci->ci_curlwp = softlwp -- when we
	 * enter a softint lwp, it can't be holding any mutexes, so it
	 * can't release any until after it has acquired them, so we
	 * need not participate in the protocol with mutex_vector_enter
	 * barriers here.
	 */

#ifdef KASAN
	mov	r0, r5
	bl	_C_LABEL(kasan_softint)
#endif

	/*
	 * Normally, we'd get {r8-r13} but since this is a softint lwp
	 * its existing state doesn't matter.  We start the stack just
	 * below the trapframe.
	 */
	ldr	sp, [r5, #(L_MD_TF)]	/* get new lwp's stack ptr */

	/* At this point we can allow IRQs again. */
	IRQenable
					/* r1 still has ipl */
	mov	r0, r4			/* r0 has pinned (old) lwp */
	bl	_C_LABEL(softint_dispatch)
	/*
	 * If we've returned, we need to change everything back and return.
	 */
	ldr	r2, [r4, #(L_PCB)]	/* get pinned lwp's pcb */

	/*
	 * We don't need to restore all the registers since another lwp was
	 * never executed.  But we do need the SP from the formerly pinned lwp.
	 */

	IRQdisable
#if defined(TPIDRPRW_IS_CURLWP)
	mcr	p15, 0, r4, c13, c0, 4	/* restore pinned lwp */
#endif
#ifdef _ARM_ARCH_7
	dmb				/* for mutex_enter; see cpu_switchto */
#endif
	str	r4, [r7, #(CI_CURLWP)]	/* restore pinned lwp */
#ifdef _ARM_ARCH_7
	dmb				/* for mutex_enter; see cpu_switchto */
#endif
	ldr	sp, [r2, #(PCB_KSP)]	/* now running on the old stack. */

	/* At this point we can allow IRQs again. */
	msr	cpsr_c, r6

	/*
	 * Grab the registers that got pushed at the start and return.
	 */
	pop	{r4-r7, ip, lr}		/* eat switch frame */
	pop	{r4, r6, r7, pc}	/* pop stack and return */

END(softint_switch)

/*
 * r0 = previous LWP (the soft lwp)
 * r4 = original LWP (the current lwp)
 * r6 = original CPSR
 * r7 = curcpu()
 */
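/*
 * In rough C terms (a sketch): a softint lwp that blocked resumes
 * here instead of in cpu_switchto's caller, so compensate for the
 * ci_mtx_count adjustment made by mi_switch and return to
 * softint_switch's caller:
 *
 *	ci->ci_mtx_count++;		// readjust after mi_switch
 *	restore_interrupts(saved_cpsr);	// r6 from softint_switch
 */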
ENTRY_NP(softint_tramp)
	ldr	r3, [r7, #(CI_MTX_COUNT)]	/* readjust after mi_switch */
	add	r3, r3, #1
	str	r3, [r7, #(CI_MTX_COUNT)]

	msr	cpsr_c, r6			/* restore interrupts */
	pop	{r4, r6, r7, pc}		/* pop stack and return */
END(softint_tramp)
#endif /* __HAVE_FAST_SOFTINTS */