/*	$NetBSD: cpuswitch.S,v 1.108 2025/10/07 10:35:06 skrll Exp $	*/

/*
 * Copyright 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Steve C. Woodford for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Brini.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpuswitch.S
 *
 * cpu switching functions
 *
 * Created      : 15/10/94
 */

#include "opt_armfpe.h"
#include "opt_cpuoptions.h"
#include "opt_kasan.h"
#include "opt_lockdebug.h"
#include "opt_multiprocessor.h"

#include "assym.h"
#include <arm/asm.h>
#include <arm/locore.h>

	RCSID("$NetBSD: cpuswitch.S,v 1.108 2025/10/07 10:35:06 skrll Exp $")

/* LINTSTUB: include <sys/param.h> */

#ifdef FPU_VFP
	.fpu vfpv2
#endif

	.text

/*
 * struct lwp *
 * cpu_switchto(struct lwp *current, struct lwp *next, bool returning)
 *
 * Switch to the specified next LWP.
 * Arguments:
 *
 *	r0	'struct lwp *' of the current LWP
 *	r1	'struct lwp *' of the LWP to switch to
 *	r2	'bool' returning (see mi_switch())
 */
ENTRY(cpu_switchto)
	mov	ip, sp
	push	{r4-r7, ip, lr}

	/* move lwps into callee saved registers */
	mov	r6, r1
	mov	r4, r0

#ifdef TPIDRPRW_IS_CURCPU
	GET_CURCPU(r5)
#else
	ldr	r5, [r6, #L_CPU]		/* get cpu from new lwp */
#endif

	/* rem: r4 = old lwp */
	/* rem: r5 = curcpu() */
	/* rem: r6 = new lwp */
	/* rem: interrupts are enabled */

	/* Save old context */

	/* Get the user structure for the old lwp. */
	ldr	r7, [r4, #(L_PCB)]

	/* Save all the registers in the old lwp's pcb */
#if defined(_ARM_ARCH_DWORD_OK)
	strd	r8, r9, [r7, #(PCB_R8)]
	strd	r10, r11, [r7, #(PCB_R10)]
	strd	r12, r13, [r7, #(PCB_R12)]
#else
	add	r0, r7, #(PCB_R8)
	stmia	r0, {r8-r13}
#endif

#ifdef _ARM_ARCH_6
	/*
	 * Save the user read/write thread/process id register (TPIDRURW)
	 */
	mrc	p15, 0, r0, c13, c0, 2
	str	r0, [r7, #(PCB_USER_PID_RW)]
#endif
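
	/*
	 * (For reference: the CP15 c13 thread registers are c0,2 =
	 * TPIDRURW, c0,3 = TPIDRURO and c0,4 = TPIDRPRW; the kernel
	 * uses the last one for curcpu or curlwp, depending on
	 * TPIDRPRW_IS_CURCPU/TPIDRPRW_IS_CURLWP.)
	 */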
	/*
	 * NOTE: We can now use r8-r13 until it is time to restore
	 * them for the new process.
	 */

	/* Restore saved context */

	/* rem: r4 = old lwp */
	/* rem: r5 = curcpu() */
	/* rem: r6 = new lwp */

	IRQ_DISABLE(lr)
#if defined(TPIDRPRW_IS_CURLWP)
	mcr	p15, 0, r6, c13, c0, 4		/* set curlwp in TPIDRPRW */
#endif

	/*
	 * Issue barriers to coordinate mutex_exit on this CPU with
	 * mutex_vector_enter on another CPU.
	 *
	 * 1. Any prior mutex_exit by oldlwp must be visible to other
	 *    CPUs before we set ci_curlwp := newlwp on this one,
	 *    requiring a store-before-store barrier.
	 *
	 * 2. ci_curlwp := newlwp must be visible on all other CPUs
	 *    before any subsequent mutex_exit by newlwp can even test
	 *    whether there might be waiters, requiring a
	 *    store-before-load barrier.
	 *
	 * See kern_mutex.c for details -- this is necessary for
	 * adaptive mutexes to detect whether the lwp is on the CPU in
	 * order to safely block without requiring atomic r/m/w in
	 * mutex_exit.
	 */
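
	/*
	 * A rough sketch of the next few instructions in C, using the
	 * MI membar(9) names (on ARMv7 both barriers below are dmb):
	 *
	 *	membar_producer();	- store-before-store
	 *	ci->ci_curlwp = newlwp;
	 *	membar_sync();		- store-before-load
	 */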

	/* We have a new curlwp now so make a note of it */
#ifdef _ARM_ARCH_7
	dmb				/* store-before-store */
#endif
	str	r6, [r5, #(CI_CURLWP)]
#ifdef _ARM_ARCH_7
	dmb				/* store-before-load */
#endif

	/* Get the new pcb */
	ldr	r7, [r6, #(L_PCB)]

	/* make sure we are using the new lwp's stack */
	ldr	sp, [r7, #(PCB_KSP)]

	/* At this point we can allow IRQs again. */
	IRQ_ENABLE(lr)

	/* rem: r4 = old lwp */
	/* rem: r5 = curcpu() */
	/* rem: r6 = new lwp */
	/* rem: r7 = new pcb */
	/* rem: interrupts are enabled */

	/*
	 * If we are switching to a system lwp, don't bother restoring
	 * thread or vfp registers and skip the ras check.
	 */
	ldr	r0, [r6, #(L_FLAG)]
	tst	r0, #(LW_SYSTEM)
	bne	.Lswitch_do_restore

#ifdef _ARM_ARCH_6
	/*
	 * Restore the user thread/process id registers: TPIDRURW from
	 * the pcb, and TPIDRURO from the lwp's private (TLS) pointer.
	 */
	ldr	r0, [r7, #(PCB_USER_PID_RW)]
	mcr	p15, 0, r0, c13, c0, 2
	ldr	r0, [r6, #(L_PRIVATE)]
	mcr	p15, 0, r0, c13, c0, 3
#endif

#ifdef FPU_VFP
	/*
	 * If we have a VFP, we need to load FPEXC: its enable bit
	 * determines whether the new lwp's VFP instructions execute
	 * or trap so that its context can be loaded lazily.
	 */
	ldr	r0, [r5, #(CI_VFP_ID)]
	cmp	r0, #0
	ldrne	r0, [r7, #(PCB_VFP_FPEXC)]
	vmsrne	fpexc, r0
#endif

	/*
	 * Check for restartable atomic sequences (RAS).
	 */
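	/*
	 * Roughly, in C: if the new lwp's userland PC sits inside a
	 * registered sequence, restart the sequence from its beginning:
	 *
	 *	if ((start = ras_lookup(p, tf->tf_pc)) != (void *)-1)
	 *		tf->tf_pc = start;
	 */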
	ldr	r0, [r6, #(L_PROC)]	/* fetch the proc for ras_lookup */
	ldr	r2, [r0, #(P_RASLIST)]
	cmp	r2, #0			/* p->p_raslist == NULL? */
	beq	.Lswitch_do_restore

	/* we can use r8 since we haven't restored saved registers yet. */
	ldr	r8, [r6, #(L_MD_TF)]	/* r8 = trapframe (used below) */
	ldr	r1, [r8, #(TF_PC)]	/* second ras_lookup() arg */
	bl	_C_LABEL(ras_lookup)
	cmn	r0, #1			/* -1 means "not in a RAS" */
	strne	r0, [r8, #(TF_PC)]

	/* rem: r4 = old lwp */
	/* rem: r5 = curcpu() */
	/* rem: r6 = new lwp */
	/* rem: r7 = new pcb */

.Lswitch_do_restore:
	/* Restore all the saved registers */
#ifdef __XSCALE__
	ldr	r8, [r7, #(PCB_R8)]
	ldr	r9, [r7, #(PCB_R9)]
	ldr	r10, [r7, #(PCB_R10)]
	ldr	r11, [r7, #(PCB_R11)]
	ldr	r12, [r7, #(PCB_R12)]
#elif defined(_ARM_ARCH_DWORD_OK)
	ldrd	r8, r9, [r7, #(PCB_R8)]
	ldrd	r10, r11, [r7, #(PCB_R10)]
	ldr	r12, [r7, #(PCB_R12)]
#else
	add	r0, r7, #(PCB_R8)
	ldmia	r0, {r8-r12}
#endif

	/* Record the old lwp for pmap_activate()'s benefit */
#ifndef ARM_MMU_EXTENDED
	str	r4, [r5, #CI_LASTLWP]
#endif

	/* cpu_switchto returns the old lwp */
	mov	r0, r4
	/* lwp_trampoline expects new lwp as its second argument */
	mov	r1, r6

#ifdef _ARM_ARCH_7
	clrex				/* cause any subsequent STREX* to fail */
#endif
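
	/*
	 * (The clrex above drops any exclusive-monitor reservation a
	 * preempted LDREX may have left behind, so the new lwp's first
	 * STREX cannot succeed spuriously.)
	 */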

	/*
	 * Pull the registers that got pushed when cpu_switchto() was called,
	 * and return.
	 */
	pop	{r4-r7, ip, pc}

END(cpu_switchto)

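/*
 * A sketch of how the MI scheduler invokes the function above (see
 * mi_switch() in kern_synch.c):
 *
 *	prevlwp = cpu_switchto(l, newlwp, returning);
 *
 * so r0 must carry the previously running lwp back to the caller.
 */
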
ENTRY_NP(lwp_trampoline)
	/*
	 * cpu_switchto gives us:
	 *	arg0(r0) = old lwp
	 *	arg1(r1) = new lwp
	 * set up by cpu_lwp_fork:
	 *	r4 = func to call
	 *	r5 = arg to func
	 *	r6 = <unused>
	 *	r7 = spsr mode
	 */
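	/* r0/r1 happen to line up with lwp_startup(prev, new) already */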
	bl	_C_LABEL(lwp_startup)

	mov	fp, #0			/* top stack frame */
	mov	r0, r5
	mov	r1, sp
#ifdef _ARM_ARCH_5
	blx	r4
#else
	mov	lr, pc			/* lr := insn after "mov pc, r4" */
	mov	pc, r4
#endif

	GET_CPSR(r0)
	CPSID_I(r0, r0)			/* Kill IRQs */

	/* for DO_AST */
	GET_CURX(r4, r5)		/* r4 = curcpu, r5 = curlwp */
	DO_AST_AND_RESTORE_ALIGNMENT_FAULTS
	PULLFRAME

	movs	pc, lr			/* Exit */
END(lwp_trampoline)

AST_ALIGNMENT_FAULT_LOCALS

#ifdef __HAVE_FAST_SOFTINTS
/*
 *	Called at IPL_HIGH
 *	r0 = new lwp
 *	r1 = ipl for softint_dispatch
 */
ENTRY_NP(softint_switch)
	push	{r4, r6, r7, lr}

	ldr	r7, [r0, #L_CPU]	/* get curcpu */
#if defined(TPIDRPRW_IS_CURLWP)
	mrc	p15, 0, r4, c13, c0, 4	/* get old lwp */
#else
	ldr	r4, [r7, #(CI_CURLWP)]	/* get old lwp */
#endif
	mrs	r6, cpsr		/* we need to save this */

	/*
	 * If the soft lwp blocks, it needs to return to softint_tramp
	 */
	mov	r2, sp			/* stands in for the "ip" slot */
	adr	r3, softint_tramp	/* stands in for the "lr" slot */
	push	{r2-r3}
	push	{r4-r7}
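
	/*
	 * The two pushes above build a frame that matches the one
	 * cpu_switchto() pushes ({r4-r7, ip, lr}), so if the softint
	 * lwp blocks, resuming the pinned lwp pops this frame and
	 * "returns" into softint_tramp.
	 */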

	mov	r5, r0			/* save new lwp */

	ldr	r2, [r4, #(L_PCB)]	/* get old lwp's pcb */

	/* Save all the registers into the old lwp's pcb */
#if defined(__XSCALE__) || defined(_ARM_ARCH_6)
	strd	r8, r9, [r2, #(PCB_R8)]
	strd	r10, r11, [r2, #(PCB_R10)]
	strd	r12, r13, [r2, #(PCB_R12)]
#else
	add	r3, r2, #(PCB_R8)
	stmia	r3, {r8-r13}
#endif

#ifdef _ARM_ARCH_6
	/*
	 * Save the user read/write thread/process id register (TPIDRURW)
	 * in case it was set in userland.
	 */
	mrc	p15, 0, r0, c13, c0, 2
	str	r0, [r2, #(PCB_USER_PID_RW)]
#endif

	/* this is an invariant so load before disabling intrs */
	ldr	r2, [r5, #(L_PCB)]	/* get new lwp's pcb */

	IRQ_DISABLE(lr)

	/*
	 * We're switching to a bound LWP so its l_cpu is already correct.
	 */
#if defined(TPIDRPRW_IS_CURLWP)
	mcr	p15, 0, r5, c13, c0, 4	/* set new lwp as curlwp */
#endif
#ifdef _ARM_ARCH_7
	dmb				/* for mutex_enter; see cpu_switchto */
#endif
	str	r5, [r7, #(CI_CURLWP)]	/* set new lwp as curlwp */
	/*
	 * No need for barrier after ci->ci_curlwp = softlwp -- when we
	 * enter a softint lwp, it can't be holding any mutexes, so it
	 * can't release any until after it has acquired them, so we
	 * need not participate in the protocol with mutex_vector_enter
	 * barriers here.
	 */

#ifdef KASAN
	mov	r0, r5
	bl	_C_LABEL(kasan_softint)
#endif

	/*
	 * Normally, we'd get {r8-r13} but since this is a softint lwp
	 * its existing state doesn't matter.  We start the stack just
	 * below the trapframe.
	 */
	ldr	sp, [r5, #(L_MD_TF)]	/* get new lwp's stack ptr */

	/* At this point we can allow IRQs again. */
	IRQ_ENABLE(lr)
					/* r1 still has ipl */
	mov	r0, r4			/* r0 has pinned (old) lwp */
	bl	_C_LABEL(softint_dispatch)
	/*
	 * If we've returned, we need to change everything back and return.
	 */
	ldr	r2, [r4, #(L_PCB)]	/* get pinned lwp's pcb */

	/*
	 * We don't need to restore all the registers since another lwp was
	 * never executed.  But we do need the SP from the formerly pinned lwp.
	 */

	IRQ_DISABLE(lr)

#if defined(TPIDRPRW_IS_CURLWP)
	mcr	p15, 0, r4, c13, c0, 4	/* restore pinned lwp as curlwp */
#endif
#ifdef _ARM_ARCH_7
	dmb				/* for mutex_enter; see cpu_switchto */
#endif
	str	r4, [r7, #(CI_CURLWP)]	/* restore pinned lwp as curlwp */
#ifdef _ARM_ARCH_7
	dmb				/* for mutex_enter; see cpu_switchto */
#endif
	ldr	sp, [r2, #(PCB_KSP)]	/* now running on the old stack. */

	/* At this point we can allow IRQs again. */
	msr	cpsr_c, r6

	/*
	 * Grab the registers that got pushed at the start and return.
	 */
	pop	{r4-r7, ip, lr}		/* eat the fake switchframe */
	pop	{r4, r6, r7, pc}	/* pop entry frame and return */

END(softint_switch)

/*
 * r0 = previous LWP (the soft lwp)
 * r4 = original LWP (the current lwp)
 * r6 = original CPSR
 * r7 = curcpu()
 */
ENTRY_NP(softint_tramp)
	ldr	r3, [r7, #(CI_MTX_COUNT)]	/* readjust after mi_switch */
	add	r3, r3, #1
	str	r3, [r7, #(CI_MTX_COUNT)]

	msr	cpsr_c, r6			/* restore interrupts */
	pop	{r4, r6, r7, pc}		/* pop stack and return */
END(softint_tramp)
#endif /* __HAVE_FAST_SOFTINTS */
    472