/*	$NetBSD: cpuswitch.S,v 1.58 2008/04/27 18:58:43 matt Exp $	*/

/*
 * Copyright 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Steve C. Woodford for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Brini.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpuswitch.S
 *
 * cpu switching functions
 *
 * Created      : 15/10/94
 */

#include "opt_armfpe.h"
#include "opt_arm32_pmap.h"
#include "opt_multiprocessor.h"
#include "opt_cpuoptions.h"
#include "opt_lockdebug.h"

#include "assym.h"
#include <arm/arm32/pte.h>
#include <machine/param.h>
#include <machine/frame.h>
#include <machine/asm.h>
#include <machine/cpu.h>

	RCSID("$NetBSD: cpuswitch.S,v 1.58 2008/04/27 18:58:43 matt Exp $")

/* LINTSTUB: include <sys/param.h> */

#undef IRQdisable
#undef IRQenable

/*
 * Local definitions of IRQdisable and IRQenable.
 * These keep FIQs enabled, since FIQs are special.
 */

#ifdef _ARM_ARCH_6
#define	IRQdisable	cpsid	i
#define	IRQenable	cpsie	i
#else
#define IRQdisable \
	mrs	r14, cpsr ; \
	orr	r14, r14, #(I32_bit) ; \
	msr	cpsr_c, r14

#define IRQenable \
	mrs	r14, cpsr ; \
	bic	r14, r14, #(I32_bit) ; \
	msr	cpsr_c, r14

#endif
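/*
 * Note: on ARMv6 and later the cpsid/cpsie instructions flip CPSR.I
 * directly; earlier CPUs need the read-modify-write of the CPSR shown
 * above, which is why the pre-v6 versions clobber r14.
 */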

	.text
.Lpmap_previous_active_lwp:
	.word	_C_LABEL(pmap_previous_active_lwp)

/*
 * struct lwp *
 * cpu_switchto(struct lwp *current, struct lwp *next, bool returning)
 *
 * Switch to the specified next LWP.
 * Arguments:
 *
 *	r0	'struct lwp *' of the current LWP (or NULL if exiting)
 *	r1	'struct lwp *' of the LWP to switch to
 *	r2	'returning' flag: non-zero when a fast-softint LWP is
 *		switching back to the LWP it interrupted (not used here)
 */
ENTRY(cpu_switchto)
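	/*
	 * Build the switch frame: callee-saved r4-r7, the caller's sp
	 * (staged through ip) and lr.  The ldmfd at .Lswitch_return pops
	 * this same frame, and softint_switch() below fakes up a frame
	 * with the identical layout.
	 */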
	mov	ip, sp
	stmfd	sp!, {r4-r7, ip, lr}

	/* move the lwps into callee-saved registers */
	mov	r6, r1
	mov	r4, r0

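	/*
	 * Find curcpu().  Depending on the kernel options, the cpu_info
	 * pointer is fetched with GET_CURCPU(), derived from the old lwp
	 * cached in CP15 c13/c0/4 (the ARMv6 TPIDRPRW thread-ID register,
	 * which holds curlwp when PROCESS_ID_IS_CURLWP and is valid even
	 * when the r0 argument is NULL), or, on uniprocessor kernels,
	 * taken from the new lwp's l_cpu.
	 */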
#ifdef PROCESS_ID_CURCPU
	GET_CURCPU(r7)
#elif defined(PROCESS_ID_IS_CURLWP)
	mrc	p15, 0, r0, c13, c0, 4		/* get old lwp (r4 maybe 0) */
	ldr	r7, [r0, #(L_CPU)]		/* get cpu from old lwp */
#elif !defined(MULTIPROCESSOR)
	ldr	r7, [r6, #L_CPU]		/* get cpu from new lwp */
#else
#error curcpu() method not defined
#endif

	/* rem: r4 = old lwp */
	/* rem: r6 = new lwp */
	/* rem: r7 = curcpu() */

	IRQdisable

#ifdef MULTIPROCESSOR
	str	r7, [r6, #(L_CPU)]
#else
	/* l->l_cpu initialized in fork1() for single-processor */
#endif

#if defined(PROCESS_ID_IS_CURLWP)
	mcr	p15, 0, r6, c13, c0, 4		/* set current lwp */
#endif
#if !defined(PROCESS_ID_IS_CURLWP) || defined(MULTIPROCESSOR)
	/* We have a new curlwp now so make a note of it */
	str	r6, [r7, #(CI_CURLWP)]
#endif

	/* Hook in a new pcb */
	ldr	r0, [r6, #(L_ADDR)]
	str	r0, [r7, #(CI_CURPCB)]
	mov	r7, r0

	/* At this point we can allow IRQs again. */
	IRQenable

	/* rem: r4 = old lwp */
	/* rem: r6 = new lwp */
	/* rem: r7 = new pcb */
	/* rem: interrupts are enabled */

	/*
	 * If the old lwp on entry to cpu_switchto was zero then the
	 * process that called it was exiting. This means that we do
	 * not need to save the current context. Instead we can jump
	 * straight to restoring the context for the new process.
	 */
	teq	r4, #0
	beq	.Ldo_switch

	/* rem: r4 = old lwp */
	/* rem: r6 = new lwp */
	/* rem: r7 = new pcb */
	/* rem: interrupts are enabled */

	/* Save old context */

	/* Get the user structure for the old lwp. */
	ldr	r5, [r4, #(L_ADDR)]

	/* Save all the registers in the old lwp's pcb */
#if defined(__XSCALE__) || defined(_ARM_ARCH_6)
	strd	r8, [r5, #(PCB_R8)]
	strd	r10, [r5, #(PCB_R10)]
	strd	r12, [r5, #(PCB_R12)]
#else
	add	r0, r5, #(PCB_R8)
	stmia	r0, {r8-r13}
#endif

#ifdef _ARM_ARCH_6
	/*
	 * Save user read/write thread/process id register
	 */
	mrc	p15, 0, r0, c13, c0, 2
	str	r0, [r5, #(PCB_USER_PID_RW)]
#endif
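	/*
	 * (CP15 c13/c0/2 is the user read/write thread-ID register,
	 * TPIDRURW; its user read-only counterpart TPIDRURO, c13/c0/3,
	 * is restored along with it below.  Userland typically keeps its
	 * TLS pointer in these.)
	 */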
	/*
	 * NOTE: We can now use r8-r13 until it is time to restore
	 * them for the new process.
	 */

	/* rem: r4 = old lwp */
	/* rem: r5 = old pcb */
	/* rem: r6 = new lwp */
	/* rem: r7 = new pcb */
	/* rem: interrupts are enabled */

#ifdef FPU_VFP
	/*
	 * Now's a good time to 'save' the VFP context.  Note that we
	 * don't really force a save here, which can save time if we
	 * end up restarting the same context.
	 */
	bl	_C_LABEL(vfp_savecontext)
#endif

	/* Restore saved context */

.Ldo_switch:
	/* rem: r4 = old lwp */
	/* rem: r6 = new lwp */
	/* rem: r7 = new pcb */
	/* rem: interrupts are enabled */

#ifdef _ARM_ARCH_6
	/*
	 * Restore user thread/process id registers
	 */
	ldr	r0, [r7, #(PCB_USER_PID_RW)]
	mcr	p15, 0, r0, c13, c0, 2
	ldr	r0, [r7, #(PCB_USER_PID_RO)]
	mcr	p15, 0, r0, c13, c0, 3
#endif

	ldr	r5, [r6, #(L_PROC)]	/* fetch the proc for below */

	/* Restore all the saved registers */
#ifdef __XSCALE__
	ldr	r8, [r7, #(PCB_R8)]
	ldr	r9, [r7, #(PCB_R9)]
	ldr	r10, [r7, #(PCB_R10)]
	ldr	r11, [r7, #(PCB_R11)]
	ldr	r12, [r7, #(PCB_R12)]
	ldr	r13, [r7, #(PCB_SP)]
#elif defined(_ARM_ARCH_6)
	ldrd	r8, [r7, #(PCB_R8)]
	ldrd	r10, [r7, #(PCB_R10)]
	ldrd	r12, [r7, #(PCB_R12)]
#else
	add	r0, r7, #PCB_R8
	ldmia	r0, {r8-r13}
#endif

	/* Record the old lwp for pmap_activate()'s benefit */
	ldr	r1, .Lpmap_previous_active_lwp
	str	r4, [r1]

	/* rem: r4 = old lwp */
	/* rem: r5 = new lwp's proc */
	/* rem: r6 = new lwp */
	/* rem: r7 = new pcb */

#ifdef FPU_VFP
	mov	r0, r6
	bl	_C_LABEL(vfp_loadcontext)
#endif
#ifdef ARMFPE
	add	r0, r7, #(USER_SIZE) & 0x00ff
	add	r0, r0, #(USER_SIZE) & 0xff00
	bl	_C_LABEL(arm_fpe_core_changecontext)
#endif

	/* rem: r4 = old lwp */
	/* rem: r5 = new lwp's proc */
	/* rem: r6 = new lwp */
	/* rem: r7 = new PCB */

	/*
	 * Check for restartable atomic sequences (RAS).
	 */

	ldr	r2, [r5, #(P_RASLIST)]
	ldr	r1, [r7, #(PCB_TF)]	/* r1 = trapframe (used below) */
	teq	r2, #0			/* p->p_raslist == NULL? */
	bne	.Lswitch_do_ras		/* no, check for a match */

.Lswitch_return:
	/* cpu_switchto returns the old lwp */
	mov	r0, r4
	/* lwp_trampoline expects the new lwp as its second argument */
	mov	r1, r6

	/*
	 * Pull the registers that got pushed when cpu_switchto() was called,
	 * and return.
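	 * Note that sp itself is reloaded from the frame's saved 'ip' slot,
	 * which is why no writeback ('!') is used on the ldmfd.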
	 */
	ldmfd	sp, {r4-r7, sp, pc}

.Lswitch_do_ras:
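	/*
	 * The process has at least one restartable atomic sequence
	 * registered.  If the user PC saved in the trapframe lies within
	 * one, ras_lookup() returns the start of that sequence and we
	 * rewind the saved PC so the sequence restarts from the beginning;
	 * otherwise it returns -1 and the trapframe is left untouched.
	 */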
	ldr	r1, [r1, #(TF_PC)]	/* second ras_lookup() arg */
	mov	r0, r5			/* first ras_lookup() arg */
	bl	_C_LABEL(ras_lookup)
	cmn	r0, #1			/* -1 means "not in a RAS" */
	ldrne	r1, [r7, #(PCB_TF)]
	strne	r0, [r1, #(TF_PC)]
	b	.Lswitch_return

ENTRY(lwp_trampoline)
	/*
	 * cpu_switchto gives us:
	 *
	 * arg0(r0) = old lwp
	 * arg1(r1) = new lwp
	 */
	bl	_C_LABEL(lwp_startup)

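	/*
	 * r4 and r5 come from the switch frame built by cpu_lwp_fork():
	 * r4 is the function the new lwp runs first (normally
	 * child_return()) and r5 is its argument.  Pass the trapframe
	 * pointer in r1 as well.
	 */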
	mov	r0, r5
	mov	r1, sp
	mov	lr, pc
	mov	pc, r4

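	/*
	 * If the entry function returns we arrive here (lr was set to the
	 * instruction after the jump above): drop back to user mode via
	 * the trapframe.
	 */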
	/* Kill IRQs */
	mrs	r0, cpsr
	orr	r0, r0, #(I32_bit)
	msr	cpsr_c, r0

	PULLFRAME

	movs	pc, lr			/* Exit */

#ifdef __HAVE_FAST_SOFTINTS
/*
 *	Called at IPL_HIGH
 *	r0 = new lwp
 *	r1 = ipl for softint_dispatch
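 *
 *	Switch to the given softint lwp and run softint_dispatch() on its
 *	stack.  If the softint lwp does not block, we switch straight back
 *	here; if it does block, the frame built below makes the eventual
 *	switch back to the pinned lwp resume in softint_tramp instead.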
 */
ENTRY_NP(softint_switch)
	stmfd	sp!, {r4, r6, r7, lr}

	ldr	r7, [r0, #L_CPU]		/* get curcpu */
#if defined(PROCESS_ID_IS_CURLWP)
	mrc	p15, 0, r4, c13, c0, 4		/* get old lwp */
#else
	ldr	r4, [r7, #(CI_CURLWP)]		/* get old lwp */
#endif
	mrs	r6, cpsr			/* we need to save this */

	/*
	 * If the soft lwp blocks, it needs to return to softint_tramp
	 */
	mov	r2, sp				/* think ip */
	adr	r3, softint_tramp		/* think lr */
	stmfd	sp!, {r2-r3}
	stmfd	sp!, {r4-r7}
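	/*
	 * The two pushes above fake a switch frame with the same layout as
	 * the one cpu_switchto() builds ({r4-r7, ip, lr}), but with 'lr'
	 * pointing at softint_tramp.  The pinned lwp's saved sp will point
	 * at this frame, so if the softint lwp blocks, cpu_switchto()'s
	 * final ldmfd pops it and resumes in softint_tramp.
	 */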

	mov	r5, r0				/* save new lwp */

	ldr	r2, [r4, #(L_ADDR)]		/* get old lwp's pcb */

	/* Save all the registers into the old lwp's pcb */
#if defined(__XSCALE__) || defined(_ARM_ARCH_6)
	strd	r8, [r2, #(PCB_R8)]
	strd	r10, [r2, #(PCB_R10)]
	strd	r12, [r2, #(PCB_R12)]
#else
	add	r3, r2, #(PCB_R8)
	stmia	r3, {r8-r13}
#endif

	/* this is an invariant so load before disabling intrs */
	ldr	r2, [r5, #(L_ADDR)]	/* get new lwp's pcb */

	IRQdisable
	/*
	 * We're switching to a bound LWP so its l_cpu is already correct.
	 */
#if defined(PROCESS_ID_IS_CURLWP)
	mcr	p15, 0, r5, c13, c0, 4		/* save new lwp */
#endif
#if !defined(PROCESS_ID_IS_CURLWP) || defined(MULTIPROCESSOR)
	str	r5, [r7, #(CI_CURLWP)]		/* save new lwp */
#endif

	/* Hook in a new pcb */
	str	r2, [r7, #(CI_CURPCB)]

	/*
	 * Normally we'd restore {r8-r13}, but since this is a softint lwp
	 * its existing register state doesn't matter.  We start the stack
	 * just below the trapframe.
	 */
	ldr	sp, [r2, #(PCB_TF)]	/* get new lwp's stack ptr */

	/* At this point we can allow IRQs again. */
	IRQenable

					/* r1 still has ipl */
	mov	r0, r4			/* r0 has pinned (old) lwp */
	bl	_C_LABEL(softint_dispatch)
	/*
	 * If we return here, softint_dispatch() completed without the soft
	 * lwp blocking, so switch everything back and return.
	 */
	ldr	r2, [r4, #(L_ADDR)]	/* get pinned lwp's pcb */

	IRQdisable
	/*
	 * We don't need to restore all the registers since another lwp was
	 * never executed.  But we do need the SP from the formerly pinned lwp.
	 */

#if defined(PROCESS_ID_IS_CURLWP)
	mcr	p15, 0, r4, c13, c0, 4		/* restore pinned lwp */
#endif
#if !defined(PROCESS_ID_IS_CURLWP) || defined(MULTIPROCESSOR)
	str	r4, [r7, #(CI_CURLWP)]		/* restore pinned lwp */
#endif
	str	r2, [r7, #(CI_CURPCB)]		/* restore the curpcb */
	ldr	sp, [r2, #(PCB_SP)]	/* now running on the old stack. */

	/* Restore the interrupt state saved at entry. */
	msr	cpsr_c, r6

	/*
	 * Grab the registers that got pushed at the start and return.
	 */
	ldmfd	sp!, {r4-r7, ip, lr}	/* eat switch frame */
	ldmfd	sp!, {r4, r6, r7, pc}	/* pop stack and return */

END(softint_switch)

/*
 * r0 = previous LWP (the soft lwp)
 * r4 = original LWP (the current lwp)
 * r6 = original CPSR
 * r7 = curcpu()
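 *
 * Entered, instead of returning into softint_switch(), when the soft
 * lwp blocked: cpu_switchto() has just restored the pinned lwp, and its
 * final ldmfd popped the frame softint_switch() pushed, whose saved lr
 * pointed here.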
 */
ENTRY_NP(softint_tramp)
	ldr	r3, [r7, #(CI_MTX_COUNT)]	/* readjust after mi_switch */
	add	r3, r3, #1
	str	r3, [r7, #(CI_MTX_COUNT)]

	mov	r3, #0				/* tell softint_dispatch */
	str	r3, [r0, #(L_CTXSWTCH)]		/*    the soft lwp blocked */

	msr	cpsr_c, r6			/* restore interrupts */
	ldmfd	sp!, {r4, r6, r7, pc}		/* pop stack and return */
END(softint_tramp)
#endif /* __HAVE_FAST_SOFTINTS */