Home | History | Annotate | Line # | Download | only in arm32
cpuswitch.S revision 1.58.14.1
      1  1.58.14.1      matt /*	$NetBSD: cpuswitch.S,v 1.58.14.1 2014/02/15 16:18:36 matt Exp $	*/
      2        1.1     chris 
      3        1.1     chris /*
      4       1.30       scw  * Copyright 2003 Wasabi Systems, Inc.
      5       1.30       scw  * All rights reserved.
      6       1.30       scw  *
      7       1.30       scw  * Written by Steve C. Woodford for Wasabi Systems, Inc.
      8       1.30       scw  *
      9       1.30       scw  * Redistribution and use in source and binary forms, with or without
     10       1.30       scw  * modification, are permitted provided that the following conditions
     11       1.30       scw  * are met:
     12       1.30       scw  * 1. Redistributions of source code must retain the above copyright
     13       1.30       scw  *    notice, this list of conditions and the following disclaimer.
     14       1.30       scw  * 2. Redistributions in binary form must reproduce the above copyright
     15       1.30       scw  *    notice, this list of conditions and the following disclaimer in the
     16       1.30       scw  *    documentation and/or other materials provided with the distribution.
     17       1.30       scw  * 3. All advertising materials mentioning features or use of this software
     18       1.30       scw  *    must display the following acknowledgement:
     19       1.30       scw  *      This product includes software developed for the NetBSD Project by
     20       1.30       scw  *      Wasabi Systems, Inc.
     21       1.30       scw  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22       1.30       scw  *    or promote products derived from this software without specific prior
     23       1.30       scw  *    written permission.
     24       1.30       scw  *
     25       1.30       scw  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26       1.30       scw  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27       1.30       scw  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28       1.30       scw  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29       1.30       scw  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30       1.30       scw  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31       1.30       scw  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32       1.30       scw  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33       1.30       scw  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34       1.30       scw  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35       1.30       scw  * POSSIBILITY OF SUCH DAMAGE.
     36       1.30       scw  */
     37       1.30       scw /*
     38        1.1     chris  * Copyright (c) 1994-1998 Mark Brinicombe.
     39        1.1     chris  * Copyright (c) 1994 Brini.
     40        1.1     chris  * All rights reserved.
     41        1.1     chris  *
     42        1.1     chris  * This code is derived from software written for Brini by Mark Brinicombe
     43        1.1     chris  *
     44        1.1     chris  * Redistribution and use in source and binary forms, with or without
     45        1.1     chris  * modification, are permitted provided that the following conditions
     46        1.1     chris  * are met:
     47        1.1     chris  * 1. Redistributions of source code must retain the above copyright
     48        1.1     chris  *    notice, this list of conditions and the following disclaimer.
     49        1.1     chris  * 2. Redistributions in binary form must reproduce the above copyright
     50        1.1     chris  *    notice, this list of conditions and the following disclaimer in the
     51        1.1     chris  *    documentation and/or other materials provided with the distribution.
     52        1.1     chris  * 3. All advertising materials mentioning features or use of this software
     53        1.1     chris  *    must display the following acknowledgement:
     54        1.1     chris  *	This product includes software developed by Brini.
     55        1.1     chris  * 4. The name of the company nor the name of the author may be used to
     56        1.1     chris  *    endorse or promote products derived from this software without specific
     57        1.1     chris  *    prior written permission.
     58        1.1     chris  *
     59        1.1     chris  * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
     60        1.1     chris  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
     61        1.1     chris  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     62        1.1     chris  * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
     63        1.1     chris  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
     64        1.1     chris  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
     65        1.1     chris  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     66        1.1     chris  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     67        1.1     chris  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     68        1.1     chris  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     69        1.1     chris  * SUCH DAMAGE.
     70        1.1     chris  *
     71        1.1     chris  * RiscBSD kernel project
     72        1.1     chris  *
     73        1.1     chris  * cpuswitch.S
     74        1.1     chris  *
     75        1.1     chris  * cpu switching functions
     76        1.1     chris  *
     77        1.1     chris  * Created      : 15/10/94
     78        1.1     chris  */
     79        1.1     chris 
     80        1.1     chris #include "opt_armfpe.h"
     81       1.30       scw #include "opt_arm32_pmap.h"
     82       1.19     bjh21 #include "opt_multiprocessor.h"
     83       1.58      matt #include "opt_cpuoptions.h"
     84       1.36    martin #include "opt_lockdebug.h"
     85        1.1     chris 
     86        1.1     chris #include "assym.h"
     87  1.58.14.1      matt #include <arm/asm.h>
     88  1.58.14.1      matt #include <arm/locore.h>
     89       1.58      matt 
     90  1.58.14.1      matt 	RCSID("$NetBSD: cpuswitch.S,v 1.58.14.1 2014/02/15 16:18:36 matt Exp $")
     91        1.1     chris 
     92       1.34  kristerw /* LINTSTUB: include <sys/param.h> */
     93       1.34  kristerw 
#undef IRQdisable
#undef IRQenable

/*
 * New experimental definitions of IRQdisable and IRQenable
 * These keep FIQ's enabled since FIQ's are special.
 */

#ifdef _ARM_ARCH_6
/* ARMv6+: dedicated mask instructions, no scratch register needed. */
#define	IRQdisable	cpsid	i
#define	IRQenable	cpsie	i
#else
/*
 * Pre-v6: read-modify-write of CPSR.
 * NOTE: these clobber r14 (lr) -- callers must not have a live lr.
 */
#define IRQdisable \
	mrs	r14, cpsr ; \
	orr	r14, r14, #(I32_bit) ; \
	msr	cpsr_c, r14

#define IRQenable \
	mrs	r14, cpsr ; \
	bic	r14, r14, #(I32_bit) ; \
	msr	cpsr_c, r14

#endif

	.text
/*
 * Literal pool entry: address of pmap_previous_active_lwp, used by
 * cpu_switchto below to record the old lwp for pmap_activate().
 */
.Lpmap_previous_active_lwp:
	.word	_C_LABEL(pmap_previous_active_lwp)
    121       1.30       scw 
/*
 * struct lwp *
 * cpu_switchto(struct lwp *current, struct lwp *next)
 *
 * Switch to the specified next LWP.
 * Arguments:
 *
 *	r0	'struct lwp *' of the current LWP (or NULL if exiting)
 *	r1	'struct lwp *' of the LWP to switch to
 *	r2	returning (never read below -- NOTE(review): confirm intent)
 *
 * Returns the old (switched-from) lwp in r0.
 */
ENTRY(cpu_switchto)
	mov	ip, sp
	push	{r4-r7, ip, lr}		/* build the switch frame */

	/* move lwps into caller saved registers */
	mov	r6, r1			/* r6 = new lwp */
	mov	r4, r0			/* r4 = old lwp (may be NULL) */

#ifdef TPIDRPRW_IS_CURCPU
	GET_CURCPU(r3)
#elif defined(TPIDRPRW_IS_CURLWP)
	/*
	 * Read curlwp from TPIDRPRW (r4 may be 0 when exiting, so we
	 * cannot rely on the argument).  This must be MRC (read), not
	 * MCR, which would overwrite the curlwp register with r0.
	 */
	mrc	p15, 0, r0, c13, c0, 4		/* get old lwp (r4 maybe 0) */
	ldr	r3, [r0, #(L_CPU)]		/* get cpu from old lwp */
#elif !defined(MULTIPROCESSOR)
	ldr	r3, [r6, #(L_CPU)]		/* get cpu from new lwp */
#else
#error curcpu() method not defined
#endif

	/* rem: r3 = curcpu() */
	/* rem: r4 = old lwp */
	/* rem: r6 = new lwp */

#ifndef __HAVE_UNNESTED_INTRS
	IRQdisable
#endif

#ifdef MULTIPROCESSOR
	str	r3, [r6, #(L_CPU)]
#else
	/* l->l_cpu initialized in fork1() for single-processor */
#endif

#if defined(TPIDRPRW_IS_CURLWP)
	mcr	p15, 0, r6, c13, c0, 4		/* set current lwp */
#endif
	/* We have a new curlwp now so make a note of it */
	str	r6, [r3, #(CI_CURLWP)]

	/* Get the new pcb */
	ldr	r7, [r6, #(L_PCB)]

	/* At this point we can allow IRQ's again. */
#ifndef __HAVE_UNNESTED_INTRS
	IRQenable
#endif

	/* rem: r3 = curcpu() */
	/* rem: r4 = old lwp */
	/* rem: r6 = new lwp */
	/* rem: r7 = new pcb */
	/* rem: interrupts are enabled */

	/*
	 * If the old lwp on entry to cpu_switchto was zero then the
	 * process that called it was exiting. This means that we do
	 * not need to save the current context. Instead we can jump
	 * straight to restoring the context for the new process.
	 */
	teq	r4, #0
	beq	.Ldo_switch

	/* rem: r3 = curcpu() */
	/* rem: r4 = old lwp */
	/* rem: r6 = new lwp */
	/* rem: r7 = new pcb */
	/* rem: interrupts are enabled */

	/* Save old context */

	/* Get the user structure for the old lwp. */
	ldr	r5, [r4, #(L_PCB)]

	/* Save all the registers in the old lwp's pcb */
#if defined(_ARM_ARCH_DWORD_OK)
	strd	r8, r9, [r5, #(PCB_R8)]
	strd	r10, r11, [r5, #(PCB_R10)]
	strd	r12, r13, [r5, #(PCB_R12)]
#else
	add	r0, r5, #(PCB_R8)
	stmia	r0, {r8-r13}
#endif

#ifdef _ARM_ARCH_6
	/*
	 * Save user read/write thread/process id register
	 */
	mrc	p15, 0, r0, c13, c0, 2
	str	r0, [r5, #(PCB_USER_PID_RW)]
#endif
	/*
	 * NOTE: We can now use r8-r13 until it is time to restore
	 * them for the new process.
	 */

	/* rem: r3 = curcpu() */
	/* rem: r4 = old lwp */
	/* rem: r5 = old pcb */
	/* rem: r6 = new lwp */
	/* rem: r7 = new pcb */
	/* rem: interrupts are enabled */

	/* Restore saved context */

.Ldo_switch:
	/* rem: r3 = curcpu() */
	/* rem: r4 = old lwp */
	/* rem: r6 = new lwp */
	/* rem: r7 = new pcb */
	/* rem: interrupts are enabled */

#ifdef _ARM_ARCH_6
	/*
	 * Restore user thread/process id registers
	 */
	ldr	r0, [r7, #(PCB_USER_PID_RW)]
	mcr	p15, 0, r0, c13, c0, 2
	ldr	r0, [r6, #(L_PRIVATE)]
	mcr	p15, 0, r0, c13, c0, 3
#endif

#ifdef FPU_VFP
	/*
	 * If we have a VFP, we need to load FPEXC.
	 */
	ldr	r0, [r3, #(CI_VFP_ID)]
	cmp	r0, #0
	ldrne	r0, [r7, #(PCB_VFP_FPEXC)]
	vmsrne	fpexc, r0
#endif

	ldr	r5, [r6, #(L_PROC)]	/* fetch the proc for below */

	/* Restore all the saved registers */
#ifdef __XSCALE__
	ldr	r8, [r7, #(PCB_R8)]
	ldr	r9, [r7, #(PCB_R9)]
	ldr	r10, [r7, #(PCB_R10)]
	ldr	r11, [r7, #(PCB_R11)]
	ldr	r12, [r7, #(PCB_R12)]
	ldr	r13, [r7, #(PCB_KSP)]	/* sp */
#elif defined(_ARM_ARCH_DWORD_OK)
	ldrd	r8, r9, [r7, #(PCB_R8)]
	/*
	 * LDRD loads the pair {Rt, Rt+1}: this must be r10, r11
	 * (was "r10, r10", which never restored the new lwp's r11
	 * and is not a valid LDRD register pair).
	 */
	ldrd	r10, r11, [r7, #(PCB_R10)]
	ldrd	r12, r13, [r7, #(PCB_R12)]	/* sp */
#else
	add	r0, r7, #PCB_R8
	ldmia	r0, {r8-r13}
#endif

	/* Record the old lwp for pmap_activate()'s benefit */
	ldr	r1, .Lpmap_previous_active_lwp		/* XXXSMP */
	str	r4, [r1]

	/* rem: r4 = old lwp */
	/* rem: r5 = new lwp's proc */
	/* rem: r6 = new lwp */
	/* rem: r7 = new pcb */

	/*
	 * Check for restartable atomic sequences (RAS).
	 */

	ldr	r2, [r5, #(P_RASLIST)]
	ldr	r1, [r6, #(L_MD_TF)]	/* r1 = trapframe (used below) */
	teq	r2, #0			/* p->p_nras == 0? */
	bne	.Lswitch_do_ras		/* no, check for one */

.Lswitch_return:
	/* cpu_switchto returns the old lwp */
	mov	r0, r4
	/* lwp_trampoline expects new lwp as its second argument */
	mov	r1, r6

#ifdef _ARM_ARCH_7
	clrex				/* cause any subsequent STREX* to fail */
#endif

	/*
	 * Pull the registers that got pushed when cpu_switchto() was called,
	 * and return.
	 */
	pop	{r4-r7, ip, pc}

.Lswitch_do_ras:
	ldr	r1, [r1, #(TF_PC)]	/* second ras_lookup() arg */
	mov	r0, r5			/* first ras_lookup() arg */
	bl	_C_LABEL(ras_lookup)
	cmn	r0, #1			/* -1 means "not in a RAS" */
	ldrne	r1, [r6, #(L_MD_TF)]
	strne	r0, [r1, #(TF_PC)]
	b	.Lswitch_return
END(cpu_switchto)
    326        1.1     chris 
/*
 * lwp_trampoline
 *
 * First code executed by a newly forked lwp: cpu_switchto() "returns"
 * here, with the callee-saved registers (restored from the new pcb)
 * carrying the values set up by cpu_lwp_fork, as listed below.  If the
 * called function returns, we drop back to user mode via the trapframe.
 */
ENTRY_NP(lwp_trampoline)
	/*
	 * cpu_switchto gives us:
	 *	arg0(r0) = old lwp
	 *	arg1(r1) = new lwp
	 * setup by cpu_lwp_fork:
	 *	r4 = func to call
	 *	r5 = arg to func
	 *	r6 = <unused>
	 *	r7 = spsr mode
	 */
	bl	_C_LABEL(lwp_startup)

	mov	fp, #0			/* top stack frame */
	mov	r0, r5			/* first arg for func */
	mov	r1, sp
#ifdef _ARM_ARCH_5
	blx	r4
#else
	mov	lr, pc			/* pre-v5: no blx, fake the link */
	mov	pc, r4
#endif

	/* func returned: mask IRQs before unwinding to user mode */
	GET_CPSR(r0)
	CPSID_I(r0, r0)			/* Kill irq's */

	GET_CURCPU(r4)			/* for DO_AST */
	DO_AST_AND_RESTORE_ALIGNMENT_FAULTS
	PULLFRAME

	movs	pc, lr			/* Exit */
END(lwp_trampoline)
    359  1.58.14.1      matt 
    360  1.58.14.1      matt AST_ALIGNMENT_FAULT_LOCALS
    361       1.58      matt 
#ifdef __HAVE_FAST_SOFTINTS
/*
 *	Called at IPL_HIGH
 *	r0 = new lwp
 *	r1 = ipl for softint_dispatch
 *
 * Lend the CPU to a softint lwp: save just enough of the pinned
 * (current) lwp's state to come back, run softint_dispatch(), then
 * switch back onto the pinned lwp's stack and return.  If the softint
 * lwp blocks, mi_switch() resumes it through softint_tramp instead
 * (see the frame built below).
 */
ENTRY_NP(softint_switch)
	push	{r4, r6, r7, lr}

	ldr	r7, [r0, #L_CPU]		/* get curcpu */
#if defined(TPIDRPRW_IS_CURLWP)
	mrc	p15, 0, r4, c13, c0, 4		/* get old lwp */
#else
	ldr	r4, [r7, #(CI_CURLWP)]		/* get old lwp */
#endif
	mrs	r6, cpsr			/* we need to save this */

	/*
	 * If the soft lwp blocks, it needs to return to softint_tramp
	 */
	mov	r2, sp				/* think ip */
	adr	r3, softint_tramp		/* think lr */
	push	{r2-r3}
	push	{r4-r7}

	mov	r5, r0				/* save new lwp */

	ldr	r2, [r4, #(L_PCB)]		/* get old lwp's pcb */

	/* Save all the registers into the old lwp's pcb */
#if defined(__XSCALE__) || defined(_ARM_ARCH_6)
	strd	r8, r9, [r2, #(PCB_R8)]
	strd	r10, r11, [r2, #(PCB_R10)]
	strd	r12, r13, [r2, #(PCB_R12)]
#else
	add	r3, r2, #(PCB_R8)
	stmia	r3, {r8-r13}
#endif

	/* this is an invariant so load before disabling intrs */
	ldr	r2, [r5, #(L_PCB)]	/* get new lwp's pcb */

#ifndef __HAVE_UNNESTED_INTRS
	IRQdisable
#endif
	/*
	 * We're switching to a bound LWP so its l_cpu is already correct.
	 */
#if defined(TPIDRPRW_IS_CURLWP)
	mcr	p15, 0, r5, c13, c0, 4		/* save new lwp */
#endif
	str	r5, [r7, #(CI_CURLWP)]		/* save new lwp */

	/*
	 * Normally, we'd get {r8-r13} but since this is a softint lwp
	 * its existing state doesn't matter.  We start the stack just
	 * below the trapframe.
	 */
	ldr	sp, [r5, #(L_MD_TF)]	/* get new lwp's stack ptr */

	/* At this point we can allow IRQ's again. */
#ifndef __HAVE_UNNESTED_INTRS
	IRQenable
#endif

					/* r1 still has ipl */
	mov	r0, r4			/* r0 has pinned (old) lwp */
	bl	_C_LABEL(softint_dispatch)
	/*
	 * If we've returned, we need to change everything back and return.
	 */
	ldr	r2, [r4, #(L_PCB)]	/* get pinned lwp's pcb */

#ifndef __HAVE_UNNESTED_INTRS
	IRQdisable
#endif
	/*
	 * We don't need to restore all the registers since another lwp was
	 * never executed.  But we do need the SP from the formerly pinned lwp.
	 */

#if defined(TPIDRPRW_IS_CURLWP)
	mcr	p15, 0, r4, c13, c0, 4		/* restore pinned lwp */
#endif
	str	r4, [r7, #(CI_CURLWP)]		/* restore pinned lwp */
	ldr	sp, [r2, #(PCB_KSP)]	/* now running on the old stack. */

	/* At this point we can allow IRQ's again. */
	msr	cpsr_c, r6		/* restore the saved CPSR */

	/*
	 * Grab the registers that got pushed at the start and return.
	 */
	pop	{r4-r7, ip, lr}		/* eat switch frame */
	pop	{r4, r6, r7, pc}	/* pop stack and return */

END(softint_switch)
    459       1.58      matt 
/*
 * Resume point used when a softint lwp blocks: mi_switch() returns
 * through this frame (built by softint_switch) rather than back into
 * softint_switch itself.
 *
 * r0 = previous LWP (the soft lwp)
 * r4 = original LWP (the current lwp)
 * r6 = original CPSR
 * r7 = curcpu()
 */
ENTRY_NP(softint_tramp)
	ldr	r3, [r7, #(CI_MTX_COUNT)]	/* readjust after mi_switch */
	add	r3, r3, #1
	str	r3, [r7, #(CI_MTX_COUNT)]

	mov	r3, #0				/* tell softint_dispatch */
	str	r3, [r0, #(L_CTXSWTCH)]		/*    the soft lwp blocked */

	msr	cpsr_c, r6			/* restore interrupts */
	pop	{r4, r6, r7, pc}		/* pop stack and return */
END(softint_tramp)
#endif /* __HAVE_FAST_SOFTINTS */
    478