Home | History | Annotate | Line # | Download | only in arm32
cpuswitch.S revision 1.72.2.3
      1  1.72.2.3       tls /*	$NetBSD: cpuswitch.S,v 1.72.2.3 2013/06/23 06:19:59 tls Exp $	*/
      2       1.1     chris 
      3       1.1     chris /*
      4      1.30       scw  * Copyright 2003 Wasabi Systems, Inc.
      5      1.30       scw  * All rights reserved.
      6      1.30       scw  *
      7      1.30       scw  * Written by Steve C. Woodford for Wasabi Systems, Inc.
      8      1.30       scw  *
      9      1.30       scw  * Redistribution and use in source and binary forms, with or without
     10      1.30       scw  * modification, are permitted provided that the following conditions
     11      1.30       scw  * are met:
     12      1.30       scw  * 1. Redistributions of source code must retain the above copyright
     13      1.30       scw  *    notice, this list of conditions and the following disclaimer.
     14      1.30       scw  * 2. Redistributions in binary form must reproduce the above copyright
     15      1.30       scw  *    notice, this list of conditions and the following disclaimer in the
     16      1.30       scw  *    documentation and/or other materials provided with the distribution.
     17      1.30       scw  * 3. All advertising materials mentioning features or use of this software
     18      1.30       scw  *    must display the following acknowledgement:
     19      1.30       scw  *      This product includes software developed for the NetBSD Project by
     20      1.30       scw  *      Wasabi Systems, Inc.
     21      1.30       scw  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22      1.30       scw  *    or promote products derived from this software without specific prior
     23      1.30       scw  *    written permission.
     24      1.30       scw  *
     25      1.30       scw  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26      1.30       scw  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27      1.30       scw  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28      1.30       scw  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29      1.30       scw  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30      1.30       scw  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31      1.30       scw  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32      1.30       scw  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33      1.30       scw  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34      1.30       scw  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35      1.30       scw  * POSSIBILITY OF SUCH DAMAGE.
     36      1.30       scw  */
     37      1.30       scw /*
     38       1.1     chris  * Copyright (c) 1994-1998 Mark Brinicombe.
     39       1.1     chris  * Copyright (c) 1994 Brini.
     40       1.1     chris  * All rights reserved.
     41       1.1     chris  *
     42       1.1     chris  * This code is derived from software written for Brini by Mark Brinicombe
     43       1.1     chris  *
     44       1.1     chris  * Redistribution and use in source and binary forms, with or without
     45       1.1     chris  * modification, are permitted provided that the following conditions
     46       1.1     chris  * are met:
     47       1.1     chris  * 1. Redistributions of source code must retain the above copyright
     48       1.1     chris  *    notice, this list of conditions and the following disclaimer.
     49       1.1     chris  * 2. Redistributions in binary form must reproduce the above copyright
     50       1.1     chris  *    notice, this list of conditions and the following disclaimer in the
     51       1.1     chris  *    documentation and/or other materials provided with the distribution.
     52       1.1     chris  * 3. All advertising materials mentioning features or use of this software
     53       1.1     chris  *    must display the following acknowledgement:
     54       1.1     chris  *	This product includes software developed by Brini.
     55       1.1     chris  * 4. The name of the company nor the name of the author may be used to
     56       1.1     chris  *    endorse or promote products derived from this software without specific
     57       1.1     chris  *    prior written permission.
     58       1.1     chris  *
     59       1.1     chris  * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
     60       1.1     chris  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
     61       1.1     chris  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     62       1.1     chris  * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
     63       1.1     chris  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
     64       1.1     chris  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
     65       1.1     chris  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     66       1.1     chris  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     67       1.1     chris  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     68       1.1     chris  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     69       1.1     chris  * SUCH DAMAGE.
     70       1.1     chris  *
     71       1.1     chris  * RiscBSD kernel project
     72       1.1     chris  *
     73       1.1     chris  * cpuswitch.S
     74       1.1     chris  *
     75       1.1     chris  * cpu switching functions
     76       1.1     chris  *
     77       1.1     chris  * Created      : 15/10/94
     78       1.1     chris  */
     79       1.1     chris 
     80       1.1     chris #include "opt_armfpe.h"
     81      1.30       scw #include "opt_arm32_pmap.h"
     82      1.19     bjh21 #include "opt_multiprocessor.h"
     83      1.58      matt #include "opt_cpuoptions.h"
     84      1.36    martin #include "opt_lockdebug.h"
     85       1.1     chris 
     86       1.1     chris #include "assym.h"
     87       1.1     chris #include <machine/asm.h>
     88      1.58      matt #include <machine/cpu.h>
     89  1.72.2.3       tls #include <machine/frame.h>
     90      1.58      matt 
     91  1.72.2.3       tls 	RCSID("$NetBSD: cpuswitch.S,v 1.72.2.3 2013/06/23 06:19:59 tls Exp $")
     92       1.1     chris 
     93      1.34  kristerw /* LINTSTUB: include <sys/param.h> */
     94      1.34  kristerw 
     95       1.1     chris #undef IRQdisable
     96       1.1     chris #undef IRQenable
     97       1.1     chris 
     98       1.1     chris /*
     99       1.1     chris  * New experimental definitions of IRQdisable and IRQenable
    100       1.1     chris  * These keep FIQ's enabled since FIQ's are special.
    101       1.1     chris  */
    102       1.1     chris 
    103      1.58      matt #ifdef _ARM_ARCH_6
    104      1.58      matt #define	IRQdisable	cpsid	i
    105      1.58      matt #define	IRQenable	cpsie	i
    106      1.58      matt #else
    107       1.1     chris #define IRQdisable \
    108      1.13   thorpej 	mrs	r14, cpsr ; \
    109       1.1     chris 	orr	r14, r14, #(I32_bit) ; \
    110      1.58      matt 	msr	cpsr_c, r14
    111       1.1     chris 
    112       1.1     chris #define IRQenable \
    113      1.13   thorpej 	mrs	r14, cpsr ; \
    114       1.1     chris 	bic	r14, r14, #(I32_bit) ; \
    115      1.58      matt 	msr	cpsr_c, r14
    116       1.1     chris 
    117      1.22     bjh21 #endif
    118       1.1     chris 
    119       1.1     chris 	.text
    120      1.57       scw .Lpmap_previous_active_lwp:
    121      1.57       scw 	.word	_C_LABEL(pmap_previous_active_lwp)
    122      1.30       scw 
    123       1.1     chris /*
    124      1.47      yamt  * struct lwp *
    125      1.47      yamt  * cpu_switchto(struct lwp *current, struct lwp *next)
    126      1.48     skrll  *
    127      1.47      yamt  * Switch to the specified next LWP
    128      1.47      yamt  * Arguments:
    129      1.16   thorpej  *
    130      1.58      matt  *	r0	'struct lwp *' of the current LWP (or NULL if exiting)
    131      1.47      yamt  *	r1	'struct lwp *' of the LWP to switch to
    132      1.58      matt  *	r2	returning
    133       1.1     chris  */
    134      1.47      yamt ENTRY(cpu_switchto)
    135      1.51     skrll 	mov	ip, sp
    136      1.51     skrll 	stmfd	sp!, {r4-r7, ip, lr}
    137       1.1     chris 
    138      1.58      matt 	/* move lwps into caller saved registers */
    139      1.55     chris 	mov	r6, r1
    140      1.55     chris 	mov	r4, r0
    141      1.58      matt 
    142      1.68      matt #ifdef TPIDRPRW_IS_CURCPU
    143  1.72.2.2       tls 	GET_CURCPU(r3)
    144      1.67      matt #elif defined(TPIDRPRW_IS_CURLWP)
    145      1.58      matt 	mcr	p15, 0, r0, c13, c0, 4		/* get old lwp (r4 maybe 0) */
    146  1.72.2.2       tls 	ldr	r3, [r0, #(L_CPU)]		/* get cpu from old lwp */
    147      1.58      matt #elif !defined(MULTIPROCESSOR)
    148  1.72.2.2       tls 	ldr	r3, [r6, #L_CPU]		/* get cpu from new lwp */
    149      1.58      matt #else
    150      1.58      matt #error curcpu() method not defined
    151      1.58      matt #endif
    152       1.7     chris 
    153  1.72.2.2       tls 	/* rem: r3 = curcpu() */
    154      1.55     chris 	/* rem: r4 = old lwp */
    155      1.55     chris 	/* rem: r6 = new lwp */
    156      1.55     chris 
    157      1.59      matt #ifndef __HAVE_UNNESTED_INTRS
    158       1.1     chris 	IRQdisable
    159      1.59      matt #endif
    160       1.7     chris 
    161      1.19     bjh21 #ifdef MULTIPROCESSOR
    162  1.72.2.2       tls 	str	r3, [r6, #(L_CPU)]
    163      1.19     bjh21 #else
    164      1.29   thorpej 	/* l->l_cpu initialized in fork1() for single-processor */
    165      1.19     bjh21 #endif
    166       1.1     chris 
    167      1.67      matt #if defined(TPIDRPRW_IS_CURLWP)
    168      1.58      matt 	mcr	p15, 0, r6, c13, c0, 4		/* set current lwp */
    169      1.58      matt #endif
    170      1.58      matt 	/* We have a new curlwp now so make a note it */
    171  1.72.2.2       tls 	str	r6, [r3, #(CI_CURLWP)]
    172      1.55     chris 
    173      1.65      matt 	/* Get the new pcb */
    174      1.65      matt 	ldr	r7, [r6, #(L_PCB)]
    175       1.1     chris 
    176       1.1     chris 	/* At this point we can allow IRQ's again. */
    177      1.59      matt #ifndef __HAVE_UNNESTED_INTRS
    178       1.1     chris 	IRQenable
    179      1.59      matt #endif
    180       1.1     chris 
    181  1.72.2.2       tls 	/* rem: r3 = curlwp */
    182      1.47      yamt 	/* rem: r4 = old lwp */
    183      1.43     skrll 	/* rem: r6 = new lwp */
    184      1.55     chris 	/* rem: r7 = new pcb */
    185       1.4     chris 	/* rem: interrupts are enabled */
    186       1.1     chris 
    187       1.1     chris 	/*
    188      1.47      yamt 	 * If the old lwp on entry to cpu_switchto was zero then the
    189       1.1     chris 	 * process that called it was exiting. This means that we do
    190       1.1     chris 	 * not need to save the current context. Instead we can jump
    191       1.1     chris 	 * straight to restoring the context for the new process.
    192       1.1     chris 	 */
    193      1.58      matt 	teq	r4, #0
    194      1.49       scw 	beq	.Ldo_switch
    195       1.1     chris 
    196  1.72.2.2       tls 	/* rem: r3 = curlwp */
    197      1.47      yamt 	/* rem: r4 = old lwp */
    198      1.43     skrll 	/* rem: r6 = new lwp */
    199      1.55     chris 	/* rem: r7 = new pcb */
    200       1.4     chris 	/* rem: interrupts are enabled */
    201       1.1     chris 
    202      1.48     skrll 	/* Save old context */
    203       1.1     chris 
    204      1.29   thorpej 	/* Get the user structure for the old lwp. */
    205      1.60     rmind 	ldr	r5, [r4, #(L_PCB)]
    206       1.1     chris 
    207      1.29   thorpej 	/* Save all the registers in the old lwp's pcb */
    208  1.72.2.2       tls #if defined(_ARM_ARCH_DWORD_OK)
    209      1.55     chris 	strd	r8, [r5, #(PCB_R8)]
    210      1.55     chris 	strd	r10, [r5, #(PCB_R10)]
    211      1.55     chris 	strd	r12, [r5, #(PCB_R12)]
    212      1.58      matt #else
    213      1.58      matt 	add	r0, r5, #(PCB_R8)
    214      1.58      matt 	stmia	r0, {r8-r13}
    215      1.37       scw #endif
    216       1.1     chris 
    217      1.58      matt #ifdef _ARM_ARCH_6
    218      1.58      matt 	/*
    219      1.58      matt 	 * Save user read/write thread/process id register
    220      1.58      matt 	 */
    221      1.58      matt 	mrc	p15, 0, r0, c13, c0, 2
    222      1.58      matt 	str	r0, [r5, #(PCB_USER_PID_RW)]
    223      1.58      matt #endif
    224       1.1     chris 	/*
    225      1.29   thorpej 	 * NOTE: We can now use r8-r13 until it is time to restore
    226      1.29   thorpej 	 * them for the new process.
    227      1.29   thorpej 	 */
    228      1.29   thorpej 
    229  1.72.2.2       tls 	/* rem: r3 = curlwp */
    230      1.47      yamt 	/* rem: r4 = old lwp */
    231      1.55     chris 	/* rem: r5 = old pcb */
    232      1.47      yamt 	/* rem: r6 = new lwp */
    233      1.55     chris 	/* rem: r7 = new pcb */
    234      1.47      yamt 	/* rem: interrupts are enabled */
    235      1.47      yamt 
    236      1.48     skrll 	/* Restore saved context */
    237       1.1     chris 
    238      1.49       scw .Ldo_switch:
    239  1.72.2.2       tls 	/* rem: r3 = curlwp */
    240      1.47      yamt 	/* rem: r4 = old lwp */
    241      1.29   thorpej 	/* rem: r6 = new lwp */
    242      1.55     chris 	/* rem: r7 = new pcb */
    243      1.53     chris 	/* rem: interrupts are enabled */
    244      1.29   thorpej 
    245      1.58      matt #ifdef _ARM_ARCH_6
    246      1.58      matt 	/*
    247      1.58      matt 	 * Restore user thread/process id registers
    248      1.58      matt 	 */
    249      1.58      matt 	ldr	r0, [r7, #(PCB_USER_PID_RW)]
    250      1.58      matt 	mcr	p15, 0, r0, c13, c0, 2
    251      1.63      matt 	ldr	r0, [r6, #(L_PRIVATE)]
    252      1.58      matt 	mcr	p15, 0, r0, c13, c0, 3
    253      1.58      matt #endif
    254      1.58      matt 
    255  1.72.2.2       tls #ifdef FPU_VFP
    256  1.72.2.2       tls 	/*
    257  1.72.2.2       tls 	 * If we have a VFP, we need to load FPEXC.
    258  1.72.2.2       tls 	 */
    259  1.72.2.2       tls 	ldr	r0, [r3, #(CI_VFP_ID)]
    260  1.72.2.2       tls 	cmp	r0, #0
    261  1.72.2.2       tls 	ldrne	r0, [r7, #(PCB_VFP_FPEXC)]
    262  1.72.2.2       tls 	mcrne	p10, 7, r0, c8, c0, 0
    263  1.72.2.2       tls #endif
    264  1.72.2.2       tls 
    265      1.55     chris 	ldr	r5, [r6, #(L_PROC)]	/* fetch the proc for below */
    266      1.55     chris 
    267      1.52     skrll 	/* Restore all the saved registers */
    268      1.58      matt #ifdef __XSCALE__
    269      1.37       scw 	ldr	r8, [r7, #(PCB_R8)]
    270      1.37       scw 	ldr	r9, [r7, #(PCB_R9)]
    271      1.37       scw 	ldr	r10, [r7, #(PCB_R10)]
    272      1.37       scw 	ldr	r11, [r7, #(PCB_R11)]
    273      1.37       scw 	ldr	r12, [r7, #(PCB_R12)]
    274  1.72.2.2       tls 	ldr	r13, [r7, #(PCB_KSP)]	/* sp */
    275  1.72.2.2       tls #elif defined(_ARM_ARCH_DWORD_OK)
    276      1.58      matt 	ldrd	r8, [r7, #(PCB_R8)]
    277      1.58      matt 	ldrd	r10, [r7, #(PCB_R10)]
    278  1.72.2.2       tls 	ldrd	r12, [r7, #(PCB_R12)]	/* sp */
    279      1.58      matt #else
    280      1.58      matt 	add	r0, r7, #PCB_R8
    281      1.58      matt 	ldmia	r0, {r8-r13}
    282      1.37       scw #endif
    283      1.29   thorpej 
    284      1.57       scw 	/* Record the old lwp for pmap_activate()'s benefit */
    285  1.72.2.2       tls 	ldr	r1, .Lpmap_previous_active_lwp		/* XXXSMP */
    286      1.57       scw 	str	r4, [r1]
    287      1.57       scw 
    288      1.47      yamt 	/* rem: r4 = old lwp */
    289      1.29   thorpej 	/* rem: r5 = new lwp's proc */
    290      1.29   thorpej 	/* rem: r6 = new lwp */
    291      1.29   thorpej 	/* rem: r7 = new pcb */
    292      1.18   thorpej 
    293      1.18   thorpej 	/*
    294      1.18   thorpej 	 * Check for restartable atomic sequences (RAS).
    295      1.18   thorpej 	 */
    296      1.18   thorpej 
    297      1.39       dsl 	ldr	r2, [r5, #(P_RASLIST)]
    298      1.66      matt 	ldr	r1, [r6, #(L_MD_TF)]	/* r1 = trapframe (used below) */
    299      1.18   thorpej 	teq	r2, #0			/* p->p_nras == 0? */
    300      1.18   thorpej 	bne	.Lswitch_do_ras		/* no, check for one */
    301      1.18   thorpej 
    302      1.14    briggs .Lswitch_return:
    303      1.47      yamt 	/* cpu_switchto returns the old lwp */
    304      1.29   thorpej 	mov	r0, r4
    305      1.47      yamt 	/* lwp_trampoline expects new lwp as it's second argument */
    306      1.47      yamt 	mov	r1, r6
    307       1.1     chris 
    308      1.67      matt #ifdef _ARM_ARCH_7
    309      1.67      matt 	clrex				/* cause any subsequent STREX* to fail */
    310      1.67      matt #endif
    311      1.67      matt 
    312       1.1     chris 	/*
    313      1.51     skrll 	 * Pull the registers that got pushed when cpu_switchto() was called,
    314      1.51     skrll 	 * and return.
    315       1.1     chris 	 */
    316      1.51     skrll 	ldmfd	sp, {r4-r7, sp, pc}
    317      1.18   thorpej 
    318      1.18   thorpej .Lswitch_do_ras:
    319      1.38       scw 	ldr	r1, [r1, #(TF_PC)]	/* second ras_lookup() arg */
    320      1.29   thorpej 	mov	r0, r5			/* first ras_lookup() arg */
    321      1.18   thorpej 	bl	_C_LABEL(ras_lookup)
    322      1.18   thorpej 	cmn	r0, #1			/* -1 means "not in a RAS" */
    323      1.66      matt 	ldrne	r1, [r6, #(L_MD_TF)]
    324      1.38       scw 	strne	r0, [r1, #(TF_PC)]
    325      1.18   thorpej 	b	.Lswitch_return
    326       1.1     chris 
     327  1.72.2.1       tls ENTRY_NP(lwp_trampoline)
     328      1.52     skrll 	/*
     329      1.52     skrll 	 * First entry point of a newly forked lwp.  cpu_switchto gives us:
     330      1.67      matt 	 *	arg0(r0) = old lwp
     331      1.67      matt 	 *	arg1(r1) = new lwp
     332      1.67      matt 	 * setup by cpu_lwp_fork:
     333      1.67      matt 	 *	r4 = func to call
     334      1.67      matt 	 *	r5 = arg to func
     335      1.67      matt 	 *	r6 = <unused>
     336      1.67      matt 	 *	r7 = spsr mode
     337      1.52     skrll 	 */
     338      1.47      yamt 	bl	_C_LABEL(lwp_startup)
     339      1.38       scw 
     340      1.72      matt 	mov	fp, #0			/* top stack frame */
     341       1.1     chris 	mov	r0, r5			/* first arg: the cpu_lwp_fork arg */
     342       1.1     chris 	mov	r1, sp			/* second arg: presumably the frame PULLFRAME pops below — TODO confirm */
     343      1.70      matt #ifdef _ARM_ARCH_5
     344      1.67      matt 	blx	r4			/* call func(arg, sp) */
     345      1.67      matt #else
     346      1.24     bjh21 	mov	lr, pc			/* pre-ARMv5 call sequence: lr = next insn */
     347       1.1     chris 	mov	pc, r4
     348      1.67      matt #endif
     349       1.1     chris 
     350      1.67      matt 	GET_CPSR(r0)
     351      1.67      matt 	CPSID_I(r0, r0)			/* Kill irq's */
     352       1.1     chris 
     353      1.71      matt 	GET_CURCPU(r4)			/* for DO_AST */
     354      1.67      matt 	DO_AST_AND_RESTORE_ALIGNMENT_FAULTS
     355       1.1     chris 	PULLFRAME
     356       1.1     chris 
     357       1.1     chris 	movs	pc, lr			/* Exit */
    358      1.58      matt 
    359      1.69     skrll AST_ALIGNMENT_FAULT_LOCALS
    360      1.69     skrll 
    361      1.58      matt #ifdef __HAVE_FAST_SOFTINTS
    362      1.58      matt /*
    363      1.58      matt  *	Called at IPL_HIGH
    364      1.58      matt  *	r0 = new lwp
    365      1.58      matt  *	r1 = ipl for softint_dispatch
    366      1.58      matt  */
     367      1.58      matt ENTRY_NP(softint_switch)
     368      1.58      matt 	stmfd	sp!, {r4, r6, r7, lr}
     369      1.58      matt 
     370      1.58      matt 	ldr	r7, [r0, #L_CPU]		/* get curcpu */
     371      1.67      matt #if defined(TPIDRPRW_IS_CURLWP)
     372      1.58      matt 	mrc	p15, 0, r4, c13, c0, 4		/* get old lwp */
     373      1.58      matt #else
     374      1.58      matt 	ldr	r4, [r7, #(CI_CURLWP)]		/* get old lwp */
     375      1.58      matt #endif
     376      1.58      matt 	mrs	r6, cpsr			/* we need to save this */
     377      1.58      matt 
     378      1.58      matt 	/*
     379      1.58      matt 	 * If the soft lwp blocks, it needs to return to softint_tramp.
     380      1.58      matt 	 * Build a frame shaped like cpu_switchto's {r4-r7, ip, lr} frame.
     381      1.58      matt 	 */
     382      1.58      matt 	mov	r2, sp				/* think ip */
     383      1.58      matt 	adr	r3, softint_tramp		/* think lr */
     384      1.58      matt 	stmfd	sp!, {r2-r3}
     385      1.58      matt 	stmfd	sp!, {r4-r7}
     386      1.58      matt 
     387      1.58      matt 	mov	r5, r0				/* save new lwp */
     388      1.58      matt 
     389      1.60     rmind 	ldr	r2, [r4, #(L_PCB)]		/* get old lwp's pcb */
     390      1.58      matt 
     391      1.58      matt 	/* Save all the registers into the old lwp's pcb */
     392      1.58      matt #if defined(__XSCALE__) || defined(_ARM_ARCH_6)
     393      1.58      matt 	strd	r8, [r2, #(PCB_R8)]
     394      1.58      matt 	strd	r10, [r2, #(PCB_R10)]
     395      1.58      matt 	strd	r12, [r2, #(PCB_R12)]
     396      1.58      matt #else
     397      1.58      matt 	add	r3, r2, #(PCB_R8)
     398      1.58      matt 	stmia	r3, {r8-r13}
     399      1.58      matt #endif
     400      1.58      matt 
     401      1.58      matt 	/* this is an invariant so load before disabling intrs */
     402      1.60     rmind 	ldr	r2, [r5, #(L_PCB)]	/* get new lwp's pcb */
     403      1.58      matt 
     404      1.59      matt #ifndef __HAVE_UNNESTED_INTRS
     405      1.58      matt 	IRQdisable
     406      1.59      matt #endif
     407      1.58      matt 	/*
     408      1.58      matt 	 * We're switching to a bound LWP so its l_cpu is already correct.
     409      1.58      matt 	 */
     410      1.67      matt #if defined(TPIDRPRW_IS_CURLWP)
     411      1.58      matt 	mcr	p15, 0, r5, c13, c0, 4		/* save new lwp */
     412      1.58      matt #endif
     413      1.58      matt 	str	r5, [r7, #(CI_CURLWP)]		/* save new lwp */
     414      1.58      matt 
     415      1.58      matt 	/*
     416      1.58      matt 	 * Normally, we'd get {r8-r13} but since this is a softint lwp
     417      1.58      matt 	 * its existing state doesn't matter.  We start the stack just
     418      1.58      matt 	 * below the trapframe.
     419      1.58      matt 	 */
     420      1.66      matt 	ldr	sp, [r5, #(L_MD_TF)]	/* get new lwp's stack ptr */
     421      1.58      matt 
     422      1.58      matt 	/* At this point we can allow IRQ's again. */
     423      1.59      matt #ifndef __HAVE_UNNESTED_INTRS
     424      1.58      matt 	IRQenable
     425      1.59      matt #endif
     426      1.58      matt 
     427      1.58      matt 					/* r1 still has ipl */
     428      1.58      matt 	mov	r0, r4			/* r0 has pinned (old) lwp */
     429      1.58      matt 	bl	_C_LABEL(softint_dispatch)
     430      1.58      matt 	/*
     431      1.58      matt 	 * If we've returned, we need to change everything back and return.
     432      1.58      matt 	 */
     433      1.60     rmind 	ldr	r2, [r4, #(L_PCB)]	/* get pinned lwp's pcb */
     434      1.58      matt 
     435      1.59      matt #ifndef __HAVE_UNNESTED_INTRS
     436      1.58      matt 	IRQdisable
     437      1.59      matt #endif
     438      1.58      matt 	/*
     439      1.58      matt 	 * We don't need to restore all the registers since another lwp was
     440      1.58      matt 	 * never executed.  But we do need the SP from the formerly pinned lwp.
     441      1.58      matt 	 */
     442      1.58      matt 
     443      1.67      matt #if defined(TPIDRPRW_IS_CURLWP)
     444      1.58      matt 	mcr	p15, 0, r4, c13, c0, 4		/* restore pinned lwp */
     445      1.58      matt #endif
     446      1.58      matt 	str	r4, [r7, #(CI_CURLWP)]		/* restore pinned lwp */
     447  1.72.2.2       tls 	ldr	sp, [r2, #(PCB_KSP)]	/* now running on the old stack. */
     448      1.58      matt 
     449      1.58      matt 	/* At this point we can allow IRQ's again. */
     450      1.58      matt 	msr	cpsr_c, r6		/* restore the saved cpsr */
     451      1.58      matt 
     452      1.58      matt 	/*
     453      1.58      matt 	 * Grab the registers that got pushed at the start and return.
     454      1.58      matt 	 */
     455      1.58      matt 	ldmfd	sp!, {r4-r7, ip, lr}	/* eat switch frame */
     456      1.58      matt 	ldmfd	sp!, {r4, r6, r7, pc}	/* pop stack and return */
     457      1.58      matt 
     458      1.58      matt END(softint_switch)
    458      1.58      matt 
    459      1.58      matt /*
    460      1.58      matt  * r0 = previous LWP (the soft lwp)
    461      1.58      matt  * r4 = original LWP (the current lwp)
    462      1.58      matt  * r6 = original CPSR
    463      1.58      matt  * r7 = curcpu()
    464      1.58      matt  */
     465      1.58      matt ENTRY_NP(softint_tramp)
     466      1.58      matt 	ldr	r3, [r7, #(CI_MTX_COUNT)]	/* readjust after mi_switch */
     467      1.58      matt 	add	r3, r3, #1
     468      1.58      matt 	str	r3, [r7, #(CI_MTX_COUNT)]
     469      1.58      matt 
     470      1.58      matt 	mov	r3, #0				/* tell softint_dispatch */
     471      1.58      matt 	str	r3, [r0, #(L_CTXSWTCH)]		/*    the soft lwp blocked */
     472      1.58      matt 
     473      1.58      matt 	msr	cpsr_c, r6			/* restore interrupts */
     474      1.58      matt 	ldmfd	sp!, {r4, r6, r7, pc}		/* pop stack and return */
     475      1.58      matt END(softint_tramp)
    476      1.58      matt #endif /* __HAVE_FAST_SOFTINTS */
    477