/* cpuswitch.S, NetBSD arm32, revision 1.55.6.2 */
      1 /*	$NetBSD: cpuswitch.S,v 1.55.6.2 2008/06/02 13:21:52 mjf Exp $	*/
      2 
      3 /*
      4  * Copyright 2003 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Steve C. Woodford for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *      This product includes software developed for the NetBSD Project by
     20  *      Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 /*
     38  * Copyright (c) 1994-1998 Mark Brinicombe.
     39  * Copyright (c) 1994 Brini.
     40  * All rights reserved.
     41  *
     42  * This code is derived from software written for Brini by Mark Brinicombe
     43  *
     44  * Redistribution and use in source and binary forms, with or without
     45  * modification, are permitted provided that the following conditions
     46  * are met:
     47  * 1. Redistributions of source code must retain the above copyright
     48  *    notice, this list of conditions and the following disclaimer.
     49  * 2. Redistributions in binary form must reproduce the above copyright
     50  *    notice, this list of conditions and the following disclaimer in the
     51  *    documentation and/or other materials provided with the distribution.
     52  * 3. All advertising materials mentioning features or use of this software
     53  *    must display the following acknowledgement:
     54  *	This product includes software developed by Brini.
     55  * 4. The name of the company nor the name of the author may be used to
     56  *    endorse or promote products derived from this software without specific
     57  *    prior written permission.
     58  *
     59  * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
     60  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
     61  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     62  * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
     63  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
     64  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
     65  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     66  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     67  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     68  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     69  * SUCH DAMAGE.
     70  *
     71  * RiscBSD kernel project
     72  *
     73  * cpuswitch.S
     74  *
     75  * cpu switching functions
     76  *
     77  * Created      : 15/10/94
     78  */
     79 
     80 #include "opt_armfpe.h"
     81 #include "opt_arm32_pmap.h"
     82 #include "opt_multiprocessor.h"
     83 #include "opt_cpuoptions.h"
     84 #include "opt_lockdebug.h"
     85 
     86 #include "assym.h"
     87 #include <arm/arm32/pte.h>
     88 #include <machine/param.h>
     89 #include <machine/frame.h>
     90 #include <machine/asm.h>
     91 #include <machine/cpu.h>
     92 
     93 	RCSID("$NetBSD: cpuswitch.S,v 1.55.6.2 2008/06/02 13:21:52 mjf Exp $")
     94 
/* LINTSTUB: include <sys/param.h> */

#undef IRQdisable
#undef IRQenable

/*
 * Local definitions of IRQdisable and IRQenable.  These mask only the
 * IRQ bit and deliberately leave FIQs enabled, since FIQs are special.
 * NOTE: the pre-ARMv6 variants clobber r14 (lr).
 */

#ifdef _ARM_ARCH_6
/* ARMv6+ has dedicated change-processor-state instructions. */
#define	IRQdisable	cpsid	i
#define	IRQenable	cpsie	i
#else
#define IRQdisable \
	mrs	r14, cpsr ; \
	orr	r14, r14, #(I32_bit) ; \
	msr	cpsr_c, r14

#define IRQenable \
	mrs	r14, cpsr ; \
	bic	r14, r14, #(I32_bit) ; \
	msr	cpsr_c, r14

#endif

	.text
/* Literal-pool entry: address of the pmap_previous_active_lwp variable. */
.Lpmap_previous_active_lwp:
	.word	_C_LABEL(pmap_previous_active_lwp)
    124 
/*
 * struct lwp *
 * cpu_switchto(struct lwp *current, struct lwp *next)
 *
 * Switch to the specified next LWP.
 * Arguments:
 *
 *	r0	'struct lwp *' of the current LWP (or NULL if exiting)
 *	r1	'struct lwp *' of the LWP to switch to
 *	r2	returning
 *
 * Returns the previously-current LWP in r0.
 */
ENTRY(cpu_switchto)
	mov	ip, sp
	stmfd	sp!, {r4-r7, ip, lr}

	/* move lwps into caller saved registers */
	mov	r6, r1
	mov	r4, r0

#ifdef PROCESS_ID_CURCPU
	GET_CURCPU(r7)
#elif defined(PROCESS_ID_IS_CURLWP)
	/*
	 * Read curlwp from the CP15 thread-id register (c13/c0/4); the
	 * old-lwp argument (saved in r4) may be NULL when the caller is
	 * exiting, so we must use the always-valid CP15 value instead.
	 * This must be "mrc" (read coprocessor into r0), not "mcr"
	 * (write): a write would corrupt the curlwp register with a
	 * possibly-NULL value and leave r0 unchanged for the
	 * dereference below.  Compare softint_switch, which uses mrc
	 * for the same purpose.
	 */
	mrc	p15, 0, r0, c13, c0, 4		/* get old lwp (r4 maybe 0) */
	ldr	r7, [r0, #(L_CPU)]		/* get cpu from old lwp */
#elif !defined(MULTIPROCESSOR)
	ldr	r7, [r6, #L_CPU]		/* get cpu from new lwp */
#else
#error curcpu() method not defined
#endif

	/* rem: r4 = old lwp */
	/* rem: r6 = new lwp */
	/* rem: r7 = curcpu() */

	IRQdisable

#ifdef MULTIPROCESSOR
	str	r7, [r6, #(L_CPU)]
#else
	/* l->l_cpu initialized in fork1() for single-processor */
#endif

#if defined(PROCESS_ID_IS_CURLWP)
	mcr	p15, 0, r6, c13, c0, 4		/* set current lwp */
#endif
#if !defined(PROCESS_ID_IS_CURLWP) || defined(MULTIPROCESSOR)
	/* We have a new curlwp now so make a note of it */
	str	r6, [r7, #(CI_CURLWP)]
#endif

	/* Hook in a new pcb */
	ldr	r0, [r6, #(L_ADDR)]
	str	r0, [r7, #(CI_CURPCB)]
	mov	r7, r0

	/* At this point we can allow IRQ's again. */
	IRQenable

	/* rem: r4 = old lwp */
	/* rem: r6 = new lwp */
	/* rem: r7 = new pcb */
	/* rem: interrupts are enabled */

	/*
	 * If the old lwp on entry to cpu_switchto was zero then the
	 * process that called it was exiting. This means that we do
	 * not need to save the current context. Instead we can jump
	 * straight to restoring the context for the new process.
	 */
	teq	r4, #0
	beq	.Ldo_switch

	/* rem: r4 = old lwp */
	/* rem: r6 = new lwp */
	/* rem: r7 = new pcb */
	/* rem: interrupts are enabled */

	/* Save old context */

	/* Get the user structure for the old lwp. */
	ldr	r5, [r4, #(L_ADDR)]

	/* Save all the registers in the old lwp's pcb */
#if defined(__XSCALE__) || defined(_ARM_ARCH_6)
	/* strd stores the named even register and its odd successor. */
	strd	r8, [r5, #(PCB_R8)]
	strd	r10, [r5, #(PCB_R10)]
	strd	r12, [r5, #(PCB_R12)]
#else
	add	r0, r5, #(PCB_R8)
	stmia	r0, {r8-r13}
#endif

#ifdef _ARM_ARCH_6
	/*
	 * Save user read/write thread/process id register
	 */
	mrc	p15, 0, r0, c13, c0, 2
	str	r0, [r5, #(PCB_USER_PID_RW)]
#endif
	/*
	 * NOTE: We can now use r8-r13 until it is time to restore
	 * them for the new process.
	 */

	/* rem: r4 = old lwp */
	/* rem: r5 = old pcb */
	/* rem: r6 = new lwp */
	/* rem: r7 = new pcb */
	/* rem: interrupts are enabled */

#ifdef FPU_VFP
	/*
	 * Now's a good time to 'save' the VFP context.  Note that we
	 * don't really force a save here, which can save time if we
	 * end up restarting the same context.
	 */
	bl	_C_LABEL(vfp_savecontext)
#endif

	/* Restore saved context */

.Ldo_switch:
	/* rem: r4 = old lwp */
	/* rem: r6 = new lwp */
	/* rem: r7 = new pcb */
	/* rem: interrupts are enabled */

#ifdef _ARM_ARCH_6
	/*
	 * Restore user thread/process id registers
	 */
	ldr	r0, [r7, #(PCB_USER_PID_RW)]
	mcr	p15, 0, r0, c13, c0, 2
	ldr	r0, [r7, #(PCB_USER_PID_RO)]
	mcr	p15, 0, r0, c13, c0, 3
#endif

	ldr	r5, [r6, #(L_PROC)]	/* fetch the proc for below */

	/* Restore all the saved registers */
#ifdef __XSCALE__
	ldr	r8, [r7, #(PCB_R8)]
	ldr	r9, [r7, #(PCB_R9)]
	ldr	r10, [r7, #(PCB_R10)]
	ldr	r11, [r7, #(PCB_R11)]
	ldr	r12, [r7, #(PCB_R12)]
	ldr	r13, [r7, #(PCB_SP)]
#elif defined(_ARM_ARCH_6)
	ldrd	r8, [r7, #(PCB_R8)]
	ldrd	r10, [r7, #(PCB_R10)]
	ldrd	r12, [r7, #(PCB_R12)]	/* loads r12 and r13 (sp) */
#else
	add	r0, r7, #PCB_R8
	ldmia	r0, {r8-r13}
#endif

	/* Record the old lwp for pmap_activate()'s benefit */
	ldr	r1, .Lpmap_previous_active_lwp
	str	r4, [r1]

	/* rem: r4 = old lwp */
	/* rem: r5 = new lwp's proc */
	/* rem: r6 = new lwp */
	/* rem: r7 = new pcb */

#ifdef FPU_VFP
	mov	r0, r6
	bl	_C_LABEL(vfp_loadcontext)
#endif
#ifdef ARMFPE
	/* Build USER_SIZE in two adds; immediates must be encodable. */
	add	r0, r7, #(USER_SIZE) & 0x00ff
	add	r0, r0, #(USER_SIZE) & 0xff00
	bl	_C_LABEL(arm_fpe_core_changecontext)
#endif

	/* rem: r4 = old lwp */
	/* rem: r5 = new lwp's proc */
	/* rem: r6 = new lwp */
	/* rem: r7 = new PCB */

	/*
	 * Check for restartable atomic sequences (RAS).
	 */

	ldr	r2, [r5, #(P_RASLIST)]
	ldr	r1, [r7, #(PCB_TF)]	/* r1 = trapframe (used below) */
	teq	r2, #0			/* p->p_nras == 0? */
	bne	.Lswitch_do_ras		/* no, check for one */

.Lswitch_return:
	/* cpu_switchto returns the old lwp */
	mov	r0, r4
	/* lwp_trampoline expects new lwp as its second argument */
	mov	r1, r6

	/*
	 * Pull the registers that got pushed when cpu_switchto() was called,
	 * and return.  (sp in the register list suppresses writeback.)
	 */
	ldmfd	sp, {r4-r7, sp, pc}

.Lswitch_do_ras:
	ldr	r1, [r1, #(TF_PC)]	/* second ras_lookup() arg */
	mov	r0, r5			/* first ras_lookup() arg */
	bl	_C_LABEL(ras_lookup)
	cmn	r0, #1			/* -1 means "not in a RAS" */
	ldrne	r1, [r7, #(PCB_TF)]	/* in a RAS: rewind trapframe PC */
	strne	r0, [r1, #(TF_PC)]	/*    to the sequence start */
	b	.Lswitch_return
    334 
ENTRY(lwp_trampoline)
	/*
	 * First-time entry point for a newly created LWP, reached when
	 * cpu_switchto returns into it.  cpu_switchto gives us:
	 *
	 * arg0(r0) = old lwp
	 * arg1(r1) = new lwp
	 */
	bl	_C_LABEL(lwp_startup)

	/*
	 * Call the function the LWP was created to run.  NOTE(review):
	 * r4/r5 are presumably initialized elsewhere (cpu_lwp_fork) to
	 * the function pointer and its argument -- confirm against the
	 * fork path; this file never sets them on this path.
	 */
	mov	r0, r5
	mov	r1, sp
	mov	lr, pc			/* manual branch-and-link ... */
	mov	pc, r4			/* ... call function in r4 */

	/* Kill irq's */
        mrs     r0, cpsr
        orr     r0, r0, #(I32_bit)
        msr     cpsr_c, r0

	PULLFRAME			/* restore trapframe registers */

	movs	pc, lr			/* Exit */
    357 
    358 #ifdef __HAVE_FAST_SOFTINTS
/*
 *	Switch to a soft-interrupt LWP, run its pending soft interrupts
 *	via softint_dispatch(), and switch back to the pinned LWP.
 *	Called at IPL_HIGH
 *	r0 = new lwp
 *	r1 = ipl for softint_dispatch
 */
ENTRY_NP(softint_switch)
	stmfd	sp!, {r4, r6, r7, lr}

	ldr	r7, [r0, #L_CPU]		/* get curcpu */
#if defined(PROCESS_ID_IS_CURLWP)
	mrc	p15, 0, r4, c13, c0, 4		/* get old lwp */
#else
	ldr	r4, [r7, #(CI_CURLWP)]		/* get old lwp */
#endif
	mrs	r6, cpsr			/* we need to save this */

	/*
	 * If the soft lwp blocks, it needs to return to softint_tramp.
	 * Fake a cpu_switchto-style switch frame on the old stack:
	 * {r4-r7, ip, lr} with ip = old sp and lr = softint_tramp.
	 */
	mov	r2, sp				/* think ip */
	adr	r3, softint_tramp		/* think lr */
	stmfd	sp!, {r2-r3}
	stmfd	sp!, {r4-r7}

	mov	r5, r0				/* save new lwp */

	ldr	r2, [r4, #(L_ADDR)]		/* get old lwp's pcb */

	/* Save all the registers into the old lwp's pcb */
#if defined(__XSCALE__) || defined(_ARM_ARCH_6)
	strd	r8, [r2, #(PCB_R8)]
	strd	r10, [r2, #(PCB_R10)]
	strd	r12, [r2, #(PCB_R12)]
#else
	add	r3, r2, #(PCB_R8)
	stmia	r3, {r8-r13}
#endif

	/* this is an invariant so load before disabling intrs */
	ldr	r2, [r5, #(L_ADDR)]	/* get new lwp's pcb */

	IRQdisable
	/*
	 * We're switching to a bound LWP so its l_cpu is already correct.
	 */
#if defined(PROCESS_ID_IS_CURLWP)
	mcr	p15, 0, r5, c13, c0, 4		/* save new lwp */
#endif
#if !defined(PROCESS_ID_IS_CURLWP) || defined(MULTIPROCESSOR)
	str	r5, [r7, #(CI_CURLWP)]		/* save new lwp */
#endif

	/* Hook in a new pcb */
	str	r2, [r7, #(CI_CURPCB)]

	/*
	 * Normally, we'd get {r8-r13} but since this is a softint lwp
	 * its existing state doesn't matter.  We start the stack just
	 * below the trapframe.
	 */
	ldr	sp, [r2, #(PCB_TF)]	/* get new lwp's stack ptr */

	/* At this point we can allow IRQ's again. */
	IRQenable

					/* r1 still has ipl */
	mov	r0, r4			/* r0 has pinned (old) lwp */
	bl	_C_LABEL(softint_dispatch)
	/*
	 * If we've returned, we need to change everything back and return.
	 */
	ldr	r2, [r4, #(L_ADDR)]	/* get pinned lwp's pcb */

	IRQdisable
	/*
	 * We don't need to restore all the registers since another lwp was
	 * never executed.  But we do need the SP from the formerly pinned lwp.
	 */

#if defined(PROCESS_ID_IS_CURLWP)
	mcr	p15, 0, r4, c13, c0, 4		/* restore pinned lwp */
#endif
#if !defined(PROCESS_ID_IS_CURLWP) || defined(MULTIPROCESSOR)
	str	r4, [r7, #(CI_CURLWP)]		/* restore pinned lwp */
#endif
	str	r2, [r7, #(CI_CURPCB)]		/* restore the curpcb */
	ldr	sp, [r2, #(PCB_SP)]	/* now running on the old stack. */

	/* At this point we can allow IRQ's again. */
	msr	cpsr_c, r6		/* restore saved interrupt state */

	/*
	 * Grab the registers that got pushed at the start and return.
	 */
	ldmfd	sp!, {r4-r7, ip, lr}	/* eat switch frame */
	ldmfd	sp!, {r4, r6, r7, pc}	/* pop stack and return */

END(softint_switch)
    457 
/*
 * Landing point when a soft-interrupt LWP blocks: cpu_switchto
 * "returns" here via the fake switch frame built by softint_switch.
 * r0 = previous LWP (the soft lwp)
 * r4 = original LWP (the current lwp)
 * r6 = original CPSR
 * r7 = curcpu()
 */
ENTRY_NP(softint_tramp)
	ldr	r3, [r7, #(CI_MTX_COUNT)]	/* readjust after mi_switch */
	add	r3, r3, #1
	str	r3, [r7, #(CI_MTX_COUNT)]

	mov	r3, #0				/* tell softint_dispatch */
	str	r3, [r0, #(L_CTXSWTCH)]		/*    the soft lwp blocked */

	msr	cpsr_c, r6			/* restore interrupts */
	ldmfd	sp!, {r4, r6, r7, pc}		/* pop stack and return */
END(softint_tramp)
    475 #endif /* __HAVE_FAST_SOFTINTS */
    476