/*	$NetBSD: cpuswitch.S,v 1.32 2003/04/26 17:50:21 chris Exp $	*/

/*
 * Copyright 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Steve C. Woodford for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Brini.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpuswitch.S
 *
 * cpu switching functions
 *
 * Created      : 15/10/94
 */

#include "opt_armfpe.h"
#include "opt_arm32_pmap.h"
#include "opt_multiprocessor.h"

#include "assym.h"
#include <machine/param.h>
#include <machine/cpu.h>
#include <machine/frame.h>
#include <machine/asm.h>

#undef IRQdisable
#undef IRQenable

/*
 * New experimental definitions of IRQdisable and IRQenable.
 * These keep FIQs enabled since FIQs are special.
 */

#define IRQdisable \
	mrs	r14, cpsr ; \
	orr	r14, r14, #(I32_bit) ; \
	msr	cpsr_c, r14 ; \

#define IRQenable \
	mrs	r14, cpsr ; \
	bic	r14, r14, #(I32_bit) ; \
	msr	cpsr_c, r14 ; \

#ifdef ARM32_PMAP_NEW
/*
 * These are used for switching the translation table/DACR.
 * Since the vector page can be invalid for a short time, we must
 * disable both regular IRQs *and* FIQs.
 *
 * XXX: This is not necessary if the vector table is relocated.
 */
#define IRQdisableALL \
	mrs	r14, cpsr ; \
	orr	r14, r14, #(I32_bit | F32_bit) ; \
	msr	cpsr_c, r14

#define IRQenableALL \
	mrs	r14, cpsr ; \
	bic	r14, r14, #(I32_bit | F32_bit) ; \
	msr	cpsr_c, r14
#endif
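
/*
 * For illustration only: in C with GCC inline assembly, IRQdisableALL
 * corresponds to something like the sketch below (the function name is
 * made up for this example; the kernel uses the asm macro directly):
 *
 *	static __inline void
 *	irq_disable_all(void)
 *	{
 *		u_int tmp;
 *
 *		__asm __volatile(
 *			"mrs %0, cpsr\n\t"
 *			"orr %0, %0, %1\n\t"
 *			"msr cpsr_c, %0"
 *			: "=&r" (tmp)
 *			: "I" (I32_bit | F32_bit)
 *			: "memory");
 *	}
 */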

	.text

.Lwhichqs:
	.word	_C_LABEL(sched_whichqs)

.Lqs:
	.word	_C_LABEL(sched_qs)

/*
 * cpuswitch()
 *
 * performs a process context switch.
 * This function has several entry points.
 */

#ifdef MULTIPROCESSOR
.Lcpu_info_store:
	.word	_C_LABEL(cpu_info_store)
.Lcurlwp:
	/* FIXME: This is bogus in the general case. */
	.word	_C_LABEL(cpu_info_store) + CI_CURLWP

.Lcurpcb:
	.word	_C_LABEL(cpu_info_store) + CI_CURPCB
#else
.Lcurlwp:
	.word	_C_LABEL(curlwp)

.Lcurpcb:
	.word	_C_LABEL(curpcb)
#endif

.Lwant_resched:
	.word	_C_LABEL(want_resched)

.Lcpufuncs:
	.word	_C_LABEL(cpufuncs)

#ifndef MULTIPROCESSOR
	.data
	.global	_C_LABEL(curpcb)
_C_LABEL(curpcb):
	.word	0x00000000
	.text
#endif

.Lblock_userspace_access:
	.word	_C_LABEL(block_userspace_access)

.Lcpu_do_powersave:
	.word	_C_LABEL(cpu_do_powersave)

#ifdef ARM32_PMAP_NEW
.Lpmap_kernel_cstate:
	.word	(kernel_pmap_store + PMAP_CSTATE)

.Llast_cache_state_ptr:
	.word	_C_LABEL(pmap_cache_state)
#endif

/*
 * Idle loop, exercised while waiting for a process to wake up.
 *
 * NOTE: When we jump back to .Lswitch_search, we must have a
 * pointer to whichqs in r7, which is what it is when we arrive
 * here.
 */
/* LINTSTUB: Ignore */
ASENTRY_NP(idle)
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	bl	_C_LABEL(sched_unlock_idle)
#endif
	ldr	r3, .Lcpu_do_powersave

	/* Enable interrupts */
	IRQenable

	/* If we don't want to sleep, use a simpler loop. */
	ldr	r3, [r3]		/* r3 = cpu_do_powersave */
	teq	r3, #0
	bne	2f

	/* Non-powersave idle. */
1:	/* should maybe do uvm pageidlezero stuff here */
	ldr	r3, [r7]		/* r3 = whichqs */
	teq	r3, #0x00000000
	bne	.Lswitch_search
	b	1b

2:	/* Powersave idle. */
	ldr	r4, .Lcpufuncs
3:	ldr	r3, [r7]		/* r3 = whichqs */
	teq	r3, #0x00000000
	bne	.Lswitch_search

	/* if saving power, don't want to pageidlezero */
	mov	r0, #0
	adr	lr, 3b
	ldr	pc, [r4, #(CF_SLEEP)]
	/* loops back around */
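
	/*
	 * In outline, the idle loop above behaves like this C sketch
	 * (cf_sleep is the cpufuncs member reached via the CF_SLEEP
	 * offset; the loop shape is illustrative):
	 *
	 *	if (cpu_do_powersave) {
	 *		while (sched_whichqs == 0)
	 *			cpufuncs.cf_sleep(0);	// wait for interrupt
	 *	} else {
	 *		while (sched_whichqs == 0)
	 *			continue;		// spin
	 *	}
	 *	// fall through to .Lswitch_search, r7 = &sched_whichqs
	 */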

/*
 * Find a new lwp to run, save the current context and
 * load the new context
 *
 * Arguments:
 *	r0	'struct lwp *' of the current LWP
 */

ENTRY(cpu_switch)
/*
 * Local register usage. Some of these registers are out of date.
 * r1 = oldlwp
 * r2 = spl level
 * r3 = whichqs
 * r4 = queue
 * r5 = &qs[queue]
 * r6 = newlwp
 * r7 = scratch
 */
	stmfd	sp!, {r4-r7, lr}

	/*
	 * Indicate that there is no longer a valid process (curlwp = 0).
	 * Zero the current PCB pointer while we're at it.
	 */
	ldr	r7, .Lcurlwp
	ldr	r6, .Lcurpcb
	mov	r2, #0x00000000
	str	r2, [r7]		/* curlwp = NULL */
	str	r2, [r6]		/* curpcb = NULL */

	/* stash the old lwp while we call functions */
	mov	r5, r0

#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	/* release the sched_lock before handling interrupts */
	bl	_C_LABEL(sched_unlock_idle)
#endif

	/* Lower the spl level to spl0 and get the current spl level. */
#ifdef __NEWINTR
	mov	r0, #(IPL_NONE)
	bl	_C_LABEL(_spllower)
#else /* ! __NEWINTR */
#ifdef spl0
	mov	r0, #(_SPL_0)
	bl	_C_LABEL(splx)
#else
	bl	_C_LABEL(spl0)
#endif /* spl0 */
#endif /* __NEWINTR */

	/* Push the old spl level onto the stack */
	str	r0, [sp, #-0x0004]!

	/* First phase : find a new lwp */

	ldr	r7, .Lwhichqs

	/* rem: r5 = old lwp */
	/* rem: r7 = &whichqs */

.Lswitch_search:
	IRQdisable
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	bl	_C_LABEL(sched_lock_idle)
#endif

	/* Do we have any active queues? */
	ldr	r3, [r7]

	/* If not we must idle until we do. */
	teq	r3, #0x00000000
	beq	_ASM_LABEL(idle)

	/* put the old lwp back in r1 */
	mov	r1, r5

	/* rem: r1 = old lwp */
	/* rem: r3 = whichqs */
	/* rem: interrupts are disabled */

	/*
	 * We have found an active queue. Currently we do not know which queue
	 * is active, just that one of them is.
	 */
	/* This is the ffs algorithm devised by d.seal and posted to
	 * comp.sys.arm on 16 Feb 1994.
	 */
	rsb	r5, r3, #0
	ands	r0, r3, r5

	adr	r5, .Lcpu_switch_ffs_table

				    /* X = R0 */
	orr	r4, r0, r0, lsl #4  /* r4 = X * 0x11 */
	orr	r4, r4, r4, lsl #6  /* r4 = X * 0x451 */
	rsb	r4, r4, r4, lsl #16 /* r4 = X * 0x0450fbaf */

	/* used further down, saves SA stall */
	ldr	r6, .Lqs

	/* now look up in the table indexed on the top 6 bits of r4 */
	ldrb	r4, [ r5, r4, lsr #26 ]

	/* rem: r0 = bit mask of chosen queue (1 << r4) */
	/* rem: r1 = old lwp */
	/* rem: r3 = whichqs */
	/* rem: r4 = queue number */
	/* rem: interrupts are disabled */
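
	/*
	 * For reference, the bit-twiddling above amounts to this C sketch
	 * ('ffs_tab' stands for .Lcpu_switch_ffs_table at the bottom of
	 * this file; the function name is illustrative):
	 *
	 *	unsigned int
	 *	lowest_set_bit(unsigned int x)	// x != 0
	 *	{
	 *		// x & -x isolates the lowest set bit; multiplying
	 *		// by 0x0450fbaf (done above with shifts and adds)
	 *		// maps each of the 32 possible results to a unique
	 *		// value in the top 6 bits.
	 *		return (ffs_tab[((x & -x) * 0x0450fbafU) >> 26]);
	 *	}
	 */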

	/* Get the address of the queue (&qs[queue]) */
	add	r5, r6, r4, lsl #3

	/*
	 * Get the lwp from the queue and place the next lwp in
	 * the queue at the head. This basically unlinks the lwp at
	 * the head of the queue.
	 */
	ldr	r6, [r5, #(L_FORW)]

	/* rem: r6 = new lwp */
	ldr	r7, [r6, #(L_FORW)]
	str	r7, [r5, #(L_FORW)]

	/*
	 * Test to see if the queue is now empty. If the head of the queue
	 * points to the queue itself then there are no more lwps in
	 * the queue. We can therefore clear the queue not empty flag held
	 * in r3.
	 */

	teq	r5, r7
	biceq	r3, r3, r0

	/* rem: r0 = bit mask of chosen queue (1 << r4) - NOT NEEDED ANY MORE */

	/* Fix the back pointer for the lwp now at the head of the queue. */
	ldr	r0, [r6, #(L_BACK)]
	str	r0, [r7, #(L_BACK)]

	/* Update the RAM copy of the queue not empty flags word. */
	ldr	r7, .Lwhichqs
	str	r3, [r7]

	/* rem: r1 = old lwp */
	/* rem: r3 = whichqs - NOT NEEDED ANY MORE */
	/* rem: r4 = queue number - NOT NEEDED ANY MORE */
	/* rem: r6 = new lwp */
	/* rem: interrupts are disabled */

	/* Clear the want_resched flag */
	ldr	r7, .Lwant_resched
	mov	r0, #0x00000000
	str	r0, [r7]

	/*
	 * Clear the back pointer of the lwp we have removed from
	 * the head of the queue. The new lwp is isolated now.
	 */
	str	r0, [r6, #(L_BACK)]
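
	/*
	 * Taken together, the dequeue above corresponds to this C sketch
	 * (field names follow the L_FORW/L_BACK offsets used here; 'q'
	 * is &qs[queue] acting as the list head):
	 *
	 *	struct lwp *new = q->l_forw;		// lwp at the head
	 *	q->l_forw = new->l_forw;		// unlink it
	 *	if (new->l_forw == q)			// queue now empty?
	 *		whichqs &= ~(1 << queue);
	 *	new->l_forw->l_back = new->l_back;	// fix back pointer
	 *	new->l_back = NULL;			// new lwp is isolated
	 */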

#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	/*
	 * unlock the sched_lock, but leave interrupts off, for now.
	 */
	mov	r7, r1
	bl	_C_LABEL(sched_unlock_idle)
	mov	r1, r7
#endif

.Lswitch_resume:
#ifdef MULTIPROCESSOR
	/* XXX use curcpu() */
	ldr	r0, .Lcpu_info_store
	str	r0, [r6, #(L_CPU)]
#else
	/* l->l_cpu initialized in fork1() for single-processor */
#endif

	/* Process is now on a processor. */
	mov	r0, #LSONPROC			/* l->l_stat = LSONPROC */
	str	r0, [r6, #(L_STAT)]

	/* We have a new curlwp now so make a note of it */
	ldr	r7, .Lcurlwp
	str	r6, [r7]

	/* Hook in a new pcb */
	ldr	r7, .Lcurpcb
	ldr	r0, [r6, #(L_ADDR)]
	str	r0, [r7]

	/* At this point we can allow IRQs again. */
	IRQenable

	/* rem: r1 = old lwp */
	/* rem: r4 = return value */
	/* rem: r6 = new lwp */
	/* rem: interrupts are enabled */

	/*
	 * If the new lwp is the same as the lwp that called
	 * cpu_switch() then we do not need to save and restore any
	 * contexts. This means we can make a quick exit.
	 * The test is simple: if curlwp on entry (now in r1) is the
	 * same as the lwp removed from the queue, we can jump to the exit.
	 */
	teq	r1, r6
	moveq	r4, #0x00000000		/* default to "didn't switch" */
	beq	.Lswitch_return

	/*
	 * At this point, we are guaranteed to be switching to
	 * a new lwp.
	 */
	mov	r4, #0x00000001

	/* Remember the old lwp in r0 */
	mov	r0, r1

	/*
	 * If the old lwp on entry to cpu_switch was zero then the
	 * process that called it was exiting. This means that we do
	 * not need to save the current context. Instead we can jump
	 * straight to restoring the context for the new process.
	 */
	teq	r0, #0x00000000
	beq	.Lswitch_exited

	/* rem: r0 = old lwp */
	/* rem: r4 = return value */
	/* rem: r6 = new lwp */
	/* rem: interrupts are enabled */

	/* Stage two : Save old context */

	/* Get the user structure for the old lwp. */
	ldr	r1, [r0, #(L_ADDR)]

	/* Save all the registers in the old lwp's pcb */
	add	r7, r1, #(PCB_R8)
	stmia	r7, {r8-r13}

	/*
	 * NOTE: We can now use r8-r13 until it is time to restore
	 * them for the new process.
	 */

	/* Remember the old PCB. */
	mov	r8, r1

	/* r1 now free! */

	/* Get the user structure for the new process in r9 */
	ldr	r9, [r6, #(L_ADDR)]

	/*
	 * This can be optimised... We know we want to go from SVC32
	 * mode to UND32 mode
	 */
        mrs	r3, cpsr
	bic	r2, r3, #(PSR_MODE)
	orr	r2, r2, #(PSR_UND32_MODE | I32_bit)
        msr	cpsr_c, r2

	str	sp, [r8, #(PCB_UND_SP)]

        msr	cpsr_c, r3		/* Restore the old mode */
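
	/*
	 * (On ARM, sp is banked per processor mode, so the only way to
	 * reach the undefined-mode stack pointer from SVC32 is to switch
	 * briefly into UND32 mode, as done above and again later when
	 * the new lwp's value is loaded.)
	 */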

	/* rem: r0 = old lwp */
	/* rem: r4 = return value */
	/* rem: r6 = new lwp */
	/* rem: r8 = old PCB */
	/* rem: r9 = new PCB */
	/* rem: interrupts are enabled */

	/* What else needs to be saved? Only FPA stuff, when that is supported. */

	/* Third phase : restore saved context */

	/* rem: r0 = old lwp */
	/* rem: r4 = return value */
	/* rem: r6 = new lwp */
	/* rem: r8 = old PCB */
	/* rem: r9 = new PCB */
	/* rem: interrupts are enabled */

	/*
	 * Get the new L1 table pointer into r11.  If we're switching to
	 * an LWP with the same address space as the outgoing one, we can
	 * skip the cache purge and the TTB load.
	 *
	 * To avoid data dep stalls that would happen anyway, we try
	 * to get some useful work done in the meantime.
	 */
	ldr	r10, [r8, #(PCB_PAGEDIR)]	/* r10 = old L1 */
	ldr	r11, [r9, #(PCB_PAGEDIR)]	/* r11 = new L1 */

#ifndef ARM32_PMAP_NEW
	ldr	r3, .Lblock_userspace_access
	mov	r1, #0x00000001
	mov	r2, #0x00000000
	teq	r10, r11			/* r10 == r11? */
	beq	.Lcs_context_switched		/* yes! */

	/*
	 * Don't allow user space access between the purge and the switch.
	 */
	ldr	r3, .Lblock_userspace_access
	mov	r1, #0x00000001
	mov	r2, #0x00000000
	str	r1, [r3]

	stmfd	sp!, {r0-r3}
	ldr	r1, .Lcpufuncs
	mov	lr, pc
	ldr	pc, [r1, #CF_IDCACHE_WBINV_ALL]
	ldmfd	sp!, {r0-r3}

.Lcs_cache_purge_skipped:
	/* At this point we need to kill IRQs again. */
	IRQdisable

	/* rem: r2 = 0 */
	/* rem: r3 = &block_userspace_access */
	/* rem: r4 = return value */
	/* rem: r6 = new lwp */
	/* rem: r9 = new PCB */
	/* rem: r11 = new L1 */

	/*
	 * Interrupts are disabled so we can allow user space accesses again
	 * as none will occur until interrupts are re-enabled after the
	 * switch.
	 */
	str	r2, [r3]

	/* Switch the memory to the new process */
	ldr	r3, .Lcpufuncs
	mov	r0, r11
	mov	lr, pc
	ldr	pc, [r3, #CF_CONTEXT_SWITCH]

.Lcs_context_switched:

#else	/* ARM32_PMAP_NEW */

	ldr	r0, [r8, #(PCB_DACR)]		/* r0 = old DACR */
	ldr	r1, [r9, #(PCB_DACR)]		/* r1 = new DACR */
	ldr	r8, [r9, #(PCB_CSTATE)]		/* r8 = &new_pmap->pm_cstate */
	ldr	r5, .Llast_cache_state_ptr	/* Previous thread's cstate */

	teq	r10, r11			/* Same L1? */
	ldr	r5, [r5]
	cmpeq	r0, r1				/* Same DACR? */
	beq	.Lcs_context_switched		/* yes! */

	ldr	r3, .Lblock_userspace_access
	mov	r12, #0
	cmp	r5, #0				/* No last vm? (switch_exit) */
	beq	.Lcs_cache_purge_skipped	/* If so, skip the cache flush */

	mov	r2, #DOMAIN_CLIENT
	cmp	r1, r2, lsl #(PMAP_DOMAIN_KERNEL * 2) /* Sw to kernel thread? */
	beq	.Lcs_cache_purge_skipped	/* Yup. Don't flush cache */

	cmp	r5, r8				/* Same userland VM space? */
	ldrneb	r12, [r5, #(CS_CACHE_ID)]	/* Last VM space cache state */

	/*
	 * We're definitely switching to a new userland VM space,
	 * and the previous userland VM space has yet to be flushed
	 * from the cache/tlb.
	 *
	 * r12 holds the previous VM space's cs_cache_id state
	 */
	tst	r12, #0xff			/* Test cs_cache_id */
	beq	.Lcs_cache_purge_skipped	/* VM space is not in cache */
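
	/*
	 * In rough C terms, the checks above implement the following
	 * (identifiers mirror the PCB/cstate offsets used in this file;
	 * this is a sketch, not kernel source):
	 *
	 *	if (new_l1 == old_l1 && new_dacr == old_dacr)
	 *		goto context_switched;	  // same address space
	 *	if (last_cstate == NULL)	  // arrived via switch_exit()
	 *		goto cache_purge_skipped;
	 *	if (new_dacr == DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2))
	 *		goto cache_purge_skipped; // kernel thread
	 *	if (last_cstate == new_cstate ||
	 *	    last_cstate->cs_cache_id == 0)
	 *		goto cache_purge_skipped; // old space not in cache
	 *	// otherwise: write back and invalidate the whole I/D cache
	 */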

	/*
	 * Definitely need to flush the cache.
	 * Mark the old VM space as NOT being resident in the cache.
	 */
	mov	r2, #0x00000000
	strb	r2, [r5, #(CS_CACHE_ID)]
	strb	r2, [r5, #(CS_CACHE_D)]

	/*
	 * Don't allow user space access between the purge and the switch.
	 */
	mov	r2, #0x00000001
	str	r2, [r3]

	stmfd	sp!, {r0-r3}
	ldr	r1, .Lcpufuncs
	mov	lr, pc
	ldr	pc, [r1, #CF_IDCACHE_WBINV_ALL]
	ldmfd	sp!, {r0-r3}

.Lcs_cache_purge_skipped:
	/* rem: r1 = new DACR */
	/* rem: r3 = &block_userspace_access */
	/* rem: r4 = return value */
	/* rem: r5 = &old_pmap->pm_cstate (or NULL) */
	/* rem: r6 = new lwp */
	/* rem: r8 = &new_pmap->pm_cstate */
	/* rem: r9 = new PCB */
	/* rem: r10 = old L1 */
	/* rem: r11 = new L1 */

	mov	r2, #0x00000000
	ldr	r7, [r9, #(PCB_PL1VEC)]

	/*
	 * At this point we need to kill IRQs again.
	 *
	 * XXXSCW: Don't need to block FIQs if vectors have been relocated
	 */
	IRQdisableALL

	/*
	 * Interrupts are disabled so we can allow user space accesses again
	 * as none will occur until interrupts are re-enabled after the
	 * switch.
	 */
	str	r2, [r3]

	/*
	 * Ensure the vector table is accessible by fixing up the L1
	 */
	cmp	r7, #0			/* No need to fixup vector table? */
	ldrne	r2, [r7]		/* But if yes, fetch current value */
	ldrne	r0, [r9, #(PCB_L1VEC)]	/* Fetch new vector_page value */
	mcr	p15, 0, r1, c3, c0, 0	/* Update DACR for new context */
	cmpne	r2, r0			/* Stuffing the same value? */
#ifndef PMAP_INCLUDE_PTE_SYNC
	strne	r0, [r7]		/* Nope, update it */
#else
	beq	.Lcs_same_vector
	str	r0, [r7]		/* Otherwise, update it */

	/*
	 * Need to sync the cache to make sure that last store is
	 * visible to the MMU.
	 */
	ldr	r2, .Lcpufuncs
	mov	r0, r7
	mov	r1, #4
	mov	lr, pc
	ldr	pc, [r2, #CF_DCACHE_WB_RANGE]

.Lcs_same_vector:
#endif
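
	/*
	 * The vector-page fixup above, sketched in C (field names follow
	 * the PCB_PL1VEC/PCB_L1VEC offsets; the DACR itself is reloaded
	 * unconditionally by the mcr):
	 *
	 *	if (pcb->pcb_pl1vec != NULL &&
	 *	    *pcb->pcb_pl1vec != pcb->pcb_l1vec) {
	 *		*pcb->pcb_pl1vec = pcb->pcb_l1vec;
	 *	#ifdef PMAP_INCLUDE_PTE_SYNC
	 *		// write the updated L1 entry back to RAM so the
	 *		// MMU table walk sees it (via CF_DCACHE_WB_RANGE)
	 *		cpufuncs.cf_dcache_wb_range(
	 *		    (vaddr_t)pcb->pcb_pl1vec, 4);
	 *	#endif
	 *	}
	 */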

	cmp	r10, r11		/* Switching to the same L1? */
	ldr	r10, .Lcpufuncs
	beq	.Lcs_same_l1		/* Yup. */

	/*
	 * Do a full context switch, including full TLB flush.
	 */
	mov	r0, r11
	mov	lr, pc
	ldr	pc, [r10, #CF_CONTEXT_SWITCH]

	/*
	 * Mark the old VM space as NOT being resident in the TLB
	 */
	mov	r2, #0x00000000
	cmp	r5, #0
	strneh	r2, [r5, #(CS_TLB_ID)]
	b	.Lcs_context_switched

	/*
	 * We're switching to a different process in the same L1.
	 * In this situation, we only need to flush the TLB for the
	 * vector_page mapping, and even then only if r7 is non-NULL.
	 */
.Lcs_same_l1:
	cmp	r7, #0
	movne	r0, #0			/* We *know* vector_page's VA is 0x0 */
	movne	lr, pc
	ldrne	pc, [r10, #CF_TLB_FLUSHID_SE]

.Lcs_context_switched:
	/* rem: r8 = &new_pmap->pm_cstate */

	/* XXXSCW: Safe to re-enable FIQs here */

	/*
	 * The new VM space is live in the cache and TLB.
	 * Update its cache/tlb state, and if it's not the kernel
	 * pmap, update the 'last cache state' pointer.
	 */
	mov	r2, #-1
	ldr	r5, .Lpmap_kernel_cstate
	ldr	r0, .Llast_cache_state_ptr
	str	r2, [r8, #(CS_ALL)]
	cmp	r5, r8
	strne	r8, [r0]

#endif	/* ARM32_PMAP_NEW */

	/* rem: r4 = return value */
	/* rem: r6 = new lwp */
	/* rem: r9 = new PCB */

	/*
	 * This can be optimised... We know we want to go from SVC32
	 * mode to UND32 mode
	 */
        mrs	r3, cpsr
	bic	r2, r3, #(PSR_MODE)
	orr	r2, r2, #(PSR_UND32_MODE)
        msr	cpsr_c, r2

	ldr	sp, [r9, #(PCB_UND_SP)]

        msr	cpsr_c, r3		/* Restore the old mode */

	/* Restore all the saved registers */
	add	r7, r9, #PCB_R8
	ldmia	r7, {r8-r13}

	sub	r7, r7, #PCB_R8		/* restore PCB pointer */

	ldr	r5, [r6, #(L_PROC)]	/* fetch the proc for below */

	/* rem: r4 = return value */
	/* rem: r5 = new lwp's proc */
	/* rem: r6 = new lwp */
	/* rem: r7 = new pcb */


#ifdef ARMFPE
	add	r0, r7, #(USER_SIZE) & 0x00ff
	add	r0, r0, #(USER_SIZE) & 0xff00
	bl	_C_LABEL(arm_fpe_core_changecontext)
#endif

	/* We can enable interrupts again */
#ifndef ARM32_PMAP_NEW
	IRQenable
#else
	IRQenableALL
#endif

	/* rem: r4 = return value */
	/* rem: r5 = new lwp's proc */
	/* rem: r6 = new lwp */
	/* rem: r7 = new PCB */

	/*
	 * Check for restartable atomic sequences (RAS).
	 */

	ldr	r2, [r5, #(P_NRAS)]
	ldr	r4, [r7, #(PCB_TF)]	/* r4 = trapframe (used below) */
	teq	r2, #0			/* p->p_nras == 0? */
	bne	.Lswitch_do_ras		/* no, check for one */

.Lswitch_return:

	/* Get the spl level from the stack and update the current spl level */
	ldr	r0, [sp], #0x0004
	bl	_C_LABEL(splx)

	/* cpu_switch returns 1 == switched, 0 == didn't switch */
	mov	r0, r4

	/*
	 * Pull the registers that got pushed when either savectx() or
	 * cpu_switch() was called and return.
	 */
	ldmfd	sp!, {r4-r7, pc}

.Lswitch_do_ras:
	ldr	r1, [r4, #(TF_PC)]	/* second ras_lookup() arg */
	mov	r0, r5			/* first ras_lookup() arg */
	bl	_C_LABEL(ras_lookup)
	cmn	r0, #1			/* -1 means "not in a RAS" */
	strne	r0, [r4, #(TF_PC)]
	b	.Lswitch_return
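
	/*
	 * The RAS check and the fixup above, approximately in C (tf is
	 * the trapframe pointer fetched from the PCB; ras_lookup() is
	 * the machine-independent kernel routine):
	 *
	 *	if (p->p_nras != 0) {
	 *		caddr_t pc = ras_lookup(p, (caddr_t)tf->tf_pc);
	 *		if (pc != (caddr_t)-1)
	 *			tf->tf_pc = (int)pc;	// restart sequence
	 *	}
	 */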

.Lswitch_exited:
	/*
	 * We skip the cache purge because switch_exit() already did it.
	 * Load up registers the way .Lcs_cache_purge_skipped expects.
	 * Userspace access is already blocked by switch_exit().
	 */
	ldr	r9, [r6, #(L_ADDR)]		/* r9 = new PCB */
	ldr	r3, .Lblock_userspace_access
#ifndef ARM32_PMAP_NEW
	mov	r2, #0x00000000
#else
	mrc	p15, 0, r10, c2, c0, 0		/* r10 = old L1 */
	mov	r5, #0				/* No previous cache state */
	ldr	r1, [r9, #(PCB_DACR)]		/* r1 = new DACR */
	ldr	r8, [r9, #(PCB_CSTATE)]		/* r8 = new cache state */
#endif
	ldr	r11, [r9, #(PCB_PAGEDIR)]	/* r11 = new L1 */
	b	.Lcs_cache_purge_skipped

/*
 * cpu_switchto(struct lwp *current, struct lwp *next)
 * Switch to the specified next LWP
 * Arguments:
 *
 *	r0	'struct lwp *' of the current LWP
 *	r1	'struct lwp *' of the LWP to switch to
 */
ENTRY(cpu_switchto)
	stmfd	sp!, {r4-r7, lr}

	/* Lower the spl level to spl0 and get the current spl level. */
	mov	r6, r0		/* save old lwp */
	mov	r5, r1		/* save new lwp */

#if defined(LOCKDEBUG)
	/* release the sched_lock before handling interrupts */
	bl	_C_LABEL(sched_unlock_idle)
#endif

#ifdef __NEWINTR
	mov	r0, #(IPL_NONE)
	bl	_C_LABEL(_spllower)
#else /* ! __NEWINTR */
#ifdef spl0
	mov	r0, #(_SPL_0)
	bl	_C_LABEL(splx)
#else
	bl	_C_LABEL(spl0)
#endif /* spl0 */
#endif /* __NEWINTR */

	/* Push the old spl level onto the stack */
	str	r0, [sp, #-0x0004]!

	IRQdisable
#if defined(LOCKDEBUG)
	bl	_C_LABEL(sched_lock_idle)
#endif

	mov	r0, r6		/* restore old lwp */
	mov	r1, r5		/* restore new lwp */

	/* rem: r0 = old lwp */
	/* rem: r1 = new lwp */
	/* rem: interrupts are disabled */

	/*
	 * Okay, set up registers the way cpu_switch() wants them,
	 * and jump into the middle of it (where we bring up the
	 * new process).
	 */
	mov	r6, r1			/* r6 = new lwp */
#if defined(LOCKDEBUG)
	mov	r5, r0			/* preserve old lwp */
	bl	_C_LABEL(sched_unlock_idle)
	mov	r1, r5			/* r1 = old lwp */
#else
	mov	r1, r0			/* r1 = old lwp */
#endif
	b	.Lswitch_resume

/*
 * void switch_exit(struct lwp *l, struct lwp *l0, void (*exit)(struct lwp *));
 * Switch to lwp0's saved context and deallocate the address space and kernel
 * stack for l.  Then jump into cpu_switch(), as if we were in lwp0 all along.
 */

/* LINTSTUB: Func: void switch_exit(struct lwp *l, struct lwp *l0, void (*)(struct lwp *)) */
ENTRY(switch_exit)
	/*
	 * The process is going away, so we can use callee-saved
	 * registers here without having to save them.
	 */

	mov	r4, r0
	ldr	r0, .Lcurlwp

	mov	r5, r1
	ldr	r1, .Lblock_userspace_access

	mov	r6, r2

	/*
	 * r4 = lwp
	 * r5 = lwp0
	 * r6 = exit func
	 */

	mov	r2, #0x00000000		/* curlwp = NULL */
	str	r2, [r0]

#ifdef ARM32_PMAP_NEW
	/*
	 * We're about to clear both the cache and the TLB.
	 * Make sure to zap the 'last cache state' pointer since the
	 * pmap might be about to go away. Also ensure the outgoing
	 * VM space's cache state is marked as NOT resident in the
	 * cache, and that lwp0's cache state IS resident.
	 */
	ldr	r7, [r4, #(L_ADDR)]		/* r7 = old lwp's PCB */
	ldr	r0, .Llast_cache_state_ptr	/* Last userland cache state */
	ldr	r9, [r7, #(PCB_CSTATE)]		/* Fetch cache state pointer */
	ldr	r3, [r5, #(L_ADDR)]		/* r3 = lwp0's PCB */
	str	r2, [r0]			/* No previous cache state */
	str	r2, [r9, #(CS_ALL)]		/* Zap old lwp's cache state */
	ldr	r3, [r3, #(PCB_CSTATE)]		/* lwp0's cache state */
	mov	r2, #-1
	str	r2, [r3, #(CS_ALL)]		/* lwp0 is in da cache! */
#endif

	/*
	 * Don't allow user space access between the purge and the switch.
	 */
	mov	r2, #0x00000001
	str	r2, [r1]

#ifndef ARM32_PMAP_NEW
	/* Switch to lwp0 context */

	ldr	r0, .Lcpufuncs
	mov	lr, pc
	ldr	pc, [r0, #CF_IDCACHE_WBINV_ALL]

	ldr	r2, [r5, #(L_ADDR)]

	/*
	 * r2 = lwp0's PCB
	 */

	IRQdisable

	ldr	r0, [r2, #(PCB_PAGEDIR)]

	/* Switch the memory to the new process */
	ldr	r1, .Lcpufuncs
	mov	lr, pc
	ldr	pc, [r1, #CF_CONTEXT_SWITCH]

	ldr	r0, .Lcurpcb

	/* Restore all the saved registers */
	add	r7, r2, #PCB_R8
	ldmia	r7, {r8-r13}

	str	r2, [r0]	/* curpcb = lwp0's PCB */

	IRQenable

#else	/* ARM32_PMAP_NEW */
	/* Switch to lwp0 context */

	ldr	r9, .Lcpufuncs
	mov	lr, pc
	ldr	pc, [r9, #CF_IDCACHE_WBINV_ALL]

	ldr	r7, [r5, #(L_ADDR)]	/* r7 = lwp0's PCB, not the old lwp's */

	ldr	r0, [r7, #(PCB_PL1VEC)]
	ldr	r1, [r7, #(PCB_DACR)]

	/*
	 * r0 = Pointer to L1 slot for vector_page (or NULL)
	 * r1 = lwp0's DACR
	 * r4 = lwp we're switching from
	 * r5 = lwp0
	 * r6 = exit func
	 * r7 = lwp0's PCB
	 * r9 = cpufuncs
	 */

	IRQdisableALL

	/*
	 * Ensure the vector table is accessible by fixing up lwp0's L1
	 */
	cmp	r0, #0			/* No need to fixup vector table? */
	ldrne	r3, [r0]		/* But if yes, fetch current value */
	ldrne	r2, [r7, #(PCB_L1VEC)]	/* Fetch new vector_page value */
	mcr	p15, 0, r1, c3, c0, 0	/* Update DACR for lwp0's context */
	cmpne	r3, r2			/* Stuffing the same value? */
	strne	r2, [r0]		/* Store if not. */

#ifdef PMAP_INCLUDE_PTE_SYNC
	/*
	 * Need to sync the cache to make sure that last store is
	 * visible to the MMU.
	 */
	movne	r1, #4
	movne	lr, pc
	ldrne	pc, [r9, #CF_DCACHE_WB_RANGE]
#endif

	/*
	 * Note: We don't do the same optimisation as cpu_switch() with
	 * respect to avoiding flushing the TLB if we're switching to
	 * the same L1 since this process' VM space may be about to go
	 * away, so we don't want *any* turds left in the TLB.
	 */

	/* Switch the memory to the new process */
	ldr	r0, [r7, #(PCB_PAGEDIR)]
	mov	lr, pc
	ldr	pc, [r9, #CF_CONTEXT_SWITCH]

	ldr	r0, .Lcurpcb

	/* Restore all the saved registers */
	add	r1, r7, #PCB_R8
	ldmia	r1, {r8-r13}

	str	r7, [r0]	/* curpcb = lwp0's PCB */

	IRQenableALL
#endif

	/*
	 * Schedule the vmspace and stack to be freed.
	 */
	mov	r0, r4			/* {lwp_}exit2(l) */
	mov	lr, pc
	mov	pc, r6

	ldr	r7, .Lwhichqs		/* r7 = &whichqs */
	mov	r5, #0x00000000		/* r5 = old lwp = NULL */
	b	.Lswitch_search

/* LINTSTUB: Func: void savectx(struct pcb *pcb) */
ENTRY(savectx)
	/*
	 * r0 = pcb
	 */

	/* Push registers. */
	stmfd	sp!, {r4-r7, lr}

	/* Store all the registers in the process's pcb */
	add	r2, r0, #(PCB_R8)
	stmia	r2, {r8-r13}

	/* Pull the regs off the stack */
	ldmfd	sp!, {r4-r7, pc}

ENTRY(proc_trampoline)
#ifdef MULTIPROCESSOR
	bl	_C_LABEL(proc_trampoline_mp)
#endif
	mov	r0, r5
	mov	r1, sp
	mov	lr, pc
	mov	pc, r4

	/* Kill IRQs */
        mrs     r0, cpsr
        orr     r0, r0, #(I32_bit)
        msr     cpsr_c, r0

	PULLFRAME

	movs	pc, lr			/* Exit */

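/*
 * proc_trampoline is the first code a newly forked lwp executes: the
 * fork code arranges, via the saved switchframe, for r4 to hold a
 * function pointer and r5 its argument, so the instructions above
 * amount to roughly:
 *
 *	(*func)(arg, frame);	// r4 = func, r5 = arg, r1 = sp
 *
 * after which IRQs are disabled and PULLFRAME drops back through the
 * saved trapframe, normally returning to user mode.
 */
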
	.type .Lcpu_switch_ffs_table, _ASM_TYPE_OBJECT;
.Lcpu_switch_ffs_table:
/* same as ffs table but all nums are -1 from that */
/*               0   1   2   3   4   5   6   7           */
	.byte	 0,  0,  1, 12,  2,  6,  0, 13  /*  0- 7 */
	.byte	 3,  0,  7,  0,  0,  0,  0, 14  /*  8-15 */
	.byte	10,  4,  0,  0,  8,  0,  0, 25  /* 16-23 */
	.byte	 0,  0,  0,  0,  0, 21, 27, 15  /* 24-31 */
	.byte	31, 11,  5,  0,  0,  0,  0,  0	/* 32-39 */
	.byte	 9,  0,  0, 24,  0,  0, 20, 26  /* 40-47 */
	.byte	30,  0,  0,  0,  0, 23,  0, 19  /* 48-55 */
	.byte   29,  0, 22, 18, 28, 17, 16,  0  /* 56-63 */