/*	$NetBSD: cpuswitch.S,v 1.43 2006/05/10 07:13:30 skrll Exp $	*/

/*
 * Copyright 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Steve C. Woodford for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Brini.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpuswitch.S
 *
 * cpu switching functions
 *
 * Created      : 15/10/94
 */

#include "opt_armfpe.h"
#include "opt_arm32_pmap.h"
#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"

#include "assym.h"
#include <machine/param.h>
#include <machine/cpu.h>
#include <machine/frame.h>
#include <machine/asm.h>

/* LINTSTUB: include <sys/param.h> */

#undef IRQdisable
#undef IRQenable

/*
 * New experimental definitions of IRQdisable and IRQenable.
 * These keep FIQs enabled, since FIQs are special.
 */

#define IRQdisable \
	mrs	r14, cpsr ; \
	orr	r14, r14, #(I32_bit) ; \
	msr	cpsr_c, r14 ; \

#define IRQenable \
	mrs	r14, cpsr ; \
	bic	r14, r14, #(I32_bit) ; \
	msr	cpsr_c, r14 ; \

/*
 * These are used for switching the translation table/DACR.
 * Since the vector page can be invalid for a short time, we must
 * disable both regular IRQs *and* FIQs.
 *
 * XXX: This is not necessary if the vector table is relocated.
 */
#define IRQdisableALL \
	mrs	r14, cpsr ; \
	orr	r14, r14, #(I32_bit | F32_bit) ; \
	msr	cpsr_c, r14

#define IRQenableALL \
	mrs	r14, cpsr ; \
	bic	r14, r14, #(I32_bit | F32_bit) ; \
	msr	cpsr_c, r14

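/*
 * In rough C terms, all four macros just set or clear mask bits in
 * the CPSR control field (a sketch; read_cpsr/write_cpsr are
 * illustrative names, not real kernel functions):
 *
 *	uint32_t psr = read_cpsr();
 *	psr |= (I32_bit | F32_bit);	// IRQdisableALL
 *	psr &= ~(I32_bit | F32_bit);	// IRQenableALL
 *	write_cpsr(psr);
 */
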
	.text

.Lwhichqs:
	.word	_C_LABEL(sched_whichqs)

.Lqs:
	.word	_C_LABEL(sched_qs)

/*
 * cpuswitch()
 *
 * Performs a process context switch.
 * This function has several entry points.
 */
    142   1.1     chris 
    143  1.19     bjh21 #ifdef MULTIPROCESSOR
    144  1.19     bjh21 .Lcpu_info_store:
    145  1.19     bjh21 	.word	_C_LABEL(cpu_info_store)
    146  1.29   thorpej .Lcurlwp:
    147  1.19     bjh21 	/* FIXME: This is bogus in the general case. */
    148  1.29   thorpej 	.word	_C_LABEL(cpu_info_store) + CI_CURLWP
    149  1.22     bjh21 
    150  1.22     bjh21 .Lcurpcb:
    151  1.22     bjh21 	.word	_C_LABEL(cpu_info_store) + CI_CURPCB
    152  1.19     bjh21 #else
    153  1.29   thorpej .Lcurlwp:
    154  1.29   thorpej 	.word	_C_LABEL(curlwp)
    155   1.1     chris 
    156  1.17   thorpej .Lcurpcb:
    157   1.1     chris 	.word	_C_LABEL(curpcb)
    158  1.22     bjh21 #endif
    159   1.1     chris 
    160  1.17   thorpej .Lwant_resched:
    161   1.1     chris 	.word	_C_LABEL(want_resched)
    162   1.1     chris 
    163  1.17   thorpej .Lcpufuncs:
    164   1.1     chris 	.word	_C_LABEL(cpufuncs)
    165   1.1     chris 
    166  1.22     bjh21 #ifndef MULTIPROCESSOR
    167   1.1     chris 	.data
    168   1.1     chris 	.global	_C_LABEL(curpcb)
    169   1.1     chris _C_LABEL(curpcb):
    170   1.1     chris 	.word	0x00000000
    171   1.1     chris 	.text
    172  1.22     bjh21 #endif
    173   1.1     chris 
    174  1.17   thorpej .Lblock_userspace_access:
    175   1.1     chris 	.word	_C_LABEL(block_userspace_access)
    176   1.1     chris 
    177  1.15   thorpej .Lcpu_do_powersave:
    178  1.15   thorpej 	.word	_C_LABEL(cpu_do_powersave)
    179  1.15   thorpej 
    180  1.30       scw .Lpmap_kernel_cstate:
    181  1.30       scw 	.word	(kernel_pmap_store + PMAP_CSTATE)
    182  1.30       scw 
    183  1.30       scw .Llast_cache_state_ptr:
    184  1.30       scw 	.word	_C_LABEL(pmap_cache_state)
    185  1.30       scw 
    186   1.1     chris /*
    187   1.1     chris  * Idle loop, exercised while waiting for a process to wake up.
    188  1.16   thorpej  *
    189  1.16   thorpej  * NOTE: When we jump back to .Lswitch_search, we must have a
    190  1.16   thorpej  * pointer to whichqs in r7, which is what it is when we arrive
    191  1.16   thorpej  * here.
    192   1.1     chris  */
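/*
 * The flow below, as a rough C sketch (names follow the labels and
 * globals used in this file; this is illustrative, not the real
 * implementation):
 *
 *	enable_irqs();
 *	sleepfn = cpu_do_powersave ? cpufuncs.cf_sleep : NULL;
 *	old_spl = spl0();
 *	while (sched_whichqs == 0) {
 *		if (sleepfn != NULL) {
 *			disable_irqs_and_fiqs();
 *			if (sched_whichqs == 0)	// re-check under the mask
 *				(*sleepfn)(0);
 *			enable_irqs_and_fiqs();
 *		}
 *	}
 *	splx(old_spl);		// then re-take the sched lock if needed
 *	goto switch_search;
 */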
/* LINTSTUB: Ignore */
ASENTRY_NP(idle)
	ldr	r6, .Lcpu_do_powersave
	IRQenable			/* Enable interrupts */
	ldr	r6, [r6]		/* r6 = cpu_do_powersave */

#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	bl	_C_LABEL(sched_unlock_idle)
#endif

	/* Drop to spl0 (returns the current spl level in r0). */
#ifdef __NEWINTR
	mov	r0, #(IPL_NONE)
	bl	_C_LABEL(_spllower)
#else /* ! __NEWINTR */
	mov	r0, #(_SPL_0)
	bl	_C_LABEL(splx)
#endif /* __NEWINTR */

	teq	r6, #0			/* cpu_do_powersave non-zero? */
	ldrne	r6, .Lcpufuncs
	mov	r4, r0			/* Old interrupt level to r4 */
	ldrne	r6, [r6, #(CF_SLEEP)]

	/*
	 * Main idle loop.
	 * r6 points to power-save idle function if required, else NULL.
	 */
1:	ldr	r3, [r7]		/* r3 = sched_whichqs */
	teq	r3, #0
	bne	2f			/* We have work to do */
	teq	r6, #0			/* Powersave idle? */
	beq	1b			/* Nope. Just sit-n-spin. */

	/*
	 * Before going into powersave idle mode, disable interrupts
	 * and check sched_whichqs one more time.
	 */
	IRQdisableALL
	ldr	r3, [r7]
	mov	r0, #0
	teq	r3, #0			/* sched_whichqs still zero? */
	moveq	lr, pc
	moveq	pc, r6			/* If so, do powersave idle */
	IRQenableALL
	b	1b			/* Back around */

	/*
	 * sched_whichqs indicates that at least one lwp is ready to run.
	 * Restore the original interrupt priority level, grab the
	 * scheduler lock if necessary, and jump back into cpu_switch.
	 */
2:	mov	r0, r4
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	bl	_C_LABEL(splx)
	adr	lr, .Lswitch_search
	b	_C_LABEL(sched_lock_idle)
#else
	adr	lr, .Lswitch_search
	b	_C_LABEL(splx)
#endif


/*
 * Find a new lwp to run, save the current context and
 * load the new context
 *
 * Arguments:
 *	r0	'struct lwp *' of the current LWP
 */

ENTRY(cpu_switch)
/*
 * Local register usage. Some of these registers are out of date.
 * r1 = oldlwp
 * r2 = spl level
 * r3 = whichqs
 * r4 = queue
 * r5 = &qs[queue]
 * r6 = newlwp
 * r7 = scratch
 */
	stmfd	sp!, {r4-r7, lr}

	/*
	 * Indicate that there is no longer a valid process (curlwp = 0).
	 * Zero the current PCB pointer while we're at it.
	 */
	ldr	r7, .Lcurlwp
	ldr	r6, .Lcurpcb
	mov	r2, #0x00000000
	str	r2, [r7]		/* curlwp = NULL */
	str	r2, [r6]		/* curpcb = NULL */

	/* stash the old lwp while we call functions */
	mov	r5, r0

	/* First phase : find a new lwp */
	ldr	r7, .Lwhichqs

	/* rem: r5 = old lwp */
	/* rem: r7 = &whichqs */

.Lswitch_search:
	IRQdisable

	/* Do we have any active queues? */
	ldr	r3, [r7]

	/* If not we must idle until we do. */
	teq	r3, #0x00000000
	beq	_ASM_LABEL(idle)

	/* put the old lwp back in r1 */
	mov	r1, r5

	/* rem: r1 = old lwp */
	/* rem: r3 = whichqs */
	/* rem: interrupts are disabled */

	/* used further down, saves SA stall */
	ldr	r6, .Lqs

	/*
	 * We have found an active queue. Currently we do not know which
	 * queue is active, just that one of them is.
	 */
	/* Non-Xscale version of the ffs algorithm devised by d.seal and
	 * posted to comp.sys.arm on 16 Feb 1994.
	 */
 	rsb	r5, r3, #0
 	ands	r0, r3, r5

#ifndef __XSCALE__
	adr	r5, .Lcpu_switch_ffs_table

				    /* X = R0 */
	orr	r4, r0, r0, lsl #4  /* r4 = X * 0x11 */
	orr	r4, r4, r4, lsl #6  /* r4 = X * 0x451 */
	rsb	r4, r4, r4, lsl #16 /* r4 = X * 0x0450fbaf */

	/* now lookup in table indexed on top 6 bits of r4 */
	ldrb	r4, [ r5, r4, lsr #26 ]

#else	/* __XSCALE__ */
	clz	r4, r0
	rsb	r4, r4, #31
#endif	/* __XSCALE__ */
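
/*
 * A C sketch of the table-based ffs used above (the multiplier and
 * the .Lcpu_switch_ffs_table contents are taken straight from this
 * file; the function name is illustrative):
 *
 *	static inline int
 *	lsb_index(uint32_t bits)
 *	{
 *		uint32_t x = bits & (~bits + 1);  // isolate lowest set bit
 *		return ffs_table[(x * 0x0450fbafU) >> 26];
 *	}
 *
 * Multiplying a single-bit value by 0x0450fbaf leaves a distinct
 * pattern in the top six bits for each of the 32 possible inputs,
 * so a 64-entry byte table recovers the (zero-based) bit number.
 */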
    341  1.37       scw 
    342   1.1     chris 	/* rem: r0 = bit mask of chosen queue (1 << r4) */
    343  1.29   thorpej 	/* rem: r1 = old lwp */
    344   1.1     chris 	/* rem: r3 = whichqs */
    345   1.1     chris 	/* rem: r4 = queue number */
    346   1.1     chris 	/* rem: interrupts are disabled */
    347   1.1     chris 
    348   1.1     chris 	/* Get the address of the queue (&qs[queue]) */
    349   1.1     chris 	add	r5, r6, r4, lsl #3
    350   1.1     chris 
    351   1.1     chris 	/*
    352  1.29   thorpej 	 * Get the lwp from the queue and place the next process in
    353  1.29   thorpej 	 * the queue at the head. This basically unlinks the lwp at
    354   1.1     chris 	 * the head of the queue.
    355   1.1     chris 	 */
    356  1.29   thorpej 	ldr	r6, [r5, #(L_FORW)]
    357   1.1     chris 
    358  1.41       scw #ifdef DIAGNOSTIC
    359  1.41       scw 	cmp	r6, r5
    360  1.41       scw 	beq	.Lswitch_bogons
    361  1.41       scw #endif
    362  1.41       scw 
    363  1.29   thorpej 	/* rem: r6 = new lwp */
    364  1.29   thorpej 	ldr	r7, [r6, #(L_FORW)]
    365  1.29   thorpej 	str	r7, [r5, #(L_FORW)]
    366   1.1     chris 
    367   1.1     chris 	/*
    368   1.1     chris 	 * Test to see if the queue is now empty. If the head of the queue
    369  1.29   thorpej 	 * points to the queue itself then there are no more lwps in
    370   1.1     chris 	 * the queue. We can therefore clear the queue not empty flag held
    371   1.1     chris 	 * in r3.
    372   1.1     chris 	 */
    373   1.1     chris 
    374   1.1     chris 	teq	r5, r7
    375   1.1     chris 	biceq	r3, r3, r0
    376   1.1     chris 
    377  1.28     bjh21 	/* rem: r0 = bit mask of chosen queue (1 << r4) - NOT NEEDED AN MORE */
    378  1.28     bjh21 
    379  1.29   thorpej 	/* Fix the back pointer for the lwp now at the head of the queue. */
    380  1.29   thorpej 	ldr	r0, [r6, #(L_BACK)]
    381  1.29   thorpej 	str	r0, [r7, #(L_BACK)]
    382   1.1     chris 
    383   1.1     chris 	/* Update the RAM copy of the queue not empty flags word. */
    384  1.38       scw 	ldreq	r7, .Lwhichqs
    385  1.38       scw 	streq	r3, [r7]
    386   1.1     chris 
    387  1.29   thorpej 	/* rem: r1 = old lwp */
    388   1.1     chris 	/* rem: r3 = whichqs - NOT NEEDED ANY MORE */
    389   1.1     chris 	/* rem: r4 = queue number - NOT NEEDED ANY MORE */
    390  1.29   thorpej 	/* rem: r6 = new lwp */
    391   1.1     chris 	/* rem: interrupts are disabled */
    392   1.1     chris 
    393   1.1     chris 	/* Clear the want_resched flag */
    394  1.28     bjh21 	ldr	r7, .Lwant_resched
    395   1.1     chris 	mov	r0, #0x00000000
    396  1.28     bjh21 	str	r0, [r7]
    397   1.1     chris 
    398   1.1     chris 	/*
    399  1.29   thorpej 	 * Clear the back pointer of the lwp we have removed from
    400  1.29   thorpej 	 * the head of the queue. The new lwp is isolated now.
    401   1.1     chris 	 */
    402  1.29   thorpej 	str	r0, [r6, #(L_BACK)]
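
/*
 * The dequeue above, in rough C (a sketch; the q_forw/l_forw and
 * q_back/l_back names mirror the L_FORW/L_BACK offsets used here):
 *
 *	l = qs[queue].q_forw;			// lwp at the head
 *	qs[queue].q_forw = l->l_forw;		// unlink it
 *	l->l_forw->l_back = l->l_back;		// fix the back pointer
 *	if (qs[queue].q_forw == &qs[queue])	// queue now empty?
 *		sched_whichqs &= ~(1 << queue);
 *	want_resched = 0;
 *	l->l_back = NULL;			// l is now off-queue
 */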

#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	/*
	 * unlock the sched_lock, but leave interrupts off, for now.
	 */
	mov	r7, r1
	bl	_C_LABEL(sched_unlock_idle)
	mov	r1, r7
#endif


.Lswitch_resume:
	/* rem: r1 = old lwp */
	/* rem: r4 = return value [not used if came from cpu_switchto()] */
	/* rem: r6 = new lwp */
	/* rem: interrupts are disabled */

#ifdef MULTIPROCESSOR
	/* XXX use curcpu() */
	ldr	r0, .Lcpu_info_store
	str	r0, [r6, #(L_CPU)]
#else
	/* l->l_cpu initialized in fork1() for single-processor */
#endif

	/* Process is now on a processor. */
	mov	r0, #LSONPROC			/* l->l_stat = LSONPROC */
	str	r0, [r6, #(L_STAT)]

	/* We have a new curlwp now so make a note of it */
	ldr	r7, .Lcurlwp
	str	r6, [r7]

	/* Hook in a new pcb */
	ldr	r7, .Lcurpcb
	ldr	r0, [r6, #(L_ADDR)]
	str	r0, [r7]

	/* At this point we can allow IRQs again. */
	IRQenable

	/* rem: r1 = old lwp */
	/* rem: r4 = return value */
	/* rem: r6 = new lwp */
	/* rem: interrupts are enabled */

	/*
	 * If the new lwp is the same as the lwp that called
	 * cpu_switch() then we do not need to save and restore any
	 * contexts. This means we can make a quick exit.
	 * The test is simple: if curlwp on entry (now in r1) is the
	 * same as the lwp removed from the queue, we can jump to the exit.
	 */
	teq	r1, r6
	moveq	r4, #0x00000000		/* default to "didn't switch" */
	beq	.Lswitch_return

	/*
	 * At this point, we are guaranteed to be switching to
	 * a new lwp.
	 */
	mov	r4, #0x00000001

	/* Remember the old lwp in r0 */
	mov	r0, r1

	/*
	 * If the old lwp on entry to cpu_switch was zero, then the
	 * process that called it was exiting. This means that we do
	 * not need to save the current context. Instead we can jump
	 * straight to restoring the context for the new process.
	 */
	teq	r0, #0x00000000
	beq	.Lswitch_exited

	/* rem: r0 = old lwp */
	/* rem: r4 = return value */
	/* rem: r6 = new lwp */
	/* rem: interrupts are enabled */

	/* Stage two : Save old context */

	/* Get the user structure for the old lwp. */
	ldr	r1, [r0, #(L_ADDR)]

	/* Save all the registers in the old lwp's pcb */
#ifndef __XSCALE__
	add	r7, r1, #(PCB_R8)
	stmia	r7, {r8-r13}
#else
	strd	r8, [r1, #(PCB_R8)]
	strd	r10, [r1, #(PCB_R10)]
	strd	r12, [r1, #(PCB_R12)]
#endif

	/*
	 * NOTE: We can now use r8-r13 until it is time to restore
	 * them for the new process.
	 */

	/* Remember the old PCB. */
	mov	r8, r1

	/* r1 now free! */

	/* Get the user structure for the new process in r9 */
	ldr	r9, [r6, #(L_ADDR)]

	/*
	 * This can be optimised... We know we want to go from SVC32
	 * mode to UND32 mode
	 */
        mrs	r3, cpsr
	bic	r2, r3, #(PSR_MODE)
	orr	r2, r2, #(PSR_UND32_MODE | I32_bit)
        msr	cpsr_c, r2

	str	sp, [r8, #(PCB_UND_SP)]

        msr	cpsr_c, r3		/* Restore the old mode */
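
/*
 * Sketch of the mode dance above: sp is banked per CPU mode, so the
 * only way to save the undefined-mode stack pointer is to briefly
 * switch to UND32 mode (with IRQs masked) and store sp from there:
 *
 *	psr = read_cpsr();				// SVC32 mode
 *	write_cpsr((psr & ~PSR_MODE) | PSR_UND32_MODE | I32_bit);
 *	pcb->pcb_und_sp = sp;				// banked sp_und
 *	write_cpsr(psr);				// back to SVC32
 *
 * (read_cpsr/write_cpsr are illustrative names.)
 */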

	/* rem: r0 = old lwp */
	/* rem: r4 = return value */
	/* rem: r6 = new lwp */
	/* rem: r8 = old PCB */
	/* rem: r9 = new PCB */
	/* rem: interrupts are enabled */

	/* What else needs to be saved? Only FPA stuff when that is supported */

	/* Third phase : restore saved context */

	/* rem: r0 = old lwp */
	/* rem: r4 = return value */
	/* rem: r6 = new lwp */
	/* rem: r8 = old PCB */
	/* rem: r9 = new PCB */
	/* rem: interrupts are enabled */

	/*
	 * Get the new L1 table pointer into r11.  If we're switching to
	 * an LWP with the same address space as the outgoing one, we can
	 * skip the cache purge and the TTB load.
	 *
	 * To avoid data dep stalls that would happen anyway, we try
	 * and get some useful work done in the meantime.
	 */
	ldr	r10, [r8, #(PCB_PAGEDIR)]	/* r10 = old L1 */
	ldr	r11, [r9, #(PCB_PAGEDIR)]	/* r11 = new L1 */

	ldr	r0, [r8, #(PCB_DACR)]		/* r0 = old DACR */
	ldr	r1, [r9, #(PCB_DACR)]		/* r1 = new DACR */
	ldr	r8, [r9, #(PCB_CSTATE)]		/* r8 = &new_pmap->pm_cstate */
	ldr	r5, .Llast_cache_state_ptr	/* Previous thread's cstate */

	teq	r10, r11			/* Same L1? */
	ldr	r5, [r5]
	cmpeq	r0, r1				/* Same DACR? */
	beq	.Lcs_context_switched		/* yes! */

	ldr	r3, .Lblock_userspace_access
	mov	r12, #0
	cmp	r5, #0				/* No last vm? (switch_exit) */
	beq	.Lcs_cache_purge_skipped	/* No, we can skip cache flush */

	mov	r2, #DOMAIN_CLIENT
	cmp	r1, r2, lsl #(PMAP_DOMAIN_KERNEL * 2) /* Sw to kernel thread? */
	beq	.Lcs_cache_purge_skipped	/* Yup. Don't flush cache */

	cmp	r5, r8				/* Same userland VM space? */
	ldrneb	r12, [r5, #(CS_CACHE_ID)]	/* Last VM space cache state */

	/*
	 * We're definitely switching to a new userland VM space,
	 * and the previous userland VM space has yet to be flushed
	 * from the cache/tlb.
	 *
	 * r12 holds the previous VM space's cs_cache_id state
	 */
	tst	r12, #0xff			/* Test cs_cache_id */
	beq	.Lcs_cache_purge_skipped	/* VM space is not in cache */

	/*
	 * Definitely need to flush the cache.
	 * Mark the old VM space as NOT being resident in the cache.
	 */
	mov	r2, #0x00000000
	strb	r2, [r5, #(CS_CACHE_ID)]
	strb	r2, [r5, #(CS_CACHE_D)]

	/*
	 * Don't allow user space access between the purge and the switch.
	 */
	mov	r2, #0x00000001
	str	r2, [r3]

	stmfd	sp!, {r0-r3}
	ldr	r1, .Lcpufuncs
	mov	lr, pc
	ldr	pc, [r1, #CF_IDCACHE_WBINV_ALL]
	ldmfd	sp!, {r0-r3}
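
/*
 * The decision logic above, in rough C (a sketch; last_cs/new_cs
 * stand for the pmap cache-state structures reached via PCB_CSTATE):
 *
 *	if (old_l1 == new_l1 && old_dacr == new_dacr)
 *		goto context_switched;		// same address space
 *	if (last_cs == NULL ||			// came via switch_exit
 *	    new_dacr == DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2) ||
 *	    last_cs == new_cs ||		// same userland VM space
 *	    last_cs->cs_cache_id == 0)		// old VM not in the cache
 *		goto cache_purge_skipped;
 *	last_cs->cs_cache_id = last_cs->cs_cache_d = 0;
 *	block_userspace_access = 1;
 *	cpufuncs.cf_idcache_wbinv_all();	// full clean+invalidate
 */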

.Lcs_cache_purge_skipped:
	/* rem: r1 = new DACR */
	/* rem: r3 = &block_userspace_access */
	/* rem: r4 = return value */
	/* rem: r5 = &old_pmap->pm_cstate (or NULL) */
	/* rem: r6 = new lwp */
	/* rem: r8 = &new_pmap->pm_cstate */
	/* rem: r9 = new PCB */
	/* rem: r10 = old L1 */
	/* rem: r11 = new L1 */

	mov	r2, #0x00000000
	ldr	r7, [r9, #(PCB_PL1VEC)]

	/*
	 * At this point we need to kill IRQs again.
	 *
	 * XXXSCW: Don't need to block FIQs if vectors have been relocated
	 */
	IRQdisableALL

	/*
	 * Interrupts are disabled so we can allow user space accesses again
	 * as none will occur until interrupts are re-enabled after the
	 * switch.
	 */
	str	r2, [r3]

	/*
	 * Ensure the vector table is accessible by fixing up the L1
	 */
	cmp	r7, #0			/* No need to fixup vector table? */
	ldrne	r2, [r7]		/* But if yes, fetch current value */
	ldrne	r0, [r9, #(PCB_L1VEC)]	/* Fetch new vector_page value */
	mcr	p15, 0, r1, c3, c0, 0	/* Update DACR for new context */
	cmpne	r2, r0			/* Stuffing the same value? */
#ifndef PMAP_INCLUDE_PTE_SYNC
	strne	r0, [r7]		/* Nope, update it */
#else
	beq	.Lcs_same_vector
	str	r0, [r7]		/* Otherwise, update it */

	/*
	 * Need to sync the cache to make sure that last store is
	 * visible to the MMU.
	 */
	ldr	r2, .Lcpufuncs
	mov	r0, r7
	mov	r1, #4
	mov	lr, pc
	ldr	pc, [r2, #CF_DCACHE_WB_RANGE]

.Lcs_same_vector:
#endif /* PMAP_INCLUDE_PTE_SYNC */
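
/*
 * Roughly, the fixup above does (a sketch; the pl1vec/l1vec names
 * mirror the PCB_PL1VEC/PCB_L1VEC fields):
 *
 *	set_dacr(new_dacr);		// mcr p15, 0, r1, c3, c0, 0
 *	if (pcb->pcb_pl1vec != NULL &&
 *	    *pcb->pcb_pl1vec != pcb->pcb_l1vec) {
 *		*pcb->pcb_pl1vec = pcb->pcb_l1vec;
 *		// with PMAP_INCLUDE_PTE_SYNC: push the entry to memory
 *		cpufuncs.cf_dcache_wb_range(pcb->pcb_pl1vec, 4);
 *	}
 */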

	cmp	r10, r11		/* Switching to the same L1? */
	ldr	r10, .Lcpufuncs
	beq	.Lcs_same_l1		/* Yup. */

	/*
	 * Do a full context switch, including full TLB flush.
	 */
	mov	r0, r11
	mov	lr, pc
	ldr	pc, [r10, #CF_CONTEXT_SWITCH]

	/*
	 * Mark the old VM space as NOT being resident in the TLB
	 */
	mov	r2, #0x00000000
	cmp	r5, #0
	strneh	r2, [r5, #(CS_TLB_ID)]
	b	.Lcs_context_switched

	/*
	 * We're switching to a different process in the same L1.
	 * In this situation, we only need to flush the TLB for the
	 * vector_page mapping, and even then only if r7 is non-NULL.
	 */
.Lcs_same_l1:
	cmp	r7, #0
	movne	r0, #0			/* We *know* vector_page's VA is 0x0 */
	movne	lr, pc
	ldrne	pc, [r10, #CF_TLB_FLUSHID_SE]

.Lcs_context_switched:
	/* rem: r8 = &new_pmap->pm_cstate */

	/* XXXSCW: Safe to re-enable FIQs here */

	/*
	 * The new VM space is live in the cache and TLB.
	 * Update its cache/tlb state, and if it's not the kernel
	 * pmap, update the 'last cache state' pointer.
	 */
	mov	r2, #-1
	ldr	r5, .Lpmap_kernel_cstate
	ldr	r0, .Llast_cache_state_ptr
	str	r2, [r8, #(CS_ALL)]
	cmp	r5, r8
	strne	r8, [r0]

	/* rem: r4 = return value */
	/* rem: r6 = new lwp */
	/* rem: r9 = new PCB */

	/*
	 * This can be optimised... We know we want to go from SVC32
	 * mode to UND32 mode
	 */
        mrs	r3, cpsr
	bic	r2, r3, #(PSR_MODE)
	orr	r2, r2, #(PSR_UND32_MODE)
        msr	cpsr_c, r2

	ldr	sp, [r9, #(PCB_UND_SP)]

        msr	cpsr_c, r3		/* Restore the old mode */

	/* Restore all the saved registers */
#ifndef __XSCALE__
	add	r7, r9, #PCB_R8
	ldmia	r7, {r8-r13}

	sub	r7, r7, #PCB_R8		/* restore PCB pointer */
#else
	mov	r7, r9
	ldr	r8, [r7, #(PCB_R8)]
	ldr	r9, [r7, #(PCB_R9)]
	ldr	r10, [r7, #(PCB_R10)]
	ldr	r11, [r7, #(PCB_R11)]
	ldr	r12, [r7, #(PCB_R12)]
	ldr	r13, [r7, #(PCB_SP)]
#endif

	ldr	r5, [r6, #(L_PROC)]	/* fetch the proc for below */

	/* rem: r4 = return value */
	/* rem: r5 = new lwp's proc */
	/* rem: r6 = new lwp */
	/* rem: r7 = new pcb */

#ifdef ARMFPE
	add	r0, r7, #(USER_SIZE) & 0x00ff
	add	r0, r0, #(USER_SIZE) & 0xff00
	bl	_C_LABEL(arm_fpe_core_changecontext)
#endif

	/* We can enable interrupts again */
	IRQenableALL

	/* rem: r4 = return value */
	/* rem: r5 = new lwp's proc */
	/* rem: r6 = new lwp */
	/* rem: r7 = new PCB */

	/*
	 * Check for restartable atomic sequences (RAS).
	 */

	ldr	r2, [r5, #(P_RASLIST)]
	ldr	r1, [r7, #(PCB_TF)]	/* r1 = trapframe (used below) */
	teq	r2, #0			/* p->p_raslist == NULL? */
	bne	.Lswitch_do_ras		/* no, check for one */

.Lswitch_return:
	/* cpu_switch returns 1 == switched, 0 == didn't switch */
	mov	r0, r4

	/*
	 * Pull the registers that got pushed when either savectx() or
	 * cpu_switch() was called and return.
	 */
	ldmfd	sp!, {r4-r7, pc}

.Lswitch_do_ras:
	ldr	r1, [r1, #(TF_PC)]	/* second ras_lookup() arg */
	mov	r0, r5			/* first ras_lookup() arg */
	bl	_C_LABEL(ras_lookup)
	cmn	r0, #1			/* -1 means "not in a RAS" */
	ldrne	r1, [r7, #(PCB_TF)]
	strne	r0, [r1, #(TF_PC)]
	b	.Lswitch_return
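
/*
 * In C, the RAS fixup above is roughly (a sketch):
 *
 *	if (p->p_raslist != NULL) {
 *		void *pc = ras_lookup(p, (void *)tf->tf_pc);
 *		if (pc != (void *)-1)		// pc was inside a RAS
 *			tf->tf_pc = (unsigned int)pc;	// restart it
 *	}
 */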

.Lswitch_exited:
	/*
	 * We skip the cache purge because switch_exit() already did it.
	 * Load up registers the way .Lcs_cache_purge_skipped expects.
	 * Userspace access already blocked by switch_exit().
	 */
	ldr	r9, [r6, #(L_ADDR)]		/* r9 = new PCB */
	ldr	r3, .Lblock_userspace_access
	mrc	p15, 0, r10, c2, c0, 0		/* r10 = old L1 */
	mov	r5, #0				/* No previous cache state */
	ldr	r1, [r9, #(PCB_DACR)]		/* r1 = new DACR */
	ldr	r8, [r9, #(PCB_CSTATE)]		/* r8 = new cache state */
	ldr	r11, [r9, #(PCB_PAGEDIR)]	/* r11 = new L1 */
	b	.Lcs_cache_purge_skipped


#ifdef DIAGNOSTIC
.Lswitch_bogons:
	adr	r0, .Lswitch_panic_str
	bl	_C_LABEL(panic)
1:	nop
	b	1b

.Lswitch_panic_str:
	.asciz	"cpu_switch: sched_qs empty with non-zero sched_whichqs!\n"
#endif

/*
 * cpu_switchto(struct lwp *current, struct lwp *next)
 * Switch to the specified next LWP
 * Arguments:
 *
 *	r0	'struct lwp *' of the current LWP
 *	r1	'struct lwp *' of the LWP to switch to
 */
ENTRY(cpu_switchto)
	stmfd	sp!, {r4-r7, lr}

	mov	r6, r1		/* save new lwp */

#if defined(LOCKDEBUG)
	mov	r5, r0		/* save old lwp */
	bl	_C_LABEL(sched_unlock_idle)
	mov	r1, r5
#else
	mov	r1, r0
#endif

	IRQdisable

	/*
	 * Okay, set up registers the way cpu_switch() wants them,
	 * and jump into the middle of it (where we bring up the
	 * new process).
	 *
	 * r1 = old lwp (r6 = new lwp)
	 */
	b	.Lswitch_resume

/*
 * void switch_exit(struct lwp *l, struct lwp *l0, void (*exit)(struct lwp *));
 * Switch to lwp0's saved context and deallocate the address space and kernel
 * stack for l.  Then jump into cpu_switch(), as if we were in lwp0 all along.
 */

/* LINTSTUB: Func: void switch_exit(struct lwp *l, struct lwp *l0, void (*func)(struct lwp *)) */
ENTRY(switch_exit)
	/*
	 * The process is going away, so we can use callee-saved
	 * registers here without having to save them.
	 */

	mov	r4, r0
	ldr	r0, .Lcurlwp

	mov	r5, r1
	ldr	r1, .Lblock_userspace_access

	mov	r6, r2

	/*
	 * r4 = lwp
	 * r5 = lwp0
	 * r6 = exit func
	 */

	mov	r2, #0x00000000		/* curlwp = NULL */
	str	r2, [r0]

	/*
	 * We're about to clear both the cache and the TLB.
	 * Make sure to zap the 'last cache state' pointer since the
	 * pmap might be about to go away. Also ensure the outgoing
	 * VM space's cache state is marked as NOT resident in the
	 * cache, and that lwp0's cache state IS resident.
	 */
	ldr	r7, [r4, #(L_ADDR)]		/* r7 = old lwp's PCB */
	ldr	r0, .Llast_cache_state_ptr	/* Last userland cache state */
	ldr	r9, [r7, #(PCB_CSTATE)]		/* Fetch cache state pointer */
	ldr	r3, [r5, #(L_ADDR)]		/* r3 = lwp0's PCB */
	str	r2, [r0]			/* No previous cache state */
	str	r2, [r9, #(CS_ALL)]		/* Zap old lwp's cache state */
	ldr	r3, [r3, #(PCB_CSTATE)]		/* lwp0's cache state */
	mov	r2, #-1
	str	r2, [r3, #(CS_ALL)]		/* lwp0 is in da cache! */
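
/*
 * Sketch of the bookkeeping above (field names mirror the
 * PCB_CSTATE/CS_ALL offsets used here):
 *
 *	pmap_cache_state = NULL;		// old pmap may be freed
 *	old_pmap->pm_cstate.cs_all = 0;		// not in cache/TLB
 *	lwp0_pmap->pm_cstate.cs_all = ~0;	// lwp0 stays resident
 */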

	/*
	 * Don't allow user space access between the purge and the switch.
	 */
	mov	r2, #0x00000001
	str	r2, [r1]

	/* Switch to lwp0 context */

	ldr	r9, .Lcpufuncs
	mov	lr, pc
	ldr	pc, [r9, #CF_IDCACHE_WBINV_ALL]

	ldr	r0, [r7, #(PCB_PL1VEC)]
	ldr	r1, [r7, #(PCB_DACR)]

	/*
	 * r0 = Pointer to L1 slot for vector_page (or NULL)
	 * r1 = lwp0's DACR
	 * r4 = lwp we're switching from
	 * r5 = lwp0
	 * r6 = exit func
	 * r7 = lwp0's PCB
	 * r9 = cpufuncs
	 */

	IRQdisableALL

	/*
	 * Ensure the vector table is accessible by fixing up lwp0's L1
	 */
	cmp	r0, #0			/* No need to fixup vector table? */
	ldrne	r3, [r0]		/* But if yes, fetch current value */
	ldrne	r2, [r7, #(PCB_L1VEC)]	/* Fetch new vector_page value */
	mcr	p15, 0, r1, c3, c0, 0	/* Update DACR for lwp0's context */
	cmpne	r3, r2			/* Stuffing the same value? */
	strne	r2, [r0]		/* Store if not. */

#ifdef PMAP_INCLUDE_PTE_SYNC
	/*
	 * Need to sync the cache to make sure that last store is
	 * visible to the MMU.
	 */
	movne	r1, #4
	movne	lr, pc
	ldrne	pc, [r9, #CF_DCACHE_WB_RANGE]
#endif /* PMAP_INCLUDE_PTE_SYNC */

	/*
	 * Note: We don't do the same optimisation as cpu_switch() with
	 * respect to avoiding flushing the TLB if we're switching to
	 * the same L1 since this process' VM space may be about to go
	 * away, so we don't want *any* turds left in the TLB.
	 */

	/* Switch the memory to the new process */
	ldr	r0, [r7, #(PCB_PAGEDIR)]
	mov	lr, pc
	ldr	pc, [r9, #CF_CONTEXT_SWITCH]

	ldr	r0, .Lcurpcb

	/* Restore all the saved registers */
#ifndef __XSCALE__
	add	r1, r7, #PCB_R8
	ldmia	r1, {r8-r13}
#else
	ldr	r8, [r7, #(PCB_R8)]
	ldr	r9, [r7, #(PCB_R9)]
	ldr	r10, [r7, #(PCB_R10)]
	ldr	r11, [r7, #(PCB_R11)]
	ldr	r12, [r7, #(PCB_R12)]
	ldr	r13, [r7, #(PCB_SP)]
#endif
	str	r7, [r0]	/* curpcb = lwp0's PCB */

	IRQenableALL

	/*
	 * Schedule the vmspace and stack to be freed.
	 */
	mov	r0, r4			/* {lwp_}exit2(l) */
	mov	lr, pc
	mov	pc, r6

#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	bl	_C_LABEL(sched_lock_idle)
#endif

	ldr	r7, .Lwhichqs		/* r7 = &whichqs */
	mov	r5, #0x00000000		/* r5 = old lwp = NULL */
	b	.Lswitch_search

/* LINTSTUB: Func: void savectx(struct pcb *pcb) */
ENTRY(savectx)
	/*
	 * r0 = pcb
	 */

	/* Push registers. */
	stmfd	sp!, {r4-r7, lr}

	/* Store all the registers in the process's pcb */
#ifndef __XSCALE__
	add	r2, r0, #(PCB_R8)
	stmia	r2, {r8-r13}
#else
	strd	r8, [r0, #(PCB_R8)]
	strd	r10, [r0, #(PCB_R10)]
	strd	r12, [r0, #(PCB_R12)]
#endif

	/* Pull the registers off the stack */
	ldmfd	sp!, {r4-r7, pc}

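/*
 * proc_trampoline: first-time entry point for a newly-forked lwp.
 * In rough C terms (a sketch; r4/r5 hold the function and argument
 * set up by the fork code):
 *
 *	spl0();
 *	(*func)(arg);			// func in r4, arg in r5
 *	disable_irqs();
 *	return_to_user_via_trapframe();	// PULLFRAME; movs pc, lr
 */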
ENTRY(proc_trampoline)
#ifdef __NEWINTR
	mov	r0, #(IPL_NONE)
	bl	_C_LABEL(_spllower)
#else /* ! __NEWINTR */
	mov	r0, #(_SPL_0)
	bl	_C_LABEL(splx)
#endif /* __NEWINTR */

#ifdef MULTIPROCESSOR
	bl	_C_LABEL(proc_trampoline_mp)
#endif
	mov	r0, r5
	mov	r1, sp
	mov	lr, pc
	mov	pc, r4

	/* Kill IRQs */
        mrs     r0, cpsr
        orr     r0, r0, #(I32_bit)
        msr     cpsr_c, r0

	PULLFRAME

	movs	pc, lr			/* Exit */

#ifndef __XSCALE__
	.type .Lcpu_switch_ffs_table, _ASM_TYPE_OBJECT;
.Lcpu_switch_ffs_table:
/* same as the ffs table, but all entries are one less */
/*               0   1   2   3   4   5   6   7           */
	.byte	 0,  0,  1, 12,  2,  6,  0, 13  /*  0- 7 */
	.byte	 3,  0,  7,  0,  0,  0,  0, 14  /*  8-15 */
	.byte	10,  4,  0,  0,  8,  0,  0, 25  /* 16-23 */
	.byte	 0,  0,  0,  0,  0, 21, 27, 15  /* 24-31 */
	.byte	31, 11,  5,  0,  0,  0,  0,  0	/* 32-39 */
	.byte	 9,  0,  0, 24,  0,  0, 20, 26  /* 40-47 */
	.byte	30,  0,  0,  0,  0, 23,  0, 19  /* 48-55 */
	.byte   29,  0, 22, 18, 28, 17, 16,  0  /* 56-63 */
#endif	/* !__XSCALE__ */