/*	$NetBSD: locore_subr.S,v 1.68 2023/03/01 08:18:13 riastradh Exp $	*/

/*
 * Copyright (c) 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Eduardo Horvath and Simon Burge for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * NOTICE: This is not a standalone file.  To use it, #include it in
 * your port's locore.S, like so:
 *
 *	#include <powerpc/powerpc/locore_subr.S>
 */

#ifdef _KERNEL_OPT
#include "opt_ddb.h"
#include "opt_modular.h"
#include "opt_multiprocessor.h"
#include "opt_ppcarch.h"
#endif

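/*
 * With DDB, CFRAME_LRSAVE() records the current LR in the callframe,
 * so that ddb stack traces can walk through a context switch.
 */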
#ifdef DDB
#define	CFRAME_LRSAVE(t0)					\
	bl	90f;				/* get the LR */\
90:	mflr	%r0;						\
	streg	%r0,(CFRAME_LR)(t0)
#else
#define	CFRAME_LRSAVE(t0)	/* nothing */
#endif

/*
 * We don't save r30 and r31 since they are already saved in the
 * callframe.  We also want the "current" value of r30 instead of the
 * saved value, since we need to return the LWP that ran before us,
 * not ourselves: if we saved r30 here, then on restore we would get
 * back the r30 that was saved when we were switched out, which would
 * be ourselves.
 */
#define	SWITCHFRAME_SAVE(t0)					\
	streg	%r10,(SFRAME_USER_SR)(t0);	/* USER_SR */	\
	streg	%r11,(SFRAME_CR)(t0);		/* CR */	\
	streg	%r12,(SFRAME_R2)(t0);		/* R2 */	\
     /* streg	%r13,(SFRAME_R13)(t0); */	/* volatile */	\
	streg	%r14,(SFRAME_R14)(t0);				\
	streg	%r15,(SFRAME_R15)(t0);				\
	streg	%r16,(SFRAME_R16)(t0);				\
	streg	%r17,(SFRAME_R17)(t0);				\
	streg	%r18,(SFRAME_R18)(t0);				\
	streg	%r19,(SFRAME_R19)(t0);				\
	streg	%r20,(SFRAME_R20)(t0);				\
	streg	%r21,(SFRAME_R21)(t0);				\
	streg	%r22,(SFRAME_R22)(t0);				\
	streg	%r23,(SFRAME_R23)(t0);				\
	streg	%r24,(SFRAME_R24)(t0);				\
	streg	%r25,(SFRAME_R25)(t0);				\
	streg	%r26,(SFRAME_R26)(t0);				\
	streg	%r27,(SFRAME_R27)(t0);				\
	streg	%r28,(SFRAME_R28)(t0);				\
	streg	%r29,(SFRAME_R29)(t0)

#define	SWITCHFRAME_RESTORE(t0)					\
	ldreg	%r10,(SFRAME_USER_SR)(t0);	/* USER_SR */	\
	ldreg	%r11,(SFRAME_CR)(t0);		/* CR */	\
	ldreg	%r12,(SFRAME_R2)(t0);		/* R2 */	\
     /* ldreg	%r13,(SFRAME_R13)(t0); */	/* volatile */	\
	ldreg	%r14,(SFRAME_R14)(t0);				\
	ldreg	%r15,(SFRAME_R15)(t0);				\
	ldreg	%r16,(SFRAME_R16)(t0);				\
	ldreg	%r17,(SFRAME_R17)(t0);				\
	ldreg	%r18,(SFRAME_R18)(t0);				\
	ldreg	%r19,(SFRAME_R19)(t0);				\
	ldreg	%r20,(SFRAME_R20)(t0);				\
	ldreg	%r21,(SFRAME_R21)(t0);				\
	ldreg	%r22,(SFRAME_R22)(t0);				\
	ldreg	%r23,(SFRAME_R23)(t0);				\
	ldreg	%r24,(SFRAME_R24)(t0);				\
	ldreg	%r25,(SFRAME_R25)(t0);				\
	ldreg	%r26,(SFRAME_R26)(t0);				\
	ldreg	%r27,(SFRAME_R27)(t0);				\
	ldreg	%r28,(SFRAME_R28)(t0);				\
	ldreg	%r29,(SFRAME_R29)(t0)

	.data
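/*
 * powersave starts out as -1 ("not yet determined"); machine-dependent
 * CPU setup code is expected to set it to say whether the idle loop may
 * use a power-saving state.
 */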
GLOBAL(powersave)
	.long	-1

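/*
 * Export these option-derived constants as absolute symbols so that
 * kernel modules can link against their values.
 */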
#ifdef MODULAR
	.global	__USRSTACK
	.equ	__USRSTACK, USRSTACK
	.global	__CPU_MAXNUM
	.equ	__CPU_MAXNUM, CPU_MAXNUM
#endif

	.text
	.align 2
/*
 * struct lwp *
 * cpu_switchto(struct lwp *current, struct lwp *new)
 *
 * Switch to the indicated new LWP.
 *	r3 - current LWP
 *	r4 - LWP to switch to
 *	Scheduler lock is held.
 *	SPL is IPL_SCHED.
 */
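/*
 * Overview of the sequence below: build a switchframe on the old LWP's
 * stack and save its SP in the old PCB; publish the new LWP in
 * ci_curlwp (with barriers for adaptive mutexes on MULTIPROCESSOR);
 * then load the new LWP's SP from its PCB and unwind its switchframe.
 */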
ENTRY(cpu_switchto)
	mflr	%r0			/* save lr */
	streg	%r0,CFRAME_LR(%r1)
	stptru	%r1,-CALLFRAMELEN(%r1)
	streg	%r31,CFRAME_R31(%r1)
	streg	%r30,CFRAME_R30(%r1)
	mr	%r30,%r3		/* r30 = curlwp */
	mr	%r31,%r4		/* r31 = newlwp */

#ifdef PPC_BOOKE
	mfmsr	%r0
	andis.	%r0,%r0,PSL_CE@h
	tweqi	%r0,0			/* trap if PSL_CE is clear */
#endif

#if defined (PPC_OEA) || defined (PPC_OEA64_BRIDGE)
	mfsr	%r10,USER_SR		/* save USER_SR for copyin/copyout */
#else
	li	%r10,0			/* USER_SR not needed */
#endif
	mfcr	%r11			/* save cr */
	mr	%r12,%r2		/* save r2 */
	CFRAME_LRSAVE(%r1)
	stptru	%r1,-SFRAMELEN(%r1)	/* still running on old stack */
	SWITCHFRAME_SAVE(%r1)		/* save USER_SR, CR, R2, non-volatile */
	ldptr	%r4,L_PCB(%r30)		/* put PCB addr in r4 */
	streg	%r1,PCB_SP(%r4)		/* store old lwp's SP */
#if defined(PPC_BOOKE)
	mfspr	%r9,SPR_USPRG0
	streg	%r9,PCB_USPRG0(%r4)	/* save in PCB, not switchframe. */
#endif

/* Lock the scheduler. */
#if defined(PPC_IBM4XX) || defined(PPC_BOOKE)
	wrteei	0			/* disable interrupts while
					   manipulating the run queue */
#else /* PPC_OEA */
	mfmsr	%r3
	andi.	%r3,%r3,~PSL_EE@l	/* disable interrupts while
					   manipulating the run queue */
	mtmsr	%r3
	isync
#endif

	/*
	 * r31 = lwp now running on this cpu
	 * r30 = previous lwp (may be NULL)
	 * scheduler lock is held.
	 * spl is IPL_SCHED.
	 * MSR[EE] == 0 (interrupts are off)
	 */

	GET_CPUINFO(%r7)

	/*
	 * Issue barriers to coordinate mutex_exit on this CPU with
	 * mutex_vector_enter on another CPU.
	 *
	 * 1. Any prior mutex_exit by oldlwp must be visible to other
	 *    CPUs before we set ci_curlwp := newlwp on this one,
	 *    requiring a store-before-store barrier.
	 *
	 * 2. ci_curlwp := newlwp must be visible on all other CPUs
	 *    before any subsequent mutex_exit by newlwp can even test
	 *    whether there might be waiters, requiring a
	 *    store-before-load barrier.
	 *
	 * See kern_mutex.c for details -- this is necessary for
	 * adaptive mutexes to detect whether the lwp is on the CPU in
	 * order to safely block without requiring atomic r/m/w in
	 * mutex_exit.
	 */
#ifdef MULTIPROCESSOR
	sync	/* store-before-store XXX use eieio if available -- cheaper */
#endif
	stptr	%r31,CI_CURLWP(%r7)
#ifdef MULTIPROCESSOR
	sync	/* store-before-load */
#endif
	mr	%r13,%r31		/* keep curlwp in its dedicated
					   register, r13 */
#ifdef PPC_BOOKE
	mtsprg2	%r31			/* save curlwp in sprg2 */
#endif
#ifdef MULTIPROCESSOR
	stptr	%r7,L_CPU(%r31)		/* update cpu pointer */
#endif
	ldptr	%r4,L_PCB(%r31)		/* put PCB addr in r4 */
	stptr	%r4,CI_CURPCB(%r7)	/* using a new pcb */
	ldptr	%r3,PCB_PM(%r4)
	stptr	%r3,CI_CURPM(%r7)	/* and maybe a new pmap */

	/*
	 * Now restore the register state.
	 */
	ldreg	%r1,PCB_SP(%r4)		/* get new lwp's SP */
	SWITCHFRAME_RESTORE(%r1)	/* get non-volatile, CR, R2, USER_SR */
#if defined(PPC_BOOKE)
	ldreg	%r9,PCB_USPRG0(%r4)
	mtspr	SPR_USPRG0,%r9
#endif
	ldreg	%r1,0(%r1)		/* get saved SP */
	mr	%r2,%r12		/* get saved r2 */
	mtcr	%r11			/* get saved cr */
#if defined (PPC_OEA) || defined (PPC_OEA64_BRIDGE)
	mtsr	USER_SR,%r10		/* get saved USER_SR */
#endif
	isync

#if defined(PPC_IBM4XX) || defined(PPC_BOOKE)
	wrteei	1			/* interrupts are okay again */
#else /* PPC_OEA */
	mfmsr	%r4
	ori	%r4,%r4,PSL_EE@l	/* interrupts are okay again */
	mtmsr	%r4
#endif

#if defined(PPC_IBM4XX)
0:
	ldreg	%r3,CI_CURPM(%r7)	/* Do we need a context? */
	ldreg	%r4,PM_CTX(%r3)
	cmpwi	%r4,0
	bne	1f
	bl	_C_LABEL(ctx_alloc)
	GET_CPUINFO(%r7)
	b	0b			/* reload */
1:
#endif

	/*
	 * Move back old-lwp and new-lwp to r3 and r4.  We need to return
	 * r3, but lwp_startup needs r4: if we are returning to the fork
	 * trampoline, it will invoke lwp_startup directly.  So we "waste"
	 * an instruction by always setting up both here.
	 */
	mr	%r3,%r30
	mr	%r4,%r31

/*
 * Note that callframe linkages are set up in cpu_lwp_fork().
 */
	ldreg	%r31,CFRAME_R31(%r1)	/* restore saved registers */
	ldreg	%r30,CFRAME_R30(%r1)
	IBM405_ERRATA77_DCBT(0,%r1)
	stwcx.	%r1,0,%r1		/* clear reservation */
#if 1
	addi	%r1,%r1,CALLFRAMELEN
#else
	ldreg	%r1,CFRAME_SP(%r1)	/* pop stack frame */
#endif
	ldreg	%r0,CFRAME_LR(%r1)
	mtlr	%r0
	blr				/* CPUINIT needs a raw blr */

ENTRY_NOPROFILE(emptyidlespin)
#ifdef DIAGNOSTIC
	GET_CPUINFO(%r3)
	lbz	%r4,CI_CPL(%r3)
	twnei	%r4,IPL_NONE		/* trap if cpl != IPL_NONE */
	mfmsr	%r5
	andi.	%r5,%r5,PSL_EE@l
	tweqi	%r5,0			/* trap if interrupts are disabled */
#endif
	blr				/* CPUINIT needs a raw blr */

#ifdef __HAVE_FAST_SOFTINTS
	/*
	 * This gets executed if the softint thread blocks.
	 * cpu_switchto has restored r30/r31 for us.
	 */
_ENTRY(softint_cleanup)
	GET_CPUINFO(%r7)
	ldint	%r5, CI_MTX_COUNT(%r7)
	addi	%r5, %r5, 1
	stint	%r5, CI_MTX_COUNT(%r7)
	ldreg	%r0, CFRAME_R31(%r1)	/* get saved MSR */
#if defined(PPC_IBM4XX) || defined(PPC_BOOKE)
	wrtee	%r0			/* restore EE */
#endif
#if defined(PPC_OEA) || defined(PPC_OEA64_BRIDGE) || defined(PPC_OEA64)
	mtmsr	%r0
	isync
#endif
	IBM405_ERRATA77_DCBT(0,%r1)
	stwcx.	%r1,0,%r1		/* clear reservation */
	addi	%r1, %r1, CALLFRAMELEN
	ldreg	%r0, CFRAME_LR(%r1)
	mtlr	%r0
#if IPL_SCHED != IPL_HIGH
	li	%r3, IPL_HIGH
	b	_C_LABEL(splraise)
#else
	blr
#endif /* IPL_SCHED != IPL_HIGH */

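/*
 * softint_fast_dispatch(struct lwp *l, int ipl)
 *	%r3 - softint LWP to run
 *	%r4 - ipl
 * (Register roles as documented in the body below.)
 */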
_ENTRY(softint_fast_dispatch)
	/*
	 * Our callframe, which softint will grab the LR from.
	 */
	mflr	%r0
	streg	%r0, CFRAME_LR(%r1)
	stptru	%r1, -CALLFRAMELEN(%r1)
	mfmsr	%r0
	streg	%r0, CFRAME_R31(%r1)

	/*
	 * We need a 2nd callframe, which cpu_switchto will consume
	 * if the softint thread blocks.
	 */
	lis	%r8, _C_LABEL(softint_cleanup)@ha
	addi	%r8, %r8, _C_LABEL(softint_cleanup)@l
	streg	%r8, CFRAME_LR(%r1)
	stptru	%r1, -CALLFRAMELEN(%r1)
	streg	%r30, CFRAME_R30(%r1)
	streg	%r31, CFRAME_R31(%r1)

	GET_CPUINFO(%r7)
	mr	%r30, %r13		/* curlwp is now in r13 */

#if defined (PPC_OEA) || defined (PPC_OEA64_BRIDGE)
	mfsr	%r10,USER_SR		/* save USER_SR for copyin/copyout */
#else
	li	%r10,0			/* USER_SR not needed */
#endif
	mfcr	%r11			/* save cr */
	mr	%r12,%r2		/* save r2 */
	CFRAME_LRSAVE(%r1)
	stptru	%r1, -SFRAMELEN(%r1)	/* still running on old stack */
	SWITCHFRAME_SAVE(%r1)		/* save USER_SR, CR, R2, non-volatile */
	mr	%r31, %r1
	ldptr	%r5, L_PCB(%r30)	/* put PCB addr in r5 */
	streg	%r1, PCB_SP(%r5)	/* store old lwp's SP */
#if defined(PPC_BOOKE)
	mfspr	%r9,SPR_USPRG0
	streg	%r9,PCB_USPRG0(%r5)	/* save in PCB, not switchframe. */
#endif

	mfmsr	%r29
#if defined(PPC_IBM4XX) || defined(PPC_BOOKE)
	wrteei	0			/* disable interrupts while
					   manipulating the run queue */
#else /* PPC_OEA */
	andi.	%r28,%r29,~PSL_EE@l	/* disable interrupts while
					   manipulating the run queue */
	mtmsr	%r28
	isync
#endif

	/*
	 * We don't need a ctx for ibm4xx since we are switching
	 * to a kernel thread.
	 */

#ifdef MULTIPROCESSOR
	sync	/* XXX eieio */		/* for mutex_enter; see cpu_switchto */
#endif
	stptr	%r3, CI_CURLWP(%r7)
	/*
	 * No need for a barrier after ci->ci_curlwp = softlwp -- when we
	 * enter a softint lwp, it can't be holding any mutexes, so it
	 * can't release any until after it has acquired them, so we
	 * need not participate in the protocol with mutex_vector_enter
	 * barriers here.
	 */
	mr	%r13, %r3
#ifdef PPC_BOOKE
	mtsprg2	%r3
#endif
	ldptr	%r5, L_PCB(%r3)
	stptr	%r5, CI_CURPCB(%r7)
	ldptr	%r0, PCB_PM(%r5)
	stptr	%r0, CI_CURPM(%r7)	/* and maybe a new pmap */
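	/*
	 * Switch onto the softint LWP's own kernel stack: the PCB sits
	 * at the base of its USPACE, and room is left at the top for a
	 * trapframe and a callframe.
	 */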
	addi	%r1, %r5, USPACE - FRAMELEN - CALLFRAMELEN

#if defined(PPC_IBM4XX) || defined(PPC_BOOKE)
	wrtee	%r29			/* interrupts are okay again */
#else /* PPC_OEA */
	mtmsr	%r29
#endif

	/*
	 * Call softint_dispatch(pinned lwp, ipl):
	 *	%r3 = former curlwp
	 *	%r4 = ipl (unchanged since entry)
	 */
	mr	%r3, %r30		/* pass former curlwp */
	bl	_C_LABEL(softint_dispatch)

#if defined(PPC_IBM4XX) || defined(PPC_BOOKE)
	wrteei	0			/* disable interrupts while
					   manipulating the run queue */
#else /* PPC_OEA */
	mtmsr	%r28			/* disable interrupts while
					   manipulating the run queue */
	isync
#endif

	GET_CPUINFO(%r7)
#ifdef MULTIPROCESSOR
	sync	/* XXX eieio */		/* for mutex_enter; see cpu_switchto */
#endif
	stptr	%r30, CI_CURLWP(%r7)
#ifdef MULTIPROCESSOR
	sync				/* for mutex_enter; see cpu_switchto */
#endif
	mr	%r13, %r30
#ifdef PPC_BOOKE
	mtsprg2	%r30
#endif
	ldptr	%r5, L_PCB(%r30)
	stptr	%r5, CI_CURPCB(%r7)
	ldptr	%r0, PCB_PM(%r5)
	stptr	%r0, CI_CURPM(%r7)	/* and maybe a new pmap */
	mr	%r1, %r31		/* get saved SP */

#if defined(PPC_IBM4XX) || defined(PPC_BOOKE)
	wrtee	%r29			/* interrupts are okay again */
#else /* PPC_OEA */
	mtmsr	%r29
#endif

	/*
	 * Since softint_dispatch returned to us, we know that the
	 * callee-saved registers are intact and thus don't have to be
	 * restored (except for r28/r29/r30/r31, which we used).
	 * Since softints can't copyin/copyout, USER_SR can't have been
	 * modified, and CR/R2 can't have changed either.  We can just
	 * eat the switchframe and continue.
	 */
#if 0
#if defined (PPC_OEA) || defined (PPC_OEA64_BRIDGE)
	ldreg	%r10,SFRAME_USER_SR(%r1) /* USER_SR */
	mtsr	USER_SR,%r10		/* restore USER_SR for copyin/copyout */
#endif
	ldreg	%r28,SFRAME_R28(%r1)	/* R28 */
	ldreg	%r29,SFRAME_R29(%r1)	/* R29 */
	ldreg	%r11,SFRAME_CR(%r1)	/* CR */
	mtcr	%r11
	ldreg	%r2,SFRAME_R2(%r1)	/* R2 */
#endif
#if 0
	addi	%r1,%r1,SFRAMELEN	/* remove switch frame */

	ldreg	%r31,CFRAME_R31(%r1)	/* restore saved registers */
	ldreg	%r30,CFRAME_R30(%r1)	/* from switchto callframe */
	addi	%r1,%r1,CALLFRAMELEN	/* remove switchto call frame */
	addi	%r1,%r1,CALLFRAMELEN	/* remove our call frame */
#else
	ldreg	%r28,SFRAME_R28(%r1)	/* R28 */
	ldreg	%r29,SFRAME_R29(%r1)	/* R29 */
	ldreg	%r31,SFRAMELEN+CFRAME_R31(%r1)	/* restore saved registers */
	ldreg	%r30,SFRAMELEN+CFRAME_R30(%r1)
	IBM405_ERRATA77_DCBT(0,%r1)
	stwcx.	%r1,0,%r1		/* clear reservation */
	addi	%r1,%r1,SFRAMELEN+2*CALLFRAMELEN /* remove switch & callframes */
#endif
	ldreg	%r0,CFRAME_LR(%r1)
	mtlr	%r0
	blr
#endif /* __HAVE_FAST_SOFTINTS */

/*
 * Child comes here at the end of a fork.
 * Return to userspace via the trap return path.
 */
	.globl	_C_LABEL(cpu_lwp_bootstrap)
_ENTRY(cpu_lwp_bootstrap)
#if defined(MULTIPROCESSOR) && 0
	mr	%r28,%r3
	mr	%r29,%r4
	bl	_C_LABEL(proc_trampoline_mp)
	mr	%r4,%r29
	mr	%r3,%r28
#endif
	/*
	 * r3 (old lwp) and r4 (new lwp) are set up in cpu_switchto.
	 */
	bl	_C_LABEL(lwp_startup)

	mtlr	%r31
	mr	%r3,%r30
	blrl				/* call the routine in r31 */
	lwz	%r31, FRAME_SRR1(%r1)	/* trapexit wants srr1 in r31 */
#ifdef PPC_BOOKE
	lis	%r30, 0xbeeffeed@h
	ori	%r30, %r30, 0xbeeffeed@l
	andis.	%r0,%r31,PSL_CE@h
	tweqi	%r0,0			/* trap if PSL_CE is clear */
#endif
	li	%r4, 1			/* make sure userret gets called */
	stint	%r4, L_MD_ASTPENDING(%r13)
	b	trapexit

#if defined(MULTIPROCESSOR) && (defined(PPC_OEA) || defined (PPC_OEA64_BRIDGE))
ENTRY(cpu_spinup_trampoline)
	li	%r0,0
	mtmsr	%r0

	lis	%r5,oeacpufeat@ha
	lwz	%r5,oeacpufeat@l(%r5)
	andi.	%r5,%r5,OEACPU_64_BRIDGE
	beq	6f
	sync
	slbia
	sync
	isync
	clrldi	%r0,%r0,32
	mtmsrd	%r0
6:
	isync

	lis	%r4,_C_LABEL(cpu_hatch_stack)@ha
	lwz	%r1,_C_LABEL(cpu_hatch_stack)@l(%r4)

	bl	_C_LABEL(cpu_hatch)
	mr	%r1,%r3
	b	_C_LABEL(idle_loop)

ENTRY(cpu_spinstart)
	li	%r0,0
	mtmsr	%r0
	lis	%r5,oeacpufeat@ha
	lwz	%r5,oeacpufeat@l(%r5)
	andi.	%r5,%r5,OEACPU_64_BRIDGE
	beq	4f
	sync
	slbia
	sync
	isync
	clrldi	%r0,%r0,32
	mtmsrd	%r0
	mtspr	SPR_ASR,%r0
4:
	lis	%r5,_C_LABEL(cpu_spinstart_ack)@ha
	addi	%r5,%r5,_C_LABEL(cpu_spinstart_ack)@l
	stw	%r3,0(%r5)
	dcbf	0,%r5
	lis	%r6,_C_LABEL(cpu_spinstart_cpunum)@ha
5:
	lwz	%r4,_C_LABEL(cpu_spinstart_cpunum)@l(%r6)
	cmpw	%r4,%r3
	bne	5b
	lis	%r4,_C_LABEL(cpu_hatch_stack)@ha
	lwz	%r1,_C_LABEL(cpu_hatch_stack)@l(%r4)
	bla	_C_LABEL(cpu_hatch)
	mr	%r1,%r3	/* move the return value of cpu_hatch to the stack pointer */
	b	_C_LABEL(idle_loop)

#endif /* MULTIPROCESSOR && OEA */

#if 0 /* XXX CPU configuration spaghetti */
/*
 * int do_ucas_32(uint32_t *uptr, uint32_t old, uint32_t new, uint32_t *ret);
 */
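/*
 * Word-sized compare-and-swap using lwarx/stwcx.: if *uptr still holds
 * "old", store "new"; either way the observed value is written through
 * ret and 0 is returned.
 */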
ENTRY(do_ucas_32)
1:
	lwarx	%r10,0,%r3
	cmpw	%r10, %r4
	bne	2f
	IBM405_ERRATA77_DCBT(0,%r3)
	stwcx.	%r5,0,%r3
	bne	1b
	mr	%r5,%r10
2:
	li	%r3,0
	stw	%r10,0(%r6)
	blr
#endif