Home | History | Annotate | Line # | Download | only in hppa
      1 /*	$NetBSD: locore.S,v 1.7 2025/04/03 17:49:49 skrll Exp $	*/
      2 /*	$OpenBSD: locore.S,v 1.158 2008/07/28 19:08:46 miod Exp $	*/
      3 
      4 /*
      5  * Copyright (c) 1998-2004 Michael Shalayeff
      6  * All rights reserved.
      7  *
      8  * Redistribution and use in source and binary forms, with or without
      9  * modification, are permitted provided that the following conditions
     10  * are met:
     11  * 1. Redistributions of source code must retain the above copyright
     12  *    notice, this list of conditions and the following disclaimer.
     13  * 2. Redistributions in binary form must reproduce the above copyright
     14  *    notice, this list of conditions and the following disclaimer in the
     15  *    documentation and/or other materials provided with the distribution.
     16  *
     17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     20  * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
     21  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
     22  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
     23  * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
     25  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
     26  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
     27  * THE POSSIBILITY OF SUCH DAMAGE.
     28  *
      29  * Portions of this file are derived from other sources, see
     30  * the copyrights and acknowledgements below.
     31  */
     32 /*
     33  * Copyright (c) 1990,1991,1992,1994 The University of Utah and
     34  * the Computer Systems Laboratory (CSL).  All rights reserved.
     35  *
     36  * THE UNIVERSITY OF UTAH AND CSL PROVIDE THIS SOFTWARE IN ITS "AS IS"
     37  * CONDITION, AND DISCLAIM ANY LIABILITY OF ANY KIND FOR ANY DAMAGES
     38  * WHATSOEVER RESULTING FROM ITS USE.
     39  *
     40  * CSL requests users of this software to return to csl-dist (at) cs.utah.edu any
     41  * improvements that they make and grant CSL redistribution rights.
     42  *
     43  *	Utah $Hdr: locore.s 1.62 94/12/15$
     44  */
     45 /*
     46  *  (c) Copyright 1988 HEWLETT-PACKARD COMPANY
     47  *
     48  *  To anyone who acknowledges that this file is provided "AS IS"
     49  *  without any express or implied warranty:
     50  *      permission to use, copy, modify, and distribute this file
     51  *  for any purpose is hereby granted without fee, provided that
     52  *  the above copyright notice and this notice appears in all
     53  *  copies, and that the name of Hewlett-Packard Company not be
     54  *  used in advertising or publicity pertaining to distribution
     55  *  of the software without specific, written prior permission.
     56  *  Hewlett-Packard Company makes no representations about the
     57  *  suitability of this software for any purpose.
     58  */
     59 
     60 #include "opt_multiprocessor.h"
     61 #include "opt_cputype.h"
     62 #include "opt_ddb.h"
     63 #include "opt_kgdb.h"
     64 
     65 #include <sys/errno.h>
     66 #include <machine/param.h>
     67 #include <machine/asm.h>
     68 #include <machine/psl.h>
     69 #include <machine/trap.h>
     70 #include <machine/iomod.h>
     71 #include <machine/pdc.h>
     72 #include <machine/reg.h>
     73 #include <machine/cpu.h>
     74 
     75 #include "assym.h"
     76 
/* Some aliases for the macros in assym.h. */
#define	TRAPFRAME_SIZEOF	trapframe_SIZEOF

/*
 * Very crude debugging macros that write to com1.
 */

#if 1
/* UART TX register on LASI-style hardware (base + offset + data reg) */
#define	COM1_TX_REG	(0xffd00000 + 0x5000 + 0x800)
#else
/* alternate UART address for other hardware — TODO confirm which machine */
#define	COM1_TX_REG	(0xf0823000 + 0x800)
#endif
/*
 * Emit the character in reg2 to the COM1 TX register (via %sr1), then
 * busy-wait: reg1 is loaded with a large delay count and decremented by
 * reg2 (set to 1) until zero; the comb,<>,n branches back -8 bytes
 * (to the sub) while nonzero.  Clobbers reg1 and reg2.
 */
#define _DEBUG_PUTCHAR(reg1, reg2)		! \
	ldil	L%COM1_TX_REG, %reg1		! \
	stb	%reg2, R%COM1_TX_REG(%sr1, %reg1) ! \
	ldil	L%10000000, %reg1		! \
	ldi	1, %reg2			! \
	comb,<>,n	%reg1, %r0, -8		! \
	sub	%reg1, %reg2, %reg1
/* Emit the immediate character 'ch'.  Clobbers reg1 and reg2. */
#define DEBUG_PUTCHAR(reg1, reg2, ch)		! \
	ldi	ch, %reg2			! \
	_DEBUG_PUTCHAR(reg1,reg2)
/*
 * Emit the 4-bit field of reg3 ending at bit position p as an ASCII hex
 * digit: the comib,>>,n nullifies the "addi 39" when the nibble is < 10
 * (unsigned), so 0-9 become '0'-'9' (+48) and 10-15 become 'a'-'f'
 * (+39+48 = +87).  Clobbers reg1 and reg2; reg3 is preserved.
 */
#define _DEBUG_DUMPN(reg1, reg2, reg3, p)	! \
	extru	%reg3, p, 4, %reg2		! \
	comib,>>,n	10, %reg2, 0		! \
	addi	39, %reg2, %reg2		! \
	addi	48, %reg2, %reg2		! \
	_DEBUG_PUTCHAR(reg1,reg2)
/* Emit ':' followed by all eight nibbles of reg3, most significant first. */
#define DEBUG_DUMP32(reg1, reg2, reg3)		! \
	DEBUG_PUTCHAR(reg1,reg2,58)		! \
	_DEBUG_DUMPN(reg1, reg2, reg3, 3)	! \
	_DEBUG_DUMPN(reg1, reg2, reg3, 7)	! \
	_DEBUG_DUMPN(reg1, reg2, reg3, 11)	! \
	_DEBUG_DUMPN(reg1, reg2, reg3, 15)	! \
	_DEBUG_DUMPN(reg1, reg2, reg3, 19)	! \
	_DEBUG_DUMPN(reg1, reg2, reg3, 23)	! \
	_DEBUG_DUMPN(reg1, reg2, reg3, 27)	! \
	_DEBUG_DUMPN(reg1, reg2, reg3, 31)

/*
 * hv-specific instructions
 *
 * Hand-assembled encodings of implementation-dependent "diag"
 * instructions (opcode fields OR'd into the diag major opcode) for
 * accessing diagnostic/TLB registers — NOTE(review): encodings are
 * CPU-model specific; confirm against the PA-RISC diagnose register
 * documentation before changing.
 */
#define	DR_PAGE0	diag (0x70 << 5)
#define	DR_PAGE1	diag (0x72 << 5)

#define	MTCPU_T(x,t)	diag ((t) << 21) | ((x) << 16) | (0xb0 << 5)
#define	MFCPU_T(r,x)	diag ((r) << 21) | ((x) << 16) | (0xd0 << 5)
#define	MTCPU_C(x,t)	diag ((t) << 21) | ((x) << 16) | (0x12 << 5)
#define	MFCPU_C(r,x)	diag ((r) << 21) | ((x) << 16) | (0x30 << 5)
#define	MFCPU_U(r,x)	diag ((r) << 21) | ((x))       | (0x45 << 5)
#define	MTCPU_U(x,r)	diag ((r) << 21) | ((x) << 16) | (0xc2 << 5)
    128 
    129 	.import	$global$, data
    130 	.import	boothowto, data
    131 	.import	bootdev, data
    132 	.import	esym, data
    133 	.import virtual_avail, data
    134 	.import	lwp0, data
    135 	.import	panic, code
    136 	.import fpu_csw, data
    137 	.import hppa_interrupt_register, data
    138 
    139 	BSS(pdc_stack, 4)	/* temp stack for PDC call */
    140 	BSS(kernelmapped, 4)	/* set when kernel is mapped */
    141 	BSS(hppa_vtop, 4)	/* a vtop translation table addr (pa=va) */
    142 
    143 	.text
    144 	.import kernel_setup, entry
    145 
    146 /*
    147  * This is the starting location for the kernel
    148  */
    149 ENTRY_NOPROFILE(start,0)
    150 /*
    151  *	bootapiver <= 2
    152  *		start(pdc, boothowto, bootdev, esym, bootapiver, argv, argc)
    153  *
    154  *	bootapiver == 3
    155  *		start(pdc, boothowto, bootdev, esym, bootapiver, bootinfo)
    156  *
    157  *	bootapiver == start
    158  *		qemu/seabios-hppa
    159  *
    160  *	pdc - PDC entry point
    161  *	boothowto - boot flags (see "reboot.h")
    162  *	bootdev - boot device (index into bdevsw)
    163  *	esym - end of symbol table (or &end if not present)
    164  *	bootapiver - /boot API version
    165  *	argv - options block passed from /boot
    166  *	argc - the length of the block
    167  *	bootinfo - pointer to a struct bootinfo.
    168  */
    169 
    170 	ldil	L%start, %r1
    171 	ldo	R%start(%r1), %r1
    172 	ldw	HPPA_FRAME_ARG(4)(%sp), %t1
    173 	copy	%r0, %r5
    174 	comb,=	%r1, %t1, .Lseabios
    175 	 nop
    176 
    177 	/*
    178 	 * save the boothowto, bootdev and esym arguments
    179 	 * don't save pdc - it's unused before pdc_init which
    180 	 * gets it from PAGE0
    181 	 */
    182 	ldil	L%boothowto,%r1
    183 	stw	%arg1,R%boothowto(%r1)
    184 	ldil	L%bootdev,%r1
    185 	stw	%arg2,R%bootdev(%r1)
    186 
    187 	/* bootinfo struct address for hppa_init, if bootapiver (%t1) is > 2 */
    188 	ldw	HPPA_FRAME_ARG(5)(%sp), %r5
    189 	comiclr,< 2, %t1, %r0
    190 	copy	%r0, %r5
    191 
    192 	comb,<>	%r0, %arg3, 1f
    193 	 nop
    194 
    195 .Lseabios:
    196 	ldil	L%end, %arg3
    197 	ldo	R%end(%arg3), %arg3
    198 
    199 1:
    200 	ldil	L%esym,%r1
    201 	stw	%arg3,R%esym(%r1)
    202 
    203 	/*
    204 	 * Put page aligned %arg3 into %t3. It is the start of available
    205 	 * memory.
    206 	 */
    207 	ldo	NBPG-1(%arg3), %t3
    208 	dep	%r0, 31, PGSHIFT, %t3
    209 
    210 	/* assuming size being page-aligned */
    211 #define STACK_ALLOC(n,s)		\
    212 	ldil	L%(n), %t1		! \
    213 	ldil	L%(s), %t2		! \
    214 	stw	%t3, R%(n)(%t1)		! \
    215 	add	%t3, %t2, %t3
    216 
    217 	STACK_ALLOC(pdc_stack, PDC_STACKSIZE)
    218 
    219 	/* zero fake trapframe and lwp0 u-area */
    220 	/* XXX - we should create a real trapframe for lwp0 */
    221 	copy	%t3, %t2
    222 	ldi	NBPG+TRAPFRAME_SIZEOF, %t1
    223 L$start_zero_tf:
    224 	stws,ma %r0, 4(%t2)
    225 	addib,>= -8, %t1, L$start_zero_tf
    226 	stws,ma %r0, 4(%t2)	/* XXX could use ,bc here, but gas is broken */
    227 
    228 	/*
    229 	 * kernel stack starts a page and a trapframe above uarea address.
    230 	 */
    231 	ldo	NBPG+TRAPFRAME_SIZEOF(%t3), %sp
    232 	mtctl	%t3, CR_FPPADDR
    233 
    234 	/* initialize the pcb */
    235 	stw	%r0, PCB_ONFAULT(%t3)
    236 	stw	%r0, PCB_SPACE(%t3)	/* XXX HPPA_SID_KERNEL == 0 */
    237 
    238 	/*
    239 	 * Setup various pointers.
    240 	 *
    241 	 * First free memory is %t3 plus normal U space. The last page of
    242 	 * USPACE is the redzone if DIAGNOSTIC (see param.h).
    243 	 */
    244 	ldil	L%USPACE, %r4
    245 	add	%t3, %r4, %r4
    246 
    247 	ldil	L%lwp0, %t2
    248 	stw	%t3, R%lwp0+L_PCB(%t2)		/* XXXuvm_lwp_getuarea */
    249 	ldo	NBPG(%t3), %t1
    250 	stw	%t1, R%lwp0+L_MD_REGS(%t2)
    251 
    252 	ldil	L%TFF_LAST, %t1
    253 	stw	%t1, TF_FLAGS-TRAPFRAME_SIZEOF(%sp)
    254 	stw	%t3, TF_CR30-TRAPFRAME_SIZEOF(%sp)
    255 
    256 	/*
    257 	 * disable all coprocessors
    258 	 */
    259 	mtctl	%r0, CR_CCR
    260 
    261 #ifdef MULTIPROCESSOR
    262 
    263 #define	PZ_MEM_RENDEZ		0x10
    264 #define	PZ_MEM_RENDEZ_HI	0x28
    265 
    266 	/* Setup SMP rendezvous address. */
    267 	ldil	L%hw_cpu_spinup_trampoline, %r1
    268 	ldo	R%hw_cpu_spinup_trampoline(%r1), %r1
    269 	stw	%r1, PZ_MEM_RENDEZ(%r0)
    270 	stw	%r0, PZ_MEM_RENDEZ_HI(%r0)
    271 #endif
    272 
    273 	/*
    274 	 * We need to set the Q bit so that we can take TLB misses after we
    275 	 * turn on virtual memory.
    276 	 */
    277 	copy	%sp, %arg0
    278 	ldil	L%qisnowon, %rp
    279 	ldo	R%qisnowon(%rp), %rp
    280 
    281 	b	kernel_setup
    282 	ldi	PSW_Q|PSW_I, %arg1
    283 
    284 qisnowon:
    285 	copy	%r4, %arg0
    286 	copy	%r5, %arg1
    287 	/*
    288 	 * call C routine hppa_init() to initialize VM
    289 	 */
    290 	.import hppa_init, code
    291 	CALL(hppa_init, %r1)
    292 
    293 	/*
    294 	 * Cannot change the queues or IPSW with the Q-bit on
    295 	 */
    296 	rsm	RESET_PSW, %r0
    297 	nop ! nop ! nop ! nop ! nop ! nop ! nop
    298 
    299 	/*
    300 	 * We need to do an rfi to get the C bit set
    301 	 */
    302 	mtctl	%r0, %pcsq
    303 	mtctl	%r0, %pcsq
    304 	ldil	L%virtual_mode, %t1
    305 	ldo	R%virtual_mode(%t1), %t1
    306 	mtctl	%t1, %pcoq
    307 	ldo	4(%t1), %t1
    308 	mtctl	%t1, %pcoq
    309 	GET_CURCPU(%t1)
    310 	ldw	CI_PSW(%t1), %t2
    311 	mtctl	%t2, %ipsw
    312 	rfi
    313 	nop
    314 	nop
    315 	nop
    316 	nop
    317 	nop
    318 	nop
    319 	nop
    320 
    321 virtual_mode:
    322 
    323 	ldil	L%kernelmapped, %t1
    324 	stw	%t1, R%kernelmapped(%t1)
    325 
    326 #ifdef DDB
    327 	.import	Debugger, code
    328 	/* have to call debugger from here, from virtual mode */
    329 	ldil	L%boothowto, %r1
    330 	ldw	R%boothowto(%r1), %r1
    331 	bb,>=	%r1, 25, L$noddb
    332 	nop
    333 
    334 	break	HPPA_BREAK_KERNEL, HPPA_BREAK_KGDB
    335 	nop
    336 L$noddb:
    337 #endif
    338 
    339 	.import main,code
    340 	CALL(main, %r1)
    341 	/* should never return... */
    342 	bv	(%rp)
    343 	nop
    344 EXIT(start)
    345 
    346 
    347 /*
    348  * void kernel_setup(register_t sp, register_t psw)
    349  */
    350 LEAF_ENTRY_NOPROFILE(kernel_setup)
    351 
    352 	/*
    353 	 * disable interrupts and turn off all bits in the psw so that
    354 	 * we start in a known state.
    355 	 */
    356 	rsm	RESET_PSW, %r0
    357 	nop ! nop ! nop ! nop ! nop ! nop
    358 
    359 	/*
    360 	 * go to virtual mode...
    361 	 * get things ready for the kernel to run in virtual mode
    362 	 */
    363 	ldi	HPPA_PID_KERNEL, %r1
    364 	mtctl	%r1, %pidr1
    365 	mtctl	%r1, %pidr2
    366 #if pbably_not_worth_it
    367 	mtctl	%r0, %pidr3
    368 	mtctl	%r0, %pidr4
    369 #endif
    370 	mtsp	%r0, %sr0
    371 	mtsp	%r0, %sr1
    372 	mtsp	%r0, %sr2
    373 	mtsp	%r0, %sr3
    374 	mtsp	%r0, %sr4
    375 	mtsp	%r0, %sr5
    376 	mtsp	%r0, %sr6
    377 	mtsp	%r0, %sr7
    378 
    379 	/*
    380 	 * to keep the spl() routines consistent we need to put the correct
    381 	 * spl level into eiem, and reset any pending interrupts
    382 	 */
    383 	ldi	-1, %r1
    384 	mtctl	%r0, %eiem		/* disable interrupts */
    385 	mtctl	%r1, %eirr
    386 
    387 	/*
    388 	 * load address of interrupt vector table
    389 	 */
    390 	ldil	L%ivaaddr, %t2
    391 	ldo	R%ivaaddr(%t2), %t2
    392 	mtctl	%t2, %iva
    393 
    394 	/*
    395 	 * set up the dp pointer so that we can do quick references off of it
    396 	 */
    397 	ldil	L%$global$, %dp
    398 	ldo	R%$global$(%dp), %dp
    399 
    400 	/*
    401 	 * Create a stack frame for us to call C with. Clear out the previous
    402 	 * sp marker to mark that this is the first frame on the stack.
    403 	 */
    404 	copy	%arg0, %sp
    405 	ldo	0(%arg0), %r3
    406 	stw,ma	%r0, HPPA_FRAME_SIZE(%sp)
    407 	stw	%r0, HPPA_FRAME_CRP(%sp)
    408 	stw	%r0, HPPA_FRAME_PSP(%sp)
    409 
    410 	/*
    411 	 * We need to set the Q bit so that we can take TLB misses after we
    412 	 * turn on virtual memory.
    413 	 */
    414 
    415 	mtctl	%r0, %pcsq
    416 	mtctl	%r0, %pcsq
    417 	mtctl	%rp, %pcoq
    418 	ldo	4(%rp), %rp
    419 	mtctl	%rp, %pcoq
    420 	mtctl	%arg1, %ipsw
    421 	rfi
    422 	nop
    423 	nop
    424 EXIT(kernel_setup)
    425 
    426 
#ifdef MULTIPROCESSOR
/*
 * Trampoline to spin up secondary processors.
 *
 * Secondary CPUs are released here by firmware via the page-zero
 * rendezvous address set up in start().  Mirrors kernel_setup(), then
 * installs this CPU's cpu_info and stack, turns on the Q bit, calls
 * cpu_hw_init(), switches to virtual mode, and enters cpu_hatch().
 */
LEAF_ENTRY_NOPROFILE(hw_cpu_spinup_trampoline)

	/*
	 * disable interrupts and turn off all bits in the psw so that
	 * we start in a known state.
	 */
	rsm	RESET_PSW, %r0
	nop ! nop ! nop ! nop ! nop ! nop

	/*
	 * go to virtual mode...
	 * get things ready for the kernel to run in virtual mode
	 */
	ldi	HPPA_PID_KERNEL, %r1
	mtctl	%r1, %pidr1
	mtctl	%r1, %pidr2
#if pbably_not_worth_it
	mtctl	%r0, %pidr3
	mtctl	%r0, %pidr4
#endif
	mtsp	%r0, %sr0
	mtsp	%r0, %sr1
	mtsp	%r0, %sr2
	mtsp	%r0, %sr3
	mtsp	%r0, %sr4
	mtsp	%r0, %sr5
	mtsp	%r0, %sr6
	mtsp	%r0, %sr7

	/*
	 * disable all coprocessors
	 */
	mtctl   %r0, CR_CCR

	/*
	 * to keep the spl() routines consistent we need to put the correct
	 * spl level into eiem, and reset any pending interrupts
	 */
	ldi	-1, %r1
	mtctl	%r0, %eiem		/* disable interrupts */
	mtctl	%r1, %eirr

	/*
	 * load address of interrupt vector table
	 */
	ldil	L%ivaaddr, %t2
	ldo	R%ivaaddr(%t2), %t2
	mtctl	%t2, %iva

	/*
	 * set up the dp pointer so that we can do quick references off of it
	 */
	ldil	L%$global$, %dp
	ldo	R%$global$(%dp), %dp

	/*
	 * Store address of cpu_info in CR_CURCPU.
	 * (cpu_hatch_info is filled in by the primary CPU beforehand.)
	 */
	ldil	L%cpu_hatch_info, %r3
	ldw	R%cpu_hatch_info(%r3), %r3
	mtctl	%r3, CR_CURCPU

	/*
	 * Setup the stack frame for us to call C with and mark this as the
	 * first frame on the stack.
	 */
	ldw	CI_STACK(%r3), %sp
	stw,ma	%r0, HPPA_FRAME_SIZE(%sp)
	stw	%r0, HPPA_FRAME_CRP(%sp)
	stw	%r0, HPPA_FRAME_PSP(%sp)

	/* Provide CPU with page tables. */
	ldil	L%hppa_vtop, %t1
	ldw	R%hppa_vtop(%t1), %t1
	mtctl	%t1, CR_VTOP

	/* Turn on the Q bit so that we can handle TLB traps. */
	ldil	L%qenabled, %t1
	ldo	R%qenabled(%t1), %t1
	mtctl	%r0, %pcsq
	mtctl	%r0, %pcsq
	mtctl	%t1, %pcoq
	ldo	4(%t1), %t1
	mtctl	%t1, %pcoq
	ldi	PSW_Q|PSW_I, %t2
	mtctl	%t2, %ipsw
	rfi
	nop

qenabled:
	/* Call C routine to setup CPU. */
	.import cpu_hw_init, code
	CALL(cpu_hw_init, %r1)

	/* Switch CPU mode: rfi to cpu_spinup_vm with the full kernel PSW. */
	ldil	L%cpu_spinup_vm, %t1
	ldo	R%cpu_spinup_vm(%t1), %t1
	mtctl	%r0, %pcsq
	mtctl	%r0, %pcsq
	mtctl	%t1, %pcoq
	ldo	4(%t1), %t1
	mtctl	%t1, %pcoq
	mfctl	CR_CURCPU, %t2
	ldw	CI_PSW(%t2), %t2
	mtctl	%t2, %ipsw
	rfi
	nop

cpu_spinup_vm:

	/*
	 * Okay, time to return to the land of C.
	 */
	b	cpu_hatch
	nop

EXIT(hw_cpu_spinup_trampoline)
#endif
    547 
    548 
    549 /*
    550  * int pdc_call(iodcio_t func,int pdc_flag, ...)
    551  */
    552 ENTRY(pdc_call,160)
    553 
    554 	mfctl	%eiem, %t1
    555 	mtctl	%r0, %eiem		/* disable interrupts */
    556 	stw	%rp, HPPA_FRAME_CRP(%sp)
    557 	copy	%arg0, %r31
    558 	copy	%sp, %ret1
    559 
    560 	ldil	L%kernelmapped, %ret0
    561 	ldw	R%kernelmapped(%ret0), %ret0
    562 	comb,=	%r0, %ret0, pdc_call_unmapped1
    563 	nop
    564 
    565 	ldil	L%pdc_stack, %ret1
    566 	ldw	R%pdc_stack(%ret1), %ret1
    567 
    568 pdc_call_unmapped1:
    569 	copy	%sp, %r1
    570 	ldo	HPPA_FRAME_SIZE+24*4(%ret1), %sp
    571 
    572 	stw	%r1, HPPA_FRAME_PSP(%sp)
    573 
    574 	/* save kernelmapped and eiem */
    575 	stw	%ret0, HPPA_FRAME_ARG(21)(%sp)
    576 	stw	%t1, HPPA_FRAME_ARG(22)(%sp)
    577 
    578 	/* copy arguments */
    579 	copy	%arg2, %arg0
    580 	copy	%arg3, %arg1
    581 	ldw	HPPA_FRAME_ARG(4)(%r1), %arg2
    582 	ldw	HPPA_FRAME_ARG(5)(%r1), %arg3
    583 	ldw	HPPA_FRAME_ARG(6)(%r1), %t1
    584 	ldw	HPPA_FRAME_ARG(7)(%r1), %t2
    585 	ldw	HPPA_FRAME_ARG(8)(%r1), %t3
    586 	ldw	HPPA_FRAME_ARG(9)(%r1), %t4
    587 	stw	%t1, HPPA_FRAME_ARG(4)(%sp)	/* XXX can use ,bc */
    588 	stw	%t2, HPPA_FRAME_ARG(5)(%sp)
    589 	stw	%t3, HPPA_FRAME_ARG(6)(%sp)
    590 	stw	%t4, HPPA_FRAME_ARG(7)(%sp)
    591 	ldw	HPPA_FRAME_ARG(10)(%r1), %t1
    592 	ldw	HPPA_FRAME_ARG(11)(%r1), %t2
    593 	ldw	HPPA_FRAME_ARG(12)(%r1), %t3
    594 	ldw	HPPA_FRAME_ARG(13)(%r1), %t4
    595 	stw	%t1, HPPA_FRAME_ARG(8)(%sp)
    596 	stw	%t2, HPPA_FRAME_ARG(9)(%sp)
    597 	stw	%t3, HPPA_FRAME_ARG(10)(%sp)
    598 	stw	%t4, HPPA_FRAME_ARG(11)(%sp)
    599 
    600 	/* save temp control regs */
    601 	mfctl	%cr24, %t1
    602 	mfctl	%cr25, %t2
    603 	mfctl	%cr26, %t3
    604 	mfctl	%cr27, %t4
    605 	stw	%t1, HPPA_FRAME_ARG(12)(%sp)	/* XXX can use ,bc */
    606 	stw	%t2, HPPA_FRAME_ARG(13)(%sp)
    607 	stw	%t3, HPPA_FRAME_ARG(14)(%sp)
    608 	stw	%t4, HPPA_FRAME_ARG(15)(%sp)
    609 	mfctl	%cr28, %t1
    610 	mfctl	%cr29, %t2
    611 	mfctl	%cr30, %t3
    612 	mfctl	%cr31, %t4
    613 	stw	%t1, HPPA_FRAME_ARG(16)(%sp)
    614 	stw	%t2, HPPA_FRAME_ARG(17)(%sp)
    615 	stw	%t3, HPPA_FRAME_ARG(18)(%sp)
    616 	stw	%t4, HPPA_FRAME_ARG(19)(%sp)
    617 
    618 	comb,=	%r0, %ret0, pdc_call_unmapped2
    619 	nop
    620 
    621 	copy	%arg0, %t4
    622 	ldi	PSW_Q, %arg0 /* (!pdc_flag && args[0] == PDC_PIM)? PSW_M:0) */
    623 	break	HPPA_BREAK_KERNEL, HPPA_BREAK_SET_PSW
    624 	nop
    625 	stw	%ret0, HPPA_FRAME_ARG(23)(%sp)
    626 	copy	%t4, %arg0
    627 
    628 pdc_call_unmapped2:
    629 	.call
    630 	blr	%r0, %rp
    631 	bv,n	(%r31)
    632 	nop
    633 
    634 	/* load temp control regs */
    635 	ldw	HPPA_FRAME_ARG(12)(%sp), %t1
    636 	ldw	HPPA_FRAME_ARG(13)(%sp), %t2
    637 	ldw	HPPA_FRAME_ARG(14)(%sp), %t3
    638 	ldw	HPPA_FRAME_ARG(15)(%sp), %t4
    639 	mtctl	%t1, %cr24
    640 	mtctl	%t2, %cr25
    641 	mtctl	%t3, %cr26
    642 	mtctl	%t4, %cr27
    643 	ldw	HPPA_FRAME_ARG(16)(%sp), %t1
    644 	ldw	HPPA_FRAME_ARG(17)(%sp), %t2
    645 	ldw	HPPA_FRAME_ARG(18)(%sp), %t3
    646 	ldw	HPPA_FRAME_ARG(19)(%sp), %t4
    647 	mtctl	%t1, %cr28
    648 	mtctl	%t2, %cr29
    649 	mtctl	%t3, %cr30
    650 	mtctl	%t4, %cr31
    651 
    652 	ldw	HPPA_FRAME_ARG(21)(%sp), %t1
    653 	ldw	HPPA_FRAME_ARG(22)(%sp), %t2
    654 	comb,=	%r0, %t1, pdc_call_unmapped3
    655 	nop
    656 
    657 	copy	%ret0, %t3
    658 	ldw	HPPA_FRAME_ARG(23)(%sp), %arg0
    659 	break	HPPA_BREAK_KERNEL, HPPA_BREAK_SET_PSW
    660 	nop
    661 	copy	%t3, %ret0
    662 
    663 pdc_call_unmapped3:
    664 	ldw	HPPA_FRAME_PSP(%sp), %sp
    665 	ldw	HPPA_FRAME_CRP(%sp), %rp
    666 	bv	%r0(%rp)
    667 	 mtctl	%t2, %eiem		/* enable interrupts */
    668 EXIT(pdc_call)
    669 
    670 /*
    671  * int splraise(int ncpl);
    672  */
    673 LEAF_ENTRY(splraise)
    674 	GET_CURCPU(%t1)
    675 	sh2addl	%arg0, %t1, %arg0
    676 	ldw	CI_IMASK(%arg0), %arg0
    677 	ldw	CI_CPL(%t1), %ret0
    678 	or	%ret0, %arg0, %arg0
    679 	bv	%r0(%rp)
    680 	stw	%arg0, CI_CPL(%t1)
    681 EXIT(splraise)
    682 
    683 /*
    684  * int spllower(int ncpl);
    685  */
    686 ENTRY(spllower,HPPA_FRAME_SIZE)
    687 	GET_CURCPU(%t1)
    688 
    689 	ldw	CI_IPENDING(%t1), %r1	; load ipending
    690 	andcm,<> %r1, %arg0, %r1	; and with complement of new cpl
    691 	bv	%r0(%rp)
    692 	stw	%arg0, CI_CPL(%t1)	; store new cpl
    693 
    694 	/*
    695 	 * Dispatch interrupts.  There's a chance
    696 	 * that we may end up not dispatching anything;
    697 	 * in between our load of ipending and this
    698 	 * disabling of interrupts, something else may
    699 	 * have come in and dispatched some or all
    700 	 * of what we previously saw in ipending.
    701 	 */
    702 	mfctl	%eiem, %arg1
    703 	mtctl	%r0, %eiem		; disable interrupts
    704 
    705 	ldw	CI_IPENDING(%t1), %r1	; load ipending
    706 	andcm,<> %r1, %arg0, %r1	; and with complement of new cpl
    707 	b,n	spllower_out		; branch if we got beaten
    708 
    709 spllower_dispatch:
    710 	/* start stack calling convention */
    711 	stw	%rp, HPPA_FRAME_CRP(%sp)
    712 	copy	%r3, %r1
    713 	copy	%sp, %r3
    714 	stw,ma	%r1, HPPA_FRAME_SIZE(%sp)
    715 
    716 	/* save ncpl and %eiem */
    717 	stw	%arg0, HPPA_FRAME_ARG(0)(%r3)
    718 	stw	%arg1, HPPA_FRAME_ARG(1)(%r3)
    719 
    720 	/* call hppa_intr_dispatch */
    721 	ldil	L%hppa_intr_dispatch, %r1
    722 	ldo	R%hppa_intr_dispatch(%r1), %r1
    723 	blr	%r0, %rp
    724 	.call
    725 	bv	%r0(%r1)
    726 	copy	%r0, %arg2		; call with a NULL frame
    727 
    728 	/* restore %eiem, we don't need ncpl */
    729 	ldw	HPPA_FRAME_ARG(1)(%r3), %arg1
    730 
    731 	/* end stack calling convention */
    732 	ldw	HPPA_FRAME_CRP(%r3), %rp
    733 	ldo	HPPA_FRAME_SIZE(%r3), %sp
    734 	ldw,mb	-HPPA_FRAME_SIZE(%sp), %r3
    735 
    736 spllower_out:
    737 	/*
    738 	 * Now return, storing %eiem in the delay slot.
    739 	 * (hppa_intr_dispatch leaves it zero).  I think
    740 	 * doing this in the delay slot is important to
    741 	 * prevent recursion, but I might be being too
    742 	 * paranoid.
    743 	 */
    744 	bv	%r0(%rp)
    745 	mtctl	%arg1, %eiem
    746 EXIT(spllower)
    747 
    748 /*
    749  * void hppa_intr_schedule(int mask);
    750  */
    751 ENTRY(hppa_intr_schedule,0)
    752 	GET_CURCPU(%t2)
    753 	mfctl	%eiem, %arg1
    754 	mtctl	%r0, %eiem			; disable interrupts
    755 	ldw	CI_IPENDING(%t2), %r1		; load ipending
    756 	or	%r1, %arg0, %r1			; or in mask
    757 	stw	%r1, CI_IPENDING(%t2)		; store ipending
    758 	ldw	CI_CPL(%t2), %arg0		; load cpl
    759 	andcm,= %r1, %arg0, %r1			; and ipending with ~cpl
    760 	b,n	spllower_dispatch		; dispatch if we can
    761 	bv	%r0(%rp)
    762 	mtctl	%arg1, %eiem
    763 EXIT(hppa_intr_schedule)
    764 
    765 /*
    766  * void cpu_die(void);
    767  */
    768 LEAF_ENTRY_NOPROFILE(cpu_die)
    769 	rsm	RESET_PSW, %r0
    770 	nop
    771 	nop
    772 	mtsp	%r0, %sr0
    773 	ldil	L%LBCAST_ADDR, %r25
    774 	ldi	CMD_RESET, %r26
    775 	stw	%r26, R%iomod_command(%r25)
    776 forever:				; Loop until bus reset takes effect.
    777 	b,n	forever
    778 	nop
    779 	nop
    780 EXIT(cpu_die)
    781 
    782 /* Include the system call and trap handling. */
    783 #include <hppa/hppa/trap.S>
    784 
    785 /* Include the userspace copyin/copyout functions. */
    786 #include <hppa/hppa/copy.S>
    787 
    788 /* Include the support functions. */
    789 #include <hppa/hppa/support.S>
    790 
    791 /*
    792  * struct lwp *
    793  * cpu_switchto(struct lwp *oldl, struct lwp *newl, bool returning)
    794  */
    795 	.align	32
    796 ENTRY(cpu_switchto,128)
    797 	/* start stack calling convention */
    798 	stw	%rp, HPPA_FRAME_CRP(%sp)
    799 	copy	%r3, %r1
    800 	copy	%sp, %r3
    801 	stwm	%r1, HPPA_FRAME_SIZE+16*4(%sp)
    802 					/* Frame marker and callee saves */
    803 	stw	%r3, HPPA_FRAME_PSP(%sp)
    804 
    805 #ifdef DIAGNOSTIC
    806 	b,n	switch_diag
    807 
    808 switch_error:
    809 	copy	%t1, %arg1
    810 	ldil	L%panic, %r1
    811 	ldil	L%Lcspstr, %arg0
    812 	ldo	R%panic(%r1), %r1
    813 	ldo	R%Lcspstr(%arg0), %arg0
    814 	.call
    815 	blr	%r0, %rp
    816 	bv,n	%r0(%r1)
    817 	nop
    818 Lcspstr:
    819 	.asciz	"cpu_switchto: 0x%08x stack/len 0x%08x"
    820 	.align	8
    821 
    822 switch_diag:
    823 	/*
    824 	 * Either we must be switching to the same LWP, or
    825 	 * the new LWP's kernel stack must be reasonable.
    826 	 */
    827 	comb,=,n %arg0, %arg1, kstack_ok
    828 
    829 	/*
    830 	 * cpu_lwp_fork sets the initial stack to a page above uarea address.
    831 	 * Check that the stack is above this value for oldl.
    832 	 */
    833 	ldw	L_PCB(%arg1), %arg2
    834 	ldw	PCB_KSP(%arg2), %t1		/* t1 for switch_error */
    835 	ldo	NBPG(%arg2), %arg2
    836 	comb,>>,n %arg2, %t1, switch_error
    837 	nop
    838 
    839 	/* make sure the stack hasn't grown too big (> USPACE) */
    840 	sub	%t1, %arg2, %t1			/* t1 for switch_error */
    841 	ldil	L%USPACE, %arg2
    842 	ldo	R%USPACE(%arg2), %arg2
    843 	comb,<<=,n %arg2, %t1, switch_error
    844 	nop
    845 kstack_ok:
    846 #endif
    847 
    848 	/*
    849 	 * save old LWP context
    850 	 *
    851 	 * arg0: old LWP (oldl)
    852 	 * arg1: new LWP (newl)
    853 	 */
    854 
    855 	ldw	L_PCB(%arg0), %t3	/* oldl pcb */
    856 	stw	%sp, PCB_KSP(%t3)
    857 	fdc	%r0(%t3)		/* flush oldl pcb  - surely fdc PCB_KSP(%t3) */
    858 
    859 	/*
    860 	 * Save the callee-save registers. We don't need to do
    861 	 * r3 here as it was done during stack calling convention.
    862 	 */
    863 	stw	%r4,   1*4(%r3)
    864 	stw	%r5,   2*4(%r3)
    865 	stw	%r6,   3*4(%r3)
    866 	stw	%r7,   4*4(%r3)
    867 	stw	%r8,   5*4(%r3)
    868 	stw	%r9,   6*4(%r3)
    869 	stw	%r10,  7*4(%r3)
    870 	stw	%r11,  8*4(%r3)
    871 	stw	%r12,  9*4(%r3)
    872 	stw	%r13, 10*4(%r3)
    873 	stw	%r14, 11*4(%r3)
    874 	stw	%r15, 12*4(%r3)
    875 	stw	%r16, 13*4(%r3)
    876 	stw	%r17, 14*4(%r3)
    877 	stw	%r18, 15*4(%r3)
    878 
    879 	/*
    880 	 * restore new LWP context
    881 	 *
    882 	 * arg0: old LWP (oldl)
    883 	 * arg1: new LWP (newl)
    884 	 */
    885 	ldw	L_MD(%arg1), %t1
    886 	ldw	L_PCB(%arg1), %t3
    887 	ldw	PCB_KSP(%t3), %sp		/* restore stack of newl */
    888 
    889 	fdc	%r0(%t3)			/* Flush newl PCB - why? */
    890 
    891 #if 0
    892 	ldw	TF_CR9(%t1), %t3		/* pmap_activate? */
    893 	mtctl	%t3, %pidr2			/* pmap_activate? */
    894 #endif
    895 	ldw	TF_CR30(%t1), %t2		/* pmap_activate? */
    896 	mtctl	%t2, CR_FPPADDR			/* pmap_activate? */
    897 
    898 	SET_CURLWP(%arg1, %t2)
    899 
    900 	ldo	-(HPPA_FRAME_SIZE+16*4)(%sp), %r3
    901 
    902 	ldw	 1*4(%r3), %r4
    903 	ldw	 2*4(%r3), %r5
    904 	ldw	 3*4(%r3), %r6
    905 	ldw	 4*4(%r3), %r7
    906 	ldw	 5*4(%r3), %r8
    907 	ldw	 6*4(%r3), %r9
    908 	ldw	 7*4(%r3), %r10
    909 	ldw	 8*4(%r3), %r11
    910 	ldw	 9*4(%r3), %r12
    911 	ldw	10*4(%r3), %r13
    912 	ldw	11*4(%r3), %r14
    913 	ldw	12*4(%r3), %r15
    914 	ldw	13*4(%r3), %r16
    915 	ldw	14*4(%r3), %r17
    916 	ldw	15*4(%r3), %r18
    917 
    918 	/*
    919 	 * Check for restartable atomic sequences (RAS)
    920 	 */
    921 	ldw	L_PROC(%arg1), %t1
    922 	ldw	P_RASLIST(%t1), %t1
    923 	comb,=,n %r0, %t1, noras
    924 
    925 	/*
    926 	 * Save some caller-saves we want to preserve.
    927 	 *
    928 	 * We save oldl (%arg0) and newl (%arg1) for the benefit of
    929 	 * lwp_trampoline() for when it calls lwp_startup().
    930 	 *
    931 	 * oldl (%arg0) is saved as it's the return value
    932 	 */
    933 	stw	%arg0, HPPA_FRAME_ARG(0)(%r3)		/* oldl */
    934 	stw	%arg1, HPPA_FRAME_ARG(1)(%r3)		/* newl */
    935 
    936 	copy	%arg1, %arg0
    937 
    938 	.import	hppa_ras, code
    939 	CALL(hppa_ras, %r1)
    940 
    941 	/* restore caller-saves */
    942 	ldw	HPPA_FRAME_ARG(1)(%r3), %arg1
    943 	ldw	HPPA_FRAME_ARG(0)(%r3), %arg0
    944 
    945 noras:
    946 
    947 	/*
    948 	 * We do have a hardware FPU.  If the LWP
    949 	 * that we just switched to has its state in the
    950 	 * FPU, enable the FPU, else disable it, so if
    951 	 * the LWP does try to use the coprocessor
    952 	 * we'll get an assist emulation trap to swap
    953 	 * states.
    954 	 */
    955 	GET_CURCPU(%t1)
    956 	mfctl	CR_CCR, %r1
    957 	mfctl	CR_FPPADDR, %t2
    958 	ldw	CI_FPU_STATE(%t1), %t1
    959 	depi	0, 25, 2, %r1		; disables the FPU
    960 	comb,<>,n %t1, %t2, 0		; nullify if LWPs different
    961 	depi	3, 25, 2, %r1		; enables the FPU
    962 	mtctl	%r1, CR_CCR
    963 
    964 switch_return:
    965 	copy	%arg0, %ret0
    966 
    967 	ldw	HPPA_FRAME_CRP(%r3), %rp
    968 	bv	0(%rp)
    969 	ldwm	-(HPPA_FRAME_SIZE+16*4)(%sp), %r3
    970 EXIT(cpu_switchto)
    971 
    972 /*
    973  * This is the first code run in a new LWP after
    974  * cpu_switchto() has switched to it for the first time.
    975  *
    976  * This happens courtesy of the setup in cpu_lwp_fork() which
    977  * arranges for cpu_switchto() to call us with a frame containing
    978  * the first kernel function to call, and its argument.
    979  *
    980  * cpu_switchto() also makes sure that %arg0 and %arg1 are (still)
    981  * oldl and newl respectively.
    982  */
    983 ENTRY_NOPROFILE(lwp_trampoline,HPPA_FRAME_SIZE)
    984 	/* no return point */
    985 	stw	%r0, HPPA_FRAME_CRP(%sp)
    986 
    987 	/* %arg0, %arg1 are still valid from cpu_switchto */
    988 	.import	lwp_startup, code
    989 	CALL(lwp_startup, %r1)
    990 
    991 	/* get trampoline func (%t3) and arg (%arg0) */
    992 	ldw	HPPA_FRAME_ARG(3)(%sp), %arg0
    993 	ldw	HPPA_FRAME_ARG(2)(%sp), %t3
    994 
    995 	/* call the first kernel function */
    996 	.call
    997 	blr	%r0, %rp
    998 	bv,n	%r0(%t3)
    999 	nop
   1000 
   1001 	/*
   1002 	 * Since the first kernel function returned,
   1003 	 * this LWP was created by the fork()
   1004 	 * syscall, which we now return from.
   1005 	 */
   1006 	GET_CURLWP(%t2)
   1007 	.call
   1008 	b	syscall_return
   1009 	ldw	L_MD(%t2), %t3
   1010 EXIT(lwp_trampoline)
   1011 
   1012 /* Include the signal code, used in compat code */
   1013 #include <hppa/hppa/sigcode.S>
   1014 
   1015 	.end
   1016