Home | History | Annotate | Line # | Download | only in riscv
      1 /* $NetBSD: locore.S,v 1.47 2025/10/12 04:08:26 thorpej Exp $ */
      2 
      3 /*-
      4  * Copyright (c) 2014, 2022 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Matt Thomas of 3am Software Foundry, and by Nick Hudson.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 #include "opt_console.h"
     33 #include "opt_multiprocessor.h"
     34 #include "opt_riscv_debug.h"
     35 
     36 #include <machine/asm.h>
     37 #include "assym.h"
     38 
     39 #define BOOT_AP_STACKSIZE	1024	/* size of temporary stack for APs */
     40 #define NBBY_SHIFT		3	/* log2(8 bits per byte) */
     41 
/*
 * PRINTS(string): emit a call to locore_prints followed by the string
 * inline in the instruction stream.  locore_prints returns *past* the
 * string, rounded up to the next 8-byte boundary -- which is exactly
 * what the ".align 3" below restores code alignment to.
 * NOTE(review): the trailing "; \" after ".align 3" splices the next
 * (blank) line into the macro body -- harmless, but presumably
 * unintended; confirm before removing.
 */
     42 #define PRINTS(string)		\
     43 	call	locore_prints	; \
     44 	.asciz string		; \
     45 	.align 3		; \
     46 
     47 
/*
 * VPRINTS/VPRINTX/VPRINTXNL: verbose-boot diagnostics.  They expand to
 * nothing unless the kernel is built with VERBOSE_INIT_RISCV, so they
 * may be sprinkled freely through the cold-boot path.
 */
     48 #if defined(VERBOSE_INIT_RISCV)
     49 
     50 #define VPRINTS(string)		\
     51 	call	locore_prints	; \
     52 	.asciz string		; \
     53 	.align 3		; \
     54 
     55 #define VPRINTX(regno)		\
     56 	mv	a0, regno	; \
     57 	call	locore_printx
     58 
     59 #define VPRINTXNL(regno)	\
     60 	mv	a0, regno	; \
     61 	call	locore_printxnl
     62 
     63 /* Need to turn relaxation off for VPRINTS */
     64 	.option norelax
     65 
     66 #else
     67 #define VPRINTS(string)		/* nothing */
     68 #define VPRINTX(regno)		/* nothing */
     69 #define VPRINTXNL(regno)	/* nothing */
     70 #endif
     71 
/* Build-time sanity check: the PDE math below assumes these agree. */
     72 #if VM_MIN_KERNEL_ADDRESS != VM_KERNEL_BASE
     73 #error VM_MIN_KERNEL_ADDRESS assumed to match VM_KERNEL_BASE
     74 #endif
     75 
     76 /*
      77  * Entry point, where:
     78  *    a0 is hartid
     79  *    a1 is pointer to dtb (PA)
     80  */
     81 ENTRY_NP(start)
	/*
	 * Kernel boot entry, MMU off, running at the (arbitrary) physical
	 * load address:
	 *	a0 = hartid, a1 = PA of the device tree blob.
	 * They are stashed in s10/s11 immediately, since a0-a7 are
	 * clobbered by the SBI and helper calls below.
	 */
     82 	csrw	sie, zero		// disable interrupts
     83 	csrw	sip, zero		// clear any pending
     84 
     85 	li	s0, SR_FS
     86 	csrc	sstatus, s0		// disable FP
     87 
     88 	mv	s10, a0			// copy hartid
     89 	mv	s11, a1			// copy dtb PA
     90 
     91 	/* set the stack pointer for boot */
     92 	PTR_LA	t0, _C_LABEL(bootstk)
     93 	mv	sp, t0
     94 
     95 	VPRINTS("\n------------\nNetBSD start\n\n")
     96 	VPRINTS("sp:      ")
     97 	VPRINTXNL(sp)
     98 
     99 	VPRINTS("pc:      ")
    100 	auipc	a0, 0
    101 	VPRINTXNL(a0)
    102 
    103 	VPRINTS("hart:    ")
    104 	VPRINTXNL(s10)
    105 
    106 	VPRINTS("dtb:     ")
    107 	VPRINTXNL(s11)
    108 
    109 	/*
    110 	 * Calculate the difference between the VA and PA for start and
    111 	 * keep in s8.  Store this in kern_vtopdiff once the MMU is on.
    112 	 */
	// PTR_LA yields the current (physical) address of start; .Lstart
	// holds its link-time virtual address, so s8 = VA - PA.
    113 	PTR_LA	t0, start
    114 	PTR_L	s8, .Lstart
    115 
    116 	sub	s8, s8, t0
    117 
    118 	PTR_LA	s5, _C_LABEL(lwp0uspace)
    119 	PTR_LA	s6, _C_LABEL(bootstk)
    120 
    121 	/*
    122 	 * Before we construct the initial MMU tables, we need to know
    123 	 * if there are any extra PTE bits that we need to include.
    124 	 * (Grumble, mumble, T-Head XMAE).
    125 	 *
    126 	 * We'll push space for 2 PTE prototypes, and fill them with
    127 	 * zeros for now.  sp[0] is for "regular memory", sp[1] is
    128 	 * for "devices".
    129 	 */
    130 	addi	sp, sp, -(2 * SZREG)
    131 	REG_S	zero, 0(sp)
    132 	REG_S	zero, SZREG(sp)
    133 
    134 	/* Get the CPU vendor ID, which requires an SBI call. */
    135 	VPRINTS("vendor:  ")
    136 	li	a7, SBI_EID_BASE
    137 	li	a6, SBI_FID_BASE_GETMVENDORID
    138 	ecall
    139 	VPRINTXNL(a1)
    140 
    141 #ifdef _LP64
    142 	li	a0, CPU_VENDOR_THEAD
    143 	bne	a0, a1, 9f		/* not T-Head, skip */
    144 	csrr	a1, 0x5c0		/* read T-Head TX.SXSTATUS reg */
    145 	li	a0, TX_SXSTATUS_MAEE	/* is MAEE bit set? */
    146 	and	a0, a0, a1
    147 	beqz	a0, .Lpdetab		/* nope, carry on */
    148 	VPRINTS("extsn:   T-Head XMAE\r\n")
    149 	li	a0, PTE_XMAE_PMA
    150 	REG_S	a0, 0(sp)
    151 	li	a0, PTE_XMAE_IO
    152 	REG_S	a0, SZREG(sp)
    153 	j	.Lpdetab		/* done. */
    154 9:
    155 #endif /* _LP64 */
    156 
    157 	/* Next vendor quirk check goes here. */
    158 
    159 .Lpdetab:
    160 	/*
    161 	 * Our load address is not fixed, but our VA is.  We need to construct
    162 	 * an initial PDETAB.
    163 	 *
    164 	 * The space for the initial page table is included in the kernel
    165 	 * .bss size calculation so we know the space exists.
    166 	 */
    167 
    168 	li	a1, 0
    169 	PTR_LA	s2, _C_LABEL(l1_pte)
    170 	mv	s4, s2			// last page table
    171 #ifdef _LP64
    172 	PTR_LA	s3, _C_LABEL(l2_pte)	// s3 = second PDE page (RV64 only)
    173 	mv	s4, s3			// last page table
    174 #ifdef notyet
    175 	PTR_LA	s4, _C_LABEL(l3_pte)
    176 #endif
    177 #endif
    178 	PTR_LA	s7, _C_LABEL(mmutables_end)
    179 
    180 
    181 	// s2	L1 PDE (SV32:4MiB megapages, SV{39,48}: 2MiB megapages)
    182 	// s3	L2 PDE (_LP64 SV39 only)
    183 	// s5	lwp0uspace
    184 	// s6	bootstk
    185 	// s7   end of memory to clear
    186 
    187 	VPRINTS("l1:      ")
    188 	VPRINTXNL(s2)
    189 #ifdef _LP64
    190 	VPRINTS("l2:      ")
    191 	VPRINTXNL(s3)
    192 #endif
    193 
    194 	VPRINTS("uspace:  ")
    195 	VPRINTXNL(s5)
    196 	VPRINTS("bootstk: ")
    197 	VPRINTXNL(s6)
    198 
    199 	VPRINTS("vtopdiff:")
    200 	VPRINTXNL(s8)
    201 
    202 	VPRINTS("\n\r")
    203 
    204 	VPRINTS("bss:     ")
    205 	PTR_LA	a0, _C_LABEL(__bss_start)
    206 	VPRINTX(a0)
    207 	VPRINTS(" - ")
    208 	VPRINTXNL(s7)
    209 
    210 	VPRINTS("\n\r")
    211 
    212 	// a0	start of memory to clear
    213 	// a1	end of memory to clear
    214 	PTR_LA	a0, _C_LABEL(__bss_start)
    215 	mv	a1, s7
    216 
    217 	call	clear_bss		// zero through kernel_end (inc. stack)
    218 
    219 	li	s7, PTE_V		// page table pointer {X,W,R} = {0,0,0}
    220 
    221 	// We allocated the kernel first PDE page so let's insert in the
    222 	// page table.
    223 
    224 	// Need to setup tables so that for
    225 	// sv32 : s2
    226 	// sv39 : s3 -> s2
    227 
    228 #ifdef _LP64
    229 	VPRINTS("l2pde:   ")
    230 	srli	t0, s2, (PGSHIFT - PTE_PPN_SHIFT)
    231 	or	t0, t0, s7		// Assumes s2[11:0] == 0
    232 #if ((VM_MIN_KERNEL_ADDRESS >> XSEGSHIFT) & (NPDEPG - 1)) * SZREG
    233 	li	t1, ((VM_MIN_KERNEL_ADDRESS >> XSEGSHIFT) & (NPDEPG - 1)) * SZREG
    234 	add	t1, t1, s3
    235 	REG_S	t0, 0(t1)
    236 
    237 	VPRINTX(t1)
    238 #else
    239 	REG_S	t0, 0(s3)
    240 
    241 	VPRINTX(s3)
    242 #endif
    243 
    244 	VPRINTS(":  ")
    245 	VPRINTXNL(t0)
    246 	VPRINTS("\n\r")
    247 #endif // _LP64
    248 
    249 	// kernel VA
    250 	li	t1,  ((VM_MIN_KERNEL_ADDRESS >> SEGSHIFT) & (NPDEPG - 1)) * SZREG
    251 	add	s9, s2, t1
    252 
    253 #if PGSHIFT < PTE_PPN_SHIFT
    254 #error Code assumes PGSHIFT is greater than PTE_PPN_SHIFT
    255 #endif
    256 
    257 	li	s5, (VM_KERNEL_SIZE >> SEGSHIFT)		// # of megapages
    258 	li	s6, (NBSEG >> (PGSHIFT - PTE_PPN_SHIFT))	// load for ease
    259 	li	s7, PTE_KERN | PTE_HARDWIRED | PTE_R | PTE_W | PTE_X
	// Fold in the "regular memory" PTE prototype pushed above
	// (non-zero only for vendor quirks like T-Head XMAE).
    260 	REG_L	a0, 0(sp)
    261 	or	s7, s7, a0
    262 
    263 	//
    264 	// Fill in the PDEs for kernel.
    265 	//
    266 	PTR_LA	s0, start
    267 	srli	s0, s0, SEGSHIFT	// round down to NBSEG, and shift in
    268 	slli	s0, s0, (SEGSHIFT - PGSHIFT + PTE_PPN_SHIFT)	// ... to PPN
    269 	or	s0, s0, s7
    270 1:
    271 	VPRINTS("kern:    ")
    272 	VPRINTX(s9)
    273 	VPRINTS(":  ")
    274 	VPRINTXNL(s0)
    275 
    276 	REG_S	s0, 0(s9)		// store PDE
    277 	add	s0, s0, s6		// advance PA in PDE to next segment
    278 	add	s9, s9, SZREG		// advance to next PDE slot
    279 	addi	s5, s5, -1		// count down segment
    280 	bnez	s5, 1b			// loop if more
    281 
    282 	// DTB VA
    283 	li	t1,  ((VM_KERNEL_DTB_BASE >> SEGSHIFT) & (NPDEPG - 1)) * SZREG
    284 	add	s9, s2, t1
    285 
    286 	li	s7, PTE_KERN | PTE_HARDWIRED | PTE_R | PTE_W
    287 	REG_L	a0, 0(sp)
    288 	or	s7, s7, a0
    289 
    290 	//
    291 	// Fill in the PDE for the DTB. Only do one - if any more are required
    292 	// they will be mapped in later.
    293 	//
    294 	mv	s0, s11
    295 	srli	s0, s0, SEGSHIFT	// round down to NBSEG, and shift in
    296 	slli	s0, s0, (SEGSHIFT - PGSHIFT + PTE_PPN_SHIFT)	// ... to PPN
    297 	or	s0, s0, s7
    298 
    299 	VPRINTS("dtb:     ")
    300 	VPRINTX(s9)
    301 	VPRINTS(":  ")
    302 	VPRINTXNL(s0)
    303 
    304 	REG_S	s0, 0(s9)
    305 
    306 #ifdef CONSADDR
	// Early console device mapping uses the "devices" PTE prototype
	// (sp[1]) rather than the "regular memory" one.
    307 	li	s7, PTE_KERN | PTE_HARDWIRED | PTE_R | PTE_W
    308 	REG_L	a0, SZREG(sp)
    309 	or	s7, s7, a0
    310 	li	t1,  ((VM_KERNEL_IO_BASE >> SEGSHIFT) & (NPDEPG - 1)) * SZREG
    311 	add	s9, s2, t1
    312 
    313 	// Fill in the PDE for CONSADDR.
    314 	PTR_L	t0, .Lconsaddr
    315 	mv	s0, t0
    316 	srli	s0, s0, SEGSHIFT	// round down to NBSEG, and shift in
    317 	slli	s0, s0, (SEGSHIFT - PGSHIFT + PTE_PPN_SHIFT)	// ... to PPN
    318 	or	s0, s0, s7
    319 
    320 	VPRINTS("cons:    ")
    321 	VPRINTX(s9)
    322 	VPRINTS(":  ")
    323 	VPRINTXNL(s0)
    324 
    325 	REG_S	s0, 0(s9)
    326 #endif
    327 
    328 	/* Pop the PTE prototypes pushed way up above. */
    329 	addi	sp, sp, (2 * SZREG)
    330 
    331 	li	a0, 'P'
    332 	call	_C_LABEL(uartputc)
    333 
	/*
	 * Enable translation.  stvec is pointed at the *virtual* address
	 * of vstart (PA + s8), so the first instruction fetch after the
	 * satp write -- the PC is still a physical, now-unmapped address
	 * -- traps straight into vstart at its proper VA.
	 */
    334 	/* Set supervisor trap vector base register */
    335 	PTR_LA	t0, vstart
    336 	add	t0, t0, s8
    337 	csrw	stvec, t0
    338 
    339 	/* Set supervisor address translation and protection register */
    340 	srli	t1, s4, PGSHIFT
    341 #ifdef _LP64
    342 	li	t0, SATP_MODE_SV39
    343 #else
    344 	li	t0, SATP_MODE_SV32
    345 #endif
    346 	or	t0, t0, t1
    347 	sfence.vma
    348 	csrw	satp, t0
    349 
    350 	.align 2
    351 	.global vstart
    352 vstart:
    353 	// MMU is on!
    354 	csrw	sscratch, zero		// zero in sscratch to mark kernel
    355 
    356 #ifdef CONSADDR
    357 	add	sp, sp, s8
    358 #endif
    359 	li	a0, 'M'
    360 	call	_C_LABEL(uartputc)	// uartputc doesn't use stack
    361 	li	a0, '\n'
    362 	call	_C_LABEL(uartputc)	// uartputc doesn't use stack
    363 	li	a0, '\r'
    364 	call	_C_LABEL(uartputc)	// uartputc doesn't use stack
    365 
    366 	PTR_LA	tp, _C_LABEL(lwp0)	// put curlwp in tp
    367 
    368 	/* Set supervisor trap vector base register */
    369 	PTR_LA	a0, _C_LABEL(cpu_exception_handler)
    370 	csrw	stvec, a0
    371 
    372 	PTR_LA	t0, bootstk		// top of lwp0uspace
    373 	PTR_S	t0, L_PCB(tp)		// set uarea of lwp (already zeroed)
    374 	addi	sp, t0, -TF_LEN		// switch to new stack
    375 	PTR_S	sp, L_MD_UTF(tp)	// store pointer to empty trapframe
    376 
    377 	PTR_LA	t1, _C_LABEL(kernel_pmap_store)
    378 	add	t2, s4, s8 		// PA -> VA
    379 	srli	t3, s4, PGSHIFT
    380 	PTR_S	t2, PM_PDETAB(t1)	// VA of kernel PDETAB
    381 	PTR_S	t3, PM_MD_PPN(t1)	// PPN of kernel PDETAB
    382 
    383 	/*
    384 	 * Store kern_vtopdiff (the difference between the physical
    385 	 * and virtual address of the "start" symbol).
    386 	 *
    387 	 * XXX For some reason doing this store to the physical
    388 	 * XXX address of kern_vtopdiff before the MMU is enabled
    389 	 * XXX doesn't work on the AllWinner D1.
    390 	 */
    391 	PTR_LA	t0, _C_LABEL(kern_vtopdiff)
    392 	PTR_S	s8, 0(t0)	/* kern_vtopdiff = start(virt) - start(phys) */
    393 
	// NOTE(review): "#if notyet" here but "#ifdef notyet" earlier in
	// this function -- both keep the code disabled, but the spelling
	// is inconsistent.
    394 #if notyet
    395 	mv	a0, s11			// dtb
    396 	call	_C_LABEL(init_mmu)
    397 #endif
    398 
	// NOTE(review): t0 computed below (looks like the DTB VA: segment
	// base plus the DTB's offset within its segment) is never used
	// before init_riscv is called -- dead code, or a lost "mv"?
	// Confirm intent.
    399 	li	t0, VM_MIN_KERNEL_ADDRESS + VM_KERNEL_SIZE
    400 	li	t1, NBSEG - 1
    401 	and	t1, s11, t1
    402 	or	t0, t0, t1
    403 
    404 	/* Set the global pointer */
    405 	.option push
    406 	.option norelax
    407 	lla	gp, __global_pointer$
    408 	.option pop
    409 
    410 	// Now we should be ready to start initializing the kernel.
    411 	mv	a0, s10			// hartid
    412 	mv	a1, s11			// dtb (physical)
    413 
    414 	li	s0, 0			// zero frame pointer
    415 	call	_C_LABEL(init_riscv)	// do MD startup
    416 	tail	_C_LABEL(main)		// and transfer to main
    417 	/* No return from main */
    418 END(start)
    419 
    420 
    421 #if defined(MULTIPROCESSOR)
    422 
    423 // a0 is hartid
    424 // a1 is the cookie from sbi_hart_start
    425 ENTRY(cpu_mpstart)
	/*
	 * Secondary (AP) hart entry, started from the boot hart via
	 * sbi_hart_start: MMU off, running at the physical load address.
	 *	a0 = hartid
	 *	a1 = opaque start cookie, which is this hart's cpuindex
	 */
    426 	mv	s10, a0			// copy hartid
    427 	mv	s11, a1			// copy sbi_hart_start cookie
    428 
    429 	/*
    430 	 * s11 = cpuindex
    431 	 */
    432 
    433 	/* set stack pointer for boot */
    434 	li	t1, BOOT_AP_STACKSIZE		// XXXNH do a shift
    435 	mul	t1, s11, t1
    436 	PTR_LA	t0, _C_LABEL(bootstk)
    437 	/* sp = bootstk + (BOOT_AP_STACKSIZE * cpuindex) */
    438 	add	sp, t0, t1
    439 
    440 
    441 	/*
    442 	 * Calculate the difference between the VA and PA for start and
    443 	 * keep in s8.
    444 	 */
    445 	PTR_LA	t0, start
    446 	PTR_L	s8, .Lstart
    447 
    448 	sub	s8, s8, t0
    449 
	// Reuse the bootstrap page tables already built by the boot hart.
    450 #ifdef _LP64
    451 	PTR_LA	s4, _C_LABEL(l2_pte)
    452 #else
    453 	PTR_LA	s4, _C_LABEL(l1_pte)
    454 #endif
    455 
    456 	// s4 is satp address....
    457 	// s8 is kern_vtopdiff
    458 	//
    459 
	/*
	 * Same trick as the boot hart: stvec = VA of vmpstart, so the
	 * trap taken on the first post-satp instruction fetch (PC still
	 * physical) lands in vmpstart at its mapped VA.
	 */
    460 	/* Set supervisor trap vector base register */
    461 	PTR_LA	t0, vmpstart
    462 	add	t0, t0, s8
    463 	csrw	stvec, t0
    464 
    465 	/* Set supervisor address translation and protection register */
    466 	srli	t1, s4, PGSHIFT
    467 #ifdef _LP64
    468 	li	t0, SATP_MODE_SV39
    469 #else
    470 	li	t0, SATP_MODE_SV32
    471 #endif
    472 	or	t0, t0, t1
    473 	sfence.vma
    474 	csrw	satp, t0
    475 
    476 	.align 2
    477 	.global vmpstart
    478 vmpstart:
    479 	// MMU is on!
    480 	csrw	sscratch, zero		// zero in sscratch to mark kernel
    481 
    482 	/* Set the global pointer */
    483 	.option push
    484 	.option norelax
    485 	lla	gp, __global_pointer$
    486 	.option pop
    487 
    488 	/* Set SP to VA */
    489 	add	sp, sp, s8
    490 
	// Take software-interrupt IPIs via the minimal ipi_handler below
	// until this hart is released from the mailbox wait.
    491 	/* Set supervisor trap vector base register with ipi_handler */
    492 	PTR_LA	a0, _C_LABEL(ipi_handler)
    493 	csrw	stvec, a0
    494 	csrsi	sie, SIE_SSIE
    495 	csrsi	sstatus, SR_SIE		// enable interrupts
    496 
    497 	li	tp, 0
    498 	mv	a0, s11
    499 	call	_C_LABEL(cpu_init_secondary_processor)
    500 
    501 	/* t3 = __BIT(cpuindex % (sizeof(u_long) * NBBY)) */
    502 	li	t3, 1
    503 	andi	t0, s11, (1U << (LONG_SCALESHIFT + NBBY_SHIFT)) - 1
    504 	sll	t3, t3, t0
    505 
    506 	/* t4 = &riscv_cpu_mbox[cpuindex / (sizeof(u_long) * NBBY)] */
    507 	PTR_LA	t0, _C_LABEL(riscv_cpu_mbox)
    508 	srli	t1, s11, LONG_SCALESHIFT + NBBY_SHIFT
    509 	slli	t1, t1, LONG_SCALESHIFT
    510 	add	t4, t0, t1
    511 
    512 	/* wait for the mailbox start bit to become true */
    513 1:
    514 	fence	rw, r		/* matches cpu_boot_secondary_processors */
    515 	LONG_L	t0, 0(t4)
    516 	and	t0, t0, t3
    517 	bne	t0, zero, 9f
    518 	wfi
    519 	j	1b
    520 9:
    521 
    522 	/* Set supervisor trap vector base register */
    523 	PTR_LA	a0, _C_LABEL(cpu_exception_handler)
    524 	csrw	stvec, a0
    525 
    526 	li	t0, CI_SIZE
    527 	mul	t0, s11, t0
    528 	PTR_LA	t1, _C_LABEL(cpu_info_store)
    529 	add	a0, t0, t1		/* a0 = &cpu_info_store[cpuindex] */
    530 
    531 	/*
    532 	 * set curlwp (tp and curcpu()->ci_curlwp) now we know the
    533 	 * idle lwp from curcpu()->ci_idlelwp
    534 	 */
    535 	PTR_L	tp, CI_IDLELWP(a0)	/* tp = curcpu()->ci_idlelwp */
    536 	PTR_S	tp, CI_CURLWP(a0)	/* curlwp is idlelwp */
    537 
    538 	/* get my stack from lwp */
    539 	PTR_L	t2, L_PCB(tp)		/* t2 = lwp_getpcb(idlelwp) */
    540 	li	t3, UPAGES * PAGE_SIZE
    541 	add	t2, t2, t3
    542 	addi	sp, t2, -TF_LEN		/* sp = pcb + USPACE - TF_LEN */
    543 
    544 	li	s0, 0			/* trace back starts here (fp = 0) */
    545 	PTR_L	a0, L_CPU(tp)		/* curlwp->l_cpu */
    546 	mv	a1, s11			/* cpuindex */
    547 	call	_C_LABEL(cpu_hatch)
    548 
    549 	li	s0, 0			// zero frame pointer
    550 	tail	idle_loop
    551 	/* No return from idle_loop */
    552 END(cpu_mpstart)
    553 
    554 
/*
 * Park an unusable hart: print a diagnostic once and spin in wfi
 * forever.  NOTE(review): not referenced from this file -- presumably
 * reached from the C side of AP bringup; confirm against callers.
 */
    555 toomanyharts:
    556 	PRINTS("too many harts, or hart id doesn't exist in cpu_hart[]\n")
    557 1:	wfi
    558 	j	1b
    559 
    560 /*
    561  * A very basic exception handler to just return when an IPI comes in during
    562  * AP bringup.
    563  *
    564  * The handler address needs to have bottom two bits as zero.
    565  */
    566 	.align 2
    567 
    568 ipi_handler:
	// Protocol: sscratch holds 0 while in the kernel.  Swapping it
	// with tp should therefore produce 0; anything else means the
	// trap came from an unexpected context.
    569 	csrrw	tp, sscratch, tp	// swap scratch and thread pointer
    570 	bnez	tp, 1f			//   tp != 0, something went wrong.
    571 
    572 	csrr	tp, scause		// get cause
    573 	bgez	tp, 2f			// MSB is set if interrupt
    574 
    575 	csrw	sip, zero		// clear all interrupts
    576 
    577 	csrrw	tp, sscratch, zero	// get back our thread pointer
    578 	sret
    579 
	// Trap taken with non-zero sscratch: park the hart here.
    580 1:
    581 	wfi
    582 	j	1b
	// Synchronous exception (scause MSB clear), not an IPI: park.
    583 2:
    584 	wfi
    585 	j	2b
    586 #endif
    587 
	/*
	 * Link-time (virtual) address of "start".  Loaded with PTR_L
	 * while still running at the physical load address, it lets the
	 * entry code compute kern_vtopdiff = VA - PA.
	 */
    588 	.align 3
    589 .Lstart:
    590 #ifdef _LP64
    591 	.quad	start
    592 #else
    593 	.word	start
    594 #endif
    595 
    596 
	/* Physical address of the early console device (kernel config). */
    597 #ifdef CONSADDR
    598 	.align 3
    599 .Lconsaddr:
    600 #ifdef _LP64
    601 	.quad	CONSADDR
    602 #else
    603 	.word	CONSADDR
    604 #endif
    605 #endif
    606 
    607 
/*
 * uartputc(a0 = char): early console output, usable before the stack
 * and VA environment are fully set up (callers above rely on it not
 * using the stack).  With EARLYCONS it tail-calls the platform's early
 * putchar; otherwise it uses the SBI legacy console putchar (EID 1).
 */
    608 ENTRY_NP(uartputc)
    609 #ifdef EARLYCONS
    610 	tail	___CONCAT(EARLYCONS, _platform_early_putchar)
    611 #else
    612 #define	SBI_LEGACY_CONSOLE_PUTCHAR	1
    613 	li	a7, SBI_LEGACY_CONSOLE_PUTCHAR
    614 	ecall
    615 	ret
    616 #endif
    617 END(uartputc)
    618 
    619 
/*
 * uartgetc(): early console input.  Returns the next character in a0,
 * or -1 if none is available.  With EARLYCONS there is no early input
 * path, so always return -1; otherwise use the SBI legacy console
 * getchar call (EID 2).
 */
    620 ENTRY_NP(uartgetc)
    621 #ifdef EARLYCONS
    622 	li	a0, -1
	ret			// FIX: was missing; fell through past the function
    623 #else
    624 #define	SBI_LEGACY_CONSOLE_GETCHAR	2
    625 	li	a7, SBI_LEGACY_CONSOLE_GETCHAR
    626 	ecall
    627 	ret
    628 #endif
END(uartgetc)			// FIX: END() was missing (symbol size)
    629 
    630 
/*
 * clear_bss(a0 = start, a1 = end): zero every byte in [a0, a1).
 * A no-op when a0 >= a1.  On return a0 == a1 (unchanged for an empty
 * range); a1 and all other registers are preserved; no stack is used.
 */
ENTRY_NP(clear_bss)
	j	2f			// check bounds before the first store
1:
	sb	zero, 0(a0)		// *a0++ = 0
	addi	a0, a0, 1
2:
	bltu	a0, a1, 1b		// while (a0 < a1)
	ret
END(clear_bss)
    640 
    641 
/*
 * cpu_Debugger(): drop into the kernel debugger via ebreak.  The
 * cpu_Debugger_insn/cpu_Debugger_ret labels bracket the breakpoint,
 * exported presumably so the trap handler can recognize this specific
 * ebreak and resume at cpu_Debugger_ret -- confirm against the
 * exception code (not visible in this file).
 */
    642 	.globl  _C_LABEL(cpu_Debugger_insn)
    643 	.globl  _C_LABEL(cpu_Debugger_ret)
    644 
    645 ENTRY_NP(cpu_Debugger)
    646 cpu_Debugger_insn:
    647 	ebreak
    648 cpu_Debugger_ret:
    649 	ret
    650 END(cpu_Debugger)
    651 
/*
 * locore_prints: print the NUL-terminated string that the caller
 * placed inline, immediately after its "call" instruction (see the
 * PRINTS/VPRINTS macros).  On entry ra therefore points at the string.
 * On return, ra has been advanced past the terminator and rounded up
 * to the next 8-byte boundary -- matching the ".align 3" the macros
 * emit -- so execution resumes at the instruction after the data.
 * Clobbers a0 and (deliberately) ra; saves/restores s0.
 */
    652 ENTRY_NP(locore_prints)
    653 	addi	sp, sp, -(SZREG * 2)
    654 	REG_S	s0, (0 * SZREG)(sp)
    655 	mv	s0, ra
    656 1:
    657 	lbu	a0, 0(s0)
    658 	beqz	a0, 2f
    659 
    660 	call	uartputc
    661 
    662 	addi	s0, s0, 1
    663 	j	1b
    664 2:
	// ra = (nul + 8) & ~7: first 8-aligned address past the NUL.
    665 	addi	s0, s0, 8	// s0 points to the null terminator
    666 	andi	ra, s0, -8
    667 
    668 	REG_L	s0, (0 * SZREG)(sp)
    669 	addi	sp, sp, (SZREG * 2)
    670 	ret
    671 
    672 END(locore_prints)
    673 
    674 
    675 #if defined(VERBOSE_INIT_RISCV)
/*
 * locore_printx(a0): print a0 as full-width, zero-padded hex with a
 * "0x" prefix and a '_' separator every 16 bits (4 nibbles), using
 * uartputc.  Clobbers a0; saves/restores s0-s2 and ra.
 */
    676 ENTRY_NP(locore_printx)
    677 	addi	sp, sp, -(SZREG * 4)
    678 	REG_S	s0, (0 * SZREG)(sp)
    679 	REG_S	s1, (1 * SZREG)(sp)
    680 	REG_S	s2, (2 * SZREG)(sp)
    681 	REG_S	ra, (3 * SZREG)(sp)
    682 
    683 	mv	s1, a0		// our print value
    684 	li	s2, 10
    685 
    686 	li	a0, '0'
    687 	call	uartputc
    688 	li	a0, 'x'
    689 	call	uartputc
    690 
    691 	// Word size in bits
    692 	li	s0, (SZREG * 8)
	// s0 counts down 4 bits per iteration; it is the shift for the
	// nibble currently being printed (most-significant first).
    693 1:
    694 	addi	s0, s0, -4	// nibble shift
    695 
    696 	srl	a0, s1, s0	// extract ...
    697 	andi	a0, a0, 0xf
    698 
	// Map 0-9 -> '0'-'9', 10-15 -> 'a'-'f'.
    699 	bltu	a0, s2, 2f
    700 	addi	a0, a0, ('a' - '0' - 10)
    701 2:	addi	a0, a0, '0'
    702 
    703 	call	uartputc
    704 
    705 	beqz	s0, 3f
    706 
	// Emit '_' between 16-bit groups (when the remaining shift is a
	// multiple of 16 but not zero).
    707 	and	a0, s0, (16 - 1)
    708 	bnez	a0, 1b
    709 
    710 	li	a0, '_'
    711 	call	uartputc
    712 
    713 	j	1b
    714 
    715 3:
    716 	REG_L	s0, (0 * SZREG)(sp)
    717 	REG_L	s1, (1 * SZREG)(sp)
    718 	REG_L	s2, (2 * SZREG)(sp)
    719 	REG_L	ra, (3 * SZREG)(sp)
    720 	addi	sp, sp, (SZREG * 4)
    721 	ret
    722 END(locore_printx)
    723 
    724 
/*
 * locore_printxnl(a0): print a0 in hex (via locore_printx) followed by
 * CRLF.  Clobbers a0; everything else follows locore_printx.
 */
ENTRY_NP(locore_printxnl)
	addi	sp, sp, -(SZREG * 2)
	REG_S	ra, (0 * SZREG)(sp)

	call	locore_printx

	li	a0, '\n'
	call	uartputc
	li	a0, '\r'
	call	uartputc

	REG_L	ra, (0 * SZREG)(sp)
	addi	sp, sp, (SZREG * 2)
	ret
END(locore_printxnl)
    741 #endif	/* VERBOSE_INIT_RISCV */
    742 
    743 
    744 	.data
    745 	.align	2
	// NOTE(review): hart_boot appears unreferenced in this file;
	// confirm whether it is still used elsewhere.
    746 hart_boot:
    747 	.word	0
    748 
    749 	/*
    750 	 * Allocate some memory after the kernel image for stacks and
    751 	 * bootstrap L1PT
    752 	 */
    753 
    754 //	.section "_init_memory", "aw", %nobits
    755 	.align PGSHIFT
    756 	.global _C_LABEL(lwp0uspace)
	// lwp0's uarea; "bootstk" labels its top, so the boot hart's
	// stack grows down into it.  AP temporary stacks are carved out
	// above bootstk, BOOT_AP_STACKSIZE bytes per cpuindex.
    757 _C_LABEL(lwp0uspace):
    758 	.space	UPAGES * PAGE_SIZE
    759 bootstk:
    760 
    761 #ifdef MULTIPROCESSOR
    762 	.space	BOOT_AP_STACKSIZE * (MAXCPUS - 1)
    763 #endif
    764 
	// Bootstrap page tables.  This space falls within __bss_start..
	// mmutables_end and is zeroed by the clear_bss call in start
	// before the PDEs are filled in.
    765 //	.section "_init_memory", "aw", %nobits
    766 	.align PGSHIFT
    767 mmutables_start:
    768 bootstrap_pde:
    769 	.global _C_LABEL(bootstrap_pde)
    770 #ifdef _LP64
    771 	.global _C_LABEL(l2_pte)
    772 l2_pte:
    773 	.space PAGE_SIZE
    774 #endif
    775 	.global _C_LABEL(l1_pte)
    776 l1_pte:
    777 	.space PAGE_SIZE
    778 mmutables_end:
    779 
    780