Home | History | Annotate | Line # | Download | only in riscv
      1 /* $NetBSD: locore.S,v 1.46 2025/03/02 08:14:26 skrll Exp $ */
      2 
      3 /*-
      4  * Copyright (c) 2014, 2022 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Matt Thomas of 3am Software Foundry, and by Nick Hudson.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 #include "opt_console.h"
     33 #include "opt_multiprocessor.h"
     34 #include "opt_riscv_debug.h"
     35 
     36 #include <machine/asm.h>
     37 #include "assym.h"
     38 
/*
 * BOOT_AP_STACKSIZE: size of the temporary per-AP boot stack; the space
 * is reserved immediately after bootstk in the data section below.
 */
#define BOOT_AP_STACKSIZE	1024	/* size of temporary stack for APs */
#define NBBY_SHIFT		3	/* log2(8 bits per byte) */

/*
 * PRINTS(string): print a NUL-terminated string on the early console.
 * The string is assembled inline, immediately after the call; the
 * locore_prints routine walks it via ra and returns past its 8-byte
 * aligned end (hence the .align 3 after the .asciz).
 */
#define PRINTS(string)		\
	call	locore_prints	; \
	.asciz string		; \
	.align 3		; \


#if defined(VERBOSE_INIT_RISCV)

/* Verbose variant of PRINTS; compiled out unless VERBOSE_INIT_RISCV. */
#define VPRINTS(string)		\
	call	locore_prints	; \
	.asciz string		; \
	.align 3		; \

/* Print the value of register `regno' in hex (clobbers a0). */
#define VPRINTX(regno)		\
	mv	a0, regno	; \
	call	locore_printx

/* As VPRINTX, but followed by a "\n\r" sequence. */
#define VPRINTXNL(regno)	\
	mv	a0, regno	; \
	call	locore_printxnl

/* Need to turn relaxation off for VPRINTS */
	.option norelax

#else
#define VPRINTS(string)		/* nothing */
#define VPRINTX(regno)		/* nothing */
#define VPRINTXNL(regno)	/* nothing */
#endif

#if VM_MIN_KERNEL_ADDRESS != VM_KERNEL_BASE
#error VM_MIN_KERNEL_ADDRESS assumed to match VM_KERNEL_BASE
#endif
     75 
     76 /*
      77  * Entry point.  On entry:
     78  *    a0 is hartid
     79  *    a1 is pointer to dtb (PA)
     80  */
ENTRY_NP(start)
	csrw	sie, zero		// disable interrupts
	csrw	sip, zero		// clear any pending

	li	s0, SR_FS
	csrc	sstatus, s0		// disable FP

	mv	s10, a0			// copy hartid
	mv	s11, a1			// copy dtb PA

	/* set the stack pointer for boot */
	PTR_LA	t0, _C_LABEL(bootstk)
	mv	sp, t0

	VPRINTS("\n------------\nNetBSD start\n\n")
	VPRINTS("sp:      ")
	VPRINTXNL(sp)

	VPRINTS("pc:      ")
	auipc	a0, 0
	VPRINTXNL(a0)

	VPRINTS("hart:    ")
	VPRINTXNL(s10)

	VPRINTS("dtb:     ")
	VPRINTXNL(s11)

	/*
	 * Calculate the difference between the VA and PA for start and
	 * keep in s8.  Store this in kern_vtopdiff once the MMU is on.
	 *
	 * .Lstart holds the link-time (virtual) address of `start';
	 * PTR_LA gives the run-time (physical) address.
	 */
	PTR_LA	t0, start
	PTR_L	s8, .Lstart

	sub	s8, s8, t0

	PTR_LA	s5, _C_LABEL(lwp0uspace)
	PTR_LA	s6, _C_LABEL(bootstk)

	/*
	 * Our load address is not fixed, but our VA is.  We need to construct
	 * an initial PDETAB.
	 *
	 * The space for the initial page table is included in the kernel
	 * .bss size calculation so we know the space exists.
	 */

	li	a1, 0			// NOTE(review): appears unused; a1 is
					// reloaded before clear_bss -- confirm
	PTR_LA	s2, _C_LABEL(l1_pte)
	mv	s4, s2			// last page table
#ifdef _LP64
	PTR_LA	s3, _C_LABEL(l2_pte)	// s3 = second PDE page (RV64 only)
	mv	s4, s3			// last page table
#ifdef notyet
	PTR_LA	s4, _C_LABEL(l3_pte)
#endif
#endif
	PTR_LA	s7, _C_LABEL(mmutables_end)


	// s2	L1 PDE (SV32:4MiB megapages, SV{39,48}: 2MiB megapages)
	// s3	L2 PDE (_LP64 SV39 only)
	// s4	root page table for satp (l1_pte on SV32, l2_pte on SV39)
	// s5	lwp0uspace
	// s6	bootstk
	// s7   end of memory to clear
	// s8	kern_vtopdiff (VA - PA)

	VPRINTS("l1:      ")
	VPRINTXNL(s2)
#ifdef _LP64
	VPRINTS("l2:      ")
	VPRINTXNL(s3)
#endif

	VPRINTS("uspace:  ")
	VPRINTXNL(s5)
	VPRINTS("bootstk: ")
	VPRINTXNL(s6)

	VPRINTS("vtopdiff:")
	VPRINTXNL(s8)

	VPRINTS("\n\r")

	VPRINTS("bss:     ")
	PTR_LA	a0, _C_LABEL(__bss_start)
	VPRINTX(a0)
	VPRINTS(" - ")
	VPRINTXNL(s7)

	VPRINTS("\n\r")

	// a0	start of memory to clear
	// a1	end of memory to clear
	PTR_LA	a0, _C_LABEL(__bss_start)
	mv	a1, s7

	call	clear_bss		// zero through kernel_end (inc. stack)

	li	s7, PTE_V		// page table pointer {X,W,R} = {0,0,0}

	// We allocated the kernel first PDE page so let's insert in the
	// page table.

	// Need to setup tables so that for
	// sv32 : s2
	// sv39 : s3 -> s2

#ifdef _LP64
	// Point the L2 slot covering VM_MIN_KERNEL_ADDRESS at the L1 page.
	VPRINTS("l2pde:   ")
	srli	t0, s2, (PGSHIFT - PTE_PPN_SHIFT)
	or	t0, t0, s7		// Assumes s2[11:0] == 0
#if ((VM_MIN_KERNEL_ADDRESS >> XSEGSHIFT) & (NPDEPG - 1)) * SZREG
	li	t1, ((VM_MIN_KERNEL_ADDRESS >> XSEGSHIFT) & (NPDEPG - 1)) * SZREG
	add	t1, t1, s3
	REG_S	t0, 0(t1)

	VPRINTX(t1)
#else
	REG_S	t0, 0(s3)

	VPRINTX(s3)
#endif

	VPRINTS(":  ")
	VPRINTXNL(t0)
	VPRINTS("\n\r")
#endif // _LP64

	// kernel VA: s9 = &l1_pte[VA_to_slot(VM_MIN_KERNEL_ADDRESS)]
	li	t1,  ((VM_MIN_KERNEL_ADDRESS >> SEGSHIFT) & (NPDEPG - 1)) * SZREG
	add	s9, s2, t1

#if PGSHIFT < PTE_PPN_SHIFT
#error Code assumes PGSHIFT is greater than PTE_PPN_SHIFT
#endif

	li	s5, (VM_KERNEL_SIZE >> SEGSHIFT)		// # of megapages
	li	s6, (NBSEG >> (PGSHIFT - PTE_PPN_SHIFT))	// load for ease
	li	s7, PTE_KERN | PTE_HARDWIRED | PTE_R | PTE_W | PTE_X

	//
	// Fill in the PDEs for kernel.
	// NOTE(review): the whole kernel is mapped RWX by megapage here;
	// presumably protection is refined later -- confirm in the pmap code.
	//
	PTR_LA	s0, start
	srli	s0, s0, SEGSHIFT	// round down to NBSEG, and shift in
	slli	s0, s0, (SEGSHIFT - PGSHIFT + PTE_PPN_SHIFT)	// ... to PPN
	or	s0, s0, s7
1:
	VPRINTS("kern:    ")
	VPRINTX(s9)
	VPRINTS(":  ")
	VPRINTXNL(s0)

	REG_S	s0, 0(s9)		// store PDE
	add	s0, s0, s6		// advance PA in PDE to next segment
	add	s9, s9, SZREG		// advance to next PDE slot
	addi	s5, s5, -1		// count down segment
	bnez	s5, 1b			// loop if more

	// DTB VA
	li	t1,  ((VM_KERNEL_DTB_BASE >> SEGSHIFT) & (NPDEPG - 1)) * SZREG
	add	s9, s2, t1

	li	s7, PTE_KERN | PTE_HARDWIRED | PTE_R | PTE_W

	//
	// Fill in the PDE for the DTB. Only do one - if any more are required
	// they will be mapped in later.
	//
	mv	s0, s11
	srli	s0, s0, SEGSHIFT	// round down to NBSEG, and shift in
	slli	s0, s0, (SEGSHIFT - PGSHIFT + PTE_PPN_SHIFT)	// ... to PPN
	or	s0, s0, s7

	VPRINTS("dtb:     ")
	VPRINTX(s9)
	VPRINTS(":  ")
	VPRINTXNL(s0)

	REG_S	s0, 0(s9)

#ifdef CONSADDR
	li	t1,  ((VM_KERNEL_IO_BASE >> SEGSHIFT) & (NPDEPG - 1)) * SZREG
	add	s9, s2, t1

	// Fill in the PDE for CONSADDR.
	PTR_L	t0, .Lconsaddr
	mv	s0, t0
	srli	s0, s0, SEGSHIFT	// round down to NBSEG, and shift in
	slli	s0, s0, (SEGSHIFT - PGSHIFT + PTE_PPN_SHIFT)	// ... to PPN
	or	s0, s0, s7

	VPRINTS("cons:    ")
	VPRINTX(s9)
	VPRINTS(":  ")
	VPRINTXNL(s0)

	REG_S	s0, 0(s9)
#endif

	li	a0, 'P'
	call	_C_LABEL(uartputc)

	/*
	 * Set supervisor trap vector base register to the *virtual*
	 * address of vstart.
	 */
	PTR_LA	t0, vstart
	add	t0, t0, s8
	csrw	stvec, t0

	/* Set supervisor address translation and protection register */
	srli	t1, s4, PGSHIFT
#ifdef _LP64
	li	t0, SATP_MODE_SV39
#else
	li	t0, SATP_MODE_SV32
#endif
	or	t0, t0, t1
	sfence.vma
	csrw	satp, t0

	// NOTE(review): once satp is written the (physical) PC is no longer
	// mapped, so the next fetch is expected to trap to stvec, i.e. to
	// vstart at its virtual address -- confirm this is the intended
	// transition mechanism.

	.align 2
	.global vstart
vstart:
	// MMU is on!
	csrw	sscratch, zero		// zero in sscratch to mark kernel

#ifdef CONSADDR
	add	sp, sp, s8
#endif
	li	a0, 'M'
	call	_C_LABEL(uartputc)	// uartputs doesn't use stack
	li	a0, '\n'
	call	_C_LABEL(uartputc)	// uartputs doesn't use stack
	li	a0, '\r'
	call	_C_LABEL(uartputc)	// uartputs doesn't use stack

	PTR_LA	tp, _C_LABEL(lwp0)	// put curlwp in tp

	/* Set supervisor trap vector base register */
	PTR_LA	a0, _C_LABEL(cpu_exception_handler)
	csrw	stvec, a0

	PTR_LA	t0, bootstk		// top of lwp0uspace
	PTR_S	t0, L_PCB(tp)		// set uarea of lwp (already zeroed)
	addi	sp, t0, -TF_LEN		// switch to new stack
	PTR_S	sp, L_MD_UTF(tp)	// store pointer to empty trapframe

	// Record the root page table in the kernel pmap, both as a VA
	// pointer and as a PPN (the form satp wants).
	PTR_LA	t1, _C_LABEL(kernel_pmap_store)
	add	t2, s4, s8 		// PA -> VA
	srli	t3, s4, PGSHIFT
	PTR_S	t2, PM_PDETAB(t1)	// VA of kernel PDETAB
	PTR_S	t3, PM_MD_PPN(t1)	// PPN of kernel PDETAB

	/*
	 * Store kern_vtopdiff (the difference between the physical
	 * and virtual address of the "start" symbol).
	 *
	 * XXX For some reason doing this store to the physical
	 * XXX address of kern_vtopdiff before the MMU is enabled
	 * XXX doesn't work on the AllWinner D1.
	 */
	PTR_LA	t0, _C_LABEL(kern_vtopdiff)
	PTR_S	s8, 0(t0)	/* kern_vtopdiff = start(virt) - start(phys) */

#if notyet
	mv	a0, s11			// dtb
	call	_C_LABEL(init_mmu)
#endif

	// NOTE(review): t0 = DTB VA (segment base | offset-in-segment) is
	// computed here but not obviously consumed before the calls below,
	// which pass the physical dtb address -- confirm against init_riscv.
	li	t0, VM_MIN_KERNEL_ADDRESS + VM_KERNEL_SIZE
	li	t1, NBSEG - 1
	and	t1, s11, t1
	or	t0, t0, t1

	/* Set the global pointer */
	.option push
	.option norelax
	lla	gp, __global_pointer$
	.option pop

	// Now we should ready to start initializing the kernel.
	mv	a0, s10			// hartid
	mv	a1, s11			// dtb (physical)

	li	s0, 0			// zero frame pointer
	call	_C_LABEL(init_riscv)	// do MD startup
	tail	_C_LABEL(main)		// and transfer to main
	/* No return from main */
END(start)
    370 
    371 
    372 #if defined(MULTIPROCESSOR)
    373 
/*
 * cpu_mpstart -- entry point for secondary processors (APs), started
 * via SBI.  Runs on a temporary boot stack, turns on translation,
 * spins on the riscv_cpu_mbox bitmap until released by the boot
 * processor, then hatches and enters the idle loop.
 *
 * a0 is hartid
 * a1 is the cookie from sbi_hart_start (used below as cpuindex)
 */
ENTRY(cpu_mpstart)
	mv	s10, a0			// copy hartid
	mv	s11, a1			// copy sbi_hart_start cookie

	/*
	 * s11 = cpuindex
	 */

	/* set stack pointer for boot */
	li	t1, BOOT_AP_STACKSIZE		// XXXNH do a shift
	mul	t1, s11, t1
	PTR_LA	t0, _C_LABEL(bootstk)
	/* sp = bootstk + (BOOT_AP_STACKSIZE * cpuindex) */
	add	sp, t0, t1


	/*
	 * Calculate the difference between the VA and PA for start and
	 * keep in s8.
	 */
	PTR_LA	t0, start
	PTR_L	s8, .Lstart

	sub	s8, s8, t0

	/* Root page table, already built by the boot processor. */
#ifdef _LP64
	PTR_LA	s4, _C_LABEL(l2_pte)
#else
	PTR_LA	s4, _C_LABEL(l1_pte)
#endif

	// s4 is satp address....
	// s8 is kern_vtopdiff
	//

	/* Set supervisor trap vector base register (VA of vmpstart) */
	PTR_LA	t0, vmpstart
	add	t0, t0, s8
	csrw	stvec, t0

	/* Set supervisor address translation and protection register */
	srli	t1, s4, PGSHIFT
#ifdef _LP64
	li	t0, SATP_MODE_SV39
#else
	li	t0, SATP_MODE_SV32
#endif
	or	t0, t0, t1
	sfence.vma
	csrw	satp, t0

	.align 2
	.global vmpstart
vmpstart:
	// MMU is on!
	csrw	sscratch, zero		// zero in sscratch to mark kernel

	/* Set the global pointer */
	.option push
	.option norelax
	lla	gp, __global_pointer$
	.option pop

	/* Set SP to VA */
	add	sp, sp, s8

	/*
	 * Set supervisor trap vector base register with ipi_handler and
	 * enable supervisor software interrupts so the wfi loop below can
	 * be woken by an IPI.
	 */
	PTR_LA	a0, _C_LABEL(ipi_handler)
	csrw	stvec, a0
	csrsi	sie, SIE_SSIE
	csrsi	sstatus, SR_SIE		// enable interrupts

	li	tp, 0
	mv	a0, s11
	call	_C_LABEL(cpu_init_secondary_processor)

	/* t3 = __BIT(cpuindex % (sizeof(u_long) * NBBY)) */
	li	t3, 1
	andi	t0, s11, (1U << (LONG_SCALESHIFT + NBBY_SHIFT)) - 1
	sll	t3, t3, t0

	/* t4 = &riscv_cpu_mbox[cpuindex / (sizeof(u_long) * NBBY)] */
	PTR_LA	t0, _C_LABEL(riscv_cpu_mbox)
	srli	t1, s11, LONG_SCALESHIFT + NBBY_SHIFT
	slli	t1, t1, LONG_SCALESHIFT
	add	t4, t0, t1

	/* wait for the mailbox start bit to become true */
1:
	fence	rw, r		/* matches cpu_boot_secondary_processors */
	LONG_L	t0, 0(t4)
	and	t0, t0, t3
	bne	t0, zero, 9f
	wfi
	j	1b
9:

	/* Set supervisor trap vector base register (the real handler now) */
	PTR_LA	a0, _C_LABEL(cpu_exception_handler)
	csrw	stvec, a0

	li	t0, CI_SIZE
	mul	t0, s11, t0
	PTR_LA	t1, _C_LABEL(cpu_info_store)
	add	a0, t0, t1		/* a0 = &cpu_info_store[cpuindex] */

	/*
	 * set curlwp (tp and curcpu()->ci_curlwp) now we know the
	 * idle lwp from curcpu()->ci_idlelwp
	 */
	PTR_L	tp, CI_IDLELWP(a0)	/* tp = curcpu()->ci_idlelwp */
	PTR_S	tp, CI_CURLWP(a0)	/* curlwp is idlelwp */

	/* get my stack from lwp, leaving the temporary boot stack */
	PTR_L	t2, L_PCB(tp)		/* t2 = lwp_getpcb(idlelwp) */
	li	t3, UPAGES * PAGE_SIZE
	add	t2, t2, t3
	addi	sp, t2, -TF_LEN		/* sp = pcb + USPACE - TF_LEN */

	li	s0, 0			/* trace back starts here (fp = 0) */
	PTR_L	a0, L_CPU(tp)		/* curlwp->l_cpu */
	mv	a1, s11			/* cpuindex */
	call	_C_LABEL(cpu_hatch)

	li	s0, 0			// zero frame pointer
	tail	idle_loop
	/* No return from idle_loop */
END(cpu_mpstart)
    504 
    505 
/*
 * Dead end for a hart whose id cannot be accommodated: report once on
 * the early console, then park the hart forever.
 */
toomanyharts:
	PRINTS("too many harts, or hart id doesn't exist in cpu_hart[]\n")
1:	wfi
	j	1b
    510 
/*
 * A very basic exception handler to just return when an IPI comes in during
 * AP bringup.
 *
 * Protocol: sscratch holds 0 while in the kernel, so the csrrw swap
 * should yield tp == 0; a non-zero value means the trap state is
 * inconsistent and the hart is parked at 1f.  Exceptions (scause MSB
 * clear) are unexpected here and park the hart at 2f.
 *
 * The handler address needs to have bottom two bits as zero.
 */
	.align 2

ipi_handler:
	csrrw	tp, sscratch, tp	// swap scratch and thread pointer
	bnez	tp, 1f			//   tp != 0, something went wrong.

	csrr	tp, scause		// get cause
	bgez	tp, 2f			// MSB is set if interrupt

	csrw	sip, zero		// clear all interrupts

	csrrw	tp, sscratch, zero	// restore tp, leave 0 in sscratch
	sret

1:	// bad sscratch/tp state: park forever
	wfi
	j	1b
2:	// unexpected exception (not an interrupt): park forever
	wfi
	j	2b
    537 #endif
    538 
	.align 3
/*
 * Link-time (virtual) address of `start' in a pointer-sized slot;
 * loaded with PTR_L and compared against the run-time address to
 * compute kern_vtopdiff.
 */
.Lstart:
#ifdef _LP64
	.quad	start
#else
	.word	start
#endif
    546 
    547 
#ifdef CONSADDR
	.align 3
/* Physical address of the early console device, as a pointer-sized slot. */
.Lconsaddr:
#ifdef _LP64
	.quad	CONSADDR
#else
	.word	CONSADDR
#endif
#endif
    557 
    558 
/*
 * uartputc -- early console output of the character in a0, either via
 * the platform's early putchar routine (EARLYCONS) or the legacy SBI
 * console-putchar call.  Callers in this file rely on it not using
 * the stack.
 */
ENTRY_NP(uartputc)
#ifdef EARLYCONS
	tail	___CONCAT(EARLYCONS, _platform_early_putchar)
#else
#define	SBI_LEGACY_CONSOLE_PUTCHAR	1
	li	a7, SBI_LEGACY_CONSOLE_PUTCHAR
	ecall
	ret
#endif
END(uartputc)
    569 
    570 
/*
 * uartgetc -- early console input.  Returns a character (or the SBI
 * call's result) in a0; with EARLYCONS there is no early input routine,
 * so always return -1.
 *
 * Fixes: the EARLYCONS path was missing its `ret' (control fell through
 * into the next routine), and the function had no END() marker, unlike
 * every other routine in this file.
 */
ENTRY_NP(uartgetc)
#ifdef EARLYCONS
	li	a0, -1
	ret
#else
#define	SBI_LEGACY_CONSOLE_GETCHAR	2
	li	a7, SBI_LEGACY_CONSOLE_GETCHAR
	ecall
	ret
#endif
END(uartgetc)
    580 
    581 
/*
 * clear_bss -- zero the byte range [a0, a1).  A no-op when a0 >= a1
 * (unsigned compare).  Clobbers a0; uses no stack, so it is safe to
 * call before the MMU or a real stack exists.
 */
ENTRY_NP(clear_bss)
	j	2f			// enter at the loop test
1:
	sb	zero, 0(a0)		// *p = 0
	addi	a0, a0, 1		// p++
2:
	bltu	a0, a1, 1b		// while (p < end)
	ret
END(clear_bss)
    591 
    592 
	.globl  _C_LABEL(cpu_Debugger_insn)
	.globl  _C_LABEL(cpu_Debugger_ret)

/*
 * cpu_Debugger -- enter the kernel debugger via an ebreak.  The two
 * exported labels bracket the breakpoint instruction, presumably so
 * the trap handler can recognize a debugger entry and resume at
 * cpu_Debugger_ret -- the consumer is not visible in this file.
 */
ENTRY_NP(cpu_Debugger)
cpu_Debugger_insn:
	ebreak
cpu_Debugger_ret:
	ret
END(cpu_Debugger)
    602 
/*
 * locore_prints -- print the NUL-terminated string that the caller
 * assembled immediately after its `call' instruction (see the PRINTS
 * and VPRINTS macros).  On return, ra has been advanced past the
 * string to the next 8-byte boundary, matching the `.align 3' the
 * macros emit after the `.asciz', so execution resumes after the
 * inline data.  Clobbers a0; preserves s0 via a small stack frame.
 */
ENTRY_NP(locore_prints)
	addi	sp, sp, -(SZREG * 2)	// two slots; only one is used
	REG_S	s0, (0 * SZREG)(sp)
	mv	s0, ra			// s0 = address of the inline string
1:
	lbu	a0, 0(s0)
	beqz	a0, 2f			// stop at the NUL terminator

	call	uartputc		// (clobbers ra; recomputed below)

	addi	s0, s0, 1
	j	1b
2:
	addi	s0, s0, 8	// s0 points to the null terminator
	andi	ra, s0, -8	// round down: first 8-aligned addr past the NUL

	REG_L	s0, (0 * SZREG)(sp)
	addi	sp, sp, (SZREG * 2)
	ret

END(locore_prints)
    624 
    625 
    626 #if defined(VERBOSE_INIT_RISCV)
/*
 * locore_printx -- print the value in a0 on the early console as
 * "0x"-prefixed hex, one nibble at a time from most significant to
 * least, with a '_' separator every 4 nibbles (16 bits).
 * Saves/restores everything it uses; clobbers only a0.
 */
ENTRY_NP(locore_printx)
	addi	sp, sp, -(SZREG * 4)
	REG_S	s0, (0 * SZREG)(sp)
	REG_S	s1, (1 * SZREG)(sp)
	REG_S	s2, (2 * SZREG)(sp)
	REG_S	ra, (3 * SZREG)(sp)

	mv	s1, a0		// our print value
	li	s2, 10		// digit/letter threshold

	li	a0, '0'
	call	uartputc
	li	a0, 'x'
	call	uartputc

	// Word size in bits
	li	s0, (SZREG * 8)
1:
	addi	s0, s0, -4	// nibble shift

	srl	a0, s1, s0	// extract ...
	andi	a0, a0, 0xf

	bltu	a0, s2, 2f	// 0-9 prints as digit, 10-15 as a-f
	addi	a0, a0, ('a' - '0' - 10)
2:	addi	a0, a0, '0'

	call	uartputc

	beqz	s0, 3f		// shift 0 => last nibble printed

	and	a0, s0, (16 - 1)	// on a 16-bit boundary?
	bnez	a0, 1b

	li	a0, '_'		// group separator every 4 nibbles
	call	uartputc

	j	1b

3:
	REG_L	s0, (0 * SZREG)(sp)
	REG_L	s1, (1 * SZREG)(sp)
	REG_L	s2, (2 * SZREG)(sp)
	REG_L	ra, (3 * SZREG)(sp)
	addi	sp, sp, (SZREG * 4)
	ret
END(locore_printx)
    674 
    675 
/*
 * locore_printxnl -- print the value in a0 in hex (via locore_printx)
 * followed by a "\n\r" sequence.  Clobbers a0 only.
 */
ENTRY_NP(locore_printxnl)
	addi	sp, sp, -(SZREG * 2)
	REG_S	ra, (0 * SZREG)(sp)	// only ra needs to survive the calls

	call	locore_printx
	li	a0, '\n'
	call	uartputc
	li	a0, '\r'
	call	uartputc

	REG_L	ra, (0 * SZREG)(sp)
	addi	sp, sp, (SZREG * 2)
	ret
END(locore_printxnl)
    692 #endif	/* VERBOSE_INIT_RISCV */
    693 
    694 
	.data
	.align	2
// NOTE(review): hart_boot is not referenced anywhere in this file --
// confirm its consumer before relying on it.
hart_boot:
	.word	0

	/*
	 * Allocate some memory after the kernel image for stacks and
	 * bootstrap L1PT
	 */

//	.section "_init_memory", "aw", %nobits
	.align PGSHIFT
	.global _C_LABEL(lwp0uspace)
/*
 * lwp0's uarea; bootstk labels its top and is used as the boot
 * processor's initial stack in start.
 */
_C_LABEL(lwp0uspace):
	.space	UPAGES * PAGE_SIZE
bootstk:

#ifdef MULTIPROCESSOR
	/* Temporary AP boot stacks, BOOT_AP_STACKSIZE bytes per cpuindex. */
	.space	BOOT_AP_STACKSIZE * (MAXCPUS - 1)
#endif

//	.section "_init_memory", "aw", %nobits
	.align PGSHIFT
/*
 * Bootstrap page-table pages, page-aligned: an L2 page (root on
 * _LP64/Sv39) and an L1 page.  The range up to mmutables_end is zeroed
 * by the clear_bss call in start before the tables are filled in.
 */
mmutables_start:
bootstrap_pde:
	.global _C_LABEL(bootstrap_pde)
#ifdef _LP64
	.global _C_LABEL(l2_pte)
l2_pte:
	.space PAGE_SIZE
#endif
	.global _C_LABEL(l1_pte)
l1_pte:
	.space PAGE_SIZE
mmutables_end:
    730 
    731