Home | History | Annotate | Line # | Download | only in riscv
riscv_machdep.c revision 1.13
      1  1.13    skrll /*	$NetBSD: riscv_machdep.c,v 1.13 2020/11/04 20:05:47 skrll Exp $	*/
      2  1.12    skrll 
      3   1.1     matt /*-
      4   1.6       ad  * Copyright (c) 2014, 2019 The NetBSD Foundation, Inc.
      5   1.1     matt  * All rights reserved.
      6   1.1     matt  *
      7   1.1     matt  * This code is derived from software contributed to The NetBSD Foundation
      8   1.1     matt  * by Matt Thomas of 3am Software Foundry.
      9   1.1     matt  *
     10   1.1     matt  * Redistribution and use in source and binary forms, with or without
     11   1.1     matt  * modification, are permitted provided that the following conditions
     12   1.1     matt  * are met:
     13   1.1     matt  * 1. Redistributions of source code must retain the above copyright
     14   1.1     matt  *    notice, this list of conditions and the following disclaimer.
     15   1.1     matt  * 2. Redistributions in binary form must reproduce the above copyright
     16   1.1     matt  *    notice, this list of conditions and the following disclaimer in the
     17   1.1     matt  *    documentation and/or other materials provided with the distribution.
     18   1.1     matt  *
     19   1.1     matt  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20   1.1     matt  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21   1.1     matt  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22   1.1     matt  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23   1.1     matt  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24   1.1     matt  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25   1.1     matt  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26   1.1     matt  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27   1.1     matt  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28   1.1     matt  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29   1.1     matt  * POSSIBILITY OF SUCH DAMAGE.
     30   1.1     matt  */
     31   1.1     matt 
     32   1.1     matt #include <sys/cdefs.h>
     33   1.1     matt 
     34   1.1     matt #include "opt_modular.h"
     35   1.1     matt 
     36  1.13    skrll __RCSID("$NetBSD: riscv_machdep.c,v 1.13 2020/11/04 20:05:47 skrll Exp $");
     37   1.1     matt 
     38   1.1     matt #include <sys/param.h>
     39   1.1     matt #include <sys/systm.h>
     40   1.1     matt #include <sys/cpu.h>
     41   1.1     matt #include <sys/exec.h>
     42   1.1     matt #include <sys/lwp.h>
     43   1.1     matt #include <sys/kmem.h>
     44   1.1     matt #include <sys/ktrace.h>
     45   1.1     matt #include <sys/module.h>
     46   1.1     matt #include <sys/proc.h>
     47   1.1     matt #include <sys/reboot.h>
     48   1.1     matt #include <sys/syscall.h>
     49   1.1     matt 
     50   1.1     matt #include <uvm/uvm_extern.h>
     51   1.1     matt 
     52   1.1     matt #include <riscv/locore.h>
     53   1.1     matt 
int cpu_printfataltraps;		/* if set, log fatal user traps to the console */
char machine[] = MACHINE;		/* exported via sysctl hw.machine */
char machine_arch[] = MACHINE_ARCH;	/* exported via sysctl hw.machine_arch */

/* Submap of kernel_map used for physio; allocated in cpu_startup(). */
struct vm_map *phys_map;

/* Register state handed to DDB for the boot CPU. */
struct trapframe cpu_ddb_regs;

/* Statically allocated cpu_info for the boot CPU. */
struct cpu_info cpu_info_store = {
	.ci_cpl = IPL_HIGH,		/* boot with all interrupts masked */
	.ci_ddb_regs = &cpu_ddb_regs,
};

/* Per-CPU unit (PCU) handlers; only the FPU is lazily switched on riscv. */
const pcu_ops_t * const pcu_ops_md_defs[PCU_UNIT_COUNT] = {
	[PCU_FPU] = &pcu_fpu_ops,
};
     70   1.1     matt 
     71   1.1     matt void
     72   1.1     matt delay(unsigned long us)
     73   1.1     matt {
     74   1.1     matt 	const uint32_t cycles_per_us = curcpu()->ci_data.cpu_cc_freq / 1000000;
     75   1.1     matt 	const uint64_t cycles = (uint64_t)us * cycles_per_us;
     76   1.1     matt 	const uint64_t finish = riscvreg_cycle_read() + cycles;
     77   1.1     matt 
     78   1.1     matt 	while (riscvreg_cycle_read() < finish) {
     79   1.1     matt 		/* spin, baby spin */
     80   1.1     matt 	}
     81   1.1     matt }
     82   1.1     matt 
#ifdef MODULAR
/*
 * Push any modules loaded by the boot loader.
 */
void
module_init_md(void)
{
	/* Nothing yet: the riscv boot path does not hand off modules. */
}
#endif /* MODULAR */
     92   1.1     matt 
/*
 * Set registers on exec.
 *
 * The trapframe is zeroed, then the stack pointer is set to the
 * (aligned) user stack and the PC to the executable's entry point.
 * The status register selects 32-bit or 64-bit user mode as needed.
 * a0-a2 carry the arguments expected by _start(obj, cleanup,
 * ps_strings).
 */
void
setregs(struct lwp *l, struct exec_package *pack, vaddr_t stack)
{
	struct trapframe * const tf = l->l_md.md_utf;
	struct proc * const p = l->l_proc;

	memset(tf, 0, sizeof(struct trapframe));
	tf->tf_sp = (intptr_t)stack_align(stack);
	tf->tf_pc = (intptr_t)pack->ep_entry & ~1;	/* clear low bit: instructions are 2-byte aligned */
#ifdef _LP64
	/* 32-bit executables on a 64-bit kernel run in 32-bit user mode. */
	tf->tf_sr = (p->p_flag & PK_32) ? SR_USER32 : SR_USER;
#else
	tf->tf_sr = SR_USER;
#endif
	// Set up arguments for _start(obj, cleanup, ps_strings)
	tf->tf_a0 = 0;			// obj
	tf->tf_a1 = 0;			// cleanup
	tf->tf_a2 = p->p_psstrp;	// ps_strings
}
    119   1.1     matt 
/*
 * Fix up the trapframe of a newly forked child so that fork()
 * returns (0, 1): a0 = 0 is the child's return value, a1 != 0
 * tells the libc stub this is the child.
 */
void
md_child_return(struct lwp *l)
{
	struct trapframe * const tf = l->l_md.md_utf;

	tf->tf_a0 = 0;
	tf->tf_a1 = 1;
#ifdef FPE
	tf->tf_sr &= ~SR_EF;		/* Disable FP; child does not own the FPU. */
#endif
}
    131   1.1     matt 
/*
 * Return to user space after posix_spawn(); just take the normal
 * return-to-user path.
 */
void
cpu_spawn_return(struct lwp *l)
{
	userret(l);
}
    137   1.1     matt 
/*
 * Start a new LWP
 *
 * Install the ucontext passed by _lwp_create(2) into the new LWP's
 * trapframe, free the kernel copy, and head out to user space.
 */
void
startlwp(void *arg)
{
	ucontext_t * const uc = arg;
	lwp_t * const l = curlwp;
	int error __diagused;

	/* Context was validated by the parent, so this cannot fail. */
	error = cpu_setmcontext(l, &uc->uc_mcontext, uc->uc_flags);
	KASSERT(error == 0);

	kmem_free(uc, sizeof(ucontext_t));
	userret(l);
}
    154   1.1     matt 
// We've worked hard to make sure struct reg and __gregset_t are the same.
// Ditto for struct fpreg and fregset_t.
// (cpu_getmcontext/cpu_setmcontext below copy one over the other wholesale,
// so any size mismatch must fail at compile time.)

CTASSERT(sizeof(struct reg) == sizeof(__gregset_t));
CTASSERT(sizeof(struct fpreg) == sizeof(__fregset_t));
    160   1.1     matt 
/*
 * Capture the current LWP's machine context into *mcp for
 * getcontext(2)/signal delivery.  Sets _UC_CPU and _UC_TLSBASE in
 * *flags, plus _UC_FPU when an FP context was saved.
 */
void
cpu_getmcontext(struct lwp *l, mcontext_t *mcp, unsigned int *flags)
{
	const struct trapframe * const tf = l->l_md.md_utf;

	/* Save register context. */
	*(struct reg *)mcp->__gregs = tf->tf_regs;

	/* TLS pointer, restored by _UC_TLSBASE on setmcontext. */
	mcp->__private = (intptr_t)l->l_private;

	*flags |= _UC_CPU | _UC_TLSBASE;

	/* Save floating point register context, if any. */
	KASSERT(l == curlwp);
	if (fpu_valid_p(l)) {
		/*
		 * If this process is the current FP owner, dump its
		 * context to the PCB first.
		 */
		fpu_save(l);

		struct pcb * const pcb = lwp_getpcb(l);
		*(struct fpreg *)mcp->__fregs = pcb->pcb_fpregs;
		*flags |= _UC_FPU;
	}
}
    187   1.1     matt 
    188   1.1     matt int
    189   1.1     matt cpu_mcontext_validate(struct lwp *l, const mcontext_t *mcp)
    190   1.1     matt {
    191   1.1     matt 	/*
    192   1.1     matt 	 * Verify that at least the PC and SP are user addresses.
    193   1.1     matt 	 */
    194   1.1     matt 	if ((intptr_t) mcp->__gregs[_REG_PC] < 0
    195   1.1     matt 	    || (intptr_t) mcp->__gregs[_REG_SP] < 0
    196   1.1     matt 	    || (mcp->__gregs[_REG_PC] & 1))
    197   1.1     matt 		return EINVAL;
    198   1.1     matt 
    199   1.1     matt 	return 0;
    200   1.1     matt }
    201   1.1     matt 
/*
 * Install the machine context *mcp into LWP l for setcontext(2) /
 * signal return.  Only the pieces selected by flags (_UC_CPU,
 * _UC_TLSBASE, _UC_FPU, _UC_SETSTACK/_UC_CLRSTACK) are restored.
 * Returns 0 on success or EINVAL if the register set fails
 * validation.
 */
int
cpu_setmcontext(struct lwp *l, const mcontext_t *mcp, unsigned int flags)
{
	struct trapframe * const tf = l->l_md.md_utf;
	struct proc * const p = l->l_proc;
	const __greg_t * const gr = mcp->__gregs;
	int error;

	/* Restore register context, if any. */
	if (flags & _UC_CPU) {
		/* Reject kernel addresses and misaligned PCs. */
		error = cpu_mcontext_validate(l, mcp);
		if (error)
			return error;

		/* Save register context. */
		tf->tf_regs = *(const struct reg *)gr;
	}

	/* Restore the private thread context */
	if (flags & _UC_TLSBASE) {
		lwp_setprivate(l, (void *)(intptr_t)mcp->__private);
	}

	/* Restore floating point register context, if any. */
	if (flags & _UC_FPU) {
		KASSERT(l == curlwp);
		/* Tell PCU we are replacing the FPU contents. */
		fpu_replace(l);

		/*
		 * The PCB FP regs struct includes the FP CSR, so use the
		 * proper size of fpreg when copying.
		 */
		struct pcb * const pcb = lwp_getpcb(l);
		pcb->pcb_fpregs = *(const struct fpreg *)mcp->__fregs;
	}

	/* Signal-stack flags are protected by the proc lock. */
	mutex_enter(p->p_lock);
	if (flags & _UC_SETSTACK)
		l->l_sigstk.ss_flags |= SS_ONSTACK;
	if (flags & _UC_CLRSTACK)
		l->l_sigstk.ss_flags &= ~SS_ONSTACK;
	mutex_exit(p->p_lock);

	return (0);
}
    248   1.1     matt 
    249   1.1     matt void
    250   1.6       ad cpu_need_resched(struct cpu_info *ci, struct lwp *l, int flags)
    251   1.1     matt {
    252   1.1     matt 	KASSERT(kpreempt_disabled());
    253   1.1     matt 
    254   1.6       ad 	if ((flags & RESCHED_KPREEMPT) != 0) {
    255   1.1     matt #ifdef __HAVE_PREEMPTION
    256   1.6       ad 		if ((flags & RESCHED_REMOTE) != 0) {
    257   1.6       ad                         cpu_send_ipi(ci, IPI_KPREEMPT);
    258   1.6       ad 		} else {
    259   1.1     matt 			softint_trigger(SOFTINT_KPREEMPT);
    260   1.1     matt                 }
    261   1.1     matt #endif
    262   1.1     matt 		return;
    263   1.1     matt 	}
    264   1.6       ad 	if ((flags & RESCHED_REMOTE) != 0) {
    265   1.1     matt #ifdef MULTIPROCESSOR
    266   1.1     matt 		cpu_send_ipi(ci, IPI_AST);
    267   1.1     matt #endif
    268   1.6       ad 	} else {
    269   1.6       ad 		l->l_md.md_astpending = 1;		/* force call to ast() */
    270   1.6       ad 	}
    271   1.1     matt }
    272   1.1     matt 
    273   1.1     matt void
    274   1.1     matt cpu_signotify(struct lwp *l)
    275   1.1     matt {
    276   1.1     matt 	KASSERT(kpreempt_disabled());
    277   1.1     matt #ifdef __HAVE_FAST_SOFTINTS
    278   1.1     matt 	KASSERT(lwp_locked(l, NULL));
    279   1.1     matt #endif
    280   1.1     matt 
    281   1.6       ad 	if (l->l_cpu != curcpu()) {
    282   1.6       ad #ifdef MULTIPROCESSOR
    283   1.6       ad 		cpu_send_ipi(ci, IPI_AST);
    284   1.6       ad #endif
    285   1.6       ad 	} else {
    286   1.6       ad 		l->l_md.md_astpending = 1; 	/* force call to ast() */
    287   1.6       ad 	}
    288   1.1     matt }
    289   1.1     matt 
/*
 * Schedule a profiling tick for the current LWP: mark it as owing a
 * profile charge and post an AST so ast() handles it on return to
 * user mode.
 */
void
cpu_need_proftick(struct lwp *l)
{
	KASSERT(kpreempt_disabled());
	KASSERT(l->l_cpu == curcpu());

	l->l_pflag |= LP_OWEUPC;
	l->l_md.md_astpending = 1;		/* force call to ast() */
}
    299   1.1     matt 
/*
 * Halt or reboot the machine.  Placeholder: the how/bootstr arguments
 * are currently ignored and we simply spin forever.  TODO: implement
 * real shutdown/reset (e.g. via SBI).
 */
void
cpu_reboot(int how, char *bootstr)
{
	for (;;) {
		/* spin forever */
	}
}
    306   1.1     matt 
/*
 * Configure the kernel crash-dump device/geometry.  Not implemented.
 */
void
cpu_dumpconf(void)
{
	// TBD!!
}
    312   1.1     matt 
/*
 * Machine-dependent startup: print the copyright banner and memory
 * totals, and carve the physio submap out of kernel_map.
 */
void
cpu_startup(void)
{
	vaddr_t minaddr, maxaddr;
	char pbuf[9];	/* "99999 MB" */

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf("%s%s", copyright, version);
	format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
	printf("total memory = %s\n", pbuf);

	minaddr = 0;
	/*
	 * Allocate a submap for physio.
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    VM_PHYS_SIZE, 0, FALSE, NULL);

	format_bytes(pbuf, sizeof(pbuf), ptoa(uvm_availmem(false)));
	printf("avail memory = %s\n", pbuf);
}
    336   1.1     matt 
/*
 * Early machine-dependent initialization, called from the locore
 * startup code.  Currently only bootstraps the pmap; the kernel
 * start/end addresses are accepted but not yet used here.
 */
void
init_riscv(vaddr_t kernstart, vaddr_t kernend)
{

	/* Early VM bootstrap. */
	pmap_bootstrap();
}
    344