riscv_machdep.c revision 1.14 1 1.14 skrll /* $NetBSD: riscv_machdep.c,v 1.14 2021/05/01 06:53:08 skrll Exp $ */
2 1.12 skrll
3 1.1 matt /*-
4 1.6 ad * Copyright (c) 2014, 2019 The NetBSD Foundation, Inc.
5 1.1 matt * All rights reserved.
6 1.1 matt *
7 1.1 matt * This code is derived from software contributed to The NetBSD Foundation
8 1.1 matt * by Matt Thomas of 3am Software Foundry.
9 1.1 matt *
10 1.1 matt * Redistribution and use in source and binary forms, with or without
11 1.1 matt * modification, are permitted provided that the following conditions
12 1.1 matt * are met:
13 1.1 matt * 1. Redistributions of source code must retain the above copyright
14 1.1 matt * notice, this list of conditions and the following disclaimer.
15 1.1 matt * 2. Redistributions in binary form must reproduce the above copyright
16 1.1 matt * notice, this list of conditions and the following disclaimer in the
17 1.1 matt * documentation and/or other materials provided with the distribution.
18 1.1 matt *
19 1.1 matt * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 1.1 matt * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 1.1 matt * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 1.1 matt * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 1.1 matt * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 1.1 matt * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 1.1 matt * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 1.1 matt * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 1.1 matt * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 1.1 matt * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 1.1 matt * POSSIBILITY OF SUCH DAMAGE.
30 1.1 matt */
31 1.1 matt
32 1.1 matt #include <sys/cdefs.h>
33 1.1 matt
34 1.1 matt #include "opt_modular.h"
35 1.1 matt
36 1.14 skrll __RCSID("$NetBSD: riscv_machdep.c,v 1.14 2021/05/01 06:53:08 skrll Exp $");
37 1.1 matt
38 1.1 matt #include <sys/param.h>
39 1.1 matt #include <sys/systm.h>
40 1.1 matt #include <sys/cpu.h>
41 1.1 matt #include <sys/exec.h>
42 1.1 matt #include <sys/lwp.h>
43 1.1 matt #include <sys/kmem.h>
44 1.1 matt #include <sys/ktrace.h>
45 1.1 matt #include <sys/module.h>
46 1.1 matt #include <sys/proc.h>
47 1.1 matt #include <sys/reboot.h>
48 1.1 matt #include <sys/syscall.h>
49 1.1 matt
50 1.1 matt #include <uvm/uvm_extern.h>
51 1.1 matt
52 1.1 matt #include <riscv/locore.h>
53 1.1 matt
int cpu_printfataltraps;		/* if set, log fatal user traps */
char machine[] = MACHINE;		/* sysctl hw.machine */
char machine_arch[] = MACHINE_ARCH;	/* sysctl hw.machine_arch */

struct vm_map *phys_map;		/* submap of kernel_map for physio */

/* Register snapshot used by DDB. */
struct trapframe cpu_ddb_regs;

/* Statically allocated cpu_info for the boot CPU. */
struct cpu_info cpu_info_store = {
	.ci_cpl = IPL_HIGH,		/* start at IPL_HIGH until interrupts are configured */
	.ci_ddb_regs = &cpu_ddb_regs,
};

/* PCU (per-CPU-unit) ops table: only the FPU, and only with FPE built in. */
const pcu_ops_t * const pcu_ops_md_defs[PCU_UNIT_COUNT] = {
#ifdef FPE
	[PCU_FPU] = &pcu_fpu_ops,
#endif
};
72 1.1 matt
73 1.1 matt void
74 1.1 matt delay(unsigned long us)
75 1.1 matt {
76 1.1 matt const uint32_t cycles_per_us = curcpu()->ci_data.cpu_cc_freq / 1000000;
77 1.1 matt const uint64_t cycles = (uint64_t)us * cycles_per_us;
78 1.1 matt const uint64_t finish = riscvreg_cycle_read() + cycles;
79 1.1 matt
80 1.1 matt while (riscvreg_cycle_read() < finish) {
81 1.1 matt /* spin, baby spin */
82 1.1 matt }
83 1.1 matt }
84 1.1 matt
#ifdef MODULAR
/*
 * Push any modules loaded by the boot loader.
 * Nothing to do yet: no boot-loader module handoff exists for this
 * port, so this is an empty stub required by the MODULAR option.
 */
void
module_init_md(void)
{
}
#endif /* MODULAR */
94 1.1 matt
/*
 * Set registers on exec.
 *
 * Zero the whole trapframe, then set sp to the aligned user stack,
 * pc to the executable's entry point (low bit cleared), and the
 * status register for user mode.  a0-a2 carry the arguments for
 * the userland _start(obj, cleanup, ps_strings).
 *
 * NOTE(review): the comment previously here described the MIPS ABI
 * ($t9/PIC); that text did not apply to this RISC-V code.
 */
void
setregs(struct lwp *l, struct exec_package *pack, vaddr_t stack)
{
	struct trapframe * const tf = l->l_md.md_utf;
	struct proc * const p = l->l_proc;

	memset(tf, 0, sizeof(struct trapframe));
	tf->tf_sp = (intptr_t)stack_align(stack);
	tf->tf_pc = (intptr_t)pack->ep_entry & ~1;	/* force even PC */
#ifdef _LP64
	/* 32-bit processes on a 64-bit kernel get the 32-bit user SR. */
	tf->tf_sr = (p->p_flag & PK_32) ? SR_USER32 : SR_USER;
#else
	tf->tf_sr = SR_USER;
#endif
	// Set up arguments for _start(obj, cleanup, ps_strings)
	tf->tf_a0 = 0;			// obj
	tf->tf_a1 = 0;			// cleanup
	tf->tf_a2 = p->p_psstrp;	// ps_strings
}
121 1.1 matt
/*
 * MD fixup of the child lwp's user trapframe after fork: make the
 * fork return (a0, a1) = (0, 1) in the child, and drop any inherited
 * FPU-enabled status bit.
 */
void
md_child_return(struct lwp *l)
{
	struct trapframe * const tf = l->l_md.md_utf;

	tf->tf_a0 = 0;
	tf->tf_a1 = 1;
#ifdef FPE
	tf->tf_sr &= ~SR_EF;	/* Disable FP as we can't be them. */
#endif
}
133 1.1 matt
/*
 * Return to user mode in a posix_spawn()ed process.  No MD register
 * fixup needed; just take the normal user-return path.
 */
void
cpu_spawn_return(struct lwp *l)
{
	userret(l);
}
139 1.1 matt
140 1.10 skrll /*
141 1.1 matt * Start a new LWP
142 1.1 matt */
143 1.1 matt void
144 1.1 matt startlwp(void *arg)
145 1.1 matt {
146 1.1 matt ucontext_t * const uc = arg;
147 1.1 matt lwp_t * const l = curlwp;
148 1.1 matt int error __diagused;
149 1.1 matt
150 1.1 matt error = cpu_setmcontext(l, &uc->uc_mcontext, uc->uc_flags);
151 1.1 matt KASSERT(error == 0);
152 1.1 matt
153 1.1 matt kmem_free(uc, sizeof(ucontext_t));
154 1.1 matt userret(l);
155 1.1 matt }
156 1.1 matt
// We've worked hard to make sure struct reg and __gregset_t are the same.
// Ditto for struct fpreg and fregset_t.  The mcontext code below relies on
// this by copying between them with plain struct assignments.

CTASSERT(sizeof(struct reg) == sizeof(__gregset_t));
CTASSERT(sizeof(struct fpreg) == sizeof(__fregset_t));
162 1.1 matt
/*
 * Capture lwp l's machine context into mcp (getcontext(2), signal
 * delivery).  Always stores the general registers and TLS pointer;
 * stores FP registers only when the lwp has valid FPU state.  The
 * corresponding _UC_* bits are or'ed into *flags.
 */
void
cpu_getmcontext(struct lwp *l, mcontext_t *mcp, unsigned int *flags)
{
	const struct trapframe * const tf = l->l_md.md_utf;

	/* Save register context. */
	*(struct reg *)mcp->__gregs = tf->tf_regs;

	mcp->__private = (intptr_t)l->l_private;	/* TLS base */

	*flags |= _UC_CPU | _UC_TLSBASE;

	/* Save floating point register context, if any. */
	KASSERT(l == curlwp);
	if (fpu_valid_p(l)) {
		/*
		 * If this process is the current FP owner, dump its
		 * context to the PCB first.
		 */
		fpu_save(l);

		struct pcb * const pcb = lwp_getpcb(l);
		*(struct fpreg *)mcp->__fregs = pcb->pcb_fpregs;
		*flags |= _UC_FPU;
	}
}
189 1.1 matt
190 1.1 matt int
191 1.1 matt cpu_mcontext_validate(struct lwp *l, const mcontext_t *mcp)
192 1.1 matt {
193 1.1 matt /*
194 1.1 matt * Verify that at least the PC and SP are user addresses.
195 1.1 matt */
196 1.1 matt if ((intptr_t) mcp->__gregs[_REG_PC] < 0
197 1.1 matt || (intptr_t) mcp->__gregs[_REG_SP] < 0
198 1.1 matt || (mcp->__gregs[_REG_PC] & 1))
199 1.1 matt return EINVAL;
200 1.1 matt
201 1.1 matt return 0;
202 1.1 matt }
203 1.1 matt
/*
 * Install machine context mcp into lwp l (setcontext(2), signal
 * return, new-LWP startup).  flags selects which pieces to restore:
 * _UC_CPU (general registers, validated first), _UC_TLSBASE,
 * _UC_FPU, and the _UC_{SET,CLR}STACK signal-stack bits.
 *
 * Returns 0 on success or EINVAL if register validation fails.
 */
int
cpu_setmcontext(struct lwp *l, const mcontext_t *mcp, unsigned int flags)
{
	struct trapframe * const tf = l->l_md.md_utf;
	struct proc * const p = l->l_proc;
	const __greg_t * const gr = mcp->__gregs;
	int error;

	/* Restore register context, if any. */
	if (flags & _UC_CPU) {
		/* Validate before touching the trapframe. */
		error = cpu_mcontext_validate(l, mcp);
		if (error)
			return error;

		/* Save register context. */
		tf->tf_regs = *(const struct reg *)gr;
	}

	/* Restore the private thread context */
	if (flags & _UC_TLSBASE) {
		lwp_setprivate(l, (void *)(intptr_t)mcp->__private);
	}

	/* Restore floating point register context, if any. */
	if (flags & _UC_FPU) {
		KASSERT(l == curlwp);
		/* Tell PCU we are replacing the FPU contents. */
		fpu_replace(l);

		/*
		 * The PCB FP regs struct includes the FP CSR, so use the
		 * proper size of fpreg when copying.
		 */
		struct pcb * const pcb = lwp_getpcb(l);
		pcb->pcb_fpregs = *(const struct fpreg *)mcp->__fregs;
	}

	/* Signal-stack state is protected by the proc lock. */
	mutex_enter(p->p_lock);
	if (flags & _UC_SETSTACK)
		l->l_sigstk.ss_flags |= SS_ONSTACK;
	if (flags & _UC_CLRSTACK)
		l->l_sigstk.ss_flags &= ~SS_ONSTACK;
	mutex_exit(p->p_lock);

	return (0);
}
250 1.1 matt
/*
 * Arrange for the LWP running on ci to be rescheduled.  For a kernel
 * preemption request (RESCHED_KPREEMPT), trigger the kpreempt
 * softint locally or IPI the remote CPU; otherwise post an AST
 * locally or IPI the remote CPU so it notices the reschedule.
 */
void
cpu_need_resched(struct cpu_info *ci, struct lwp *l, int flags)
{
	KASSERT(kpreempt_disabled());

	if ((flags & RESCHED_KPREEMPT) != 0) {
#ifdef __HAVE_PREEMPTION
		if ((flags & RESCHED_REMOTE) != 0) {
			cpu_send_ipi(ci, IPI_KPREEMPT);
		} else {
			softint_trigger(SOFTINT_KPREEMPT);
		}
#endif
		return;
	}
	if ((flags & RESCHED_REMOTE) != 0) {
#ifdef MULTIPROCESSOR
		cpu_send_ipi(ci, IPI_AST);
#endif
	} else {
		l->l_md.md_astpending = 1;	/* force call to ast() */
	}
}
274 1.1 matt
275 1.1 matt void
276 1.1 matt cpu_signotify(struct lwp *l)
277 1.1 matt {
278 1.1 matt KASSERT(kpreempt_disabled());
279 1.1 matt #ifdef __HAVE_FAST_SOFTINTS
280 1.1 matt KASSERT(lwp_locked(l, NULL));
281 1.1 matt #endif
282 1.1 matt
283 1.6 ad if (l->l_cpu != curcpu()) {
284 1.6 ad #ifdef MULTIPROCESSOR
285 1.6 ad cpu_send_ipi(ci, IPI_AST);
286 1.6 ad #endif
287 1.6 ad } else {
288 1.6 ad l->l_md.md_astpending = 1; /* force call to ast() */
289 1.6 ad }
290 1.1 matt }
291 1.1 matt
/*
 * Request a profiling tick for lwp l: mark it as owing a profile
 * charge and post an AST so the charge is taken on the next return
 * to user mode.  Must run on l's own CPU.
 */
void
cpu_need_proftick(struct lwp *l)
{
	KASSERT(kpreempt_disabled());
	KASSERT(l->l_cpu == curcpu());

	l->l_pflag |= LP_OWEUPC;
	l->l_md.md_astpending = 1;	/* force call to ast() */
}
301 1.1 matt
/*
 * Reboot or halt the machine.  No reboot mechanism is implemented
 * yet, so just hang; 'how' and 'bootstr' are ignored.
 */
void
cpu_reboot(int how, char *bootstr)
{
	while (1)
		continue;	/* hang here forever */
}
308 1.1 matt
/*
 * Configure the kernel crash-dump area.  Not implemented yet.
 */
void
cpu_dumpconf(void)
{
	// TBD!!
}
314 1.1 matt
/*
 * MD startup: print the kernel banner and total/available physical
 * memory, and allocate the physio submap.
 */
void
cpu_startup(void)
{
	vaddr_t minaddr, maxaddr;
	char pbuf[9];	/* "99999 MB" */

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf("%s%s", copyright, version);
	format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
	printf("total memory = %s\n", pbuf);

	minaddr = 0;
	/*
	 * Allocate a submap for physio.
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    VM_PHYS_SIZE, 0, FALSE, NULL);

	format_bytes(pbuf, sizeof(pbuf), ptoa(uvm_availmem(false)));
	printf("avail memory = %s\n", pbuf);
}
338 1.1 matt
/*
 * Early MD initialization, called from the bootstrap code with the
 * kernel's virtual address range.
 *
 * NOTE(review): kernstart/kernend are currently unused here; only
 * the pmap is bootstrapped so far.
 */
void
init_riscv(vaddr_t kernstart, vaddr_t kernend)
{

	/* Early VM bootstrap. */
	pmap_bootstrap();
}
346