/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>

#include "opt_modular.h"

__RCSID("$NetBSD: riscv_machdep.c,v 1.1 2015/03/28 16:13:56 matt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cpu.h>
#include <sys/exec.h>
#include <sys/lwp.h>
#include <sys/kmem.h>
#include <sys/ktrace.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/reboot.h>
#include <sys/syscall.h>

#include <uvm/uvm_extern.h>

#include <riscv/locore.h>
int cpu_printfataltraps;
char machine[] = MACHINE;
char machine_arch[] = MACHINE_ARCH;

struct vm_map *phys_map;

struct trapframe cpu_ddb_regs;

struct cpu_info cpu_info_store = {
	.ci_cpl = IPL_HIGH,
	.ci_ddb_regs = &cpu_ddb_regs,
};

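/*
 * pcu(9) dispatch table: the FPU is the only lazily context-switched
 * unit on this port.
 */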
const pcu_ops_t * const pcu_ops_md_defs[PCU_UNIT_COUNT] = {
	[PCU_FPU] = &pcu_fpu_ops,
};

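/*
 * Busy-wait for at least "us" microseconds by polling the cycle
 * counter.  This relies on ci_data.cpu_cc_freq holding the counter
 * frequency in Hz; at 1 GHz, for example, delay(10) spins for about
 * 10,000 cycles.
 */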
void
delay(unsigned long us)
{
	const uint32_t cycles_per_us = curcpu()->ci_data.cpu_cc_freq / 1000000;
	const uint64_t cycles = (uint64_t)us * cycles_per_us;
	const uint64_t finish = riscvreg_cycle_read() + cycles;

	while (riscvreg_cycle_read() < finish) {
		/* spin, baby spin */
	}
}

#ifdef MODULAR
/*
 * Push any modules loaded by the boot loader.
 */
void
module_init_md(void)
{
}
#endif /* MODULAR */

/*
 * Set registers on exec.
 * Clear all registers except the stack pointer and program counter.
 * sp is set to the (aligned) stack pointer passed in; pc is set to
 * the entry point given by the exec_package passed in.  a0-a2 carry
 * the arguments for the ELF entry point,
 * _start(obj, cleanup, ps_strings).
 */
void
setregs(struct lwp *l, struct exec_package *pack, vaddr_t stack)
{
	struct trapframe * const tf = l->l_md.md_utf;
	struct proc * const p = l->l_proc;

	memset(tf, 0, sizeof(struct trapframe));
	tf->tf_sp = (intptr_t)stack_align(stack);
	tf->tf_pc = (intptr_t)pack->ep_entry & ~1; // insns are 2-byte aligned
#ifdef _LP64
	tf->tf_sr = (p->p_flag & PK_32) ? SR_USER32 : SR_USER;
#else
	tf->tf_sr = SR_USER;
#endif
	// Set up arguments for _start(obj, cleanup, ps_strings)
	tf->tf_a0 = 0;			// obj
	tf->tf_a1 = 0;			// cleanup
	tf->tf_a2 = p->p_psstrp;	// ps_strings
}

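/*
 * Return path for the child side of fork(2): fix up the trapframe so
 * the child sees the (0, 1) return pair that distinguishes it from
 * the parent.
 */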
void
child_return(void *arg)
{
	struct lwp * const l = arg;
	struct trapframe * const tf = l->l_md.md_utf;

	tf->tf_a0 = 0;
	tf->tf_a1 = 1;
	tf->tf_sr &= ~SR_EF;	/* Disable FP; the child can't own it. */
	ktrsysret(SYS_fork, 0, 0);
}

void
cpu_spawn_return(struct lwp *l)
{
	userret(l);
}

/*
 * Start a new LWP
 */
void
startlwp(void *arg)
{
	ucontext_t * const uc = arg;
	lwp_t * const l = curlwp;
	int error __diagused;

	error = cpu_setmcontext(l, &uc->uc_mcontext, uc->uc_flags);
	KASSERT(error == 0);

	kmem_free(uc, sizeof(ucontext_t));
	userret(l);
}

// We've worked hard to make sure struct reg and __gregset_t are the same.
// Ditto for struct fpreg and __fregset_t.

CTASSERT(sizeof(struct reg) == sizeof(__gregset_t));
CTASSERT(sizeof(struct fpreg) == sizeof(__fregset_t));

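/*
 * Capture the lwp's CPU (and, if valid, FPU) state into an mcontext,
 * e.g. for getcontext(2) or signal delivery.
 */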
void
cpu_getmcontext(struct lwp *l, mcontext_t *mcp, unsigned int *flags)
{
	const struct trapframe * const tf = l->l_md.md_utf;

	/* Save register context. */
	*(struct reg *)mcp->__gregs = tf->tf_regs;

	mcp->__private = (intptr_t)l->l_private;

	*flags |= _UC_CPU | _UC_TLSBASE;

	/* Save floating point register context, if any. */
	KASSERT(l == curlwp);
	if (fpu_valid_p()) {
		/*
		 * If this process is the current FP owner, dump its
		 * context to the PCB first.
		 */
		fpu_save();

		struct pcb * const pcb = lwp_getpcb(l);
		*(struct fpreg *)mcp->__fregs = pcb->pcb_fpregs;
		*flags |= _UC_FPU;
	}
}

int
cpu_mcontext_validate(struct lwp *l, const mcontext_t *mcp)
{
	/*
	 * Verify that at least the PC and SP are user addresses.
	 */
	if ((intptr_t) mcp->__gregs[_REG_PC] < 0
	    || (intptr_t) mcp->__gregs[_REG_SP] < 0
	    || (mcp->__gregs[_REG_PC] & 1))
		return EINVAL;

	return 0;
}

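/*
 * Install a user-supplied mcontext, e.g. for setcontext(2) or return
 * from a signal handler; each piece of state is guarded by its _UC_*
 * flag and the CPU registers are validated before being used.
 */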
int
cpu_setmcontext(struct lwp *l, const mcontext_t *mcp, unsigned int flags)
{
	struct trapframe * const tf = l->l_md.md_utf;
	struct proc * const p = l->l_proc;
	const __greg_t * const gr = mcp->__gregs;
	int error;

	/* Restore register context, if any. */
	if (flags & _UC_CPU) {
		error = cpu_mcontext_validate(l, mcp);
		if (error)
			return error;

		/* Restore register context. */
		tf->tf_regs = *(const struct reg *)gr;
	}

	/* Restore the private thread context */
	if (flags & _UC_TLSBASE) {
		lwp_setprivate(l, (void *)(intptr_t)mcp->__private);
	}

	/* Restore floating point register context, if any. */
	if (flags & _UC_FPU) {
		KASSERT(l == curlwp);
		/* Tell PCU we are replacing the FPU contents. */
		fpu_replace();

		/*
		 * The PCB FP regs struct includes the FP CSR, so use the
		 * proper size of fpreg when copying.
		 */
		struct pcb * const pcb = lwp_getpcb(l);
		pcb->pcb_fpregs = *(const struct fpreg *)mcp->__fregs;
	}

	mutex_enter(p->p_lock);
	if (flags & _UC_SETSTACK)
		l->l_sigstk.ss_flags |= SS_ONSTACK;
	if (flags & _UC_CLRSTACK)
		l->l_sigstk.ss_flags &= ~SS_ONSTACK;
	mutex_exit(p->p_lock);

	return (0);
}

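/*
 * Ask the LWP running on "ci" to reschedule: record the request,
 * then nudge the target as the flags require, with an IPI for a
 * remote idle CPU, a kernel-preemption soft interrupt, or an AST on
 * the way back to userland.
 */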
void
cpu_need_resched(struct cpu_info *ci, int flags)
{
	struct lwp * const l = ci->ci_data.cpu_onproc;
#ifdef MULTIPROCESSOR
	struct cpu_info * const cur_ci = curcpu();
#endif

	KASSERT(kpreempt_disabled());

	ci->ci_want_resched |= flags;

	if (__predict_false((l->l_pflag & LP_INTR) != 0)) {
		/*
		 * No point doing anything, it will switch soon.
		 * Also here to prevent an assertion failure in
		 * kpreempt() due to preemption being set on a
		 * soft interrupt LWP.
		 */
		return;
	}

	if (__predict_false(l == ci->ci_data.cpu_idlelwp)) {
#ifdef MULTIPROCESSOR
		/*
		 * If the other CPU is idling, it must be waiting for an
		 * interrupt.  So give it one.
		 */
		if (__predict_false(ci != cur_ci))
			cpu_send_ipi(ci, IPI_NOP);
#endif
		return;
	}

#ifdef MULTIPROCESSOR
	atomic_or_uint(&ci->ci_want_resched, flags);
#else
	ci->ci_want_resched |= flags;
#endif

	if (flags & RESCHED_KPREEMPT) {
#ifdef __HAVE_PREEMPTION
		atomic_or_uint(&l->l_dopreempt, DOPREEMPT_ACTIVE);
		if (ci == cur_ci) {
			softint_trigger(SOFTINT_KPREEMPT);
		} else {
			cpu_send_ipi(ci, IPI_KPREEMPT);
		}
#endif
		return;
	}
	l->l_md.md_astpending = 1;	/* force call to ast() */
#ifdef MULTIPROCESSOR
	if (ci != cur_ci && (flags & RESCHED_IMMED)) {
		cpu_send_ipi(ci, IPI_AST);
	}
#endif
}

void
cpu_signotify(struct lwp *l)
{
	KASSERT(kpreempt_disabled());
#ifdef __HAVE_FAST_SOFTINTS
	KASSERT(lwp_locked(l, NULL));
#endif
	KASSERT(l->l_stat == LSONPROC || l->l_stat == LSRUN ||
	    l->l_stat == LSSTOP);

	l->l_md.md_astpending = 1;	/* force call to ast() */
}

void
cpu_need_proftick(struct lwp *l)
{
	KASSERT(kpreempt_disabled());
	KASSERT(l->l_cpu == curcpu());

	l->l_pflag |= LP_OWEUPC;
	l->l_md.md_astpending = 1;	/* force call to ast() */
}

void
cpu_set_curpri(int pri)
{
	kpreempt_disable();
	curcpu()->ci_schedstate.spc_curpriority = pri;
	kpreempt_enable();
}

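/*
 * Reboot/halt: no firmware shutdown hook is wired up at this
 * revision, so just spin forever.
 */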
void
cpu_reboot(int how, char *bootstr)
{
	for (;;) {
	}
}

void
cpu_dumpconf(void)
{
	// TBD!!
}

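/*
 * Machine-independent-visible startup: print the copyright banner
 * and memory totals, and carve the physio submap (phys_map) out of
 * kernel_map.
 */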
void
cpu_startup(void)
{
	vaddr_t minaddr, maxaddr;
	char pbuf[9];	/* "99999 MB" */

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf("%s%s", copyright, version);
	format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
	printf("total memory = %s\n", pbuf);

	minaddr = 0;
	/*
	 * Allocate a submap for physio.
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    VM_PHYS_SIZE, 0, FALSE, NULL);

	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
	printf("avail memory = %s\n", pbuf);
}

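/*
 * Early machine-dependent bootstrap entry point, handed the kernel's
 * virtual extent; still an empty stub at this revision.
 */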
void
init_riscv(vaddr_t kernstart, vaddr_t kernend)
{
}