riscv_machdep.c revision 1.1 1 /*-
2 * Copyright (c) 2014 The NetBSD Foundation, Inc.
3 * All rights reserved.
4 *
5 * This code is derived from software contributed to The NetBSD Foundation
6 * by Matt Thomas of 3am Software Foundry.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
18 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
19 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
20 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
21 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27 * POSSIBILITY OF SUCH DAMAGE.
28 */
29
30 #include <sys/cdefs.h>
31
32 #include "opt_modular.h"
33
34 __RCSID("$NetBSD: riscv_machdep.c,v 1.1 2015/03/28 16:13:56 matt Exp $");
35
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/cpu.h>
39 #include <sys/exec.h>
40 #include <sys/lwp.h>
41 #include <sys/kmem.h>
42 #include <sys/ktrace.h>
43 #include <sys/module.h>
44 #include <sys/proc.h>
45 #include <sys/reboot.h>
46 #include <sys/syscall.h>
47
48 #include <uvm/uvm_extern.h>
49
50 #include <riscv/locore.h>
51
/*
 * NOTE(review): presumably makes the trap handler print fatal user
 * traps when nonzero -- consumed elsewhere (trap.c?); confirm.
 */
int cpu_printfataltraps;
/* Machine and architecture name strings (from MACHINE/MACHINE_ARCH). */
char machine[] = MACHINE;
char machine_arch[] = MACHINE_ARCH;

/* Submap of kernel_map used for physio; created in cpu_startup(). */
struct vm_map *phys_map;

/* Register save area referenced by the boot CPU's cpu_info for DDB. */
struct trapframe cpu_ddb_regs;

/* Statically allocated cpu_info for the boot CPU; starts at IPL_HIGH. */
struct cpu_info cpu_info_store = {
	.ci_cpl = IPL_HIGH,
	.ci_ddb_regs = &cpu_ddb_regs,
};

/* Machine-dependent PCU (per-CPU unit) handlers: only the FPU here. */
const pcu_ops_t * const pcu_ops_md_defs[PCU_UNIT_COUNT] = {
	[PCU_FPU] = &pcu_fpu_ops,
};
68
69 void
70 delay(unsigned long us)
71 {
72 const uint32_t cycles_per_us = curcpu()->ci_data.cpu_cc_freq / 1000000;
73 const uint64_t cycles = (uint64_t)us * cycles_per_us;
74 const uint64_t finish = riscvreg_cycle_read() + cycles;
75
76 while (riscvreg_cycle_read() < finish) {
77 /* spin, baby spin */
78 }
79 }
80
#ifdef MODULAR
/*
 * Push any modules loaded by the boot loader.
 */
void
module_init_md(void)
{
	/* Nothing yet: no boot-loader module list on this port. */
}
#endif /* MODULAR */
90
/*
 * Set registers on exec.
 *
 * The user trapframe is zeroed, then:
 *   - tf_sp gets the (aligned) stack pointer passed in,
 *   - tf_pc gets the entry point from the exec package with the low
 *     bit cleared,
 *   - tf_sr selects user mode (32-bit user mode for PK_32 processes
 *     on an _LP64 kernel),
 *   - a0..a2 carry the _start(obj, cleanup, ps_strings) arguments.
 *
 * (The previous comment here described the MIPS port's $t9/PIC
 * convention, which does not apply to RISC-V.)
 */
void
setregs(struct lwp *l, struct exec_package *pack, vaddr_t stack)
{
	struct trapframe * const tf = l->l_md.md_utf;
	struct proc * const p = l->l_proc;

	memset(tf, 0, sizeof(struct trapframe));
	tf->tf_sp = (intptr_t)stack_align(stack);
	/* & ~1: clear the low bit of the entry point -- TODO confirm why */
	tf->tf_pc = (intptr_t)pack->ep_entry & ~1;
#ifdef _LP64
	tf->tf_sr = (p->p_flag & PK_32) ? SR_USER32 : SR_USER;
#else
	tf->tf_sr = SR_USER;
#endif
	// Set up arguments for _start(obj, cleanup, ps_strings)
	tf->tf_a0 = 0;			// obj
	tf->tf_a1 = 0;			// cleanup
	tf->tf_a2 = p->p_psstrp;	// ps_strings
}
117
/*
 * Return path for a newly forked child: make the syscall appear to
 * have returned (0, 1) in (a0, a1), clear the FP-enable bit in the
 * saved status register, and emit the ktrace syscall-return record.
 */
void
child_return(void *arg)
{
	struct lwp * const l = arg;
	struct trapframe * const tf = l->l_md.md_utf;

	tf->tf_a0 = 0;		/* child's fork() return value */
	tf->tf_a1 = 1;		/* NOTE(review): presumably the "is child"
				 * indicator -- confirm against libc stub */
	tf->tf_sr &= ~SR_EF;	/* Disable FP as we can't be using it. */
	ktrsysret(SYS_fork, 0, 0);
}
129
/*
 * Complete the return to user space for an LWP on the spawn path;
 * just funnels through userret().
 */
void
cpu_spawn_return(struct lwp *l)
{
	userret(l);
}
135
136 /*
137 * Start a new LWP
138 */
139 void
140 startlwp(void *arg)
141 {
142 ucontext_t * const uc = arg;
143 lwp_t * const l = curlwp;
144 int error __diagused;
145
146 error = cpu_setmcontext(l, &uc->uc_mcontext, uc->uc_flags);
147 KASSERT(error == 0);
148
149 kmem_free(uc, sizeof(ucontext_t));
150 userret(l);
151 }
152
// We've worked hard to make sure struct reg and __gregset_t are the same.
// Ditto for struct fpreg and __fregset_t.

CTASSERT(sizeof(struct reg) == sizeof(__gregset_t));
CTASSERT(sizeof(struct fpreg) == sizeof(__fregset_t));
158
/*
 * Capture the machine context of "l" into "mcp": the user trapframe
 * registers, the thread-private (TLS) pointer, and -- when the FPU
 * state is valid -- the FP registers.  Sets _UC_CPU | _UC_TLSBASE
 * (plus _UC_FPU when FP state was copied) in *flags.
 */
void
cpu_getmcontext(struct lwp *l, mcontext_t *mcp, unsigned int *flags)
{
	const struct trapframe * const tf = l->l_md.md_utf;

	/* Save register context. */
	*(struct reg *)mcp->__gregs = tf->tf_regs;

	mcp->__private = (intptr_t)l->l_private;	/* TLS base */

	*flags |= _UC_CPU | _UC_TLSBASE;

	/* Save floating point register context, if any. */
	KASSERT(l == curlwp);
	if (fpu_valid_p()) {
		/*
		 * If this process is the current FP owner, dump its
		 * context to the PCB first.
		 */
		fpu_save();

		struct pcb * const pcb = lwp_getpcb(l);
		*(struct fpreg *)mcp->__fregs = pcb->pcb_fpregs;
		*flags |= _UC_FPU;
	}
}
185
186 int
187 cpu_mcontext_validate(struct lwp *l, const mcontext_t *mcp)
188 {
189 /*
190 * Verify that at least the PC and SP are user addresses.
191 */
192 if ((intptr_t) mcp->__gregs[_REG_PC] < 0
193 || (intptr_t) mcp->__gregs[_REG_SP] < 0
194 || (mcp->__gregs[_REG_PC] & 1))
195 return EINVAL;
196
197 return 0;
198 }
199
200 int
201 cpu_setmcontext(struct lwp *l, const mcontext_t *mcp, unsigned int flags)
202 {
203 struct trapframe * const tf = l->l_md.md_utf;
204 struct proc * const p = l->l_proc;
205 const __greg_t * const gr = mcp->__gregs;
206 int error;
207
208 /* Restore register context, if any. */
209 if (flags & _UC_CPU) {
210 error = cpu_mcontext_validate(l, mcp);
211 if (error)
212 return error;
213
214 /* Save register context. */
215 tf->tf_regs = *(const struct reg *)gr;
216 }
217
218 /* Restore the private thread context */
219 if (flags & _UC_TLSBASE) {
220 lwp_setprivate(l, (void *)(intptr_t)mcp->__private);
221 }
222
223 /* Restore floating point register context, if any. */
224 if (flags & _UC_FPU) {
225 KASSERT(l == curlwp);
226 /* Tell PCU we are replacing the FPU contents. */
227 fpu_replace();
228
229 /*
230 * The PCB FP regs struct includes the FP CSR, so use the
231 * proper size of fpreg when copying.
232 */
233 struct pcb * const pcb = lwp_getpcb(l);
234 pcb->pcb_fpregs = *(const struct fpreg *)mcp->__fregs;
235 }
236
237 mutex_enter(p->p_lock);
238 if (flags & _UC_SETSTACK)
239 l->l_sigstk.ss_flags |= SS_ONSTACK;
240 if (flags & _UC_CLRSTACK)
241 l->l_sigstk.ss_flags &= ~SS_ONSTACK;
242 mutex_exit(p->p_lock);
243
244 return (0);
245 }
246
/*
 * Ask the LWP currently running on "ci" to reschedule, via an AST,
 * a kernel preemption softint, or a cross-CPU IPI as appropriate.
 *
 * NOTE(review): ci_want_resched is OR-ed twice -- once unconditionally
 * just below, and again later (atomically on MULTIPROCESSOR).  The
 * first, non-atomic update looks redundant and racy on MP; compare
 * with other ports before relying on either.
 */
void
cpu_need_resched(struct cpu_info *ci, int flags)
{
	struct lwp * const l = ci->ci_data.cpu_onproc;
#ifdef MULTIPROCESSOR
	struct cpu_info * const cur_ci = curcpu();
#endif

	KASSERT(kpreempt_disabled());

	ci->ci_want_resched |= flags;

	if (__predict_false((l->l_pflag & LP_INTR) != 0)) {
		/*
		 * No point doing anything, it will switch soon.
		 * Also here to prevent an assertion failure in
		 * kpreempt() due to preemption being set on a
		 * soft interrupt LWP.
		 */
		return;
	}

	if (__predict_false(l == ci->ci_data.cpu_idlelwp)) {
#ifdef MULTIPROCESSOR
		/*
		 * If the other CPU is idling, it must be waiting for an
		 * interrupt.  So give it one.
		 */
		if (__predict_false(ci != cur_ci))
			cpu_send_ipi(ci, IPI_NOP);
#endif
		return;
	}

#ifdef MULTIPROCESSOR
	atomic_or_uint(&ci->ci_want_resched, flags);
#else
	ci->ci_want_resched |= flags;
#endif

	if (flags & RESCHED_KPREEMPT) {
#ifdef __HAVE_PREEMPTION
		/* Kernel preemption: trigger locally, or IPI the target. */
		atomic_or_uint(&l->l_dopreempt, DOPREEMPT_ACTIVE);
		if (ci == cur_ci) {
			softint_trigger(SOFTINT_KPREEMPT);
		} else {
			cpu_send_ipi(ci, IPI_KPREEMPT);
		}
#endif
		return;
	}
	l->l_md.md_astpending = 1;	/* force call to ast() */
#ifdef MULTIPROCESSOR
	if (ci != cur_ci && (flags & RESCHED_IMMED)) {
		cpu_send_ipi(ci, IPI_AST);
	}
#endif
}
305
/*
 * Arrange for "l" to notice a pending signal: force an AST so the
 * signal is processed on the way back to user mode.
 */
void
cpu_signotify(struct lwp *l)
{
	KASSERT(kpreempt_disabled());
#ifdef __HAVE_FAST_SOFTINTS
	KASSERT(lwp_locked(l, NULL));
#endif
	KASSERT(l->l_stat == LSONPROC || l->l_stat == LSRUN || l->l_stat == LSSTOP);

	l->l_md.md_astpending = 1;	/* force call to ast() */
}
317
/*
 * Schedule a profiling tick for "l": mark it as owing a profiling
 * charge (LP_OWEUPC) and force an AST to collect it.
 */
void
cpu_need_proftick(struct lwp *l)
{
	KASSERT(kpreempt_disabled());
	KASSERT(l->l_cpu == curcpu());

	l->l_pflag |= LP_OWEUPC;
	l->l_md.md_astpending = 1;	/* force call to ast() */
}
327
328 void
329 cpu_set_curpri(int pri)
330 {
331 kpreempt_disable();
332 curcpu()->ci_schedstate.spc_curpriority = pri;
333 kpreempt_enable();
334 }
335
/*
 * Halt/reboot the machine.  Still a stub on this port: "how" and
 * "bootstr" are ignored and we simply spin forever.
 */
void
cpu_reboot(int how, char *bootstr)
{
	for (;;) {
		/* spin -- no reset mechanism wired up yet */
	}
}
342
/*
 * Configure the kernel crash-dump area.  Not implemented yet.
 */
void
cpu_dumpconf(void)
{
	// TBD!!
}
348
/*
 * Machine-dependent startup: print the kernel banner and memory
 * totals, and create the phys_map submap used for physio.
 */
void
cpu_startup(void)
{
	vaddr_t minaddr, maxaddr;
	char pbuf[9];	/* "99999 MB" */

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf("%s%s", copyright, version);
	format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
	printf("total memory = %s\n", pbuf);

	minaddr = 0;
	/*
	 * Allocate a submap for physio.
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    VM_PHYS_SIZE, 0, FALSE, NULL);

	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
	printf("avail memory = %s\n", pbuf);
}
372
/*
 * Early machine-dependent bootstrap entry point.  Still empty: the
 * kernel VA range (kernstart/kernend) is accepted but not yet used.
 */
void
init_riscv(vaddr_t kernstart, vaddr_t kernend)
{
}
377