arm32_machdep.c revision 1.88 1 /* $NetBSD: arm32_machdep.c,v 1.88 2012/12/31 21:34:31 jmcneill Exp $ */
2
3 /*
4 * Copyright (c) 1994-1998 Mark Brinicombe.
5 * Copyright (c) 1994 Brini.
6 * All rights reserved.
7 *
8 * This code is derived from software written for Brini by Mark Brinicombe
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by Mark Brinicombe
21 * for the NetBSD Project.
22 * 4. The name of the company nor the name of the author may be used to
23 * endorse or promote products derived from this software without specific
24 * prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
27 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
28 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
29 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
30 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
31 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
32 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
37 *
38 * Machine dependent functions for kernel setup
39 *
40 * Created : 17/09/94
41 * Updated : 18/04/01 updated for new wscons
42 */
43
44 #include <sys/cdefs.h>
45 __KERNEL_RCSID(0, "$NetBSD: arm32_machdep.c,v 1.88 2012/12/31 21:34:31 jmcneill Exp $");
46
47 #include "opt_modular.h"
48 #include "opt_md.h"
49 #include "opt_pmap_debug.h"
50
51 #include <sys/param.h>
52 #include <sys/systm.h>
53 #include <sys/reboot.h>
54 #include <sys/proc.h>
55 #include <sys/kauth.h>
56 #include <sys/kernel.h>
57 #include <sys/mbuf.h>
58 #include <sys/mount.h>
59 #include <sys/buf.h>
60 #include <sys/msgbuf.h>
61 #include <sys/device.h>
62 #include <sys/sysctl.h>
63 #include <sys/cpu.h>
64 #include <sys/intr.h>
65 #include <sys/module.h>
66 #include <sys/atomic.h>
67 #include <sys/xcall.h>
68
69 #include <uvm/uvm_extern.h>
70
71 #include <dev/cons.h>
72 #include <dev/mm.h>
73
74 #include <arm/arm32/katelib.h>
75 #include <arm/arm32/machdep.h>
76
77 #include <machine/bootconfig.h>
78 #include <machine/pcb.h>
79
void (*cpu_reset_address)(void);	/* Used by locore */
paddr_t cpu_reset_address_paddr;	/* Used by locore */

/* Submap of kernel_map used for physio; allocated in cpu_startup(). */
struct vm_map *phys_map = NULL;

#if defined(MEMORY_DISK_HOOKS) && !defined(MEMORY_DISK_ROOT_SIZE)
extern size_t md_root_size;		/* Memory disc size */
#endif	/* MEMORY_DISK_HOOKS && !MEMORY_DISK_ROOT_SIZE */

/*
 * Per-mode kernel stacks (SVC, abort, FIQ, IRQ, undefined) plus the
 * idle lwp stack area.  NOTE(review): presumably filled in by the MD
 * early bootstrap before anything here runs -- confirm per port.
 */
pv_addr_t kernelstack;
pv_addr_t abtstack;
pv_addr_t fiqstack;
pv_addr_t irqstack;
pv_addr_t undstack;
pv_addr_t idlestack;

/* Kernel message buffer: VA mapped in cpu_startup(); PA from bootstrap. */
void * msgbufaddr;
extern paddr_t msgbufphys;

/* Exported via the machdep.debug sysctl node below. */
int kernel_debug = 0;

/* exported variable to be filled in by the bootloaders */
char *booted_kernel;

/* Prototypes */

void data_abort_handler(trapframe_t *frame);
void prefetch_abort_handler(trapframe_t *frame);
extern void configure(void);
109
110 /*
111 * arm32_vector_init:
112 *
113 * Initialize the vector page, and select whether or not to
114 * relocate the vectors.
115 *
116 * NOTE: We expect the vector page to be mapped at its expected
117 * destination.
118 */
119 void
120 arm32_vector_init(vaddr_t va, int which)
121 {
122 if (CPU_IS_PRIMARY(curcpu())) {
123 extern unsigned int page0[], page0_data[];
124 unsigned int *vectors = (int *) va;
125 unsigned int *vectors_data = vectors + (page0_data - page0);
126 int vec;
127
128 /*
129 * Loop through the vectors we're taking over, and copy the
130 * vector's insn and data word.
131 */
132 for (vec = 0; vec < ARM_NVEC; vec++) {
133 if ((which & (1 << vec)) == 0) {
134 /* Don't want to take over this vector. */
135 continue;
136 }
137 vectors[vec] = page0[vec];
138 vectors_data[vec] = page0_data[vec];
139 }
140
141 /* Now sync the vectors. */
142 cpu_icache_sync_range(va, (ARM_NVEC * 2) * sizeof(u_int));
143
144 vector_page = va;
145 }
146
147 if (va == ARM_VECTORS_HIGH) {
148 /*
149 * Assume the MD caller knows what it's doing here, and
150 * really does want the vector page relocated.
151 *
152 * Note: This has to be done here (and not just in
153 * cpu_setup()) because the vector page needs to be
154 * accessible *before* cpu_startup() is called.
155 * Think ddb(9) ...
156 *
157 * NOTE: If the CPU control register is not readable,
158 * this will totally fail! We'll just assume that
159 * any system that has high vector support has a
160 * readable CPU control register, for now. If we
161 * ever encounter one that does not, we'll have to
162 * rethink this.
163 */
164 cpu_control(CPU_CONTROL_VECRELOC, CPU_CONTROL_VECRELOC);
165 }
166 }
167
/*
 * halt:
 *
 *	Debug helper: park this CPU forever, sleeping between any
 *	(spurious) wakeups.  Never returns.
 */
void
halt(void)
{
	for (;;)
		cpu_sleep(0);
}
178
179
/* Sync the discs, unmount the filesystems, and adjust the todr */

void
bootsync(void)
{
	/* Run the shutdown work at most once, even if boot() re-enters. */
	static bool bootsyncdone = false;

	if (bootsyncdone) return;

	bootsyncdone = true;

	/* Make sure we can still manage to do things */
	if (GetCPSR() & I32_bit) {
		/*
		 * If we get here then boot has been called without RB_NOSYNC
		 * and interrupts were disabled.  This means the boot() call
		 * did not come from a user process e.g. shutdown, but must
		 * have come from somewhere in the kernel.  Re-enable IRQs
		 * so the sync below can make progress.
		 */
		IRQenable;
		printf("Warning IRQ's disabled during boot()\n");
	}

	/* Sync and unmount filesystems. */
	vfs_shutdown();

	/* Write the system time back to the RTC. */
	resettodr();
}
207
/*
 * void cpu_startup(void)
 *
 * Machine dependent startup code: finish CPU setup, write-protect the
 * vector page, map and initialize the kernel message buffer, allocate
 * the physio submap, and point lwp0's pcb/trapframe at its SVC stack.
 */
void
cpu_startup(void)
{
	vaddr_t minaddr;
	vaddr_t maxaddr;
	u_int loop;
	char pbuf[9];		/* scratch buffer for format_bytes() */

	/*
	 * Until we better locking, we have to live under the kernel lock.
	 */
	//KERNEL_LOCK(1, NULL);

	/* Set the CPU control register */
	cpu_setup(boot_args);

	/* Lock down zero page */
	vector_page_setprot(VM_PROT_READ);

	/*
	 * Give pmap a chance to set up a few more things now the vm
	 * is initialised
	 */
	pmap_postinit();

	/*
	 * Initialize error message buffer (at end of core).
	 */

	/* msgbufphys was setup during the secondary boot strap */
	for (loop = 0; loop < btoc(MSGBUFSIZE); ++loop)
		pmap_kenter_pa((vaddr_t)msgbufaddr + loop * PAGE_SIZE,
		    msgbufphys + loop * PAGE_SIZE,
		    VM_PROT_READ|VM_PROT_WRITE, 0);
	pmap_update(pmap_kernel());
	initmsgbuf(msgbufaddr, round_page(MSGBUFSIZE));

	/*
	 * Identify ourselves for the msgbuf (everything printed earlier will
	 * not be buffered).
	 */
	printf("%s%s", copyright, version);

	format_bytes(pbuf, sizeof(pbuf), arm_ptob(physmem));
	printf("total memory = %s\n", pbuf);

	minaddr = 0;

	/*
	 * Allocate a submap for physio
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    VM_PHYS_SIZE, 0, false, NULL);

	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
	printf("avail memory = %s\n", pbuf);

	/*
	 * Point lwp0's kernel stack pointer at the top of its uarea's
	 * SVC stack, with its trapframe sitting just below.
	 */
	struct lwp * const l = &lwp0;
	struct pcb * const pcb = lwp_getpcb(l);
	pcb->pcb_ksp = uvm_lwp_getuarea(l) + USPACE_SVC_STACK_TOP;
	lwp_settrapframe(l, (struct trapframe *)pcb->pcb_ksp - 1);
}
276
277 /*
278 * machine dependent system variables.
279 */
280 static int
281 sysctl_machdep_booted_device(SYSCTLFN_ARGS)
282 {
283 struct sysctlnode node;
284
285 if (booted_device == NULL)
286 return (EOPNOTSUPP);
287
288 node = *rnode;
289 node.sysctl_data = __UNCONST(device_xname(booted_device));
290 node.sysctl_size = strlen(device_xname(booted_device)) + 1;
291 return (sysctl_lookup(SYSCTLFN_CALL(&node)));
292 }
293
294 static int
295 sysctl_machdep_booted_kernel(SYSCTLFN_ARGS)
296 {
297 struct sysctlnode node;
298
299 if (booted_kernel == NULL || booted_kernel[0] == '\0')
300 return (EOPNOTSUPP);
301
302 node = *rnode;
303 node.sysctl_data = booted_kernel;
304 node.sysctl_size = strlen(booted_kernel) + 1;
305 return (sysctl_lookup(SYSCTLFN_CALL(&node)));
306 }
307
308 static int
309 sysctl_machdep_powersave(SYSCTLFN_ARGS)
310 {
311 struct sysctlnode node = *rnode;
312 int error, newval;
313
314 newval = cpu_do_powersave;
315 node.sysctl_data = &newval;
316 if (cpufuncs.cf_sleep == (void *) cpufunc_nullop)
317 node.sysctl_flags &= ~CTLFLAG_READWRITE;
318 error = sysctl_lookup(SYSCTLFN_CALL(&node));
319 if (error || newp == NULL || newval == cpu_do_powersave)
320 return (error);
321
322 if (newval < 0 || newval > 1)
323 return (EINVAL);
324 cpu_do_powersave = newval;
325
326 return (0);
327 }
328
/* Create the machdep sysctl subtree and its nodes. */
SYSCTL_SETUP(sysctl_machdep_setup, "sysctl machdep subtree setup")
{

	/* machdep (root of the subtree) */
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "machdep", NULL,
		NULL, 0, NULL, 0,
		CTL_MACHDEP, CTL_EOL);

	/* machdep.debug -- read/write, backed by kernel_debug */
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		CTLTYPE_INT, "debug", NULL,
		NULL, 0, &kernel_debug, 0,
		CTL_MACHDEP, CPU_DEBUG, CTL_EOL);
	/* machdep.booted_device -- via helper above */
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_STRING, "booted_device", NULL,
		sysctl_machdep_booted_device, 0, NULL, 0,
		CTL_MACHDEP, CPU_BOOTED_DEVICE, CTL_EOL);
	/* machdep.booted_kernel -- via helper above */
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_STRING, "booted_kernel", NULL,
		sysctl_machdep_booted_kernel, 0, NULL, 0,
		CTL_MACHDEP, CPU_BOOTED_KERNEL, CTL_EOL);
	/* machdep.console_device -- MI consdev helper */
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_STRUCT, "console_device", NULL,
		sysctl_consdev, 0, NULL, sizeof(dev_t),
		CTL_MACHDEP, CPU_CONSDEV, CTL_EOL);
	/* machdep.powersave -- read/write via helper above */
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		CTLTYPE_INT, "powersave", NULL,
		sysctl_machdep_powersave, 0, &cpu_do_powersave, 0,
		CTL_MACHDEP, CPU_POWERSAVE, CTL_EOL);
}
364
365 void
366 parse_mi_bootargs(char *args)
367 {
368 int integer;
369
370 if (get_bootconf_option(args, "single", BOOTOPT_TYPE_BOOLEAN, &integer)
371 || get_bootconf_option(args, "-s", BOOTOPT_TYPE_BOOLEAN, &integer))
372 if (integer)
373 boothowto |= RB_SINGLE;
374 if (get_bootconf_option(args, "kdb", BOOTOPT_TYPE_BOOLEAN, &integer)
375 || get_bootconf_option(args, "-k", BOOTOPT_TYPE_BOOLEAN, &integer))
376 if (integer)
377 boothowto |= RB_KDB;
378 if (get_bootconf_option(args, "ask", BOOTOPT_TYPE_BOOLEAN, &integer)
379 || get_bootconf_option(args, "-a", BOOTOPT_TYPE_BOOLEAN, &integer))
380 if (integer)
381 boothowto |= RB_ASKNAME;
382
383 #ifdef PMAP_DEBUG
384 if (get_bootconf_option(args, "pmapdebug", BOOTOPT_TYPE_INT, &integer)) {
385 pmap_debug_level = integer;
386 pmap_debug(pmap_debug_level);
387 }
388 #endif /* PMAP_DEBUG */
389
390 /* if (get_bootconf_option(args, "nbuf", BOOTOPT_TYPE_INT, &integer))
391 bufpages = integer;*/
392
393 #if defined(MEMORY_DISK_HOOKS) && !defined(MEMORY_DISK_ROOT_SIZE)
394 if (get_bootconf_option(args, "memorydisc", BOOTOPT_TYPE_INT, &integer)
395 || get_bootconf_option(args, "memorydisk", BOOTOPT_TYPE_INT, &integer)) {
396 md_root_size = integer;
397 md_root_size *= 1024;
398 if (md_root_size < 32*1024)
399 md_root_size = 32*1024;
400 if (md_root_size > 2048*1024)
401 md_root_size = 2048*1024;
402 }
403 #endif /* MEMORY_DISK_HOOKS && !MEMORY_DISK_ROOT_SIZE */
404
405 if (get_bootconf_option(args, "quiet", BOOTOPT_TYPE_BOOLEAN, &integer)
406 || get_bootconf_option(args, "-q", BOOTOPT_TYPE_BOOLEAN, &integer))
407 if (integer)
408 boothowto |= AB_QUIET;
409 if (get_bootconf_option(args, "verbose", BOOTOPT_TYPE_BOOLEAN, &integer)
410 || get_bootconf_option(args, "-v", BOOTOPT_TYPE_BOOLEAN, &integer))
411 if (integer)
412 boothowto |= AB_VERBOSE;
413 }
414
#ifdef __HAVE_FAST_SOFTINTS
/*
 * Sanity-check the assumed IPL layout: IPL_SOFTCLOCK, IPL_SOFTBIO,
 * IPL_SOFTNET, IPL_SOFTSERIAL must be consecutive ascending values
 * with IPL_NONE == 0 below them.  The bit/nibble encodings that
 * follow depend on exactly this layout.
 */
#if IPL_SOFTSERIAL != IPL_SOFTNET + 1
#error IPLs are screwed up
#elif IPL_SOFTNET != IPL_SOFTBIO + 1
#error IPLs are screwed up
#elif IPL_SOFTBIO != IPL_SOFTCLOCK + 1
#error IPLs are screwed up
#elif !(IPL_SOFTCLOCK > IPL_NONE)
#error IPLs are screwed up
#elif (IPL_NONE != 0)
#error IPLs are screwed up
#endif

#ifndef __HAVE_PIC_FAST_SOFTINTS
/*
 * SOFTINT2IPLMAP packs, four bits per SOFTINT_* level, each level's
 * IPL offset relative to IPL_SOFTCLOCK; SOFTINT2IPL(l) extracts the
 * nibble for level l.
 */
#define SOFTINT2IPLMAP \
	(((IPL_SOFTSERIAL - IPL_SOFTCLOCK) << (SOFTINT_SERIAL * 4)) | \
	 ((IPL_SOFTNET - IPL_SOFTCLOCK) << (SOFTINT_NET * 4)) | \
	 ((IPL_SOFTBIO - IPL_SOFTCLOCK) << (SOFTINT_BIO * 4)) | \
	 ((IPL_SOFTCLOCK - IPL_SOFTCLOCK) << (SOFTINT_CLOCK * 4)))
#define SOFTINT2IPL(l)	((SOFTINT2IPLMAP >> ((l) * 4)) & 0x0f)

/*
 * This returns a mask of softint IPLs that can be dispatched at <ipl>:
 * SOFTIPLMASK(IPL_NONE)	= 0x0000000f
 * SOFTIPLMASK(IPL_SOFTCLOCK)	= 0x0000000e
 * SOFTIPLMASK(IPL_SOFTBIO)	= 0x0000000c
 * SOFTIPLMASK(IPL_SOFTNET)	= 0x00000008
 * SOFTIPLMASK(IPL_SOFTSERIAL)	= 0x00000000
 */
#define SOFTIPLMASK(ipl) ((0x0f << (ipl)) & 0x0f)

void softint_switch(lwp_t *, int);
447
448 void
449 softint_trigger(uintptr_t mask)
450 {
451 curcpu()->ci_softints |= mask;
452 }
453
/*
 * softint_init_md:
 *
 *	Record the softint lwp for <level> on its CPU and compute the
 *	machine-dependent pending bit (IPL offset from IPL_SOFTCLOCK)
 *	that softint_trigger() will OR into ci_softints.
 */
void
softint_init_md(lwp_t *l, u_int level, uintptr_t *machdep)
{
	lwp_t ** lp = &l->l_cpu->ci_softlwps[level];
	KASSERT(*lp == NULL || *lp == l);
	*lp = l;
	*machdep = 1 << SOFTINT2IPL(level);
	/* Cross-check the SOFTINT2IPL nibble map for every known level. */
	KASSERT(level != SOFTINT_CLOCK || *machdep == (1 << (IPL_SOFTCLOCK - IPL_SOFTCLOCK)));
	KASSERT(level != SOFTINT_BIO || *machdep == (1 << (IPL_SOFTBIO - IPL_SOFTCLOCK)));
	KASSERT(level != SOFTINT_NET || *machdep == (1 << (IPL_SOFTNET - IPL_SOFTCLOCK)));
	KASSERT(level != SOFTINT_SERIAL || *machdep == (1 << (IPL_SOFTSERIAL - IPL_SOFTCLOCK)));
}
466
/*
 * dosoftints:
 *
 *	Dispatch all pending software interrupts that are unmasked at
 *	the caller's IPL.  Runs the scan loop at splhigh(); restores
 *	the original IPL via splx() when nothing dispatchable remains.
 */
void
dosoftints(void)
{
	struct cpu_info * const ci = curcpu();
	const int opl = ci->ci_cpl;		/* caller's IPL */
	const uint32_t softiplmask = SOFTIPLMASK(opl);

	splhigh();
	for (;;) {
		u_int softints = ci->ci_softints & softiplmask;
		KASSERT((softints != 0) == ((ci->ci_softints >> opl) != 0));
		KASSERT(opl == IPL_NONE || (softints & (1 << (opl - IPL_SOFTCLOCK))) == 0);
		if (softints == 0) {
			splx(opl);
			return;
		}
		/*
		 * Dispatch highest priority first: clear the pending
		 * bit, switch to that level's softint lwp, then rescan
		 * (new softints may have become pending meanwhile).
		 */
#define	DOSOFTINT(n) \
		if (ci->ci_softints & (1 << (IPL_SOFT ## n - IPL_SOFTCLOCK))) { \
			ci->ci_softints &= \
			    ~(1 << (IPL_SOFT ## n - IPL_SOFTCLOCK)); \
			softint_switch(ci->ci_softlwps[SOFTINT_ ## n], \
			    IPL_SOFT ## n); \
			continue; \
		}
		DOSOFTINT(SERIAL);
		DOSOFTINT(NET);
		DOSOFTINT(BIO);
		DOSOFTINT(CLOCK);
		/* A bit was set in softints but matched no level above. */
		panic("dosoftints wtf (softints=%u?, ipl=%d)", softints, opl);
	}
}
498 #endif /* !__HAVE_PIC_FAST_SOFTINTS */
499 #endif /* __HAVE_FAST_SOFTINTS */
500
501 #ifdef MODULAR
/*
 * Push any modules loaded by the boot loader.
 */
void
module_init_md(void)
{
	/* Empty stub: no boot-loader-provided module list is processed. */
}
509 #endif /* MODULAR */
510
511 int
512 mm_md_physacc(paddr_t pa, vm_prot_t prot)
513 {
514
515 return (pa < ctob(physmem)) ? 0 : EFAULT;
516 }
517
518 #ifdef __HAVE_CPU_UAREA_ALLOC_IDLELWP
519 vaddr_t
520 cpu_uarea_alloc_idlelwp(struct cpu_info *ci)
521 {
522 const vaddr_t va = idlestack.pv_va + ci->ci_cpuid * USPACE;
523 // printf("%s: %s: va=%lx\n", __func__, ci->ci_data.cpu_name, va);
524 return va;
525 }
526 #endif
527
528 #ifdef MULTIPROCESSOR
/*
 * cpu_boot_secondary_processors:
 *
 *	Publish the set of attached CPUs in the arm_cpu_mbox mailbox.
 *	NOTE(review): presumably the secondary CPUs spin on this
 *	mailbox in early MD start-up code -- confirm per port.
 */
void
cpu_boot_secondary_processors(void)
{
	uint32_t mbox;
	kcpuset_export_u32(kcpuset_attached, &mbox, sizeof(mbox));
	atomic_swap_32(&arm_cpu_mbox, mbox);
	/* Make the mailbox store visible before waking anyone. */
	membar_producer();
#ifdef _ARM_ARCH_7
	/* SEV wakes CPUs that may be waiting in WFE. */
	__asm __volatile("sev; sev; sev");
#endif
}
540
541 void
542 xc_send_ipi(struct cpu_info *ci)
543 {
544 KASSERT(kpreempt_disabled());
545 KASSERT(curcpu() != ci);
546
547
548 if (ci) {
549 /* Unicast, remote CPU */
550 printf("%s: -> %s", __func__, ci->ci_data.cpu_name);
551 intr_ipi_send(ci->ci_kcpuset, IPI_XCALL);
552 } else {
553 printf("%s: -> !%s", __func__, ci->ci_data.cpu_name);
554 /* Broadcast to all but ourselves */
555 kcpuset_t *kcp;
556 kcpuset_create(&kcp, (ci != NULL));
557 KASSERT(kcp != NULL);
558 kcpuset_copy(kcp, kcpuset_running);
559 kcpuset_clear(kcp, cpu_index(ci));
560 intr_ipi_send(kcp, IPI_XCALL);
561 kcpuset_destroy(kcp);
562 }
563 printf("\n");
564 }
565 #endif /* MULTIPROCESSOR */
566
567 #ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
568 bool
569 mm_md_direct_mapped_phys(paddr_t pa, vaddr_t *vap)
570 {
571 if (physical_start <= pa && pa < physical_end) {
572 *vap = KERNEL_BASE + (pa - physical_start);
573 return true;
574 }
575
576 return false;
577 }
578 #endif
579