/* $NetBSD: arm32_machdep.c,v 1.76.2.3 2013/01/23 00:05:39 yamt Exp $ */
2
3 /*
4 * Copyright (c) 1994-1998 Mark Brinicombe.
5 * Copyright (c) 1994 Brini.
6 * All rights reserved.
7 *
8 * This code is derived from software written for Brini by Mark Brinicombe
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by Mark Brinicombe
21 * for the NetBSD Project.
22 * 4. The name of the company nor the name of the author may be used to
23 * endorse or promote products derived from this software without specific
24 * prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
27 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
28 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
29 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
30 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
31 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
32 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
37 *
38 * Machine dependent functions for kernel setup
39 *
40 * Created : 17/09/94
41 * Updated : 18/04/01 updated for new wscons
42 */
43
44 #include <sys/cdefs.h>
45 __KERNEL_RCSID(0, "$NetBSD: arm32_machdep.c,v 1.76.2.3 2013/01/23 00:05:39 yamt Exp $");
46
47 #include "opt_modular.h"
48 #include "opt_md.h"
49 #include "opt_pmap_debug.h"
50
51 #include <sys/param.h>
52 #include <sys/systm.h>
53 #include <sys/reboot.h>
54 #include <sys/proc.h>
55 #include <sys/kauth.h>
56 #include <sys/kernel.h>
57 #include <sys/mbuf.h>
58 #include <sys/mount.h>
59 #include <sys/buf.h>
60 #include <sys/msgbuf.h>
61 #include <sys/device.h>
62 #include <sys/sysctl.h>
63 #include <sys/cpu.h>
64 #include <sys/intr.h>
65 #include <sys/module.h>
66 #include <sys/atomic.h>
67 #include <sys/xcall.h>
68
69 #include <uvm/uvm_extern.h>
70
71 #include <dev/cons.h>
72 #include <dev/mm.h>
73
74 #include <arm/arm32/katelib.h>
75 #include <arm/arm32/machdep.h>
76
77 #include <machine/bootconfig.h>
78 #include <machine/pcb.h>
79
80 void (*cpu_reset_address)(void); /* Used by locore */
81 paddr_t cpu_reset_address_paddr; /* Used by locore */
82
83 struct vm_map *phys_map = NULL;
84
85 #if defined(MEMORY_DISK_HOOKS) && !defined(MEMORY_DISK_ROOT_SIZE)
86 extern size_t md_root_size; /* Memory disc size */
87 #endif /* MEMORY_DISK_HOOKS && !MEMORY_DISK_ROOT_SIZE */
88
89 pv_addr_t kernelstack;
90 pv_addr_t abtstack;
91 pv_addr_t fiqstack;
92 pv_addr_t irqstack;
93 pv_addr_t undstack;
94 pv_addr_t idlestack;
95
96 void * msgbufaddr;
97 extern paddr_t msgbufphys;
98
99 int kernel_debug = 0;
100
101 /* exported variable to be filled in by the bootloaders */
102 char *booted_kernel;
103
104 /* Prototypes */
105
106 void data_abort_handler(trapframe_t *frame);
107 void prefetch_abort_handler(trapframe_t *frame);
108 extern void configure(void);
109
110 /*
111 * arm32_vector_init:
112 *
113 * Initialize the vector page, and select whether or not to
114 * relocate the vectors.
115 *
116 * NOTE: We expect the vector page to be mapped at its expected
117 * destination.
118 */
119 void
120 arm32_vector_init(vaddr_t va, int which)
121 {
122 if (CPU_IS_PRIMARY(curcpu())) {
123 extern unsigned int page0[], page0_data[];
124 unsigned int *vectors = (int *) va;
125 unsigned int *vectors_data = vectors + (page0_data - page0);
126 int vec;
127
128 /*
129 * Loop through the vectors we're taking over, and copy the
130 * vector's insn and data word.
131 */
132 for (vec = 0; vec < ARM_NVEC; vec++) {
133 if ((which & (1 << vec)) == 0) {
134 /* Don't want to take over this vector. */
135 continue;
136 }
137 vectors[vec] = page0[vec];
138 vectors_data[vec] = page0_data[vec];
139 }
140
141 /* Now sync the vectors. */
142 cpu_icache_sync_range(va, (ARM_NVEC * 2) * sizeof(u_int));
143
144 vector_page = va;
145 }
146
147 if (va == ARM_VECTORS_HIGH) {
148 /*
149 * Assume the MD caller knows what it's doing here, and
150 * really does want the vector page relocated.
151 *
152 * Note: This has to be done here (and not just in
153 * cpu_setup()) because the vector page needs to be
154 * accessible *before* cpu_startup() is called.
155 * Think ddb(9) ...
156 *
157 * NOTE: If the CPU control register is not readable,
158 * this will totally fail! We'll just assume that
159 * any system that has high vector support has a
160 * readable CPU control register, for now. If we
161 * ever encounter one that does not, we'll have to
162 * rethink this.
163 */
164 cpu_control(CPU_CONTROL_VECRELOC, CPU_CONTROL_VECRELOC);
165 }
166 }
167
/*
 * halt:
 *
 *	Debug helper that parks the CPU permanently: put the processor
 *	into its low-power sleep state over and over.  Never returns.
 */
void
halt(void)
{
	for (;;) {
		cpu_sleep(0);
	}
}
178
179
180 /* Sync the discs, unmount the filesystems, and adjust the todr */
181
182 void
183 bootsync(void)
184 {
185 static bool bootsyncdone = false;
186
187 if (bootsyncdone) return;
188
189 bootsyncdone = true;
190
191 /* Make sure we can still manage to do things */
192 if (GetCPSR() & I32_bit) {
193 /*
194 * If we get here then boot has been called without RB_NOSYNC
195 * and interrupts were disabled. This means the boot() call
196 * did not come from a user process e.g. shutdown, but must
197 * have come from somewhere in the kernel.
198 */
199 IRQenable;
200 printf("Warning IRQ's disabled during boot()\n");
201 }
202
203 vfs_shutdown();
204
205 resettodr();
206 }
207
/*
 * cpu_startup:
 *
 *	Machine dependent startup code: finish CPU setup, write-protect
 *	the vector page, wire up the kernel message buffer, report
 *	memory, allocate the physio submap, and initialize lwp0's
 *	kernel stack pointer and trapframe.
 */
void
cpu_startup(void)
{
	vaddr_t minaddr;
	vaddr_t maxaddr;
	u_int loop;
	char pbuf[9];	/* scratch buffer for format_bytes() output */

	/*
	 * Until we have better locking, we have to live under the
	 * kernel lock.
	 */
	//KERNEL_LOCK(1, NULL);

	/* Set the CPU control register */
	cpu_setup(boot_args);

	/* Lock down zero page (make the vector page read-only) */
	vector_page_setprot(VM_PROT_READ);

	/*
	 * Give pmap a chance to set up a few more things now the vm
	 * is initialised
	 */
	pmap_postinit();

	/*
	 * Initialize error message buffer (at end of core).
	 */

	/*
	 * msgbufphys was setup during the secondary boot strap;
	 * map it page by page at msgbufaddr.
	 */
	for (loop = 0; loop < btoc(MSGBUFSIZE); ++loop)
		pmap_kenter_pa((vaddr_t)msgbufaddr + loop * PAGE_SIZE,
		    msgbufphys + loop * PAGE_SIZE,
		    VM_PROT_READ|VM_PROT_WRITE, 0);
	pmap_update(pmap_kernel());
	initmsgbuf(msgbufaddr, round_page(MSGBUFSIZE));

	/*
	 * Identify ourselves for the msgbuf (everything printed earlier will
	 * not be buffered).
	 */
	printf("%s%s", copyright, version);

	format_bytes(pbuf, sizeof(pbuf), arm_ptob(physmem));
	printf("total memory = %s\n", pbuf);

	minaddr = 0;

	/*
	 * Allocate a submap for physio
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    VM_PHYS_SIZE, 0, false, NULL);

	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
	printf("avail memory = %s\n", pbuf);

	/*
	 * Point lwp0's pcb kernel stack pointer at the top of its SVC
	 * stack, and place its trapframe immediately below that.
	 */
	struct lwp * const l = &lwp0;
	struct pcb * const pcb = lwp_getpcb(l);
	pcb->pcb_ksp = uvm_lwp_getuarea(l) + USPACE_SVC_STACK_TOP;
	lwp_settrapframe(l, (struct trapframe *)pcb->pcb_ksp - 1);
}
276
277 /*
278 * machine dependent system variables.
279 */
280 static int
281 sysctl_machdep_booted_device(SYSCTLFN_ARGS)
282 {
283 struct sysctlnode node;
284
285 if (booted_device == NULL)
286 return (EOPNOTSUPP);
287
288 node = *rnode;
289 node.sysctl_data = __UNCONST(device_xname(booted_device));
290 node.sysctl_size = strlen(device_xname(booted_device)) + 1;
291 return (sysctl_lookup(SYSCTLFN_CALL(&node)));
292 }
293
294 static int
295 sysctl_machdep_booted_kernel(SYSCTLFN_ARGS)
296 {
297 struct sysctlnode node;
298
299 if (booted_kernel == NULL || booted_kernel[0] == '\0')
300 return (EOPNOTSUPP);
301
302 node = *rnode;
303 node.sysctl_data = booted_kernel;
304 node.sysctl_size = strlen(booted_kernel) + 1;
305 return (sysctl_lookup(SYSCTLFN_CALL(&node)));
306 }
307
308 static int
309 sysctl_machdep_powersave(SYSCTLFN_ARGS)
310 {
311 struct sysctlnode node = *rnode;
312 int error, newval;
313
314 newval = cpu_do_powersave;
315 node.sysctl_data = &newval;
316 if (cpufuncs.cf_sleep == (void *) cpufunc_nullop)
317 node.sysctl_flags &= ~CTLFLAG_READWRITE;
318 error = sysctl_lookup(SYSCTLFN_CALL(&node));
319 if (error || newp == NULL || newval == cpu_do_powersave)
320 return (error);
321
322 if (newval < 0 || newval > 1)
323 return (EINVAL);
324 cpu_do_powersave = newval;
325
326 return (0);
327 }
328
/*
 * Create the machine dependent sysctl subtree (CTL_MACHDEP) and its
 * leaf nodes: debug, booted_device, booted_kernel, console_device,
 * and powersave.
 */
SYSCTL_SETUP(sysctl_machdep_setup, "sysctl machdep subtree setup")
{

	/* The machdep root node. */
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "machdep", NULL,
		NULL, 0, NULL, 0,
		CTL_MACHDEP, CTL_EOL);

	/* machdep.debug: read/write hook into the kernel_debug flag. */
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		CTLTYPE_INT, "debug", NULL,
		NULL, 0, &kernel_debug, 0,
		CTL_MACHDEP, CPU_DEBUG, CTL_EOL);
	/* machdep.booted_device: name of the boot device (helper above). */
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_STRING, "booted_device", NULL,
		sysctl_machdep_booted_device, 0, NULL, 0,
		CTL_MACHDEP, CPU_BOOTED_DEVICE, CTL_EOL);
	/* machdep.booted_kernel: path of the booted kernel (helper above). */
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_STRING, "booted_kernel", NULL,
		sysctl_machdep_booted_kernel, 0, NULL, 0,
		CTL_MACHDEP, CPU_BOOTED_KERNEL, CTL_EOL);
	/* machdep.console_device: dev_t of the console (MI helper). */
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_STRUCT, "console_device", NULL,
		sysctl_consdev, 0, NULL, sizeof(dev_t),
		CTL_MACHDEP, CPU_CONSDEV, CTL_EOL);
	/* machdep.powersave: CPU sleep-on-idle control (helper above). */
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		CTLTYPE_INT, "powersave", NULL,
		sysctl_machdep_powersave, 0, &cpu_do_powersave, 0,
		CTL_MACHDEP, CPU_POWERSAVE, CTL_EOL);
}
364
365 void
366 parse_mi_bootargs(char *args)
367 {
368 int integer;
369
370 if (get_bootconf_option(args, "single", BOOTOPT_TYPE_BOOLEAN, &integer)
371 || get_bootconf_option(args, "-s", BOOTOPT_TYPE_BOOLEAN, &integer))
372 if (integer)
373 boothowto |= RB_SINGLE;
374 if (get_bootconf_option(args, "kdb", BOOTOPT_TYPE_BOOLEAN, &integer)
375 || get_bootconf_option(args, "-k", BOOTOPT_TYPE_BOOLEAN, &integer)
376 || get_bootconf_option(args, "-d", BOOTOPT_TYPE_BOOLEAN, &integer))
377 if (integer)
378 boothowto |= RB_KDB;
379 if (get_bootconf_option(args, "ask", BOOTOPT_TYPE_BOOLEAN, &integer)
380 || get_bootconf_option(args, "-a", BOOTOPT_TYPE_BOOLEAN, &integer))
381 if (integer)
382 boothowto |= RB_ASKNAME;
383
384 #ifdef PMAP_DEBUG
385 if (get_bootconf_option(args, "pmapdebug", BOOTOPT_TYPE_INT, &integer)) {
386 pmap_debug_level = integer;
387 pmap_debug(pmap_debug_level);
388 }
389 #endif /* PMAP_DEBUG */
390
391 /* if (get_bootconf_option(args, "nbuf", BOOTOPT_TYPE_INT, &integer))
392 bufpages = integer;*/
393
394 #if defined(MEMORY_DISK_HOOKS) && !defined(MEMORY_DISK_ROOT_SIZE)
395 if (get_bootconf_option(args, "memorydisc", BOOTOPT_TYPE_INT, &integer)
396 || get_bootconf_option(args, "memorydisk", BOOTOPT_TYPE_INT, &integer)) {
397 md_root_size = integer;
398 md_root_size *= 1024;
399 if (md_root_size < 32*1024)
400 md_root_size = 32*1024;
401 if (md_root_size > 2048*1024)
402 md_root_size = 2048*1024;
403 }
404 #endif /* MEMORY_DISK_HOOKS && !MEMORY_DISK_ROOT_SIZE */
405
406 if (get_bootconf_option(args, "quiet", BOOTOPT_TYPE_BOOLEAN, &integer)
407 || get_bootconf_option(args, "-q", BOOTOPT_TYPE_BOOLEAN, &integer))
408 if (integer)
409 boothowto |= AB_QUIET;
410 if (get_bootconf_option(args, "verbose", BOOTOPT_TYPE_BOOLEAN, &integer)
411 || get_bootconf_option(args, "-v", BOOTOPT_TYPE_BOOLEAN, &integer))
412 if (integer)
413 boothowto |= AB_VERBOSE;
414 }
415
416 #ifdef __HAVE_FAST_SOFTINTS
417 #if IPL_SOFTSERIAL != IPL_SOFTNET + 1
418 #error IPLs are screwed up
419 #elif IPL_SOFTNET != IPL_SOFTBIO + 1
420 #error IPLs are screwed up
421 #elif IPL_SOFTBIO != IPL_SOFTCLOCK + 1
422 #error IPLs are screwed up
423 #elif !(IPL_SOFTCLOCK > IPL_NONE)
424 #error IPLs are screwed up
425 #elif (IPL_NONE != 0)
426 #error IPLs are screwed up
427 #endif
428
429 #ifndef __HAVE_PIC_FAST_SOFTINTS
430 #define SOFTINT2IPLMAP \
431 (((IPL_SOFTSERIAL - IPL_SOFTCLOCK) << (SOFTINT_SERIAL * 4)) | \
432 ((IPL_SOFTNET - IPL_SOFTCLOCK) << (SOFTINT_NET * 4)) | \
433 ((IPL_SOFTBIO - IPL_SOFTCLOCK) << (SOFTINT_BIO * 4)) | \
434 ((IPL_SOFTCLOCK - IPL_SOFTCLOCK) << (SOFTINT_CLOCK * 4)))
435 #define SOFTINT2IPL(l) ((SOFTINT2IPLMAP >> ((l) * 4)) & 0x0f)
436
437 /*
438 * This returns a mask of softint IPLs that be dispatch at <ipl>
439 * SOFTIPLMASK(IPL_NONE) = 0x0000000f
440 * SOFTIPLMASK(IPL_SOFTCLOCK) = 0x0000000e
441 * SOFTIPLMASK(IPL_SOFTBIO) = 0x0000000c
442 * SOFTIPLMASK(IPL_SOFTNET) = 0x00000008
443 * SOFTIPLMASK(IPL_SOFTSERIAL) = 0x00000000
444 */
445 #define SOFTIPLMASK(ipl) ((0x0f << (ipl)) & 0x0f)
446
447 void softint_switch(lwp_t *, int);
448
449 void
450 softint_trigger(uintptr_t mask)
451 {
452 curcpu()->ci_softints |= mask;
453 }
454
/*
 * softint_init_md:
 *
 *	Machine-dependent part of soft interrupt initialization:
 *	record the lwp that services "level" on its CPU, and hand the
 *	MI code a machdep cookie that is the single ci_softints bit
 *	for that level (computed via the SOFTINT2IPL table above).
 */
void
softint_init_md(lwp_t *l, u_int level, uintptr_t *machdep)
{
	lwp_t ** lp = &l->l_cpu->ci_softlwps[level];
	KASSERT(*lp == NULL || *lp == l);
	*lp = l;
	*machdep = 1 << SOFTINT2IPL(level);
	/* Sanity: each SOFTINT_* level maps to its expected IPL bit. */
	KASSERT(level != SOFTINT_CLOCK || *machdep == (1 << (IPL_SOFTCLOCK - IPL_SOFTCLOCK)));
	KASSERT(level != SOFTINT_BIO || *machdep == (1 << (IPL_SOFTBIO - IPL_SOFTCLOCK)));
	KASSERT(level != SOFTINT_NET || *machdep == (1 << (IPL_SOFTNET - IPL_SOFTCLOCK)));
	KASSERT(level != SOFTINT_SERIAL || *machdep == (1 << (IPL_SOFTSERIAL - IPL_SOFTCLOCK)));
}
467
/*
 * dosoftints:
 *
 *	Dispatch all pending soft interrupts that are unmasked at the
 *	current IPL, highest priority first.  Runs at splhigh; the
 *	saved level is restored once no dispatchable softints remain.
 */
void
dosoftints(void)
{
	struct cpu_info * const ci = curcpu();
	const int opl = ci->ci_cpl;
	/* Bits for softint levels above the current IPL only. */
	const uint32_t softiplmask = SOFTIPLMASK(opl);

	splhigh();
	for (;;) {
		u_int softints = ci->ci_softints & softiplmask;
		KASSERT((softints != 0) == ((ci->ci_softints >> opl) != 0));
		KASSERT(opl == IPL_NONE || (softints & (1 << (opl - IPL_SOFTCLOCK))) == 0);
		if (softints == 0) {
			/* Nothing left to do: drop back to the saved IPL. */
			splx(opl);
			return;
		}
		/*
		 * Clear the pending bit for level "n", then switch to
		 * its handler lwp; re-scan from the top afterwards so
		 * higher-priority softints raised meanwhile run first.
		 */
#define DOSOFTINT(n) \
		if (ci->ci_softints & (1 << (IPL_SOFT ## n - IPL_SOFTCLOCK))) { \
			ci->ci_softints &= \
			    ~(1 << (IPL_SOFT ## n - IPL_SOFTCLOCK)); \
			softint_switch(ci->ci_softlwps[SOFTINT_ ## n], \
			    IPL_SOFT ## n); \
			continue; \
		}
		DOSOFTINT(SERIAL);
		DOSOFTINT(NET);
		DOSOFTINT(BIO);
		DOSOFTINT(CLOCK);
		panic("dosoftints wtf (softints=%u?, ipl=%d)", softints, opl);
	}
}
499 #endif /* !__HAVE_PIC_FAST_SOFTINTS */
500 #endif /* __HAVE_FAST_SOFTINTS */
501
502 #ifdef MODULAR
/*
 * Push any modules loaded by the boot loader.
 *
 * Nothing to do on this platform yet: the arm32 bootloaders do not
 * hand us preloaded modules, so this MD hook is an empty stub.
 */
void
module_init_md(void)
{
}
510 #endif /* MODULAR */
511
512 int
513 mm_md_physacc(paddr_t pa, vm_prot_t prot)
514 {
515
516 return (pa < ctob(physmem)) ? 0 : EFAULT;
517 }
518
519 #ifdef __HAVE_CPU_UAREA_ALLOC_IDLELWP
520 vaddr_t
521 cpu_uarea_alloc_idlelwp(struct cpu_info *ci)
522 {
523 const vaddr_t va = idlestack.pv_va + ci->ci_cpuid * USPACE;
524 // printf("%s: %s: va=%lx\n", __func__, ci->ci_data.cpu_name, va);
525 return va;
526 }
527 #endif
528
529 #ifdef MULTIPROCESSOR
/*
 * cpu_boot_secondary_processors:
 *
 *	Release the secondary CPUs: publish the set of attached CPUs
 *	as a 32-bit mask in arm_cpu_mbox.  NOTE(review): the secondaries
 *	presumably spin watching arm_cpu_mbox in locore — confirm
 *	against the MD bootstrap code, which is not visible here.
 */
void
cpu_boot_secondary_processors(void)
{
	uint32_t mbox;
	/* Squeeze the attached-CPU kcpuset into a 32-bit mailbox word. */
	kcpuset_export_u32(kcpuset_attached, &mbox, sizeof(mbox));
	atomic_swap_32(&arm_cpu_mbox, mbox);
	membar_producer();
#ifdef _ARM_ARCH_7
	/* SEV wakes any CPUs parked in WFE. */
	__asm __volatile("sev; sev; sev");
#endif
}
541
542 void
543 xc_send_ipi(struct cpu_info *ci)
544 {
545 KASSERT(kpreempt_disabled());
546 KASSERT(curcpu() != ci);
547
548
549 if (ci) {
550 /* Unicast, remote CPU */
551 printf("%s: -> %s", __func__, ci->ci_data.cpu_name);
552 intr_ipi_send(ci->ci_kcpuset, IPI_XCALL);
553 } else {
554 printf("%s: -> !%s", __func__, ci->ci_data.cpu_name);
555 /* Broadcast to all but ourselves */
556 kcpuset_t *kcp;
557 kcpuset_create(&kcp, (ci != NULL));
558 KASSERT(kcp != NULL);
559 kcpuset_copy(kcp, kcpuset_running);
560 kcpuset_clear(kcp, cpu_index(ci));
561 intr_ipi_send(kcp, IPI_XCALL);
562 kcpuset_destroy(kcp);
563 }
564 printf("\n");
565 }
566 #endif /* MULTIPROCESSOR */
567
568 #ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
569 bool
570 mm_md_direct_mapped_phys(paddr_t pa, vaddr_t *vap)
571 {
572 if (physical_start <= pa && pa < physical_end) {
573 *vap = KERNEL_BASE + (pa - physical_start);
574 return true;
575 }
576
577 return false;
578 }
579 #endif
580