arm32_machdep.c revision 1.10 1 /* $NetBSD: arm32_machdep.c,v 1.10 2002/01/12 13:37:55 chris Exp $ */
2
3 /*
4 * Copyright (c) 1994-1998 Mark Brinicombe.
5 * Copyright (c) 1994 Brini.
6 * All rights reserved.
7 *
8 * This code is derived from software written for Brini by Mark Brinicombe
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by Mark Brinicombe
21 * for the NetBSD Project.
22 * 4. The name of the company nor the name of the author may be used to
23 * endorse or promote products derived from this software without specific
24 * prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
27 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
28 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
29 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
30 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
31 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
32 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
37 *
38 * Machine dependant functions for kernel setup
39 *
40 * Created : 17/09/94
41 * Updated : 18/04/01 updated for new wscons
42 */
43
44 #include "opt_md.h"
45 #include "opt_pmap_debug.h"
46
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/reboot.h>
50 #include <sys/proc.h>
51 #include <sys/user.h>
52 #include <sys/kernel.h>
53 #include <sys/mbuf.h>
54 #include <sys/mount.h>
55 #include <sys/buf.h>
56 #include <sys/msgbuf.h>
57 #include <sys/device.h>
58 #include <uvm/uvm_extern.h>
59 #include <sys/sysctl.h>
60
61 #include <dev/cons.h>
62
63 #include <arm/arm32/katelib.h>
64 #include <arm/arm32/machdep.h>
65 #include <machine/bootconfig.h>
66
67 #include "opt_ipkdb.h"
68 #include "opt_mdsize.h"
69 #include "md.h"
70
struct vm_map *exec_map = NULL;		/* submap for exec arguments (set up in cpu_startup) */
struct vm_map *mb_map = NULL;		/* submap for mbuf clusters */
struct vm_map *phys_map = NULL;		/* submap for physio */

extern int physmem;			/* physical memory size, in pages */

#ifndef PMAP_STATIC_L1S
extern int max_processes;		/* clamped to [16, 255] in parse_mi_bootargs */
#endif	/* !PMAP_STATIC_L1S */
#if NMD > 0 && defined(MEMORY_DISK_HOOKS) && !defined(MINIROOTSIZE)
extern u_int memory_disc_size;		/* Memory disc size */
#endif	/* NMD && MEMORY_DISK_HOOKS && !MINIROOTSIZE */

pv_addr_t systempage;			/* PA/VA of the system (zero) page */
pv_addr_t kernelstack;			/* PA/VA of proc0's kernel stack */

/* the following is used externally (sysctl_hw) */
char	machine[] = MACHINE;		/* from <machine/param.h> */
char	machine_arch[] = MACHINE_ARCH;	/* from <machine/param.h> */

/* Our exported CPU info; we can have only one. */
struct cpu_info cpu_info_store;

extern pt_entry_t msgbufpte;		/* PTE reserved for the message buffer */
caddr_t	msgbufaddr;			/* kernel VA the msgbuf is mapped at */
extern paddr_t msgbufphys;		/* PA of the msgbuf; set up during the secondary boot strap */

int kernel_debug = 0;			/* toggled via the CPU_DEBUG sysctl */

struct user *proc0paddr;		/* u-area of proc0; assigned in cpu_startup */

char *booted_kernel;			/* kernel image name, exported via CPU_BOOTED_KERNEL */


/* Prototypes */

void map_section	__P((vaddr_t pt, vaddr_t va, paddr_t pa,
			     int cacheable));
void map_pagetable	__P((vaddr_t pt, vaddr_t va, paddr_t pa));
void map_entry	__P((vaddr_t pt, vaddr_t va, paddr_t pa));
void map_entry_nc	__P((vaddr_t pt, vaddr_t va, paddr_t pa));
void map_entry_ro	__P((vaddr_t pt, vaddr_t va, paddr_t pa));

u_long strtoul	__P((const char *s, char **ptr, int base));
void data_abort_handler	__P((trapframe_t *frame));
void prefetch_abort_handler	__P((trapframe_t *frame));
void zero_page_readonly	__P((void));
void zero_page_readwrite	__P((void));
extern void configure	__P((void));
120
121 /*
122 * Debug function just to park the CPU
123 */
124
/*
 * halt:
 *
 *	Park the CPU permanently: sleep it in a tight loop and
 *	never return.  Debug aid only.
 */
void
halt()
{
	for (;;)
		cpu_sleep(0);
}
131
132
133 /* Sync the discs and unmount the filesystems */
134
135 void
136 bootsync(void)
137 {
138 static int bootsyncdone = 0;
139
140 if (bootsyncdone) return;
141
142 bootsyncdone = 1;
143
144 /* Make sure we can still manage to do things */
145 if (GetCPSR() & I32_bit) {
146 /*
147 * If we get here then boot has been called without RB_NOSYNC
148 * and interrupts were disabled. This means the boot() call
149 * did not come from a user process e.g. shutdown, but must
150 * have come from somewhere in the kernel.
151 */
152 IRQenable;
153 printf("Warning IRQ's disabled during boot()\n");
154 }
155
156 vfs_shutdown();
157 }
158
159 /*
160 * A few functions that are used to help construct the page tables
161 * during the bootstrap process.
162 */
163
164 void
165 map_section(pagetable, va, pa, cacheable)
166 vaddr_t pagetable;
167 vaddr_t va;
168 paddr_t pa;
169 int cacheable;
170 {
171 #ifdef DIAGNOSTIC
172 if (((va | pa) & (L1_SEC_SIZE - 1)) != 0)
173 panic("initarm: Cannot allocate 1MB section on non 1MB boundry\n");
174 #endif /* DIAGNOSTIC */
175
176 if (cacheable)
177 ((u_int *)pagetable)[(va >> PDSHIFT)] =
178 L1_SEC((pa & PD_MASK), pte_cache_mode);
179 else
180 ((u_int *)pagetable)[(va >> PDSHIFT)] =
181 L1_SEC((pa & PD_MASK), 0);
182 }
183
184
185 void
186 map_pagetable(pagetable, va, pa)
187 vaddr_t pagetable;
188 vaddr_t va;
189 paddr_t pa;
190 {
191 #ifdef DIAGNOSTIC
192 if ((pa & 0xc00) != 0)
193 panic("pagetables should be group allocated on pageboundry");
194 #endif /* DIAGNOSTIC */
195
196 ((u_int *)pagetable)[(va >> PDSHIFT) + 0] =
197 L1_PTE((pa & PG_FRAME) + 0x000);
198 ((u_int *)pagetable)[(va >> PDSHIFT) + 1] =
199 L1_PTE((pa & PG_FRAME) + 0x400);
200 ((u_int *)pagetable)[(va >> PDSHIFT) + 2] =
201 L1_PTE((pa & PG_FRAME) + 0x800);
202 ((u_int *)pagetable)[(va >> PDSHIFT) + 3] =
203 L1_PTE((pa & PG_FRAME) + 0xc00);
204 }
205
/* cats kernels have a 2nd l2 pt, so the range is bigger hence the 0x7ff etc */
/*
 * map_chunk:
 *
 *	Map `size' bytes from pa to va during bootstrap, greedily
 *	choosing the largest mapping unit that fits at each step:
 *	1MB sections (only if an L1 table was supplied), then 64KB
 *	large pages, then single small pages.  `acc' and `flg' are the
 *	access-permission and flag bits for the descriptors.
 *
 *	Returns the size actually mapped, i.e. `size' rounded up to a
 *	whole number of pages.
 */
vsize_t
map_chunk(pd, pt, va, pa, size, acc, flg)
	vaddr_t pd;		/* L1 page table, or 0 to disallow sections */
	vaddr_t pt;		/* L2 page table covering va */
	vaddr_t va;
	paddr_t pa;
	vsize_t size;
	u_int acc;
	u_int flg;
{
	pd_entry_t *l1pt = (pd_entry_t *)pd;
	pt_entry_t *l2pt = (pt_entry_t *)pt;
	vsize_t remain;
	u_int loop;

	/* Round the request up to a whole number of pages. */
	remain = (size + (NBPG - 1)) & ~(NBPG - 1);
#ifdef VERBOSE_INIT_ARM
	printf("map_chunk: pa=%lx va=%lx sz=%lx rem=%lx acc=%x flg=%x\n",
	    pa, va, size, remain, acc, flg);
	printf("map_chunk: ");
#endif
	size = remain;

	while (remain > 0) {
		/* Can we do a section mapping ? */
		if (l1pt && !((pa | va) & (L1_SEC_SIZE - 1))
		    && remain >= L1_SEC_SIZE) {
#ifdef VERBOSE_INIT_ARM
			printf("S");
#endif
			l1pt[(va >> PDSHIFT)] = L1_SECPTE(pa, acc, flg);
			va += L1_SEC_SIZE;
			pa += L1_SEC_SIZE;
			remain -= L1_SEC_SIZE;
		} else
		/* Can we do a large page mapping ? */
		if (!((pa | va) & (L2_LPAGE_SIZE - 1))
		    && (remain >= L2_LPAGE_SIZE)) {
#ifdef VERBOSE_INIT_ARM
			printf("L");
#endif
			/* A large page needs its PTE replicated in all 16 slots. */
			for (loop = 0; loop < 16; ++loop)
#ifndef cats
				l2pt[((va >> PGSHIFT) & 0x3f0) + loop] =
				    L2_LPTE(pa, acc, flg);
#else
				l2pt[((va >> PGSHIFT) & 0x7f0) + loop] =
				    L2_LPTE(pa, acc, flg);
#endif
			va += L2_LPAGE_SIZE;
			pa += L2_LPAGE_SIZE;
			remain -= L2_LPAGE_SIZE;
		} else
		/* All we can do is a small page mapping */
		{
#ifdef VERBOSE_INIT_ARM
			printf("P");
#endif
#ifndef cats
			l2pt[((va >> PGSHIFT) & 0x3ff)] = L2_SPTE(pa, acc, flg);
#else
			l2pt[((va >> PGSHIFT) & 0x7ff)] = L2_SPTE(pa, acc, flg);
#endif
			va += NBPG;
			pa += NBPG;
			remain -= NBPG;
		}
	}
#ifdef VERBOSE_INIT_ARM
	printf("\n");
#endif
	return(size);
}
280
281 /* cats versions have larger 2 l2pt's next to each other */
282 void
283 map_entry(pagetable, va, pa)
284 vaddr_t pagetable;
285 vaddr_t va;
286 paddr_t pa;
287 {
288 #ifndef cats
289 ((pt_entry_t *)pagetable)[((va >> PGSHIFT) & 0x000003ff)] =
290 L2_PTE((pa & PG_FRAME), AP_KRW);
291 #else
292 ((pt_entry_t *)pagetable)[((va >> PGSHIFT) & 0x000007ff)] =
293 L2_PTE((pa & PG_FRAME), AP_KRW);
294 #endif
295 }
296
297
298 void
299 map_entry_nc(pagetable, va, pa)
300 vaddr_t pagetable;
301 vaddr_t va;
302 paddr_t pa;
303 {
304 #ifndef cats
305 ((pt_entry_t *)pagetable)[((va >> PGSHIFT) & 0x000003ff)] =
306 L2_PTE_NC_NB((pa & PG_FRAME), AP_KRW);
307 #else
308 ((pt_entry_t *)pagetable)[((va >> PGSHIFT) & 0x000007ff)] =
309 L2_PTE_NC_NB((pa & PG_FRAME), AP_KRW);
310 #endif
311 }
312
313
314 void
315 map_entry_ro(pagetable, va, pa)
316 vaddr_t pagetable;
317 vaddr_t va;
318 paddr_t pa;
319 {
320 #ifndef cats
321 ((pt_entry_t *)pagetable)[((va >> PGSHIFT) & 0x000003ff)] =
322 L2_PTE((pa & PG_FRAME), AP_KR);
323 #else
324 ((pt_entry_t *)pagetable)[((va >> PGSHIFT) & 0x000007ff)] =
325 L2_PTE((pa & PG_FRAME), AP_KR);
326 #endif
327 }
328
329
330 /*
331 * void cpu_startup(void)
332 *
333 * Machine dependant startup code.
334 *
335 */
336
/*
 * cpu_startup:
 *
 *	Machine-dependent startup, run once the VM system is up:
 *	finish proc0 setup, configure the CPU control register and
 *	domains, map the message buffer, allocate the system tables
 *	and buffer cache, and create the exec/phys/mbuf submaps.
 *	Order matters throughout; do not reorder the steps.
 */
void
cpu_startup()
{
	int loop;
	paddr_t minaddr;
	paddr_t maxaddr;
	caddr_t sysbase;
	caddr_t size;
	vsize_t bufsize;
	int base, residual;
	char pbuf[9];

	/* proc0's u-area lives at the base of the kernel stack. */
	proc0paddr = (struct user *)kernelstack.pv_va;
	proc0.p_addr = proc0paddr;

	/* Set the cpu control register */
	cpu_setup(boot_args);

	/* All domains MUST be clients, permissions are VERY important */
	cpu_domains(DOMAIN_CLIENT);

	/* Lock down zero page */
	zero_page_readonly();

	/*
	 * Give pmap a chance to set up a few more things now the vm
	 * is initialised
	 */
	pmap_postinit();

	/*
	 * Initialize error message buffer (at end of core).
	 */

	/* msgbufphys was setup during the secondary boot strap */
	for (loop = 0; loop < btoc(MSGBUFSIZE); ++loop)
		pmap_kenter_pa((vaddr_t)msgbufaddr + loop * NBPG,
		    msgbufphys + loop * NBPG, VM_PROT_READ|VM_PROT_WRITE);
	pmap_update(pmap_kernel());
	initmsgbuf(msgbufaddr, round_page(MSGBUFSIZE));

	/*
	 * Identify ourselves for the msgbuf (everything printed earlier will
	 * not be buffered).
	 */
	printf(version);

	format_bytes(pbuf, sizeof(pbuf), arm_page_to_byte(physmem));
	printf("total memory = %s\n", pbuf);

	/*
	 * Find out how much space we need, allocate it,
	 * and then give everything true virtual addresses.
	 */
	/* allocsys(NULL, ...) only sizes; the second call fills in. */
	size = allocsys(NULL, NULL);
	sysbase = (caddr_t)uvm_km_zalloc(kernel_map, round_page((vaddr_t)size));
	if (sysbase == 0)
		panic(
		    "cpu_startup: no room for system tables; %d bytes required",
		    (u_int)size);
	/* The two allocsys() passes must agree on the total size. */
	if ((caddr_t)((allocsys(sysbase, NULL) - sysbase)) != size)
		panic("cpu_startup: system table size inconsistency");

	/*
	 * Now allocate buffers proper.  They are different than the above
	 * in that they usually occupy more virtual memory than physical.
	 */
	bufsize = MAXBSIZE * nbuf;
	if (uvm_map(kernel_map, (vaddr_t *)&buffers, round_page(bufsize),
		    NULL, UVM_UNKNOWN_OFFSET, 0,
		    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
				UVM_ADV_NORMAL, 0)) != 0)
		panic("cpu_startup: cannot allocate UVM space for buffers");
	minaddr = (vaddr_t)buffers;
	if ((bufpages / nbuf) >= btoc(MAXBSIZE)) {
		/* don't want to alloc more physical mem than needed */
		bufpages = btoc(MAXBSIZE) * nbuf;
	}

	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	for (loop = 0; loop < nbuf; ++loop) {
		vsize_t curbufsize;
		vaddr_t curbuf;
		struct vm_page *pg;

		/*
		 * Each buffer has MAXBSIZE bytes of VM space allocated.  Of
		 * that MAXBSIZE space, we allocate and map (base+1) pages
		 * for the first "residual" buffers, and then we allocate
		 * "base" pages for the rest.
		 */
		curbuf = (vaddr_t) buffers + (loop * MAXBSIZE);
		curbufsize = NBPG * ((loop < residual) ? (base+1) : base);

		while (curbufsize) {
			pg = uvm_pagealloc(NULL, 0, NULL, 0);
			if (pg == NULL)
				panic("cpu_startup: not enough memory for buffer cache");
			pmap_kenter_pa(curbuf, VM_PAGE_TO_PHYS(pg),
			    VM_PROT_READ|VM_PROT_WRITE);
			curbuf += PAGE_SIZE;
			curbufsize -= PAGE_SIZE;
		}
	}
	pmap_update(pmap_kernel());

	/*
	 * Allocate a submap for exec arguments.  This map effectively
	 * limits the number of processes exec'ing at any time.
	 */
	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				   16*NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);

	/*
	 * Allocate a submap for physio
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				   VM_PHYS_SIZE, 0, FALSE, NULL);

	/*
	 * Finally, allocate mbuf cluster submap.
	 */
	mb_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				 nmbclusters * mclbytes, VM_MAP_INTRSAFE,
				 FALSE, NULL);

	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
	printf("avail memory = %s\n", pbuf);
	format_bytes(pbuf, sizeof(pbuf), bufpages * NBPG);
	printf("using %d buffers containing %s of memory\n", nbuf, pbuf);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();

	/* Finish initialising proc0's pcb: stack pointers and page dir. */
	curpcb = &proc0.p_addr->u_pcb;
	curpcb->pcb_flags = 0;
	curpcb->pcb_un.un_32.pcb32_und_sp = (u_int)proc0.p_addr +
	    USPACE_UNDEF_STACK_TOP;
	curpcb->pcb_un.un_32.pcb32_sp = (u_int)proc0.p_addr +
	    USPACE_SVC_STACK_TOP;
	(void) pmap_extract(pmap_kernel(), (vaddr_t)(pmap_kernel())->pm_pdir,
	    (paddr_t *)&curpcb->pcb_pagedir);

	/* Trapframe sits just below the top of the SVC stack. */
	curpcb->pcb_tf = (struct trapframe *)curpcb->pcb_un.un_32.pcb32_sp - 1;
}
485
486 /*
487 * Modify the current mapping for zero page to make it read only
488 *
489 * This routine is only used until things start forking. Then new
490 * system pages are mapped read only in pmap_enter().
491 */
492
493 void
494 zero_page_readonly()
495 {
496 WriteWord(PROCESS_PAGE_TBLS_BASE + 0,
497 L2_PTE((systempage.pv_pa & PG_FRAME), AP_KR));
498 cpu_tlb_flushID_SE(0x00000000);
499 }
500
501
502 /*
503 * Modify the current mapping for zero page to make it read/write
504 *
505 * This routine is only used until things start forking. Then system
506 * pages belonging to user processes are never made writable.
507 */
508
509 void
510 zero_page_readwrite()
511 {
512 WriteWord(PROCESS_PAGE_TBLS_BASE + 0,
513 L2_PTE((systempage.pv_pa & PG_FRAME), AP_KRW));
514 cpu_tlb_flushID_SE(0x00000000);
515 }
516
517
518 /*
519 * machine dependent system variables.
520 */
521
522 int
523 cpu_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
524 int *name;
525 u_int namelen;
526 void *oldp;
527 size_t *oldlenp;
528 void *newp;
529 size_t newlen;
530 struct proc *p;
531 {
532 /* all sysctl names at this level are terminal */
533 if (namelen != 1)
534 return (ENOTDIR); /* overloaded */
535
536 switch (name[0]) {
537 case CPU_DEBUG:
538 return(sysctl_int(oldp, oldlenp, newp, newlen, &kernel_debug));
539
540 case CPU_BOOTED_DEVICE:
541 if (booted_device != NULL)
542 return (sysctl_rdstring(oldp, oldlenp, newp,
543 booted_device->dv_xname));
544 return (EOPNOTSUPP);
545
546 case CPU_CONSDEV: {
547 dev_t consdev;
548 if (cn_tab != NULL)
549 consdev = cn_tab->cn_dev;
550 else
551 consdev = NODEV;
552 return (sysctl_rdstruct(oldp, oldlenp, newp, &consdev,
553 sizeof consdev));
554 }
555 case CPU_BOOTED_KERNEL: {
556 if (booted_kernel != NULL && booted_kernel[0] != '\0')
557 return sysctl_rdstring(oldp, oldlenp, newp,
558 booted_kernel);
559 return (EOPNOTSUPP);
560 }
561
562 default:
563 return (EOPNOTSUPP);
564 }
565 /* NOTREACHED */
566 }
567
/*
 * parse_mi_bootargs:
 *
 *	Scan the boot argument string for the machine-independent
 *	boot options and set boothowto (and a few tunables)
 *	accordingly.  Each option has a long and a short spelling.
 */
void
parse_mi_bootargs(args)
	char *args;
{
	int integer;

	/* "single"/"-s": boot to single-user mode. */
	if (get_bootconf_option(args, "single", BOOTOPT_TYPE_BOOLEAN, &integer)
	    || get_bootconf_option(args, "-s", BOOTOPT_TYPE_BOOLEAN, &integer))
		if (integer)
			boothowto |= RB_SINGLE;
	/* "kdb"/"-k": enter the kernel debugger. */
	if (get_bootconf_option(args, "kdb", BOOTOPT_TYPE_BOOLEAN, &integer)
	    || get_bootconf_option(args, "-k", BOOTOPT_TYPE_BOOLEAN, &integer))
		if (integer)
			boothowto |= RB_KDB;
	/* "ask"/"-a": ask for the root device. */
	if (get_bootconf_option(args, "ask", BOOTOPT_TYPE_BOOLEAN, &integer)
	    || get_bootconf_option(args, "-a", BOOTOPT_TYPE_BOOLEAN, &integer))
		if (integer)
			boothowto |= RB_ASKNAME;

#ifdef PMAP_DEBUG
	/* "pmapdebug=N": set the pmap debug level. */
	if (get_bootconf_option(args, "pmapdebug", BOOTOPT_TYPE_INT, &integer)) {
		pmap_debug_level = integer;
		pmap_debug(pmap_debug_level);
	}
#endif	/* PMAP_DEBUG */

/*	if (get_bootconf_option(args, "nbuf", BOOTOPT_TYPE_INT, &integer))
		bufpages = integer;*/

#ifndef PMAP_STATIC_L1S
	/* "maxproc=N": clamp the process limit to [16, 255]. */
	if (get_bootconf_option(args, "maxproc", BOOTOPT_TYPE_INT, &integer)) {
		max_processes = integer;
		if (max_processes < 16)
			max_processes = 16;
		/* Limit is PDSIZE * (max_processes + 1) <= 4MB */
		if (max_processes > 255)
			max_processes = 255;
	}
#endif	/* !PMAP_STATIC_L1S */
#if NMD > 0 && defined(MEMORY_DISK_HOOKS) && !defined(MINIROOTSIZE)
	/* "memorydisc"/"memorydisk"=N (KB): clamp to [32KB, 2MB]. */
	if (get_bootconf_option(args, "memorydisc", BOOTOPT_TYPE_INT, &integer)
	    || get_bootconf_option(args, "memorydisk", BOOTOPT_TYPE_INT, &integer)) {
		memory_disc_size = integer;
		memory_disc_size *= 1024;
		if (memory_disc_size < 32*1024)
			memory_disc_size = 32*1024;
		if (memory_disc_size > 2048*1024)
			memory_disc_size = 2048*1024;
	}
#endif	/* NMD && MEMORY_DISK_HOOKS && !MINIROOTSIZE */

	/* "quiet"/"-q" and "verbose"/"-v": autoconf message verbosity. */
	if (get_bootconf_option(args, "quiet", BOOTOPT_TYPE_BOOLEAN, &integer)
	    || get_bootconf_option(args, "-q", BOOTOPT_TYPE_BOOLEAN, &integer))
		if (integer)
			boothowto |= AB_QUIET;
	if (get_bootconf_option(args, "verbose", BOOTOPT_TYPE_BOOLEAN, &integer)
	    || get_bootconf_option(args, "-v", BOOTOPT_TYPE_BOOLEAN, &integer))
		if (integer)
			boothowto |= AB_VERBOSE;
}
628
629 /* End of machdep.c */
630