/*	$NetBSD: pmap_bootstrap.c,v 1.25 2004/08/28 19:11:19 thorpej Exp $	*/

/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap_bootstrap.c	8.1 (Berkeley) 6/10/93
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap_bootstrap.c,v 1.25 2004/08/28 19:11:19 thorpej Exp $");

#include <sys/param.h>
#include <sys/proc.h>

#include <machine/frame.h>
#include <machine/cpu.h>
#include <machine/hp300spu.h>
#include <machine/vmparam.h>
#include <machine/pte.h>

#include <hp300/hp300/clockreg.h>

#include <uvm/uvm_extern.h>

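/*
 * The kernel is linked at its virtual address, but pmap_bootstrap()
 * runs with the MMU off, so a global can only be touched through its
 * physical address: link-time address plus `firstpa'.  RELOC() does
 * that arithmetic; e.g. RELOC(Sysptsize, int) reads (or writes)
 * Sysptsize in place.
 */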
#define	RELOC(v, t)	*((t*)((u_int)&(v) + firstpa))

extern char *etext;
extern int Sysptsize;
extern char *extiobase, *proc0paddr;
extern st_entry_t *Sysseg;
extern pt_entry_t *Sysptmap, *Sysmap;
extern vaddr_t CLKbase, MMUbase;
extern paddr_t bootinfo_pa;
extern vaddr_t bootinfo_va;

extern int maxmem, physmem;
extern paddr_t avail_start, avail_end;
extern vaddr_t virtual_avail, virtual_end;
extern vsize_t mem_size;
extern int protection_codes[];
#ifdef M68K_MMU_HP
extern int pmap_aliasmask;
#endif
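
/*
 * The externs above are defined elsewhere in the kernel; most of
 * them are filled in below, always through RELOC() since the MMU
 * is still off while we run.
 */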

void	pmap_bootstrap __P((paddr_t, paddr_t));

/*
 * Special purpose kernel virtual addresses, used for mapping
 * physical pages for a variety of temporary or permanent purposes:
 *
 *	CADDR1, CADDR2:	pmap zero/copy operations
 *	vmmap:		/dev/mem, crash dumps, parity error checking
 *	ledbase:	SPU LEDs
 *	msgbufaddr:	kernel message buffer
 */
caddr_t		CADDR1, CADDR2, vmmap, ledbase;
extern caddr_t	msgbufaddr;

/*
 * Bootstrap the VM system.
 *
 * Called with MMU off so we must relocate all global references by `firstpa'
 * (don't call any functions here!).  `nextpa' is the first available physical
 * memory address; everything we allocate is carved out from that point on,
 * and the results are recorded in the relocated globals (e.g. avail_start).
 * The MMU is still off when we return.
 *
 * XXX assumes sizeof(u_int) == sizeof(pt_entry_t), which is why the table
 * XXX walks below get away with plain u_int pointers.
 * XXX a PIC compiler would make this much easier.
 */
void
pmap_bootstrap(paddr_t nextpa, paddr_t firstpa)
{
	paddr_t kstpa, kptpa, iiopa, eiopa, kptmpa, lkptpa, p0upa;
	u_int nptpages, kstsize;
	st_entry_t protoste, *ste;
	pt_entry_t protopte, *pte, *epte;

	/*
	 * Calculate important physical addresses:
	 *
	 *	kstpa		kernel segment table	1 page (!040)
	 *						N pages (040)
	 *
	 *	kptpa		statically allocated
	 *			kernel PT pages		Sysptsize+ pages
	 *
	 *	iiopa		internal IO space
	 *			PT pages		IIOMAPSIZE pages
	 *
	 *	eiopa		external IO space
	 *			PT pages		EIOMAPSIZE pages
	 *
	 * [ Sysptsize is the number of pages of PT, IIOMAPSIZE and
	 *   EIOMAPSIZE are the number of PTEs, hence we need to round
	 *   the total to a page boundary with IO maps at the end. ]
	 *
	 *	kptmpa		kernel PT map		1 page
	 *
	 *	lkptpa		last kernel PT page	1 page
	 *
	 *	p0upa		proc 0 u-area		UPAGES pages
	 *
	 * The KVA corresponding to any of these PAs is:
	 *	(PA - firstpa + KERNBASE).
	 */
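	/*
	 * On the 68040 the segment table must hold room for MAXKL2SIZE
	 * blocks of SG4_LEV2SIZE descriptors (block 0 doubling as the
	 * level 1 table), and NPTEPG/SG4_LEV2SIZE such blocks fit in
	 * one page; hence the kstsize computation below.
	 */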
	if (RELOC(mmutype, int) == MMU_68040)
		kstsize = MAXKL2SIZE / (NPTEPG/SG4_LEV2SIZE);
	else
		kstsize = 1;
	kstpa = nextpa;
	nextpa += kstsize * PAGE_SIZE;
	kptpa = nextpa;
	nptpages = RELOC(Sysptsize, int) +
		(IIOMAPSIZE + EIOMAPSIZE + NPTEPG - 1) / NPTEPG;
	nextpa += nptpages * PAGE_SIZE;
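	/*
	 * iiopa and eiopa are the physical addresses of the PTEs,
	 * inside the PT pages just allocated, that will map the
	 * internal and external IO spaces; they are offsets into
	 * those pages, not page-aligned allocations of their own.
	 */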
	eiopa = nextpa - EIOMAPSIZE * sizeof(pt_entry_t);
	iiopa = eiopa - IIOMAPSIZE * sizeof(pt_entry_t);
	kptmpa = nextpa;
	nextpa += PAGE_SIZE;
	lkptpa = nextpa;
	nextpa += PAGE_SIZE;
	p0upa = nextpa;
	nextpa += USPACE;

	/*
	 * Initialize segment table and kernel page table map.
	 *
	 * On 68030s and earlier MMUs the two are identical except for
	 * the valid bits so both are initialized with essentially the
	 * same values.  On the 68040, which has a mandatory 3-level
	 * structure, the segment table holds the level 1 table and part
	 * (or all) of the level 2 table and hence is considerably
	 * different.  Here the first level consists of 128 descriptors
	 * (512 bytes) each mapping 32mb of address space.  Each of these
	 * points to blocks of 128 second level descriptors (512 bytes)
	 * each mapping 256kb.  Note that there may be additional "segment
	 * table" pages depending on how large MAXKL2SIZE is.
	 *
	 * Portions of the last segment of KVA space (0xFFF00000 -
	 * 0xFFFFFFFF) are mapped for a couple of purposes.  0xFFF00000
	 * for UPAGES is used for mapping the current process u-area
	 * (u + kernel stack).  The very last page (0xFFFFF000) is mapped
	 * to the last physical page of RAM to give us a region in which
	 * PA == VA.  We use the first part of this page for enabling
	 * and disabling mapping.  The last part of this page also contains
	 * info left by the boot ROM.
	 *
	 * XXX cramming two levels of mapping into the single "segment"
	 * table on the 68040 is intended as a temporary hack to get things
	 * working.  The 224mb of address space that this allows will most
	 * likely be insufficient in the future (at least for the kernel).
	 */
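	/*
	 * (The 224mb figure above, assuming a single 4k segment table
	 * page: the page holds NPTEPG (1024) descriptors, SG4_LEV1SIZE
	 * (128) of which form the level 1 table, leaving 896 level 2
	 * descriptors each mapping 256kb; 896 * 256kb = 224mb.)
	 */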
	if (RELOC(mmutype, int) == MMU_68040) {
		int num;

		/*
		 * First invalidate the entire "segment table" pages
		 * (levels 1 and 2 have the same "invalid" value).
		 */
		pte = (u_int *)kstpa;
		epte = &pte[kstsize * NPTEPG];
		while (pte < epte)
			*pte++ = SG_NV;
		/*
		 * Initialize level 2 descriptors (which immediately
		 * follow the level 1 table).  We need:
		 *	NPTEPG / SG4_LEV3SIZE
		 * level 2 descriptors to map each of the nptpages+1
		 * pages of PTEs.  Note that we set the "used" bit
		 * now to save the HW the expense of doing it.
		 */
		num = (nptpages + 1) * (NPTEPG / SG4_LEV3SIZE);
		pte = &((u_int *)kstpa)[SG4_LEV1SIZE];
		epte = &pte[num];
		protoste = kptpa | SG_U | SG_RW | SG_V;
		while (pte < epte) {
			*pte++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}
		/*
		 * Initialize level 1 descriptors.  We need:
		 *	roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE
		 * level 1 descriptors to map the `num' level 2's.
		 */
		pte = (u_int *)kstpa;
		epte = &pte[roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE];
		protoste = (u_int)&pte[SG4_LEV1SIZE] | SG_U | SG_RW | SG_V;
		while (pte < epte) {
			*pte++ = protoste;
			protoste += (SG4_LEV2SIZE * sizeof(st_entry_t));
		}
		/*
		 * Initialize the final level 1 descriptor to map the last
		 * block of level 2 descriptors.
		 */
		ste = &((u_int *)kstpa)[SG4_LEV1SIZE-1];
		pte = &((u_int *)kstpa)[kstsize*NPTEPG - SG4_LEV2SIZE];
		*ste = (u_int)pte | SG_U | SG_RW | SG_V;
		/*
		 * Now initialize the final portion of that block of
		 * descriptors to map the "last PT page".
		 */
		pte = &((u_int *)kstpa)[kstsize*NPTEPG - NPTEPG/SG4_LEV3SIZE];
		epte = &pte[NPTEPG/SG4_LEV3SIZE];
		protoste = lkptpa | SG_U | SG_RW | SG_V;
		while (pte < epte) {
			*pte++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}
		/*
		 * Initialize Sysptmap
		 */
		pte = (u_int *)kptmpa;
		epte = &pte[nptpages+1];
		protopte = kptpa | PG_RW | PG_CI | PG_V;
		while (pte < epte) {
			*pte++ = protopte;
			protopte += PAGE_SIZE;
		}
		/*
		 * Invalidate all but the last remaining entry.
		 */
		epte = &((u_int *)kptmpa)[NPTEPG-1];
		while (pte < epte) {
			*pte++ = PG_NV;
		}
		/*
		 * Initialize the last to point to the page
		 * table page allocated earlier.
		 */
		*pte = lkptpa | PG_RW | PG_CI | PG_V;
	} else {
		/*
		 * Map the page table pages in both the HW segment table
		 * and the software Sysptmap.  Note that Sysptmap is also
		 * considered a PT page hence the +1.
		 */
		ste = (u_int *)kstpa;
		pte = (u_int *)kptmpa;
		epte = &pte[nptpages+1];
		protoste = kptpa | SG_RW | SG_V;
		protopte = kptpa | PG_RW | PG_CI | PG_V;
		while (pte < epte) {
			*ste++ = protoste;
			*pte++ = protopte;
			protoste += PAGE_SIZE;
			protopte += PAGE_SIZE;
		}
		/*
		 * Invalidate all but the last remaining entries in both.
		 */
		epte = &((u_int *)kptmpa)[NPTEPG-1];
		while (pte < epte) {
			*ste++ = SG_NV;
			*pte++ = PG_NV;
		}
		/*
		 * Initialize the last to point to the page
		 * table page allocated earlier.
		 */
		*ste = lkptpa | SG_RW | SG_V;
		*pte = lkptpa | PG_RW | PG_CI | PG_V;
	}
	/*
	 * Invalidate all but the final entry in the last kernel PT page
	 * (u-area PTEs will be validated later).  The final entry maps
	 * the last page of physical memory.
	 */
	pte = (u_int *)lkptpa;
	epte = &pte[NPTEPG-1];
	while (pte < epte)
		*pte++ = PG_NV;
	*pte = MAXADDR | PG_RW | PG_CI | PG_V;
	/*
	 * Initialize kernel page table.
	 * Start by invalidating the `nptpages' that we have allocated.
	 */
	pte = (u_int *)kptpa;
	epte = &pte[nptpages * NPTEPG];
	while (pte < epte)
		*pte++ = PG_NV;

	/*
	 * The first page of kernel text is zero-filled in locore.s,
	 * and is not mapped (at VA 0).  The boot loader places the
	 * bootinfo there after the kernel is loaded.  Remember
	 * the physical address; we'll map it to a virtual address
	 * later.
	 */
	RELOC(bootinfo_pa, paddr_t) = firstpa;

	/*
	 * Validate PTEs for kernel text (RO).  The first page
	 * of kernel text remains invalid; see locore.s
	 */
	pte = &((u_int *)kptpa)[m68k_btop(KERNBASE + PAGE_SIZE)];
	epte = &pte[m68k_btop(m68k_trunc_page(&etext))];
	protopte = (firstpa + PAGE_SIZE) | PG_RO | PG_V;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}
	/*
	 * Validate PTEs for kernel data/bss, dynamic data allocated
	 * by us so far (nextpa - firstpa bytes), and pages for proc0
	 * u-area and page table allocated below (RW).
	 */
	epte = &((u_int *)kptpa)[m68k_btop(nextpa - firstpa)];
	protopte = (protopte & ~PG_PROT) | PG_RW;
	/*
	 * Enable copy-back caching of data pages
	 */
	if (RELOC(mmutype, int) == MMU_68040)
		protopte |= PG_CCB;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}
	/*
	 * Finally, validate the internal IO space PTEs (RW+CI).
	 * We do this here since the 320/350 MMU registers (also
	 * used, but to a lesser extent, on other models) are mapped
	 * in this range and it would be nice to be able to access
	 * them after the MMU is turned on.
	 */
	pte = (u_int *)iiopa;
	epte = (u_int *)eiopa;
	protopte = INTIOBASE | PG_RW | PG_CI | PG_V;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}

	/*
	 * Calculate important exported kernel virtual addresses
	 */
	/*
	 * Sysseg: base of kernel segment table
	 */
	RELOC(Sysseg, st_entry_t *) =
		(st_entry_t *)(kstpa - firstpa);
	/*
	 * Sysptmap: base of kernel page table map
	 */
	RELOC(Sysptmap, pt_entry_t *) =
		(pt_entry_t *)(kptmpa - firstpa);
	/*
	 * Sysmap: kernel page table (as mapped through Sysptmap)
	 * Immediately follows `nptpages' of static kernel page table.
	 */
	RELOC(Sysmap, pt_entry_t *) =
		(pt_entry_t *)m68k_ptob(nptpages * NPTEPG);
	/*
	 * intiobase, intiolimit: base and end of internal (DIO) IO space.
	 * IIOMAPSIZE pages prior to external IO space at end of static
	 * kernel page table.
	 */
	RELOC(intiobase, char *) =
		(char *)m68k_ptob(nptpages*NPTEPG - (IIOMAPSIZE+EIOMAPSIZE));
	RELOC(intiolimit, char *) =
		(char *)m68k_ptob(nptpages*NPTEPG - EIOMAPSIZE);
	/*
	 * extiobase: base of external (DIO-II) IO space.
	 * EIOMAPSIZE pages at the end of the static kernel page table.
	 */
	RELOC(extiobase, char *) =
		(char *)m68k_ptob(nptpages*NPTEPG - EIOMAPSIZE);
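	/*
	 * (The m68k_ptob() expressions above work because a PTE's
	 * index within the static kernel page table is exactly the
	 * virtual page number it maps; the IO PTEs sit at the very
	 * end of that table, so their VAs are the last pages it
	 * covers.)
	 */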
	/*
	 * CLKbase, MMUbase: important registers in internal IO space
	 * accessed from assembly language.
	 */
	RELOC(CLKbase, vaddr_t) =
		(vaddr_t)RELOC(intiobase, char *) + CLKBASE;
	RELOC(MMUbase, vaddr_t) =
		(vaddr_t)RELOC(intiobase, char *) + MMUBASE;

	/*
	 * Setup u-area for process 0.
	 */
	/*
	 * Zero the u-area.
	 * NOTE: `pte' and `epte' aren't PTEs here.
	 */
	pte = (u_int *)p0upa;
	epte = (u_int *)(p0upa + USPACE);
	while (pte < epte)
		*pte++ = 0;
	/*
	 * Remember the u-area address so it can be loaded in the
	 * proc struct p_addr field later.
	 */
	RELOC(proc0paddr, char *) = (char *)(p0upa - firstpa);

	/*
	 * VM data structures are now initialized; set up data for
	 * the pmap module.
	 *
	 * Note about avail_end: msgbuf is initialized just after
	 * avail_end in machdep.c.  Since the last page is used
	 * for rebooting the system (code is copied there and
	 * execution continues from the copied code before the MMU
	 * is disabled), the msgbuf will get trounced between
	 * reboots if it's placed in the last physical page.
	 * To work around this, we move avail_end back one more
	 * page so the msgbuf can be preserved.
	 */
	RELOC(avail_start, paddr_t) = nextpa;
	RELOC(avail_end, paddr_t) = m68k_ptob(RELOC(maxmem, int)) -
		(m68k_round_page(MSGBUFSIZE) + m68k_ptob(1));
	RELOC(mem_size, vsize_t) = m68k_ptob(RELOC(physmem, int));
	RELOC(virtual_avail, vaddr_t) =
		VM_MIN_KERNEL_ADDRESS + (nextpa - firstpa);
	RELOC(virtual_end, vaddr_t) = VM_MAX_KERNEL_ADDRESS;

#ifdef M68K_MMU_HP
	/*
	 * Determine VA aliasing distance if any
	 */
	if (RELOC(ectype, int) == EC_VIRT) {
		if (RELOC(machineid, int) == HP_320)
			RELOC(pmap_aliasmask, int) = 0x3fff;	/* 16k */
		else if (RELOC(machineid, int) == HP_350)
			RELOC(pmap_aliasmask, int) = 0x7fff;	/* 32k */
	}
#endif

	/*
	 * Initialize protection array.
	 * XXX don't use a switch statement, it might produce an
	 * absolute "jmp" table.
	 */
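	/*
	 * VM_PROT_NONE is 0 and VM_PROT_READ/WRITE/EXECUTE are single
	 * bits, so each ORed constant below just spells out which
	 * index (0-7) the entry occupies.
	 */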
	{
		int *kp;

		kp = &RELOC(protection_codes, int);
		kp[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_NONE] = 0;
		kp[VM_PROT_READ|VM_PROT_NONE|VM_PROT_NONE] = PG_RO;
		kp[VM_PROT_READ|VM_PROT_NONE|VM_PROT_EXECUTE] = PG_RO;
		kp[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_EXECUTE] = PG_RO;
		kp[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_NONE] = PG_RW;
		kp[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
		kp[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_NONE] = PG_RW;
		kp[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
	}

	/*
	 * The kernel page and segment tables were allocated above;
	 * just initialize the kernel pmap's pointers to them.
	 */
	{
		struct pmap *kpm = &RELOC(kernel_pmap_store, struct pmap);

		kpm->pm_stab = RELOC(Sysseg, st_entry_t *);
		kpm->pm_ptab = RELOC(Sysmap, pt_entry_t *);
		simple_lock_init(&kpm->pm_lock);
		kpm->pm_count = 1;
		kpm->pm_stpa = (st_entry_t *)kstpa;
		/*
		 * For the 040 we also initialize the free level 2
		 * descriptor mask noting that we have used:
		 *	0:		level 1 table
		 *	1 to `num':	map page tables
		 *	MAXKL2SIZE-1:	maps last-page page table
		 */
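		/*
		 * pm_stfree has one bit per level 2 table, set while
		 * the table is free; l2tobm(n) selects the bit for
		 * table `n'.
		 */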
		if (RELOC(mmutype, int) == MMU_68040) {
			int num;

			kpm->pm_stfree = ~l2tobm(0);
			num = roundup((nptpages + 1) * (NPTEPG / SG4_LEV3SIZE),
				      SG4_LEV2SIZE) / SG4_LEV2SIZE;
			while (num)
				kpm->pm_stfree &= ~l2tobm(num--);
			kpm->pm_stfree &= ~l2tobm(MAXKL2SIZE-1);
			for (num = MAXKL2SIZE;
			     num < sizeof(kpm->pm_stfree)*NBBY;
			     num++)
				kpm->pm_stfree &= ~l2tobm(num);
		}
	}

	/*
	 * Allocate some fixed, special purpose kernel virtual addresses
	 */
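	/*
	 * Only the virtual addresses are reserved here; the pages
	 * behind most of them are mapped later, during machine
	 * dependent startup.
	 */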
	{
		vaddr_t va = RELOC(virtual_avail, vaddr_t);

		RELOC(bootinfo_va, vaddr_t) = (vaddr_t)va;
		va += PAGE_SIZE;
		RELOC(CADDR1, caddr_t) = (caddr_t)va;
		va += PAGE_SIZE;
		RELOC(CADDR2, caddr_t) = (caddr_t)va;
		va += PAGE_SIZE;
		RELOC(vmmap, caddr_t) = (caddr_t)va;
		va += PAGE_SIZE;
		RELOC(ledbase, caddr_t) = (caddr_t)va;
		va += PAGE_SIZE;
		RELOC(msgbufaddr, caddr_t) = (caddr_t)va;
		va += m68k_round_page(MSGBUFSIZE);
		RELOC(virtual_avail, vaddr_t) = va;
	}
}

void
pmap_init_md(void)
{
	vaddr_t addr;
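
	/*
	 * Reserve the virtual range that pmap_bootstrap() mapped for
	 * the internal and external IO spaces, so that UVM will never
	 * allocate it for anything else.
	 */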
	addr = (vaddr_t) intiobase;
	if (uvm_map(kernel_map, &addr,
		    m68k_ptob(IIOMAPSIZE+EIOMAPSIZE),
		    NULL, UVM_UNKNOWN_OFFSET, 0,
		    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE,
				UVM_INH_NONE, UVM_ADV_RANDOM,
				UVM_FLAG_FIXED)) != 0)
		panic("pmap_init_md: uvm_map failed");
}