/*	$NetBSD: pmap_bootstrap.c,v 1.20 2002/03/15 05:55:38 gmcgarry Exp $	*/

/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap_bootstrap.c	8.1 (Berkeley) 6/10/93
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap_bootstrap.c,v 1.20 2002/03/15 05:55:38 gmcgarry Exp $");

#include <sys/param.h>
#include <sys/proc.h>

#include <machine/frame.h>
#include <machine/cpu.h>
#include <machine/hp300spu.h>
#include <machine/vmparam.h>
#include <machine/pte.h>

#include <hp300/hp300/clockreg.h>

#include <uvm/uvm_extern.h>

#define	RELOC(v, t)	*((t*)((u_int)&(v) + firstpa))
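
/*
 * RELOC() accesses a kernel global through its physical address: the
 * MMU is off here, so the link-time VA of `v' must be offset by
 * `firstpa' (the kernel load address) by hand.  A sketch of its use,
 * both as rvalue and lvalue:
 *
 *	nptpages = RELOC(Sysptsize, int);	read the pre-MMU copy
 *	RELOC(avail_start, paddr_t) = nextpa;	store through the PA
 */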

extern char *etext;
extern int Sysptsize;
extern char *extiobase, *proc0paddr;
extern st_entry_t *Sysseg;
extern pt_entry_t *Sysptmap, *Sysmap;
extern vaddr_t CLKbase, MMUbase;
extern paddr_t bootinfo_pa;
extern vaddr_t bootinfo_va;

extern int maxmem, physmem;
extern paddr_t avail_start, avail_end;
extern vaddr_t virtual_avail, virtual_end;
extern vsize_t mem_size;
extern int protection_codes[];
#ifdef M68K_MMU_HP
extern int pmap_aliasmask;
#endif

void pmap_bootstrap __P((paddr_t, paddr_t));

/*
 * Special purpose kernel virtual addresses, used for mapping
 * physical pages for a variety of temporary or permanent purposes:
 *
 *	CADDR1, CADDR2:	pmap zero/copy operations
 *	vmmap:		/dev/mem, crash dumps, parity error checking
 *	ledbase:	SPU LEDs
 *	msgbufaddr:	kernel message buffer
 */
caddr_t		CADDR1, CADDR2, vmmap, ledbase;
extern caddr_t	msgbufaddr;

/*
 * Bootstrap the VM system.
 *
 * Called with the MMU off, so we must relocate all global references
 * by `firstpa' (don't call any functions here!).  `nextpa' is the first
 * available physical memory address.  On return, `avail_start' reflects
 * the first PA past the memory we have allocated.  The MMU is still off
 * when we return.
 *
 * XXX assumes sizeof(u_int) == sizeof(pt_entry_t)
 * XXX a PIC compiler would make this much easier.
 */
void
pmap_bootstrap(nextpa, firstpa)
	paddr_t nextpa;
	paddr_t firstpa;
{
	paddr_t kstpa, kptpa, iiopa, eiopa, kptmpa, lkptpa, p0upa;
	u_int nptpages, kstsize;
	st_entry_t protoste, *ste;
	pt_entry_t protopte, *pte, *epte;

	/*
	 * Calculate important physical addresses:
	 *
	 *	kstpa		kernel segment table	1 page (!040)
	 *						N pages (040)
	 *
	 *	kptpa		statically allocated
	 *			kernel PT pages		Sysptsize+ pages
	 *
	 *	iiopa		internal IO space
	 *			PT pages		IIOMAPSIZE pages
	 *
	 *	eiopa		external IO space
	 *			PT pages		EIOMAPSIZE pages
	 *
	 * [ Sysptsize is the number of pages of PT, IIOMAPSIZE and
	 *   EIOMAPSIZE are the number of PTEs, hence we need to round
	 *   the total to a page boundary with IO maps at the end. ]
	 *
	 *	kptmpa		kernel PT map		1 page
	 *
	 *	lkptpa		last kernel PT page	1 page
	 *
	 *	p0upa		proc 0 u-area		UPAGES pages
	 *
	 * The KVA corresponding to any of these PAs is:
	 *	(PA - firstpa + KERNBASE).
	 */
	if (RELOC(mmutype, int) == MMU_68040)
		kstsize = MAXKL2SIZE / (NPTEPG/SG4_LEV2SIZE);
	else
		kstsize = 1;
	kstpa = nextpa;
	nextpa += kstsize * NBPG;
	kptpa = nextpa;
	nptpages = RELOC(Sysptsize, int) +
	    (IIOMAPSIZE + EIOMAPSIZE + NPTEPG - 1) / NPTEPG;
	nextpa += nptpages * NBPG;
	eiopa = nextpa - EIOMAPSIZE * sizeof(pt_entry_t);
	iiopa = eiopa - IIOMAPSIZE * sizeof(pt_entry_t);
	kptmpa = nextpa;
	nextpa += NBPG;
	lkptpa = nextpa;
	nextpa += NBPG;
	p0upa = nextpa;
	nextpa += USPACE;
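
	/*
	 * The layout carved out above, in allocation order (page counts
	 * for a pre-040 MMU).  Note that iiopa and eiopa are not
	 * separate allocations: per the rounding note above, they name
	 * PTE slots in the tail of the kptpa block, EIOMAPSIZE and
	 * IIOMAPSIZE+EIOMAPSIZE entries before its end, respectively.
	 *
	 *	kstpa	segment table		1 page
	 *	kptpa	kernel PT pages		nptpages pages
	 *	kptmpa	Sysptmap		1 page
	 *	lkptpa	last kernel PT page	1 page
	 *	p0upa	proc 0 u-area		USPACE bytes
	 */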

	/*
	 * Initialize segment table and kernel page table map.
	 *
	 * On 68030s and earlier MMUs the two are identical except for
	 * the valid bits so both are initialized with essentially the
	 * same values.  On the 68040, which has a mandatory 3-level
	 * structure, the segment table holds the level 1 table and part
	 * (or all) of the level 2 table and hence is considerably
	 * different.  Here the first level consists of 128 descriptors
	 * (512 bytes) each mapping 32mb of address space.  Each of these
	 * points to blocks of 128 second level descriptors (512 bytes)
	 * each mapping 256kb.  Note that there may be additional "segment
	 * table" pages depending on how large MAXKL2SIZE is.
	 *
	 * Portions of the last segment of KVA space (0xFFF00000 -
	 * 0xFFFFFFFF) are mapped for a couple of purposes.  The UPAGES
	 * pages starting at 0xFFF00000 map the current process u-area
	 * (u + kernel stack).  The very last page (0xFFFFF000) is mapped
	 * to the last physical page of RAM to give us a region in which
	 * PA == VA.  We use the first part of this page for enabling
	 * and disabling mapping.  The last part of this page also contains
	 * info left by the boot ROM.
	 *
	 * XXX cramming two levels of mapping into the single "segment"
	 * table on the 68040 is intended as a temporary hack to get things
	 * working.  The 224mb of address space that this allows will most
	 * likely be insufficient in the future (at least for the kernel).
	 */
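	/*
	 * Where the 224mb figure comes from (a worked sketch, assuming
	 * 4k pages): one NBPG page of "segment table" holds the 512-byte
	 * level 1 table plus 4096 - 512 = 3584 bytes of level 2
	 * descriptors, i.e. 3584 / 4 = 896 of them.  At 128 descriptors
	 * (one level 1 slot, 32mb) per block, that is 7 blocks, or
	 * 7 * 32mb = 224mb of mappable KVA per segment table page.
	 */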
	if (RELOC(mmutype, int) == MMU_68040) {
		int num;

		/*
		 * First invalidate the entire "segment table" pages
		 * (levels 1 and 2 have the same "invalid" value).
		 */
		pte = (u_int *)kstpa;
		epte = &pte[kstsize * NPTEPG];
		while (pte < epte)
			*pte++ = SG_NV;
		/*
		 * Initialize level 2 descriptors (which immediately
		 * follow the level 1 table).  We need:
		 *	NPTEPG / SG4_LEV3SIZE
		 * level 2 descriptors to map each of the nptpages+1
		 * pages of PTEs.  Note that we set the "used" bit
		 * now to save the HW the expense of doing it.
		 */
		num = (nptpages + 1) * (NPTEPG / SG4_LEV3SIZE);
		pte = &((u_int *)kstpa)[SG4_LEV1SIZE];
		epte = &pte[num];
		protoste = kptpa | SG_U | SG_RW | SG_V;
		while (pte < epte) {
			*pte++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}
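		/*
		 * Worked example (assuming 4k pages, so NPTEPG == 1024
		 * and SG4_LEV3SIZE == 64): each PT page needs
		 * 1024/64 == 16 level 2 descriptors, so e.g.
		 * nptpages == 4 gives num == (4+1) * 16 == 80.
		 */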
		/*
		 * Initialize level 1 descriptors.  We need:
		 *	roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE
		 * level 1 descriptors to map the `num' level 2's.
		 */
		pte = (u_int *)kstpa;
		epte = &pte[roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE];
		protoste = (u_int)&pte[SG4_LEV1SIZE] | SG_U | SG_RW | SG_V;
		while (pte < epte) {
			*pte++ = protoste;
			protoste += (SG4_LEV2SIZE * sizeof(st_entry_t));
		}
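		/*
		 * Continuing the sketch above: num == 80 level 2's round
		 * up to one full block of SG4_LEV2SIZE == 128, so a
		 * single level 1 descriptor covers them
		 * (roundup(80, 128) / 128 == 1).
		 */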
		/*
		 * Initialize the final level 1 descriptor to map the last
		 * block of level 2 descriptors.
		 */
		ste = &((u_int *)kstpa)[SG4_LEV1SIZE-1];
		pte = &((u_int *)kstpa)[kstsize*NPTEPG - SG4_LEV2SIZE];
		*ste = (u_int)pte | SG_U | SG_RW | SG_V;
		/*
		 * Now initialize the final portion of that block of
		 * descriptors to map the "last PT page".
		 */
		pte = &((u_int *)kstpa)[kstsize*NPTEPG - NPTEPG/SG4_LEV3SIZE];
		epte = &pte[NPTEPG/SG4_LEV3SIZE];
		protoste = lkptpa | SG_U | SG_RW | SG_V;
		while (pte < epte) {
			*pte++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}
		/*
		 * Initialize Sysptmap
		 */
		pte = (u_int *)kptmpa;
		epte = &pte[nptpages+1];
		protopte = kptpa | PG_RW | PG_CI | PG_V;
		while (pte < epte) {
			*pte++ = protopte;
			protopte += NBPG;
		}
		/*
		 * Invalidate all but the last remaining entry.
		 */
		epte = &((u_int *)kptmpa)[NPTEPG-1];
		while (pte < epte) {
			*pte++ = PG_NV;
		}
		/*
		 * Initialize the last to point to the page
		 * table page allocated earlier.
		 */
		*pte = lkptpa | PG_RW | PG_CI | PG_V;
	} else {
		/*
		 * Map the page table pages in both the HW segment table
		 * and the software Sysptmap.  Note that Sysptmap is also
		 * considered a PT page hence the +1.
		 */
		ste = (u_int *)kstpa;
		pte = (u_int *)kptmpa;
		epte = &pte[nptpages+1];
		protoste = kptpa | SG_RW | SG_V;
		protopte = kptpa | PG_RW | PG_CI | PG_V;
		while (pte < epte) {
			*ste++ = protoste;
			*pte++ = protopte;
			protoste += NBPG;
			protopte += NBPG;
		}
		/*
		 * Invalidate all but the last remaining entries in both.
		 */
		epte = &((u_int *)kptmpa)[NPTEPG-1];
		while (pte < epte) {
			*ste++ = SG_NV;
			*pte++ = PG_NV;
		}
		/*
		 * Initialize the last to point to the page
		 * table page allocated earlier.
		 */
		*ste = lkptpa | SG_RW | SG_V;
		*pte = lkptpa | PG_RW | PG_CI | PG_V;
	}
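	/*
	 * On the two-level MMUs each segment table entry covers one PT
	 * page worth of mappings, i.e. NBPG * NPTEPG bytes of KVA (4mb
	 * with 4k pages), which is why `ste' and `pte' could be
	 * advanced in lockstep above.
	 */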
	/*
	 * Invalidate all but the final entry in the last kernel PT page
	 * (u-area PTEs will be validated later).  The final entry maps
	 * the last page of physical memory.
	 */
	pte = (u_int *)lkptpa;
	epte = &pte[NPTEPG-1];
	while (pte < epte)
		*pte++ = PG_NV;
	*pte = MAXADDR | PG_RW | PG_CI | PG_V;
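	/*
	 * Since lkptpa maps the top segment of KVA, the entry just
	 * written corresponds to the very last virtual page (0xFFFFF000
	 * with 4k pages), giving the PA == VA window described in the
	 * big comment above.
	 */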
	/*
	 * Initialize kernel page table.
	 * Start by invalidating the `nptpages' that we have allocated.
	 */
	pte = (u_int *)kptpa;
	epte = &pte[nptpages * NPTEPG];
	while (pte < epte)
		*pte++ = PG_NV;

	/*
	 * The first page of kernel text is zero-filled in locore.s
	 * and not mapped (at VA 0).  The boot loader places the
	 * bootinfo here after the kernel is loaded.  Remember
	 * the physical address; we'll map it to a virtual address
	 * later.
	 */
	RELOC(bootinfo_pa, paddr_t) = firstpa;

	/*
	 * Validate PTEs for kernel text (RO).  The first page
	 * of kernel text remains invalid; see locore.s
	 */
	pte = &((u_int *)kptpa)[m68k_btop(KERNBASE + NBPG)];
	epte = &pte[m68k_btop(m68k_trunc_page(&etext))];
	protopte = (firstpa + NBPG) | PG_RO | PG_V;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += NBPG;
	}
	/*
	 * Validate PTEs for kernel data/bss, dynamic data allocated
	 * by us so far (nextpa - firstpa bytes), and pages for proc0
	 * u-area and page table allocated below (RW).
	 */
	epte = &((u_int *)kptpa)[m68k_btop(nextpa - firstpa)];
	protopte = (protopte & ~PG_PROT) | PG_RW;
	/*
	 * Enable copy-back caching of data pages
	 */
	if (RELOC(mmutype, int) == MMU_68040)
		protopte |= PG_CCB;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += NBPG;
	}
	/*
	 * Finally, validate the internal IO space PTEs (RW+CI).
	 * We do this here since the 320/350 MMU registers (also
	 * used, but to a lesser extent, on other models) are mapped
	 * in this range and it would be nice to be able to access
	 * them after the MMU is turned on.
	 */
	pte = (u_int *)iiopa;
	epte = (u_int *)eiopa;
	protopte = INTIOBASE | PG_RW | PG_CI | PG_V;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += NBPG;
	}

	/*
	 * Calculate important exported kernel virtual addresses
	 */
	/*
	 * Sysseg: base of kernel segment table
	 */
	RELOC(Sysseg, st_entry_t *) =
	    (st_entry_t *)(kstpa - firstpa);
	/*
	 * Sysptmap: base of kernel page table map
	 */
	RELOC(Sysptmap, pt_entry_t *) =
	    (pt_entry_t *)(kptmpa - firstpa);
	/*
	 * Sysmap: kernel page table (as mapped through Sysptmap)
	 * Immediately follows `nptpages' of static kernel page table.
	 */
	RELOC(Sysmap, pt_entry_t *) =
	    (pt_entry_t *)m68k_ptob(nptpages * NPTEPG);
	/*
	 * intiobase, intiolimit: base and end of internal (DIO) IO space.
	 * IIOMAPSIZE pages prior to external IO space at end of static
	 * kernel page table.
	 */
	RELOC(intiobase, char *) =
	    (char *)m68k_ptob(nptpages*NPTEPG - (IIOMAPSIZE+EIOMAPSIZE));
	RELOC(intiolimit, char *) =
	    (char *)m68k_ptob(nptpages*NPTEPG - EIOMAPSIZE);
	/*
	 * extiobase: base of external (DIO-II) IO space.
	 * EIOMAPSIZE pages at the end of the static kernel page table.
	 */
	RELOC(extiobase, char *) =
	    (char *)m68k_ptob(nptpages*NPTEPG - EIOMAPSIZE);
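	/*
	 * The ptob() arithmetic above converts a PTE index in the
	 * static kernel page table directly into the KVA it maps
	 * (KERNBASE is 0 on hp300).  E.g., the last EIOMAPSIZE PTE
	 * slots, at PA eiopa, map the EIOMAPSIZE pages of KVA ending
	 * at m68k_ptob(nptpages*NPTEPG), which is where extiobase
	 * points.
	 */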
	/*
	 * CLKbase, MMUbase: important registers in internal IO space
	 * accessed from assembly language.
	 */
	RELOC(CLKbase, vaddr_t) =
	    (vaddr_t)RELOC(intiobase, char *) + CLKBASE;
	RELOC(MMUbase, vaddr_t) =
	    (vaddr_t)RELOC(intiobase, char *) + MMUBASE;

	/*
	 * Setup u-area for process 0.
	 */
	/*
	 * Zero the u-area.
	 * NOTE: `pte' and `epte' aren't PTEs here.
	 */
	pte = (u_int *)p0upa;
	epte = (u_int *)(p0upa + USPACE);
	while (pte < epte)
		*pte++ = 0;
	/*
	 * Remember the u-area address so it can be loaded in the
	 * proc struct p_addr field later.
	 */
	RELOC(proc0paddr, char *) = (char *)(p0upa - firstpa);

	/*
	 * VM data structures are now initialized, set up data for
	 * the pmap module.
	 *
	 * Note about avail_end: msgbuf is initialized just after
	 * avail_end in machdep.c.  Since the last page is used
	 * for rebooting the system (code is copied there and
	 * execution continues from copied code before the MMU
	 * is disabled), the msgbuf will get trounced between
	 * reboots if it's placed in the last physical page.
	 * To work around this, we move avail_end back one more
	 * page so the msgbuf can be preserved.
	 */
	RELOC(avail_start, paddr_t) = nextpa;
	RELOC(avail_end, paddr_t) = m68k_ptob(RELOC(maxmem, int)) -
	    (m68k_round_page(MSGBUFSIZE) + m68k_ptob(1));
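	/*
	 * Worked example (hypothetical numbers; 4k pages and
	 * MSGBUFSIZE <= 4k): with maxmem == 2048 pages,
	 * avail_end = ptob(2048) - (4096 + 4096), i.e. the top two
	 * pages are withheld: one for the msgbuf and the reboot
	 * (PA == VA) page above it.
	 */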
	RELOC(mem_size, vsize_t) = m68k_ptob(RELOC(physmem, int));
	RELOC(virtual_avail, vaddr_t) =
	    VM_MIN_KERNEL_ADDRESS + (nextpa - firstpa);
	RELOC(virtual_end, vaddr_t) = VM_MAX_KERNEL_ADDRESS;

#ifdef M68K_MMU_HP
	/*
	 * Determine VA aliasing distance if any
	 */
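	/*
	 * (These models have virtually-indexed external caches, per the
	 * EC_VIRT test below; pmap_aliasmask lets the pmap keep
	 * multiple mappings of a page congruent modulo the cache size
	 * so they don't alias in the cache.)
	 */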
	if (RELOC(ectype, int) == EC_VIRT) {
		if (RELOC(machineid, int) == HP_320)
			RELOC(pmap_aliasmask, int) = 0x3fff;	/* 16k */
		else if (RELOC(machineid, int) == HP_350)
			RELOC(pmap_aliasmask, int) = 0x7fff;	/* 32k */
	}
#endif

	/*
	 * Initialize protection array.
	 * XXX don't use a switch statement, it might produce an
	 * absolute "jmp" table.
	 */
	{
		int *kp;

		kp = &RELOC(protection_codes, int);
		kp[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_NONE] = 0;
		kp[VM_PROT_READ|VM_PROT_NONE|VM_PROT_NONE] = PG_RO;
		kp[VM_PROT_READ|VM_PROT_NONE|VM_PROT_EXECUTE] = PG_RO;
		kp[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_EXECUTE] = PG_RO;
		kp[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_NONE] = PG_RW;
		kp[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
		kp[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_NONE] = PG_RW;
		kp[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
	}
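	/*
	 * The index expressions above spell out all eight VM_PROT
	 * combinations; since VM_PROT_NONE is 0, each index is simply
	 * the OR of the rights granted, e.g.
	 * kp[VM_PROT_READ|VM_PROT_WRITE] == kp[3] gets PG_RW.
	 */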

	/*
	 * Kernel page/segment table allocated in locore,
	 * just initialize pointers.
	 */
	{
		struct pmap *kpm = &RELOC(kernel_pmap_store, struct pmap);

		kpm->pm_stab = RELOC(Sysseg, st_entry_t *);
		kpm->pm_ptab = RELOC(Sysmap, pt_entry_t *);
		simple_lock_init(&kpm->pm_lock);
		kpm->pm_count = 1;
		kpm->pm_stpa = (st_entry_t *)kstpa;
		/*
		 * For the 040 we also initialize the free level 2
		 * descriptor mask noting that we have used:
		 *	0:		level 1 table
		 *	1 to `num':	map page tables
		 *	MAXKL2SIZE-1:	maps last-page page table
		 */
		if (RELOC(mmutype, int) == MMU_68040) {
			int num;

			kpm->pm_stfree = ~l2tobm(0);
			num = roundup((nptpages + 1) * (NPTEPG / SG4_LEV3SIZE),
			    SG4_LEV2SIZE) / SG4_LEV2SIZE;
			while (num)
				kpm->pm_stfree &= ~l2tobm(num--);
			kpm->pm_stfree &= ~l2tobm(MAXKL2SIZE-1);
			for (num = MAXKL2SIZE;
			    num < sizeof(kpm->pm_stfree)*NBBY;
			    num++)
				kpm->pm_stfree &= ~l2tobm(num);
		}
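		/*
		 * A sketch of the resulting pm_stfree bitmap (assuming
		 * l2tobm(n) is a one-bit mask for block `n'): bit 0
		 * (level 1 table), bits 1 to `num' (kernel PT maps),
		 * bit MAXKL2SIZE-1 (last-page PT map), and everything
		 * at or above MAXKL2SIZE are clear; the remaining
		 * level 2 blocks are marked free for later segment
		 * allocations.
		 */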
	}

	/*
	 * Allocate some fixed, special purpose kernel virtual addresses
	 */
	{
		vaddr_t va = RELOC(virtual_avail, vaddr_t);

		RELOC(bootinfo_va, vaddr_t) = (vaddr_t)va;
		va += NBPG;
		RELOC(CADDR1, caddr_t) = (caddr_t)va;
		va += NBPG;
		RELOC(CADDR2, caddr_t) = (caddr_t)va;
		va += NBPG;
		RELOC(vmmap, caddr_t) = (caddr_t)va;
		va += NBPG;
		RELOC(ledbase, caddr_t) = (caddr_t)va;
		va += NBPG;
		RELOC(msgbufaddr, caddr_t) = (caddr_t)va;
		va += m68k_round_page(MSGBUFSIZE);
		RELOC(virtual_avail, vaddr_t) = va;
	}
}