/*	$NetBSD: pmap_bootstrap.c,v 1.15 1998/08/20 08:33:45 kleink Exp $	*/

/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap_bootstrap.c	8.1 (Berkeley) 6/10/93
 */

#include <sys/param.h>
#include <sys/proc.h>

#include <machine/frame.h>
#include <machine/cpu.h>
#include <machine/hp300spu.h>
#include <machine/vmparam.h>
#include <machine/pte.h>

#include <hp300/hp300/clockreg.h>

#include <vm/vm.h>
#include <vm/pmap.h>

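/*
 * Everything in pmap_bootstrap() runs before the MMU is enabled, so
 * globals must be touched at the physical addresses where they were
 * loaded, not at their link-time addresses.  RELOC() does that
 * relocation: it offsets the link-time address of `v' by the physical
 * load address `firstpa' and dereferences the result as type `t'.
 * For example, RELOC(Sysptsize, int) below reads the current value of
 * Sysptsize, and RELOC(Sysseg, st_entry_t *) = ... stores through it.
 */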
#define	RELOC(v, t)	*((t*)((u_int)&(v) + firstpa))

extern char *etext;
extern int Sysptsize;
extern char *extiobase, *proc0paddr;
extern st_entry_t *Sysseg;
extern pt_entry_t *Sysptmap, *Sysmap;
extern vaddr_t CLKbase, MMUbase;
extern paddr_t pagezero;

extern int maxmem, physmem;
extern paddr_t avail_start, avail_end;
extern vaddr_t virtual_avail, virtual_end;
extern vsize_t mem_size;
extern int protection_codes[];
#ifdef M68K_MMU_HP
extern int pmap_aliasmask;
#endif

void	pmap_bootstrap __P((paddr_t, paddr_t));

/*
 * Special purpose kernel virtual addresses, used for mapping
 * physical pages for a variety of temporary or permanent purposes:
 *
 *	CADDR1, CADDR2:	pmap zero/copy operations
 *	vmmap:		/dev/mem, crash dumps, parity error checking
 *	ledbase:	SPU LEDs
 *	msgbufaddr:	kernel message buffer
 */
caddr_t		CADDR1, CADDR2, vmmap, ledbase;
extern caddr_t	msgbufaddr;

/*
 * Bootstrap the VM system.
 *
 * Called with the MMU off, so we must relocate all global references
 * by `firstpa' (don't call any functions here!).  `nextpa' is the
 * first available physical memory address; the updated first available
 * PA, reflecting the memory we allocate here, is recorded in
 * `avail_start'.  The MMU is still off when we return.
 *
 * XXX assumes sizeof(u_int) == sizeof(pt_entry_t)
 * XXX a PIC compiler would make this much easier.
 */
void
pmap_bootstrap(nextpa, firstpa)
	paddr_t nextpa;
	paddr_t firstpa;
{
	paddr_t kstpa, kptpa, iiopa, eiopa, kptmpa, lkptpa, p0upa;
	u_int nptpages, kstsize;
	st_entry_t protoste, *ste;
	pt_entry_t protopte, *pte, *epte;

	/*
	 * Calculate important physical addresses:
	 *
	 *	kstpa		kernel segment table	1 page (!040)
	 *						N pages (040)
	 *
	 *	kptpa		statically allocated
	 *			kernel PT pages		Sysptsize+ pages
	 *
	 *	iiopa		internal IO space
	 *			PT pages		IIOMAPSIZE pages
	 *
	 *	eiopa		external IO space
	 *			PT pages		EIOMAPSIZE pages
	 *
	 *	[ Sysptsize is the number of pages of PT, IIOMAPSIZE and
	 *	  EIOMAPSIZE are the number of PTEs, hence we need to round
	 *	  the total to a page boundary with IO maps at the end. ]
	 *
	 *	kptmpa		kernel PT map		1 page
	 *
	 *	lkptpa		last kernel PT page	1 page
	 *
	 *	p0upa		proc 0 u-area		UPAGES pages
	 *
	 * The KVA corresponding to any of these PAs is:
	 *	(PA - firstpa + KERNBASE).
	 */
	if (RELOC(mmutype, int) == MMU_68040)
		kstsize = MAXKL2SIZE / (NPTEPG/SG4_LEV2SIZE);
	else
		kstsize = 1;
	kstpa = nextpa;
	nextpa += kstsize * NBPG;
	kptpa = nextpa;
	nptpages = RELOC(Sysptsize, int) +
		(IIOMAPSIZE + EIOMAPSIZE + NPTEPG - 1) / NPTEPG;
	nextpa += nptpages * NBPG;
	eiopa = nextpa - EIOMAPSIZE * sizeof(pt_entry_t);
	iiopa = eiopa - IIOMAPSIZE * sizeof(pt_entry_t);
	kptmpa = nextpa;
	nextpa += NBPG;
	lkptpa = nextpa;
	nextpa += NBPG;
	p0upa = nextpa;
	nextpa += USPACE;
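
	/*
	 * A worked example of the layout above (hypothetical values,
	 * for illustration only): with NBPG = 4096, NPTEPG = 1024,
	 * Sysptsize = 2 and IIOMAPSIZE + EIOMAPSIZE = 1536 PTEs,
	 * nptpages = 2 + (1536 + 1023) / 1024 = 4, and the IO map
	 * PTEs occupy the last 6k of those four PT pages, with
	 * `iiopa' and `eiopa' pointing into that tail.
	 */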

	/*
	 * Initialize segment table and kernel page table map.
	 *
	 * On 68030s and earlier MMUs the two are identical except for
	 * the valid bits so both are initialized with essentially the
	 * same values.  On the 68040, which has a mandatory 3-level
	 * structure, the segment table holds the level 1 table and part
	 * (or all) of the level 2 table and hence is considerably
	 * different.  Here the first level consists of 128 descriptors
	 * (512 bytes) each mapping 32mb of address space.  Each of these
	 * points to blocks of 128 second level descriptors (512 bytes)
	 * each mapping 256kb.  Note that there may be additional "segment
	 * table" pages depending on how large MAXKL2SIZE is.
	 *
	 * Portions of the last segment of KVA space (0xFFF00000 -
	 * 0xFFFFFFFF) are mapped for a couple of purposes.  0xFFF00000
	 * for UPAGES is used for mapping the current process u-area
	 * (u + kernel stack).  The very last page (0xFFFFF000) is mapped
	 * to the last physical page of RAM to give us a region in which
	 * PA == VA.  We use the first part of this page for enabling
	 * and disabling mapping.  The last part of this page also contains
	 * info left by the boot ROM.
	 *
	 * XXX cramming two levels of mapping into the single "segment"
	 * table on the 68040 is intended as a temporary hack to get things
	 * working.  The 224mb of address space that this allows will most
	 * likely be insufficient in the future (at least for the kernel).
	 */
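	/*
	 * A sketch of the 040 "segment table" pages built below
	 * (descriptor counts are the SG4_* constants; sizes assume
	 * 4-byte st_entry_t's):
	 *
	 *	kstpa:			SG4_LEV1SIZE level 1 descriptors
	 *	immediately after:	level 2 descriptors mapping the
	 *				nptpages+1 PT pages
	 *	tail of last page:	one SG4_LEV2SIZE block of level 2
	 *				descriptors, whose tail maps lkptpa
	 */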
	if (RELOC(mmutype, int) == MMU_68040) {
		int num;

		/*
		 * First invalidate the entire "segment table" pages
		 * (levels 1 and 2 have the same "invalid" value).
		 */
		pte = (u_int *)kstpa;
		epte = &pte[kstsize * NPTEPG];
		while (pte < epte)
			*pte++ = SG_NV;
		/*
		 * Initialize level 2 descriptors (which immediately
		 * follow the level 1 table).  We need:
		 *	NPTEPG / SG4_LEV3SIZE
		 * level 2 descriptors to map each of the nptpages+1
		 * pages of PTEs.  Note that we set the "used" bit
		 * now to save the HW the expense of doing it.
		 */
		num = (nptpages + 1) * (NPTEPG / SG4_LEV3SIZE);
		pte = &((u_int *)kstpa)[SG4_LEV1SIZE];
		epte = &pte[num];
		protoste = kptpa | SG_U | SG_RW | SG_V;
		while (pte < epte) {
			*pte++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}
		/*
		 * Initialize level 1 descriptors.  We need:
		 *	roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE
		 * level 1 descriptors to map the `num' level 2's.
		 */
		pte = (u_int *)kstpa;
		epte = &pte[roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE];
		protoste = (u_int)&pte[SG4_LEV1SIZE] | SG_U | SG_RW | SG_V;
		while (pte < epte) {
			*pte++ = protoste;
			protoste += (SG4_LEV2SIZE * sizeof(st_entry_t));
		}
		/*
		 * Initialize the final level 1 descriptor to map the last
		 * block of level 2 descriptors.
		 */
		ste = &((u_int *)kstpa)[SG4_LEV1SIZE-1];
		pte = &((u_int *)kstpa)[kstsize*NPTEPG - SG4_LEV2SIZE];
		*ste = (u_int)pte | SG_U | SG_RW | SG_V;
		/*
		 * Now initialize the final portion of that block of
		 * descriptors to map the "last PT page".
		 */
		pte = &((u_int *)kstpa)[kstsize*NPTEPG - NPTEPG/SG4_LEV3SIZE];
		epte = &pte[NPTEPG/SG4_LEV3SIZE];
		protoste = lkptpa | SG_U | SG_RW | SG_V;
		while (pte < epte) {
			*pte++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}
		/*
		 * Initialize Sysptmap
		 */
		pte = (u_int *)kptmpa;
		epte = &pte[nptpages+1];
		protopte = kptpa | PG_RW | PG_CI | PG_V;
		while (pte < epte) {
			*pte++ = protopte;
			protopte += NBPG;
		}
		/*
		 * Invalidate all but the last remaining entry.
		 */
		epte = &((u_int *)kptmpa)[NPTEPG-1];
		while (pte < epte) {
			*pte++ = PG_NV;
		}
		/*
		 * Initialize the last to point to the page
		 * table page allocated earlier.
		 */
		*pte = lkptpa | PG_RW | PG_CI | PG_V;
	} else {
		/*
		 * Map the page table pages in both the HW segment table
		 * and the software Sysptmap.  Note that Sysptmap is also
		 * considered a PT page hence the +1.
		 */
		ste = (u_int *)kstpa;
		pte = (u_int *)kptmpa;
		epte = &pte[nptpages+1];
		protoste = kptpa | SG_RW | SG_V;
		protopte = kptpa | PG_RW | PG_CI | PG_V;
		while (pte < epte) {
			*ste++ = protoste;
			*pte++ = protopte;
			protoste += NBPG;
			protopte += NBPG;
		}
		/*
		 * Invalidate all but the last remaining entries in both.
		 */
		epte = &((u_int *)kptmpa)[NPTEPG-1];
		while (pte < epte) {
			*ste++ = SG_NV;
			*pte++ = PG_NV;
		}
		/*
		 * Initialize the last to point to the page
		 * table page allocated earlier.
		 */
		*ste = lkptpa | SG_RW | SG_V;
		*pte = lkptpa | PG_RW | PG_CI | PG_V;
	}
	/*
	 * Invalidate all but the final entry in the last kernel PT page
	 * (u-area PTEs will be validated later).  The final entry maps
	 * the last page of physical memory.
	 */
	pte = (u_int *)lkptpa;
	epte = &pte[NPTEPG-1];
	while (pte < epte)
		*pte++ = PG_NV;
	*pte = MAXADDR | PG_RW | PG_CI | PG_V;
	/*
	 * Initialize kernel page table.
	 * Start by invalidating the `nptpages' that we have allocated.
	 */
	pte = (u_int *)kptpa;
	epte = &pte[nptpages * NPTEPG];
	while (pte < epte)
		*pte++ = PG_NV;

	/*
	 * Save the physical address of `page zero'.  This is
	 * a page of memory at the beginning of kernel text
	 * not mapped at VA 0.  But, we might want to use it
	 * for something later.
	 */
	RELOC(pagezero, paddr_t) = firstpa;

	/*
	 * Validate PTEs for kernel text (RO).  The first page
	 * of kernel text remains invalid; see locore.s
	 */
	pte = &((u_int *)kptpa)[m68k_btop(KERNBASE + NBPG)];
	epte = &pte[m68k_btop(m68k_trunc_page(&etext))];
	protopte = (firstpa + NBPG) | PG_RO | PG_V;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += NBPG;
	}
	/*
	 * Validate PTEs for kernel data/bss, dynamic data allocated
	 * by us so far (nextpa - firstpa bytes), and pages for proc0
	 * u-area and page table allocated below (RW).
	 */
	epte = &((u_int *)kptpa)[m68k_btop(nextpa - firstpa)];
	protopte = (protopte & ~PG_PROT) | PG_RW;
	/*
	 * Enable copy-back caching of data pages
	 */
	if (RELOC(mmutype, int) == MMU_68040)
		protopte |= PG_CCB;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += NBPG;
	}
	/*
	 * Finally, validate the internal IO space PTEs (RW+CI).
	 * We do this here since the 320/350 MMU registers (also
	 * used, but to a lesser extent, on other models) are mapped
	 * in this range and it would be nice to be able to access
	 * them after the MMU is turned on.
	 */
	pte = (u_int *)iiopa;
	epte = (u_int *)eiopa;
	protopte = INTIOBASE | PG_RW | PG_CI | PG_V;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += NBPG;
	}

	/*
	 * Calculate important exported kernel virtual addresses
	 */
	/*
	 * Sysseg: base of kernel segment table
	 */
	RELOC(Sysseg, st_entry_t *) =
		(st_entry_t *)(kstpa - firstpa);
	/*
	 * Sysptmap: base of kernel page table map
	 */
	RELOC(Sysptmap, pt_entry_t *) =
		(pt_entry_t *)(kptmpa - firstpa);
	/*
	 * Sysmap: kernel page table (as mapped through Sysptmap)
	 * Immediately follows the `nptpages' pages of static kernel page table.
	 */
	RELOC(Sysmap, pt_entry_t *) =
		(pt_entry_t *)m68k_ptob(nptpages * NPTEPG);
	/*
	 * intiobase, intiolimit: base and end of internal (DIO) IO space.
	 * IIOMAPSIZE pages prior to external IO space at end of static
	 * kernel page table.
	 */
	RELOC(intiobase, char *) =
		(char *)m68k_ptob(nptpages*NPTEPG - (IIOMAPSIZE+EIOMAPSIZE));
	RELOC(intiolimit, char *) =
		(char *)m68k_ptob(nptpages*NPTEPG - EIOMAPSIZE);
	/*
	 * extiobase: base of external (DIO-II) IO space.
	 * EIOMAPSIZE pages at the end of the static kernel page table.
	 */
	RELOC(extiobase, char *) =
		(char *)m68k_ptob(nptpages*NPTEPG - EIOMAPSIZE);
	/*
	 * CLKbase, MMUbase: important registers in internal IO space
	 * accessed from assembly language.
	 */
	RELOC(CLKbase, vaddr_t) =
		(vaddr_t)RELOC(intiobase, char *) + CLKBASE;
	RELOC(MMUbase, vaddr_t) =
		(vaddr_t)RELOC(intiobase, char *) + MMUBASE;

	/*
	 * Setup u-area for process 0.
	 */
	/*
	 * Zero the u-area.
	 * NOTE: `pte' and `epte' aren't PTEs here.
	 */
	pte = (u_int *)p0upa;
	epte = (u_int *)(p0upa + USPACE);
	while (pte < epte)
		*pte++ = 0;
	/*
	 * Remember the u-area address so it can be loaded in the
	 * proc struct p_addr field later.
	 */
	RELOC(proc0paddr, char *) = (char *)(p0upa - firstpa);

	/*
	 * VM data structures are now initialized, set up data for
	 * the pmap module.
	 *
	 * Note about avail_end: msgbuf is initialized just after
	 * avail_end in machdep.c.  Since the last page is used
	 * for rebooting the system (code is copied there and
	 * execution continues from copied code before the MMU
	 * is disabled), the msgbuf will get trounced between
	 * reboots if it's placed in the last physical page.
	 * To work around this, we move avail_end back one more
	 * page so the msgbuf can be preserved.
	 */
	RELOC(avail_start, paddr_t) = nextpa;
	RELOC(avail_end, paddr_t) = m68k_ptob(RELOC(maxmem, int)) -
		(m68k_round_page(MSGBUFSIZE) + m68k_ptob(1));
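	/*
	 * For example (hypothetical numbers): with 4k pages,
	 * maxmem = 2048 pages and MSGBUFSIZE = 16384, avail_end would
	 * be 0x800000 - (0x4000 + 0x1000) = 0x7fb000, keeping both the
	 * msgbuf pages and the reboot page out of the managed range.
	 */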
	RELOC(mem_size, vsize_t) = m68k_ptob(RELOC(physmem, int));
	RELOC(virtual_avail, vaddr_t) =
		VM_MIN_KERNEL_ADDRESS + (nextpa - firstpa);
	RELOC(virtual_end, vaddr_t) = VM_MAX_KERNEL_ADDRESS;

#ifdef M68K_MMU_HP
	/*
	 * Determine VA aliasing distance if any
	 */
	if (RELOC(ectype, int) == EC_VIRT)
		if (RELOC(machineid, int) == HP_320)
			RELOC(pmap_aliasmask, int) = 0x3fff;	/* 16k */
		else if (RELOC(machineid, int) == HP_350)
			RELOC(pmap_aliasmask, int) = 0x7fff;	/* 32k */
#endif

	/*
	 * Initialize protection array.
	 * XXX don't use a switch statement; it might produce an
	 * absolute "jmp" table.
	 */
	{
		int *kp;

		kp = &RELOC(protection_codes, int);
		kp[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_NONE] = 0;
		kp[VM_PROT_READ|VM_PROT_NONE|VM_PROT_NONE] = PG_RO;
		kp[VM_PROT_READ|VM_PROT_NONE|VM_PROT_EXECUTE] = PG_RO;
		kp[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_EXECUTE] = PG_RO;
		kp[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_NONE] = PG_RW;
		kp[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
		kp[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_NONE] = PG_RW;
		kp[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
	}
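	/*
	 * The array is indexed directly by a 3-bit VM_PROT value
	 * (read = 1, write = 2, execute = 4), so for example
	 * protection_codes[VM_PROT_READ|VM_PROT_WRITE] yields PG_RW.
	 */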

	/*
	 * Kernel page/segment table allocated in locore,
	 * just initialize pointers.
	 */
	{
		struct pmap *kpm = &RELOC(kernel_pmap_store, struct pmap);

		kpm->pm_stab = RELOC(Sysseg, st_entry_t *);
		kpm->pm_ptab = RELOC(Sysmap, pt_entry_t *);
		simple_lock_init(&kpm->pm_lock);
		kpm->pm_count = 1;
		kpm->pm_stpa = (st_entry_t *)kstpa;
		/*
		 * For the 040 we also initialize the free level 2
		 * descriptor mask noting that we have used:
		 *	0:		level 1 table
		 *	1 to `num':	map page tables
		 *	MAXKL2SIZE-1:	maps last-page page table
		 */
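		/*
		 * A worked example (hypothetical values): with
		 * NPTEPG = 1024, SG4_LEV3SIZE = 64, SG4_LEV2SIZE = 128
		 * and nptpages = 4, num below comes to
		 * roundup(5 * 16, 128) / 128 = 1, so bits 0-1 and bit
		 * MAXKL2SIZE-1 are marked in-use, leaving bits 2
		 * through MAXKL2SIZE-2 free.
		 */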
		if (RELOC(mmutype, int) == MMU_68040) {
			int num;

			kpm->pm_stfree = ~l2tobm(0);
			num = roundup((nptpages + 1) * (NPTEPG / SG4_LEV3SIZE),
				      SG4_LEV2SIZE) / SG4_LEV2SIZE;
			while (num)
				kpm->pm_stfree &= ~l2tobm(num--);
			kpm->pm_stfree &= ~l2tobm(MAXKL2SIZE-1);
			for (num = MAXKL2SIZE;
			     num < sizeof(kpm->pm_stfree)*NBBY;
			     num++)
				kpm->pm_stfree &= ~l2tobm(num);
		}
	}

	/*
	 * Allocate some fixed, special purpose kernel virtual addresses
	 */
	{
		vaddr_t va = RELOC(virtual_avail, vaddr_t);

		RELOC(CADDR1, caddr_t) = (caddr_t)va;
		va += NBPG;
		RELOC(CADDR2, caddr_t) = (caddr_t)va;
		va += NBPG;
		RELOC(vmmap, caddr_t) = (caddr_t)va;
		va += NBPG;
		RELOC(ledbase, caddr_t) = (caddr_t)va;
		va += NBPG;
		RELOC(msgbufaddr, caddr_t) = (caddr_t)va;
		va += m68k_round_page(MSGBUFSIZE);
		RELOC(virtual_avail, vaddr_t) = va;
	}
}