/*	$NetBSD: pmap_bootstrap.c,v 1.44 2009/12/06 02:42:34 tsutsui Exp $	*/

/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap_bootstrap.c	8.1 (Berkeley) 6/10/93
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap_bootstrap.c,v 1.44 2009/12/06 02:42:34 tsutsui Exp $");

#include <sys/param.h>

#include <machine/frame.h>
#include <machine/cpu.h>
#include <machine/hp300spu.h>
#include <machine/vmparam.h>
#include <machine/pte.h>

#include <hp300/hp300/clockreg.h>

#include <uvm/uvm_extern.h>

#define	RELOC(v, t)	*((t*)((uintptr_t)&(v) + firstpa))
#define	RELOCPTR(v, t)	((t)((uintptr_t)RELOC((v), t) + firstpa))
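
/*
 * A note on RELOC: this code runs with the MMU off, so globals must be
 * reached through their load (physical) addresses.  Assuming a kernel
 * linked at VA 0 and loaded at PA `firstpa', RELOC(mmutype, int) reads
 * the int at PA (&mmutype + firstpa), i.e. the loaded copy of the
 * variable rather than its not-yet-mapped virtual address.
 */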

extern char *etext;
extern vaddr_t CLKbase, MMUbase;
extern paddr_t bootinfo_pa;
extern vaddr_t bootinfo_va;

extern int maxmem, physmem;
extern paddr_t avail_start, avail_end;
#ifdef M68K_MMU_HP
extern int pmap_aliasmask;
#endif

void pmap_bootstrap(paddr_t, paddr_t);

/*
 * Special purpose kernel virtual addresses, used for mapping
 * physical pages for a variety of temporary or permanent purposes:
 *
 *	CADDR1, CADDR2:	pmap zero/copy operations
 *	vmmap:		/dev/mem, crash dumps, parity error checking
 *	ledbase:	SPU LEDs
 *	msgbufaddr:	kernel message buffer
 */
void *CADDR1, *CADDR2, *ledbase;
char *vmmap;
void *msgbufaddr;

/*
 * Bootstrap the VM system.
 *
 * Called with MMU off so we must relocate all global references by `firstpa'
 * (don't call any functions here!)  `nextpa' is the first available physical
 * memory address.  Returns an updated first PA reflecting the memory we
 * have allocated.  MMU is still off when we return.
 *
 * XXX assumes sizeof(u_int) == sizeof(pt_entry_t)
 * XXX a PIC compiler would make this much easier.
 */
void
pmap_bootstrap(paddr_t nextpa, paddr_t firstpa)
{
	paddr_t kstpa, kptpa, kptmpa, lkptpa, lwp0upa;
	u_int nptpages, kstsize;
	st_entry_t protoste, *ste, *este;
	pt_entry_t protopte, *pte, *epte;
	u_int stfree = 0;	/* XXX: gcc -Wuninitialized */

	/*
	 * Calculate important physical addresses:
	 *
	 *	lwp0upa		lwp 0 u-area		UPAGES pages
	 *
	 *	kstpa		kernel segment table	1 page (!040)
	 *						N pages (040)
	 *
	 *	kptmpa		kernel PT map		1 page
	 *
	 *	lkptpa		last kernel PT page	1 page
	 *
	 *	kptpa		statically allocated
	 *			kernel PT pages		Sysptsize+ pages
	 *
	 * [ Sysptsize is the number of pages of PT, IIOMAPSIZE and
	 *   EIOMAPSIZE are the number of PTEs, hence we need to round
	 *   the total to a page boundary with IO maps at the end. ]
	 *
	 * The KVA corresponding to any of these PAs is:
	 *	(PA - firstpa + KERNBASE).
	 */
	lwp0upa = nextpa;
	nextpa += USPACE;
	if (RELOC(mmutype, int) == MMU_68040)
		kstsize = MAXKL2SIZE / (NPTEPG/SG4_LEV2SIZE);
	else
		kstsize = 1;
	kstpa = nextpa;
	nextpa += kstsize * PAGE_SIZE;
	kptmpa = nextpa;
	nextpa += PAGE_SIZE;
	lkptpa = nextpa;
	nextpa += PAGE_SIZE;
	kptpa = nextpa;
	nptpages = RELOC(Sysptsize, int) +
	    (IIOMAPSIZE + EIOMAPSIZE + NPTEPG - 1) / NPTEPG;
	nextpa += nptpages * PAGE_SIZE;
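
	/*
	 * An illustrative instance of the nptpages computation above
	 * (assumed values, not the real configuration): with 4KB pages
	 * and 4-byte PTEs, NPTEPG is 1024, so if IIOMAPSIZE + EIOMAPSIZE
	 * came to, say, 1100 PTEs, the IO maps would add
	 * howmany(1100, 1024) == 2 pages on top of Sysptsize.
	 */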

	/*
	 * Initialize segment table and kernel page table map.
	 *
	 * On 68030s and earlier MMUs the two are identical except for
	 * the valid bits so both are initialized with essentially the
	 * same values.  On the 68040, which has a mandatory 3-level
	 * structure, the segment table holds the level 1 table and part
	 * (or all) of the level 2 table and hence is considerably
	 * different.  Here the first level consists of 128 descriptors
	 * (512 bytes) each mapping 32mb of address space.  Each of these
	 * points to blocks of 128 second level descriptors (512 bytes)
	 * each mapping 256kb.  Note that there may be additional "segment
	 * table" pages depending on how large MAXKL2SIZE is.
	 *
	 * Portions of the last two segments of KVA space (0xFF800000 -
	 * 0xFFFFFFFF) are mapped for a couple of purposes.
	 * The first segment (0xFF800000 - 0xFFBFFFFF) is mapped
	 * for the kernel page tables.
	 * The very last page (0xFFFFF000) in the second segment is mapped
	 * to the last physical page of RAM to give us a region in which
	 * PA == VA.  We use the first part of this page for enabling
	 * and disabling mapping.  The last part of this page also contains
	 * info left by the boot ROM.
	 *
	 * XXX cramming two levels of mapping into the single "segment"
	 * table on the 68040 is intended as a temporary hack to get things
	 * working.  The 224mb of address space that this allows will most
	 * likely be insufficient in the future (at least for the kernel).
	 */
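	/*
	 * Arithmetic behind the 68040 numbers above, for a single-page
	 * segment table (assumed here purely as an illustration): a 4KB
	 * page holds 1024 4-byte descriptors; the level 1 table uses
	 * SG4_LEV1SIZE == 128 of them, leaving (1024 - 128) / 128 == 7
	 * level 2 blocks of 128 descriptors, and 7 * 32mb gives the
	 * 224mb limit mentioned in the XXX note.
	 */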
	if (RELOC(mmutype, int) == MMU_68040) {
		int nl1desc, nl2desc, i;

		/*
		 * First invalidate the entire "segment table" pages
		 * (levels 1 and 2 have the same "invalid" value).
		 */
		ste = (st_entry_t *)kstpa;
		este = &ste[kstsize * NPTEPG];
		while (ste < este)
			*ste++ = SG_NV;
		/*
		 * Initialize level 2 descriptors (which immediately
		 * follow the level 1 table).  We need:
		 *	NPTEPG / SG4_LEV3SIZE
		 * level 2 descriptors to map each of the nptpages
		 * pages of PTEs.  Note that we set the "used" bit
		 * now to save the HW the expense of doing it.
		 */
		nl2desc = nptpages * (NPTEPG / SG4_LEV3SIZE);
		ste = (st_entry_t *)kstpa;
		ste = &ste[SG4_LEV1SIZE];
		este = &ste[nl2desc];
		protoste = kptpa | SG_U | SG_RW | SG_V;
		while (ste < este) {
			*ste++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}
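		/*
		 * The stride above follows from the descriptor size:
		 * each level 2 descriptor covers SG4_LEV3SIZE (64) PTEs,
		 * so protoste advances 64 * 4 == 256 bytes per entry,
		 * walking the PT pages 256kb of mapped VA at a time
		 * (64 PTEs * 4KB pages).
		 */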
		/*
		 * Initialize level 1 descriptors.  We need:
		 *	howmany(nl2desc, SG4_LEV2SIZE)
		 * level 1 descriptors to map the `nl2desc' level 2's.
		 */
		nl1desc = howmany(nl2desc, SG4_LEV2SIZE);
		ste = (st_entry_t *)kstpa;
		este = &ste[nl1desc];
		protoste = (paddr_t)&ste[SG4_LEV1SIZE] | SG_U | SG_RW | SG_V;
		while (ste < este) {
			*ste++ = protoste;
			protoste += (SG4_LEV2SIZE * sizeof(st_entry_t));
		}
		/*
		 * Initialize the final level 1 descriptor to map the next
		 * block of level 2 descriptors for Sysptmap.
		 */
		ste = (st_entry_t *)kstpa;
		ste = &ste[SG4_LEV1SIZE - 1];
		*ste = protoste;
		/*
		 * Now initialize the final portion of that block of
		 * descriptors to map kptmpa and the "last PT page".
		 */
		i = SG4_LEV1SIZE + (nl1desc * SG4_LEV2SIZE);
		ste = (st_entry_t *)kstpa;
		ste = &ste[i + SG4_LEV2SIZE - (NPTEPG / SG4_LEV3SIZE) * 2];
		este = &ste[NPTEPG / SG4_LEV3SIZE];
		protoste = kptmpa | SG_U | SG_RW | SG_V;
		while (ste < este) {
			*ste++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}
		este = &ste[NPTEPG / SG4_LEV3SIZE];
		protoste = lkptpa | SG_U | SG_RW | SG_V;
		while (ste < este) {
			*ste++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}
		/*
		 * Calculate the free level 2 descriptor mask
		 * noting that we have used:
		 *	0:		level 1 table
		 *	1 to nl1desc:	map page tables
		 *	nl1desc + 1:	maps kptmpa and last-page page table
		 */
		/* mark an entry for level 1 table */
		stfree = ~l2tobm(0);
		/* mark entries for map page tables */
		for (i = 1; i <= nl1desc; i++)
			stfree &= ~l2tobm(i);
		/* mark an entry for kptmpa and lkptpa */
		stfree &= ~l2tobm(i);
		/* mark entries not available */
		for (i = MAXKL2SIZE; i < sizeof(stfree) * NBBY; i++)
			stfree &= ~l2tobm(i);
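
		/*
		 * For instance (illustrative only), if nl1desc were 1
		 * the map above would end up with bits 0 (level 1
		 * table), 1 (PT map block) and 2 (kptmpa/lkptpa block)
		 * clear, bits 3 .. MAXKL2SIZE-1 set (free), and every
		 * bit from MAXKL2SIZE up clear (no backing pages).
		 */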

		/*
		 * Initialize Sysptmap
		 */
		pte = (pt_entry_t *)kptmpa;
		epte = &pte[nptpages];
		protopte = kptpa | PG_RW | PG_CI | PG_V;
		while (pte < epte) {
			*pte++ = protopte;
			protopte += PAGE_SIZE;
		}
		/*
		 * Invalidate all remaining entries.
		 */
		epte = (pt_entry_t *)kptmpa;
		epte = &epte[NPTEPG];	/* XXX: should be TIB_SIZE */
		while (pte < epte) {
			*pte++ = PG_NV;
		}
		/*
		 * Initialize the last ones to point to kptmpa and the page
		 * table page allocated earlier.
		 */
		pte = (pt_entry_t *)kptmpa;
		pte = &pte[NPTEPG - 2];	/* XXX: should be TIA_SIZE */
		*pte = kptmpa | PG_RW | PG_CI | PG_V;
		pte++;
		*pte = lkptpa | PG_RW | PG_CI | PG_V;
	} else {
		/*
		 * Map the page table pages in both the HW segment table
		 * and the software Sysptmap.
		 */
		ste = (st_entry_t *)kstpa;
		pte = (pt_entry_t *)kptmpa;
		epte = &pte[nptpages];
		protoste = kptpa | SG_RW | SG_V;
		protopte = kptpa | PG_RW | PG_CI | PG_V;
		while (pte < epte) {
			*ste++ = protoste;
			*pte++ = protopte;
			protoste += PAGE_SIZE;
			protopte += PAGE_SIZE;
		}
		/*
		 * Invalidate all remaining entries in both.
		 */
		este = (st_entry_t *)kstpa;
		este = &este[NPTEPG];	/* XXX: should be TIA_SIZE */
		while (ste < este)
			*ste++ = SG_NV;
		epte = (pt_entry_t *)kptmpa;
		epte = &epte[NPTEPG];	/* XXX: should be TIB_SIZE */
		while (pte < epte)
			*pte++ = PG_NV;
		/*
		 * Initialize the last ones to point to kptmpa and the page
		 * table page allocated earlier.
		 */
		ste = (st_entry_t *)kstpa;
		ste = &ste[NPTEPG - 2];	/* XXX: should be TIA_SIZE */
		pte = (pt_entry_t *)kptmpa;
		pte = &pte[NPTEPG - 2];	/* XXX: should be TIA_SIZE */
		*ste = kptmpa | SG_RW | SG_V;
		*pte = kptmpa | PG_RW | PG_CI | PG_V;
		ste++;
		pte++;
		*ste = lkptpa | SG_RW | SG_V;
		*pte = lkptpa | PG_RW | PG_CI | PG_V;
	}
	/*
	 * Invalidate all but the final entry in the last kernel PT page.
	 * The final entry maps the last page of physical memory to
	 * prepare a page that is PA == VA to turn on the MMU.
	 */
	pte = (pt_entry_t *)lkptpa;
	epte = &pte[NPTEPG - 1];
	while (pte < epte)
		*pte++ = PG_NV;
	*pte = MAXADDR | PG_RW | PG_CI | PG_V;
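	/*
	 * MAXADDR is the last physical page of RAM (which on these
	 * machines sits at the top of the address space), so this final
	 * PTE is what provides the PA == VA window described above.
	 */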
	/*
	 * Initialize kernel page table.
	 * Start by invalidating the `nptpages' that we have allocated.
	 */
	pte = (pt_entry_t *)kptpa;
	epte = &pte[nptpages * NPTEPG];
	while (pte < epte)
		*pte++ = PG_NV;

	/*
	 * The first page of kernel text (at VA 0) is zero-filled in
	 * locore.s and left unmapped.  The boot loader places the
	 * bootinfo there after the kernel is loaded.  Remember the
	 * physical address; we'll map it to a virtual address later.
	 */
	RELOC(bootinfo_pa, paddr_t) = firstpa;

	/*
	 * Validate PTEs for kernel text (RO).  The first page
	 * of kernel text remains invalid; see locore.s
	 */
	pte = (pt_entry_t *)kptpa;
	pte = &pte[m68k_btop(KERNBASE + PAGE_SIZE)];
	epte = (pt_entry_t *)kptpa;
	epte = &epte[m68k_btop(m68k_trunc_page(&etext))];
	protopte = (firstpa + PAGE_SIZE) | PG_RO | PG_V;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}
	/*
	 * Validate PTEs for kernel data/bss, dynamic data allocated
	 * by us so far (nextpa - firstpa bytes), and pages for lwp0
	 * u-area and page table allocated below (RW).
	 */
	epte = (pt_entry_t *)kptpa;
	epte = &epte[m68k_btop(nextpa - firstpa)];
	protopte = (protopte & ~PG_PROT) | PG_RW;
	/*
	 * Enable copy-back caching of data pages
	 */
	if (RELOC(mmutype, int) == MMU_68040)
		protopte |= PG_CCB;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}
	/*
	 * Finally, validate the internal IO space PTEs (RW+CI).
	 * We do this here since the 320/350 MMU registers (also
	 * used, but to a lesser extent, on other models) are mapped
	 * in this range and it would be nice to be able to access
	 * them after the MMU is turned on.
	 */

#define	PTE2VA(pte)	m68k_ptob(pte - ((pt_entry_t *)kptpa))
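
/*
 * PTE2VA maps a PTE slot back to the KVA it translates: since the
 * kernel PT is a linear array whose slot 0 corresponds to VA 0 here,
 * m68k_ptob() of the slot index is the VA directly.
 */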

	protopte = INTIOBASE | PG_RW | PG_CI | PG_V;
	epte = &pte[IIOMAPSIZE];
	RELOC(intiobase, uint8_t *) = (uint8_t *)PTE2VA(pte);
	RELOC(intiolimit, uint8_t *) = (uint8_t *)PTE2VA(epte);
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}
	RELOC(extiobase, uint8_t *) = (uint8_t *)PTE2VA(pte);
	pte += EIOMAPSIZE;
	RELOC(virtual_avail, vaddr_t) = PTE2VA(pte);

	/*
	 * Calculate important exported kernel virtual addresses
	 */
	/*
	 * Sysseg: base of kernel segment table
	 */
	RELOC(Sysseg, st_entry_t *) = (st_entry_t *)(kstpa - firstpa);
	/*
	 * Sysptmap: base of kernel page table map
	 */
	RELOC(Sysptmap, pt_entry_t *) = (pt_entry_t *)(kptmpa - firstpa);
	/*
	 * Sysmap: kernel page table (as mapped through Sysptmap)
	 *	Allocated at the end of KVA space.
	 */
	RELOC(Sysmap, pt_entry_t *) =
	    (pt_entry_t *)m68k_ptob((NPTEPG - 2) * NPTEPG);
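	/*
	 * With 4KB pages and NPTEPG == 1024 the expression above works
	 * out to m68k_ptob(1022 * 1024) == 0xFF800000, the start of the
	 * second-to-last 4mb segment described at the top of this
	 * function.
	 */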
	/*
	 * CLKbase, MMUbase: important registers in internal IO space
	 * accessed from assembly language.
	 */
	RELOC(CLKbase, vaddr_t) =
	    (vaddr_t)RELOC(intiobase, char *) + CLKBASE;
	RELOC(MMUbase, vaddr_t) =
	    (vaddr_t)RELOC(intiobase, char *) + MMUBASE;

	/*
	 * Remember the u-area address so it can be loaded into lwp0
	 * via uvm_lwp_setuarea() later in pmap_bootstrap_finalize().
	 */
	RELOC(lwp0uarea, vaddr_t) = lwp0upa - firstpa;

	/*
	 * VM data structures are now initialized, set up data for
	 * the pmap module.
	 *
	 * Note about avail_end: msgbuf is initialized just after
	 * avail_end in machdep.c.  Since the last page is used
	 * for rebooting the system (code is copied there and
	 * execution continues from copied code before the MMU
	 * is disabled), the msgbuf will get trounced between
	 * reboots if it's placed in the last physical page.
	 * To work around this, we move avail_end back one more
	 * page so the msgbuf can be preserved.
	 */
	RELOC(avail_start, paddr_t) = nextpa;
	RELOC(avail_end, paddr_t) = m68k_ptob(RELOC(maxmem, int)) -
	    (m68k_round_page(MSGBUFSIZE) + m68k_ptob(1));
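	/*
	 * I.e. avail_end ends up a page-rounded msgbuf plus one page
	 * below the top of RAM: the msgbuf lives just above avail_end,
	 * and the very last physical page stays free for the reboot
	 * code described above.
	 */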
	RELOC(mem_size, vsize_t) = m68k_ptob(RELOC(physmem, int));
	RELOC(virtual_end, vaddr_t) = VM_MAX_KERNEL_ADDRESS;

#ifdef M68K_MMU_HP
	/*
	 * Determine VA aliasing distance if any
	 */
	if (RELOC(ectype, int) == EC_VIRT) {
		if (RELOC(machineid, int) == HP_320)
			RELOC(pmap_aliasmask, int) = 0x3fff;	/* 16k */
		else if (RELOC(machineid, int) == HP_350)
			RELOC(pmap_aliasmask, int) = 0x7fff;	/* 32k */
	}
#endif

	/*
	 * Initialize protection array.
	 * XXX don't use a switch statement, it might produce an
	 * absolute "jmp" table.
	 */
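	/*
	 * The indices below are VM_PROT_* flag combinations (read ==
	 * 0x1, write == 0x2, execute == 0x4), so the redundant-looking
	 * ORs simply enumerate all eight possible protection values.
	 */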
	{
		u_int *kp;

		kp = &RELOC(protection_codes, u_int);
		kp[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_NONE] = 0;
		kp[VM_PROT_READ|VM_PROT_NONE|VM_PROT_NONE] = PG_RO;
		kp[VM_PROT_READ|VM_PROT_NONE|VM_PROT_EXECUTE] = PG_RO;
		kp[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_EXECUTE] = PG_RO;
		kp[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_NONE] = PG_RW;
		kp[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
		kp[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_NONE] = PG_RW;
		kp[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
	}

	/*
	 * Kernel page/segment table allocated above,
	 * just initialize pointers.
	 */
	{
		struct pmap *kpm;

		kpm = RELOCPTR(kernel_pmap_ptr, struct pmap *);

		kpm->pm_stab = RELOC(Sysseg, st_entry_t *);
		kpm->pm_ptab = RELOC(Sysmap, pt_entry_t *);
		simple_lock_init(&kpm->pm_lock);
		kpm->pm_count = 1;
		kpm->pm_stpa = (st_entry_t *)kstpa;
		if (RELOC(mmutype, int) == MMU_68040)
			kpm->pm_stfree = stfree;
	}

	/*
	 * Allocate some fixed, special purpose kernel virtual addresses
	 */
	{
		vaddr_t va = RELOC(virtual_avail, vaddr_t);

		RELOC(bootinfo_va, vaddr_t) = (vaddr_t)va;
		va += PAGE_SIZE;
		RELOC(CADDR1, void *) = (void *)va;
		va += PAGE_SIZE;
		RELOC(CADDR2, void *) = (void *)va;
		va += PAGE_SIZE;
		RELOC(vmmap, void *) = (void *)va;
		va += PAGE_SIZE;
		RELOC(ledbase, void *) = (void *)va;
		va += PAGE_SIZE;
		RELOC(msgbufaddr, void *) = (void *)va;
		va += m68k_round_page(MSGBUFSIZE);
		RELOC(virtual_avail, vaddr_t) = va;
	}
}