/*	$NetBSD: pmap_bootstrap.c,v 1.50 2011/01/02 07:03:46 tsutsui Exp $	*/

/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap_bootstrap.c	8.1 (Berkeley) 6/10/93
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap_bootstrap.c,v 1.50 2011/01/02 07:03:46 tsutsui Exp $");

#include <sys/param.h>
#include <uvm/uvm_extern.h>

#include <machine/cpu.h>
#include <machine/hp300spu.h>
#include <machine/pte.h>
#include <machine/vmparam.h>

#include <hp300/hp300/clockreg.h>

#define	RELOC(v, t)	*((t*)((uintptr_t)&(v) + firstpa))
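
/*
 * A quick sketch of why RELOC() exists (descriptive only): this code
 * runs before the MMU is enabled, so every global symbol still has its
 * link-time virtual address while we are executing at physical
 * addresses.  RELOC() adds `firstpa' (the PA the kernel was loaded at)
 * to a symbol's address to reach its physical location, which works
 * because the hp300 kernel is linked at KERNBASE == 0.  For example:
 *
 *	RELOC(avail_start, paddr_t) = nextpa;
 *
 * stores through the physical address of avail_start rather than its
 * (not yet usable) virtual address.
 */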

extern char *etext;
extern vaddr_t CLKbase, MMUbase;
extern paddr_t bootinfo_pa;
extern vaddr_t bootinfo_va;

extern int maxmem, physmem;
extern paddr_t avail_start, avail_end;

/*
 * Special purpose kernel virtual addresses, used for mapping
 * physical pages for a variety of temporary or permanent purposes:
 *
 *	CADDR1, CADDR2:	pmap zero/copy operations
 *	vmmap:		/dev/mem, crash dumps, parity error checking
 *	msgbufaddr:	kernel message buffer
 *	ledbase:	SPU LEDs
 */
void *CADDR1, *CADDR2;
char *vmmap;
void *msgbufaddr;
void *ledbase;

void pmap_bootstrap(paddr_t, paddr_t);

/*
 * Bootstrap the VM system.
 *
 * Called with MMU off so we must relocate all global references by `firstpa'
 * (don't call any functions here!)  `nextpa' is the first available physical
 * memory address.  Returns an updated first PA reflecting the memory we
 * have allocated.  MMU is still off when we return.
 *
 * XXX assumes sizeof(u_int) == sizeof(pt_entry_t)
 * XXX a PIC compiler would make this much easier.
 */
void
pmap_bootstrap(paddr_t nextpa, paddr_t firstpa)
{
	paddr_t lwp0upa, kstpa, kptmpa, kptpa;
	paddr_t lkptpa;
	u_int nptpages, kstsize;
	st_entry_t protoste, *ste, *este;
	pt_entry_t protopte, *pte, *epte;
	u_int stfree = 0;	/* XXX: gcc -Wuninitialized */

	/*
	 * Calculate important physical addresses:
	 *
	 *	lwp0upa		lwp0 u-area		UPAGES pages
	 *
	 *	kstpa		kernel segment table	1 page (!040)
	 *						N pages (040)
	 *
	 *	kptmpa		kernel PT map		1 page
	 *
	 *	lkptpa		last kernel PT page	1 page
	 *
	 *	kptpa		statically allocated
	 *			kernel PT pages		Sysptsize+ pages
	 *
	 * [ Sysptsize is the number of pages of PT, and IIOMAPSIZE and
	 *   EIOMAPSIZE are the number of PTEs, hence we need to round
	 *   the total to a page boundary with IO maps at the end. ]
	 *
	 * The KVA corresponding to any of these PAs is:
	 *	(PA - firstpa + KERNBASE).
	 */
	lwp0upa = nextpa;
	nextpa += USPACE;
	if (RELOC(mmutype, int) == MMU_68040)
		kstsize = MAXKL2SIZE / (NPTEPG/SG4_LEV2SIZE);
	else
		kstsize = 1;
	kstpa = nextpa;
	nextpa += kstsize * PAGE_SIZE;
	kptmpa = nextpa;
	nextpa += PAGE_SIZE;
	lkptpa = nextpa;
	nextpa += PAGE_SIZE;
	kptpa = nextpa;
	nptpages = RELOC(Sysptsize, int) +
	    (IIOMAPSIZE + EIOMAPSIZE + NPTEPG - 1) / NPTEPG;
	nextpa += nptpages * PAGE_SIZE;
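	/*
	 * Layout sketch (illustrative, not authoritative): physical
	 * memory above the kernel image is now carved up contiguously as
	 *
	 *	lwp0upa .. lwp0upa + USPACE		lwp0 u-area
	 *	kstpa   .. kstpa + kstsize pages	kernel segment table
	 *	kptmpa  (1 page)			Sysptmap
	 *	lkptpa  (1 page)			last kernel PT page
	 *	kptpa   .. kptpa + nptpages pages	kernel PT pages
	 *
	 * and nextpa points at the first free page after them.
	 */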

	/*
	 * Initialize segment table and kernel page table map.
	 *
	 * On 68030s and earlier MMUs the two are identical except for
	 * the valid bits so both are initialized with essentially the
	 * same values.  On the 68040, which has a mandatory 3-level
	 * structure, the segment table holds the level 1 table and part
	 * (or all) of the level 2 table and hence is considerably
	 * different.  Here the first level consists of 128 descriptors
	 * (512 bytes) each mapping 32mb of address space.  Each of these
	 * points to blocks of 128 second level descriptors (512 bytes)
	 * each mapping 256kb.  Note that there may be additional "segment
	 * table" pages depending on how large MAXKL2SIZE is.
	 *
	 * Portions of the last two segments of KVA space (0xFF800000 -
	 * 0xFFFFFFFF) are mapped for a couple of purposes.
	 * The first segment (0xFF800000 - 0xFFBFFFFF) is mapped
	 * for the kernel page tables.
	 * The very last page (0xFFFFF000) in the second segment is mapped
	 * to the last physical page of RAM to give us a region in which
	 * PA == VA.  We use the first part of this page for enabling
	 * and disabling mapping.  The last part of this page also contains
	 * info left by the boot ROM.
	 *
	 * XXX cramming two levels of mapping into the single "segment"
	 * table on the 68040 is intended as a temporary hack to get things
	 * working.  The 224mb of address space that this allows will most
	 * likely be insufficient in the future (at least for the kernel).
	 */
	if (RELOC(mmutype, int) == MMU_68040) {
		int nl1desc, nl2desc, i;

		/*
		 * First invalidate the entire "segment table" pages
		 * (levels 1 and 2 have the same "invalid" value).
		 */
		ste = (st_entry_t *)kstpa;
		este = &ste[kstsize * NPTEPG];
		while (ste < este)
			*ste++ = SG_NV;
		/*
		 * Initialize level 2 descriptors (which immediately
		 * follow the level 1 table).  We need:
		 *	NPTEPG / SG4_LEV3SIZE
		 * level 2 descriptors to map each of the nptpages
		 * pages of PTEs.  Note that we set the "used" bit
		 * now to save the HW the expense of doing it.
		 */
		nl2desc = nptpages * (NPTEPG / SG4_LEV3SIZE);
		ste = (st_entry_t *)kstpa;
		ste = &ste[SG4_LEV1SIZE];
		este = &ste[nl2desc];
		protoste = kptpa | SG_U | SG_RW | SG_V;
		while (ste < este) {
			*ste++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}
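		/*
		 * Arithmetic sketch (assuming the usual hp300 values,
		 * PAGE_SIZE == 4KB, NPTEPG == 1024, SG4_LEV3SIZE == 64):
		 * each level 2 descriptor covers one 64-entry level 3
		 * block, so a full PT page of 1024 PTEs consumes
		 * 1024/64 == 16 level 2 descriptors, i.e.
		 * nl2desc == nptpages * 16.
		 */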
		/*
		 * Initialize level 1 descriptors.  We need:
		 *	howmany(nl2desc, SG4_LEV2SIZE)
		 * level 1 descriptors to map the `nl2desc' level 2's.
		 */
		nl1desc = howmany(nl2desc, SG4_LEV2SIZE);
		ste = (st_entry_t *)kstpa;
		este = &ste[nl1desc];
		protoste = (paddr_t)&ste[SG4_LEV1SIZE] | SG_U | SG_RW | SG_V;
		while (ste < este) {
			*ste++ = protoste;
			protoste += (SG4_LEV2SIZE * sizeof(st_entry_t));
		}
		/*
		 * Initialize the final level 1 descriptor to map the next
		 * block of level 2 descriptors for Sysptmap.
		 */
		ste = (st_entry_t *)kstpa;
		ste = &ste[SG4_LEV1SIZE - 1];
		*ste = protoste;
		/*
		 * Now initialize the final portion of that block of
		 * descriptors to map Sysmap and the "last PT page".
		 */
		i = SG4_LEV1SIZE + (nl1desc * SG4_LEV2SIZE);
		ste = (st_entry_t *)kstpa;
		ste = &ste[i + SG4_LEV2SIZE - (NPTEPG / SG4_LEV3SIZE) * 2];
		este = &ste[NPTEPG / SG4_LEV3SIZE];
		protoste = kptmpa | SG_U | SG_RW | SG_V;
		while (ste < este) {
			*ste++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}
		este = &ste[NPTEPG / SG4_LEV3SIZE];
		protoste = lkptpa | SG_U | SG_RW | SG_V;
		while (ste < este) {
			*ste++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}
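		/*
		 * Put differently (same 4KB page assumption): each
		 * level 2 descriptor maps 256KB, so 16 of them cover
		 * one 4MB segment.  The two runs above fill the last
		 * 32 descriptors of the block selected by the final
		 * level 1 entry: 16 point into kptmpa (backing the
		 * Sysmap segment at 0xFF800000) and 16 into lkptpa
		 * (backing the last segment with its PA == VA page).
		 */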
		/*
		 * Calculate the free level 2 descriptor mask
		 * noting that we have used:
		 *	0:		level 1 table
		 *	1 to nl1desc:	map page tables
		 *	nl1desc + 1:	maps kptmpa and last-page page table
		 */
		/* mark an entry for level 1 table */
		stfree = ~l2tobm(0);
		/* mark entries for map page tables */
		for (i = 1; i <= nl1desc; i++)
			stfree &= ~l2tobm(i);
		/* mark an entry for kptmpa and lkptpa */
		stfree &= ~l2tobm(i);
		/* mark entries not available */
		for (i = MAXKL2SIZE; i < sizeof(stfree) * NBBY; i++)
			stfree &= ~l2tobm(i);
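		/*
		 * Example (hypothetical values; l2tobm(n) yields the
		 * bitmap bit for level 2 block `n'): with nl1desc == 1
		 * this clears bits 0 (level 1 table), 1 (the PT map
		 * block) and 2 (the kptmpa/lkptpa block), plus all
		 * bits at or above MAXKL2SIZE; the bits left set are
		 * the level 2 blocks still free, exported below as
		 * protostfree.
		 */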

		/*
		 * Initialize Sysptmap
		 */
		pte = (pt_entry_t *)kptmpa;
		epte = &pte[nptpages];
		protopte = kptpa | PG_RW | PG_CI | PG_V;
		while (pte < epte) {
			*pte++ = protopte;
			protopte += PAGE_SIZE;
		}
		/*
		 * Invalidate all remaining entries.
		 */
		epte = (pt_entry_t *)kptmpa;
		epte = &epte[TIB_SIZE];
		while (pte < epte) {
			*pte++ = PG_NV;
		}
		/*
		 * Initialize the last ones to point to Sysptmap and the page
		 * table page allocated earlier.
		 */
		pte = (pt_entry_t *)kptmpa;
		pte = &pte[SYSMAP_VA >> SEGSHIFT];
		*pte = kptmpa | PG_RW | PG_CI | PG_V;
		pte = (pt_entry_t *)kptmpa;
		pte = &pte[MAXADDR >> SEGSHIFT];
		*pte = lkptpa | PG_RW | PG_CI | PG_V;
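		/*
		 * Note the self-reference: the PTE at index
		 * SYSMAP_VA >> SEGSHIFT points back at kptmpa itself,
		 * so once translation is on, Sysptmap appears inside
		 * the linear Sysmap array as the page of PTEs that
		 * maps the Sysmap region.
		 */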
	} else {
		/*
		 * Map the page table pages in both the HW segment table
		 * and the software Sysptmap.
		 */
		ste = (st_entry_t *)kstpa;
		pte = (pt_entry_t *)kptmpa;
		epte = &pte[nptpages];
		protoste = kptpa | SG_RW | SG_V;
		protopte = kptpa | PG_RW | PG_CI | PG_V;
		while (pte < epte) {
			*ste++ = protoste;
			*pte++ = protopte;
			protoste += PAGE_SIZE;
			protopte += PAGE_SIZE;
		}
		/*
		 * Invalidate all remaining entries in both.
		 */
		este = (st_entry_t *)kstpa;
		este = &este[TIA_SIZE];
		while (ste < este)
			*ste++ = SG_NV;
		epte = (pt_entry_t *)kptmpa;
		epte = &epte[TIB_SIZE];
		while (pte < epte)
			*pte++ = PG_NV;
		/*
		 * Initialize the last ones to point to Sysptmap and the page
		 * table page allocated earlier.
		 */
		ste = (st_entry_t *)kstpa;
		ste = &ste[SYSMAP_VA >> SEGSHIFT];
		pte = (pt_entry_t *)kptmpa;
		pte = &pte[SYSMAP_VA >> SEGSHIFT];
		*ste = kptmpa | SG_RW | SG_V;
		*pte = kptmpa | PG_RW | PG_CI | PG_V;
		ste = (st_entry_t *)kstpa;
		ste = &ste[MAXADDR >> SEGSHIFT];
		pte = (pt_entry_t *)kptmpa;
		pte = &pte[MAXADDR >> SEGSHIFT];
		*ste = lkptpa | SG_RW | SG_V;
		*pte = lkptpa | PG_RW | PG_CI | PG_V;
	}

	/*
	 * Invalidate all but the final entry in the last kernel PT page.
	 * The final entry maps the last page of physical memory to
	 * prepare a page that is PA == VA to turn on the MMU.
	 */
	pte = (pt_entry_t *)lkptpa;
	epte = &pte[NPTEPG - 1];
	while (pte < epte)
		*pte++ = PG_NV;
	*pte = MAXADDR | PG_RW | PG_CI | PG_V;
	/*
	 * Initialize kernel page table.
	 * Start by invalidating the `nptpages' that we have allocated.
	 */
	pte = (pt_entry_t *)kptpa;
	epte = &pte[nptpages * NPTEPG];
	while (pte < epte)
		*pte++ = PG_NV;

	/*
	 * The first page of kernel text is zero-filled in locore.s
	 * and left unmapped (it lies at VA 0).  The boot loader places
	 * the bootinfo there after the kernel is loaded.  Remember
	 * the physical address; we'll map it to a virtual address
	 * later.
	 */
	RELOC(bootinfo_pa, paddr_t) = firstpa;

	/*
	 * Validate PTEs for kernel text (RO).
	 * The first page of kernel text remains invalid; see locore.s
	 */
	pte = (pt_entry_t *)kptpa;
	pte = &pte[m68k_btop(KERNBASE + PAGE_SIZE)];
	epte = (pt_entry_t *)kptpa;
	epte = &epte[m68k_btop(m68k_trunc_page(&etext))];
	protopte = (firstpa + PAGE_SIZE) | PG_RO | PG_V;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}
	/*
	 * Validate PTEs for kernel data/bss, dynamic data allocated
	 * by us so far (kstpa - firstpa bytes), and pages for lwp0
	 * u-area and page table allocated below (RW).
	 */
	epte = (pt_entry_t *)kptpa;
	epte = &epte[m68k_btop(kstpa - firstpa)];
	protopte = (protopte & ~PG_PROT) | PG_RW;
	/*
	 * Enable copy-back caching of data pages
	 */
	if (RELOC(mmutype, int) == MMU_68040)
		protopte |= PG_CCB;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}
	/*
	 * Map the kernel segment table cache-inhibited on these
	 * machines (not strictly necessary on the 68040, but
	 * recommended by Motorola; mandatory on the 68060).
	 */
	epte = (pt_entry_t *)kptpa;
	epte = &epte[m68k_btop(nextpa - firstpa)];
	protopte = (protopte & ~PG_PROT) | PG_RW;
	if (RELOC(mmutype, int) == MMU_68040) {
		protopte &= ~PG_CCB;
		protopte |= PG_CIN;
	}
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}

	/*
	 * Finally, validate the internal IO space PTEs (RW+CI).
	 * We do this here since the 320/350 MMU registers (also
	 * used, but to a lesser extent, on other models) are mapped
	 * in this range and it would be nice to be able to access
	 * them after the MMU is turned on.
	 */

#define	PTE2VA(pte)	m68k_ptob(pte - ((pt_entry_t *)kptpa))
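	/*
	 * PTE2VA() converts a slot in the statically allocated kernel
	 * PT into the KVA that slot maps: the PT pages at kptpa hold
	 * the PTEs for KVA 0 upward, so a PTE's index, converted back
	 * to bytes with m68k_ptob(), is the virtual address it maps
	 * (again relying on the kernel being linked at KERNBASE == 0).
	 */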

	protopte = INTIOBASE | PG_RW | PG_CI | PG_V;
	epte = &pte[IIOMAPSIZE];
	RELOC(intiobase, uint8_t *) = (uint8_t *)PTE2VA(pte);
	RELOC(intiolimit, uint8_t *) = (uint8_t *)PTE2VA(epte);
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}
	RELOC(extiobase, uint8_t *) = (uint8_t *)PTE2VA(pte);
	pte += EIOMAPSIZE;
	RELOC(virtual_avail, vaddr_t) = PTE2VA(pte);
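	/*
	 * Resulting KVA picture (a sketch): the kernel PT now maps, in
	 * order, the kernel image, the bootstrap allocations above,
	 * IIOMAPSIZE pages of internal IO space, and EIOMAPSIZE slots
	 * reserved (left invalid) for external IO; virtual_avail
	 * begins immediately after the external IO window.
	 */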

	/*
	 * Calculate important exported kernel addresses and related values.
	 */
	/*
	 * Sysseg: base of kernel segment table
	 */
	RELOC(Sysseg, st_entry_t *) = (st_entry_t *)(kstpa - firstpa);
	RELOC(Sysseg_pa, paddr_t) = kstpa;
	if (RELOC(mmutype, int) == MMU_68040)
		RELOC(protostfree, u_int) = stfree;
	/*
	 * Sysptmap: base of kernel page table map
	 */
	RELOC(Sysptmap, pt_entry_t *) = (pt_entry_t *)(kptmpa - firstpa);
	/*
	 * Sysmap: kernel page table (as mapped through Sysptmap)
	 * Allocated at the end of KVA space.
	 */
	RELOC(Sysmap, pt_entry_t *) = (pt_entry_t *)SYSMAP_VA;
	/*
	 * CLKbase, MMUbase: important registers in internal IO space
	 * accessed from assembly language.
	 */
	RELOC(CLKbase, vaddr_t) =
	    (vaddr_t)RELOC(intiobase, char *) + CLKBASE;
	RELOC(MMUbase, vaddr_t) =
	    (vaddr_t)RELOC(intiobase, char *) + MMUBASE;

	/*
	 * Remember the u-area address so it can be loaded into lwp0
	 * via uvm_lwp_setuarea() later in pmap_bootstrap_finalize().
	 */
	RELOC(lwp0uarea, vaddr_t) = lwp0upa - firstpa;

	/*
	 * VM data structures are now initialized, set up data for
	 * the pmap module.
	 *
	 * Note about avail_end: msgbuf is initialized just after
	 * avail_end in machdep.c.
	 * Since the last page is used for rebooting the system
	 * (code is copied there and execution continues from the copied
	 * code before the MMU is disabled), the msgbuf will get trounced
	 * between reboots if it's placed in the last physical page.
	 * To work around this, we move avail_end back one more
	 * page so the msgbuf can be preserved.
	 */
	RELOC(avail_start, paddr_t) = nextpa;
	RELOC(avail_end, paddr_t) = m68k_ptob(RELOC(maxmem, int)) -
	    (m68k_round_page(MSGBUFSIZE) + m68k_ptob(1));
	RELOC(mem_size, vsize_t) = m68k_ptob(RELOC(physmem, int));
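	/*
	 * Arithmetic note: avail_end is pulled back from the top of
	 * memory by m68k_round_page(MSGBUFSIZE) bytes for the message
	 * buffer plus one more page (m68k_ptob(1)) for the reboot
	 * code page described above, so both survive across a reboot.
	 */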

	RELOC(virtual_end, vaddr_t) = VM_MAX_KERNEL_ADDRESS;

#ifdef M68K_MMU_HP
	/*
	 * Determine VA aliasing distance if any
	 */
	if (RELOC(ectype, int) == EC_VIRT) {
		if (RELOC(machineid, int) == HP_320)
			RELOC(pmap_aliasmask, int) = 0x3fff;	/* 16k */
		else if (RELOC(machineid, int) == HP_350)
			RELOC(pmap_aliasmask, int) = 0x7fff;	/* 32k */
	}
#endif

	/*
	 * Allocate some fixed, special purpose kernel virtual addresses
	 */
	{
		vaddr_t va = RELOC(virtual_avail, vaddr_t);

		RELOC(bootinfo_va, vaddr_t) = (vaddr_t)va;
		va += PAGE_SIZE;
		RELOC(CADDR1, void *) = (void *)va;
		va += PAGE_SIZE;
		RELOC(CADDR2, void *) = (void *)va;
		va += PAGE_SIZE;
		RELOC(vmmap, void *) = (void *)va;
		va += PAGE_SIZE;
		RELOC(ledbase, void *) = (void *)va;
		va += PAGE_SIZE;
		RELOC(msgbufaddr, void *) = (void *)va;
		va += m68k_round_page(MSGBUFSIZE);
		RELOC(virtual_avail, vaddr_t) = va;
	}
}