/*	$NetBSD: pmap_bootstrap.c,v 1.47 2010/12/25 14:43:00 tsutsui Exp $	*/

/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap_bootstrap.c	8.1 (Berkeley)	6/10/93
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap_bootstrap.c,v 1.47 2010/12/25 14:43:00 tsutsui Exp $");

#include <sys/param.h>

#include <machine/frame.h>
#include <machine/cpu.h>
#include <machine/hp300spu.h>
#include <machine/vmparam.h>
#include <machine/pte.h>

#include <hp300/hp300/clockreg.h>

#include <uvm/uvm_extern.h>

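/*
 * RELOC(v, t) evaluates global `v' (of type t) through its physical
 * address: with the MMU still off, the link-time virtual address of a
 * global must be offset by `firstpa' before it can be dereferenced.
 * E.g. RELOC(mmutype, int) below reads the physical copy of `mmutype'.
 */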
#define	RELOC(v, t)	*((t*)((uintptr_t)&(v) + firstpa))

extern char *etext;
extern vaddr_t CLKbase, MMUbase;
extern paddr_t bootinfo_pa;
extern vaddr_t bootinfo_va;

extern int maxmem, physmem;
extern paddr_t avail_start, avail_end;
#ifdef M68K_MMU_HP
extern int pmap_aliasmask;
#endif

void	pmap_bootstrap(paddr_t, paddr_t);

/*
 * Special purpose kernel virtual addresses, used for mapping
 * physical pages for a variety of temporary or permanent purposes:
 *
 *	CADDR1, CADDR2:	pmap zero/copy operations
 *	vmmap:		/dev/mem, crash dumps, parity error checking
 *	ledbase:	SPU LEDs
 *	msgbufaddr:	kernel message buffer
 */
void	*CADDR1, *CADDR2, *ledbase;
char	*vmmap;
void	*msgbufaddr;

/*
 * Bootstrap the VM system.
 *
 * Called with MMU off so we must relocate all global references by `firstpa'
 * (don't call any functions here!)  `nextpa' is the first available physical
 * memory address.  The first physical address left unused by our allocations
 * is exported to the pmap module via `avail_start'.  MMU is still off when
 * we return.
 *
 * XXX assumes sizeof(u_int) == sizeof(pt_entry_t)
 * XXX a PIC compiler would make this much easier.
 */
void
pmap_bootstrap(paddr_t nextpa, paddr_t firstpa)
{
	paddr_t kstpa, kptpa, kptmpa, lkptpa, lwp0upa;
	u_int nptpages, kstsize;
	st_entry_t protoste, *ste, *este;
	pt_entry_t protopte, *pte, *epte;
	u_int stfree = 0;	/* XXX: gcc -Wuninitialized */

	/*
	 * Calculate important physical addresses:
	 *
	 *	lwp0upa		lwp 0 u-area		UPAGES pages
	 *
	 *	kstpa		kernel segment table	1 page (!040)
	 *						N pages (040)
	 *
	 *	kptmpa		kernel PT map		1 page
	 *
	 *	lkptpa		last kernel PT page	1 page
	 *
	 *	kptpa		statically allocated
	 *			kernel PT pages		Sysptsize+ pages
	 *
	 * [ Sysptsize is the number of pages of PT, IIOMAPSIZE and
	 *   EIOMAPSIZE are the number of PTEs, hence we need to round
	 *   the total to a page boundary with IO maps at the end. ]
	 *
	 * The KVA corresponding to any of these PAs is:
	 *	(PA - firstpa + KERNBASE).
	 */
	lwp0upa = nextpa;
	nextpa += USPACE;
	if (RELOC(mmutype, int) == MMU_68040)
		kstsize = MAXKL2SIZE / (NPTEPG/SG4_LEV2SIZE);
	else
		kstsize = 1;
	kstpa = nextpa;
	nextpa += kstsize * PAGE_SIZE;
	kptmpa = nextpa;
	nextpa += PAGE_SIZE;
	lkptpa = nextpa;
	nextpa += PAGE_SIZE;
	kptpa = nextpa;
	nptpages = RELOC(Sysptsize, int) +
	    (IIOMAPSIZE + EIOMAPSIZE + NPTEPG - 1) / NPTEPG;
	nextpa += nptpages * PAGE_SIZE;
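
	/*
	 * To make the sizes concrete: with 4KB pages a PT page holds
	 * NPTEPG == 1024 PTEs, so the IO maps add IIOMAPSIZE +
	 * EIOMAPSIZE PTEs rounded up to whole pages, and on an '040
	 * the segment table gets MAXKL2SIZE / 8 pages, since eight
	 * 512-byte level 2 blocks fit in each 4KB page.
	 */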

	/*
	 * Initialize segment table and kernel page table map.
	 *
	 * On 68030s and earlier MMUs the two are identical except for
	 * the valid bits so both are initialized with essentially the
	 * same values.  On the 68040, which has a mandatory 3-level
	 * structure, the segment table holds the level 1 table and part
	 * (or all) of the level 2 table and hence is considerably
	 * different.  Here the first level consists of 128 descriptors
	 * (512 bytes) each mapping 32mb of address space.  Each of these
	 * points to blocks of 128 second level descriptors (512 bytes)
	 * each mapping 256kb.  Note that there may be additional "segment
	 * table" pages depending on how large MAXKL2SIZE is.
	 *
	 * Portions of the last two segments of KVA space (0xFF800000 -
	 * 0xFFFFFFFF) are mapped for a couple of purposes.
	 * The first segment (0xFF800000 - 0xFFBFFFFF) is mapped
	 * for the kernel page tables.
	 * The very last page (0xFFFFF000) in the second segment is mapped
	 * to the last physical page of RAM to give us a region in which
	 * PA == VA.  We use the first part of this page for enabling
	 * and disabling mapping.  The last part of this page also contains
	 * info left by the boot ROM.
	 *
	 * XXX cramming two levels of mapping into the single "segment"
	 * table on the 68040 is intended as a temporary hack to get things
	 * working.  The 224mb of address space that this allows will most
	 * likely be insufficient in the future (at least for the kernel).
	 */
	if (RELOC(mmutype, int) == MMU_68040) {
		int nl1desc, nl2desc, i;

		/*
		 * First invalidate the entire "segment table" pages
		 * (levels 1 and 2 have the same "invalid" value).
		 */
		ste = (st_entry_t *)kstpa;
		este = &ste[kstsize * NPTEPG];
		while (ste < este)
			*ste++ = SG_NV;
		/*
		 * Initialize level 2 descriptors (which immediately
		 * follow the level 1 table).  We need:
		 *	NPTEPG / SG4_LEV3SIZE
		 * level 2 descriptors to map each of the nptpages
		 * pages of PTEs.  Note that we set the "used" bit
		 * now to save the HW the expense of doing it.
		 */
		nl2desc = nptpages * (NPTEPG / SG4_LEV3SIZE);
		ste = (st_entry_t *)kstpa;
		ste = &ste[SG4_LEV1SIZE];
		este = &ste[nl2desc];
		protoste = kptpa | SG_U | SG_RW | SG_V;
		while (ste < este) {
			*ste++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}
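		/*
		 * With 4KB pages that is 1024 / 64 == 16 level 2
		 * descriptors per PT page: each descriptor points at a
		 * 64-entry level 3 block, which in turn maps 64 * 4KB
		 * == 256kb, matching the geometry described above.
		 */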
		/*
		 * Initialize level 1 descriptors.  We need:
		 *	howmany(nl2desc, SG4_LEV2SIZE)
		 * level 1 descriptors to map the `nl2desc' level 2's.
		 */
		nl1desc = howmany(nl2desc, SG4_LEV2SIZE);
		ste = (st_entry_t *)kstpa;
		este = &ste[nl1desc];
		protoste = (paddr_t)&ste[SG4_LEV1SIZE] | SG_U | SG_RW | SG_V;
		while (ste < este) {
			*ste++ = protoste;
			protoste += (SG4_LEV2SIZE * sizeof(st_entry_t));
		}
		/*
		 * Initialize the final level 1 descriptor to map the next
		 * block of level 2 descriptors for Sysptmap.
		 */
		ste = (st_entry_t *)kstpa;
		ste = &ste[SG4_LEV1SIZE - 1];
		*ste = protoste;
		/*
		 * Now initialize the final portion of that block of
		 * descriptors to map kptmpa and the "last PT page".
		 */
		i = SG4_LEV1SIZE + (nl1desc * SG4_LEV2SIZE);
		ste = (st_entry_t *)kstpa;
		ste = &ste[i + SG4_LEV2SIZE - (NPTEPG / SG4_LEV3SIZE) * 2];
		este = &ste[NPTEPG / SG4_LEV3SIZE];
		protoste = kptmpa | SG_U | SG_RW | SG_V;
		while (ste < este) {
			*ste++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}
		este = &ste[NPTEPG / SG4_LEV3SIZE];
		protoste = lkptpa | SG_U | SG_RW | SG_V;
		while (ste < este) {
			*ste++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}
		/*
		 * Calculate the free level 2 descriptor mask
		 * noting that we have used:
		 *	0:		level 1 table
		 *	1 to nl1desc:	map page tables
		 *	nl1desc + 1:	maps kptmpa and last-page page table
		 */
		/* mark an entry for level 1 table */
		stfree = ~l2tobm(0);
		/* mark entries for map page tables */
		for (i = 1; i <= nl1desc; i++)
			stfree &= ~l2tobm(i);
		/* mark an entry for kptmpa and lkptpa */
		stfree &= ~l2tobm(i);
		/* mark entries not available */
		for (i = MAXKL2SIZE; i < sizeof(stfree) * NBBY; i++)
			stfree &= ~l2tobm(i);
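
		/*
		 * The net effect (assuming l2tobm(n) is simply bit n of
		 * the mask) is that only bits nl1desc + 2 through
		 * MAXKL2SIZE - 1 remain set, i.e. only those 512-byte
		 * level 2 blocks are still free for later segment-table
		 * allocations.
		 */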

		/*
		 * Initialize Sysptmap
		 */
		pte = (pt_entry_t *)kptmpa;
		epte = &pte[nptpages];
		protopte = kptpa | PG_RW | PG_CI | PG_V;
		while (pte < epte) {
			*pte++ = protopte;
			protopte += PAGE_SIZE;
		}
		/*
		 * Invalidate all remaining entries.
		 */
		epte = (pt_entry_t *)kptmpa;
		epte = &epte[TIB_SIZE];
		while (pte < epte) {
			*pte++ = PG_NV;
		}
		/*
		 * Initialize the last ones to point to kptmpa and the page
		 * table page allocated earlier.
		 */
		pte = (pt_entry_t *)kptmpa;
		pte = &pte[SYSMAP_VA >> SEGSHIFT];
		*pte = kptmpa | PG_RW | PG_CI | PG_V;
		pte = (pt_entry_t *)kptmpa;
		pte = &pte[MAXADDR >> SEGSHIFT];
		*pte = lkptpa | PG_RW | PG_CI | PG_V;
	} else {
		/*
		 * Map the page table pages in both the HW segment table
		 * and the software Sysptmap.
		 */
		ste = (st_entry_t *)kstpa;
		pte = (pt_entry_t *)kptmpa;
		epte = &pte[nptpages];
		protoste = kptpa | SG_RW | SG_V;
		protopte = kptpa | PG_RW | PG_CI | PG_V;
		while (pte < epte) {
			*ste++ = protoste;
			*pte++ = protopte;
			protoste += PAGE_SIZE;
			protopte += PAGE_SIZE;
		}
		/*
		 * Invalidate all remaining entries in both.
		 */
		este = (st_entry_t *)kstpa;
		este = &este[TIA_SIZE];
		while (ste < este)
			*ste++ = SG_NV;
		epte = (pt_entry_t *)kptmpa;
		epte = &epte[TIB_SIZE];
		while (pte < epte)
			*pte++ = PG_NV;
		/*
		 * Initialize the last ones to point to kptmpa and the page
		 * table page allocated earlier.
		 */
		ste = (st_entry_t *)kstpa;
		ste = &ste[SYSMAP_VA >> SEGSHIFT];
		pte = (pt_entry_t *)kptmpa;
		pte = &pte[SYSMAP_VA >> SEGSHIFT];
		*ste = kptmpa | SG_RW | SG_V;
		*pte = kptmpa | PG_RW | PG_CI | PG_V;
		ste = (st_entry_t *)kstpa;
		ste = &ste[MAXADDR >> SEGSHIFT];
		pte = (pt_entry_t *)kptmpa;
		pte = &pte[MAXADDR >> SEGSHIFT];
		*ste = lkptpa | SG_RW | SG_V;
		*pte = lkptpa | PG_RW | PG_CI | PG_V;
	}
	/*
	 * Invalidate all but the final entry in the last kernel PT page.
	 * The final entry maps the last page of physical memory to
	 * prepare a page that is PA == VA to turn on the MMU.
	 */
	pte = (pt_entry_t *)lkptpa;
	epte = &pte[NPTEPG - 1];
	while (pte < epte)
		*pte++ = PG_NV;
	*pte = MAXADDR | PG_RW | PG_CI | PG_V;
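	/*
	 * That is, KVA 0xFFFFF000 (MAXADDR) now translates to PA
	 * 0xFFFFF000, the last physical page of RAM, so code copied
	 * there (see the avail_end note below) keeps executing at the
	 * same address whether the MMU is on or off; the mapping is
	 * cache-inhibited (PG_CI).
	 */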
	/*
	 * Initialize kernel page table.
	 * Start by invalidating the `nptpages' that we have allocated.
	 */
	pte = (pt_entry_t *)kptpa;
	epte = &pte[nptpages * NPTEPG];
	while (pte < epte)
		*pte++ = PG_NV;

	/*
	 * The first page of kernel text is zero-filled in locore.s
	 * and is not mapped (at VA 0).  The boot loader places the
	 * bootinfo here after the kernel is loaded.  Remember
	 * the physical address; we'll map it to a virtual address
	 * later.
	 */
	RELOC(bootinfo_pa, paddr_t) = firstpa;

	/*
	 * Validate PTEs for kernel text (RO).  The first page
	 * of kernel text remains invalid; see locore.s
	 */
	pte = (pt_entry_t *)kptpa;
	pte = &pte[m68k_btop(KERNBASE + PAGE_SIZE)];
	epte = &pte[m68k_btop(m68k_trunc_page(&etext))];
	protopte = (firstpa + PAGE_SIZE) | PG_RO | PG_V;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}
	/*
	 * Validate PTEs for kernel data/bss, dynamic data allocated
	 * by us so far (nextpa - firstpa bytes), and pages for lwp0
	 * u-area and page table allocated below (RW).
	 */
	epte = (pt_entry_t *)kptpa;
	epte = &epte[m68k_btop(nextpa - firstpa)];
	protopte = (protopte & ~PG_PROT) | PG_RW;
	/*
	 * Enable copy-back caching of data pages
	 */
	if (RELOC(mmutype, int) == MMU_68040)
		protopte |= PG_CCB;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}
	/*
	 * Finally, validate the internal IO space PTEs (RW+CI).
	 * We do this here since the 320/350 MMU registers (also
	 * used, but to a lesser extent, on other models) are mapped
	 * in this range and it would be nice to be able to access
	 * them after the MMU is turned on.
	 */

#define	PTE2VA(pte)	m68k_ptob(pte - ((pt_entry_t *)kptpa))
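/*
 * PTE2VA(pte) yields the KVA mapped by a PTE pointer within the
 * statically allocated kernel PT: the pointer difference from the base
 * of the PT is the page index, and m68k_ptob() converts pages to bytes.
 * This works because the kernel PT maps KVA starting at KERNBASE,
 * which is 0 on hp300.
 */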

	protopte = INTIOBASE | PG_RW | PG_CI | PG_V;
	epte = &pte[IIOMAPSIZE];
	RELOC(intiobase, uint8_t *) = (uint8_t *)PTE2VA(pte);
	RELOC(intiolimit, uint8_t *) = (uint8_t *)PTE2VA(epte);
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}
	RELOC(extiobase, uint8_t *) = (uint8_t *)PTE2VA(pte);
	pte += EIOMAPSIZE;
	RELOC(virtual_avail, vaddr_t) = PTE2VA(pte);
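
	/*
	 * At this point the statically allocated kernel PT covers, in
	 * order: the (unmapped) page at VA 0, kernel text (RO), kernel
	 * data/bss plus the bootstrap allocations (RW), the internal IO
	 * space (intiobase..intiolimit, RW+CI), and EIOMAPSIZE pages
	 * left invalid for external IO; virtual_avail begins just past
	 * them.
	 */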

	/*
	 * Calculate important exported kernel addresses and related values.
	 */
	/*
	 * Sysseg: base of kernel segment table
	 */
	RELOC(Sysseg, st_entry_t *) = (st_entry_t *)(kstpa - firstpa);
	RELOC(Sysseg_pa, paddr_t) = kstpa;
	if (RELOC(mmutype, int) == MMU_68040)
		RELOC(protostfree, u_int) = stfree;
	/*
	 * Sysptmap: base of kernel page table map
	 */
	RELOC(Sysptmap, pt_entry_t *) = (pt_entry_t *)(kptmpa - firstpa);
	/*
	 * Sysmap: kernel page table (as mapped through Sysptmap)
	 * Allocated at the end of KVA space.
	 */
	RELOC(Sysmap, pt_entry_t *) = (pt_entry_t *)SYSMAP_VA;
	/*
	 * CLKbase, MMUbase: important registers in internal IO space
	 * accessed from assembly language.
	 */
	RELOC(CLKbase, vaddr_t) =
	    (vaddr_t)RELOC(intiobase, char *) + CLKBASE;
	RELOC(MMUbase, vaddr_t) =
	    (vaddr_t)RELOC(intiobase, char *) + MMUBASE;

	/*
	 * Remember the u-area address so it can be set in lwp0
	 * via uvm_lwp_setuarea() later in pmap_bootstrap_finalize().
	 */
	RELOC(lwp0uarea, vaddr_t) = lwp0upa - firstpa;

	/*
	 * VM data structures are now initialized, set up data for
	 * the pmap module.
	 *
	 * Note about avail_end: msgbuf is initialized just after
	 * avail_end in machdep.c.  Since the last page is used
	 * for rebooting the system (code is copied there and
	 * execution continues from copied code before the MMU
	 * is disabled), the msgbuf will get trounced between
	 * reboots if it's placed in the last physical page.
	 * To work around this, we move avail_end back one more
	 * page so the msgbuf can be preserved.
	 */
	RELOC(avail_start, paddr_t) = nextpa;
	RELOC(avail_end, paddr_t) = m68k_ptob(RELOC(maxmem, int)) -
	    (m68k_round_page(MSGBUFSIZE) + m68k_ptob(1));
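	/*
	 * i.e. avail_end = top of RAM - msgbuf (rounded up to whole
	 * pages) - one extra page for the reboot code described above.
	 */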
	RELOC(mem_size, vsize_t) = m68k_ptob(RELOC(physmem, int));
	RELOC(virtual_end, vaddr_t) = VM_MAX_KERNEL_ADDRESS;

#ifdef M68K_MMU_HP
	/*
	 * Determine VA aliasing distance if any
	 */
	if (RELOC(ectype, int) == EC_VIRT) {
		if (RELOC(machineid, int) == HP_320)
			RELOC(pmap_aliasmask, int) = 0x3fff;	/* 16k */
		else if (RELOC(machineid, int) == HP_350)
			RELOC(pmap_aliasmask, int) = 0x7fff;	/* 32k */
	}
#endif

	/*
	 * Allocate some fixed, special purpose kernel virtual addresses
	 */
	{
		vaddr_t va = RELOC(virtual_avail, vaddr_t);

		RELOC(bootinfo_va, vaddr_t) = (vaddr_t)va;
		va += PAGE_SIZE;
		RELOC(CADDR1, void *) = (void *)va;
		va += PAGE_SIZE;
		RELOC(CADDR2, void *) = (void *)va;
		va += PAGE_SIZE;
		RELOC(vmmap, void *) = (void *)va;
		va += PAGE_SIZE;
		RELOC(ledbase, void *) = (void *)va;
		va += PAGE_SIZE;
		RELOC(msgbufaddr, void *) = (void *)va;
		va += m68k_round_page(MSGBUFSIZE);
		RELOC(virtual_avail, vaddr_t) = va;
	}
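
	/*
	 * Note that only the addresses are reserved here: one page each
	 * for bootinfo_va, CADDR1, CADDR2, vmmap and ledbase, and
	 * MSGBUFSIZE (rounded to pages) for msgbufaddr.  The mappings
	 * themselves are entered later (e.g. the msgbuf just after
	 * avail_end in machdep.c, as noted above).
	 */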
}