/*	$NetBSD: pmap_bootstrap.c,v 1.35 2008/12/20 14:35:53 tsutsui Exp $	*/

/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap_bootstrap.c	8.1 (Berkeley) 6/10/93
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap_bootstrap.c,v 1.35 2008/12/20 14:35:53 tsutsui Exp $");

#include <sys/param.h>
#include <sys/proc.h>

#include <machine/frame.h>
#include <machine/cpu.h>
#include <machine/hp300spu.h>
#include <machine/vmparam.h>
#include <machine/pte.h>

#include <hp300/hp300/clockreg.h>

#include <uvm/uvm_extern.h>

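/*
 * RELOC(v, t) yields an lvalue of type `t' for the kernel variable `v' at
 * its physical (pre-MMU) address: the variable's link-time address offset
 * by `firstpa', the physical address the kernel was loaded at.  For
 * example, RELOC(Sysptsize, int) reads Sysptsize while the MMU is still
 * off, and RELOC(Sysseg, st_entry_t *) = ... stores through the same
 * translation.
 */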
#define RELOC(v, t)	*((t*)((u_int)&(v) + firstpa))

extern char *etext;
extern int Sysptsize;
extern char *proc0paddr;
extern st_entry_t *Sysseg;
extern pt_entry_t *Sysptmap, *Sysmap;
extern vaddr_t CLKbase, MMUbase;
extern paddr_t bootinfo_pa;
extern vaddr_t bootinfo_va;

extern int maxmem, physmem;
extern paddr_t avail_start, avail_end;
extern vaddr_t virtual_avail, virtual_end;
extern vsize_t mem_size;
extern int protection_codes[];
#ifdef M68K_MMU_HP
extern int pmap_aliasmask;
#endif

void pmap_bootstrap(paddr_t, paddr_t);

/*
 * Special purpose kernel virtual addresses, used for mapping
 * physical pages for a variety of temporary or permanent purposes:
 *
 *	CADDR1, CADDR2:	pmap zero/copy operations
 *	vmmap:		/dev/mem, crash dumps, parity error checking
 *	ledbase:	SPU LEDs
 *	msgbufaddr:	kernel message buffer
 */
void *CADDR1, *CADDR2, *ledbase;
char *vmmap;
void *msgbufaddr;

/*
 * Bootstrap the VM system.
 *
 * Called with MMU off so we must relocate all global references by `firstpa'
 * (don't call any functions here!)  `nextpa' is the first available physical
 * memory address.  Returns an updated first PA reflecting the memory we
 * have allocated.  MMU is still off when we return.
 *
 * XXX assumes sizeof(u_int) == sizeof(pt_entry_t)
 * XXX a PIC compiler would make this much easier.
 */
void
pmap_bootstrap(paddr_t nextpa, paddr_t firstpa)
{
	paddr_t kstpa, kptpa, kptmpa, lkptpa, p0upa;
	u_int nptpages, kstsize;
	st_entry_t protoste, *ste;
	pt_entry_t protopte, *pte, *epte;

	/*
	 * Calculate important physical addresses:
	 *
	 *	kstpa		kernel segment table	1 page (!040)
	 *						N pages (040)
	 *
	 *	kptpa		statically allocated
	 *			kernel PT pages		Sysptsize+ pages
	 *
	 * [ Sysptsize is the number of pages of PT, IIOMAPSIZE and
	 *   EIOMAPSIZE are the number of PTEs, hence we need to round
	 *   the total to a page boundary with IO maps at the end. ]
	 *
	 *	kptmpa		kernel PT map		1 page
	 *
	 *	lkptpa		last kernel PT page	1 page
	 *
	 *	p0upa		proc 0 u-area		UPAGES pages
	 *
	 * The KVA corresponding to any of these PAs is:
	 *	(PA - firstpa + KERNBASE).
	 */
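	/*
	 * Note on the 040 segment table size: the table must hold MAXKL2SIZE
	 * level 2 blocks of SG4_LEV2SIZE descriptors each, and one page holds
	 * NPTEPG / SG4_LEV2SIZE such blocks, giving the page count computed
	 * below.  Other MMU types need only a single segment table page.
	 */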
	if (RELOC(mmutype, int) == MMU_68040)
		kstsize = MAXKL2SIZE / (NPTEPG/SG4_LEV2SIZE);
	else
		kstsize = 1;
	kstpa = nextpa;
	nextpa += kstsize * PAGE_SIZE;
	kptmpa = nextpa;
	nextpa += PAGE_SIZE;
	lkptpa = nextpa;
	nextpa += PAGE_SIZE;
	p0upa = nextpa;
	nextpa += USPACE;
	kptpa = nextpa;
	nptpages = RELOC(Sysptsize, int) +
	    (IIOMAPSIZE + EIOMAPSIZE + NPTEPG - 1) / NPTEPG;
	nextpa += nptpages * PAGE_SIZE;

	/*
	 * Initialize segment table and kernel page table map.
	 *
	 * On 68030s and earlier MMUs the two are identical except for
	 * the valid bits so both are initialized with essentially the
	 * same values.  On the 68040, which has a mandatory 3-level
	 * structure, the segment table holds the level 1 table and part
	 * (or all) of the level 2 table and hence is considerably
	 * different.  Here the first level consists of 128 descriptors
	 * (512 bytes) each mapping 32mb of address space.  Each of these
	 * points to blocks of 128 second level descriptors (512 bytes)
	 * each mapping 256kb.  Note that there may be additional "segment
	 * table" pages depending on how large MAXKL2SIZE is.
	 *
	 * Portions of the last two segments of KVA space (0xFF800000 -
	 * 0xFFFFFFFF) are mapped for a couple of purposes.
	 * The first segment (0xFF800000 - 0xFFBFFFFF) is mapped
	 * for the kernel page tables.
	 * The very last page (0xFFFFF000) in the second segment is mapped
	 * to the last physical page of RAM to give us a region in which
	 * PA == VA.  We use the first part of this page for enabling
	 * and disabling mapping.  The last part of this page also contains
	 * info left by the boot ROM.
	 *
	 * XXX cramming two levels of mapping into the single "segment"
	 * table on the 68040 is intended as a temporary hack to get things
	 * working.  The 224mb of address space that this allows will most
	 * likely be insufficient in the future (at least for the kernel).
	 */
	if (RELOC(mmutype, int) == MMU_68040) {
		int num;

		/*
		 * First invalidate the entire "segment table" pages
		 * (levels 1 and 2 have the same "invalid" value).
		 */
		pte = (u_int *)kstpa;
		epte = &pte[kstsize * NPTEPG];
		while (pte < epte)
			*pte++ = SG_NV;
		/*
		 * Initialize level 2 descriptors (which immediately
		 * follow the level 1 table).  We need:
		 *	NPTEPG / SG4_LEV3SIZE
		 * level 2 descriptors to map each of the nptpages
		 * pages of PTEs.  Note that we set the "used" bit
		 * now to save the HW the expense of doing it.
		 */
		num = nptpages * (NPTEPG / SG4_LEV3SIZE);
		pte = &((u_int *)kstpa)[SG4_LEV1SIZE];
		epte = &pte[num];
		protoste = kptpa | SG_U | SG_RW | SG_V;
		while (pte < epte) {
			*pte++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}
		/*
		 * Initialize level 1 descriptors.  We need:
		 *	roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE
		 * level 1 descriptors to map the `num' level 2's.
		 */
		pte = (u_int *)kstpa;
		epte = &pte[roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE];
		protoste = (u_int)&pte[SG4_LEV1SIZE] | SG_U | SG_RW | SG_V;
		while (pte < epte) {
			*pte++ = protoste;
			protoste += (SG4_LEV2SIZE * sizeof(st_entry_t));
		}
		/*
		 * Initialize the final level 1 descriptor to map the last
		 * block of level 2 descriptors.
		 */
		ste = &((u_int *)kstpa)[SG4_LEV1SIZE-1];
		pte = &((u_int *)kstpa)[kstsize*NPTEPG - SG4_LEV2SIZE];
		*ste = (u_int)pte | SG_U | SG_RW | SG_V;
		/*
		 * Now initialize the final portion of that block of
		 * descriptors to map kptmpa and the "last PT page".
		 */
		pte = &((u_int *)kstpa)[kstsize*NPTEPG - NPTEPG/SG4_LEV3SIZE*2];
		epte = &pte[NPTEPG/SG4_LEV3SIZE];
		protoste = kptmpa | SG_U | SG_RW | SG_V;
		while (pte < epte) {
			*pte++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}
		epte = &pte[NPTEPG/SG4_LEV3SIZE];
		protoste = lkptpa | SG_U | SG_RW | SG_V;
		while (pte < epte) {
			*pte++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}
		/*
		 * Initialize Sysptmap
		 */
		pte = (u_int *)kptmpa;
		epte = &pte[nptpages];
		protopte = kptpa | PG_RW | PG_CI | PG_V;
		while (pte < epte) {
			*pte++ = protopte;
			protopte += PAGE_SIZE;
		}
		/*
		 * Invalidate all but the last two remaining entries.
		 */
		epte = &((u_int *)kptmpa)[NPTEPG-2];
		while (pte < epte) {
			*pte++ = PG_NV;
		}
		/*
		 * Initialize the last ones to point to kptmpa and the page
		 * table page allocated earlier.
		 */
		*pte = kptmpa | PG_RW | PG_CI | PG_V;
		pte++;
		*pte = lkptpa | PG_RW | PG_CI | PG_V;
	} else {
		/*
		 * Map the page table pages in both the HW segment table
		 * and the software Sysptmap.
		 */
		ste = (u_int *)kstpa;
		pte = (u_int *)kptmpa;
		epte = &pte[nptpages];
		protoste = kptpa | SG_RW | SG_V;
		protopte = kptpa | PG_RW | PG_CI | PG_V;
		while (pte < epte) {
			*ste++ = protoste;
			*pte++ = protopte;
			protoste += PAGE_SIZE;
			protopte += PAGE_SIZE;
		}
		/*
		 * Invalidate all but the last two remaining entries in both.
		 */
		epte = &((u_int *)kptmpa)[NPTEPG-2];
		while (pte < epte) {
			*ste++ = SG_NV;
			*pte++ = PG_NV;
		}
		/*
		 * Initialize the last ones to point to kptmpa and the page
		 * table page allocated earlier.
		 */
		*ste = kptmpa | SG_RW | SG_V;
		*pte = kptmpa | PG_RW | PG_CI | PG_V;
		ste++;
		pte++;
		*ste = lkptpa | SG_RW | SG_V;
		*pte = lkptpa | PG_RW | PG_CI | PG_V;
	}
	/*
	 * Invalidate all but the final entry in the last kernel PT page
	 * (u-area PTEs will be validated later).  The final entry maps
	 * the last page of physical memory.
	 */
	pte = (u_int *)lkptpa;
	epte = &pte[NPTEPG-1];
	while (pte < epte)
		*pte++ = PG_NV;
	*pte = MAXADDR | PG_RW | PG_CI | PG_V;
	/*
	 * Initialize kernel page table.
	 * Start by invalidating the `nptpages' that we have allocated.
	 */
	pte = (u_int *)kptpa;
	epte = &pte[nptpages * NPTEPG];
	while (pte < epte)
		*pte++ = PG_NV;

	/*
	 * The first page of kernel text is zero-filled in locore.s
	 * and is not mapped (at VA 0).  The boot loader places the
	 * bootinfo here after the kernel is loaded.  Remember
	 * the physical address; we'll map it to a virtual address
	 * later.
	 */
	RELOC(bootinfo_pa, paddr_t) = firstpa;

	/*
	 * Validate PTEs for kernel text (RO).  The first page
	 * of kernel text remains invalid; see locore.s
	 */
	pte = &((u_int *)kptpa)[m68k_btop(KERNBASE + PAGE_SIZE)];
	epte = &pte[m68k_btop(m68k_trunc_page(&etext))];
	protopte = (firstpa + PAGE_SIZE) | PG_RO | PG_V;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}
	/*
	 * Validate PTEs for kernel data/bss, dynamic data allocated
	 * by us so far (nextpa - firstpa bytes), and pages for proc0
	 * u-area and page table allocated below (RW).
	 */
	epte = &((u_int *)kptpa)[m68k_btop(nextpa - firstpa)];
	protopte = (protopte & ~PG_PROT) | PG_RW;
	/*
	 * Enable copy-back caching of data pages
	 */
	if (RELOC(mmutype, int) == MMU_68040)
		protopte |= PG_CCB;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}
	/*
	 * Finally, validate the internal IO space PTEs (RW+CI).
	 * We do this here since the 320/350 MMU registers (also
	 * used, but to a lesser extent, on other models) are mapped
	 * in this range and it would be nice to be able to access
	 * them after the MMU is turned on.
	 */

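	/*
	 * PTE2VA(pte) converts a pointer into the statically allocated
	 * kernel PT (based at kptpa) into the virtual address that PTE
	 * will map: the PTE's index within the kernel PT, converted from
	 * pages to bytes.  This works because the kernel PT maps KVA
	 * starting at KERNBASE, which is 0 here.
	 */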
#define	PTE2VA(pte)	m68k_ptob(pte - ((pt_entry_t *)kptpa))

	protopte = INTIOBASE | PG_RW | PG_CI | PG_V;
	epte = &pte[IIOMAPSIZE];
	RELOC(intiobase, char *) = (char *)PTE2VA(pte);
	RELOC(intiolimit, char *) = (char *)PTE2VA(epte);
	while (pte < epte) {
		*pte++ = protopte;
		protopte += PAGE_SIZE;
	}
	RELOC(extiobase, char *) = (char *)PTE2VA(pte);
	pte += EIOMAPSIZE;
	RELOC(virtual_avail, vaddr_t) = PTE2VA(pte);

	/*
	 * Calculate important exported kernel virtual addresses
	 */
	/*
	 * Sysseg: base of kernel segment table
	 */
	RELOC(Sysseg, st_entry_t *) =
	    (st_entry_t *)(kstpa - firstpa);
	/*
	 * Sysptmap: base of kernel page table map
	 */
	RELOC(Sysptmap, pt_entry_t *) =
	    (pt_entry_t *)(kptmpa - firstpa);
	/*
	 * Sysmap: kernel page table (as mapped through Sysptmap)
	 * Allocated at the end of KVA space.
	 */
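	/*
	 * (NPTEPG - 2) * NPTEPG is the page index at which the
	 * second-to-last 4MB segment begins; with the 4KB pages used
	 * here that is 0xFF800000, the kernel page table region
	 * reserved in the layout comment above.
	 */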
	RELOC(Sysmap, pt_entry_t *) =
	    (pt_entry_t *)m68k_ptob((NPTEPG - 2) * NPTEPG);
	/*
	 * CLKbase, MMUbase: important registers in internal IO space
	 * accessed from assembly language.
	 */
	RELOC(CLKbase, vaddr_t) =
	    (vaddr_t)RELOC(intiobase, char *) + CLKBASE;
	RELOC(MMUbase, vaddr_t) =
	    (vaddr_t)RELOC(intiobase, char *) + MMUBASE;

	/*
	 * Setup u-area for process 0.
	 */
	/*
	 * Zero the u-area.
	 * NOTE: `pte' and `epte' aren't PTEs here.
	 */
	pte = (u_int *)p0upa;
	epte = (u_int *)(p0upa + USPACE);
	while (pte < epte)
		*pte++ = 0;
	/*
	 * Remember the u-area address so it can be loaded in the
	 * proc struct p_addr field later.
	 */
	RELOC(proc0paddr, char *) = (char *)(p0upa - firstpa);

	/*
	 * VM data structures are now initialized, set up data for
	 * the pmap module.
	 *
	 * Note about avail_end: msgbuf is initialized just after
	 * avail_end in machdep.c.  Since the last page is used
	 * for rebooting the system (code is copied there and
	 * execution continues from copied code before the MMU
	 * is disabled), the msgbuf will get trounced between
	 * reboots if it's placed in the last physical page.
	 * To work around this, we move avail_end back one more
	 * page so the msgbuf can be preserved.
	 */
	RELOC(avail_start, paddr_t) = nextpa;
	RELOC(avail_end, paddr_t) = m68k_ptob(RELOC(maxmem, int)) -
	    (m68k_round_page(MSGBUFSIZE) + m68k_ptob(1));
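	/*
	 * i.e. reserve MSGBUFSIZE (rounded up to whole pages) for the
	 * message buffer plus the one extra page described in the note
	 * above.
	 */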
	RELOC(mem_size, vsize_t) = m68k_ptob(RELOC(physmem, int));
	RELOC(virtual_end, vaddr_t) = VM_MAX_KERNEL_ADDRESS;

#ifdef M68K_MMU_HP
	/*
	 * Determine VA aliasing distance if any
	 */
	if (RELOC(ectype, int) == EC_VIRT) {
		if (RELOC(machineid, int) == HP_320)
			RELOC(pmap_aliasmask, int) = 0x3fff;	/* 16k */
		else if (RELOC(machineid, int) == HP_350)
			RELOC(pmap_aliasmask, int) = 0x7fff;	/* 32k */
	}
#endif

	/*
	 * Initialize protection array.
	 * XXX don't use a switch statement, it might produce an
	 * absolute "jmp" table.
	 */
	{
		int *kp;

		kp = &RELOC(protection_codes, int);
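		/*
		 * VM_PROT_READ, VM_PROT_WRITE and VM_PROT_EXECUTE are
		 * distinct single bits, so each combination below indexes
		 * a unique slot in protection_codes[].
		 */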
		kp[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_NONE] = 0;
		kp[VM_PROT_READ|VM_PROT_NONE|VM_PROT_NONE] = PG_RO;
		kp[VM_PROT_READ|VM_PROT_NONE|VM_PROT_EXECUTE] = PG_RO;
		kp[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_EXECUTE] = PG_RO;
		kp[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_NONE] = PG_RW;
		kp[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
		kp[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_NONE] = PG_RW;
		kp[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
	}

	/*
	 * Kernel page/segment table allocated above,
	 * just initialize pointers.
	 */
	{
		struct pmap *kpm = &RELOC(kernel_pmap_store, struct pmap);

		kpm->pm_stab = RELOC(Sysseg, st_entry_t *);
		kpm->pm_ptab = RELOC(Sysmap, pt_entry_t *);
		simple_lock_init(&kpm->pm_lock);
		kpm->pm_count = 1;
		kpm->pm_stpa = (st_entry_t *)kstpa;
		/*
		 * For the 040 we also initialize the free level 2
		 * descriptor mask noting that we have used:
		 *	0:		level 1 table
		 *	1 to `num':	map page tables
		 *	MAXKL2SIZE-1:	maps kptmpa and last-page page table
		 */
		if (RELOC(mmutype, int) == MMU_68040) {
			int num;

			kpm->pm_stfree = ~l2tobm(0);
			num = roundup(nptpages * (NPTEPG / SG4_LEV3SIZE),
			    SG4_LEV2SIZE) / SG4_LEV2SIZE;
			while (num)
				kpm->pm_stfree &= ~l2tobm(num--);
			kpm->pm_stfree &= ~l2tobm(MAXKL2SIZE-1);
			for (num = MAXKL2SIZE;
			     num < sizeof(kpm->pm_stfree)*NBBY;
			     num++)
				kpm->pm_stfree &= ~l2tobm(num);
		}
	}

	/*
	 * Allocate some fixed, special purpose kernel virtual addresses
	 */
	{
		vaddr_t va = RELOC(virtual_avail, vaddr_t);

		RELOC(bootinfo_va, vaddr_t) = (vaddr_t)va;
		va += PAGE_SIZE;
		RELOC(CADDR1, void *) = (void *)va;
		va += PAGE_SIZE;
		RELOC(CADDR2, void *) = (void *)va;
		va += PAGE_SIZE;
		RELOC(vmmap, void *) = (void *)va;
		va += PAGE_SIZE;
		RELOC(ledbase, void *) = (void *)va;
		va += PAGE_SIZE;
		RELOC(msgbufaddr, void *) = (void *)va;
		va += m68k_round_page(MSGBUFSIZE);
		RELOC(virtual_avail, vaddr_t) = va;
	}
}