/*	$NetBSD: loadfile_machdep.c,v 1.2 2006/03/04 03:03:31 uwe Exp $	*/

/*-
 * Copyright (c) 2005 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This work is based on the code contributed by Robert Drehmel to the
 * FreeBSD project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <lib/libsa/stand.h>

#include <machine/pte.h>
#include <machine/cpu.h>
#include <machine/ctlreg.h>
#include <machine/vmparam.h>
#include <machine/promlib.h>

#include "boot.h"
#include "openfirm.h"


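/*
 * MAXSEGNUM bounds the number of mapped regions tracked in kvamap[]
 * below.  hi()/lo() split a 64-bit TLB data word into the two 32-bit
 * halves expected by the itlb_enter()/dtlb_enter() assembly helpers,
 * and roundup2() rounds x up to the next multiple of y (y must be a
 * power of two).
 */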
#define MAXSEGNUM	50
#define hi(val)		((uint32_t)(((val) >> 32) & (uint32_t)-1))
#define lo(val)		((uint32_t)((val) & (uint32_t)-1))

#define roundup2(x, y)	(((x)+((y)-1))&(~((y)-1)))


typedef int phandle_t;

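/*
 * TLB helpers implemented in assembly: the *_enter() routines install
 * a TLB entry for the given data word, while the *_va_to_pa() routines
 * probe the MMU for an existing translation and return (vaddr_t)-1 if
 * there is none.
 */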
extern void	itlb_enter(vaddr_t, uint32_t, uint32_t);
extern void	dtlb_enter(vaddr_t, uint32_t, uint32_t);
extern vaddr_t	itlb_va_to_pa(vaddr_t);
extern vaddr_t	dtlb_va_to_pa(vaddr_t);

static void	tlb_init(void);

static int	mmu_mapin(vaddr_t, vsize_t);
static ssize_t	mmu_read(int, void *, size_t);
static void*	mmu_memcpy(void *, const void *, size_t);
static void*	mmu_memset(void *, int, size_t);
static void	mmu_freeall(void);

static int	ofw_mapin(vaddr_t, vsize_t);
static ssize_t	ofw_read(int, void *, size_t);
static void*	ofw_memcpy(void *, const void *, size_t);
static void*	ofw_memset(void *, int, size_t);
static void	ofw_freeall(void);

static int	nop_mapin(vaddr_t, vsize_t);
static ssize_t	nop_read(int, void *, size_t);
static void*	nop_memcpy(void *, const void *, size_t);
static void*	nop_memset(void *, int, size_t);
static void	nop_freeall(void);


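/*
 * Bookkeeping for the locked TLB entries we install, so they can later
 * be handed to the kernel through bootinfo (see sparc64_bi_add()).
 */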
struct tlb_entry *dtlb_store = NULL;
struct tlb_entry *itlb_store = NULL;

int dtlb_slot;
int itlb_slot;
int dtlb_slot_max;
int itlb_slot_max;

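/*
 * Virtual address ranges that have already been mapped or claimed;
 * kvamap_extract() consults this list to avoid double mappings.
 */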
static struct kvamap {
	uint64_t start;
	uint64_t end;
} kvamap[MAXSEGNUM];

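/*
 * Memory access methods, one set per mapping strategy; memsw points at
 * the active set and is switched by loadfile_set_allocator().  Since
 * the requested type indexes memswa[] directly, the order of entries
 * corresponds to the LOADFILE_*_ALLOCATOR constants listed further down.
 */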
static struct memsw {
	ssize_t	(* read)(int f, void *addr, size_t size);
	void*	(* memcpy)(void *dst, const void *src, size_t size);
	void*	(* memset)(void *dst, int c, size_t size);
	void	(* freeall)(void);
} memswa[] = {
	{ nop_read, nop_memcpy, nop_memset, nop_freeall },
	{ ofw_read, ofw_memcpy, ofw_memset, ofw_freeall },
	{ mmu_read, mmu_memcpy, mmu_memset, mmu_freeall }
};

static struct memsw *memsw = &memswa[0];


/*
 * Check if a memory region is already mapped.  Return the length and
 * the virtual address of the unmapped sub-region, if any.
 */
static uint64_t
kvamap_extract(vaddr_t va, vsize_t len, vaddr_t *new_va)
{
	int i;

	*new_va = va;
	for (i = 0; (len > 0) && (i < MAXSEGNUM); i++) {
		if (kvamap[i].start == 0)
			break;
		if ((kvamap[i].start <= va) && (va < kvamap[i].end)) {
			uint64_t va_len = kvamap[i].end - va;
			len = (va_len < len) ? len - va_len : 0;
			*new_va = kvamap[i].end;
		}
	}

	return (len);
}
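
/*
 * Example: with kvamap[0] = [4MB, 8MB), asking for 2MB at 6MB yields 0
 * (fully mapped), while asking for 8MB at 6MB yields 6MB of unmapped
 * length with *new_va moved up to the 8MB boundary.
 */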

/*
 * Record a new kernel mapping.
 */
static void
kvamap_enter(uint64_t va, uint64_t len)
{
	int i;

	DPRINTF(("kvamap_enter: %d@%p\n", (int)len, (void*)(u_long)va));
	for (i = 0; (len > 0) && (i < MAXSEGNUM); i++) {
		if (kvamap[i].start == 0) {
			kvamap[i].start = va;
			kvamap[i].end = va + len;
			break;
		}
	}

	if (i == MAXSEGNUM) {
		panic("Too many allocations requested.");
	}
}

/*
 * Initialize the TLB bookkeeping as required by the MMU mapping functions.
 */
static void
tlb_init(void)
{
	phandle_t child;
	phandle_t root;
	char buf[128];
	u_int bootcpu;
	u_int cpu = (u_int)-1;	/* no match found yet */

	if (dtlb_store != NULL) {
		return;
	}

	bootcpu = get_cpuid();

	if ( (root = prom_findroot()) == -1) {
		panic("tlb_init: prom_findroot()");
	}

	for (child = prom_firstchild(root); child != 0;
	    child = prom_nextsibling(child)) {
		if (child == -1) {
			panic("tlb_init: prom_nextsibling");
		}
		if (_prom_getprop(child, "device_type", buf, sizeof(buf)) > 0 &&
		    strcmp(buf, "cpu") == 0) {
			if (_prom_getprop(child, "upa-portid", &cpu,
			    sizeof(cpu)) == -1 && _prom_getprop(child, "portid",
			    &cpu, sizeof(cpu)) == -1)
				panic("tlb_init: prom_getprop");
			if (cpu == bootcpu)
				break;
		}
	}
	if (cpu != bootcpu)
		panic("tlb_init: no node for bootcpu?!?!");
	if (_prom_getprop(child, "#dtlb-entries", &dtlb_slot_max,
	    sizeof(dtlb_slot_max)) == -1 ||
	    _prom_getprop(child, "#itlb-entries", &itlb_slot_max,
	    sizeof(itlb_slot_max)) == -1)
		panic("tlb_init: prom_getprop");
	dtlb_store = alloc(dtlb_slot_max * sizeof(*dtlb_store));
	itlb_store = alloc(itlb_slot_max * sizeof(*itlb_store));
	if (dtlb_store == NULL || itlb_store == NULL) {
		panic("tlb_init: alloc");
	}

	dtlb_slot = itlb_slot = 0;
}

/*
 * Map requested memory region with permanent 4MB pages.
 */
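/*
 * For each 4MB-aligned chunk that kvamap_extract() reports as unmapped:
 * if neither TLB already translates it, allocate a physical page from
 * the PROM, claim the virtual range, and enter locked ITLB and DTLB
 * entries (recorded in the *tlb_store arrays for the bootinfo handoff).
 */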
static int
mmu_mapin(vaddr_t rva, vsize_t len)
{
	int64_t data;
	vaddr_t va, pa, mva;

	len = roundup2(len + (rva & PAGE_MASK_4M), PAGE_SIZE_4M);
	rva &= ~PAGE_MASK_4M;

	tlb_init();
	for (pa = (vaddr_t)-1; len > 0; rva = va) {
		if ( (len = kvamap_extract(rva, len, &va)) == 0) {
			/* The rest is already mapped */
			break;
		}

		if (dtlb_va_to_pa(va) == (u_long)-1 ||
		    itlb_va_to_pa(va) == (u_long)-1) {
			/* Allocate a physical page, claim the virtual area */
			if (pa == (vaddr_t)-1) {
				pa = (vaddr_t)OF_alloc_phys(PAGE_SIZE_4M,
				    PAGE_SIZE_4M);
				if (pa == (vaddr_t)-1)
					panic("out of memory");
				mva = (vaddr_t)OF_claim_virt(va,
				    PAGE_SIZE_4M, 0);
				if (mva != va) {
					panic("can't claim virtual page "
					    "(wanted %#lx, got %#lx)",
					    va, mva);
				}
				/* The mappings may have changed, be paranoid. */
				continue;
			}

			/*
			 * In fact, the number of usable TLB slots may be
			 * up to two less than the maximum (depending on
			 * the size of the kernel TSB).
			 */
			if (dtlb_slot >= dtlb_slot_max)
				panic("mmu_mapin: out of dtlb_slots");
			if (itlb_slot >= itlb_slot_max)
				panic("mmu_mapin: out of itlb_slots");

			DPRINTF(("mmu_mapin: %p:%p\n", (void *)va, (void *)pa));

			data = TSB_DATA(0,	/* global */
			    PGSZ_4M,		/* 4mb page */
			    pa,			/* phys.address */
			    1,			/* privileged */
			    1,			/* write */
			    1,			/* cache */
			    1,			/* alias */
			    1,			/* valid */
			    0			/* endianness */
			    );
			data |= TLB_L | TLB_CV;	/* locked, virt.cache */

			dtlb_store[dtlb_slot].te_pa = pa;
			dtlb_store[dtlb_slot].te_va = va;
			itlb_store[itlb_slot].te_pa = pa;
			itlb_store[itlb_slot].te_va = va;
			dtlb_slot++;
			itlb_slot++;
			dtlb_enter(va, hi(data), lo(data));
			itlb_enter(va, hi(data), lo(data));
			pa = (vaddr_t)-1;
		}

		kvamap_enter(va, PAGE_SIZE_4M);

		len -= len > PAGE_SIZE_4M ? PAGE_SIZE_4M : len;
		va += PAGE_SIZE_4M;
	}

	if (pa != (vaddr_t)-1) {
		OF_free_phys(pa, PAGE_SIZE_4M);
	}

	return (0);
}

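/*
 * The mmu_* accessors make sure the target region is mapped through
 * mmu_mapin() before delegating to the corresponding libsa primitive.
 */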
static ssize_t
mmu_read(int f, void *addr, size_t size)
{
	mmu_mapin((vaddr_t)addr, size);
	return read(f, addr, size);
}

static void*
mmu_memcpy(void *dst, const void *src, size_t size)
{
	mmu_mapin((vaddr_t)dst, size);
	return memcpy(dst, src, size);
}

static void*
mmu_memset(void *dst, int c, size_t size)
{
	mmu_mapin((vaddr_t)dst, size);
	return memset(dst, c, size);
}

static void
mmu_freeall(void)
{
	int i;

	dtlb_slot = itlb_slot = 0;
	for (i = 0; i < MAXSEGNUM; i++) {
		/* XXX return all mappings to PROM and unmap the pages! */
		kvamap[i].start = kvamap[i].end = 0;
	}
}

/*
 * Claim requested memory region in OpenFirmware allocation pool.
 */
static int
ofw_mapin(vaddr_t rva, vsize_t len)
{
	vaddr_t va;

	len = roundup2(len + (rva & PAGE_MASK_4M), PAGE_SIZE_4M);
	rva &= ~PAGE_MASK_4M;

	if ( (len = kvamap_extract(rva, len, &va)) != 0) {
		if (OF_claim((void *)(long)va, len, PAGE_SIZE_4M) == (void *)-1) {
			panic("ofw_mapin: Cannot claim memory.");
		}
		kvamap_enter(va, len);
	}

	return (0);
}

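/*
 * The ofw_* accessors mirror the mmu_* ones, but claim the target
 * region from OpenFirmware via ofw_mapin() instead of entering TLB
 * mappings directly.
 */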
static ssize_t
ofw_read(int f, void *addr, size_t size)
{
	ofw_mapin((vaddr_t)addr, size);
	return read(f, addr, size);
}

static void*
ofw_memcpy(void *dst, const void *src, size_t size)
{
	ofw_mapin((vaddr_t)dst, size);
	return memcpy(dst, src, size);
}

static void*
ofw_memset(void *dst, int c, size_t size)
{
	ofw_mapin((vaddr_t)dst, size);
	return memset(dst, c, size);
}

static void
ofw_freeall(void)
{
	int i;

	dtlb_slot = itlb_slot = 0;
	for (i = 0; i < MAXSEGNUM; i++) {
		if (kvamap[i].start == 0)
			continue;	/* skip unused slots */
		OF_release((void*)(u_long)kvamap[i].start,
		    (u_int)(kvamap[i].end - kvamap[i].start));
		kvamap[i].start = kvamap[i].end = 0;
	}
}

/*
 * The NOP implementation exists solely for the sake of loading the kernel
 * image headers: it uses the alloc() interface to allocate memory and
 * avoids doing anything dangerous.
 */
static ssize_t
nop_read(int f, void *addr, size_t size)
{
	return read(f, addr, size);
}

static void*
nop_memcpy(void *dst, const void *src, size_t size)
{
	/*
	 * A real no-op, needed to make LOAD_HDR work: loadfile_elfXX()
	 * copies the ELF headers right past the highest kernel address,
	 * an area that is never mapped by the nop_XXX operations.
	 */
	return (dst);
}

static void*
nop_memset(void *dst, int c, size_t size)
{
	return memset(dst, c, size);
}

static void
nop_freeall(void)
{ }

/*
 * loadfile() hooks.
 */
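/*
 * These are presumably wired into the MI loadfile() code through the
 * READ/BCOPY/BZERO-style macros in the machine loadfile headers; each
 * simply dispatches through the active memsw method table.
 */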
ssize_t
sparc64_read(int f, void *addr, size_t size)
{
	return (*memsw->read)(f, addr, size);
}

void*
sparc64_memcpy(void *dst, const void *src, size_t size)
{
	return (*memsw->memcpy)(dst, src, size);
}

void*
sparc64_memset(void *dst, int c, size_t size)
{
	return (*memsw->memset)(dst, c, size);
}

/*
 * Record kernel mappings in the bootinfo structure.
 */
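/*
 * The kernel presumably picks the BTINFO_{I,D}TLB* records up via its
 * bootinfo lookup during early sparc64 bootstrap, so that it can
 * preserve the locked mappings installed above.
 */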
void
sparc64_bi_add(void)
{
	int i;
	int itlb_size, dtlb_size;
	struct btinfo_count bi_count;
	struct btinfo_tlb *bi_itlb, *bi_dtlb;

#ifdef LOADER_DEBUG
	pmap_print_tlb('i');
	pmap_print_tlb('d');
#endif

	bi_count.count = itlb_slot;
	bi_add(&bi_count, BTINFO_ITLB_SLOTS, sizeof(bi_count));
	bi_count.count = dtlb_slot;
	bi_add(&bi_count, BTINFO_DTLB_SLOTS, sizeof(bi_count));

	itlb_size = sizeof(*bi_itlb) + sizeof(struct tlb_entry) * itlb_slot;
	dtlb_size = sizeof(*bi_dtlb) + sizeof(struct tlb_entry) * dtlb_slot;

	bi_itlb = alloc(itlb_size);
	bi_dtlb = alloc(dtlb_size);

	if ((bi_itlb == NULL) || (bi_dtlb == NULL)) {
		panic("Out of memory in sparc64_bi_add.\n");
	}

	for (i = 0; i < itlb_slot; i++) {
		bi_itlb->tlb[i].te_va = itlb_store[i].te_va;
		bi_itlb->tlb[i].te_pa = itlb_store[i].te_pa;
	}
	bi_add(bi_itlb, BTINFO_ITLB, itlb_size);

	for (i = 0; i < dtlb_slot; i++) {
		bi_dtlb->tlb[i].te_va = dtlb_store[i].te_va;
		bi_dtlb->tlb[i].te_pa = dtlb_store[i].te_pa;
	}
	bi_add(bi_dtlb, BTINFO_DTLB, dtlb_size);
}

/*
 * Choose kernel image mapping strategy:
 *
 *	LOADFILE_NOP_ALLOCATOR	To load kernel image headers
 *	LOADFILE_OFW_ALLOCATOR	To map the kernel by OpenFirmware means
 *	LOADFILE_MMU_ALLOCATOR	To use permanent 4MB mappings
 */
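/*
 * A typical load presumably starts with the NOP allocator while the
 * image headers are parsed, then switches to the OFW or MMU allocator
 * for the image proper; every switch releases whatever the previous
 * allocator claimed.
 */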
void
loadfile_set_allocator(int type)
{
	if (type < 0 || type >= (int)(sizeof(memswa) / sizeof(struct memsw))) {
		panic("Bad allocator request.\n");
	}

	/*
	 * Release all memory claimed by the previous allocator and schedule
	 * another allocator for succeeding memory allocation calls.
	 */
	(*memsw->freeall)();
	memsw = &memswa[type];
}