/*	$NetBSD: loadfile_machdep.c,v 1.2 2006/03/04 03:03:31 uwe Exp $	*/

/*-
 * Copyright (c) 2005 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This work is based on the code contributed by Robert Drehmel to the
 * FreeBSD project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <lib/libsa/stand.h>

#include <machine/pte.h>
#include <machine/cpu.h>
#include <machine/ctlreg.h>
#include <machine/vmparam.h>
#include <machine/promlib.h>

#include "boot.h"
#include "openfirm.h"


#define	MAXSEGNUM	50
#define	hi(val)		((uint32_t)(((val) >> 32) & (uint32_t)-1))
#define	lo(val)		((uint32_t)((val) & (uint32_t)-1))

#define	roundup2(x, y)	(((x)+((y)-1))&(~((y)-1)))
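
/*
 * Examples (illustrative values): hi()/lo() split a 64-bit value into
 * the two 32-bit halves handed to dtlb_enter()/itlb_enter() below, and
 * roundup2() rounds up to a multiple of y, which must be a power of two:
 *
 *	hi(0x0000000400002000ULL) == 0x00000004
 *	lo(0x0000000400002000ULL) == 0x00002000
 *	roundup2(0x123, 0x100)    == 0x200
 */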


typedef int phandle_t;

extern void	itlb_enter(vaddr_t, uint32_t, uint32_t);
extern void	dtlb_enter(vaddr_t, uint32_t, uint32_t);
extern vaddr_t	itlb_va_to_pa(vaddr_t);
extern vaddr_t	dtlb_va_to_pa(vaddr_t);

static void	tlb_init(void);

static int	mmu_mapin(vaddr_t, vsize_t);
static ssize_t	mmu_read(int, void *, size_t);
static void*	mmu_memcpy(void *, const void *, size_t);
static void*	mmu_memset(void *, int, size_t);
static void	mmu_freeall(void);

static int	ofw_mapin(vaddr_t, vsize_t);
static ssize_t	ofw_read(int, void *, size_t);
static void*	ofw_memcpy(void *, const void *, size_t);
static void*	ofw_memset(void *, int, size_t);
static void	ofw_freeall(void);

static int	nop_mapin(vaddr_t, vsize_t);
static ssize_t	nop_read(int, void *, size_t);
static void*	nop_memcpy(void *, const void *, size_t);
static void*	nop_memset(void *, int, size_t);
static void	nop_freeall(void);


struct tlb_entry *dtlb_store = 0;
struct tlb_entry *itlb_store = 0;

int dtlb_slot;
int itlb_slot;
int dtlb_slot_max;
int itlb_slot_max;

static struct kvamap {
	uint64_t start;
	uint64_t end;
} kvamap[MAXSEGNUM];

static struct memsw {
	ssize_t	(* read)(int f, void *addr, size_t size);
	void*	(* memcpy)(void *dst, const void *src, size_t size);
	void*	(* memset)(void *dst, int c, size_t size);
	void	(* freeall)(void);
} memswa[] = {
	{ nop_read, nop_memcpy, nop_memset, nop_freeall },
	{ ofw_read, ofw_memcpy, ofw_memset, ofw_freeall },
	{ mmu_read, mmu_memcpy, mmu_memset, mmu_freeall }
};
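
/*
 * Note: the order of the memswa[] entries above must match the
 * LOADFILE_{NOP,OFW,MMU}_ALLOCATOR constants (presumably defined in
 * boot.h), since loadfile_set_allocator() below uses them directly as
 * an index into this array.
 */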

static struct memsw *memsw = &memswa[0];


/*
 * Check if a memory region is already mapped.  Return length and virtual
 * address of unmapped sub-region, if any.
 */
static uint64_t
kvamap_extract(vaddr_t va, vsize_t len, vaddr_t *new_va)
{
	int i;

	*new_va = va;
	for (i = 0; (len > 0) && (i < MAXSEGNUM); i++) {
		if (kvamap[i].start == 0)
			break;
		if ((kvamap[i].start <= va) && (va < kvamap[i].end)) {
			/* Length of the already-mapped part from va on. */
			uint64_t va_len = kvamap[i].end - va;
			len = (va_len < len) ? len - va_len : 0;
			*new_va = kvamap[i].end;
		}
	}

	return (len);
}

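/*
 * Example (illustrative numbers): with an existing mapping
 * [0x400000, 0x800000), a query for 4MB starting at va 0x600000 returns
 * the unmapped remainder 0x200000 with *new_va pushed to 0x800000; a
 * query that lies entirely inside the mapping returns 0.
 */
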
/*
 * Record new kernel mapping.
 */
static void
kvamap_enter(uint64_t va, uint64_t len)
{
	int i;

	DPRINTF(("kvamap_enter: %d@%p\n", (int)len, (void*)(u_long)va));
	for (i = 0; (len > 0) && (i < MAXSEGNUM); i++) {
		if (kvamap[i].start == 0) {
			kvamap[i].start = va;
			kvamap[i].end = va + len;
			break;
		}
	}

	if (i == MAXSEGNUM) {
		panic("Too many allocations requested.");
	}
}

/*
 * Initialize TLB as required by MMU mapping functions.
 */
static void
tlb_init(void)
{
	phandle_t child;
	phandle_t root;
	char buf[128];
	u_int bootcpu;
	u_int cpu;

	if (dtlb_store != NULL) {
		return;
	}

	bootcpu = get_cpuid();

	if ( (root = prom_findroot()) == -1) {
		panic("tlb_init: prom_findroot()");
	}

	for (child = prom_firstchild(root); child != 0;
	    child = prom_nextsibling(child)) {
		if (child == -1) {
			panic("tlb_init: OF_child");
		}
		if (_prom_getprop(child, "device_type", buf, sizeof(buf)) > 0 &&
		    strcmp(buf, "cpu") == 0) {
			if (_prom_getprop(child, "upa-portid", &cpu,
			    sizeof(cpu)) == -1 && _prom_getprop(child, "portid",
			    &cpu, sizeof(cpu)) == -1)
				panic("tlb_init: prom_getprop");
			if (cpu == bootcpu)
				break;
		}
	}
	if (cpu != bootcpu)
		panic("tlb_init: no node for bootcpu?!");
	if (_prom_getprop(child, "#dtlb-entries", &dtlb_slot_max,
	    sizeof(dtlb_slot_max)) == -1 ||
	    _prom_getprop(child, "#itlb-entries", &itlb_slot_max,
	    sizeof(itlb_slot_max)) == -1)
		panic("tlb_init: prom_getprop");
	dtlb_store = alloc(dtlb_slot_max * sizeof(*dtlb_store));
	itlb_store = alloc(itlb_slot_max * sizeof(*itlb_store));
	if (dtlb_store == NULL || itlb_store == NULL) {
		panic("tlb_init: alloc");
	}

	dtlb_slot = itlb_slot = 0;
}

/*
 * Map requested memory region with permanent 4MB pages.
 */
static int
mmu_mapin(vaddr_t rva, vsize_t len)
{
	int64_t data;
	vaddr_t va, pa, mva;

	len = roundup2(len + (rva & PAGE_MASK_4M), PAGE_SIZE_4M);
	rva &= ~PAGE_MASK_4M;

	tlb_init();
	for (pa = (vaddr_t)-1; len > 0; rva = va) {
		if ( (len = kvamap_extract(rva, len, &va)) == 0) {
			/* The rest is already mapped */
			break;
		}

		if (dtlb_va_to_pa(va) == (u_long)-1 ||
		    itlb_va_to_pa(va) == (u_long)-1) {
			/* Allocate a physical page, claim the virtual area */
			if (pa == (vaddr_t)-1) {
				pa = (vaddr_t)OF_alloc_phys(PAGE_SIZE_4M,
				    PAGE_SIZE_4M);
				if (pa == (vaddr_t)-1)
					panic("out of memory");
				mva = (vaddr_t)OF_claim_virt(va,
				    PAGE_SIZE_4M, 0);
				if (mva != va) {
					panic("can't claim virtual page "
					    "(wanted %#lx, got %#lx)",
					    va, mva);
				}
				/* The mappings may have changed, be paranoid. */
				continue;
			}

			/*
			 * Actually, up to two fewer slots than this may
			 * be available (depending on the kernel TSB size).
			 */
			if (dtlb_slot >= dtlb_slot_max)
				panic("mmu_mapin: out of dtlb_slots");
			if (itlb_slot >= itlb_slot_max)
				panic("mmu_mapin: out of itlb_slots");

			DPRINTF(("mmu_mapin: %p:%p\n",
			    (void *)va, (void *)pa));

			data = TSB_DATA(0,	/* global */
				PGSZ_4M,	/* 4mb page */
				pa,		/* phys.address */
				1,		/* privileged */
				1,		/* write */
				1,		/* cache */
				1,		/* alias */
				1,		/* valid */
				0		/* endianness */
				);
			data |= TLB_L | TLB_CV;	/* locked, virt.cache */

			dtlb_store[dtlb_slot].te_pa = pa;
			dtlb_store[dtlb_slot].te_va = va;
			itlb_store[itlb_slot].te_pa = pa;
			itlb_store[itlb_slot].te_va = va;
			dtlb_slot++;
			itlb_slot++;
			dtlb_enter(va, hi(data), lo(data));
			itlb_enter(va, hi(data), lo(data));
			pa = (vaddr_t)-1;
		}

		kvamap_enter(va, PAGE_SIZE_4M);

		len -= len > PAGE_SIZE_4M ? PAGE_SIZE_4M : len;
		va += PAGE_SIZE_4M;
	}

	if (pa != (vaddr_t)-1) {
		OF_free_phys(pa, PAGE_SIZE_4M);
	}

	return (0);
}

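/*
 * Sizing example (illustrative): a request for 6.5MB (0x680000 bytes)
 * starting at a 4MB-aligned address is rounded up to
 * roundup2(0x680000, PAGE_SIZE_4M) == 0x800000, i.e. two locked 4MB
 * pages in each of the D- and I-TLBs.
 */
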
static ssize_t
mmu_read(int f, void *addr, size_t size)
{
	mmu_mapin((vaddr_t)addr, size);
	return read(f, addr, size);
}

static void*
mmu_memcpy(void *dst, const void *src, size_t size)
{
	mmu_mapin((vaddr_t)dst, size);
	return memcpy(dst, src, size);
}

static void*
mmu_memset(void *dst, int c, size_t size)
{
	mmu_mapin((vaddr_t)dst, size);
	return memset(dst, c, size);
}

static void
mmu_freeall(void)
{
	int i;

	dtlb_slot = itlb_slot = 0;
	for (i = 0; i < MAXSEGNUM; i++) {
		/* XXX return all mappings to PROM and unmap the pages! */
		kvamap[i].start = kvamap[i].end = 0;
	}
}

/*
 * Claim requested memory region in OpenFirmware allocation pool.
 */
static int
ofw_mapin(vaddr_t rva, vsize_t len)
{
	vaddr_t va;

	len = roundup2(len + (rva & PAGE_MASK_4M), PAGE_SIZE_4M);
	rva &= ~PAGE_MASK_4M;

	if ( (len = kvamap_extract(rva, len, &va)) != 0) {
		if (OF_claim((void *)(long)va, len, PAGE_SIZE_4M) ==
		    (void *)-1) {
			panic("ofw_mapin: Cannot claim memory.");
		}
		kvamap_enter(va, len);
	}

	return (0);
}

static ssize_t
ofw_read(int f, void *addr, size_t size)
{
	ofw_mapin((vaddr_t)addr, size);
	return read(f, addr, size);
}

static void*
ofw_memcpy(void *dst, const void *src, size_t size)
{
	ofw_mapin((vaddr_t)dst, size);
	return memcpy(dst, src, size);
}

static void*
ofw_memset(void *dst, int c, size_t size)
{
	ofw_mapin((vaddr_t)dst, size);
	return memset(dst, c, size);
}

static void
ofw_freeall(void)
{
	int i;

	dtlb_slot = itlb_slot = 0;
	for (i = 0; i < MAXSEGNUM; i++) {
		OF_release((void*)(u_long)kvamap[i].start,
		    (u_int)(kvamap[i].end - kvamap[i].start));
		kvamap[i].start = kvamap[i].end = 0;
	}
}

/*
 * The NOP implementation exists solely for the sake of loading the kernel
 * headers.  Here we rely on memory obtained through the alloc() interface
 * and avoid doing anything dangerous.
 */
static ssize_t
nop_read(int f, void *addr, size_t size)
{
	return read(f, addr, size);
}

static void*
nop_memcpy(void *dst, const void *src, size_t size)
{
	/*
	 * A real no-op to make LOAD_HDR work: loadfile_elfXX() copies the
	 * ELF headers right after the highest kernel address, an area that
	 * the nop_XXX operations never map.
	 */
	return (dst);
}

static void*
nop_memset(void *dst, int c, size_t size)
{
	return memset(dst, c, size);
}

static void
nop_freeall(void)
{ }

/*
 * loadfile() hooks.
 */
ssize_t
sparc64_read(int f, void *addr, size_t size)
{
	return (*memsw->read)(f, addr, size);
}

void*
sparc64_memcpy(void *dst, const void *src, size_t size)
{
	return (*memsw->memcpy)(dst, src, size);
}

void*
sparc64_memset(void *dst, int c, size_t size)
{
	return (*memsw->memset)(dst, c, size);
}

/*
 * Record kernel mappings in bootinfo structure.
 */
void
sparc64_bi_add(void)
{
	int i;
	int itlb_size, dtlb_size;
	struct btinfo_count bi_count;
	struct btinfo_tlb *bi_itlb, *bi_dtlb;

#ifdef LOADER_DEBUG
	pmap_print_tlb('i');
	pmap_print_tlb('d');
#endif

	bi_count.count = itlb_slot;
	bi_add(&bi_count, BTINFO_ITLB_SLOTS, sizeof(bi_count));
	bi_count.count = dtlb_slot;
	bi_add(&bi_count, BTINFO_DTLB_SLOTS, sizeof(bi_count));

	itlb_size = sizeof(*bi_itlb) + sizeof(struct tlb_entry) * itlb_slot;
	dtlb_size = sizeof(*bi_dtlb) + sizeof(struct tlb_entry) * dtlb_slot;

	bi_itlb = alloc(itlb_size);
	bi_dtlb = alloc(dtlb_size);

	if ((bi_itlb == NULL) || (bi_dtlb == NULL)) {
		panic("Out of memory in sparc64_bi_add.\n");
	}

	for (i = 0; i < itlb_slot; i++) {
		bi_itlb->tlb[i].te_va = itlb_store[i].te_va;
		bi_itlb->tlb[i].te_pa = itlb_store[i].te_pa;
	}
	bi_add(bi_itlb, BTINFO_ITLB, itlb_size);

	for (i = 0; i < dtlb_slot; i++) {
		bi_dtlb->tlb[i].te_va = dtlb_store[i].te_va;
		bi_dtlb->tlb[i].te_pa = dtlb_store[i].te_pa;
	}
	bi_add(bi_dtlb, BTINFO_DTLB, dtlb_size);
}

/*
 * Choose kernel image mapping strategy:
 *
 *	LOADFILE_NOP_ALLOCATOR	To load kernel image headers
 *	LOADFILE_OFW_ALLOCATOR	To map the kernel by OpenFirmware means
 *	LOADFILE_MMU_ALLOCATOR	To use permanent 4MB mappings
 */
void
loadfile_set_allocator(int type)
{
	if (type >= (sizeof(memswa) / sizeof(struct memsw))) {
		panic("Bad allocator request.\n");
	}

	/*
	 * Release all memory claimed by the previous allocator and install
	 * the requested allocator for subsequent memory operations.
	 */
	(*memsw->freeall)();
	memsw = &memswa[type];
}

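/*
 * Typical usage (a sketch; the exact call sequence depends on the
 * ofwboot caller): load the kernel headers with the NOP allocator,
 * then switch allocators before loading the image proper:
 *
 *	loadfile_set_allocator(LOADFILE_NOP_ALLOCATOR);
 *	... read the ELF headers via sparc64_read() ...
 *	loadfile_set_allocator(LOADFILE_MMU_ALLOCATOR);
 *	... load the segments, then record the mappings with
 *	    sparc64_bi_add() ...
 */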