/*	$NetBSD: loadfile_machdep.c,v 1.6 2008/08/25 22:31:12 martin Exp $	*/

/*-
 * Copyright (c) 2005 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This work is based on the code contributed by Robert Drehmel to the
 * FreeBSD project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <lib/libsa/stand.h>

#include <machine/pte.h>
#include <machine/cpu.h>
#include <machine/ctlreg.h>
#include <machine/vmparam.h>
#include <machine/promlib.h>

#include "boot.h"
#include "openfirm.h"


#define MAXSEGNUM	50
#define hi(val)		((uint32_t)(((val) >> 32) & (uint32_t)-1))
#define lo(val)		((uint32_t)((val) & (uint32_t)-1))

#define roundup2(x, y)	(((x)+((y)-1))&(~((y)-1)))
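
/*
 * Illustration (values made up): roundup2(len, PAGE_SIZE_4M) rounds a
 * length up to the next 4MB boundary, e.g. roundup2(0x500000, 0x400000)
 * yields 0x800000.  hi()/lo() split a 64-bit TTE value such as
 * 0x8000000012345678ULL into 0x80000000 and 0x12345678, matching the two
 * uint32_t arguments taken by dtlb_enter()/itlb_enter().
 */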


typedef int phandle_t;

extern void	itlb_enter(vaddr_t, uint32_t, uint32_t);
extern void	dtlb_enter(vaddr_t, uint32_t, uint32_t);
extern void	dtlb_replace(vaddr_t, uint32_t, uint32_t);
extern vaddr_t	itlb_va_to_pa(vaddr_t);
extern vaddr_t	dtlb_va_to_pa(vaddr_t);

static void	tlb_init(void);

static int	mmu_mapin(vaddr_t, vsize_t);
static ssize_t	mmu_read(int, void *, size_t);
static void*	mmu_memcpy(void *, const void *, size_t);
static void*	mmu_memset(void *, int, size_t);
static void	mmu_freeall(void);

static int	ofw_mapin(vaddr_t, vsize_t);
static ssize_t	ofw_read(int, void *, size_t);
static void*	ofw_memcpy(void *, const void *, size_t);
static void*	ofw_memset(void *, int, size_t);
static void	ofw_freeall(void);

static int	nop_mapin(vaddr_t, vsize_t);
static ssize_t	nop_read(int, void *, size_t);
static void*	nop_memcpy(void *, const void *, size_t);
static void*	nop_memset(void *, int, size_t);
static void	nop_freeall(void);


struct tlb_entry *dtlb_store = 0;
struct tlb_entry *itlb_store = 0;

int dtlb_slot;
int itlb_slot;
int dtlb_slot_max;
int itlb_slot_max;

static struct kvamap {
	uint64_t start;
	uint64_t end;
} kvamap[MAXSEGNUM];

static struct memsw {
	ssize_t	(* read)(int f, void *addr, size_t size);
	void*	(* memcpy)(void *dst, const void *src, size_t size);
	void*	(* memset)(void *dst, int c, size_t size);
	void	(* freeall)(void);
} memswa[] = {
	{ nop_read, nop_memcpy, nop_memset, nop_freeall },
	{ ofw_read, ofw_memcpy, ofw_memset, ofw_freeall },
	{ mmu_read, mmu_memcpy, mmu_memset, mmu_freeall }
};

static struct memsw *memsw = &memswa[0];
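
/*
 * The loader starts out with the NOP allocator (memswa[0]); a different
 * strategy is selected with loadfile_set_allocator() below.
 */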


/*
 * Check if a memory region is already mapped.  Return length and virtual
 * address of unmapped sub-region, if any.
 */
static uint64_t
kvamap_extract(vaddr_t va, vsize_t len, vaddr_t *new_va)
{
	int i;

	*new_va = va;
	for (i = 0; (len > 0) && (i < MAXSEGNUM); i++) {
		if (kvamap[i].start == 0)
			break;
		if ((kvamap[i].start <= va) && (va < kvamap[i].end)) {
			uint64_t va_len = kvamap[i].end - va;
			len = (va_len < len) ? len - va_len : 0;
			*new_va = kvamap[i].end;
		}
	}

	return (len);
}

/*
 * Record new kernel mapping.
 */
static void
kvamap_enter(uint64_t va, uint64_t len)
{
	int i;

	DPRINTF(("kvamap_enter: %d@%p\n", (int)len, (void*)(u_long)va));
	for (i = 0; (len > 0) && (i < MAXSEGNUM); i++) {
		if (kvamap[i].start == 0) {
			kvamap[i].start = va;
			kvamap[i].end = va + len;
			break;
		}
	}

	if (i == MAXSEGNUM) {
		panic("Too many allocations requested.");
	}
}
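
/*
 * Illustrative example (addresses made up): kvamap_enter(0x1000000,
 * PAGE_SIZE_4M) records the region [0x1000000, 0x1400000) in the first
 * free kvamap slot, so that a later kvamap_extract() over that range
 * reports it as already mapped.
 */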

/*
 * Initialize TLB as required by MMU mapping functions.
 */
static void
tlb_init(void)
{
	phandle_t child;
	phandle_t root;
	char buf[128];
	u_int bootcpu;
	u_int cpu;

	if (dtlb_store != NULL) {
		return;
	}

	bootcpu = get_cpuid();

	if ( (root = prom_findroot()) == -1) {
		panic("tlb_init: prom_findroot()");
	}

	for (child = prom_firstchild(root); child != 0;
	    child = prom_nextsibling(child)) {
		if (child == -1) {
			panic("tlb_init: OF_child");
		}
		if (_prom_getprop(child, "device_type", buf, sizeof(buf)) > 0 &&
		    strcmp(buf, "cpu") == 0) {
			if (_prom_getprop(child, "upa-portid", &cpu,
			    sizeof(cpu)) == -1 && _prom_getprop(child, "portid",
			    &cpu, sizeof(cpu)) == -1)
184 panic("main: prom_getprop");
			if (cpu == bootcpu)
				break;
		}
	}
	if (cpu != bootcpu)
		panic("init_tlb: no node for bootcpu?!?!");
	if (_prom_getprop(child, "#dtlb-entries", &dtlb_slot_max,
	    sizeof(dtlb_slot_max)) == -1 ||
	    _prom_getprop(child, "#itlb-entries", &itlb_slot_max,
	    sizeof(itlb_slot_max)) == -1)
		panic("init_tlb: prom_getprop");
	dtlb_store = alloc(dtlb_slot_max * sizeof(*dtlb_store));
	itlb_store = alloc(itlb_slot_max * sizeof(*itlb_store));
	if (dtlb_store == NULL || itlb_store == NULL) {
		panic("init_tlb: malloc");
	}

	dtlb_slot = itlb_slot = 0;
}

/*
 * Map requested memory region with permanent 4MB pages.
 */
static int
mmu_mapin(vaddr_t rva, vsize_t len)
{
	int64_t data;
	vaddr_t va, pa, mva;

	len = roundup2(len + (rva & PAGE_MASK_4M), PAGE_SIZE_4M);
	rva &= ~PAGE_MASK_4M;

	tlb_init();
	for (pa = (vaddr_t)-1; len > 0; rva = va) {
		if ( (len = kvamap_extract(rva, len, &va)) == 0) {
			/* The rest is already mapped */
			break;
		}

		if (dtlb_va_to_pa(va) == (u_long)-1 ||
		    itlb_va_to_pa(va) == (u_long)-1) {
			/* Allocate a physical page, claim the virtual area */
			if (pa == (vaddr_t)-1) {
				pa = (vaddr_t)OF_alloc_phys(PAGE_SIZE_4M,
				    PAGE_SIZE_4M);
				if (pa == (vaddr_t)-1)
					panic("out of memory");
				mva = (vaddr_t)OF_claim_virt(va,
				    PAGE_SIZE_4M, 0);
				if (mva != va) {
					panic("can't claim virtual page "
					    "(wanted %#lx, got %#lx)",
					    va, mva);
				}
				/* The mappings may have changed, be paranoid. */
				continue;
			}

			/*
			 * Actually, we can allocate at most two pages
			 * fewer than this (depending on the kernel TSB
			 * size).
			 */
			if (dtlb_slot >= dtlb_slot_max)
				panic("mmu_mapin: out of dtlb_slots");
			if (itlb_slot >= itlb_slot_max)
				panic("mmu_mapin: out of itlb_slots");

			DPRINTF(("mmu_mapin: %p:%p\n", (void *)va, (void *)pa));

			data = TSB_DATA(0,		/* global */
					PGSZ_4M,	/* 4mb page */
					pa,		/* phys.address */
					1,		/* privileged */
					1,		/* write */
					1,		/* cache */
					1,		/* alias */
					1,		/* valid */
					0		/* endianness */
					);
			data |= TLB_L | TLB_CV;		/* locked, virt.cache */

			dtlb_store[dtlb_slot].te_pa = pa;
			dtlb_store[dtlb_slot].te_va = va;
			dtlb_slot++;
			dtlb_enter(va, hi(data), lo(data));
			pa = (vaddr_t)-1;
		}

		kvamap_enter(va, PAGE_SIZE_4M);

		len -= len > PAGE_SIZE_4M ? PAGE_SIZE_4M : len;
		va += PAGE_SIZE_4M;
	}

	if (pa != (vaddr_t)-1) {
		OF_free_phys(pa, PAGE_SIZE_4M);
	}

	return (0);
}

static ssize_t
mmu_read(int f, void *addr, size_t size)
{
	mmu_mapin((vaddr_t)addr, size);
	return read(f, addr, size);
}

static void*
mmu_memcpy(void *dst, const void *src, size_t size)
{
	mmu_mapin((vaddr_t)dst, size);
	return memcpy(dst, src, size);
}

static void*
mmu_memset(void *dst, int c, size_t size)
{
	mmu_mapin((vaddr_t)dst, size);
	return memset(dst, c, size);
}

static void
mmu_freeall(void)
{
	int i;

	dtlb_slot = itlb_slot = 0;
	for (i = 0; i < MAXSEGNUM; i++) {
		/* XXX return all mappings to PROM and unmap the pages! */
		kvamap[i].start = kvamap[i].end = 0;
	}
}

/*
 * Claim requested memory region in OpenFirmware allocation pool.
 */
static int
ofw_mapin(vaddr_t rva, vsize_t len)
{
	vaddr_t va;

	len = roundup2(len + (rva & PAGE_MASK_4M), PAGE_SIZE_4M);
	rva &= ~PAGE_MASK_4M;

	if ( (len = kvamap_extract(rva, len, &va)) != 0) {
		if (OF_claim((void *)(long)va, len, PAGE_SIZE_4M) == (void *)-1) {
			panic("ofw_mapin: Cannot claim memory.");
		}
		kvamap_enter(va, len);
	}

	return (0);
}

static ssize_t
ofw_read(int f, void *addr, size_t size)
{
	ofw_mapin((vaddr_t)addr, size);
	return read(f, addr, size);
}

static void*
ofw_memcpy(void *dst, const void *src, size_t size)
{
	ofw_mapin((vaddr_t)dst, size);
	return memcpy(dst, src, size);
}

static void*
ofw_memset(void *dst, int c, size_t size)
{
	ofw_mapin((vaddr_t)dst, size);
	return memset(dst, c, size);
}

static void
ofw_freeall(void)
{
	int i;

	dtlb_slot = itlb_slot = 0;
	for (i = 0; i < MAXSEGNUM; i++) {
		OF_release((void*)(u_long)kvamap[i].start,
		    (u_int)(kvamap[i].end - kvamap[i].start));
		kvamap[i].start = kvamap[i].end = 0;
	}
}

/*
 * The NOP implementation exists solely for the sake of loading the kernel
 * image headers.  Here we use the alloc() interface to allocate memory and
 * avoid doing anything dangerous.
 */
static ssize_t
nop_read(int f, void *addr, size_t size)
{
	return read(f, addr, size);
}

static void*
nop_memcpy(void *dst, const void *src, size_t size)
{
	/*
	 * A real no-op, needed to make LOAD_HDR work: loadfile_elfXX copies
	 * the ELF headers to just past the highest kernel address, an area
	 * that is not mapped by the nop_XXX operations.
	 */
	return (dst);
}

static void*
nop_memset(void *dst, int c, size_t size)
{
	return memset(dst, c, size);
}

static void
nop_freeall(void)
{ }

/*
 * loadfile() hooks.
 */
ssize_t
sparc64_read(int f, void *addr, size_t size)
{
	return (*memsw->read)(f, addr, size);
}

void*
sparc64_memcpy(void *dst, const void *src, size_t size)
{
	return (*memsw->memcpy)(dst, src, size);
}

void*
sparc64_memset(void *dst, int c, size_t size)
{
	return (*memsw->memset)(dst, c, size);
}

/*
 * Remove write permissions from text mappings in the dTLB.
 * Add entries in the iTLB.
 */
void
sparc64_finalize_tlb(u_long data_va)
{
	int i;
	int64_t data;
	bool writable_text = false;

	for (i = 0; i < dtlb_slot; i++) {
		if (dtlb_store[i].te_va >= data_va) {
			/*
			 * If (for whatever reason) the start of the
			 * writable section is right at the start of
			 * the kernel, we still need to map it into the
			 * iTLB and must not make it read-only.
			 */
			if (i == 0 && dtlb_store[i].te_va == data_va)
				writable_text = true;
			else
				continue;
		}

		data = TSB_DATA(0,		/* global */
				PGSZ_4M,	/* 4mb page */
				dtlb_store[i].te_pa,	/* phys.address */
				1,		/* privileged */
				0,		/* write */
				1,		/* cache */
				1,		/* alias */
				1,		/* valid */
				0		/* endianness */
				);
		data |= TLB_L | TLB_CV;		/* locked, virt.cache */
		if (!writable_text)
			dtlb_replace(dtlb_store[i].te_va, hi(data), lo(data));
		itlb_store[itlb_slot] = dtlb_store[i];
		itlb_slot++;
		itlb_enter(dtlb_store[i].te_va, hi(data), lo(data));
	}
	if (writable_text)
		printf("WARNING: kernel text mapped writable!\n");
}

/*
 * Record kernel mappings in bootinfo structure.
 */
void
sparc64_bi_add(void)
{
	int i;
	int itlb_size, dtlb_size;
	struct btinfo_count bi_count;
	struct btinfo_tlb *bi_itlb, *bi_dtlb;

	bi_count.count = itlb_slot;
	bi_add(&bi_count, BTINFO_ITLB_SLOTS, sizeof(bi_count));
	bi_count.count = dtlb_slot;
	bi_add(&bi_count, BTINFO_DTLB_SLOTS, sizeof(bi_count));

	itlb_size = sizeof(*bi_itlb) + sizeof(struct tlb_entry) * itlb_slot;
	dtlb_size = sizeof(*bi_dtlb) + sizeof(struct tlb_entry) * dtlb_slot;

	bi_itlb = alloc(itlb_size);
	bi_dtlb = alloc(dtlb_size);

	if ((bi_itlb == NULL) || (bi_dtlb == NULL)) {
		panic("Out of memory in sparc64_bi_add.\n");
	}

	for (i = 0; i < itlb_slot; i++) {
		bi_itlb->tlb[i].te_va = itlb_store[i].te_va;
		bi_itlb->tlb[i].te_pa = itlb_store[i].te_pa;
	}
	bi_add(bi_itlb, BTINFO_ITLB, itlb_size);

	for (i = 0; i < dtlb_slot; i++) {
		bi_dtlb->tlb[i].te_va = dtlb_store[i].te_va;
		bi_dtlb->tlb[i].te_pa = dtlb_store[i].te_pa;
	}
	bi_add(bi_dtlb, BTINFO_DTLB, dtlb_size);
}
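
/*
 * Illustrative note, assuming kernel-side code outside this file: the
 * kernel is expected to retrieve these records from the bootinfo list
 * (e.g. with its lookup_bootinfo() helper) when it takes over the locked
 * TLB entries.
 */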

/*
 * Choose kernel image mapping strategy:
 *
 *   LOADFILE_NOP_ALLOCATOR	To load kernel image headers
 *   LOADFILE_OFW_ALLOCATOR	To map the kernel by OpenFirmware means
 *   LOADFILE_MMU_ALLOCATOR	To use permanent 4MB mappings
 */
void
loadfile_set_allocator(int type)
{
	if (type >= (sizeof(memswa) / sizeof(struct memsw))) {
		panic("Bad allocator request.\n");
	}

	/*
	 * Release all memory claimed by the previous allocator and install
	 * another allocator for subsequent memory allocation calls.
	 */
530 (*memsw->freeall)();
531 memsw = &memswa[type];
532 }
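
/*
 * Illustrative usage sketch (the actual caller lives elsewhere in the
 * ofwboot sources, not in this file): loading a kernel might proceed
 * roughly as
 *
 *	loadfile_set_allocator(LOADFILE_NOP_ALLOCATOR);
 *	...read the ELF headers via sparc64_read()...
 *	loadfile_set_allocator(LOADFILE_MMU_ALLOCATOR);
 *	...load the kernel segments...
 *	sparc64_finalize_tlb(data_va);
 *	sparc64_bi_add();
 */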