/*	$NetBSD: loadfile_machdep.c,v 1.3 2007/06/05 08:52:20 martin Exp $	*/

/*-
 * Copyright (c) 2005 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This work is based on the code contributed by Robert Drehmel to the
 * FreeBSD project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <lib/libsa/stand.h>

#include <machine/pte.h>
#include <machine/cpu.h>
#include <machine/ctlreg.h>
#include <machine/vmparam.h>
#include <machine/promlib.h>

#include "boot.h"
#include "openfirm.h"


#define MAXSEGNUM	50
#define hi(val)		((uint32_t)(((val) >> 32) & (uint32_t)-1))
#define lo(val)		((uint32_t)((val) & (uint32_t)-1))

#define roundup2(x, y)	(((x)+((y)-1))&(~((y)-1)))

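/*
 * Illustrative examples (hypothetical values, not compiled): hi() and
 * lo() split a 64-bit TSB data word into the two 32-bit halves taken
 * by the TLB entry helpers declared below, and roundup2() rounds up
 * to a power-of-two boundary.  With PAGE_SIZE_4M == 0x400000:
 *
 *	hi(0x8000000000000001ULL) == 0x80000000
 *	lo(0x8000000000000001ULL) == 0x00000001
 *	roundup2(0x123456, PAGE_SIZE_4M) == 0x400000
 */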

typedef int phandle_t;

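/*
 * External TLB helpers (implemented outside this file, presumably in
 * the boot loader's assembler support code): each takes the 64-bit
 * TSB data word split into two 32-bit halves with hi()/lo().
 */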
extern void itlb_enter(vaddr_t, uint32_t, uint32_t);
extern void dtlb_enter(vaddr_t, uint32_t, uint32_t);
extern void dtlb_replace(vaddr_t, uint32_t, uint32_t);
extern vaddr_t itlb_va_to_pa(vaddr_t);
extern vaddr_t dtlb_va_to_pa(vaddr_t);

static void tlb_init(void);

static int mmu_mapin(vaddr_t, vsize_t);
static ssize_t mmu_read(int, void *, size_t);
static void* mmu_memcpy(void *, const void *, size_t);
static void* mmu_memset(void *, int, size_t);
static void mmu_freeall(void);

static int ofw_mapin(vaddr_t, vsize_t);
static ssize_t ofw_read(int, void *, size_t);
static void* ofw_memcpy(void *, const void *, size_t);
static void* ofw_memset(void *, int, size_t);
static void ofw_freeall(void);

static int nop_mapin(vaddr_t, vsize_t);
static ssize_t nop_read(int, void *, size_t);
static void* nop_memcpy(void *, const void *, size_t);
static void* nop_memset(void *, int, size_t);
static void nop_freeall(void);


struct tlb_entry *dtlb_store = 0;
struct tlb_entry *itlb_store = 0;

int dtlb_slot;
int itlb_slot;
int dtlb_slot_max;
int itlb_slot_max;

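/* Ranges of kernel virtual address space already claimed and mapped. */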
static struct kvamap {
	uint64_t start;
	uint64_t end;
} kvamap[MAXSEGNUM];

static struct memsw {
	ssize_t	(* read)(int f, void *addr, size_t size);
	void*	(* memcpy)(void *dst, const void *src, size_t size);
	void*	(* memset)(void *dst, int c, size_t size);
	void	(* freeall)(void);
} memswa[] = {
	{ nop_read, nop_memcpy, nop_memset, nop_freeall },
	{ ofw_read, ofw_memcpy, ofw_memset, ofw_freeall },
	{ mmu_read, mmu_memcpy, mmu_memset, mmu_freeall }
};

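/*
 * Current allocator; defaults to the NOP allocator until
 * loadfile_set_allocator() selects another one.
 */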
static struct memsw *memsw = &memswa[0];


/*
 * Check if a memory region is already mapped.  Return length and virtual
 * address of unmapped sub-region, if any.
 */
static uint64_t
kvamap_extract(vaddr_t va, vsize_t len, vaddr_t *new_va)
{
	int i;

	*new_va = va;
	for (i = 0; (len > 0) && (i < MAXSEGNUM); i++) {
		if (kvamap[i].start == 0)
			break;
		if ((kvamap[i].start <= va) && (va < kvamap[i].end)) {
			uint64_t va_len = kvamap[i].end - va;
			len = (va_len < len) ? len - va_len : 0;
			*new_va = kvamap[i].end;
		}
	}

	return (len);
}
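
/*
 * Worked example (hypothetical values): with a single recorded mapping
 * kvamap[0] = { 0x400000, 0x800000 }, the call
 *
 *	kvamap_extract(0x600000, 0x400000, &new_va);
 *
 * finds the first 0x200000 bytes already mapped, sets new_va to
 * 0x800000 (where the unmapped part begins) and returns the remaining
 * length 0x200000.
 */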

/*
 * Record new kernel mapping.
 */
static void
kvamap_enter(uint64_t va, uint64_t len)
{
	int i;

	DPRINTF(("kvamap_enter: %d@%p\n", (int)len, (void*)(u_long)va));
	for (i = 0; (len > 0) && (i < MAXSEGNUM); i++) {
		if (kvamap[i].start == 0) {
			kvamap[i].start = va;
			kvamap[i].end = va + len;
			break;
		}
	}

	if (i == MAXSEGNUM) {
		panic("Too many allocations requested.");
	}
}

/*
 * Initialize TLB bookkeeping as required by the MMU mapping functions:
 * find the boot CPU's node in the OpenFirmware device tree and size the
 * dTLB/iTLB slot arrays from its #dtlb-entries/#itlb-entries properties.
 */
static void
tlb_init(void)
{
	phandle_t child;
	phandle_t root;
	char buf[128];
	u_int bootcpu;
	u_int cpu = (u_int)-1;

	if (dtlb_store != NULL) {
		return;
	}

	bootcpu = get_cpuid();

	if ( (root = prom_findroot()) == -1) {
		panic("tlb_init: prom_findroot()");
	}

	for (child = prom_firstchild(root); child != 0;
	    child = prom_nextsibling(child)) {
		if (child == -1) {
			panic("tlb_init: OF_child");
		}
		if (_prom_getprop(child, "device_type", buf, sizeof(buf)) > 0 &&
		    strcmp(buf, "cpu") == 0) {
			if (_prom_getprop(child, "upa-portid", &cpu,
			    sizeof(cpu)) == -1 && _prom_getprop(child, "portid",
			    &cpu, sizeof(cpu)) == -1)
				panic("tlb_init: prom_getprop");
			if (cpu == bootcpu)
				break;
		}
	}
	if (cpu != bootcpu)
		panic("tlb_init: no node for bootcpu?!?!");
	if (_prom_getprop(child, "#dtlb-entries", &dtlb_slot_max,
	    sizeof(dtlb_slot_max)) == -1 ||
	    _prom_getprop(child, "#itlb-entries", &itlb_slot_max,
	    sizeof(itlb_slot_max)) == -1)
		panic("tlb_init: prom_getprop");
	dtlb_store = alloc(dtlb_slot_max * sizeof(*dtlb_store));
	itlb_store = alloc(itlb_slot_max * sizeof(*itlb_store));
	if (dtlb_store == NULL || itlb_store == NULL) {
		panic("tlb_init: alloc");
	}

	dtlb_slot = itlb_slot = 0;
}

/*
 * Map requested memory region with permanent 4MB pages.
 */
static int
mmu_mapin(vaddr_t rva, vsize_t len)
{
	int64_t data;
	vaddr_t va, pa, mva;

	len = roundup2(len + (rva & PAGE_MASK_4M), PAGE_SIZE_4M);
	rva &= ~PAGE_MASK_4M;

	tlb_init();
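	/*
	 * 'pa' doubles as a sentinel: (vaddr_t)-1 means no freshly
	 * allocated physical page is currently waiting to be entered
	 * into the TLB.
	 */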
	for (pa = (vaddr_t)-1; len > 0; rva = va) {
		if ( (len = kvamap_extract(rva, len, &va)) == 0) {
			/* The rest is already mapped */
			break;
		}

		if (dtlb_va_to_pa(va) == (u_long)-1 ||
		    itlb_va_to_pa(va) == (u_long)-1) {
			/* Allocate a physical page, claim the virtual area */
			if (pa == (vaddr_t)-1) {
				pa = (vaddr_t)OF_alloc_phys(PAGE_SIZE_4M,
				    PAGE_SIZE_4M);
				if (pa == (vaddr_t)-1)
					panic("out of memory");
				mva = (vaddr_t)OF_claim_virt(va,
				    PAGE_SIZE_4M, 0);
				if (mva != va) {
					panic("can't claim virtual page "
					    "(wanted %#lx, got %#lx)",
					    va, mva);
				}
				/* The mappings may have changed, be paranoid. */
				continue;
			}

			/*
			 * Note that, in practice, up to two fewer slots
			 * are available, depending on the kernel TSB size.
			 */
			if (dtlb_slot >= dtlb_slot_max)
				panic("mmu_mapin: out of dtlb_slots");
			if (itlb_slot >= itlb_slot_max)
				panic("mmu_mapin: out of itlb_slots");

			DPRINTF(("mmu_mapin: %p:%p\n", (void *)va, (void *)pa));

			data = TSB_DATA(0,	/* global */
			    PGSZ_4M,		/* 4mb page */
			    pa,			/* phys.address */
			    1,			/* privileged */
			    1,			/* write */
			    1,			/* cache */
			    1,			/* alias */
			    1,			/* valid */
			    0			/* endianness */
			    );
			data |= TLB_L | TLB_CV;	/* locked, virt.cache */

			dtlb_store[dtlb_slot].te_pa = pa;
			dtlb_store[dtlb_slot].te_va = va;
			dtlb_slot++;
			dtlb_enter(va, hi(data), lo(data));
			pa = (vaddr_t)-1;
		}

		kvamap_enter(va, PAGE_SIZE_4M);

		len -= len > PAGE_SIZE_4M ? PAGE_SIZE_4M : len;
		va += PAGE_SIZE_4M;
	}

	if (pa != (vaddr_t)-1) {
		OF_free_phys(pa, PAGE_SIZE_4M);
	}

	return (0);
}

static ssize_t
mmu_read(int f, void *addr, size_t size)
{
	mmu_mapin((vaddr_t)addr, size);
	return read(f, addr, size);
}

static void*
mmu_memcpy(void *dst, const void *src, size_t size)
{
	mmu_mapin((vaddr_t)dst, size);
	return memcpy(dst, src, size);
}

static void*
mmu_memset(void *dst, int c, size_t size)
{
	mmu_mapin((vaddr_t)dst, size);
	return memset(dst, c, size);
}

static void
mmu_freeall(void)
{
	int i;

	dtlb_slot = itlb_slot = 0;
	for (i = 0; i < MAXSEGNUM; i++) {
		/* XXX return all mappings to PROM and unmap the pages! */
		kvamap[i].start = kvamap[i].end = 0;
	}
}

/*
 * Claim requested memory region in OpenFirmware allocation pool.
 */
static int
ofw_mapin(vaddr_t rva, vsize_t len)
{
	vaddr_t va;

	len = roundup2(len + (rva & PAGE_MASK_4M), PAGE_SIZE_4M);
	rva &= ~PAGE_MASK_4M;

	if ( (len = kvamap_extract(rva, len, &va)) != 0) {
		if (OF_claim((void *)(long)va, len, PAGE_SIZE_4M) == (void *)-1) {
			panic("ofw_mapin: Cannot claim memory.");
		}
		kvamap_enter(va, len);
	}

	return (0);
}

static ssize_t
ofw_read(int f, void *addr, size_t size)
{
	ofw_mapin((vaddr_t)addr, size);
	return read(f, addr, size);
}

static void*
ofw_memcpy(void *dst, const void *src, size_t size)
{
	ofw_mapin((vaddr_t)dst, size);
	return memcpy(dst, src, size);
}

static void*
ofw_memset(void *dst, int c, size_t size)
{
	ofw_mapin((vaddr_t)dst, size);
	return memset(dst, c, size);
}

static void
ofw_freeall(void)
{
	int i;

	dtlb_slot = itlb_slot = 0;
	for (i = 0; i < MAXSEGNUM; i++) {
		OF_release((void*)(u_long)kvamap[i].start,
		    (u_int)(kvamap[i].end - kvamap[i].start));
		kvamap[i].start = kvamap[i].end = 0;
	}
}

/*
 * The NOP implementation exists solely for the sake of loading the
 * kernel image headers: memory comes from the alloc() interface, so
 * no mappings need to be created and nothing dangerous is done.
 */
static ssize_t
nop_read(int f, void *addr, size_t size)
{
	return read(f, addr, size);
}

static void*
nop_memcpy(void *dst, const void *src, size_t size)
{
	/*
	 * A real no-op, needed to make LOAD_HDR work: loadfile_elfXX
	 * copies the ELF headers right after the highest kernel address,
	 * an area that is not mapped by the nop_XXX operations.
	 */
	return (dst);
}

static void*
nop_memset(void *dst, int c, size_t size)
{
	return memset(dst, c, size);
}

static void
nop_freeall(void)
{ }

/*
 * loadfile() hooks.
 */
ssize_t
sparc64_read(int f, void *addr, size_t size)
{
	return (*memsw->read)(f, addr, size);
}

void*
sparc64_memcpy(void *dst, const void *src, size_t size)
{
	return (*memsw->memcpy)(dst, src, size);
}

void*
sparc64_memset(void *dst, int c, size_t size)
{
	return (*memsw->memset)(dst, c, size);
}

/*
 * Remove write permissions from text mappings in the dTLB.
 * Add entries in the iTLB.
 */
void
sparc64_finalize_tlb(u_long data_va)
{
	int i;
	int64_t data;

	for (i = 0; i < dtlb_slot; i++) {
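		/* Mappings at or above data_va are data; leave them writable. */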
		if (dtlb_store[i].te_va >= data_va)
			continue;

		data = TSB_DATA(0,		/* global */
		    PGSZ_4M,			/* 4mb page */
		    dtlb_store[i].te_pa,	/* phys.address */
		    1,				/* privileged */
		    0,				/* write */
		    1,				/* cache */
		    1,				/* alias */
		    1,				/* valid */
		    0				/* endianness */
		    );
		data |= TLB_L | TLB_CV;		/* locked, virt.cache */
		dtlb_replace(dtlb_store[i].te_va, hi(data), lo(data));
		itlb_store[itlb_slot] = dtlb_store[i];
		itlb_slot++;
		itlb_enter(dtlb_store[i].te_va, hi(data), lo(data));
	}
}

/*
 * Record kernel mappings in bootinfo structure.
 */
void
sparc64_bi_add(void)
{
	int i;
	int itlb_size, dtlb_size;
	struct btinfo_count bi_count;
	struct btinfo_tlb *bi_itlb, *bi_dtlb;

	bi_count.count = itlb_slot;
	bi_add(&bi_count, BTINFO_ITLB_SLOTS, sizeof(bi_count));
	bi_count.count = dtlb_slot;
	bi_add(&bi_count, BTINFO_DTLB_SLOTS, sizeof(bi_count));

	itlb_size = sizeof(*bi_itlb) + sizeof(struct tlb_entry) * itlb_slot;
	dtlb_size = sizeof(*bi_dtlb) + sizeof(struct tlb_entry) * dtlb_slot;

	bi_itlb = alloc(itlb_size);
	bi_dtlb = alloc(dtlb_size);

	if ((bi_itlb == NULL) || (bi_dtlb == NULL)) {
		panic("Out of memory in sparc64_bi_add.\n");
	}

	for (i = 0; i < itlb_slot; i++) {
		bi_itlb->tlb[i].te_va = itlb_store[i].te_va;
		bi_itlb->tlb[i].te_pa = itlb_store[i].te_pa;
	}
	bi_add(bi_itlb, BTINFO_ITLB, itlb_size);

	for (i = 0; i < dtlb_slot; i++) {
		bi_dtlb->tlb[i].te_va = dtlb_store[i].te_va;
		bi_dtlb->tlb[i].te_pa = dtlb_store[i].te_pa;
	}
	bi_add(bi_dtlb, BTINFO_DTLB, dtlb_size);
}

/*
 * Choose kernel image mapping strategy:
 *
 * LOADFILE_NOP_ALLOCATOR	To load kernel image headers
 * LOADFILE_OFW_ALLOCATOR	To map the kernel by OpenFirmware means
 * LOADFILE_MMU_ALLOCATOR	To use permanent 4MB mappings
 */
void
loadfile_set_allocator(int type)
{
	if (type >= (sizeof(memswa) / sizeof(struct memsw))) {
		panic("Bad allocator request.\n");
	}

	/*
	 * Release all memory claimed by the previous allocator and
	 * select another allocator for succeeding memory allocation
	 * calls.
	 */
	(*memsw->freeall)();
	memsw = &memswa[type];
}
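
/*
 * Typical usage sketch (hypothetical caller, not part of this file):
 * a boot program reads the kernel headers with the NOP allocator,
 * switches allocators to load the segments proper, and finally fixes
 * up the TLB and bootinfo:
 *
 *	loadfile_set_allocator(LOADFILE_NOP_ALLOCATOR);
 *	... read ELF headers via sparc64_read() ...
 *	loadfile_set_allocator(LOADFILE_MMU_ALLOCATOR);
 *	... load segments via sparc64_memcpy()/sparc64_memset() ...
 *	sparc64_finalize_tlb(data_va);
 *	sparc64_bi_add();
 */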