loadfile_machdep.c revision 1.14
/*	$NetBSD: loadfile_machdep.c,v 1.13 2014/04/21 18:10:40 palle Exp $	*/

/*-
 * Copyright (c) 2005 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This work is based on the code contributed by Robert Drehmel to the
 * FreeBSD project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <lib/libsa/stand.h>
#include <lib/libkern/libkern.h>

#include <machine/pte.h>
#include <machine/cpu.h>
#include <machine/ctlreg.h>
#include <machine/vmparam.h>
#include <machine/promlib.h>
#include <machine/hypervisor.h>

#include "boot.h"
#include "openfirm.h"


#define MAXSEGNUM	50
#define hi(val)		((uint32_t)(((val) >> 32) & (uint32_t)-1))
#define lo(val)		((uint32_t)((val) & (uint32_t)-1))

#define roundup2(x, y)	(((x)+((y)-1))&(~((y)-1)))
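/*
 * Note: roundup2() assumes "y" is a power of two, which holds for the
 * 4MB page size used below.
 */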


typedef int phandle_t;

extern void	itlb_enter(vaddr_t, uint32_t, uint32_t);
extern void	dtlb_enter(vaddr_t, uint32_t, uint32_t);
extern void	dtlb_replace(vaddr_t, uint32_t, uint32_t);
extern vaddr_t	itlb_va_to_pa(vaddr_t);
extern vaddr_t	dtlb_va_to_pa(vaddr_t);

static void	tlb_init(void);
static void	tlb_init_sun4u(void);
#ifdef SUN4V
static void	tlb_init_sun4v(void);
#endif
void	sparc64_finalize_tlb_sun4u(u_long);
#ifdef SUN4V
void	sparc64_finalize_tlb_sun4v(u_long);
#endif
static int	mmu_mapin(vaddr_t, vsize_t);
static int	mmu_mapin_sun4u(vaddr_t, vsize_t);
#ifdef SUN4V
static int	mmu_mapin_sun4v(vaddr_t, vsize_t);
#endif
static ssize_t	mmu_read(int, void *, size_t);
static void*	mmu_memcpy(void *, const void *, size_t);
static void*	mmu_memset(void *, int, size_t);
static void	mmu_freeall(void);

static int	ofw_mapin(vaddr_t, vsize_t);
static ssize_t	ofw_read(int, void *, size_t);
static void*	ofw_memcpy(void *, const void *, size_t);
static void*	ofw_memset(void *, int, size_t);
static void	ofw_freeall(void);

#if 0
static int	nop_mapin(vaddr_t, vsize_t);
#endif
static ssize_t	nop_read(int, void *, size_t);
static void*	nop_memcpy(void *, const void *, size_t);
static void*	nop_memset(void *, int, size_t);
static void	nop_freeall(void);

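/*
 * Bookkeeping for the locked TLB entries handed to the kernel:
 * dtlb_slot/itlb_slot count the entries in use, while the *_slot_max
 * limits are set up by tlb_init().
 */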
struct tlb_entry *dtlb_store = NULL;
struct tlb_entry *itlb_store = NULL;

int dtlb_slot;
int itlb_slot;
int dtlb_slot_max;
int itlb_slot_max;

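/* Virtual address ranges already claimed/mapped by the loader. */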
static struct kvamap {
	uint64_t start;
	uint64_t end;
} kvamap[MAXSEGNUM];

static struct memsw {
	ssize_t	(* read)(int f, void *addr, size_t size);
	void*	(* memcpy)(void *dst, const void *src, size_t size);
	void*	(* memset)(void *dst, int c, size_t size);
	void	(* freeall)(void);
} memswa[] = {
	{ nop_read, nop_memcpy, nop_memset, nop_freeall },
	{ ofw_read, ofw_memcpy, ofw_memset, ofw_freeall },
	{ mmu_read, mmu_memcpy, mmu_memset, mmu_freeall }
};
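/*
 * The order of memswa[] above must match the LOADFILE_NOP/OFW/MMU_ALLOCATOR
 * indices accepted by loadfile_set_allocator() below.
 */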

static struct memsw *memsw = &memswa[0];

#ifdef SUN4V
static int sun4v = 0;
#endif

/*
 * Check if a memory region is already mapped. Return length and virtual
 * address of the unmapped sub-region, if any.
 */
static uint64_t
kvamap_extract(vaddr_t va, vsize_t len, vaddr_t *new_va)
{
	int i;

	*new_va = va;
	for (i = 0; (len > 0) && (i < MAXSEGNUM); i++) {
		if (kvamap[i].start == 0)
			break;
		if ((kvamap[i].start <= va) && (va < kvamap[i].end)) {
			uint64_t va_len = kvamap[i].end - va;
			len = (va_len < len) ? len - va_len : 0;
			*new_va = kvamap[i].end;
		}
	}

	return len;
}
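/*
 * Example: with a single recorded mapping [0x400000, 0x800000), the call
 * kvamap_extract(0x600000, 0x400000, &va) returns 0x200000 and sets va
 * to 0x800000, the still-unmapped tail of the request.
 */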

/*
 * Record a new kernel mapping.
 */
static void
kvamap_enter(uint64_t va, uint64_t len)
{
	int i;

	DPRINTF(("kvamap_enter: %d@%p\n", (int)len, (void*)(u_long)va));
	for (i = 0; (len > 0) && (i < MAXSEGNUM); i++) {
		if (kvamap[i].start == 0) {
			kvamap[i].start = va;
			kvamap[i].end = va + len;
			break;
		}
	}

	if (i == MAXSEGNUM) {
		panic("Too many allocations requested.");
	}
}

/*
 * Initialize the TLB as required by the MMU mapping functions.
 */
static void
tlb_init(void)
{
	phandle_t root;
#ifdef SUN4V
	char buf[128];
#endif

	if (dtlb_store != NULL) {
		return;
	}

	if ((root = prom_findroot()) == -1) {
		panic("tlb_init: prom_findroot()");
	}
#ifdef SUN4V
	if (_prom_getprop(root, "compatible", buf, sizeof(buf)) > 0 &&
	    strcmp(buf, "sun4v") == 0) {
		tlb_init_sun4v();
		sun4v = 1;
	} else {
#endif
		tlb_init_sun4u();
#ifdef SUN4V
	}
#endif

	dtlb_store = alloc(dtlb_slot_max * sizeof(*dtlb_store));
	itlb_store = alloc(itlb_slot_max * sizeof(*itlb_store));
	if (dtlb_store == NULL || itlb_store == NULL) {
		panic("tlb_init: alloc");
	}

	dtlb_slot = itlb_slot = 0;
}

/*
 * Initialize the TLB as required by the MMU mapping functions - sun4u.
 */
static void
tlb_init_sun4u(void)
{
	phandle_t child;
	phandle_t root;
	char buf[128];
	u_int bootcpu;
	u_int cpu;

	bootcpu = get_cpuid();

	if ((root = prom_findroot()) == -1) {
		panic("tlb_init: prom_findroot()");
	}

	for (child = prom_firstchild(root); child != 0;
	    child = prom_nextsibling(child)) {
		if (child == -1) {
			panic("tlb_init: prom_nextsibling()");
		}
		if (_prom_getprop(child, "device_type", buf, sizeof(buf)) > 0 &&
		    strcmp(buf, "cpu") == 0) {
			if (_prom_getprop(child, "upa-portid", &cpu,
			    sizeof(cpu)) == -1 && _prom_getprop(child, "portid",
			    &cpu, sizeof(cpu)) == -1)
				panic("tlb_init: prom_getprop");
			if (cpu == bootcpu)
				break;
		}
	}
	if (child == 0)
		panic("tlb_init: no node for bootcpu?!?!");
	if (_prom_getprop(child, "#dtlb-entries", &dtlb_slot_max,
	    sizeof(dtlb_slot_max)) == -1 ||
	    _prom_getprop(child, "#itlb-entries", &itlb_slot_max,
	    sizeof(itlb_slot_max)) == -1)
		panic("tlb_init: prom_getprop");
}

#ifdef SUN4V
/*
 * Initialize the TLB as required by the MMU mapping functions - sun4v.
 */
static void
tlb_init_sun4v(void)
{
	psize_t len;
	paddr_t pa;
	int64_t hv_rc;

	hv_mach_desc((paddr_t)0, &len); /* Trick to get the actual length */
	if (len == 0) {
		panic("tlb_init_sun4v: hv_mach_desc() failed");
	}
	pa = OF_alloc_phys(len, 16);
	if (pa == (paddr_t)-1) {
		panic("tlb_init_sun4v: OF_alloc_phys() failed");
	}
	hv_rc = hv_mach_desc(pa, &len);
	if (hv_rc != H_EOK) {
		panic("tlb_init_sun4v: hv_mach_desc() failed");
	}
	/* XXX dig out TLB node info - 64 is ok for loading the kernel */
	dtlb_slot_max = itlb_slot_max = 64;
}
#endif

/*
 * Map the requested memory region with permanent 4MB pages.
 */
static int
mmu_mapin(vaddr_t rva, vsize_t len)
{
	len  = roundup2(len + (rva & PAGE_MASK_4M), PAGE_SIZE_4M);
	rva &= ~PAGE_MASK_4M;

	tlb_init();

#ifdef SUN4V
	if (sun4v)
		return mmu_mapin_sun4v(rva, len);
	else
#endif
		return mmu_mapin_sun4u(rva, len);
}
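/*
 * Example: mmu_mapin(0x1234567, 0x1000) first widens the request to the
 * enclosing 4MB page: rva becomes 0x1000000 and len becomes 0x400000.
 */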

/*
 * Map the requested memory region with permanent 4MB pages - sun4u.
 */
static int
mmu_mapin_sun4u(vaddr_t rva, vsize_t len)
{
	uint64_t data;
	paddr_t pa;
	vaddr_t va, mva;

	for (pa = (paddr_t)-1; len > 0; rva = va) {
		if ((len = kvamap_extract(rva, len, &va)) == 0) {
			/* The rest is already mapped */
			break;
		}

		if (dtlb_va_to_pa(va) == (u_long)-1 ||
		    itlb_va_to_pa(va) == (u_long)-1) {
			/* Allocate a physical page, claim the virtual area */
			if (pa == (paddr_t)-1) {
				pa = OF_alloc_phys(PAGE_SIZE_4M, PAGE_SIZE_4M);
				if (pa == (paddr_t)-1)
					panic("out of memory");
				mva = OF_claim_virt(va, PAGE_SIZE_4M);
				if (mva != va) {
					panic("can't claim virtual page "
					    "(wanted %#lx, got %#lx)",
					    va, mva);
				}
				/* The mappings may have changed, be paranoid. */
				continue;
			}

			/*
			 * In practice, up to two slots fewer than the
			 * hardware maximum may be usable (depending on
			 * the kernel TSB size).
			 */
			if (dtlb_slot >= dtlb_slot_max)
				panic("mmu_mapin: out of dtlb_slots");
			if (itlb_slot >= itlb_slot_max)
				panic("mmu_mapin: out of itlb_slots");

			DPRINTF(("mmu_mapin: 0x%lx:0x%x.0x%x\n", va,
			    hi(pa), lo(pa)));

			data = SUN4U_TSB_DATA(0,	/* global */
					PGSZ_4M,	/* 4mb page */
					pa,		/* phys.address */
					1,		/* privileged */
					1,		/* write */
					1,		/* cache */
					1,		/* alias */
					1,		/* valid */
					0		/* endianness */
					);
			data |= SUN4U_TLB_L | SUN4U_TLB_CV; /* locked, virt.cache */

			dtlb_store[dtlb_slot].te_pa = pa;
			dtlb_store[dtlb_slot].te_va = va;
			dtlb_slot++;
			dtlb_enter(va, hi(data), lo(data));
			pa = (paddr_t)-1;
		}

		kvamap_enter(va, PAGE_SIZE_4M);

		len -= len > PAGE_SIZE_4M ? PAGE_SIZE_4M : len;
		va += PAGE_SIZE_4M;
	}

	if (pa != (paddr_t)-1) {
		OF_free_phys(pa, PAGE_SIZE_4M);
	}

	return (0);
}

#ifdef SUN4V
/*
 * Map the requested memory region with permanent 4MB pages - sun4v.
 */
static int
mmu_mapin_sun4v(vaddr_t rva, vsize_t len)
{
	uint64_t data;
	paddr_t pa;
	vaddr_t va, mva;
	int64_t hv_rc;

	for (pa = (paddr_t)-1; len > 0; rva = va) {
		if ((len = kvamap_extract(rva, len, &va)) == 0) {
			/* The rest is already mapped */
			break;
		}

		/* Allocate a physical page, claim the virtual area */
		if (pa == (paddr_t)-1) {
			pa = OF_alloc_phys(PAGE_SIZE_4M, PAGE_SIZE_4M);
			if (pa == (paddr_t)-1)
				panic("out of memory");
			mva = OF_claim_virt(va, PAGE_SIZE_4M);
			if (mva != va) {
				panic("can't claim virtual page "
				    "(wanted %#lx, got %#lx)",
				    va, mva);
			}
		}

		/*
		 * In practice, up to two slots fewer than the hardware
		 * maximum may be usable (depending on the kernel TSB
		 * size).
		 */
		if (dtlb_slot >= dtlb_slot_max)
			panic("mmu_mapin: out of dtlb_slots");
		if (itlb_slot >= itlb_slot_max)
			panic("mmu_mapin: out of itlb_slots");

		DPRINTF(("mmu_mapin: 0x%lx:0x%x.0x%x\n", va,
		    hi(pa), lo(pa)));

		data = SUN4V_TSB_DATA(
			0,		/* global */
			PGSZ_4M,	/* 4mb page */
			pa,		/* phys.address */
			1,		/* privileged */
			1,		/* write */
			1,		/* cache */
			1,		/* alias */
			1,		/* valid */
			0		/* endianness */
			);
		data |= SUN4V_TLB_CV; /* virt.cache */

		dtlb_store[dtlb_slot].te_pa = pa;
		dtlb_store[dtlb_slot].te_va = va;
		dtlb_slot++;
		hv_rc = hv_mmu_map_perm_addr(va, data, MAP_DTLB);
		if (hv_rc != H_EOK) {
			panic("hv_mmu_map_perm_addr() failed - rc = %ld", hv_rc);
		}

		kvamap_enter(va, PAGE_SIZE_4M);

		pa = (paddr_t)-1;

		len -= len > PAGE_SIZE_4M ? PAGE_SIZE_4M : len;
		va += PAGE_SIZE_4M;
	}

	if (pa != (paddr_t)-1) {
		OF_free_phys(pa, PAGE_SIZE_4M);
	}

	return (0);
}
#endif

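/*
 * The mmu_* handlers back sparc64_read()/sparc64_memcpy()/sparc64_memset():
 * each maps the destination region with permanent 4MB pages before
 * touching it.
 */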
static ssize_t
mmu_read(int f, void *addr, size_t size)
{
	mmu_mapin((vaddr_t)addr, size);
	return read(f, addr, size);
}

static void*
mmu_memcpy(void *dst, const void *src, size_t size)
{
	mmu_mapin((vaddr_t)dst, size);
	return memcpy(dst, src, size);
}

static void*
mmu_memset(void *dst, int c, size_t size)
{
	mmu_mapin((vaddr_t)dst, size);
	return memset(dst, c, size);
}

static void
mmu_freeall(void)
{
	int i;

	dtlb_slot = itlb_slot = 0;
	for (i = 0; i < MAXSEGNUM; i++) {
		/* XXX return all mappings to PROM and unmap the pages! */
		kvamap[i].start = kvamap[i].end = 0;
	}
}

/*
 * Claim the requested memory region in the OpenFirmware allocation pool.
 */
static int
ofw_mapin(vaddr_t rva, vsize_t len)
{
	vaddr_t va;

	len  = roundup2(len + (rva & PAGE_MASK_4M), PAGE_SIZE_4M);
	rva &= ~PAGE_MASK_4M;

	if ((len = kvamap_extract(rva, len, &va)) != 0) {
		if (OF_claim((void *)(long)va, len, PAGE_SIZE_4M) == (void*)-1) {
			panic("ofw_mapin: Cannot claim memory.");
		}
		kvamap_enter(va, len);
	}

	return (0);
}

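/*
 * The ofw_* handlers mirror the mmu_* ones, but let OpenFirmware set up
 * the mappings via OF_claim().
 */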
static ssize_t
ofw_read(int f, void *addr, size_t size)
{
	ofw_mapin((vaddr_t)addr, size);
	return read(f, addr, size);
}

static void*
ofw_memcpy(void *dst, const void *src, size_t size)
{
	ofw_mapin((vaddr_t)dst, size);
	return memcpy(dst, src, size);
}

static void*
ofw_memset(void *dst, int c, size_t size)
{
	ofw_mapin((vaddr_t)dst, size);
	return memset(dst, c, size);
}

static void
ofw_freeall(void)
{
	int i;

	dtlb_slot = itlb_slot = 0;
	for (i = 0; i < MAXSEGNUM; i++) {
		OF_release((void*)(u_long)kvamap[i].start,
		    (u_int)(kvamap[i].end - kvamap[i].start));
		kvamap[i].start = kvamap[i].end = 0;
	}
}

/*
 * The NOP implementation exists solely for the sake of loading the kernel
 * headers: buffers come from the alloc() interface, so nothing dangerous
 * (such as creating mappings) needs to be done here.
 */
static ssize_t
nop_read(int f, void *addr, size_t size)
{
	return read(f, addr, size);
}

static void*
nop_memcpy(void *dst, const void *src, size_t size)
{
	/*
	 * A real NOP to make LOAD_HDR work: loadfile_elfXX copies the ELF
	 * headers right after the highest kernel address, which will not be
	 * mapped with the nop_XXX operations.
	 */
	return (dst);
}

static void*
nop_memset(void *dst, int c, size_t size)
{
	return memset(dst, c, size);
}

static void
nop_freeall(void)
{ }

/*
 * loadfile() hooks.
 */
ssize_t
sparc64_read(int f, void *addr, size_t size)
{
	return (*memsw->read)(f, addr, size);
}

void*
sparc64_memcpy(void *dst, const void *src, size_t size)
{
	return (*memsw->memcpy)(dst, src, size);
}

void*
sparc64_memset(void *dst, int c, size_t size)
{
	return (*memsw->memset)(dst, c, size);
}

/*
 * Remove write permissions from text mappings in the dTLB and
 * add corresponding entries in the iTLB.
 */
void
sparc64_finalize_tlb(u_long data_va)
{
#ifdef SUN4V
	if (sun4v)
		sparc64_finalize_tlb_sun4v(data_va);
	else
#endif
		sparc64_finalize_tlb_sun4u(data_va);
}

/*
 * Remove write permissions from text mappings in the dTLB and
 * add corresponding entries in the iTLB - sun4u.
 */
void
sparc64_finalize_tlb_sun4u(u_long data_va)
{
	int i;
	int64_t data;
	bool writable_text = false;

	for (i = 0; i < dtlb_slot; i++) {
		if (dtlb_store[i].te_va >= data_va) {
			/*
			 * If (for whatever reason) the start of the
			 * writable section is right at the start of
			 * the kernel, we need to map it into the iTLB
			 * nevertheless (and not make it read-only).
			 */
			if (i == 0 && dtlb_store[i].te_va == data_va)
				writable_text = true;
			else
				continue;
		}

		data = SUN4U_TSB_DATA(0,	/* global */
				PGSZ_4M,	/* 4mb page */
				dtlb_store[i].te_pa,	/* phys.address */
				1,		/* privileged */
				0,		/* write */
				1,		/* cache */
				1,		/* alias */
				1,		/* valid */
				0		/* endianness */
				);
		data |= SUN4U_TLB_L | SUN4U_TLB_CV; /* locked, virt.cache */
		if (!writable_text)
			dtlb_replace(dtlb_store[i].te_va, hi(data), lo(data));
		itlb_store[itlb_slot] = dtlb_store[i];
		itlb_slot++;
		itlb_enter(dtlb_store[i].te_va, hi(data), lo(data));
	}
	if (writable_text)
		printf("WARNING: kernel text mapped writable!\n");
}

#ifdef SUN4V
/*
 * Remove write permissions from text mappings in the dTLB and
 * add corresponding entries in the iTLB - sun4v.
 */
void
sparc64_finalize_tlb_sun4v(u_long data_va)
{
	int i;
	int64_t data;
	bool writable_text = false;
	int64_t hv_rc;

	for (i = 0; i < dtlb_slot; i++) {
		if (dtlb_store[i].te_va >= data_va) {
			/*
			 * If (for whatever reason) the start of the
			 * writable section is right at the start of
			 * the kernel, we need to map it into the iTLB
			 * nevertheless (and not make it read-only).
			 */
			if (i == 0 && dtlb_store[i].te_va == data_va)
				writable_text = true;
			else
				continue;
		}

		data = SUN4V_TSB_DATA(
			0,		/* global */
			PGSZ_4M,	/* 4mb page */
			dtlb_store[i].te_pa,	/* phys.address */
			1,		/* privileged */
			0,		/* write */
			1,		/* cache */
			1,		/* alias */
			1,		/* valid */
			0		/* endianness */
			);
		data |= SUN4V_TLB_CV|SUN4V_TLB_X; /* virt.cache, executable */
		if (!writable_text) {
			hv_rc = hv_mmu_unmap_perm_addr(dtlb_store[i].te_va,
			                               MAP_DTLB);
			if (hv_rc != H_EOK) {
				panic("hv_mmu_unmap_perm_addr() failed - "
				      "rc = %ld", hv_rc);
			}
			hv_rc = hv_mmu_map_perm_addr(dtlb_store[i].te_va, data,
			                             MAP_DTLB);
			if (hv_rc != H_EOK) {
				panic("hv_mmu_map_perm_addr() failed - "
				      "rc = %ld", hv_rc);
			}
		}

		itlb_store[itlb_slot] = dtlb_store[i];
		itlb_slot++;
		hv_rc = hv_mmu_map_perm_addr(dtlb_store[i].te_va, data,
		                             MAP_ITLB);
		if (hv_rc != H_EOK) {
			panic("hv_mmu_map_perm_addr() failed - rc = %ld", hv_rc);
		}
	}
	if (writable_text)
		printf("WARNING: kernel text mapped writable!\n");
}
#endif

/*
 * Record kernel mappings in the bootinfo structure.
 */
void
sparc64_bi_add(void)
{
	int i;
	int itlb_size, dtlb_size;
	struct btinfo_count bi_count;
	struct btinfo_tlb *bi_itlb, *bi_dtlb;

	bi_count.count = itlb_slot;
	bi_add(&bi_count, BTINFO_ITLB_SLOTS, sizeof(bi_count));
	bi_count.count = dtlb_slot;
	bi_add(&bi_count, BTINFO_DTLB_SLOTS, sizeof(bi_count));

	itlb_size = sizeof(*bi_itlb) + sizeof(struct tlb_entry) * itlb_slot;
	dtlb_size = sizeof(*bi_dtlb) + sizeof(struct tlb_entry) * dtlb_slot;

	bi_itlb = alloc(itlb_size);
	bi_dtlb = alloc(dtlb_size);

	if ((bi_itlb == NULL) || (bi_dtlb == NULL)) {
		panic("Out of memory in sparc64_bi_add.\n");
	}

	for (i = 0; i < itlb_slot; i++) {
		bi_itlb->tlb[i].te_va = itlb_store[i].te_va;
		bi_itlb->tlb[i].te_pa = itlb_store[i].te_pa;
	}
	bi_add(bi_itlb, BTINFO_ITLB, itlb_size);

	for (i = 0; i < dtlb_slot; i++) {
		bi_dtlb->tlb[i].te_va = dtlb_store[i].te_va;
		bi_dtlb->tlb[i].te_pa = dtlb_store[i].te_pa;
	}
	bi_add(bi_dtlb, BTINFO_DTLB, dtlb_size);
}

/*
 * Choose the kernel image mapping strategy:
 *
 * LOADFILE_NOP_ALLOCATOR	To load kernel image headers
 * LOADFILE_OFW_ALLOCATOR	To map the kernel by OpenFirmware means
 * LOADFILE_MMU_ALLOCATOR	To use permanent 4MB mappings
 */
void
loadfile_set_allocator(int type)
{
	if (type >= (sizeof(memswa) / sizeof(struct memsw))) {
		panic("Bad allocator request.\n");
	}

	/*
	 * Release all memory claimed by the previous allocator and switch
	 * to the new one for succeeding memory allocation calls.
	 */
	(*memsw->freeall)();
	memsw = &memswa[type];
}
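
/*
 * Sketch of the typical call sequence (illustrative only; the actual
 * driving logic lives in the common loadfile()/ofwboot code, not here):
 *
 *	loadfile_set_allocator(LOADFILE_NOP_ALLOCATOR);
 *	...load and inspect the kernel ELF headers...
 *	loadfile_set_allocator(LOADFILE_MMU_ALLOCATOR);
 *	...load the kernel segments via sparc64_read()/sparc64_memset()...
 *	sparc64_finalize_tlb(data_va);
 *	sparc64_bi_add();
 */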