/*	$NetBSD: loadfile_machdep.c,v 1.13.4.3 2016/12/05 10:54:58 skrll Exp $	*/

/*-
 * Copyright (c) 2005 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This work is based on the code contributed by Robert Drehmel to the
 * FreeBSD project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <lib/libsa/stand.h>
#include <lib/libkern/libkern.h>

#include <machine/pte.h>
#include <machine/cpu.h>
#include <machine/ctlreg.h>
#include <machine/vmparam.h>
#include <machine/promlib.h>
#include <machine/hypervisor.h>

#include "boot.h"
#include "openfirm.h"


#define MAXSEGNUM	50
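/* Extract the high and low 32-bit halves of a 64-bit value (e.g. TLB data). */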
#define hi(val)		((uint32_t)(((val) >> 32) & (uint32_t)-1))
#define lo(val)		((uint32_t)((val) & (uint32_t)-1))

#define roundup2(x, y)	(((x)+((y)-1))&(~((y)-1)))


typedef int phandle_t;

extern void	itlb_enter(vaddr_t, uint32_t, uint32_t);
extern void	dtlb_enter(vaddr_t, uint32_t, uint32_t);
extern void	dtlb_replace(vaddr_t, uint32_t, uint32_t);
extern vaddr_t	itlb_va_to_pa(vaddr_t);
extern vaddr_t	dtlb_va_to_pa(vaddr_t);

static void	tlb_init(void);
static void	tlb_init_sun4u(void);
#ifdef SUN4V
static void	tlb_init_sun4v(void);
#endif
void	sparc64_finalize_tlb_sun4u(u_long);
#ifdef SUN4V
void	sparc64_finalize_tlb_sun4v(u_long);
#endif
static int	mmu_mapin(vaddr_t, vsize_t);
static int	mmu_mapin_sun4u(vaddr_t, vsize_t);
#ifdef SUN4V
static int	mmu_mapin_sun4v(vaddr_t, vsize_t);
#endif
static ssize_t	mmu_read(int, void *, size_t);
static void*	mmu_memcpy(void *, const void *, size_t);
static void*	mmu_memset(void *, int, size_t);
static void	mmu_freeall(void);

static int	ofw_mapin(vaddr_t, vsize_t);
static ssize_t	ofw_read(int, void *, size_t);
static void*	ofw_memcpy(void *, const void *, size_t);
static void*	ofw_memset(void *, int, size_t);
static void	ofw_freeall(void);

#if 0
static int	nop_mapin(vaddr_t, vsize_t);
#endif
static ssize_t	nop_read(int, void *, size_t);
static void*	nop_memcpy(void *, const void *, size_t);
static void*	nop_memset(void *, int, size_t);
static void	nop_freeall(void);

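/*
 * Software copies of the permanent TLB entries set up while loading; they
 * are later handed to the kernel via bootinfo (see sparc64_bi_add() below).
 */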
struct tlb_entry *dtlb_store = 0;
struct tlb_entry *itlb_store = 0;

int dtlb_slot;
int itlb_slot;
int dtlb_slot_max;
int itlb_slot_max;

static struct kvamap {
	uint64_t start;
	uint64_t end;
} kvamap[MAXSEGNUM];

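/*
 * Memory access methods used while loading the kernel image, indexed by the
 * allocator type passed to loadfile_set_allocator() (see the list of
 * LOADFILE_*_ALLOCATOR strategies at the bottom of this file).
 */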
static struct memsw {
	ssize_t	(* read)(int f, void *addr, size_t size);
	void*	(* memcpy)(void *dst, const void *src, size_t size);
	void*	(* memset)(void *dst, int c, size_t size);
	void	(* freeall)(void);
} memswa[] = {
	{ nop_read, nop_memcpy, nop_memset, nop_freeall },
	{ ofw_read, ofw_memcpy, ofw_memset, ofw_freeall },
	{ mmu_read, mmu_memcpy, mmu_memset, mmu_freeall }
};

static struct memsw *memsw = &memswa[0];

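/* Nonzero once the firmware reports a sun4v machine; set in tlb_init(). */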
#ifdef SUN4V
static int sun4v = 0;
#endif

/*
 * Check if a memory region is already mapped. Return length and virtual
 * address of unmapped sub-region, if any.
 */
static uint64_t
kvamap_extract(vaddr_t va, vsize_t len, vaddr_t *new_va)
{
	int i;

	*new_va  = va;
	for (i = 0; (len > 0) && (i < MAXSEGNUM); i++) {
		if (kvamap[i].start == 0)
			break;
		if ((kvamap[i].start <= va) && (va < kvamap[i].end)) {
			uint64_t va_len = kvamap[i].end - va;
			len = (va_len < len) ? len - va_len : 0;
			*new_va = kvamap[i].end;
		}
	}

	return len;
}

/*
 * Record new kernel mapping.
 */
static void
kvamap_enter(uint64_t va, uint64_t len)
{
	int i;

	DPRINTF(("kvamap_enter: %d@%p\n", (int)len, (void*)(u_long)va));
	for (i = 0; (len > 0) && (i < MAXSEGNUM); i++) {
		if (kvamap[i].start == 0) {
			kvamap[i].start = va;
			kvamap[i].end = va + len;
			break;
		}
	}

	if (i == MAXSEGNUM) {
		panic("Too many allocations requested.");
	}
}

/*
 * Initialize TLB as required by MMU mapping functions.
 */
static void
tlb_init(void)
{
	phandle_t root;
#ifdef SUN4V
	char buf[128];
#endif

	if (dtlb_store != NULL) {
		return;
	}

	if ( (root = prom_findroot()) == -1) {
		panic("tlb_init: prom_findroot()");
	}
#ifdef SUN4V
	if (_prom_getprop(root, "compatible", buf, sizeof(buf)) > 0 &&
		    strcmp(buf, "sun4v") == 0) {
		tlb_init_sun4v();
		sun4v = 1;
	}
	else {
#endif
		tlb_init_sun4u();
#ifdef SUN4V
	}
#endif

	dtlb_store = alloc(dtlb_slot_max * sizeof(*dtlb_store));
	itlb_store = alloc(itlb_slot_max * sizeof(*itlb_store));
	if (dtlb_store == NULL || itlb_store == NULL) {
		panic("tlb_init: malloc");
	}

	dtlb_slot = itlb_slot = 0;
}

/*
 * Initialize TLB as required by MMU mapping functions - sun4u.
 */
static void
tlb_init_sun4u(void)
{
	phandle_t child;
	phandle_t root;
	char buf[128];
	bool foundcpu = false;
	u_int bootcpu;
	u_int cpu;

	bootcpu = get_cpuid();

	if ( (root = prom_findroot()) == -1) {
		panic("tlb_init: prom_findroot()");
	}

	for (child = prom_firstchild(root); child != 0;
			child = prom_nextsibling(child)) {
		if (child == -1) {
			panic("tlb_init: OF_child");
		}
		if (_prom_getprop(child, "device_type", buf, sizeof(buf)) > 0 &&
		    strcmp(buf, "cpu") == 0) {
			if (_prom_getprop(child, "upa-portid", &cpu,
			    sizeof(cpu)) == -1 && _prom_getprop(child, "portid",
			    &cpu, sizeof(cpu)) == -1)
				panic("tlb_init: prom_getprop");
			foundcpu = true;
			if (cpu == bootcpu)
				break;
		}
	}
	if (!foundcpu)
		panic("tlb_init: no cpu found!");
	if (cpu != bootcpu)
		panic("tlb_init: no node for bootcpu?!?!");
	if (_prom_getprop(child, "#dtlb-entries", &dtlb_slot_max,
	    sizeof(dtlb_slot_max)) == -1 ||
	    _prom_getprop(child, "#itlb-entries", &itlb_slot_max,
	    sizeof(itlb_slot_max)) == -1)
		panic("tlb_init: prom_getprop");
}

#ifdef SUN4V
/*
 * Initialize TLB as required by MMU mapping functions - sun4v.
 */
static void
tlb_init_sun4v(void)
{
	psize_t len;
	paddr_t pa;
	int64_t hv_rc;

	hv_mach_desc((paddr_t)NULL, &len); /* Trick to get actual length */
	if ( !len ) {
		panic("tlb_init_sun4v: hv_mach_desc() failed");
	}
	pa = OF_alloc_phys(len, 16);
	if ( pa == -1 ) {
		panic("OF_alloc_phys() failed");
	}
	hv_rc = hv_mach_desc(pa, &len);
	if (hv_rc != H_EOK) {
		panic("hv_mach_desc() failed");
	}
	/* XXX dig out TLB node info - 64 is ok for loading the kernel */
	dtlb_slot_max = itlb_slot_max = 64;
}
#endif

/*
 * Map requested memory region with permanent 4MB pages.
 */
static int
mmu_mapin(vaddr_t rva, vsize_t len)
{
	len  = roundup2(len + (rva & PAGE_MASK_4M), PAGE_SIZE_4M);
	rva &= ~PAGE_MASK_4M;

	tlb_init();

#ifdef SUN4V
	if ( sun4v )
		return mmu_mapin_sun4v(rva, len);
	else
#endif
		return mmu_mapin_sun4u(rva, len);
}

/*
 * Map requested memory region with permanent 4MB pages - sun4u.
 */
static int
mmu_mapin_sun4u(vaddr_t rva, vsize_t len)
{
	uint64_t data;
	paddr_t pa;
	vaddr_t va, mva;

	for (pa = (paddr_t)-1; len > 0; rva = va) {
		if ( (len = kvamap_extract(rva, len, &va)) == 0) {
			/* The rest is already mapped */
			break;
		}

		if (dtlb_va_to_pa(va) == (u_long)-1 ||
		    itlb_va_to_pa(va) == (u_long)-1) {
			/* Allocate a physical page, claim the virtual area */
			if (pa == (paddr_t)-1) {
				pa = OF_alloc_phys(PAGE_SIZE_4M, PAGE_SIZE_4M);
				if (pa == (paddr_t)-1)
					panic("out of memory");
				mva = OF_claim_virt(va, PAGE_SIZE_4M);
				if (mva != va) {
					panic("can't claim virtual page "
					    "(wanted %#lx, got %#lx)",
					    va, mva);
				}
				/* The mappings may have changed, be paranoid. */
				continue;
			}

			/*
			 * In practice we can map up to two pages fewer
			 * than this maximum (depending on the kernel
			 * TSB size).
			 */
			if (dtlb_slot >= dtlb_slot_max)
				panic("mmu_mapin: out of dtlb_slots");
			if (itlb_slot >= itlb_slot_max)
				panic("mmu_mapin: out of itlb_slots");

			DPRINTF(("mmu_mapin: 0x%lx:0x%x.0x%x\n", va,
			    hi(pa), lo(pa)));

			data = SUN4U_TSB_DATA(0,	/* global */
					PGSZ_4M,	/* 4mb page */
					pa,		/* phys.address */
					1,		/* privileged */
					1,		/* write */
					1,		/* cache */
					1,		/* alias */
					1,		/* valid */
					0,		/* endianness */
					0		/* wc */
					);
			data |= SUN4U_TLB_L | SUN4U_TLB_CV; /* locked, virt.cache */

			dtlb_store[dtlb_slot].te_pa = pa;
			dtlb_store[dtlb_slot].te_va = va;
			dtlb_slot++;
			dtlb_enter(va, hi(data), lo(data));
			pa = (paddr_t)-1;
		}

		kvamap_enter(va, PAGE_SIZE_4M);

		len -= len > PAGE_SIZE_4M ? PAGE_SIZE_4M : len;
		va += PAGE_SIZE_4M;
	}

	if (pa != (paddr_t)-1) {
		OF_free_phys(pa, PAGE_SIZE_4M);
	}

	return (0);
}

#ifdef SUN4V
/*
 * Map requested memory region with permanent 4MB pages - sun4v.
 */
static int
mmu_mapin_sun4v(vaddr_t rva, vsize_t len)
{
	uint64_t data;
	paddr_t pa;
	vaddr_t va, mva;
	int64_t hv_rc;

	for (pa = (paddr_t)-1; len > 0; rva = va) {
		if ( (len = kvamap_extract(rva, len, &va)) == 0) {
			/* The rest is already mapped */
			break;
		}

		/* Allocate a physical page, claim the virtual area */
		if (pa == (paddr_t)-1) {
			pa = OF_alloc_phys(PAGE_SIZE_4M, PAGE_SIZE_4M);
			if (pa == (paddr_t)-1)
				panic("out of memory");
			mva = OF_claim_virt(va, PAGE_SIZE_4M);
			if (mva != va) {
				panic("can't claim virtual page "
				    "(wanted %#lx, got %#lx)",
				    va, mva);
			}
		}

		/*
		 * In practice we can map up to two pages fewer
		 * than this maximum (depending on the kernel
		 * TSB size).
		 */
		if (dtlb_slot >= dtlb_slot_max)
			panic("mmu_mapin: out of dtlb_slots");
		if (itlb_slot >= itlb_slot_max)
			panic("mmu_mapin: out of itlb_slots");

		DPRINTF(("mmu_mapin: 0x%lx:0x%x.0x%x\n", va,
		    hi(pa), lo(pa)));

		data = SUN4V_TSB_DATA(
			0,		/* global */
			PGSZ_4M,	/* 4mb page */
			pa,		/* phys.address */
			1,		/* privileged */
			1,		/* write */
			1,		/* cache */
			1,		/* alias */
			1,		/* valid */
			0,		/* endianness */
			0		/* wc */
			);
		data |= SUN4V_TLB_CV; /* virt.cache */

		dtlb_store[dtlb_slot].te_pa = pa;
		dtlb_store[dtlb_slot].te_va = va;
		dtlb_slot++;
		hv_rc = hv_mmu_map_perm_addr(va, data, MAP_DTLB);
		if ( hv_rc != H_EOK ) {
			panic("hv_mmu_map_perm_addr() failed - rc = %ld", hv_rc);
		}

		kvamap_enter(va, PAGE_SIZE_4M);

		pa = (paddr_t)-1;

		len -= len > PAGE_SIZE_4M ? PAGE_SIZE_4M : len;
		va += PAGE_SIZE_4M;
	}

	if (pa != (paddr_t)-1) {
		OF_free_phys(pa, PAGE_SIZE_4M);
	}

	return (0);
}
#endif

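/*
 * MMU-backed accessors: make sure the destination is mapped with permanent
 * 4MB pages before reading or writing it.
 */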
static ssize_t
mmu_read(int f, void *addr, size_t size)
{
	mmu_mapin((vaddr_t)addr, size);
	return read(f, addr, size);
}

static void*
mmu_memcpy(void *dst, const void *src, size_t size)
{
	mmu_mapin((vaddr_t)dst, size);
	return memcpy(dst, src, size);
}

static void*
mmu_memset(void *dst, int c, size_t size)
{
	mmu_mapin((vaddr_t)dst, size);
	return memset(dst, c, size);
}

static void
mmu_freeall(void)
{
	int i;

	dtlb_slot = itlb_slot = 0;
	for (i = 0; i < MAXSEGNUM; i++) {
		/* XXX return all mappings to PROM and unmap the pages! */
		kvamap[i].start = kvamap[i].end = 0;
	}
}

/*
 * Claim requested memory region in OpenFirmware allocation pool.
 */
static int
ofw_mapin(vaddr_t rva, vsize_t len)
{
	vaddr_t va;

	len  = roundup2(len + (rva & PAGE_MASK_4M), PAGE_SIZE_4M);
	rva &= ~PAGE_MASK_4M;

	if ( (len = kvamap_extract(rva, len, &va)) != 0) {
		if (OF_claim((void *)(long)va, len, PAGE_SIZE_4M) == (void*)-1){
			panic("ofw_mapin: Cannot claim memory.");
		}
		kvamap_enter(va, len);
	}

	return (0);
}

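/*
 * OpenFirmware-backed accessors: claim the destination region from the
 * firmware before reading or writing it.
 */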
static ssize_t
ofw_read(int f, void *addr, size_t size)
{
	ofw_mapin((vaddr_t)addr, size);
	return read(f, addr, size);
}

static void*
ofw_memcpy(void *dst, const void *src, size_t size)
{
	ofw_mapin((vaddr_t)dst, size);
	return memcpy(dst, src, size);
}

static void*
ofw_memset(void *dst, int c, size_t size)
{
	ofw_mapin((vaddr_t)dst, size);
	return memset(dst, c, size);
}

static void
ofw_freeall(void)
{
	int i;

	dtlb_slot = itlb_slot = 0;
	for (i = 0; i < MAXSEGNUM; i++) {
		OF_release((void*)(u_long)kvamap[i].start,
				(u_int)(kvamap[i].end - kvamap[i].start));
		kvamap[i].start = kvamap[i].end = 0;
	}
}

/*
 * The NOP implementation exists solely for the sake of loading the kernel
 * headers.  Here we rely on the alloc() interface to allocate memory and
 * avoid doing anything dangerous.
 */
static ssize_t
nop_read(int f, void *addr, size_t size)
{
	return read(f, addr, size);
}

static void*
nop_memcpy(void *dst, const void *src, size_t size)
{
	/*
	 * Real NOP to make LOAD_HDR work: loadfile_elfXX copies ELF headers
	 * right after the highest kernel address which will not be mapped with
	 * nop_XXX operations.
	 */
	return (dst);
}

static void*
nop_memset(void *dst, int c, size_t size)
{
	return memset(dst, c, size);
}

static void
nop_freeall(void)
{ }

/*
 * loadfile() hooks.
 */
ssize_t
sparc64_read(int f, void *addr, size_t size)
{
	return (*memsw->read)(f, addr, size);
}

void*
sparc64_memcpy(void *dst, const void *src, size_t size)
{
	return (*memsw->memcpy)(dst, src, size);
}

void*
sparc64_memset(void *dst, int c, size_t size)
{
	return (*memsw->memset)(dst, c, size);
}

/*
 * Remove write permissions from text mappings in the dTLB.
 * Add entries in the iTLB.
 */
void
sparc64_finalize_tlb(u_long data_va)
{
#ifdef SUN4V
	if ( sun4v )
		sparc64_finalize_tlb_sun4v(data_va);
	else
#endif
		sparc64_finalize_tlb_sun4u(data_va);
}

/*
 * Remove write permissions from text mappings in the dTLB - sun4u.
 * Add entries in the iTLB.
 */
void
sparc64_finalize_tlb_sun4u(u_long data_va)
{
	int i;
	int64_t data;
	bool writable_text = false;

	for (i = 0; i < dtlb_slot; i++) {
		if (dtlb_store[i].te_va >= data_va) {
			/*
			 * If (for whatever reason) the start of the
			 * writable section is right at the start of
			 * the kernel, we need to map it into the ITLB
			 * nevertheless (and don't make it readonly).
			 */
			if (i == 0 && dtlb_store[i].te_va == data_va)
				writable_text = true;
			else
				continue;
		}

		data = SUN4U_TSB_DATA(0,	/* global */
				PGSZ_4M,	/* 4mb page */
				dtlb_store[i].te_pa,	/* phys.address */
				1,		/* privileged */
				0,		/* write */
				1,		/* cache */
				1,		/* alias */
				1,		/* valid */
				0,		/* endianness */
				0		/* wc */
				);
		data |= SUN4U_TLB_L | SUN4U_TLB_CV; /* locked, virt.cache */
		if (!writable_text)
			dtlb_replace(dtlb_store[i].te_va, hi(data), lo(data));
		itlb_store[itlb_slot] = dtlb_store[i];
		itlb_slot++;
		itlb_enter(dtlb_store[i].te_va, hi(data), lo(data));
	}
	if (writable_text)
		printf("WARNING: kernel text mapped writable!\n");

}

#ifdef SUN4V
/*
 * Remove write permissions from text mappings in the dTLB - sun4v.
 * Add entries in the iTLB.
 */
void
sparc64_finalize_tlb_sun4v(u_long data_va)
{
	int i;
	int64_t data;
	bool writable_text = false;
	int64_t hv_rc;

	for (i = 0; i < dtlb_slot; i++) {
		if (dtlb_store[i].te_va >= data_va) {
			/*
			 * If (for whatever reason) the start of the
			 * writable section is right at the start of
			 * the kernel, we need to map it into the ITLB
			 * nevertheless (and don't make it readonly).
			 */
			if (i == 0 && dtlb_store[i].te_va == data_va)
				writable_text = true;
			else
				continue;
		}

		data = SUN4V_TSB_DATA(
			0,		/* global */
			PGSZ_4M,	/* 4mb page */
			dtlb_store[i].te_pa,	/* phys.address */
			1,		/* privileged */
			0,		/* write */
			1,		/* cache */
			1,		/* alias */
			1,		/* valid */
			0,		/* endianness */
			0		/* wc */
			);
		data |= SUN4V_TLB_CV|SUN4V_TLB_X; /* virt.cache, executable */
		if (!writable_text) {
			hv_rc = hv_mmu_unmap_perm_addr(dtlb_store[i].te_va,
			                               MAP_DTLB);
			if ( hv_rc != H_EOK ) {
				panic("hv_mmu_unmap_perm_addr() failed - "
				      "rc = %ld", hv_rc);
			}
			hv_rc = hv_mmu_map_perm_addr(dtlb_store[i].te_va, data,
			                             MAP_DTLB);
			if ( hv_rc != H_EOK ) {
				panic("hv_mmu_map_perm_addr() failed - "
				      "rc = %ld", hv_rc);
			}
		}

		itlb_store[itlb_slot] = dtlb_store[i];
		itlb_slot++;
		hv_rc = hv_mmu_map_perm_addr(dtlb_store[i].te_va, data,
		                             MAP_ITLB);
		if ( hv_rc != H_EOK ) {
			panic("hv_mmu_map_perm_addr() failed - rc = %ld", hv_rc);
		}
	}
	if (writable_text)
		printf("WARNING: kernel text mapped writable!\n");
}
#endif

/*
 * Record kernel mappings in bootinfo structure.
 */
void
sparc64_bi_add(void)
{
	int i;
	int itlb_size, dtlb_size;
	struct btinfo_count bi_count;
	struct btinfo_tlb *bi_itlb, *bi_dtlb;

	bi_count.count = itlb_slot;
	bi_add(&bi_count, BTINFO_ITLB_SLOTS, sizeof(bi_count));
	bi_count.count = dtlb_slot;
	bi_add(&bi_count, BTINFO_DTLB_SLOTS, sizeof(bi_count));

	itlb_size = sizeof(*bi_itlb) + sizeof(struct tlb_entry) * itlb_slot;
	dtlb_size = sizeof(*bi_dtlb) + sizeof(struct tlb_entry) * dtlb_slot;

	bi_itlb = alloc(itlb_size);
	bi_dtlb = alloc(dtlb_size);

	if ((bi_itlb == NULL) || (bi_dtlb == NULL)) {
		panic("Out of memory in sparc64_bi_add.\n");
	}

	for (i = 0; i < itlb_slot; i++) {
		bi_itlb->tlb[i].te_va = itlb_store[i].te_va;
		bi_itlb->tlb[i].te_pa = itlb_store[i].te_pa;
	}
	bi_add(bi_itlb, BTINFO_ITLB, itlb_size);

	for (i = 0; i < dtlb_slot; i++) {
		bi_dtlb->tlb[i].te_va = dtlb_store[i].te_va;
		bi_dtlb->tlb[i].te_pa = dtlb_store[i].te_pa;
	}
	bi_add(bi_dtlb, BTINFO_DTLB, dtlb_size);
}

/*
 * Choose kernel image mapping strategy:
 *
 * LOADFILE_NOP_ALLOCATOR	To load kernel image headers
 * LOADFILE_OFW_ALLOCATOR	To map the kernel by OpenFirmware means
 * LOADFILE_MMU_ALLOCATOR	To use permanent 4MB mappings
 */
void
loadfile_set_allocator(int type)
{
	if (type >= (sizeof(memswa) / sizeof(struct memsw))) {
		panic("Bad allocator request.\n");
	}

	/*
	 * Release all memory claimed by previous allocator and schedule
	 * another allocator for succeeding memory allocation calls.
	 */
	(*memsw->freeall)();
	memsw = &memswa[type];
}