/*	$NetBSD: mm.c,v 1.4 2017/10/23 06:00:59 maxv Exp $	*/

/*
 * Copyright (c) 2017 The NetBSD Foundation, Inc. All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Maxime Villard.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "prekern.h"

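/*
 * Map each MM_PROT_* constant to its x86 PTE protection bits. W^X is
 * enforced at this level: there is no WRITE|EXECUTE entry, and the
 * non-executable mappings get PG_NX set.
 */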
static const pt_entry_t protection_codes[3] = {
	[MM_PROT_READ] = PG_RO | PG_NX,
	[MM_PROT_WRITE] = PG_RW | PG_NX,
	[MM_PROT_EXECUTE] = PG_RO,
	/* RWX does not exist */
};

extern paddr_t kernpa_start, kernpa_end;
vaddr_t iom_base;

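/*
 * Physical memory is handed out by a simple bump allocator: pa_avail is
 * the next free physical address, and tmpva is a scratch VA above the
 * prekern image, used to temporarily map pages (e.g. to zero them).
 */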
paddr_t pa_avail = 0;
static const vaddr_t tmpva = (PREKERNBASE + NKL2_KIMG_ENTRIES * NBPD_L2);

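/*
 * Record the first free physical address, as computed by the caller;
 * mm_palloc allocates upwards from there.
 */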
void
mm_init(paddr_t first_pa)
{
	pa_avail = first_pa;
}

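/*
 * Map one 4KB page: write the L1 PTE for va through the recursive slot
 * (PTE_BASE). The caller is responsible for flushing the TLB entry.
 */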
static void
mm_enter_pa(paddr_t pa, vaddr_t va, pte_prot_t prot)
{
	PTE_BASE[pl1_i(va)] = pa | PG_V | protection_codes[prot];
}

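/* Flush the TLB entry for va. */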
static void
mm_flush_va(vaddr_t va)
{
	asm volatile("invlpg (%0)" ::"r" (va) : "memory");
}

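/*
 * Allocate npages physical pages from the bump allocator and zero them,
 * mapping each one at tmpva in turn. There is no bounds check: the
 * memory above pa_avail is assumed to be free.
 */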
static paddr_t
mm_palloc(size_t npages)
{
	paddr_t pa;
	size_t i;

	/* Allocate the physical pages */
	pa = pa_avail;
	pa_avail += npages * PAGE_SIZE;

	/* Zero them out */
	for (i = 0; i < npages; i++) {
		mm_enter_pa(pa + i * PAGE_SIZE, tmpva,
		    MM_PROT_READ|MM_PROT_WRITE);
		mm_flush_va(tmpva);
		memset((void *)tmpva, 0, PAGE_SIZE);
	}

	return pa;
}

static bool
mm_pte_is_valid(pt_entry_t pte)
{
	return ((pte & PG_V) != 0);
}

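/*
 * Translate a VA to its PA by reading the L1 PTE. Only meaningful for
 * addresses mapped with 4KB pages.
 */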
paddr_t
mm_vatopa(vaddr_t va)
{
	return (PTE_BASE[pl1_i(va)] & PG_FRAME);
}

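/*
 * Change the protection of [startva, startva+size) by re-entering each
 * L1 PTE with the new protection bits. size must be page-aligned.
 */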
void
mm_mprotect(vaddr_t startva, size_t size, int prot)
{
	size_t i, npages;
	vaddr_t va;
	paddr_t pa;

	ASSERT(size % PAGE_SIZE == 0);
	npages = size / PAGE_SIZE;

	for (i = 0; i < npages; i++) {
		va = startva + i * PAGE_SIZE;
		pa = (PTE_BASE[pl1_i(va)] & PG_FRAME);
		mm_enter_pa(pa, va, prot);
		mm_flush_va(va);
	}
}

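/*
 * Allocate the missing L4/L3/L2 pages covering [startva, endva). The L1
 * PTEs themselves are filled in later, by mm_enter_pa. The assertions
 * below reflect that the randomized window lies entirely within the
 * last L4 slot (index 511).
 */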
static void
mm_map_tree(vaddr_t startva, vaddr_t endva)
{
	size_t i, size, nL4e, nL3e, nL2e;
	size_t L4e_idx, L3e_idx, L2e_idx;
	paddr_t pa;

	size = endva - startva;

	/*
	 * Build L4.
	 */
	L4e_idx = pl4_i(startva);
	nL4e = roundup(size, NBPD_L4) / NBPD_L4;
	ASSERT(L4e_idx == 511);
	ASSERT(nL4e == 1);
	if (!mm_pte_is_valid(L4_BASE[L4e_idx])) {
		pa = mm_palloc(1);
		L4_BASE[L4e_idx] = pa | PG_V | PG_RW;
	}

	/*
	 * Build L3.
	 */
	L3e_idx = pl3_i(startva);
	nL3e = roundup(size, NBPD_L3) / NBPD_L3;
	for (i = 0; i < nL3e; i++) {
		if (mm_pte_is_valid(L3_BASE[L3e_idx+i])) {
			continue;
		}
		pa = mm_palloc(1);
		L3_BASE[L3e_idx+i] = pa | PG_V | PG_RW;
	}

	/*
	 * Build L2.
	 */
	L2e_idx = pl2_i(startva);
	nL2e = roundup(size, NBPD_L2) / NBPD_L2;
	for (i = 0; i < nL2e; i++) {
		if (mm_pte_is_valid(L2_BASE[L2e_idx+i])) {
			continue;
		}
		pa = mm_palloc(1);
		L2_BASE[L2e_idx+i] = pa | PG_V | PG_RW;
	}
}

/*
 * Select a random VA, and create a page tree for it. The size of this
 * tree is hard-coded, and matches the one created by the generic NetBSD
 * locore.
 */
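/*
 * On amd64 an L2 entry covers NBPD_L2 = 2MB of VA, so the tree spans
 * (NKL2_KIMG_ENTRIES + 1) * 2MB. The extra L2 entry leaves slack for
 * the sub-2MB misalignment of the page-aligned randva.
 */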
static vaddr_t
mm_rand_base(void)
{
	vaddr_t randva;
	uint64_t rnd;
	size_t size;

	size = (NKL2_KIMG_ENTRIES + 1) * NBPD_L2;

	/* XXX: yes, this is ridiculous, will be fixed soon */
	rnd = rdtsc();
	randva = rounddown(KASLR_WINDOW_BASE + rnd % (KASLR_WINDOW_SIZE - size),
	    PAGE_SIZE);

	mm_map_tree(randva, randva + size);

	return randva;
}

/*
 * Virtual address space of the kernel:
 * +---------------+---------------------+------------------+-------------+
 * | KERNEL + SYMS | [PRELOADED MODULES] | BOOTSTRAP TABLES | ISA I/O MEM |
 * +---------------+---------------------+------------------+-------------+
 * We choose a random VA and map everything contiguously starting from
 * there. Note that the physical pages allocated by mm_palloc are part of
 * the BOOTSTRAP TABLES.
 */
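/*
 * The first segment is (pa_avail - kernpa_start) bytes: the kernel image
 * and symbols, any preloaded modules, and the bootstrap tables allocated
 * so far, all physically contiguous starting at kernpa_start. IOM_SIZE
 * bytes of ISA I/O memory (starting at IOM_BEGIN) are mapped right after.
 */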
vaddr_t
mm_map_kernel(void)
{
	size_t i, npages, size;
	vaddr_t baseva;

	size = (pa_avail - kernpa_start);
	baseva = mm_rand_base();
	npages = size / PAGE_SIZE;

	/* Enter the whole area linearly */
	for (i = 0; i < npages; i++) {
		mm_enter_pa(kernpa_start + i * PAGE_SIZE,
		    baseva + i * PAGE_SIZE, MM_PROT_READ|MM_PROT_WRITE);
	}

	/* Enter the ISA I/O MEM */
	iom_base = baseva + npages * PAGE_SIZE;
	npages = IOM_SIZE / PAGE_SIZE;
	for (i = 0; i < npages; i++) {
		mm_enter_pa(IOM_BEGIN + i * PAGE_SIZE,
		    iom_base + i * PAGE_SIZE, MM_PROT_READ|MM_PROT_WRITE);
	}

	return baseva;
}