/*	$NetBSD: mm.c,v 1.28 2021/05/04 21:09:16 khorben Exp $	*/

/*
 * Copyright (c) 2017-2020 The NetBSD Foundation, Inc. All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Maxime Villard.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "prekern.h"

#define ELFROUND	64

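/*
 * Bytes used to pad the unused space around a shifted segment. Note that
 * 0xCC is the x86 'int3' breakpoint opcode, so stray execution that falls
 * into text padding traps immediately; rodata and data are padded with
 * zeros.
 */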
static const uint8_t pads[4] = {
	[BTSEG_NONE] = 0x00,
	[BTSEG_TEXT] = 0xCC,
	[BTSEG_RODATA] = 0x00,
	[BTSEG_DATA] = 0x00
};

#define MM_PROT_READ	0x00
#define MM_PROT_WRITE	0x01
#define MM_PROT_EXECUTE	0x02

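/*
 * Translation of the abstract MM_PROT_* values into x86 PTE bits: read-only
 * mappings get PTE_NX, writable ones get PTE_W|PTE_NX, and executable ones
 * get neither, since x86 has no dedicated execute bit, only no-execute.
 */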
static const pt_entry_t protection_codes[3] = {
	[MM_PROT_READ] = PTE_NX,
	[MM_PROT_WRITE] = PTE_W | PTE_NX,
	[MM_PROT_EXECUTE] = 0,
	/* RWX does not exist */
};

struct bootspace bootspace;

extern paddr_t kernpa_start, kernpa_end;
vaddr_t iom_base;

paddr_t pa_avail = 0;
static const vaddr_t tmpva = (PREKERNBASE + NKL2_KIMG_ENTRIES * NBPD_L2);

void
mm_init(paddr_t first_pa)
{
	pa_avail = first_pa;
}

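/*
 * mm_enter_pa() creates a fresh L1 mapping, and fails if one is already
 * present. mm_reenter_pa() silently overwrites the existing entry; it is
 * used to recycle the tmpva window and to update protections.
 */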
static void
mm_enter_pa(paddr_t pa, vaddr_t va, pte_prot_t prot)
{
	if (PTE_BASE[pl1_i(va)] & PTE_P) {
		fatal("mm_enter_pa: mapping already present");
	}
	PTE_BASE[pl1_i(va)] = pa | PTE_P | protection_codes[prot];
}

static void
mm_reenter_pa(paddr_t pa, vaddr_t va, pte_prot_t prot)
{
	PTE_BASE[pl1_i(va)] = pa | PTE_P | protection_codes[prot];
}

static void
mm_flush_va(vaddr_t va)
{
	asm volatile("invlpg (%0)" ::"r" (va) : "memory");
}

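/*
 * Bump allocator for physical pages: npages are carved linearly out of
 * pa_avail, and each one is zeroed by remapping the tmpva window onto it.
 */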
static paddr_t
mm_palloc(size_t npages)
{
	paddr_t pa;
	size_t i;

	/* Allocate the physical pages */
	pa = pa_avail;
	pa_avail += npages * PAGE_SIZE;

	/* Zero them out */
	for (i = 0; i < npages; i++) {
		mm_reenter_pa(pa + i * PAGE_SIZE, tmpva,
		    MM_PROT_READ|MM_PROT_WRITE);
		mm_flush_va(tmpva);
		memset((void *)tmpva, 0, PAGE_SIZE);
	}

	return pa;
}

static bool
mm_pte_is_valid(pt_entry_t pte)
{
	return ((pte & PTE_P) != 0);
}

static void
mm_mprotect(vaddr_t startva, size_t size, pte_prot_t prot)
{
	size_t i, npages;
	vaddr_t va;
	paddr_t pa;

	ASSERT(size % PAGE_SIZE == 0);
	npages = size / PAGE_SIZE;

	for (i = 0; i < npages; i++) {
		va = startva + i * PAGE_SIZE;
		pa = (PTE_BASE[pl1_i(va)] & PTE_FRAME);
		mm_reenter_pa(pa, va, prot);
		mm_flush_va(va);
	}
}

void
mm_bootspace_mprotect(void)
{
	pte_prot_t prot;
	size_t i;

	/* Remap the kernel segments with proper permissions. */
	for (i = 0; i < BTSPACE_NSEGS; i++) {
		if (bootspace.segs[i].type == BTSEG_TEXT) {
			prot = MM_PROT_READ|MM_PROT_EXECUTE;
		} else if (bootspace.segs[i].type == BTSEG_RODATA) {
			prot = MM_PROT_READ;
		} else {
			continue;
		}
		mm_mprotect(bootspace.segs[i].va, bootspace.segs[i].sz, prot);
	}

	print_state(STATE_NORMAL, "Segments protection updated");
}

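/*
 * Number of page-tree entries of size pgsz needed to cover the range
 * [startva, endva). A worked example with hypothetical values: for
 * startva = 0x1ff000 and endva = 0x201000 with pgsz = NBPD_L2 (2MB), the
 * range straddles a 2MB boundary, so two entries are needed even though
 * the range spans only two 4KB pages.
 */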
static size_t
mm_nentries_range(vaddr_t startva, vaddr_t endva, size_t pgsz)
{
	size_t npages;

	npages = roundup((endva / PAGE_SIZE), (pgsz / PAGE_SIZE)) -
	    rounddown((startva / PAGE_SIZE), (pgsz / PAGE_SIZE));
	return (npages / (pgsz / PAGE_SIZE));
}

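/*
 * Build the L4/L3/L2 levels of the page tree covering [startva, endva),
 * top-down, allocating intermediate page-table pages as needed. The L1
 * entries themselves are entered later, via mm_enter_pa(). L4_BASE,
 * L3_BASE and L2_BASE are the usual recursive-mapping views of the tree.
 */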
static void
mm_map_tree(vaddr_t startva, vaddr_t endva)
{
	size_t i, nL4e, nL3e, nL2e;
	size_t L4e_idx, L3e_idx, L2e_idx;
	paddr_t pa;

	/* Build L4. */
	L4e_idx = pl4_i(startva);
	nL4e = mm_nentries_range(startva, endva, NBPD_L4);
	ASSERT(L4e_idx == 511);
	ASSERT(nL4e == 1);
	if (!mm_pte_is_valid(L4_BASE[L4e_idx])) {
		pa = mm_palloc(1);
		L4_BASE[L4e_idx] = pa | PTE_P | PTE_W;
	}

	/* Build L3. */
	L3e_idx = pl3_i(startva);
	nL3e = mm_nentries_range(startva, endva, NBPD_L3);
	for (i = 0; i < nL3e; i++) {
		if (mm_pte_is_valid(L3_BASE[L3e_idx+i])) {
			continue;
		}
		pa = mm_palloc(1);
		L3_BASE[L3e_idx+i] = pa | PTE_P | PTE_W;
	}

	/* Build L2. */
	L2e_idx = pl2_i(startva);
	nL2e = mm_nentries_range(startva, endva, NBPD_L2);
	for (i = 0; i < nL2e; i++) {
		if (mm_pte_is_valid(L2_BASE[L2e_idx+i])) {
			continue;
		}
		pa = mm_palloc(1);
		L2_BASE[L2e_idx+i] = pa | PTE_P | PTE_W;
	}
}

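/*
 * Pick a random VA of the given size, aligned to pagesz, inside the KASLR
 * window, and build the page tree for it. The three checks below reject a
 * candidate that starts inside an existing segment, ends inside one, or
 * completely engulfs one.
 */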
static vaddr_t
mm_randva_kregion(size_t size, size_t pagesz)
{
	vaddr_t sva, eva;
	vaddr_t randva;
	uint64_t rnd;
	size_t i;
	bool ok;

	while (1) {
		prng_get_rand(&rnd, sizeof(rnd));
		randva = rounddown(KASLR_WINDOW_BASE +
		    rnd % (KASLR_WINDOW_SIZE - size), pagesz);

		/* Detect collisions */
		ok = true;
		for (i = 0; i < BTSPACE_NSEGS; i++) {
			if (bootspace.segs[i].type == BTSEG_NONE) {
				continue;
			}
			sva = bootspace.segs[i].va;
			eva = sva + bootspace.segs[i].sz;

			if ((sva <= randva) && (randva < eva)) {
				ok = false;
				break;
			}
			if ((sva < randva + size) && (randva + size <= eva)) {
				ok = false;
				break;
			}
			if (randva < sva && eva < (randva + size)) {
				ok = false;
				break;
			}
		}
		if (ok) {
			break;
		}
	}

	mm_map_tree(randva, randva + size);

	return randva;
}

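/*
 * Return the highest physical address consumed by the registered kernel
 * segments; the "boot" region starts right there.
 */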
static paddr_t
bootspace_get_kern_segs_end_pa(void)
{
	paddr_t pa, max = 0;
	size_t i;

	for (i = 0; i < BTSPACE_NSEGS; i++) {
		if (bootspace.segs[i].type == BTSEG_NONE) {
			continue;
		}
		pa = bootspace.segs[i].pa + bootspace.segs[i].sz;
		if (pa > max)
			max = pa;
	}

	return max;
}

static void
bootspace_addseg(int type, vaddr_t va, paddr_t pa, size_t sz)
{
	size_t i;

	for (i = 0; i < BTSPACE_NSEGS; i++) {
		if (bootspace.segs[i].type == BTSEG_NONE) {
			bootspace.segs[i].type = type;
			bootspace.segs[i].va = va;
			bootspace.segs[i].pa = pa;
			bootspace.segs[i].sz = sz;
			return;
		}
	}

	fatal("bootspace_addseg: segments full");
}

static size_t
mm_shift_segment(vaddr_t va, size_t pagesz, size_t elfsz, size_t elfalign)
{
	size_t shiftsize, offset;
	uint64_t rnd;

	/*
	 * If possible, shift the segment in memory by a random offset, while
	 * respecting the ELF alignment constraint. Once shifted, the segment
	 * still fits within the pagesz-rounded area mapped for it.
	 */

	if (elfalign == 0) {
		elfalign = ELFROUND;
	}

	ASSERT(pagesz >= elfalign);
	ASSERT(pagesz % elfalign == 0);
	shiftsize = roundup(elfsz, pagesz) - roundup(elfsz, elfalign);
	if (shiftsize == 0) {
		return 0;
	}

	prng_get_rand(&rnd, sizeof(rnd));
	offset = roundup(rnd % shiftsize, elfalign);
	ASSERT((va + offset) % elfalign == 0);

	memmove((void *)(va + offset), (void *)va, elfsz);

	return offset;
}
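
/*
 * A worked example of the shift computed above, with hypothetical values:
 * elfsz = 0x1200, pagesz = NBPD_L1 (4KB), elfalign = 64. Then
 *     shiftsize = roundup(0x1200, 0x1000) - roundup(0x1200, 64)
 *               = 0x2000 - 0x1200 = 0xE00
 * and the segment is moved forward by a random 64-byte-aligned offset of
 * at most 0xE00, so it still ends within the two pages mapped for it.
 */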

static void
mm_map_head(void)
{
	size_t i, npages, size;
	uint64_t rnd;
	vaddr_t randva;

	/*
	 * The HEAD window is 1GB below the main KASLR window. This is to
	 * ensure that the head always comes first in virtual memory. The
	 * reason for that is that we use (headva + sh_offset), and sh_offset
	 * is unsigned.
	 */

	/*
	 * To get the size of the head, we take a look at the read-only
	 * mapping of the kernel we created in locore. We're identity mapped,
	 * so kernpa = kernva.
	 */
	size = elf_get_head_size((vaddr_t)kernpa_start);
	npages = size / PAGE_SIZE;

	/*
	 * Choose a random range of VAs in the HEAD window, and create the page
	 * tree for it.
	 */
	prng_get_rand(&rnd, sizeof(rnd));
	randva = rounddown(HEAD_WINDOW_BASE + rnd % (HEAD_WINDOW_SIZE - size),
	    PAGE_SIZE);
	mm_map_tree(randva, randva + size);

	/* Enter the area and build the ELF info */
	for (i = 0; i < npages; i++) {
		mm_enter_pa(kernpa_start + i * PAGE_SIZE,
		    randva + i * PAGE_SIZE, MM_PROT_READ|MM_PROT_WRITE);
	}
	elf_build_head(randva);

	/* Register the values in bootspace */
	bootspace.head.va = randva;
	bootspace.head.pa = kernpa_start;
	bootspace.head.sz = size;
}

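/*
 * Map one kernel ELF section at a random VA, shift it, register it as a
 * bootspace segment, and return the VA at which its content begins.
 */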
vaddr_t
mm_map_segment(int segtype, paddr_t pa, size_t elfsz, size_t elfalign)
{
	size_t i, npages, size, pagesz, offset;
	vaddr_t randva;
	char pad;

	if (elfsz <= PAGE_SIZE) {
		pagesz = NBPD_L1;
	} else {
		pagesz = NBPD_L2;
	}

	/* Create the page tree */
	size = roundup(elfsz, pagesz);
	randva = mm_randva_kregion(size, pagesz);

	/* Enter the segment */
	npages = size / PAGE_SIZE;
	for (i = 0; i < npages; i++) {
		mm_enter_pa(pa + i * PAGE_SIZE,
		    randva + i * PAGE_SIZE, MM_PROT_READ|MM_PROT_WRITE);
	}

	/* Shift the segment in memory */
	offset = mm_shift_segment(randva, pagesz, elfsz, elfalign);
	ASSERT(offset + elfsz <= size);

	/* Fill the paddings */
	pad = pads[segtype];
	memset((void *)randva, pad, offset);
	memset((void *)(randva + offset + elfsz), pad, size - elfsz - offset);

	/* Register the bootspace information */
	bootspace_addseg(segtype, randva, pa, size);

	return (randva + offset);
}

static void
mm_map_boot(void)
{
	size_t i, npages, size;
	vaddr_t randva;
	paddr_t bootpa;

	/*
	 * The "boot" region is special: its page tree has a fixed size, but
	 * the number of pages entered is lower.
	 */

	/* Create the page tree, starting at a random VA */
	size = (NKL2_KIMG_ENTRIES + 1) * NBPD_L2;
	randva = mm_randva_kregion(size, PAGE_SIZE);

	/* The "boot" region begins right after the kernel segments */
	bootpa = bootspace_get_kern_segs_end_pa();

	/*
	 * The prekern has consumed some extra memory up until pa_avail; this
	 * covers the REL/RELA/SYM/STR sections and EXTRA.
	 */
	size = (pa_avail - bootpa);
	npages = size / PAGE_SIZE;

	/* Enter the whole area linearly */
	for (i = 0; i < npages; i++) {
		mm_enter_pa(bootpa + i * PAGE_SIZE,
		    randva + i * PAGE_SIZE, MM_PROT_READ|MM_PROT_WRITE);
	}

	/* Fix up the ELF sections located in the "boot" region */
	elf_fixup_boot(randva, bootpa);

	/* Map the ISA I/O MEM right after EXTRA, in pure VA */
	iom_base = randva + npages * PAGE_SIZE;
	npages = IOM_SIZE / PAGE_SIZE;
	for (i = 0; i < npages; i++) {
		mm_enter_pa(IOM_BEGIN + i * PAGE_SIZE,
		    iom_base + i * PAGE_SIZE, MM_PROT_READ|MM_PROT_WRITE);
	}

	/* Register the values in bootspace */
	bootspace.boot.va = randva;
	bootspace.boot.pa = bootpa;
	bootspace.boot.sz = (size_t)(iom_base + IOM_SIZE) -
	    (size_t)bootspace.boot.va;

	/* Initialize the values that are located in the "boot" region */
	extern uint64_t PDPpaddr;
	bootspace.spareva = bootspace.boot.va + NKL2_KIMG_ENTRIES * NBPD_L2;
	bootspace.pdir = bootspace.boot.va + (PDPpaddr - bootspace.boot.pa);
	bootspace.smodule = iom_base + IOM_SIZE;
	bootspace.emodule = bootspace.boot.va + NKL2_KIMG_ENTRIES * NBPD_L2;
}

/*
 * The bootloader has set up the following layout of physical memory:
 * +------------+--------------+------------+------------------------+-------+
 * | ELF HEADER | SECT HEADERS | KERN SECTS | REL/RELA/SYM/STR SECTS | EXTRA |
 * +------------+--------------+------------+------------------------+-------+
 * This was done in the loadfile_elf32.c:loadfile_dynamic() function.
 *
 * We abstract this layout into several "regions":
 * +---------------------------+------------+--------------------------------+
 * |         Head region       | Kern segs  |          Boot region           |
 * +---------------------------+------------+--------------------------------+
 *
 * We create a variable number of independent regions: one head, several
 * kernel segments, and one boot. They are all mapped at random VAs.
 *
 * "Head" contains the ELF Header and ELF Section Headers, and we use them to
 * map the rest of the regions. Head must be placed *before* the other
 * regions, in both virtual memory and physical memory.
 *
 * The "Kernel Segments" contain the kernel SHT_NOBITS and SHT_PROGBITS
 * sections, in a 1:1 manner (one segment is associated with one section).
 * The segments are mapped at random VAs and referenced in bootspace.segs[].
 *
 * "Boot" contains miscellaneous information:
 *  - The ELF Rel/Rela/Sym/Str sections of the kernel
 *  - Some extra memory the prekern has consumed so far
 *  - The ISA I/O MEM, in pure VA
 *  - Eventually the module_map, in pure VA (the kernel uses the available VA
 *    at the end of "boot")
 * Boot is placed *after* the other regions in physical memory. In virtual
 * memory, however, there is no constraint, so its VA is randomly selected in
 * the main KASLR window.
 *
 * At the end of this function, the bootspace structure is fully constructed.
 */
void
mm_map_kernel(void)
{
	memset(&bootspace, 0, sizeof(bootspace));
	mm_map_head();
	print_state(STATE_NORMAL, "Head region mapped");
	elf_map_sections();
	print_state(STATE_NORMAL, "Segments mapped");
	mm_map_boot();
	print_state(STATE_NORMAL, "Boot region mapped");
}
    502