/*	$NetBSD: vmem.c,v 1.7 2009/03/18 10:22:29 cegger Exp $	*/

/*-
 * Copyright (c) 1999 Shin Takemura.
 * All rights reserved.
 *
 * This software is part of the PocketBSD.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the PocketBSD project
 *	and its contributors.
 * 4. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */
#include <pbsdboot.h>

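/*
 * Per-page bookkeeping: the physical address backing each page of the
 * WinCE-allocated heap, and whether that page has already been handed out.
 */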
struct addr_s {
	void *addr;
	int in_use;
};

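/*
 * Magic header written into every heap page so the page can be recognized
 * again when the physical memory blocks are scanned in vmem_init().
 */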
struct page_header_s {
	unsigned long magic0;
	int pageno;
	unsigned long magic1;
};

struct map_s *map = NULL;
struct addr_s *phys_addrs = NULL;
unsigned char* heap = NULL;
int npages;
void *kernel_start;
void *kernel_end;

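/*
 * Hand control to the loaded kernel: translate the argument vector, the
 * bootinfo block and every entry of the page map to physical addresses,
 * then call the machine-dependent boot stub with the physical address of
 * the root map page.  Returns -1 on failure; on success control passes to
 * the kernel via (*system_info.si_boot)().
 */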
int
vmem_exec(void *entry, int argc, char *argv[], struct bootinfo *bi)
{
	int i;
	void *p;

	if (map == NULL) {
		debug_printf(TEXT("vmem is not initialized.\n"));
		msg_printf(MSG_ERROR, whoami, TEXT("vmem is not initialized.\n"));
		return (-1);
	}

	debug_printf(TEXT("entry point=0x%x\n"), entry);

	map->entry = entry;
	map->base = kernel_start;

	for (i = 0; i < argc; i++) {
		argv[i] = vtophysaddr(argv[i]);
	}
	map->arg0 = (void *)argc;
	map->arg1 = vtophysaddr((void *)argv);
	map->arg2 = vtophysaddr((void *)bi);
	map->arg3 = NULL;

	if (map->arg1 == NULL || map->arg2 == NULL) {
		debug_printf(TEXT("arg, vtophysaddr() failed\n"));
		msg_printf(MSG_ERROR, whoami,
			   TEXT("arg, vtophysaddr() failed\n"));
		return (-1);
	}

	/* Translate every page entry; the list is NULL-terminated. */
	for (i = 0; (p = map->leaf[i / map->leafsize][i % map->leafsize]) != NULL; i++) {
		if ((p = vtophysaddr(p)) == NULL) {
			debug_printf(TEXT("vtophysaddr() failed, page %d (addr=0x%x)\n"),
				     i, map->leaf[i / map->leafsize][i % map->leafsize]);
			msg_printf(MSG_ERROR, whoami,
				   TEXT("vtophysaddr() failed, page %d (addr=0x%x)\n"),
				   i, map->leaf[i / map->leafsize][i % map->leafsize]);
			return (-1);
		}
		map->leaf[i / map->leafsize][i % map->leafsize] = p;
	}

	/* Translate the leaf pages themselves. */
	for (i = 0; i < map->nleaves; i++) {
		if ((p = vtophysaddr((void *)map->leaf[i])) == NULL) {
			debug_printf(TEXT("vtophysaddr() failed, leaf %d (addr=0x%x)\n"),
				     i, map->leaf[i]);
			msg_printf(MSG_ERROR, whoami,
				   TEXT("vtophysaddr() failed, leaf %d (addr=0x%x)\n"),
				   i, map->leaf[i]);
			return (-1);
		}
		map->leaf[i] = (void **)p;
	}

	debug_printf(TEXT("execute startprog()\n"));
	//return (-1);
	close_debug_log();
	return ((*system_info.si_boot)(vtophysaddr((void *)map)));
}

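/*
 * Hand out any free heap page whose physical address does not fall inside
 * the kernel's destination range [kernel_start, kernel_end).
 */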
void *
vmem_alloc(void)
{
	int i, pagesize;
	struct page_header_s *page;

	pagesize = system_info.si_pagesize;
	for (i = 0; i < npages; i++) {
		page = (struct page_header_s*)&heap[pagesize * i];
		if (!phys_addrs[i].in_use &&
		    !(kernel_start <= phys_addrs[i].addr &&
		      phys_addrs[i].addr < kernel_end)) {
			phys_addrs[i].in_use = 1;
			return ((void *)page);
		}
	}
	return (NULL);
}

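/*
 * Prefer the heap page that is already backed by the requested physical
 * address (so that page will not need to be relocated at boot time); fall
 * back to any free page otherwise.
 */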
static void *
alloc_kpage(void *phys_addr)
{
	int i, pagesize;
	struct page_header_s *page;

	pagesize = system_info.si_pagesize;
	for (i = 0; i < npages; i++) {
		page = (struct page_header_s*)&heap[pagesize * i];
		if (phys_addrs[i].addr == phys_addr) {
			if (phys_addrs[i].in_use) {
				debug_printf(TEXT("page %d (phys addr=0x%x) is already in use\n"),
					     i, phys_addr);
				msg_printf(MSG_ERROR, whoami,
					   TEXT("page %d (phys addr=0x%x) is already in use\n"),
					   i, phys_addr);
				return (NULL);
			}
			phys_addrs[i].in_use = 1;
			return ((void *)page);
		}
	}
	return (vmem_alloc());
}

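/*
 * Map a kernel target address (relative to kernel_start) to the heap page
 * that stages it, returning a pointer into the heap and, optionally, the
 * number of bytes left in that page.
 */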
void *
vmem_get(void *phys_addr, int *length)
{
	int pagesize = system_info.si_pagesize;
	int pageno = (phys_addr - kernel_start) / pagesize;
	int offset = (phys_addr - kernel_start) % pagesize;

	if (map == NULL || pageno < 0 || npages <= pageno) {
		return (NULL);
	}
	if (length) {
		*length = pagesize - offset;
	}
	return (map->leaf[pageno / map->leafsize][pageno % map->leafsize] + offset);
}

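/*
 * Translate a pointer into the heap to the physical address that page
 * will occupy once the kernel is launched.
 */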
void *
vtophysaddr(void *page)
{
	int pageno = (page - heap) / system_info.si_pagesize;
	int offset = (page - heap) % system_info.si_pagesize;

	if (map == NULL || pageno < 0 || npages <= pageno) {
		return (NULL);
	}
	return (phys_addrs[pageno].addr + offset);
}

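/*
 * Build the staging heap for a kernel that will occupy [start, end):
 *
 *   1. allocate a non-cached heap large enough for the image, the leaf
 *	pages of the page map, the root map page, the startprog code page,
 *	the argument page and the bootinfo page;
 *   2. stamp every heap page with random magic numbers, then walk DRAM
 *	through temporary VirtualCopy() windows to learn the physical
 *	address backing each page;
 *   3. build the root/leaf page map so the image can later be relocated
 *	page by page.
 *
 * Typical call sequence in the loader (a sketch only; the exact call
 * sites live in the other pbsdboot sources):
 *
 *	if (vmem_init(kern_start, kern_end) < 0)
 *		return (-1);
 *	...copy the kernel image through vmem_get()...
 *	vmem_exec(entry, argc, argv, bi);
 */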
int
vmem_init(void *start, void *end)
{
#define MEM_BLOCK_SIZE (1024*1024*4) /* must be greater than page size */
	int i, m, pageno;
	unsigned long magic0;
	unsigned long magic1;
	int nfounds;
	struct page_header_s *page;
	long size;
	int nleaves;
	int pagesize, memblocks;

	pagesize = system_info.si_pagesize;
	memblocks = (system_info.si_drammaxsize) / MEM_BLOCK_SIZE;

	/* align with page size */
	start = (void *)(((long)start / pagesize) * pagesize);
	end = (void *)((((long)end + pagesize - 1) / pagesize) * pagesize);

	kernel_start = start;
	kernel_end = end;
	size = end - start;

	/*
	 *  program image pages.
	 */
	npages = (size + pagesize - 1) / pagesize;

	/*
	 *  map leaf pages.
	 *  npages plus one for end mark.
	 */
	npages += (nleaves = ((npages * sizeof(void *) + pagesize) / pagesize));

	/*
	 *  map root page, startprg code page, argument page and bootinfo page.
	 */
	npages += 4;

	/*
	 *  allocate pages
	 */
	debug_printf(TEXT("allocate %d pages\n"), npages);
	heap = (unsigned char*)
		VirtualAlloc(0,
			     npages * pagesize,
			     MEM_COMMIT,
			     PAGE_READWRITE | PAGE_NOCACHE);
	if (heap == NULL) {
		debug_printf(TEXT("can't allocate heap\n"));
		msg_printf(MSG_ERROR, whoami, TEXT("can't allocate heap\n"));
		goto error_cleanup;
	}

	/*
	 *  allocate address table.
	 */
	phys_addrs = (struct addr_s *)
		VirtualAlloc(0,
			     npages * sizeof(struct addr_s),
			     MEM_COMMIT,
			     PAGE_READWRITE);
	if (phys_addrs == NULL) {
		debug_printf(TEXT("can't allocate address table\n"));
		msg_printf(MSG_ERROR, whoami, TEXT("can't allocate address table\n"));
		goto error_cleanup;
	}

	/*
	 *  set magic number for each page in buffer.
	 */
	magic0 = Random();
	magic1 = Random();
	debug_printf(TEXT("magic=%08x%08x\n"), magic0, magic1);

	for (i = 0; i < npages; i++) {
		page = (struct page_header_s*)&heap[pagesize * i];
		page->magic0 = magic0;
		page->pageno = i;
		page->magic1 = magic1;
		phys_addrs[i].addr = 0;
		phys_addrs[i].in_use = 0;
	}

	/*
	 *  Scan whole physical memory.
	 */
	nfounds = 0;
	for (m = 0; (m < memblocks) && (nfounds < npages); m++) {
		unsigned char* mem;
		/* Map physical memory block */
		mem = (unsigned char*)VirtualAlloc(0, MEM_BLOCK_SIZE,
						   MEM_RESERVE, PAGE_NOACCESS);
		if (!VirtualCopy((LPVOID)mem, (LPVOID)
				((system_info.si_dramstart + MEM_BLOCK_SIZE * m) >> 8),
				MEM_BLOCK_SIZE,
				PAGE_READWRITE | PAGE_NOCACHE | PAGE_PHYSICAL)) {
			VirtualFree(mem, 0, MEM_RELEASE);
			continue;
		}
		/* Find preliminary allocated pages */
		for (i = 0; i < (int)(MEM_BLOCK_SIZE / pagesize); i++) {
			page = (struct page_header_s*)&mem[pagesize * i];
			if (page->magic0 == magic0 &&
			    page->magic1 == magic1) {
				pageno = page->pageno;
				if (0 <= pageno && pageno < npages &&
				    phys_addrs[pageno].addr == 0) {
					/* Set kernel virtual addr. XXX mips dependent */
					phys_addrs[pageno].addr = (unsigned char*)
						((0x80000000 |
						  system_info.si_dramstart) +
						 MEM_BLOCK_SIZE * m +
						 pagesize * i);
					page->magic0 = 0;
					page->magic1 = 0;
					if (npages <= ++nfounds) {
						break;
					}
				} else {
					debug_printf(TEXT("invalid page header\n"));
					msg_printf(MSG_ERROR, whoami, TEXT("invalid page header\n"));
					goto error_cleanup;
				}
			}
		}
		VirtualFree(mem, 0, MEM_RELEASE);
	}

	if (nfounds < npages) {
		debug_printf(TEXT("lost %d pages\n"), npages - nfounds);
		msg_printf(MSG_ERROR, whoami,
			   TEXT("lost %d pages (allocated %d pages)\n"),
			   npages - nfounds, npages);
		goto error_cleanup;
	}

	/*
	 *  allocate root page
	 */
	if ((map = (struct map_s*)vmem_alloc()) == NULL) {
		debug_printf(TEXT("can't allocate root page.\n"));
		msg_printf(MSG_ERROR, whoami, TEXT("can't allocate root page.\n"));
		goto error_cleanup;
	}
	map->nleaves = nleaves;
	map->leafsize = pagesize / sizeof(void *);
	map->pagesize = pagesize;

	/*
	 *  allocate leaf pages
	 */
	for (i = 0; i < nleaves; i++) {
		if ((map->leaf[i] = (void **)vmem_alloc()) == NULL) {
			debug_printf(TEXT("can't allocate leaf page.\n"));
			msg_printf(MSG_ERROR, whoami, TEXT("can't allocate leaf page.\n"));
			goto error_cleanup;
		}
	}

	/*
	 *  allocate kernel pages
	 */
	for (i = 0; start < kernel_end; start += pagesize, i++) {
		void **leaf = map->leaf[i / map->leafsize];
		if ((leaf[i % map->leafsize] = alloc_kpage(start)) == NULL) {
			debug_printf(TEXT("can't allocate page 0x%x.\n"), start);
			msg_printf(MSG_ERROR, whoami, TEXT("can't allocate page 0x%x.\n"), start);
			goto error_cleanup;
		}
	}
	map->leaf[i / map->leafsize][i % map->leafsize] = NULL; /* END MARK */

	return (0);

 error_cleanup:
	vmem_free();

	return (-1);
}

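/*
 * Release the staging heap and the physical address table and forget the
 * page map.  Safe to call even if vmem_init() failed part way through.
 */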
void
vmem_free(void)
{
	map = NULL;
	if (heap) {
		VirtualFree(heap, 0, MEM_RELEASE);
		heap = NULL;
	}
	if (phys_addrs) {
		VirtualFree(phys_addrs, 0, MEM_RELEASE);
		phys_addrs = NULL;
	}
}

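/*
 * Debugging aid: print, for every kernel page, the heap address that
 * stages it and the physical address it will occupy; pages already at
 * their final physical address are marked "*", the rest "reloc".
 */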
void
vmem_dump_map(void)
{
	void *addr, *page, *paddr;

	if (map == NULL) {
		debug_printf(TEXT("no page map\n"));
		return;
	}

	for (addr = kernel_start; addr < kernel_end; addr += system_info.si_pagesize) {
		page = vmem_get(addr, NULL);
		paddr = vtophysaddr(page);
		debug_printf(TEXT("%08X: vaddr=%08X paddr=%08X %s\n"),
			     addr, page, paddr, addr == paddr ? TEXT("*") : TEXT("reloc"));
	}
}