/*	$NetBSD: vmem.c,v 1.2 1999/09/22 12:49:50 uch Exp $	*/

/*-
 * Copyright (c) 1999 Shin Takemura.
 * All rights reserved.
 *
 * This software is part of the PocketBSD.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the PocketBSD project
 *	and its contributors.
 * 4. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */
#include <pbsdboot.h>

#define MAX_MEMORY (1024*1024*32)	/* 32 MB */
#define MEM_BLOCKS 8
#define MEM_BLOCK_SIZE (1024*1024*4)

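/*
 *  How this module works:
 *
 *  Windows CE only hands out virtual memory, but the kernel has to be
 *  started from known physical pages.  vmem_init() therefore
 *  VirtualAlloc()s an uncached heap, stamps every heap page with a
 *  random magic number plus its page index (struct page_header_s),
 *  and then scans physical memory through VirtualCopy() windows
 *  looking for those stamps; whatever physical address a stamp turns
 *  up at is recorded in phys_addrs[].
 *
 *  The staged pages are tracked in a two-level table (struct map_s,
 *  visible via pbsdboot.h): a root page holding bookkeeping plus
 *  pointers to leaf pages, each leaf holding one pointer per staged
 *  kernel page.  vmem_exec() converts all of these pointers to
 *  physical addresses and passes the (physical) root map to
 *  startprog().
 */
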
struct addr_s {
	caddr_t addr;
	int in_use;
};

struct page_header_s {
	unsigned long magic0;
	int pageno;
	unsigned long magic1;
};

struct map_s *map = NULL;
struct addr_s *phys_addrs = NULL;
unsigned char* heap = NULL;
int npages;
caddr_t kernel_start;
caddr_t kernel_end;

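/*
 * vmem_exec():
 *	Fix up every pointer the kernel start-up path will see (the
 *	argv[] strings, argv itself, the bootinfo block and all map
 *	entries) from loader virtual addresses to physical addresses,
 *	then jump to the loaded image via startprog(), handing it the
 *	physical address of the root map.
 */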
int
vmem_exec(caddr_t entry, int argc, char *argv[], struct bootinfo *bi)
{
	int i;
	caddr_t p;

	if (map == NULL) {
		debug_printf(TEXT("vmem is not initialized.\n"));
		msg_printf(MSG_ERROR, whoami, TEXT("vmem is not initialized.\n"));
		return (-1);
	}

	debug_printf(TEXT("entry point=0x%x\n"), entry);

	map->entry = entry;
	map->base = kernel_start;

	for (i = 0; i < argc; i++) {
		argv[i] = vtophysaddr(argv[i]);
	}
	map->arg0 = (caddr_t)argc;
	map->arg1 = vtophysaddr((caddr_t)argv);
	map->arg2 = vtophysaddr((caddr_t)bi);
	map->arg3 = NULL;

	if (map->arg1 == NULL || map->arg2 == NULL) {
		debug_printf(TEXT("arg, vtophysaddr() failed\n"));
		msg_printf(MSG_ERROR, whoami,
			   TEXT("arg, vtophysaddr() failed\n"));
		return (-1);
	}

	for (i = 0; (p = map->leaf[i / map->leafsize][i % map->leafsize]) != NULL; i++) {
		if ((p = vtophysaddr(p)) == NULL) {
			debug_printf(TEXT("vtophysaddr() failed, page %d (addr=0x%x)\n"),
				     i, map->leaf[i / map->leafsize][i % map->leafsize]);
			msg_printf(MSG_ERROR, whoami,
				   TEXT("vtophysaddr() failed, page %d (addr=0x%x)\n"),
				   i, map->leaf[i / map->leafsize][i % map->leafsize]);
			return (-1);
		}
		map->leaf[i / map->leafsize][i % map->leafsize] = p;
	}

	for (i = 0; i < map->nleaves; i++) {
		if ((p = vtophysaddr((caddr_t)map->leaf[i])) == NULL) {
			debug_printf(TEXT("vtophysaddr() failed, leaf %d (addr=0x%x)\n"),
				     i, map->leaf[i]);
			msg_printf(MSG_ERROR, whoami,
				   TEXT("vtophysaddr() failed, leaf %d (addr=0x%x)\n"),
				   i, map->leaf[i]);
			return (-1);
		}
		map->leaf[i] = (caddr_t*)p;
	}

	debug_printf(TEXT("execute startprog()\n"));
	//return (-1);
	return (startprog(vtophysaddr((caddr_t)map)));
}

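/*
 * getpagesize():
 *	Return the CPU page size, read once from GetSystemInfo() and
 *	cached in a static.
 */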
DWORD
getpagesize()
{
	static int init = 0;
	static SYSTEM_INFO info;

	if (!init) {
		GetSystemInfo(&info);
		init = 1;
	}

	return (info.dwPageSize);
}

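/*
 * vmem_alloc():
 *	Return the first unused heap page whose discovered physical
 *	address lies outside the kernel's destination range
 *	[kernel_start, kernel_end), or NULL if none is left.
 */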
caddr_t
vmem_alloc()
{
	int i;
	struct page_header_s *page;

	for (i = 0; i < npages; i++) {
		page = (struct page_header_s*)&heap[getpagesize() * i];
		if (!phys_addrs[i].in_use &&
		    !(kernel_start <= phys_addrs[i].addr &&
		      phys_addrs[i].addr < kernel_end)) {
			phys_addrs[i].in_use = 1;
			return ((caddr_t)page);
		}
	}
	return (NULL);
}

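/*
 * alloc_kpage():
 *	Allocate the staging page for the kernel page that will live
 *	at 'phys_addr'.  Prefer the heap page already sitting at that
 *	physical address (it is then in place from the start);
 *	otherwise fall back to any free page from vmem_alloc().
 */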
static caddr_t
alloc_kpage(caddr_t phys_addr)
{
	int i;
	struct page_header_s *page;

	for (i = 0; i < npages; i++) {
		page = (struct page_header_s*)&heap[getpagesize() * i];
		if (phys_addrs[i].addr == phys_addr) {
			if (phys_addrs[i].in_use) {
				debug_printf(TEXT("page %d (phys addr=0x%x) is already in use\n"),
					     i, phys_addr);
				msg_printf(MSG_ERROR, whoami,
					   TEXT("page %d (phys addr=0x%x) is already in use\n"),
					   i, phys_addr);
				return (NULL);
			}
			phys_addrs[i].in_use = 1;
			return ((caddr_t)page);
		}
	}
	return (vmem_alloc());
}

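/*
 * vmem_get():
 *	Translate a physical address within the kernel's destination
 *	range into the loader virtual address where that byte is
 *	currently staged.  If 'length' is non-NULL it receives the
 *	number of bytes left in the page after 'phys_addr'.
 */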
caddr_t
vmem_get(caddr_t phys_addr, int *length)
{
	int pageno = (phys_addr - kernel_start) / getpagesize();
	int offset = (phys_addr - kernel_start) % getpagesize();

	if (map == NULL || pageno < 0 || npages <= pageno) {
		return (NULL);
	}
	if (length) {
		*length = getpagesize() - offset;
	}
	return (map->leaf[pageno / map->leafsize][pageno % map->leafsize] + offset);
}

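/*
 * vtophysaddr():
 *	Translate a pointer into the staging heap into the physical
 *	address discovered for that page by vmem_init().  Returns
 *	NULL if the pointer does not fall inside the heap.
 */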
caddr_t
vtophysaddr(caddr_t page)
{
	int pageno = (page - heap) / getpagesize();
	int offset = (page - heap) % getpagesize();

	if (map == NULL || pageno < 0 || npages <= pageno) {
		return (NULL);
	}
	return (phys_addrs[pageno].addr + offset);
}

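/*
 * vmem_init():
 *	Prepare the staging area for a kernel that will occupy the
 *	physical range [start, end).  Allocates heap pages for the
 *	image, the leaf tables, the root map, the startprog() code,
 *	the argument page and the bootinfo page, stamps each page with
 *	a magic header, discovers the physical address of every page
 *	by scanning physical memory, and finally builds the root/leaf
 *	map with each kernel page placed at its own physical address
 *	whenever possible.
 *
 *	Typical call sequence (a sketch only; the real driver code
 *	lives elsewhere in pbsdboot):
 *
 *		vmem_init(kern_start, kern_end);
 *		...copy kernel sections in via vmem_get()...
 *		...build argv/bootinfo in pages from vmem_alloc()...
 *		vmem_exec(entry, argc, argv, bi);
 */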
int
vmem_init(caddr_t start, caddr_t end)
{
	int i, N, pageno;
	unsigned long magic0;
	unsigned long magic1;
	int nfounds;
	struct page_header_s *page;
	long size;
	int nleaves;

	/* align with page size */
	start = (caddr_t)(((long)start / getpagesize()) * getpagesize());
	end = (caddr_t)((((long)end + getpagesize() - 1) / getpagesize()) * getpagesize());

	kernel_start = start;
	kernel_end = end;
	size = end - start;

	/*
	 *  program image pages.
	 */
	npages = (size + getpagesize() - 1) / getpagesize();

	/*
	 *  map leaf pages.
	 *  npages plus one for end mark.
	 */
	npages += (nleaves = ((npages * sizeof(caddr_t) + getpagesize()) / getpagesize()));

	/*
	 *  map root page, startprog code page, argument page and bootinfo page.
	 */
	npages += 4;

	/*
	 *  allocate pages
	 */
	debug_printf(TEXT("allocate %d pages\n"), npages);
	heap = (unsigned char*)
		VirtualAlloc(0,
			     npages * getpagesize(),
			     MEM_COMMIT,
			     PAGE_READWRITE | PAGE_NOCACHE);
	if (heap == NULL) {
		debug_printf(TEXT("can't allocate heap\n"));
		msg_printf(MSG_ERROR, whoami, TEXT("can't allocate heap\n"));
		goto error_cleanup;
	}

	/*
	 *  allocate address table.
	 */
	phys_addrs = (struct addr_s *)
		VirtualAlloc(0,
			     npages * sizeof(struct addr_s),
			     MEM_COMMIT,
			     PAGE_READWRITE);
	if (phys_addrs == NULL) {
		debug_printf(TEXT("can't allocate address table\n"));
		msg_printf(MSG_ERROR, whoami, TEXT("can't allocate address table\n"));
		goto error_cleanup;
	}

	/*
	 *  set magic number for each page in buffer.
	 */
	magic0 = Random();
	magic1 = Random();
	debug_printf(TEXT("magic=%08x%08x\n"), magic0, magic1);

	for (i = 0; i < npages; i++) {
		page = (struct page_header_s*)&heap[getpagesize() * i];
		page->magic0 = magic0;
		page->pageno = i;
		page->magic1 = magic1;
		phys_addrs[i].addr = 0;
		phys_addrs[i].in_use = 0;
	}

	/*
	 *  Scan whole physical memory.
	 */
	nfounds = 0;
	for (N = 0; N < MEM_BLOCKS && nfounds < npages; N++) {
		unsigned char* mem;
		int res;

		mem = (unsigned char*)
			VirtualAlloc(0,
				     MEM_BLOCK_SIZE,
				     MEM_RESERVE,
				     PAGE_NOACCESS);
		if (mem == NULL) {
			debug_printf(TEXT("can't reserve scan window\n"));
			msg_printf(MSG_ERROR, whoami, TEXT("can't reserve scan window\n"));
			goto error_cleanup;
		}
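		/*
		 *  Map the N'th 4MB block of physical memory into the
		 *  reserved window.  DRAM is assumed to start at
		 *  physical address 0x80000000 here; with PAGE_PHYSICAL,
		 *  Windows CE expects the source physical address
		 *  shifted right by 8 bits (divided by 256).
		 */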
		res = VirtualCopy((LPVOID)mem,
				  //(LPVOID)((0xa0000000 + MEM_BLOCK_SIZE * N) >> 8),
				  (LPVOID)((0x80000000 + MEM_BLOCK_SIZE * N) >> 8),
				  MEM_BLOCK_SIZE,
				  PAGE_READWRITE | PAGE_NOCACHE | PAGE_PHYSICAL);
		if (!res) {
			debug_printf(TEXT("VirtualCopy() failed, block %d\n"), N);
			VirtualFree(mem, 0, MEM_RELEASE);
			continue;
		}

		for (i = 0; i < (int)(MEM_BLOCK_SIZE/getpagesize()); i++) {
			page = (struct page_header_s*)&mem[getpagesize() * i];
			if (page->magic0 == magic0 &&
			    page->magic1 == magic1) {
				pageno = page->pageno;
				if (0 <= pageno && pageno < npages &&
				    phys_addrs[pageno].addr == 0) {
					phys_addrs[pageno].addr =
						(unsigned char*)(0x80000000 + MEM_BLOCK_SIZE * N +
								 getpagesize() * i);
					page->magic0 = 0;
					page->magic1 = 0;
					if (npages <= ++nfounds) {
						break;
					}
				} else {
					debug_printf(TEXT("invalid page header\n"));
					msg_printf(MSG_ERROR, whoami, TEXT("invalid page header\n"));
					goto error_cleanup;
				}
			}
		}
		VirtualFree(mem, 0, MEM_RELEASE);
	}

	if (nfounds < npages) {
		debug_printf(TEXT("lost %d pages\n"), npages - nfounds);
		msg_printf(MSG_ERROR, whoami, TEXT("lost %d pages\n"), npages - nfounds);
		goto error_cleanup;
	}

	/*
	 *  allocate root page
	 */
	if ((map = (struct map_s*)vmem_alloc()) == NULL) {
		debug_printf(TEXT("can't allocate root page.\n"));
		msg_printf(MSG_ERROR, whoami, TEXT("can't allocate root page.\n"));
		goto error_cleanup;
	}
	map->nleaves = nleaves;
	map->leafsize = getpagesize() / sizeof(caddr_t);
	map->pagesize = getpagesize();

	/*
	 *  allocate leaf pages
	 */
	for (i = 0; i < nleaves; i++) {
		if ((map->leaf[i] = (caddr_t*)vmem_alloc()) == NULL) {
			debug_printf(TEXT("can't allocate leaf page.\n"));
			msg_printf(MSG_ERROR, whoami, TEXT("can't allocate leaf page.\n"));
			goto error_cleanup;
		}
	}

	/*
	 *  allocate kernel pages
	 */
	for (i = 0; start < kernel_end; start += getpagesize(), i++) {
		caddr_t *leaf = map->leaf[i / map->leafsize];
		if ((leaf[i % map->leafsize] = alloc_kpage(start)) == NULL) {
			debug_printf(TEXT("can't allocate page 0x%x.\n"), start);
			msg_printf(MSG_ERROR, whoami, TEXT("can't allocate page 0x%x.\n"), start);
			goto error_cleanup;
		}
	}
	map->leaf[i / map->leafsize][i % map->leafsize] = NULL; /* END MARK */

	return (0);

 error_cleanup:
	vmem_free();

	return (-1);
}

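/*
 * vmem_free():
 *	Release the staging heap and the physical address table.
 *	The root map lives inside the heap, so only the pointer is
 *	cleared here.
 */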
void
vmem_free()
{
	map = NULL;
	if (heap) {
		VirtualFree(heap, 0, MEM_RELEASE);
		heap = NULL;
	}
	if (phys_addrs) {
		VirtualFree(phys_addrs, 0, MEM_RELEASE);
		phys_addrs = NULL;
	}
}

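/*
 * vmem_dump_map():
 *	Debug helper: for each page of the kernel's destination range,
 *	print the staging virtual address and its physical address,
 *	marking pages already at their final physical address with "*"
 *	and relocated ones with "reloc".
 */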
void
vmem_dump_map()
{
	caddr_t addr, page, paddr;

	if (map == NULL) {
		debug_printf(TEXT("no page map\n"));
		return;
	}

	for (addr = kernel_start; addr < kernel_end; addr += getpagesize()) {
		page = vmem_get(addr, NULL);
		paddr = vtophysaddr(page);
		debug_printf(TEXT("%08X: vaddr=%08X paddr=%08X %s\n"),
			     addr, page, paddr, addr == paddr ? TEXT("*") : TEXT("reloc"));
	}
}