vmem.c revision 1.3 1 /* $NetBSD: vmem.c,v 1.3 1999/09/26 02:42:52 takemura Exp $ */
2
3 /*-
4 * Copyright (c) 1999 Shin Takemura.
5 * All rights reserved.
6 *
7 * This software is part of the PocketBSD.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by the PocketBSD project
20 * and its contributors.
21 * 4. Neither the name of the project nor the names of its contributors
22 * may be used to endorse or promote products derived from this software
23 * without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * SUCH DAMAGE.
36 *
37 */
38 #include <pbsdboot.h>
39
/*
 * Per-heap-page bookkeeping: records where page i of the heap actually
 * lives in physical memory, and whether it has been handed out yet.
 */
struct addr_s {
	caddr_t addr;	/* kernel-virtual physical address of this heap page
			 * (0x80000000 | phys on MIPS — see vmem_init);
			 * 0 until the memory scan locates the page */
	int in_use;	/* nonzero once allocated by vmem_alloc/alloc_kpage */
};
44
/*
 * Marker written at the start of every heap page before the physical
 * memory scan.  vmem_init() later walks raw DRAM looking for pages whose
 * two random magic values match, which identifies where Windows CE
 * physically placed each page of the VirtualAlloc'd heap.
 */
struct page_header_s {
	unsigned long magic0;	/* first random stamp (Random() in vmem_init) */
	int pageno;		/* index of this page within the heap */
	unsigned long magic1;	/* second random stamp, guards against chance hits */
};
50
/* Root of the page map handed to the boot stub; NULL until vmem_init() succeeds. */
struct map_s *map = NULL;
/* One entry per heap page; parallel to 'heap'.  Allocated in vmem_init(). */
struct addr_s *phys_addrs = NULL;
/* Page pool allocated uncached via VirtualAlloc; npages * pagesize bytes. */
unsigned char* heap = NULL;
/* Total pages in 'heap' (kernel image + leaf tables + root/arg/bootinfo pages). */
int npages;
/* Kernel load range [kernel_start, kernel_end), page-aligned by vmem_init(). */
caddr_t kernel_start;
caddr_t kernel_end;
57
58 int
59 vmem_exec(caddr_t entry, int argc, char *argv[], struct bootinfo *bi)
60 {
61 int i;
62 caddr_t p;
63
64 if (map == NULL) {
65 debug_printf(TEXT("vmem is not initialized.\n"));
66 msg_printf(MSG_ERROR, whoami, TEXT("vmem is not initialized.\n"));
67 return (-1);
68 }
69
70 debug_printf(TEXT("entry point=0x%x\n"), entry);
71
72 map->entry = entry;
73 map->base = kernel_start;
74
75 for (i = 0; i < argc; i++) {
76 argv[i] = vtophysaddr(argv[i]);
77 }
78 map->arg0 = (caddr_t)argc;
79 map->arg1 = vtophysaddr((caddr_t)argv);
80 map->arg2 = vtophysaddr((caddr_t)bi);
81 map->arg3 = NULL;
82
83 if (map->arg1 == NULL || map->arg2 == NULL) {
84 debug_printf(TEXT("arg, vtophysaddr() failed\n"));
85 msg_printf(MSG_ERROR, whoami,
86 TEXT("arg, vtophysaddr() failed\n"));
87 return (-1);
88 }
89
90 for (i = 0; p = map->leaf[i / map->leafsize][i % map->leafsize]; i++) {
91 if ((p = vtophysaddr(p)) == NULL) {
92 debug_printf(TEXT("vtophysaddr() failed, page %d (addr=0x%x) \n"),
93 i, map->leaf[i / map->leafsize][i % map->leafsize]);
94 msg_printf(MSG_ERROR, whoami,
95 TEXT("vtophysaddr() failed, page %d (addr=0x%x) \n"),
96 i, map->leaf[i / map->leafsize][i % map->leafsize]);
97 return (-1);
98 }
99 map->leaf[i / map->leafsize][i % map->leafsize] = p;
100 }
101
102 for (i = 0; i < map->nleaves; i++) {
103 if ((p = vtophysaddr((caddr_t)map->leaf[i])) == NULL) {
104 debug_printf(TEXT("vtophysaddr() failed, leaf %d (addr=0x%x) \n"),
105 i, map->leaf[i / map->leafsize][i % map->leafsize]);
106 msg_printf(MSG_ERROR, whoami,
107 TEXT("vtophysaddr() failed, leaf %d (addr=0x%x) \n"),
108 i, map->leaf[i / map->leafsize][i % map->leafsize]);
109 return (-1);
110 }
111 map->leaf[i] = (caddr_t*)p;
112 }
113
114 debug_printf(TEXT("execute startprog()\n"));
115 //return (-1);
116 return ((*system_info.si_boot)(vtophysaddr((caddr_t)map)));
117 }
118
119 caddr_t
120 vmem_alloc()
121 {
122 int i, pagesize;
123 struct page_header_s *page;
124
125 pagesize = system_info.si_pagesize;
126 for (i = 0; i < npages; i++) {
127 page = (struct page_header_s*)&heap[pagesize * i];
128 if (!phys_addrs[i].in_use &&
129 !(kernel_start <= phys_addrs[i].addr &&
130 phys_addrs[i].addr < kernel_end)) {
131 phys_addrs[i].in_use = 1;
132 return ((caddr_t)page);
133 }
134 }
135 return (NULL);
136 }
137
138 static caddr_t
139 alloc_kpage(caddr_t phys_addr)
140 {
141 int i, pagesize;
142 struct page_header_s *page;
143
144 pagesize = system_info.si_pagesize;
145 for (i = 0; i < npages; i++) {
146 page = (struct page_header_s*)&heap[pagesize * i];
147 if (phys_addrs[i].addr == phys_addr) {
148 if (phys_addrs[i].in_use) {
149 debug_printf(TEXT("page %d (phys addr=0x%x) is already in use\n"),
150 i, phys_addr);
151 msg_printf(MSG_ERROR, whoami,
152 TEXT("page %d (phys addr=0x%x) is already in use\n"),
153 i, phys_addr);
154 return (NULL);
155 }
156 phys_addrs[i].in_use = 1;
157 return ((caddr_t)page);
158 }
159 }
160 return (vmem_alloc());
161 }
162
163 caddr_t
164 vmem_get(caddr_t phys_addr, int *length)
165 {
166 int pagesize = system_info.si_pagesize;
167 int pageno = (phys_addr - kernel_start) / pagesize;
168 int offset = (phys_addr - kernel_start) % pagesize;
169
170 if (map == NULL || pageno < 0 || npages <= pageno) {
171 return (NULL);
172 }
173 if (length) {
174 *length = pagesize - offset;
175 }
176 return (map->leaf[pageno / map->leafsize][pageno % map->leafsize] + offset);
177 }
178
179 caddr_t
180 vtophysaddr(caddr_t page)
181 {
182 int pageno = (page - heap) / system_info.si_pagesize;
183 int offset = (page - heap) % system_info.si_pagesize;
184
185 if (map == NULL || pageno < 0 || npages <= pageno) {
186 return (NULL);
187 }
188 return (phys_addrs[pageno].addr + offset);
189 }
190
/*
 * Prepare the boot heap and page map for loading a kernel at physical
 * range [start, end).
 *
 * Strategy: VirtualAlloc an uncached heap big enough for the kernel image
 * plus the map structures, stamp every page with random magic values, then
 * map physical DRAM block by block (VirtualCopy) and scan it for those
 * stamps to learn each heap page's physical address.  Finally build the
 * root map, leaf tables, and reserve the pages covering the kernel range.
 *
 * Returns 0 on success, -1 on failure (all resources released via
 * vmem_free()).  Sets the globals heap, phys_addrs, npages, map,
 * kernel_start and kernel_end.
 */
int
vmem_init(caddr_t start, caddr_t end)
{
#define MEM_BLOCK_SIZE	(1024*1024*4)	/* must be greater than page size */
	int i, m, pageno;
	unsigned long magic0;
	unsigned long magic1;
	int nfounds;
	struct page_header_s *page;
	long size;
	int nleaves;
	int pagesize, memblocks;

	pagesize = system_info.si_pagesize;
	/* Number of 4MB DRAM windows we may have to scan. */
	memblocks = (system_info.si_drammaxsize) / MEM_BLOCK_SIZE;

	/* align with page size */
	start = (caddr_t)(((long)start / pagesize) * pagesize);
	end = (caddr_t)((((long)end + pagesize - 1) / pagesize) * pagesize);

	kernel_start = start;
	kernel_end = end;
	size = end - start;

	/*
	 * program image pages.
	 */
	npages = (size + pagesize - 1) / pagesize;

	/*
	 * map leaf pages.
	 * npages plus one for end mark.
	 */
	npages += (nleaves = ((npages * sizeof(caddr_t) + pagesize) / pagesize));

	/*
	 * map root page, startprg code page, argument page and bootinfo page.
	 */
	npages += 4;

	/*
	 * allocate pages
	 * (PAGE_NOCACHE so writes land in DRAM immediately and the
	 * magic-number scan below sees them through the physical mapping.)
	 */
	debug_printf(TEXT("allocate %d pages\n"), npages);
	heap = (unsigned char*)
	    VirtualAlloc(0,
	        npages * pagesize,
	        MEM_COMMIT,
	        PAGE_READWRITE | PAGE_NOCACHE);
	if (heap == NULL) {
		debug_printf(TEXT("can't allocate heap\n"));
		msg_printf(MSG_ERROR, whoami, TEXT("can't allocate heap\n"));
		goto error_cleanup;
	}

	/*
	 * allocate address table.
	 */
	phys_addrs = (struct addr_s *)
	    VirtualAlloc(0,
	        npages * sizeof(struct addr_s),
	        MEM_COMMIT,
	        PAGE_READWRITE);
	if (phys_addrs == NULL) {
		debug_printf(TEXT("can't allocate address table\n"));
		msg_printf(MSG_ERROR, whoami, TEXT("can't allocate address table\n"));
		goto error_cleanup;
	}

	/*
	 * set magic number for each page in buffer.
	 * Random stamps make a false positive in the DRAM scan unlikely.
	 */
	magic0 = Random();
	magic1 = Random();
	debug_printf(TEXT("magic=%08x%08x\n"), magic0, magic1);

	for (i = 0; i < npages; i++) {
		page = (struct page_header_s*)&heap[pagesize * i];
		page->magic0 = magic0;
		page->pageno = i;
		page->magic1 = magic1;
		phys_addrs[i].addr = 0;	/* 0 == not yet located */
		phys_addrs[i].in_use = 0;
	}

	/*
	 * Scan whole physical memory.
	 * Each 4MB DRAM block is temporarily mapped with VirtualCopy
	 * (physical address >> 8, per the CE PAGE_PHYSICAL convention)
	 * and searched for our stamped page headers.
	 */
	nfounds = 0;
	for (m = 0; (m < memblocks) && (nfounds < npages); m++) {
		unsigned char* mem;
		/* Map physical memory block */
		mem = (unsigned char*)VirtualAlloc(0, MEM_BLOCK_SIZE,
		    MEM_RESERVE, PAGE_NOACCESS);
		if(!VirtualCopy((LPVOID)mem, (LPVOID)
		    ((system_info.si_dramstart + MEM_BLOCK_SIZE * m) >> 8),
		    MEM_BLOCK_SIZE,
		    PAGE_READWRITE | PAGE_NOCACHE | PAGE_PHYSICAL)) {
			/* Block not mappable (hole in DRAM); try the next one. */
			VirtualFree(mem, 0, MEM_RELEASE);
			continue;
		}
		/* Find preliminary allocated pages */
		for (i = 0; i < (int)(MEM_BLOCK_SIZE / pagesize); i++) {
			page = (struct page_header_s*)&mem[pagesize * i];
			if (page->magic0 == magic0 &&
			    page->magic1 == magic1) {
				pageno = page->pageno;
				if (0 <= pageno && pageno < npages &&
				    phys_addrs[pageno].addr == 0) {
					/* Set kernel virtual addr. XXX mips dependent */
					phys_addrs[pageno].addr = (unsigned char*)
					    ((0x80000000 |
					    system_info.si_dramstart) +
					    MEM_BLOCK_SIZE * m +
					    pagesize * i);
					/* Clear the stamp so a duplicate hit is an error. */
					page->magic0 = 0;
					page->magic1 = 0;
					if (npages <= ++nfounds) {
						break;
					}
				} else {
					/* Matching magic but bad/duplicate page number. */
					debug_printf(TEXT("invalid page header\n"));
					msg_printf(MSG_ERROR, whoami, TEXT("invalid page header\n"));
					goto error_cleanup;
				}
			}
		}
		VirtualFree(mem, 0, MEM_RELEASE);
	}

	if (nfounds < npages) {
		debug_printf(TEXT("lost %d pages\n"), npages - nfounds);
		msg_printf(MSG_ERROR, whoami,
		    TEXT("lost %d pages (allocated %d pages)\n"),
		    npages - nfounds, npages);
		goto error_cleanup;
	}

	/*
	 * allocate root page
	 */
	if ((map = (struct map_s*)vmem_alloc()) == NULL) {
		debug_printf(TEXT("can't allocate root page.\n"));
		msg_printf(MSG_ERROR, whoami, TEXT("can't allocate root page.\n"));
		goto error_cleanup;
	}
	map->nleaves = nleaves;
	map->leafsize = pagesize / sizeof(caddr_t);
	map->pagesize = pagesize;

	/*
	 * allocate leaf pages
	 */
	for (i = 0; i < nleaves; i++) {
		if ((map->leaf[i] = (caddr_t*)vmem_alloc()) == NULL) {
			debug_printf(TEXT("can't allocate leaf page.\n"));
			msg_printf(MSG_ERROR, whoami, TEXT("can't allocate leaf page.\n"));
			goto error_cleanup;
		}
	}

	/*
	 * allocate kernel pages
	 * (one heap page per kernel page, identity-placed when possible)
	 */
	for (i = 0; start < kernel_end; start += pagesize, i++) {
		caddr_t *leaf = map->leaf[i / map->leafsize];
		if ((leaf[i % map->leafsize] = alloc_kpage(start)) == NULL) {
			debug_printf(TEXT("can't allocate page 0x%x.\n"), start);
			msg_printf(MSG_ERROR, whoami, TEXT("can't allocate page 0x%x.\n"), start);
			goto error_cleanup;
		}
	}
	map->leaf[i / map->leafsize][i % map->leafsize] = NULL; /* END MARK */

	return (0);

 error_cleanup:
	vmem_free();

	return (-1);
}
372
373 void
374 vmem_free()
375 {
376 map = NULL;
377 if (heap) {
378 VirtualFree(heap, 0, MEM_RELEASE);
379 heap = NULL;
380 }
381 if (phys_addrs) {
382 VirtualFree(phys_addrs, 0, MEM_RELEASE);
383 phys_addrs = NULL;
384 }
385 }
386
387 void
388 vmem_dump_map()
389 {
390 caddr_t addr, page, paddr;
391
392 if (map == NULL) {
393 debug_printf(TEXT("no page map\n"));
394 return;
395 }
396
397 for (addr = kernel_start; addr < kernel_end; addr += system_info.si_pagesize) {
398 page = vmem_get(addr, NULL);
399 paddr = vtophysaddr(page);
400 debug_printf(TEXT("%08X: vaddr=%08X paddr=%08X %s\n"),
401 addr, page, paddr, addr == paddr ? TEXT("*") : TEXT("reloc"));
402
403 }
404 }
405