/*	$Id: uvm_km.c,v 1.1 1998/02/05 06:25:10 mrg Exp $	*/

/*
 * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
 *         >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
 */
/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *	Washington University, the University of California, Berkeley and
 *	its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_kern.c   8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution (at) CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_km.c: handle kernel memory allocation and management
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#include <uvm/uvm.h>

/*
 * global data structures
 */

vm_map_t kernel_map = NULL;

/*
 * local functions
 */

static int uvm_km_get __P((struct uvm_object *, vm_offset_t,
    vm_page_t *, int *, int, vm_prot_t, int, int));
/*
 * local data structures
 */

static struct vm_map		kernel_map_store;
static struct uvm_object	kernel_object_store;
static struct uvm_object	kmem_object_store;
static struct uvm_object	mb_object_store;

static struct uvm_pagerops km_pager = {
	NULL,		/* init */
	NULL,		/* attach */
	NULL,		/* reference */
	NULL,		/* detach */
	NULL,		/* fault */
	NULL,		/* flush */
	uvm_km_get,	/* get */
	/* ... rest are NULL */
};

/*
 * uvm_km_get: pager get function for kernel objects
 *
 * => currently we do not support pageout to the swap area, so this
 *    pager is very simple.  eventually we may want an anonymous
 *    object pager which will do paging.
 */
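
/*
 * Illustration only (not in the original source): a fault-time caller
 * reaches this routine through the object's pager ops (the "get" entry
 * of km_pager above).  A sketch with hypothetical locals, assuming the
 * member is named pgo_get:
 *
 *	npages = 1;
 *	pps[0] = NULL;
 *	error = uobj->pgops->pgo_get(uobj, offset, pps, &npages, 0,
 *	    VM_PROT_READ, UVM_ADV_RANDOM, PGO_LOCKED);
 *
 * PGO_LOCKED requests the cheap "already resident" pass (step 1 below);
 * a VM_PAGER_UNLOCK return tells the caller to retry without PGO_LOCKED
 * so that we may sleep or allocate (step 2 below).
 */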


static int uvm_km_get(uobj, offset, pps, npagesp, centeridx, access_type,
    advice, flags)

struct uvm_object *uobj;
vm_offset_t offset;
struct vm_page **pps;
int *npagesp;
int centeridx, advice, flags;
vm_prot_t access_type;

{
	vm_offset_t current_offset;
	vm_page_t ptmp;
	int lcv, gotpages, maxpages;
	boolean_t done;
	UVMHIST_FUNC("uvm_km_get"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "flags=%d", flags,0,0,0);

	/*
	 * get number of pages
	 */

	maxpages = *npagesp;

	/*
	 * step 1: handle the case where the fault data structures are locked.
	 */

	if (flags & PGO_LOCKED) {

		/*
		 * step 1a: get pages that are already resident.   only do this
		 * if the data structures are locked (i.e. the first time
		 * through).
		 */

		done = TRUE;	/* be optimistic */
		gotpages = 0;	/* # of pages we got so far */

		for (lcv = 0, current_offset = offset ; lcv < maxpages ;
		    lcv++, current_offset += PAGE_SIZE) {

			/* do we care about this page?  if not, skip it */
			if (pps[lcv] == PGO_DONTCARE)
				continue;

			/* lookup page */
			ptmp = uvm_pagelookup(uobj, current_offset);

			/* null?  attempt to allocate the page */
			if (ptmp == NULL) {
				ptmp = uvm_pagealloc(uobj, current_offset, NULL);
				if (ptmp) {
					ptmp->flags &= ~(PG_BUSY|PG_FAKE);	/* new page */
					UVM_PAGE_OWN(ptmp, NULL);
					ptmp->wire_count = 1;	/* XXX: prevents pageout attempts */
					uvm_pagezero(ptmp);
				}
			}

			/* to be useful must get a non-busy, non-released page */
			if (ptmp == NULL ||
			    (ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) {
				if (lcv == centeridx || (flags & PGO_ALLPAGES) != 0)
					done = FALSE;	/* need to do a wait or I/O! */
				continue;
			}

			/* useful page: busy/lock it and plug it in our result array */
			ptmp->flags |= PG_BUSY;	/* caller must un-busy this page */
			UVM_PAGE_OWN(ptmp, "uvm_km_get1");
			pps[lcv] = ptmp;
			gotpages++;

		}	/* "for" lcv loop */

		/*
		 * step 1b: now we've either done everything needed or we
		 * need to unlock and do some waiting or I/O.
		 */

		UVMHIST_LOG(maphist, "<- done (done=%d)", done, 0,0,0);

		*npagesp = gotpages;
		if (done)
			return(VM_PAGER_OK);		/* bingo! */
		else
			return(VM_PAGER_UNLOCK);	/* EEK!   Need to unlock and I/O */
	}

	/*
	 * step 2: get non-resident or busy pages.
	 * object is locked.   data structures are unlocked.
	 */

	for (lcv = 0, current_offset = offset ; lcv < maxpages ;
	    lcv++, current_offset += PAGE_SIZE) {

		/* skip over pages we've already gotten or don't want */
		/* skip over pages we don't _have_ to get */
		if (pps[lcv] != NULL ||
		    (lcv != centeridx && (flags & PGO_ALLPAGES) == 0))
			continue;

		/*
		 * we have yet to locate the current page (pps[lcv]).   we first
		 * look for a page that is already at the current offset.   if
		 * we find a page, we check to see if it is busy or released.
		 * if that is the case, then we sleep on the page until it is
		 * no longer busy or released and repeat the lookup.   if the
		 * page we found is neither busy nor released, then we busy it
		 * (so we own it) and plug it into pps[lcv].   this 'break's
		 * the following while loop and indicates we are ready to move
		 * on to the next page in the "lcv" loop above.
		 *
		 * if we exit the while loop with pps[lcv] still set to NULL,
		 * then it means that we allocated a new busy/fake/clean page
		 * ptmp in the object and we need to do I/O to fill in the data.
		 */

		while (pps[lcv] == NULL) {	/* top of "pps" while loop */

			/* look for a current page */
			ptmp = uvm_pagelookup(uobj, current_offset);

			/* nope?   allocate one now (if we can) */
			if (ptmp == NULL) {

				ptmp = uvm_pagealloc(uobj, current_offset, NULL);	/* alloc */

				/* out of RAM? */
				if (ptmp == NULL) {
					simple_unlock(&uobj->vmobjlock);
					uvm_wait("kmgetwait1");
					simple_lock(&uobj->vmobjlock);
					continue;	/* goto top of pps while loop */
				}

				/*
				 * got new page ready for I/O.  break pps
				 * while loop.  pps[lcv] is still NULL.
				 */
				break;
			}

			/* page is there, see if we need to wait on it */
			if ((ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) {
				ptmp->flags |= PG_WANTED;
				UVM_UNLOCK_AND_WAIT(ptmp, &uobj->vmobjlock, 0, "uvn_get", 0);
				simple_lock(&uobj->vmobjlock);
				continue;	/* goto top of pps while loop */
			}

			/*
			 * if we get here then the page has become resident
			 * and unbusy between steps 1 and 2.  we busy it now
			 * (so we own it) and set pps[lcv] (so that we exit
			 * the while loop).
			 */
			ptmp->flags |= PG_BUSY;	/* we own it, caller must un-busy */
			UVM_PAGE_OWN(ptmp, "uvm_km_get2");
			pps[lcv] = ptmp;
		}

		/*
		 * if we own a valid page at the correct offset, pps[lcv]
		 * will point to it.   nothing more to do except go to the
		 * next page.
		 */

		if (pps[lcv])
			continue;	/* next lcv */

		/*
		 * we have a "fake/busy/clean" page that we just allocated.
		 * do the needed "i/o" (in this case that means zero it).
		 */

		uvm_pagezero(ptmp);
		ptmp->flags &= ~(PG_FAKE);
		ptmp->wire_count = 1;	/* XXX: prevents pageout attempts */
		pps[lcv] = ptmp;

	}	/* lcv loop */

	/*
	 * finally, unlock object and return.
	 */

	simple_unlock(&uobj->vmobjlock);
	UVMHIST_LOG(maphist, "<- done (OK)",0,0,0,0);
	return(VM_PAGER_OK);
}

/*
 * uvm_km_init: init kernel maps and objects to reflect reality (i.e.
 * KVM already allocated for text, data, bss, and static data structures).
 *
 * => KVM is defined by VM_MIN_KERNEL_ADDRESS/VM_MAX_KERNEL_ADDRESS.
 *    we assume that [min -> start] has already been allocated and that
 *    "end" is the end.
 */
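
/*
 * Illustration only (not in the original source): machine-dependent
 * startup code calls this once, after reserving KVA for the kernel
 * image and static data.  With hypothetical bootstrap variables:
 *
 *	uvm_km_init(kernel_virtual_start, kernel_virtual_end);
 *
 * where [VM_MIN_KERNEL_ADDRESS, kernel_virtual_start) is already in use.
 */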

void uvm_km_init(start, end)

vm_offset_t start, end;

{
	vm_offset_t base = VM_MIN_KERNEL_ADDRESS;

	/*
	 * first, init kernel memory objects.
	 */

	/* kernel_object: for pageable anonymous kernel memory (eventually) */
	simple_lock_init(&kernel_object_store.vmobjlock);
	kernel_object_store.pgops = &km_pager;
	TAILQ_INIT(&kernel_object_store.memq);
	kernel_object_store.uo_npages = 0;
	kernel_object_store.uo_refs = UVM_OBJ_KERN;	/* we are special.  we never die */
	uvm.kernel_object = &kernel_object_store;

	/* kmem_object: for malloc'd memory (always wired) */
	simple_lock_init(&kmem_object_store.vmobjlock);
	kmem_object_store.pgops = &km_pager;
	TAILQ_INIT(&kmem_object_store.memq);
	kmem_object_store.uo_npages = 0;
	kmem_object_store.uo_refs = UVM_OBJ_KERN;	/* we are special.  we never die */
	uvmexp.kmem_object = &kmem_object_store;

	/* mb_object: for mbuf memory (always wired) */
	simple_lock_init(&mb_object_store.vmobjlock);
	mb_object_store.pgops = &km_pager;
	TAILQ_INIT(&mb_object_store.memq);
	mb_object_store.uo_npages = 0;
	mb_object_store.uo_refs = UVM_OBJ_KERN;	/* we are special.  we never die */
	uvmexp.mb_object = &mb_object_store;

	/*
	 * init the map and reserve kernel space before installing.
	 */

	uvm_map_setup(&kernel_map_store, base, end, FALSE);
	kernel_map_store.pmap = pmap_kernel();
	if (uvm_map(&kernel_map_store, &base, start - base, NULL,
	    UVM_UNKNOWN_OFFSET, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
	    UVM_INH_NONE, UVM_ADV_RANDOM, UVM_FLAG_FIXED)) != KERN_SUCCESS)
		panic("uvm_km_init: could not reserve space for kernel");

	/*
	 * install!
	 */

	kernel_map = &kernel_map_store;
}

/*
 * uvm_km_suballoc: allocate a submap in the kernel map.   once a submap
 * is allocated all references to that area of VM must go through it.  this
 * allows the locking of VAs in kernel_map to be broken up into regions.
 *
 * => if submap is non NULL we use that as the submap, otherwise we
 *    alloc a new map
 */
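
/*
 * Illustration only (not in the original source): a typical use is to
 * carve a wired submap for malloc out of kernel_map at boot.  With
 * hypothetical bounds and size variables it might read:
 *
 *	vm_offset_t kmem_min, kmem_max;
 *
 *	kmem_map = uvm_km_suballoc(kernel_map, &kmem_min, &kmem_max,
 *	    kmem_size, FALSE, NULL);
 *
 * passing submap == NULL asks uvm_km_suballoc to allocate the vm_map
 * structure itself.
 */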

struct vm_map *uvm_km_suballoc(map, min, max, size, pageable, submap)

struct vm_map *map;
vm_offset_t *min, *max;		/* OUT, OUT */
vm_size_t size;
boolean_t pageable;
struct vm_map *submap;

{
	size = round_page(size);	/* round up to pagesize */

	/*
	 * first allocate a blank spot in the parent map
	 */

	if (uvm_map(map, min, size, NULL, UVM_UNKNOWN_OFFSET,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != KERN_SUCCESS) {
		panic("uvm_km_suballoc: unable to allocate space in parent map");
	}

	/*
	 * set VM bounds (min is filled in by uvm_map)
	 */

	*max = *min + size;

	/*
	 * add references to pmap and create or init the submap
	 */

	pmap_reference(vm_map_pmap(map));
	if (submap == NULL) {
		submap = uvm_map_create(vm_map_pmap(map), *min, *max, pageable);
		if (submap == NULL)
			panic("uvm_km_suballoc: unable to create submap");
	} else {
		uvm_map_setup(submap, *min, *max, pageable);
		submap->pmap = vm_map_pmap(map);
	}

	/*
	 * now let uvm_map_submap plug it in...
	 */

	if (uvm_map_submap(map, *min, *max, submap) != KERN_SUCCESS)
		panic("uvm_km_suballoc: submap allocation failed");

	return(submap);
}

/*
 * uvm_km_pgremove: remove pages from a kernel uvm_object.
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.    (this gets called from uvm_unmap_...).
 */

#define UKM_HASH_PENALTY 4	/* a guess */
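
/*
 * Added note (not in the original): UKM_HASH_PENALTY estimates that one
 * hash lookup costs about four list-link traversals.  uvm_km_pgremove
 * walks the object's page list when uo_npages <= (range / PAGE_SIZE) *
 * UKM_HASH_PENALTY, and otherwise hash-looks-up each offset in the
 * range.  e.g. removing a 16-page range from an object holding 100
 * resident pages: 100 <= 16 * 4 is false, so we do 16 hash lookups
 * (roughly 64 units of work) rather than walk all 100 list entries.
 */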

void uvm_km_pgremove(uobj, start, end)

struct uvm_object *uobj;
vm_offset_t start, end;

{
	boolean_t by_list;
	struct vm_page *pp, *ppnext;
	vm_offset_t curoff;
	UVMHIST_FUNC("uvm_km_pgremove"); UVMHIST_CALLED(maphist);

	simple_lock(&uobj->vmobjlock);		/* lock object */

	/* choose cheapest traversal */
	by_list = (uobj->uo_npages <=
	    ((end - start) / PAGE_SIZE) * UKM_HASH_PENALTY);

	if (by_list)
		goto loop_by_list;

	/* by hash */

	for (curoff = start ; curoff < end ; curoff += PAGE_SIZE) {
		pp = uvm_pagelookup(uobj, curoff);
		if (pp == NULL)
			continue;

		UVMHIST_LOG(maphist,"  page 0x%x, busy=%d", pp,
		    pp->flags & PG_BUSY, 0, 0);
		/* now do the actual work */
		if (pp->flags & PG_BUSY)
			pp->flags |= PG_RELEASED;	/* owner must check for this when done */
		else {
			pmap_page_protect(PMAP_PGARG(pp), VM_PROT_NONE);
			uvm_lock_pageq();
			uvm_pagefree(pp);
			uvm_unlock_pageq();
		}
		/* done */

	}
	simple_unlock(&uobj->vmobjlock);
	return;

loop_by_list:

	for (pp = uobj->memq.tqh_first ; pp != NULL ; pp = ppnext) {

		ppnext = pp->listq.tqe_next;
		if (pp->offset < start || pp->offset >= end) {
			continue;
		}

		UVMHIST_LOG(maphist,"  page 0x%x, busy=%d", pp,
		    pp->flags & PG_BUSY, 0, 0);
		/* now do the actual work */
		if (pp->flags & PG_BUSY)
			pp->flags |= PG_RELEASED;	/* owner must check for this when done */
		else {
			pmap_page_protect(PMAP_PGARG(pp), VM_PROT_NONE);
			uvm_lock_pageq();
			uvm_pagefree(pp);
			uvm_unlock_pageq();
		}
		/* done */

	}
	simple_unlock(&uobj->vmobjlock);
	return;
}

/*
 * uvm_km_kmemalloc: lower level kernel memory allocator for malloc()
 *
 * => we map wired memory into the specified map using the obj passed in
 * => NOTE: we can return NULL even if we can wait if there is not enough
 *    free VM space in the map... caller should be prepared to handle
 *    this case.
 * => we return KVA of memory allocated
 * => flags: NOWAIT, VALLOC - just allocate VA, TRYLOCK - fail if we can't
 *    lock the map
 */
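
/*
 * Illustration only (not in the original source): the kernel malloc()
 * back end would obtain wired pages roughly like this, using the map
 * and object names set up elsewhere (kmem_map, uvmexp.kmem_object) and
 * a hypothetical page count "npg":
 *
 *	kva = uvm_km_kmemalloc(kmem_map, uvmexp.kmem_object,
 *	    (vm_size_t)(npg << PAGE_SHIFT), UVM_KMF_NOWAIT);
 *	if (kva == 0)
 *		... out of VA or RAM, caller must cope ...
 */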

vm_offset_t uvm_km_kmemalloc(map, obj, size, flags)

vm_map_t map;
struct uvm_object *obj;
vm_size_t size;
int flags;

{
	vm_offset_t kva, loopva;
	vm_offset_t offset;
	struct vm_page *pg;
	UVMHIST_FUNC("uvm_km_kmemalloc"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"  (map=0x%x, obj=0x%x, size=0x%x, flags=%d)",
	    map, obj, size, flags);
#ifdef DIAGNOSTIC
	/* sanity check */
	if (vm_map_pmap(map) != pmap_kernel())
		panic("uvm_km_kmemalloc: invalid map");
#endif

	/*
	 * setup for call
	 */

	size = round_page(size);
	kva = vm_map_min(map);	/* hint */

	/*
	 * allocate some virtual space
	 */

	if (uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, (flags & UVM_KMF_TRYLOCK))) != KERN_SUCCESS) {
		UVMHIST_LOG(maphist, "<- done (no VM)",0,0,0,0);
		return(0);
	}

	/*
	 * if all we wanted was VA, return now
	 */

	if (flags & UVM_KMF_VALLOC) {
		UVMHIST_LOG(maphist,"<- done valloc (kva=0x%x)", kva,0,0,0);
		return(kva);
	}

	/*
	 * recover object offset from virtual address
	 */

	offset = kva - vm_map_min(map);
	UVMHIST_LOG(maphist, "  kva=0x%x, offset=0x%x", kva, offset,0,0);

	/*
	 * now allocate and map in the memory... note that we are the only ones
	 * who should ever get a handle on this area of VM.
	 */

	loopva = kva;
	while (size) {
		simple_lock(&obj->vmobjlock);
		pg = uvm_pagealloc(obj, offset, NULL);
		if (pg) {
			pg->flags &= ~PG_BUSY;	/* new page */
			UVM_PAGE_OWN(pg, NULL);
		}
		simple_unlock(&obj->vmobjlock);

		/*
		 * out of memory?
		 */

		if (pg == NULL) {
			if (flags & UVM_KMF_NOWAIT) {
				uvm_unmap(map, kva, kva + size, 0);	/* free everything! */
				return(0);
			} else {
				uvm_wait("km_getwait2");	/* sleep here */
				continue;
			}
		}

		/*
		 * map it in: note that we call pmap_enter with the map and
		 * object unlocked in case we are kmem_map/kmem_object
		 * (because if pmap_enter wants to allocate out of kmem_object
		 * it will need to lock it itself!)
		 */
#if defined(PMAP_NEW)
		pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg), VM_PROT_ALL);
#else
		pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
		    UVM_PROT_ALL, TRUE);
#endif
		loopva += PAGE_SIZE;
		offset += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
	return(kva);
}

/*
 * uvm_km_free: free an area of kernel memory
 */

void uvm_km_free(map, addr, size)

vm_map_t map;
vm_offset_t addr;
vm_size_t size;

{
	uvm_unmap(map, trunc_page(addr), round_page(addr+size), 1);
}

/*
 * uvm_km_free_wakeup: free an area of kernel memory and wake up
 * anyone waiting for vm space.
 *
 * => XXX: "wanted" bit + unlock&wait on other end?
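 * => note: the thread_wakeup(map) below pairs with the tsleep on the
 *    map address in uvm_km_valloc_wait().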
 */

void uvm_km_free_wakeup(map, addr, size)

vm_map_t map;
vm_offset_t addr;
vm_size_t size;

{
	vm_map_entry_t dead_entries;

	vm_map_lock(map);
	(void)uvm_unmap_remove(map, trunc_page(addr), round_page(addr+size), 1,
	    &dead_entries);
	thread_wakeup(map);
	vm_map_unlock(map);

	if (dead_entries != NULL)
		uvm_unmap_detach(dead_entries, 0);
}

/*
 * uvm_km_alloc1: allocate wired down memory in the kernel map.
 *
 * => we can sleep if needed
 */

vm_offset_t uvm_km_alloc1(map, size, zeroit)

vm_map_t map;
vm_size_t size;
boolean_t zeroit;

{
	vm_offset_t kva, loopva, offset;
	struct vm_page *pg;
	UVMHIST_FUNC("uvm_km_alloc1"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(map=0x%x, size=0x%x)", map, size,0,0);

#ifdef DIAGNOSTIC
	if (vm_map_pmap(map) != pmap_kernel())
		panic("uvm_km_alloc1");
#endif

	size = round_page(size);
	kva = vm_map_min(map);		/* hint */

	/*
	 * allocate some virtual space
	 */

	if (uvm_map(map, &kva, size, uvm.kernel_object, UVM_UNKNOWN_OFFSET,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, 0)) != KERN_SUCCESS) {
		UVMHIST_LOG(maphist,"<- done (no VM)",0,0,0,0);
		return(0);
	}

	/*
	 * recover object offset from virtual address
	 */

	offset = kva - vm_map_min(map);
	UVMHIST_LOG(maphist,"  kva=0x%x, offset=0x%x", kva, offset,0,0);

	/*
	 * now allocate the memory.  we must be careful about released pages.
	 */

	loopva = kva;
	while (size) {
		simple_lock(&uvm.kernel_object->vmobjlock);
		pg = uvm_pagelookup(uvm.kernel_object, offset);

		/*
		 * if we found a page in an unallocated region, it must be
		 * released
		 */
		if (pg) {
			if ((pg->flags & PG_RELEASED) == 0)
				panic("uvm_km_alloc1: non-released page");
			pg->flags |= PG_WANTED;
			UVM_UNLOCK_AND_WAIT(pg, &uvm.kernel_object->vmobjlock,
			    0, "km_alloc", 0);
			continue;	/* retry */
		}

		/* allocate ram */
		pg = uvm_pagealloc(uvm.kernel_object, offset, NULL);
		if (pg) {
			pg->flags &= ~PG_BUSY;	/* new page */
			UVM_PAGE_OWN(pg, NULL);
		}
		simple_unlock(&uvm.kernel_object->vmobjlock);
		if (pg == NULL) {
			uvm_wait("km_alloc1w");	/* wait for memory */
			continue;
		}

		/* map it in */
#if defined(PMAP_NEW)
		pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg), UVM_PROT_ALL);
#else
		pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
		    UVM_PROT_ALL, TRUE);
#endif
		loopva += PAGE_SIZE;
		offset += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	/*
	 * zero on request (note that "size" is now zero due to the above loop
	 * so we need to subtract kva from loopva to reconstruct the size).
	 */

	if (zeroit)
		bzero((caddr_t)kva, loopva - kva);

	UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
	return(kva);
}

/*
 * uvm_km_valloc: allocate zero-fill memory in the kernel's address space
 *
 * => memory is not allocated until fault time
 */
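
/*
 * Added note (not in the original): the mapping is entered against
 * uvm.kernel_object, so a later fault on this range is satisfied by
 * uvm_km_get() above, which hands back zeroed, wired pages.
 */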

vm_offset_t uvm_km_valloc(map, size)

vm_map_t map;
vm_size_t size;

{
	vm_offset_t kva;
	UVMHIST_FUNC("uvm_km_valloc"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x)", map, size, 0,0);

#ifdef DIAGNOSTIC
	if (vm_map_pmap(map) != pmap_kernel())
		panic("uvm_km_valloc");
#endif

	size = round_page(size);
	kva = vm_map_min(map);		/* hint */

	/*
	 * allocate some virtual space.   will be demand filled by kernel_object.
	 */

	if (uvm_map(map, &kva, size, uvm.kernel_object, UVM_UNKNOWN_OFFSET,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, 0)) != KERN_SUCCESS) {
		UVMHIST_LOG(maphist, "<- done (no VM)", 0,0,0,0);
		return(0);
	}

	UVMHIST_LOG(maphist, "<- done (kva=0x%x)", kva,0,0,0);
	return(kva);
}

/*
 * uvm_km_valloc_wait: allocate zero-fill memory in the kernel's address space
 *
 * => memory is not allocated until fault time
 * => if no room in map, wait for space to free, unless requested size
 *    is larger than map (in which case we return 0)
 */

vm_offset_t uvm_km_valloc_wait(map, size)

vm_map_t map;
vm_size_t size;

{
	vm_offset_t kva;
	UVMHIST_FUNC("uvm_km_valloc_wait"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x)", map, size, 0,0);

#ifdef DIAGNOSTIC
	if (vm_map_pmap(map) != pmap_kernel())
		panic("uvm_km_valloc_wait");
#endif

	size = round_page(size);
	if (size > vm_map_max(map) - vm_map_min(map))
		return(0);

	while (1) {
		kva = vm_map_min(map);		/* hint */

		/*
		 * allocate some virtual space.   will be demand filled
		 * by kernel_object.
		 */

		if (uvm_map(map, &kva, size, uvm.kernel_object,
		    UVM_UNKNOWN_OFFSET, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
		    UVM_INH_NONE, UVM_ADV_RANDOM, 0)) == KERN_SUCCESS) {
			UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
			return(kva);
		}

		/*
		 * failed.  sleep for a while (on map)
		 */

		UVMHIST_LOG(maphist,"<<<sleeping>>>",0,0,0,0);
		tsleep((caddr_t)map, PVM, "vallocwait", 0);
	}
	/*NOTREACHED*/
}