/*	$NetBSD: uvm_km.c,v 1.22.2.1.2.1 1999/06/07 04:25:36 chs Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *	Washington University, the University of California, Berkeley and
 *	its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_kern.c   8.3 (Berkeley) 1/12/94
 * from: Id: uvm_km.c,v 1.1.2.14 1998/02/06 05:19:27 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include "opt_uvmhist.h"
#include "opt_pmap_new.h"

/*
 * uvm_km.c: handle kernel memory allocation and management
 */

/*
 * overview of kernel memory management:
 *
 * the kernel virtual address space is mapped by "kernel_map."  kernel_map
 * starts at VM_MIN_KERNEL_ADDRESS and goes to VM_MAX_KERNEL_ADDRESS.
 * note that VM_MIN_KERNEL_ADDRESS is equal to vm_map_min(kernel_map).
 *
 * the kernel_map has several "submaps."  submaps can only appear in
 * the kernel_map (user processes can't use them).  submaps "take over"
 * the management of a sub-range of the kernel's address space.  submaps
 * are typically allocated at boot time and are never released.  kernel
 * virtual address space that is mapped by a submap is locked by the
 * submap's lock -- not the kernel_map's lock.
 *
 * thus, the useful feature of submaps is that they allow us to break
 * up the locking and protection of the kernel address space into smaller
 * chunks.
 *
 * the vm system has several standard kernel submaps, including:
 *   kmem_map => contains only wired kernel memory for the kernel
 *		malloc.  *** access to kmem_map must be protected
 *		by splimp() because we are allowed to call malloc()
 *		at interrupt time ***
 *   mb_map => memory for large mbufs,  *** protected by splimp ***
 *   pager_map => used to map "buf" structures into kernel space
 *   exec_map => used during exec to handle exec args
 *   etc...
 *
 * the kernel allocates its private memory out of special uvm_objects whose
 * reference count is set to UVM_OBJ_KERN (thus indicating that the objects
 * are "special" and never die).  all kernel objects should be thought of
 * as large, fixed-sized, sparsely populated uvm_objects.  each kernel
 * object is equal to the size of kernel virtual address space (i.e. the
 * value "VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS").
 *
 * most kernel private memory lives in kernel_object.  the only exception
 * to this is for memory that belongs to submaps that must be protected
 * by splimp().  each of these submaps has its own private kernel
 * object (e.g. kmem_object, mb_object).
 *
 * note that just because a kernel object spans the entire kernel virtual
 * address space doesn't mean that it has to be mapped into the entire space.
 * large chunks of a kernel object's space go unused either because
 * that area of kernel VM is unmapped, or there is some other type of
 * object mapped into that range (e.g. a vnode).  for a submap's kernel
 * object, the only part of the object that can ever be populated is the
 * offsets that are managed by the submap.
 *
 * note that the "offset" in a kernel object is always the kernel virtual
 * address minus the VM_MIN_KERNEL_ADDRESS (aka vm_map_min(kernel_map)).
 * example:
 *   suppose VM_MIN_KERNEL_ADDRESS is 0xf8000000 and the kernel does a
 *   uvm_km_alloc(kernel_map, PAGE_SIZE) [allocate 1 wired down page in the
 *   kernel map].  if uvm_km_alloc returns virtual address 0xf8235000,
 *   then that means that the page at offset 0x235000 in kernel_object is
 *   mapped at 0xf8235000.
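 *
 * expressed as code, the rule from this example is (a sketch; "kva"
 * stands for any kernel_object-backed kernel virtual address):
 *
 *	vaddr_t kva = uvm_km_alloc(kernel_map, PAGE_SIZE);
 *	vaddr_t offset = kva - vm_map_min(kernel_map);
 *		==> the page at "offset" in kernel_object backs "kva"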
 *
 * note that the offsets in kmem_object and mb_object also follow this
 * rule.  this means that the offsets for kmem_object must fall in the
 * range of [vm_map_min(kmem_map) - vm_map_min(kernel_map)] to
 * [vm_map_max(kmem_map) - vm_map_min(kernel_map)], so the offsets
 * in those objects will typically not start at zero.
 *
 * kernel objects have one other special property: when the kernel virtual
 * memory mapping them is unmapped, the backing memory in the object is
 * freed right away.  this is done with the uvm_km_pgremove() function.
 * this has to be done because there is no backing store for kernel pages
 * and no need to save them after they are no longer referenced.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#include <uvm/uvm.h>

/*
 * global data structures
 */

vm_map_t kernel_map = NULL;

/*
 * local functions
 */

static int uvm_km_get __P((struct uvm_object *, vaddr_t,
	vm_page_t *, int *, int, vm_prot_t, int, int));

/*
 * local data structures
 */

static struct vm_map		kernel_map_store;
static struct uvm_object	kmem_object_store;
static struct uvm_object	mb_object_store;

static struct uvm_pagerops km_pager = {
	NULL,		/* init */
	NULL,		/* reference */
	NULL,		/* detach */
	NULL,		/* fault */
	NULL,		/* flush */
	uvm_km_get,	/* get */
	/* ... rest are NULL */
};

/*
 * uvm_km_get: pager get function for kernel objects
 *
 * => currently we do not support pageout to the swap area, so this
 *    pager is very simple.  eventually we may want an anonymous
 *    object pager which will do paging.
 * => XXXCDC: this pager should be phased out in favor of the aobj pager
 */

static int
uvm_km_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags)
	struct uvm_object *uobj;
	vaddr_t offset;
	struct vm_page **pps;
	int *npagesp;
	int centeridx, advice, flags;
	vm_prot_t access_type;
{
	vaddr_t current_offset;
	vm_page_t ptmp;
	int lcv, gotpages, maxpages;
	boolean_t done;
	UVMHIST_FUNC("uvm_km_get"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "flags=%d", flags,0,0,0);

	/*
	 * get number of pages
	 */

	maxpages = *npagesp;

	/*
	 * step 1: handle the case where fault data structures are locked.
	 */

	if (flags & PGO_LOCKED) {

		/*
		 * step 1a: get pages that are already resident.  only do
		 * this if the data structures are locked (i.e. the first time
		 * through).
		 */

		done = TRUE;	/* be optimistic */
		gotpages = 0;	/* # of pages we got so far */

		for (lcv = 0, current_offset = offset ;
		    lcv < maxpages ; lcv++, current_offset += PAGE_SIZE) {

			/* do we care about this page?  if not, skip it */
			if (pps[lcv] == PGO_DONTCARE)
				continue;

			/* lookup page */
			ptmp = uvm_pagelookup(uobj, current_offset);

			/* null?  attempt to allocate the page */
			if (ptmp == NULL) {
				ptmp = uvm_pagealloc(uobj, current_offset,
				    NULL, 0);
				if (ptmp) {
					/* new page */
					ptmp->flags &= ~(PG_BUSY|PG_FAKE);
					UVM_PAGE_OWN(ptmp, NULL);
					uvm_pagezero(ptmp);
				}
			}

			/*
			 * to be useful must get a non-busy, non-released page
			 */
			if (ptmp == NULL ||
			    (ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) {
				if (lcv == centeridx ||
				    (flags & PGO_ALLPAGES) != 0)
					/* need to do a wait or I/O! */
					done = FALSE;
				continue;
			}

			/*
			 * useful page: busy/lock it and plug it in our
			 * result array
			 */

			/* caller must un-busy this page */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uvm_km_get1");
			pps[lcv] = ptmp;
			gotpages++;

		}	/* "for" lcv loop */

		/*
		 * step 1b: now we've either done everything needed or we
		 * need to unlock and do some waiting or I/O.
		 */

		UVMHIST_LOG(maphist, "<- done (done=%d)", done, 0,0,0);

		*npagesp = gotpages;
		if (done)
			return(VM_PAGER_OK);		/* bingo! */
		else
			return(VM_PAGER_UNLOCK);	/* EEK!  Need to
							 * unlock and I/O */
	}

	/*
	 * step 2: get non-resident or busy pages.
	 * object is locked.  data structures are unlocked.
	 */

	for (lcv = 0, current_offset = offset ;
	    lcv < maxpages ; lcv++, current_offset += PAGE_SIZE) {

		/* skip over pages we've already gotten or don't want */
		/* skip over pages we don't _have_ to get */
		if (pps[lcv] != NULL ||
		    (lcv != centeridx && (flags & PGO_ALLPAGES) == 0))
			continue;

		/*
		 * we have yet to locate the current page (pps[lcv]).  we
		 * first look for a page that is already at the current offset.
		 * if we find a page, we check to see if it is busy or
		 * released.  if that is the case, then we sleep on the page
		 * until it is no longer busy or released and repeat the
		 * lookup.  if the page we found is neither busy nor
		 * released, then we busy it (so we own it) and plug it into
		 * pps[lcv].  this 'break's the following while loop and
		 * indicates we are ready to move on to the next page in the
		 * "lcv" loop above.
		 *
		 * if we exit the while loop with pps[lcv] still set to NULL,
		 * then it means that we allocated a new busy/fake/clean page
		 * ptmp in the object and we need to do I/O to fill in the
		 * data.
		 */

		while (pps[lcv] == NULL) {	/* top of "pps" while loop */

			/* look for a current page */
			ptmp = uvm_pagelookup(uobj, current_offset);

			/* nope?  allocate one now (if we can) */
			if (ptmp == NULL) {

				ptmp = uvm_pagealloc(uobj, current_offset,
				    NULL, 0);

				/* out of RAM? */
				if (ptmp == NULL) {
					simple_unlock(&uobj->vmobjlock);
					uvm_wait("kmgetwait1");
					simple_lock(&uobj->vmobjlock);
					/* goto top of pps while loop */
					continue;
				}

				/*
				 * got new page ready for I/O.  break pps
				 * while loop.  pps[lcv] is still NULL.
				 */
				break;
			}

			/* page is there, see if we need to wait on it */
			if ((ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) {
				ptmp->flags |= PG_WANTED;
				UVM_UNLOCK_AND_WAIT(ptmp, &uobj->vmobjlock, 0,
				    "uvn_get", 0);
				simple_lock(&uobj->vmobjlock);
				continue;	/* goto top of pps while loop */
			}

			/*
			 * if we get here then the page has become resident
			 * and unbusy between steps 1 and 2.  we busy it now
			 * (so we own it) and set pps[lcv] (so that we exit
			 * the while loop).  caller must un-busy.
			 */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uvm_km_get2");
			pps[lcv] = ptmp;
		}

		/*
		 * if we own a valid page at the correct offset, pps[lcv]
		 * will point to it.  nothing more to do except go to the
		 * next page.
		 */

		if (pps[lcv])
			continue;	/* next lcv */

		/*
		 * we have a "fake/busy/clean" page that we just allocated.
		 * do the needed "i/o" (in this case that means zero it).
		 */

		uvm_pagezero(ptmp);
		ptmp->flags &= ~(PG_FAKE);
		pps[lcv] = ptmp;

	}	/* lcv loop */

	/*
	 * finally, unlock object and return.
	 */

	simple_unlock(&uobj->vmobjlock);
	UVMHIST_LOG(maphist, "<- done (OK)",0,0,0,0);
	return(VM_PAGER_OK);
}

/*
 * uvm_km_init: init kernel maps and objects to reflect reality (i.e.
 * KVM already allocated for text, data, bss, and static data structures).
 *
 * => KVM is defined by VM_MIN_KERNEL_ADDRESS/VM_MAX_KERNEL_ADDRESS.
 *    we assume that [min -> start] has already been allocated and that
 *    "end" is the end.
 */
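
/*
 * (illustrative only: this is typically called once at boot from the
 * machine-independent VM startup code, roughly as
 *	uvm_km_init(kvm_start, kvm_end);
 * where "kvm_start"/"kvm_end" are hypothetical names for the first
 * unallocated kernel virtual address and the end of kernel VM.)
 */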

void
uvm_km_init(start, end)
	vaddr_t start, end;
{
	vaddr_t base = VM_MIN_KERNEL_ADDRESS;

	/*
	 * first, init kernel memory objects.
	 */

	/* kernel_object: for pageable anonymous kernel memory */
	uao_init();
	uvm.kernel_object = uao_create(VM_MAX_KERNEL_ADDRESS -
	    VM_MIN_KERNEL_ADDRESS, UAO_FLAG_KERNOBJ);

	/* kmem_object: for malloc'd memory (wired, protected by splimp) */
	simple_lock_init(&kmem_object_store.vmobjlock);
	kmem_object_store.pgops = &km_pager;
	TAILQ_INIT(&kmem_object_store.memq);
	kmem_object_store.uo_npages = 0;
	/* we are special.  we never die */
	kmem_object_store.uo_refs = UVM_OBJ_KERN;
	uvmexp.kmem_object = &kmem_object_store;

	/* mb_object: for mbuf memory (always wired, protected by splimp) */
	simple_lock_init(&mb_object_store.vmobjlock);
	mb_object_store.pgops = &km_pager;
	TAILQ_INIT(&mb_object_store.memq);
	mb_object_store.uo_npages = 0;
	/* we are special.  we never die */
	mb_object_store.uo_refs = UVM_OBJ_KERN;
	uvmexp.mb_object = &mb_object_store;

	/*
	 * init the map and reserve already allocated kernel space
	 * before installing.
	 */

	uvm_map_setup(&kernel_map_store, base, end, FALSE);
	kernel_map_store.pmap = pmap_kernel();
	if (uvm_map(&kernel_map_store, &base, start - base, NULL,
	    UVM_UNKNOWN_OFFSET, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
	    UVM_INH_NONE, UVM_ADV_RANDOM, UVM_FLAG_FIXED)) != KERN_SUCCESS)
		panic("uvm_km_init: could not reserve space for kernel");

	/*
	 * install!
	 */

	kernel_map = &kernel_map_store;
}

/*
 * uvm_km_suballoc: allocate a submap in the kernel map.  once a submap
 * is allocated all references to that area of VM must go through it.  this
 * allows the locking of VAs in kernel_map to be broken up into regions.
 *
 * => if `fixed' is true, *min specifies where the region described
 *	by the submap must start
 * => if submap is non-NULL we use that as the submap, otherwise we
 *	alloc a new map
 */
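/*
 * sketch of a typical boot-time call (the names here are illustrative,
 * not taken from this file; compare the kmem_map discussion in the
 * overview above):
 *
 *	static struct vm_map kmem_map_store;
 *	vaddr_t minaddr, maxaddr;
 *
 *	kmem_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
 *	    nkmempages * PAGE_SIZE, FALSE, FALSE, &kmem_map_store);
 */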
struct vm_map *
uvm_km_suballoc(map, min, max, size, pageable, fixed, submap)
	struct vm_map *map;
	vaddr_t *min, *max;		/* OUT, OUT */
	vsize_t size;
	boolean_t pageable;
	boolean_t fixed;
	struct vm_map *submap;
{
	int mapflags = UVM_FLAG_NOMERGE | (fixed ? UVM_FLAG_FIXED : 0);

	size = round_page(size);	/* round up to pagesize */

	/*
	 * first allocate a blank spot in the parent map
	 */

	if (uvm_map(map, min, size, NULL, UVM_UNKNOWN_OFFSET,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, mapflags)) != KERN_SUCCESS) {
		panic("uvm_km_suballoc: unable to allocate space in parent map");
	}

	/*
	 * set VM bounds (min is filled in by uvm_map)
	 */

	*max = *min + size;

	/*
	 * add references to pmap and create or init the submap
	 */

	pmap_reference(vm_map_pmap(map));
	if (submap == NULL) {
		submap = uvm_map_create(vm_map_pmap(map), *min, *max, pageable);
		if (submap == NULL)
			panic("uvm_km_suballoc: unable to create submap");
	} else {
		uvm_map_setup(submap, *min, *max, pageable);
		submap->pmap = vm_map_pmap(map);
	}

	/*
	 * now let uvm_map_submap plug it in...
	 */

	if (uvm_map_submap(map, *min, *max, submap) != KERN_SUCCESS)
		panic("uvm_km_suballoc: submap allocation failed");

	return(submap);
}

/*
 * uvm_km_pgremove: remove pages from a kernel uvm_object.
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.  (this gets called from uvm_unmap_...).
 */

#define UKM_HASH_PENALTY 4      /* a guess */
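
/*
 * (on the cost model behind UKM_HASH_PENALTY: walking the object's
 * memq costs one step per resident page, while probing the hash costs
 * one uvm_pagelookup() per page offset in [start, end), and each hash
 * lookup is estimated to be UKM_HASH_PENALTY times as expensive as a
 * list step.  so the code below walks the list only when
 *	uo_npages <= (pages in range) * UKM_HASH_PENALTY
 */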

void
uvm_km_pgremove(uobj, start, end)
	struct uvm_object *uobj;
	vaddr_t start, end;
{
	boolean_t by_list, is_aobj;
	struct vm_page *pp, *ppnext;
	vaddr_t curoff;
	UVMHIST_FUNC("uvm_km_pgremove"); UVMHIST_CALLED(maphist);

	simple_lock(&uobj->vmobjlock);		/* lock object */

	/* is uobj an aobj? */
	is_aobj = uobj->pgops == &aobj_pager;

	/* choose cheapest traversal */
	by_list = (uobj->uo_npages <=
	    ((end - start) >> PAGE_SHIFT) * UKM_HASH_PENALTY);

	if (by_list)
		goto loop_by_list;

	/* by hash */

	for (curoff = start ; curoff < end ; curoff += PAGE_SIZE) {
		pp = uvm_pagelookup(uobj, curoff);
		if (pp == NULL)
			continue;

		UVMHIST_LOG(maphist,"  page 0x%x, busy=%d", pp,
		    pp->flags & PG_BUSY, 0, 0);
		/* now do the actual work */
		if (pp->flags & PG_BUSY)
			/* owner must check for this when done */
			pp->flags |= PG_RELEASED;
		else {
			pmap_page_protect(PMAP_PGARG(pp), VM_PROT_NONE);

			/*
			 * if this kernel object is an aobj, free the swap slot.
			 */
			if (is_aobj) {
				uao_dropswap(uobj, curoff >> PAGE_SHIFT);
			}

			uvm_lock_pageq();
			uvm_pagefree(pp);
			uvm_unlock_pageq();
		}
		/* done */

	}
	simple_unlock(&uobj->vmobjlock);
	return;

loop_by_list:

	for (pp = uobj->memq.tqh_first ; pp != NULL ; pp = ppnext) {

		ppnext = pp->listq.tqe_next;
		if (pp->offset < start || pp->offset >= end) {
			continue;
		}

		UVMHIST_LOG(maphist,"  page 0x%x, busy=%d", pp,
		    pp->flags & PG_BUSY, 0, 0);
		/* now do the actual work */
		if (pp->flags & PG_BUSY)
			/* owner must check for this when done */
			pp->flags |= PG_RELEASED;
		else {
			pmap_page_protect(PMAP_PGARG(pp), VM_PROT_NONE);

			/*
			 * if this kernel object is an aobj, free the swap slot.
			 */
			if (is_aobj) {
				uao_dropswap(uobj, pp->offset >> PAGE_SHIFT);
			}

			uvm_lock_pageq();
			uvm_pagefree(pp);
			uvm_unlock_pageq();
		}
		/* done */

	}
	simple_unlock(&uobj->vmobjlock);
	return;
}

/*
 * uvm_km_kmemalloc: lower level kernel memory allocator for malloc()
 *
 * => we map wired memory into the specified map using the obj passed in
 * => NOTE: we can return 0 even if we can wait if there is not enough
 *	free VM space in the map... caller should be prepared to handle
 *	this case.
 * => we return KVA of memory allocated
 * => flags: NOWAIT, VALLOC - just allocate VA, TRYLOCK - fail if we can't
 *	lock the map
 */
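
/*
 * sketch of a typical call from an splimp-protected allocator (a
 * hedged example -- "canwait" is an illustrative name; compare the
 * real caller in uvm_km_alloc_poolpage1 below):
 *
 *	s = splimp();
 *	va = uvm_km_kmemalloc(kmem_map, uvmexp.kmem_object, PAGE_SIZE,
 *	    canwait ? 0 : UVM_KMF_NOWAIT);
 *	splx(s);
 */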

vaddr_t
uvm_km_kmemalloc(map, obj, size, flags)
	vm_map_t map;
	struct uvm_object *obj;
	vsize_t size;
	int flags;
{
	vaddr_t kva, loopva;
	vaddr_t offset;
	struct vm_page *pg;
	UVMHIST_FUNC("uvm_km_kmemalloc"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"  (map=0x%x, obj=0x%x, size=0x%x, flags=%d)",
	    map, obj, size, flags);
#ifdef DIAGNOSTIC
	/* sanity check */
	if (vm_map_pmap(map) != pmap_kernel())
		panic("uvm_km_kmemalloc: invalid map");
#endif

	/*
	 * setup for call
	 */

	size = round_page(size);
	kva = vm_map_min(map);	/* hint */

	/*
	 * allocate some virtual space
	 */

	if (uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, (flags & UVM_KMF_TRYLOCK))) != KERN_SUCCESS) {
		UVMHIST_LOG(maphist, "<- done (no VM)",0,0,0,0);
		return(0);
	}

	/*
	 * if all we wanted was VA, return now
	 */

	if (flags & UVM_KMF_VALLOC) {
		UVMHIST_LOG(maphist,"<- done valloc (kva=0x%x)", kva,0,0,0);
		return(kva);
	}

	/*
	 * recover object offset from virtual address
	 */

	offset = kva - vm_map_min(kernel_map);
	UVMHIST_LOG(maphist, "  kva=0x%x, offset=0x%x", kva, offset,0,0);

	/*
	 * now allocate and map in the memory... note that we are the only ones
	 * who should ever get a handle on this area of VM.
	 */

	loopva = kva;
	while (size) {
		simple_lock(&obj->vmobjlock);
		pg = uvm_pagealloc(obj, offset, NULL, 0);
		if (pg) {
			pg->flags &= ~PG_BUSY;	/* new page */
			UVM_PAGE_OWN(pg, NULL);
		}
		simple_unlock(&obj->vmobjlock);

		/*
		 * out of memory?
		 */

		if (pg == NULL) {
			if (flags & UVM_KMF_NOWAIT) {
				/* free everything! */
				uvm_unmap(map, kva, kva + size);
				return(0);
			} else {
				uvm_wait("km_getwait2");	/* sleep here */
				continue;
			}
		}

		/*
		 * map it in: note that we call pmap_enter with the map and
		 * object unlocked in case we are kmem_map/kmem_object
		 * (because if pmap_enter wants to allocate out of kmem_object
		 * it will need to lock it itself!)
		 */
#if defined(PMAP_NEW)
		pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg), VM_PROT_ALL);
#else
		pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
		    UVM_PROT_ALL, TRUE, 0);
#endif
		loopva += PAGE_SIZE;
		offset += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
	return(kva);
}

/*
 * uvm_km_free: free an area of kernel memory
 */

void
uvm_km_free(map, addr, size)
	vm_map_t map;
	vaddr_t addr;
	vsize_t size;
{

	uvm_unmap(map, trunc_page(addr), round_page(addr+size));
}

/*
 * uvm_km_free_wakeup: free an area of kernel memory and wake up
 * anyone waiting for vm space.
 *
 * => XXX: "wanted" bit + unlock&wait on other end?
 */
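
/*
 * (note: the wakeup(map) below pairs with the tsleep on the map in
 * uvm_km_valloc_wait -- anyone sleeping for space in this map gets
 * woken and retries its allocation.)
 */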

void
uvm_km_free_wakeup(map, addr, size)
	vm_map_t map;
	vaddr_t addr;
	vsize_t size;
{
	vm_map_entry_t dead_entries;

	vm_map_lock(map);
	(void)uvm_unmap_remove(map, trunc_page(addr), round_page(addr+size),
	    &dead_entries);
	wakeup(map);
	vm_map_unlock(map);

	if (dead_entries != NULL)
		uvm_unmap_detach(dead_entries, 0);
}

/*
 * uvm_km_alloc1: allocate wired down memory in the kernel map.
 *
 * => we can sleep if needed
 */

vaddr_t
uvm_km_alloc1(map, size, zeroit)
	vm_map_t map;
	vsize_t size;
	boolean_t zeroit;
{
	vaddr_t kva, loopva, offset;
	struct vm_page *pg;
	UVMHIST_FUNC("uvm_km_alloc1"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(map=0x%x, size=0x%x)", map, size,0,0);

#ifdef DIAGNOSTIC
	if (vm_map_pmap(map) != pmap_kernel())
		panic("uvm_km_alloc1");
#endif

	size = round_page(size);
	kva = vm_map_min(map);		/* hint */

	/*
	 * allocate some virtual space
	 */

	if (uvm_map(map, &kva, size, uvm.kernel_object, UVM_UNKNOWN_OFFSET,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, 0)) != KERN_SUCCESS) {
		UVMHIST_LOG(maphist,"<- done (no VM)",0,0,0,0);
		return(0);
	}

	/*
	 * recover object offset from virtual address
	 */

	offset = kva - vm_map_min(kernel_map);
	UVMHIST_LOG(maphist,"  kva=0x%x, offset=0x%x", kva, offset,0,0);

	/*
	 * now allocate the memory.  we must be careful about released pages.
	 */

	loopva = kva;
	while (size) {
		simple_lock(&uvm.kernel_object->vmobjlock);
		pg = uvm_pagelookup(uvm.kernel_object, offset);

		/*
		 * if we found a page in an unallocated region, it must be
		 * released
		 */
		if (pg) {
			if ((pg->flags & PG_RELEASED) == 0)
				panic("uvm_km_alloc1: non-released page");
			pg->flags |= PG_WANTED;
			UVM_UNLOCK_AND_WAIT(pg, &uvm.kernel_object->vmobjlock,
			    0, "km_alloc", 0);
			continue;	/* retry */
		}

		/* allocate ram */
		pg = uvm_pagealloc(uvm.kernel_object, offset, NULL, 0);
		if (pg) {
			pg->flags &= ~PG_BUSY;	/* new page */
			UVM_PAGE_OWN(pg, NULL);
		}
		simple_unlock(&uvm.kernel_object->vmobjlock);
		if (pg == NULL) {
			uvm_wait("km_alloc1w");	/* wait for memory */
			continue;
		}

		/* map it in */
#if defined(PMAP_NEW)
		pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg), UVM_PROT_ALL);
#else
		pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
		    UVM_PROT_ALL, TRUE, 0);
#endif
		loopva += PAGE_SIZE;
		offset += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	/*
	 * zero on request (note that "size" is now zero due to the above loop
	 * so we need to subtract kva from loopva to reconstruct the size).
	 */

	if (zeroit)
		memset((caddr_t)kva, 0, loopva - kva);

	UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
	return(kva);
}

/*
 * uvm_km_valloc: allocate zero-fill memory in the kernel's address space
 *
 * => memory is not allocated until fault time
 */

vaddr_t
uvm_km_valloc(map, size)
	vm_map_t map;
	vsize_t size;
{
	vaddr_t kva;
	UVMHIST_FUNC("uvm_km_valloc"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x)", map, size, 0,0);

#ifdef DIAGNOSTIC
	if (vm_map_pmap(map) != pmap_kernel())
		panic("uvm_km_valloc");
#endif

	size = round_page(size);
	kva = vm_map_min(map);		/* hint */

	/*
	 * allocate some virtual space.  will be demand filled by kernel_object.
	 */

	if (uvm_map(map, &kva, size, uvm.kernel_object, UVM_UNKNOWN_OFFSET,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, 0)) != KERN_SUCCESS) {
		UVMHIST_LOG(maphist, "<- done (no VM)", 0,0,0,0);
		return(0);
	}

	UVMHIST_LOG(maphist, "<- done (kva=0x%x)", kva,0,0,0);
	return(kva);
}

/*
 * uvm_km_valloc_wait: allocate zero-fill memory in the kernel's address space
 *
 * => memory is not allocated until fault time
 * => if no room in map, wait for space to free, unless requested size
 *    is larger than map (in which case we return 0)
 */

vaddr_t
uvm_km_valloc_wait(map, size)
	vm_map_t map;
	vsize_t size;
{
	vaddr_t kva;
	UVMHIST_FUNC("uvm_km_valloc_wait"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x)", map, size, 0,0);

#ifdef DIAGNOSTIC
	if (vm_map_pmap(map) != pmap_kernel())
		panic("uvm_km_valloc_wait");
#endif

	size = round_page(size);
	if (size > vm_map_max(map) - vm_map_min(map))
		return(0);

	while (1) {
		kva = vm_map_min(map);		/* hint */

		/*
		 * allocate some virtual space.  will be demand filled
		 * by kernel_object.
		 */

		if (uvm_map(map, &kva, size, uvm.kernel_object,
		    UVM_UNKNOWN_OFFSET, UVM_MAPFLAG(UVM_PROT_ALL,
		    UVM_PROT_ALL, UVM_INH_NONE, UVM_ADV_RANDOM, 0))
		    == KERN_SUCCESS) {
			UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
			return(kva);
		}

		/*
		 * failed.  sleep for a while (on map)
		 */

		UVMHIST_LOG(maphist,"<<<sleeping>>>",0,0,0,0);
		tsleep((caddr_t)map, PVM, "vallocwait", 0);
	}
	/*NOTREACHED*/
}

/* Sanity; must specify both or none. */
#if (defined(PMAP_MAP_POOLPAGE) || defined(PMAP_UNMAP_POOLPAGE)) && \
    (!defined(PMAP_MAP_POOLPAGE) || !defined(PMAP_UNMAP_POOLPAGE))
#error Must specify MAP and UNMAP together.
#endif

/*
 * uvm_km_alloc_poolpage: allocate a page for the pool allocator
 *
 * => if the pmap specifies an alternate mapping method, we use it.
 */
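
/*
 * (background, not a requirement of the interface: ports that define
 * PMAP_MAP_POOLPAGE typically map the page through a direct-mapped
 * segment, so no kernel_map entry, no pmap_enter, and no splimp are
 * needed -- compare the #else case below.)
 */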

/* ARGSUSED */
vaddr_t
uvm_km_alloc_poolpage1(map, obj, waitok)
	vm_map_t map;
	struct uvm_object *obj;
	boolean_t waitok;
{
#if defined(PMAP_MAP_POOLPAGE)
	struct vm_page *pg;
	vaddr_t va;

 again:
	pg = uvm_pagealloc(NULL, 0, NULL, 0);
	if (pg == NULL) {
		if (waitok) {
			uvm_wait("plpg");
			goto again;
		} else
			return (0);
	}
	va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
	if (va == 0)
		uvm_pagefree(pg);
	return (va);
#else
	vaddr_t va;
	int s;

	/*
	 * NOTE: We may be called with a map that doesn't require splimp
	 * protection (e.g. kernel_map).  However, it does not hurt to
	 * go to splimp in this case (since unprotected maps will never be
	 * accessed in interrupt context).
	 *
	 * XXX We may want to consider changing the interface to this
	 * XXX function.
	 */

	s = splimp();
	va = uvm_km_kmemalloc(map, obj, PAGE_SIZE, waitok ? 0 : UVM_KMF_NOWAIT);
	splx(s);
	return (va);
#endif /* PMAP_MAP_POOLPAGE */
}

/*
 * uvm_km_free_poolpage: free a previously allocated pool page
 *
 * => if the pmap specifies an alternate unmapping method, we use it.
 */

/* ARGSUSED */
void
uvm_km_free_poolpage1(map, addr)
	vm_map_t map;
	vaddr_t addr;
{
#if defined(PMAP_UNMAP_POOLPAGE)
	paddr_t pa;

	pa = PMAP_UNMAP_POOLPAGE(addr);
	uvm_pagefree(PHYS_TO_VM_PAGE(pa));
#else
	int s;

	/*
	 * NOTE: We may be called with a map that doesn't require splimp
	 * protection (e.g. kernel_map).  However, it does not hurt to
	 * go to splimp in this case (since unprotected maps will never be
	 * accessed in interrupt context).
	 *
	 * XXX We may want to consider changing the interface to this
	 * XXX function.
	 */

	s = splimp();
	uvm_km_free(map, addr, PAGE_SIZE);
	splx(s);
#endif /* PMAP_UNMAP_POOLPAGE */
}