/*	$NetBSD: uvm_km.c,v 1.54 2001/11/07 08:43:32 chs Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *	Washington University, the University of California, Berkeley and
 *	its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_kern.c   8.3 (Berkeley) 1/12/94
 * from: Id: uvm_km.c,v 1.1.2.14 1998/02/06 05:19:27 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include "opt_uvmhist.h"

/*
 * uvm_km.c: handle kernel memory allocation and management
 */
74 1.1 mrg
75 1.7 chuck /*
76 1.7 chuck * overview of kernel memory management:
77 1.7 chuck *
78 1.7 chuck * the kernel virtual address space is mapped by "kernel_map." kernel_map
79 1.7 chuck * starts at VM_MIN_KERNEL_ADDRESS and goes to VM_MAX_KERNEL_ADDRESS.
80 1.7 chuck * note that VM_MIN_KERNEL_ADDRESS is equal to vm_map_min(kernel_map).
81 1.7 chuck *
82 1.47 chs * the kernel_map has several "submaps." submaps can only appear in
83 1.7 chuck * the kernel_map (user processes can't use them). submaps "take over"
84 1.7 chuck * the management of a sub-range of the kernel's address space. submaps
85 1.7 chuck * are typically allocated at boot time and are never released. kernel
86 1.47 chs * virtual address space that is mapped by a submap is locked by the
87 1.7 chuck * submap's lock -- not the kernel_map's lock.
88 1.7 chuck *
89 1.7 chuck * thus, the useful feature of submaps is that they allow us to break
90 1.7 chuck * up the locking and protection of the kernel address space into smaller
91 1.7 chuck * chunks.
92 1.7 chuck *
93 1.7 chuck * the vm system has several standard kernel submaps, including:
94 1.7 chuck * kmem_map => contains only wired kernel memory for the kernel
95 1.7 chuck * malloc. *** access to kmem_map must be protected
96 1.42 thorpej * by splvm() because we are allowed to call malloc()
97 1.7 chuck * at interrupt time ***
98 1.42 thorpej * mb_map => memory for large mbufs, *** protected by splvm ***
99 1.7 chuck * pager_map => used to map "buf" structures into kernel space
100 1.7 chuck * exec_map => used during exec to handle exec args
101 1.7 chuck * etc...
102 1.7 chuck *
103 1.7 chuck * the kernel allocates its private memory out of special uvm_objects whose
104 1.7 chuck * reference count is set to UVM_OBJ_KERN (thus indicating that the objects
105 1.7 chuck * are "special" and never die). all kernel objects should be thought of
106 1.47 chs * as large, fixed-sized, sparsely populated uvm_objects. each kernel
107 1.7 chuck * object is equal to the size of kernel virtual address space (i.e. the
108 1.7 chuck * value "VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS").
109 1.7 chuck *
110 1.7 chuck * most kernel private memory lives in kernel_object. the only exception
111 1.7 chuck * to this is for memory that belongs to submaps that must be protected
112 1.52 chs * by splvm(). pages in these submaps are not assigned to an object.
113 1.7 chuck *
114 1.7 chuck * note that just because a kernel object spans the entire kernel virutal
115 1.7 chuck * address space doesn't mean that it has to be mapped into the entire space.
116 1.47 chs * large chunks of a kernel object's space go unused either because
117 1.47 chs * that area of kernel VM is unmapped, or there is some other type of
118 1.7 chuck * object mapped into that range (e.g. a vnode). for submap's kernel
119 1.7 chuck * objects, the only part of the object that can ever be populated is the
120 1.7 chuck * offsets that are managed by the submap.
121 1.7 chuck *
122 1.7 chuck * note that the "offset" in a kernel object is always the kernel virtual
123 1.7 chuck * address minus the VM_MIN_KERNEL_ADDRESS (aka vm_map_min(kernel_map)).
124 1.7 chuck * example:
125 1.7 chuck * suppose VM_MIN_KERNEL_ADDRESS is 0xf8000000 and the kernel does a
126 1.7 chuck * uvm_km_alloc(kernel_map, PAGE_SIZE) [allocate 1 wired down page in the
127 1.7 chuck * kernel map]. if uvm_km_alloc returns virtual address 0xf8235000,
128 1.7 chuck * then that means that the page at offset 0x235000 in kernel_object is
129 1.47 chs * mapped at 0xf8235000.
130 1.7 chuck *
131 1.7 chuck * kernel object have one other special property: when the kernel virtual
132 1.7 chuck * memory mapping them is unmapped, the backing memory in the object is
133 1.7 chuck * freed right away. this is done with the uvm_km_pgremove() function.
134 1.7 chuck * this has to be done because there is no backing store for kernel pages
135 1.7 chuck * and no need to save them after they are no longer referenced.
136 1.7 chuck */
137 1.7 chuck
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>

#include <uvm/uvm.h>

/*
 * global data structures
 */

struct vm_map *kernel_map = NULL;

/*
 * local data structures
 */

static struct vm_map		kernel_map_store;

/*
 * uvm_km_init: init kernel maps and objects to reflect reality (i.e.
 * KVM already allocated for text, data, bss, and static data structures).
 *
 * => KVM is defined by VM_MIN_KERNEL_ADDRESS/VM_MAX_KERNEL_ADDRESS.
 *    we assume that [min -> start] has already been allocated and that
 *    "end" is the end.
 */

void
uvm_km_init(start, end)
	vaddr_t start, end;
{
	vaddr_t base = VM_MIN_KERNEL_ADDRESS;

	/*
	 * first, init kernel memory objects.
	 */

	/* kernel_object: for pageable anonymous kernel memory */
	uao_init();
	uvm.kernel_object = uao_create(VM_MAX_KERNEL_ADDRESS -
	    VM_MIN_KERNEL_ADDRESS, UAO_FLAG_KERNOBJ);

	/*
	 * init the map and reserve already allocated kernel space
	 * before installing.
	 */

	uvm_map_setup(&kernel_map_store, base, end, VM_MAP_PAGEABLE);
	kernel_map_store.pmap = pmap_kernel();
	if (uvm_map(&kernel_map_store, &base, start - base, NULL,
	    UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
	    UVM_INH_NONE, UVM_ADV_RANDOM, UVM_FLAG_FIXED)) != 0)
		panic("uvm_km_init: could not reserve space for kernel");

	/*
	 * install!
	 */

	kernel_map = &kernel_map_store;
}
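
/*
 * example: uvm_init() is expected to call this exactly once during
 * bootstrap, roughly (a sketch, not code from this file):
 *
 *	uvm_km_init(kvm_start, kvm_end);
 *
 * by which point the machine-dependent startup code has already mapped
 * [VM_MIN_KERNEL_ADDRESS, kvm_start) for text, data, bss, and the
 * statically allocated structures mentioned above.
 */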

/*
 * uvm_km_suballoc: allocate a submap in the kernel map.  once a submap
 * is allocated all references to that area of VM must go through it.  this
 * allows the locking of VAs in kernel_map to be broken up into regions.
 *
 * => if `fixed' is true, *min specifies where the region described
 *    by the submap must start
 * => if submap is non-NULL we use that as the submap, otherwise we
 *    alloc a new map
 */
struct vm_map *
uvm_km_suballoc(map, min, max, size, flags, fixed, submap)
	struct vm_map *map;
	vaddr_t *min, *max;		/* IN/OUT, OUT */
	vsize_t size;
	int flags;
	boolean_t fixed;
	struct vm_map *submap;
{
	int mapflags = UVM_FLAG_NOMERGE | (fixed ? UVM_FLAG_FIXED : 0);

	size = round_page(size);	/* round up to pagesize */

	/*
	 * first allocate a blank spot in the parent map
	 */

	if (uvm_map(map, min, size, NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, mapflags)) != 0) {
		panic("uvm_km_suballoc: unable to allocate space in parent map");
	}

	/*
	 * set VM bounds (min is filled in by uvm_map)
	 */

	*max = *min + size;

	/*
	 * add references to pmap and create or init the submap
	 */

	pmap_reference(vm_map_pmap(map));
	if (submap == NULL) {
		submap = uvm_map_create(vm_map_pmap(map), *min, *max, flags);
		if (submap == NULL)
			panic("uvm_km_suballoc: unable to create submap");
	} else {
		uvm_map_setup(submap, *min, *max, flags);
		submap->pmap = vm_map_pmap(map);
	}

	/*
	 * now let uvm_map_submap plug it in...
	 */

	if (uvm_map_submap(map, *min, *max, submap) != 0)
		panic("uvm_km_suballoc: submap allocation failed");

	return(submap);
}
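
/*
 * example: kern_malloc.c creates kmem_map at boot roughly like this
 * (a sketch, not code from this file):
 *
 *	kmem_map = uvm_km_suballoc(kernel_map, (vaddr_t *)&kmembase,
 *	    (vaddr_t *)&kmemlimit, (vsize_t)(nkmempages << PAGE_SHIFT),
 *	    VM_MAP_INTRSAFE, FALSE, &kmem_map_store);
 *
 * after this call all VA in [kmembase, kmemlimit) is managed (and
 * locked) by kmem_map rather than by kernel_map.
 */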

/*
 * uvm_km_pgremove: remove pages from a kernel uvm_object.
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.  (this gets called from uvm_unmap_...).
 */

void
uvm_km_pgremove(uobj, start, end)
	struct uvm_object *uobj;
	vaddr_t start, end;
{
	struct vm_page *pg;
	voff_t curoff, nextoff;
	int swpgonlydelta = 0;
	UVMHIST_FUNC("uvm_km_pgremove"); UVMHIST_CALLED(maphist);

	KASSERT(uobj->pgops == &aobj_pager);
	simple_lock(&uobj->vmobjlock);

	for (curoff = start; curoff < end; curoff = nextoff) {
		nextoff = curoff + PAGE_SIZE;
		pg = uvm_pagelookup(uobj, curoff);
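		/*
		 * if the page is busy (e.g. being paged out), mark it
		 * wanted and sleep until it is released, then rescan
		 * this offset from scratch.
		 */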
		if (pg != NULL && pg->flags & PG_BUSY) {
			pg->flags |= PG_WANTED;
			UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, 0,
			    "km_pgrm", 0);
			simple_lock(&uobj->vmobjlock);
			nextoff = curoff;
			continue;
		}

		/*
		 * free the swap slot, then the page.
		 */

		if (pg == NULL &&
		    uao_find_swslot(uobj, curoff >> PAGE_SHIFT) != 0) {
			swpgonlydelta++;
		}
		uao_dropswap(uobj, curoff >> PAGE_SHIFT);
		if (pg != NULL) {
			uvm_lock_pageq();
			uvm_pagefree(pg);
			uvm_unlock_pageq();
		}
	}
	simple_unlock(&uobj->vmobjlock);

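	/*
	 * any offset that had a swap slot but no resident page existed
	 * only in swap; now that uao_dropswap() has freed those slots,
	 * the global count of swap-only pages must drop to match.
	 */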
	if (swpgonlydelta > 0) {
		simple_lock(&uvm.swap_data_lock);
		KASSERT(uvmexp.swpgonly >= swpgonlydelta);
		uvmexp.swpgonly -= swpgonlydelta;
		simple_unlock(&uvm.swap_data_lock);
	}
}


/*
 * uvm_km_pgremove_intrsafe: like uvm_km_pgremove(), but for "intrsafe"
 *    maps
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.  (this is called from uvm_unmap_...).
 * => none of the pages will ever be busy, and none of them will ever
 *    be on the active or inactive queues (because they have no object).
 */

void
uvm_km_pgremove_intrsafe(start, end)
	vaddr_t start, end;
{
	struct vm_page *pg;
	paddr_t pa;
	UVMHIST_FUNC("uvm_km_pgremove_intrsafe"); UVMHIST_CALLED(maphist);

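	/*
	 * there is no object to search, so look each page up through
	 * the kernel pmap instead.  the caller is expected to
	 * pmap_kremove() the range only after this runs, since
	 * pmap_extract() needs the mappings still to be present.
	 */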
	for (; start < end; start += PAGE_SIZE) {
		if (!pmap_extract(pmap_kernel(), start, &pa)) {
			continue;
		}
		pg = PHYS_TO_VM_PAGE(pa);
		KASSERT(pg);
		KASSERT(pg->uobject == NULL && pg->uanon == NULL);
		uvm_pagefree(pg);
	}
}


/*
 * uvm_km_kmemalloc: lower level kernel memory allocator for malloc()
 *
 * => we map wired memory into the specified map using the obj passed in
 * => NOTE: we can return 0 even if we can wait if there is not enough
 *	free VM space in the map... caller should be prepared to handle
 *	this case.
 * => we return KVA of memory allocated
 * => flags: NOWAIT, VALLOC - just allocate VA, TRYLOCK - fail if we can't
 *	lock the map
 */

vaddr_t
uvm_km_kmemalloc(map, obj, size, flags)
	struct vm_map *map;
	struct uvm_object *obj;
	vsize_t size;
	int flags;
{
	vaddr_t kva, loopva;
	vaddr_t offset;
	vsize_t loopsize;
	struct vm_page *pg;
	UVMHIST_FUNC("uvm_km_kmemalloc"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist," (map=0x%x, obj=0x%x, size=0x%x, flags=%d)",
	    map, obj, size, flags);
	KASSERT(vm_map_pmap(map) == pmap_kernel());

	/*
	 * setup for call
	 */

	size = round_page(size);
	kva = vm_map_min(map);	/* hint */

	/*
	 * allocate some virtual space
	 */

	if (__predict_false(uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET,
	    0, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, (flags & UVM_KMF_TRYLOCK))) != 0)) {
		UVMHIST_LOG(maphist, "<- done (no VM)",0,0,0,0);
		return(0);
	}

	/*
	 * if all we wanted was VA, return now
	 */

	if (flags & UVM_KMF_VALLOC) {
		UVMHIST_LOG(maphist,"<- done valloc (kva=0x%x)", kva,0,0,0);
		return(kva);
	}

	/*
	 * recover object offset from virtual address
	 */

	offset = kva - vm_map_min(kernel_map);
	UVMHIST_LOG(maphist, " kva=0x%x, offset=0x%x", kva, offset,0,0);

	/*
	 * now allocate and map in the memory... note that we are the only ones
	 * who should ever get a handle on this area of VM.
	 */

	loopva = kva;
	loopsize = size;
	while (loopsize) {
		if (obj) {
			simple_lock(&obj->vmobjlock);
		}
		pg = uvm_pagealloc(obj, offset, NULL, UVM_PGA_USERESERVE);
		if (__predict_true(pg != NULL)) {
			pg->flags &= ~PG_BUSY;	/* new page */
			UVM_PAGE_OWN(pg, NULL);
		}
		if (obj) {
			simple_unlock(&obj->vmobjlock);
		}

		/*
		 * out of memory?
		 */

		if (__predict_false(pg == NULL)) {
			if (flags & UVM_KMF_NOWAIT) {
				/* free everything! */
				uvm_unmap(map, kva, kva + size);
				return(0);
			} else {
				uvm_wait("km_getwait2");	/* sleep here */
				continue;
			}
		}

		/*
		 * map it in
		 */

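		/*
		 * a NULL object means the caller is using an intrsafe
		 * map: enter the page directly with pmap_kenter_pa()
		 * (no PV tracking, torn down later with pmap_kremove()).
		 * otherwise do a normal wired pmap_enter() against the
		 * map's pmap.
		 */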
		if (obj == NULL) {
			pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
			    VM_PROT_ALL);
		} else {
			pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
			    UVM_PROT_ALL,
			    PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE);
		}
		loopva += PAGE_SIZE;
		offset += PAGE_SIZE;
		loopsize -= PAGE_SIZE;
	}

	pmap_update(pmap_kernel());

	UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
	return(kva);
}
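
/*
 * example: the kernel malloc() gets its wired pages through this
 * function, roughly (a sketch, not code from this file):
 *
 *	va = (vaddr_t) uvm_km_kmemalloc(kmem_map, NULL, (vsize_t)ctob(npg),
 *	    (flags & M_NOWAIT) ? UVM_KMF_NOWAIT : 0);
 *
 * note the NULL object: kmem_map is an intrsafe map, so its pages are
 * not assigned to any uvm_object (see the overview at the top).
 */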

/*
 * uvm_km_free: free an area of kernel memory
 */

void
uvm_km_free(map, addr, size)
	struct vm_map *map;
	vaddr_t addr;
	vsize_t size;
{
	uvm_unmap(map, trunc_page(addr), round_page(addr+size));
}

/*
 * uvm_km_free_wakeup: free an area of kernel memory and wake up
 * anyone waiting for vm space.
 *
 * => XXX: "wanted" bit + unlock&wait on other end?
 */

void
uvm_km_free_wakeup(map, addr, size)
	struct vm_map *map;
	vaddr_t addr;
	vsize_t size;
{
	struct vm_map_entry *dead_entries;

	vm_map_lock(map);
	uvm_unmap_remove(map, trunc_page(addr), round_page(addr + size),
	    &dead_entries);
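	/*
	 * wake anyone sleeping on the map address, e.g. in
	 * uvm_km_valloc_prefer_wait() below, which tsleep()s on the
	 * map while waiting for VA to be freed.
	 */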
	wakeup(map);
	vm_map_unlock(map);
	if (dead_entries != NULL)
		uvm_unmap_detach(dead_entries, 0);
}

/*
 * uvm_km_alloc1: allocate wired down memory in the kernel map.
 *
 * => we can sleep if needed
 */

vaddr_t
uvm_km_alloc1(map, size, zeroit)
	struct vm_map *map;
	vsize_t size;
	boolean_t zeroit;
{
	vaddr_t kva, loopva, offset;
	struct vm_page *pg;
	UVMHIST_FUNC("uvm_km_alloc1"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(map=0x%x, size=0x%x)", map, size,0,0);
	KASSERT(vm_map_pmap(map) == pmap_kernel());

	size = round_page(size);
	kva = vm_map_min(map);		/* hint */

	/*
	 * allocate some virtual space
	 */

	if (__predict_false(uvm_map(map, &kva, size, uvm.kernel_object,
	    UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
	    UVM_INH_NONE, UVM_ADV_RANDOM, 0)) != 0)) {
		UVMHIST_LOG(maphist,"<- done (no VM)",0,0,0,0);
		return(0);
	}

	/*
	 * recover object offset from virtual address
	 */

	offset = kva - vm_map_min(kernel_map);
	UVMHIST_LOG(maphist," kva=0x%x, offset=0x%x", kva, offset,0,0);

	/*
	 * now allocate the memory.
	 */

	loopva = kva;
	while (size) {
		simple_lock(&uvm.kernel_object->vmobjlock);
		KASSERT(uvm_pagelookup(uvm.kernel_object, offset) == NULL);
		pg = uvm_pagealloc(uvm.kernel_object, offset, NULL, 0);
		if (pg) {
			pg->flags &= ~PG_BUSY;
			UVM_PAGE_OWN(pg, NULL);
		}
		simple_unlock(&uvm.kernel_object->vmobjlock);
		if (pg == NULL) {
			uvm_wait("km_alloc1w");
			continue;
		}
		pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
		    UVM_PROT_ALL, PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE);
		loopva += PAGE_SIZE;
		offset += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	pmap_update(map->pmap);

	/*
	 * zero on request (note that "size" is now zero due to the above loop
	 * so we need to subtract kva from loopva to reconstruct the size).
	 */

	if (zeroit)
		memset((caddr_t)kva, 0, loopva - kva);
	UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
	return(kva);
}

/*
 * uvm_km_valloc: allocate zero-fill memory in the kernel's address space
 *
 * => memory is not allocated until fault time
 */

vaddr_t
uvm_km_valloc(map, size)
	struct vm_map *map;
	vsize_t size;
{
	return(uvm_km_valloc_align(map, size, 0));
}

vaddr_t
uvm_km_valloc_align(map, size, align)
	struct vm_map *map;
	vsize_t size;
	vsize_t align;
{
	vaddr_t kva;
	UVMHIST_FUNC("uvm_km_valloc"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x)", map, size, 0,0);
	KASSERT(vm_map_pmap(map) == pmap_kernel());

	size = round_page(size);
	kva = vm_map_min(map);		/* hint */

	/*
	 * allocate some virtual space.  will be demand filled by kernel_object.
	 */

	if (__predict_false(uvm_map(map, &kva, size, uvm.kernel_object,
	    UVM_UNKNOWN_OFFSET, align, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
	    UVM_INH_NONE, UVM_ADV_RANDOM, 0)) != 0)) {
		UVMHIST_LOG(maphist, "<- done (no VM)", 0,0,0,0);
		return(0);
	}

	UVMHIST_LOG(maphist, "<- done (kva=0x%x)", kva,0,0,0);
	return(kva);
}

/*
 * uvm_km_valloc_wait: allocate zero-fill memory in the kernel's address space
 *
 * => memory is not allocated until fault time
 * => if no room in map, wait for space to free, unless requested size
 *    is larger than map (in which case we return 0)
 */

vaddr_t
uvm_km_valloc_prefer_wait(map, size, prefer)
	struct vm_map *map;
	vsize_t size;
	voff_t prefer;
{
	vaddr_t kva;
	UVMHIST_FUNC("uvm_km_valloc_prefer_wait"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x)", map, size, 0,0);
	KASSERT(vm_map_pmap(map) == pmap_kernel());

	size = round_page(size);
	if (size > vm_map_max(map) - vm_map_min(map))
		return(0);

	for (;;) {
		kva = vm_map_min(map);		/* hint */

		/*
		 * allocate some virtual space.  will be demand filled
		 * by kernel_object.
		 */

		if (__predict_true(uvm_map(map, &kva, size, uvm.kernel_object,
		    prefer, 0, UVM_MAPFLAG(UVM_PROT_ALL,
		    UVM_PROT_ALL, UVM_INH_NONE, UVM_ADV_RANDOM, 0))
		    == 0)) {
			UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
			return(kva);
		}

		/*
		 * failed.  sleep for a while (on map)
		 */

		UVMHIST_LOG(maphist,"<<<sleeping>>>",0,0,0,0);
		tsleep((caddr_t)map, PVM, "vallocwait", 0);
	}
	/*NOTREACHED*/
}

vaddr_t
uvm_km_valloc_wait(map, size)
	struct vm_map *map;
	vsize_t size;
{
	return uvm_km_valloc_prefer_wait(map, size, UVM_UNKNOWN_OFFSET);
}
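
/*
 * example: exec argument handling allocates its scratch space roughly
 * as (a sketch, not code from this file):
 *
 *	argp = (char *) uvm_km_valloc_wait(exec_map, NCARGS);
 *
 * the space is demand-filled from kernel_object at fault time, and the
 * call sleeps (woken by uvm_km_free_wakeup) if exec_map is temporarily
 * out of VA.
 */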

/* Sanity; must specify both or none. */
#if (defined(PMAP_MAP_POOLPAGE) || defined(PMAP_UNMAP_POOLPAGE)) && \
    (!defined(PMAP_MAP_POOLPAGE) || !defined(PMAP_UNMAP_POOLPAGE))
#error Must specify MAP and UNMAP together.
#endif
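
/*
 * on ports that define these hooks, PMAP_MAP_POOLPAGE typically hands
 * back a direct-mapped VA for the physical page (e.g. via a
 * direct-mapped segment), so no kernel_map VA, map locking, or
 * pmap_enter() is needed and the operation is cheap and safe at
 * interrupt time.
 */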

/*
 * uvm_km_alloc_poolpage: allocate a page for the pool allocator
 *
 * => if the pmap specifies an alternate mapping method, we use it.
 */

/* ARGSUSED */
vaddr_t
uvm_km_alloc_poolpage1(map, obj, waitok)
	struct vm_map *map;
	struct uvm_object *obj;
	boolean_t waitok;
{
#if defined(PMAP_MAP_POOLPAGE)
	struct vm_page *pg;
	vaddr_t va;

 again:
	pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
	if (__predict_false(pg == NULL)) {
		if (waitok) {
			uvm_wait("plpg");
			goto again;
		} else
			return (0);
	}
	va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
	if (__predict_false(va == 0))
		uvm_pagefree(pg);
	return (va);
#else
	vaddr_t va;
	int s;

	/*
	 * NOTE: We may be called with a map that doesn't require splvm
	 * protection (e.g. kernel_map).  However, it does not hurt to
	 * go to splvm in this case (since unprotected maps will never be
	 * accessed in interrupt context).
	 *
	 * XXX We may want to consider changing the interface to this
	 * XXX function.
	 */

	s = splvm();
	va = uvm_km_kmemalloc(map, obj, PAGE_SIZE, waitok ? 0 : UVM_KMF_NOWAIT);
	splx(s);
	return (va);
#endif /* PMAP_MAP_POOLPAGE */
}
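
/*
 * callers normally reach this through wrapper macros in uvm_extern.h,
 * roughly (a sketch, not code from this file):
 *
 *	#define uvm_km_alloc_poolpage(waitok)				\
 *		uvm_km_alloc_poolpage1(kmem_map, NULL, (waitok))
 *	#define uvm_km_free_poolpage(addr)				\
 *		uvm_km_free_poolpage1(kmem_map, (addr))
 */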

/*
 * uvm_km_free_poolpage: free a previously allocated pool page
 *
 * => if the pmap specifies an alternate unmapping method, we use it.
 */

/* ARGSUSED */
void
uvm_km_free_poolpage1(map, addr)
	struct vm_map *map;
	vaddr_t addr;
{
#if defined(PMAP_UNMAP_POOLPAGE)
	paddr_t pa;

	pa = PMAP_UNMAP_POOLPAGE(addr);
	uvm_pagefree(PHYS_TO_VM_PAGE(pa));
#else
	int s;

	/*
	 * NOTE: We may be called with a map that doesn't require splvm
	 * protection (e.g. kernel_map).  However, it does not hurt to
	 * go to splvm in this case (since unprotected maps will never be
	 * accessed in interrupt context).
	 *
	 * XXX We may want to consider changing the interface to this
	 * XXX function.
	 */

	s = splvm();
	uvm_km_free(map, addr, PAGE_SIZE);
	splx(s);
#endif /* PMAP_UNMAP_POOLPAGE */
}