/*	$NetBSD: uvm_km.c,v 1.62.2.4 2005/01/17 19:33:11 skrll Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *	Washington University, the University of California, Berkeley and
 *	its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)vm_kern.c	8.3 (Berkeley) 1/12/94
 * from: Id: uvm_km.c,v 1.1.2.14 1998/02/06 05:19:27 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_km.c: handle kernel memory allocation and management
 */

/*
 * overview of kernel memory management:
 *
 * the kernel virtual address space is mapped by "kernel_map."  kernel_map
 * starts at VM_MIN_KERNEL_ADDRESS and goes to VM_MAX_KERNEL_ADDRESS.
 * note that VM_MIN_KERNEL_ADDRESS is equal to vm_map_min(kernel_map).
 *
 * the kernel_map has several "submaps."  submaps can only appear in
 * the kernel_map (user processes can't use them).  submaps "take over"
 * the management of a sub-range of the kernel's address space.  submaps
 * are typically allocated at boot time and are never released.  kernel
 * virtual address space that is mapped by a submap is locked by the
 * submap's lock -- not the kernel_map's lock.
 *
 * thus, the useful feature of submaps is that they allow us to break
 * up the locking and protection of the kernel address space into smaller
 * chunks.
 *
 * the vm system has several standard kernel submaps, including:
 *   kmem_map => contains only wired kernel memory for the kernel
 *		malloc.  *** access to kmem_map must be protected
 *		by splvm() because we are allowed to call malloc()
 *		at interrupt time ***
 *   mb_map => memory for large mbufs,  *** protected by splvm ***
 *   pager_map => used to map "buf" structures into kernel space
 *   exec_map => used during exec to handle exec args
 *   etc...
 *
 * the kernel allocates its private memory out of special uvm_objects whose
 * reference count is set to UVM_OBJ_KERN (thus indicating that the objects
 * are "special" and never die).  all kernel objects should be thought of
 * as large, fixed-sized, sparsely populated uvm_objects.  the size of
 * each kernel object is equal to the size of the kernel virtual address
 * space (i.e. the value "VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS").
 *
 * most kernel private memory lives in kernel_object.  the only exception
 * to this is for memory that belongs to submaps that must be protected
 * by splvm().  pages in these submaps are not assigned to an object.
 *
 * note that just because a kernel object spans the entire kernel virtual
 * address space doesn't mean that it has to be mapped into the entire space.
 * large chunks of a kernel object's space go unused either because
 * that area of kernel VM is unmapped, or there is some other type of
 * object mapped into that range (e.g. a vnode).  for submaps' kernel
 * objects, the only part of the object that can ever be populated is the
 * offsets that are managed by the submap.
 *
 * note that the "offset" in a kernel object is always the kernel virtual
 * address minus the VM_MIN_KERNEL_ADDRESS (aka vm_map_min(kernel_map)).
 * example:
 *   suppose VM_MIN_KERNEL_ADDRESS is 0xf8000000 and the kernel does a
 *   uvm_km_alloc(kernel_map, PAGE_SIZE) [allocate 1 wired down page in the
 *   kernel map].  if uvm_km_alloc returns virtual address 0xf8235000,
 *   then that means that the page at offset 0x235000 in kernel_object is
 *   mapped at 0xf8235000.
 *
 * kernel objects have one other special property: when the kernel virtual
 * memory mapping them is unmapped, the backing memory in the object is
 * freed right away.  this is done with the uvm_km_pgremove() function.
 * this has to be done because there is no backing store for kernel pages
 * and no need to save them after they are no longer referenced.
 */
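
/*
 * the offset rule above, expressed as code (an illustrative sketch
 * only, not a function defined in this file; "kva" is assumed to be
 * an address within kernel_map):
 *
 *	voff_t offset;
 *
 *	offset = kva - vm_map_min(kernel_map);
 *
 * this is exactly the computation done by uvm_km_kmemalloc1() and
 * uvm_km_alloc1() below to recover a kernel_object offset from a
 * virtual address.
 */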

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.62.2.4 2005/01/17 19:33:11 skrll Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/pool.h>

#include <uvm/uvm.h>

/*
 * global data structures
 */

struct vm_map *kernel_map = NULL;

/*
 * local data structures
 */

static struct vm_map_kernel	kernel_map_store;
static struct vm_map_entry	kernel_first_mapent_store;

#if !defined(PMAP_MAP_POOLPAGE)

/*
 * kva cache
 *
 * XXX maybe it's better to do this at the uvm_map layer.
 */

#define	KM_VACACHE_SIZE	(32 * PAGE_SIZE) /* XXX tune */

static void *km_vacache_alloc(struct pool *, int);
static void km_vacache_free(struct pool *, void *);
static void km_vacache_init(struct vm_map *, const char *, size_t);

/* XXX */
#define	KM_VACACHE_POOL_TO_MAP(pp) \
	((struct vm_map *)((char *)(pp) - \
	offsetof(struct vm_map_kernel, vmk_vacache)))
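
/*
 * KM_VACACHE_POOL_TO_MAP (above) recovers the owning vm_map from its
 * embedded pool by subtracting the offset of vmk_vacache within
 * struct vm_map_kernel -- the usual "container_of" pointer arithmetic.
 * it is only valid for the pools set up by km_vacache_init() below.
 */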

/*
 * km_vacache_alloc: allocation routine for the kva cache pool
 * (allocates "pa_pagesz" bytes of kva from the owning map, with no
 * backing pages).
 */

static void *
km_vacache_alloc(struct pool *pp, int flags)
{
	vaddr_t va;
	size_t size;
	struct vm_map *map;
#if defined(DEBUG)
	vaddr_t loopva;
#endif
	size = pp->pr_alloc->pa_pagesz;

	map = KM_VACACHE_POOL_TO_MAP(pp);

	va = vm_map_min(map); /* hint */
	if (uvm_map(map, &va, size, NULL, UVM_UNKNOWN_OFFSET, size,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, UVM_FLAG_QUANTUM |
	    ((flags & PR_WAITOK) ? 0 : UVM_FLAG_TRYLOCK | UVM_FLAG_NOWAIT))))
		return NULL;

#if defined(DEBUG)
	for (loopva = va; loopva < va + size; loopva += PAGE_SIZE) {
		if (pmap_extract(pmap_kernel(), loopva, NULL))
			panic("km_vacache_alloc: has mapping");
	}
#endif

	return (void *)va;
}

/*
 * km_vacache_free: free routine for the kva cache pool.
 */

static void
km_vacache_free(struct pool *pp, void *v)
{
	vaddr_t va = (vaddr_t)v;
	size_t size = pp->pr_alloc->pa_pagesz;
	struct vm_map *map;
#if defined(DEBUG)
	vaddr_t loopva;

	for (loopva = va; loopva < va + size; loopva += PAGE_SIZE) {
		if (pmap_extract(pmap_kernel(), loopva, NULL))
			panic("km_vacache_free: has mapping");
	}
#endif
	map = KM_VACACHE_POOL_TO_MAP(pp);
	uvm_unmap1(map, va, va + size, UVM_FLAG_QUANTUM);
}

/*
 * km_vacache_init: initialize kva cache.
 */

static void
km_vacache_init(struct vm_map *map, const char *name, size_t size)
{
	struct vm_map_kernel *vmk;
	struct pool *pp;
	struct pool_allocator *pa;

	KASSERT(VM_MAP_IS_KERNEL(map));
	KASSERT(size < (vm_map_max(map) - vm_map_min(map)) / 2); /* sanity */

	vmk = vm_map_to_kernel(map);
	pp = &vmk->vmk_vacache;
	pa = &vmk->vmk_vacache_allocator;
	memset(pa, 0, sizeof(*pa));
	pa->pa_alloc = km_vacache_alloc;
	pa->pa_free = km_vacache_free;
	pa->pa_pagesz = (unsigned int)size;
	pool_init(pp, PAGE_SIZE, 0, 0, PR_NOTOUCH | PR_RECURSIVE, name, pa);

	/* XXX for now.. */
	pool_sethiwat(pp, 0);
}

void
uvm_km_vacache_init(struct vm_map *map, const char *name, size_t size)
{

	map->flags |= VM_MAP_VACACHE;
	if (size == 0)
		size = KM_VACACHE_SIZE;
	km_vacache_init(map, name, size);
}

#else /* !defined(PMAP_MAP_POOLPAGE) */

void
uvm_km_vacache_init(struct vm_map *map, const char *name, size_t size)
{

	/* nothing */
}

#endif /* !defined(PMAP_MAP_POOLPAGE) */
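
/*
 * note: when the pmap provides PMAP_MAP_POOLPAGE, pool pages are mapped
 * directly by the pmap (see uvm_km_alloc_poolpage1() below) and no kva
 * cache is needed, which is why uvm_km_vacache_init() compiles to a
 * no-op in that case.
 */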

/*
 * uvm_km_init: init kernel maps and objects to reflect reality (i.e.
 * KVM already allocated for text, data, bss, and static data structures).
 *
 * => KVM is defined by VM_MIN_KERNEL_ADDRESS/VM_MAX_KERNEL_ADDRESS.
 *    we assume that [min -> start] has already been allocated and that
 *    "end" is the end.
 */

void
uvm_km_init(start, end)
	vaddr_t start, end;
{
	vaddr_t base = VM_MIN_KERNEL_ADDRESS;

	/*
	 * first, init kernel memory objects.
	 */

	/* kernel_object: for pageable anonymous kernel memory */
	uao_init();
	uvm.kernel_object = uao_create(VM_MAX_KERNEL_ADDRESS -
	    VM_MIN_KERNEL_ADDRESS, UAO_FLAG_KERNOBJ);

	/*
	 * init the map and reserve any kernel space that has already
	 * been allocated before installing.
	 */

	uvm_map_setup_kernel(&kernel_map_store, base, end, VM_MAP_PAGEABLE);
	kernel_map_store.vmk_map.pmap = pmap_kernel();
	if (start != base) {
		int error;
		struct uvm_map_args args;

		error = uvm_map_prepare(&kernel_map_store.vmk_map,
		    base, start - base,
		    NULL, UVM_UNKNOWN_OFFSET, 0,
		    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
		    UVM_ADV_RANDOM, UVM_FLAG_FIXED), &args);
		if (!error) {
			kernel_first_mapent_store.flags =
			    UVM_MAP_KERNEL | UVM_MAP_FIRST;
			error = uvm_map_enter(&kernel_map_store.vmk_map, &args,
			    &kernel_first_mapent_store);
		}

		if (error)
			panic(
			    "uvm_km_init: could not reserve space for kernel");
	}

	/*
	 * install!
	 */

	kernel_map = &kernel_map_store.vmk_map;
	uvm_km_vacache_init(kernel_map, "kvakernel", 0);
}

/*
 * uvm_km_suballoc: allocate a submap in the kernel map.  once a submap
 * is allocated all references to that area of VM must go through it.  this
 * allows the locking of VAs in kernel_map to be broken up into regions.
 *
 * => if `fixed' is true, *min specifies where the region described
 *	by the submap must start
 * => if submap is non NULL we use that as the submap, otherwise we
 *	alloc a new map
 */
struct vm_map *
uvm_km_suballoc(map, min, max, size, flags, fixed, submap)
	struct vm_map *map;
	vaddr_t *min, *max;		/* IN/OUT, OUT */
	vsize_t size;
	int flags;
	boolean_t fixed;
	struct vm_map_kernel *submap;
{
	int mapflags = UVM_FLAG_NOMERGE | (fixed ? UVM_FLAG_FIXED : 0);

	KASSERT(vm_map_pmap(map) == pmap_kernel());

	size = round_page(size);	/* round up to pagesize */

	/*
	 * first allocate a blank spot in the parent map
	 */

	if (uvm_map(map, min, size, NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, mapflags)) != 0) {
		panic("uvm_km_suballoc: unable to allocate space in parent map");
	}

	/*
	 * set VM bounds (min is filled in by uvm_map)
	 */

	*max = *min + size;

	/*
	 * add references to pmap and create or init the submap
	 */

	pmap_reference(vm_map_pmap(map));
	if (submap == NULL) {
		submap = malloc(sizeof(*submap), M_VMMAP, M_WAITOK);
		if (submap == NULL)
			panic("uvm_km_suballoc: unable to create submap");
	}
	uvm_map_setup_kernel(submap, *min, *max, flags);
	submap->vmk_map.pmap = vm_map_pmap(map);

	/*
	 * now let uvm_map_submap plug it in...
	 */

	if (uvm_map_submap(map, *min, *max, &submap->vmk_map) != 0)
		panic("uvm_km_suballoc: submap allocation failed");

	return(&submap->vmk_map);
}
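
/*
 * a typical call (illustrative sketch only; SUBMAP_SIZE is a made-up
 * name, not something defined in this file), creating a pageable
 * submap and letting uvm_km_suballoc() allocate the struct
 * vm_map_kernel itself:
 *
 *	vaddr_t sminva, smaxva;
 *	struct vm_map *sm;
 *
 *	sm = uvm_km_suballoc(kernel_map, &sminva, &smaxva,
 *	    SUBMAP_SIZE, VM_MAP_PAGEABLE, FALSE, NULL);
 *
 * afterwards the range [sminva, smaxva) is locked by sm's lock rather
 * than kernel_map's.
 */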

/*
 * uvm_km_pgremove: remove pages from a kernel uvm_object.
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.  (this gets called from uvm_unmap_...).
 */

void
uvm_km_pgremove(uobj, start, end)
	struct uvm_object *uobj;
	vaddr_t start, end;
{
	struct vm_page *pg;
	voff_t curoff, nextoff;
	int swpgonlydelta = 0;
	UVMHIST_FUNC("uvm_km_pgremove"); UVMHIST_CALLED(maphist);

	KASSERT(uobj->pgops == &aobj_pager);
	simple_lock(&uobj->vmobjlock);

	for (curoff = start; curoff < end; curoff = nextoff) {
		nextoff = curoff + PAGE_SIZE;
		pg = uvm_pagelookup(uobj, curoff);
		if (pg != NULL && pg->flags & PG_BUSY) {
			pg->flags |= PG_WANTED;
			UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, 0,
			    "km_pgrm", 0);
			simple_lock(&uobj->vmobjlock);
			nextoff = curoff;
			continue;
		}

		/*
		 * free the swap slot, then the page.
		 */

		if (pg == NULL &&
		    uao_find_swslot(uobj, curoff >> PAGE_SHIFT) > 0) {
			swpgonlydelta++;
		}
		uao_dropswap(uobj, curoff >> PAGE_SHIFT);
		if (pg != NULL) {
			uvm_lock_pageq();
			uvm_pagefree(pg);
			uvm_unlock_pageq();
		}
	}
	simple_unlock(&uobj->vmobjlock);

	if (swpgonlydelta > 0) {
		simple_lock(&uvm.swap_data_lock);
		KASSERT(uvmexp.swpgonly >= swpgonlydelta);
		uvmexp.swpgonly -= swpgonlydelta;
		simple_unlock(&uvm.swap_data_lock);
	}
}


/*
 * uvm_km_pgremove_intrsafe: like uvm_km_pgremove(), but for "intrsafe"
 * maps
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.  (this is called from uvm_unmap_...).
 * => none of the pages will ever be busy, and none of them will ever
 *    be on the active or inactive queues (because they have no object).
 */

void
uvm_km_pgremove_intrsafe(start, end)
	vaddr_t start, end;
{
	struct vm_page *pg;
	paddr_t pa;
	UVMHIST_FUNC("uvm_km_pgremove_intrsafe"); UVMHIST_CALLED(maphist);

	for (; start < end; start += PAGE_SIZE) {
		if (!pmap_extract(pmap_kernel(), start, &pa)) {
			continue;
		}
		pg = PHYS_TO_VM_PAGE(pa);
		KASSERT(pg);
		KASSERT(pg->uobject == NULL && pg->uanon == NULL);
		uvm_pagefree(pg);
	}
}


/*
 * uvm_km_kmemalloc: lower level kernel memory allocator for malloc()
 *
 * => we map wired memory into the specified map using the obj passed in
 * => NOTE: we can return NULL even if we can wait if there is not enough
 *	free VM space in the map... caller should be prepared to handle
 *	this case.
 * => we return KVA of memory allocated
 * => align,prefer - passed on to uvm_map()
 * => flags: NOWAIT, VALLOC - just allocate VA, TRYLOCK - fail if we can't
 *	lock the map
 */

vaddr_t
uvm_km_kmemalloc1(map, obj, size, align, prefer, flags)
	struct vm_map *map;
	struct uvm_object *obj;
	vsize_t size;
	vsize_t align;
	voff_t prefer;
	int flags;
{
	vaddr_t kva, loopva;
	vaddr_t offset;
	vsize_t loopsize;
	struct vm_page *pg;
	UVMHIST_FUNC("uvm_km_kmemalloc"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"  (map=0x%x, obj=0x%x, size=0x%x, flags=%d)",
	    map, obj, size, flags);
	KASSERT(vm_map_pmap(map) == pmap_kernel());

	/*
	 * setup for call
	 */

	size = round_page(size);
	kva = vm_map_min(map);	/* hint */

	/*
	 * allocate some virtual space
	 */

	if (__predict_false(uvm_map(map, &kva, size, obj, prefer, align,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM,
	    (flags & (UVM_KMF_TRYLOCK | UVM_KMF_NOWAIT))
	    | UVM_FLAG_QUANTUM))
	    != 0)) {
		UVMHIST_LOG(maphist, "<- done (no VM)",0,0,0,0);
		return(0);
	}

	/*
	 * if all we wanted was VA, return now
	 */

	if (flags & UVM_KMF_VALLOC) {
		UVMHIST_LOG(maphist,"<- done valloc (kva=0x%x)", kva,0,0,0);
		return(kva);
	}

	/*
	 * recover object offset from virtual address
	 */

	offset = kva - vm_map_min(kernel_map);
	UVMHIST_LOG(maphist, "  kva=0x%x, offset=0x%x", kva, offset,0,0);

	/*
	 * now allocate and map in the memory... note that we are the only ones
	 * who should ever get a handle on this area of VM.
	 */

	loopva = kva;
	loopsize = size;
	while (loopsize) {
		if (obj) {
			simple_lock(&obj->vmobjlock);
		}
		pg = uvm_pagealloc(obj, offset, NULL, UVM_PGA_USERESERVE);
		if (__predict_true(pg != NULL)) {
			pg->flags &= ~PG_BUSY;	/* new page */
			UVM_PAGE_OWN(pg, NULL);
		}
		if (obj) {
			simple_unlock(&obj->vmobjlock);
		}

		/*
		 * out of memory?
		 */

		if (__predict_false(pg == NULL)) {
			if ((flags & UVM_KMF_NOWAIT) ||
			    ((flags & UVM_KMF_CANFAIL) && uvm_swapisfull())) {
				/* free everything! */
				uvm_unmap1(map, kva, kva + size,
				    UVM_FLAG_QUANTUM);
				return (0);
			} else {
				uvm_wait("km_getwait2");	/* sleep here */
				continue;
			}
		}

		/*
		 * map it in
		 */

		if (obj == NULL) {
			pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
			    VM_PROT_READ | VM_PROT_WRITE);
		} else {
			pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
			    UVM_PROT_ALL,
			    PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE);
		}
		loopva += PAGE_SIZE;
		offset += PAGE_SIZE;
		loopsize -= PAGE_SIZE;
	}

	pmap_update(pmap_kernel());

	UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
	return(kva);
}
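
/*
 * example (illustrative sketch only; "n" stands for some page count
 * and is not defined here): wired kernel memory is normally obtained
 * through the compatibility wrapper defined near the end of this
 * file, e.g.
 *
 *	va = uvm_km_kmemalloc(kmem_map, NULL, n * PAGE_SIZE, UVM_KMF_NOWAIT);
 *
 * which forwards to uvm_km_kmemalloc1() with align 0 and
 * UVM_UNKNOWN_OFFSET.  passing a NULL obj matches the kmem_map usage
 * described in the overview: pages in such submaps are not assigned
 * to an object and are entered with pmap_kenter_pa().
 */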

/*
 * uvm_km_free: free an area of kernel memory
 */

void
uvm_km_free(map, addr, size)
	struct vm_map *map;
	vaddr_t addr;
	vsize_t size;
{
	uvm_unmap1(map, trunc_page(addr), round_page(addr+size),
	    UVM_FLAG_QUANTUM);
}

/*
 * uvm_km_alloc1: allocate wired down memory in the kernel map.
 *
 * => we can sleep if needed
 */

vaddr_t
uvm_km_alloc1(map, size, zeroit)
	struct vm_map *map;
	vsize_t size;
	boolean_t zeroit;
{
	vaddr_t kva, loopva, offset;
	struct vm_page *pg;
	UVMHIST_FUNC("uvm_km_alloc1"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(map=0x%x, size=0x%x)", map, size,0,0);
	KASSERT(vm_map_pmap(map) == pmap_kernel());

	size = round_page(size);
	kva = vm_map_min(map);		/* hint */

	/*
	 * allocate some virtual space
	 */

	if (__predict_false(uvm_map(map, &kva, size, uvm.kernel_object,
	    UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
	    UVM_INH_NONE, UVM_ADV_RANDOM,
	    UVM_FLAG_QUANTUM)) != 0)) {
		UVMHIST_LOG(maphist,"<- done (no VM)",0,0,0,0);
		return(0);
	}

	/*
	 * recover object offset from virtual address
	 */

	offset = kva - vm_map_min(kernel_map);
	UVMHIST_LOG(maphist,"  kva=0x%x, offset=0x%x", kva, offset,0,0);

	/*
	 * now allocate the memory.
	 */

	loopva = kva;
	while (size) {
		simple_lock(&uvm.kernel_object->vmobjlock);
		KASSERT(uvm_pagelookup(uvm.kernel_object, offset) == NULL);
		pg = uvm_pagealloc(uvm.kernel_object, offset, NULL, 0);
		if (pg) {
			pg->flags &= ~PG_BUSY;
			UVM_PAGE_OWN(pg, NULL);
		}
		simple_unlock(&uvm.kernel_object->vmobjlock);
		if (pg == NULL) {
			uvm_wait("km_alloc1w");
			continue;
		}
		pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
		    UVM_PROT_ALL, PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE);
		loopva += PAGE_SIZE;
		offset += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	pmap_update(map->pmap);

	/*
	 * zero on request (note that "size" is now zero due to the above loop
	 * so we need to subtract kva from loopva to reconstruct the size).
	 */

	if (zeroit)
		memset((caddr_t)kva, 0, loopva - kva);
	UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
	return(kva);
}
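
/*
 * typical (hypothetical) use: allocate one zeroed, wired page in the
 * kernel map:
 *
 *	vaddr_t va = uvm_km_alloc1(kernel_map, PAGE_SIZE, TRUE);
 *
 * the backing pages live in uvm.kernel_object, so a later
 * uvm_km_free() of the range frees them via uvm_km_pgremove().
 */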

/*
 * uvm_km_valloc1: allocate zero-fill memory in the kernel's address space
 *
 * => memory is not allocated until fault time
 * => the align, prefer and flags parameters are passed on to uvm_map().
 *
 * Note: this function is also the backend for these macros:
 *	uvm_km_valloc
 *	uvm_km_valloc_wait
 *	uvm_km_valloc_prefer
 *	uvm_km_valloc_prefer_wait
 *	uvm_km_valloc_align
 */

vaddr_t
uvm_km_valloc1(map, size, align, prefer, flags)
	struct vm_map *map;
	vsize_t size;
	vsize_t align;
	voff_t prefer;
	uvm_flag_t flags;
{
	vaddr_t kva;
	int error;
	UVMHIST_FUNC("uvm_km_valloc1"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x, align=0x%x, prefer=0x%x)",
	    map, size, align, prefer);

	KASSERT(vm_map_pmap(map) == pmap_kernel());

	size = round_page(size);
	/*
	 * Check if requested size is larger than the map, in which
	 * case we can't succeed.
	 */
	if (size > vm_map_max(map) - vm_map_min(map))
		return (0);

	flags |= UVM_FLAG_QUANTUM;
	if ((flags & UVM_KMF_NOWAIT) == 0) /* XXX */
		flags |= UVM_FLAG_WAITVA;  /* XXX */

	kva = vm_map_min(map);		/* hint */

	/*
	 * allocate some virtual space.  will be demand filled
	 * by kernel_object.
	 */

	error = uvm_map(map, &kva, size, uvm.kernel_object,
	    prefer, align, UVM_MAPFLAG(UVM_PROT_ALL,
	    UVM_PROT_ALL, UVM_INH_NONE, UVM_ADV_RANDOM, flags));

	KASSERT(error == 0 || (flags & UVM_KMF_NOWAIT) != 0);

	UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);

	return (kva);
}

/* Function definitions for binary compatibility */
vaddr_t
uvm_km_kmemalloc(struct vm_map *map, struct uvm_object *obj,
    vsize_t sz, int flags)
{
	return uvm_km_kmemalloc1(map, obj, sz, 0, UVM_UNKNOWN_OFFSET, flags);
}

vaddr_t
uvm_km_valloc(struct vm_map *map, vsize_t sz)
{
	return uvm_km_valloc1(map, sz, 0, UVM_UNKNOWN_OFFSET, UVM_KMF_NOWAIT);
}

vaddr_t
uvm_km_valloc_align(struct vm_map *map, vsize_t sz, vsize_t align)
{
	return uvm_km_valloc1(map, sz, align, UVM_UNKNOWN_OFFSET,
	    UVM_KMF_NOWAIT);
}

vaddr_t
uvm_km_valloc_prefer_wait(struct vm_map *map, vsize_t sz, voff_t prefer)
{
	return uvm_km_valloc1(map, sz, 0, prefer, 0);
}

vaddr_t
uvm_km_valloc_wait(struct vm_map *map, vsize_t sz)
{
	return uvm_km_valloc1(map, sz, 0, UVM_UNKNOWN_OFFSET, 0);
}

/* Sanity; must specify both or none. */
#if (defined(PMAP_MAP_POOLPAGE) || defined(PMAP_UNMAP_POOLPAGE)) && \
    (!defined(PMAP_MAP_POOLPAGE) || !defined(PMAP_UNMAP_POOLPAGE))
#error Must specify MAP and UNMAP together.
#endif

/*
 * uvm_km_alloc_poolpage: allocate a page for the pool allocator
 *
 * => if the pmap specifies an alternate mapping method, we use it.
 */

/* ARGSUSED */
vaddr_t
uvm_km_alloc_poolpage_cache(map, obj, waitok)
	struct vm_map *map;
	struct uvm_object *obj;
	boolean_t waitok;
{
#if defined(PMAP_MAP_POOLPAGE)
	return uvm_km_alloc_poolpage1(map, obj, waitok);
#else
	struct vm_page *pg;
	struct pool *pp = &vm_map_to_kernel(map)->vmk_vacache;
	vaddr_t va;
	int s = 0xdeadbeaf; /* XXX: gcc */
	const boolean_t intrsafe = (map->flags & VM_MAP_INTRSAFE) != 0;

	if ((map->flags & VM_MAP_VACACHE) == 0)
		return uvm_km_alloc_poolpage1(map, obj, waitok);

	if (intrsafe)
		s = splvm();
	va = (vaddr_t)pool_get(pp, waitok ? PR_WAITOK : PR_NOWAIT);
	if (intrsafe)
		splx(s);
	if (va == 0)
		return 0;
	KASSERT(!pmap_extract(pmap_kernel(), va, NULL));
again:
	pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
	if (__predict_false(pg == NULL)) {
		if (waitok) {
			uvm_wait("plpg");
			goto again;
		} else {
			if (intrsafe)
				s = splvm();
			pool_put(pp, (void *)va);
			if (intrsafe)
				splx(s);
			return 0;
		}
	}
	pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
	    VM_PROT_READ|VM_PROT_WRITE);
	pmap_update(pmap_kernel());

	return va;
#endif /* PMAP_MAP_POOLPAGE */
}

vaddr_t
uvm_km_alloc_poolpage1(map, obj, waitok)
	struct vm_map *map;
	struct uvm_object *obj;
	boolean_t waitok;
{
#if defined(PMAP_MAP_POOLPAGE)
	struct vm_page *pg;
	vaddr_t va;

again:
	pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
	if (__predict_false(pg == NULL)) {
		if (waitok) {
			uvm_wait("plpg");
			goto again;
		} else
			return (0);
	}
	va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
	if (__predict_false(va == 0))
		uvm_pagefree(pg);
	return (va);
#else
	vaddr_t va;
	int s = 0xdeadbeaf; /* XXX: gcc */
	const boolean_t intrsafe = (map->flags & VM_MAP_INTRSAFE) != 0;

	if (intrsafe)
		s = splvm();
	va = uvm_km_kmemalloc(map, obj, PAGE_SIZE,
	    waitok ? 0 : UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK);
	if (intrsafe)
		splx(s);
	return (va);
#endif /* PMAP_MAP_POOLPAGE */
}

/*
 * uvm_km_free_poolpage: free a previously allocated pool page
 *
 * => if the pmap specifies an alternate unmapping method, we use it.
 */

/* ARGSUSED */
void
uvm_km_free_poolpage_cache(map, addr)
	struct vm_map *map;
	vaddr_t addr;
{
#if defined(PMAP_UNMAP_POOLPAGE)
	uvm_km_free_poolpage1(map, addr);
#else
	struct pool *pp;
	int s = 0xdeadbeaf; /* XXX: gcc */
	const boolean_t intrsafe = (map->flags & VM_MAP_INTRSAFE) != 0;

	if ((map->flags & VM_MAP_VACACHE) == 0) {
		uvm_km_free_poolpage1(map, addr);
		return;
	}

	KASSERT(pmap_extract(pmap_kernel(), addr, NULL));
	uvm_km_pgremove_intrsafe(addr, addr + PAGE_SIZE);
	pmap_kremove(addr, PAGE_SIZE);
#if defined(DEBUG)
	pmap_update(pmap_kernel());
#endif
	KASSERT(!pmap_extract(pmap_kernel(), addr, NULL));
	pp = &vm_map_to_kernel(map)->vmk_vacache;
	if (intrsafe)
		s = splvm();
	pool_put(pp, (void *)addr);
	if (intrsafe)
		splx(s);
#endif
}

/* ARGSUSED */
void
uvm_km_free_poolpage1(map, addr)
	struct vm_map *map;
	vaddr_t addr;
{
#if defined(PMAP_UNMAP_POOLPAGE)
	paddr_t pa;

	pa = PMAP_UNMAP_POOLPAGE(addr);
	uvm_pagefree(PHYS_TO_VM_PAGE(pa));
#else
	int s = 0xdeadbeaf; /* XXX: gcc */
	const boolean_t intrsafe = (map->flags & VM_MAP_INTRSAFE) != 0;

	if (intrsafe)
		s = splvm();
	uvm_km_free(map, addr, PAGE_SIZE);
	if (intrsafe)
		splx(s);
#endif /* PMAP_UNMAP_POOLPAGE */
}