/*	$NetBSD: uvm_km.c,v 1.27 1999/06/04 23:38:41 thorpej Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_kern.c   8.3 (Berkeley) 1/12/94
 * from: Id: uvm_km.c,v 1.1.2.14 1998/02/06 05:19:27 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include "opt_uvmhist.h"
#include "opt_pmap_new.h"

/*
 * uvm_km.c: handle kernel memory allocation and management
 */

/*
 * overview of kernel memory management:
 *
 * the kernel virtual address space is mapped by "kernel_map."  kernel_map
 * starts at VM_MIN_KERNEL_ADDRESS and goes to VM_MAX_KERNEL_ADDRESS.
 * note that VM_MIN_KERNEL_ADDRESS is equal to vm_map_min(kernel_map).
 *
 * the kernel_map has several "submaps."  submaps can only appear in
 * the kernel_map (user processes can't use them).  submaps "take over"
 * the management of a sub-range of the kernel's address space.  submaps
 * are typically allocated at boot time and are never released.  kernel
 * virtual address space that is mapped by a submap is locked by the
 * submap's lock -- not the kernel_map's lock.
 *
 * thus, the useful feature of submaps is that they allow us to break
 * up the locking and protection of the kernel address space into smaller
 * chunks.
 *
 * the vm system has several standard kernel submaps, including:
 *   kmem_map => contains only wired kernel memory for the kernel
 *		malloc.  *** access to kmem_map must be protected
 *		by splimp() because we are allowed to call malloc()
 *		at interrupt time ***
 *   mb_map => memory for large mbufs,  *** protected by splimp ***
 *   pager_map => used to map "buf" structures into kernel space
 *   exec_map => used during exec to handle exec args
 *   etc...
 *
 * the kernel allocates its private memory out of special uvm_objects whose
 * reference count is set to UVM_OBJ_KERN (thus indicating that the objects
 * are "special" and never die).  all kernel objects should be thought of
 * as large, fixed-sized, sparsely populated uvm_objects.  each kernel
 * object is equal to the size of kernel virtual address space (i.e. the
 * value "VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS").
 *
 * most kernel private memory lives in kernel_object.  the only exception
 * to this is for memory that belongs to submaps that must be protected
 * by splimp().  each of these submaps has its own private kernel
 * object (e.g. kmem_object, mb_object).
 *
 * note that just because a kernel object spans the entire kernel virtual
 * address space doesn't mean that it has to be mapped into the entire space.
 * large chunks of a kernel object's space go unused either because
 * that area of kernel VM is unmapped, or there is some other type of
 * object mapped into that range (e.g. a vnode).  for a submap's kernel
 * object, the only offsets that can ever be populated are the ones
 * managed by the submap.
 *
 * note that the "offset" in a kernel object is always the kernel virtual
 * address minus the VM_MIN_KERNEL_ADDRESS (aka vm_map_min(kernel_map)).
 * example:
 *   suppose VM_MIN_KERNEL_ADDRESS is 0xf8000000 and the kernel does a
 *   uvm_km_alloc(kernel_map, PAGE_SIZE) [allocate 1 wired down page in the
 *   kernel map].  if uvm_km_alloc returns virtual address 0xf8235000,
 *   then that means that the page at offset 0x235000 in kernel_object is
 *   mapped at 0xf8235000.
 *
 * note that the offsets in kmem_object and mb_object also follow this
 * rule.  this means that the offsets for kmem_object must fall in the
 * range of [vm_map_min(kmem_map) - vm_map_min(kernel_map)] to
 * [vm_map_max(kmem_map) - vm_map_min(kernel_map)], so the offsets
 * in those objects will typically not start at zero.
 *
 * kernel objects have one other special property: when the kernel virtual
 * memory mapping them is unmapped, the backing memory in the object is
 * freed right away.  this is done with the uvm_km_pgremove() function.
 * this has to be done because there is no backing store for kernel pages
 * and no need to save them after they are no longer referenced.
 */

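/*
 * illustrative sketch (editor's addition, not part of the original
 * source): the offset rule above can be written out directly.  using
 * only the names defined in the comment:
 *
 *	vaddr_t kva = uvm_km_alloc(kernel_map, PAGE_SIZE);
 *	vaddr_t off = kva - vm_map_min(kernel_map);
 *
 * with VM_MIN_KERNEL_ADDRESS == 0xf8000000 and kva == 0xf8235000,
 * off == 0x235000 -- the kernel_object offset backing that mapping.
 */
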
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#include <uvm/uvm.h>

/*
 * global data structures
 */

vm_map_t kernel_map = NULL;

struct vmi_list vmi_list;
simple_lock_data_t vmi_list_slock;

/*
 * local functions
 */

static int uvm_km_get __P((struct uvm_object *, vaddr_t,
	vm_page_t *, int *, int, vm_prot_t, int, int));

/*
 * local data structures
 */

static struct vm_map		kernel_map_store;
static struct uvm_object	kmem_object_store;
static struct uvm_object	mb_object_store;

static struct uvm_pagerops km_pager = {
	NULL,		/* init */
	NULL,		/* reference */
	NULL,		/* detach */
	NULL,		/* fault */
	NULL,		/* flush */
	uvm_km_get,	/* get */
	/* ... rest are NULL */
};

/*
 * uvm_km_get: pager get function for kernel objects
 *
 * => currently we do not support pageout to the swap area, so this
 *    pager is very simple.  eventually we may want an anonymous
 *    object pager which will do paging.
 * => XXXCDC: this pager should be phased out in favor of the aobj pager
 */


static int
uvm_km_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags)
	struct uvm_object *uobj;
	vaddr_t offset;
	struct vm_page **pps;
	int *npagesp;
	int centeridx, advice, flags;
	vm_prot_t access_type;
{
	vaddr_t current_offset;
	vm_page_t ptmp;
	int lcv, gotpages, maxpages;
	boolean_t done;
	UVMHIST_FUNC("uvm_km_get"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "flags=%d", flags,0,0,0);

	/*
	 * get number of pages
	 */

	maxpages = *npagesp;

	/*
	 * step 1: handle the case where fault data structures are locked.
	 */

	if (flags & PGO_LOCKED) {

		/*
		 * step 1a: get pages that are already resident.  only do
		 * this if the data structures are locked (i.e. the first time
		 * through).
		 */

		done = TRUE;	/* be optimistic */
		gotpages = 0;	/* # of pages we got so far */

		for (lcv = 0, current_offset = offset ;
		    lcv < maxpages ; lcv++, current_offset += PAGE_SIZE) {

			/* do we care about this page?  if not, skip it */
			if (pps[lcv] == PGO_DONTCARE)
				continue;

			/* lookup page */
			ptmp = uvm_pagelookup(uobj, current_offset);

			/* null?  attempt to allocate the page */
			if (ptmp == NULL) {
				ptmp = uvm_pagealloc(uobj, current_offset,
				    NULL, 0);
				if (ptmp) {
					/* new page */
					ptmp->flags &= ~(PG_BUSY|PG_FAKE);
					UVM_PAGE_OWN(ptmp, NULL);
					uvm_pagezero(ptmp);
				}
			}

			/*
			 * to be useful must get a non-busy, non-released page
			 */
			if (ptmp == NULL ||
			    (ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) {
				if (lcv == centeridx ||
				    (flags & PGO_ALLPAGES) != 0)
					/* need to do a wait or I/O! */
					done = FALSE;
				continue;
			}

			/*
			 * useful page: busy/lock it and plug it in our
			 * result array
			 */

			/* caller must un-busy this page */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uvm_km_get1");
			pps[lcv] = ptmp;
			gotpages++;

		}	/* "for" lcv loop */

		/*
		 * step 1b: now we've either done everything needed or we
		 * need to unlock and do some waiting or I/O.
		 */

		UVMHIST_LOG(maphist, "<- done (done=%d)", done, 0,0,0);

		*npagesp = gotpages;
		if (done)
			return(VM_PAGER_OK);		/* bingo! */
		else
			return(VM_PAGER_UNLOCK);	/* EEK!  Need to
							 * unlock and I/O */
	}

	/*
	 * step 2: get non-resident or busy pages.
	 * object is locked.  data structures are unlocked.
	 */

	for (lcv = 0, current_offset = offset ;
	    lcv < maxpages ; lcv++, current_offset += PAGE_SIZE) {

		/* skip over pages we've already gotten or don't want */
		/* skip over pages we don't _have_ to get */
		if (pps[lcv] != NULL ||
		    (lcv != centeridx && (flags & PGO_ALLPAGES) == 0))
			continue;

		/*
		 * we have yet to locate the current page (pps[lcv]).  we
		 * first look for a page that is already at the current offset.
		 * if we find a page, we check to see if it is busy or
		 * released.  if that is the case, then we sleep on the page
		 * until it is no longer busy or released and repeat the
		 * lookup.  if the page we found is neither busy nor
		 * released, then we busy it (so we own it) and plug it into
		 * pps[lcv].  this 'break's the following while loop and
		 * indicates we are ready to move on to the next page in the
		 * "lcv" loop above.
		 *
		 * if we exit the while loop with pps[lcv] still set to NULL,
		 * then it means that we allocated a new busy/fake/clean page
		 * ptmp in the object and we need to do I/O to fill in the
		 * data.
		 */

		while (pps[lcv] == NULL) {	/* top of "pps" while loop */

			/* look for a current page */
			ptmp = uvm_pagelookup(uobj, current_offset);

			/* nope?  allocate one now (if we can) */
			if (ptmp == NULL) {

				ptmp = uvm_pagealloc(uobj, current_offset,
				    NULL, 0);

				/* out of RAM? */
				if (ptmp == NULL) {
					simple_unlock(&uobj->vmobjlock);
					uvm_wait("kmgetwait1");
					simple_lock(&uobj->vmobjlock);
					/* goto top of pps while loop */
					continue;
				}

				/*
				 * got new page ready for I/O.  break pps
				 * while loop.  pps[lcv] is still NULL.
				 */
				break;
			}

			/* page is there, see if we need to wait on it */
			if ((ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) {
				ptmp->flags |= PG_WANTED;
				UVM_UNLOCK_AND_WAIT(ptmp, &uobj->vmobjlock, 0,
				    "uvn_get", 0);
				simple_lock(&uobj->vmobjlock);
				continue;	/* goto top of pps while loop */
			}

			/*
			 * if we get here then the page has become resident
			 * and unbusy between steps 1 and 2.  we busy it now
			 * (so we own it) and set pps[lcv] (so that we exit
			 * the while loop).  caller must un-busy.
			 */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uvm_km_get2");
			pps[lcv] = ptmp;
		}

		/*
		 * if we own a valid page at the correct offset, pps[lcv]
		 * will point to it.  nothing more to do except go to the
		 * next page.
		 */

		if (pps[lcv])
			continue;	/* next lcv */

		/*
		 * we have a "fake/busy/clean" page that we just allocated.
		 * do the needed "i/o" (in this case that means zero it).
		 */

		uvm_pagezero(ptmp);
		ptmp->flags &= ~(PG_FAKE);
		pps[lcv] = ptmp;

	}	/* lcv loop */

	/*
	 * finally, unlock object and return.
	 */

	simple_unlock(&uobj->vmobjlock);
	UVMHIST_LOG(maphist, "<- done (OK)",0,0,0,0);
	return(VM_PAGER_OK);
}

/*
 * uvm_km_init: init kernel maps and objects to reflect reality (i.e.
 * KVM already allocated for text, data, bss, and static data structures).
 *
 * => KVM is defined by VM_MIN_KERNEL_ADDRESS/VM_MAX_KERNEL_ADDRESS.
 *    we assume that [min -> start] has already been allocated and that
 *    "end" is the end.
 */

void
uvm_km_init(start, end)
	vaddr_t start, end;
{
	vaddr_t base = VM_MIN_KERNEL_ADDRESS;

	/*
	 * first, initialize the interrupt-safe map list.
	 */
	LIST_INIT(&vmi_list);
	simple_lock_init(&vmi_list_slock);

	/*
	 * next, init kernel memory objects.
	 */

	/* kernel_object: for pageable anonymous kernel memory */
	uvm.kernel_object = uao_create(VM_MAX_KERNEL_ADDRESS -
	    VM_MIN_KERNEL_ADDRESS, UAO_FLAG_KERNOBJ);

	/*
	 * kmem_object: for use by the kernel malloc().  Memory is always
	 * wired, and this object (and the kmem_map) can be accessed at
	 * interrupt time.
	 */
	simple_lock_init(&kmem_object_store.vmobjlock);
	kmem_object_store.pgops = &km_pager;
	TAILQ_INIT(&kmem_object_store.memq);
	kmem_object_store.uo_npages = 0;
	/* we are special.  we never die */
	kmem_object_store.uo_refs = UVM_OBJ_KERN_INTRSAFE;
	uvmexp.kmem_object = &kmem_object_store;

	/*
	 * mb_object: for mbuf cluster pages on platforms which use the
	 * mb_map.  Memory is always wired, and this object (and the mb_map)
	 * can be accessed at interrupt time.
	 */
	simple_lock_init(&mb_object_store.vmobjlock);
	mb_object_store.pgops = &km_pager;
	TAILQ_INIT(&mb_object_store.memq);
	mb_object_store.uo_npages = 0;
	/* we are special.  we never die */
	mb_object_store.uo_refs = UVM_OBJ_KERN_INTRSAFE;
	uvmexp.mb_object = &mb_object_store;

	/*
	 * init the map and reserve already allocated kernel space
	 * before installing.
	 */

	uvm_map_setup(&kernel_map_store, base, end, VM_MAP_PAGEABLE);
	kernel_map_store.pmap = pmap_kernel();
	if (uvm_map(&kernel_map_store, &base, start - base, NULL,
	    UVM_UNKNOWN_OFFSET, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
	    UVM_INH_NONE, UVM_ADV_RANDOM, UVM_FLAG_FIXED)) != KERN_SUCCESS)
		panic("uvm_km_init: could not reserve space for kernel");

	/*
	 * install!
	 */

	kernel_map = &kernel_map_store;
}

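/*
 * illustrative sketch (editor's addition): uvm_km_init() is called once
 * from machine-dependent startup code.  "first_free_kva" is a
 * hypothetical name for the first free kernel virtual address after the
 * kernel image and bootstrap data:
 *
 *	uvm_km_init(first_free_kva, VM_MAX_KERNEL_ADDRESS);
 *
 * afterwards [VM_MIN_KERNEL_ADDRESS, first_free_kva) is marked reserved
 * in kernel_map, and kernel_map is installed and ready for use.
 */
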
/*
 * uvm_km_suballoc: allocate a submap in the kernel map.  once a submap
 * is allocated all references to that area of VM must go through it.  this
 * allows the locking of VAs in kernel_map to be broken up into regions.
 *
 * => if `fixed' is true, *min specifies where the region described
 *    by the submap must start
 * => if submap is non NULL we use that as the submap, otherwise we
 *    alloc a new map
 */
struct vm_map *
uvm_km_suballoc(map, min, max, size, flags, fixed, submap)
	struct vm_map *map;
	vaddr_t *min, *max;		/* OUT, OUT */
	vsize_t size;
	int flags;
	boolean_t fixed;
	struct vm_map *submap;
{
	int mapflags = UVM_FLAG_NOMERGE | (fixed ? UVM_FLAG_FIXED : 0);

	size = round_page(size);	/* round up to pagesize */

	/*
	 * first allocate a blank spot in the parent map
	 */

	if (uvm_map(map, min, size, NULL, UVM_UNKNOWN_OFFSET,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, mapflags)) != KERN_SUCCESS) {
		panic("uvm_km_suballoc: unable to allocate space in parent map");
	}

	/*
	 * set VM bounds (min is filled in by uvm_map)
	 */

	*max = *min + size;

	/*
	 * add references to pmap and create or init the submap
	 */

	pmap_reference(vm_map_pmap(map));
	if (submap == NULL) {
		submap = uvm_map_create(vm_map_pmap(map), *min, *max, flags);
		if (submap == NULL)
			panic("uvm_km_suballoc: unable to create submap");
	} else {
		uvm_map_setup(submap, *min, *max, flags);
		submap->pmap = vm_map_pmap(map);
	}

	/*
	 * now let uvm_map_submap plug in it...
	 */

	if (uvm_map_submap(map, *min, *max, submap) != KERN_SUCCESS)
		panic("uvm_km_suballoc: submap allocation failed");

	return(submap);
}

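/*
 * illustrative sketch (editor's addition): this is roughly how a submap
 * such as kmem_map would be carved out of kernel_map at boot.  the size
 * expression and the "kmem_map_store" static are hypothetical names; the
 * flags argument follows the VM_MAP_* convention used in uvm_km_init()
 * above:
 *
 *	static struct vm_map kmem_map_store;
 *	vaddr_t kmin, kmax;
 *
 *	kmem_map = uvm_km_suballoc(kernel_map, &kmin, &kmax,
 *	    nkmempages * PAGE_SIZE, VM_MAP_INTRSAFE, FALSE,
 *	    &kmem_map_store);
 *
 * after the call, [kmin, kmax) is managed (and locked) by kmem_map
 * rather than by kernel_map.
 */
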
/*
 * uvm_km_pgremove: remove pages from a kernel uvm_object.
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.  (this gets called from uvm_unmap_...).
 */

#define UKM_HASH_PENALTY 4      /* a guess */

void
uvm_km_pgremove(uobj, start, end)
	struct uvm_object *uobj;
	vaddr_t start, end;
{
	boolean_t by_list;
	struct vm_page *pp, *ppnext;
	vaddr_t curoff;
	UVMHIST_FUNC("uvm_km_pgremove"); UVMHIST_CALLED(maphist);

	simple_lock(&uobj->vmobjlock);		/* lock object */

#ifdef DIAGNOSTIC
	if (uobj->pgops != &aobj_pager)
		panic("uvm_km_pgremove: object %p not an aobj", uobj);
#endif

	/* choose cheapest traversal */
	by_list = (uobj->uo_npages <=
	    ((end - start) >> PAGE_SHIFT) * UKM_HASH_PENALTY);

	if (by_list)
		goto loop_by_list;

	/* by hash */

	for (curoff = start ; curoff < end ; curoff += PAGE_SIZE) {
		pp = uvm_pagelookup(uobj, curoff);
		if (pp == NULL)
			continue;

		UVMHIST_LOG(maphist,"  page 0x%x, busy=%d", pp,
		    pp->flags & PG_BUSY, 0, 0);

		/* now do the actual work */
		if (pp->flags & PG_BUSY) {
			/* owner must check for this when done */
			pp->flags |= PG_RELEASED;
		} else {
			/* free the swap slot... */
			uao_dropswap(uobj, curoff >> PAGE_SHIFT);

			/*
			 * ...and free the page; note it may be on the
			 * active or inactive queues.
			 */
			uvm_lock_pageq();
			uvm_pagefree(pp);
			uvm_unlock_pageq();
		}
		/* done */
	}
	simple_unlock(&uobj->vmobjlock);
	return;

loop_by_list:

	for (pp = uobj->memq.tqh_first ; pp != NULL ; pp = ppnext) {
		ppnext = pp->listq.tqe_next;
		if (pp->offset < start || pp->offset >= end) {
			continue;
		}

		UVMHIST_LOG(maphist,"  page 0x%x, busy=%d", pp,
		    pp->flags & PG_BUSY, 0, 0);

		/* now do the actual work */
		if (pp->flags & PG_BUSY) {
			/* owner must check for this when done */
			pp->flags |= PG_RELEASED;
		} else {
			/* free the swap slot... */
			uao_dropswap(uobj, pp->offset >> PAGE_SHIFT);

			/*
			 * ...and free the page; note it may be on the
			 * active or inactive queues.
			 */
			uvm_lock_pageq();
			uvm_pagefree(pp);
			uvm_unlock_pageq();
		}
		/* done */
	}
	simple_unlock(&uobj->vmobjlock);
	return;
}


/*
 * uvm_km_pgremove_intrsafe: like uvm_km_pgremove(), but for "intrsafe"
 *    objects
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.  (this gets called from uvm_unmap_...).
 * => none of the pages will ever be busy, and none of them will ever
 *    be on the active or inactive queues (because these objects are
 *    never allowed to "page").
 */

void
uvm_km_pgremove_intrsafe(uobj, start, end)
	struct uvm_object *uobj;
	vaddr_t start, end;
{
	boolean_t by_list;
	struct vm_page *pp, *ppnext;
	vaddr_t curoff;
	UVMHIST_FUNC("uvm_km_pgremove_intrsafe"); UVMHIST_CALLED(maphist);

	simple_lock(&uobj->vmobjlock);		/* lock object */

#ifdef DIAGNOSTIC
	if (UVM_OBJ_IS_INTRSAFE_OBJECT(uobj) == 0)
		panic("uvm_km_pgremove_intrsafe: object %p not intrsafe", uobj);
#endif

	/* choose cheapest traversal */
	by_list = (uobj->uo_npages <=
	    ((end - start) >> PAGE_SHIFT) * UKM_HASH_PENALTY);

	if (by_list)
		goto loop_by_list;

	/* by hash */

	for (curoff = start ; curoff < end ; curoff += PAGE_SIZE) {
		pp = uvm_pagelookup(uobj, curoff);
		if (pp == NULL)
			continue;

		UVMHIST_LOG(maphist,"  page 0x%x, busy=%d", pp,
		    pp->flags & PG_BUSY, 0, 0);
#ifdef DIAGNOSTIC
		if (pp->flags & PG_BUSY)
			panic("uvm_km_pgremove_intrsafe: busy page");
		if (pp->pqflags & PQ_ACTIVE)
			panic("uvm_km_pgremove_intrsafe: active page");
		if (pp->pqflags & PQ_INACTIVE)
			panic("uvm_km_pgremove_intrsafe: inactive page");
#endif

		/* free the page */
		uvm_pagefree(pp);
	}
	simple_unlock(&uobj->vmobjlock);
	return;

loop_by_list:

	for (pp = uobj->memq.tqh_first ; pp != NULL ; pp = ppnext) {
		ppnext = pp->listq.tqe_next;
		if (pp->offset < start || pp->offset >= end) {
			continue;
		}

		UVMHIST_LOG(maphist,"  page 0x%x, busy=%d", pp,
		    pp->flags & PG_BUSY, 0, 0);

#ifdef DIAGNOSTIC
		if (pp->flags & PG_BUSY)
			panic("uvm_km_pgremove_intrsafe: busy page");
		if (pp->pqflags & PQ_ACTIVE)
			panic("uvm_km_pgremove_intrsafe: active page");
		if (pp->pqflags & PQ_INACTIVE)
			panic("uvm_km_pgremove_intrsafe: inactive page");
#endif

		/* free the page */
		uvm_pagefree(pp);
	}
	simple_unlock(&uobj->vmobjlock);
	return;
}


/*
 * uvm_km_kmemalloc: lower level kernel memory allocator for malloc()
 *
 * => we map wired memory into the specified map using the obj passed in
 * => NOTE: we can return 0 even if we can wait if there is not enough
 *	free VM space in the map... caller should be prepared to handle
 *	this case.
 * => we return KVA of memory allocated
 * => flags: NOWAIT, VALLOC - just allocate VA, TRYLOCK - fail if we can't
 *	lock the map
 */

vaddr_t
uvm_km_kmemalloc(map, obj, size, flags)
	vm_map_t map;
	struct uvm_object *obj;
	vsize_t size;
	int flags;
{
	vaddr_t kva, loopva;
	vaddr_t offset;
	struct vm_page *pg;
	UVMHIST_FUNC("uvm_km_kmemalloc"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"  (map=0x%x, obj=0x%x, size=0x%x, flags=%d)",
	    map, obj, size, flags);
#ifdef DIAGNOSTIC
	/* sanity check */
	if (vm_map_pmap(map) != pmap_kernel())
		panic("uvm_km_kmemalloc: invalid map");
#endif

	/*
	 * setup for call
	 */

	size = round_page(size);
	kva = vm_map_min(map);	/* hint */

	/*
	 * allocate some virtual space
	 */

	if (uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, (flags & UVM_KMF_TRYLOCK)))
	    != KERN_SUCCESS) {
		UVMHIST_LOG(maphist, "<- done (no VM)",0,0,0,0);
		return(0);
	}

	/*
	 * if all we wanted was VA, return now
	 */

	if (flags & UVM_KMF_VALLOC) {
		UVMHIST_LOG(maphist,"<- done valloc (kva=0x%x)", kva,0,0,0);
		return(kva);
	}

	/*
	 * recover object offset from virtual address
	 */

	offset = kva - vm_map_min(kernel_map);
	UVMHIST_LOG(maphist, "  kva=0x%x, offset=0x%x", kva, offset,0,0);

	/*
	 * now allocate and map in the memory... note that we are the only ones
	 * who should ever get a handle on this area of VM.
	 */

	loopva = kva;
	while (size) {
		simple_lock(&obj->vmobjlock);
		pg = uvm_pagealloc(obj, offset, NULL, 0);
		if (pg) {
			pg->flags &= ~PG_BUSY;	/* new page */
			UVM_PAGE_OWN(pg, NULL);
		}
		simple_unlock(&obj->vmobjlock);

		/*
		 * out of memory?
		 */

		if (pg == NULL) {
			if (flags & UVM_KMF_NOWAIT) {
				/* free everything! */
				uvm_unmap(map, kva, kva + size);
				return(0);
			} else {
				uvm_wait("km_getwait2");	/* sleep here */
				continue;
			}
		}

		/*
		 * map it in: note that we call pmap_enter with the map and
		 * object unlocked in case we are kmem_map/kmem_object
		 * (because if pmap_enter wants to allocate out of kmem_object
		 * it will need to lock it itself!)
		 */
		if (UVM_OBJ_IS_INTRSAFE_OBJECT(obj)) {
#if defined(PMAP_NEW)
			pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
			    VM_PROT_ALL);
#else
			pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
			    UVM_PROT_ALL, TRUE, VM_PROT_READ|VM_PROT_WRITE);
#endif
		} else {
			pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
			    UVM_PROT_ALL, TRUE, VM_PROT_READ|VM_PROT_WRITE);
		}
		loopva += PAGE_SIZE;
		offset += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
	return(kva);
}
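
/*
 * illustrative sketch (editor's addition): this is roughly how a kernel
 * malloc() layer would obtain wired backing pages, as described above.
 * the splimp() bracket reflects the kmem_map locking rule from the
 * overview comment at the top of this file; "allocsize" is a
 * hypothetical name for the caller's request:
 *
 *	vaddr_t va;
 *	int s;
 *
 *	s = splimp();
 *	va = uvm_km_kmemalloc(kmem_map, uvmexp.kmem_object,
 *	    round_page(allocsize), UVM_KMF_NOWAIT);
 *	splx(s);
 *
 * a return value of 0 means the allocation failed (no VM space or no
 * physical pages), which the caller must be prepared to handle.
 */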

/*
 * uvm_km_free: free an area of kernel memory
 */

void
uvm_km_free(map, addr, size)
	vm_map_t map;
	vaddr_t addr;
	vsize_t size;
{

	uvm_unmap(map, trunc_page(addr), round_page(addr+size));
}

/*
 * uvm_km_free_wakeup: free an area of kernel memory and wake up
 * anyone waiting for vm space.
 *
 * => XXX: "wanted" bit + unlock&wait on other end?
 */

void
uvm_km_free_wakeup(map, addr, size)
	vm_map_t map;
	vaddr_t addr;
	vsize_t size;
{
	vm_map_entry_t dead_entries;

	vm_map_lock(map);
	(void)uvm_unmap_remove(map, trunc_page(addr), round_page(addr+size),
	    &dead_entries);
	thread_wakeup(map);
	vm_map_unlock(map);

	if (dead_entries != NULL)
		uvm_unmap_detach(dead_entries, 0);
}

/*
 * uvm_km_alloc1: allocate wired down memory in the kernel map.
 *
 * => we can sleep if needed
 */

vaddr_t
uvm_km_alloc1(map, size, zeroit)
	vm_map_t map;
	vsize_t size;
	boolean_t zeroit;
{
	vaddr_t kva, loopva, offset;
	struct vm_page *pg;
	UVMHIST_FUNC("uvm_km_alloc1"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(map=0x%x, size=0x%x)", map, size,0,0);

#ifdef DIAGNOSTIC
	if (vm_map_pmap(map) != pmap_kernel())
		panic("uvm_km_alloc1");
#endif

	size = round_page(size);
	kva = vm_map_min(map);		/* hint */

	/*
	 * allocate some virtual space
	 */

	if (uvm_map(map, &kva, size, uvm.kernel_object, UVM_UNKNOWN_OFFSET,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, 0)) != KERN_SUCCESS) {
		UVMHIST_LOG(maphist,"<- done (no VM)",0,0,0,0);
		return(0);
	}

	/*
	 * recover object offset from virtual address
	 */

	offset = kva - vm_map_min(kernel_map);
	UVMHIST_LOG(maphist,"  kva=0x%x, offset=0x%x", kva, offset,0,0);

	/*
	 * now allocate the memory.  we must be careful about released pages.
	 */

	loopva = kva;
	while (size) {
		simple_lock(&uvm.kernel_object->vmobjlock);
		pg = uvm_pagelookup(uvm.kernel_object, offset);

		/*
		 * if we found a page in an unallocated region, it must be
		 * released
		 */
		if (pg) {
			if ((pg->flags & PG_RELEASED) == 0)
				panic("uvm_km_alloc1: non-released page");
			pg->flags |= PG_WANTED;
			UVM_UNLOCK_AND_WAIT(pg, &uvm.kernel_object->vmobjlock,
			    0, "km_alloc", 0);
			continue;	/* retry */
		}

		/* allocate ram */
		pg = uvm_pagealloc(uvm.kernel_object, offset, NULL, 0);
		if (pg) {
			pg->flags &= ~PG_BUSY;	/* new page */
			UVM_PAGE_OWN(pg, NULL);
		}
		simple_unlock(&uvm.kernel_object->vmobjlock);
		if (pg == NULL) {
			uvm_wait("km_alloc1w");	/* wait for memory */
			continue;
		}

		/*
		 * map it in; note we're never called with an intrsafe
		 * object, so we always use regular old pmap_enter().
		 */
		pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
		    UVM_PROT_ALL, TRUE, VM_PROT_READ|VM_PROT_WRITE);

		loopva += PAGE_SIZE;
		offset += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	/*
	 * zero on request (note that "size" is now zero due to the above loop
	 * so we need to subtract kva from loopva to reconstruct the size).
	 */

	if (zeroit)
		memset((caddr_t)kva, 0, loopva - kva);

	UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
	return(kva);
}
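
/*
 * illustrative sketch (editor's addition): a caller that needs a zeroed,
 * wired scratch buffer and is allowed to sleep could simply do:
 *
 *	vaddr_t buf;
 *
 *	buf = uvm_km_alloc1(kernel_map, bufsize, TRUE);
 *
 * "bufsize" is a hypothetical size; it is rounded up to a page multiple
 * internally, and the backing pages come from uvm.kernel_object.
 */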

/*
 * uvm_km_valloc: allocate zero-fill memory in the kernel's address space
 *
 * => memory is not allocated until fault time
 */

vaddr_t
uvm_km_valloc(map, size)
	vm_map_t map;
	vsize_t size;
{
	vaddr_t kva;
	UVMHIST_FUNC("uvm_km_valloc"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x)", map, size, 0,0);

#ifdef DIAGNOSTIC
	if (vm_map_pmap(map) != pmap_kernel())
		panic("uvm_km_valloc");
#endif

	size = round_page(size);
	kva = vm_map_min(map);		/* hint */

	/*
	 * allocate some virtual space.  will be demand filled by kernel_object.
	 */

	if (uvm_map(map, &kva, size, uvm.kernel_object, UVM_UNKNOWN_OFFSET,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, 0)) != KERN_SUCCESS) {
		UVMHIST_LOG(maphist, "<- done (no VM)", 0,0,0,0);
		return(0);
	}

	UVMHIST_LOG(maphist, "<- done (kva=0x%x)", kva,0,0,0);
	return(kva);
}
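
/*
 * illustrative sketch (editor's addition): unlike uvm_km_alloc1(), no
 * physical pages are allocated here; the first touch of each page faults
 * in a zero-filled page from kernel_object.  e.g.:
 *
 *	vaddr_t va = uvm_km_valloc(kernel_map, tablesize);
 *
 * "tablesize" is a hypothetical size constant; the first store through
 * va triggers the fault that allocates the backing page.
 */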

/*
 * uvm_km_valloc_wait: allocate zero-fill memory in the kernel's address space
 *
 * => memory is not allocated until fault time
 * => if no room in map, wait for space to free, unless requested size
 *    is larger than map (in which case we return 0)
 */

vaddr_t
uvm_km_valloc_wait(map, size)
	vm_map_t map;
	vsize_t size;
{
	vaddr_t kva;
	UVMHIST_FUNC("uvm_km_valloc_wait"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x)", map, size, 0,0);

#ifdef DIAGNOSTIC
	if (vm_map_pmap(map) != pmap_kernel())
		panic("uvm_km_valloc_wait");
#endif

	size = round_page(size);
	if (size > vm_map_max(map) - vm_map_min(map))
		return(0);

	while (1) {
		kva = vm_map_min(map);		/* hint */

		/*
		 * allocate some virtual space.  will be demand filled
		 * by kernel_object.
		 */

		if (uvm_map(map, &kva, size, uvm.kernel_object,
		    UVM_UNKNOWN_OFFSET, UVM_MAPFLAG(UVM_PROT_ALL,
		    UVM_PROT_ALL, UVM_INH_NONE, UVM_ADV_RANDOM, 0))
		    == KERN_SUCCESS) {
			UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
			return(kva);
		}

		/*
		 * failed.  sleep for a while (on map)
		 */

		UVMHIST_LOG(maphist,"<<<sleeping>>>",0,0,0,0);
		tsleep((caddr_t)map, PVM, "vallocwait", 0);
	}
	/*NOTREACHED*/
}

/* Sanity; must specify both or none. */
#if (defined(PMAP_MAP_POOLPAGE) || defined(PMAP_UNMAP_POOLPAGE)) && \
    (!defined(PMAP_MAP_POOLPAGE) || !defined(PMAP_UNMAP_POOLPAGE))
#error Must specify MAP and UNMAP together.
#endif

/*
 * uvm_km_alloc_poolpage: allocate a page for the pool allocator
 *
 * => if the pmap specifies an alternate mapping method, we use it.
 */

/* ARGSUSED */
vaddr_t
uvm_km_alloc_poolpage1(map, obj, waitok)
	vm_map_t map;
	struct uvm_object *obj;
	boolean_t waitok;
{
#if defined(PMAP_MAP_POOLPAGE)
	struct vm_page *pg;
	vaddr_t va;

 again:
	pg = uvm_pagealloc(NULL, 0, NULL, 0);
	if (pg == NULL) {
		if (waitok) {
			uvm_wait("plpg");
			goto again;
		} else
			return (0);
	}
	va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
	if (va == 0)
		uvm_pagefree(pg);
	return (va);
#else
	vaddr_t va;
	int s;

	/*
	 * NOTE: We may be called with a map that doesn't require splimp
	 * protection (e.g. kernel_map).  However, it does not hurt to
	 * go to splimp in this case (since unprotected maps will never be
	 * accessed in interrupt context).
	 *
	 * XXX We may want to consider changing the interface to this
	 * XXX function.
	 */

	s = splimp();
	va = uvm_km_kmemalloc(map, obj, PAGE_SIZE, waitok ? 0 : UVM_KMF_NOWAIT);
	splx(s);
	return (va);
#endif /* PMAP_MAP_POOLPAGE */
}
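
/*
 * illustrative sketch (editor's addition): a pool backend would
 * typically wrap this pair as its page allocator and deallocator.
 * the wrapper names are hypothetical; PR_WAITOK stands for whatever
 * "may sleep" flag the pool caller passes down:
 *
 *	static void *
 *	pool_page_alloc_example(unsigned long sz, int flags, int mtype)
 *	{
 *		return ((void *)uvm_km_alloc_poolpage1(kmem_map,
 *		    uvmexp.kmem_object, (flags & PR_WAITOK) != 0));
 *	}
 *
 *	static void
 *	pool_page_free_example(void *v, unsigned long sz, int mtype)
 *	{
 *		uvm_km_free_poolpage1(kmem_map, (vaddr_t)v);
 *	}
 */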

/*
 * uvm_km_free_poolpage: free a previously allocated pool page
 *
 * => if the pmap specifies an alternate unmapping method, we use it.
 */

/* ARGSUSED */
void
uvm_km_free_poolpage1(map, addr)
	vm_map_t map;
	vaddr_t addr;
{
#if defined(PMAP_UNMAP_POOLPAGE)
	paddr_t pa;

	pa = PMAP_UNMAP_POOLPAGE(addr);
	uvm_pagefree(PHYS_TO_VM_PAGE(pa));
#else
	int s;

	/*
	 * NOTE: We may be called with a map that doesn't require splimp
	 * protection (e.g. kernel_map).  However, it does not hurt to
	 * go to splimp in this case (since unprotected maps will never be
	 * accessed in interrupt context).
	 *
	 * XXX We may want to consider changing the interface to this
	 * XXX function.
	 */

	s = splimp();
	uvm_km_free(map, addr, PAGE_SIZE);
	splx(s);
#endif /* PMAP_UNMAP_POOLPAGE */
}