/*	$NetBSD: uvm_aobj.c,v 1.7 1998/02/12 07:36:43 chs Exp $	*/

/*
 * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
 *	   >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
 */
/*
 * Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
 *                    Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_aobj.c,v 1.1.2.5 1998/02/06 05:14:38 chs Exp
 */
/*
 * uvm_aobj.c: anonymous memory uvm_object pager
 *
 * author: Chuck Silvers <chuq (at) chuq.com>
 * started: Jan-1998
 *
 * - design mostly from Chuck Cranor
 */

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#include <uvm/uvm.h>

/*
 * an aobj manages anonymous-memory backed uvm_objects.  in addition
 * to keeping the list of resident pages, it also keeps a list of
 * allocated swap blocks.  depending on the size of the aobj this list
 * of allocated swap blocks is either stored in an array (small objects)
 * or in a hash table (large objects).
 */

/*
 * local structures
 */

/*
 * for hash tables, we break the address space of the aobj into blocks
 * of UAO_SWHASH_CLUSTER_SIZE pages.  we require the cluster size to
 * be a power of two.
 */

#define UAO_SWHASH_CLUSTER_SHIFT 4
#define UAO_SWHASH_CLUSTER_SIZE (1 << UAO_SWHASH_CLUSTER_SHIFT)

/* get the "tag" for this page index */
#define UAO_SWHASH_ELT_TAG(PAGEIDX) \
	((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT)

/* given an ELT and a page index, find the swap slot */
#define UAO_SWHASH_ELT_PAGESLOT(ELT, PAGEIDX) \
	((ELT)->slots[(PAGEIDX) & (UAO_SWHASH_CLUSTER_SIZE - 1)])

/* given an ELT, return its pageidx base */
#define UAO_SWHASH_ELT_PAGEIDX_BASE(ELT) \
	((ELT)->tag << UAO_SWHASH_CLUSTER_SHIFT)

/*
 * the swhash hash function
 */
#define UAO_SWHASH_HASH(AOBJ, PAGEIDX) \
	(&(AOBJ)->u_swhash[(((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT) \
			    & (AOBJ)->u_swhashmask)])
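
/*
 * a worked example of the macros above (illustrative values): with
 * UAO_SWHASH_CLUSTER_SHIFT == 4, page index 0x25 (37) splits as
 *
 *	UAO_SWHASH_ELT_TAG(0x25)             == 0x25 >> 4  == 2
 *	0x25 & (UAO_SWHASH_CLUSTER_SIZE - 1) == 0x25 & 0xf == 5
 *
 * so page 37's swap slot lives in elt->slots[5] of the element whose
 * tag is 2, in the bucket UAO_SWHASH_HASH(aobj, 0x25) points at.
 */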

/*
 * the swhash threshold determines if we will use an array or a
 * hash table to store the list of allocated swap blocks.
 */

#define UAO_SWHASH_THRESHOLD (UAO_SWHASH_CLUSTER_SIZE * 4)
#define UAO_USES_SWHASH(AOBJ) \
	((AOBJ)->u_pages > UAO_SWHASH_THRESHOLD)	/* use hash? */

/*
 * the number of buckets in a swhash, with an upper bound
 */
#define UAO_SWHASH_MAXBUCKETS 256
#define UAO_SWHASH_BUCKETS(AOBJ) \
	(min((AOBJ)->u_pages >> UAO_SWHASH_CLUSTER_SHIFT, \
	     UAO_SWHASH_MAXBUCKETS))
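
/*
 * illustrative numbers: with the defaults above the threshold is
 * 16 * 4 = 64 pages, so a 32-page aobj keeps a plain 32-entry int
 * array while a 1024-page aobj hashes instead, with
 * min(1024 >> 4, 256) = 64 buckets.
 */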

/*
 * uao_swhash_elt: when a hash table is being used, this structure defines
 * the format of an entry in the bucket list.
 */

struct uao_swhash_elt {
	LIST_ENTRY(uao_swhash_elt) list;	/* the hash list */
	vm_offset_t tag;			/* our 'tag' */
	int count;				/* our number of active slots */
	int slots[UAO_SWHASH_CLUSTER_SIZE];	/* the slots */
};

/*
 * uao_swhash: the swap hash table structure
 */

LIST_HEAD(uao_swhash, uao_swhash_elt);

/*
 * uvm_aobj: the actual anon-backed uvm_object
 *
 * => the uvm_object is at the top of the structure, this allows
 *    (struct uvm_aobj *) == (struct uvm_object *)
 * => only one of u_swslots and u_swhash is used in any given aobj
 */

struct uvm_aobj {
	struct uvm_object u_obj; /* has: lock, pgops, memq, #pages, #refs */
	vm_size_t u_pages;	 /* number of pages in entire object */
	int u_flags;		 /* the flags (see uvm_aobj.h) */
	int *u_swslots;		 /* array of offset->swapslot mappings */
	/*
	 * hashtable of offset->swapslot mappings
	 * (u_swhash is an array of bucket heads)
	 */
	struct uao_swhash *u_swhash;
	u_long u_swhashmask;		/* mask for hashtable */
	LIST_ENTRY(uvm_aobj) u_list;	/* global list of aobjs */
};
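
/*
 * since u_obj is the first member, an aobj can be recovered from a
 * generic object pointer with a plain cast, as the functions below do:
 *
 *	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
 */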

/*
 * local functions
 */

static void			 uao_init __P((void));
static struct uao_swhash_elt	*uao_find_swhash_elt __P((struct uvm_aobj *,
							  int, boolean_t));
static int			 uao_find_swslot __P((struct uvm_aobj *,
						      vm_offset_t));
static boolean_t		 uao_flush __P((struct uvm_object *,
						vm_offset_t, vm_offset_t,
						int));
static void			 uao_free __P((struct uvm_aobj *));
static int			 uao_get __P((struct uvm_object *, vm_offset_t,
					      vm_page_t *, int *, int,
					      vm_prot_t, int, int));
static boolean_t		 uao_releasepg __P((struct vm_page *,
						    struct vm_page **));

/*
 * aobj_pager
 *
 * note that some functions (e.g. put) are handled elsewhere
 */

struct uvm_pagerops aobj_pager = {
	uao_init,		/* init */
	NULL,			/* attach */
	uao_reference,		/* reference */
	uao_detach,		/* detach */
	NULL,			/* fault */
	uao_flush,		/* flush */
	uao_get,		/* get */
	NULL,			/* asyncget */
	NULL,			/* put (done by pagedaemon) */
	NULL,			/* cluster */
	NULL,			/* mk_pcluster */
	uvm_shareprot,		/* shareprot */
	NULL,			/* aiodone */
	uao_releasepg		/* releasepg */
};

/*
 * uao_list: global list of active aobjs, locked by uao_list_lock
 */

static LIST_HEAD(aobjlist, uvm_aobj) uao_list;
#if NCPU > 1
static simple_lock_data_t uao_list_lock;
#endif

/*
 * functions
 */

/*
 * hash table/array related functions
 */

/*
 * uao_find_swhash_elt: find (or create) a hash table entry for a page
 * offset.
 *
 * => the object should be locked by the caller
 */

static struct uao_swhash_elt *
uao_find_swhash_elt(aobj, pageidx, create)
	struct uvm_aobj *aobj;
	int pageidx;
	boolean_t create;
{
	struct uao_swhash *swhash;
	struct uao_swhash_elt *elt;
	int page_tag;

	swhash = UAO_SWHASH_HASH(aobj, pageidx); /* first hash to get bucket */
	page_tag = UAO_SWHASH_ELT_TAG(pageidx);	 /* tag to search for */

	/*
	 * now search the bucket for the requested tag
	 */
	for (elt = swhash->lh_first; elt != NULL; elt = elt->list.le_next) {
		if (elt->tag == page_tag)
			return(elt);
	}

	/* fail now if we are not allowed to create a new entry in the bucket */
	if (!create)
		return NULL;

	/*
	 * malloc a new entry for the bucket and init/insert it in
	 */
	MALLOC(elt, struct uao_swhash_elt *, sizeof(*elt), M_UVMAOBJ, M_WAITOK);
	LIST_INSERT_HEAD(swhash, elt, list);
	elt->tag = page_tag;
	elt->count = 0;
	bzero(elt->slots, sizeof(elt->slots));

	return(elt);
}

/*
 * uao_find_swslot: find the swap slot number for an aobj/pageidx
 *
 * => object must be locked by caller
 */
__inline static int
uao_find_swslot(aobj, pageidx)
	struct uvm_aobj *aobj;
	vm_offset_t pageidx;
{

	/*
	 * if noswap flag is set, then we never return a slot
	 */

	if (aobj->u_flags & UAO_FLAG_NOSWAP)
		return(0);

	/*
	 * if hashing, look in hash table.
	 */

	if (UAO_USES_SWHASH(aobj)) {
		struct uao_swhash_elt *elt =
		    uao_find_swhash_elt(aobj, pageidx, FALSE);

		if (elt)
			return(UAO_SWHASH_ELT_PAGESLOT(elt, pageidx));
		else
			return(0);
	}

	/*
	 * otherwise, look in the array
	 */
	return(aobj->u_swslots[pageidx]);
}

/*
 * uao_set_swslot: set the swap slot for a page in an aobj.
 *
 * => setting a slot to zero frees the slot
 * => object must be locked by caller
 */
int
uao_set_swslot(uobj, pageidx, slot)
	struct uvm_object *uobj;
	int pageidx, slot;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	int oldslot;
	UVMHIST_FUNC("uao_set_swslot"); UVMHIST_CALLED(pdhist);
	UVMHIST_LOG(pdhist, "aobj %p pageidx %d slot %d",
	    aobj, pageidx, slot, 0);

	/*
	 * if noswap flag is set, then we can't set a slot
	 */

	if (aobj->u_flags & UAO_FLAG_NOSWAP) {

		if (slot == 0)
			return(0);	/* a clear is ok */

		/* but a set is not */
		printf("uao_set_swslot: uobj = %p\n", uobj);
		panic("uao_set_swslot: attempt to set a slot on a NOSWAP object");
	}

	/*
	 * are we using a hash table?  if so, add it in the hash.
	 */

	if (UAO_USES_SWHASH(aobj)) {
		struct uao_swhash_elt *elt =
		    uao_find_swhash_elt(aobj, pageidx, TRUE);

		oldslot = UAO_SWHASH_ELT_PAGESLOT(elt, pageidx);
		UAO_SWHASH_ELT_PAGESLOT(elt, pageidx) = slot;

		/*
		 * now adjust the elt's reference counter and free it if we've
		 * dropped it to zero.
		 */

		/* an allocation? */
		if (slot) {
			if (oldslot == 0)
				elt->count++;
		} else {		/* freeing slot ... */
			if (oldslot)	/* to be safe */
				elt->count--;

			if (elt->count == 0) {
				LIST_REMOVE(elt, list);
				FREE(elt, M_UVMAOBJ);
			}
		}

	} else {
		/* we are using an array */
		oldslot = aobj->u_swslots[pageidx];
		aobj->u_swslots[pageidx] = slot;
	}
	return (oldslot);
}
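
/*
 * note the idiom for freeing a page's swap resources (used by
 * uao_detach and uao_releasepg below): clear the slot, then release
 * whatever was stored there:
 *
 *	slot = uao_set_swslot(uobj, pageidx, 0);
 *	if (slot)
 *		uvm_swap_free(slot, 1);
 */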

/*
 * end of hash/array functions
 */

/*
 * uao_free: free all resources held by an aobj, and then free the aobj
 *
 * => the aobj should be dead
 */
static void
uao_free(aobj)
	struct uvm_aobj *aobj;
{

	if (UAO_USES_SWHASH(aobj)) {
		int i, hashbuckets = aobj->u_swhashmask + 1;

		/*
		 * free the swslots from each hash bucket,
		 * then the hash bucket, and finally the hash table itself.
		 */
		for (i = 0; i < hashbuckets; i++) {
			struct uao_swhash_elt *elt, *next;

			for (elt = aobj->u_swhash[i].lh_first; elt != NULL;
			    elt = next) {
				int j;

				for (j = 0; j < UAO_SWHASH_CLUSTER_SIZE; j++) {
					int slot = elt->slots[j];

					if (slot)
						uvm_swap_free(slot, 1);
				}

				next = elt->list.le_next;
				FREE(elt, M_UVMAOBJ);
			}
		}
		FREE(aobj->u_swhash, M_UVMAOBJ);
	} else {
		int i;

		/*
		 * free the array
		 */

		for (i = 0; i < aobj->u_pages; i++) {
			int slot = aobj->u_swslots[i];

			if (slot)
				uvm_swap_free(slot, 1);
		}
		FREE(aobj->u_swslots, M_UVMAOBJ);
	}

	/*
	 * finally free the aobj itself
	 */
	FREE(aobj, M_UVMAOBJ);
}

/*
 * pager functions
 */

/*
 * uao_create: create an aobj of the given size and return its uvm_object.
 *
 * => for normal use, flags are always zero
 * => for the kernel object, the flags are:
 *	UAO_FLAG_KERNOBJ - allocate the kernel object (can only happen once)
 *	UAO_FLAG_KERNSWAP - enable swapping of kernel object ("           ")
 */
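/*
 * sketch of normal (non-kernel-object) use, with an illustrative size;
 * the UAO_FLAG_KERN* flags are only ever passed from uvm startup code:
 *
 *	struct uvm_object *uobj = uao_create(16 * PAGE_SIZE, 0);
 *	...
 *	uao_detach(uobj);	(drops the final reference, frees the aobj)
 */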
struct uvm_object *
uao_create(size, flags)
	vm_size_t size;
	int flags;
{
	static struct uvm_aobj kernel_object_store; /* home of kernel_object */
	static int kobj_alloced = 0;	/* not allocated yet */
	int pages = round_page(size) / PAGE_SIZE;
	struct uvm_aobj *aobj;

	/*
	 * malloc a new aobj unless we are asked for the kernel object
	 */
	if (flags & UAO_FLAG_KERNOBJ) {		/* want kernel object? */
		if (kobj_alloced)
			panic("uao_create: kernel object already allocated");

		aobj = &kernel_object_store;
		aobj->u_pages = pages;
		aobj->u_flags = UAO_FLAG_NOSWAP;	/* no swap to start */
		/* we are special, we never die */
		aobj->u_obj.uo_refs = UVM_OBJ_KERN;
		kobj_alloced = UAO_FLAG_KERNOBJ;
	} else if (flags & UAO_FLAG_KERNSWAP) {
		aobj = &kernel_object_store;
		if (kobj_alloced != UAO_FLAG_KERNOBJ)
			panic("uao_create: asked to enable swap on kernel object");
		kobj_alloced = UAO_FLAG_KERNSWAP;
	} else {	/* normal object */
		MALLOC(aobj, struct uvm_aobj *, sizeof(*aobj), M_UVMAOBJ,
		    M_WAITOK);
		aobj->u_pages = pages;
		aobj->u_flags = 0;		/* normal object */
		aobj->u_obj.uo_refs = 1;	/* start with 1 reference */
	}

	/*
	 * allocate hash/array if necessary
	 *
	 * note: in the KERNSWAP case there is no need to worry about locking,
	 * since we are still booting and should be the only thread around.
	 */
	if (flags == 0 || (flags & UAO_FLAG_KERNSWAP) != 0) {
		int mflags = (flags & UAO_FLAG_KERNSWAP) != 0 ?
		    M_NOWAIT : M_WAITOK;

		/* allocate hash table or array depending on object size */
		if (UAO_USES_SWHASH(aobj)) {
			aobj->u_swhash = hashinit(UAO_SWHASH_BUCKETS(aobj),
			    M_UVMAOBJ, mflags, &aobj->u_swhashmask);
			if (aobj->u_swhash == NULL)
				panic("uao_create: hashinit swhash failed");
		} else {
			MALLOC(aobj->u_swslots, int *, pages * sizeof(int),
			    M_UVMAOBJ, mflags);
			if (aobj->u_swslots == NULL)
				panic("uao_create: malloc swslots failed");
			bzero(aobj->u_swslots, pages * sizeof(int));
		}

		if (flags) {
			aobj->u_flags &= ~UAO_FLAG_NOSWAP; /* clear noswap */
			return(&aobj->u_obj);
			/* done! */
		}
	}

	/*
	 * init aobj fields
	 */
	simple_lock_init(&aobj->u_obj.vmobjlock);
	aobj->u_obj.pgops = &aobj_pager;
	TAILQ_INIT(&aobj->u_obj.memq);
	aobj->u_obj.uo_npages = 0;

	/*
	 * now that aobj is ready, add it to the global list
	 * XXXCHS: uao_init hasn't been called yet in the KERNOBJ case,
	 * do we really need the kernel object on this list anyway?
	 */
	simple_lock(&uao_list_lock);
	LIST_INSERT_HEAD(&uao_list, aobj, u_list);
	simple_unlock(&uao_list_lock);

	/*
	 * done!
	 */
	return(&aobj->u_obj);
}

/*
 * uao_init: set up aobj pager subsystem
 *
 * => called at boot time from uvm_pager_init()
 */
static void
uao_init()
{

	LIST_INIT(&uao_list);
	simple_lock_init(&uao_list_lock);
}

/*
 * uao_reference: add a ref to an aobj
 *
 * => aobj must be unlocked (we will lock it)
 */
void
uao_reference(uobj)
	struct uvm_object *uobj;
{
	UVMHIST_FUNC("uao_reference"); UVMHIST_CALLED(maphist);

	/*
	 * kernel_object already has plenty of references, leave it alone.
	 */

	if (uobj->uo_refs == UVM_OBJ_KERN)
		return;

	simple_lock(&uobj->vmobjlock);
	uobj->uo_refs++;		/* bump! */
	UVMHIST_LOG(maphist, "<- done (uobj=0x%x, ref = %d)",
	    uobj, uobj->uo_refs,0,0);
	simple_unlock(&uobj->vmobjlock);
}

/*
 * uao_detach: drop a reference to an aobj
 *
 * => aobj must be unlocked, we will lock it
 */
void
uao_detach(uobj)
	struct uvm_object *uobj;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct vm_page *pg;
	boolean_t busybody;
	UVMHIST_FUNC("uao_detach"); UVMHIST_CALLED(maphist);

	/*
	 * detaching from kernel_object is a noop.
	 */
	if (uobj->uo_refs == UVM_OBJ_KERN)
		return;

	simple_lock(&uobj->vmobjlock);

	UVMHIST_LOG(maphist,"  (uobj=0x%x)  ref=%d", uobj,uobj->uo_refs,0,0);
	uobj->uo_refs--;			/* drop ref! */
	if (uobj->uo_refs) {			/* still more refs? */
		simple_unlock(&uobj->vmobjlock);
		UVMHIST_LOG(maphist, "<- done (rc>0)", 0,0,0,0);
		return;
	}

	/*
	 * remove the aobj from the global list.
	 */
	simple_lock(&uao_list_lock);
	LIST_REMOVE(aobj, u_list);
	simple_unlock(&uao_list_lock);

	/*
	 * free all the pages that aren't PG_BUSY, mark for release any that are.
	 */

	busybody = FALSE;
	for (pg = uobj->memq.tqh_first ; pg != NULL ; pg = pg->listq.tqe_next) {
		int swslot;

		if (pg->flags & PG_BUSY) {
			pg->flags |= PG_RELEASED;
			busybody = TRUE;
			continue;
		}

		/* zap the mappings, free the swap slot, free the page */
		pmap_page_protect(PMAP_PGARG(pg), VM_PROT_NONE);

		swslot = uao_set_swslot(&aobj->u_obj, pg->offset / PAGE_SIZE, 0);
		if (swslot) {
			uvm_swap_free(swslot, 1);
		}

		uvm_lock_pageq();
		uvm_pagefree(pg);
		uvm_unlock_pageq();
	}

	/*
	 * if we found any busy pages, we're done for now.
	 * mark the aobj for death, releasepg will finish up for us.
	 */
	if (busybody) {
		aobj->u_flags |= UAO_FLAG_KILLME;
		simple_unlock(&aobj->u_obj.vmobjlock);
		return;
	}

	/*
	 * finally, free the rest.
	 */
	uao_free(aobj);
}
/*
 * uao_flush: uh, yea, sure it's flushed.  really!
 */
boolean_t
uao_flush(uobj, start, end, flags)
	struct uvm_object *uobj;
	vm_offset_t start, end;
	int flags;
{

	/*
	 * anonymous memory doesn't "flush"
	 */
	/*
	 * XXX
	 * deal with PGO_DEACTIVATE (for madvise(MADV_SEQUENTIAL))
	 * and PGO_FREE (for msync(MS_INVALIDATE))
	 */
	return TRUE;
}

/*
 * uao_get: fetch me a page
 *
 * we have three cases:
 * 1: page is resident     -> just return the page.
 * 2: page is zero-fill    -> allocate a new page and zero it.
 * 3: page is swapped out  -> fetch the page from swap.
 *
 * cases 1 and 2 can be handled with PGO_LOCKED, case 3 cannot.
 * so, if the "center" page hits case 3 (or any page, with PGO_ALLPAGES),
 * then we will need to return VM_PAGER_UNLOCK.
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_ALLPAGES: get all of the pages
 *           PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */
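/*
 * a sketch of the caller's side of this protocol (hypothetical caller;
 * uvm_fault() is the real one).  try PGO_LOCKED first, and only if we
 * answer VM_PAGER_UNLOCK drop the fault locks and call back in a
 * context where we may sleep and do swap I/O:
 *
 *	npages = 1;
 *	rv = uao_get(uobj, off, pps, &npages, 0, VM_PROT_READ,
 *	    MADV_NORMAL, PGO_LOCKED);
 *	if (rv == VM_PAGER_UNLOCK) {
 *		(unlock the fault data structures, relock uobj)
 *		npages = 1;
 *		rv = uao_get(uobj, off, pps, &npages, 0, VM_PROT_READ,
 *		    MADV_NORMAL, 0);
 *	}
 */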
static int
uao_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags)
	struct uvm_object *uobj;
	vm_offset_t offset;
	struct vm_page **pps;
	int *npagesp;
	int centeridx, advice, flags;
	vm_prot_t access_type;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	vm_offset_t current_offset;
	vm_page_t ptmp;
	int lcv, gotpages, maxpages, swslot, rv;
	boolean_t done;
	UVMHIST_FUNC("uao_get"); UVMHIST_CALLED(pdhist);

	UVMHIST_LOG(pdhist, "aobj=%p offset=%d, flags=%d", aobj, offset, flags,0);

	/*
	 * get number of pages
	 */

	maxpages = *npagesp;

	/*
	 * step 1: handle the case where fault data structures are locked.
	 */

	if (flags & PGO_LOCKED) {

		/*
		 * step 1a: get pages that are already resident.  only do
		 * this if the data structures are locked (i.e. the first
		 * time through).
		 */

		done = TRUE;	/* be optimistic */
		gotpages = 0;	/* # of pages we got so far */

		for (lcv = 0, current_offset = offset ; lcv < maxpages ;
		    lcv++, current_offset += PAGE_SIZE) {
			/* do we care about this page?  if not, skip it */
			if (pps[lcv] == PGO_DONTCARE)
				continue;

			ptmp = uvm_pagelookup(uobj, current_offset);

			/*
			 * if page is new, attempt to allocate the page, then
			 * zero-fill it.
			 */
			if (ptmp == NULL && uao_find_swslot(aobj,
			    current_offset / PAGE_SIZE) == 0) {
				ptmp = uvm_pagealloc(uobj, current_offset,
				    NULL);
				if (ptmp) {
					/* new page */
					ptmp->flags &= ~(PG_BUSY|PG_FAKE);
					ptmp->pqflags |= PQ_AOBJ;
					UVM_PAGE_OWN(ptmp, NULL);
					uvm_pagezero(ptmp);
				}
			}

			/*
			 * to be useful must get a non-busy, non-released page
			 */
			if (ptmp == NULL ||
			    (ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) {
				if (lcv == centeridx ||
				    (flags & PGO_ALLPAGES) != 0)
					/* need to do a wait or I/O! */
					done = FALSE;
				continue;
			}

			/*
			 * useful page: busy/lock it and plug it in our
			 * result array
			 */
			/* caller must un-busy this page */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uao_get1");
			pps[lcv] = ptmp;
			gotpages++;

		}	/* "for" lcv loop */

		/*
		 * step 1b: now we've either done everything needed or we
		 * need to unlock and do some waiting or I/O.
		 */

		UVMHIST_LOG(pdhist, "<- done (done=%d)", done, 0,0,0);

		*npagesp = gotpages;
		if (done)
			/* bingo! */
			return(VM_PAGER_OK);
		else
			/* EEK!  Need to unlock and I/O */
			return(VM_PAGER_UNLOCK);
	}

	/*
	 * step 2: get non-resident or busy pages.
	 * object is locked.  data structures are unlocked.
	 */

	for (lcv = 0, current_offset = offset ; lcv < maxpages ;
	    lcv++, current_offset += PAGE_SIZE) {
		/*
		 * - skip over pages we've already gotten or don't want
		 * - skip over pages we don't _have_ to get
		 */
		if (pps[lcv] != NULL ||
		    (lcv != centeridx && (flags & PGO_ALLPAGES) == 0))
			continue;

		/*
		 * we have yet to locate the current page (pps[lcv]).  we
		 * first look for a page that is already at the current offset.
		 * if we find a page, we check to see if it is busy or
		 * released.  if that is the case, then we sleep on the page
		 * until it is no longer busy or released and repeat the lookup.
		 * if the page we found is neither busy nor released, then we
		 * busy it (so we own it) and plug it into pps[lcv].  this
		 * 'break's the following while loop and indicates we are
		 * ready to move on to the next page in the "lcv" loop above.
		 *
		 * if we exit the while loop with pps[lcv] still set to NULL,
		 * then it means that we allocated a new busy/fake/clean page
		 * ptmp in the object and we need to do I/O to fill in the data.
		 */

		/* top of "pps" while loop */
		while (pps[lcv] == NULL) {
			/* look for a resident page */
			ptmp = uvm_pagelookup(uobj, current_offset);

			/* not resident?  allocate one now (if we can) */
			if (ptmp == NULL) {

				ptmp = uvm_pagealloc(uobj, current_offset,
				    NULL);	/* alloc */

				/* out of RAM? */
				if (ptmp == NULL) {
					simple_unlock(&uobj->vmobjlock);
					UVMHIST_LOG(pdhist,
					    "sleeping, ptmp == NULL\n",0,0,0,0);
					uvm_wait("uao_getpage");
					simple_lock(&uobj->vmobjlock);
					/* goto top of pps while loop */
					continue;
				}

				/*
				 * safe with PQ's unlocked: because we just
				 * alloc'd the page
				 */
				ptmp->pqflags |= PQ_AOBJ;

				/*
				 * got new page ready for I/O.  break pps while
				 * loop.  pps[lcv] is still NULL.
				 */
				break;
			}

			/* page is there, see if we need to wait on it */
			if ((ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) {
				ptmp->flags |= PG_WANTED;
				UVMHIST_LOG(pdhist,
				    "sleeping, ptmp->flags 0x%x\n",
				    ptmp->flags,0,0,0);
				UVM_UNLOCK_AND_WAIT(ptmp, &uobj->vmobjlock, 0,
				    "uao_get", 0);
				simple_lock(&uobj->vmobjlock);
				continue;	/* goto top of pps while loop */
			}

			/*
			 * if we get here then the page has become resident and
			 * unbusy between steps 1 and 2.  we busy it now (so we
			 * own it) and set pps[lcv] (so that we exit the while
			 * loop).
			 */
			/* we own it, caller must un-busy */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uao_get2");
			pps[lcv] = ptmp;
		}

		/*
		 * if we own the valid page at the correct offset, pps[lcv] will
		 * point to it.  nothing more to do except go to the next page.
		 */
		if (pps[lcv])
			continue;	/* next lcv */

		/*
		 * we have a "fake/busy/clean" page that we just allocated.
		 * do the needed "i/o", either reading from swap or zeroing.
		 */
		swslot = uao_find_swslot(aobj, current_offset / PAGE_SIZE);

		/*
		 * just zero the page if there's nothing in swap.
		 */
		if (swslot == 0) {
			/*
			 * page hasn't existed before, just zero it.
			 */
			uvm_pagezero(ptmp);
		} else {
			UVMHIST_LOG(pdhist, "pagein from swslot %d",
			    swslot, 0,0,0);

			/*
			 * page in the swapped-out page.
			 * unlock object for i/o, relock when done.
			 */
			simple_unlock(&uobj->vmobjlock);
			rv = uvm_swap_get(ptmp, swslot, PGO_SYNCIO);
			simple_lock(&uobj->vmobjlock);

			/*
			 * I/O done.  check for errors.
			 */
			if (rv != VM_PAGER_OK) {
				UVMHIST_LOG(pdhist, "<- done (error=%d)",
				    rv,0,0,0);
				if (ptmp->flags & PG_WANTED)
					/* object lock still held */
					thread_wakeup(ptmp);
				ptmp->flags &= ~(PG_WANTED|PG_BUSY);
				UVM_PAGE_OWN(ptmp, NULL);
				uvm_lock_pageq();
				uvm_pagefree(ptmp);
				uvm_unlock_pageq();
				simple_unlock(&uobj->vmobjlock);
				return (rv);
			}
		}

		/*
		 * we got the page!  clear the fake flag (indicates valid
		 * data now in page) and plug into our result array.  note
		 * that page is still busy.
		 *
		 * it is the callers job to:
		 * => check if the page is released
		 * => unbusy the page
		 * => activate the page
		 */

		ptmp->flags &= ~PG_FAKE;		/* data is valid ... */
		pmap_clear_modify(PMAP_PGARG(ptmp));	/* ... and clean */
		pps[lcv] = ptmp;

	}	/* lcv loop */

	/*
	 * finally, unlock object and return.
	 */

	simple_unlock(&uobj->vmobjlock);
	UVMHIST_LOG(pdhist, "<- done (OK)",0,0,0,0);
	return(VM_PAGER_OK);
}

/*
 * uao_releasepg: handle released page in an aobj
 *
 * => "pg" is a PG_BUSY [caller owns it], PG_RELEASED page that we need
 *      to dispose of.
 * => caller must handle PG_WANTED case
 * => called with page's object locked, pageq's unlocked
 * => returns TRUE if page's object is still alive, FALSE if we
 *      killed the page's object.  if we return TRUE, then we
 *      return with the object locked.
 * => if (nextpgp != NULL) => we return pageq.tqe_next here, and return
 *      with the page queues locked [for pagedaemon]
 * => if (nextpgp == NULL) => we return with page queues unlocked [normal case]
 * => we kill the aobj if it is not referenced and we are supposed to
 *      kill it ("KILLME").
 */
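/*
 * the two calling conventions above, sketched (hypothetical callers):
 *
 *	uao_releasepg(pg, NULL);	(normal case; pageqs end unlocked)
 *
 *	struct vm_page *nextpg;
 *	uao_releasepg(pg, &nextpg);	(pagedaemon; pageqs stay locked,
 *					 nextpg is the next page to scan)
 */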
static boolean_t
uao_releasepg(pg, nextpgp)
	struct vm_page *pg;
	struct vm_page **nextpgp;	/* OUT */
{
	struct uvm_aobj *aobj = (struct uvm_aobj *) pg->uobject;
	int slot;

#ifdef DIAGNOSTIC
	if ((pg->flags & PG_RELEASED) == 0)
		panic("uao_releasepg: page not released!");
#endif

	/*
	 * dispose of the page [caller handles PG_WANTED] and swap slot.
	 */
	pmap_page_protect(PMAP_PGARG(pg), VM_PROT_NONE);
	slot = uao_set_swslot(&aobj->u_obj, pg->offset / PAGE_SIZE, 0);
	if (slot)
		uvm_swap_free(slot, 1);
	uvm_lock_pageq();
	if (nextpgp)
		*nextpgp = pg->pageq.tqe_next;	/* next page for daemon */
	uvm_pagefree(pg);
	if (!nextpgp)
		uvm_unlock_pageq();		/* keep locked for daemon */

	/*
	 * if we're not killing the object, we're done.
	 */
	if ((aobj->u_flags & UAO_FLAG_KILLME) == 0)
		return TRUE;

#ifdef DIAGNOSTIC
	if (aobj->u_obj.uo_refs)
		panic("uao_releasepg: kill flag set on referenced object!");
#endif

	/*
	 * if there are still pages in the object, we're done for now.
	 */
	if (aobj->u_obj.uo_npages != 0)
		return TRUE;

#ifdef DIAGNOSTIC
	if (aobj->u_obj.memq.tqh_first)
		panic("uao_releasepg: pages in object with npages == 0");
#endif

	/*
	 * finally, free the rest.
	 */
	uao_free(aobj);

	return FALSE;
}