1 1.25 thorpej /* $NetBSD: uvm_aobj.c,v 1.25 1999/08/21 02:19:05 thorpej Exp $ */
2 1.6 mrg
3 1.7 chs /*
4 1.7 chs * Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
5 1.7 chs * Washington University.
6 1.7 chs * All rights reserved.
7 1.7 chs *
8 1.7 chs * Redistribution and use in source and binary forms, with or without
9 1.7 chs * modification, are permitted provided that the following conditions
10 1.7 chs * are met:
11 1.7 chs * 1. Redistributions of source code must retain the above copyright
12 1.7 chs * notice, this list of conditions and the following disclaimer.
13 1.7 chs * 2. Redistributions in binary form must reproduce the above copyright
14 1.7 chs * notice, this list of conditions and the following disclaimer in the
15 1.7 chs * documentation and/or other materials provided with the distribution.
16 1.7 chs * 3. All advertising materials mentioning features or use of this software
17 1.7 chs * must display the following acknowledgement:
18 1.7 chs * This product includes software developed by Charles D. Cranor and
19 1.7 chs * Washington University.
20 1.7 chs * 4. The name of the author may not be used to endorse or promote products
21 1.7 chs * derived from this software without specific prior written permission.
22 1.7 chs *
23 1.7 chs * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24 1.7 chs * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25 1.7 chs * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26 1.7 chs * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27 1.7 chs * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28 1.7 chs * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 1.7 chs * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 1.7 chs * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 1.7 chs * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32 1.7 chs * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 1.7 chs *
34 1.4 mrg * from: Id: uvm_aobj.c,v 1.1.2.5 1998/02/06 05:14:38 chs Exp
35 1.4 mrg */
36 1.7 chs /*
37 1.7 chs * uvm_aobj.c: anonymous memory uvm_object pager
38 1.7 chs *
39 1.7 chs * author: Chuck Silvers <chuq (at) chuq.com>
40 1.7 chs * started: Jan-1998
41 1.7 chs *
42 1.7 chs * - design mostly from Chuck Cranor
43 1.7 chs */
44 1.7 chs
45 1.7 chs
46 1.7 chs
47 1.7 chs #include "opt_uvmhist.h"
48 1.1 mrg
49 1.1 mrg #include <sys/param.h>
50 1.1 mrg #include <sys/systm.h>
51 1.1 mrg #include <sys/proc.h>
52 1.1 mrg #include <sys/malloc.h>
53 1.12 thorpej #include <sys/pool.h>
54 1.1 mrg
55 1.1 mrg #include <vm/vm.h>
56 1.1 mrg #include <vm/vm_page.h>
57 1.1 mrg #include <vm/vm_kern.h>
58 1.1 mrg
59 1.1 mrg #include <uvm/uvm.h>
60 1.1 mrg
61 1.1 mrg /*
62 1.1 mrg * an aobj manages anonymous-memory backed uvm_objects. in addition
63 1.1 mrg * to keeping the list of resident pages, it also keeps a list of
64 1.1 mrg * allocated swap blocks. depending on the size of the aobj this list
65 1.1 mrg * of allocated swap blocks is either stored in an array (small objects)
66 1.1 mrg * or in a hash table (large objects).
67 1.1 mrg */
68 1.1 mrg
69 1.1 mrg /*
70 1.1 mrg * local structures
71 1.1 mrg */
72 1.1 mrg
73 1.1 mrg /*
74 1.1 mrg * for hash tables, we break the address space of the aobj into blocks
75 1.1 mrg * of UAO_SWHASH_CLUSTER_SIZE pages. we require the cluster size to
76 1.1 mrg * be a power of two.
77 1.1 mrg */
78 1.1 mrg
79 1.1 mrg #define UAO_SWHASH_CLUSTER_SHIFT 4
80 1.1 mrg #define UAO_SWHASH_CLUSTER_SIZE (1 << UAO_SWHASH_CLUSTER_SHIFT)
81 1.1 mrg
82 1.1 mrg /* get the "tag" for this page index */
83 1.1 mrg #define UAO_SWHASH_ELT_TAG(PAGEIDX) \
84 1.1 mrg ((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT)
85 1.1 mrg
86 1.1 mrg /* given an ELT and a page index, find the swap slot */
87 1.1 mrg #define UAO_SWHASH_ELT_PAGESLOT(ELT, PAGEIDX) \
88 1.1 mrg ((ELT)->slots[(PAGEIDX) & (UAO_SWHASH_CLUSTER_SIZE - 1)])
89 1.1 mrg
90 1.1 mrg /* given an ELT, return its pageidx base */
91 1.1 mrg #define UAO_SWHASH_ELT_PAGEIDX_BASE(ELT) \
92 1.1 mrg ((ELT)->tag << UAO_SWHASH_CLUSTER_SHIFT)
93 1.1 mrg
94 1.1 mrg /*
95 1.1 mrg * the swhash hash function
96 1.1 mrg */
97 1.1 mrg #define UAO_SWHASH_HASH(AOBJ, PAGEIDX) \
98 1.1 mrg (&(AOBJ)->u_swhash[(((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT) \
99 1.1 mrg & (AOBJ)->u_swhashmask)])
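/*
 * worked example (editor's sketch, not part of the original source):
 * with UAO_SWHASH_CLUSTER_SHIFT == 4, page index 37 maps as
 *
 *     tag    = 37 >> 4 = 2
 *     slot   = (ELT)->slots[37 & (16 - 1)] = (ELT)->slots[5]
 *     bucket = &(AOBJ)->u_swhash[(37 >> 4) & (AOBJ)->u_swhashmask]
 *
 * so pages 32..47 share one uao_swhash_elt (tag 2), and that elt lives
 * in the bucket selected by its tag masked with u_swhashmask.
 */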
100 1.1 mrg
101 1.1 mrg /*
102 1.1 mrg * the swhash threshold determines if we will use an array or a
103 1.1 mrg * hash table to store the list of allocated swap blocks.
104 1.1 mrg */
105 1.1 mrg
106 1.1 mrg #define UAO_SWHASH_THRESHOLD (UAO_SWHASH_CLUSTER_SIZE * 4)
107 1.1 mrg #define UAO_USES_SWHASH(AOBJ) \
108 1.1 mrg ((AOBJ)->u_pages > UAO_SWHASH_THRESHOLD) /* use hash? */
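/*
 * worked example (editor's note): with the constants above the
 * threshold is UAO_SWHASH_CLUSTER_SIZE * 4 = 64 pages, so an aobj of
 * up to 64 pages keeps its swap slots in the flat u_swslots array,
 * while anything larger switches to the u_swhash hash table.
 */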
109 1.1 mrg
110 1.1 mrg /*
111 1.3 chs * the number of buckets in a swhash, with an upper bound
112 1.1 mrg */
113 1.1 mrg #define UAO_SWHASH_MAXBUCKETS 256
114 1.1 mrg #define UAO_SWHASH_BUCKETS(AOBJ) \
115 1.1 mrg (min((AOBJ)->u_pages >> UAO_SWHASH_CLUSTER_SHIFT, \
116 1.1 mrg UAO_SWHASH_MAXBUCKETS))
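/*
 * example (editor's sketch): a 1024-page aobj asks hashinit() for
 * min(1024 >> 4, 256) = 64 buckets; the count is capped at
 * UAO_SWHASH_MAXBUCKETS no matter how large the object grows.
 */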
117 1.1 mrg
118 1.1 mrg
119 1.1 mrg /*
120 1.1 mrg * uao_swhash_elt: when a hash table is being used, this structure defines
121 1.1 mrg * the format of an entry in the bucket list.
122 1.1 mrg */
123 1.1 mrg
124 1.1 mrg struct uao_swhash_elt {
125 1.5 mrg LIST_ENTRY(uao_swhash_elt) list; /* the hash list */
126 1.10 eeh vaddr_t tag; /* our 'tag' */
127 1.5 mrg int count; /* our number of active slots */
128 1.5 mrg int slots[UAO_SWHASH_CLUSTER_SIZE]; /* the slots */
129 1.1 mrg };
130 1.1 mrg
131 1.1 mrg /*
132 1.1 mrg * uao_swhash: the swap hash table structure
133 1.1 mrg */
134 1.1 mrg
135 1.1 mrg LIST_HEAD(uao_swhash, uao_swhash_elt);
136 1.1 mrg
137 1.12 thorpej /*
138 1.12 thorpej * uao_swhash_elt_pool: pool of uao_swhash_elt structures
139 1.12 thorpej */
140 1.12 thorpej
141 1.12 thorpej struct pool uao_swhash_elt_pool;
142 1.1 mrg
143 1.1 mrg /*
144 1.1 mrg * uvm_aobj: the actual anon-backed uvm_object
145 1.1 mrg *
146 1.1 mrg * => the uvm_object is at the top of the structure, this allows
147 1.1 mrg * (struct uvm_aobj *) == (struct uvm_object *)
148 1.1 mrg * => only one of u_swslots and u_swhash is used in any given aobj
149 1.1 mrg */
150 1.1 mrg
151 1.1 mrg struct uvm_aobj {
152 1.5 mrg struct uvm_object u_obj; /* has: lock, pgops, memq, #pages, #refs */
153 1.11 drochner int u_pages; /* number of pages in entire object */
154 1.5 mrg int u_flags; /* the flags (see uvm_aobj.h) */
155 1.5 mrg int *u_swslots; /* array of offset->swapslot mappings */
156 1.5 mrg /*
157 1.5 mrg * hashtable of offset->swapslot mappings
158 1.5 mrg * (u_swhash is an array of bucket heads)
159 1.5 mrg */
160 1.5 mrg struct uao_swhash *u_swhash;
161 1.5 mrg u_long u_swhashmask; /* mask for hashtable */
162 1.5 mrg LIST_ENTRY(uvm_aobj) u_list; /* global list of aobjs */
163 1.1 mrg };
164 1.1 mrg
165 1.1 mrg /*
166 1.12 thorpej * uvm_aobj_pool: pool of uvm_aobj structures
167 1.12 thorpej */
168 1.12 thorpej
169 1.12 thorpej struct pool uvm_aobj_pool;
170 1.12 thorpej
171 1.12 thorpej /*
172 1.1 mrg * local functions
173 1.1 mrg */
174 1.1 mrg
175 1.1 mrg static void uao_init __P((void));
176 1.1 mrg static struct uao_swhash_elt *uao_find_swhash_elt __P((struct uvm_aobj *,
177 1.1 mrg int, boolean_t));
178 1.1 mrg static int uao_find_swslot __P((struct uvm_aobj *,
179 1.11 drochner int));
180 1.1 mrg static boolean_t uao_flush __P((struct uvm_object *,
181 1.10 eeh vaddr_t, vaddr_t,
182 1.1 mrg int));
183 1.1 mrg static void uao_free __P((struct uvm_aobj *));
184 1.10 eeh static int uao_get __P((struct uvm_object *, vaddr_t,
185 1.1 mrg vm_page_t *, int *, int,
186 1.1 mrg vm_prot_t, int, int));
187 1.1 mrg static boolean_t uao_releasepg __P((struct vm_page *,
188 1.1 mrg struct vm_page **));
189 1.1 mrg
190 1.1 mrg
191 1.1 mrg
192 1.1 mrg /*
193 1.1 mrg * aobj_pager
194 1.1 mrg *
195 1.1 mrg * note that some functions (e.g. put) are handled elsewhere
196 1.1 mrg */
197 1.1 mrg
198 1.1 mrg struct uvm_pagerops aobj_pager = {
199 1.5 mrg uao_init, /* init */
200 1.5 mrg uao_reference, /* reference */
201 1.5 mrg uao_detach, /* detach */
202 1.5 mrg NULL, /* fault */
203 1.5 mrg uao_flush, /* flush */
204 1.5 mrg uao_get, /* get */
205 1.5 mrg NULL, /* asyncget */
206 1.5 mrg NULL, /* put (done by pagedaemon) */
207 1.5 mrg NULL, /* cluster */
208 1.5 mrg NULL, /* mk_pcluster */
209 1.5 mrg uvm_shareprot, /* shareprot */
210 1.5 mrg NULL, /* aiodone */
211 1.5 mrg uao_releasepg /* releasepg */
212 1.1 mrg };
213 1.1 mrg
214 1.1 mrg /*
215 1.1 mrg * uao_list: global list of active aobjs, locked by uao_list_lock
216 1.1 mrg */
217 1.1 mrg
218 1.1 mrg static LIST_HEAD(aobjlist, uvm_aobj) uao_list;
219 1.1 mrg static simple_lock_data_t uao_list_lock;
220 1.1 mrg
221 1.1 mrg
222 1.1 mrg /*
223 1.1 mrg * functions
224 1.1 mrg */
225 1.1 mrg
226 1.1 mrg /*
227 1.1 mrg * hash table/array related functions
228 1.1 mrg */
229 1.1 mrg
230 1.1 mrg /*
231 1.1 mrg * uao_find_swhash_elt: find (or create) a hash table entry for a page
232 1.1 mrg * offset.
233 1.1 mrg *
234 1.1 mrg * => the object should be locked by the caller
235 1.1 mrg */
236 1.1 mrg
237 1.5 mrg static struct uao_swhash_elt *
238 1.5 mrg uao_find_swhash_elt(aobj, pageidx, create)
239 1.5 mrg struct uvm_aobj *aobj;
240 1.5 mrg int pageidx;
241 1.5 mrg boolean_t create;
242 1.5 mrg {
243 1.5 mrg struct uao_swhash *swhash;
244 1.5 mrg struct uao_swhash_elt *elt;
245 1.5 mrg int page_tag;
246 1.1 mrg
247 1.5 mrg swhash = UAO_SWHASH_HASH(aobj, pageidx); /* first hash to get bucket */
248 1.5 mrg page_tag = UAO_SWHASH_ELT_TAG(pageidx); /* tag to search for */
249 1.1 mrg
250 1.5 mrg /*
251 1.5 mrg * now search the bucket for the requested tag
252 1.5 mrg */
253 1.5 mrg for (elt = swhash->lh_first; elt != NULL; elt = elt->list.le_next) {
254 1.5 mrg if (elt->tag == page_tag)
255 1.5 mrg return(elt);
256 1.5 mrg }
257 1.5 mrg
258 1.5 mrg /* fail now if we are not allowed to create a new entry in the bucket */
259 1.5 mrg if (!create)
260 1.5 mrg return NULL;
261 1.5 mrg
262 1.5 mrg
263 1.5 mrg /*
264 1.12 thorpej * allocate a new entry for the bucket and init/insert it in
265 1.5 mrg */
266 1.12 thorpej elt = pool_get(&uao_swhash_elt_pool, PR_WAITOK);
267 1.5 mrg LIST_INSERT_HEAD(swhash, elt, list);
268 1.5 mrg elt->tag = page_tag;
269 1.5 mrg elt->count = 0;
270 1.9 perry memset(elt->slots, 0, sizeof(elt->slots));
271 1.5 mrg
272 1.5 mrg return(elt);
273 1.1 mrg }
274 1.1 mrg
275 1.1 mrg /*
276 1.1 mrg * uao_find_swslot: find the swap slot number for an aobj/pageidx
277 1.1 mrg *
278 1.1 mrg * => object must be locked by caller
279 1.1 mrg */
280 1.5 mrg __inline static int
281 1.5 mrg uao_find_swslot(aobj, pageidx)
282 1.5 mrg struct uvm_aobj *aobj;
283 1.11 drochner int pageidx;
284 1.1 mrg {
285 1.1 mrg
286 1.5 mrg /*
287 1.5 mrg * if noswap flag is set, then we never return a slot
288 1.5 mrg */
289 1.1 mrg
290 1.5 mrg if (aobj->u_flags & UAO_FLAG_NOSWAP)
291 1.5 mrg return(0);
292 1.1 mrg
293 1.5 mrg /*
294 1.5 mrg * if hashing, look in hash table.
295 1.5 mrg */
296 1.1 mrg
297 1.5 mrg if (UAO_USES_SWHASH(aobj)) {
298 1.5 mrg struct uao_swhash_elt *elt =
299 1.5 mrg uao_find_swhash_elt(aobj, pageidx, FALSE);
300 1.5 mrg
301 1.5 mrg if (elt)
302 1.5 mrg return(UAO_SWHASH_ELT_PAGESLOT(elt, pageidx));
303 1.5 mrg else
304 1.5 mrg return(0);
305 1.5 mrg }
306 1.1 mrg
307 1.5 mrg /*
308 1.5 mrg * otherwise, look in the array
309 1.5 mrg */
310 1.5 mrg return(aobj->u_swslots[pageidx]);
311 1.1 mrg }
312 1.1 mrg
313 1.1 mrg /*
314 1.1 mrg * uao_set_swslot: set the swap slot for a page in an aobj.
315 1.1 mrg *
316 1.1 mrg * => setting a slot to zero frees the slot
317 1.1 mrg * => object must be locked by caller
318 1.1 mrg */
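/*
 * usage sketch (editor's illustration; "newslot" is a stand-in for a
 * slot obtained from the swap allocator): recording and later
 * releasing the swap slot for page index "pageidx" looks roughly like
 *
 *     old = uao_set_swslot(uobj, pageidx, newslot);   record mapping
 *     ...
 *     old = uao_set_swslot(uobj, pageidx, 0);         clear mapping
 *     if (old)
 *             uvm_swap_free(old, 1);                  release the slot
 *
 * which is the pattern uao_dropswap() at the end of this file wraps.
 */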
319 1.5 mrg int
320 1.5 mrg uao_set_swslot(uobj, pageidx, slot)
321 1.5 mrg struct uvm_object *uobj;
322 1.5 mrg int pageidx, slot;
323 1.5 mrg {
324 1.5 mrg struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
325 1.5 mrg int oldslot;
326 1.5 mrg UVMHIST_FUNC("uao_set_swslot"); UVMHIST_CALLED(pdhist);
327 1.5 mrg UVMHIST_LOG(pdhist, "aobj %p pageidx %d slot %d",
328 1.5 mrg aobj, pageidx, slot, 0);
329 1.1 mrg
330 1.5 mrg /*
331 1.5 mrg * if noswap flag is set, then we can't set a slot
332 1.5 mrg */
333 1.1 mrg
334 1.5 mrg if (aobj->u_flags & UAO_FLAG_NOSWAP) {
335 1.1 mrg
336 1.5 mrg if (slot == 0)
337 1.5 mrg return(0); /* a clear is ok */
338 1.1 mrg
339 1.5 mrg /* but a set is not */
340 1.5 mrg printf("uao_set_swslot: uobj = %p\n", uobj);
341 1.5 mrg panic("uao_set_swslot: attempt to set a slot on a NOSWAP object");
342 1.5 mrg }
343 1.1 mrg
344 1.5 mrg /*
345 1.5 mrg * are we using a hash table? if so, add it in the hash.
346 1.5 mrg */
347 1.1 mrg
348 1.5 mrg if (UAO_USES_SWHASH(aobj)) {
349 1.12 thorpej /*
350 1.12 thorpej * Avoid allocating an entry just to free it again if
351 1.12 thorpej * the page had no swap slot in the first place, and
352 1.12 thorpej * we are freeing.
353 1.12 thorpej */
354 1.5 mrg struct uao_swhash_elt *elt =
355 1.12 thorpej uao_find_swhash_elt(aobj, pageidx, slot ? TRUE : FALSE);
356 1.12 thorpej if (elt == NULL) {
357 1.12 thorpej #ifdef DIAGNOSTIC
358 1.12 thorpej if (slot)
359 1.12 thorpej panic("uao_set_swslot: didn't create elt");
360 1.12 thorpej #endif
361 1.12 thorpej return (0);
362 1.12 thorpej }
363 1.5 mrg
364 1.5 mrg oldslot = UAO_SWHASH_ELT_PAGESLOT(elt, pageidx);
365 1.5 mrg UAO_SWHASH_ELT_PAGESLOT(elt, pageidx) = slot;
366 1.5 mrg
367 1.5 mrg /*
368 1.5 mrg * now adjust the elt's reference counter and free it if we've
369 1.5 mrg * dropped it to zero.
370 1.5 mrg */
371 1.5 mrg
372 1.5 mrg /* an allocation? */
373 1.5 mrg if (slot) {
374 1.5 mrg if (oldslot == 0)
375 1.5 mrg elt->count++;
376 1.5 mrg } else { /* freeing slot ... */
377 1.5 mrg if (oldslot) /* to be safe */
378 1.5 mrg elt->count--;
379 1.5 mrg
380 1.5 mrg if (elt->count == 0) {
381 1.5 mrg LIST_REMOVE(elt, list);
382 1.12 thorpej pool_put(&uao_swhash_elt_pool, elt);
383 1.5 mrg }
384 1.5 mrg }
385 1.5 mrg
386 1.5 mrg } else {
387 1.5 mrg /* we are using an array */
388 1.5 mrg oldslot = aobj->u_swslots[pageidx];
389 1.5 mrg aobj->u_swslots[pageidx] = slot;
390 1.5 mrg }
391 1.5 mrg return (oldslot);
392 1.1 mrg }
393 1.1 mrg
394 1.1 mrg /*
395 1.1 mrg * end of hash/array functions
396 1.1 mrg */
397 1.1 mrg
398 1.1 mrg /*
399 1.1 mrg * uao_free: free all resources held by an aobj, and then free the aobj
400 1.1 mrg *
401 1.1 mrg * => the aobj should be dead
402 1.1 mrg */
403 1.1 mrg static void
404 1.1 mrg uao_free(aobj)
405 1.5 mrg struct uvm_aobj *aobj;
406 1.1 mrg {
407 1.1 mrg
408 1.5 mrg if (UAO_USES_SWHASH(aobj)) {
409 1.5 mrg int i, hashbuckets = aobj->u_swhashmask + 1;
410 1.1 mrg
411 1.5 mrg /*
412 1.5 mrg * free the swslots from each hash bucket,
413 1.5 mrg * then the hash bucket, and finally the hash table itself.
414 1.5 mrg */
415 1.5 mrg for (i = 0; i < hashbuckets; i++) {
416 1.5 mrg struct uao_swhash_elt *elt, *next;
417 1.5 mrg
418 1.5 mrg for (elt = aobj->u_swhash[i].lh_first; elt != NULL;
419 1.5 mrg elt = next) {
420 1.5 mrg int j;
421 1.5 mrg
422 1.5 mrg for (j = 0; j < UAO_SWHASH_CLUSTER_SIZE; j++) {
424 1.5 mrg int slot = elt->slots[j];
425 1.5 mrg
426 1.18 chs if (slot) {
427 1.5 mrg uvm_swap_free(slot, 1);
428 1.18 chs
429 1.18 chs /*
430 1.18 chs * this page is no longer
431 1.18 chs * only in swap.
432 1.18 chs */
433 1.18 chs simple_lock(&uvm.swap_data_lock);
434 1.18 chs uvmexp.swpgonly--;
435 1.18 chs simple_unlock(&uvm.swap_data_lock);
436 1.18 chs }
437 1.5 mrg }
438 1.5 mrg
439 1.5 mrg next = elt->list.le_next;
440 1.12 thorpej pool_put(&uao_swhash_elt_pool, elt);
441 1.5 mrg }
442 1.5 mrg }
443 1.5 mrg FREE(aobj->u_swhash, M_UVMAOBJ);
444 1.5 mrg } else {
445 1.5 mrg int i;
446 1.5 mrg
447 1.5 mrg /*
448 1.5 mrg * free the array
449 1.5 mrg */
450 1.5 mrg
451 1.5 mrg for (i = 0; i < aobj->u_pages; i++) {
453 1.5 mrg int slot = aobj->u_swslots[i];
454 1.5 mrg
455 1.18 chs if (slot) {
456 1.5 mrg uvm_swap_free(slot, 1);
457 1.18 chs
458 1.18 chs /* this page is no longer only in swap. */
459 1.18 chs simple_lock(&uvm.swap_data_lock);
460 1.18 chs uvmexp.swpgonly--;
461 1.18 chs simple_unlock(&uvm.swap_data_lock);
462 1.18 chs }
463 1.5 mrg }
464 1.5 mrg FREE(aobj->u_swslots, M_UVMAOBJ);
465 1.1 mrg }
466 1.1 mrg
467 1.5 mrg /*
468 1.5 mrg * finally free the aobj itself
469 1.5 mrg */
470 1.12 thorpej pool_put(&uvm_aobj_pool, aobj);
471 1.1 mrg }
472 1.1 mrg
473 1.1 mrg /*
474 1.1 mrg * pager functions
475 1.1 mrg */
476 1.1 mrg
477 1.1 mrg /*
478 1.1 mrg * uao_create: create an aobj of the given size and return its uvm_object.
479 1.1 mrg *
480 1.1 mrg * => for normal use, flags are always zero
481 1.1 mrg * => for the kernel object, the flags are:
482 1.1 mrg * UAO_FLAG_KERNOBJ - allocate the kernel object (can only happen once)
483 1.1 mrg * UAO_FLAG_KERNSWAP - enable swapping of kernel object (" ")
484 1.1 mrg */
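/*
 * usage sketch (editor's illustration; "npages" is hypothetical): a
 * normal, swappable anonymous object is created and later released
 * with
 *
 *     uobj = uao_create(npages << PAGE_SHIFT, 0);
 *     ...
 *     uao_detach(uobj);
 *
 * the kernel object is the exception: it is created exactly once with
 * UAO_FLAG_KERNOBJ early in boot, and swap is enabled for it later by
 * a second call with UAO_FLAG_KERNSWAP.
 */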
485 1.5 mrg struct uvm_object *
486 1.5 mrg uao_create(size, flags)
487 1.10 eeh vsize_t size;
488 1.5 mrg int flags;
489 1.5 mrg {
490 1.5 mrg static struct uvm_aobj kernel_object_store; /* home of kernel_object */
491 1.5 mrg static int kobj_alloced = 0; /* not allocated yet */
492 1.15 chs int pages = round_page(size) >> PAGE_SHIFT;
493 1.5 mrg struct uvm_aobj *aobj;
494 1.1 mrg
495 1.5 mrg /*
496 1.5 mrg * malloc a new aobj unless we are asked for the kernel object
497 1.5 mrg */
498 1.5 mrg if (flags & UAO_FLAG_KERNOBJ) { /* want kernel object? */
499 1.5 mrg if (kobj_alloced)
500 1.5 mrg panic("uao_create: kernel object already allocated");
501 1.5 mrg
502 1.12 thorpej /*
503 1.12 thorpej * XXXTHORPEJ: Need to call this now, so the pool gets
504 1.12 thorpej * initialized!
505 1.12 thorpej */
506 1.12 thorpej uao_init();
507 1.12 thorpej
508 1.5 mrg aobj = &kernel_object_store;
509 1.5 mrg aobj->u_pages = pages;
510 1.5 mrg aobj->u_flags = UAO_FLAG_NOSWAP; /* no swap to start */
511 1.5 mrg /* we are special, we never die */
512 1.5 mrg aobj->u_obj.uo_refs = UVM_OBJ_KERN;
513 1.5 mrg kobj_alloced = UAO_FLAG_KERNOBJ;
514 1.5 mrg } else if (flags & UAO_FLAG_KERNSWAP) {
515 1.5 mrg aobj = &kernel_object_store;
516 1.5 mrg if (kobj_alloced != UAO_FLAG_KERNOBJ)
517 1.5 mrg panic("uao_create: asked to enable swap on kernel object");
518 1.5 mrg kobj_alloced = UAO_FLAG_KERNSWAP;
519 1.5 mrg } else { /* normal object */
520 1.12 thorpej aobj = pool_get(&uvm_aobj_pool, PR_WAITOK);
521 1.5 mrg aobj->u_pages = pages;
522 1.5 mrg aobj->u_flags = 0; /* normal object */
523 1.5 mrg aobj->u_obj.uo_refs = 1; /* start with 1 reference */
524 1.5 mrg }
525 1.1 mrg
526 1.5 mrg /*
527 1.5 mrg * allocate hash/array if necessary
528 1.5 mrg *
529 1.5 mrg * note: in the KERNSWAP case no need to worry about locking since
530 1.5 mrg * we are still booting, we should be the only thread around.
531 1.5 mrg */
532 1.5 mrg if (flags == 0 || (flags & UAO_FLAG_KERNSWAP) != 0) {
533 1.5 mrg int mflags = (flags & UAO_FLAG_KERNSWAP) != 0 ?
534 1.5 mrg M_NOWAIT : M_WAITOK;
535 1.5 mrg
536 1.5 mrg /* allocate hash table or array depending on object size */
537 1.5 mrg if (UAO_USES_SWHASH(aobj)) {
538 1.5 mrg aobj->u_swhash = hashinit(UAO_SWHASH_BUCKETS(aobj),
539 1.5 mrg M_UVMAOBJ, mflags, &aobj->u_swhashmask);
540 1.5 mrg if (aobj->u_swhash == NULL)
541 1.5 mrg panic("uao_create: hashinit swhash failed");
542 1.5 mrg } else {
543 1.5 mrg MALLOC(aobj->u_swslots, int *, pages * sizeof(int),
544 1.5 mrg M_UVMAOBJ, mflags);
545 1.5 mrg if (aobj->u_swslots == NULL)
546 1.5 mrg panic("uao_create: malloc swslots failed");
547 1.9 perry memset(aobj->u_swslots, 0, pages * sizeof(int));
548 1.5 mrg }
549 1.5 mrg
550 1.5 mrg if (flags) {
551 1.5 mrg aobj->u_flags &= ~UAO_FLAG_NOSWAP; /* clear noswap */
552 1.5 mrg return(&aobj->u_obj);
553 1.5 mrg /* done! */
554 1.5 mrg }
555 1.5 mrg }
556 1.5 mrg
557 1.5 mrg /*
558 1.5 mrg * init aobj fields
559 1.5 mrg */
560 1.5 mrg simple_lock_init(&aobj->u_obj.vmobjlock);
561 1.5 mrg aobj->u_obj.pgops = &aobj_pager;
562 1.5 mrg TAILQ_INIT(&aobj->u_obj.memq);
563 1.5 mrg aobj->u_obj.uo_npages = 0;
564 1.1 mrg
565 1.5 mrg /*
566 1.5 mrg * now that aobj is ready, add it to the global list
567 1.5 mrg * XXXCHS: uao_init hasn't been called in the KERNOBJ case,
568 1.5 mrg * do we really need the kernel object on this list anyway?
569 1.5 mrg */
570 1.5 mrg simple_lock(&uao_list_lock);
571 1.5 mrg LIST_INSERT_HEAD(&uao_list, aobj, u_list);
572 1.5 mrg simple_unlock(&uao_list_lock);
573 1.5 mrg
574 1.5 mrg /*
575 1.5 mrg * done!
576 1.5 mrg */
577 1.5 mrg return(&aobj->u_obj);
578 1.1 mrg }
579 1.1 mrg
580 1.1 mrg
581 1.1 mrg
582 1.1 mrg /*
583 1.1 mrg * uao_init: set up aobj pager subsystem
584 1.1 mrg *
585 1.1 mrg * => called at boot time from uvm_pager_init()
586 1.1 mrg */
587 1.5 mrg static void
588 1.5 mrg uao_init()
589 1.5 mrg {
590 1.12 thorpej static int uao_initialized;
591 1.12 thorpej
592 1.12 thorpej if (uao_initialized)
593 1.12 thorpej return;
594 1.12 thorpej uao_initialized = TRUE;
595 1.1 mrg
596 1.5 mrg LIST_INIT(&uao_list);
597 1.5 mrg simple_lock_init(&uao_list_lock);
598 1.12 thorpej
599 1.14 thorpej /*
600 1.14 thorpej * NOTE: Pages for this pool must not come from a pageable
601 1.14 thorpej * kernel map!
602 1.14 thorpej */
603 1.12 thorpej pool_init(&uao_swhash_elt_pool, sizeof(struct uao_swhash_elt),
604 1.13 thorpej 0, 0, 0, "uaoeltpl", 0, NULL, NULL, M_UVMAOBJ);
605 1.12 thorpej
606 1.12 thorpej pool_init(&uvm_aobj_pool, sizeof(struct uvm_aobj), 0, 0, 0,
607 1.12 thorpej "aobjpl", 0,
608 1.12 thorpej pool_page_alloc_nointr, pool_page_free_nointr, M_UVMAOBJ);
609 1.1 mrg }
610 1.1 mrg
611 1.1 mrg /*
612 1.1 mrg * uao_reference: add a ref to an aobj
613 1.1 mrg *
614 1.1 mrg * => aobj must be unlocked (we will lock it)
615 1.1 mrg */
616 1.5 mrg void
617 1.5 mrg uao_reference(uobj)
618 1.5 mrg struct uvm_object *uobj;
619 1.1 mrg {
620 1.5 mrg UVMHIST_FUNC("uao_reference"); UVMHIST_CALLED(maphist);
621 1.1 mrg
622 1.5 mrg /*
623 1.5 mrg * kernel_object already has plenty of references, leave it alone.
624 1.5 mrg */
625 1.1 mrg
626 1.20 thorpej if (UVM_OBJ_IS_KERN_OBJECT(uobj))
627 1.5 mrg return;
628 1.1 mrg
629 1.5 mrg simple_lock(&uobj->vmobjlock);
630 1.5 mrg uobj->uo_refs++; /* bump! */
631 1.5 mrg UVMHIST_LOG(maphist, "<- done (uobj=0x%x, ref = %d)",
632 1.1 mrg uobj, uobj->uo_refs,0,0);
633 1.5 mrg simple_unlock(&uobj->vmobjlock);
634 1.1 mrg }
635 1.1 mrg
636 1.1 mrg /*
637 1.1 mrg * uao_detach: drop a reference to an aobj
638 1.1 mrg *
639 1.1 mrg * => aobj must be unlocked, we will lock it
640 1.1 mrg */
641 1.5 mrg void
642 1.5 mrg uao_detach(uobj)
643 1.5 mrg struct uvm_object *uobj;
644 1.5 mrg {
645 1.5 mrg struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
646 1.5 mrg struct vm_page *pg;
647 1.5 mrg boolean_t busybody;
648 1.5 mrg UVMHIST_FUNC("uao_detach"); UVMHIST_CALLED(maphist);
649 1.1 mrg
650 1.5 mrg /*
651 1.5 mrg * detaching from kernel_object is a noop.
652 1.5 mrg */
653 1.20 thorpej if (UVM_OBJ_IS_KERN_OBJECT(uobj))
654 1.5 mrg return;
655 1.1 mrg
656 1.5 mrg simple_lock(&uobj->vmobjlock);
657 1.5 mrg
658 1.5 mrg UVMHIST_LOG(maphist," (uobj=0x%x) ref=%d", uobj,uobj->uo_refs,0,0);
659 1.5 mrg uobj->uo_refs--; /* drop ref! */
660 1.5 mrg if (uobj->uo_refs) { /* still more refs? */
661 1.5 mrg simple_unlock(&uobj->vmobjlock);
662 1.5 mrg UVMHIST_LOG(maphist, "<- done (rc>0)", 0,0,0,0);
663 1.5 mrg return;
664 1.5 mrg }
665 1.5 mrg
666 1.5 mrg /*
667 1.5 mrg * remove the aobj from the global list.
668 1.5 mrg */
669 1.5 mrg simple_lock(&uao_list_lock);
670 1.5 mrg LIST_REMOVE(aobj, u_list);
671 1.5 mrg simple_unlock(&uao_list_lock);
672 1.5 mrg
673 1.5 mrg /*
674 1.5 mrg * free all the pages that aren't PG_BUSY, mark for release any that are.
675 1.5 mrg */
676 1.1 mrg
677 1.5 mrg busybody = FALSE;
678 1.5 mrg for (pg = uobj->memq.tqh_first ; pg != NULL ; pg = pg->listq.tqe_next) {
679 1.5 mrg
680 1.5 mrg if (pg->flags & PG_BUSY) {
681 1.5 mrg pg->flags |= PG_RELEASED;
682 1.5 mrg busybody = TRUE;
683 1.5 mrg continue;
684 1.5 mrg }
685 1.5 mrg
686 1.5 mrg /* zap the mappings, free the swap slot, free the page */
687 1.5 mrg pmap_page_protect(PMAP_PGARG(pg), VM_PROT_NONE);
688 1.18 chs uao_dropswap(&aobj->u_obj, pg->offset >> PAGE_SHIFT);
689 1.5 mrg uvm_lock_pageq();
690 1.5 mrg uvm_pagefree(pg);
691 1.5 mrg uvm_unlock_pageq();
692 1.5 mrg }
693 1.1 mrg
694 1.5 mrg /*
695 1.5 mrg * if we found any busy pages, we're done for now.
696 1.5 mrg * mark the aobj for death, releasepg will finish up for us.
697 1.5 mrg */
698 1.5 mrg if (busybody) {
699 1.5 mrg aobj->u_flags |= UAO_FLAG_KILLME;
700 1.5 mrg simple_unlock(&aobj->u_obj.vmobjlock);
701 1.5 mrg return;
702 1.5 mrg }
703 1.1 mrg
704 1.5 mrg /*
705 1.5 mrg * finally, free the rest.
706 1.5 mrg */
707 1.5 mrg uao_free(aobj);
708 1.5 mrg }
709 1.1 mrg
710 1.1 mrg /*
711 1.22 thorpej * uao_flush: "flush" pages out of a uvm object
712 1.22 thorpej *
713 1.22 thorpej * => object should be locked by caller. we may _unlock_ the object
714 1.22 thorpej * if (and only if) we need to clean a page (PGO_CLEANIT).
715 1.22 thorpej * XXXJRT Currently, however, we don't. In the case of cleaning
716 1.22 thorpej * XXXJRT a page, we simply deactivate it. Should probably
717 1.22 thorpej * XXXJRT handle this better, in the future (although "flushing"
718 1.22 thorpej * XXXJRT anonymous memory isn't terribly important).
719 1.22 thorpej * => if PGO_CLEANIT is not set, then we will neither unlock the object
720 1.22 thorpej * nor block.
721 1.22 thorpej * => if PGO_ALLPAGE is set, then all pages in the object are valid targets
722 1.22 thorpej * for flushing.
723 1.22 thorpej * => NOTE: we rely on the fact that the object's memq is a TAILQ and
724 1.22 thorpej * that new pages are inserted on the tail end of the list. thus,
725 1.22 thorpej * we can make a complete pass through the object in one go by starting
726 1.22 thorpej * at the head and working towards the tail (new pages are put in
727 1.22 thorpej * front of us).
728 1.22 thorpej * => NOTE: we are allowed to lock the page queues, so the caller
729 1.22 thorpej * must not be holding the lock on them [e.g. pagedaemon had
730 1.22 thorpej * better not call us with the queues locked]
731 1.22 thorpej * => we return TRUE unless we encountered some sort of I/O error
732 1.22 thorpej * XXXJRT currently never happens, as we never directly initiate
733 1.22 thorpej * XXXJRT I/O
734 1.22 thorpej *
735 1.22 thorpej * comment on "cleaning" object and PG_BUSY pages:
736 1.22 thorpej * this routine is holding the lock on the object. the only time
737 1.22 thorpej * that it can run into a PG_BUSY page that it does not own is if
738 1.22 thorpej * some other process has started I/O on the page (e.g. either
739 1.22 thorpej * a pagein or a pageout). if the PG_BUSY page is being paged
740 1.22 thorpej * in, then it can not be dirty (!PG_CLEAN) because no one has
741 1.22 thorpej * had a chance to modify it yet. if the PG_BUSY page is being
742 1.22 thorpej * paged out then it means that someone else has already started
743 1.22 thorpej * cleaning the page for us (how nice!). in this case, if we
744 1.22 thorpej * have syncio specified, then after we make our pass through the
745 1.22 thorpej * object we need to wait for the other PG_BUSY pages to clear
746 1.22 thorpej * off (i.e. we need to do an iosync). also note that once a
747 1.22 thorpej * page is PG_BUSY it must stay in its object until it is un-busied.
748 1.22 thorpej * XXXJRT We never actually do this, as we are "flushing" anonymous
749 1.22 thorpej * XXXJRT memory, which doesn't have persistent backing store.
750 1.22 thorpej *
751 1.22 thorpej * note on page traversal:
752 1.22 thorpej * we can traverse the pages in an object either by going down the
753 1.22 thorpej * linked list in "uobj->memq", or we can go over the address range
754 1.22 thorpej * by page doing hash table lookups for each address. depending
755 1.22 thorpej * on how many pages are in the object it may be cheaper to do one
756 1.22 thorpej * or the other. we set "by_list" to true if we are using memq.
757 1.22 thorpej * if the cost of a hash lookup was equal to the cost of the list
758 1.22 thorpej * traversal we could compare the number of pages in the start->stop
759 1.22 thorpej * range to the total number of pages in the object. however, it
760 1.22 thorpej * seems that a hash table lookup is more expensive than the linked
761 1.22 thorpej * list traversal, so we multiply the number of pages in the
762 1.22 thorpej * start->stop range by a penalty which we define below.
763 1.1 mrg */
764 1.22 thorpej
765 1.22 thorpej #define UAO_HASH_PENALTY 4 /* XXX: a guess */
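/*
 * worked example (editor's sketch): flushing a 16-page range of an
 * object with 100 resident pages compares 100 <= 16 * 4 (64), which is
 * false, so by_list is FALSE and we look pages up by offset; with only
 * 32 resident pages (32 <= 64) the memq walk would be chosen instead.
 */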
766 1.22 thorpej
767 1.5 mrg boolean_t
768 1.22 thorpej uao_flush(uobj, start, stop, flags)
769 1.5 mrg struct uvm_object *uobj;
770 1.22 thorpej vaddr_t start, stop;
771 1.5 mrg int flags;
772 1.5 mrg {
773 1.22 thorpej struct uvm_aobj *aobj = (struct uvm_aobj *) uobj;
774 1.22 thorpej struct vm_page *pp, *ppnext;
775 1.22 thorpej boolean_t retval, by_list;
776 1.22 thorpej vaddr_t curoff;
777 1.22 thorpej UVMHIST_FUNC("uao_flush"); UVMHIST_CALLED(maphist);
778 1.22 thorpej
779 1.22 thorpej curoff = 0; /* XXX: shut up gcc */
780 1.22 thorpej
781 1.22 thorpej retval = TRUE; /* default to success */
782 1.22 thorpej
783 1.22 thorpej if (flags & PGO_ALLPAGES) {
784 1.22 thorpej start = 0;
785 1.22 thorpej stop = aobj->u_pages << PAGE_SHIFT;
786 1.22 thorpej by_list = TRUE; /* always go by the list */
787 1.22 thorpej } else {
788 1.22 thorpej start = trunc_page(start);
789 1.22 thorpej stop = round_page(stop);
790 1.22 thorpej if (stop > (aobj->u_pages << PAGE_SHIFT)) {
791 1.22 thorpej printf("uao_flush: strange, got an out of range "
792 1.22 thorpej "flush (fixed)\n");
793 1.22 thorpej stop = aobj->u_pages << PAGE_SHIFT;
794 1.22 thorpej }
795 1.22 thorpej by_list = (uobj->uo_npages <=
796 1.22 thorpej ((stop - start) >> PAGE_SHIFT) * UAO_HASH_PENALTY);
797 1.22 thorpej }
798 1.22 thorpej
799 1.22 thorpej UVMHIST_LOG(maphist,
800 1.22 thorpej " flush start=0x%lx, stop=0x%x, by_list=%d, flags=0x%x",
801 1.22 thorpej start, stop, by_list, flags);
802 1.1 mrg
803 1.5 mrg /*
804 1.22 thorpej * Don't need to do any work here if we're not freeing
805 1.22 thorpej * or deactivating pages.
806 1.22 thorpej */
807 1.22 thorpej if ((flags & (PGO_DEACTIVATE|PGO_FREE)) == 0) {
808 1.22 thorpej UVMHIST_LOG(maphist,
809 1.22 thorpej "<- done (no work to do)",0,0,0,0);
810 1.22 thorpej return (retval);
811 1.22 thorpej }
812 1.22 thorpej
813 1.5 mrg /*
814 1.22 thorpej * now do it. note: we must update ppnext in the body of loop or we
815 1.22 thorpej * will get stuck. we need to use ppnext because we may free "pp"
816 1.22 thorpej * before doing the next loop.
817 1.21 thorpej */
818 1.22 thorpej
819 1.22 thorpej if (by_list) {
820 1.22 thorpej pp = uobj->memq.tqh_first;
821 1.22 thorpej } else {
822 1.22 thorpej curoff = start;
823 1.22 thorpej pp = uvm_pagelookup(uobj, curoff);
824 1.22 thorpej }
825 1.22 thorpej
826 1.22 thorpej ppnext = NULL; /* XXX: shut up gcc */
827 1.22 thorpej uvm_lock_pageq(); /* page queues locked */
828 1.22 thorpej
829 1.22 thorpej /* locked: both page queues and uobj */
830 1.22 thorpej for ( ; (by_list && pp != NULL) ||
831 1.22 thorpej (!by_list && curoff < stop) ; pp = ppnext) {
832 1.22 thorpej if (by_list) {
833 1.22 thorpej ppnext = pp->listq.tqe_next;
834 1.22 thorpej
835 1.22 thorpej /* range check */
836 1.22 thorpej if (pp->offset < start || pp->offset >= stop)
837 1.22 thorpej continue;
838 1.22 thorpej } else {
839 1.22 thorpej curoff += PAGE_SIZE;
840 1.22 thorpej if (curoff < stop)
841 1.22 thorpej ppnext = uvm_pagelookup(uobj, curoff);
842 1.22 thorpej
843 1.22 thorpej /* null check */
844 1.22 thorpej if (pp == NULL)
845 1.22 thorpej continue;
846 1.22 thorpej }
847 1.22 thorpej
848 1.22 thorpej switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) {
849 1.22 thorpej /*
850 1.22 thorpej * XXX In these first 3 cases, we always just
851 1.22 thorpej * XXX deactivate the page. We may want to
852 1.22 thorpej * XXX handle the different cases more specifically
853 1.22 thorpej * XXX in the future.
854 1.22 thorpej */
855 1.22 thorpej case PGO_CLEANIT|PGO_FREE:
856 1.22 thorpej case PGO_CLEANIT|PGO_DEACTIVATE:
857 1.22 thorpej case PGO_DEACTIVATE:
858 1.25 thorpej deactivate_it:
859 1.22 thorpej /* skip the page if it's loaned or wired */
860 1.22 thorpej if (pp->loan_count != 0 ||
861 1.22 thorpej pp->wire_count != 0)
862 1.22 thorpej continue;
863 1.22 thorpej
864 1.22 thorpej /* zap all mappings for the page. */
865 1.22 thorpej pmap_page_protect(PMAP_PGARG(pp),
866 1.22 thorpej VM_PROT_NONE);
867 1.22 thorpej
868 1.22 thorpej /* ...and deactivate the page. */
869 1.22 thorpej uvm_pagedeactivate(pp);
870 1.22 thorpej
871 1.22 thorpej continue;
872 1.22 thorpej
873 1.22 thorpej case PGO_FREE:
874 1.25 thorpej /*
875 1.25 thorpej * If there are multiple references to
876 1.25 thorpej * the object, just deactivate the page.
877 1.25 thorpej */
878 1.25 thorpej if (uobj->uo_refs > 1)
879 1.25 thorpej goto deactivate_it;
880 1.25 thorpej
881 1.22 thorpej /* XXX skip the page if it's loaned or wired */
882 1.22 thorpej if (pp->loan_count != 0 ||
883 1.22 thorpej pp->wire_count != 0)
884 1.22 thorpej continue;
885 1.22 thorpej
886 1.22 thorpej /*
887 1.22 thorpej * mark the page as released if it's busy.
888 1.22 thorpej */
889 1.22 thorpej if (pp->flags & PG_BUSY) {
890 1.22 thorpej pp->flags |= PG_RELEASED;
891 1.22 thorpej continue;
892 1.22 thorpej }
893 1.22 thorpej
894 1.22 thorpej /* zap all mappings for the page. */
895 1.22 thorpej pmap_page_protect(PMAP_PGARG(pp),
896 1.22 thorpej VM_PROT_NONE);
897 1.22 thorpej
898 1.22 thorpej uao_dropswap(uobj, pp->offset >> PAGE_SHIFT);
899 1.22 thorpej uvm_pagefree(pp);
900 1.22 thorpej
901 1.22 thorpej continue;
902 1.22 thorpej
903 1.22 thorpej default:
904 1.22 thorpej panic("uao_flush: weird flags");
905 1.22 thorpej }
906 1.22 thorpej #ifdef DIAGNOSTIC
907 1.22 thorpej panic("uao_flush: unreachable code");
908 1.22 thorpej #endif
909 1.22 thorpej }
910 1.22 thorpej
911 1.22 thorpej uvm_unlock_pageq();
912 1.22 thorpej
913 1.22 thorpej UVMHIST_LOG(maphist,
914 1.22 thorpej "<- done, rv=%d",retval,0,0,0);
915 1.22 thorpej return (retval);
916 1.1 mrg }
917 1.1 mrg
918 1.1 mrg /*
919 1.1 mrg * uao_get: fetch me a page
920 1.1 mrg *
921 1.1 mrg * we have three cases:
922 1.1 mrg * 1: page is resident -> just return the page.
923 1.1 mrg * 2: page is zero-fill -> allocate a new page and zero it.
924 1.1 mrg * 3: page is swapped out -> fetch the page from swap.
925 1.1 mrg *
926 1.1 mrg * cases 1 and 2 can be handled with PGO_LOCKED, case 3 cannot.
927 1.1 mrg * so, if the "center" page hits case 3 (or any page, with PGO_ALLPAGES),
928 1.1 mrg * then we will need to return VM_PAGER_UNLOCK.
929 1.1 mrg *
930 1.1 mrg * => prefer map unlocked (not required)
931 1.1 mrg * => object must be locked! we will _unlock_ it before starting any I/O.
932 1.1 mrg * => flags: PGO_ALLPAGES: get all of the pages
933 1.1 mrg * PGO_LOCKED: fault data structures are locked
934 1.1 mrg * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
935 1.1 mrg * => NOTE: caller must check for released pages!!
936 1.1 mrg */
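/*
 * usage sketch (editor's illustration; the caller, "off", "pps" and
 * "npages" are hypothetical): a fault-style caller typically makes two
 * passes, e.g.
 *
 *     npages = 1;
 *     rv = uao_get(uobj, off, pps, &npages, 0, VM_PROT_READ, 0,
 *         PGO_LOCKED);
 *     if (rv == VM_PAGER_UNLOCK) {
 *             ... unlock the fault data structures ...
 *             rv = uao_get(uobj, off, pps, &npages, 0, VM_PROT_READ,
 *                 0, 0);
 *     }
 *
 * the first (locked) pass only picks up pages that are resident or can
 * be zero-filled without sleeping; the second pass may sleep and do
 * swap I/O.
 */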
937 1.5 mrg static int
938 1.5 mrg uao_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags)
939 1.5 mrg struct uvm_object *uobj;
940 1.10 eeh vaddr_t offset;
941 1.5 mrg struct vm_page **pps;
942 1.5 mrg int *npagesp;
943 1.5 mrg int centeridx, advice, flags;
944 1.5 mrg vm_prot_t access_type;
945 1.5 mrg {
946 1.5 mrg struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
947 1.10 eeh vaddr_t current_offset;
948 1.5 mrg vm_page_t ptmp;
949 1.5 mrg int lcv, gotpages, maxpages, swslot, rv;
950 1.5 mrg boolean_t done;
951 1.5 mrg UVMHIST_FUNC("uao_get"); UVMHIST_CALLED(pdhist);
952 1.5 mrg
953 1.5 mrg UVMHIST_LOG(pdhist, "aobj=%p offset=%d, flags=%d", aobj, offset, flags,0);
954 1.5 mrg
955 1.5 mrg /*
956 1.5 mrg * get number of pages
957 1.5 mrg */
958 1.5 mrg
959 1.5 mrg maxpages = *npagesp;
960 1.5 mrg
961 1.5 mrg /*
962 1.5 mrg * step 1: handle the case where fault data structures are locked.
963 1.5 mrg */
964 1.1 mrg
965 1.5 mrg if (flags & PGO_LOCKED) {
966 1.1 mrg
967 1.5 mrg /*
968 1.5 mrg * step 1a: get pages that are already resident. only do
969 1.5 mrg * this if the data structures are locked (i.e. the first
970 1.5 mrg * time through).
971 1.5 mrg */
972 1.5 mrg
973 1.5 mrg done = TRUE; /* be optimistic */
974 1.5 mrg gotpages = 0; /* # of pages we got so far */
975 1.5 mrg
976 1.5 mrg for (lcv = 0, current_offset = offset ; lcv < maxpages ;
977 1.5 mrg lcv++, current_offset += PAGE_SIZE) {
978 1.5 mrg /* do we care about this page? if not, skip it */
979 1.5 mrg if (pps[lcv] == PGO_DONTCARE)
980 1.5 mrg continue;
981 1.5 mrg
982 1.5 mrg ptmp = uvm_pagelookup(uobj, current_offset);
983 1.5 mrg
984 1.5 mrg /*
985 1.5 mrg * if page is new, attempt to allocate the page, then
986 1.5 mrg * zero-fill it.
987 1.5 mrg */
988 1.5 mrg if (ptmp == NULL && uao_find_swslot(aobj,
989 1.15 chs current_offset >> PAGE_SHIFT) == 0) {
990 1.5 mrg ptmp = uvm_pagealloc(uobj, current_offset,
991 1.19 chs NULL, 0);
992 1.5 mrg if (ptmp) {
993 1.5 mrg /* new page */
994 1.5 mrg ptmp->flags &= ~(PG_BUSY|PG_FAKE);
995 1.5 mrg ptmp->pqflags |= PQ_AOBJ;
996 1.5 mrg UVM_PAGE_OWN(ptmp, NULL);
997 1.5 mrg uvm_pagezero(ptmp);
998 1.5 mrg }
999 1.5 mrg }
1000 1.5 mrg
1001 1.5 mrg /*
1002 1.5 mrg * to be useful must get a non-busy, non-released page
1003 1.5 mrg */
1004 1.5 mrg if (ptmp == NULL ||
1005 1.5 mrg (ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) {
1006 1.5 mrg if (lcv == centeridx ||
1007 1.5 mrg (flags & PGO_ALLPAGES) != 0)
1008 1.5 mrg /* need to do a wait or I/O! */
1009 1.5 mrg done = FALSE;
1010 1.5 mrg continue;
1011 1.5 mrg }
1012 1.5 mrg
1013 1.5 mrg /*
1014 1.5 mrg * useful page: busy/lock it and plug it in our
1015 1.5 mrg * result array
1016 1.5 mrg */
1017 1.5 mrg /* caller must un-busy this page */
1018 1.5 mrg ptmp->flags |= PG_BUSY;
1019 1.5 mrg UVM_PAGE_OWN(ptmp, "uao_get1");
1020 1.5 mrg pps[lcv] = ptmp;
1021 1.5 mrg gotpages++;
1022 1.5 mrg
1023 1.5 mrg } /* "for" lcv loop */
1024 1.5 mrg
1025 1.5 mrg /*
1026 1.5 mrg * step 1b: now we've either done everything needed or we need
1027 1.5 mrg * to unlock and do some waiting or I/O.
1028 1.5 mrg */
1029 1.5 mrg
1030 1.5 mrg UVMHIST_LOG(pdhist, "<- done (done=%d)", done, 0,0,0);
1031 1.5 mrg
1032 1.5 mrg *npagesp = gotpages;
1033 1.5 mrg if (done)
1034 1.5 mrg /* bingo! */
1035 1.5 mrg return(VM_PAGER_OK);
1036 1.5 mrg else
1037 1.5 mrg /* EEK! Need to unlock and I/O */
1038 1.5 mrg return(VM_PAGER_UNLOCK);
1039 1.1 mrg }
1040 1.1 mrg
1041 1.5 mrg /*
1042 1.5 mrg * step 2: get non-resident or busy pages.
1043 1.5 mrg * object is locked. data structures are unlocked.
1044 1.5 mrg */
1045 1.5 mrg
1046 1.5 mrg for (lcv = 0, current_offset = offset ; lcv < maxpages ;
1047 1.5 mrg lcv++, current_offset += PAGE_SIZE) {
1048 1.5 mrg /*
1049 1.5 mrg * - skip over pages we've already gotten or don't want
1050 1.5 mrg * - skip over pages we don't _have_ to get
1051 1.5 mrg */
1052 1.5 mrg if (pps[lcv] != NULL ||
1053 1.5 mrg (lcv != centeridx && (flags & PGO_ALLPAGES) == 0))
1054 1.5 mrg continue;
1055 1.5 mrg
1056 1.5 mrg /*
1057 1.5 mrg * we have yet to locate the current page (pps[lcv]). we
1058 1.5 mrg * first look for a page that is already at the current offset.
1059 1.5 mrg * if we find a page, we check to see if it is busy or
1060 1.5 mrg * released. if that is the case, then we sleep on the page
1061 1.5 mrg * until it is no longer busy or released and repeat the lookup.
1062 1.5 mrg * if the page we found is neither busy nor released, then we
1063 1.5 mrg * busy it (so we own it) and plug it into pps[lcv]. this
1064 1.5 mrg * 'break's the following while loop and indicates we are
1065 1.5 mrg * ready to move on to the next page in the "lcv" loop above.
1066 1.5 mrg *
1067 1.5 mrg * if we exit the while loop with pps[lcv] still set to NULL,
1068 1.5 mrg * then it means that we allocated a new busy/fake/clean page
1069 1.5 mrg * ptmp in the object and we need to do I/O to fill in the data.
1070 1.5 mrg */
1071 1.5 mrg
1072 1.5 mrg /* top of "pps" while loop */
1073 1.5 mrg while (pps[lcv] == NULL) {
1074 1.5 mrg /* look for a resident page */
1075 1.5 mrg ptmp = uvm_pagelookup(uobj, current_offset);
1076 1.5 mrg
1077 1.5 mrg /* not resident? allocate one now (if we can) */
1078 1.5 mrg if (ptmp == NULL) {
1079 1.5 mrg
1080 1.5 mrg ptmp = uvm_pagealloc(uobj, current_offset,
1081 1.19 chs NULL, 0);
1082 1.5 mrg
1083 1.5 mrg /* out of RAM? */
1084 1.5 mrg if (ptmp == NULL) {
1085 1.5 mrg simple_unlock(&uobj->vmobjlock);
1086 1.5 mrg UVMHIST_LOG(pdhist,
1087 1.5 mrg "sleeping, ptmp == NULL\n",0,0,0,0);
1088 1.5 mrg uvm_wait("uao_getpage");
1089 1.5 mrg simple_lock(&uobj->vmobjlock);
1090 1.5 mrg /* goto top of pps while loop */
1091 1.5 mrg continue;
1092 1.5 mrg }
1093 1.5 mrg
1094 1.5 mrg /*
1095 1.5 mrg * safe with PQ's unlocked: because we just
1096 1.5 mrg * alloc'd the page
1097 1.5 mrg */
1098 1.5 mrg ptmp->pqflags |= PQ_AOBJ;
1099 1.5 mrg
1100 1.5 mrg /*
1101 1.5 mrg * got new page ready for I/O. break pps while
1102 1.5 mrg * loop. pps[lcv] is still NULL.
1103 1.5 mrg */
1104 1.5 mrg break;
1105 1.5 mrg }
1106 1.5 mrg
1107 1.5 mrg /* page is there, see if we need to wait on it */
1108 1.5 mrg if ((ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) {
1109 1.5 mrg ptmp->flags |= PG_WANTED;
1110 1.5 mrg UVMHIST_LOG(pdhist,
1111 1.5 mrg "sleeping, ptmp->flags 0x%x\n",
1112 1.5 mrg ptmp->flags,0,0,0);
1113 1.23 thorpej UVM_UNLOCK_AND_WAIT(ptmp, &uobj->vmobjlock,
1114 1.23 thorpej FALSE, "uao_get", 0);
1115 1.5 mrg simple_lock(&uobj->vmobjlock);
1116 1.5 mrg continue; /* goto top of pps while loop */
1117 1.5 mrg }
1118 1.5 mrg
1119 1.5 mrg /*
1120 1.5 mrg * if we get here then the page has become resident and
1121 1.5 mrg * unbusy between steps 1 and 2. we busy it now (so we
1122 1.5 mrg * own it) and set pps[lcv] (so that we exit the while
1123 1.5 mrg * loop).
1124 1.5 mrg */
1125 1.5 mrg /* we own it, caller must un-busy */
1126 1.5 mrg ptmp->flags |= PG_BUSY;
1127 1.5 mrg UVM_PAGE_OWN(ptmp, "uao_get2");
1128 1.5 mrg pps[lcv] = ptmp;
1129 1.5 mrg }
1130 1.5 mrg
1131 1.5 mrg /*
1132 1.5 mrg * if we own the valid page at the correct offset, pps[lcv] will
1133 1.5 mrg * point to it. nothing more to do except go to the next page.
1134 1.5 mrg */
1135 1.5 mrg if (pps[lcv])
1136 1.5 mrg continue; /* next lcv */
1137 1.5 mrg
1138 1.5 mrg /*
1139 1.5 mrg * we have a "fake/busy/clean" page that we just allocated.
1140 1.5 mrg * do the needed "i/o", either reading from swap or zeroing.
1141 1.5 mrg */
1142 1.15 chs swslot = uao_find_swslot(aobj, current_offset >> PAGE_SHIFT);
1143 1.5 mrg
1144 1.5 mrg /*
1145 1.5 mrg * just zero the page if there's nothing in swap.
1146 1.5 mrg */
1147 1.5 mrg if (swslot == 0) {
1149 1.5 mrg /*
1150 1.5 mrg * page hasn't existed before, just zero it.
1151 1.5 mrg */
1152 1.5 mrg uvm_pagezero(ptmp);
1153 1.5 mrg } else {
1156 1.5 mrg UVMHIST_LOG(pdhist, "pagein from swslot %d",
1157 1.5 mrg swslot, 0,0,0);
1158 1.5 mrg
1159 1.5 mrg /*
1160 1.5 mrg * page in the swapped-out page.
1161 1.5 mrg * unlock object for i/o, relock when done.
1162 1.5 mrg */
1163 1.5 mrg simple_unlock(&uobj->vmobjlock);
1164 1.5 mrg rv = uvm_swap_get(ptmp, swslot, PGO_SYNCIO);
1165 1.5 mrg simple_lock(&uobj->vmobjlock);
1166 1.5 mrg
1167 1.5 mrg /*
1168 1.5 mrg * I/O done. check for errors.
1169 1.5 mrg */
1170 1.5 mrg if (rv != VM_PAGER_OK) {
1172 1.5 mrg UVMHIST_LOG(pdhist, "<- done (error=%d)",
1173 1.5 mrg rv,0,0,0);
1174 1.5 mrg if (ptmp->flags & PG_WANTED)
1175 1.5 mrg /* object lock still held */
1176 1.24 thorpej wakeup(ptmp);
1177 1.5 mrg ptmp->flags &= ~(PG_WANTED|PG_BUSY);
1178 1.5 mrg UVM_PAGE_OWN(ptmp, NULL);
1179 1.5 mrg uvm_lock_pageq();
1180 1.5 mrg uvm_pagefree(ptmp);
1181 1.5 mrg uvm_unlock_pageq();
1182 1.5 mrg simple_unlock(&uobj->vmobjlock);
1183 1.5 mrg return (rv);
1184 1.5 mrg }
1185 1.5 mrg }
1186 1.5 mrg
1187 1.5 mrg /*
1188 1.5 mrg * we got the page! clear the fake flag (indicates valid
1189 1.5 mrg * data now in page) and plug into our result array. note
1190 1.5 mrg * that page is still busy.
1191 1.5 mrg *
1192 1.5 mrg * it is the caller's job to:
1193 1.5 mrg * => check if the page is released
1194 1.5 mrg * => unbusy the page
1195 1.5 mrg * => activate the page
1196 1.5 mrg */
1197 1.5 mrg
1198 1.5 mrg ptmp->flags &= ~PG_FAKE; /* data is valid ... */
1199 1.5 mrg pmap_clear_modify(PMAP_PGARG(ptmp)); /* ... and clean */
1200 1.5 mrg pps[lcv] = ptmp;
1201 1.1 mrg
1202 1.5 mrg } /* lcv loop */
1203 1.1 mrg
1204 1.1 mrg /*
1205 1.5 mrg * finally, unlock object and return.
1206 1.5 mrg */
1207 1.1 mrg
1208 1.1 mrg simple_unlock(&uobj->vmobjlock);
1209 1.5 mrg UVMHIST_LOG(pdhist, "<- done (OK)",0,0,0,0);
1210 1.5 mrg return(VM_PAGER_OK);
1211 1.1 mrg }
1212 1.1 mrg
1213 1.1 mrg /*
1214 1.1 mrg * uao_releasepg: handle released page in an aobj
1215 1.1 mrg *
1216 1.1 mrg * => "pg" is a PG_BUSY [caller owns it], PG_RELEASED page that we need
1217 1.1 mrg * to dispose of.
1218 1.1 mrg * => caller must handle PG_WANTED case
1219 1.1 mrg * => called with page's object locked, pageq's unlocked
1220 1.1 mrg * => returns TRUE if page's object is still alive, FALSE if we
1221 1.1 mrg * killed the page's object. if we return TRUE, then we
1222 1.1 mrg * return with the object locked.
1223 1.1 mrg * => if (nextpgp != NULL) => we return pageq.tqe_next here, and return
1224 1.1 mrg * with the page queues locked [for pagedaemon]
1225 1.1 mrg * => if (nextpgp == NULL) => we return with page queues unlocked [normal case]
1226 1.1 mrg * => we kill the aobj if it is not referenced and we are supposed to
1227 1.1 mrg * kill it ("KILLME").
1228 1.1 mrg */
1229 1.1 mrg static boolean_t uao_releasepg(pg, nextpgp)
1230 1.5 mrg struct vm_page *pg;
1231 1.5 mrg struct vm_page **nextpgp; /* OUT */
1232 1.1 mrg {
1233 1.5 mrg struct uvm_aobj *aobj = (struct uvm_aobj *) pg->uobject;
1234 1.1 mrg
1235 1.1 mrg #ifdef DIAGNOSTIC
1236 1.5 mrg if ((pg->flags & PG_RELEASED) == 0)
1237 1.5 mrg panic("uao_releasepg: page not released!");
1238 1.1 mrg #endif
1239 1.5 mrg
1240 1.5 mrg /*
1241 1.5 mrg * dispose of the page [caller handles PG_WANTED] and swap slot.
1242 1.5 mrg */
1243 1.5 mrg pmap_page_protect(PMAP_PGARG(pg), VM_PROT_NONE);
1244 1.18 chs uao_dropswap(&aobj->u_obj, pg->offset >> PAGE_SHIFT);
1245 1.5 mrg uvm_lock_pageq();
1246 1.5 mrg if (nextpgp)
1247 1.5 mrg *nextpgp = pg->pageq.tqe_next; /* next page for daemon */
1248 1.5 mrg uvm_pagefree(pg);
1249 1.5 mrg if (!nextpgp)
1250 1.5 mrg uvm_unlock_pageq(); /* keep locked for daemon */
1251 1.5 mrg
1252 1.5 mrg /*
1253 1.5 mrg * if we're not killing the object, we're done.
1254 1.5 mrg */
1255 1.5 mrg if ((aobj->u_flags & UAO_FLAG_KILLME) == 0)
1256 1.5 mrg return TRUE;
1257 1.1 mrg
1258 1.1 mrg #ifdef DIAGNOSTIC
1259 1.5 mrg if (aobj->u_obj.uo_refs)
1260 1.5 mrg panic("uao_releasepg: kill flag set on referenced object!");
1261 1.1 mrg #endif
1262 1.1 mrg
1263 1.5 mrg /*
1264 1.5 mrg * if there are still pages in the object, we're done for now.
1265 1.5 mrg */
1266 1.5 mrg if (aobj->u_obj.uo_npages != 0)
1267 1.5 mrg return TRUE;
1268 1.1 mrg
1269 1.1 mrg #ifdef DIAGNOSTIC
1270 1.5 mrg if (aobj->u_obj.memq.tqh_first)
1271 1.5 mrg panic("uao_releasepg: pages in object with npages == 0");
1272 1.1 mrg #endif
1273 1.1 mrg
1274 1.5 mrg /*
1275 1.5 mrg * finally, free the rest.
1276 1.5 mrg */
1277 1.5 mrg uao_free(aobj);
1278 1.1 mrg
1279 1.5 mrg return FALSE;
1280 1.18 chs }
1281 1.18 chs
1282 1.18 chs /*
1283 1.18 chs * uao_dropswap: release any swap resources from this aobj page.
1284 1.18 chs *
1285 1.18 chs * => aobj must be locked or have a reference count of 0.
1286 1.18 chs */
1287 1.18 chs
1288 1.18 chs void
1289 1.18 chs uao_dropswap(uobj, pageidx)
1290 1.18 chs struct uvm_object *uobj;
1291 1.18 chs int pageidx;
1292 1.18 chs {
1293 1.18 chs int slot;
1294 1.18 chs
1295 1.18 chs slot = uao_set_swslot(uobj, pageidx, 0);
1296 1.18 chs if (slot) {
1297 1.18 chs uvm_swap_free(slot, 1);
1298 1.18 chs }
1299 1.1 mrg }