/*	$NetBSD: uvm_aobj.c,v 1.12 1998/08/31 00:01:59 thorpej Exp $	*/

/*
 * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
 *	   >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
 */
/*
 * Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
 *                    Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_aobj.c,v 1.1.2.5 1998/02/06 05:14:38 chs Exp
 */
/*
 * uvm_aobj.c: anonymous memory uvm_object pager
 *
 * author: Chuck Silvers <chuq@chuq.com>
 * started: Jan-1998
 *
 * - design mostly from Chuck Cranor
 */



#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#include <uvm/uvm.h>

/*
 * an aobj manages anonymous-memory backed uvm_objects.  in addition
 * to keeping the list of resident pages, it also keeps a list of
 * allocated swap blocks.  depending on the size of the aobj this list
 * of allocated swap blocks is either stored in an array (small objects)
 * or in a hash table (large objects).
 */

/*
 * local structures
 */

/*
 * for hash tables, we break the address space of the aobj into blocks
 * of UAO_SWHASH_CLUSTER_SIZE pages.  we require the cluster size to
 * be a power of two.
 */

#define UAO_SWHASH_CLUSTER_SHIFT 4
#define UAO_SWHASH_CLUSTER_SIZE (1 << UAO_SWHASH_CLUSTER_SHIFT)

/* get the "tag" for this page index */
#define UAO_SWHASH_ELT_TAG(PAGEIDX) \
	((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT)

/* given an ELT and a page index, find the swap slot */
#define UAO_SWHASH_ELT_PAGESLOT(ELT, PAGEIDX) \
	((ELT)->slots[(PAGEIDX) & (UAO_SWHASH_CLUSTER_SIZE - 1)])

/* given an ELT, return its pageidx base */
#define UAO_SWHASH_ELT_PAGEIDX_BASE(ELT) \
	((ELT)->tag << UAO_SWHASH_CLUSTER_SHIFT)

/*
 * the swhash hash function
 */
#define UAO_SWHASH_HASH(AOBJ, PAGEIDX) \
	(&(AOBJ)->u_swhash[(((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT) \
			    & (AOBJ)->u_swhashmask)])
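
/*
 * worked example (editor's illustration, not part of the original
 * interface): with UAO_SWHASH_CLUSTER_SHIFT == 4, page index 0x123
 * breaks down as
 *
 *	UAO_SWHASH_ELT_TAG(0x123)           == 0x123 >> 4 == 0x12
 *	UAO_SWHASH_ELT_PAGESLOT(elt, 0x123) == elt->slots[0x123 & 0xf]
 *	                                    == elt->slots[3]
 *	UAO_SWHASH_HASH(aobj, 0x123)        == &u_swhash[0x12 & u_swhashmask]
 *
 * so the sixteen page indexes 0x120..0x12f all share the single
 * uao_swhash_elt whose tag is 0x12.
 */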

/*
 * the swhash threshold determines if we will use an array or a
 * hash table to store the list of allocated swap blocks.
 */

#define UAO_SWHASH_THRESHOLD (UAO_SWHASH_CLUSTER_SIZE * 4)
#define UAO_USES_SWHASH(AOBJ) \
	((AOBJ)->u_pages > UAO_SWHASH_THRESHOLD)	/* use hash? */

/*
 * the number of buckets in a swhash, with an upper bound
 */
#define UAO_SWHASH_MAXBUCKETS 256
#define UAO_SWHASH_BUCKETS(AOBJ) \
	(min((AOBJ)->u_pages >> UAO_SWHASH_CLUSTER_SHIFT, \
	     UAO_SWHASH_MAXBUCKETS))
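
/*
 * example (editor's illustration): with a cluster size of 16 the
 * threshold is 64 pages, so a 32 page aobj keeps its slots in a flat
 * 32-entry u_swslots array, while a 1000 page aobj passes
 * min(1000 >> 4, 256) == 62 to hashinit() and uses a hash table.
 */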


/*
 * uao_swhash_elt: when a hash table is being used, this structure defines
 * the format of an entry in the bucket list.
 */

struct uao_swhash_elt {
	LIST_ENTRY(uao_swhash_elt) list;	/* the hash list */
	vaddr_t tag;				/* our 'tag' */
	int count;				/* our number of active slots */
	int slots[UAO_SWHASH_CLUSTER_SIZE];	/* the slots */
};

/*
 * uao_swhash: the swap hash table structure
 */

LIST_HEAD(uao_swhash, uao_swhash_elt);

/*
 * uao_swhash_elt_pool: pool of uao_swhash_elt structures
 */

struct pool uao_swhash_elt_pool;

/*
 * uvm_aobj: the actual anon-backed uvm_object
 *
 * => the uvm_object is at the top of the structure, this allows
 *    (struct uvm_aobj *) == (struct uvm_object *)
 * => only one of u_swslots and u_swhash is used in any given aobj
 */

struct uvm_aobj {
	struct uvm_object u_obj; /* has: lock, pgops, memq, #pages, #refs */
	int u_pages;		/* number of pages in entire object */
	int u_flags;		/* the flags (see uvm_aobj.h) */
	int *u_swslots;		/* array of offset->swapslot mappings */
	/*
	 * hashtable of offset->swapslot mappings
	 * (u_swhash is an array of bucket heads)
	 */
	struct uao_swhash *u_swhash;
	u_long u_swhashmask;		/* mask for hashtable */
	LIST_ENTRY(uvm_aobj) u_list;	/* global list of aobjs */
};

/*
 * uvm_aobj_pool: pool of uvm_aobj structures
 */

struct pool uvm_aobj_pool;

/*
 * local functions
 */

static void			uao_init __P((void));
static struct uao_swhash_elt	*uao_find_swhash_elt __P((struct uvm_aobj *,
							  int, boolean_t));
static int			uao_find_swslot __P((struct uvm_aobj *,
						     int));
static boolean_t		uao_flush __P((struct uvm_object *,
					       vaddr_t, vaddr_t,
					       int));
static void			uao_free __P((struct uvm_aobj *));
static int			uao_get __P((struct uvm_object *, vaddr_t,
					     vm_page_t *, int *, int,
					     vm_prot_t, int, int));
static boolean_t		uao_releasepg __P((struct vm_page *,
						   struct vm_page **));



/*
 * aobj_pager
 *
 * note that some functions (e.g. put) are handled elsewhere
 */

struct uvm_pagerops aobj_pager = {
	uao_init,		/* init */
	NULL,			/* attach */
	uao_reference,		/* reference */
	uao_detach,		/* detach */
	NULL,			/* fault */
	uao_flush,		/* flush */
	uao_get,		/* get */
	NULL,			/* asyncget */
	NULL,			/* put (done by pagedaemon) */
	NULL,			/* cluster */
	NULL,			/* mk_pcluster */
	uvm_shareprot,		/* shareprot */
	NULL,			/* aiodone */
	uao_releasepg		/* releasepg */
};

/*
 * uao_list: global list of active aobjs, locked by uao_list_lock
 */

static LIST_HEAD(aobjlist, uvm_aobj) uao_list;
static simple_lock_data_t uao_list_lock;


/*
 * functions
 */

/*
 * hash table/array related functions
 */

/*
 * uao_find_swhash_elt: find (or create) a hash table entry for a page
 * offset.
 *
 * => the object should be locked by the caller
 */

static struct uao_swhash_elt *
uao_find_swhash_elt(aobj, pageidx, create)
	struct uvm_aobj *aobj;
	int pageidx;
	boolean_t create;
{
	struct uao_swhash *swhash;
	struct uao_swhash_elt *elt;
	int page_tag;

	swhash = UAO_SWHASH_HASH(aobj, pageidx); /* first hash to get bucket */
	page_tag = UAO_SWHASH_ELT_TAG(pageidx);	 /* tag to search for */

	/*
	 * now search the bucket for the requested tag
	 */
	for (elt = swhash->lh_first; elt != NULL; elt = elt->list.le_next) {
		if (elt->tag == page_tag)
			return(elt);
	}

	/* fail now if we are not allowed to create a new entry in the bucket */
	if (!create)
		return NULL;


	/*
	 * allocate a new entry for the bucket and init/insert it in
	 */
	elt = pool_get(&uao_swhash_elt_pool, PR_WAITOK);
	LIST_INSERT_HEAD(swhash, elt, list);
	elt->tag = page_tag;
	elt->count = 0;
	memset(elt->slots, 0, sizeof(elt->slots));

	return(elt);
}

/*
 * uao_find_swslot: find the swap slot number for an aobj/pageidx
 *
 * => object must be locked by caller
 */
__inline static int
uao_find_swslot(aobj, pageidx)
	struct uvm_aobj *aobj;
	int pageidx;
{

	/*
	 * if noswap flag is set, then we never return a slot
	 */

	if (aobj->u_flags & UAO_FLAG_NOSWAP)
		return(0);

	/*
	 * if hashing, look in hash table.
	 */

	if (UAO_USES_SWHASH(aobj)) {
		struct uao_swhash_elt *elt =
		    uao_find_swhash_elt(aobj, pageidx, FALSE);

		if (elt)
			return(UAO_SWHASH_ELT_PAGESLOT(elt, pageidx));
		else
			return(0);
	}

	/*
	 * otherwise, look in the array
	 */
	return(aobj->u_swslots[pageidx]);
}

/*
 * uao_set_swslot: set the swap slot for a page in an aobj.
 *
 * => setting a slot to zero frees the slot
 * => object must be locked by caller
 */
int
uao_set_swslot(uobj, pageidx, slot)
	struct uvm_object *uobj;
	int pageidx, slot;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	int oldslot;
	UVMHIST_FUNC("uao_set_swslot"); UVMHIST_CALLED(pdhist);
	UVMHIST_LOG(pdhist, "aobj %p pageidx %d slot %d",
	    aobj, pageidx, slot, 0);

	/*
	 * if noswap flag is set, then we can't set a slot
	 */

	if (aobj->u_flags & UAO_FLAG_NOSWAP) {

		if (slot == 0)
			return(0);		/* a clear is ok */

		/* but a set is not */
		printf("uao_set_swslot: uobj = %p\n", uobj);
		panic("uao_set_swslot: attempt to set a slot on a NOSWAP object");
	}

	/*
	 * are we using a hash table?  if so, add it in the hash.
	 */

	if (UAO_USES_SWHASH(aobj)) {
		/*
		 * Avoid allocating an entry just to free it again if
		 * the page had no swap slot in the first place, and
		 * we are freeing.
		 */
		struct uao_swhash_elt *elt =
		    uao_find_swhash_elt(aobj, pageidx, slot ? TRUE : FALSE);
		if (elt == NULL) {
#ifdef DIAGNOSTIC
			if (slot)
				panic("uao_set_swslot: didn't create elt");
#endif
			return (0);
		}

		oldslot = UAO_SWHASH_ELT_PAGESLOT(elt, pageidx);
		UAO_SWHASH_ELT_PAGESLOT(elt, pageidx) = slot;

		/*
		 * now adjust the elt's reference counter and free it if we've
		 * dropped it to zero.
		 */

		/* an allocation? */
		if (slot) {
			if (oldslot == 0)
				elt->count++;
		} else {		/* freeing slot ... */
			if (oldslot)	/* to be safe */
				elt->count--;

			if (elt->count == 0) {
				LIST_REMOVE(elt, list);
				pool_put(&uao_swhash_elt_pool, elt);
			}
		}

	} else {
		/* we are using an array */
		oldslot = aobj->u_swslots[pageidx];
		aobj->u_swslots[pageidx] = slot;
	}
	return (oldslot);
}
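
/*
 * usage sketch (editor's illustration): callers that are dropping a
 * page typically clear its slot and release any swap space in one go,
 * exactly as uao_detach() and uao_releasepg() below do:
 *
 *	slot = uao_set_swslot(&aobj->u_obj, pg->offset / PAGE_SIZE, 0);
 *	if (slot)
 *		uvm_swap_free(slot, 1);
 */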

/*
 * end of hash/array functions
 */

/*
 * uao_free: free all resources held by an aobj, and then free the aobj
 *
 * => the aobj should be dead
 */
static void
uao_free(aobj)
	struct uvm_aobj *aobj;
{

	if (UAO_USES_SWHASH(aobj)) {
		int i, hashbuckets = aobj->u_swhashmask + 1;

		/*
		 * free the swslots from each hash bucket,
		 * then the hash bucket, and finally the hash table itself.
		 */
		for (i = 0; i < hashbuckets; i++) {
			struct uao_swhash_elt *elt, *next;

			for (elt = aobj->u_swhash[i].lh_first; elt != NULL;
			    elt = next) {
				int j;

				for (j = 0; j < UAO_SWHASH_CLUSTER_SIZE; j++) {
					int slot = elt->slots[j];

					if (slot)
						uvm_swap_free(slot, 1);
				}

				next = elt->list.le_next;
				pool_put(&uao_swhash_elt_pool, elt);
			}
		}
		FREE(aobj->u_swhash, M_UVMAOBJ);
	} else {
		int i;

		/*
		 * free the array
		 */

		for (i = 0; i < aobj->u_pages; i++) {
			int slot = aobj->u_swslots[i];

			if (slot)
				uvm_swap_free(slot, 1);
		}
		FREE(aobj->u_swslots, M_UVMAOBJ);
	}

	/*
	 * finally free the aobj itself
	 */
	pool_put(&uvm_aobj_pool, aobj);
}

/*
 * pager functions
 */

/*
 * uao_create: create an aobj of the given size and return its uvm_object.
 *
 * => for normal use, flags are always zero
 * => for the kernel object, the flags are:
 *	UAO_FLAG_KERNOBJ - allocate the kernel object (can only happen once)
 *	UAO_FLAG_KERNSWAP - enable swapping of kernel object ("   ")
 */
struct uvm_object *
uao_create(size, flags)
	vsize_t size;
	int flags;
{
	static struct uvm_aobj kernel_object_store; /* home of kernel_object */
	static int kobj_alloced = 0;	/* not allocated yet */
	int pages = round_page(size) / PAGE_SIZE;
	struct uvm_aobj *aobj;

	/*
	 * malloc a new aobj unless we are asked for the kernel object
	 */
	if (flags & UAO_FLAG_KERNOBJ) {		/* want kernel object? */
		if (kobj_alloced)
			panic("uao_create: kernel object already allocated");

		/*
		 * XXXTHORPEJ: Need to call this now, so the pool gets
		 * initialized!
		 */
		uao_init();

		aobj = &kernel_object_store;
		aobj->u_pages = pages;
		aobj->u_flags = UAO_FLAG_NOSWAP;	/* no swap to start */
		/* we are special, we never die */
		aobj->u_obj.uo_refs = UVM_OBJ_KERN;
		kobj_alloced = UAO_FLAG_KERNOBJ;
	} else if (flags & UAO_FLAG_KERNSWAP) {
		aobj = &kernel_object_store;
		if (kobj_alloced != UAO_FLAG_KERNOBJ)
			panic("uao_create: asked to enable swap on kernel object");
		kobj_alloced = UAO_FLAG_KERNSWAP;
	} else {	/* normal object */
		aobj = pool_get(&uvm_aobj_pool, PR_WAITOK);
		aobj->u_pages = pages;
		aobj->u_flags = 0;		/* normal object */
		aobj->u_obj.uo_refs = 1;	/* start with 1 reference */
	}

	/*
	 * allocate hash/array if necessary
	 *
	 * note: in the KERNSWAP case there is no need to worry about
	 * locking, since we are still booting and should be the only
	 * thread around.
	 */
	if (flags == 0 || (flags & UAO_FLAG_KERNSWAP) != 0) {
		int mflags = (flags & UAO_FLAG_KERNSWAP) != 0 ?
		    M_NOWAIT : M_WAITOK;

		/* allocate hash table or array depending on object size */
		if (UAO_USES_SWHASH(aobj)) {
			aobj->u_swhash = hashinit(UAO_SWHASH_BUCKETS(aobj),
			    M_UVMAOBJ, mflags, &aobj->u_swhashmask);
			if (aobj->u_swhash == NULL)
				panic("uao_create: hashinit swhash failed");
		} else {
			MALLOC(aobj->u_swslots, int *, pages * sizeof(int),
			    M_UVMAOBJ, mflags);
			if (aobj->u_swslots == NULL)
				panic("uao_create: malloc swslots failed");
			memset(aobj->u_swslots, 0, pages * sizeof(int));
		}

		if (flags) {
			aobj->u_flags &= ~UAO_FLAG_NOSWAP; /* clear noswap */
			return(&aobj->u_obj);
			/* done! */
		}
	}

	/*
	 * init aobj fields
	 */
	simple_lock_init(&aobj->u_obj.vmobjlock);
	aobj->u_obj.pgops = &aobj_pager;
	TAILQ_INIT(&aobj->u_obj.memq);
	aobj->u_obj.uo_npages = 0;

	/*
	 * now that aobj is ready, add it to the global list
	 * XXXCHS: uao_init hasn't been called in the KERNOBJ case,
	 * do we really need the kernel object on this list anyway?
	 */
	simple_lock(&uao_list_lock);
	LIST_INSERT_HEAD(&uao_list, aobj, u_list);
	simple_unlock(&uao_list_lock);

	/*
	 * done!
	 */
	return(&aobj->u_obj);
}
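
/*
 * usage sketch (editor's illustration): a normal anonymous object is
 * created with flags of zero and destroyed by dropping its last
 * reference, e.g.
 *
 *	struct uvm_object *uobj;
 *
 *	uobj = uao_create(16 * PAGE_SIZE, 0);	(16 page aobj, 1 ref)
 *	...
 *	uao_detach(uobj);			(drops last ref, frees aobj)
 */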



/*
 * uao_init: set up aobj pager subsystem
 *
 * => called at boot time from uvm_pager_init()
 */
static void
uao_init()
{
	static int uao_initialized;

	if (uao_initialized)
		return;
	uao_initialized = TRUE;

	LIST_INIT(&uao_list);
	simple_lock_init(&uao_list_lock);

	pool_init(&uao_swhash_elt_pool, sizeof(struct uao_swhash_elt),
	    0, 0, 0, "uaoeltpl", 0,
	    pool_page_alloc_nointr, pool_page_free_nointr, M_UVMAOBJ);

	pool_init(&uvm_aobj_pool, sizeof(struct uvm_aobj), 0, 0, 0,
	    "aobjpl", 0,
	    pool_page_alloc_nointr, pool_page_free_nointr, M_UVMAOBJ);
}

/*
 * uao_reference: add a ref to an aobj
 *
 * => aobj must be unlocked (we will lock it)
 */
void
uao_reference(uobj)
	struct uvm_object *uobj;
{
	UVMHIST_FUNC("uao_reference"); UVMHIST_CALLED(maphist);

	/*
	 * kernel_object already has plenty of references, leave it alone.
	 */

	if (uobj->uo_refs == UVM_OBJ_KERN)
		return;

	simple_lock(&uobj->vmobjlock);
	uobj->uo_refs++;		/* bump! */
	UVMHIST_LOG(maphist, "<- done (uobj=0x%x, ref = %d)",
	    uobj, uobj->uo_refs,0,0);
	simple_unlock(&uobj->vmobjlock);
}

/*
 * uao_detach: drop a reference to an aobj
 *
 * => aobj must be unlocked, we will lock it
 */
void
uao_detach(uobj)
	struct uvm_object *uobj;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct vm_page *pg, *pgnext;
	boolean_t busybody;
	UVMHIST_FUNC("uao_detach"); UVMHIST_CALLED(maphist);

	/*
	 * detaching from kernel_object is a noop.
	 */
	if (uobj->uo_refs == UVM_OBJ_KERN)
		return;

	simple_lock(&uobj->vmobjlock);

	UVMHIST_LOG(maphist,"  (uobj=0x%x)  ref=%d", uobj,uobj->uo_refs,0,0);
	uobj->uo_refs--;			/* drop ref! */
	if (uobj->uo_refs) {			/* still more refs? */
		simple_unlock(&uobj->vmobjlock);
		UVMHIST_LOG(maphist, "<- done (rc>0)", 0,0,0,0);
		return;
	}

	/*
	 * remove the aobj from the global list.
	 */
	simple_lock(&uao_list_lock);
	LIST_REMOVE(aobj, u_list);
	simple_unlock(&uao_list_lock);

	/*
	 * free all the pages that aren't PG_BUSY, mark for release any that are.
	 */

	busybody = FALSE;
	/* note: uvm_pagefree() frees "pg", so grab the next pointer first */
	for (pg = uobj->memq.tqh_first ; pg != NULL ; pg = pgnext) {
		int swslot;

		pgnext = pg->listq.tqe_next;

		if (pg->flags & PG_BUSY) {
			pg->flags |= PG_RELEASED;
			busybody = TRUE;
			continue;
		}

		/* zap the mappings, free the swap slot, free the page */
		pmap_page_protect(PMAP_PGARG(pg), VM_PROT_NONE);

		swslot = uao_set_swslot(&aobj->u_obj, pg->offset / PAGE_SIZE, 0);
		if (swslot) {
			uvm_swap_free(swslot, 1);
		}

		uvm_lock_pageq();
		uvm_pagefree(pg);
		uvm_unlock_pageq();
	}

	/*
	 * if we found any busy pages, we're done for now.
	 * mark the aobj for death, releasepg will finish up for us.
	 */
	if (busybody) {
		aobj->u_flags |= UAO_FLAG_KILLME;
		simple_unlock(&aobj->u_obj.vmobjlock);
		return;
	}

	/*
	 * finally, free the rest.
	 */
	uao_free(aobj);
}

/*
 * uao_flush: uh, yea, sure it's flushed.  really!
 */
boolean_t
uao_flush(uobj, start, end, flags)
	struct uvm_object *uobj;
	vaddr_t start, end;
	int flags;
{

	/*
	 * anonymous memory doesn't "flush"
	 */
	/*
	 * XXX
	 * deal with PGO_DEACTIVATE (for madvise(MADV_SEQUENTIAL))
	 * and PGO_FREE (for msync(MS_INVALIDATE))
	 */
	return TRUE;
}

/*
 * uao_get: fetch me a page
 *
 * we have three cases:
 * 1: page is resident     -> just return the page.
 * 2: page is zero-fill    -> allocate a new page and zero it.
 * 3: page is swapped out  -> fetch the page from swap.
 *
 * cases 1 and 2 can be handled with PGO_LOCKED, case 3 cannot.
 * so, if the "center" page hits case 3 (or any page, with PGO_ALLPAGES),
 * then we will need to return VM_PAGER_UNLOCK.
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_ALLPAGES: get all of the pages
 *           PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */
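/*
 * editor's note (illustration): under PGO_LOCKED we may not sleep or
 * start I/O, so if the center page (or any page, with PGO_ALLPAGES)
 * turns out to be busy or swapped out, step 1 below leaves it NULL in
 * pps[] and we return VM_PAGER_UNLOCK; the fault code is then expected
 * to unlock its data structures and call us again without PGO_LOCKED,
 * letting step 2 do the waiting and the swap I/O.
 */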
static int
uao_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags)
	struct uvm_object *uobj;
	vaddr_t offset;
	struct vm_page **pps;
	int *npagesp;
	int centeridx, advice, flags;
	vm_prot_t access_type;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	vaddr_t current_offset;
	vm_page_t ptmp;
	int lcv, gotpages, maxpages, swslot, rv;
	boolean_t done;
	UVMHIST_FUNC("uao_get"); UVMHIST_CALLED(pdhist);

	UVMHIST_LOG(pdhist, "aobj=%p offset=%d, flags=%d", aobj, offset, flags,0);

	/*
	 * get number of pages
	 */

	maxpages = *npagesp;

	/*
	 * step 1: handle the case where fault data structures are locked.
	 */

	if (flags & PGO_LOCKED) {

		/*
		 * step 1a: get pages that are already resident.  only do
		 * this if the data structures are locked (i.e. the first
		 * time through).
		 */

		done = TRUE;	/* be optimistic */
		gotpages = 0;	/* # of pages we got so far */

		for (lcv = 0, current_offset = offset ; lcv < maxpages ;
		    lcv++, current_offset += PAGE_SIZE) {
			/* do we care about this page?  if not, skip it */
			if (pps[lcv] == PGO_DONTCARE)
				continue;

			ptmp = uvm_pagelookup(uobj, current_offset);

			/*
			 * if page is new, attempt to allocate the page, then
			 * zero-fill it.
			 */
			if (ptmp == NULL && uao_find_swslot(aobj,
			    current_offset / PAGE_SIZE) == 0) {
				ptmp = uvm_pagealloc(uobj, current_offset,
				    NULL);
				if (ptmp) {
					/* new page */
					ptmp->flags &= ~(PG_BUSY|PG_FAKE);
					ptmp->pqflags |= PQ_AOBJ;
					UVM_PAGE_OWN(ptmp, NULL);
					uvm_pagezero(ptmp);
				}
			}

			/*
			 * to be useful must get a non-busy, non-released page
			 */
			if (ptmp == NULL ||
			    (ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) {
				if (lcv == centeridx ||
				    (flags & PGO_ALLPAGES) != 0)
					/* need to do a wait or I/O! */
					done = FALSE;
				continue;
			}

			/*
			 * useful page: busy/lock it and plug it in our
			 * result array
			 */
			/* caller must un-busy this page */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uao_get1");
			pps[lcv] = ptmp;
			gotpages++;

		}	/* "for" lcv loop */

		/*
		 * step 1b: now we've either done everything needed or we
		 * need to unlock and do some waiting or I/O.
		 */

		UVMHIST_LOG(pdhist, "<- done (done=%d)", done, 0,0,0);

		*npagesp = gotpages;
		if (done)
			/* bingo! */
			return(VM_PAGER_OK);
		else
			/* EEK!  Need to unlock and I/O */
			return(VM_PAGER_UNLOCK);
	}

	/*
	 * step 2: get non-resident or busy pages.
	 * object is locked.  data structures are unlocked.
	 */

	for (lcv = 0, current_offset = offset ; lcv < maxpages ;
	    lcv++, current_offset += PAGE_SIZE) {
		/*
		 * - skip over pages we've already gotten or don't want
		 * - skip over pages we don't _have_ to get
		 */
		if (pps[lcv] != NULL ||
		    (lcv != centeridx && (flags & PGO_ALLPAGES) == 0))
			continue;

		/*
		 * we have yet to locate the current page (pps[lcv]).  we
		 * first look for a page that is already at the current offset.
		 * if we find a page, we check to see if it is busy or
		 * released.  if that is the case, then we sleep on the page
		 * until it is no longer busy or released and repeat the lookup.
		 * if the page we found is neither busy nor released, then we
		 * busy it (so we own it) and plug it into pps[lcv].  this
		 * 'break's the following while loop and indicates we are
		 * ready to move on to the next page in the "lcv" loop above.
		 *
		 * if we exit the while loop with pps[lcv] still set to NULL,
		 * then it means that we allocated a new busy/fake/clean page
		 * ptmp in the object and we need to do I/O to fill in the data.
		 */

		/* top of "pps" while loop */
		while (pps[lcv] == NULL) {
			/* look for a resident page */
			ptmp = uvm_pagelookup(uobj, current_offset);

			/* not resident?  allocate one now (if we can) */
			if (ptmp == NULL) {

				ptmp = uvm_pagealloc(uobj, current_offset,
				    NULL);	/* alloc */

				/* out of RAM? */
				if (ptmp == NULL) {
					simple_unlock(&uobj->vmobjlock);
					UVMHIST_LOG(pdhist,
					    "sleeping, ptmp == NULL\n",0,0,0,0);
					uvm_wait("uao_getpage");
					simple_lock(&uobj->vmobjlock);
					/* goto top of pps while loop */
					continue;
				}

				/*
				 * safe with PQ's unlocked: because we just
				 * alloc'd the page
				 */
				ptmp->pqflags |= PQ_AOBJ;

				/*
				 * got new page ready for I/O.  break pps while
				 * loop.  pps[lcv] is still NULL.
				 */
				break;
			}

			/* page is there, see if we need to wait on it */
			if ((ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) {
				ptmp->flags |= PG_WANTED;
				UVMHIST_LOG(pdhist,
				    "sleeping, ptmp->flags 0x%x\n",
				    ptmp->flags,0,0,0);
				UVM_UNLOCK_AND_WAIT(ptmp, &uobj->vmobjlock, 0,
				    "uao_get", 0);
				simple_lock(&uobj->vmobjlock);
				continue;	/* goto top of pps while loop */
			}

			/*
			 * if we get here then the page has become resident and
			 * unbusy between steps 1 and 2.  we busy it now (so we
			 * own it) and set pps[lcv] (so that we exit the while
			 * loop).
			 */
			/* we own it, caller must un-busy */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uao_get2");
			pps[lcv] = ptmp;
		}

		/*
		 * if we own the valid page at the correct offset, pps[lcv] will
		 * point to it.  nothing more to do except go to the next page.
		 */
		if (pps[lcv])
			continue;	/* next lcv */

		/*
		 * we have a "fake/busy/clean" page that we just allocated.
		 * do the needed "i/o", either reading from swap or zeroing.
		 */
		swslot = uao_find_swslot(aobj, current_offset / PAGE_SIZE);

		/*
		 * just zero the page if there's nothing in swap.
		 */
		if (swslot == 0) {
			/*
			 * page hasn't existed before, just zero it.
			 */
			uvm_pagezero(ptmp);
		} else {
			UVMHIST_LOG(pdhist, "pagein from swslot %d",
			    swslot, 0,0,0);

			/*
			 * page in the swapped-out page.
			 * unlock object for i/o, relock when done.
			 */
			simple_unlock(&uobj->vmobjlock);
			rv = uvm_swap_get(ptmp, swslot, PGO_SYNCIO);
			simple_lock(&uobj->vmobjlock);

			/*
			 * I/O done.  check for errors.
			 */
			if (rv != VM_PAGER_OK) {
				UVMHIST_LOG(pdhist, "<- done (error=%d)",
				    rv,0,0,0);
				if (ptmp->flags & PG_WANTED)
					/* object lock still held */
					thread_wakeup(ptmp);
				ptmp->flags &= ~(PG_WANTED|PG_BUSY);
				UVM_PAGE_OWN(ptmp, NULL);
				uvm_lock_pageq();
				uvm_pagefree(ptmp);
				uvm_unlock_pageq();
				simple_unlock(&uobj->vmobjlock);
				return (rv);
			}
		}

		/*
		 * we got the page!  clear the fake flag (indicates valid
		 * data now in page) and plug into our result array.  note
		 * that page is still busy.
		 *
		 * it is the caller's job to:
		 * => check if the page is released
		 * => unbusy the page
		 * => activate the page
		 */

		ptmp->flags &= ~PG_FAKE;		/* data is valid ... */
		pmap_clear_modify(PMAP_PGARG(ptmp));	/* ... and clean */
		pps[lcv] = ptmp;

	}	/* lcv loop */

	/*
	 * finally, unlock object and return.
	 */

	simple_unlock(&uobj->vmobjlock);
	UVMHIST_LOG(pdhist, "<- done (OK)",0,0,0,0);
	return(VM_PAGER_OK);
}

/*
 * uao_releasepg: handle released page in an aobj
 *
 * => "pg" is a PG_BUSY [caller owns it], PG_RELEASED page that we need
 *	to dispose of.
 * => caller must handle PG_WANTED case
 * => called with page's object locked, pageq's unlocked
 * => returns TRUE if page's object is still alive, FALSE if we
 *	killed the page's object.  if we return TRUE, then we
 *	return with the object locked.
 * => if (nextpgp != NULL) => we return pageq.tqe_next here, and return
 *	with the page queues locked [for pagedaemon]
 * => if (nextpgp == NULL) => we return with page queues unlocked [normal case]
 * => we kill the aobj if it is not referenced and we are supposed to
 *	kill it ("KILLME").
 */
static boolean_t
uao_releasepg(pg, nextpgp)
	struct vm_page *pg;
	struct vm_page **nextpgp;	/* OUT */
{
	struct uvm_aobj *aobj = (struct uvm_aobj *) pg->uobject;
	int slot;

#ifdef DIAGNOSTIC
	if ((pg->flags & PG_RELEASED) == 0)
		panic("uao_releasepg: page not released!");
#endif

	/*
	 * dispose of the page [caller handles PG_WANTED] and swap slot.
	 */
	pmap_page_protect(PMAP_PGARG(pg), VM_PROT_NONE);
	slot = uao_set_swslot(&aobj->u_obj, pg->offset / PAGE_SIZE, 0);
	if (slot)
		uvm_swap_free(slot, 1);
	uvm_lock_pageq();
	if (nextpgp)
		*nextpgp = pg->pageq.tqe_next;	/* next page for daemon */
	uvm_pagefree(pg);
	if (!nextpgp)
		uvm_unlock_pageq();		/* keep locked for daemon */

	/*
	 * if we're not killing the object, we're done.
	 */
	if ((aobj->u_flags & UAO_FLAG_KILLME) == 0)
		return TRUE;

#ifdef DIAGNOSTIC
	if (aobj->u_obj.uo_refs)
		panic("uao_releasepg: kill flag set on referenced object!");
#endif

	/*
	 * if there are still pages in the object, we're done for now.
	 */
	if (aobj->u_obj.uo_npages != 0)
		return TRUE;

#ifdef DIAGNOSTIC
	if (aobj->u_obj.memq.tqh_first)
		panic("uao_releasepg: pages in object with npages == 0");
#endif

	/*
	 * finally, free the rest.
	 */
	uao_free(aobj);

	return FALSE;
}