/*	$NetBSD: uvm_aobj.c,v 1.18.2.1.2.4 1999/08/02 23:16:14 thorpej Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
 *                    Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_aobj.c,v 1.1.2.5 1998/02/06 05:14:38 chs Exp
 */
/*
 * uvm_aobj.c: anonymous memory uvm_object pager
 *
 * author: Chuck Silvers <chuq (at) chuq.com>
 * started: Jan-1998
 *
 * - design mostly from Chuck Cranor
 */



#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/pool.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#include <uvm/uvm.h>

/*
 * an aobj manages anonymous-memory backed uvm_objects.  in addition
 * to keeping the list of resident pages, it also keeps a list of
 * allocated swap blocks.  depending on the size of the aobj this list
 * of allocated swap blocks is either stored in an array (small objects)
 * or in a hash table (large objects).
 */

/*
 * local structures
 */

/*
 * for hash tables, we break the address space of the aobj into blocks
 * of UAO_SWHASH_CLUSTER_SIZE pages.  we require the cluster size to
 * be a power of two.
 */

#define UAO_SWHASH_CLUSTER_SHIFT 4
#define UAO_SWHASH_CLUSTER_SIZE (1 << UAO_SWHASH_CLUSTER_SHIFT)

/* get the "tag" for this page index */
#define UAO_SWHASH_ELT_TAG(PAGEIDX) \
	((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT)

/* given an ELT and a page index, find the swap slot */
#define UAO_SWHASH_ELT_PAGESLOT(ELT, PAGEIDX) \
	((ELT)->slots[(PAGEIDX) & (UAO_SWHASH_CLUSTER_SIZE - 1)])

/* given an ELT, return its pageidx base */
#define UAO_SWHASH_ELT_PAGEIDX_BASE(ELT) \
	((ELT)->tag << UAO_SWHASH_CLUSTER_SHIFT)

/*
 * the swhash hash function
 */
#define UAO_SWHASH_HASH(AOBJ, PAGEIDX) \
	(&(AOBJ)->u_swhash[(((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT) \
			    & (AOBJ)->u_swhashmask)])
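
/*
 * a worked example, using the default UAO_SWHASH_CLUSTER_SHIFT of 4
 * (clusters of 16 pages): for page index 0x35,
 *
 *	UAO_SWHASH_ELT_TAG(0x35)           == 0x3  (its cluster number)
 *	UAO_SWHASH_ELT_PAGESLOT(elt, 0x35) == elt->slots[0x5]
 *	UAO_SWHASH_ELT_PAGEIDX_BASE(elt)   == 0x30 (cluster's first page)
 *
 * and UAO_SWHASH_HASH() masks that same tag down to a bucket index.
 */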

/*
 * the swhash threshold determines if we will use an array or a
 * hash table to store the list of allocated swap blocks.
 */

#define UAO_SWHASH_THRESHOLD (UAO_SWHASH_CLUSTER_SIZE * 4)
#define UAO_USES_SWHASH(AOBJ) \
	((AOBJ)->u_pages > UAO_SWHASH_THRESHOLD)	/* use hash? */

/*
 * the number of buckets in a swhash, with an upper bound
 */
#define UAO_SWHASH_MAXBUCKETS 256
#define UAO_SWHASH_BUCKETS(AOBJ) \
	(min((AOBJ)->u_pages >> UAO_SWHASH_CLUSTER_SHIFT, \
	    UAO_SWHASH_MAXBUCKETS))
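
/*
 * with the defaults above the threshold is 64 pages (256KB with 4KB
 * pages): an aobj of up to 64 pages keeps one int per page in
 * u_swslots, anything larger uses the hash.  e.g. a 4096-page (16MB)
 * aobj asks for min(4096 >> 4, 256) == 256 buckets; note that
 * hashinit() may round the bucket count to a power of two.
 */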


/*
 * uao_swhash_elt: when a hash table is being used, this structure defines
 * the format of an entry in the bucket list.
 */

struct uao_swhash_elt {
	LIST_ENTRY(uao_swhash_elt) list;	/* the hash list */
	vaddr_t tag;				/* our 'tag' */
	int count;				/* our number of active slots */
	int slots[UAO_SWHASH_CLUSTER_SIZE];	/* the slots */
};

/*
 * uao_swhash: the swap hash table structure
 */

LIST_HEAD(uao_swhash, uao_swhash_elt);

/*
 * uao_swhash_elt_pool: pool of uao_swhash_elt structures
 */

struct pool uao_swhash_elt_pool;

/*
 * uvm_aobj: the actual anon-backed uvm_object
 *
 * => the uvm_object is at the top of the structure, this allows
 *	(struct uvm_aobj *) == (struct uvm_object *)
 * => only one of u_swslots and u_swhash is used in any given aobj
 */

struct uvm_aobj {
	struct uvm_object u_obj; /* has: lock, pgops, memq, #pages, #refs */
	int u_pages;		 /* number of pages in entire object */
	int u_flags;		 /* the flags (see uvm_aobj.h) */
	int *u_swslots;		 /* array of offset->swapslot mappings */
	/*
	 * hashtable of offset->swapslot mappings
	 * (u_swhash is an array of bucket heads)
	 */
	struct uao_swhash *u_swhash;
	u_long u_swhashmask;		/* mask for hashtable */
	LIST_ENTRY(uvm_aobj) u_list;	/* global list of aobjs */
};

/*
 * uvm_aobj_pool: pool of uvm_aobj structures
 */

struct pool uvm_aobj_pool;

/*
 * local functions
 */

static struct uao_swhash_elt	*uao_find_swhash_elt __P((struct uvm_aobj *,
					int, boolean_t));
static int			uao_find_swslot __P((struct uvm_aobj *,
					int));
static boolean_t		uao_flush __P((struct uvm_object *,
					vaddr_t, vaddr_t,
					int));
static void			uao_free __P((struct uvm_aobj *));
static int			uao_get __P((struct uvm_object *, vaddr_t,
					vm_page_t *, int *, int,
					vm_prot_t, int, int));
static boolean_t		uao_releasepg __P((struct vm_page *,
					struct vm_page **));

/*
 * aobj_pager
 *
 * note that some functions (e.g. put) are handled elsewhere
 */

struct uvm_pagerops aobj_pager = {
	NULL,			/* init */
	uao_reference,		/* reference */
	uao_detach,		/* detach */
	NULL,			/* fault */
	uao_flush,		/* flush */
	uao_get,		/* get */
	NULL,			/* asyncget */
	NULL,			/* put (done by pagedaemon) */
	NULL,			/* cluster */
	NULL,			/* mk_pcluster */
	uvm_shareprot,		/* shareprot */
	NULL,			/* aiodone */
	uao_releasepg		/* releasepg */
};

/*
 * uao_list: global list of active aobjs, locked by uao_list_lock
 */

static LIST_HEAD(aobjlist, uvm_aobj) uao_list;
static simple_lock_data_t uao_list_lock;


/*
 * functions
 */

/*
 * hash table/array related functions
 */

/*
 * uao_find_swhash_elt: find (or create) a hash table entry for a page
 * offset.
 *
 * => the object should be locked by the caller
 */

static struct uao_swhash_elt *
uao_find_swhash_elt(aobj, pageidx, create)
	struct uvm_aobj *aobj;
	int pageidx;
	boolean_t create;
{
	struct uao_swhash *swhash;
	struct uao_swhash_elt *elt;
	int page_tag;

	swhash = UAO_SWHASH_HASH(aobj, pageidx); /* first hash to get bucket */
	page_tag = UAO_SWHASH_ELT_TAG(pageidx);	/* tag to search for */

	/*
	 * now search the bucket for the requested tag
	 */
	for (elt = swhash->lh_first; elt != NULL; elt = elt->list.le_next) {
		if (elt->tag == page_tag)
			return(elt);
	}

	/* fail now if we are not allowed to create a new entry in the bucket */
	if (!create)
		return NULL;


	/*
	 * allocate a new entry for the bucket and init/insert it in
	 */
	elt = pool_get(&uao_swhash_elt_pool, PR_WAITOK);
	LIST_INSERT_HEAD(swhash, elt, list);
	elt->tag = page_tag;
	elt->count = 0;
	memset(elt->slots, 0, sizeof(elt->slots));

	return(elt);
}
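
/*
 * note on the create flag: lookups (e.g. uao_find_swslot()) pass
 * create == FALSE and simply get NULL back when the cluster has no
 * element yet; uao_set_swslot() passes TRUE only when storing a
 * nonzero slot, so elements are created on demand and never just to
 * record "no swap slot".
 */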

/*
 * uao_find_swslot: find the swap slot number for an aobj/pageidx
 *
 * => object must be locked by caller
 */
__inline static int
uao_find_swslot(aobj, pageidx)
	struct uvm_aobj *aobj;
	int pageidx;
{

	/*
	 * if noswap flag is set, then we never return a slot
	 */

	if (aobj->u_flags & UAO_FLAG_NOSWAP)
		return(0);

	/*
	 * if hashing, look in hash table.
	 */

	if (UAO_USES_SWHASH(aobj)) {
		struct uao_swhash_elt *elt =
		    uao_find_swhash_elt(aobj, pageidx, FALSE);

		if (elt)
			return(UAO_SWHASH_ELT_PAGESLOT(elt, pageidx));
		else
			return(0);
	}

	/*
	 * otherwise, look in the array
	 */
	return(aobj->u_swslots[pageidx]);
}

/*
 * uao_set_swslot: set the swap slot for a page in an aobj.
 *
 * => setting a slot to zero frees the slot
 * => object must be locked by caller
 */
int
uao_set_swslot(uobj, pageidx, slot)
	struct uvm_object *uobj;
	int pageidx, slot;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	int oldslot;
	UVMHIST_FUNC("uao_set_swslot"); UVMHIST_CALLED(pdhist);
	UVMHIST_LOG(pdhist, "aobj %p pageidx %d slot %d",
	    aobj, pageidx, slot, 0);

	/*
	 * if noswap flag is set, then we can't set a slot
	 */

	if (aobj->u_flags & UAO_FLAG_NOSWAP) {

		if (slot == 0)
			return(0);		/* a clear is ok */

		/* but a set is not */
		printf("uao_set_swslot: uobj = %p\n", uobj);
		panic("uao_set_swslot: attempt to set a slot on a NOSWAP object");
	}

	/*
	 * are we using a hash table?  if so, add it in the hash.
	 */

	if (UAO_USES_SWHASH(aobj)) {
		/*
		 * Avoid allocating an entry just to free it again if
		 * the page had no swap slot in the first place, and
		 * we are freeing.
		 */
		struct uao_swhash_elt *elt =
		    uao_find_swhash_elt(aobj, pageidx, slot ? TRUE : FALSE);
		if (elt == NULL) {
#ifdef DIAGNOSTIC
			if (slot)
				panic("uao_set_swslot: didn't create elt");
#endif
			return (0);
		}

		oldslot = UAO_SWHASH_ELT_PAGESLOT(elt, pageidx);
		UAO_SWHASH_ELT_PAGESLOT(elt, pageidx) = slot;

		/*
		 * now adjust the elt's reference counter and free it if we've
		 * dropped it to zero.
		 */

		/* an allocation? */
		if (slot) {
			if (oldslot == 0)
				elt->count++;
		} else {		/* freeing slot ... */
			if (oldslot)	/* to be safe */
				elt->count--;

			if (elt->count == 0) {
				LIST_REMOVE(elt, list);
				pool_put(&uao_swhash_elt_pool, elt);
			}
		}
	} else {
		/* we are using an array */
		oldslot = aobj->u_swslots[pageidx];
		aobj->u_swslots[pageidx] = slot;
	}
	return (oldslot);
}
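
/*
 * a sketch of the common calling pattern (cf. uao_dropswap() below):
 * because uao_set_swslot() returns the previous slot, freeing a page's
 * swap resources is "store zero, then free whatever was there":
 *
 *	slot = uao_set_swslot(uobj, pageidx, 0);
 *	if (slot)
 *		uvm_swap_free(slot, 1);
 */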

/*
 * end of hash/array functions
 */

/*
 * uao_free: free all resources held by an aobj, and then free the aobj
 *
 * => the aobj should be dead
 */
static void
uao_free(aobj)
	struct uvm_aobj *aobj;
{

	if (UAO_USES_SWHASH(aobj)) {
		int i, hashbuckets = aobj->u_swhashmask + 1;

		/*
		 * free the swslots from each hash bucket,
		 * then the hash bucket, and finally the hash table itself.
		 */
		for (i = 0; i < hashbuckets; i++) {
			struct uao_swhash_elt *elt, *next;

			for (elt = aobj->u_swhash[i].lh_first; elt != NULL;
			    elt = next) {
				int j;

				for (j = 0; j < UAO_SWHASH_CLUSTER_SIZE; j++) {
					int slot = elt->slots[j];

					if (slot) {
						uvm_swap_free(slot, 1);

						/*
						 * this page is no longer
						 * only in swap.
						 */
						simple_lock(&uvm.swap_data_lock);
						uvmexp.swpgonly--;
						simple_unlock(&uvm.swap_data_lock);
					}
				}

				next = elt->list.le_next;
				pool_put(&uao_swhash_elt_pool, elt);
			}
		}
		FREE(aobj->u_swhash, M_UVMAOBJ);
	} else {
		int i;

		/*
		 * free the array
		 */

		for (i = 0; i < aobj->u_pages; i++) {
			int slot = aobj->u_swslots[i];

			if (slot) {
				uvm_swap_free(slot, 1);

				/* this page is no longer only in swap. */
				simple_lock(&uvm.swap_data_lock);
				uvmexp.swpgonly--;
				simple_unlock(&uvm.swap_data_lock);
			}
		}
		FREE(aobj->u_swslots, M_UVMAOBJ);
	}

	/*
	 * finally free the aobj itself
	 */
	pool_put(&uvm_aobj_pool, aobj);
}

/*
 * pager functions
 */

/*
 * uao_create: create an aobj of the given size and return its uvm_object.
 *
 * => for normal use, flags are always zero
 * => for the kernel object, the flags are:
 *	UAO_FLAG_KERNOBJ - allocate the kernel object (can only happen once)
 *	UAO_FLAG_KERNSWAP - enable swapping of kernel object ("           ")
 */
struct uvm_object *
uao_create(size, flags)
	vsize_t size;
	int flags;
{
	static struct uvm_aobj kernel_object_store; /* home of kernel_object */
	static int kobj_alloced = 0;			/* not allocated yet */
	int pages = round_page(size) >> PAGE_SHIFT;
	struct uvm_aobj *aobj;

	/*
	 * malloc a new aobj unless we are asked for the kernel object
	 */
	if (flags & UAO_FLAG_KERNOBJ) {		/* want kernel object? */
		if (kobj_alloced)
			panic("uao_create: kernel object already allocated");

		/*
		 * XXXTHORPEJ: Need to call this now, so the pool gets
		 * initialized!
		 */
		uao_init();

		aobj = &kernel_object_store;
		aobj->u_pages = pages;
		aobj->u_flags = UAO_FLAG_NOSWAP;	/* no swap to start */
		/* we are special, we never die */
		aobj->u_obj.uo_refs = UVM_OBJ_KERN;
		kobj_alloced = UAO_FLAG_KERNOBJ;
	} else if (flags & UAO_FLAG_KERNSWAP) {
		aobj = &kernel_object_store;
		if (kobj_alloced != UAO_FLAG_KERNOBJ)
			panic("uao_create: asked to enable swap on kernel object");
		kobj_alloced = UAO_FLAG_KERNSWAP;
	} else {	/* normal object */
		aobj = pool_get(&uvm_aobj_pool, PR_WAITOK);
		aobj->u_pages = pages;
		aobj->u_flags = 0;		/* normal object */
		aobj->u_obj.uo_refs = 1;	/* start with 1 reference */
	}

	/*
	 * allocate hash/array if necessary
	 *
	 * note: in the KERNSWAP case there is no need to worry about locking;
	 * we are still booting, so we should be the only thread around.
	 */
	if (flags == 0 || (flags & UAO_FLAG_KERNSWAP) != 0) {
		int mflags = (flags & UAO_FLAG_KERNSWAP) != 0 ?
		    M_NOWAIT : M_WAITOK;

		/* allocate hash table or array depending on object size */
		if (UAO_USES_SWHASH(aobj)) {
			aobj->u_swhash = hashinit(UAO_SWHASH_BUCKETS(aobj),
			    M_UVMAOBJ, mflags, &aobj->u_swhashmask);
			if (aobj->u_swhash == NULL)
				panic("uao_create: hashinit swhash failed");
		} else {
			MALLOC(aobj->u_swslots, int *, pages * sizeof(int),
			    M_UVMAOBJ, mflags);
			if (aobj->u_swslots == NULL)
				panic("uao_create: malloc swslots failed");
			memset(aobj->u_swslots, 0, pages * sizeof(int));
		}

		if (flags) {
			aobj->u_flags &= ~UAO_FLAG_NOSWAP; /* clear noswap */
			return(&aobj->u_obj);
			/* done! */
		}
	}

	/*
	 * init aobj fields
	 */
	simple_lock_init(&aobj->u_obj.vmobjlock);
	aobj->u_obj.pgops = &aobj_pager;
	TAILQ_INIT(&aobj->u_obj.memq);
	aobj->u_obj.uo_npages = 0;

	/*
	 * now that aobj is ready, add it to the global list
	 */
	simple_lock(&uao_list_lock);
	LIST_INSERT_HEAD(&uao_list, aobj, u_list);
	simple_unlock(&uao_list_lock);

	/*
	 * done!
	 */
	return(&aobj->u_obj);
}
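
/*
 * a usage sketch (hypothetical caller) for a 16-page anonymous region.
 * uao_create() hands back the uvm_object with one reference; the final
 * uao_detach() drops that reference, which frees the object's pages
 * and swap slots:
 *
 *	struct uvm_object *uobj = uao_create(16 * PAGE_SIZE, 0);
 *	... map it and fault pages in via the pager's uao_get() ...
 *	uao_detach(uobj);
 */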



/*
 * uao_init: set up aobj pager subsystem
 *
 * => called at boot time from uvm_pager_init()
 */
void
uao_init()
{
	static int uao_initialized;

	if (uao_initialized)
		return;
	uao_initialized = TRUE;

	LIST_INIT(&uao_list);
	simple_lock_init(&uao_list_lock);

	/*
	 * NOTE: Pages for this pool must not come from a pageable
	 * kernel map!
	 */
	pool_init(&uao_swhash_elt_pool, sizeof(struct uao_swhash_elt),
	    0, 0, 0, "uaoeltpl", 0, NULL, NULL, M_UVMAOBJ);

	pool_init(&uvm_aobj_pool, sizeof(struct uvm_aobj), 0, 0, 0,
	    "aobjpl", 0,
	    pool_page_alloc_nointr, pool_page_free_nointr, M_UVMAOBJ);
}

/*
 * uao_reference: add a ref to an aobj
 *
 * => aobj must be unlocked (we will lock it);
 *	just lock and call the locked version
 */
void
uao_reference(uobj)
	struct uvm_object *uobj;
{
	simple_lock(&uobj->vmobjlock);
	uao_reference_locked(uobj);
	simple_unlock(&uobj->vmobjlock);
}

/*
 * uao_reference_locked: add a ref to an aobj that is already locked
 *
 * => aobj must be locked
 */
void
uao_reference_locked(uobj)
	struct uvm_object *uobj;
{
	UVMHIST_FUNC("uao_reference"); UVMHIST_CALLED(maphist);

	/*
	 * kernel_object already has plenty of references, leave it alone.
	 */

	if (UVM_OBJ_IS_KERN_OBJECT(uobj))
		return;

	uobj->uo_refs++;		/* bump! */
	UVMHIST_LOG(maphist, "<- done (uobj=0x%x, ref = %d)",
	    uobj, uobj->uo_refs,0,0);
}


/*
 * uao_detach: drop a reference to an aobj
 *
 * => aobj must be unlocked
 */
void
uao_detach(uobj)
	struct uvm_object *uobj;
{
	simple_lock(&uobj->vmobjlock);
	uao_detach_locked(uobj);
}


/*
 * uao_detach_locked: drop a reference to an aobj
 *
 * => aobj must be locked, and is unlocked (or freed) upon return.
 */
void
uao_detach_locked(uobj)
	struct uvm_object *uobj;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct vm_page *pg;
	boolean_t busybody;
	UVMHIST_FUNC("uao_detach"); UVMHIST_CALLED(maphist);

	/*
	 * detaching from kernel_object is a noop.
	 */
	if (UVM_OBJ_IS_KERN_OBJECT(uobj)) {
		simple_unlock(&uobj->vmobjlock);
		return;
	}

	UVMHIST_LOG(maphist,"  (uobj=0x%x)  ref=%d", uobj,uobj->uo_refs,0,0);
	uobj->uo_refs--;				/* drop ref! */
	if (uobj->uo_refs) {				/* still more refs? */
		simple_unlock(&uobj->vmobjlock);
		UVMHIST_LOG(maphist, "<- done (rc>0)", 0,0,0,0);
		return;
	}

	/*
	 * remove the aobj from the global list.
	 */
	simple_lock(&uao_list_lock);
	LIST_REMOVE(aobj, u_list);
	simple_unlock(&uao_list_lock);

	/*
	 * free all the pages that aren't PG_BUSY,
	 * mark for release any that are.
	 */
	busybody = FALSE;
	for (pg = uobj->memq.tqh_first ; pg != NULL ; pg = pg->listq.tqe_next) {

		if (pg->flags & PG_BUSY) {
			pg->flags |= PG_RELEASED;
			busybody = TRUE;
			continue;
		}

		/* zap the mappings, free the swap slot, free the page */
		pmap_page_protect(PMAP_PGARG(pg), VM_PROT_NONE);
		uao_dropswap(&aobj->u_obj, pg->offset >> PAGE_SHIFT);
		uvm_lock_pageq();
		uvm_pagefree(pg);
		uvm_unlock_pageq();
	}

	/*
	 * if we found any busy pages, we're done for now.
	 * mark the aobj for death, releasepg will finish up for us.
	 */
	if (busybody) {
		aobj->u_flags |= UAO_FLAG_KILLME;
		simple_unlock(&aobj->u_obj.vmobjlock);
		return;
	}

	/*
	 * finally, free the rest.
	 */
	uao_free(aobj);
}

/*
 * uao_flush: "flush" pages out of a uvm object
 *
 * => object should be locked by caller.  we may _unlock_ the object
 *	if (and only if) we need to clean a page (PGO_CLEANIT).
 *	XXXJRT Currently, however, we don't.  In the case of cleaning
 *	XXXJRT a page, we simply just deactivate it.  Should probably
 *	XXXJRT handle this better, in the future (although "flushing"
 *	XXXJRT anonymous memory isn't terribly important).
 * => if PGO_CLEANIT is not set, then we will neither unlock the object
 *	nor block.
 * => if PGO_ALLPAGES is set, then all pages in the object are valid targets
 *	for flushing.
 * => NOTE: we rely on the fact that the object's memq is a TAILQ and
 *	that new pages are inserted on the tail end of the list.  thus,
 *	we can make a complete pass through the object in one go by starting
 *	at the head and working towards the tail (new pages are put in
 *	front of us).
 * => NOTE: we are allowed to lock the page queues, so the caller
 *	must not be holding the lock on them [e.g. pagedaemon had
 *	better not call us with the queues locked]
 * => we return TRUE unless we encountered some sort of I/O error
 *	XXXJRT currently never happens, as we never directly initiate
 *	XXXJRT I/O
 *
 * comment on "cleaning" object and PG_BUSY pages:
 *	this routine is holding the lock on the object.  the only time
 *	that it can run into a PG_BUSY page that it does not own is if
 *	some other process has started I/O on the page (e.g. either
 *	a pagein or a pageout).  if the PG_BUSY page is being paged
 *	in, then it can not be dirty (!PG_CLEAN) because no one has
 *	had a chance to modify it yet.  if the PG_BUSY page is being
 *	paged out then it means that someone else has already started
 *	cleaning the page for us (how nice!).  in this case, if we
 *	have syncio specified, then after we make our pass through the
 *	object we need to wait for the other PG_BUSY pages to clear
 *	off (i.e. we need to do an iosync).  also note that once a
 *	page is PG_BUSY it must stay in its object until it is un-busied.
 *	XXXJRT We never actually do this, as we are "flushing" anonymous
 *	XXXJRT memory, which doesn't have persistent backing store.
 *
 * note on page traversal:
 *	we can traverse the pages in an object either by going down the
 *	linked list in "uobj->memq", or we can go over the address range
 *	by page doing hash table lookups for each address.  depending
 *	on how many pages are in the object it may be cheaper to do one
 *	or the other.  we set "by_list" to true if we are using memq.
 *	if the cost of a hash lookup was equal to the cost of the list
 *	traversal we could compare the number of pages in the start->stop
 *	range to the total number of pages in the object.  however, it
 *	seems that a hash table lookup is more expensive than the linked
 *	list traversal, so we multiply the number of pages in the
 *	start->stop range by a penalty which we define below.
 */

#define UAO_HASH_PENALTY 4	/* XXX: a guess */
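
/*
 * for example, with a penalty of 4: flushing a 100-page range goes by
 * the memq list only if the object has at most 400 resident pages;
 * otherwise we walk the address range and do one hash lookup per page.
 */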

boolean_t
uao_flush(uobj, start, stop, flags)
	struct uvm_object *uobj;
	vaddr_t start, stop;
	int flags;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *) uobj;
	struct vm_page *pp, *ppnext;
	boolean_t retval, by_list;
	vaddr_t curoff;
	UVMHIST_FUNC("uao_flush"); UVMHIST_CALLED(maphist);

	curoff = 0;	/* XXX: shut up gcc */

	retval = TRUE;	/* default to success */

	if (flags & PGO_ALLPAGES) {
		start = 0;
		stop = aobj->u_pages << PAGE_SHIFT;
		by_list = TRUE;		/* always go by the list */
	} else {
		start = trunc_page(start);
		stop = round_page(stop);
		if (stop > (aobj->u_pages << PAGE_SHIFT)) {
			printf("uao_flush: strange, got an out of range "
			    "flush (fixed)\n");
			stop = aobj->u_pages << PAGE_SHIFT;
		}
		by_list = (uobj->uo_npages <=
		    ((stop - start) >> PAGE_SHIFT) * UAO_HASH_PENALTY);
	}

	UVMHIST_LOG(maphist,
	    " flush start=0x%lx, stop=0x%x, by_list=%d, flags=0x%x",
	    start, stop, by_list, flags);

	/*
	 * Don't need to do any work here if we're not freeing
	 * or deactivating pages.
	 */
	if ((flags & (PGO_DEACTIVATE|PGO_FREE)) == 0) {
		UVMHIST_LOG(maphist,
		    "<- done (no work to do)",0,0,0,0);
		return (retval);
	}

	/*
	 * now do it.  note: we must update ppnext in the body of the loop or we
	 * will get stuck.  we need to use ppnext because we may free "pp"
	 * before doing the next loop.
	 */

	if (by_list) {
		pp = uobj->memq.tqh_first;
	} else {
		curoff = start;
		pp = uvm_pagelookup(uobj, curoff);
	}

	ppnext = NULL;		/* XXX: shut up gcc */
	uvm_lock_pageq();	/* page queues locked */

	/* locked: both page queues and uobj */
	for ( ; (by_list && pp != NULL) ||
	    (!by_list && curoff < stop) ; pp = ppnext) {
		if (by_list) {
			ppnext = pp->listq.tqe_next;

			/* range check */
			if (pp->offset < start || pp->offset >= stop)
				continue;
		} else {
			curoff += PAGE_SIZE;
			if (curoff < stop)
				ppnext = uvm_pagelookup(uobj, curoff);

			/* null check */
			if (pp == NULL)
				continue;
		}

		switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) {
		/*
		 * XXX In these first 3 cases, we always just
		 * XXX deactivate the page.  We may want to
		 * XXX handle the different cases more specifically
		 * XXX in the future.
		 */
		case PGO_CLEANIT|PGO_FREE:
		case PGO_CLEANIT|PGO_DEACTIVATE:
		case PGO_DEACTIVATE:
			/* skip the page if it's loaned or wired */
			if (pp->loan_count != 0 ||
			    pp->wire_count != 0)
				continue;

			/* zap all mappings for the page. */
			pmap_page_protect(PMAP_PGARG(pp),
			    VM_PROT_NONE);

			/* ...and deactivate the page. */
			uvm_pagedeactivate(pp);

			continue;

		case PGO_FREE:
			/* XXX skip the page if it's loaned or wired */
			if (pp->loan_count != 0 ||
			    pp->wire_count != 0)
				continue;

			/*
			 * mark the page as released if it's busy.
			 */
			if (pp->flags & PG_BUSY) {
				pp->flags |= PG_RELEASED;
				continue;
			}

			/* zap all mappings for the page. */
			pmap_page_protect(PMAP_PGARG(pp),
			    VM_PROT_NONE);

			uao_dropswap(uobj, pp->offset >> PAGE_SHIFT);
			uvm_pagefree(pp);

			continue;

		default:
			panic("uao_flush: weird flags");
		}
#ifdef DIAGNOSTIC
		panic("uao_flush: unreachable code");
#endif
	}

	uvm_unlock_pageq();

	UVMHIST_LOG(maphist,
	    "<- done, rv=%d",retval,0,0,0);
	return (retval);
}

/*
 * uao_get: fetch me a page
 *
 * we have three cases:
 * 1: page is resident     -> just return the page.
 * 2: page is zero-fill    -> allocate a new page and zero it.
 * 3: page is swapped out  -> fetch the page from swap.
 *
 * cases 1 and 2 can be handled with PGO_LOCKED, case 3 cannot.
 * so, if the "center" page hits case 3 (or any page, with PGO_ALLPAGES),
 * then we will need to return VM_PAGER_UNLOCK.
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_ALLPAGES: get all of the pages
 *	     PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */
static int
uao_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags)
	struct uvm_object *uobj;
	vaddr_t offset;
	struct vm_page **pps;
	int *npagesp;
	int centeridx, advice, flags;
	vm_prot_t access_type;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	vaddr_t current_offset;
	vm_page_t ptmp;
	int lcv, gotpages, maxpages, swslot, rv;
	boolean_t done;
	UVMHIST_FUNC("uao_get"); UVMHIST_CALLED(pdhist);

	UVMHIST_LOG(pdhist, "aobj=%p offset=%d, flags=%d",
	    aobj, offset, flags,0);

	/*
	 * get number of pages
	 */
	maxpages = *npagesp;

	/*
	 * step 1: handle the case where fault data structures are locked.
	 */

	if (flags & PGO_LOCKED) {
		/*
		 * step 1a: get pages that are already resident.  only do
		 * this if the data structures are locked (i.e. the first
		 * time through).
		 */

		done = TRUE;	/* be optimistic */
		gotpages = 0;	/* # of pages we got so far */

		for (lcv = 0, current_offset = offset ; lcv < maxpages ;
		    lcv++, current_offset += PAGE_SIZE) {
			/* do we care about this page?  if not, skip it */
			if (pps[lcv] == PGO_DONTCARE)
				continue;

			ptmp = uvm_pagelookup(uobj, current_offset);

			/*
			 * if page is new, attempt to allocate the page, then
			 * zero-fill it.
			 */
			if (ptmp == NULL && uao_find_swslot(aobj,
			    current_offset >> PAGE_SHIFT) == 0) {
				ptmp = uvm_pagealloc(uobj, current_offset,
				    NULL, 0);
				if (ptmp) {
					/* new page */
					ptmp->flags &= ~(PG_BUSY|PG_FAKE);
					ptmp->pqflags |= PQ_AOBJ;
					UVM_PAGE_OWN(ptmp, NULL);
					uvm_pagezero(ptmp);
				}
			}

			/*
			 * to be useful must get a non-busy, non-released page
			 */
			if (ptmp == NULL ||
			    (ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) {
				if (lcv == centeridx ||
				    (flags & PGO_ALLPAGES) != 0)
					/* need to do a wait or I/O! */
					done = FALSE;
				continue;
			}

			/*
			 * useful page: busy/lock it and plug it in our
			 * result array
			 */
			/* caller must un-busy this page */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uao_get1");
			pps[lcv] = ptmp;
			gotpages++;

		}	/* "for" lcv loop */

		/*
		 * step 1b: now we've either done everything needed or we
		 * need to unlock and do some waiting or I/O.
		 */

		UVMHIST_LOG(pdhist, "<- done (done=%d)", done, 0,0,0);

		*npagesp = gotpages;
		if (done)
			/* bingo! */
			return(VM_PAGER_OK);
		else
			/* EEK!  Need to unlock and I/O */
			return(VM_PAGER_UNLOCK);
	}

	/*
	 * step 2: get non-resident or busy pages.
	 * object is locked.  data structures are unlocked.
	 */

	for (lcv = 0, current_offset = offset ; lcv < maxpages ;
	    lcv++, current_offset += PAGE_SIZE) {
		/*
		 * - skip over pages we've already gotten or don't want
		 * - skip over pages we don't _have_ to get
		 */
		if (pps[lcv] != NULL ||
		    (lcv != centeridx && (flags & PGO_ALLPAGES) == 0))
			continue;

		/*
		 * we have yet to locate the current page (pps[lcv]).  we
		 * first look for a page that is already at the current offset.
		 * if we find a page, we check to see if it is busy or
		 * released.  if that is the case, then we sleep on the page
		 * until it is no longer busy or released and repeat the lookup.
		 * if the page we found is neither busy nor released, then we
		 * busy it (so we own it) and plug it into pps[lcv].  this
		 * 'break's the following while loop and indicates we are
		 * ready to move on to the next page in the "lcv" loop above.
		 *
		 * if we exit the while loop with pps[lcv] still set to NULL,
		 * then it means that we allocated a new busy/fake/clean page
		 * ptmp in the object and we need to do I/O to fill in the data.
		 */

		/* top of "pps" while loop */
		while (pps[lcv] == NULL) {
			/* look for a resident page */
			ptmp = uvm_pagelookup(uobj, current_offset);

			/* not resident?  allocate one now (if we can) */
			if (ptmp == NULL) {

				ptmp = uvm_pagealloc(uobj, current_offset,
				    NULL, 0);

				/* out of RAM? */
				if (ptmp == NULL) {
					simple_unlock(&uobj->vmobjlock);
					UVMHIST_LOG(pdhist,
					    "sleeping, ptmp == NULL\n",0,0,0,0);
					uvm_wait("uao_getpage");
					simple_lock(&uobj->vmobjlock);
					/* goto top of pps while loop */
					continue;
				}

				/*
				 * safe with PQ's unlocked: because we just
				 * alloc'd the page
				 */
				ptmp->pqflags |= PQ_AOBJ;

				/*
				 * got new page ready for I/O.  break pps while
				 * loop.  pps[lcv] is still NULL.
				 */
				break;
			}

			/* page is there, see if we need to wait on it */
			if ((ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) {
				ptmp->flags |= PG_WANTED;
				UVMHIST_LOG(pdhist,
				    "sleeping, ptmp->flags 0x%x\n",
				    ptmp->flags,0,0,0);
				UVM_UNLOCK_AND_WAIT(ptmp, &uobj->vmobjlock,
				    FALSE, "uao_get", 0);
				simple_lock(&uobj->vmobjlock);
				continue;	/* goto top of pps while loop */
			}

			/*
			 * if we get here then the page has become resident and
			 * unbusy between steps 1 and 2.  we busy it now (so we
			 * own it) and set pps[lcv] (so that we exit the while
			 * loop).
			 */
			/* we own it, caller must un-busy */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uao_get2");
			pps[lcv] = ptmp;
		}

		/*
		 * if we own the valid page at the correct offset, pps[lcv] will
		 * point to it.  nothing more to do except go to the next page.
		 */
		if (pps[lcv])
			continue;	/* next lcv */

		/*
		 * we have a "fake/busy/clean" page that we just allocated.
		 * do the needed "i/o", either reading from swap or zeroing.
		 */
		swslot = uao_find_swslot(aobj, current_offset >> PAGE_SHIFT);

		/*
		 * just zero the page if there's nothing in swap.
		 */
		if (swslot == 0) {
			/*
			 * page hasn't existed before, just zero it.
			 */
			uvm_pagezero(ptmp);
		} else {
			UVMHIST_LOG(pdhist, "pagein from swslot %d",
			    swslot, 0,0,0);

			/*
			 * page in the swapped-out page.
			 * unlock object for i/o, relock when done.
			 */
			simple_unlock(&uobj->vmobjlock);
			rv = uvm_swap_get(ptmp, swslot, PGO_SYNCIO);
			simple_lock(&uobj->vmobjlock);

			/*
			 * I/O done.  check for errors.
			 */
			if (rv != VM_PAGER_OK) {
				UVMHIST_LOG(pdhist, "<- done (error=%d)",
				    rv,0,0,0);
				if (ptmp->flags & PG_WANTED)
					/* object lock still held */
					wakeup(ptmp);
				ptmp->flags &= ~(PG_WANTED|PG_BUSY);
				UVM_PAGE_OWN(ptmp, NULL);
				uvm_lock_pageq();
				uvm_pagefree(ptmp);
				uvm_unlock_pageq();

				simple_unlock(&uobj->vmobjlock);
				return (rv);
			}
		}

		/*
		 * we got the page!  clear the fake flag (indicates valid
		 * data now in page) and plug into our result array.  note
		 * that page is still busy.
		 *
		 * it is the caller's job to:
		 * => check if the page is released
		 * => unbusy the page
		 * => activate the page
		 */

		ptmp->flags &= ~PG_FAKE;		/* data is valid ... */
		pmap_clear_modify(PMAP_PGARG(ptmp));	/* ... and clean */
		pps[lcv] = ptmp;

	}	/* lcv loop */

	/*
	 * finally, unlock object and return.
	 */

	simple_unlock(&uobj->vmobjlock);
	UVMHIST_LOG(pdhist, "<- done (OK)",0,0,0,0);
	return(VM_PAGER_OK);
}

/*
 * uao_releasepg: handle released page in an aobj
 *
 * => "pg" is a PG_BUSY [caller owns it], PG_RELEASED page that we need
 *	to dispose of.
 * => caller must handle PG_WANTED case
 * => called with page's object locked, pageq's unlocked
 * => returns TRUE if page's object is still alive, FALSE if we
 *	killed the page's object.  if we return TRUE, then we
 *	return with the object locked.
 * => if (nextpgp != NULL) => we return pageq.tqe_next here, and return
 *	with the page queues locked [for pagedaemon]
 * => if (nextpgp == NULL) => we return with page queues unlocked [normal case]
 * => we kill the aobj if it is not referenced and we are supposed to
 *	kill it ("KILLME").
 */
static boolean_t
uao_releasepg(pg, nextpgp)
	struct vm_page *pg;
	struct vm_page **nextpgp;	/* OUT */
{
	struct uvm_aobj *aobj = (struct uvm_aobj *) pg->uobject;

#ifdef DIAGNOSTIC
	if ((pg->flags & PG_RELEASED) == 0)
		panic("uao_releasepg: page not released!");
#endif

	/*
	 * dispose of the page [caller handles PG_WANTED] and swap slot.
	 */
	pmap_page_protect(PMAP_PGARG(pg), VM_PROT_NONE);
	uao_dropswap(&aobj->u_obj, pg->offset >> PAGE_SHIFT);
	uvm_lock_pageq();
	if (nextpgp)
		*nextpgp = pg->pageq.tqe_next;	/* next page for daemon */
	uvm_pagefree(pg);
	if (!nextpgp)
		uvm_unlock_pageq();		/* keep locked for daemon */

	/*
	 * if we're not killing the object, we're done.
	 */
	if ((aobj->u_flags & UAO_FLAG_KILLME) == 0)
		return TRUE;

#ifdef DIAGNOSTIC
	if (aobj->u_obj.uo_refs)
1276 panic("uvm_km_releasepg: kill flag set on referenced object!");
#endif

	/*
	 * if there are still pages in the object, we're done for now.
	 */
	if (aobj->u_obj.uo_npages != 0)
		return TRUE;

#ifdef DIAGNOSTIC
	if (aobj->u_obj.memq.tqh_first)
1287 panic("uvn_releasepg: pages in object with npages == 0");
#endif

	/*
	 * finally, free the rest.
	 */
	uao_free(aobj);

	return FALSE;
}

/*
 * uao_dropswap: release any swap resources from this aobj page.
 *
 * => aobj must be locked or have a reference count of 0.
 */

void
uao_dropswap(uobj, pageidx)
	struct uvm_object *uobj;
	int pageidx;
{
	int slot;

	slot = uao_set_swslot(uobj, pageidx, 0);
	if (slot) {
		uvm_swap_free(slot, 1);
	}
}
