/*	$NetBSD: uvm_aobj.c,v 1.15.2.2 1999/02/25 04:08:38 chs Exp $	*/

/*
 * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
 * >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
 */
/*
 * Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
 * Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_aobj.c,v 1.1.2.5 1998/02/06 05:14:38 chs Exp
 */
/*
 * uvm_aobj.c: anonymous memory uvm_object pager
 *
 * author: Chuck Silvers <chuq@chuq.com>
 * started: Jan-1998
 *
 * - design mostly from Chuck Cranor
 */


#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/pool.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#include <uvm/uvm.h>
/*
 * an aobj manages anonymous-memory backed uvm_objects.  in addition
 * to keeping the list of resident pages, it also keeps a list of
 * allocated swap blocks.  depending on the size of the aobj this list
 * of allocated swap blocks is either stored in an array (small objects)
 * or in a hash table (large objects).
 */

/*
 * local structures
 */

/*
 * for hash tables, we break the address space of the aobj into blocks
 * of UAO_SWHASH_CLUSTER_SIZE pages.  we require the cluster size to
 * be a power of two.
 */

#define UAO_SWHASH_CLUSTER_SHIFT 4
#define UAO_SWHASH_CLUSTER_SIZE (1 << UAO_SWHASH_CLUSTER_SHIFT)

/* get the "tag" for this page index */
#define UAO_SWHASH_ELT_TAG(PAGEIDX) \
	((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT)

/* given an ELT and a page index, find the swap slot */
#define UAO_SWHASH_ELT_PAGESLOT(ELT, PAGEIDX) \
	((ELT)->slots[(PAGEIDX) & (UAO_SWHASH_CLUSTER_SIZE - 1)])

/* given an ELT, return its pageidx base */
#define UAO_SWHASH_ELT_PAGEIDX_BASE(ELT) \
	((ELT)->tag << UAO_SWHASH_CLUSTER_SHIFT)

/*
 * the swhash hash function
 */
#define UAO_SWHASH_HASH(AOBJ, PAGEIDX) \
	(&(AOBJ)->u_swhash[(((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT) \
	    & (AOBJ)->u_swhashmask)])

/*
 * the swhash threshold determines if we will use an array or a
 * hash table to store the list of allocated swap blocks.
 */

#define UAO_SWHASH_THRESHOLD (UAO_SWHASH_CLUSTER_SIZE * 4)
#define UAO_USES_SWHASH(AOBJ) \
	((AOBJ)->u_pages > UAO_SWHASH_THRESHOLD)	/* use hash? */

/*
 * the number of buckets in a swhash, with an upper bound
 */
#define UAO_SWHASH_MAXBUCKETS 256
#define UAO_SWHASH_BUCKETS(AOBJ) \
	(min((AOBJ)->u_pages >> UAO_SWHASH_CLUSTER_SHIFT, \
	    UAO_SWHASH_MAXBUCKETS))
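
/*
 * worked example of the macros above (illustrative numbers only, using
 * the current UAO_SWHASH_CLUSTER_SHIFT of 4, i.e. clusters of 16 pages):
 *
 *	pageidx 0x123:
 *		UAO_SWHASH_ELT_TAG(0x123)	 = 0x123 >> 4  = 0x12
 *		slot index within the elt	 = 0x123 & 0xf = 0x3
 *		UAO_SWHASH_ELT_PAGEIDX_BASE(elt) = 0x12 << 4   = 0x120
 *
 * so page 0x123's swap slot lives in slots[3] of the elt whose tag is
 * 0x12.  an aobj switches from the plain u_swslots array to the hash
 * once it is larger than UAO_SWHASH_THRESHOLD (64) pages.
 */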


/*
 * uao_swhash_elt: when a hash table is being used, this structure defines
 * the format of an entry in the bucket list.
 */

struct uao_swhash_elt {
	LIST_ENTRY(uao_swhash_elt) list;	/* the hash list */
	vaddr_t tag;				/* our 'tag' */
	int count;				/* our number of active slots */
	int slots[UAO_SWHASH_CLUSTER_SIZE];	/* the slots */
};

/*
 * uao_swhash: the swap hash table structure
 */

LIST_HEAD(uao_swhash, uao_swhash_elt);

/*
 * uao_swhash_elt_pool: pool of uao_swhash_elt structures
 */

struct pool uao_swhash_elt_pool;

/*
 * uvm_aobj: the actual anon-backed uvm_object
 *
 * => the uvm_object is at the top of the structure, this allows
 *    (struct uvm_aobj *) == (struct uvm_object *)
 * => only one of u_swslots and u_swhash is used in any given aobj
 */

struct uvm_aobj {
	struct uvm_object u_obj; /* has: lock, pgops, memq, #pages, #refs */
	int u_pages;		 /* number of pages in entire object */
	int u_flags;		 /* the flags (see uvm_aobj.h) */
	int *u_swslots;		 /* array of offset->swapslot mappings */
	/*
	 * hashtable of offset->swapslot mappings
	 * (u_swhash is an array of bucket heads)
	 */
	struct uao_swhash *u_swhash;
	u_long u_swhashmask;		/* mask for hashtable */
	LIST_ENTRY(uvm_aobj) u_list;	/* global list of aobjs */
};

/*
 * uvm_aobj_pool: pool of uvm_aobj structures
 */

struct pool uvm_aobj_pool;

/*
 * local functions
 */

static struct uao_swhash_elt	*uao_find_swhash_elt __P((struct uvm_aobj *,
				     int, boolean_t));
static int			 uao_find_swslot __P((struct uvm_aobj *, int));
static boolean_t		 uao_flush __P((struct uvm_object *, vaddr_t,
				     vaddr_t, int));
static void			 uao_free __P((struct uvm_aobj *));
static int			 uao_get __P((struct uvm_object *, vaddr_t,
				     vm_page_t *, int *, int, vm_prot_t,
				     int, int));
static boolean_t		 uao_releasepg __P((struct vm_page *,
				     struct vm_page **));
static boolean_t		 uao_pagein __P((struct uvm_aobj *, int, int));
static boolean_t		 uao_pagein_page __P((struct uvm_aobj *, int));


/*
 * aobj_pager
 *
 * note that some functions (e.g. put) are handled elsewhere
 */

struct uvm_pagerops aobj_pager = {
	NULL,			/* init */
	NULL,			/* attach */
	uao_reference,		/* reference */
	uao_detach,		/* detach */
	NULL,			/* fault */
	uao_flush,		/* flush */
	uao_get,		/* get */
	NULL,			/* asyncget */
	NULL,			/* put (done by pagedaemon) */
	NULL,			/* cluster */
	NULL,			/* mk_pcluster */
	uvm_shareprot,		/* shareprot */
	NULL,			/* aiodone */
	uao_releasepg		/* releasepg */
};

/*
 * uao_list: global list of active aobjs, locked by uao_list_lock
 */

static LIST_HEAD(aobjlist, uvm_aobj) uao_list;
static simple_lock_data_t uao_list_lock;


/*
 * functions
 */

/*
 * hash table/array related functions
 */

/*
 * uao_find_swhash_elt: find (or create) a hash table entry for a page
 * offset.
 *
 * => the object should be locked by the caller
 */

static struct uao_swhash_elt *
uao_find_swhash_elt(aobj, pageidx, create)
	struct uvm_aobj *aobj;
	int pageidx;
	boolean_t create;
{
	struct uao_swhash *swhash;
	struct uao_swhash_elt *elt;
	int page_tag;

	swhash = UAO_SWHASH_HASH(aobj, pageidx); /* first hash to get bucket */
	page_tag = UAO_SWHASH_ELT_TAG(pageidx);	 /* tag to search for */

	/*
	 * now search the bucket for the requested tag
	 */
	for (elt = swhash->lh_first; elt != NULL; elt = elt->list.le_next) {
		if (elt->tag == page_tag)
			return(elt);
	}

	/* fail now if we are not allowed to create a new entry in the bucket */
	if (!create)
		return NULL;


	/*
	 * allocate a new entry for the bucket and init/insert it in
	 */
	elt = pool_get(&uao_swhash_elt_pool, PR_WAITOK);
	LIST_INSERT_HEAD(swhash, elt, list);
	elt->tag = page_tag;
	elt->count = 0;
	memset(elt->slots, 0, sizeof(elt->slots));

	return(elt);
}

/*
 * uao_find_swslot: find the swap slot number for an aobj/pageidx
 *
 * => object must be locked by caller
 */
__inline static int
uao_find_swslot(aobj, pageidx)
	struct uvm_aobj *aobj;
	int pageidx;
{

	/*
	 * if noswap flag is set, then we never return a slot
	 */

	if (aobj->u_flags & UAO_FLAG_NOSWAP)
		return(0);

	/*
	 * if hashing, look in hash table.
	 */

	if (UAO_USES_SWHASH(aobj)) {
		struct uao_swhash_elt *elt =
		    uao_find_swhash_elt(aobj, pageidx, FALSE);

		if (elt)
			return(UAO_SWHASH_ELT_PAGESLOT(elt, pageidx));
		else
			return(0);
	}

	/*
	 * otherwise, look in the array
	 */
	return(aobj->u_swslots[pageidx]);
}

/*
 * uao_set_swslot: set the swap slot for a page in an aobj.
 *
 * => setting a slot to zero frees the slot
 * => object must be locked by caller
 */
int
uao_set_swslot(uobj, pageidx, slot)
	struct uvm_object *uobj;
	int pageidx, slot;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	int oldslot;
	UVMHIST_FUNC("uao_set_swslot"); UVMHIST_CALLED(pdhist);
	UVMHIST_LOG(pdhist, "aobj %p pageidx %d slot %d",
	    aobj, pageidx, slot, 0);

	/*
	 * if noswap flag is set, then we can't set a slot
	 */

	if (aobj->u_flags & UAO_FLAG_NOSWAP) {

		if (slot == 0)
			return(0);		/* a clear is ok */

		/* but a set is not */
		printf("uao_set_swslot: uobj = %p\n", uobj);
		panic("uao_set_swslot: attempt to set a slot on a NOSWAP object");
	}

	/*
	 * are we using a hash table?  if so, add it in the hash.
	 */

	if (UAO_USES_SWHASH(aobj)) {
		/*
		 * avoid allocating an entry just to free it again if
		 * the page had no swap slot in the first place, and
		 * we are freeing.
		 */
		struct uao_swhash_elt *elt =
		    uao_find_swhash_elt(aobj, pageidx, slot ? TRUE : FALSE);
		if (elt == NULL) {
#ifdef DIAGNOSTIC
			if (slot)
				panic("uao_set_swslot: didn't create elt");
#endif
			return (0);
		}

		oldslot = UAO_SWHASH_ELT_PAGESLOT(elt, pageidx);
		UAO_SWHASH_ELT_PAGESLOT(elt, pageidx) = slot;

		/*
		 * now adjust the elt's reference counter and free it if we've
		 * dropped it to zero.
		 */

		/* an allocation? */
		if (slot) {
			if (oldslot == 0)
				elt->count++;
		} else {		/* freeing slot ... */
			if (oldslot)	/* to be safe */
				elt->count--;

			if (elt->count == 0) {
				LIST_REMOVE(elt, list);
				pool_put(&uao_swhash_elt_pool, elt);
			}
		}
	} else {
		/* we are using an array */
		oldslot = aobj->u_swslots[pageidx];
		aobj->u_swslots[pageidx] = slot;
	}
	return (oldslot);
}
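
/*
 * example of the uao_set_swslot() protocol (an illustrative sketch,
 * not taken from a real caller):
 *
 *	oldslot = uao_set_swslot(uobj, pageidx, newslot);
 *		- records that page "pageidx" now lives in swap slot
 *		  "newslot" and returns the previous slot (0 = none).
 *	oldslot = uao_set_swslot(uobj, pageidx, 0);
 *		- forgets the mapping; callers typically hand a nonzero
 *		  oldslot to uvm_swap_free(oldslot, 1), as the code in
 *		  this file does.
 */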

/*
 * end of hash/array functions
 */

/*
 * uao_free: free all resources held by an aobj, and then free the aobj
 *
 * => the aobj should be dead
 */
static void
uao_free(aobj)
	struct uvm_aobj *aobj;
{

	if (UAO_USES_SWHASH(aobj)) {
		int i, hashbuckets = aobj->u_swhashmask + 1;

		/*
		 * free the swslots from each hash bucket,
		 * then the hash bucket, and finally the hash table itself.
		 */
		for (i = 0; i < hashbuckets; i++) {
			struct uao_swhash_elt *elt, *next;

			for (elt = aobj->u_swhash[i].lh_first; elt != NULL;
			    elt = next) {
				int j;

				for (j = 0; j < UAO_SWHASH_CLUSTER_SIZE; j++) {
					int slot = elt->slots[j];

					if (slot) {
						uvm_swap_free(slot, 1);

						/*
						 * this page is no longer
						 * only in swap.
						 */
						uvmexp.swpguniq--;
					}
				}

				next = elt->list.le_next;
				pool_put(&uao_swhash_elt_pool, elt);
			}
		}
		FREE(aobj->u_swhash, M_UVMAOBJ);
	} else {
		int i;

		/*
		 * free the array
		 */

		for (i = 0; i < aobj->u_pages; i++) {
			int slot = aobj->u_swslots[i];

			if (slot) {
				uvm_swap_free(slot, 1);

				/* this page is no longer only in swap. */
				uvmexp.swpguniq--;
			}
		}
		FREE(aobj->u_swslots, M_UVMAOBJ);
	}

	/*
	 * finally free the aobj itself
	 */
	pool_put(&uvm_aobj_pool, aobj);
}

/*
 * pager functions
 */

/*
 * uao_create: create an aobj of the given size and return its uvm_object.
 *
 * => for normal use, flags are always zero
 * => for the kernel object, the flags are:
 *	UAO_FLAG_KERNOBJ - allocate the kernel object (can only happen once)
 *	UAO_FLAG_KERNSWAP - enable swapping of kernel object ("       ")
 */
struct uvm_object *
uao_create(size, flags)
	vsize_t size;
	int flags;
{
	static struct uvm_aobj kernel_object_store; /* home of kernel_object */
	static int kobj_alloced = 0;		/* not allocated yet */
	int pages = round_page(size) >> PAGE_SHIFT;
	struct uvm_aobj *aobj;

	/*
	 * malloc a new aobj unless we are asked for the kernel object
	 */
	if (flags & UAO_FLAG_KERNOBJ) {		/* want kernel object? */
		if (kobj_alloced)
			panic("uao_create: kernel object already allocated");

		/*
		 * XXXTHORPEJ: Need to call this now, so the pool gets
		 * initialized!
		 */
		uao_init();

		aobj = &kernel_object_store;
		aobj->u_pages = pages;
		aobj->u_flags = UAO_FLAG_NOSWAP;	/* no swap to start */
		/* we are special, we never die */
		aobj->u_obj.uo_refs = UVM_OBJ_KERN;
		kobj_alloced = UAO_FLAG_KERNOBJ;
	} else if (flags & UAO_FLAG_KERNSWAP) {
		aobj = &kernel_object_store;
		if (kobj_alloced != UAO_FLAG_KERNOBJ)
			panic("uao_create: asked to enable swap on kernel object");
		kobj_alloced = UAO_FLAG_KERNSWAP;
	} else {	/* normal object */
		aobj = pool_get(&uvm_aobj_pool, PR_WAITOK);
		aobj->u_pages = pages;
		aobj->u_flags = 0;		/* normal object */
		aobj->u_obj.uo_refs = 1;	/* start with 1 reference */
	}

	/*
	 * allocate hash/array if necessary
	 *
	 * note: in the KERNSWAP case there is no need to worry about locking,
	 * since we are still booting and should be the only thread around.
	 */
	if (flags == 0 || (flags & UAO_FLAG_KERNSWAP) != 0) {
		int mflags = (flags & UAO_FLAG_KERNSWAP) != 0 ?
		    M_NOWAIT : M_WAITOK;

		/* allocate hash table or array depending on object size */
		if (UAO_USES_SWHASH(aobj)) {
			aobj->u_swhash = hashinit(UAO_SWHASH_BUCKETS(aobj),
			    M_UVMAOBJ, mflags, &aobj->u_swhashmask);
			if (aobj->u_swhash == NULL)
				panic("uao_create: hashinit swhash failed");
		} else {
			MALLOC(aobj->u_swslots, int *, pages * sizeof(int),
			    M_UVMAOBJ, mflags);
			if (aobj->u_swslots == NULL)
				panic("uao_create: malloc swslots failed");
			memset(aobj->u_swslots, 0, pages * sizeof(int));
		}

		if (flags) {
			aobj->u_flags &= ~UAO_FLAG_NOSWAP; /* clear noswap */
			return(&aobj->u_obj);
			/* done! */
		}
	}

	/*
	 * init aobj fields
	 */
	simple_lock_init(&aobj->u_obj.vmobjlock);
	aobj->u_obj.pgops = &aobj_pager;
	TAILQ_INIT(&aobj->u_obj.memq);
	aobj->u_obj.uo_npages = 0;

	/*
	 * now that aobj is ready, add it to the global list
	 */
	simple_lock(&uao_list_lock);
	LIST_INSERT_HEAD(&uao_list, aobj, u_list);
	simple_unlock(&uao_list_lock);

	/*
	 * done!
	 */
	return(&aobj->u_obj);
}
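
/*
 * typical lifecycle of a normal (non-kernel) aobj, as an illustrative
 * sketch rather than code lifted from a real caller:
 *
 *	struct uvm_object *uobj;
 *
 *	uobj = uao_create(16 * PAGE_SIZE, 0);	(created with 1 ref)
 *	uao_reference(uobj);			(now 2 refs)
 *	...
 *	uao_detach(uobj);			(back to 1 ref)
 *	uao_detach(uobj);			(last ref: object freed)
 */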


/*
 * uao_init: set up aobj pager subsystem
 *
 * => called at boot time from uvm_pager_init()
 */
void
uao_init()
{
	static int uao_initialized;

	if (uao_initialized)
		return;
	uao_initialized = TRUE;

	LIST_INIT(&uao_list);
	simple_lock_init(&uao_list_lock);

	/*
	 * NOTE: Pages for this pool must not come from a pageable
	 * kernel map!
	 */
	pool_init(&uao_swhash_elt_pool, sizeof(struct uao_swhash_elt),
	    0, 0, 0, "uaoeltpl", 0, NULL, NULL, M_UVMAOBJ);

	pool_init(&uvm_aobj_pool, sizeof(struct uvm_aobj), 0, 0, 0,
	    "aobjpl", 0,
	    pool_page_alloc_nointr, pool_page_free_nointr, M_UVMAOBJ);
}

/*
 * uao_reference: add a ref to an aobj
 *
 * => aobj must be unlocked (we will lock it)
 * => we just lock the object and call the locked version
 */
void
uao_reference(uobj)
	struct uvm_object *uobj;
{
	simple_lock(&uobj->vmobjlock);
	uao_reference_locked(uobj);
	simple_unlock(&uobj->vmobjlock);
}

/*
 * uao_reference_locked: add a ref to an aobj that is already locked
 *
 * => aobj must be locked
 */
void
uao_reference_locked(uobj)
	struct uvm_object *uobj;
{
	UVMHIST_FUNC("uao_reference_locked"); UVMHIST_CALLED(maphist);

	/*
	 * kernel_object already has plenty of references, leave it alone.
	 */

	if (uobj->uo_refs == UVM_OBJ_KERN)
		return;

	uobj->uo_refs++;		/* bump! */
	UVMHIST_LOG(maphist, "<- done (uobj=0x%x, ref = %d)",
	    uobj, uobj->uo_refs,0,0);
}


/*
 * uao_detach: drop a reference to an aobj
 *
 * => aobj must be unlocked
 */
void
uao_detach(uobj)
	struct uvm_object *uobj;
{
	simple_lock(&uobj->vmobjlock);
	uao_detach_locked(uobj);
}


/*
 * uao_detach_locked: drop a reference to an aobj
 *
 * => aobj must be locked, and is unlocked (or freed) upon return.
 */
void
uao_detach_locked(uobj)
	struct uvm_object *uobj;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct vm_page *pg;
	boolean_t busybody;
	UVMHIST_FUNC("uao_detach_locked"); UVMHIST_CALLED(maphist);

	/*
	 * detaching from kernel_object is a noop.
	 */
	if (uobj->uo_refs == UVM_OBJ_KERN) {
		simple_unlock(&uobj->vmobjlock);
		return;
	}

	UVMHIST_LOG(maphist," (uobj=0x%x) ref=%d", uobj,uobj->uo_refs,0,0);
	uobj->uo_refs--;			/* drop ref! */
	if (uobj->uo_refs) {			/* still more refs? */
		simple_unlock(&uobj->vmobjlock);
		UVMHIST_LOG(maphist, "<- done (rc>0)", 0,0,0,0);
		return;
	}

	/*
	 * remove the aobj from the global list.
	 */
	simple_lock(&uao_list_lock);
	LIST_REMOVE(aobj, u_list);
	simple_unlock(&uao_list_lock);

	/*
	 * free all the pages that aren't PG_BUSY,
	 * mark for release any that are.
	 */
	busybody = FALSE;
	for (pg = uobj->memq.tqh_first ; pg != NULL ; pg = pg->listq.tqe_next) {
		int swslot;

		if (pg->flags & PG_BUSY) {
			pg->flags |= PG_RELEASED;
			busybody = TRUE;
			continue;
		}


		/* zap the mappings, free the swap slot, free the page */
		pmap_page_protect(PMAP_PGARG(pg), VM_PROT_NONE);

		swslot = uao_set_swslot(&aobj->u_obj,
		    pg->offset >> PAGE_SHIFT, 0);
		if (swslot) {
			uvm_swap_free(swslot, 1);
		}

		uvm_lock_pageq();
		uvm_pagefree(pg);
		uvm_unlock_pageq();
	}

	/*
	 * if we found any busy pages, we're done for now.
	 * mark the aobj for death, releasepg will finish up for us.
	 */
	if (busybody) {
		aobj->u_flags |= UAO_FLAG_KILLME;
		simple_unlock(&aobj->u_obj.vmobjlock);
		return;
	}

	/*
	 * finally, free the rest.
	 */
	uao_free(aobj);
}

/*
 * uao_flush: uh, yea, sure it's flushed.  really!
 */
boolean_t
uao_flush(uobj, start, end, flags)
	struct uvm_object *uobj;
	vaddr_t start, end;
	int flags;
{

	/*
	 * anonymous memory doesn't "flush"
	 */
	/*
	 * XXX
	 * deal with PGO_DEACTIVATE (for madvise(MADV_SEQUENTIAL))
	 * and PGO_FREE (for msync(MS_INVALIDATE))
	 */
	return TRUE;
}

/*
 * uao_get: fetch me a page
 *
 * we have three cases:
 * 1: page is resident    -> just return the page.
 * 2: page is zero-fill   -> allocate a new page and zero it.
 * 3: page is swapped out -> fetch the page from swap.
 *
 * cases 1 and 2 can be handled with PGO_LOCKED, case 3 cannot.
 * so, if the "center" page hits case 3 (or any page, with PGO_ALLPAGES),
 * then we will need to return VM_PAGER_UNLOCK.
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_ALLPAGES: get all of the pages
 *           PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */
static int
uao_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags)
	struct uvm_object *uobj;
	vaddr_t offset;
	struct vm_page **pps;
	int *npagesp;
	int centeridx, advice, flags;
	vm_prot_t access_type;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	vaddr_t current_offset;
	vm_page_t ptmp;
	int lcv, gotpages, maxpages, swslot, rv;
	boolean_t done;
	UVMHIST_FUNC("uao_get"); UVMHIST_CALLED(pdhist);

	UVMHIST_LOG(pdhist, "aobj=%p offset=%d, flags=%d",
	    aobj, offset, flags,0);

	/*
	 * get number of pages
	 */
	maxpages = *npagesp;

	/*
	 * step 1: handle the case where fault data structures are locked.
	 */

	if (flags & PGO_LOCKED) {
		/*
		 * step 1a: get pages that are already resident.  only do
		 * this if the data structures are locked (i.e. the first
		 * time through).
		 */

		done = TRUE;	/* be optimistic */
		gotpages = 0;	/* # of pages we got so far */

		for (lcv = 0, current_offset = offset ; lcv < maxpages ;
		    lcv++, current_offset += PAGE_SIZE) {
			/* do we care about this page?  if not, skip it */
			if (pps[lcv] == PGO_DONTCARE)
				continue;

			ptmp = uvm_pagelookup(uobj, current_offset);

			/*
			 * if page is new, attempt to allocate the page, then
			 * zero-fill it.
			 */
			if (ptmp == NULL && uao_find_swslot(aobj,
			    current_offset >> PAGE_SHIFT) == 0) {
				ptmp = uvm_pagealloc(uobj, current_offset,
				    NULL);
				if (ptmp) {
					/* new page */
					ptmp->flags &= ~(PG_BUSY|PG_FAKE);
					ptmp->pqflags |= PQ_AOBJ;
					UVM_PAGE_OWN(ptmp, NULL);
					uvm_pagezero(ptmp);
				}
			}

			/*
			 * to be useful must get a non-busy, non-released page
			 */
			if (ptmp == NULL ||
			    (ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) {
				if (lcv == centeridx ||
				    (flags & PGO_ALLPAGES) != 0)
					/* need to do a wait or I/O! */
					done = FALSE;
				continue;
			}

			/*
			 * useful page: busy/lock it and plug it in our
			 * result array
			 */
			/* caller must un-busy this page */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uao_get1");
			pps[lcv] = ptmp;
			gotpages++;

		}	/* "for" lcv loop */

		/*
		 * step 1b: now we've either done everything needed or we
		 * need to unlock and do some waiting or I/O.
		 */

		UVMHIST_LOG(pdhist, "<- done (done=%d)", done, 0,0,0);

		*npagesp = gotpages;
		if (done)
			/* bingo! */
			return(VM_PAGER_OK);
		else
			/* EEK!  Need to unlock and I/O */
			return(VM_PAGER_UNLOCK);
	}

	/*
	 * step 2: get non-resident or busy pages.
	 * object is locked.  data structures are unlocked.
	 */

	for (lcv = 0, current_offset = offset ; lcv < maxpages ;
	    lcv++, current_offset += PAGE_SIZE) {
		/*
		 * - skip over pages we've already gotten or don't want
		 * - skip over pages we don't _have_ to get
		 */
		if (pps[lcv] != NULL ||
		    (lcv != centeridx && (flags & PGO_ALLPAGES) == 0))
			continue;

		/*
		 * we have yet to locate the current page (pps[lcv]).  we
		 * first look for a page that is already at the current offset.
		 * if we find a page, we check to see if it is busy or
		 * released.  if that is the case, then we sleep on the page
		 * until it is no longer busy or released and repeat the lookup.
		 * if the page we found is neither busy nor released, then we
		 * busy it (so we own it) and plug it into pps[lcv].  this
		 * 'break's the following while loop and indicates we are
		 * ready to move on to the next page in the "lcv" loop above.
		 *
		 * if we exit the while loop with pps[lcv] still set to NULL,
		 * then it means that we allocated a new busy/fake/clean page
		 * ptmp in the object and we need to do I/O to fill in the data.
		 */

		/* top of "pps" while loop */
		while (pps[lcv] == NULL) {
			/* look for a resident page */
			ptmp = uvm_pagelookup(uobj, current_offset);

			/* not resident?  allocate one now (if we can) */
			if (ptmp == NULL) {

				ptmp = uvm_pagealloc(uobj, current_offset,
				    NULL);	/* alloc */

				/* out of RAM? */
				if (ptmp == NULL) {
					simple_unlock(&uobj->vmobjlock);
					UVMHIST_LOG(pdhist,
					    "sleeping, ptmp == NULL\n",0,0,0,0);
					uvm_wait("uao_getpage");
					simple_lock(&uobj->vmobjlock);
					/* goto top of pps while loop */
					continue;
				}

				/*
				 * safe with PQ's unlocked: because we just
				 * alloc'd the page
				 */
				ptmp->pqflags |= PQ_AOBJ;

				/*
				 * got new page ready for I/O.  break pps while
				 * loop.  pps[lcv] is still NULL.
				 */
				break;
			}

			/* page is there, see if we need to wait on it */
			if ((ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) {
				ptmp->flags |= PG_WANTED;
				UVMHIST_LOG(pdhist,
				    "sleeping, ptmp->flags 0x%x\n",
				    ptmp->flags,0,0,0);
				UVM_UNLOCK_AND_WAIT(ptmp, &uobj->vmobjlock, 0,
				    "uao_get", 0);
				simple_lock(&uobj->vmobjlock);
				continue;	/* goto top of pps while loop */
			}

			/*
			 * if we get here then the page has become resident and
			 * unbusy between steps 1 and 2.  we busy it now (so we
			 * own it) and set pps[lcv] (so that we exit the while
			 * loop).
			 */
			/* we own it, caller must un-busy */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uao_get2");
			pps[lcv] = ptmp;
		}

		/*
		 * if we own the valid page at the correct offset, pps[lcv] will
		 * point to it.  nothing more to do except go to the next page.
		 */
		if (pps[lcv])
			continue;	/* next lcv */

		/*
		 * we have a "fake/busy/clean" page that we just allocated.
		 * do the needed "i/o", either reading from swap or zeroing.
		 */
		swslot = uao_find_swslot(aobj, current_offset >> PAGE_SHIFT);

		/*
		 * just zero the page if there's nothing in swap.
		 */
		if (swslot == 0) {
			/*
			 * page hasn't existed before, just zero it.
			 */
			uvm_pagezero(ptmp);
		} else {
			UVMHIST_LOG(pdhist, "pagein from swslot %d",
			    swslot, 0,0,0);

			/*
			 * page in the swapped-out page.
			 * unlock object for i/o, relock when done.
			 */
			simple_unlock(&uobj->vmobjlock);
			rv = uvm_swap_get(ptmp, swslot, PGO_SYNCIO);
			simple_lock(&uobj->vmobjlock);

			/*
			 * I/O done.  check for errors.
			 */
			if (rv != VM_PAGER_OK) {
				UVMHIST_LOG(pdhist, "<- done (error=%d)",
				    rv,0,0,0);
				if (ptmp->flags & PG_WANTED)
					/* object lock still held */
					wakeup(ptmp);
				ptmp->flags &= ~(PG_WANTED|PG_BUSY);
				UVM_PAGE_OWN(ptmp, NULL);
				uvm_lock_pageq();
				uvm_pagefree(ptmp);
				uvm_unlock_pageq();

				simple_unlock(&uobj->vmobjlock);
				return (rv);
			}
		}

		/*
		 * we got the page!  clear the fake flag (indicates valid
		 * data now in page) and plug into our result array.  note
		 * that page is still busy.
		 *
		 * it is the caller's job to:
		 * => check if the page is released
		 * => unbusy the page
		 * => activate the page
		 */

		ptmp->flags &= ~PG_FAKE;		/* data is valid ... */
		pmap_clear_modify(PMAP_PGARG(ptmp));	/* ... and clean */
		pps[lcv] = ptmp;

	}	/* lcv loop */

	/*
	 * finally, unlock object and return.
	 */

	simple_unlock(&uobj->vmobjlock);
	UVMHIST_LOG(pdhist, "<- done (OK)",0,0,0,0);
	return(VM_PAGER_OK);
}
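
/*
 * illustrative call pattern (a sketch of how the fault path might drive
 * uao_get() through aobj_pager.pgo_get; the real caller logic lives in
 * uvm_fault()):
 *
 *	npages = 1;
 *	pps[0] = NULL;
 *	rv = uao_get(uobj, offset, pps, &npages, 0, VM_PROT_READ,
 *	    advice, PGO_LOCKED);
 *	if (rv == VM_PAGER_UNLOCK) {
 *		(case 3: unlock the fault data structures, then retry
 *		 without PGO_LOCKED so uao_get() may sleep and do I/O)
 *		rv = uao_get(uobj, offset, pps, &npages, 0, VM_PROT_READ,
 *		    advice, 0);
 *	}
 */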

/*
 * uao_releasepg: handle released page in an aobj
 *
 * => "pg" is a PG_BUSY [caller owns it], PG_RELEASED page that we need
 *	to dispose of.
 * => caller must handle PG_WANTED case
 * => called with page's object locked, pageq's unlocked
 * => returns TRUE if page's object is still alive, FALSE if we
 *	killed the page's object.  if we return TRUE, then we
 *	return with the object locked.
 * => if (nextpgp != NULL) => we return pageq.tqe_next here, and return
 *	with the page queues locked [for pagedaemon]
 * => if (nextpgp == NULL) => we return with page queues unlocked [normal case]
 * => we kill the aobj if it is not referenced and we are supposed to
 *	kill it ("KILLME").
 */
static boolean_t
uao_releasepg(pg, nextpgp)
	struct vm_page *pg;
	struct vm_page **nextpgp;	/* OUT */
{
	struct uvm_aobj *aobj = (struct uvm_aobj *) pg->uobject;
	int slot;

#ifdef DIAGNOSTIC
	if ((pg->flags & PG_RELEASED) == 0)
		panic("uao_releasepg: page not released!");
#endif

	/*
	 * dispose of the page [caller handles PG_WANTED] and swap slot.
	 */
	pmap_page_protect(PMAP_PGARG(pg), VM_PROT_NONE);
	slot = uao_set_swslot(&aobj->u_obj, pg->offset >> PAGE_SHIFT, 0);
	if (slot)
		uvm_swap_free(slot, 1);

	uvm_lock_pageq();
	if (nextpgp)
		*nextpgp = pg->pageq.tqe_next;	/* next page for daemon */
	uvm_pagefree(pg);
	if (!nextpgp)
		uvm_unlock_pageq();		/* keep locked for daemon */

	/*
	 * if we're not killing the object, we're done.
	 */
	if ((aobj->u_flags & UAO_FLAG_KILLME) == 0)
		return TRUE;

#ifdef DIAGNOSTIC
	if (aobj->u_obj.uo_refs)
		panic("uao_releasepg: kill flag set on referenced object!");
#endif

	/*
	 * if there are still pages in the object, we're done for now.
	 */
	if (aobj->u_obj.uo_npages != 0)
		return TRUE;

#ifdef DIAGNOSTIC
	if (aobj->u_obj.memq.tqh_first)
		panic("uao_releasepg: pages in object with npages == 0");
#endif

	/*
	 * finally, free the rest.
	 */
	uao_free(aobj);

	return FALSE;
}


/*
 * uao_swap_off: page in every page in every aobj that is paged-out to
 * a range of swslots.
 *
 * => returns TRUE if pagein was aborted due to lack of memory.
 * => nothing is locked.
 */
boolean_t
uao_swap_off(startslot, endslot)
	int startslot, endslot;
{
	struct uvm_aobj *aobj, *nextaobj;

	/*
	 * walk the list of all aobjs
	 */

restart:
	simple_lock(&uao_list_lock);

	for (aobj = uao_list.lh_first;
	    aobj != NULL;
	    aobj = nextaobj) {
		int rv;

		/*
		 * try to get the object lock,
		 * start all over if we fail.
		 */
		if (!simple_lock_try(&aobj->u_obj.vmobjlock)) {
			simple_unlock(&uao_list_lock);
			goto restart;
		}

		/*
		 * add a ref to the aobj so it doesn't disappear
		 * while we're working.
		 */
		uao_reference_locked(&aobj->u_obj);

		/*
		 * now it's safe to unlock the uao list.
		 */
		simple_unlock(&uao_list_lock);

		/*
		 * page in any pages in the swslot range.
		 * if there's an error, abort and return the error.
		 */
		rv = uao_pagein(aobj, startslot, endslot);
		if (rv) {
			uao_detach_locked(&aobj->u_obj);
			return rv;
		}

		/*
		 * we're done with this aobj.
		 * relock the list and drop our ref on the aobj.
		 */
		simple_lock(&uao_list_lock);
		nextaobj = aobj->u_list.le_next;
		uao_detach_locked(&aobj->u_obj);
	}

	/*
	 * done with traversal, unlock the list
	 */
	simple_unlock(&uao_list_lock);
	return FALSE;
}
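
/*
 * illustrative caller of uao_swap_off() (a sketch of how the swap
 * subsystem might use it when detaching a swap device; the swapdev
 * field names here are assumptions, the real caller lives in the
 * swap code):
 *
 *	if (uao_swap_off(sdp->swd_drumoffset,
 *	    sdp->swd_drumoffset + sdp->swd_drumsize))
 *		return ENOMEM;	(some pages could not be paged back in)
 */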


/*
 * uao_pagein: page in any pages from aobj in the given range.
 *
 * => returns TRUE if pagein was aborted due to lack of memory.
 * => aobj must be locked and is returned locked.
 */
static boolean_t
uao_pagein(aobj, startslot, endslot)
	struct uvm_aobj *aobj;
	int startslot, endslot;
{
	int rv;

	if (UAO_USES_SWHASH(aobj)) {
		struct uao_swhash_elt *elt;
		int bucket;

restart:
		for (bucket = aobj->u_swhashmask; bucket >= 0; bucket--) {
			for (elt = aobj->u_swhash[bucket].lh_first;
			    elt != NULL;
			    elt = elt->list.le_next) {
				int i;

				for (i = 0; i < UAO_SWHASH_CLUSTER_SIZE; i++) {
					int slot = elt->slots[i];

					/*
					 * if the slot isn't in range, skip it.
					 */
					if (slot < startslot ||
					    slot >= endslot) {
						continue;
					}

					/*
					 * process the page,
					 * then start over on this object,
					 * since the swhash elt
					 * may have been freed.
					 */
					rv = uao_pagein_page(aobj,
					    UAO_SWHASH_ELT_PAGEIDX_BASE(elt) + i);
					if (rv) {
						return rv;
					}
					goto restart;
				}
			}
		}
	} else {
		int i;

		for (i = 0; i < aobj->u_pages; i++) {
			int slot = aobj->u_swslots[i];

			/*
			 * if the slot isn't in range, skip it
			 */
			if (slot < startslot || slot >= endslot) {
				continue;
			}

			/*
			 * process the page.
			 */
			rv = uao_pagein_page(aobj, i);
			if (rv) {
				return rv;
			}
		}
	}

	return FALSE;
}


/*
 * uao_pagein_page: page in a page from an aobj.  used for swap_off.
 *
 * => returns TRUE if pagein was aborted due to lack of memory.
 * => aobj must be locked and is returned locked.
 */
static boolean_t
uao_pagein_page(aobj, pageidx)
	struct uvm_aobj *aobj;
	int pageidx;
{
	struct vm_page *pg;
	int slot, rv;
	vaddr_t offset = pageidx << PAGE_SHIFT;
	UVMHIST_FUNC("uao_pagein_page"); UVMHIST_CALLED(pdhist);

	/*
	 * check if page is resident.
	 */
restart:
	while ((pg = uvm_pagelookup(&aobj->u_obj, offset)) != NULL) {

		/*
		 * page is already resident.
		 * if the page is busy or released,
		 * wait til it's not, then try again.
		 */

		if ((pg->flags & (PG_BUSY|PG_RELEASED)) != 0) {
			pg->flags |= PG_WANTED;
			UVM_UNLOCK_AND_WAIT(pg, &aobj->u_obj.vmobjlock, FALSE,
			    "uao_pagein", 0);
			simple_lock(&aobj->u_obj.vmobjlock);
			continue;
		}

		/*
		 * page is ours.
		 * mark it as dirty,
		 * mark the aobj as no longer having this page in swap,
		 * and free the swap slot.
		 */

		pg->flags &= ~(PG_CLEAN);
		slot = uao_set_swslot(&aobj->u_obj, pageidx, 0);
		if (slot) {
			uvm_swap_free(slot, 1);
		}

		return FALSE;
	}

	/*
	 * page is not resident.
	 * try to allocate a page, start over if we fail.
	 */
	while ((pg = uvm_pagealloc(&aobj->u_obj, offset, NULL)) == NULL) {
		boolean_t nomem;

		/*
		 * if we are out of swap here, then we have to fail.
		 */
		/* XXX what locks these? */
		nomem = uvmexp.swpages <= uvmexp.swpginuse;

		if (nomem) {
			simple_unlock(&aobj->u_obj.vmobjlock);
			return TRUE;
		}

		/*
		 * otherwise wait for free pages.
		 */
		simple_unlock(&aobj->u_obj.vmobjlock);
		uvm_wait("uao_pagein_page");
		simple_lock(&aobj->u_obj.vmobjlock);
		goto restart;
	}

	pg->pqflags |= PQ_AOBJ;

	/*
	 * fetch the page from swap.
	 * everything is unlocked.
	 */
	slot = uao_find_swslot(aobj, pageidx);
	simple_unlock(&aobj->u_obj.vmobjlock);
	rv = uvm_swap_get(pg, slot, PGO_SYNCIO);

	switch (rv) {
	case VM_PAGER_OK:
		break;

	case VM_PAGER_AGAIN:
		/*
		 * sleep a bit, then try again.
		 */
		tsleep(&lbolt, PVM, "uao_pagein", 0);
		simple_lock(&aobj->u_obj.vmobjlock);
		goto restart;
	default:
		panic("uao_pagein_page: uvm_swap_get -> %d\n", rv);
	}

	/*
	 * relock to finish up.
	 */
	simple_lock(&aobj->u_obj.vmobjlock);

	/*
	 * handle wanted pages
	 */
	if (pg->flags & PG_WANTED) {
		wakeup(pg);
	}

#ifdef DIAGNOSTIC
	/*
	 * this should never happen, since we have a reference on the aobj.
	 */
	if (pg->flags & PG_RELEASED) {
		panic("uao_pagein_page: found PG_RELEASED page?\n");
	}
#endif

	/*
	 * ok, we've got the page now.
	 * mark it as dirty, clear its swslot and un-busy it.
	 */
	slot = uao_set_swslot(&aobj->u_obj, pageidx, 0);
	if (slot) {
		uvm_swap_free(slot, 1);
	}
	pmap_page_protect(PMAP_PGARG(pg), VM_PROT_NONE);
	pmap_clear_reference(PMAP_PGARG(pg));
	pg->flags &= ~(PG_BUSY|PG_CLEAN|PG_FAKE);
	UVM_PAGE_OWN(pg, NULL);

	/*
	 * deactivate the page (to put it on a page queue).
	 */
	uvm_lock_pageq();
	uvm_pagedeactivate(pg);
	uvm_unlock_pageq();

	return FALSE;
}