/*	$NetBSD: uvm_aobj.c,v 1.5 1998/02/09 14:35:48 mrg Exp $	*/

/* copyright here */
/*
 * from: Id: uvm_aobj.c,v 1.1.2.5 1998/02/06 05:14:38 chs Exp
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#include <uvm/uvm.h>

/*
 * uvm_aobj.c: anonymous-memory backed uvm_object
 */

/*
 * an aobj manages anonymous-memory backed uvm_objects. in addition
 * to keeping the list of resident pages, it also keeps a list of
 * allocated swap blocks. depending on the size of the aobj this list
 * of allocated swap blocks is either stored in an array (small objects)
 * or in a hash table (large objects).
 */

/*
 * local structures
 */

/*
 * for hash tables, we break the address space of the aobj into blocks
 * of UAO_SWHASH_CLUSTER_SIZE pages. we require the cluster size to
 * be a power of two.
 */

#define UAO_SWHASH_CLUSTER_SHIFT 4
#define UAO_SWHASH_CLUSTER_SIZE (1 << UAO_SWHASH_CLUSTER_SHIFT)

/* get the "tag" for this page index */
#define UAO_SWHASH_ELT_TAG(PAGEIDX) \
	((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT)

/* given an ELT and a page index, find the swap slot */
#define UAO_SWHASH_ELT_PAGESLOT(ELT, PAGEIDX) \
	((ELT)->slots[(PAGEIDX) & (UAO_SWHASH_CLUSTER_SIZE - 1)])

/* given an ELT, return its pageidx base */
#define UAO_SWHASH_ELT_PAGEIDX_BASE(ELT) \
	((ELT)->tag << UAO_SWHASH_CLUSTER_SHIFT)

/*
 * the swhash hash function
 */
#define UAO_SWHASH_HASH(AOBJ, PAGEIDX) \
	(&(AOBJ)->u_swhash[(((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT) \
	    & (AOBJ)->u_swhashmask)])
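
/*
 * worked example (illustrative only, not part of the original source):
 * with UAO_SWHASH_CLUSTER_SHIFT == 4 each hash element covers a cluster
 * of 16 consecutive page indexes.  for page index 0x23:
 *
 *	UAO_SWHASH_ELT_TAG(0x23)           == 0x2	(cluster number)
 *	UAO_SWHASH_ELT_PAGESLOT(elt, 0x23) == elt->slots[0x3]
 *	UAO_SWHASH_ELT_PAGEIDX_BASE(elt)   == 0x20
 *	UAO_SWHASH_HASH(aobj, 0x23)        == &aobj->u_swhash[0x2 &
 *							aobj->u_swhashmask]
 */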

/*
 * the swhash threshold determines if we will use an array or a
 * hash table to store the list of allocated swap blocks.
 */

#define UAO_SWHASH_THRESHOLD (UAO_SWHASH_CLUSTER_SIZE * 4)
#define UAO_USES_SWHASH(AOBJ) \
	((AOBJ)->u_pages > UAO_SWHASH_THRESHOLD)	/* use hash? */

/*
 * the number of buckets in a swhash, with an upper bound
 */
#define UAO_SWHASH_MAXBUCKETS 256
#define UAO_SWHASH_BUCKETS(AOBJ) \
	(min((AOBJ)->u_pages >> UAO_SWHASH_CLUSTER_SHIFT, \
	    UAO_SWHASH_MAXBUCKETS))
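
/*
 * sizing example (illustrative only, not part of the original source):
 * with the values above the threshold is 16 * 4 = 64 pages, so an aobj
 * of more than 64 pages (256KB with 4KB pages) keeps its swap slots in
 * the hash table.  a 1024-page aobj would ask hashinit() for
 * min(1024 >> 4, 256) == 64 buckets.
 */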

/*
 * uao_swhash_elt: when a hash table is being used, this structure defines
 * the format of an entry in the bucket list.
 */

struct uao_swhash_elt {
	LIST_ENTRY(uao_swhash_elt) list;	/* the hash list */
	vm_offset_t tag;			/* our 'tag' */
	int count;				/* our number of active slots */
	int slots[UAO_SWHASH_CLUSTER_SIZE];	/* the slots */
};

/*
 * uao_swhash: the swap hash table structure
 */

LIST_HEAD(uao_swhash, uao_swhash_elt);


/*
 * uvm_aobj: the actual anon-backed uvm_object
 *
 * => the uvm_object is at the top of the structure, this allows
 *    (struct uvm_aobj *) == (struct uvm_object *)
 * => only one of u_swslots and u_swhash is used in any given aobj
 */

struct uvm_aobj {
	struct uvm_object u_obj;  /* has: lock, pgops, memq, #pages, #refs */
	vm_size_t u_pages;	  /* number of pages in entire object */
	int u_flags;		  /* the flags (see uvm_aobj.h) */
	int *u_swslots;		  /* array of offset->swapslot mappings */
	/*
	 * hashtable of offset->swapslot mappings
	 * (u_swhash is an array of bucket heads)
	 */
	struct uao_swhash *u_swhash;
	u_long u_swhashmask;		/* mask for hashtable */
	LIST_ENTRY(uvm_aobj) u_list;	/* global list of aobjs */
};
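
/*
 * note (added for clarity, not part of the original source): because
 * u_obj is the first member, a struct uvm_aobj pointer and a pointer to
 * its embedded struct uvm_object share the same address, which is what
 * the casts used throughout this file rely on, e.g.:
 *
 *	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
 *	return(&aobj->u_obj);
 */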

/*
 * local functions
 */

static void uao_init __P((void));
static struct uao_swhash_elt *uao_find_swhash_elt __P((struct uvm_aobj *,
    int, boolean_t));
static int uao_find_swslot __P((struct uvm_aobj *,
    vm_offset_t));
static boolean_t uao_flush __P((struct uvm_object *,
    vm_offset_t, vm_offset_t,
    int));
static void uao_free __P((struct uvm_aobj *));
static int uao_get __P((struct uvm_object *, vm_offset_t,
    vm_page_t *, int *, int,
    vm_prot_t, int, int));
static boolean_t uao_releasepg __P((struct vm_page *,
    struct vm_page **));


/*
 * aobj_pager
 *
 * note that some functions (e.g. put) are handled elsewhere
 */

struct uvm_pagerops aobj_pager = {
	uao_init,		/* init */
	NULL,			/* attach */
	uao_reference,		/* reference */
	uao_detach,		/* detach */
	NULL,			/* fault */
	uao_flush,		/* flush */
	uao_get,		/* get */
	NULL,			/* asyncget */
	NULL,			/* put (done by pagedaemon) */
	NULL,			/* cluster */
	NULL,			/* mk_pcluster */
	uvm_shareprot,		/* shareprot */
	NULL,			/* aiodone */
	uao_releasepg		/* releasepg */
};

/*
 * uao_list: global list of active aobjs, locked by uao_list_lock
 */

static LIST_HEAD(aobjlist, uvm_aobj) uao_list;
#if NCPU > 1
static simple_lock_data_t uao_list_lock;
#endif


/*
 * functions
 */

/*
 * hash table/array related functions
 */

/*
 * uao_find_swhash_elt: find (or create) a hash table entry for a page
 * offset.
 *
 * => the object should be locked by the caller
 */

static struct uao_swhash_elt *
uao_find_swhash_elt(aobj, pageidx, create)
	struct uvm_aobj *aobj;
	int pageidx;
	boolean_t create;
{
	struct uao_swhash *swhash;
	struct uao_swhash_elt *elt;
	int page_tag;

	swhash = UAO_SWHASH_HASH(aobj, pageidx); /* first hash to get bucket */
	page_tag = UAO_SWHASH_ELT_TAG(pageidx);	 /* tag to search for */

	/*
	 * now search the bucket for the requested tag
	 */
	for (elt = swhash->lh_first; elt != NULL; elt = elt->list.le_next) {
		if (elt->tag == page_tag)
			return(elt);
	}

	/* fail now if we are not allowed to create a new entry in the bucket */
	if (!create)
		return NULL;

	/*
	 * malloc a new entry for the bucket and init/insert it in
	 */
	MALLOC(elt, struct uao_swhash_elt *, sizeof(*elt), M_UVMAOBJ, M_WAITOK);
	LIST_INSERT_HEAD(swhash, elt, list);
	elt->tag = page_tag;
	elt->count = 0;
	bzero(elt->slots, sizeof(elt->slots));

	return(elt);
}

/*
 * uao_find_swslot: find the swap slot number for an aobj/pageidx
 *
 * => object must be locked by caller
 */
__inline static int
uao_find_swslot(aobj, pageidx)
	struct uvm_aobj *aobj;
	vm_offset_t pageidx;
{

	/*
	 * if noswap flag is set, then we never return a slot
	 */

	if (aobj->u_flags & UAO_FLAG_NOSWAP)
		return(0);

	/*
	 * if hashing, look in hash table.
	 */

	if (UAO_USES_SWHASH(aobj)) {
		struct uao_swhash_elt *elt =
		    uao_find_swhash_elt(aobj, pageidx, FALSE);

		if (elt)
			return(UAO_SWHASH_ELT_PAGESLOT(elt, pageidx));
		else
			return(0);
	}

	/*
	 * otherwise, look in the array
	 */
	return(aobj->u_swslots[pageidx]);
}

/*
 * uao_set_swslot: set the swap slot for a page in an aobj.
 *
 * => setting a slot to zero frees the slot
 * => object must be locked by caller
 */
int
uao_set_swslot(uobj, pageidx, slot)
	struct uvm_object *uobj;
	int pageidx, slot;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	int oldslot;
	UVMHIST_FUNC("uao_set_swslot"); UVMHIST_CALLED(pdhist);
	UVMHIST_LOG(pdhist, "aobj %p pageidx %d slot %d",
	    aobj, pageidx, slot, 0);

	/*
	 * if noswap flag is set, then we can't set a slot
	 */

	if (aobj->u_flags & UAO_FLAG_NOSWAP) {

		if (slot == 0)
			return(0);		/* a clear is ok */

		/* but a set is not */
		printf("uao_set_swslot: uobj = %p\n", uobj);
		panic("uao_set_swslot: attempt to set a slot on a NOSWAP object");
	}

	/*
	 * are we using a hash table? if so, add it in the hash.
	 */

	if (UAO_USES_SWHASH(aobj)) {
		struct uao_swhash_elt *elt =
		    uao_find_swhash_elt(aobj, pageidx, TRUE);

		oldslot = UAO_SWHASH_ELT_PAGESLOT(elt, pageidx);
		UAO_SWHASH_ELT_PAGESLOT(elt, pageidx) = slot;

		/*
		 * now adjust the elt's reference counter and free it if we've
		 * dropped it to zero.
		 */

		/* an allocation? */
		if (slot) {
			if (oldslot == 0)
				elt->count++;
		} else {		/* freeing slot ... */
			if (oldslot)	/* to be safe */
				elt->count--;

			if (elt->count == 0) {
				LIST_REMOVE(elt, list);
				FREE(elt, M_UVMAOBJ);
			}
		}

	} else {
		/* we are using an array */
		oldslot = aobj->u_swslots[pageidx];
		aobj->u_swslots[pageidx] = slot;
	}
	return (oldslot);
}
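
/*
 * usage sketch (illustrative only, not part of the original source):
 * whoever pages the data out records the allocated swap slot, and
 * whoever frees the page releases the slot again, roughly:
 *
 *	uao_set_swslot(uobj, pg->offset / PAGE_SIZE, slot);
 *	...
 *	slot = uao_set_swslot(uobj, pg->offset / PAGE_SIZE, 0);
 *	if (slot)
 *		uvm_swap_free(slot, 1);
 *
 * the second pattern appears verbatim in uao_detach() and
 * uao_releasepg() below.
 */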

/*
 * end of hash/array functions
 */

/*
 * uao_free: free all resources held by an aobj, and then free the aobj
 *
 * => the aobj should be dead
 */
static void
uao_free(aobj)
	struct uvm_aobj *aobj;
{

	if (UAO_USES_SWHASH(aobj)) {
		int i, hashbuckets = aobj->u_swhashmask + 1;

		/*
		 * free the swslots from each hash bucket,
		 * then the hash bucket, and finally the hash table itself.
		 */
		for (i = 0; i < hashbuckets; i++) {
			struct uao_swhash_elt *elt, *next;

			for (elt = aobj->u_swhash[i].lh_first; elt != NULL;
			    elt = next) {
				int j;

				for (j = 0; j < UAO_SWHASH_CLUSTER_SIZE; j++) {
					int slot = elt->slots[j];

					if (slot)
						uvm_swap_free(slot, 1);
				}

				next = elt->list.le_next;
				FREE(elt, M_UVMAOBJ);
			}
		}
		FREE(aobj->u_swhash, M_UVMAOBJ);
	} else {
		int i;

		/*
		 * free the array
		 */

		for (i = 0; i < aobj->u_pages; i++) {
			int slot = aobj->u_swslots[i];

			if (slot)
				uvm_swap_free(slot, 1);
		}
		FREE(aobj->u_swslots, M_UVMAOBJ);
	}

	/*
	 * finally free the aobj itself
	 */
	FREE(aobj, M_UVMAOBJ);
}

/*
 * pager functions
 */

/*
 * uao_create: create an aobj of the given size and return its uvm_object.
 *
 * => for normal use, flags are always zero
 * => for the kernel object, the flags are:
 *	UAO_FLAG_KERNOBJ - allocate the kernel object (can only happen once)
 *	UAO_FLAG_KERNSWAP - enable swapping of kernel object (" ")
 */
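
/*
 * usage sketch (illustrative only; the real call sites live elsewhere
 * in uvm): the kernel object is created in two steps, everything else
 * in one:
 *
 *	at boot:	kernel_object = uao_create(size, UAO_FLAG_KERNOBJ);
 *	swap ready:	(void) uao_create(size, UAO_FLAG_KERNSWAP);
 *	normal use:	uobj = uao_create(size, 0);
 */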
struct uvm_object *
uao_create(size, flags)
	vm_size_t size;
	int flags;
{
	static struct uvm_aobj kernel_object_store; /* home of kernel_object */
	static int kobj_alloced = 0;		/* not allocated yet */
	int pages = round_page(size) / PAGE_SIZE;
	struct uvm_aobj *aobj;

	/*
	 * malloc a new aobj unless we are asked for the kernel object
	 */
	if (flags & UAO_FLAG_KERNOBJ) {		/* want kernel object? */
		if (kobj_alloced)
			panic("uao_create: kernel object already allocated");

		aobj = &kernel_object_store;
		aobj->u_pages = pages;
		aobj->u_flags = UAO_FLAG_NOSWAP;	/* no swap to start */
		/* we are special, we never die */
		aobj->u_obj.uo_refs = UVM_OBJ_KERN;
		kobj_alloced = UAO_FLAG_KERNOBJ;
	} else if (flags & UAO_FLAG_KERNSWAP) {
		aobj = &kernel_object_store;
		if (kobj_alloced != UAO_FLAG_KERNOBJ)
			panic("uao_create: asked to enable swap on kernel object");
		kobj_alloced = UAO_FLAG_KERNSWAP;
	} else {	/* normal object */
		MALLOC(aobj, struct uvm_aobj *, sizeof(*aobj), M_UVMAOBJ,
		    M_WAITOK);
		aobj->u_pages = pages;
		aobj->u_flags = 0;		/* normal object */
		aobj->u_obj.uo_refs = 1;	/* start with 1 reference */
	}

	/*
	 * allocate hash/array if necessary
	 *
	 * note: in the KERNSWAP case no need to worry about locking since
	 * we are still booting and should be the only thread around.
	 */
	if (flags == 0 || (flags & UAO_FLAG_KERNSWAP) != 0) {
		int mflags = (flags & UAO_FLAG_KERNSWAP) != 0 ?
		    M_NOWAIT : M_WAITOK;

		/* allocate hash table or array depending on object size */
		if (UAO_USES_SWHASH(aobj)) {
			aobj->u_swhash = hashinit(UAO_SWHASH_BUCKETS(aobj),
			    M_UVMAOBJ, mflags, &aobj->u_swhashmask);
			if (aobj->u_swhash == NULL)
				panic("uao_create: hashinit swhash failed");
		} else {
			MALLOC(aobj->u_swslots, int *, pages * sizeof(int),
			    M_UVMAOBJ, mflags);
			if (aobj->u_swslots == NULL)
				panic("uao_create: malloc swslots failed");
			bzero(aobj->u_swslots, pages * sizeof(int));
		}

		if (flags) {
			aobj->u_flags &= ~UAO_FLAG_NOSWAP; /* clear noswap */
			return(&aobj->u_obj);
			/* done! */
		}
	}

	/*
	 * init aobj fields
	 */
	simple_lock_init(&aobj->u_obj.vmobjlock);
	aobj->u_obj.pgops = &aobj_pager;
	TAILQ_INIT(&aobj->u_obj.memq);
	aobj->u_obj.uo_npages = 0;

	/*
	 * now that aobj is ready, add it to the global list
	 * XXXCHS: uao_init hasn't been called in the KERNOBJ case,
	 * do we really need the kernel object on this list anyway?
	 */
	simple_lock(&uao_list_lock);
	LIST_INSERT_HEAD(&uao_list, aobj, u_list);
	simple_unlock(&uao_list_lock);

	/*
	 * done!
	 */
	return(&aobj->u_obj);
}

/*
 * uao_init: set up aobj pager subsystem
 *
 * => called at boot time from uvm_pager_init()
 */
static void
uao_init()
{

	LIST_INIT(&uao_list);
	simple_lock_init(&uao_list_lock);
}

/*
 * uao_reference: add a ref to an aobj
 *
 * => aobj must be unlocked (we will lock it)
 */
void
uao_reference(uobj)
	struct uvm_object *uobj;
{
	UVMHIST_FUNC("uao_reference"); UVMHIST_CALLED(maphist);

	/*
	 * kernel_object already has plenty of references, leave it alone.
	 */

	if (uobj->uo_refs == UVM_OBJ_KERN)
		return;

	simple_lock(&uobj->vmobjlock);
	uobj->uo_refs++;		/* bump! */
	UVMHIST_LOG(maphist, "<- done (uobj=0x%x, ref = %d)",
	    uobj, uobj->uo_refs,0,0);
	simple_unlock(&uobj->vmobjlock);
}

/*
 * uao_detach: drop a reference to an aobj
 *
 * => aobj must be unlocked, we will lock it
 */
void
uao_detach(uobj)
	struct uvm_object *uobj;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct vm_page *pg;
	boolean_t busybody;
	UVMHIST_FUNC("uao_detach"); UVMHIST_CALLED(maphist);

	/*
	 * detaching from kernel_object is a noop.
	 */
	if (uobj->uo_refs == UVM_OBJ_KERN)
		return;

	simple_lock(&uobj->vmobjlock);

	UVMHIST_LOG(maphist," (uobj=0x%x) ref=%d", uobj,uobj->uo_refs,0,0);
	uobj->uo_refs--;			/* drop ref! */
	if (uobj->uo_refs) {			/* still more refs? */
		simple_unlock(&uobj->vmobjlock);
		UVMHIST_LOG(maphist, "<- done (rc>0)", 0,0,0,0);
		return;
	}

	/*
	 * remove the aobj from the global list.
	 */
	simple_lock(&uao_list_lock);
	LIST_REMOVE(aobj, u_list);
	simple_unlock(&uao_list_lock);

	/*
	 * free all the pages that aren't PG_BUSY, mark for release any that are.
	 */

	busybody = FALSE;
	for (pg = uobj->memq.tqh_first ; pg != NULL ; pg = pg->listq.tqe_next) {
		int swslot;

		if (pg->flags & PG_BUSY) {
			pg->flags |= PG_RELEASED;
			busybody = TRUE;
			continue;
		}

		/* zap the mappings, free the swap slot, free the page */
		pmap_page_protect(PMAP_PGARG(pg), VM_PROT_NONE);

		swslot = uao_set_swslot(&aobj->u_obj, pg->offset / PAGE_SIZE, 0);
		if (swslot) {
			uvm_swap_free(swslot, 1);
		}

		uvm_lock_pageq();
		uvm_pagefree(pg);
		uvm_unlock_pageq();
	}

	/*
	 * if we found any busy pages, we're done for now.
	 * mark the aobj for death, releasepg will finish up for us.
	 */
	if (busybody) {
		aobj->u_flags |= UAO_FLAG_KILLME;
		simple_unlock(&aobj->u_obj.vmobjlock);
		return;
	}

	/*
	 * finally, free the rest.
	 */
	uao_free(aobj);
}

/*
 * uao_flush: uh, yea, sure it's flushed. really!
 */
boolean_t
uao_flush(uobj, start, end, flags)
	struct uvm_object *uobj;
	vm_offset_t start, end;
	int flags;
{

	/*
	 * anonymous memory doesn't "flush"
	 */
	/*
	 * XXX
	 * deal with PGO_DEACTIVATE (for madvise(MADV_SEQUENTIAL))
	 * and PGO_FREE (for msync(MS_INVALIDATE))
	 */
	return TRUE;
}

/*
 * uao_get: fetch me a page
 *
 * we have three cases:
 * 1: page is resident -> just return the page.
 * 2: page is zero-fill -> allocate a new page and zero it.
 * 3: page is swapped out -> fetch the page from swap.
 *
 * cases 1 and 2 can be handled with PGO_LOCKED, case 3 cannot.
 * so, if the "center" page hits case 3 (or any page, with PGO_ALLPAGES),
 * then we will need to return VM_PAGER_UNLOCK.
 *
 * => prefer map unlocked (not required)
 * => object must be locked! we will _unlock_ it before starting any I/O.
 * => flags: PGO_ALLPAGES: get all of the pages
 *	     PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */
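
/*
 * calling-convention sketch (illustrative only; the real caller is the
 * fault code elsewhere in uvm and the values below are made up):
 *
 *	struct vm_page *pps[4];
 *	int npages = 4, centeridx = 1, rv;
 *
 *	pps[0] = PGO_DONTCARE;			(skip this one)
 *	pps[1] = pps[2] = pps[3] = NULL;	(pps[1] is the center page)
 *	rv = uao_get(uobj, offset, pps, &npages, centeridx,
 *	    VM_PROT_READ, MADV_NORMAL, PGO_LOCKED);
 *
 * a VM_PAGER_UNLOCK result means case 3 above was hit: the caller must
 * drop its locks and call again without PGO_LOCKED so I/O can be done.
 */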
static int
uao_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags)
	struct uvm_object *uobj;
	vm_offset_t offset;
	struct vm_page **pps;
	int *npagesp;
	int centeridx, advice, flags;
	vm_prot_t access_type;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	vm_offset_t current_offset;
	vm_page_t ptmp;
	int lcv, gotpages, maxpages, swslot, rv;
	boolean_t done;
	UVMHIST_FUNC("uao_get"); UVMHIST_CALLED(pdhist);

	UVMHIST_LOG(pdhist, "aobj=%p offset=%d, flags=%d", aobj, offset, flags,0);

	/*
	 * get number of pages
	 */

	maxpages = *npagesp;

	/*
	 * step 1: handle the case where fault data structures are locked.
	 */

	if (flags & PGO_LOCKED) {

		/*
		 * step 1a: get pages that are already resident. only do
		 * this if the data structures are locked (i.e. the first
		 * time through).
		 */

		done = TRUE;	/* be optimistic */
		gotpages = 0;	/* # of pages we got so far */

		for (lcv = 0, current_offset = offset ; lcv < maxpages ;
		    lcv++, current_offset += PAGE_SIZE) {
			/* do we care about this page? if not, skip it */
			if (pps[lcv] == PGO_DONTCARE)
				continue;

			ptmp = uvm_pagelookup(uobj, current_offset);

			/*
			 * if page is new, attempt to allocate the page, then
			 * zero-fill it.
			 */
			if (ptmp == NULL && uao_find_swslot(aobj,
			    current_offset / PAGE_SIZE) == 0) {
				ptmp = uvm_pagealloc(uobj, current_offset,
				    NULL);
				if (ptmp) {
					/* new page */
					ptmp->flags &= ~(PG_BUSY|PG_FAKE);
					ptmp->pqflags |= PQ_AOBJ;
					UVM_PAGE_OWN(ptmp, NULL);
					uvm_pagezero(ptmp);
				}
			}

			/*
			 * to be useful must get a non-busy, non-released page
			 */
			if (ptmp == NULL ||
			    (ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) {
				if (lcv == centeridx ||
				    (flags & PGO_ALLPAGES) != 0)
					/* need to do a wait or I/O! */
					done = FALSE;
				continue;
			}

			/*
			 * useful page: busy/lock it and plug it in our
			 * result array
			 */
			/* caller must un-busy this page */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uao_get1");
			pps[lcv] = ptmp;
			gotpages++;

		}	/* "for" lcv loop */

		/*
		 * step 1b: now we've either done everything needed or we
		 * need to unlock and do some waiting or I/O.
		 */

		UVMHIST_LOG(pdhist, "<- done (done=%d)", done, 0,0,0);

		*npagesp = gotpages;
		if (done)
			/* bingo! */
			return(VM_PAGER_OK);
		else
			/* EEK! Need to unlock and I/O */
			return(VM_PAGER_UNLOCK);
	}

	/*
	 * step 2: get non-resident or busy pages.
	 * object is locked. data structures are unlocked.
	 */

	for (lcv = 0, current_offset = offset ; lcv < maxpages ;
	    lcv++, current_offset += PAGE_SIZE) {
		/*
		 * - skip over pages we've already gotten or don't want
		 * - skip over pages we don't _have_ to get
		 */
		if (pps[lcv] != NULL ||
		    (lcv != centeridx && (flags & PGO_ALLPAGES) == 0))
			continue;

		/*
		 * we have yet to locate the current page (pps[lcv]). we
		 * first look for a page that is already at the current offset.
		 * if we find a page, we check to see if it is busy or
		 * released. if that is the case, then we sleep on the page
		 * until it is no longer busy or released and repeat the lookup.
		 * if the page we found is neither busy nor released, then we
		 * busy it (so we own it) and plug it into pps[lcv]. this
		 * 'break's the following while loop and indicates we are
		 * ready to move on to the next page in the "lcv" loop above.
		 *
		 * if we exit the while loop with pps[lcv] still set to NULL,
		 * then it means that we allocated a new busy/fake/clean page
		 * ptmp in the object and we need to do I/O to fill in the data.
		 */

		/* top of "pps" while loop */
		while (pps[lcv] == NULL) {
			/* look for a resident page */
			ptmp = uvm_pagelookup(uobj, current_offset);

			/* not resident? allocate one now (if we can) */
			if (ptmp == NULL) {

				ptmp = uvm_pagealloc(uobj, current_offset,
				    NULL);	/* alloc */

				/* out of RAM? */
				if (ptmp == NULL) {
					simple_unlock(&uobj->vmobjlock);
					UVMHIST_LOG(pdhist,
					    "sleeping, ptmp == NULL\n",0,0,0,0);
					uvm_wait("uao_getpage");
					simple_lock(&uobj->vmobjlock);
					/* goto top of pps while loop */
					continue;
				}

				/*
				 * safe with PQ's unlocked: because we just
				 * alloc'd the page
				 */
				ptmp->pqflags |= PQ_AOBJ;

				/*
				 * got new page ready for I/O. break pps while
				 * loop. pps[lcv] is still NULL.
				 */
				break;
			}

			/* page is there, see if we need to wait on it */
			if ((ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) {
				ptmp->flags |= PG_WANTED;
				UVMHIST_LOG(pdhist,
				    "sleeping, ptmp->flags 0x%x\n",
				    ptmp->flags,0,0,0);
				UVM_UNLOCK_AND_WAIT(ptmp, &uobj->vmobjlock, 0,
				    "uao_get", 0);
				simple_lock(&uobj->vmobjlock);
				continue;	/* goto top of pps while loop */
			}

			/*
			 * if we get here then the page has become resident and
			 * unbusy between steps 1 and 2. we busy it now (so we
			 * own it) and set pps[lcv] (so that we exit the while
			 * loop).
			 */
			/* we own it, caller must un-busy */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uao_get2");
			pps[lcv] = ptmp;
		}

		/*
		 * if we own the valid page at the correct offset, pps[lcv] will
		 * point to it. nothing more to do except go to the next page.
		 */
		if (pps[lcv])
			continue;	/* next lcv */

		/*
		 * we have a "fake/busy/clean" page that we just allocated.
		 * do the needed "i/o", either reading from swap or zeroing.
		 */
		swslot = uao_find_swslot(aobj, current_offset / PAGE_SIZE);

		/*
		 * just zero the page if there's nothing in swap.
		 */
		if (swslot == 0) {
			/*
			 * page hasn't existed before, just zero it.
			 */
			uvm_pagezero(ptmp);
		} else {
			UVMHIST_LOG(pdhist, "pagein from swslot %d",
			    swslot, 0,0,0);

			/*
			 * page in the swapped-out page.
			 * unlock object for i/o, relock when done.
			 */
			simple_unlock(&uobj->vmobjlock);
			rv = uvm_swap_get(ptmp, swslot, PGO_SYNCIO);
			simple_lock(&uobj->vmobjlock);

			/*
			 * I/O done. check for errors.
			 */
			if (rv != VM_PAGER_OK) {
				UVMHIST_LOG(pdhist, "<- done (error=%d)",
				    rv,0,0,0);
				if (ptmp->flags & PG_WANTED)
					/* object lock still held */
					thread_wakeup(ptmp);
				ptmp->flags &= ~(PG_WANTED|PG_BUSY);
				UVM_PAGE_OWN(ptmp, NULL);
				uvm_lock_pageq();
				uvm_pagefree(ptmp);
				uvm_unlock_pageq();
				simple_unlock(&uobj->vmobjlock);
				return (rv);
			}
		}

		/*
		 * we got the page! clear the fake flag (indicates valid
		 * data now in page) and plug into our result array. note
		 * that page is still busy.
		 *
		 * it is the caller's job to:
		 * => check if the page is released
		 * => unbusy the page
		 * => activate the page
		 */

		ptmp->flags &= ~PG_FAKE;		/* data is valid ... */
		pmap_clear_modify(PMAP_PGARG(ptmp));	/* ... and clean */
		pps[lcv] = ptmp;

	}	/* lcv loop */

	/*
	 * finally, unlock object and return.
	 */

	simple_unlock(&uobj->vmobjlock);
	UVMHIST_LOG(pdhist, "<- done (OK)",0,0,0,0);
	return(VM_PAGER_OK);
}

/*
 * uao_releasepg: handle released page in an aobj
 *
 * => "pg" is a PG_BUSY [caller owns it], PG_RELEASED page that we need
 *	to dispose of.
 * => caller must handle PG_WANTED case
 * => called with page's object locked, pageq's unlocked
 * => returns TRUE if page's object is still alive, FALSE if we
 *	killed the page's object. if we return TRUE, then we
 *	return with the object locked.
 * => if (nextpgp != NULL) => we return pageq.tqe_next here, and return
 *	with the page queues locked [for pagedaemon]
 * => if (nextpgp == NULL) => we return with page queues unlocked [normal case]
 * => we kill the aobj if it is not referenced and we are supposed to
 *	kill it ("KILLME").
 */
static boolean_t
uao_releasepg(pg, nextpgp)
	struct vm_page *pg;
	struct vm_page **nextpgp;	/* OUT */
{
	struct uvm_aobj *aobj = (struct uvm_aobj *) pg->uobject;
	int slot;

#ifdef DIAGNOSTIC
	if ((pg->flags & PG_RELEASED) == 0)
		panic("uao_releasepg: page not released!");
#endif

	/*
	 * dispose of the page [caller handles PG_WANTED] and swap slot.
	 */
	pmap_page_protect(PMAP_PGARG(pg), VM_PROT_NONE);
	slot = uao_set_swslot(&aobj->u_obj, pg->offset / PAGE_SIZE, 0);
	if (slot)
		uvm_swap_free(slot, 1);
	uvm_lock_pageq();
	if (nextpgp)
		*nextpgp = pg->pageq.tqe_next;	/* next page for daemon */
	uvm_pagefree(pg);
	if (!nextpgp)
		uvm_unlock_pageq();		/* keep locked for daemon */

	/*
	 * if we're not killing the object, we're done.
	 */
	if ((aobj->u_flags & UAO_FLAG_KILLME) == 0)
		return TRUE;

#ifdef DIAGNOSTIC
	if (aobj->u_obj.uo_refs)
		panic("uao_releasepg: kill flag set on referenced object!");
#endif

	/*
	 * if there are still pages in the object, we're done for now.
	 */
	if (aobj->u_obj.uo_npages != 0)
		return TRUE;

#ifdef DIAGNOSTIC
	if (aobj->u_obj.memq.tqh_first)
		panic("uao_releasepg: pages in object with npages == 0");
#endif

	/*
	 * finally, free the rest.
	 */
	uao_free(aobj);

	return FALSE;
}