/*	$NetBSD: uvm_aobj.c,v 1.151.2.2 2021/04/03 22:29:03 thorpej Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
 * Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_aobj.c,v 1.1.2.5 1998/02/06 05:14:38 chs Exp
 */

/*
 * uvm_aobj.c: anonymous memory uvm_object pager
 *
 * author: Chuck Silvers <chuq (at) chuq.com>
 * started: Jan-1998
 *
 * - design mostly from Chuck Cranor
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_aobj.c,v 1.151.2.2 2021/04/03 22:29:03 thorpej Exp $");

#ifdef _KERNEL_OPT
#include "opt_uvmhist.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/pool.h>
#include <sys/atomic.h>

#include <uvm/uvm.h>
#include <uvm/uvm_page_array.h>
/*
 * An anonymous UVM object (aobj) manages anonymous memory.  In addition to
 * keeping the list of resident pages, it may also keep a list of allocated
 * swap blocks.  Depending on the size of the object, this list is either
 * stored in an array (small objects) or in a hash table (large objects).
 *
 * Lock order
 *
 *	uao_list_lock ->
 *		uvm_object::vmobjlock
 */

/*
 * Note: for hash tables, we break the address space of the aobj into blocks
 * of UAO_SWHASH_CLUSTER_SIZE pages, which shall be a power of two.
 */

#define	UAO_SWHASH_CLUSTER_SHIFT	4
#define	UAO_SWHASH_CLUSTER_SIZE		(1 << UAO_SWHASH_CLUSTER_SHIFT)

/* Get the "tag" for this page index. */
#define	UAO_SWHASH_ELT_TAG(idx)		((idx) >> UAO_SWHASH_CLUSTER_SHIFT)
#define	UAO_SWHASH_ELT_PAGESLOT_IDX(idx) \
	((idx) & (UAO_SWHASH_CLUSTER_SIZE - 1))

/* Given an ELT and a page index, find the swap slot. */
#define	UAO_SWHASH_ELT_PAGESLOT(elt, idx) \
	((elt)->slots[UAO_SWHASH_ELT_PAGESLOT_IDX(idx)])

/* Given an ELT, return its pageidx base. */
#define	UAO_SWHASH_ELT_PAGEIDX_BASE(elt) \
	((elt)->tag << UAO_SWHASH_CLUSTER_SHIFT)

/* The hash function. */
#define	UAO_SWHASH_HASH(aobj, idx) \
	(&(aobj)->u_swhash[(((idx) >> UAO_SWHASH_CLUSTER_SHIFT) \
	    & (aobj)->u_swhashmask)])

/*
 * The threshold which determines whether we will use an array or a
 * hash table to store the list of allocated swap blocks.
 */
#define	UAO_SWHASH_THRESHOLD		(UAO_SWHASH_CLUSTER_SIZE * 4)
#define	UAO_USES_SWHASH(aobj) \
	((aobj)->u_pages > UAO_SWHASH_THRESHOLD)

/* The number of buckets in a hash, with an upper bound. */
#define	UAO_SWHASH_MAXBUCKETS		256
#define	UAO_SWHASH_BUCKETS(aobj) \
	(MIN((aobj)->u_pages >> UAO_SWHASH_CLUSTER_SHIFT, UAO_SWHASH_MAXBUCKETS))
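
/*
 * Worked example of the macros above (the numbers follow directly from
 * UAO_SWHASH_CLUSTER_SHIFT == 4): page index 0x123 has tag 0x12 and
 * in-cluster index 3, so its swap slot is read or written as
 * UAO_SWHASH_ELT_PAGESLOT(elt, 0x123) == elt->slots[3], where elt is
 * the element with tag 0x12 on the bucket UAO_SWHASH_HASH(aobj, 0x123).
 * An aobj of up to UAO_SWHASH_THRESHOLD (64) pages skips all of this
 * and uses the flat u_swslots array instead.
 */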

/*
 * uao_swhash_elt: when a hash table is being used, this structure defines
 * the format of an entry in the bucket list.
 */

struct uao_swhash_elt {
	LIST_ENTRY(uao_swhash_elt) list;	/* the hash list */
	voff_t tag;				/* our 'tag' */
	int count;				/* our number of active slots */
	int slots[UAO_SWHASH_CLUSTER_SIZE];	/* the slots */
};

/*
 * uao_swhash: the swap hash table structure
 */

LIST_HEAD(uao_swhash, uao_swhash_elt);

/*
 * uao_swhash_elt_pool: pool of uao_swhash_elt structures.
 * Note: pages for this pool must not come from a pageable kernel map.
 */
static struct pool uao_swhash_elt_pool __cacheline_aligned;

/*
 * uvm_aobj: the actual anon-backed uvm_object
 *
 * => the uvm_object is at the top of the structure, this allows
 *    (struct uvm_aobj *) == (struct uvm_object *)
 * => only one of u_swslots and u_swhash is used in any given aobj
 */

struct uvm_aobj {
	struct uvm_object u_obj; /* has: lock, pgops, #pages, #refs */
	pgoff_t u_pages;	 /* number of pages in entire object */
	int u_flags;		 /* the flags (see uvm_aobj.h) */
	int *u_swslots;		 /* array of offset->swapslot mappings */
	/*
	 * hashtable of offset->swapslot mappings
	 * (u_swhash is an array of bucket heads)
	 */
	struct uao_swhash *u_swhash;
	u_long u_swhashmask;		/* mask for hashtable */
	LIST_ENTRY(uvm_aobj) u_list;	/* global list of aobjs */
	int u_freelist;		  /* freelist to allocate pages from */
};

static void	uao_free(struct uvm_aobj *);
static int	uao_get(struct uvm_object *, voff_t, struct vm_page **,
		    int *, int, vm_prot_t, int, int);
static int	uao_put(struct uvm_object *, voff_t, voff_t, int);

#if defined(VMSWAP)
static struct uao_swhash_elt *uao_find_swhash_elt
    (struct uvm_aobj *, int, bool);

static bool uao_pagein(struct uvm_aobj *, int, int);
static bool uao_pagein_page(struct uvm_aobj *, int);
#endif /* defined(VMSWAP) */

static struct vm_page	*uao_pagealloc(struct uvm_object *, voff_t, int);

/*
 * aobj_pager
 *
 * note that some functions (e.g. put) are handled elsewhere
 */

const struct uvm_pagerops aobj_pager = {
	.pgo_reference = uao_reference,
	.pgo_detach = uao_detach,
	.pgo_get = uao_get,
	.pgo_put = uao_put,
};

/*
 * uao_list: global list of active aobjs, locked by uao_list_lock
 */

static LIST_HEAD(aobjlist, uvm_aobj) uao_list __cacheline_aligned;
static kmutex_t uao_list_lock __cacheline_aligned;

/*
 * hash table/array related functions
 */

#if defined(VMSWAP)

/*
 * uao_find_swhash_elt: find (or create) a hash table entry for a page
 * offset.
 *
 * => the object should be locked by the caller
 */

static struct uao_swhash_elt *
uao_find_swhash_elt(struct uvm_aobj *aobj, int pageidx, bool create)
{
	struct uao_swhash *swhash;
	struct uao_swhash_elt *elt;
	voff_t page_tag;

	swhash = UAO_SWHASH_HASH(aobj, pageidx);
	page_tag = UAO_SWHASH_ELT_TAG(pageidx);

	/*
	 * now search the bucket for the requested tag
	 */

	LIST_FOREACH(elt, swhash, list) {
		if (elt->tag == page_tag) {
			return elt;
		}
	}
	if (!create) {
		return NULL;
	}

	/*
	 * allocate a new entry for the bucket and init/insert it in
	 */

	elt = pool_get(&uao_swhash_elt_pool, PR_NOWAIT);
	if (elt == NULL) {
		return NULL;
	}
	LIST_INSERT_HEAD(swhash, elt, list);
	elt->tag = page_tag;
	elt->count = 0;
	memset(elt->slots, 0, sizeof(elt->slots));
	return elt;
}

/*
 * uao_find_swslot: find the swap slot number for an aobj/pageidx
 *
 * => object must be locked by caller
 */

int
uao_find_swslot(struct uvm_object *uobj, int pageidx)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct uao_swhash_elt *elt;

	KASSERT(UVM_OBJ_IS_AOBJ(uobj));

	/*
	 * if noswap flag is set, then we never return a slot
	 */

	if (aobj->u_flags & UAO_FLAG_NOSWAP)
		return 0;

	/*
	 * if hashing, look in hash table.
	 */

	if (UAO_USES_SWHASH(aobj)) {
		elt = uao_find_swhash_elt(aobj, pageidx, false);
		return elt ? UAO_SWHASH_ELT_PAGESLOT(elt, pageidx) : 0;
	}

	/*
	 * otherwise, look in the array
	 */

	return aobj->u_swslots[pageidx];
}

/*
 * uao_set_swslot: set the swap slot for a page in an aobj.
 *
 * => setting a slot to zero frees the slot
 * => object must be locked by caller
 * => we return the old slot number, or -1 if we failed to allocate
 *    memory to record the new slot number
 */
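
/*
 * Illustrative use (assumed caller, mirroring what uao_dropswap() does
 * below): with the object locked, free page 5's swap slot and release
 * the swap space itself:
 *
 *	int oldslot = uao_set_swslot(uobj, 5, 0);
 *	if (oldslot > 0)
 *		uvm_swap_free(oldslot, 1);
 */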

int
uao_set_swslot(struct uvm_object *uobj, int pageidx, int slot)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct uao_swhash_elt *elt;
	int oldslot;
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(pdhist, "aobj %#jx pageidx %jd slot %jd",
	    (uintptr_t)aobj, pageidx, slot, 0);

	KASSERT(rw_write_held(uobj->vmobjlock) || uobj->uo_refs == 0);
	KASSERT(UVM_OBJ_IS_AOBJ(uobj));

	/*
	 * if noswap flag is set, then we can't set a non-zero slot.
	 */

	if (aobj->u_flags & UAO_FLAG_NOSWAP) {
		KASSERTMSG(slot == 0, "uao_set_swslot: no swap object");
		return 0;
	}

	/*
	 * are we using a hash table?  if so, add it in the hash.
	 */

	if (UAO_USES_SWHASH(aobj)) {

		/*
		 * Avoid allocating an entry just to free it again if
		 * the page had no swap slot in the first place and
		 * we are freeing.
		 */

		elt = uao_find_swhash_elt(aobj, pageidx, slot != 0);
		if (elt == NULL) {
			return slot ? -1 : 0;
		}

		oldslot = UAO_SWHASH_ELT_PAGESLOT(elt, pageidx);
		UAO_SWHASH_ELT_PAGESLOT(elt, pageidx) = slot;

		/*
		 * now adjust the elt's reference counter and free it if we've
		 * dropped it to zero.
		 */

		if (slot) {
			if (oldslot == 0)
				elt->count++;
		} else {
			if (oldslot)
				elt->count--;

			if (elt->count == 0) {
				LIST_REMOVE(elt, list);
				pool_put(&uao_swhash_elt_pool, elt);
			}
		}
	} else {
		/* we are using an array */
		oldslot = aobj->u_swslots[pageidx];
		aobj->u_swslots[pageidx] = slot;
	}
	return oldslot;
}

#endif /* defined(VMSWAP) */

/*
 * end of hash/array functions
 */

/*
 * uao_free: free all resources held by an aobj, and then free the aobj
 *
 * => the aobj should be dead
 */

static void
uao_free(struct uvm_aobj *aobj)
{
	struct uvm_object *uobj = &aobj->u_obj;

	KASSERT(UVM_OBJ_IS_AOBJ(uobj));
	KASSERT(rw_write_held(uobj->vmobjlock));
	uao_dropswap_range(uobj, 0, 0);
	rw_exit(uobj->vmobjlock);

#if defined(VMSWAP)
	if (UAO_USES_SWHASH(aobj)) {

		/*
		 * free the hash table itself.
		 */

		hashdone(aobj->u_swhash, HASH_LIST, aobj->u_swhashmask);
	} else {

		/*
		 * free the array itself.
		 */

		kmem_free(aobj->u_swslots, aobj->u_pages * sizeof(int));
	}
#endif /* defined(VMSWAP) */

	/*
	 * finally free the aobj itself
	 */

	uvm_obj_destroy(uobj, true);
	kmem_free(aobj, sizeof(struct uvm_aobj));
}

/*
 * pager functions
 */

/*
 * uao_create: create an aobj of the given size and return its uvm_object.
 *
 * => for normal use, flags are always zero
 * => for the kernel object, the flags are:
 *	UAO_FLAG_KERNOBJ - allocate the kernel object (can only happen once)
 *	UAO_FLAG_KERNSWAP - enable swapping of kernel object (" ")
 */
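
/*
 * Illustrative use (assumed caller; the size is hypothetical): a normal
 * anonymous object is created with flags == 0 and released with
 * uao_detach(), which frees the pages and swap once the last reference
 * is gone:
 *
 *	struct uvm_object *uobj = uao_create(16 * PAGE_SIZE, 0);
 *	...fault or map pages through uobj...
 *	uao_detach(uobj);
 */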

struct uvm_object *
uao_create(voff_t size, int flags)
{
	static struct uvm_aobj kernel_object_store;
	static krwlock_t bootstrap_kernel_object_lock;
	static int kobj_alloced __diagused = 0;
	pgoff_t pages = round_page((uint64_t)size) >> PAGE_SHIFT;
	struct uvm_aobj *aobj;
	int refs;

	/*
	 * Allocate a new aobj, unless kernel object is requested.
	 */

	if (flags & UAO_FLAG_KERNOBJ) {
		KASSERT(!kobj_alloced);
		aobj = &kernel_object_store;
		aobj->u_pages = pages;
		aobj->u_flags = UAO_FLAG_NOSWAP;
		refs = UVM_OBJ_KERN;
		kobj_alloced = UAO_FLAG_KERNOBJ;
	} else if (flags & UAO_FLAG_KERNSWAP) {
		KASSERT(kobj_alloced == UAO_FLAG_KERNOBJ);
		aobj = &kernel_object_store;
		kobj_alloced = UAO_FLAG_KERNSWAP;
		refs = 0xdeadbeaf; /* XXX: gcc */
	} else {
		aobj = kmem_alloc(sizeof(struct uvm_aobj), KM_SLEEP);
		aobj->u_pages = pages;
		aobj->u_flags = 0;
		refs = 1;
	}

	/*
	 * no freelist by default
	 */

	aobj->u_freelist = VM_NFREELIST;

	/*
	 * allocate hash/array if necessary
	 *
	 * note: in the KERNSWAP case there is no need to worry about
	 * locking, since we are still booting and should be the only
	 * thread around.
	 */

	const int kernswap = (flags & UAO_FLAG_KERNSWAP) != 0;
	if (flags == 0 || kernswap) {
#if defined(VMSWAP)

		/* allocate hash table or array depending on object size */
		if (UAO_USES_SWHASH(aobj)) {
			aobj->u_swhash = hashinit(UAO_SWHASH_BUCKETS(aobj),
			    HASH_LIST, true, &aobj->u_swhashmask);
		} else {
			aobj->u_swslots = kmem_zalloc(pages * sizeof(int),
			    KM_SLEEP);
		}
#endif /* defined(VMSWAP) */

		/*
		 * Replace kernel_object's temporary static lock with
		 * a regular rw_obj.  We cannot use uvm_obj_setlock()
		 * because that would try to free the old lock.
		 */

		if (kernswap) {
			aobj->u_obj.vmobjlock = rw_obj_alloc();
			rw_destroy(&bootstrap_kernel_object_lock);
		}
		if (flags) {
			aobj->u_flags &= ~UAO_FLAG_NOSWAP; /* clear noswap */
			return &aobj->u_obj;
		}
	}

	/*
	 * Initialise UVM object.
	 */

	const bool kernobj = (flags & UAO_FLAG_KERNOBJ) != 0;
	uvm_obj_init(&aobj->u_obj, &aobj_pager, !kernobj, refs);
	if (__predict_false(kernobj)) {
		/* Use a temporary static lock for kernel_object. */
		rw_init(&bootstrap_kernel_object_lock);
		uvm_obj_setlock(&aobj->u_obj, &bootstrap_kernel_object_lock);
	}

	/*
	 * now that aobj is ready, add it to the global list
	 */

	mutex_enter(&uao_list_lock);
	LIST_INSERT_HEAD(&uao_list, aobj, u_list);
	mutex_exit(&uao_list_lock);
	return &aobj->u_obj;
}

/*
 * uao_set_pgfl: allocate pages only from the specified freelist.
 *
 * => must be called before any pages are allocated for the object.
 * => reset by setting it to VM_NFREELIST, meaning any freelist.
 */
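
/*
 * Illustrative use (assumed caller; whether a given freelist index is
 * meaningful is machine-dependent): restrict a fresh aobj to freelist 0
 * before any of its pages are allocated:
 *
 *	struct uvm_object *uobj = uao_create(size, 0);
 *	uao_set_pgfl(uobj, 0);
 */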

void
uao_set_pgfl(struct uvm_object *uobj, int freelist)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;

	KASSERTMSG((0 <= freelist), "invalid freelist %d", freelist);
	KASSERTMSG((freelist <= VM_NFREELIST), "invalid freelist %d",
	    freelist);

	aobj->u_freelist = freelist;
}

/*
 * uao_pagealloc: allocate a page for aobj.
 */

static inline struct vm_page *
uao_pagealloc(struct uvm_object *uobj, voff_t offset, int flags)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;

	if (__predict_true(aobj->u_freelist == VM_NFREELIST))
		return uvm_pagealloc(uobj, offset, NULL, flags);
	else
		return uvm_pagealloc_strat(uobj, offset, NULL, flags,
		    UVM_PGA_STRAT_ONLY, aobj->u_freelist);
}

/*
 * uao_init: set up aobj pager subsystem
 *
 * => called at boot time from uvm_pager_init()
 */

void
uao_init(void)
{
	static int uao_initialized;

	if (uao_initialized)
		return;
	uao_initialized = true;
	LIST_INIT(&uao_list);
	mutex_init(&uao_list_lock, MUTEX_DEFAULT, IPL_NONE);
	pool_init(&uao_swhash_elt_pool, sizeof(struct uao_swhash_elt),
	    0, 0, 0, "uaoeltpl", NULL, IPL_VM);
}

/*
 * uao_reference: hold a reference to an anonymous UVM object.
 */
void
uao_reference(struct uvm_object *uobj)
{
	/* Kernel object is persistent. */
	if (UVM_OBJ_IS_KERN_OBJECT(uobj)) {
		return;
	}
	atomic_inc_uint(&uobj->uo_refs);
}

/*
 * uao_detach: drop a reference to an anonymous UVM object.
 */
void
uao_detach(struct uvm_object *uobj)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct uvm_page_array a;
	struct vm_page *pg;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	/*
	 * Detaching from kernel object is a NOP.
	 */

	if (UVM_OBJ_IS_KERN_OBJECT(uobj))
		return;

	/*
	 * Drop the reference.  If it was the last one, destroy the object.
	 */

	KASSERT(uobj->uo_refs > 0);
	UVMHIST_LOG(maphist,"  (uobj=%#jx)  ref=%jd",
	    (uintptr_t)uobj, uobj->uo_refs, 0, 0);
	if (atomic_dec_uint_nv(&uobj->uo_refs) > 0) {
		UVMHIST_LOG(maphist, "<- done (rc>0)", 0,0,0,0);
		return;
	}

	/*
	 * Remove the aobj from the global list.
	 */

	mutex_enter(&uao_list_lock);
	LIST_REMOVE(aobj, u_list);
	mutex_exit(&uao_list_lock);

	/*
	 * Free all the pages left in the aobj.  For each page, when the
	 * page is no longer busy (and thus after any disk I/O that it is
	 * involved in is complete), release any swap resources and free
	 * the page itself.
	 */
	uvm_page_array_init(&a, uobj, 0);
	rw_enter(uobj->vmobjlock, RW_WRITER);
	while ((pg = uvm_page_array_fill_and_peek(&a, 0, 0)) != NULL) {
		uvm_page_array_advance(&a);
		pmap_page_protect(pg, VM_PROT_NONE);
		if (pg->flags & PG_BUSY) {
			uvm_pagewait(pg, uobj->vmobjlock, "uao_det");
			uvm_page_array_clear(&a);
			rw_enter(uobj->vmobjlock, RW_WRITER);
			continue;
		}
		uao_dropswap(&aobj->u_obj, pg->offset >> PAGE_SHIFT);
		uvm_pagefree(pg);
	}
	uvm_page_array_fini(&a);

	/*
	 * Finally, free the anonymous UVM object itself.
	 */

	uao_free(aobj);
}

/*
 * uao_put: flush pages out of a uvm object
 *
 * => object should be locked by caller.  we may _unlock_ the object
 *	if (and only if) we need to clean a page (PGO_CLEANIT).
 *	XXXJRT Currently, however, we don't.  In the case of cleaning
 *	XXXJRT a page, we simply just deactivate it.  Should probably
 *	XXXJRT handle this better, in the future (although "flushing"
 *	XXXJRT anonymous memory isn't terribly important).
 * => if PGO_CLEANIT is not set, then we will neither unlock the object
 *	nor block.
 * => if PGO_ALLPAGES is set, then all pages in the object are valid targets
 *	for flushing.
 * => we return 0 unless we encountered some sort of I/O error
 *	XXXJRT currently never happens, as we never directly initiate
 *	XXXJRT I/O
 */

static int
uao_put(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct uvm_page_array a;
	struct vm_page *pg;
	voff_t curoff;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	KASSERT(UVM_OBJ_IS_AOBJ(uobj));
	KASSERT(rw_write_held(uobj->vmobjlock));

	if (flags & PGO_ALLPAGES) {
		start = 0;
		stop = aobj->u_pages << PAGE_SHIFT;
	} else {
		start = trunc_page(start);
		if (stop == 0) {
			stop = aobj->u_pages << PAGE_SHIFT;
		} else {
			stop = round_page(stop);
		}
		if (stop > (uint64_t)(aobj->u_pages << PAGE_SHIFT)) {
			printf("uao_put: strange, got an out of range "
			    "flush %#jx > %#jx (fixed)\n",
			    (uintmax_t)stop,
			    (uintmax_t)(aobj->u_pages << PAGE_SHIFT));
			stop = aobj->u_pages << PAGE_SHIFT;
		}
	}
	UVMHIST_LOG(maphist,
	    " flush start=%#jx, stop=%#jx, flags=%#jx",
	    start, stop, flags, 0);

	/*
	 * Don't need to do any work here if we're not freeing
	 * or deactivating pages.
	 */

	if ((flags & (PGO_DEACTIVATE|PGO_FREE)) == 0) {
		rw_exit(uobj->vmobjlock);
		return 0;
	}

	/* locked: uobj */
	uvm_page_array_init(&a, uobj, 0);
	curoff = start;
	while ((pg = uvm_page_array_fill_and_peek(&a, curoff, 0)) != NULL) {
		if (pg->offset >= stop) {
			break;
		}

		/*
		 * wait and try again if the page is busy.
		 */

		if (pg->flags & PG_BUSY) {
			uvm_pagewait(pg, uobj->vmobjlock, "uao_put");
			uvm_page_array_clear(&a);
			rw_enter(uobj->vmobjlock, RW_WRITER);
			continue;
		}
		uvm_page_array_advance(&a);
		curoff = pg->offset + PAGE_SIZE;

		switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) {

		/*
		 * XXX In these first 3 cases, we always just
		 * XXX deactivate the page.  We may want to
		 * XXX handle the different cases more specifically
		 * XXX in the future.
		 */

		case PGO_CLEANIT|PGO_FREE:
		case PGO_CLEANIT|PGO_DEACTIVATE:
		case PGO_DEACTIVATE:
 deactivate_it:
			uvm_pagelock(pg);
			uvm_pagedeactivate(pg);
			uvm_pageunlock(pg);
			break;

		case PGO_FREE:
			/*
			 * If there are multiple references to
			 * the object, just deactivate the page.
			 */

			if (uobj->uo_refs > 1)
				goto deactivate_it;

			/*
			 * free the swap slot and the page.
			 */

			pmap_page_protect(pg, VM_PROT_NONE);

			/*
			 * freeing swapslot here is not strictly necessary.
			 * however, leaving it here doesn't save much
			 * because we need to update swap accounting anyway.
			 */

			uao_dropswap(uobj, pg->offset >> PAGE_SHIFT);
			uvm_pagefree(pg);
			break;

		default:
			panic("%s: impossible", __func__);
		}
	}
	rw_exit(uobj->vmobjlock);
	uvm_page_array_fini(&a);
	return 0;
}

/*
 * uao_get: fetch me a page
 *
 * we have three cases:
 * 1: page is resident     -> just return the page.
 * 2: page is zero-fill    -> allocate a new page and zero it.
 * 3: page is swapped out  -> fetch the page from swap.
 *
 * case 1 can be handled with PGO_LOCKED, cases 2 and 3 cannot.
 * so, if the "center" page hits case 2/3 then we will need to return EBUSY.
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */
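
/*
 * Concrete reading of the three cases: with PGO_LOCKED, only case 1 can
 * be satisfied, so a swapped-out or not-yet-allocated center page makes
 * us return EBUSY and the caller must retry without PGO_LOCKED.  With
 * PGO_SYNCIO, case 2 allocates via uao_pagealloc() (zeroing the page
 * unless PGO_OVERWRITE is set) and case 3 additionally fills the page
 * from its swap slot with uvm_swap_get().
 */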

static int
uao_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps,
    int *npagesp, int centeridx, vm_prot_t access_type, int advice, int flags)
{
	voff_t current_offset;
	struct vm_page *ptmp;
	int lcv, gotpages, maxpages, swslot, pageidx;
	bool overwrite = ((flags & PGO_OVERWRITE) != 0);
	struct uvm_page_array a;

	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(pdhist, "aobj=%#jx offset=%jd, flags=%#jx",
	    (uintptr_t)uobj, offset, flags,0);

	/*
	 * the object must be locked.  it can only be a read lock when
	 * processing a read fault with PGO_LOCKED.
	 */

	KASSERT(UVM_OBJ_IS_AOBJ(uobj));
	KASSERT(rw_lock_held(uobj->vmobjlock));
	KASSERT(rw_write_held(uobj->vmobjlock) ||
	    ((flags & PGO_LOCKED) != 0 && (access_type & VM_PROT_WRITE) == 0));

	/*
	 * get number of pages
	 */

	maxpages = *npagesp;

	/*
	 * step 1: handle the case where fault data structures are locked.
	 */

	if (flags & PGO_LOCKED) {

		/*
		 * step 1a: get pages that are already resident.  only do
		 * this if the data structures are locked (i.e. the first
		 * time through).
		 */

		uvm_page_array_init(&a, uobj, 0);
		gotpages = 0;	/* # of pages we got so far */
		for (lcv = 0; lcv < maxpages; lcv++) {
			ptmp = uvm_page_array_fill_and_peek(&a,
			    offset + (lcv << PAGE_SHIFT), maxpages);
			if (ptmp == NULL) {
				break;
			}
			KASSERT(ptmp->offset >= offset);
			lcv = (ptmp->offset - offset) >> PAGE_SHIFT;
			if (lcv >= maxpages) {
				break;
			}
			uvm_page_array_advance(&a);

			/*
			 * to be useful must get a non-busy page
			 */

			if ((ptmp->flags & PG_BUSY) != 0) {
				continue;
			}

			/*
			 * useful page: plug it in our result array
			 */

			KASSERT(uvm_pagegetdirty(ptmp) !=
			    UVM_PAGE_STATUS_CLEAN);
			pps[lcv] = ptmp;
			gotpages++;
		}
		uvm_page_array_fini(&a);

		/*
		 * step 1b: now we've either done everything needed or we
		 * need to unlock and do some waiting or I/O.
		 */

		UVMHIST_LOG(pdhist, "<- done (done=%jd)",
		    (pps[centeridx] != NULL), 0,0,0);
		*npagesp = gotpages;
		return pps[centeridx] != NULL ? 0 : EBUSY;
	}

	/*
	 * step 2: get non-resident or busy pages.
	 * object is locked.  data structures are unlocked.
	 */

	if ((flags & PGO_SYNCIO) == 0) {
		goto done;
	}

	uvm_page_array_init(&a, uobj, 0);
	for (lcv = 0, current_offset = offset ; lcv < maxpages ;) {

		/*
		 * we have yet to locate the current page (pps[lcv]).  we
		 * first look for a page that is already at the current offset.
		 * if we find a page, we check to see if it is busy or
		 * released.  if that is the case, then we sleep on the page
		 * until it is no longer busy or released and repeat the lookup.
		 * if the page we found is neither busy nor released, then we
		 * busy it (so we own it) and plug it into pps[lcv].  we are
		 * ready to move on to the next page.
		 */

		ptmp = uvm_page_array_fill_and_peek(&a, current_offset,
		    maxpages - lcv);

		if (ptmp != NULL && ptmp->offset == current_offset) {
			/* page is there, see if we need to wait on it */
			if ((ptmp->flags & PG_BUSY) != 0) {
				UVMHIST_LOG(pdhist,
				    "sleeping, ptmp->flags %#jx\n",
				    ptmp->flags,0,0,0);
				uvm_pagewait(ptmp, uobj->vmobjlock, "uao_get");
				rw_enter(uobj->vmobjlock, RW_WRITER);
				uvm_page_array_clear(&a);
				continue;
			}

			/*
			 * if we get here then the page is resident and
			 * unbusy.  we busy it now (so we own it).  if
			 * overwriting, mark the page dirty up front as
			 * it will be zapped via an unmanaged mapping.
			 */

			KASSERT(uvm_pagegetdirty(ptmp) !=
			    UVM_PAGE_STATUS_CLEAN);
			if (overwrite) {
				uvm_pagemarkdirty(ptmp, UVM_PAGE_STATUS_DIRTY);
			}
			/* we own it, caller must un-busy */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uao_get2");
			pps[lcv++] = ptmp;
			current_offset += PAGE_SIZE;
			uvm_page_array_advance(&a);
			continue;
		} else {
			KASSERT(ptmp == NULL || ptmp->offset > current_offset);
		}

		/*
		 * not resident.  allocate a new busy/fake/clean page in the
		 * object.  if it's in swap we need to do I/O to fill in the
		 * data, otherwise the page needs to be cleared: if it's not
		 * destined to be overwritten, then zero it here and now.
		 */

		pageidx = current_offset >> PAGE_SHIFT;
		swslot = uao_find_swslot(uobj, pageidx);
		ptmp = uao_pagealloc(uobj, current_offset,
		    swslot != 0 || overwrite ? 0 : UVM_PGA_ZERO);

		/* out of RAM? */
		if (ptmp == NULL) {
			rw_exit(uobj->vmobjlock);
			UVMHIST_LOG(pdhist, "sleeping, ptmp == NULL",0,0,0,0);
			uvm_wait("uao_getpage");
			rw_enter(uobj->vmobjlock, RW_WRITER);
			uvm_page_array_clear(&a);
			continue;
		}

		/*
		 * if swslot == 0, page hasn't existed before and is zeroed.
		 * otherwise we have a "fake/busy/clean" page that we just
		 * allocated.  do the needed "i/o", reading from swap.
		 */

		if (swslot != 0) {
#if defined(VMSWAP)
			int error;

			UVMHIST_LOG(pdhist, "pagein from swslot %jd",
			    swslot, 0,0,0);

			/*
			 * page in the swapped-out page.
			 * unlock object for i/o, relock when done.
			 */

			uvm_page_array_clear(&a);
			rw_exit(uobj->vmobjlock);
			error = uvm_swap_get(ptmp, swslot, PGO_SYNCIO);
			rw_enter(uobj->vmobjlock, RW_WRITER);

			/*
			 * I/O done.  check for errors.
			 */

			if (error != 0) {
				UVMHIST_LOG(pdhist, "<- done (error=%jd)",
				    error,0,0,0);

				/*
				 * remove the swap slot from the aobj
				 * and mark the aobj as having no real slot.
				 * don't free the swap slot, thus preventing
				 * it from being used again.
				 */

				swslot = uao_set_swslot(uobj, pageidx,
				    SWSLOT_BAD);
				if (swslot > 0) {
					uvm_swap_markbad(swslot, 1);
				}

				uvm_pagefree(ptmp);
				rw_exit(uobj->vmobjlock);
				UVMHIST_LOG(pdhist, "<- done (error)",
				    error,lcv,0,0);
				if (lcv != 0) {
					uvm_page_unbusy(pps, lcv);
				}
				memset(pps, 0, maxpages * sizeof(pps[0]));
				uvm_page_array_fini(&a);
				return error;
			}
#else /* defined(VMSWAP) */
			panic("%s: pagein", __func__);
#endif /* defined(VMSWAP) */
		}

		/*
		 * note that we will allow the page to be writably mapped
		 * (!PG_RDONLY) regardless of access_type.  if overwrite,
		 * the page can be modified through an unmanaged mapping
		 * so mark it dirty up front.
		 */
		if (overwrite) {
			uvm_pagemarkdirty(ptmp, UVM_PAGE_STATUS_DIRTY);
		} else {
			uvm_pagemarkdirty(ptmp, UVM_PAGE_STATUS_UNKNOWN);
		}

		/*
		 * we got the page!  clear the fake flag (indicates valid
		 * data now in page) and plug into our result array.  note
		 * that page is still busy.
		 *
		 * it is the callers job to:
		 * => check if the page is released
		 * => unbusy the page
		 * => activate the page
		 */
		KASSERT(uvm_pagegetdirty(ptmp) != UVM_PAGE_STATUS_CLEAN);
		KASSERT((ptmp->flags & PG_FAKE) != 0);
		KASSERT(ptmp->offset == current_offset);
		ptmp->flags &= ~PG_FAKE;
		pps[lcv++] = ptmp;
		current_offset += PAGE_SIZE;
	}
	uvm_page_array_fini(&a);

	/*
	 * finally, unlock object and return.
	 */

done:
	rw_exit(uobj->vmobjlock);
	UVMHIST_LOG(pdhist, "<- done (OK)",0,0,0,0);
	return 0;
}

#if defined(VMSWAP)

/*
 * uao_dropswap: release any swap resources from this aobj page.
 *
 * => aobj must be locked or have a reference count of 0.
 */

void
uao_dropswap(struct uvm_object *uobj, int pageidx)
{
	int slot;

	KASSERT(UVM_OBJ_IS_AOBJ(uobj));

	slot = uao_set_swslot(uobj, pageidx, 0);
	if (slot) {
		uvm_swap_free(slot, 1);
	}
}

/*
 * page in every page in every aobj that is paged-out to a range of swslots.
 *
 * => nothing should be locked.
 * => returns true if pagein was aborted due to lack of memory.
 */

bool
uao_swap_off(int startslot, int endslot)
{
	struct uvm_aobj *aobj;

	/*
	 * Walk the list of all anonymous UVM objects.  Grab the first.
	 */
	mutex_enter(&uao_list_lock);
	if ((aobj = LIST_FIRST(&uao_list)) == NULL) {
		mutex_exit(&uao_list_lock);
		return false;
	}
	uao_reference(&aobj->u_obj);

	do {
		struct uvm_aobj *nextaobj;
		bool rv;

		/*
		 * Prefetch the next object and immediately hold a reference
		 * on it, so neither the current nor the next entry could
		 * disappear while we are iterating.
		 */
		if ((nextaobj = LIST_NEXT(aobj, u_list)) != NULL) {
			uao_reference(&nextaobj->u_obj);
		}
		mutex_exit(&uao_list_lock);

		/*
		 * Page in all pages in the swap slot range.
		 */
		rw_enter(aobj->u_obj.vmobjlock, RW_WRITER);
		rv = uao_pagein(aobj, startslot, endslot);
		rw_exit(aobj->u_obj.vmobjlock);

		/* Drop the reference of the current object. */
		uao_detach(&aobj->u_obj);
		if (rv) {
			if (nextaobj) {
				uao_detach(&nextaobj->u_obj);
			}
			return rv;
		}

		aobj = nextaobj;
		mutex_enter(&uao_list_lock);
	} while (aobj);

	mutex_exit(&uao_list_lock);
	return false;
}

/*
 * page in any pages from aobj in the given range.
 *
 * => aobj must be locked and is returned locked.
 * => returns true if pagein was aborted due to lack of memory.
 */
static bool
uao_pagein(struct uvm_aobj *aobj, int startslot, int endslot)
{
	bool rv;

	if (UAO_USES_SWHASH(aobj)) {
		struct uao_swhash_elt *elt;
		int buck;

 restart:
		for (buck = aobj->u_swhashmask; buck >= 0; buck--) {
			for (elt = LIST_FIRST(&aobj->u_swhash[buck]);
			     elt != NULL;
			     elt = LIST_NEXT(elt, list)) {
				int i;

				for (i = 0; i < UAO_SWHASH_CLUSTER_SIZE; i++) {
					int slot = elt->slots[i];

					/*
					 * if the slot isn't in range, skip it.
					 */

					if (slot < startslot ||
					    slot >= endslot) {
						continue;
					}

					/*
					 * process the page,
					 * then start over on this object
					 * since the swhash elt
					 * may have been freed.
					 */

					rv = uao_pagein_page(aobj,
					    UAO_SWHASH_ELT_PAGEIDX_BASE(elt) + i);
					if (rv) {
						return rv;
					}
					goto restart;
				}
			}
		}
	} else {
		int i;

		for (i = 0; i < aobj->u_pages; i++) {
			int slot = aobj->u_swslots[i];

			/*
			 * if the slot isn't in range, skip it
			 */

			if (slot < startslot || slot >= endslot) {
				continue;
			}

			/*
			 * process the page.
			 */

			rv = uao_pagein_page(aobj, i);
			if (rv) {
				return rv;
			}
		}
	}

	return false;
}

/*
 * uao_pagein_page: page in a single page from an anonymous UVM object.
 *
 * => Returns true if pagein was aborted due to lack of memory.
 * => Object must be locked and is returned locked.
 */

static bool
uao_pagein_page(struct uvm_aobj *aobj, int pageidx)
{
	struct uvm_object *uobj = &aobj->u_obj;
	struct vm_page *pg;
	int rv, npages;

	pg = NULL;
	npages = 1;

	KASSERT(rw_write_held(uobj->vmobjlock));
	rv = uao_get(uobj, (voff_t)pageidx << PAGE_SHIFT, &pg, &npages,
	    0, VM_PROT_READ | VM_PROT_WRITE, 0, PGO_SYNCIO);

	/*
	 * relock and finish up.
	 */

	rw_enter(uobj->vmobjlock, RW_WRITER);
	switch (rv) {
	case 0:
		break;

	case EIO:
	case ERESTART:

		/*
		 * nothing more to do on errors.
		 * ERESTART can only mean that the anon was freed,
		 * so again there's nothing to do.
		 */

		return false;

	default:
		return true;
	}

	/*
	 * ok, we've got the page now.
	 * mark it as dirty, clear its swslot and un-busy it.
	 */
	uao_dropswap(&aobj->u_obj, pageidx);

	/*
	 * make sure it's on a page queue.
	 */
	uvm_pagelock(pg);
	uvm_pageenqueue(pg);
	uvm_pagewakeup(pg);
	uvm_pageunlock(pg);

	pg->flags &= ~(PG_BUSY|PG_FAKE);
	uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
	UVM_PAGE_OWN(pg, NULL);

	return false;
}

/*
 * uao_dropswap_range: drop swapslots in the range.
 *
 * => aobj must be locked and is returned locked.
 * => start is inclusive.  end is exclusive.
 */
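
/*
 * Example: uao_dropswap_range(uobj, 0, 0) frees every swap slot in the
 * object, since end == 0 is treated as "no limit" below; uao_free()
 * relies on this to release all swap resources before destroying the
 * object.
 */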

void
uao_dropswap_range(struct uvm_object *uobj, voff_t start, voff_t end)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	int swpgonlydelta = 0;

	KASSERT(UVM_OBJ_IS_AOBJ(uobj));
	KASSERT(rw_write_held(uobj->vmobjlock));

	if (end == 0) {
		end = INT64_MAX;
	}

	if (UAO_USES_SWHASH(aobj)) {
		int i, hashbuckets = aobj->u_swhashmask + 1;
		voff_t taghi;
		voff_t taglo;

		taglo = UAO_SWHASH_ELT_TAG(start);
		taghi = UAO_SWHASH_ELT_TAG(end);

		for (i = 0; i < hashbuckets; i++) {
			struct uao_swhash_elt *elt, *next;

			for (elt = LIST_FIRST(&aobj->u_swhash[i]);
			     elt != NULL;
			     elt = next) {
				int startidx, endidx;
				int j;

				next = LIST_NEXT(elt, list);

				if (elt->tag < taglo || taghi < elt->tag) {
					continue;
				}

				if (elt->tag == taglo) {
					startidx =
					    UAO_SWHASH_ELT_PAGESLOT_IDX(start);
				} else {
					startidx = 0;
				}

				if (elt->tag == taghi) {
					endidx =
					    UAO_SWHASH_ELT_PAGESLOT_IDX(end);
				} else {
					endidx = UAO_SWHASH_CLUSTER_SIZE;
				}

				for (j = startidx; j < endidx; j++) {
					int slot = elt->slots[j];

					KASSERT(uvm_pagelookup(&aobj->u_obj,
					    (UAO_SWHASH_ELT_PAGEIDX_BASE(elt)
					    + j) << PAGE_SHIFT) == NULL);
					if (slot > 0) {
						uvm_swap_free(slot, 1);
						swpgonlydelta++;
						KASSERT(elt->count > 0);
						elt->slots[j] = 0;
						elt->count--;
					}
				}

				if (elt->count == 0) {
					LIST_REMOVE(elt, list);
					pool_put(&uao_swhash_elt_pool, elt);
				}
			}
		}
	} else {
		int i;

		if (aobj->u_pages < end) {
			end = aobj->u_pages;
		}
		for (i = start; i < end; i++) {
			int slot = aobj->u_swslots[i];

			if (slot > 0) {
				uvm_swap_free(slot, 1);
				swpgonlydelta++;
			}
		}
	}

	/*
	 * adjust the counter of pages only in swap for all
	 * the swap slots we've freed.
	 */

	if (swpgonlydelta > 0) {
		KASSERT(uvmexp.swpgonly >= swpgonlydelta);
		atomic_add_int(&uvmexp.swpgonly, -swpgonlydelta);
	}
}

#endif /* defined(VMSWAP) */