/*	$NetBSD: uvm_aobj.c,v 1.155 2022/04/09 23:38:33 riastradh Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
 * Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_aobj.c,v 1.1.2.5 1998/02/06 05:14:38 chs Exp
 */

/*
 * uvm_aobj.c: anonymous memory uvm_object pager
 *
 * author: Chuck Silvers <chuq@chuq.com>
 * started: Jan-1998
 *
 * - design mostly from Chuck Cranor
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_aobj.c,v 1.155 2022/04/09 23:38:33 riastradh Exp $");

#ifdef _KERNEL_OPT
#include "opt_uvmhist.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/pool.h>
#include <sys/atomic.h>

#include <uvm/uvm.h>
#include <uvm/uvm_page_array.h>

/*
 * An anonymous UVM object (aobj) manages anonymous memory.  In addition to
 * keeping the list of resident pages, it may also keep a list of allocated
 * swap blocks.  Depending on the size of the object, this list is either
 * stored in an array (small objects) or in a hash table (large objects).
 *
 * Lock order
 *
 *	uao_list_lock ->
 *		uvm_object::vmobjlock
 */

/*
 * Note: for hash tables, we break the address space of the aobj into blocks
 * of UAO_SWHASH_CLUSTER_SIZE pages, which shall be a power of two.
 */

#define	UAO_SWHASH_CLUSTER_SHIFT	4
#define	UAO_SWHASH_CLUSTER_SIZE		(1 << UAO_SWHASH_CLUSTER_SHIFT)

/* Get the "tag" for this page index. */
#define	UAO_SWHASH_ELT_TAG(idx)		((idx) >> UAO_SWHASH_CLUSTER_SHIFT)
#define	UAO_SWHASH_ELT_PAGESLOT_IDX(idx) \
	((idx) & (UAO_SWHASH_CLUSTER_SIZE - 1))

/* Given an ELT and a page index, find the swap slot. */
#define	UAO_SWHASH_ELT_PAGESLOT(elt, idx) \
	((elt)->slots[UAO_SWHASH_ELT_PAGESLOT_IDX(idx)])

/* Given an ELT, return its pageidx base. */
#define	UAO_SWHASH_ELT_PAGEIDX_BASE(elt) \
	((elt)->tag << UAO_SWHASH_CLUSTER_SHIFT)

/* The hash function. */
#define	UAO_SWHASH_HASH(aobj, idx) \
	(&(aobj)->u_swhash[(((idx) >> UAO_SWHASH_CLUSTER_SHIFT) \
	    & (aobj)->u_swhashmask)])
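
/*
 * Worked example (illustration only): with UAO_SWHASH_CLUSTER_SHIFT == 4,
 * each hash element covers a cluster of 16 consecutive pages.  For page
 * index 0x123:
 *
 *	UAO_SWHASH_ELT_TAG(0x123)          == 0x12  (cluster number)
 *	UAO_SWHASH_ELT_PAGESLOT_IDX(0x123) == 0x3   (slot within the cluster)
 *
 * and the bucket searched is u_swhash[0x12 & u_swhashmask], so all pages
 * of one cluster always land in the same bucket.
 */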

/*
 * The threshold which determines whether we will use an array or a
 * hash table to store the list of allocated swap blocks.
 */
#define	UAO_SWHASH_THRESHOLD		(UAO_SWHASH_CLUSTER_SIZE * 4)
#define	UAO_USES_SWHASH(aobj) \
	((aobj)->u_pages > UAO_SWHASH_THRESHOLD)

/* The number of buckets in a hash, with an upper bound. */
#define	UAO_SWHASH_MAXBUCKETS		256
#define	UAO_SWHASH_BUCKETS(aobj) \
	(MIN((aobj)->u_pages >> UAO_SWHASH_CLUSTER_SHIFT, UAO_SWHASH_MAXBUCKETS))
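
/*
 * Sizing example (illustration only): with the defaults above, the
 * threshold is 4 * 16 == 64 pages, i.e. 256 kB with 4 kB pages.  An
 * object of at most 64 pages uses a plain int array (one slot per page);
 * a larger object uses a hash with one bucket per 16-page cluster,
 * capped at 256 buckets.
 */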

/*
 * uao_swhash_elt: when a hash table is being used, this structure defines
 * the format of an entry in the bucket list.
 */

struct uao_swhash_elt {
	LIST_ENTRY(uao_swhash_elt) list;	/* the hash list */
	voff_t tag;				/* our 'tag' */
	int count;				/* our number of active slots */
	int slots[UAO_SWHASH_CLUSTER_SIZE];	/* the slots */
};

/*
 * uao_swhash: the swap hash table structure
 */

LIST_HEAD(uao_swhash, uao_swhash_elt);

/*
 * uao_swhash_elt_pool: pool of uao_swhash_elt structures.
 * Note: pages for this pool must not come from a pageable kernel map.
 */
static struct pool uao_swhash_elt_pool __cacheline_aligned;

/*
 * uvm_aobj: the actual anon-backed uvm_object
 *
 * => the uvm_object is at the top of the structure, which allows
 *    (struct uvm_aobj *) == (struct uvm_object *)
 * => only one of u_swslots and u_swhash is used in any given aobj
 */

struct uvm_aobj {
	struct uvm_object u_obj;	/* has: lock, pgops, #pages, #refs */
	pgoff_t u_pages;		/* number of pages in entire object */
	int u_flags;			/* the flags (see uvm_aobj.h) */
	int *u_swslots;			/* array of offset->swapslot mappings */
	/*
	 * hashtable of offset->swapslot mappings
	 * (u_swhash is an array of bucket heads)
	 */
	struct uao_swhash *u_swhash;
	u_long u_swhashmask;		/* mask for hashtable */
	LIST_ENTRY(uvm_aobj) u_list;	/* global list of aobjs */
	int u_freelist;			/* freelist to allocate pages from */
};
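
/*
 * Because u_obj is the first member, a struct uvm_aobj pointer and its
 * struct uvm_object pointer are interchangeable: the pager entry points
 * below cast the passed-in uobj to (struct uvm_aobj *), and uao_free()
 * goes the other way via &aobj->u_obj.
 */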

static void	uao_free(struct uvm_aobj *);
static int	uao_get(struct uvm_object *, voff_t, struct vm_page **,
		    int *, int, vm_prot_t, int, int);
static int	uao_put(struct uvm_object *, voff_t, voff_t, int);

#if defined(VMSWAP)
static struct uao_swhash_elt *uao_find_swhash_elt
    (struct uvm_aobj *, int, bool);

static bool uao_pagein(struct uvm_aobj *, int, int);
static bool uao_pagein_page(struct uvm_aobj *, int);
#endif /* defined(VMSWAP) */

static struct vm_page	*uao_pagealloc(struct uvm_object *, voff_t, int);

/*
 * aobj_pager
 *
 * note that some functions (e.g. put) are handled elsewhere
 */

const struct uvm_pagerops aobj_pager = {
	.pgo_reference = uao_reference,
	.pgo_detach = uao_detach,
	.pgo_get = uao_get,
	.pgo_put = uao_put,
};
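
/*
 * Note: UVM_OBJ_IS_AOBJ() (see uvm_object.h) identifies an aobj by
 * comparing an object's pgops pointer against this table, which is why
 * the KASSERTs throughout this file can cheaply verify that a passed-in
 * uvm_object really is anonymous.
 */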

/*
 * uao_list: global list of active aobjs, locked by uao_list_lock
 */

static LIST_HEAD(aobjlist, uvm_aobj) uao_list __cacheline_aligned;
static kmutex_t uao_list_lock __cacheline_aligned;

/*
 * hash table/array related functions
 */

#if defined(VMSWAP)

/*
 * uao_find_swhash_elt: find (or create) a hash table entry for a page
 * offset.
 *
 * => the object should be locked by the caller
 */

static struct uao_swhash_elt *
uao_find_swhash_elt(struct uvm_aobj *aobj, int pageidx, bool create)
{
	struct uao_swhash *swhash;
	struct uao_swhash_elt *elt;
	voff_t page_tag;

	swhash = UAO_SWHASH_HASH(aobj, pageidx);
	page_tag = UAO_SWHASH_ELT_TAG(pageidx);

	/*
	 * now search the bucket for the requested tag
	 */

	LIST_FOREACH(elt, swhash, list) {
		if (elt->tag == page_tag) {
			return elt;
		}
	}
	if (!create) {
		return NULL;
	}

	/*
	 * allocate a new entry for the bucket and init/insert it in
	 */

	elt = pool_get(&uao_swhash_elt_pool, PR_NOWAIT);
	if (elt == NULL) {
		return NULL;
	}
	LIST_INSERT_HEAD(swhash, elt, list);
	elt->tag = page_tag;
	elt->count = 0;
	memset(elt->slots, 0, sizeof(elt->slots));
	return elt;
}

/*
 * uao_find_swslot: find the swap slot number for an aobj/pageidx
 *
 * => object must be locked by caller
 */

int
uao_find_swslot(struct uvm_object *uobj, int pageidx)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct uao_swhash_elt *elt;

	KASSERT(UVM_OBJ_IS_AOBJ(uobj));

	/*
	 * if noswap flag is set, then we never return a slot
	 */

	if (aobj->u_flags & UAO_FLAG_NOSWAP)
		return 0;

	/*
	 * if hashing, look in hash table.
	 */

	if (UAO_USES_SWHASH(aobj)) {
		elt = uao_find_swhash_elt(aobj, pageidx, false);
		return elt ? UAO_SWHASH_ELT_PAGESLOT(elt, pageidx) : 0;
	}

	/*
	 * otherwise, look in the array
	 */

	return aobj->u_swslots[pageidx];
}

/*
 * uao_set_swslot: set the swap slot for a page in an aobj.
 *
 * => setting a slot to zero frees the slot
 * => object must be locked by caller
 * => we return the old slot number, or -1 if we failed to allocate
 *    memory to record the new slot number
 */

int
uao_set_swslot(struct uvm_object *uobj, int pageidx, int slot)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct uao_swhash_elt *elt;
	int oldslot;
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(pdhist, "aobj %#jx pageidx %jd slot %jd",
	    (uintptr_t)aobj, pageidx, slot, 0);

	KASSERT(rw_write_held(uobj->vmobjlock) || uobj->uo_refs == 0);
	KASSERT(UVM_OBJ_IS_AOBJ(uobj));

	/*
	 * if noswap flag is set, then we can't set a non-zero slot.
	 */

	if (aobj->u_flags & UAO_FLAG_NOSWAP) {
		KASSERTMSG(slot == 0, "uao_set_swslot: no swap object");
		return 0;
	}

	/*
	 * are we using a hash table?  if so, add it in the hash.
	 */

	if (UAO_USES_SWHASH(aobj)) {

		/*
		 * Avoid allocating an entry just to free it again if
		 * the page had no swap slot in the first place, and
		 * we are freeing.
		 */

		elt = uao_find_swhash_elt(aobj, pageidx, slot != 0);
		if (elt == NULL) {
			return slot ? -1 : 0;
		}

		oldslot = UAO_SWHASH_ELT_PAGESLOT(elt, pageidx);
		UAO_SWHASH_ELT_PAGESLOT(elt, pageidx) = slot;

		/*
		 * now adjust the elt's reference counter and free it if we've
		 * dropped it to zero.
		 */

		if (slot) {
			if (oldslot == 0)
				elt->count++;
		} else {
			if (oldslot)
				elt->count--;

			if (elt->count == 0) {
				LIST_REMOVE(elt, list);
				pool_put(&uao_swhash_elt_pool, elt);
			}
		}
	} else {
		/* we are using an array */
		oldslot = aobj->u_swslots[pageidx];
		aobj->u_swslots[pageidx] = slot;
	}
	return oldslot;
}
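
/*
 * Typical caller pattern (a sketch; this mirrors uao_dropswap() below):
 * free a page's swap backing by setting its slot to zero, then release
 * the swap slot that was previously recorded, if any:
 *
 *	int oldslot = uao_set_swslot(uobj, pageidx, 0);
 *	if (oldslot > 0)
 *		uvm_swap_free(oldslot, 1);
 */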

#endif /* defined(VMSWAP) */

/*
 * end of hash/array functions
 */

/*
 * uao_free: free all resources held by an aobj, and then free the aobj
 *
 * => the aobj should be dead
 */

static void
uao_free(struct uvm_aobj *aobj)
{
	struct uvm_object *uobj = &aobj->u_obj;

	KASSERT(UVM_OBJ_IS_AOBJ(uobj));
	KASSERT(rw_write_held(uobj->vmobjlock));
	uao_dropswap_range(uobj, 0, 0);
	rw_exit(uobj->vmobjlock);

#if defined(VMSWAP)
	if (UAO_USES_SWHASH(aobj)) {

		/*
		 * free the hash table itself.
		 */

		hashdone(aobj->u_swhash, HASH_LIST, aobj->u_swhashmask);
	} else {

		/*
		 * free the array itself.
		 */

		kmem_free(aobj->u_swslots, aobj->u_pages * sizeof(int));
	}
#endif /* defined(VMSWAP) */

	/*
	 * finally free the aobj itself
	 */

	uvm_obj_destroy(uobj, true);
	kmem_free(aobj, sizeof(struct uvm_aobj));
}

/*
 * pager functions
 */

/*
 * uao_create: create an aobj of the given size and return its uvm_object.
 *
 * => for normal use, flags are always zero
 * => for the kernel object, the flags are:
 *	UAO_FLAG_KERNOBJ - allocate the kernel object (can only happen once)
 *	UAO_FLAG_KERNSWAP - enable swapping of kernel object ("           ")
 */
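
/*
 * Example (a sketch, not from any particular caller): create a regular
 * anonymous object backing 1 MB of pageable memory, then drop it:
 *
 *	struct uvm_object *uobj = uao_create(1024 * 1024, 0);
 *	...
 *	uao_detach(uobj);
 *
 * The kernel object is instead created once at boot with
 * UAO_FLAG_KERNOBJ, and upgraded with UAO_FLAG_KERNSWAP when swap
 * becomes available.
 */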

struct uvm_object *
uao_create(voff_t size, int flags)
{
	static struct uvm_aobj kernel_object_store;
	static krwlock_t bootstrap_kernel_object_lock;
	static int kobj_alloced __diagused = 0;
	pgoff_t pages = round_page((uint64_t)size) >> PAGE_SHIFT;
	struct uvm_aobj *aobj;
	int refs;

	/*
	 * Allocate a new aobj, unless kernel object is requested.
	 */

	if (flags & UAO_FLAG_KERNOBJ) {
		KASSERT(!kobj_alloced);
		aobj = &kernel_object_store;
		aobj->u_pages = pages;
		aobj->u_flags = UAO_FLAG_NOSWAP;
		refs = UVM_OBJ_KERN;
		kobj_alloced = UAO_FLAG_KERNOBJ;
	} else if (flags & UAO_FLAG_KERNSWAP) {
		KASSERT(kobj_alloced == UAO_FLAG_KERNOBJ);
		aobj = &kernel_object_store;
		kobj_alloced = UAO_FLAG_KERNSWAP;
		refs = 0xdeadbeaf; /* XXX: gcc */
	} else {
		aobj = kmem_alloc(sizeof(struct uvm_aobj), KM_SLEEP);
		aobj->u_pages = pages;
		aobj->u_flags = 0;
		refs = 1;
	}

	/*
	 * no freelist by default
	 */

	aobj->u_freelist = VM_NFREELIST;

	/*
	 * allocate hash/array if necessary
	 *
	 * note: in the KERNSWAP case there is no need to worry about locking
	 * since we are still booting and should be the only thread around.
	 */

	const int kernswap = (flags & UAO_FLAG_KERNSWAP) != 0;
	if (flags == 0 || kernswap) {
#if defined(VMSWAP)

		/* allocate hash table or array depending on object size */
		if (UAO_USES_SWHASH(aobj)) {
			aobj->u_swhash = hashinit(UAO_SWHASH_BUCKETS(aobj),
			    HASH_LIST, true, &aobj->u_swhashmask);
		} else {
			aobj->u_swslots = kmem_zalloc(pages * sizeof(int),
			    KM_SLEEP);
		}
#endif /* defined(VMSWAP) */

		/*
		 * Replace kernel_object's temporary static lock with
		 * a regular rw_obj.  We cannot use uvm_obj_setlock()
		 * because that would try to free the old lock.
		 */

		if (kernswap) {
			aobj->u_obj.vmobjlock = rw_obj_alloc();
			rw_destroy(&bootstrap_kernel_object_lock);
		}
		if (flags) {
			aobj->u_flags &= ~UAO_FLAG_NOSWAP; /* clear noswap */
			return &aobj->u_obj;
		}
	}

	/*
	 * Initialise UVM object.
	 */

	const bool kernobj = (flags & UAO_FLAG_KERNOBJ) != 0;
	uvm_obj_init(&aobj->u_obj, &aobj_pager, !kernobj, refs);
	if (__predict_false(kernobj)) {
		/* Use a temporary static lock for kernel_object. */
		rw_init(&bootstrap_kernel_object_lock);
		uvm_obj_setlock(&aobj->u_obj, &bootstrap_kernel_object_lock);
	}

	/*
	 * now that aobj is ready, add it to the global list
	 */

	mutex_enter(&uao_list_lock);
	LIST_INSERT_HEAD(&uao_list, aobj, u_list);
	mutex_exit(&uao_list_lock);
	return(&aobj->u_obj);
}

/*
 * uao_set_pgfl: allocate pages only from the specified freelist.
 *
 * => must be called before any pages are allocated for the object.
 * => reset by setting it to VM_NFREELIST, meaning any freelist.
 */

void
uao_set_pgfl(struct uvm_object *uobj, int freelist)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;

	KASSERTMSG((0 <= freelist), "invalid freelist %d", freelist);
	KASSERTMSG((freelist <= VM_NFREELIST), "invalid freelist %d",
	    freelist);

	aobj->u_freelist = freelist;
}
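
/*
 * Usage note (a sketch; the freelist constant is platform-specific and
 * exists only on some ports): a driver that needs the object's pages to
 * be DMA-reachable could, immediately after uao_create() and before
 * faulting any pages in, do e.g.:
 *
 *	uao_set_pgfl(uobj, VM_FREELIST_FIRST4G);
 */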

/*
 * uao_pagealloc: allocate a page for aobj.
 */

static inline struct vm_page *
uao_pagealloc(struct uvm_object *uobj, voff_t offset, int flags)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;

	if (__predict_true(aobj->u_freelist == VM_NFREELIST))
		return uvm_pagealloc(uobj, offset, NULL, flags);
	else
		return uvm_pagealloc_strat(uobj, offset, NULL, flags,
		    UVM_PGA_STRAT_ONLY, aobj->u_freelist);
}

/*
 * uao_init: set up aobj pager subsystem
 *
 * => called at boot time from uvm_pager_init()
 */

void
uao_init(void)
{
	static int uao_initialized;

	if (uao_initialized)
		return;
	uao_initialized = true;
	LIST_INIT(&uao_list);
	mutex_init(&uao_list_lock, MUTEX_DEFAULT, IPL_NONE);
	pool_init(&uao_swhash_elt_pool, sizeof(struct uao_swhash_elt),
	    0, 0, 0, "uaoeltpl", NULL, IPL_VM);
}

/*
 * uao_reference: hold a reference to an anonymous UVM object.
 */
void
uao_reference(struct uvm_object *uobj)
{
	/* Kernel object is persistent. */
	if (UVM_OBJ_IS_KERN_OBJECT(uobj)) {
		return;
	}
	atomic_inc_uint(&uobj->uo_refs);
}

/*
 * uao_detach: drop a reference to an anonymous UVM object.
 */
void
uao_detach(struct uvm_object *uobj)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct uvm_page_array a;
	struct vm_page *pg;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	/*
	 * Detaching from kernel object is a NOP.
	 */

	if (UVM_OBJ_IS_KERN_OBJECT(uobj))
		return;

	/*
	 * Drop the reference.  If it was the last one, destroy the object.
	 */

	KASSERT(uobj->uo_refs > 0);
	UVMHIST_LOG(maphist,"  (uobj=%#jx)  ref=%jd",
	    (uintptr_t)uobj, uobj->uo_refs, 0, 0);
#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_release();
#endif
	if (atomic_dec_uint_nv(&uobj->uo_refs) > 0) {
		UVMHIST_LOG(maphist, "<- done (rc>0)", 0,0,0,0);
		return;
	}
#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_acquire();
#endif

	/*
	 * Remove the aobj from the global list.
	 */

	mutex_enter(&uao_list_lock);
	LIST_REMOVE(aobj, u_list);
	mutex_exit(&uao_list_lock);

	/*
	 * Free all the pages left in the aobj.  For each page, when the
	 * page is no longer busy (and thus after any disk I/O that it is
	 * involved in is complete), release any swap resources and free
	 * the page itself.
	 */
	uvm_page_array_init(&a, uobj, 0);
	rw_enter(uobj->vmobjlock, RW_WRITER);
	while ((pg = uvm_page_array_fill_and_peek(&a, 0, 0)) != NULL) {
		uvm_page_array_advance(&a);
		pmap_page_protect(pg, VM_PROT_NONE);
		if (pg->flags & PG_BUSY) {
			uvm_pagewait(pg, uobj->vmobjlock, "uao_det");
			uvm_page_array_clear(&a);
			rw_enter(uobj->vmobjlock, RW_WRITER);
			continue;
		}
		uao_dropswap(&aobj->u_obj, pg->offset >> PAGE_SHIFT);
		uvm_pagefree(pg);
	}
	uvm_page_array_fini(&a);

	/*
	 * Finally, free the anonymous UVM object itself.
	 */

	uao_free(aobj);
}

/*
 * uao_put: flush pages out of a uvm object
 *
 * => object should be locked by caller.  we may _unlock_ the object
 *	if (and only if) we need to clean a page (PGO_CLEANIT).
 *	XXXJRT Currently, however, we don't.  In the case of cleaning
 *	XXXJRT a page, we simply just deactivate it.  Should probably
 *	XXXJRT handle this better, in the future (although "flushing"
 *	XXXJRT anonymous memory isn't terribly important).
 * => if PGO_CLEANIT is not set, then we will neither unlock the object
 *	nor block.
 * => if PGO_ALLPAGE is set, then all pages in the object are valid targets
 *	for flushing.
 * => we return 0 unless we encountered some sort of I/O error
 *	XXXJRT currently never happens, as we never directly initiate
 *	XXXJRT I/O
 */

static int
uao_put(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct uvm_page_array a;
	struct vm_page *pg;
	voff_t curoff;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	KASSERT(UVM_OBJ_IS_AOBJ(uobj));
	KASSERT(rw_write_held(uobj->vmobjlock));

	if (flags & PGO_ALLPAGES) {
		start = 0;
		stop = aobj->u_pages << PAGE_SHIFT;
	} else {
		start = trunc_page(start);
		if (stop == 0) {
			stop = aobj->u_pages << PAGE_SHIFT;
		} else {
			stop = round_page(stop);
		}
		if (stop > (uint64_t)(aobj->u_pages << PAGE_SHIFT)) {
			printf("uao_put: strange, got an out of range "
			    "flush %#jx > %#jx (fixed)\n",
			    (uintmax_t)stop,
			    (uintmax_t)(aobj->u_pages << PAGE_SHIFT));
			stop = aobj->u_pages << PAGE_SHIFT;
		}
	}
	UVMHIST_LOG(maphist,
	    " flush start=%#jx, stop=%#jx, flags=%#jx",
	    start, stop, flags, 0);

	/*
	 * Don't need to do any work here if we're not freeing
	 * or deactivating pages.
	 */

	if ((flags & (PGO_DEACTIVATE|PGO_FREE)) == 0) {
		rw_exit(uobj->vmobjlock);
		return 0;
	}

	/* locked: uobj */
	uvm_page_array_init(&a, uobj, 0);
	curoff = start;
	while ((pg = uvm_page_array_fill_and_peek(&a, curoff, 0)) != NULL) {
		if (pg->offset >= stop) {
			break;
		}

		/*
		 * wait and try again if the page is busy.
		 */

		if (pg->flags & PG_BUSY) {
			uvm_pagewait(pg, uobj->vmobjlock, "uao_put");
			uvm_page_array_clear(&a);
			rw_enter(uobj->vmobjlock, RW_WRITER);
			continue;
		}
		uvm_page_array_advance(&a);
		curoff = pg->offset + PAGE_SIZE;

		switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) {

		/*
		 * XXX In these first 3 cases, we always just
		 * XXX deactivate the page.  We may want to
		 * XXX handle the different cases more specifically
		 * XXX in the future.
		 */

		case PGO_CLEANIT|PGO_FREE:
		case PGO_CLEANIT|PGO_DEACTIVATE:
		case PGO_DEACTIVATE:
 deactivate_it:
			uvm_pagelock(pg);
			uvm_pagedeactivate(pg);
			uvm_pageunlock(pg);
			break;

		case PGO_FREE:
			/*
			 * If there are multiple references to
			 * the object, just deactivate the page.
			 */

			if (uobj->uo_refs > 1)
				goto deactivate_it;

			/*
			 * free the swap slot and the page.
			 */

			pmap_page_protect(pg, VM_PROT_NONE);

			/*
			 * freeing swapslot here is not strictly necessary.
			 * however, leaving it here doesn't save much
			 * because we need to update swap accounting anyway.
			 */

			uao_dropswap(uobj, pg->offset >> PAGE_SHIFT);
			uvm_pagefree(pg);
			break;

		default:
			panic("%s: impossible", __func__);
		}
	}
	rw_exit(uobj->vmobjlock);
	uvm_page_array_fini(&a);
	return 0;
}
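
/*
 * Example invocation (a sketch): with the object's write lock held,
 * free every resident page and its swap backing; the lock is released
 * by uao_put() itself before it returns:
 *
 *	rw_enter(uobj->vmobjlock, RW_WRITER);
 *	(void)uao_put(uobj, 0, 0, PGO_ALLPAGES | PGO_FREE);
 */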

/*
 * uao_get: fetch me a page
 *
 * we have three cases:
 * 1: page is resident     -> just return the page.
 * 2: page is zero-fill    -> allocate a new page and zero it.
 * 3: page is swapped out  -> fetch the page from swap.
 *
 * case 1 can be handled with PGO_LOCKED, cases 2 and 3 cannot.
 * so, if the "center" page hits case 2/3 then we will need to return EBUSY.
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */
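
/*
 * Calling convention sketch (illustrative, not a verbatim excerpt of
 * the fault path): a first pass with PGO_LOCKED, under the object lock,
 * picks up a resident page; on EBUSY the caller takes the write lock
 * and retries with PGO_SYNCIO, which performs any zero-fill or swap I/O
 * and unlocks the object before returning:
 *
 *	npages = 1;
 *	error = (*uobj->pgops->pgo_get)(uobj, off, &pg, &npages, 0,
 *	    VM_PROT_READ, 0, PGO_LOCKED);	(uobj locked, stays locked)
 *	if (error == EBUSY) {
 *		rw_enter(uobj->vmobjlock, RW_WRITER);
 *		npages = 1;
 *		pg = NULL;
 *		error = (*uobj->pgops->pgo_get)(uobj, off, &pg, &npages,
 *		    0, VM_PROT_READ, 0, PGO_SYNCIO);	(unlocks uobj)
 *	}
 */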

static int
uao_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps,
    int *npagesp, int centeridx, vm_prot_t access_type, int advice, int flags)
{
	voff_t current_offset;
	struct vm_page *ptmp;
	int lcv, gotpages, maxpages, swslot, pageidx;
	bool overwrite = ((flags & PGO_OVERWRITE) != 0);
	struct uvm_page_array a;

	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(pdhist, "aobj=%#jx offset=%jd, flags=%#jx",
	    (uintptr_t)uobj, offset, flags,0);

	/*
	 * the object must be locked.  it can only be a read lock when
	 * processing a read fault with PGO_LOCKED.
	 */

	KASSERT(UVM_OBJ_IS_AOBJ(uobj));
	KASSERT(rw_lock_held(uobj->vmobjlock));
	KASSERT(rw_write_held(uobj->vmobjlock) ||
	    ((flags & PGO_LOCKED) != 0 && (access_type & VM_PROT_WRITE) == 0));

	/*
	 * get number of pages
	 */

	maxpages = *npagesp;

	/*
	 * step 1: handle the case where fault data structures are locked.
	 */

	if (flags & PGO_LOCKED) {

		/*
		 * step 1a: get pages that are already resident.  only do
		 * this if the data structures are locked (i.e. the first
		 * time through).
		 */

		uvm_page_array_init(&a, uobj, 0);
		gotpages = 0;	/* # of pages we got so far */
		for (lcv = 0; lcv < maxpages; lcv++) {
			ptmp = uvm_page_array_fill_and_peek(&a,
			    offset + (lcv << PAGE_SHIFT), maxpages);
			if (ptmp == NULL) {
				break;
			}
			KASSERT(ptmp->offset >= offset);
			lcv = (ptmp->offset - offset) >> PAGE_SHIFT;
			if (lcv >= maxpages) {
				break;
			}
			uvm_page_array_advance(&a);

			/*
			 * to be useful must get a non-busy page
			 */

			if ((ptmp->flags & PG_BUSY) != 0) {
				continue;
			}

			/*
			 * useful page: plug it in our result array
			 */

			KASSERT(uvm_pagegetdirty(ptmp) !=
			    UVM_PAGE_STATUS_CLEAN);
			pps[lcv] = ptmp;
			gotpages++;
		}
		uvm_page_array_fini(&a);

		/*
		 * step 1b: now we've either done everything needed or we
		 * need to unlock and do some waiting or I/O.
		 */

		UVMHIST_LOG(pdhist, "<- done (done=%jd)",
		    (pps[centeridx] != NULL), 0,0,0);
		*npagesp = gotpages;
		return pps[centeridx] != NULL ? 0 : EBUSY;
	}

	/*
	 * step 2: get non-resident or busy pages.
	 * object is locked.  data structures are unlocked.
	 */

	if ((flags & PGO_SYNCIO) == 0) {
		goto done;
	}

	uvm_page_array_init(&a, uobj, 0);
	for (lcv = 0, current_offset = offset ; lcv < maxpages ;) {

		/*
		 * we have yet to locate the current page (pps[lcv]).  we
		 * first look for a page that is already at the current offset.
		 * if we find a page, we check to see if it is busy or
		 * released.  if that is the case, then we sleep on the page
		 * until it is no longer busy or released and repeat the lookup.
		 * if the page we found is neither busy nor released, then we
		 * busy it (so we own it) and plug it into pps[lcv].  we are
		 * ready to move on to the next page.
		 */

		ptmp = uvm_page_array_fill_and_peek(&a, current_offset,
		    maxpages - lcv);

		if (ptmp != NULL && ptmp->offset == current_offset) {
			/* page is there, see if we need to wait on it */
			if ((ptmp->flags & PG_BUSY) != 0) {
				UVMHIST_LOG(pdhist,
				    "sleeping, ptmp->flags %#jx\n",
				    ptmp->flags,0,0,0);
				uvm_pagewait(ptmp, uobj->vmobjlock, "uao_get");
				rw_enter(uobj->vmobjlock, RW_WRITER);
				uvm_page_array_clear(&a);
				continue;
			}

			/*
			 * if we get here then the page is resident and
			 * unbusy.  we busy it now (so we own it).  if
			 * overwriting, mark the page dirty up front as
			 * it will be zapped via an unmanaged mapping.
			 */

			KASSERT(uvm_pagegetdirty(ptmp) !=
			    UVM_PAGE_STATUS_CLEAN);
			if (overwrite) {
				uvm_pagemarkdirty(ptmp, UVM_PAGE_STATUS_DIRTY);
			}
			/* we own it, caller must un-busy */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uao_get2");
			pps[lcv++] = ptmp;
			current_offset += PAGE_SIZE;
			uvm_page_array_advance(&a);
			continue;
		} else {
			KASSERT(ptmp == NULL || ptmp->offset > current_offset);
		}

		/*
		 * not resident.  allocate a new busy/fake/clean page in the
		 * object.  if it's in swap we need to do I/O to fill in the
		 * data, otherwise the page needs to be cleared: if it's not
		 * destined to be overwritten, then zero it here and now.
		 */

		pageidx = current_offset >> PAGE_SHIFT;
		swslot = uao_find_swslot(uobj, pageidx);
		ptmp = uao_pagealloc(uobj, current_offset,
		    swslot != 0 || overwrite ? 0 : UVM_PGA_ZERO);

		/* out of RAM? */
		if (ptmp == NULL) {
			rw_exit(uobj->vmobjlock);
			UVMHIST_LOG(pdhist, "sleeping, ptmp == NULL",0,0,0,0);
			uvm_wait("uao_getpage");
			rw_enter(uobj->vmobjlock, RW_WRITER);
			uvm_page_array_clear(&a);
			continue;
		}

		/*
		 * if swslot == 0, page hasn't existed before and is zeroed.
		 * otherwise we have a "fake/busy/clean" page that we just
		 * allocated.  do the needed "i/o", reading from swap.
		 */

		if (swslot != 0) {
#if defined(VMSWAP)
			int error;

			UVMHIST_LOG(pdhist, "pagein from swslot %jd",
			    swslot, 0,0,0);

			/*
			 * page in the swapped-out page.
			 * unlock object for i/o, relock when done.
			 */

			uvm_page_array_clear(&a);
			rw_exit(uobj->vmobjlock);
			error = uvm_swap_get(ptmp, swslot, PGO_SYNCIO);
			rw_enter(uobj->vmobjlock, RW_WRITER);

			/*
			 * I/O done.  check for errors.
			 */

			if (error != 0) {
				UVMHIST_LOG(pdhist, "<- done (error=%jd)",
				    error,0,0,0);

				/*
				 * remove the swap slot from the aobj
				 * and mark the aobj as having no real slot.
				 * don't free the swap slot, thus preventing
				 * it from being used again.
				 */

				swslot = uao_set_swslot(uobj, pageidx,
				    SWSLOT_BAD);
				if (swslot > 0) {
					uvm_swap_markbad(swslot, 1);
				}

				uvm_pagefree(ptmp);
				rw_exit(uobj->vmobjlock);
				UVMHIST_LOG(pdhist, "<- done (error)",
				    error,lcv,0,0);
				if (lcv != 0) {
					uvm_page_unbusy(pps, lcv);
				}
				memset(pps, 0, maxpages * sizeof(pps[0]));
				uvm_page_array_fini(&a);
				return error;
			}
#else /* defined(VMSWAP) */
			panic("%s: pagein", __func__);
#endif /* defined(VMSWAP) */
		}

		/*
		 * note that we will allow the page being writably-mapped
		 * (!PG_RDONLY) regardless of access_type.  if overwrite,
		 * the page can be modified through an unmanaged mapping
		 * so mark it dirty up front.
		 */
		if (overwrite) {
			uvm_pagemarkdirty(ptmp, UVM_PAGE_STATUS_DIRTY);
		} else {
			uvm_pagemarkdirty(ptmp, UVM_PAGE_STATUS_UNKNOWN);
		}

		/*
		 * we got the page!  clear the fake flag (indicates valid
		 * data now in page) and plug into our result array.  note
		 * that page is still busy.
		 *
		 * it is the callers job to:
		 * => check if the page is released
		 * => unbusy the page
		 * => activate the page
		 */
		KASSERT(uvm_pagegetdirty(ptmp) != UVM_PAGE_STATUS_CLEAN);
		KASSERT((ptmp->flags & PG_FAKE) != 0);
		KASSERT(ptmp->offset == current_offset);
		ptmp->flags &= ~PG_FAKE;
		pps[lcv++] = ptmp;
		current_offset += PAGE_SIZE;
	}
	uvm_page_array_fini(&a);

	/*
	 * finally, unlock object and return.
	 */

done:
	rw_exit(uobj->vmobjlock);
	UVMHIST_LOG(pdhist, "<- done (OK)",0,0,0,0);
	return 0;
}

#if defined(VMSWAP)

/*
 * uao_dropswap: release any swap resources from this aobj page.
 *
 * => aobj must be locked or have a reference count of 0.
 */

void
uao_dropswap(struct uvm_object *uobj, int pageidx)
{
	int slot;

	KASSERT(UVM_OBJ_IS_AOBJ(uobj));

	slot = uao_set_swslot(uobj, pageidx, 0);
	if (slot) {
		uvm_swap_free(slot, 1);
	}
}

/*
 * page in every page in every aobj that is paged-out to a range of swslots.
 *
 * => nothing should be locked.
 * => returns true if pagein was aborted due to lack of memory.
 */

bool
uao_swap_off(int startslot, int endslot)
{
	struct uvm_aobj *aobj;

	/*
	 * Walk the list of all anonymous UVM objects.  Grab the first.
	 */
	mutex_enter(&uao_list_lock);
	if ((aobj = LIST_FIRST(&uao_list)) == NULL) {
		mutex_exit(&uao_list_lock);
		return false;
	}
	uao_reference(&aobj->u_obj);

	do {
		struct uvm_aobj *nextaobj;
		bool rv;

		/*
		 * Prefetch the next object and immediately hold a reference
		 * on it, so neither the current nor the next entry could
		 * disappear while we are iterating.
		 */
		if ((nextaobj = LIST_NEXT(aobj, u_list)) != NULL) {
			uao_reference(&nextaobj->u_obj);
		}
		mutex_exit(&uao_list_lock);

		/*
		 * Page in all pages in the swap slot range.
		 */
		rw_enter(aobj->u_obj.vmobjlock, RW_WRITER);
		rv = uao_pagein(aobj, startslot, endslot);
		rw_exit(aobj->u_obj.vmobjlock);

		/* Drop the reference of the current object. */
		uao_detach(&aobj->u_obj);
		if (rv) {
			if (nextaobj) {
				uao_detach(&nextaobj->u_obj);
			}
			return rv;
		}

		aobj = nextaobj;
		mutex_enter(&uao_list_lock);
	} while (aobj);

	mutex_exit(&uao_list_lock);
	return false;
}

/*
 * page in any pages from aobj in the given range.
 *
 * => aobj must be locked and is returned locked.
 * => returns true if pagein was aborted due to lack of memory.
 */
static bool
uao_pagein(struct uvm_aobj *aobj, int startslot, int endslot)
{
	bool rv;

	if (UAO_USES_SWHASH(aobj)) {
		struct uao_swhash_elt *elt;
		int buck;

 restart:
		for (buck = aobj->u_swhashmask; buck >= 0; buck--) {
			for (elt = LIST_FIRST(&aobj->u_swhash[buck]);
			     elt != NULL;
			     elt = LIST_NEXT(elt, list)) {
				int i;

				for (i = 0; i < UAO_SWHASH_CLUSTER_SIZE; i++) {
					int slot = elt->slots[i];

					/*
					 * if the slot isn't in range, skip it.
					 */

					if (slot < startslot ||
					    slot >= endslot) {
						continue;
					}

					/*
					 * process the page,
					 * then start over on this object
					 * since the swhash elt
					 * may have been freed.
					 */

					rv = uao_pagein_page(aobj,
					    UAO_SWHASH_ELT_PAGEIDX_BASE(elt)
					    + i);
					if (rv) {
						return rv;
					}
					goto restart;
				}
			}
		}
	} else {
		int i;

		for (i = 0; i < aobj->u_pages; i++) {
			int slot = aobj->u_swslots[i];

			/*
			 * if the slot isn't in range, skip it
			 */

			if (slot < startslot || slot >= endslot) {
				continue;
			}

			/*
			 * process the page.
			 */

			rv = uao_pagein_page(aobj, i);
			if (rv) {
				return rv;
			}
		}
	}

	return false;
}

/*
 * uao_pagein_page: page in a single page from an anonymous UVM object.
 *
 * => Returns true if pagein was aborted due to lack of memory.
 * => Object must be locked and is returned locked.
 */

static bool
uao_pagein_page(struct uvm_aobj *aobj, int pageidx)
{
	struct uvm_object *uobj = &aobj->u_obj;
	struct vm_page *pg;
	int rv, npages;

	pg = NULL;
	npages = 1;

	KASSERT(rw_write_held(uobj->vmobjlock));
	rv = uao_get(uobj, (voff_t)pageidx << PAGE_SHIFT, &pg, &npages,
	    0, VM_PROT_READ | VM_PROT_WRITE, 0, PGO_SYNCIO);

	/*
	 * relock and finish up.
	 */

	rw_enter(uobj->vmobjlock, RW_WRITER);
	switch (rv) {
	case 0:
		break;

	case EIO:
	case ERESTART:

		/*
		 * nothing more to do on errors.
		 * ERESTART can only mean that the anon was freed,
		 * so again there's nothing to do.
		 */

		return false;

	default:
		return true;
	}

	/*
	 * ok, we've got the page now.
	 * mark it as dirty, clear its swslot and un-busy it.
	 */
	uao_dropswap(&aobj->u_obj, pageidx);

	/*
	 * make sure it's on a page queue.
	 */
	uvm_pagelock(pg);
	uvm_pageenqueue(pg);
	uvm_pagewakeup(pg);
	uvm_pageunlock(pg);

	pg->flags &= ~(PG_BUSY|PG_FAKE);
	uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
	UVM_PAGE_OWN(pg, NULL);

	return false;
}

/*
 * uao_dropswap_range: drop swapslots in the range.
 *
 * => aobj must be locked and is returned locked.
 * => start is inclusive.  end is exclusive.
 */
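
/*
 * Example from this file: uao_free() calls
 *
 *	uao_dropswap_range(uobj, 0, 0);
 *
 * where end == 0 is shorthand for "to the end of the object", freeing
 * every swap slot the object still holds.
 */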

void
uao_dropswap_range(struct uvm_object *uobj, voff_t start, voff_t end)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	int swpgonlydelta = 0;

	KASSERT(UVM_OBJ_IS_AOBJ(uobj));
	KASSERT(rw_write_held(uobj->vmobjlock));

	if (end == 0) {
		end = INT64_MAX;
	}

	if (UAO_USES_SWHASH(aobj)) {
		int i, hashbuckets = aobj->u_swhashmask + 1;
		voff_t taghi;
		voff_t taglo;

		taglo = UAO_SWHASH_ELT_TAG(start);
		taghi = UAO_SWHASH_ELT_TAG(end);

		for (i = 0; i < hashbuckets; i++) {
			struct uao_swhash_elt *elt, *next;

			for (elt = LIST_FIRST(&aobj->u_swhash[i]);
			     elt != NULL;
			     elt = next) {
				int startidx, endidx;
				int j;

				next = LIST_NEXT(elt, list);

				if (elt->tag < taglo || taghi < elt->tag) {
					continue;
				}

				if (elt->tag == taglo) {
					startidx =
					    UAO_SWHASH_ELT_PAGESLOT_IDX(start);
				} else {
					startidx = 0;
				}

				if (elt->tag == taghi) {
					endidx =
					    UAO_SWHASH_ELT_PAGESLOT_IDX(end);
				} else {
					endidx = UAO_SWHASH_CLUSTER_SIZE;
				}

				for (j = startidx; j < endidx; j++) {
					int slot = elt->slots[j];

					KASSERT(uvm_pagelookup(&aobj->u_obj,
					    (UAO_SWHASH_ELT_PAGEIDX_BASE(elt)
					    + j) << PAGE_SHIFT) == NULL);
					if (slot > 0) {
						uvm_swap_free(slot, 1);
						swpgonlydelta++;
						KASSERT(elt->count > 0);
						elt->slots[j] = 0;
						elt->count--;
					}
				}

				if (elt->count == 0) {
					LIST_REMOVE(elt, list);
					pool_put(&uao_swhash_elt_pool, elt);
				}
			}
		}
	} else {
		int i;

		if (aobj->u_pages < end) {
			end = aobj->u_pages;
		}
		for (i = start; i < end; i++) {
			int slot = aobj->u_swslots[i];

			if (slot > 0) {
				uvm_swap_free(slot, 1);
				swpgonlydelta++;
			}
		}
	}

	/*
	 * adjust the counter of pages only in swap for all
	 * the swap slots we've freed.
	 */

	if (swpgonlydelta > 0) {
		KASSERT(uvmexp.swpgonly >= swpgonlydelta);
		atomic_add_int(&uvmexp.swpgonly, -swpgonlydelta);
	}
}

#endif /* defined(VMSWAP) */