/*	$NetBSD: uvm_aobj.c,v 1.122 2014/05/25 18:55:11 riastradh Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
 * Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_aobj.c,v 1.1.2.5 1998/02/06 05:14:38 chs Exp
 */

/*
 * uvm_aobj.c: anonymous memory uvm_object pager
 *
 * author: Chuck Silvers <chuq (at) chuq.com>
 * started: Jan-1998
 *
 * - design mostly from Chuck Cranor
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_aobj.c,v 1.122 2014/05/25 18:55:11 riastradh Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/pool.h>
#include <sys/atomic.h>

#include <uvm/uvm.h>

/*
 * An anonymous UVM object (aobj) manages anonymous memory.  In addition to
 * keeping the list of resident pages, it may also keep a list of allocated
 * swap blocks.  Depending on the size of the object, this list is either
 * stored in an array (small objects) or in a hash table (large objects).
 *
 * Lock order
 *
 *	uao_list_lock ->
 *		uvm_object::vmobjlock
 */

/*
 * Note: for hash tables, we break the address space of the aobj into blocks
 * of UAO_SWHASH_CLUSTER_SIZE pages, which shall be a power of two.
 */

#define	UAO_SWHASH_CLUSTER_SHIFT	4
#define	UAO_SWHASH_CLUSTER_SIZE		(1 << UAO_SWHASH_CLUSTER_SHIFT)

/* Get the "tag" for this page index. */
#define	UAO_SWHASH_ELT_TAG(idx)		((idx) >> UAO_SWHASH_CLUSTER_SHIFT)
#define	UAO_SWHASH_ELT_PAGESLOT_IDX(idx) \
	((idx) & (UAO_SWHASH_CLUSTER_SIZE - 1))

/* Given an ELT and a page index, find the swap slot. */
#define	UAO_SWHASH_ELT_PAGESLOT(elt, idx) \
	((elt)->slots[UAO_SWHASH_ELT_PAGESLOT_IDX(idx)])

/* Given an ELT, return its pageidx base. */
#define	UAO_SWHASH_ELT_PAGEIDX_BASE(elt) \
	((elt)->tag << UAO_SWHASH_CLUSTER_SHIFT)

/* The hash function. */
#define	UAO_SWHASH_HASH(aobj, idx) \
	(&(aobj)->u_swhash[(((idx) >> UAO_SWHASH_CLUSTER_SHIFT) \
	& (aobj)->u_swhashmask)])
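
/*
 * Worked example of the macros above: with UAO_SWHASH_CLUSTER_SHIFT == 4,
 * page index 0x123 decomposes as
 *
 *	UAO_SWHASH_ELT_TAG(0x123)          == 0x123 >> 4  == 0x12
 *	UAO_SWHASH_ELT_PAGESLOT_IDX(0x123) == 0x123 & 0xf == 0x3
 *
 * i.e. the page occupies slot 3 of the element tagged 0x12, and
 * UAO_SWHASH_HASH() selects the bucket from the tag bits masked by
 * u_swhashmask.
 */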

/*
 * The threshold which determines whether we will use an array or a
 * hash table to store the list of allocated swap blocks.
 */
#define	UAO_SWHASH_THRESHOLD		(UAO_SWHASH_CLUSTER_SIZE * 4)
#define	UAO_USES_SWHASH(aobj) \
	((aobj)->u_pages > UAO_SWHASH_THRESHOLD)

/* The number of buckets in a hash, with an upper bound. */
#define	UAO_SWHASH_MAXBUCKETS		256
#define	UAO_SWHASH_BUCKETS(aobj) \
	(MIN((aobj)->u_pages >> UAO_SWHASH_CLUSTER_SHIFT, UAO_SWHASH_MAXBUCKETS))
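
/*
 * Worked example: UAO_SWHASH_THRESHOLD is 16 * 4 == 64 pages, so with
 * 4 KiB pages any aobj larger than 256 KiB keeps its swap slots in the
 * hash table; a 1024-page (4 MiB) object gets
 * MIN(1024 >> 4, 256) == 64 buckets.
 */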

/*
 * uao_swhash_elt: when a hash table is being used, this structure defines
 * the format of an entry in the bucket list.
 */

struct uao_swhash_elt {
	LIST_ENTRY(uao_swhash_elt) list;	/* the hash list */
	voff_t tag;				/* our 'tag' */
	int count;				/* our number of active slots */
	int slots[UAO_SWHASH_CLUSTER_SIZE];	/* the slots */
};

/*
 * uao_swhash: the swap hash table structure
 */

LIST_HEAD(uao_swhash, uao_swhash_elt);

/*
 * uao_swhash_elt_pool: pool of uao_swhash_elt structures.
 * Note: pages for this pool must not come from a pageable kernel map.
 */
static struct pool uao_swhash_elt_pool __cacheline_aligned;

/*
 * uvm_aobj: the actual anon-backed uvm_object
 *
 * => the uvm_object is at the top of the structure, this allows
 *   (struct uvm_aobj *) == (struct uvm_object *)
 * => only one of u_swslots and u_swhash is used in any given aobj
 */

struct uvm_aobj {
	struct uvm_object u_obj; /* has: lock, pgops, memq, #pages, #refs */
	pgoff_t u_pages;	 /* number of pages in entire object */
	int u_flags;		 /* the flags (see uvm_aobj.h) */
	int *u_swslots;		 /* array of offset->swapslot mappings */
	/*
	 * hashtable of offset->swapslot mappings
	 * (u_swhash is an array of bucket heads)
	 */
	struct uao_swhash *u_swhash;
	u_long u_swhashmask;		/* mask for hashtable */
	LIST_ENTRY(uvm_aobj) u_list;	/* global list of aobjs */
	int u_freelist;		 /* freelist to allocate pages from */
};
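
/*
 * Because u_obj is the first member of struct uvm_aobj, the two views of
 * an object convert with a plain cast, which is how the functions below
 * move between the generic and the aobj-specific layout:
 *
 *	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
 *	struct uvm_object *base = &aobj->u_obj;
 */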

static void	uao_free(struct uvm_aobj *);
static int	uao_get(struct uvm_object *, voff_t, struct vm_page **,
		    int *, int, vm_prot_t, int, int);
static int	uao_put(struct uvm_object *, voff_t, voff_t, int);

#if defined(VMSWAP)
static struct uao_swhash_elt *uao_find_swhash_elt
    (struct uvm_aobj *, int, bool);

static bool uao_pagein(struct uvm_aobj *, int, int);
static bool uao_pagein_page(struct uvm_aobj *, int);
#endif /* defined(VMSWAP) */

static struct vm_page	*uao_pagealloc(struct uvm_object *, voff_t, int);

/*
 * aobj_pager
 *
 * note that some functions (e.g. put) are handled elsewhere
 */

const struct uvm_pagerops aobj_pager = {
	.pgo_reference = uao_reference,
	.pgo_detach = uao_detach,
	.pgo_get = uao_get,
	.pgo_put = uao_put,
};

/*
 * uao_list: global list of active aobjs, locked by uao_list_lock
 */

static LIST_HEAD(aobjlist, uvm_aobj) uao_list __cacheline_aligned;
static kmutex_t uao_list_lock __cacheline_aligned;

/*
 * hash table/array related functions
 */

#if defined(VMSWAP)

/*
 * uao_find_swhash_elt: find (or create) a hash table entry for a page
 * offset.
 *
 * => the object should be locked by the caller
 */

static struct uao_swhash_elt *
uao_find_swhash_elt(struct uvm_aobj *aobj, int pageidx, bool create)
{
	struct uao_swhash *swhash;
	struct uao_swhash_elt *elt;
	voff_t page_tag;

	swhash = UAO_SWHASH_HASH(aobj, pageidx);
	page_tag = UAO_SWHASH_ELT_TAG(pageidx);

	/*
	 * now search the bucket for the requested tag
	 */

	LIST_FOREACH(elt, swhash, list) {
		if (elt->tag == page_tag) {
			return elt;
		}
	}
	if (!create) {
		return NULL;
	}

	/*
	 * allocate a new entry for the bucket and init/insert it in
	 */

	elt = pool_get(&uao_swhash_elt_pool, PR_NOWAIT);
	if (elt == NULL) {
		return NULL;
	}
	LIST_INSERT_HEAD(swhash, elt, list);
	elt->tag = page_tag;
	elt->count = 0;
	memset(elt->slots, 0, sizeof(elt->slots));
	return elt;
}

/*
 * uao_find_swslot: find the swap slot number for an aobj/pageidx
 *
 * => object must be locked by caller
 */

int
uao_find_swslot(struct uvm_object *uobj, int pageidx)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct uao_swhash_elt *elt;

	/*
	 * if noswap flag is set, then we never return a slot
	 */

	if (aobj->u_flags & UAO_FLAG_NOSWAP)
		return 0;

	/*
	 * if hashing, look in hash table.
	 */

	if (UAO_USES_SWHASH(aobj)) {
		elt = uao_find_swhash_elt(aobj, pageidx, false);
		return elt ? UAO_SWHASH_ELT_PAGESLOT(elt, pageidx) : 0;
	}

	/*
	 * otherwise, look in the array
	 */

	return aobj->u_swslots[pageidx];
}

/*
 * uao_set_swslot: set the swap slot for a page in an aobj.
 *
 * => setting a slot to zero frees the slot
 * => object must be locked by caller
 * => we return the old slot number, or -1 if we failed to allocate
 *    memory to record the new slot number
 */

int
uao_set_swslot(struct uvm_object *uobj, int pageidx, int slot)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct uao_swhash_elt *elt;
	int oldslot;
	UVMHIST_FUNC("uao_set_swslot"); UVMHIST_CALLED(pdhist);
	UVMHIST_LOG(pdhist, "aobj %p pageidx %d slot %d",
	    aobj, pageidx, slot, 0);

	KASSERT(mutex_owned(uobj->vmobjlock) || uobj->uo_refs == 0);

	/*
	 * if noswap flag is set, then we can't set a non-zero slot.
	 */

	if (aobj->u_flags & UAO_FLAG_NOSWAP) {
		KASSERTMSG(slot == 0, "uao_set_swslot: no swap object");
		return 0;
	}

	/*
	 * are we using a hash table?  if so, add it in the hash.
	 */

	if (UAO_USES_SWHASH(aobj)) {

		/*
		 * Avoid allocating an entry just to free it again if
		 * the page had no swap slot in the first place, and
		 * we are freeing.
		 */

		elt = uao_find_swhash_elt(aobj, pageidx, slot != 0);
		if (elt == NULL) {
			return slot ? -1 : 0;
		}

		oldslot = UAO_SWHASH_ELT_PAGESLOT(elt, pageidx);
		UAO_SWHASH_ELT_PAGESLOT(elt, pageidx) = slot;

		/*
		 * now adjust the elt's reference counter and free it if we've
		 * dropped it to zero.
		 */

		if (slot) {
			if (oldslot == 0)
				elt->count++;
		} else {
			if (oldslot)
				elt->count--;

			if (elt->count == 0) {
				LIST_REMOVE(elt, list);
				pool_put(&uao_swhash_elt_pool, elt);
			}
		}
	} else {
		/* we are using an array */
		oldslot = aobj->u_swslots[pageidx];
		aobj->u_swslots[pageidx] = slot;
	}
	return oldslot;
}
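
/*
 * Usage sketch (this is what uao_dropswap() below does): to release the
 * swap space backing page "pageidx", set its slot to zero and free the
 * old slot, if any:
 *
 *	int slot = uao_set_swslot(uobj, pageidx, 0);
 *	if (slot)
 *		uvm_swap_free(slot, 1);
 *
 * A slot value of 0 means "no swap slot"; SWSLOT_BAD marks a slot whose
 * backing store must not be reused.
 */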

#endif /* defined(VMSWAP) */

/*
 * end of hash/array functions
 */

/*
 * uao_free: free all resources held by an aobj, and then free the aobj
 *
 * => the aobj should be dead
 */

static void
uao_free(struct uvm_aobj *aobj)
{
	struct uvm_object *uobj = &aobj->u_obj;

	KASSERT(mutex_owned(uobj->vmobjlock));
	uao_dropswap_range(uobj, 0, 0);
	mutex_exit(uobj->vmobjlock);

#if defined(VMSWAP)
	if (UAO_USES_SWHASH(aobj)) {

		/*
		 * free the hash table itself.
		 */

		hashdone(aobj->u_swhash, HASH_LIST, aobj->u_swhashmask);
	} else {

		/*
		 * free the array itself.
		 */

		kmem_free(aobj->u_swslots, aobj->u_pages * sizeof(int));
	}
#endif /* defined(VMSWAP) */

	/*
	 * finally free the aobj itself
	 */

	uvm_obj_destroy(uobj, true);
	kmem_free(aobj, sizeof(struct uvm_aobj));
}

/*
 * pager functions
 */

/*
 * uao_create: create an aobj of the given size and return its uvm_object.
 *
 * => for normal use, flags are always zero
 * => for the kernel object, the flags are:
 *	UAO_FLAG_KERNOBJ - allocate the kernel object (can only happen once)
 *	UAO_FLAG_KERNSWAP - enable swapping of kernel object ("  ")
 */

struct uvm_object *
uao_create(vsize_t size, int flags)
{
	static struct uvm_aobj kernel_object_store;
	static kmutex_t kernel_object_lock;
	static int kobj_alloced __diagused = 0;
	pgoff_t pages = round_page(size) >> PAGE_SHIFT;
	struct uvm_aobj *aobj;
	int refs;

	/*
	 * Allocate a new aobj, unless kernel object is requested.
	 */

	if (flags & UAO_FLAG_KERNOBJ) {
		KASSERT(!kobj_alloced);
		aobj = &kernel_object_store;
		aobj->u_pages = pages;
		aobj->u_flags = UAO_FLAG_NOSWAP;
		refs = UVM_OBJ_KERN;
		kobj_alloced = UAO_FLAG_KERNOBJ;
	} else if (flags & UAO_FLAG_KERNSWAP) {
		KASSERT(kobj_alloced == UAO_FLAG_KERNOBJ);
		aobj = &kernel_object_store;
		kobj_alloced = UAO_FLAG_KERNSWAP;
		refs = 0xdeadbeaf; /* XXX: gcc */
	} else {
		aobj = kmem_alloc(sizeof(struct uvm_aobj), KM_SLEEP);
		aobj->u_pages = pages;
		aobj->u_flags = 0;
		refs = 1;
	}

	/*
	 * no freelist by default
	 */

	aobj->u_freelist = VM_NFREELIST;

	/*
	 * allocate hash/array if necessary
	 *
	 * note: in the KERNSWAP case there is no need to worry about
	 * locking, since we are still booting and should be the only
	 * thread around.
	 */

	if (flags == 0 || (flags & UAO_FLAG_KERNSWAP) != 0) {
#if defined(VMSWAP)
		const int kernswap = (flags & UAO_FLAG_KERNSWAP) != 0;

		/* allocate hash table or array depending on object size */
		if (UAO_USES_SWHASH(aobj)) {
			aobj->u_swhash = hashinit(UAO_SWHASH_BUCKETS(aobj),
			    HASH_LIST, kernswap ? false : true,
			    &aobj->u_swhashmask);
			if (aobj->u_swhash == NULL)
				panic("uao_create: hashinit swhash failed");
		} else {
			aobj->u_swslots = kmem_zalloc(pages * sizeof(int),
			    kernswap ? KM_NOSLEEP : KM_SLEEP);
			if (aobj->u_swslots == NULL)
				panic("uao_create: swslots allocation failed");
		}
#endif /* defined(VMSWAP) */

		if (flags) {
			aobj->u_flags &= ~UAO_FLAG_NOSWAP; /* clear noswap */
			return &aobj->u_obj;
		}
	}

	/*
	 * Initialise UVM object.
	 */

	const bool kernobj = (flags & UAO_FLAG_KERNOBJ) != 0;
	uvm_obj_init(&aobj->u_obj, &aobj_pager, !kernobj, refs);
	if (__predict_false(kernobj)) {
		/* Initialisation only once, for UAO_FLAG_KERNOBJ. */
		mutex_init(&kernel_object_lock, MUTEX_DEFAULT, IPL_NONE);
		uvm_obj_setlock(&aobj->u_obj, &kernel_object_lock);
	}

	/*
	 * now that aobj is ready, add it to the global list
	 */

	mutex_enter(&uao_list_lock);
	LIST_INSERT_HEAD(&uao_list, aobj, u_list);
	mutex_exit(&uao_list_lock);
	return(&aobj->u_obj);
}
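
/*
 * A sketch of a typical consumer outside this file: anonymous memory is
 * created with uao_create() and handed to uvm_map(), which takes over the
 * object's initial reference; on failure the caller drops it:
 *
 *	struct uvm_object *uobj = uao_create(size, 0);
 *	vaddr_t va = 0;
 *	int error = uvm_map(kernel_map, &va, size, uobj, 0, 0,
 *	    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_NONE,
 *		UVM_ADV_RANDOM, 0));
 *	if (error)
 *		uao_detach(uobj);
 */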

/*
 * uao_set_pgfl: allocate pages only from the specified freelist.
 *
 * => must be called before any pages are allocated for the object.
 * => reset by setting it to VM_NFREELIST, meaning any freelist.
 */

void
uao_set_pgfl(struct uvm_object *uobj, int freelist)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;

	KASSERTMSG((0 <= freelist), "invalid freelist %d", freelist);
	KASSERTMSG((freelist <= VM_NFREELIST), "invalid freelist %d",
	    freelist);

	aobj->u_freelist = freelist;
}

/*
 * uao_pagealloc: allocate a page for aobj.
 */

static inline struct vm_page *
uao_pagealloc(struct uvm_object *uobj, voff_t offset, int flags)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;

	if (__predict_true(aobj->u_freelist == VM_NFREELIST))
		return uvm_pagealloc(uobj, offset, NULL, flags);
	else
		return uvm_pagealloc_strat(uobj, offset, NULL, flags,
		    UVM_PGA_STRAT_ONLY, aobj->u_freelist);
}
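
/*
 * Example (a sketch; freelist numbering is machine-dependent): a caller
 * that must have its backing pages drawn from one physical freelist can
 * pin a fresh object before the first page is allocated:
 *
 *	struct uvm_object *uobj = uao_create(size, 0);
 *	uao_set_pgfl(uobj, VM_FREELIST_DEFAULT);
 *
 * after which uao_pagealloc() above uses UVM_PGA_STRAT_ONLY and will
 * fail rather than fall back to another freelist.
 */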

/*
 * uao_init: set up aobj pager subsystem
 *
 * => called at boot time from uvm_pager_init()
 */

void
uao_init(void)
{
	static int uao_initialized;

	if (uao_initialized)
		return;
	uao_initialized = true;
	LIST_INIT(&uao_list);
	mutex_init(&uao_list_lock, MUTEX_DEFAULT, IPL_NONE);
	pool_init(&uao_swhash_elt_pool, sizeof(struct uao_swhash_elt),
	    0, 0, 0, "uaoeltpl", NULL, IPL_VM);
}

/*
 * uao_reference: hold a reference to an anonymous UVM object.
 */
void
uao_reference(struct uvm_object *uobj)
{
	/* Kernel object is persistent. */
	if (UVM_OBJ_IS_KERN_OBJECT(uobj)) {
		return;
	}
	atomic_inc_uint(&uobj->uo_refs);
}

/*
 * uao_detach: drop a reference to an anonymous UVM object.
 */
void
uao_detach(struct uvm_object *uobj)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct vm_page *pg;

	UVMHIST_FUNC("uao_detach"); UVMHIST_CALLED(maphist);

	/*
	 * Detaching from kernel object is a NOP.
	 */

	if (UVM_OBJ_IS_KERN_OBJECT(uobj))
		return;

	/*
	 * Drop the reference.  If it was the last one, destroy the object.
	 */

	UVMHIST_LOG(maphist,"  (uobj=0x%x)  ref=%d", uobj,uobj->uo_refs,0,0);
	if (atomic_dec_uint_nv(&uobj->uo_refs) > 0) {
		UVMHIST_LOG(maphist, "<- done (rc>0)", 0,0,0,0);
		return;
	}

	/*
	 * Remove the aobj from the global list.
	 */

	mutex_enter(&uao_list_lock);
	LIST_REMOVE(aobj, u_list);
	mutex_exit(&uao_list_lock);

	/*
	 * Free all the pages left in the aobj.  For each page, when the
	 * page is no longer busy (and thus after any disk I/O that it is
	 * involved in is complete), release any swap resources and free
	 * the page itself.
	 */

	mutex_enter(uobj->vmobjlock);
	mutex_enter(&uvm_pageqlock);
	while ((pg = TAILQ_FIRST(&uobj->memq)) != NULL) {
		pmap_page_protect(pg, VM_PROT_NONE);
		if (pg->flags & PG_BUSY) {
			pg->flags |= PG_WANTED;
			mutex_exit(&uvm_pageqlock);
			UVM_UNLOCK_AND_WAIT(pg, uobj->vmobjlock, false,
			    "uao_det", 0);
			mutex_enter(uobj->vmobjlock);
			mutex_enter(&uvm_pageqlock);
			continue;
		}
		uao_dropswap(&aobj->u_obj, pg->offset >> PAGE_SHIFT);
		uvm_pagefree(pg);
	}
	mutex_exit(&uvm_pageqlock);

	/*
	 * Finally, free the anonymous UVM object itself.
	 */

	uao_free(aobj);
}

/*
 * uao_put: flush pages out of a uvm object
 *
 * => object should be locked by caller.  we may _unlock_ the object
 *	if (and only if) we need to clean a page (PGO_CLEANIT).
 *	XXXJRT Currently, however, we don't.  In the case of cleaning
 *	XXXJRT a page, we simply just deactivate it.  Should probably
 *	XXXJRT handle this better, in the future (although "flushing"
 *	XXXJRT anonymous memory isn't terribly important).
 * => if PGO_CLEANIT is not set, then we will neither unlock the object
 *	nor block.
 * => if PGO_ALLPAGE is set, then all pages in the object are valid targets
 *	for flushing.
 * => NOTE: we rely on the fact that the object's memq is a TAILQ and
 *	that new pages are inserted on the tail end of the list.  thus,
 *	we can make a complete pass through the object in one go by starting
 *	at the head and working towards the tail (new pages are put in
 *	front of us).
 * => NOTE: we are allowed to lock the page queues, so the caller
 *	must not be holding the lock on them [e.g. pagedaemon had
 *	better not call us with the queues locked]
 * => we return 0 unless we encountered some sort of I/O error
 *	XXXJRT currently never happens, as we never directly initiate
 *	XXXJRT I/O
 *
 * note on page traversal:
 *	we can traverse the pages in an object either by going down the
 *	linked list in "uobj->memq", or we can go over the address range
 *	by page doing hash table lookups for each address.  depending
 *	on how many pages are in the object it may be cheaper to do one
 *	or the other.  we set "by_list" to true if we are using memq.
 *	if the cost of a hash lookup was equal to the cost of the list
 *	traversal we could compare the number of pages in the start->stop
 *	range to the total number of pages in the object.  however, it
 *	seems that a hash table lookup is more expensive than the linked
 *	list traversal, so we multiply the number of pages in the
 *	start->stop range by a penalty which we define below.
 */

static int
uao_put(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct vm_page *pg, *nextpg, curmp, endmp;
	bool by_list;
	voff_t curoff;
	UVMHIST_FUNC("uao_put"); UVMHIST_CALLED(maphist);

	KASSERT(mutex_owned(uobj->vmobjlock));

	curoff = 0;
	if (flags & PGO_ALLPAGES) {
		start = 0;
		stop = aobj->u_pages << PAGE_SHIFT;
		by_list = true;		/* always go by the list */
	} else {
		start = trunc_page(start);
		if (stop == 0) {
			stop = aobj->u_pages << PAGE_SHIFT;
		} else {
			stop = round_page(stop);
		}
		if (stop > (aobj->u_pages << PAGE_SHIFT)) {
			printf("uao_flush: strange, got an out of range "
			    "flush (fixed)\n");
			stop = aobj->u_pages << PAGE_SHIFT;
		}
		by_list = (uobj->uo_npages <=
		    ((stop - start) >> PAGE_SHIFT) * UVM_PAGE_TREE_PENALTY);
	}
	UVMHIST_LOG(maphist,
	    " flush start=0x%lx, stop=0x%x, by_list=%d, flags=0x%x",
	    start, stop, by_list, flags);

	/*
	 * Don't need to do any work here if we're not freeing
	 * or deactivating pages.
	 */

	if ((flags & (PGO_DEACTIVATE|PGO_FREE)) == 0) {
		mutex_exit(uobj->vmobjlock);
		return 0;
	}

	/*
	 * Initialize the marker pages.  See the comment in
	 * genfs_putpages() also.
	 */

	curmp.flags = PG_MARKER;
	endmp.flags = PG_MARKER;

	/*
	 * now do it.  note: we must update nextpg in the body of the loop
	 * or we will get stuck.  we need to use nextpg if we'll traverse
	 * the list because we may free "pg" before doing the next loop.
	 */

	if (by_list) {
		TAILQ_INSERT_TAIL(&uobj->memq, &endmp, listq.queue);
		nextpg = TAILQ_FIRST(&uobj->memq);
	} else {
		curoff = start;
		nextpg = NULL;	/* Quell compiler warning */
	}

	/* locked: uobj */
	for (;;) {
		if (by_list) {
			pg = nextpg;
			if (pg == &endmp)
				break;
			nextpg = TAILQ_NEXT(pg, listq.queue);
			if (pg->flags & PG_MARKER)
				continue;
			if (pg->offset < start || pg->offset >= stop)
				continue;
		} else {
			if (curoff < stop) {
				pg = uvm_pagelookup(uobj, curoff);
				curoff += PAGE_SIZE;
			} else
				break;
			if (pg == NULL)
				continue;
		}

		/*
		 * wait and try again if the page is busy.
		 */

		if (pg->flags & PG_BUSY) {
			if (by_list) {
				TAILQ_INSERT_BEFORE(pg, &curmp, listq.queue);
			}
			pg->flags |= PG_WANTED;
			UVM_UNLOCK_AND_WAIT(pg, uobj->vmobjlock, 0,
			    "uao_put", 0);
			mutex_enter(uobj->vmobjlock);
			if (by_list) {
				nextpg = TAILQ_NEXT(&curmp, listq.queue);
				TAILQ_REMOVE(&uobj->memq, &curmp,
				    listq.queue);
			} else
				curoff -= PAGE_SIZE;
			continue;
		}

		switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) {

		/*
		 * XXX In these first 3 cases, we always just
		 * XXX deactivate the page.  We may want to
		 * XXX handle the different cases more specifically
		 * XXX in the future.
		 */

		case PGO_CLEANIT|PGO_FREE:
		case PGO_CLEANIT|PGO_DEACTIVATE:
		case PGO_DEACTIVATE:
 deactivate_it:
			mutex_enter(&uvm_pageqlock);
			/* skip the page if it's wired */
			if (pg->wire_count == 0) {
				uvm_pagedeactivate(pg);
			}
			mutex_exit(&uvm_pageqlock);
			break;

		case PGO_FREE:
			/*
			 * If there are multiple references to
			 * the object, just deactivate the page.
			 */

			if (uobj->uo_refs > 1)
				goto deactivate_it;

			/*
			 * free the swap slot and the page.
			 */

			pmap_page_protect(pg, VM_PROT_NONE);

			/*
			 * freeing swapslot here is not strictly necessary.
			 * however, leaving it here doesn't save much
			 * because we need to update swap accounting anyway.
			 */

			uao_dropswap(uobj, pg->offset >> PAGE_SHIFT);
			mutex_enter(&uvm_pageqlock);
			uvm_pagefree(pg);
			mutex_exit(&uvm_pageqlock);
			break;

		default:
			panic("%s: impossible", __func__);
		}
	}
	if (by_list) {
		TAILQ_REMOVE(&uobj->memq, &endmp, listq.queue);
	}
	mutex_exit(uobj->vmobjlock);
	return 0;
}
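
/*
 * A minimal caller sketch: with the object locked, free every resident
 * page (and its swap) backing a page-aligned range:
 *
 *	mutex_enter(uobj->vmobjlock);
 *	(void)uao_put(uobj, start, end, PGO_FREE);
 *
 * uao_put() always returns with vmobjlock released, so no matching
 * mutex_exit() is needed.  PGO_FREE|PGO_ALLPAGES would cover the whole
 * object regardless of the range.
 */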

/*
 * uao_get: fetch me a page
 *
 * we have three cases:
 * 1: page is resident     -> just return the page.
 * 2: page is zero-fill    -> allocate a new page and zero it.
 * 3: page is swapped out  -> fetch the page from swap.
 *
 * cases 1 and 2 can be handled with PGO_LOCKED, case 3 cannot.
 * so, if the "center" page hits case 3 (or any page, with PGO_ALLPAGES),
 * then we will need to return EBUSY.
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_ALLPAGES: get all of the pages
 *	     PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */

static int
uao_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps,
    int *npagesp, int centeridx, vm_prot_t access_type, int advice, int flags)
{
	voff_t current_offset;
	struct vm_page *ptmp = NULL;	/* Quell compiler warning */
	int lcv, gotpages, maxpages, swslot, pageidx;
	bool done;
	UVMHIST_FUNC("uao_get"); UVMHIST_CALLED(pdhist);

	UVMHIST_LOG(pdhist, "aobj=%p offset=%d, flags=%d",
	    (struct uvm_aobj *)uobj, offset, flags,0);

	/*
	 * get number of pages
	 */

	maxpages = *npagesp;

	/*
	 * step 1: handle the case where fault data structures are locked.
	 */

	if (flags & PGO_LOCKED) {

		/*
		 * step 1a: get pages that are already resident.   only do
		 * this if the data structures are locked (i.e. the first
		 * time through).
		 */

		done = true;	/* be optimistic */
		gotpages = 0;	/* # of pages we got so far */
		for (lcv = 0, current_offset = offset ; lcv < maxpages ;
		    lcv++, current_offset += PAGE_SIZE) {
			/* do we care about this page?  if not, skip it */
			if (pps[lcv] == PGO_DONTCARE)
				continue;
			ptmp = uvm_pagelookup(uobj, current_offset);

			/*
			 * if page is new, attempt to allocate the page,
			 * zero-fill'd.
			 */

			if (ptmp == NULL && uao_find_swslot(uobj,
			    current_offset >> PAGE_SHIFT) == 0) {
				ptmp = uao_pagealloc(uobj, current_offset,
				    UVM_FLAG_COLORMATCH|UVM_PGA_ZERO);
				if (ptmp) {
					/* new page */
					ptmp->flags &= ~(PG_FAKE);
					ptmp->pqflags |= PQ_AOBJ;
					goto gotpage;
				}
			}

			/*
			 * to be useful must get a non-busy page
			 */

			if (ptmp == NULL || (ptmp->flags & PG_BUSY) != 0) {
				if (lcv == centeridx ||
				    (flags & PGO_ALLPAGES) != 0)
					/* need to do a wait or I/O! */
					done = false;
				continue;
			}

			/*
			 * useful page: busy/lock it and plug it in our
			 * result array
			 */

			/* caller must un-busy this page */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uao_get1");
 gotpage:
			pps[lcv] = ptmp;
			gotpages++;
		}

		/*
		 * step 1b: now we've either done everything needed or we
		 * need to unlock and do some waiting or I/O.
		 */

		UVMHIST_LOG(pdhist, "<- done (done=%d)", done, 0,0,0);
		*npagesp = gotpages;
		if (done)
			return 0;
		else
			return EBUSY;
	}

	/*
	 * step 2: get non-resident or busy pages.
	 * object is locked.   data structures are unlocked.
	 */

	if ((flags & PGO_SYNCIO) == 0) {
		goto done;
	}

	for (lcv = 0, current_offset = offset ; lcv < maxpages ;
	    lcv++, current_offset += PAGE_SIZE) {

		/*
		 * - skip over pages we've already gotten or don't want
		 * - skip over pages we don't _have_ to get
		 */

		if (pps[lcv] != NULL ||
		    (lcv != centeridx && (flags & PGO_ALLPAGES) == 0))
			continue;

		pageidx = current_offset >> PAGE_SHIFT;

		/*
		 * we have yet to locate the current page (pps[lcv]).   we
		 * first look for a page that is already at the current offset.
		 * if we find a page, we check to see if it is busy or
		 * released.  if that is the case, then we sleep on the page
		 * until it is no longer busy or released and repeat the lookup.
		 * if the page we found is neither busy nor released, then we
		 * busy it (so we own it) and plug it into pps[lcv].   this
		 * 'break's the following while loop and indicates we are
		 * ready to move on to the next page in the "lcv" loop above.
		 *
		 * if we exit the while loop with pps[lcv] still set to NULL,
		 * then it means that we allocated a new busy/fake/clean page
		 * ptmp in the object and we need to do I/O to fill in the data.
		 */

		/* top of "pps" while loop */
		while (pps[lcv] == NULL) {
			/* look for a resident page */
			ptmp = uvm_pagelookup(uobj, current_offset);

			/* not resident?   allocate one now (if we can) */
			if (ptmp == NULL) {

				ptmp = uao_pagealloc(uobj, current_offset, 0);

				/* out of RAM? */
				if (ptmp == NULL) {
					mutex_exit(uobj->vmobjlock);
					UVMHIST_LOG(pdhist,
					    "sleeping, ptmp == NULL\n",0,0,0,0);
					uvm_wait("uao_getpage");
					mutex_enter(uobj->vmobjlock);
					continue;
				}

				/*
				 * safe with PQ's unlocked: because we just
				 * alloc'd the page
				 */

				ptmp->pqflags |= PQ_AOBJ;

				/*
				 * got new page ready for I/O.  break pps while
				 * loop.  pps[lcv] is still NULL.
				 */

				break;
			}

			/* page is there, see if we need to wait on it */
			if ((ptmp->flags & PG_BUSY) != 0) {
				ptmp->flags |= PG_WANTED;
				UVMHIST_LOG(pdhist,
				    "sleeping, ptmp->flags 0x%x\n",
				    ptmp->flags,0,0,0);
				UVM_UNLOCK_AND_WAIT(ptmp, uobj->vmobjlock,
				    false, "uao_get", 0);
				mutex_enter(uobj->vmobjlock);
				continue;
			}

			/*
			 * if we get here then the page has become resident and
			 * unbusy between steps 1 and 2.  we busy it now (so we
			 * own it) and set pps[lcv] (so that we exit the while
			 * loop).
			 */

			/* we own it, caller must un-busy */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uao_get2");
			pps[lcv] = ptmp;
		}

		/*
		 * if we own the valid page at the correct offset, pps[lcv] will
		 * point to it.   nothing more to do except go to the next page.
		 */

		if (pps[lcv])
			continue;			/* next lcv */

		/*
		 * we have a "fake/busy/clean" page that we just allocated.
		 * do the needed "i/o", either reading from swap or zeroing.
		 */

		swslot = uao_find_swslot(uobj, pageidx);

		/*
		 * just zero the page if there's nothing in swap.
		 */

		if (swslot == 0) {

			/*
			 * page hasn't existed before, just zero it.
			 */

			uvm_pagezero(ptmp);
		} else {
#if defined(VMSWAP)
			int error;

			UVMHIST_LOG(pdhist, "pagein from swslot %d",
			    swslot, 0,0,0);

			/*
			 * page in the swapped-out page.
			 * unlock object for i/o, relock when done.
			 */

			mutex_exit(uobj->vmobjlock);
			error = uvm_swap_get(ptmp, swslot, PGO_SYNCIO);
			mutex_enter(uobj->vmobjlock);

			/*
			 * I/O done.  check for errors.
			 */

			if (error != 0) {
				UVMHIST_LOG(pdhist, "<- done (error=%d)",
				    error,0,0,0);
				if (ptmp->flags & PG_WANTED)
					wakeup(ptmp);

				/*
				 * remove the swap slot from the aobj
				 * and mark the aobj as having no real slot.
				 * don't free the swap slot, thus preventing
				 * it from being used again.
				 */

				swslot = uao_set_swslot(uobj, pageidx,
				    SWSLOT_BAD);
				if (swslot > 0) {
					uvm_swap_markbad(swslot, 1);
				}

				mutex_enter(&uvm_pageqlock);
				uvm_pagefree(ptmp);
				mutex_exit(&uvm_pageqlock);
				mutex_exit(uobj->vmobjlock);
				return error;
			}
#else /* defined(VMSWAP) */
			panic("%s: pagein", __func__);
#endif /* defined(VMSWAP) */
		}

		if ((access_type & VM_PROT_WRITE) == 0) {
			ptmp->flags |= PG_CLEAN;
			pmap_clear_modify(ptmp);
		}

		/*
		 * we got the page!   clear the fake flag (indicates valid
		 * data now in page) and plug into our result array.   note
		 * that page is still busy.
		 *
		 * it is the callers job to:
		 * => check if the page is released
		 * => unbusy the page
		 * => activate the page
		 */

		ptmp->flags &= ~PG_FAKE;
		pps[lcv] = ptmp;
	}

	/*
	 * finally, unlock object and return.
	 */

 done:
	mutex_exit(uobj->vmobjlock);
	UVMHIST_LOG(pdhist, "<- done (OK)",0,0,0,0);
	return 0;
}
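
/*
 * A caller sketch (essentially what uao_pagein_page() below does): fetch
 * a single page synchronously, after which the page is busy and owned by
 * the caller, who must eventually clear PG_BUSY and wake any waiters:
 *
 *	struct vm_page *pg = NULL;
 *	int npages = 1;
 *	int error = uao_get(uobj, pageidx << PAGE_SHIFT, &pg, &npages,
 *	    0, VM_PROT_READ | VM_PROT_WRITE, 0, PGO_SYNCIO);
 *
 * note that uao_get() returns with the object unlocked.
 */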

#if defined(VMSWAP)

/*
 * uao_dropswap:  release any swap resources from this aobj page.
 *
 * => aobj must be locked or have a reference count of 0.
 */

void
uao_dropswap(struct uvm_object *uobj, int pageidx)
{
	int slot;

	slot = uao_set_swslot(uobj, pageidx, 0);
	if (slot) {
		uvm_swap_free(slot, 1);
	}
}

/*
 * page in every page in every aobj that is paged-out to a range of swslots.
 *
 * => nothing should be locked.
 * => returns true if pagein was aborted due to lack of memory.
 */

bool
uao_swap_off(int startslot, int endslot)
{
	struct uvm_aobj *aobj;

	/*
	 * Walk the list of all anonymous UVM objects.  Grab the first.
	 */
	mutex_enter(&uao_list_lock);
	if ((aobj = LIST_FIRST(&uao_list)) == NULL) {
		mutex_exit(&uao_list_lock);
		return false;
	}
	uao_reference(&aobj->u_obj);

	do {
		struct uvm_aobj *nextaobj;
		bool rv;

		/*
		 * Prefetch the next object and immediately hold a reference
		 * on it, so neither the current nor the next entry could
		 * disappear while we are iterating.
		 */
		if ((nextaobj = LIST_NEXT(aobj, u_list)) != NULL) {
			uao_reference(&nextaobj->u_obj);
		}
		mutex_exit(&uao_list_lock);

		/*
		 * Page in all pages in the swap slot range.
		 */
		mutex_enter(aobj->u_obj.vmobjlock);
		rv = uao_pagein(aobj, startslot, endslot);
		mutex_exit(aobj->u_obj.vmobjlock);

		/* Drop the reference of the current object. */
		uao_detach(&aobj->u_obj);
		if (rv) {
			if (nextaobj) {
				uao_detach(&nextaobj->u_obj);
			}
			return rv;
		}

		aobj = nextaobj;
		mutex_enter(&uao_list_lock);
	} while (aobj);

	mutex_exit(&uao_list_lock);
	return false;
}
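
/*
 * uao_swap_off() is the aobj half of swap device removal: the swap code
 * (outside this file; swapctl(8)'s removal path, as an assumption about
 * the caller) pages the device's entire slot range back into RAM before
 * the device goes away, roughly:
 *
 *	if (uao_swap_off(startslot, endslot))
 *		error = ENOMEM;		(pagein aborted: out of memory)
 */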

/*
 * page in any pages from aobj in the given range.
 *
 * => aobj must be locked and is returned locked.
 * => returns true if pagein was aborted due to lack of memory.
 */
static bool
uao_pagein(struct uvm_aobj *aobj, int startslot, int endslot)
{
	bool rv;

	if (UAO_USES_SWHASH(aobj)) {
		struct uao_swhash_elt *elt;
		int buck;

 restart:
		for (buck = aobj->u_swhashmask; buck >= 0; buck--) {
			for (elt = LIST_FIRST(&aobj->u_swhash[buck]);
			     elt != NULL;
			     elt = LIST_NEXT(elt, list)) {
				int i;

				for (i = 0; i < UAO_SWHASH_CLUSTER_SIZE; i++) {
					int slot = elt->slots[i];

					/*
					 * if the slot isn't in range, skip it.
					 */

					if (slot < startslot ||
					    slot >= endslot) {
						continue;
					}

					/*
					 * process the page, then start over
					 * on this object since the swhash
					 * elt may have been freed.
					 */

					rv = uao_pagein_page(aobj,
					    UAO_SWHASH_ELT_PAGEIDX_BASE(elt) + i);
					if (rv) {
						return rv;
					}
					goto restart;
				}
			}
		}
	} else {
		int i;

		for (i = 0; i < aobj->u_pages; i++) {
			int slot = aobj->u_swslots[i];

			/*
			 * if the slot isn't in range, skip it
			 */

			if (slot < startslot || slot >= endslot) {
				continue;
			}

			/*
			 * process the page.
			 */

			rv = uao_pagein_page(aobj, i);
			if (rv) {
				return rv;
			}
		}
	}

	return false;
}

/*
 * uao_pagein_page: page in a single page from an anonymous UVM object.
 *
 * => Returns true if pagein was aborted due to lack of memory.
 * => Object must be locked and is returned locked.
 */

static bool
uao_pagein_page(struct uvm_aobj *aobj, int pageidx)
{
	struct uvm_object *uobj = &aobj->u_obj;
	struct vm_page *pg;
	int rv, npages;

	pg = NULL;
	npages = 1;

	KASSERT(mutex_owned(uobj->vmobjlock));
	rv = uao_get(uobj, pageidx << PAGE_SHIFT, &pg, &npages,
	    0, VM_PROT_READ | VM_PROT_WRITE, 0, PGO_SYNCIO);

	/*
	 * relock and finish up.
	 */

	mutex_enter(uobj->vmobjlock);
	switch (rv) {
	case 0:
		break;

	case EIO:
	case ERESTART:

		/*
		 * nothing more to do on errors.
		 * ERESTART can only mean that the anon was freed,
		 * so again there's nothing to do.
		 */

		return false;

	default:
		return true;
	}

	/*
	 * ok, we've got the page now.
	 * mark it as dirty, clear its swslot and un-busy it.
	 */
	uao_dropswap(&aobj->u_obj, pageidx);

	/*
	 * make sure it's on a page queue.
	 */
	mutex_enter(&uvm_pageqlock);
	if (pg->wire_count == 0)
		uvm_pageenqueue(pg);
	mutex_exit(&uvm_pageqlock);

	if (pg->flags & PG_WANTED) {
		wakeup(pg);
	}
	pg->flags &= ~(PG_WANTED|PG_BUSY|PG_CLEAN|PG_FAKE);
	UVM_PAGE_OWN(pg, NULL);

	return false;
}

/*
 * uao_dropswap_range: drop swapslots in the range.
 *
 * => aobj must be locked and is returned locked.
 * => start is inclusive.  end is exclusive.
 */

void
uao_dropswap_range(struct uvm_object *uobj, voff_t start, voff_t end)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	int swpgonlydelta = 0;

	KASSERT(mutex_owned(uobj->vmobjlock));

	if (end == 0) {
		end = INT64_MAX;
	}

	if (UAO_USES_SWHASH(aobj)) {
		int i, hashbuckets = aobj->u_swhashmask + 1;
		voff_t taghi;
		voff_t taglo;

		taglo = UAO_SWHASH_ELT_TAG(start);
		taghi = UAO_SWHASH_ELT_TAG(end);

		for (i = 0; i < hashbuckets; i++) {
			struct uao_swhash_elt *elt, *next;

			for (elt = LIST_FIRST(&aobj->u_swhash[i]);
			     elt != NULL;
			     elt = next) {
				int startidx, endidx;
				int j;

				next = LIST_NEXT(elt, list);

				if (elt->tag < taglo || taghi < elt->tag) {
					continue;
				}

				if (elt->tag == taglo) {
					startidx =
					    UAO_SWHASH_ELT_PAGESLOT_IDX(start);
				} else {
					startidx = 0;
				}

				if (elt->tag == taghi) {
					endidx =
					    UAO_SWHASH_ELT_PAGESLOT_IDX(end);
				} else {
					endidx = UAO_SWHASH_CLUSTER_SIZE;
				}

				for (j = startidx; j < endidx; j++) {
					int slot = elt->slots[j];

					KASSERT(uvm_pagelookup(&aobj->u_obj,
					    (UAO_SWHASH_ELT_PAGEIDX_BASE(elt)
					    + j) << PAGE_SHIFT) == NULL);
					if (slot > 0) {
						uvm_swap_free(slot, 1);
						swpgonlydelta++;
						KASSERT(elt->count > 0);
						elt->slots[j] = 0;
						elt->count--;
					}
				}

				if (elt->count == 0) {
					LIST_REMOVE(elt, list);
					pool_put(&uao_swhash_elt_pool, elt);
				}
			}
		}
	} else {
		int i;

		if (aobj->u_pages < end) {
			end = aobj->u_pages;
		}
		for (i = start; i < end; i++) {
			int slot = aobj->u_swslots[i];

			if (slot > 0) {
				uvm_swap_free(slot, 1);
				swpgonlydelta++;
			}
		}
	}

	/*
	 * adjust the counter of pages only in swap for all
	 * the swap slots we've freed.
	 */

	if (swpgonlydelta > 0) {
		mutex_enter(&uvm_swap_data_lock);
		KASSERT(uvmexp.swpgonly >= swpgonlydelta);
		uvmexp.swpgonly -= swpgonlydelta;
		mutex_exit(&uvm_swap_data_lock);
	}
}

#endif /* defined(VMSWAP) */