/*	$NetBSD: uvm_aobj.c,v 1.56 2003/04/12 14:36:43 yamt Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
 * Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_aobj.c,v 1.1.2.5 1998/02/06 05:14:38 chs Exp
 */
/*
 * uvm_aobj.c: anonymous memory uvm_object pager
 *
 * author: Chuck Silvers <chuq@chuq.com>
 * started: Jan-1998
 *
 * - design mostly from Chuck Cranor
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_aobj.c,v 1.56 2003/04/12 14:36:43 yamt Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/pool.h>

#include <uvm/uvm.h>

/*
 * an aobj manages anonymous-memory backed uvm_objects.  in addition
 * to keeping the list of resident pages, it also keeps a list of
 * allocated swap blocks.  depending on the size of the aobj this list
 * of allocated swap blocks is either stored in an array (small objects)
 * or in a hash table (large objects).
 */

/*
 * local structures
 */

/*
 * for hash tables, we break the address space of the aobj into blocks
 * of UAO_SWHASH_CLUSTER_SIZE pages.  we require the cluster size to
 * be a power of two.
 */

#define UAO_SWHASH_CLUSTER_SHIFT 4
#define UAO_SWHASH_CLUSTER_SIZE (1 << UAO_SWHASH_CLUSTER_SHIFT)

/* get the "tag" for this page index */
#define UAO_SWHASH_ELT_TAG(PAGEIDX) \
	((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT)

/* given an ELT and a page index, find the swap slot */
#define UAO_SWHASH_ELT_PAGESLOT(ELT, PAGEIDX) \
	((ELT)->slots[(PAGEIDX) & (UAO_SWHASH_CLUSTER_SIZE - 1)])

/* given an ELT, return its pageidx base */
#define UAO_SWHASH_ELT_PAGEIDX_BASE(ELT) \
	((ELT)->tag << UAO_SWHASH_CLUSTER_SHIFT)
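
/*
 * worked example of the macros above (illustrative comment only):
 * with UAO_SWHASH_CLUSTER_SHIFT == 4, each element covers a cluster
 * of 16 pages, so for pageidx 0x23:
 *
 *	UAO_SWHASH_ELT_TAG(0x23)              == 0x23 >> 4  == 0x2
 *	0x23 & (UAO_SWHASH_CLUSTER_SIZE - 1)  == 0x23 & 0xf == 0x3
 *	UAO_SWHASH_ELT_PAGEIDX_BASE(elt)      == 0x2 << 4   == 0x20
 *
 * i.e. the slot for page 0x23 lives in slots[3] of the element whose
 * tag is 0x2, and that element covers pages 0x20-0x2f.
 */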

/*
 * the swhash hash function
 */

#define UAO_SWHASH_HASH(AOBJ, PAGEIDX) \
	(&(AOBJ)->u_swhash[(((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT) \
			    & (AOBJ)->u_swhashmask)])

/*
 * the swhash threshold determines if we will use an array or a
 * hash table to store the list of allocated swap blocks.
 */

#define UAO_SWHASH_THRESHOLD (UAO_SWHASH_CLUSTER_SIZE * 4)
#define UAO_USES_SWHASH(AOBJ) \
	((AOBJ)->u_pages > UAO_SWHASH_THRESHOLD)	/* use hash? */

/*
 * the number of buckets in a swhash, with an upper bound
 */

#define UAO_SWHASH_MAXBUCKETS 256
#define UAO_SWHASH_BUCKETS(AOBJ) \
	(MIN((AOBJ)->u_pages >> UAO_SWHASH_CLUSTER_SHIFT, \
	     UAO_SWHASH_MAXBUCKETS))
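
/*
 * sizing example for the macros above (illustrative comment only;
 * assumes 4KB pages): UAO_SWHASH_THRESHOLD is 16 * 4 = 64 pages, so
 * any aobj larger than 256KB keeps its swap slots in the hash.  a
 * 1024-page (4MB) aobj asks hashinit() for
 * MIN(1024 >> 4, 256) = 64 buckets.
 */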

/*
 * uao_swhash_elt: when a hash table is being used, this structure defines
 * the format of an entry in the bucket list.
 */

struct uao_swhash_elt {
	LIST_ENTRY(uao_swhash_elt) list;	/* the hash list */
	voff_t tag;				/* our 'tag' */
	int count;				/* our number of active slots */
	int slots[UAO_SWHASH_CLUSTER_SIZE];	/* the slots */
};

/*
 * uao_swhash: the swap hash table structure
 */

LIST_HEAD(uao_swhash, uao_swhash_elt);

/*
 * uao_swhash_elt_pool: pool of uao_swhash_elt structures
 */

struct pool uao_swhash_elt_pool;

/*
 * uvm_aobj: the actual anon-backed uvm_object
 *
 * => the uvm_object is at the top of the structure, this allows
 *    (struct uvm_aobj *) == (struct uvm_object *)
 * => only one of u_swslots and u_swhash is used in any given aobj
 */

struct uvm_aobj {
	struct uvm_object u_obj; /* has: lock, pgops, memq, #pages, #refs */
	int u_pages;		 /* number of pages in entire object */
	int u_flags;		 /* the flags (see uvm_aobj.h) */
	int *u_swslots;		 /* array of offset->swapslot mappings */
	/*
	 * hashtable of offset->swapslot mappings
	 * (u_swhash is an array of bucket heads)
	 */
	struct uao_swhash *u_swhash;
	u_long u_swhashmask;		/* mask for hashtable */
	LIST_ENTRY(uvm_aobj) u_list;	/* global list of aobjs */
};

/*
 * uvm_aobj_pool: pool of uvm_aobj structures
 */

struct pool uvm_aobj_pool;

MALLOC_DEFINE(M_UVMAOBJ, "UVM aobj", "UVM aobj and related structures");

/*
 * local functions
 */

static struct uao_swhash_elt *uao_find_swhash_elt
    __P((struct uvm_aobj *, int, boolean_t));

static void	uao_free __P((struct uvm_aobj *));
static int	uao_get __P((struct uvm_object *, voff_t, struct vm_page **,
		    int *, int, vm_prot_t, int, int));
static boolean_t uao_put __P((struct uvm_object *, voff_t, voff_t, int));
static boolean_t uao_pagein __P((struct uvm_aobj *, int, int));
static boolean_t uao_pagein_page __P((struct uvm_aobj *, int));

/*
 * aobj_pager
 *
 * note that some functions (e.g. fault) are handled elsewhere
 */

struct uvm_pagerops aobj_pager = {
	NULL,			/* init */
	uao_reference,		/* reference */
	uao_detach,		/* detach */
	NULL,			/* fault */
	uao_get,		/* get */
	uao_put,		/* flush */
};

/*
 * uao_list: global list of active aobjs, locked by uao_list_lock
 */

static LIST_HEAD(aobjlist, uvm_aobj) uao_list;
static struct simplelock uao_list_lock;

/*
 * functions
 */

/*
 * hash table/array related functions
 */

/*
 * uao_find_swhash_elt: find (or create) a hash table entry for a page
 * offset.
 *
 * => the object should be locked by the caller
 */

static struct uao_swhash_elt *
uao_find_swhash_elt(aobj, pageidx, create)
	struct uvm_aobj *aobj;
	int pageidx;
	boolean_t create;
{
	struct uao_swhash *swhash;
	struct uao_swhash_elt *elt;
	voff_t page_tag;

	swhash = UAO_SWHASH_HASH(aobj, pageidx);
	page_tag = UAO_SWHASH_ELT_TAG(pageidx);

	/*
	 * now search the bucket for the requested tag
	 */

	LIST_FOREACH(elt, swhash, list) {
		if (elt->tag == page_tag) {
			return elt;
		}
	}
	if (!create) {
		return NULL;
	}

	/*
	 * allocate a new entry for the bucket and init/insert it in
	 */

	elt = pool_get(&uao_swhash_elt_pool, PR_NOWAIT);
	if (elt == NULL) {
		return NULL;
	}
	LIST_INSERT_HEAD(swhash, elt, list);
	elt->tag = page_tag;
	elt->count = 0;
	memset(elt->slots, 0, sizeof(elt->slots));
	return elt;
}

/*
 * uao_find_swslot: find the swap slot number for an aobj/pageidx
 *
 * => object must be locked by caller
 */

int
uao_find_swslot(uobj, pageidx)
	struct uvm_object *uobj;
	int pageidx;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct uao_swhash_elt *elt;

	/*
	 * if noswap flag is set, then we never return a slot
	 */

	if (aobj->u_flags & UAO_FLAG_NOSWAP)
		return(0);

	/*
	 * if hashing, look in hash table.
	 */

	if (UAO_USES_SWHASH(aobj)) {
		elt = uao_find_swhash_elt(aobj, pageidx, FALSE);
		if (elt)
			return(UAO_SWHASH_ELT_PAGESLOT(elt, pageidx));
		else
			return(0);
	}

	/*
	 * otherwise, look in the array
	 */

	return(aobj->u_swslots[pageidx]);
}

/*
 * uao_set_swslot: set the swap slot for a page in an aobj.
 *
 * => setting a slot to zero frees the slot
 * => object must be locked by caller
 * => we return the old slot number, or -1 if we failed to allocate
 *    memory to record the new slot number
 */

int
uao_set_swslot(uobj, pageidx, slot)
	struct uvm_object *uobj;
	int pageidx, slot;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct uao_swhash_elt *elt;
	int oldslot;
	UVMHIST_FUNC("uao_set_swslot"); UVMHIST_CALLED(pdhist);
	UVMHIST_LOG(pdhist, "aobj %p pageidx %d slot %d",
	    aobj, pageidx, slot, 0);

	/*
	 * if noswap flag is set, then we can't set a non-zero slot.
	 */

	if (aobj->u_flags & UAO_FLAG_NOSWAP) {
		if (slot == 0)
			return(0);

		printf("uao_set_swslot: uobj = %p\n", uobj);
		panic("uao_set_swslot: NOSWAP object");
	}

	/*
	 * are we using a hash table?  if so, add it in the hash.
	 */

	if (UAO_USES_SWHASH(aobj)) {

		/*
		 * Avoid allocating an entry just to free it again if
		 * the page had no swap slot in the first place, and
		 * we are freeing.
		 */

		elt = uao_find_swhash_elt(aobj, pageidx, slot != 0);
		if (elt == NULL) {
			return slot ? -1 : 0;
		}

		oldslot = UAO_SWHASH_ELT_PAGESLOT(elt, pageidx);
		UAO_SWHASH_ELT_PAGESLOT(elt, pageidx) = slot;

		/*
		 * now adjust the elt's reference counter and free it if we've
		 * dropped it to zero.
		 */

		if (slot) {
			if (oldslot == 0)
				elt->count++;
		} else {
			if (oldslot)
				elt->count--;

			if (elt->count == 0) {
				LIST_REMOVE(elt, list);
				pool_put(&uao_swhash_elt_pool, elt);
			}
		}
	} else {
		/* we are using an array */
		oldslot = aobj->u_swslots[pageidx];
		aobj->u_swslots[pageidx] = slot;
	}
	return (oldslot);
}
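
/*
 * minimal caller sketch for the contract above (illustrative only, not
 * compiled; "newslot" stands for a hypothetical slot the caller just
 * obtained from the swap allocator).  a -1 return means the new slot
 * was never recorded, so the caller must not leak it.
 */
#if 0
	oldslot = uao_set_swslot(uobj, pageidx, newslot);
	if (oldslot == -1) {
		/* could not record newslot; release it to avoid a leak */
		uvm_swap_free(newslot, 1);
	} else if (oldslot) {
		/* we replaced an existing slot; free the old one */
		uvm_swap_free(oldslot, 1);
	}
#endif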

/*
 * end of hash/array functions
 */

/*
 * uao_free: free all resources held by an aobj, and then free the aobj
 *
 * => the aobj should be dead
 */

static void
uao_free(aobj)
	struct uvm_aobj *aobj;
{
	int swpgonlydelta = 0;

	simple_unlock(&aobj->u_obj.vmobjlock);
	if (UAO_USES_SWHASH(aobj)) {
		int i, hashbuckets = aobj->u_swhashmask + 1;

		/*
		 * free the swslots from each hash bucket,
		 * then the hash bucket, and finally the hash table itself.
		 */

		for (i = 0; i < hashbuckets; i++) {
			struct uao_swhash_elt *elt, *next;

			for (elt = LIST_FIRST(&aobj->u_swhash[i]);
			     elt != NULL;
			     elt = next) {
				int j;

				for (j = 0; j < UAO_SWHASH_CLUSTER_SIZE; j++) {
					int slot = elt->slots[j];

					if (slot == 0) {
						continue;
					}
					uvm_swap_free(slot, 1);
					swpgonlydelta++;
				}

				next = LIST_NEXT(elt, list);
				pool_put(&uao_swhash_elt_pool, elt);
			}
		}
		free(aobj->u_swhash, M_UVMAOBJ);
	} else {
		int i;

		/*
		 * free the array
		 */

		for (i = 0; i < aobj->u_pages; i++) {
			int slot = aobj->u_swslots[i];

			if (slot) {
				uvm_swap_free(slot, 1);
				swpgonlydelta++;
			}
		}
		free(aobj->u_swslots, M_UVMAOBJ);
	}

	/*
	 * finally free the aobj itself
	 */

	pool_put(&uvm_aobj_pool, aobj);

	/*
	 * adjust the counter of pages only in swap for all
	 * the swap slots we've freed.
	 */

	if (swpgonlydelta > 0) {
		simple_lock(&uvm.swap_data_lock);
		KASSERT(uvmexp.swpgonly >= swpgonlydelta);
		uvmexp.swpgonly -= swpgonlydelta;
		simple_unlock(&uvm.swap_data_lock);
	}
}

/*
 * pager functions
 */

/*
 * uao_create: create an aobj of the given size and return its uvm_object.
 *
 * => for normal use, flags are always zero
 * => for the kernel object, the flags are:
 *	UAO_FLAG_KERNOBJ - allocate the kernel object (can only happen once)
 *	UAO_FLAG_KERNSWAP - enable swapping of kernel object ("           ")
 */

struct uvm_object *
uao_create(size, flags)
	vsize_t size;
	int flags;
{
	static struct uvm_aobj kernel_object_store;
	static int kobj_alloced = 0;
	int pages = round_page(size) >> PAGE_SHIFT;
	struct uvm_aobj *aobj;

	/*
	 * malloc a new aobj unless we are asked for the kernel object
	 */

	if (flags & UAO_FLAG_KERNOBJ) {
		KASSERT(!kobj_alloced);
		aobj = &kernel_object_store;
		aobj->u_pages = pages;
		aobj->u_flags = UAO_FLAG_NOSWAP;
		aobj->u_obj.uo_refs = UVM_OBJ_KERN;
		kobj_alloced = UAO_FLAG_KERNOBJ;
	} else if (flags & UAO_FLAG_KERNSWAP) {
		KASSERT(kobj_alloced == UAO_FLAG_KERNOBJ);
		aobj = &kernel_object_store;
		kobj_alloced = UAO_FLAG_KERNSWAP;
	} else {
		aobj = pool_get(&uvm_aobj_pool, PR_WAITOK);
		aobj->u_pages = pages;
		aobj->u_flags = 0;
		aobj->u_obj.uo_refs = 1;
	}

	/*
	 * allocate hash/array if necessary
	 *
	 * note: in the KERNSWAP case no need to worry about locking since
	 * we are still booting and should be the only thread around.
	 */

	if (flags == 0 || (flags & UAO_FLAG_KERNSWAP) != 0) {
		int mflags = (flags & UAO_FLAG_KERNSWAP) != 0 ?
		    M_NOWAIT : M_WAITOK;

		/* allocate hash table or array depending on object size */
		if (UAO_USES_SWHASH(aobj)) {
			aobj->u_swhash = hashinit(UAO_SWHASH_BUCKETS(aobj),
			    HASH_LIST, M_UVMAOBJ, mflags, &aobj->u_swhashmask);
			if (aobj->u_swhash == NULL)
				panic("uao_create: hashinit swhash failed");
		} else {
			aobj->u_swslots = malloc(pages * sizeof(int),
			    M_UVMAOBJ, mflags);
			if (aobj->u_swslots == NULL)
				panic("uao_create: malloc swslots failed");
			memset(aobj->u_swslots, 0, pages * sizeof(int));
		}

		if (flags) {
			aobj->u_flags &= ~UAO_FLAG_NOSWAP; /* clear noswap */
			return(&aobj->u_obj);
		}
	}

	/*
	 * init aobj fields
	 */

	simple_lock_init(&aobj->u_obj.vmobjlock);
	aobj->u_obj.pgops = &aobj_pager;
	TAILQ_INIT(&aobj->u_obj.memq);
	aobj->u_obj.uo_npages = 0;

	/*
	 * now that aobj is ready, add it to the global list
	 */

	simple_lock(&uao_list_lock);
	LIST_INSERT_HEAD(&uao_list, aobj, u_list);
	simple_unlock(&uao_list_lock);
	return(&aobj->u_obj);
}
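
/*
 * a hypothetical user of uao_create()/uao_detach() (illustrative only,
 * not compiled): anonymous memory is set up by creating an aobj and
 * mapping it; mapping code takes over, or adds via uao_reference(),
 * references of its own, and the final uao_detach() frees the object
 * along with any pages and swap slots it still holds.
 */
#if 0
	struct uvm_object *uobj;

	uobj = uao_create(npages << PAGE_SHIFT, 0);	/* uo_refs == 1 */
	/* ... map it with uvm_map(), fault pages in, use the memory ... */
	uao_detach(uobj);	/* drop the last ref: frees pages and swap */
#endif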

/*
 * uao_init: set up aobj pager subsystem
 *
 * => called at boot time from uvm_pager_init()
 */

void
uao_init(void)
{
	static int uao_initialized;

	if (uao_initialized)
		return;
	uao_initialized = TRUE;
	LIST_INIT(&uao_list);
	simple_lock_init(&uao_list_lock);

	/*
	 * NOTE: Pages for this pool must not come from a pageable
	 * kernel map!
	 */

	pool_init(&uao_swhash_elt_pool, sizeof(struct uao_swhash_elt),
	    0, 0, 0, "uaoeltpl", NULL);
	pool_init(&uvm_aobj_pool, sizeof(struct uvm_aobj), 0, 0, 0,
	    "aobjpl", &pool_allocator_nointr);
}

/*
 * uao_reference: add a ref to an aobj
 *
 * => aobj must be unlocked
 * => just lock it and call the locked version
 */

void
uao_reference(uobj)
	struct uvm_object *uobj;
{
	simple_lock(&uobj->vmobjlock);
	uao_reference_locked(uobj);
	simple_unlock(&uobj->vmobjlock);
}

/*
 * uao_reference_locked: add a ref to an aobj that is already locked
 *
 * => aobj must be locked
 * this needs to be separate from the normal routine
 * since sometimes we need to add a reference to an aobj when
 * it's already locked.
 */

void
uao_reference_locked(uobj)
	struct uvm_object *uobj;
{
	UVMHIST_FUNC("uao_reference"); UVMHIST_CALLED(maphist);

	/*
	 * kernel_object already has plenty of references, leave it alone.
	 */

	if (UVM_OBJ_IS_KERN_OBJECT(uobj))
		return;

	uobj->uo_refs++;
	UVMHIST_LOG(maphist, "<- done (uobj=0x%x, ref = %d)",
	    uobj, uobj->uo_refs,0,0);
}

/*
 * uao_detach: drop a reference to an aobj
 *
 * => aobj must be unlocked
 * => just lock it and call the locked version
 */

void
uao_detach(uobj)
	struct uvm_object *uobj;
{
	simple_lock(&uobj->vmobjlock);
	uao_detach_locked(uobj);
}

/*
 * uao_detach_locked: drop a reference to an aobj
 *
 * => aobj must be locked, and is unlocked (or freed) upon return.
 * this needs to be separate from the normal routine
 * since sometimes we need to detach from an aobj when
 * it's already locked.
 */

void
uao_detach_locked(uobj)
	struct uvm_object *uobj;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct vm_page *pg;
	UVMHIST_FUNC("uao_detach"); UVMHIST_CALLED(maphist);

	/*
	 * detaching from kernel_object is a noop.
	 */

	if (UVM_OBJ_IS_KERN_OBJECT(uobj)) {
		simple_unlock(&uobj->vmobjlock);
		return;
	}

	UVMHIST_LOG(maphist,"  (uobj=0x%x)  ref=%d", uobj,uobj->uo_refs,0,0);
	uobj->uo_refs--;
	if (uobj->uo_refs) {
		simple_unlock(&uobj->vmobjlock);
		UVMHIST_LOG(maphist, "<- done (rc>0)", 0,0,0,0);
		return;
	}

	/*
	 * remove the aobj from the global list.
	 */

	simple_lock(&uao_list_lock);
	LIST_REMOVE(aobj, u_list);
	simple_unlock(&uao_list_lock);

	/*
	 * free all the pages left in the aobj.  for each page,
	 * when the page is no longer busy (and thus after any disk i/o that
	 * it's involved in is complete), release any swap resources and
	 * free the page itself.
	 */

	uvm_lock_pageq();
	while ((pg = TAILQ_FIRST(&uobj->memq)) != NULL) {
		pmap_page_protect(pg, VM_PROT_NONE);
		if (pg->flags & PG_BUSY) {
			pg->flags |= PG_WANTED;
			uvm_unlock_pageq();
			UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, FALSE,
			    "uao_det", 0);
			simple_lock(&uobj->vmobjlock);
			uvm_lock_pageq();
			continue;
		}
		uao_dropswap(&aobj->u_obj, pg->offset >> PAGE_SHIFT);
		uvm_pagefree(pg);
	}
	uvm_unlock_pageq();

	/*
	 * finally, free the aobj itself.
	 */

	uao_free(aobj);
}

/*
 * uao_put: flush pages out of a uvm object
 *
 * => object should be locked by caller.  we may _unlock_ the object
 *	if (and only if) we need to clean a page (PGO_CLEANIT).
 *	XXXJRT Currently, however, we don't.  In the case of cleaning
 *	XXXJRT a page, we simply just deactivate it.  Should probably
 *	XXXJRT handle this better, in the future (although "flushing"
 *	XXXJRT anonymous memory isn't terribly important).
 * => if PGO_CLEANIT is not set, then we will neither unlock the object
 *	nor block.
 * => if PGO_ALLPAGES is set, then all pages in the object are valid targets
 *	for flushing.
 * => NOTE: we rely on the fact that the object's memq is a TAILQ and
 *	that new pages are inserted on the tail end of the list.  thus,
 *	we can make a complete pass through the object in one go by starting
 *	at the head and working towards the tail (new pages are put in
 *	front of us).
 * => NOTE: we are allowed to lock the page queues, so the caller
 *	must not be holding the lock on them [e.g. pagedaemon had
 *	better not call us with the queues locked]
 * => we return 0 unless we encountered some sort of I/O error
 *	XXXJRT currently never happens, as we never directly initiate
 *	XXXJRT I/O
 *
 * note on page traversal:
 *	we can traverse the pages in an object either by going down the
 *	linked list in "uobj->memq", or we can go over the address range
 *	page by page, doing hash table lookups for each address.  depending
 *	on how many pages are in the object it may be cheaper to do one
 *	or the other.  we set "by_list" to true if we are using memq.
 *	if the cost of a hash lookup was equal to the cost of the list
 *	traversal we could compare the number of pages in the start->stop
 *	range to the total number of pages in the object.  however, it
 *	seems that a hash table lookup is more expensive than the linked
 *	list traversal, so we multiply the number of pages in the
 *	start->stop range by a penalty which we define below.
 */

int
uao_put(uobj, start, stop, flags)
	struct uvm_object *uobj;
	voff_t start, stop;
	int flags;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct vm_page *pg, *nextpg, curmp, endmp;
	boolean_t by_list;
	voff_t curoff;
	UVMHIST_FUNC("uao_put"); UVMHIST_CALLED(maphist);

	curoff = 0;
	if (flags & PGO_ALLPAGES) {
		start = 0;
		stop = aobj->u_pages << PAGE_SHIFT;
		by_list = TRUE;		/* always go by the list */
	} else {
		start = trunc_page(start);
		stop = round_page(stop);
		if (stop > (aobj->u_pages << PAGE_SHIFT)) {
			printf("uao_put: strange, got an out of range "
			    "flush (fixed)\n");
			stop = aobj->u_pages << PAGE_SHIFT;
		}
		by_list = (uobj->uo_npages <=
		    ((stop - start) >> PAGE_SHIFT) * UVM_PAGE_HASH_PENALTY);
	}
	UVMHIST_LOG(maphist,
	    " flush start=0x%lx, stop=0x%x, by_list=%d, flags=0x%x",
	    start, stop, by_list, flags);

	/*
	 * Don't need to do any work here if we're not freeing
	 * or deactivating pages.
	 */

	if ((flags & (PGO_DEACTIVATE|PGO_FREE)) == 0) {
		simple_unlock(&uobj->vmobjlock);
		return 0;
	}

	/*
	 * Initialize the marker pages.  See the comment in
	 * genfs_putpages() also.
	 */

	curmp.uobject = uobj;
	curmp.offset = (voff_t)-1;
	curmp.flags = PG_BUSY;
	endmp.uobject = uobj;
	endmp.offset = (voff_t)-1;
	endmp.flags = PG_BUSY;

	/*
	 * now do it.  note: we must update nextpg in the body of loop or we
	 * will get stuck.  we need to use nextpg if we'll traverse the list
	 * because we may free "pg" before doing the next loop.
	 */

	if (by_list) {
		TAILQ_INSERT_TAIL(&uobj->memq, &endmp, listq);
		nextpg = TAILQ_FIRST(&uobj->memq);
		PHOLD(curlwp);
	} else {
		curoff = start;
		nextpg = NULL;	/* Quell compiler warning */
	}

	uvm_lock_pageq();

	/* locked: both page queues and uobj */
	for (;;) {
		if (by_list) {
			pg = nextpg;
			if (pg == &endmp)
				break;
			nextpg = TAILQ_NEXT(pg, listq);
			if (pg->offset < start || pg->offset >= stop)
				continue;
		} else {
			if (curoff < stop) {
				pg = uvm_pagelookup(uobj, curoff);
				curoff += PAGE_SIZE;
			} else
				break;
			if (pg == NULL)
				continue;
		}
		switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) {

		/*
		 * XXX In these first 3 cases, we always just
		 * XXX deactivate the page.  We may want to
		 * XXX handle the different cases more specifically
		 * XXX in the future.
		 */

		case PGO_CLEANIT|PGO_FREE:
		case PGO_CLEANIT|PGO_DEACTIVATE:
		case PGO_DEACTIVATE:
 deactivate_it:
			/* skip the page if it's loaned or wired */
			if (pg->loan_count != 0 || pg->wire_count != 0)
				continue;

			/* ...and deactivate the page. */
			pmap_clear_reference(pg);
			uvm_pagedeactivate(pg);
			continue;

		case PGO_FREE:

			/*
			 * If there are multiple references to
			 * the object, just deactivate the page.
			 */

			if (uobj->uo_refs > 1)
				goto deactivate_it;

			/* XXX skip the page if it's loaned or wired */
			if (pg->loan_count != 0 || pg->wire_count != 0)
				continue;

			/*
			 * wait and try again if the page is busy.
			 * otherwise free the swap slot and the page.
			 */

			pmap_page_protect(pg, VM_PROT_NONE);
			if (pg->flags & PG_BUSY) {
				if (by_list) {
					TAILQ_INSERT_BEFORE(pg, &curmp, listq);
				}
				pg->flags |= PG_WANTED;
				uvm_unlock_pageq();
				UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, 0,
				    "uao_put", 0);
				simple_lock(&uobj->vmobjlock);
				uvm_lock_pageq();
				if (by_list) {
					nextpg = TAILQ_NEXT(&curmp, listq);
					TAILQ_REMOVE(&uobj->memq, &curmp,
					    listq);
				} else
					curoff -= PAGE_SIZE;
				continue;
			}
			uao_dropswap(uobj, pg->offset >> PAGE_SHIFT);
			uvm_pagefree(pg);
			continue;
		}
	}
	uvm_unlock_pageq();
	if (by_list) {
		TAILQ_REMOVE(&uobj->memq, &endmp, listq);
		PRELE(curlwp);
	}
	simple_unlock(&uobj->vmobjlock);
	return 0;
}

/*
 * uao_get: fetch me a page
 *
 * we have three cases:
 * 1: page is resident     -> just return the page.
 * 2: page is zero-fill    -> allocate a new page and zero it.
 * 3: page is swapped out  -> fetch the page from swap.
 *
 * cases 1 and 2 can be handled with PGO_LOCKED, case 3 cannot.
 * so, if the "center" page hits case 3 (or any page, with PGO_ALLPAGES),
 * then we will need to return EBUSY.
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_ALLPAGES: get all of the pages
 *	     PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */

static int
uao_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags)
	struct uvm_object *uobj;
	voff_t offset;
	struct vm_page **pps;
	int *npagesp;
	int centeridx, advice, flags;
	vm_prot_t access_type;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	voff_t current_offset;
	struct vm_page *ptmp = NULL;	/* Quell compiler warning */
	int lcv, gotpages, maxpages, swslot, error, pageidx;
	boolean_t done;
	UVMHIST_FUNC("uao_get"); UVMHIST_CALLED(pdhist);

	UVMHIST_LOG(pdhist, "aobj=%p offset=%d, flags=%d",
	    aobj, offset, flags,0);

	/*
	 * get number of pages
	 */

	maxpages = *npagesp;

	/*
	 * step 1: handle the case where fault data structures are locked.
	 */

	if (flags & PGO_LOCKED) {

		/*
		 * step 1a: get pages that are already resident.  only do
		 * this if the data structures are locked (i.e. the first
		 * time through).
		 */

		done = TRUE;	/* be optimistic */
		gotpages = 0;	/* # of pages we got so far */
		for (lcv = 0, current_offset = offset ; lcv < maxpages ;
		    lcv++, current_offset += PAGE_SIZE) {
			/* do we care about this page?  if not, skip it */
			if (pps[lcv] == PGO_DONTCARE)
				continue;
			ptmp = uvm_pagelookup(uobj, current_offset);

			/*
			 * if page is new, attempt to allocate the page,
			 * zero-fill'd.
			 */

			if (ptmp == NULL && uao_find_swslot(&aobj->u_obj,
			    current_offset >> PAGE_SHIFT) == 0) {
				ptmp = uvm_pagealloc(uobj, current_offset,
				    NULL, UVM_PGA_ZERO);
				if (ptmp) {
					/* new page */
					ptmp->flags &= ~(PG_FAKE);
					ptmp->pqflags |= PQ_AOBJ;
					goto gotpage;
				}
			}

			/*
			 * to be useful must get a non-busy page
			 */

			if (ptmp == NULL || (ptmp->flags & PG_BUSY) != 0) {
				if (lcv == centeridx ||
				    (flags & PGO_ALLPAGES) != 0)
					/* need to do a wait or I/O! */
					done = FALSE;
				continue;
			}

			/*
			 * useful page: busy/lock it and plug it in our
			 * result array
			 */

			/* caller must un-busy this page */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uao_get1");
 gotpage:
			pps[lcv] = ptmp;
			gotpages++;
		}

		/*
		 * step 1b: now we've either done everything needed or we
		 * need to unlock and do some waiting or I/O.
		 */

		UVMHIST_LOG(pdhist, "<- done (done=%d)", done, 0,0,0);
		*npagesp = gotpages;
		if (done)
			return 0;
		else
			return EBUSY;
	}

	/*
	 * step 2: get non-resident or busy pages.
	 * object is locked.  data structures are unlocked.
	 */

	for (lcv = 0, current_offset = offset ; lcv < maxpages ;
	    lcv++, current_offset += PAGE_SIZE) {

		/*
		 * - skip over pages we've already gotten or don't want
		 * - skip over pages we don't _have_ to get
		 */

		if (pps[lcv] != NULL ||
		    (lcv != centeridx && (flags & PGO_ALLPAGES) == 0))
			continue;

		pageidx = current_offset >> PAGE_SHIFT;

		/*
		 * we have yet to locate the current page (pps[lcv]).  we
		 * first look for a page that is already at the current offset.
		 * if we find a page, we check to see if it is busy or
		 * released.  if that is the case, then we sleep on the page
		 * until it is no longer busy or released and repeat the lookup.
		 * if the page we found is neither busy nor released, then we
		 * busy it (so we own it) and plug it into pps[lcv].  this
		 * 'break's the following while loop and indicates we are
		 * ready to move on to the next page in the "lcv" loop above.
		 *
		 * if we exit the while loop with pps[lcv] still set to NULL,
		 * then it means that we allocated a new busy/fake/clean page
		 * ptmp in the object and we need to do I/O to fill in the data.
		 */

		/* top of "pps" while loop */
		while (pps[lcv] == NULL) {
			/* look for a resident page */
			ptmp = uvm_pagelookup(uobj, current_offset);

			/* not resident?  allocate one now (if we can) */
			if (ptmp == NULL) {

				ptmp = uvm_pagealloc(uobj, current_offset,
				    NULL, 0);

				/* out of RAM? */
				if (ptmp == NULL) {
					simple_unlock(&uobj->vmobjlock);
					UVMHIST_LOG(pdhist,
					    "sleeping, ptmp == NULL\n",0,0,0,0);
					uvm_wait("uao_getpage");
					simple_lock(&uobj->vmobjlock);
					continue;
				}

				/*
				 * safe with PQ's unlocked: because we just
				 * alloc'd the page
				 */

				ptmp->pqflags |= PQ_AOBJ;

				/*
				 * got new page ready for I/O.  break pps while
				 * loop.  pps[lcv] is still NULL.
				 */

				break;
			}

			/* page is there, see if we need to wait on it */
			if ((ptmp->flags & PG_BUSY) != 0) {
				ptmp->flags |= PG_WANTED;
				UVMHIST_LOG(pdhist,
				    "sleeping, ptmp->flags 0x%x\n",
				    ptmp->flags,0,0,0);
				UVM_UNLOCK_AND_WAIT(ptmp, &uobj->vmobjlock,
				    FALSE, "uao_get", 0);
				simple_lock(&uobj->vmobjlock);
				continue;
			}

			/*
			 * if we get here then the page has become resident and
			 * unbusy between steps 1 and 2.  we busy it now (so we
			 * own it) and set pps[lcv] (so that we exit the while
			 * loop).
			 */

			/* we own it, caller must un-busy */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uao_get2");
			pps[lcv] = ptmp;
		}

		/*
		 * if we own the valid page at the correct offset, pps[lcv] will
		 * point to it.  nothing more to do except go to the next page.
		 */

		if (pps[lcv])
			continue;			/* next lcv */

		/*
		 * we have a "fake/busy/clean" page that we just allocated.
		 * do the needed "i/o", either reading from swap or zeroing.
		 */

		swslot = uao_find_swslot(&aobj->u_obj, pageidx);

		/*
		 * just zero the page if there's nothing in swap.
		 */

		if (swslot == 0) {

			/*
			 * page hasn't existed before, just zero it.
			 */

			uvm_pagezero(ptmp);
		} else {
			UVMHIST_LOG(pdhist, "pagein from swslot %d",
			    swslot, 0,0,0);

			/*
			 * page in the swapped-out page.
			 * unlock object for i/o, relock when done.
			 */

			simple_unlock(&uobj->vmobjlock);
			error = uvm_swap_get(ptmp, swslot, PGO_SYNCIO);
			simple_lock(&uobj->vmobjlock);

			/*
			 * I/O done.  check for errors.
			 */

			if (error != 0) {
				UVMHIST_LOG(pdhist, "<- done (error=%d)",
				    error,0,0,0);
				if (ptmp->flags & PG_WANTED)
					wakeup(ptmp);

				/*
				 * remove the swap slot from the aobj
				 * and mark the aobj as having no real slot.
				 * don't free the swap slot, thus preventing
				 * it from being used again.
				 */

				swslot = uao_set_swslot(&aobj->u_obj, pageidx,
				    SWSLOT_BAD);
				if (swslot != -1) {
					uvm_swap_markbad(swslot, 1);
				}

				uvm_lock_pageq();
				uvm_pagefree(ptmp);
				uvm_unlock_pageq();
				simple_unlock(&uobj->vmobjlock);
				return error;
			}
		}

		/*
		 * we got the page!  clear the fake flag (indicates valid
		 * data now in page) and plug into our result array.  note
		 * that page is still busy.
		 *
		 * it is the caller's job to:
		 * => check if the page is released
		 * => unbusy the page
		 * => activate the page
		 */

		ptmp->flags &= ~PG_FAKE;
		pps[lcv] = ptmp;
	}

	/*
	 * finally, unlock object and return.
	 */

	simple_unlock(&uobj->vmobjlock);
	UVMHIST_LOG(pdhist, "<- done (OK)",0,0,0,0);
	return 0;
}

/*
 * uao_dropswap: release any swap resources from this aobj page.
 *
 * => aobj must be locked or have a reference count of 0.
 */

void
uao_dropswap(uobj, pageidx)
	struct uvm_object *uobj;
	int pageidx;
{
	int slot;

	slot = uao_set_swslot(uobj, pageidx, 0);
	if (slot) {
		uvm_swap_free(slot, 1);
	}
}

/*
 * page in every page in every aobj that is paged-out to a range of swslots.
 *
 * => nothing should be locked.
 * => returns TRUE if pagein was aborted due to lack of memory.
 */

boolean_t
uao_swap_off(startslot, endslot)
	int startslot, endslot;
{
	struct uvm_aobj *aobj, *nextaobj;
	boolean_t rv;

	/*
	 * walk the list of all aobjs.
	 */

restart:
	simple_lock(&uao_list_lock);
	for (aobj = LIST_FIRST(&uao_list);
	     aobj != NULL;
	     aobj = nextaobj) {

		/*
		 * try to get the object lock, start all over if we fail.
		 * most of the time we'll get the aobj lock,
		 * so this should be a rare case.
		 */

		if (!simple_lock_try(&aobj->u_obj.vmobjlock)) {
			simple_unlock(&uao_list_lock);
			goto restart;
		}

		/*
		 * add a ref to the aobj so it doesn't disappear
		 * while we're working.
		 */

		uao_reference_locked(&aobj->u_obj);

		/*
		 * now it's safe to unlock the uao list.
		 */

		simple_unlock(&uao_list_lock);

		/*
		 * page in any pages in the swslot range.
		 * if there's an error, abort and return the error.
		 */

		rv = uao_pagein(aobj, startslot, endslot);
		if (rv) {
			uao_detach_locked(&aobj->u_obj);
			return rv;
		}

		/*
		 * we're done with this aobj.
		 * relock the list and drop our ref on the aobj.
		 */

		simple_lock(&uao_list_lock);
		nextaobj = LIST_NEXT(aobj, u_list);
		uao_detach_locked(&aobj->u_obj);
	}

	/*
	 * done with traversal, unlock the list
	 */
	simple_unlock(&uao_list_lock);
	return FALSE;
}

/*
 * page in any pages from aobj in the given range.
 *
 * => aobj must be locked and is returned locked.
 * => returns TRUE if pagein was aborted due to lack of memory.
 */
static boolean_t
uao_pagein(aobj, startslot, endslot)
	struct uvm_aobj *aobj;
	int startslot, endslot;
{
	boolean_t rv;

	if (UAO_USES_SWHASH(aobj)) {
		struct uao_swhash_elt *elt;
		int bucket;

restart:
		for (bucket = aobj->u_swhashmask; bucket >= 0; bucket--) {
			for (elt = LIST_FIRST(&aobj->u_swhash[bucket]);
			     elt != NULL;
			     elt = LIST_NEXT(elt, list)) {
				int i;

				for (i = 0; i < UAO_SWHASH_CLUSTER_SIZE; i++) {
					int slot = elt->slots[i];

					/*
					 * if the slot isn't in range, skip it.
					 */

					if (slot < startslot ||
					    slot >= endslot) {
						continue;
					}

					/*
					 * process the page,
					 * then start over on this object
					 * since the swhash elt
					 * may have been freed.
					 */

					rv = uao_pagein_page(aobj,
					    UAO_SWHASH_ELT_PAGEIDX_BASE(elt)
					    + i);
					if (rv) {
						return rv;
					}
					goto restart;
				}
			}
		}
	} else {
		int i;

		for (i = 0; i < aobj->u_pages; i++) {
			int slot = aobj->u_swslots[i];

			/*
			 * if the slot isn't in range, skip it
			 */

			if (slot < startslot || slot >= endslot) {
				continue;
			}

			/*
			 * process the page.
			 */

			rv = uao_pagein_page(aobj, i);
			if (rv) {
				return rv;
			}
		}
	}

	return FALSE;
}

/*
 * page in a page from an aobj.  used for swap_off.
 * returns TRUE if pagein was aborted due to lack of memory.
 *
 * => aobj must be locked and is returned locked.
 */

static boolean_t
uao_pagein_page(aobj, pageidx)
	struct uvm_aobj *aobj;
	int pageidx;
{
	struct vm_page *pg;
	int rv, slot, npages;

	pg = NULL;
	npages = 1;
	/* locked: aobj */
	rv = uao_get(&aobj->u_obj, pageidx << PAGE_SHIFT,
	    &pg, &npages, 0, VM_PROT_READ|VM_PROT_WRITE, 0, 0);
	/* unlocked: aobj */

	/*
	 * relock and finish up.
	 */

	simple_lock(&aobj->u_obj.vmobjlock);
	switch (rv) {
	case 0:
		break;

	case EIO:
	case ERESTART:

		/*
		 * nothing more to do on errors.
		 * ERESTART can only mean that the anon was freed,
		 * so again there's nothing to do.
		 */

		return FALSE;
	}

	/*
	 * ok, we've got the page now.
	 * mark it as dirty, clear its swslot and un-busy it.
	 */

	slot = uao_set_swslot(&aobj->u_obj, pageidx, 0);
	uvm_swap_free(slot, 1);

	/*
	 * deactivate the page (to make sure it's on a page queue).
	 */

	uvm_lock_pageq();
	uvm_pagedeactivate(pg);
	uvm_unlock_pageq();

	pg->flags &= ~(PG_BUSY|PG_CLEAN|PG_FAKE);
	UVM_PAGE_OWN(pg, NULL);

	return FALSE;
}