1 /*	$NetBSD: uvm_swap.c,v 1.46.2.12 2002/08/01 02:47:09 nathanw Exp $	*/
2
3 /*
4 * Copyright (c) 1995, 1996, 1997 Matthew R. Green
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
23 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
24 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
25 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
26 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 *
30 * from: NetBSD: vm_swap.c,v 1.52 1997/12/02 13:47:37 pk Exp
31 * from: Id: uvm_swap.c,v 1.1.2.42 1998/02/02 20:38:06 chuck Exp
32 */
33
34 #include <sys/cdefs.h>
35 __KERNEL_RCSID(0, "$NetBSD: uvm_swap.c,v 1.46.2.12 2002/08/01 02:47:09 nathanw Exp $");
36
37 #include "fs_nfs.h"
38 #include "opt_uvmhist.h"
39 #include "opt_compat_netbsd.h"
40 #include "opt_ddb.h"
41
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/buf.h>
45 #include <sys/conf.h>
46 #include <sys/proc.h>
47 #include <sys/namei.h>
48 #include <sys/disklabel.h>
49 #include <sys/errno.h>
50 #include <sys/kernel.h>
51 #include <sys/malloc.h>
52 #include <sys/vnode.h>
53 #include <sys/file.h>
54 #include <sys/extent.h>
55 #include <sys/mount.h>
56 #include <sys/pool.h>
57 #include <sys/sa.h>
58 #include <sys/syscallargs.h>
59 #include <sys/swap.h>
60
61 #include <uvm/uvm.h>
62
63 #include <miscfs/specfs/specdev.h>
64
65 /*
66 * uvm_swap.c: manage configuration and i/o to swap space.
67 */
68
69 /*
70 * swap space is managed in the following way:
71 *
72 * each swap partition or file is described by a "swapdev" structure.
73 * each "swapdev" structure contains a "swapent" structure which contains
74 * information that is passed up to the user (via system calls).
75 *
76 * each swap partition is assigned a "priority" (int) which controls
77 * swap partition usage.
78 *
79 * the system maintains a global data structure describing all swap
80 * partitions/files. there is a sorted LIST of "swappri" structures
81 * which describe "swapdev"'s at that priority. this LIST is headed
82 * by the "swap_priority" global var. each "swappri" contains a
83 * CIRCLEQ of "swapdev" structures at that priority.
84 *
85 * locking:
86 * - swap_syscall_lock (sleep lock): this lock serializes the swapctl
87 * system call and prevents the swap priority list from changing
88 * while we are in the middle of a system call (e.g. SWAP_STATS).
89 * - uvm.swap_data_lock (simple_lock): this lock protects all swap data
90 * structures including the priority list, the swapdev structures,
91 * and the swapmap extent.
92 *
93 * each swap device has the following info:
94 * - swap device in use (could be disabled, preventing future use)
95 * - swap enabled (allows new allocations on swap)
96 * - map info in /dev/drum
97 * - vnode pointer
98 * for swap files only:
99 * - block size
100 * - max number of active i/o requests
101 * - queue of pending i/o buffers
102 *
103 * userland controls and configures swap with the swapctl(2) system call.
104 * the sys_swapctl performs the following operations:
105 * [1] SWAP_NSWAP: returns the number of swap devices currently configured
106 * [2] SWAP_STATS: given a pointer to an array of swapent structures
107 * (passed in via "arg") of a size passed in via "misc" ... we load
108 * the current swap config into the array. The actual work is done
109 * in the uvm_swap_stats(9) function.
110 * [3] SWAP_ON: given a pathname in arg (could be device or file) and a
111 * priority in "misc", start swapping on it.
112 * [4] SWAP_OFF: as SWAP_ON, but stops swapping to a device
113 * [5] SWAP_CTL: changes the priority of a swap device (new priority in
114 * "misc")
115 */
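/*
 * illustrative sketch only (not part of this file): from userland the
 * SWAP_ON case above might be exercised roughly as follows, assuming
 * the documented swapctl(2) prototype; the device path and priority
 * below are made-up example values.
 *
 *	#include <unistd.h>
 *	#include <sys/swap.h>
 *	#include <err.h>
 *
 *	if (swapctl(SWAP_ON, "/dev/wd0b", 0) == -1)
 *		err(1, "swapctl(SWAP_ON)");
 */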
116
117 /*
118 * swapdev: describes a single swap partition/file
119 *
120 * note the following should be true:
121 * swd_inuse <= swd_nblks [number of blocks in use is <= total blocks]
122 * swd_nblks <= swd_mapsize [because mapsize includes miniroot+disklabel]
123 */
124 struct swapdev {
125 struct oswapent swd_ose;
126 #define swd_dev swd_ose.ose_dev /* device id */
127 #define swd_flags swd_ose.ose_flags /* flags:inuse/enable/fake */
128 #define swd_priority swd_ose.ose_priority /* our priority */
129 /* also: swd_ose.ose_nblks, swd_ose.ose_inuse */
130 char *swd_path; /* saved pathname of device */
131 int swd_pathlen; /* length of pathname */
132 int swd_npages; /* #pages we can use */
133 int swd_npginuse; /* #pages in use */
134 int swd_npgbad; /* #pages bad */
135 int swd_drumoffset; /* page0 offset in drum */
136 int swd_drumsize; /* #pages in drum */
137 struct extent *swd_ex; /* extent for this swapdev */
138 char swd_exname[12]; /* name of extent above */
139 struct vnode *swd_vp; /* backing vnode */
140 CIRCLEQ_ENTRY(swapdev) swd_next; /* priority circleq */
141
142 int swd_bsize; /* blocksize (bytes) */
143 int swd_maxactive; /* max active i/o reqs */
144 struct bufq_state swd_tab; /* buffer list */
145 int swd_active; /* number of active buffers */
146 };
147
148 /*
149 * swap device priority entry; the list is kept sorted on `spi_priority'.
150 */
151 struct swappri {
152 int spi_priority; /* priority */
153 CIRCLEQ_HEAD(spi_swapdev, swapdev) spi_swapdev;
154 /* circleq of swapdevs at this priority */
155 LIST_ENTRY(swappri) spi_swappri; /* global list of pri's */
156 };
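/*
 * taken together, the global swap configuration looks roughly like
 * this (priorities shown are illustrative only):
 *
 *	swap_priority (LIST, sorted by spi_priority)
 *	  -> swappri(0)  -> CIRCLEQ: swapdev -> swapdev -> ...
 *	  -> swappri(10) -> CIRCLEQ: swapdev -> ...
 */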
157
158 /*
159 * The following two structures are used to keep track of data transfers
160 * on swap devices associated with regular files.
161 * NOTE: this code is more or less a copy of vnd.c; we use the same
162 * structure names here to ease porting.
163 */
164 struct vndxfer {
165 struct buf *vx_bp; /* Pointer to parent buffer */
166 struct swapdev *vx_sdp;
167 int vx_error;
168 int vx_pending; /* # of pending aux buffers */
169 int vx_flags;
170 #define VX_BUSY 1
171 #define VX_DEAD 2
172 };
173
174 struct vndbuf {
175 struct buf vb_buf;
176 struct vndxfer *vb_xfer;
177 };
178
179
180 /*
181 * We keep a pool of vndbuf's and vndxfer structures.
182 */
183 static struct pool vndxfer_pool;
184 static struct pool vndbuf_pool;
185
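/*
 * note: the pool_get() calls below are bracketed by splbio() because
 * the same pools are also released from the i/o completion path
 * (sw_reg_iodone), which runs at interrupt time.
 */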
186 #define getvndxfer(vnx) do { \
187 int s = splbio(); \
188 vnx = pool_get(&vndxfer_pool, PR_WAITOK); \
189 splx(s); \
190 } while (0)
191
192 #define putvndxfer(vnx) { \
193 pool_put(&vndxfer_pool, (void *)(vnx)); \
194 }
195
196 #define getvndbuf(vbp) do { \
197 int s = splbio(); \
198 vbp = pool_get(&vndbuf_pool, PR_WAITOK); \
199 splx(s); \
200 } while (0)
201
202 #define putvndbuf(vbp) { \
203 pool_put(&vndbuf_pool, (void *)(vbp)); \
204 }
205
206 /* /dev/drum */
207 bdev_decl(sw);
208 cdev_decl(sw);
209
210 /*
211 * local variables
212 */
213 static struct extent *swapmap; /* controls the mapping of /dev/drum */
214
215 /* list of all active swap devices [by priority] */
216 LIST_HEAD(swap_priority, swappri);
217 static struct swap_priority swap_priority;
218
219 /* locks */
220 struct lock swap_syscall_lock;
221
222 /*
223 * prototypes
224 */
225 static struct swapdev *swapdrum_getsdp __P((int));
226
227 static struct swapdev *swaplist_find __P((struct vnode *, int));
228 static void swaplist_insert __P((struct swapdev *,
229 struct swappri *, int));
230 static void swaplist_trim __P((void));
231
232 static int swap_on __P((struct proc *, struct swapdev *));
233 static int swap_off __P((struct proc *, struct swapdev *));
234
235 static void sw_reg_strategy __P((struct swapdev *, struct buf *, int));
236 static void sw_reg_iodone __P((struct buf *));
237 static void sw_reg_start __P((struct swapdev *));
238
239 static int uvm_swap_io __P((struct vm_page **, int, int, int));
240
241 /*
242 * uvm_swap_init: init the swap system data structures and locks
243 *
244 * => called at boot time from init_main.c after the filesystems
245 * are brought up (which happens after uvm_init())
246 */
247 void
248 uvm_swap_init()
249 {
250 UVMHIST_FUNC("uvm_swap_init");
251
252 UVMHIST_CALLED(pdhist);
253 /*
254 * first, init the swap list, its counter, and its lock.
255 * then get a handle on the vnode for /dev/drum by using
256 * its dev_t number ("swapdev", from MD conf.c).
257 */
258
259 LIST_INIT(&swap_priority);
260 uvmexp.nswapdev = 0;
261 lockinit(&swap_syscall_lock, PVM, "swapsys", 0, 0);
262 simple_lock_init(&uvm.swap_data_lock);
263
264 if (bdevvp(swapdev, &swapdev_vp))
265 panic("uvm_swap_init: can't get vnode for swap device");
266
267 /*
268 * create swap block resource map to map /dev/drum. the range
269 * from 1 to INT_MAX allows 2 gigablocks of swap space. note
270 * that block 0 is reserved (used to indicate an allocation
271 * failure, or no allocation).
272 */
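	/*
	 * (for scale: with 4kb pages those ~2^31 slots would address
	 * about 8tb of drum space; usable swap is of course limited by
	 * the devices actually configured.)
	 */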
273 swapmap = extent_create("swapmap", 1, INT_MAX,
274 M_VMSWAP, 0, 0, EX_NOWAIT);
275 if (swapmap == 0)
276 panic("uvm_swap_init: extent_create failed");
277
278 /*
279 * allocate pools for structures used for swapping to files.
280 */
281
282 pool_init(&vndxfer_pool, sizeof(struct vndxfer), 0, 0, 0,
283 "swp vnx", NULL);
284
285 pool_init(&vndbuf_pool, sizeof(struct vndbuf), 0, 0, 0,
286 "swp vnd", NULL);
287
288 /*
289 * done!
290 */
291 UVMHIST_LOG(pdhist, "<- done", 0, 0, 0, 0);
292 }
293
294 /*
295 * swaplist functions: functions that operate on the list of swap
296 * devices on the system.
297 */
298
299 /*
300 * swaplist_insert: insert swap device "sdp" into the global list
301 *
302 * => caller must hold both swap_syscall_lock and uvm.swap_data_lock
303 * => caller must provide a newly malloc'd swappri structure (we will
304 * FREE it if we don't need it... this is to prevent malloc blocking
305 * here while adding swap)
306 */
307 static void
308 swaplist_insert(sdp, newspp, priority)
309 struct swapdev *sdp;
310 struct swappri *newspp;
311 int priority;
312 {
313 struct swappri *spp, *pspp;
314 UVMHIST_FUNC("swaplist_insert"); UVMHIST_CALLED(pdhist);
315
316 /*
317 * find entry at or after which to insert the new device.
318 */
319 pspp = NULL;
320 LIST_FOREACH(spp, &swap_priority, spi_swappri) {
321 if (priority <= spp->spi_priority)
322 break;
323 pspp = spp;
324 }
325
326 /*
327 * new priority?
328 */
329 if (spp == NULL || spp->spi_priority != priority) {
330 spp = newspp; /* use newspp! */
331 UVMHIST_LOG(pdhist, "created new swappri = %d",
332 priority, 0, 0, 0);
333
334 spp->spi_priority = priority;
335 CIRCLEQ_INIT(&spp->spi_swapdev);
336
337 if (pspp)
338 LIST_INSERT_AFTER(pspp, spp, spi_swappri);
339 else
340 LIST_INSERT_HEAD(&swap_priority, spp, spi_swappri);
341 } else {
342 /* we don't need a new priority structure, free it */
343 FREE(newspp, M_VMSWAP);
344 }
345
346 /*
347 * priority found (or created). now insert on the priority's
348 * circleq list and bump the total number of swapdevs.
349 */
350 sdp->swd_priority = priority;
351 CIRCLEQ_INSERT_TAIL(&spp->spi_swapdev, sdp, swd_next);
352 uvmexp.nswapdev++;
353 }
354
355 /*
356 * swaplist_find: find and optionally remove a swap device from the
357 * global list.
358 *
359 * => caller must hold both swap_syscall_lock and uvm.swap_data_lock
360 * => we return the swapdev we found (and removed)
361 */
362 static struct swapdev *
363 swaplist_find(vp, remove)
364 struct vnode *vp;
365 boolean_t remove;
366 {
367 struct swapdev *sdp;
368 struct swappri *spp;
369
370 /*
371 * search the lists for the requested vp
372 */
373
374 LIST_FOREACH(spp, &swap_priority, spi_swappri) {
375 CIRCLEQ_FOREACH(sdp, &spp->spi_swapdev, swd_next) {
376 if (sdp->swd_vp == vp) {
377 if (remove) {
378 CIRCLEQ_REMOVE(&spp->spi_swapdev,
379 sdp, swd_next);
380 uvmexp.nswapdev--;
381 }
382 return(sdp);
383 }
384 }
385 }
386 return (NULL);
387 }
388
389
390 /*
391 * swaplist_trim: scan priority list for empty priority entries and kill
392 * them.
393 *
394 * => caller must hold both swap_syscall_lock and uvm.swap_data_lock
395 */
396 static void
397 swaplist_trim()
398 {
399 struct swappri *spp, *nextspp;
400
401 for (spp = LIST_FIRST(&swap_priority); spp != NULL; spp = nextspp) {
402 nextspp = LIST_NEXT(spp, spi_swappri);
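		/*
		 * the pointer comparison below is the circleq "is empty"
		 * test: skip any priority that still has swap devices.
		 */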
403 if (CIRCLEQ_FIRST(&spp->spi_swapdev) !=
404 (void *)&spp->spi_swapdev)
405 continue;
406 LIST_REMOVE(spp, spi_swappri);
407 free(spp, M_VMSWAP);
408 }
409 }
410
411 /*
412 * swapdrum_getsdp: given a page offset in /dev/drum, convert it back
413 * to the "swapdev" that maps that section of the drum.
414 *
415 * => each swapdev takes one big contig chunk of the drum
416 * => caller must hold uvm.swap_data_lock
417 */
418 static struct swapdev *
419 swapdrum_getsdp(pgno)
420 int pgno;
421 {
422 struct swapdev *sdp;
423 struct swappri *spp;
424
425 LIST_FOREACH(spp, &swap_priority, spi_swappri) {
426 CIRCLEQ_FOREACH(sdp, &spp->spi_swapdev, swd_next) {
427 if (sdp->swd_flags & SWF_FAKE)
428 continue;
429 if (pgno >= sdp->swd_drumoffset &&
430 pgno < (sdp->swd_drumoffset + sdp->swd_drumsize)) {
431 return sdp;
432 }
433 }
434 }
435 return NULL;
436 }
437
438
439 /*
440 * sys_swapctl: main entry point for swapctl(2) system call
441 * [with two helper functions: swap_on and swap_off]
442 */
443 int
444 sys_swapctl(l, v, retval)
445 struct lwp *l;
446 void *v;
447 register_t *retval;
448 {
449 struct sys_swapctl_args /* {
450 syscallarg(int) cmd;
451 syscallarg(void *) arg;
452 syscallarg(int) misc;
453 } */ *uap = (struct sys_swapctl_args *)v;
454 struct proc *p = l->l_proc;
455 struct vnode *vp;
456 struct nameidata nd;
457 struct swappri *spp;
458 struct swapdev *sdp;
459 struct swapent *sep;
460 char userpath[PATH_MAX + 1];
461 size_t len;
462 int error, misc;
463 int priority;
464 UVMHIST_FUNC("sys_swapctl"); UVMHIST_CALLED(pdhist);
465
466 misc = SCARG(uap, misc);
467
468 /*
469 * ensure serialized syscall access by grabbing the swap_syscall_lock
470 */
471 lockmgr(&swap_syscall_lock, LK_EXCLUSIVE, NULL);
472
473 /*
474 * we handle the non-priv NSWAP and STATS requests first.
475 *
476 * SWAP_NSWAP: return number of config'd swap devices
477 * [can also be obtained with uvmexp sysctl]
478 */
479 if (SCARG(uap, cmd) == SWAP_NSWAP) {
480 UVMHIST_LOG(pdhist, "<- done SWAP_NSWAP=%d", uvmexp.nswapdev,
481 0, 0, 0);
482 *retval = uvmexp.nswapdev;
483 error = 0;
484 goto out;
485 }
486
487 /*
488 * SWAP_STATS: get stats on current # of configured swap devs
489 *
490 * note that the swap_priority list can't change as long
491 * as we are holding the swap_syscall_lock. we don't want
492 * to grab the uvm.swap_data_lock because we may fault&sleep during
493 * copyout() and we don't want to be holding that lock then!
494 */
495 if (SCARG(uap, cmd) == SWAP_STATS
496 #if defined(COMPAT_13)
497 || SCARG(uap, cmd) == SWAP_OSTATS
498 #endif
499 ) {
500 misc = MIN(uvmexp.nswapdev, misc);
501 #if defined(COMPAT_13)
502 if (SCARG(uap, cmd) == SWAP_OSTATS)
503 len = sizeof(struct oswapent) * misc;
504 else
505 #endif
506 len = sizeof(struct swapent) * misc;
507 sep = (struct swapent *)malloc(len, M_TEMP, M_WAITOK);
508
509 uvm_swap_stats(SCARG(uap, cmd), sep, misc, retval);
510 error = copyout(sep, (void *)SCARG(uap, arg), len);
511
512 free(sep, M_TEMP);
513 UVMHIST_LOG(pdhist, "<- done SWAP_STATS", 0, 0, 0, 0);
514 goto out;
515 }
516 if (SCARG(uap, cmd) == SWAP_GETDUMPDEV) {
517 dev_t *devp = (dev_t *)SCARG(uap, arg);
518
519 error = copyout(&dumpdev, devp, sizeof(dumpdev));
520 goto out;
521 }
522
523 /*
524 * all other requests require superuser privs. verify.
525 */
526 if ((error = suser(p->p_ucred, &p->p_acflag)))
527 goto out;
528
529 /*
530 * at this point we expect a path name in arg. we will
531 * use namei() to gain a vnode reference (vref), and lock
532 * the vnode (VOP_LOCK).
533 *
534 * XXX: a NULL arg means use the root vnode pointer (e.g. for
535 * miniroot)
536 */
537 if (SCARG(uap, arg) == NULL) {
538 vp = rootvp; /* miniroot */
539 if (vget(vp, LK_EXCLUSIVE)) {
540 error = EBUSY;
541 goto out;
542 }
543 if (SCARG(uap, cmd) == SWAP_ON &&
544 copystr("miniroot", userpath, sizeof userpath, &len))
545 panic("swapctl: miniroot copy failed");
546 } else {
547 int space;
548 char *where;
549
550 if (SCARG(uap, cmd) == SWAP_ON) {
551 if ((error = copyinstr(SCARG(uap, arg), userpath,
552 sizeof userpath, &len)))
553 goto out;
554 space = UIO_SYSSPACE;
555 where = userpath;
556 } else {
557 space = UIO_USERSPACE;
558 where = (char *)SCARG(uap, arg);
559 }
560 NDINIT(&nd, LOOKUP, FOLLOW|LOCKLEAF, space, where, p);
561 if ((error = namei(&nd)))
562 goto out;
563 vp = nd.ni_vp;
564 }
565 /* note: "vp" is referenced and locked */
566
567 error = 0; /* assume no error */
568 switch(SCARG(uap, cmd)) {
569
570 case SWAP_DUMPDEV:
571 if (vp->v_type != VBLK) {
572 error = ENOTBLK;
573 break;
574 }
575 dumpdev = vp->v_rdev;
576 break;
577
578 case SWAP_CTL:
579 /*
580 * get new priority, remove old entry (if any) and then
581 * reinsert it in the correct place. finally, prune out
582 * any empty priority structures.
583 */
584 priority = SCARG(uap, misc);
585 spp = malloc(sizeof *spp, M_VMSWAP, M_WAITOK);
586 simple_lock(&uvm.swap_data_lock);
587 if ((sdp = swaplist_find(vp, 1)) == NULL) {
588 error = ENOENT;
589 } else {
590 swaplist_insert(sdp, spp, priority);
591 swaplist_trim();
592 }
593 simple_unlock(&uvm.swap_data_lock);
594 if (error)
595 free(spp, M_VMSWAP);
596 break;
597
598 case SWAP_ON:
599
600 /*
601 * check for duplicates. if none found, then insert a
602 * dummy entry on the list to prevent someone else from
603 * trying to enable this device while we are working on
604 * it.
605 */
606
607 priority = SCARG(uap, misc);
608 sdp = malloc(sizeof *sdp, M_VMSWAP, M_WAITOK);
609 spp = malloc(sizeof *spp, M_VMSWAP, M_WAITOK);
610 memset(sdp, 0, sizeof(*sdp));
611 sdp->swd_flags = SWF_FAKE;
612 sdp->swd_vp = vp;
613 sdp->swd_dev = (vp->v_type == VBLK) ? vp->v_rdev : NODEV;
614 bufq_alloc(&sdp->swd_tab, BUFQ_DISKSORT|BUFQ_SORT_RAWBLOCK);
615 simple_lock(&uvm.swap_data_lock);
616 if (swaplist_find(vp, 0) != NULL) {
617 error = EBUSY;
618 simple_unlock(&uvm.swap_data_lock);
619 bufq_free(&sdp->swd_tab);
620 free(sdp, M_VMSWAP);
621 free(spp, M_VMSWAP);
622 break;
623 }
624 swaplist_insert(sdp, spp, priority);
625 simple_unlock(&uvm.swap_data_lock);
626
627 sdp->swd_pathlen = len;
628 sdp->swd_path = malloc(sdp->swd_pathlen, M_VMSWAP, M_WAITOK);
629 if (copystr(userpath, sdp->swd_path, sdp->swd_pathlen, 0) != 0)
630 panic("swapctl: copystr");
631
632 /*
633 * we've now got a FAKE placeholder in the swap list.
634 * now attempt to enable swap on it. if we fail, undo
635 * what we've done and kill the fake entry we just inserted.
636 * if swap_on is a success, it will clear the SWF_FAKE flag
637 */
638
639 if ((error = swap_on(p, sdp)) != 0) {
640 simple_lock(&uvm.swap_data_lock);
641 (void) swaplist_find(vp, 1); /* kill fake entry */
642 swaplist_trim();
643 simple_unlock(&uvm.swap_data_lock);
644 bufq_free(&sdp->swd_tab);
645 free(sdp->swd_path, M_VMSWAP);
646 free(sdp, M_VMSWAP);
647 break;
648 }
649 break;
650
651 case SWAP_OFF:
652 simple_lock(&uvm.swap_data_lock);
653 if ((sdp = swaplist_find(vp, 0)) == NULL) {
654 simple_unlock(&uvm.swap_data_lock);
655 error = ENXIO;
656 break;
657 }
658
659 /*
660 * If a device isn't in use or enabled, we
661 * can't stop swapping from it (again).
662 */
663 if ((sdp->swd_flags & (SWF_INUSE|SWF_ENABLE)) == 0) {
664 simple_unlock(&uvm.swap_data_lock);
665 error = EBUSY;
666 break;
667 }
668
669 /*
670 * do the real work.
671 */
672 error = swap_off(p, sdp);
673 break;
674
675 default:
676 error = EINVAL;
677 }
678
679 /*
680 * done! release the ref gained by namei() and unlock.
681 */
682 vput(vp);
683
684 out:
685 lockmgr(&swap_syscall_lock, LK_RELEASE, NULL);
686
687 UVMHIST_LOG(pdhist, "<- done! error=%d", error, 0, 0, 0);
688 return (error);
689 }
690
691 /*
692 * uvm_swap_stats: implements swapctl(SWAP_STATS). The function is kept
693 * separate from sys_swapctl() in order to allow COMPAT_* swapctl()
694 * emulation to use it directly without going through sys_swapctl().
695 * The problem with using sys_swapctl() there is that it involves
696 * copying the swapent array to the stackgap, and this array's size
697 * is not known at build time. Hence it would not be possible to
698 * ensure it would fit in the stackgap in any case.
699 */
700 void
701 uvm_swap_stats(cmd, sep, sec, retval)
702 int cmd;
703 struct swapent *sep;
704 int sec;
705 register_t *retval;
706 {
707 struct swappri *spp;
708 struct swapdev *sdp;
709 int count = 0;
710
711 LIST_FOREACH(spp, &swap_priority, spi_swappri) {
712 for (sdp = CIRCLEQ_FIRST(&spp->spi_swapdev);
713 sdp != (void *)&spp->spi_swapdev && sec-- > 0;
714 sdp = CIRCLEQ_NEXT(sdp, swd_next)) {
715 /*
716 * backwards compatibility for system call.
717 * note that we use 'struct oswapent' as an
718 * overlay into both 'struct swapdev' and
719 * the userland 'struct swapent', as we
720 * want to retain backwards compatibility
721 * with NetBSD 1.3.
722 */
723 sdp->swd_ose.ose_inuse =
724 btodb((u_int64_t)sdp->swd_npginuse <<
725 PAGE_SHIFT);
726 (void)memcpy(sep, &sdp->swd_ose,
727 sizeof(struct oswapent));
728
729 /* now copy out the path if necessary */
730 #if defined(COMPAT_13)
731 if (cmd == SWAP_STATS)
732 #endif
733 (void)memcpy(&sep->se_path, sdp->swd_path,
734 sdp->swd_pathlen);
735
736 count++;
737 #if defined(COMPAT_13)
738 if (cmd == SWAP_OSTATS)
739 sep = (struct swapent *)
740 ((struct oswapent *)sep + 1);
741 else
742 #endif
743 sep++;
744 }
745 }
746
747 *retval = count;
748 return;
749 }
750
751 /*
752 * swap_on: attempt to enable a swapdev for swapping. note that the
753 * swapdev is already on the global list, but disabled (marked
754 * SWF_FAKE).
755 *
756 * => we avoid the start of the disk (to protect disk labels)
757 * => we also avoid the miniroot, if we are swapping to root.
758 * => caller should leave uvm.swap_data_lock unlocked, we may lock it
759 * if needed.
760 */
761 static int
762 swap_on(p, sdp)
763 struct proc *p;
764 struct swapdev *sdp;
765 {
766 static int count = 0; /* static */
767 struct vnode *vp;
768 int error, npages, nblocks, size;
769 long addr;
770 u_long result;
771 struct vattr va;
772 #ifdef NFS
773 extern int (**nfsv2_vnodeop_p) __P((void *));
774 #endif /* NFS */
775 dev_t dev;
776 UVMHIST_FUNC("swap_on"); UVMHIST_CALLED(pdhist);
777
778 /*
779 * we want to enable swapping on sdp. the swd_vp contains
780 * the vnode we want (locked and ref'd), and the swd_dev
781 * contains the dev_t of the file, if it is a block device.
782 */
783
784 vp = sdp->swd_vp;
785 dev = sdp->swd_dev;
786
787 /*
788 * open the swap file (mostly useful for block device files to
789 * let device driver know what is up).
790 *
791 * we skip the open/close for root on swap because the root
792 * has already been opened when root was mounted (mountroot).
793 */
794 if (vp != rootvp) {
795 if ((error = VOP_OPEN(vp, FREAD|FWRITE, p->p_ucred, p)))
796 return (error);
797 }
798
799 /* XXX this only works for block devices */
800 UVMHIST_LOG(pdhist, " dev=%d, major(dev)=%d", dev, major(dev), 0,0);
801
802 /*
803 * we now need to determine the size of the swap area. for
804 * block specials we can call the d_psize function.
805 * for normal files, we must stat [get attrs].
806 *
807 * we put the result in nblocks.
808 * for normal files, we also want the filesystem block size
809 * (which we get with statfs).
810 */
811 switch (vp->v_type) {
812 case VBLK:
813 if (bdevsw[major(dev)].d_psize == 0 ||
814 (nblocks = (*bdevsw[major(dev)].d_psize)(dev)) == -1) {
815 error = ENXIO;
816 goto bad;
817 }
818 break;
819
820 case VREG:
821 if ((error = VOP_GETATTR(vp, &va, p->p_ucred, p)))
822 goto bad;
823 nblocks = (int)btodb(va.va_size);
824 if ((error =
825 VFS_STATFS(vp->v_mount, &vp->v_mount->mnt_stat, p)) != 0)
826 goto bad;
827
828 sdp->swd_bsize = vp->v_mount->mnt_stat.f_iosize;
829 /*
830 * limit the max # of outstanding I/O requests we issue
831 * at any one time. take it easy on NFS servers.
832 */
833 #ifdef NFS
834 if (vp->v_op == nfsv2_vnodeop_p)
835 sdp->swd_maxactive = 2; /* XXX */
836 else
837 #endif /* NFS */
838 sdp->swd_maxactive = 8; /* XXX */
839 break;
840
841 default:
842 error = ENXIO;
843 goto bad;
844 }
845
846 /*
847 * save nblocks in a safe place and convert to pages.
848 */
849
850 sdp->swd_ose.ose_nblks = nblocks;
851 npages = dbtob((u_int64_t)nblocks) >> PAGE_SHIFT;
852
853 /*
854 * for block special files, we want to make sure that we leave
855 * the disklabel and bootblocks alone, so we arrange to skip
856 * over them (arbitrarily choosing to skip PAGE_SIZE bytes).
857 * note that because of this the "size" can be less than the
858 * actual number of blocks on the device.
859 */
860 if (vp->v_type == VBLK) {
861 /* we use pages 1 to (size - 1) [inclusive] */
862 size = npages - 1;
863 addr = 1;
864 } else {
865 /* we use pages 0 to (size - 1) [inclusive] */
866 size = npages;
867 addr = 0;
868 }
869
870 /*
871 * make sure we have enough blocks for a reasonable sized swap
872 * area. we want at least one page.
873 */
874
875 if (size < 1) {
876 UVMHIST_LOG(pdhist, " size < 1!!", 0, 0, 0, 0);
877 error = EINVAL;
878 goto bad;
879 }
880
881 UVMHIST_LOG(pdhist, " dev=%x: size=%d addr=%ld\n", dev, size, addr, 0);
882
883 /*
884 * now we need to allocate an extent to manage this swap device
885 */
886 snprintf(sdp->swd_exname, sizeof(sdp->swd_exname), "swap0x%04x",
887 count++);
888
889 /* note that extent_create's 3rd arg is inclusive, thus "- 1" */
890 sdp->swd_ex = extent_create(sdp->swd_exname, 0, npages - 1, M_VMSWAP,
891 0, 0, EX_WAITOK);
892 /* allocate the `saved' region from the extent so it won't be used */
893 if (addr) {
894 if (extent_alloc_region(sdp->swd_ex, 0, addr, EX_WAITOK))
895 panic("disklabel region");
896 }
897
898 /*
899 * if the vnode we are swapping to is the root vnode
900 * (i.e. we are swapping to the miniroot) then we want
901 * to make sure we don't overwrite it. do a statfs to
902 * find its size and skip over it.
903 */
904 if (vp == rootvp) {
905 struct mount *mp;
906 struct statfs *sp;
907 int rootblocks, rootpages;
908
909 mp = rootvnode->v_mount;
910 sp = &mp->mnt_stat;
911 rootblocks = sp->f_blocks * btodb(sp->f_bsize);
912 /*
913 * XXX: sp->f_blocks isn't the total number of
914 * blocks in the filesystem, it's the number of
915 * data blocks. so, our rootblocks almost
916 * definitely underestimates the total size
917 * of the filesystem - how badly depends on the
918 * details of the filesystem type. there isn't
919 * an obvious way to deal with this cleanly
920 * and perfectly, so for now we just pad our
921 * rootblocks estimate with an extra 5 percent.
922 */
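		/*
		 * ((x >> 5) + (x >> 6) + (x >> 7) is roughly
		 * 3.1% + 1.6% + 0.8%, i.e. about a 5.5% pad.)
		 */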
923 rootblocks += (rootblocks >> 5) +
924 (rootblocks >> 6) +
925 (rootblocks >> 7);
926 rootpages = round_page(dbtob(rootblocks)) >> PAGE_SHIFT;
927 if (rootpages > size)
928 panic("swap_on: miniroot larger than swap?");
929
930 if (extent_alloc_region(sdp->swd_ex, addr,
931 rootpages, EX_WAITOK))
932 panic("swap_on: unable to preserve miniroot");
933
934 size -= rootpages;
935 printf("Preserved %d pages of miniroot ", rootpages);
936 printf("leaving %d pages of swap\n", size);
937 }
938
939 /*
940 * try to add anons to reflect the new swap space.
941 */
942
943 error = uvm_anon_add(size);
944 if (error) {
945 goto bad;
946 }
947
948 /*
949 * add a ref to vp to reflect usage as a swap device.
950 */
951 vref(vp);
952
953 /*
954 * now add the new swapdev to the drum and enable.
955 */
956 if (extent_alloc(swapmap, npages, EX_NOALIGN, EX_NOBOUNDARY,
957 EX_WAITOK, &result))
958 panic("swapdrum_add");
959
960 sdp->swd_drumoffset = (int)result;
961 sdp->swd_drumsize = npages;
962 sdp->swd_npages = size;
963 simple_lock(&uvm.swap_data_lock);
964 sdp->swd_flags &= ~SWF_FAKE; /* going live */
965 sdp->swd_flags |= (SWF_INUSE|SWF_ENABLE);
966 uvmexp.swpages += size;
967 simple_unlock(&uvm.swap_data_lock);
968 return (0);
969
970 /*
971 * failure: clean up and return error.
972 */
973
974 bad:
975 if (sdp->swd_ex) {
976 extent_destroy(sdp->swd_ex);
977 }
978 if (vp != rootvp) {
979 (void)VOP_CLOSE(vp, FREAD|FWRITE, p->p_ucred, p);
980 }
981 return (error);
982 }
983
984 /*
985 * swap_off: stop swapping on swapdev
986 *
987 * => swap data should be locked, we will unlock.
988 */
989 static int
990 swap_off(p, sdp)
991 struct proc *p;
992 struct swapdev *sdp;
993 {
994 UVMHIST_FUNC("swap_off"); UVMHIST_CALLED(pdhist);
995 UVMHIST_LOG(pdhist, " dev=%x", sdp->swd_dev,0,0,0);
996
997 /* disable the swap area being removed */
998 sdp->swd_flags &= ~SWF_ENABLE;
999 simple_unlock(&uvm.swap_data_lock);
1000
1001 /*
1002 * the idea is to find all the pages that are paged out to this
1003 * device, and page them all in. in uvm, swap-backed pageable
1004 * memory can take two forms: aobjs and anons. call the
1005 * swapoff hook for each subsystem to bring in pages.
1006 */
1007
1008 if (uao_swap_off(sdp->swd_drumoffset,
1009 sdp->swd_drumoffset + sdp->swd_drumsize) ||
1010 anon_swap_off(sdp->swd_drumoffset,
1011 sdp->swd_drumoffset + sdp->swd_drumsize)) {
1012
1013 simple_lock(&uvm.swap_data_lock);
1014 sdp->swd_flags |= SWF_ENABLE;
1015 simple_unlock(&uvm.swap_data_lock);
1016 return ENOMEM;
1017 }
1018 KASSERT(sdp->swd_npginuse == sdp->swd_npgbad);
1019
1020 /*
1021 * done with the vnode.
1022 * drop our ref on the vnode before calling VOP_CLOSE()
1023 * so that spec_close() can tell if this is the last close.
1024 */
1025 vrele(sdp->swd_vp);
1026 if (sdp->swd_vp != rootvp) {
1027 (void) VOP_CLOSE(sdp->swd_vp, FREAD|FWRITE, p->p_ucred, p);
1028 }
1029
1030 /* remove anons from the system */
1031 uvm_anon_remove(sdp->swd_npages);
1032
1033 simple_lock(&uvm.swap_data_lock);
1034 uvmexp.swpages -= sdp->swd_npages;
1035
1036 if (swaplist_find(sdp->swd_vp, 1) == NULL)
1037 panic("swap_off: swapdev not in list");
1038 swaplist_trim();
1039 simple_unlock(&uvm.swap_data_lock);
1040
1041 /*
1042 * free all resources!
1043 */
1044 extent_free(swapmap, sdp->swd_drumoffset, sdp->swd_drumsize,
1045 EX_WAITOK);
1046 extent_destroy(sdp->swd_ex);
1047 bufq_free(&sdp->swd_tab);
1048 free(sdp, M_VMSWAP);
1049 return (0);
1050 }
1051
1052 /*
1053 * /dev/drum interface and i/o functions
1054 */
1055
1056 /*
1057 * swread: the read function for the drum (just a call to physio)
1058 */
1059 /*ARGSUSED*/
1060 int
1061 swread(dev, uio, ioflag)
1062 dev_t dev;
1063 struct uio *uio;
1064 int ioflag;
1065 {
1066 UVMHIST_FUNC("swread"); UVMHIST_CALLED(pdhist);
1067
1068 UVMHIST_LOG(pdhist, " dev=%x offset=%qx", dev, uio->uio_offset, 0, 0);
1069 return (physio(swstrategy, NULL, dev, B_READ, minphys, uio));
1070 }
1071
1072 /*
1073 * swwrite: the write function for the drum (just a call to physio)
1074 */
1075 /*ARGSUSED*/
1076 int
1077 swwrite(dev, uio, ioflag)
1078 dev_t dev;
1079 struct uio *uio;
1080 int ioflag;
1081 {
1082 UVMHIST_FUNC("swwrite"); UVMHIST_CALLED(pdhist);
1083
1084 UVMHIST_LOG(pdhist, " dev=%x offset=%qx", dev, uio->uio_offset, 0, 0);
1085 return (physio(swstrategy, NULL, dev, B_WRITE, minphys, uio));
1086 }
1087
1088 /*
1089 * swstrategy: perform I/O on the drum
1090 *
1091 * => we must map the i/o request from the drum to the correct swapdev.
1092 */
1093 void
1094 swstrategy(bp)
1095 struct buf *bp;
1096 {
1097 struct swapdev *sdp;
1098 struct vnode *vp;
1099 int s, pageno, bn;
1100 UVMHIST_FUNC("swstrategy"); UVMHIST_CALLED(pdhist);
1101
1102 /*
1103 * convert block number to swapdev. note that swapdev can't
1104 * be yanked out from under us because we are holding resources
1105 * in it (i.e. the blocks we are doing I/O on).
1106 */
1107 pageno = dbtob((int64_t)bp->b_blkno) >> PAGE_SHIFT;
1108 simple_lock(&uvm.swap_data_lock);
1109 sdp = swapdrum_getsdp(pageno);
1110 simple_unlock(&uvm.swap_data_lock);
1111 if (sdp == NULL) {
1112 bp->b_error = EINVAL;
1113 bp->b_flags |= B_ERROR;
1114 biodone(bp);
1115 UVMHIST_LOG(pdhist, " failed to get swap device", 0, 0, 0, 0);
1116 return;
1117 }
1118
1119 /*
1120 * convert drum page number to block number on this swapdev.
1121 */
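	/*
	 * (illustrative: with 4kb pages and 512-byte disk blocks, a
	 * swapdev-relative page 5 maps to disk block 40.)
	 */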
1122
1123 pageno -= sdp->swd_drumoffset; /* page # on swapdev */
1124 bn = btodb((u_int64_t)pageno << PAGE_SHIFT); /* convert to diskblock */
1125
1126 UVMHIST_LOG(pdhist, " %s: mapoff=%x bn=%x bcount=%ld",
1127 ((bp->b_flags & B_READ) == 0) ? "write" : "read",
1128 sdp->swd_drumoffset, bn, bp->b_bcount);
1129
1130 /*
1131 * for block devices we finish up here.
1132 * for regular files we have to do more work which we delegate
1133 * to sw_reg_strategy().
1134 */
1135
1136 switch (sdp->swd_vp->v_type) {
1137 default:
1138 panic("swstrategy: vnode type 0x%x", sdp->swd_vp->v_type);
1139
1140 case VBLK:
1141
1142 /*
1143 * must convert "bp" from an I/O on /dev/drum to an I/O
1144 * on the swapdev (sdp).
1145 */
1146 s = splbio();
1147 bp->b_blkno = bn; /* swapdev block number */
1148 vp = sdp->swd_vp; /* swapdev vnode pointer */
1149 bp->b_dev = sdp->swd_dev; /* swapdev dev_t */
1150
1151 /*
1152 * if we are doing a write, we have to move the output accounting
1153 * from the drum's v_numoutput counter to the swapdev's.
1154 */
1155 if ((bp->b_flags & B_READ) == 0) {
1156 vwakeup(bp); /* kills one 'v_numoutput' on drum */
1157 vp->v_numoutput++; /* put it on swapdev */
1158 }
1159
1160 /*
1161 * finally plug in swapdev vnode and start I/O
1162 */
1163 bp->b_vp = vp;
1164 splx(s);
1165 VOP_STRATEGY(bp);
1166 return;
1167
1168 case VREG:
1169 /*
1170 * delegate to sw_reg_strategy function.
1171 */
1172 sw_reg_strategy(sdp, bp, bn);
1173 return;
1174 }
1175 /* NOTREACHED */
1176 }
1177
1178 /*
1179 * sw_reg_strategy: handle swap i/o to regular files
1180 */
1181 static void
1182 sw_reg_strategy(sdp, bp, bn)
1183 struct swapdev *sdp;
1184 struct buf *bp;
1185 int bn;
1186 {
1187 struct vnode *vp;
1188 struct vndxfer *vnx;
1189 daddr_t nbn;
1190 caddr_t addr;
1191 off_t byteoff;
1192 int s, off, nra, error, sz, resid;
1193 UVMHIST_FUNC("sw_reg_strategy"); UVMHIST_CALLED(pdhist);
1194
1195 /*
1196 * allocate a vndxfer head for this transfer and point it to
1197 * our buffer.
1198 */
1199 getvndxfer(vnx);
1200 vnx->vx_flags = VX_BUSY;
1201 vnx->vx_error = 0;
1202 vnx->vx_pending = 0;
1203 vnx->vx_bp = bp;
1204 vnx->vx_sdp = sdp;
1205
1206 /*
1207 * setup for main loop where we read filesystem blocks into
1208 * our buffer.
1209 */
1210 error = 0;
1211 bp->b_resid = bp->b_bcount; /* nothing transferred yet! */
1212 addr = bp->b_data; /* current position in buffer */
1213 byteoff = dbtob((u_int64_t)bn);
1214
1215 for (resid = bp->b_resid; resid; resid -= sz) {
1216 struct vndbuf *nbp;
1217
1218 /*
1219 * translate byteoffset into block number. return values:
1220 * vp = vnode of underlying device
1221 * nbn = new block number (on underlying vnode dev)
1222 * nra = num blocks we can read-ahead (excludes requested
1223 * block)
1224 */
1225 nra = 0;
1226 error = VOP_BMAP(sdp->swd_vp, byteoff / sdp->swd_bsize,
1227 &vp, &nbn, &nra);
1228
1229 if (error == 0 && nbn == (daddr_t)-1) {
1230 /*
1231 * this used to just set error, but that doesn't
1232 * do the right thing. Instead, it causes random
1233 * memory errors. The panic() should remain until
1234 * this condition doesn't destabilize the system.
1235 */
1236 #if 1
1237 panic("sw_reg_strategy: swap to sparse file");
1238 #else
1239 error = EIO; /* failure */
1240 #endif
1241 }
1242
1243 /*
1244 * punt if there was an error or a hole in the file.
1245 * we must wait for any i/o ops we have already started
1246 * to finish before returning.
1247 *
1248 * XXX we could deal with holes here but it would be
1249 * a hassle (in the write case).
1250 */
1251 if (error) {
1252 s = splbio();
1253 vnx->vx_error = error; /* pass error up */
1254 goto out;
1255 }
1256
1257 /*
1258 * compute the size ("sz") of this transfer (in bytes).
1259 */
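		/*
		 * (illustrative numbers only: with swd_bsize = 8192,
		 * byteoff = 12288 and nra = 0, off = 4096 and sz = 4096,
		 * i.e. we stop at the next filesystem block boundary.)
		 */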
1260 off = byteoff % sdp->swd_bsize;
1261 sz = (1 + nra) * sdp->swd_bsize - off;
1262 if (sz > resid)
1263 sz = resid;
1264
1265 UVMHIST_LOG(pdhist, "sw_reg_strategy: "
1266 "vp %p/%p offset 0x%x/0x%x",
1267 sdp->swd_vp, vp, byteoff, nbn);
1268
1269 /*
1270 * now get a buf structure. note that the vb_buf is
1271 * at the front of the nbp structure so that you can
1272 * cast pointers between the two structures easily.
1273 */
1274 getvndbuf(nbp);
1275 nbp->vb_buf.b_flags = bp->b_flags | B_CALL;
1276 nbp->vb_buf.b_bcount = sz;
1277 nbp->vb_buf.b_bufsize = sz;
1278 nbp->vb_buf.b_error = 0;
1279 nbp->vb_buf.b_data = addr;
1280 nbp->vb_buf.b_lblkno = 0;
1281 nbp->vb_buf.b_blkno = nbn + btodb(off);
1282 nbp->vb_buf.b_rawblkno = nbp->vb_buf.b_blkno;
1283 nbp->vb_buf.b_iodone = sw_reg_iodone;
1284 nbp->vb_buf.b_vp = vp;
1285 if (vp->v_type == VBLK) {
1286 nbp->vb_buf.b_dev = vp->v_rdev;
1287 }
1288 LIST_INIT(&nbp->vb_buf.b_dep);
1289
1290 nbp->vb_xfer = vnx; /* patch it back in to vnx */
1291
1292 /*
1293 * Just sort by block number
1294 */
1295 s = splbio();
1296 if (vnx->vx_error != 0) {
1297 putvndbuf(nbp);
1298 goto out;
1299 }
1300 vnx->vx_pending++;
1301
1302 /* sort it in and start I/O if we are not over our limit */
1303 BUFQ_PUT(&sdp->swd_tab, &nbp->vb_buf);
1304 sw_reg_start(sdp);
1305 splx(s);
1306
1307 /*
1308 * advance to the next I/O
1309 */
1310 byteoff += sz;
1311 addr += sz;
1312 }
1313
1314 s = splbio();
1315
1316 out: /* Arrive here at splbio */
1317 vnx->vx_flags &= ~VX_BUSY;
1318 if (vnx->vx_pending == 0) {
1319 if (vnx->vx_error != 0) {
1320 bp->b_error = vnx->vx_error;
1321 bp->b_flags |= B_ERROR;
1322 }
1323 putvndxfer(vnx);
1324 biodone(bp);
1325 }
1326 splx(s);
1327 }
1328
1329 /*
1330 * sw_reg_start: start an I/O request on the requested swapdev
1331 *
1332 * => reqs are sorted by b_rawblkno (above)
1333 */
1334 static void
1335 sw_reg_start(sdp)
1336 struct swapdev *sdp;
1337 {
1338 struct buf *bp;
1339 UVMHIST_FUNC("sw_reg_start"); UVMHIST_CALLED(pdhist);
1340
1341 /* recursion control */
1342 if ((sdp->swd_flags & SWF_BUSY) != 0)
1343 return;
1344
1345 sdp->swd_flags |= SWF_BUSY;
1346
1347 while (sdp->swd_active < sdp->swd_maxactive) {
1348 bp = BUFQ_GET(&sdp->swd_tab);
1349 if (bp == NULL)
1350 break;
1351 sdp->swd_active++;
1352
1353 UVMHIST_LOG(pdhist,
1354 "sw_reg_start: bp %p vp %p blkno %p cnt %lx",
1355 bp, bp->b_vp, bp->b_blkno, bp->b_bcount);
1356 if ((bp->b_flags & B_READ) == 0)
1357 bp->b_vp->v_numoutput++;
1358
1359 VOP_STRATEGY(bp);
1360 }
1361 sdp->swd_flags &= ~SWF_BUSY;
1362 }
1363
1364 /*
1365 * sw_reg_iodone: one of our i/o's has completed and needs post-i/o cleanup
1366 *
1367 * => note that we can recover the vndbuf struct by casting the buf ptr
1368 */
1369 static void
1370 sw_reg_iodone(bp)
1371 struct buf *bp;
1372 {
1373 struct vndbuf *vbp = (struct vndbuf *) bp;
1374 struct vndxfer *vnx = vbp->vb_xfer;
1375 struct buf *pbp = vnx->vx_bp; /* parent buffer */
1376 struct swapdev *sdp = vnx->vx_sdp;
1377 int s, resid;
1378 UVMHIST_FUNC("sw_reg_iodone"); UVMHIST_CALLED(pdhist);
1379
1380 UVMHIST_LOG(pdhist, " vbp=%p vp=%p blkno=%x addr=%p",
1381 vbp, vbp->vb_buf.b_vp, vbp->vb_buf.b_blkno, vbp->vb_buf.b_data);
1382 UVMHIST_LOG(pdhist, " cnt=%lx resid=%lx",
1383 vbp->vb_buf.b_bcount, vbp->vb_buf.b_resid, 0, 0);
1384
1385 /*
1386 * protect vbp at splbio and update.
1387 */
1388
1389 s = splbio();
1390 resid = vbp->vb_buf.b_bcount - vbp->vb_buf.b_resid;
1391 pbp->b_resid -= resid;
1392 vnx->vx_pending--;
1393
1394 if (vbp->vb_buf.b_error) {
1395 UVMHIST_LOG(pdhist, " got error=%d !",
1396 vbp->vb_buf.b_error, 0, 0, 0);
1397
1398 /* pass error upward */
1399 vnx->vx_error = vbp->vb_buf.b_error;
1400 }
1401
1402 /*
1403 * kill vbp structure
1404 */
1405 putvndbuf(vbp);
1406
1407 /*
1408 * wrap up this transaction if it has run to completion or, in
1409 * case of an error, when all auxiliary buffers have returned.
1410 */
1411 if (vnx->vx_error != 0) {
1412 /* pass error upward */
1413 pbp->b_flags |= B_ERROR;
1414 pbp->b_error = vnx->vx_error;
1415 if ((vnx->vx_flags & VX_BUSY) == 0 && vnx->vx_pending == 0) {
1416 putvndxfer(vnx);
1417 biodone(pbp);
1418 }
1419 } else if (pbp->b_resid == 0) {
1420 KASSERT(vnx->vx_pending == 0);
1421 if ((vnx->vx_flags & VX_BUSY) == 0) {
1422 UVMHIST_LOG(pdhist, " iodone error=%d !",
1423 pbp, vnx->vx_error, 0, 0);
1424 putvndxfer(vnx);
1425 biodone(pbp);
1426 }
1427 }
1428
1429 /*
1430 * done! start next swapdev I/O if one is pending
1431 */
1432 sdp->swd_active--;
1433 sw_reg_start(sdp);
1434 splx(s);
1435 }
1436
1437
1438 /*
1439 * uvm_swap_alloc: allocate space on swap
1440 *
1441 * => allocation is done "round robin" down the priority list, as we
1442 * allocate in a priority we "rotate" the circle queue.
1443 * => space can be freed with uvm_swap_free
1444 * => we return the page slot number in /dev/drum (0 == invalid slot)
1445 * => we lock uvm.swap_data_lock
1446 * => XXXMRG: "LESSOK" INTERFACE NEEDED TO EXTENT SYSTEM
1447 */
1448 int
1449 uvm_swap_alloc(nslots, lessok)
1450 int *nslots; /* IN/OUT */
1451 boolean_t lessok;
1452 {
1453 struct swapdev *sdp;
1454 struct swappri *spp;
1455 u_long result;
1456 UVMHIST_FUNC("uvm_swap_alloc"); UVMHIST_CALLED(pdhist);
1457
1458 /*
1459 * no swap devices configured yet? definite failure.
1460 */
1461 if (uvmexp.nswapdev < 1)
1462 return 0;
1463
1464 /*
1465 * lock data lock and enter the allocation loop
1466 */
1467 simple_lock(&uvm.swap_data_lock);
1468
1469 ReTry: /* XXXMRG */
1470 LIST_FOREACH(spp, &swap_priority, spi_swappri) {
1471 CIRCLEQ_FOREACH(sdp, &spp->spi_swapdev, swd_next) {
1472 /* if it's not enabled, then we can't swap from it */
1473 if ((sdp->swd_flags & SWF_ENABLE) == 0)
1474 continue;
1475 if (sdp->swd_npginuse + *nslots > sdp->swd_npages)
1476 continue;
1477 if (extent_alloc(sdp->swd_ex, *nslots, EX_NOALIGN,
1478 EX_NOBOUNDARY, EX_MALLOCOK|EX_NOWAIT,
1479 &result) != 0) {
1480 continue;
1481 }
1482
1483 /*
1484 * successful allocation! now rotate the circleq.
1485 */
1486 CIRCLEQ_REMOVE(&spp->spi_swapdev, sdp, swd_next);
1487 CIRCLEQ_INSERT_TAIL(&spp->spi_swapdev, sdp, swd_next);
1488 sdp->swd_npginuse += *nslots;
1489 uvmexp.swpginuse += *nslots;
1490 simple_unlock(&uvm.swap_data_lock);
1491 /* done! return drum slot number */
1492 UVMHIST_LOG(pdhist,
1493 "success! returning %d slots starting at %d",
1494 *nslots, result + sdp->swd_drumoffset, 0, 0);
1495 return (result + sdp->swd_drumoffset);
1496 }
1497 }
1498
1499 /* XXXMRG: BEGIN HACK */
1500 if (*nslots > 1 && lessok) {
1501 *nslots = 1;
1502 goto ReTry; /* XXXMRG: ugh! extent should support this for us */
1503 }
1504 /* XXXMRG: END HACK */
1505
1506 simple_unlock(&uvm.swap_data_lock);
1507 return 0;
1508 }
1509
1510 /*
1511 * uvm_swap_markbad: keep track of swap ranges where we've had i/o errors
1512 *
1513 * => we lock uvm.swap_data_lock
1514 */
1515 void
1516 uvm_swap_markbad(startslot, nslots)
1517 int startslot;
1518 int nslots;
1519 {
1520 struct swapdev *sdp;
1521 UVMHIST_FUNC("uvm_swap_markbad"); UVMHIST_CALLED(pdhist);
1522
1523 simple_lock(&uvm.swap_data_lock);
1524 sdp = swapdrum_getsdp(startslot);
1525
1526 /*
1527 * we just keep track of how many pages have been marked bad
1528 * in this device, to make everything add up in swap_off().
1529 * we assume here that the range of slots will all be within
1530 * one swap device.
1531 */
1532
1533 sdp->swd_npgbad += nslots;
1534 UVMHIST_LOG(pdhist, "now %d bad", sdp->swd_npgbad, 0,0,0);
1535 simple_unlock(&uvm.swap_data_lock);
1536 }
1537
1538 /*
1539 * uvm_swap_free: free swap slots
1540 *
1541 * => this can be all or part of an allocation made by uvm_swap_alloc
1542 * => we lock uvm.swap_data_lock
1543 */
1544 void
1545 uvm_swap_free(startslot, nslots)
1546 int startslot;
1547 int nslots;
1548 {
1549 struct swapdev *sdp;
1550 UVMHIST_FUNC("uvm_swap_free"); UVMHIST_CALLED(pdhist);
1551
1552 UVMHIST_LOG(pdhist, "freeing %d slots starting at %d", nslots,
1553 startslot, 0, 0);
1554
1555 /*
1556 * ignore attempts to free the "bad" slot.
1557 */
1558
1559 if (startslot == SWSLOT_BAD) {
1560 return;
1561 }
1562
1563 /*
1564 * convert drum slot offset back to sdp, free the blocks
1565 * in the extent, and return. must hold pri lock to do
1566 * lookup and access the extent.
1567 */
1568
1569 simple_lock(&uvm.swap_data_lock);
1570 sdp = swapdrum_getsdp(startslot);
1571 KASSERT(uvmexp.nswapdev >= 1);
1572 KASSERT(sdp != NULL);
1573 KASSERT(sdp->swd_npginuse >= nslots);
1574 if (extent_free(sdp->swd_ex, startslot - sdp->swd_drumoffset, nslots,
1575 EX_MALLOCOK|EX_NOWAIT) != 0) {
1576 printf("warning: resource shortage: %d pages of swap lost\n",
1577 nslots);
1578 }
1579 sdp->swd_npginuse -= nslots;
1580 uvmexp.swpginuse -= nslots;
1581 simple_unlock(&uvm.swap_data_lock);
1582 }
1583
1584 /*
1585 * uvm_swap_put: put any number of pages into a contig place on swap
1586 *
1587 * => can be sync or async
1588 */
1589
1590 int
1591 uvm_swap_put(swslot, ppsp, npages, flags)
1592 int swslot;
1593 struct vm_page **ppsp;
1594 int npages;
1595 int flags;
1596 {
1597 int error;
1598
1599 error = uvm_swap_io(ppsp, swslot, npages, B_WRITE |
1600 ((flags & PGO_SYNCIO) ? 0 : B_ASYNC));
1601 return error;
1602 }
1603
1604 /*
1605 * uvm_swap_get: get a single page from swap
1606 *
1607 * => usually a sync op (from fault)
1608 */
1609
1610 int
1611 uvm_swap_get(page, swslot, flags)
1612 struct vm_page *page;
1613 int swslot, flags;
1614 {
1615 int error;
1616
1617 uvmexp.nswget++;
1618 KASSERT(flags & PGO_SYNCIO);
1619 if (swslot == SWSLOT_BAD) {
1620 return EIO;
1621 }
1622 error = uvm_swap_io(&page, swslot, 1, B_READ |
1623 ((flags & PGO_SYNCIO) ? 0 : B_ASYNC));
1624 if (error == 0) {
1625
1626 /*
1627 * this page is no longer only in swap.
1628 */
1629
1630 simple_lock(&uvm.swap_data_lock);
1631 KASSERT(uvmexp.swpgonly > 0);
1632 uvmexp.swpgonly--;
1633 simple_unlock(&uvm.swap_data_lock);
1634 }
1635 return error;
1636 }
1637
1638 /*
1639 * uvm_swap_io: do an i/o operation to swap
1640 */
1641
1642 static int
1643 uvm_swap_io(pps, startslot, npages, flags)
1644 struct vm_page **pps;
1645 int startslot, npages, flags;
1646 {
1647 daddr_t startblk;
1648 struct buf *bp;
1649 vaddr_t kva;
1650 int error, s, mapinflags;
1651 boolean_t write, async;
1652 UVMHIST_FUNC("uvm_swap_io"); UVMHIST_CALLED(pdhist);
1653
1654 UVMHIST_LOG(pdhist, "<- called, startslot=%d, npages=%d, flags=%d",
1655 startslot, npages, flags, 0);
1656
1657 write = (flags & B_READ) == 0;
1658 async = (flags & B_ASYNC) != 0;
1659
1660 /*
1661 * convert starting drum slot to block number
1662 */
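	/*
	 * (purely illustrative: with 4kb pages and 512-byte disk blocks,
	 * drum slot 3 becomes byte offset 12288, i.e. disk block 24.)
	 */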
1663
1664 startblk = btodb((u_int64_t)startslot << PAGE_SHIFT);
1665
1666 /*
1667 * first, map the pages into the kernel.
1668 */
1669
1670 mapinflags = !write ?
1671 UVMPAGER_MAPIN_WAITOK|UVMPAGER_MAPIN_READ :
1672 UVMPAGER_MAPIN_WAITOK|UVMPAGER_MAPIN_WRITE;
1673 kva = uvm_pagermapin(pps, npages, mapinflags);
1674
1675 /*
1676 * now allocate a buf for the i/o.
1677 */
1678
1679 s = splbio();
1680 bp = pool_get(&bufpool, PR_WAITOK);
1681 splx(s);
1682
1683 /*
1684 * fill in the bp. we currently route our i/o through
1685 * /dev/drum's vnode [swapdev_vp].
1686 */
1687
1688 bp->b_flags = B_BUSY | B_NOCACHE | (flags & (B_READ|B_ASYNC));
1689 bp->b_proc = &proc0; /* XXX */
1690 bp->b_vnbufs.le_next = NOLIST;
1691 bp->b_data = (caddr_t)kva;
1692 bp->b_blkno = startblk;
1693 bp->b_vp = swapdev_vp;
1694 bp->b_dev = swapdev_vp->v_rdev;
1695 bp->b_bufsize = bp->b_bcount = npages << PAGE_SHIFT;
1696 LIST_INIT(&bp->b_dep);
1697
1698 /*
1699 * bump v_numoutput (counter of number of active outputs).
1700 */
1701
1702 if (write) {
1703 s = splbio();
1704 swapdev_vp->v_numoutput++;
1705 splx(s);
1706 }
1707
1708 /*
1709 * for async ops we must set up the iodone handler.
1710 */
1711
1712 if (async) {
1713 bp->b_flags |= B_CALL;
1714 bp->b_iodone = uvm_aio_biodone;
1715 UVMHIST_LOG(pdhist, "doing async!", 0, 0, 0, 0);
1716 }
1717 UVMHIST_LOG(pdhist,
1718 "about to start io: data = %p blkno = 0x%x, bcount = %ld",
1719 bp->b_data, bp->b_blkno, bp->b_bcount, 0);
1720
1721 /*
1722 * now we start the I/O, and if async, return.
1723 */
1724
1725 VOP_STRATEGY(bp);
1726 if (async)
1727 return 0;
1728
1729 /*
1730 * must be sync i/o. wait for it to finish
1731 */
1732
1733 error = biowait(bp);
1734
1735 /*
1736 * kill the pager mapping
1737 */
1738
1739 uvm_pagermapout(kva, npages);
1740
1741 /*
1742 * now dispose of the buf and we're done.
1743 */
1744
1745 s = splbio();
1746 if (write)
1747 vwakeup(bp);
1748 pool_put(&bufpool, bp);
1749 splx(s);
1750 UVMHIST_LOG(pdhist, "<- done (sync) error=%d", error, 0, 0, 0);
1751 return (error);
1752 }
1753