Home | History | Annotate | Line # | Download | only in uvm
uvm_swap.c revision 1.161.2.2
      1 /*	$NetBSD: uvm_swap.c,v 1.161.2.2 2014/10/27 05:42:43 msaitoh Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 1995, 1996, 1997, 2009 Matthew R. Green
      5  * All rights reserved.
      6  *
      7  * Redistribution and use in source and binary forms, with or without
      8  * modification, are permitted provided that the following conditions
      9  * are met:
     10  * 1. Redistributions of source code must retain the above copyright
     11  *    notice, this list of conditions and the following disclaimer.
     12  * 2. Redistributions in binary form must reproduce the above copyright
     13  *    notice, this list of conditions and the following disclaimer in the
     14  *    documentation and/or other materials provided with the distribution.
     15  *
     16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     18  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     19  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
     21  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
     22  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
     23  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
     24  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     26  * SUCH DAMAGE.
     27  *
     28  * from: NetBSD: vm_swap.c,v 1.52 1997/12/02 13:47:37 pk Exp
     29  * from: Id: uvm_swap.c,v 1.1.2.42 1998/02/02 20:38:06 chuck Exp
     30  */
     31 
     32 #include <sys/cdefs.h>
     33 __KERNEL_RCSID(0, "$NetBSD: uvm_swap.c,v 1.161.2.2 2014/10/27 05:42:43 msaitoh Exp $");
     34 
     35 #include "opt_uvmhist.h"
     36 #include "opt_compat_netbsd.h"
     37 #include "opt_ddb.h"
     38 
     39 #include <sys/param.h>
     40 #include <sys/systm.h>
     41 #include <sys/buf.h>
     42 #include <sys/bufq.h>
     43 #include <sys/conf.h>
     44 #include <sys/proc.h>
     45 #include <sys/namei.h>
     46 #include <sys/disklabel.h>
     47 #include <sys/errno.h>
     48 #include <sys/kernel.h>
     49 #include <sys/vnode.h>
     50 #include <sys/file.h>
     51 #include <sys/vmem.h>
     52 #include <sys/blist.h>
     53 #include <sys/mount.h>
     54 #include <sys/pool.h>
     55 #include <sys/kmem.h>
     56 #include <sys/syscallargs.h>
     57 #include <sys/swap.h>
     58 #include <sys/kauth.h>
     59 #include <sys/sysctl.h>
     60 #include <sys/workqueue.h>
     61 
     62 #include <uvm/uvm.h>
     63 
     64 #include <miscfs/specfs/specdev.h>
     65 
     66 /*
     67  * uvm_swap.c: manage configuration and i/o to swap space.
     68  */
     69 
     70 /*
     71  * swap space is managed in the following way:
     72  *
     73  * each swap partition or file is described by a "swapdev" structure.
     74  * each "swapdev" structure contains a "swapent" structure which contains
     75  * information that is passed up to the user (via system calls).
     76  *
     77  * each swap partition is assigned a "priority" (int) which controls
     78  * swap parition usage.
     79  *
     80  * the system maintains a global data structure describing all swap
     81  * partitions/files.   there is a sorted LIST of "swappri" structures
     82  * which describe "swapdev"'s at that priority.   this LIST is headed
     83  * by the "swap_priority" global var.    each "swappri" contains a
     84  * CIRCLEQ of "swapdev" structures at that priority.
     85  *
     86  * locking:
     87  *  - swap_syscall_lock (krwlock_t): this lock serializes the swapctl
     88  *    system call and prevents the swap priority list from changing
     89  *    while we are in the middle of a system call (e.g. SWAP_STATS).
     90  *  - uvm_swap_data_lock (kmutex_t): this lock protects all swap data
     91  *    structures including the priority list, the swapdev structures,
     92  *    and the swapmap arena.
     93  *
     94  * each swap device has the following info:
     95  *  - swap device in use (could be disabled, preventing future use)
     96  *  - swap enabled (allows new allocations on swap)
     97  *  - map info in /dev/drum
     98  *  - vnode pointer
     99  * for swap files only:
    100  *  - block size
    101  *  - max byte count in buffer
    102  *  - buffer
    103  *
    104  * userland controls and configures swap with the swapctl(2) system call.
    105  * the sys_swapctl performs the following operations:
    106  *  [1] SWAP_NSWAP: returns the number of swap devices currently configured
    107  *  [2] SWAP_STATS: given a pointer to an array of swapent structures
    108  *	(passed in via "arg") of a size passed in via "misc" ... we load
    109  *	the current swap config into the array. The actual work is done
    110  *	in the uvm_swap_stats() function.
    111  *  [3] SWAP_ON: given a pathname in arg (could be device or file) and a
    112  *	priority in "misc", start swapping on it.
    113  *  [4] SWAP_OFF: as SWAP_ON, but stops swapping to a device
    114  *  [5] SWAP_CTL: changes the priority of a swap device (new priority in
    115  *	"misc")
    116  */
    117 
    118 /*
    119  * swapdev: describes a single swap partition/file
    120  *
    121  * note the following should be true:
    122  * swd_inuse <= swd_nblks  [number of blocks in use is <= total blocks]
    123  * swd_nblks <= swd_mapsize [because mapsize includes miniroot+disklabel]
    124  */
    125 struct swapdev {
    126 	dev_t			swd_dev;	/* device id */
    127 	int			swd_flags;	/* flags:inuse/enable/fake */
    128 	int			swd_priority;	/* our priority */
    129 	int			swd_nblks;	/* blocks in this device */
    130 	char			*swd_path;	/* saved pathname of device */
    131 	int			swd_pathlen;	/* length of pathname */
    132 	int			swd_npages;	/* #pages we can use */
    133 	int			swd_npginuse;	/* #pages in use */
    134 	int			swd_npgbad;	/* #pages bad */
    135 	int			swd_drumoffset;	/* page0 offset in drum */
    136 	int			swd_drumsize;	/* #pages in drum */
    137 	blist_t			swd_blist;	/* blist for this swapdev */
    138 	struct vnode		*swd_vp;	/* backing vnode */
    139 	CIRCLEQ_ENTRY(swapdev)	swd_next;	/* priority circleq */
    140 
    141 	int			swd_bsize;	/* blocksize (bytes) */
    142 	int			swd_maxactive;	/* max active i/o reqs */
    143 	struct bufq_state	*swd_tab;	/* buffer list */
    144 	int			swd_active;	/* number of active buffers */
    145 };
    146 
    147 /*
    148  * swap device priority entry; the list is kept sorted on `spi_priority'.
    149  */
    150 struct swappri {
    151 	int			spi_priority;     /* priority */
    152 	CIRCLEQ_HEAD(spi_swapdev, swapdev)	spi_swapdev;
    153 	/* circleq of swapdevs at this priority */
    154 	LIST_ENTRY(swappri)	spi_swappri;      /* global list of pri's */
    155 };
    156 
    157 /*
    158  * The following two structures are used to keep track of data transfers
    159  * on swap devices associated with regular files.
    160  * NOTE: this code is more or less a copy of vnd.c; we use the same
    161  * structure names here to ease porting..
    162  */
    163 struct vndxfer {
    164 	struct buf	*vx_bp;		/* Pointer to parent buffer */
    165 	struct swapdev	*vx_sdp;
    166 	int		vx_error;
    167 	int		vx_pending;	/* # of pending aux buffers */
    168 	int		vx_flags;
    169 #define VX_BUSY		1
    170 #define VX_DEAD		2
    171 };
    172 
    173 struct vndbuf {
    174 	struct buf	vb_buf;
    175 	struct vndxfer	*vb_xfer;
    176 };
    177 
    178 /*
    179  * NetBSD 1.3 swapctl(SWAP_STATS, ...) swapent structure; uses 32 bit
    180  * dev_t and has no se_path[] member.
    181  */
    182 struct swapent13 {
    183 	int32_t	se13_dev;		/* device id */
    184 	int	se13_flags;		/* flags */
    185 	int	se13_nblks;		/* total blocks */
    186 	int	se13_inuse;		/* blocks in use */
    187 	int	se13_priority;		/* priority of this device */
    188 };
    189 
    190 /*
    191  * NetBSD 5.0 swapctl(SWAP_STATS, ...) swapent structure; uses 32 bit
    192  * dev_t.
    193  */
    194 struct swapent50 {
    195 	int32_t	se50_dev;		/* device id */
    196 	int	se50_flags;		/* flags */
    197 	int	se50_nblks;		/* total blocks */
    198 	int	se50_inuse;		/* blocks in use */
    199 	int	se50_priority;		/* priority of this device */
    200 	char	se50_path[PATH_MAX+1];	/* path name */
    201 };
    202 
    203 /*
    204  * We keep a of pool vndbuf's and vndxfer structures.
    205  */
    206 static struct pool vndxfer_pool, vndbuf_pool;
    207 
    208 /*
    209  * local variables
    210  */
    211 static vmem_t *swapmap;	/* controls the mapping of /dev/drum */
    212 
    213 /* list of all active swap devices [by priority] */
    214 LIST_HEAD(swap_priority, swappri);
    215 static struct swap_priority swap_priority;
    216 
    217 /* locks */
    218 static krwlock_t swap_syscall_lock;
    219 
    220 /* workqueue and use counter for swap to regular files */
    221 static int sw_reg_count = 0;
    222 static struct workqueue *sw_reg_workqueue;
    223 
    224 /* tuneables */
    225 u_int uvm_swapisfull_factor = 99;
    226 
    227 /*
    228  * prototypes
    229  */
    230 static struct swapdev	*swapdrum_getsdp(int);
    231 
    232 static struct swapdev	*swaplist_find(struct vnode *, bool);
    233 static void		 swaplist_insert(struct swapdev *,
    234 					 struct swappri *, int);
    235 static void		 swaplist_trim(void);
    236 
    237 static int swap_on(struct lwp *, struct swapdev *);
    238 static int swap_off(struct lwp *, struct swapdev *);
    239 
    240 static void sw_reg_strategy(struct swapdev *, struct buf *, int);
    241 static void sw_reg_biodone(struct buf *);
    242 static void sw_reg_iodone(struct work *wk, void *dummy);
    243 static void sw_reg_start(struct swapdev *);
    244 
    245 static int uvm_swap_io(struct vm_page **, int, int, int);
    246 
    247 /*
    248  * uvm_swap_init: init the swap system data structures and locks
    249  *
    250  * => called at boot time from init_main.c after the filesystems
    251  *	are brought up (which happens after uvm_init())
    252  */
    253 void
    254 uvm_swap_init(void)
    255 {
    256 	UVMHIST_FUNC("uvm_swap_init");
    257 
    258 	UVMHIST_CALLED(pdhist);
    259 	/*
    260 	 * first, init the swap list, its counter, and its lock.
    261 	 * then get a handle on the vnode for /dev/drum by using
    262 	 * the its dev_t number ("swapdev", from MD conf.c).
    263 	 */
    264 
    265 	LIST_INIT(&swap_priority);
    266 	uvmexp.nswapdev = 0;
    267 	rw_init(&swap_syscall_lock);
    268 	mutex_init(&uvm_swap_data_lock, MUTEX_DEFAULT, IPL_NONE);
    269 
    270 	if (bdevvp(swapdev, &swapdev_vp))
    271 		panic("%s: can't get vnode for swap device", __func__);
    272 	if (vn_lock(swapdev_vp, LK_EXCLUSIVE | LK_RETRY))
    273 		panic("%s: can't lock swap device", __func__);
    274 	if (VOP_OPEN(swapdev_vp, FREAD | FWRITE, NOCRED))
    275 		panic("%s: can't open swap device", __func__);
    276 	VOP_UNLOCK(swapdev_vp);
    277 
    278 	/*
    279 	 * create swap block resource map to map /dev/drum.   the range
    280 	 * from 1 to INT_MAX allows 2 gigablocks of swap space.  note
    281 	 * that block 0 is reserved (used to indicate an allocation
    282 	 * failure, or no allocation).
    283 	 */
    284 	swapmap = vmem_create("swapmap", 1, INT_MAX - 1, 1, NULL, NULL, NULL, 0,
    285 	    VM_NOSLEEP, IPL_NONE);
    286 	if (swapmap == 0) {
    287 		panic("%s: vmem_create failed", __func__);
    288 	}
    289 
    290 	pool_init(&vndxfer_pool, sizeof(struct vndxfer), 0, 0, 0, "swp vnx",
    291 	    NULL, IPL_BIO);
    292 	pool_init(&vndbuf_pool, sizeof(struct vndbuf), 0, 0, 0, "swp vnd",
    293 	    NULL, IPL_BIO);
    294 
    295 	UVMHIST_LOG(pdhist, "<- done", 0, 0, 0, 0);
    296 }
    297 
    298 /*
    299  * swaplist functions: functions that operate on the list of swap
    300  * devices on the system.
    301  */
    302 
    303 /*
    304  * swaplist_insert: insert swap device "sdp" into the global list
    305  *
    306  * => caller must hold both swap_syscall_lock and uvm_swap_data_lock
    307  * => caller must provide a newly allocated swappri structure (we will
    308  *	FREE it if we don't need it... this it to prevent allocation
    309  *	blocking here while adding swap)
    310  */
    311 static void
    312 swaplist_insert(struct swapdev *sdp, struct swappri *newspp, int priority)
    313 {
    314 	struct swappri *spp, *pspp;
    315 	UVMHIST_FUNC("swaplist_insert"); UVMHIST_CALLED(pdhist);
    316 
    317 	/*
    318 	 * find entry at or after which to insert the new device.
    319 	 */
    320 	pspp = NULL;
    321 	LIST_FOREACH(spp, &swap_priority, spi_swappri) {
    322 		if (priority <= spp->spi_priority)
    323 			break;
    324 		pspp = spp;
    325 	}
    326 
    327 	/*
    328 	 * new priority?
    329 	 */
    330 	if (spp == NULL || spp->spi_priority != priority) {
    331 		spp = newspp;  /* use newspp! */
    332 		UVMHIST_LOG(pdhist, "created new swappri = %d",
    333 			    priority, 0, 0, 0);
    334 
    335 		spp->spi_priority = priority;
    336 		CIRCLEQ_INIT(&spp->spi_swapdev);
    337 
    338 		if (pspp)
    339 			LIST_INSERT_AFTER(pspp, spp, spi_swappri);
    340 		else
    341 			LIST_INSERT_HEAD(&swap_priority, spp, spi_swappri);
    342 	} else {
    343 	  	/* we don't need a new priority structure, free it */
    344 		kmem_free(newspp, sizeof(*newspp));
    345 	}
    346 
    347 	/*
    348 	 * priority found (or created).   now insert on the priority's
    349 	 * circleq list and bump the total number of swapdevs.
    350 	 */
    351 	sdp->swd_priority = priority;
    352 	CIRCLEQ_INSERT_TAIL(&spp->spi_swapdev, sdp, swd_next);
    353 	uvmexp.nswapdev++;
    354 }
    355 
    356 /*
    357  * swaplist_find: find and optionally remove a swap device from the
    358  *	global list.
    359  *
    360  * => caller must hold both swap_syscall_lock and uvm_swap_data_lock
    361  * => we return the swapdev we found (and removed)
    362  */
    363 static struct swapdev *
    364 swaplist_find(struct vnode *vp, bool remove)
    365 {
    366 	struct swapdev *sdp;
    367 	struct swappri *spp;
    368 
    369 	/*
    370 	 * search the lists for the requested vp
    371 	 */
    372 
    373 	LIST_FOREACH(spp, &swap_priority, spi_swappri) {
    374 		CIRCLEQ_FOREACH(sdp, &spp->spi_swapdev, swd_next) {
    375 			if (sdp->swd_vp == vp) {
    376 				if (remove) {
    377 					CIRCLEQ_REMOVE(&spp->spi_swapdev,
    378 					    sdp, swd_next);
    379 					uvmexp.nswapdev--;
    380 				}
    381 				return(sdp);
    382 			}
    383 		}
    384 	}
    385 	return (NULL);
    386 }
    387 
    388 /*
    389  * swaplist_trim: scan priority list for empty priority entries and kill
    390  *	them.
    391  *
    392  * => caller must hold both swap_syscall_lock and uvm_swap_data_lock
    393  */
    394 static void
    395 swaplist_trim(void)
    396 {
    397 	struct swappri *spp, *nextspp;
    398 
    399 	LIST_FOREACH_SAFE(spp, &swap_priority, spi_swappri, nextspp) {
    400 		if (CIRCLEQ_FIRST(&spp->spi_swapdev) !=
    401 		    (void *)&spp->spi_swapdev)
    402 			continue;
    403 		LIST_REMOVE(spp, spi_swappri);
    404 		kmem_free(spp, sizeof(*spp));
    405 	}
    406 }
    407 
    408 /*
    409  * swapdrum_getsdp: given a page offset in /dev/drum, convert it back
    410  *	to the "swapdev" that maps that section of the drum.
    411  *
    412  * => each swapdev takes one big contig chunk of the drum
    413  * => caller must hold uvm_swap_data_lock
    414  */
    415 static struct swapdev *
    416 swapdrum_getsdp(int pgno)
    417 {
    418 	struct swapdev *sdp;
    419 	struct swappri *spp;
    420 
    421 	LIST_FOREACH(spp, &swap_priority, spi_swappri) {
    422 		CIRCLEQ_FOREACH(sdp, &spp->spi_swapdev, swd_next) {
    423 			if (sdp->swd_flags & SWF_FAKE)
    424 				continue;
    425 			if (pgno >= sdp->swd_drumoffset &&
    426 			    pgno < (sdp->swd_drumoffset + sdp->swd_drumsize)) {
    427 				return sdp;
    428 			}
    429 		}
    430 	}
    431 	return NULL;
    432 }
    433 
    434 
    435 /*
    436  * sys_swapctl: main entry point for swapctl(2) system call
    437  * 	[with two helper functions: swap_on and swap_off]
    438  */
    439 int
    440 sys_swapctl(struct lwp *l, const struct sys_swapctl_args *uap, register_t *retval)
    441 {
    442 	/* {
    443 		syscallarg(int) cmd;
    444 		syscallarg(void *) arg;
    445 		syscallarg(int) misc;
    446 	} */
    447 	struct vnode *vp;
    448 	struct nameidata nd;
    449 	struct swappri *spp;
    450 	struct swapdev *sdp;
    451 	struct swapent *sep;
    452 #define SWAP_PATH_MAX (PATH_MAX + 1)
    453 	char	*userpath;
    454 	size_t	len = 0;
    455 	int	error, misc;
    456 	int	priority;
    457 	UVMHIST_FUNC("sys_swapctl"); UVMHIST_CALLED(pdhist);
    458 
    459 	/*
    460 	 * we handle the non-priv NSWAP and STATS request first.
    461 	 *
    462 	 * SWAP_NSWAP: return number of config'd swap devices
    463 	 * [can also be obtained with uvmexp sysctl]
    464 	 */
    465 	if (SCARG(uap, cmd) == SWAP_NSWAP) {
    466 		const int nswapdev = uvmexp.nswapdev;
    467 		UVMHIST_LOG(pdhist, "<- done SWAP_NSWAP=%d", nswapdev, 0, 0, 0);
    468 		*retval = nswapdev;
    469 		return 0;
    470 	}
    471 
    472 	misc = SCARG(uap, misc);
    473 	userpath = kmem_alloc(SWAP_PATH_MAX, KM_SLEEP);
    474 
    475 	/*
    476 	 * ensure serialized syscall access by grabbing the swap_syscall_lock
    477 	 */
    478 	rw_enter(&swap_syscall_lock, RW_WRITER);
    479 
    480 	/*
    481 	 * SWAP_STATS: get stats on current # of configured swap devs
    482 	 *
    483 	 * note that the swap_priority list can't change as long
    484 	 * as we are holding the swap_syscall_lock.  we don't want
    485 	 * to grab the uvm_swap_data_lock because we may fault&sleep during
    486 	 * copyout() and we don't want to be holding that lock then!
    487 	 */
    488 	if (SCARG(uap, cmd) == SWAP_STATS
    489 #if defined(COMPAT_50)
    490 	    || SCARG(uap, cmd) == SWAP_STATS50
    491 #endif
    492 #if defined(COMPAT_13)
    493 	    || SCARG(uap, cmd) == SWAP_STATS13
    494 #endif
    495 	    ) {
    496 		if ((size_t)misc > (size_t)uvmexp.nswapdev)
    497 			misc = uvmexp.nswapdev;
    498 
    499 		if (misc == 0) {
    500 			error = EINVAL;
    501 			goto out;
    502 		}
    503 		KASSERT(misc > 0);
    504 #if defined(COMPAT_13)
    505 		if (SCARG(uap, cmd) == SWAP_STATS13)
    506 			len = sizeof(struct swapent13) * misc;
    507 		else
    508 #endif
    509 #if defined(COMPAT_50)
    510 		if (SCARG(uap, cmd) == SWAP_STATS50)
    511 			len = sizeof(struct swapent50) * misc;
    512 		else
    513 #endif
    514 			len = sizeof(struct swapent) * misc;
    515 		sep = (struct swapent *)kmem_alloc(len, KM_SLEEP);
    516 
    517 		uvm_swap_stats(SCARG(uap, cmd), sep, misc, retval);
    518 		error = copyout(sep, SCARG(uap, arg), len);
    519 
    520 		kmem_free(sep, len);
    521 		UVMHIST_LOG(pdhist, "<- done SWAP_STATS", 0, 0, 0, 0);
    522 		goto out;
    523 	}
    524 	if (SCARG(uap, cmd) == SWAP_GETDUMPDEV) {
    525 		dev_t	*devp = (dev_t *)SCARG(uap, arg);
    526 
    527 		error = copyout(&dumpdev, devp, sizeof(dumpdev));
    528 		goto out;
    529 	}
    530 
    531 	/*
    532 	 * all other requests require superuser privs.   verify.
    533 	 */
    534 	if ((error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_SWAPCTL,
    535 	    0, NULL, NULL, NULL)))
    536 		goto out;
    537 
    538 	if (SCARG(uap, cmd) == SWAP_DUMPOFF) {
    539 		/* drop the current dump device */
    540 		dumpdev = NODEV;
    541 		dumpcdev = NODEV;
    542 		cpu_dumpconf();
    543 		goto out;
    544 	}
    545 
    546 	/*
    547 	 * at this point we expect a path name in arg.   we will
    548 	 * use namei() to gain a vnode reference (vref), and lock
    549 	 * the vnode (VOP_LOCK).
    550 	 *
    551 	 * XXX: a NULL arg means use the root vnode pointer (e.g. for
    552 	 * miniroot)
    553 	 */
    554 	if (SCARG(uap, arg) == NULL) {
    555 		vp = rootvp;		/* miniroot */
    556 		vref(vp);
    557 		if (vn_lock(vp, LK_EXCLUSIVE)) {
    558 			vrele(vp);
    559 			error = EBUSY;
    560 			goto out;
    561 		}
    562 		if (SCARG(uap, cmd) == SWAP_ON &&
    563 		    copystr("miniroot", userpath, SWAP_PATH_MAX, &len))
    564 			panic("swapctl: miniroot copy failed");
    565 		KASSERT(len > 0);
    566 	} else {
    567 		struct pathbuf *pb;
    568 
    569 		/*
    570 		 * This used to allow copying in one extra byte
    571 		 * (SWAP_PATH_MAX instead of PATH_MAX) for SWAP_ON.
    572 		 * This was completely pointless because if anyone
    573 		 * used that extra byte namei would fail with
    574 		 * ENAMETOOLONG anyway, so I've removed the excess
    575 		 * logic. - dholland 20100215
    576 		 */
    577 
    578 		error = pathbuf_copyin(SCARG(uap, arg), &pb);
    579 		if (error) {
    580 			goto out;
    581 		}
    582 		if (SCARG(uap, cmd) == SWAP_ON) {
    583 			/* get a copy of the string */
    584 			pathbuf_copystring(pb, userpath, SWAP_PATH_MAX);
    585 			len = strlen(userpath) + 1;
    586 		}
    587 		NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | TRYEMULROOT, pb);
    588 		if ((error = namei(&nd))) {
    589 			pathbuf_destroy(pb);
    590 			goto out;
    591 		}
    592 		vp = nd.ni_vp;
    593 		pathbuf_destroy(pb);
    594 	}
    595 	/* note: "vp" is referenced and locked */
    596 
    597 	error = 0;		/* assume no error */
    598 	switch(SCARG(uap, cmd)) {
    599 
    600 	case SWAP_DUMPDEV:
    601 		if (vp->v_type != VBLK) {
    602 			error = ENOTBLK;
    603 			break;
    604 		}
    605 		if (bdevsw_lookup(vp->v_rdev)) {
    606 			dumpdev = vp->v_rdev;
    607 			dumpcdev = devsw_blk2chr(dumpdev);
    608 		} else
    609 			dumpdev = NODEV;
    610 		cpu_dumpconf();
    611 		break;
    612 
    613 	case SWAP_CTL:
    614 		/*
    615 		 * get new priority, remove old entry (if any) and then
    616 		 * reinsert it in the correct place.  finally, prune out
    617 		 * any empty priority structures.
    618 		 */
    619 		priority = SCARG(uap, misc);
    620 		spp = kmem_alloc(sizeof(*spp), KM_SLEEP);
    621 		mutex_enter(&uvm_swap_data_lock);
    622 		if ((sdp = swaplist_find(vp, true)) == NULL) {
    623 			error = ENOENT;
    624 		} else {
    625 			swaplist_insert(sdp, spp, priority);
    626 			swaplist_trim();
    627 		}
    628 		mutex_exit(&uvm_swap_data_lock);
    629 		if (error)
    630 			kmem_free(spp, sizeof(*spp));
    631 		break;
    632 
    633 	case SWAP_ON:
    634 
    635 		/*
    636 		 * check for duplicates.   if none found, then insert a
    637 		 * dummy entry on the list to prevent someone else from
    638 		 * trying to enable this device while we are working on
    639 		 * it.
    640 		 */
    641 
    642 		priority = SCARG(uap, misc);
    643 		sdp = kmem_zalloc(sizeof(*sdp), KM_SLEEP);
    644 		spp = kmem_alloc(sizeof(*spp), KM_SLEEP);
    645 		sdp->swd_flags = SWF_FAKE;
    646 		sdp->swd_vp = vp;
    647 		sdp->swd_dev = (vp->v_type == VBLK) ? vp->v_rdev : NODEV;
    648 		bufq_alloc(&sdp->swd_tab, "disksort", BUFQ_SORT_RAWBLOCK);
    649 		mutex_enter(&uvm_swap_data_lock);
    650 		if (swaplist_find(vp, false) != NULL) {
    651 			error = EBUSY;
    652 			mutex_exit(&uvm_swap_data_lock);
    653 			bufq_free(sdp->swd_tab);
    654 			kmem_free(sdp, sizeof(*sdp));
    655 			kmem_free(spp, sizeof(*spp));
    656 			break;
    657 		}
    658 		swaplist_insert(sdp, spp, priority);
    659 		mutex_exit(&uvm_swap_data_lock);
    660 
    661 		KASSERT(len > 0);
    662 		sdp->swd_pathlen = len;
    663 		sdp->swd_path = kmem_alloc(len, KM_SLEEP);
    664 		if (copystr(userpath, sdp->swd_path, len, 0) != 0)
    665 			panic("swapctl: copystr");
    666 
    667 		/*
    668 		 * we've now got a FAKE placeholder in the swap list.
    669 		 * now attempt to enable swap on it.  if we fail, undo
    670 		 * what we've done and kill the fake entry we just inserted.
    671 		 * if swap_on is a success, it will clear the SWF_FAKE flag
    672 		 */
    673 
    674 		if ((error = swap_on(l, sdp)) != 0) {
    675 			mutex_enter(&uvm_swap_data_lock);
    676 			(void) swaplist_find(vp, true);  /* kill fake entry */
    677 			swaplist_trim();
    678 			mutex_exit(&uvm_swap_data_lock);
    679 			bufq_free(sdp->swd_tab);
    680 			kmem_free(sdp->swd_path, sdp->swd_pathlen);
    681 			kmem_free(sdp, sizeof(*sdp));
    682 			break;
    683 		}
    684 		break;
    685 
    686 	case SWAP_OFF:
    687 		mutex_enter(&uvm_swap_data_lock);
    688 		if ((sdp = swaplist_find(vp, false)) == NULL) {
    689 			mutex_exit(&uvm_swap_data_lock);
    690 			error = ENXIO;
    691 			break;
    692 		}
    693 
    694 		/*
    695 		 * If a device isn't in use or enabled, we
    696 		 * can't stop swapping from it (again).
    697 		 */
    698 		if ((sdp->swd_flags & (SWF_INUSE|SWF_ENABLE)) == 0) {
    699 			mutex_exit(&uvm_swap_data_lock);
    700 			error = EBUSY;
    701 			break;
    702 		}
    703 
    704 		/*
    705 		 * do the real work.
    706 		 */
    707 		error = swap_off(l, sdp);
    708 		break;
    709 
    710 	default:
    711 		error = EINVAL;
    712 	}
    713 
    714 	/*
    715 	 * done!  release the ref gained by namei() and unlock.
    716 	 */
    717 	vput(vp);
    718 out:
    719 	rw_exit(&swap_syscall_lock);
    720 	kmem_free(userpath, SWAP_PATH_MAX);
    721 
    722 	UVMHIST_LOG(pdhist, "<- done!  error=%d", error, 0, 0, 0);
    723 	return (error);
    724 }
    725 
    726 /*
    727  * uvm_swap_stats: implements swapctl(SWAP_STATS). The function is kept
    728  * away from sys_swapctl() in order to allow COMPAT_* swapctl()
    729  * emulation to use it directly without going through sys_swapctl().
    730  * The problem with using sys_swapctl() there is that it involves
    731  * copying the swapent array to the stackgap, and this array's size
    732  * is not known at build time. Hence it would not be possible to
    733  * ensure it would fit in the stackgap in any case.
    734  */
    735 void
    736 uvm_swap_stats(int cmd, struct swapent *sep, int sec, register_t *retval)
    737 {
    738 	struct swappri *spp;
    739 	struct swapdev *sdp;
    740 	int count = 0;
    741 
    742 	LIST_FOREACH(spp, &swap_priority, spi_swappri) {
    743 		CIRCLEQ_FOREACH(sdp, &spp->spi_swapdev, swd_next) {
    744 			int inuse;
    745 
    746 			if (sec-- <= 0)
    747 				break;
    748 
    749 			/*
    750 			 * backwards compatibility for system call.
    751 			 * For NetBSD 1.3 and 5.0, we have to use
    752 			 * the 32 bit dev_t.  For 5.0 and -current
    753 			 * we have to add the path.
    754 			 */
    755 			inuse = btodb((uint64_t)sdp->swd_npginuse <<
    756 			    PAGE_SHIFT);
    757 
    758 #if defined(COMPAT_13) || defined(COMPAT_50)
    759 			if (cmd == SWAP_STATS) {
    760 #endif
    761 				sep->se_dev = sdp->swd_dev;
    762 				sep->se_flags = sdp->swd_flags;
    763 				sep->se_nblks = sdp->swd_nblks;
    764 				sep->se_inuse = inuse;
    765 				sep->se_priority = sdp->swd_priority;
    766 				KASSERT(sdp->swd_pathlen <
    767 				    sizeof(sep->se_path));
    768 				strcpy(sep->se_path, sdp->swd_path);
    769 				sep++;
    770 #if defined(COMPAT_13)
    771 			} else if (cmd == SWAP_STATS13) {
    772 				struct swapent13 *sep13 =
    773 				    (struct swapent13 *)sep;
    774 
    775 				sep13->se13_dev = sdp->swd_dev;
    776 				sep13->se13_flags = sdp->swd_flags;
    777 				sep13->se13_nblks = sdp->swd_nblks;
    778 				sep13->se13_inuse = inuse;
    779 				sep13->se13_priority = sdp->swd_priority;
    780 				sep = (struct swapent *)(sep13 + 1);
    781 #endif
    782 #if defined(COMPAT_50)
    783 			} else if (cmd == SWAP_STATS50) {
    784 				struct swapent50 *sep50 =
    785 				    (struct swapent50 *)sep;
    786 
    787 				sep50->se50_dev = sdp->swd_dev;
    788 				sep50->se50_flags = sdp->swd_flags;
    789 				sep50->se50_nblks = sdp->swd_nblks;
    790 				sep50->se50_inuse = inuse;
    791 				sep50->se50_priority = sdp->swd_priority;
    792 				KASSERT(sdp->swd_pathlen <
    793 				    sizeof(sep50->se50_path));
    794 				strcpy(sep50->se50_path, sdp->swd_path);
    795 				sep = (struct swapent *)(sep50 + 1);
    796 #endif
    797 #if defined(COMPAT_13) || defined(COMPAT_50)
    798 			}
    799 #endif
    800 			count++;
    801 		}
    802 	}
    803 	*retval = count;
    804 }
    805 
    806 /*
    807  * swap_on: attempt to enable a swapdev for swapping.   note that the
    808  *	swapdev is already on the global list, but disabled (marked
    809  *	SWF_FAKE).
    810  *
    811  * => we avoid the start of the disk (to protect disk labels)
    812  * => we also avoid the miniroot, if we are swapping to root.
    813  * => caller should leave uvm_swap_data_lock unlocked, we may lock it
    814  *	if needed.
    815  */
    816 static int
    817 swap_on(struct lwp *l, struct swapdev *sdp)
    818 {
    819 	struct vnode *vp;
    820 	int error, npages, nblocks, size;
    821 	long addr;
    822 	vmem_addr_t result;
    823 	struct vattr va;
    824 	dev_t dev;
    825 	UVMHIST_FUNC("swap_on"); UVMHIST_CALLED(pdhist);
    826 
    827 	/*
    828 	 * we want to enable swapping on sdp.   the swd_vp contains
    829 	 * the vnode we want (locked and ref'd), and the swd_dev
    830 	 * contains the dev_t of the file, if it a block device.
    831 	 */
    832 
    833 	vp = sdp->swd_vp;
    834 	dev = sdp->swd_dev;
    835 
    836 	/*
    837 	 * open the swap file (mostly useful for block device files to
    838 	 * let device driver know what is up).
    839 	 *
    840 	 * we skip the open/close for root on swap because the root
    841 	 * has already been opened when root was mounted (mountroot).
    842 	 */
    843 	if (vp != rootvp) {
    844 		if ((error = VOP_OPEN(vp, FREAD|FWRITE, l->l_cred)))
    845 			return (error);
    846 	}
    847 
    848 	/* XXX this only works for block devices */
    849 	UVMHIST_LOG(pdhist, "  dev=%d, major(dev)=%d", dev, major(dev), 0,0);
    850 
    851 	/*
    852 	 * we now need to determine the size of the swap area.   for
    853 	 * block specials we can call the d_psize function.
    854 	 * for normal files, we must stat [get attrs].
    855 	 *
    856 	 * we put the result in nblks.
    857 	 * for normal files, we also want the filesystem block size
    858 	 * (which we get with statfs).
    859 	 */
    860 	switch (vp->v_type) {
    861 	case VBLK:
    862 		if ((nblocks = bdev_size(dev)) == -1) {
    863 			error = ENXIO;
    864 			goto bad;
    865 		}
    866 		break;
    867 
    868 	case VREG:
    869 		if ((error = VOP_GETATTR(vp, &va, l->l_cred)))
    870 			goto bad;
    871 		nblocks = (int)btodb(va.va_size);
    872 		sdp->swd_bsize = 1 << vp->v_mount->mnt_fs_bshift;
    873 		/*
    874 		 * limit the max # of outstanding I/O requests we issue
    875 		 * at any one time.   take it easy on NFS servers.
    876 		 */
    877 		if (vp->v_tag == VT_NFS)
    878 			sdp->swd_maxactive = 2; /* XXX */
    879 		else
    880 			sdp->swd_maxactive = 8; /* XXX */
    881 		break;
    882 
    883 	default:
    884 		error = ENXIO;
    885 		goto bad;
    886 	}
    887 
    888 	/*
    889 	 * save nblocks in a safe place and convert to pages.
    890 	 */
    891 
    892 	sdp->swd_nblks = nblocks;
    893 	npages = dbtob((uint64_t)nblocks) >> PAGE_SHIFT;
    894 
    895 	/*
    896 	 * for block special files, we want to make sure that leave
    897 	 * the disklabel and bootblocks alone, so we arrange to skip
    898 	 * over them (arbitrarily choosing to skip PAGE_SIZE bytes).
    899 	 * note that because of this the "size" can be less than the
    900 	 * actual number of blocks on the device.
    901 	 */
    902 	if (vp->v_type == VBLK) {
    903 		/* we use pages 1 to (size - 1) [inclusive] */
    904 		size = npages - 1;
    905 		addr = 1;
    906 	} else {
    907 		/* we use pages 0 to (size - 1) [inclusive] */
    908 		size = npages;
    909 		addr = 0;
    910 	}
    911 
    912 	/*
    913 	 * make sure we have enough blocks for a reasonable sized swap
    914 	 * area.   we want at least one page.
    915 	 */
    916 
    917 	if (size < 1) {
    918 		UVMHIST_LOG(pdhist, "  size <= 1!!", 0, 0, 0, 0);
    919 		error = EINVAL;
    920 		goto bad;
    921 	}
    922 
    923 	UVMHIST_LOG(pdhist, "  dev=%x: size=%d addr=%ld\n", dev, size, addr, 0);
    924 
    925 	/*
    926 	 * now we need to allocate an extent to manage this swap device
    927 	 */
    928 
    929 	sdp->swd_blist = blist_create(npages);
    930 	/* mark all expect the `saved' region free. */
    931 	blist_free(sdp->swd_blist, addr, size);
    932 
    933 	/*
    934 	 * if the vnode we are swapping to is the root vnode
    935 	 * (i.e. we are swapping to the miniroot) then we want
    936 	 * to make sure we don't overwrite it.   do a statfs to
    937 	 * find its size and skip over it.
    938 	 */
    939 	if (vp == rootvp) {
    940 		struct mount *mp;
    941 		struct statvfs *sp;
    942 		int rootblocks, rootpages;
    943 
    944 		mp = rootvnode->v_mount;
    945 		sp = &mp->mnt_stat;
    946 		rootblocks = sp->f_blocks * btodb(sp->f_frsize);
    947 		/*
    948 		 * XXX: sp->f_blocks isn't the total number of
    949 		 * blocks in the filesystem, it's the number of
    950 		 * data blocks.  so, our rootblocks almost
    951 		 * definitely underestimates the total size
    952 		 * of the filesystem - how badly depends on the
    953 		 * details of the filesystem type.  there isn't
    954 		 * an obvious way to deal with this cleanly
    955 		 * and perfectly, so for now we just pad our
    956 		 * rootblocks estimate with an extra 5 percent.
    957 		 */
    958 		rootblocks += (rootblocks >> 5) +
    959 			(rootblocks >> 6) +
    960 			(rootblocks >> 7);
    961 		rootpages = round_page(dbtob(rootblocks)) >> PAGE_SHIFT;
    962 		if (rootpages > size)
    963 			panic("swap_on: miniroot larger than swap?");
    964 
    965 		if (rootpages != blist_fill(sdp->swd_blist, addr, rootpages)) {
    966 			panic("swap_on: unable to preserve miniroot");
    967 		}
    968 
    969 		size -= rootpages;
    970 		printf("Preserved %d pages of miniroot ", rootpages);
    971 		printf("leaving %d pages of swap\n", size);
    972 	}
    973 
    974 	/*
    975 	 * add a ref to vp to reflect usage as a swap device.
    976 	 */
    977 	vref(vp);
    978 
    979 	/*
    980 	 * now add the new swapdev to the drum and enable.
    981 	 */
    982 	error = vmem_alloc(swapmap, npages, VM_BESTFIT | VM_SLEEP, &result);
    983 	if (error != 0)
    984 		panic("swapdrum_add");
    985 	/*
    986 	 * If this is the first regular swap create the workqueue.
    987 	 * => Protected by swap_syscall_lock.
    988 	 */
    989 	if (vp->v_type != VBLK) {
    990 		if (sw_reg_count++ == 0) {
    991 			KASSERT(sw_reg_workqueue == NULL);
    992 			if (workqueue_create(&sw_reg_workqueue, "swapiod",
    993 			    sw_reg_iodone, NULL, PRIBIO, IPL_BIO, 0) != 0)
    994 				panic("%s: workqueue_create failed", __func__);
    995 		}
    996 	}
    997 
    998 	sdp->swd_drumoffset = (int)result;
    999 	sdp->swd_drumsize = npages;
   1000 	sdp->swd_npages = size;
   1001 	mutex_enter(&uvm_swap_data_lock);
   1002 	sdp->swd_flags &= ~SWF_FAKE;	/* going live */
   1003 	sdp->swd_flags |= (SWF_INUSE|SWF_ENABLE);
   1004 	uvmexp.swpages += size;
   1005 	uvmexp.swpgavail += size;
   1006 	mutex_exit(&uvm_swap_data_lock);
   1007 	return (0);
   1008 
   1009 	/*
   1010 	 * failure: clean up and return error.
   1011 	 */
   1012 
   1013 bad:
   1014 	if (sdp->swd_blist) {
   1015 		blist_destroy(sdp->swd_blist);
   1016 	}
   1017 	if (vp != rootvp) {
   1018 		(void)VOP_CLOSE(vp, FREAD|FWRITE, l->l_cred);
   1019 	}
   1020 	return (error);
   1021 }
   1022 
   1023 /*
   1024  * swap_off: stop swapping on swapdev
   1025  *
   1026  * => swap data should be locked, we will unlock.
   1027  */
   1028 static int
   1029 swap_off(struct lwp *l, struct swapdev *sdp)
   1030 {
   1031 	int npages = sdp->swd_npages;
   1032 	int error = 0;
   1033 
   1034 	UVMHIST_FUNC("swap_off"); UVMHIST_CALLED(pdhist);
   1035 	UVMHIST_LOG(pdhist, "  dev=%x, npages=%d", sdp->swd_dev,npages,0,0);
   1036 
   1037 	/* disable the swap area being removed */
   1038 	sdp->swd_flags &= ~SWF_ENABLE;
   1039 	uvmexp.swpgavail -= npages;
   1040 	mutex_exit(&uvm_swap_data_lock);
   1041 
   1042 	/*
   1043 	 * the idea is to find all the pages that are paged out to this
   1044 	 * device, and page them all in.  in uvm, swap-backed pageable
   1045 	 * memory can take two forms: aobjs and anons.  call the
   1046 	 * swapoff hook for each subsystem to bring in pages.
   1047 	 */
   1048 
   1049 	if (uao_swap_off(sdp->swd_drumoffset,
   1050 			 sdp->swd_drumoffset + sdp->swd_drumsize) ||
   1051 	    amap_swap_off(sdp->swd_drumoffset,
   1052 			  sdp->swd_drumoffset + sdp->swd_drumsize)) {
   1053 		error = ENOMEM;
   1054 	} else if (sdp->swd_npginuse > sdp->swd_npgbad) {
   1055 		error = EBUSY;
   1056 	}
   1057 
   1058 	if (error) {
   1059 		mutex_enter(&uvm_swap_data_lock);
   1060 		sdp->swd_flags |= SWF_ENABLE;
   1061 		uvmexp.swpgavail += npages;
   1062 		mutex_exit(&uvm_swap_data_lock);
   1063 
   1064 		return error;
   1065 	}
   1066 
   1067 	/*
   1068 	 * If this is the last regular swap destroy the workqueue.
   1069 	 * => Protected by swap_syscall_lock.
   1070 	 */
   1071 	if (sdp->swd_vp->v_type != VBLK) {
   1072 		KASSERT(sw_reg_count > 0);
   1073 		KASSERT(sw_reg_workqueue != NULL);
   1074 		if (--sw_reg_count == 0) {
   1075 			workqueue_destroy(sw_reg_workqueue);
   1076 			sw_reg_workqueue = NULL;
   1077 		}
   1078 	}
   1079 
   1080 	/*
   1081 	 * done with the vnode.
   1082 	 * drop our ref on the vnode before calling VOP_CLOSE()
   1083 	 * so that spec_close() can tell if this is the last close.
   1084 	 */
   1085 	vrele(sdp->swd_vp);
   1086 	if (sdp->swd_vp != rootvp) {
   1087 		(void) VOP_CLOSE(sdp->swd_vp, FREAD|FWRITE, l->l_cred);
   1088 	}
   1089 
   1090 	mutex_enter(&uvm_swap_data_lock);
   1091 	uvmexp.swpages -= npages;
   1092 	uvmexp.swpginuse -= sdp->swd_npgbad;
   1093 
   1094 	if (swaplist_find(sdp->swd_vp, true) == NULL)
   1095 		panic("%s: swapdev not in list", __func__);
   1096 	swaplist_trim();
   1097 	mutex_exit(&uvm_swap_data_lock);
   1098 
   1099 	/*
   1100 	 * free all resources!
   1101 	 */
   1102 	vmem_free(swapmap, sdp->swd_drumoffset, sdp->swd_drumsize);
   1103 	blist_destroy(sdp->swd_blist);
   1104 	bufq_free(sdp->swd_tab);
   1105 	kmem_free(sdp, sizeof(*sdp));
   1106 	return (0);
   1107 }
   1108 
   1109 /*
   1110  * /dev/drum interface and i/o functions
   1111  */
   1112 
   1113 /*
   1114  * swstrategy: perform I/O on the drum
   1115  *
   1116  * => we must map the i/o request from the drum to the correct swapdev.
   1117  */
   1118 static void
   1119 swstrategy(struct buf *bp)
   1120 {
   1121 	struct swapdev *sdp;
   1122 	struct vnode *vp;
   1123 	int pageno, bn;
   1124 	UVMHIST_FUNC("swstrategy"); UVMHIST_CALLED(pdhist);
   1125 
   1126 	/*
   1127 	 * convert block number to swapdev.   note that swapdev can't
   1128 	 * be yanked out from under us because we are holding resources
   1129 	 * in it (i.e. the blocks we are doing I/O on).
   1130 	 */
   1131 	pageno = dbtob((int64_t)bp->b_blkno) >> PAGE_SHIFT;
   1132 	mutex_enter(&uvm_swap_data_lock);
   1133 	sdp = swapdrum_getsdp(pageno);
   1134 	mutex_exit(&uvm_swap_data_lock);
   1135 	if (sdp == NULL) {
   1136 		bp->b_error = EINVAL;
   1137 		bp->b_resid = bp->b_bcount;
   1138 		biodone(bp);
   1139 		UVMHIST_LOG(pdhist, "  failed to get swap device", 0, 0, 0, 0);
   1140 		return;
   1141 	}
   1142 
   1143 	/*
   1144 	 * convert drum page number to block number on this swapdev.
   1145 	 */
   1146 
   1147 	pageno -= sdp->swd_drumoffset;	/* page # on swapdev */
   1148 	bn = btodb((uint64_t)pageno << PAGE_SHIFT); /* convert to diskblock */
   1149 
   1150 	UVMHIST_LOG(pdhist, "  %s: mapoff=%x bn=%x bcount=%ld",
   1151 		((bp->b_flags & B_READ) == 0) ? "write" : "read",
   1152 		sdp->swd_drumoffset, bn, bp->b_bcount);
   1153 
   1154 	/*
   1155 	 * for block devices we finish up here.
   1156 	 * for regular files we have to do more work which we delegate
   1157 	 * to sw_reg_strategy().
   1158 	 */
   1159 
   1160 	vp = sdp->swd_vp;		/* swapdev vnode pointer */
   1161 	switch (vp->v_type) {
   1162 	default:
   1163 		panic("%s: vnode type 0x%x", __func__, vp->v_type);
   1164 
   1165 	case VBLK:
   1166 
   1167 		/*
   1168 		 * must convert "bp" from an I/O on /dev/drum to an I/O
   1169 		 * on the swapdev (sdp).
   1170 		 */
   1171 		bp->b_blkno = bn;		/* swapdev block number */
   1172 		bp->b_dev = sdp->swd_dev;	/* swapdev dev_t */
   1173 
   1174 		/*
   1175 		 * if we are doing a write, we have to redirect the i/o on
   1176 		 * drum's v_numoutput counter to the swapdevs.
   1177 		 */
   1178 		if ((bp->b_flags & B_READ) == 0) {
   1179 			mutex_enter(bp->b_objlock);
   1180 			vwakeup(bp);	/* kills one 'v_numoutput' on drum */
   1181 			mutex_exit(bp->b_objlock);
   1182 			mutex_enter(vp->v_interlock);
   1183 			vp->v_numoutput++;	/* put it on swapdev */
   1184 			mutex_exit(vp->v_interlock);
   1185 		}
   1186 
   1187 		/*
   1188 		 * finally plug in swapdev vnode and start I/O
   1189 		 */
   1190 		bp->b_vp = vp;
   1191 		bp->b_objlock = vp->v_interlock;
   1192 		VOP_STRATEGY(vp, bp);
   1193 		return;
   1194 
   1195 	case VREG:
   1196 		/*
   1197 		 * delegate to sw_reg_strategy function.
   1198 		 */
   1199 		sw_reg_strategy(sdp, bp, bn);
   1200 		return;
   1201 	}
   1202 	/* NOTREACHED */
   1203 }
   1204 
   1205 /*
   1206  * swread: the read function for the drum (just a call to physio)
   1207  */
   1208 /*ARGSUSED*/
   1209 static int
   1210 swread(dev_t dev, struct uio *uio, int ioflag)
   1211 {
   1212 	UVMHIST_FUNC("swread"); UVMHIST_CALLED(pdhist);
   1213 
   1214 	UVMHIST_LOG(pdhist, "  dev=%x offset=%qx", dev, uio->uio_offset, 0, 0);
   1215 	return (physio(swstrategy, NULL, dev, B_READ, minphys, uio));
   1216 }
   1217 
   1218 /*
   1219  * swwrite: the write function for the drum (just a call to physio)
   1220  */
   1221 /*ARGSUSED*/
   1222 static int
   1223 swwrite(dev_t dev, struct uio *uio, int ioflag)
   1224 {
   1225 	UVMHIST_FUNC("swwrite"); UVMHIST_CALLED(pdhist);
   1226 
   1227 	UVMHIST_LOG(pdhist, "  dev=%x offset=%qx", dev, uio->uio_offset, 0, 0);
   1228 	return (physio(swstrategy, NULL, dev, B_WRITE, minphys, uio));
   1229 }
   1230 
   1231 const struct bdevsw swap_bdevsw = {
   1232 	nullopen, nullclose, swstrategy, noioctl, nodump, nosize, D_OTHER,
   1233 };
   1234 
   1235 const struct cdevsw swap_cdevsw = {
   1236 	nullopen, nullclose, swread, swwrite, noioctl,
   1237 	nostop, notty, nopoll, nommap, nokqfilter, D_OTHER,
   1238 };
   1239 
   1240 /*
   1241  * sw_reg_strategy: handle swap i/o to regular files
   1242  */
   1243 static void
   1244 sw_reg_strategy(struct swapdev *sdp, struct buf *bp, int bn)
   1245 {
   1246 	struct vnode	*vp;
   1247 	struct vndxfer	*vnx;
   1248 	daddr_t		nbn;
   1249 	char 		*addr;
   1250 	off_t		byteoff;
   1251 	int		s, off, nra, error, sz, resid;
   1252 	UVMHIST_FUNC("sw_reg_strategy"); UVMHIST_CALLED(pdhist);
   1253 
   1254 	/*
   1255 	 * allocate a vndxfer head for this transfer and point it to
   1256 	 * our buffer.
   1257 	 */
   1258 	vnx = pool_get(&vndxfer_pool, PR_WAITOK);
   1259 	vnx->vx_flags = VX_BUSY;
   1260 	vnx->vx_error = 0;
   1261 	vnx->vx_pending = 0;
   1262 	vnx->vx_bp = bp;
   1263 	vnx->vx_sdp = sdp;
   1264 
   1265 	/*
   1266 	 * setup for main loop where we read filesystem blocks into
   1267 	 * our buffer.
   1268 	 */
   1269 	error = 0;
   1270 	bp->b_resid = bp->b_bcount;	/* nothing transfered yet! */
   1271 	addr = bp->b_data;		/* current position in buffer */
   1272 	byteoff = dbtob((uint64_t)bn);
   1273 
   1274 	for (resid = bp->b_resid; resid; resid -= sz) {
   1275 		struct vndbuf	*nbp;
   1276 
   1277 		/*
   1278 		 * translate byteoffset into block number.  return values:
   1279 		 *   vp = vnode of underlying device
   1280 		 *  nbn = new block number (on underlying vnode dev)
   1281 		 *  nra = num blocks we can read-ahead (excludes requested
   1282 		 *	block)
   1283 		 */
   1284 		nra = 0;
   1285 		error = VOP_BMAP(sdp->swd_vp, byteoff / sdp->swd_bsize,
   1286 				 	&vp, &nbn, &nra);
   1287 
   1288 		if (error == 0 && nbn == (daddr_t)-1) {
   1289 			/*
   1290 			 * this used to just set error, but that doesn't
   1291 			 * do the right thing.  Instead, it causes random
   1292 			 * memory errors.  The panic() should remain until
   1293 			 * this condition doesn't destabilize the system.
   1294 			 */
   1295 #if 1
   1296 			panic("%s: swap to sparse file", __func__);
   1297 #else
   1298 			error = EIO;	/* failure */
   1299 #endif
   1300 		}
   1301 
   1302 		/*
   1303 		 * punt if there was an error or a hole in the file.
   1304 		 * we must wait for any i/o ops we have already started
   1305 		 * to finish before returning.
   1306 		 *
   1307 		 * XXX we could deal with holes here but it would be
   1308 		 * a hassle (in the write case).
   1309 		 */
   1310 		if (error) {
   1311 			s = splbio();
   1312 			vnx->vx_error = error;	/* pass error up */
   1313 			goto out;
   1314 		}
   1315 
   1316 		/*
   1317 		 * compute the size ("sz") of this transfer (in bytes).
   1318 		 */
   1319 		off = byteoff % sdp->swd_bsize;
   1320 		sz = (1 + nra) * sdp->swd_bsize - off;
   1321 		if (sz > resid)
   1322 			sz = resid;
   1323 
   1324 		UVMHIST_LOG(pdhist, "sw_reg_strategy: "
   1325 			    "vp %p/%p offset 0x%x/0x%x",
   1326 			    sdp->swd_vp, vp, byteoff, nbn);
   1327 
   1328 		/*
   1329 		 * now get a buf structure.   note that the vb_buf is
   1330 		 * at the front of the nbp structure so that you can
   1331 		 * cast pointers between the two structure easily.
   1332 		 */
   1333 		nbp = pool_get(&vndbuf_pool, PR_WAITOK);
   1334 		buf_init(&nbp->vb_buf);
   1335 		nbp->vb_buf.b_flags    = bp->b_flags;
   1336 		nbp->vb_buf.b_cflags   = bp->b_cflags;
   1337 		nbp->vb_buf.b_oflags   = bp->b_oflags;
   1338 		nbp->vb_buf.b_bcount   = sz;
   1339 		nbp->vb_buf.b_bufsize  = sz;
   1340 		nbp->vb_buf.b_error    = 0;
   1341 		nbp->vb_buf.b_data     = addr;
   1342 		nbp->vb_buf.b_lblkno   = 0;
   1343 		nbp->vb_buf.b_blkno    = nbn + btodb(off);
   1344 		nbp->vb_buf.b_rawblkno = nbp->vb_buf.b_blkno;
   1345 		nbp->vb_buf.b_iodone   = sw_reg_biodone;
   1346 		nbp->vb_buf.b_vp       = vp;
   1347 		nbp->vb_buf.b_objlock  = vp->v_interlock;
   1348 		if (vp->v_type == VBLK) {
   1349 			nbp->vb_buf.b_dev = vp->v_rdev;
   1350 		}
   1351 
   1352 		nbp->vb_xfer = vnx;	/* patch it back in to vnx */
   1353 
   1354 		/*
   1355 		 * Just sort by block number
   1356 		 */
   1357 		s = splbio();
   1358 		if (vnx->vx_error != 0) {
   1359 			buf_destroy(&nbp->vb_buf);
   1360 			pool_put(&vndbuf_pool, nbp);
   1361 			goto out;
   1362 		}
   1363 		vnx->vx_pending++;
   1364 
   1365 		/* sort it in and start I/O if we are not over our limit */
   1366 		/* XXXAD locking */
   1367 		bufq_put(sdp->swd_tab, &nbp->vb_buf);
   1368 		sw_reg_start(sdp);
   1369 		splx(s);
   1370 
   1371 		/*
   1372 		 * advance to the next I/O
   1373 		 */
   1374 		byteoff += sz;
   1375 		addr += sz;
   1376 	}
   1377 
   1378 	s = splbio();
   1379 
   1380 out: /* Arrive here at splbio */
   1381 	vnx->vx_flags &= ~VX_BUSY;
   1382 	if (vnx->vx_pending == 0) {
   1383 		error = vnx->vx_error;
   1384 		pool_put(&vndxfer_pool, vnx);
   1385 		bp->b_error = error;
   1386 		biodone(bp);
   1387 	}
   1388 	splx(s);
   1389 }
   1390 
   1391 /*
   1392  * sw_reg_start: start an I/O request on the requested swapdev
   1393  *
   1394  * => reqs are sorted by b_rawblkno (above)
   1395  */
   1396 static void
   1397 sw_reg_start(struct swapdev *sdp)
   1398 {
   1399 	struct buf	*bp;
   1400 	struct vnode	*vp;
   1401 	UVMHIST_FUNC("sw_reg_start"); UVMHIST_CALLED(pdhist);
   1402 
   1403 	/* recursion control */
   1404 	if ((sdp->swd_flags & SWF_BUSY) != 0)
   1405 		return;
   1406 
   1407 	sdp->swd_flags |= SWF_BUSY;
   1408 
   1409 	while (sdp->swd_active < sdp->swd_maxactive) {
   1410 		bp = bufq_get(sdp->swd_tab);
   1411 		if (bp == NULL)
   1412 			break;
   1413 		sdp->swd_active++;
   1414 
   1415 		UVMHIST_LOG(pdhist,
   1416 		    "sw_reg_start:  bp %p vp %p blkno %p cnt %lx",
   1417 		    bp, bp->b_vp, bp->b_blkno, bp->b_bcount);
   1418 		vp = bp->b_vp;
   1419 		KASSERT(bp->b_objlock == vp->v_interlock);
   1420 		if ((bp->b_flags & B_READ) == 0) {
   1421 			mutex_enter(vp->v_interlock);
   1422 			vp->v_numoutput++;
   1423 			mutex_exit(vp->v_interlock);
   1424 		}
   1425 		VOP_STRATEGY(vp, bp);
   1426 	}
   1427 	sdp->swd_flags &= ~SWF_BUSY;
   1428 }
   1429 
   1430 /*
   1431  * sw_reg_biodone: one of our i/o's has completed
   1432  */
   1433 static void
   1434 sw_reg_biodone(struct buf *bp)
   1435 {
   1436 	workqueue_enqueue(sw_reg_workqueue, &bp->b_work, NULL);
   1437 }
   1438 
   1439 /*
   1440  * sw_reg_iodone: one of our i/o's has completed and needs post-i/o cleanup
   1441  *
   1442  * => note that we can recover the vndbuf struct by casting the buf ptr
   1443  */
   1444 static void
   1445 sw_reg_iodone(struct work *wk, void *dummy)
   1446 {
   1447 	struct vndbuf *vbp = (void *)wk;
   1448 	struct vndxfer *vnx = vbp->vb_xfer;
   1449 	struct buf *pbp = vnx->vx_bp;		/* parent buffer */
   1450 	struct swapdev	*sdp = vnx->vx_sdp;
   1451 	int s, resid, error;
   1452 	KASSERT(&vbp->vb_buf.b_work == wk);
   1453 	UVMHIST_FUNC("sw_reg_iodone"); UVMHIST_CALLED(pdhist);
   1454 
   1455 	UVMHIST_LOG(pdhist, "  vbp=%p vp=%p blkno=%x addr=%p",
   1456 	    vbp, vbp->vb_buf.b_vp, vbp->vb_buf.b_blkno, vbp->vb_buf.b_data);
   1457 	UVMHIST_LOG(pdhist, "  cnt=%lx resid=%lx",
   1458 	    vbp->vb_buf.b_bcount, vbp->vb_buf.b_resid, 0, 0);
   1459 
   1460 	/*
   1461 	 * protect vbp at splbio and update.
   1462 	 */
   1463 
   1464 	s = splbio();
   1465 	resid = vbp->vb_buf.b_bcount - vbp->vb_buf.b_resid;
   1466 	pbp->b_resid -= resid;
   1467 	vnx->vx_pending--;
   1468 
   1469 	if (vbp->vb_buf.b_error != 0) {
   1470 		/* pass error upward */
   1471 		error = vbp->vb_buf.b_error ? vbp->vb_buf.b_error : EIO;
   1472 		UVMHIST_LOG(pdhist, "  got error=%d !", error, 0, 0, 0);
   1473 		vnx->vx_error = error;
   1474 	}
   1475 
   1476 	/*
   1477 	 * kill vbp structure
   1478 	 */
   1479 	buf_destroy(&vbp->vb_buf);
   1480 	pool_put(&vndbuf_pool, vbp);
   1481 
   1482 	/*
   1483 	 * wrap up this transaction if it has run to completion or, in
   1484 	 * case of an error, when all auxiliary buffers have returned.
   1485 	 */
   1486 	if (vnx->vx_error != 0) {
   1487 		/* pass error upward */
   1488 		error = vnx->vx_error;
   1489 		if ((vnx->vx_flags & VX_BUSY) == 0 && vnx->vx_pending == 0) {
   1490 			pbp->b_error = error;
   1491 			biodone(pbp);
   1492 			pool_put(&vndxfer_pool, vnx);
   1493 		}
   1494 	} else if (pbp->b_resid == 0) {
   1495 		KASSERT(vnx->vx_pending == 0);
   1496 		if ((vnx->vx_flags & VX_BUSY) == 0) {
   1497 			UVMHIST_LOG(pdhist, "  iodone error=%d !",
   1498 			    pbp, vnx->vx_error, 0, 0);
   1499 			biodone(pbp);
   1500 			pool_put(&vndxfer_pool, vnx);
   1501 		}
   1502 	}
   1503 
   1504 	/*
   1505 	 * done!   start next swapdev I/O if one is pending
   1506 	 */
   1507 	sdp->swd_active--;
   1508 	sw_reg_start(sdp);
   1509 	splx(s);
   1510 }
   1511 
   1512 
   1513 /*
   1514  * uvm_swap_alloc: allocate space on swap
   1515  *
   1516  * => allocation is done "round robin" down the priority list, as we
   1517  *	allocate in a priority we "rotate" the circle queue.
   1518  * => space can be freed with uvm_swap_free
   1519  * => we return the page slot number in /dev/drum (0 == invalid slot)
   1520  * => we lock uvm_swap_data_lock
   1521  * => XXXMRG: "LESSOK" INTERFACE NEEDED TO EXTENT SYSTEM
   1522  */
   1523 int
   1524 uvm_swap_alloc(int *nslots /* IN/OUT */, bool lessok)
   1525 {
   1526 	struct swapdev *sdp;
   1527 	struct swappri *spp;
   1528 	UVMHIST_FUNC("uvm_swap_alloc"); UVMHIST_CALLED(pdhist);
   1529 
   1530 	/*
   1531 	 * no swap devices configured yet?   definite failure.
   1532 	 */
   1533 	if (uvmexp.nswapdev < 1)
   1534 		return 0;
   1535 
   1536 	/*
   1537 	 * lock data lock, convert slots into blocks, and enter loop
   1538 	 */
   1539 	mutex_enter(&uvm_swap_data_lock);
   1540 
   1541 ReTry:	/* XXXMRG */
   1542 	LIST_FOREACH(spp, &swap_priority, spi_swappri) {
   1543 		CIRCLEQ_FOREACH(sdp, &spp->spi_swapdev, swd_next) {
   1544 			uint64_t result;
   1545 
   1546 			/* if it's not enabled, then we can't swap from it */
   1547 			if ((sdp->swd_flags & SWF_ENABLE) == 0)
   1548 				continue;
   1549 			if (sdp->swd_npginuse + *nslots > sdp->swd_npages)
   1550 				continue;
   1551 			result = blist_alloc(sdp->swd_blist, *nslots);
   1552 			if (result == BLIST_NONE) {
   1553 				continue;
   1554 			}
   1555 			KASSERT(result < sdp->swd_drumsize);
   1556 
   1557 			/*
   1558 			 * successful allocation!  now rotate the circleq.
   1559 			 */
   1560 			CIRCLEQ_REMOVE(&spp->spi_swapdev, sdp, swd_next);
   1561 			CIRCLEQ_INSERT_TAIL(&spp->spi_swapdev, sdp, swd_next);
   1562 			sdp->swd_npginuse += *nslots;
   1563 			uvmexp.swpginuse += *nslots;
   1564 			mutex_exit(&uvm_swap_data_lock);
   1565 			/* done!  return drum slot number */
   1566 			UVMHIST_LOG(pdhist,
   1567 			    "success!  returning %d slots starting at %d",
   1568 			    *nslots, result + sdp->swd_drumoffset, 0, 0);
   1569 			return (result + sdp->swd_drumoffset);
   1570 		}
   1571 	}
   1572 
   1573 	/* XXXMRG: BEGIN HACK */
   1574 	if (*nslots > 1 && lessok) {
   1575 		*nslots = 1;
   1576 		/* XXXMRG: ugh!  blist should support this for us */
   1577 		goto ReTry;
   1578 	}
   1579 	/* XXXMRG: END HACK */
   1580 
   1581 	mutex_exit(&uvm_swap_data_lock);
   1582 	return 0;
   1583 }
   1584 
   1585 /*
   1586  * uvm_swapisfull: return true if most of available swap is allocated
   1587  * and in use.  we don't count some small portion as it may be inaccessible
   1588  * to us at any given moment, for example if there is lock contention or if
   1589  * pages are busy.
   1590  */
   1591 bool
   1592 uvm_swapisfull(void)
   1593 {
   1594 	int swpgonly;
   1595 	bool rv;
   1596 
   1597 	mutex_enter(&uvm_swap_data_lock);
   1598 	KASSERT(uvmexp.swpgonly <= uvmexp.swpages);
   1599 	swpgonly = (int)((uint64_t)uvmexp.swpgonly * 100 /
   1600 	    uvm_swapisfull_factor);
   1601 	rv = (swpgonly >= uvmexp.swpgavail);
   1602 	mutex_exit(&uvm_swap_data_lock);
   1603 
   1604 	return (rv);
   1605 }
   1606 
   1607 /*
   1608  * uvm_swap_markbad: keep track of swap ranges where we've had i/o errors
   1609  *
   1610  * => we lock uvm_swap_data_lock
   1611  */
   1612 void
   1613 uvm_swap_markbad(int startslot, int nslots)
   1614 {
   1615 	struct swapdev *sdp;
   1616 	UVMHIST_FUNC("uvm_swap_markbad"); UVMHIST_CALLED(pdhist);
   1617 
   1618 	mutex_enter(&uvm_swap_data_lock);
   1619 	sdp = swapdrum_getsdp(startslot);
   1620 	KASSERT(sdp != NULL);
   1621 
   1622 	/*
   1623 	 * we just keep track of how many pages have been marked bad
   1624 	 * in this device, to make everything add up in swap_off().
   1625 	 * we assume here that the range of slots will all be within
   1626 	 * one swap device.
   1627 	 */
   1628 
   1629 	KASSERT(uvmexp.swpgonly >= nslots);
   1630 	uvmexp.swpgonly -= nslots;
   1631 	sdp->swd_npgbad += nslots;
   1632 	UVMHIST_LOG(pdhist, "now %d bad", sdp->swd_npgbad, 0,0,0);
   1633 	mutex_exit(&uvm_swap_data_lock);
   1634 }
   1635 
   1636 /*
   1637  * uvm_swap_free: free swap slots
   1638  *
   1639  * => this can be all or part of an allocation made by uvm_swap_alloc
   1640  * => we lock uvm_swap_data_lock
   1641  */
   1642 void
   1643 uvm_swap_free(int startslot, int nslots)
   1644 {
   1645 	struct swapdev *sdp;
   1646 	UVMHIST_FUNC("uvm_swap_free"); UVMHIST_CALLED(pdhist);
   1647 
   1648 	UVMHIST_LOG(pdhist, "freeing %d slots starting at %d", nslots,
   1649 	    startslot, 0, 0);
   1650 
   1651 	/*
   1652 	 * ignore attempts to free the "bad" slot.
   1653 	 */
   1654 
   1655 	if (startslot == SWSLOT_BAD) {
   1656 		return;
   1657 	}
   1658 
   1659 	/*
   1660 	 * convert drum slot offset back to sdp, free the blocks
   1661 	 * in the extent, and return.   must hold pri lock to do
   1662 	 * lookup and access the extent.
   1663 	 */
   1664 
   1665 	mutex_enter(&uvm_swap_data_lock);
   1666 	sdp = swapdrum_getsdp(startslot);
   1667 	KASSERT(uvmexp.nswapdev >= 1);
   1668 	KASSERT(sdp != NULL);
   1669 	KASSERT(sdp->swd_npginuse >= nslots);
   1670 	blist_free(sdp->swd_blist, startslot - sdp->swd_drumoffset, nslots);
   1671 	sdp->swd_npginuse -= nslots;
   1672 	uvmexp.swpginuse -= nslots;
   1673 	mutex_exit(&uvm_swap_data_lock);
   1674 }
   1675 
   1676 /*
   1677  * uvm_swap_put: put any number of pages into a contig place on swap
   1678  *
   1679  * => can be sync or async
   1680  */
   1681 
   1682 int
   1683 uvm_swap_put(int swslot, struct vm_page **ppsp, int npages, int flags)
   1684 {
   1685 	int error;
   1686 
   1687 	error = uvm_swap_io(ppsp, swslot, npages, B_WRITE |
   1688 	    ((flags & PGO_SYNCIO) ? 0 : B_ASYNC));
   1689 	return error;
   1690 }
   1691 
   1692 /*
   1693  * uvm_swap_get: get a single page from swap
   1694  *
   1695  * => usually a sync op (from fault)
   1696  */
   1697 
   1698 int
   1699 uvm_swap_get(struct vm_page *page, int swslot, int flags)
   1700 {
   1701 	int error;
   1702 
   1703 	uvmexp.nswget++;
   1704 	KASSERT(flags & PGO_SYNCIO);
   1705 	if (swslot == SWSLOT_BAD) {
   1706 		return EIO;
   1707 	}
   1708 
   1709 	error = uvm_swap_io(&page, swslot, 1, B_READ |
   1710 	    ((flags & PGO_SYNCIO) ? 0 : B_ASYNC));
   1711 	if (error == 0) {
   1712 
   1713 		/*
   1714 		 * this page is no longer only in swap.
   1715 		 */
   1716 
   1717 		mutex_enter(&uvm_swap_data_lock);
   1718 		KASSERT(uvmexp.swpgonly > 0);
   1719 		uvmexp.swpgonly--;
   1720 		mutex_exit(&uvm_swap_data_lock);
   1721 	}
   1722 	return error;
   1723 }
   1724 
   1725 /*
   1726  * uvm_swap_io: do an i/o operation to swap
   1727  */
   1728 
   1729 static int
   1730 uvm_swap_io(struct vm_page **pps, int startslot, int npages, int flags)
   1731 {
   1732 	daddr_t startblk;
   1733 	struct	buf *bp;
   1734 	vaddr_t kva;
   1735 	int	error, mapinflags;
   1736 	bool write, async;
   1737 	UVMHIST_FUNC("uvm_swap_io"); UVMHIST_CALLED(pdhist);
   1738 
   1739 	UVMHIST_LOG(pdhist, "<- called, startslot=%d, npages=%d, flags=%d",
   1740 	    startslot, npages, flags, 0);
   1741 
   1742 	write = (flags & B_READ) == 0;
   1743 	async = (flags & B_ASYNC) != 0;
   1744 
   1745 	/*
   1746 	 * allocate a buf for the i/o.
   1747 	 */
   1748 
   1749 	KASSERT(curlwp != uvm.pagedaemon_lwp || (write && async));
   1750 	bp = getiobuf(swapdev_vp, curlwp != uvm.pagedaemon_lwp);
   1751 	if (bp == NULL) {
   1752 		uvm_aio_aiodone_pages(pps, npages, true, ENOMEM);
   1753 		return ENOMEM;
   1754 	}
   1755 
   1756 	/*
   1757 	 * convert starting drum slot to block number
   1758 	 */
   1759 
   1760 	startblk = btodb((uint64_t)startslot << PAGE_SHIFT);
   1761 
   1762 	/*
   1763 	 * first, map the pages into the kernel.
   1764 	 */
   1765 
   1766 	mapinflags = !write ?
   1767 		UVMPAGER_MAPIN_WAITOK|UVMPAGER_MAPIN_READ :
   1768 		UVMPAGER_MAPIN_WAITOK|UVMPAGER_MAPIN_WRITE;
   1769 	kva = uvm_pagermapin(pps, npages, mapinflags);
   1770 
   1771 	/*
   1772 	 * fill in the bp/sbp.   we currently route our i/o through
   1773 	 * /dev/drum's vnode [swapdev_vp].
   1774 	 */
   1775 
   1776 	bp->b_cflags = BC_BUSY | BC_NOCACHE;
   1777 	bp->b_flags = (flags & (B_READ|B_ASYNC));
   1778 	bp->b_proc = &proc0;	/* XXX */
   1779 	bp->b_vnbufs.le_next = NOLIST;
   1780 	bp->b_data = (void *)kva;
   1781 	bp->b_blkno = startblk;
   1782 	bp->b_bufsize = bp->b_bcount = npages << PAGE_SHIFT;
   1783 
   1784 	/*
   1785 	 * bump v_numoutput (counter of number of active outputs).
   1786 	 */
   1787 
   1788 	if (write) {
   1789 		mutex_enter(swapdev_vp->v_interlock);
   1790 		swapdev_vp->v_numoutput++;
   1791 		mutex_exit(swapdev_vp->v_interlock);
   1792 	}
   1793 
   1794 	/*
   1795 	 * for async ops we must set up the iodone handler.
   1796 	 */
   1797 
   1798 	if (async) {
   1799 		bp->b_iodone = uvm_aio_biodone;
   1800 		UVMHIST_LOG(pdhist, "doing async!", 0, 0, 0, 0);
   1801 		if (curlwp == uvm.pagedaemon_lwp)
   1802 			BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
   1803 		else
   1804 			BIO_SETPRIO(bp, BPRIO_TIMELIMITED);
   1805 	} else {
   1806 		bp->b_iodone = NULL;
   1807 		BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
   1808 	}
   1809 	UVMHIST_LOG(pdhist,
   1810 	    "about to start io: data = %p blkno = 0x%x, bcount = %ld",
   1811 	    bp->b_data, bp->b_blkno, bp->b_bcount, 0);
   1812 
   1813 	/*
   1814 	 * now we start the I/O, and if async, return.
   1815 	 */
   1816 
   1817 	VOP_STRATEGY(swapdev_vp, bp);
   1818 	if (async)
   1819 		return 0;
   1820 
   1821 	/*
   1822 	 * must be sync i/o.   wait for it to finish
   1823 	 */
   1824 
   1825 	error = biowait(bp);
   1826 
   1827 	/*
   1828 	 * kill the pager mapping
   1829 	 */
   1830 
   1831 	uvm_pagermapout(kva, npages);
   1832 
   1833 	/*
   1834 	 * now dispose of the buf and we're done.
   1835 	 */
   1836 
   1837 	if (write) {
   1838 		mutex_enter(swapdev_vp->v_interlock);
   1839 		vwakeup(bp);
   1840 		mutex_exit(swapdev_vp->v_interlock);
   1841 	}
   1842 	putiobuf(bp);
   1843 	UVMHIST_LOG(pdhist, "<- done (sync)  error=%d", error, 0, 0, 0);
   1844 
   1845 	return (error);
   1846 }
   1847