      1 /*	$NetBSD: uvm_swap.c,v 1.160 2012/01/28 00:00:06 rmind Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 1995, 1996, 1997, 2009 Matthew R. Green
      5  * All rights reserved.
      6  *
      7  * Redistribution and use in source and binary forms, with or without
      8  * modification, are permitted provided that the following conditions
      9  * are met:
     10  * 1. Redistributions of source code must retain the above copyright
     11  *    notice, this list of conditions and the following disclaimer.
     12  * 2. Redistributions in binary form must reproduce the above copyright
     13  *    notice, this list of conditions and the following disclaimer in the
     14  *    documentation and/or other materials provided with the distribution.
     15  *
     16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     18  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     19  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
     21  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
     22  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
     23  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
     24  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     26  * SUCH DAMAGE.
     27  *
     28  * from: NetBSD: vm_swap.c,v 1.52 1997/12/02 13:47:37 pk Exp
     29  * from: Id: uvm_swap.c,v 1.1.2.42 1998/02/02 20:38:06 chuck Exp
     30  */
     31 
     32 #include <sys/cdefs.h>
     33 __KERNEL_RCSID(0, "$NetBSD: uvm_swap.c,v 1.160 2012/01/28 00:00:06 rmind Exp $");
     34 
     35 #include "opt_uvmhist.h"
     36 #include "opt_compat_netbsd.h"
     37 #include "opt_ddb.h"
     38 
     39 #include <sys/param.h>
     40 #include <sys/systm.h>
     41 #include <sys/buf.h>
     42 #include <sys/bufq.h>
     43 #include <sys/conf.h>
     44 #include <sys/proc.h>
     45 #include <sys/namei.h>
     46 #include <sys/disklabel.h>
     47 #include <sys/errno.h>
     48 #include <sys/kernel.h>
     49 #include <sys/vnode.h>
     50 #include <sys/file.h>
     51 #include <sys/vmem.h>
     52 #include <sys/blist.h>
     53 #include <sys/mount.h>
     54 #include <sys/pool.h>
     55 #include <sys/kmem.h>
     56 #include <sys/syscallargs.h>
     57 #include <sys/swap.h>
     58 #include <sys/kauth.h>
     59 #include <sys/sysctl.h>
     60 #include <sys/workqueue.h>
     61 
     62 #include <uvm/uvm.h>
     63 
     64 #include <miscfs/specfs/specdev.h>
     65 
     66 /*
     67  * uvm_swap.c: manage configuration and i/o to swap space.
     68  */
     69 
     70 /*
     71  * swap space is managed in the following way:
     72  *
     73  * each swap partition or file is described by a "swapdev" structure.
     74  * information about each "swapdev" is passed up to the user in the
     75  * form of a "swapent" structure (via the swapctl(2) system call).
     76  *
     77  * each swap partition is assigned a "priority" (int) which controls
     78  * swap partition usage.
     79  *
     80  * the system maintains a global data structure describing all swap
     81  * partitions/files.   there is a sorted LIST of "swappri" structures
     82  * which describe "swapdev"'s at that priority.   this LIST is headed
     83  * by the "swap_priority" global var.    each "swappri" contains a
     84  * CIRCLEQ of "swapdev" structures at that priority.
     85  *
     86  * locking:
     87  *  - swap_syscall_lock (krwlock_t): this lock serializes the swapctl
     88  *    system call and prevents the swap priority list from changing
     89  *    while we are in the middle of a system call (e.g. SWAP_STATS).
     90  *  - uvm_swap_data_lock (kmutex_t): this lock protects all swap data
     91  *    structures including the priority list, the swapdev structures,
     92  *    and the swapmap arena.
     93  *
     94  * each swap device has the following info:
     95  *  - swap device in use (could be disabled, preventing future use)
     96  *  - swap enabled (allows new allocations on swap)
     97  *  - map info in /dev/drum
     98  *  - vnode pointer
     99  * for swap files only:
    100  *  - block size
    101  *  - max byte count in buffer
    102  *  - buffer
    103  *
    104  * userland controls and configures swap with the swapctl(2) system call.
    105  * the sys_swapctl performs the following operations:
    106  *  [1] SWAP_NSWAP: returns the number of swap devices currently configured
    107  *  [2] SWAP_STATS: given a pointer to an array of swapent structures
    108  *	(passed in via "arg") of a size passed in via "misc" ... we load
    109  *	the current swap config into the array. The actual work is done
    110  *	in the uvm_swap_stats() function.
    111  *  [3] SWAP_ON: given a pathname in arg (could be device or file) and a
    112  *	priority in "misc", start swapping on it.
    113  *  [4] SWAP_OFF: as SWAP_ON, but stops swapping to a device
    114  *  [5] SWAP_CTL: changes the priority of a swap device (new priority in
    115  *	"misc")
    116  */
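
        /*
         * illustrative only (not part of this file): a minimal userland
         * sketch of the swapctl(2) interface documented above, assuming
         * the declarations from <unistd.h> and <sys/swap.h>.  it asks for
         * the number of configured devices (SWAP_NSWAP) and then fetches
         * one swapent per device (SWAP_STATS):
         *
         *	#include <sys/swap.h>
         *	#include <stdlib.h>
         *	#include <unistd.h>
         *
         *	int n = swapctl(SWAP_NSWAP, NULL, 0);
         *	if (n > 0) {
         *		struct swapent *sep = calloc(n, sizeof(*sep));
         *		if (sep != NULL && swapctl(SWAP_STATS, sep, n) == n) {
         *			// sep[0..n-1] now hold se_dev, se_nblks,
         *			// se_inuse, se_priority and se_path
         *		}
         *		free(sep);
         *	}
         */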
    117 
    118 /*
    119  * swapdev: describes a single swap partition/file
    120  *
    121  * note the following should be true:
    122  * swd_npginuse <= swd_npages   [number of pages in use is <= usable pages]
    123  * swd_npages <= swd_drumsize   [drum size also counts the skipped disklabel/miniroot pages]
    124  */
    125 struct swapdev {
    126 	dev_t			swd_dev;	/* device id */
    127 	int			swd_flags;	/* flags:inuse/enable/fake */
    128 	int			swd_priority;	/* our priority */
    129 	int			swd_nblks;	/* blocks in this device */
    130 	char			*swd_path;	/* saved pathname of device */
    131 	int			swd_pathlen;	/* length of pathname */
    132 	int			swd_npages;	/* #pages we can use */
    133 	int			swd_npginuse;	/* #pages in use */
    134 	int			swd_npgbad;	/* #pages bad */
    135 	int			swd_drumoffset;	/* page0 offset in drum */
    136 	int			swd_drumsize;	/* #pages in drum */
    137 	blist_t			swd_blist;	/* blist for this swapdev */
    138 	struct vnode		*swd_vp;	/* backing vnode */
    139 	CIRCLEQ_ENTRY(swapdev)	swd_next;	/* priority circleq */
    140 
    141 	int			swd_bsize;	/* blocksize (bytes) */
    142 	int			swd_maxactive;	/* max active i/o reqs */
    143 	struct bufq_state	*swd_tab;	/* buffer list */
    144 	int			swd_active;	/* number of active buffers */
    145 };
    146 
    147 /*
    148  * swap device priority entry; the list is kept sorted on `spi_priority'.
    149  */
    150 struct swappri {
    151 	int			spi_priority;     /* priority */
    152 	CIRCLEQ_HEAD(spi_swapdev, swapdev)	spi_swapdev;
    153 	/* circleq of swapdevs at this priority */
    154 	LIST_ENTRY(swappri)	spi_swappri;      /* global list of pri's */
    155 };
    156 
    157 /*
    158  * The following two structures are used to keep track of data transfers
    159  * on swap devices associated with regular files.
    160  * NOTE: this code is more or less a copy of vnd.c; we use the same
    161  * structure names here to ease porting..
    162  */
    163 struct vndxfer {
    164 	struct buf	*vx_bp;		/* Pointer to parent buffer */
    165 	struct swapdev	*vx_sdp;
    166 	int		vx_error;
    167 	int		vx_pending;	/* # of pending aux buffers */
    168 	int		vx_flags;
    169 #define VX_BUSY		1
    170 #define VX_DEAD		2
    171 };
    172 
    173 struct vndbuf {
    174 	struct buf	vb_buf;
    175 	struct vndxfer	*vb_xfer;
    176 };
    177 
    178 /*
    179  * NetBSD 1.3 swapctl(SWAP_STATS, ...) swapent structure; uses 32 bit
    180  * dev_t and has no se_path[] member.
    181  */
    182 struct swapent13 {
    183 	int32_t	se13_dev;		/* device id */
    184 	int	se13_flags;		/* flags */
    185 	int	se13_nblks;		/* total blocks */
    186 	int	se13_inuse;		/* blocks in use */
    187 	int	se13_priority;		/* priority of this device */
    188 };
    189 
    190 /*
    191  * NetBSD 5.0 swapctl(SWAP_STATS, ...) swapent structure; uses 32 bit
    192  * dev_t.
    193  */
    194 struct swapent50 {
    195 	int32_t	se50_dev;		/* device id */
    196 	int	se50_flags;		/* flags */
    197 	int	se50_nblks;		/* total blocks */
    198 	int	se50_inuse;		/* blocks in use */
    199 	int	se50_priority;		/* priority of this device */
    200 	char	se50_path[PATH_MAX+1];	/* path name */
    201 };
    202 
    203 /*
    204  * We keep a pool of vndbuf's and vndxfer structures.
    205  */
    206 static struct pool vndxfer_pool, vndbuf_pool;
    207 
    208 /*
    209  * local variables
    210  */
    211 static vmem_t *swapmap;	/* controls the mapping of /dev/drum */
    212 
    213 /* list of all active swap devices [by priority] */
    214 LIST_HEAD(swap_priority, swappri);
    215 static struct swap_priority swap_priority;
    216 
    217 /* locks */
    218 static krwlock_t swap_syscall_lock;
    219 
    220 /* workqueue and use counter for swap to regular files */
    221 static int sw_reg_count = 0;
    222 static struct workqueue *sw_reg_workqueue;
    223 
    224 /* tuneables */
    225 u_int uvm_swapisfull_factor = 99;
    226 
    227 /*
    228  * prototypes
    229  */
    230 static struct swapdev	*swapdrum_getsdp(int);
    231 
    232 static struct swapdev	*swaplist_find(struct vnode *, bool);
    233 static void		 swaplist_insert(struct swapdev *,
    234 					 struct swappri *, int);
    235 static void		 swaplist_trim(void);
    236 
    237 static int swap_on(struct lwp *, struct swapdev *);
    238 static int swap_off(struct lwp *, struct swapdev *);
    239 
    240 static void uvm_swap_stats(int, struct swapent *, int, register_t *);
    241 
    242 static void sw_reg_strategy(struct swapdev *, struct buf *, int);
    243 static void sw_reg_biodone(struct buf *);
    244 static void sw_reg_iodone(struct work *wk, void *dummy);
    245 static void sw_reg_start(struct swapdev *);
    246 
    247 static int uvm_swap_io(struct vm_page **, int, int, int);
    248 
    249 /*
    250  * uvm_swap_init: init the swap system data structures and locks
    251  *
    252  * => called at boot time from init_main.c after the filesystems
    253  *	are brought up (which happens after uvm_init())
    254  */
    255 void
    256 uvm_swap_init(void)
    257 {
    258 	UVMHIST_FUNC("uvm_swap_init");
    259 
    260 	UVMHIST_CALLED(pdhist);
    261 	/*
    262 	 * first, init the swap list, its counter, and its lock.
    263 	 * then get a handle on the vnode for /dev/drum by using
    264 	 * its dev_t number ("swapdev", from MD conf.c).
    265 	 */
    266 
    267 	LIST_INIT(&swap_priority);
    268 	uvmexp.nswapdev = 0;
    269 	rw_init(&swap_syscall_lock);
    270 	mutex_init(&uvm_swap_data_lock, MUTEX_DEFAULT, IPL_NONE);
    271 
    272 	if (bdevvp(swapdev, &swapdev_vp))
    273 		panic("%s: can't get vnode for swap device", __func__);
    274 	if (vn_lock(swapdev_vp, LK_EXCLUSIVE | LK_RETRY))
    275 		panic("%s: can't lock swap device", __func__);
    276 	if (VOP_OPEN(swapdev_vp, FREAD | FWRITE, NOCRED))
    277 		panic("%s: can't open swap device", __func__);
    278 	VOP_UNLOCK(swapdev_vp);
    279 
    280 	/*
    281 	 * create swap block resource map to map /dev/drum.   the range
    282 	 * from 1 to INT_MAX allows 2 gigablocks of swap space.  note
    283 	 * that block 0 is reserved (used to indicate an allocation
    284 	 * failure, or no allocation).
    285 	 */
    286 	swapmap = vmem_create("swapmap", 1, INT_MAX - 1, 1, NULL, NULL, NULL, 0,
    287 	    VM_NOSLEEP, IPL_NONE);
    288 	if (swapmap == 0) {
    289 		panic("%s: vmem_create failed", __func__);
    290 	}
    291 
    292 	pool_init(&vndxfer_pool, sizeof(struct vndxfer), 0, 0, 0, "swp vnx",
    293 	    NULL, IPL_BIO);
    294 	pool_init(&vndbuf_pool, sizeof(struct vndbuf), 0, 0, 0, "swp vnd",
    295 	    NULL, IPL_BIO);
    296 
    297 	UVMHIST_LOG(pdhist, "<- done", 0, 0, 0, 0);
    298 }
    299 
    300 /*
    301  * swaplist functions: functions that operate on the list of swap
    302  * devices on the system.
    303  */
    304 
    305 /*
    306  * swaplist_insert: insert swap device "sdp" into the global list
    307  *
    308  * => caller must hold both swap_syscall_lock and uvm_swap_data_lock
    309  * => caller must provide a newly allocated swappri structure (we will
    310  *	FREE it if we don't need it... this is to prevent allocation
    311  *	blocking here while adding swap)
    312  */
    313 static void
    314 swaplist_insert(struct swapdev *sdp, struct swappri *newspp, int priority)
    315 {
    316 	struct swappri *spp, *pspp;
    317 	UVMHIST_FUNC("swaplist_insert"); UVMHIST_CALLED(pdhist);
    318 
    319 	/*
    320 	 * find entry at or after which to insert the new device.
    321 	 */
    322 	pspp = NULL;
    323 	LIST_FOREACH(spp, &swap_priority, spi_swappri) {
    324 		if (priority <= spp->spi_priority)
    325 			break;
    326 		pspp = spp;
    327 	}
    328 
    329 	/*
    330 	 * new priority?
    331 	 */
    332 	if (spp == NULL || spp->spi_priority != priority) {
    333 		spp = newspp;  /* use newspp! */
    334 		UVMHIST_LOG(pdhist, "created new swappri = %d",
    335 			    priority, 0, 0, 0);
    336 
    337 		spp->spi_priority = priority;
    338 		CIRCLEQ_INIT(&spp->spi_swapdev);
    339 
    340 		if (pspp)
    341 			LIST_INSERT_AFTER(pspp, spp, spi_swappri);
    342 		else
    343 			LIST_INSERT_HEAD(&swap_priority, spp, spi_swappri);
    344 	} else {
    345 	  	/* we don't need a new priority structure, free it */
    346 		kmem_free(newspp, sizeof(*newspp));
    347 	}
    348 
    349 	/*
    350 	 * priority found (or created).   now insert on the priority's
    351 	 * circleq list and bump the total number of swapdevs.
    352 	 */
    353 	sdp->swd_priority = priority;
    354 	CIRCLEQ_INSERT_TAIL(&spp->spi_swapdev, sdp, swd_next);
    355 	uvmexp.nswapdev++;
    356 }
    357 
    358 /*
    359  * swaplist_find: find and optionally remove a swap device from the
    360  *	global list.
    361  *
    362  * => caller must hold both swap_syscall_lock and uvm_swap_data_lock
    363  * => we return the swapdev we found (and removed)
    364  */
    365 static struct swapdev *
    366 swaplist_find(struct vnode *vp, bool remove)
    367 {
    368 	struct swapdev *sdp;
    369 	struct swappri *spp;
    370 
    371 	/*
    372 	 * search the lists for the requested vp
    373 	 */
    374 
    375 	LIST_FOREACH(spp, &swap_priority, spi_swappri) {
    376 		CIRCLEQ_FOREACH(sdp, &spp->spi_swapdev, swd_next) {
    377 			if (sdp->swd_vp == vp) {
    378 				if (remove) {
    379 					CIRCLEQ_REMOVE(&spp->spi_swapdev,
    380 					    sdp, swd_next);
    381 					uvmexp.nswapdev--;
    382 				}
    383 				return(sdp);
    384 			}
    385 		}
    386 	}
    387 	return (NULL);
    388 }
    389 
    390 /*
    391  * swaplist_trim: scan priority list for empty priority entries and kill
    392  *	them.
    393  *
    394  * => caller must hold both swap_syscall_lock and uvm_swap_data_lock
    395  */
    396 static void
    397 swaplist_trim(void)
    398 {
    399 	struct swappri *spp, *nextspp;
    400 
    401 	for (spp = LIST_FIRST(&swap_priority); spp != NULL; spp = nextspp) {
    402 		nextspp = LIST_NEXT(spp, spi_swappri);
    403 		if (CIRCLEQ_FIRST(&spp->spi_swapdev) !=
    404 		    (void *)&spp->spi_swapdev)
    405 			continue;
    406 		LIST_REMOVE(spp, spi_swappri);
    407 		kmem_free(spp, sizeof(*spp));
    408 	}
    409 }
    410 
    411 /*
    412  * swapdrum_getsdp: given a page offset in /dev/drum, convert it back
    413  *	to the "swapdev" that maps that section of the drum.
    414  *
    415  * => each swapdev takes one big contig chunk of the drum
    416  * => caller must hold uvm_swap_data_lock
    417  */
    418 static struct swapdev *
    419 swapdrum_getsdp(int pgno)
    420 {
    421 	struct swapdev *sdp;
    422 	struct swappri *spp;
    423 
    424 	LIST_FOREACH(spp, &swap_priority, spi_swappri) {
    425 		CIRCLEQ_FOREACH(sdp, &spp->spi_swapdev, swd_next) {
    426 			if (sdp->swd_flags & SWF_FAKE)
    427 				continue;
    428 			if (pgno >= sdp->swd_drumoffset &&
    429 			    pgno < (sdp->swd_drumoffset + sdp->swd_drumsize)) {
    430 				return sdp;
    431 			}
    432 		}
    433 	}
    434 	return NULL;
    435 }
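
        /*
         * for illustration only (not compiled): once swapdrum_getsdp() has
         * found the owning swapdev for a drum page number "pgno", the
         * corresponding block number on that device is computed the same
         * way swstrategy() does it below:
         *
         *	int devpage = pgno - sdp->swd_drumoffset;
         *	daddr_t bn = btodb((uint64_t)devpage << PAGE_SHIFT);
         */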
    436 
    437 
    438 /*
    439  * sys_swapctl: main entry point for swapctl(2) system call
    440  * 	[with two helper functions: swap_on and swap_off]
    441  */
    442 int
    443 sys_swapctl(struct lwp *l, const struct sys_swapctl_args *uap, register_t *retval)
    444 {
    445 	/* {
    446 		syscallarg(int) cmd;
    447 		syscallarg(void *) arg;
    448 		syscallarg(int) misc;
    449 	} */
    450 	struct vnode *vp;
    451 	struct nameidata nd;
    452 	struct swappri *spp;
    453 	struct swapdev *sdp;
    454 	struct swapent *sep;
    455 #define SWAP_PATH_MAX (PATH_MAX + 1)
    456 	char	*userpath;
    457 	size_t	len;
    458 	int	error, misc;
    459 	int	priority;
    460 	UVMHIST_FUNC("sys_swapctl"); UVMHIST_CALLED(pdhist);
    461 
    462 	misc = SCARG(uap, misc);
    463 
    464 	userpath = kmem_alloc(SWAP_PATH_MAX, KM_SLEEP);
    465 
    466 	/*
    467 	 * ensure serialized syscall access by grabbing the swap_syscall_lock
    468 	 */
    469 	rw_enter(&swap_syscall_lock, RW_WRITER);
    470 
    471 	/*
    472 	 * we handle the non-priv NSWAP and STATS requests first.
    473 	 *
    474 	 * SWAP_NSWAP: return number of config'd swap devices
    475 	 * [can also be obtained with uvmexp sysctl]
    476 	 */
    477 	if (SCARG(uap, cmd) == SWAP_NSWAP) {
    478 		UVMHIST_LOG(pdhist, "<- done SWAP_NSWAP=%d", uvmexp.nswapdev,
    479 		    0, 0, 0);
    480 		*retval = uvmexp.nswapdev;
    481 		error = 0;
    482 		goto out;
    483 	}
    484 
    485 	/*
    486 	 * SWAP_STATS: get stats on current # of configured swap devs
    487 	 *
    488 	 * note that the swap_priority list can't change as long
    489 	 * as we are holding the swap_syscall_lock.  we don't want
    490 	 * to grab the uvm_swap_data_lock because we may fault&sleep during
    491 	 * copyout() and we don't want to be holding that lock then!
    492 	 */
    493 	if (SCARG(uap, cmd) == SWAP_STATS
    494 #if defined(COMPAT_50)
    495 	    || SCARG(uap, cmd) == SWAP_STATS50
    496 #endif
    497 #if defined(COMPAT_13)
    498 	    || SCARG(uap, cmd) == SWAP_STATS13
    499 #endif
    500 	    ) {
    501 		if ((size_t)misc > (size_t)uvmexp.nswapdev)
    502 			misc = uvmexp.nswapdev;
    503 #if defined(COMPAT_13)
    504 		if (SCARG(uap, cmd) == SWAP_STATS13)
    505 			len = sizeof(struct swapent13) * misc;
    506 		else
    507 #endif
    508 #if defined(COMPAT_50)
    509 		if (SCARG(uap, cmd) == SWAP_STATS50)
    510 			len = sizeof(struct swapent50) * misc;
    511 		else
    512 #endif
    513 			len = sizeof(struct swapent) * misc;
    514 		sep = (struct swapent *)kmem_alloc(len, KM_SLEEP);
    515 
    516 		uvm_swap_stats(SCARG(uap, cmd), sep, misc, retval);
    517 		error = copyout(sep, SCARG(uap, arg), len);
    518 
    519 		kmem_free(sep, len);
    520 		UVMHIST_LOG(pdhist, "<- done SWAP_STATS", 0, 0, 0, 0);
    521 		goto out;
    522 	}
    523 	if (SCARG(uap, cmd) == SWAP_GETDUMPDEV) {
    524 		dev_t	*devp = (dev_t *)SCARG(uap, arg);
    525 
    526 		error = copyout(&dumpdev, devp, sizeof(dumpdev));
    527 		goto out;
    528 	}
    529 
    530 	/*
    531 	 * all other requests require superuser privs.   verify.
    532 	 */
    533 	if ((error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_SWAPCTL,
    534 	    0, NULL, NULL, NULL)))
    535 		goto out;
    536 
    537 	if (SCARG(uap, cmd) == SWAP_DUMPOFF) {
    538 		/* drop the current dump device */
    539 		dumpdev = NODEV;
    540 		dumpcdev = NODEV;
    541 		cpu_dumpconf();
    542 		goto out;
    543 	}
    544 
    545 	/*
    546 	 * at this point we expect a path name in arg.   we will
    547 	 * use namei() to gain a vnode reference (vref), and lock
    548 	 * the vnode (VOP_LOCK).
    549 	 *
    550 	 * XXX: a NULL arg means use the root vnode pointer (e.g. for
    551 	 * miniroot)
    552 	 */
    553 	if (SCARG(uap, arg) == NULL) {
    554 		vp = rootvp;		/* miniroot */
    555 		vref(vp);
    556 		if (vn_lock(vp, LK_EXCLUSIVE)) {
    557 			vrele(vp);
    558 			error = EBUSY;
    559 			goto out;
    560 		}
    561 		if (SCARG(uap, cmd) == SWAP_ON &&
    562 		    copystr("miniroot", userpath, SWAP_PATH_MAX, &len))
    563 			panic("swapctl: miniroot copy failed");
    564 	} else {
    565 		struct pathbuf *pb;
    566 
    567 		/*
    568 		 * This used to allow copying in one extra byte
    569 		 * (SWAP_PATH_MAX instead of PATH_MAX) for SWAP_ON.
    570 		 * This was completely pointless because if anyone
    571 		 * used that extra byte namei would fail with
    572 		 * ENAMETOOLONG anyway, so I've removed the excess
    573 		 * logic. - dholland 20100215
    574 		 */
    575 
    576 		error = pathbuf_copyin(SCARG(uap, arg), &pb);
    577 		if (error) {
    578 			goto out;
    579 		}
    580 		if (SCARG(uap, cmd) == SWAP_ON) {
    581 			/* get a copy of the string */
    582 			pathbuf_copystring(pb, userpath, SWAP_PATH_MAX);
    583 			len = strlen(userpath) + 1;
    584 		}
    585 		NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | TRYEMULROOT, pb);
    586 		if ((error = namei(&nd))) {
    587 			pathbuf_destroy(pb);
    588 			goto out;
    589 		}
    590 		vp = nd.ni_vp;
    591 		pathbuf_destroy(pb);
    592 	}
    593 	/* note: "vp" is referenced and locked */
    594 
    595 	error = 0;		/* assume no error */
    596 	switch(SCARG(uap, cmd)) {
    597 
    598 	case SWAP_DUMPDEV:
    599 		if (vp->v_type != VBLK) {
    600 			error = ENOTBLK;
    601 			break;
    602 		}
    603 		if (bdevsw_lookup(vp->v_rdev)) {
    604 			dumpdev = vp->v_rdev;
    605 			dumpcdev = devsw_blk2chr(dumpdev);
    606 		} else
    607 			dumpdev = NODEV;
    608 		cpu_dumpconf();
    609 		break;
    610 
    611 	case SWAP_CTL:
    612 		/*
    613 		 * get new priority, remove old entry (if any) and then
    614 		 * reinsert it in the correct place.  finally, prune out
    615 		 * any empty priority structures.
    616 		 */
    617 		priority = SCARG(uap, misc);
    618 		spp = kmem_alloc(sizeof(*spp), KM_SLEEP);
    619 		mutex_enter(&uvm_swap_data_lock);
    620 		if ((sdp = swaplist_find(vp, true)) == NULL) {
    621 			error = ENOENT;
    622 		} else {
    623 			swaplist_insert(sdp, spp, priority);
    624 			swaplist_trim();
    625 		}
    626 		mutex_exit(&uvm_swap_data_lock);
    627 		if (error)
    628 			kmem_free(spp, sizeof(*spp));
    629 		break;
    630 
    631 	case SWAP_ON:
    632 
    633 		/*
    634 		 * check for duplicates.   if none found, then insert a
    635 		 * dummy entry on the list to prevent someone else from
    636 		 * trying to enable this device while we are working on
    637 		 * it.
    638 		 */
    639 
    640 		priority = SCARG(uap, misc);
    641 		sdp = kmem_zalloc(sizeof(*sdp), KM_SLEEP);
    642 		spp = kmem_alloc(sizeof(*spp), KM_SLEEP);
    643 		sdp->swd_flags = SWF_FAKE;
    644 		sdp->swd_vp = vp;
    645 		sdp->swd_dev = (vp->v_type == VBLK) ? vp->v_rdev : NODEV;
    646 		bufq_alloc(&sdp->swd_tab, "disksort", BUFQ_SORT_RAWBLOCK);
    647 		mutex_enter(&uvm_swap_data_lock);
    648 		if (swaplist_find(vp, false) != NULL) {
    649 			error = EBUSY;
    650 			mutex_exit(&uvm_swap_data_lock);
    651 			bufq_free(sdp->swd_tab);
    652 			kmem_free(sdp, sizeof(*sdp));
    653 			kmem_free(spp, sizeof(*spp));
    654 			break;
    655 		}
    656 		swaplist_insert(sdp, spp, priority);
    657 		mutex_exit(&uvm_swap_data_lock);
    658 
    659 		sdp->swd_pathlen = len;
    660 		sdp->swd_path = kmem_alloc(sdp->swd_pathlen, KM_SLEEP);
    661 		if (copystr(userpath, sdp->swd_path, sdp->swd_pathlen, 0) != 0)
    662 			panic("swapctl: copystr");
    663 
    664 		/*
    665 		 * we've now got a FAKE placeholder in the swap list.
    666 		 * now attempt to enable swap on it.  if we fail, undo
    667 		 * what we've done and kill the fake entry we just inserted.
    668 		 * if swap_on is a success, it will clear the SWF_FAKE flag
    669 		 */
    670 
    671 		if ((error = swap_on(l, sdp)) != 0) {
    672 			mutex_enter(&uvm_swap_data_lock);
    673 			(void) swaplist_find(vp, true);  /* kill fake entry */
    674 			swaplist_trim();
    675 			mutex_exit(&uvm_swap_data_lock);
    676 			bufq_free(sdp->swd_tab);
    677 			kmem_free(sdp->swd_path, sdp->swd_pathlen);
    678 			kmem_free(sdp, sizeof(*sdp));
    679 			break;
    680 		}
    681 		break;
    682 
    683 	case SWAP_OFF:
    684 		mutex_enter(&uvm_swap_data_lock);
    685 		if ((sdp = swaplist_find(vp, false)) == NULL) {
    686 			mutex_exit(&uvm_swap_data_lock);
    687 			error = ENXIO;
    688 			break;
    689 		}
    690 
    691 		/*
    692 		 * If a device isn't in use or enabled, we
    693 		 * can't stop swapping from it (again).
    694 		 */
    695 		if ((sdp->swd_flags & (SWF_INUSE|SWF_ENABLE)) == 0) {
    696 			mutex_exit(&uvm_swap_data_lock);
    697 			error = EBUSY;
    698 			break;
    699 		}
    700 
    701 		/*
    702 		 * do the real work.
    703 		 */
    704 		error = swap_off(l, sdp);
    705 		break;
    706 
    707 	default:
    708 		error = EINVAL;
    709 	}
    710 
    711 	/*
    712 	 * done!  release the ref gained by namei() and unlock.
    713 	 */
    714 	vput(vp);
    715 out:
    716 	rw_exit(&swap_syscall_lock);
    717 	kmem_free(userpath, SWAP_PATH_MAX);
    718 
    719 	UVMHIST_LOG(pdhist, "<- done!  error=%d", error, 0, 0, 0);
    720 	return (error);
    721 }
    722 
    723 /*
    724  * uvm_swap_stats: implements swapctl(SWAP_STATS). The function is kept
    725  * away from sys_swapctl() in order to allow COMPAT_* swapctl()
    726  * emulation to use it directly without going through sys_swapctl().
    727  * The problem with using sys_swapctl() there is that it involves
    728  * copying the swapent array to the stackgap, and this array's size
    729  * is not known at build time. Hence it would not be possible to
    730  * ensure it would fit in the stackgap in any case.
    731  */
    732 static void
    733 uvm_swap_stats(int cmd, struct swapent *sep, int sec, register_t *retval)
    734 {
    735 	struct swappri *spp;
    736 	struct swapdev *sdp;
    737 	int count = 0;
    738 
    739 	LIST_FOREACH(spp, &swap_priority, spi_swappri) {
    740 		for (sdp = CIRCLEQ_FIRST(&spp->spi_swapdev);
    741 		     sdp != (void *)&spp->spi_swapdev && sec-- > 0;
    742 		     sdp = CIRCLEQ_NEXT(sdp, swd_next)) {
    743 			int inuse;
    744 
    745 		  	/*
    746 			 * backwards compatibility for system call.
    747 			 * For NetBSD 1.3 and 5.0, we have to use
    748 			 * the 32 bit dev_t.  For 5.0 and -current
    749 			 * we have to add the path.
    750 			 */
    751 			inuse = btodb((uint64_t)sdp->swd_npginuse <<
    752 			    PAGE_SHIFT);
    753 
    754 #if defined(COMPAT_13) || defined(COMPAT_50)
    755 			if (cmd == SWAP_STATS) {
    756 #endif
    757 				sep->se_dev = sdp->swd_dev;
    758 				sep->se_flags = sdp->swd_flags;
    759 				sep->se_nblks = sdp->swd_nblks;
    760 				sep->se_inuse = inuse;
    761 				sep->se_priority = sdp->swd_priority;
    762 				memcpy(&sep->se_path, sdp->swd_path,
    763 				       sizeof sep->se_path);
    764 				sep++;
    765 #if defined(COMPAT_13)
    766 			} else if (cmd == SWAP_STATS13) {
    767 				struct swapent13 *sep13 =
    768 				    (struct swapent13 *)sep;
    769 
    770 				sep13->se13_dev = sdp->swd_dev;
    771 				sep13->se13_flags = sdp->swd_flags;
    772 				sep13->se13_nblks = sdp->swd_nblks;
    773 				sep13->se13_inuse = inuse;
    774 				sep13->se13_priority = sdp->swd_priority;
    775 				sep = (struct swapent *)(sep13 + 1);
    776 #endif
    777 #if defined(COMPAT_50)
    778 			} else if (cmd == SWAP_STATS50) {
    779 				struct swapent50 *sep50 =
    780 				    (struct swapent50 *)sep;
    781 
    782 				sep50->se50_dev = sdp->swd_dev;
    783 				sep50->se50_flags = sdp->swd_flags;
    784 				sep50->se50_nblks = sdp->swd_nblks;
    785 				sep50->se50_inuse = inuse;
    786 				sep50->se50_priority = sdp->swd_priority;
    787 				memcpy(&sep50->se50_path, sdp->swd_path,
    788 				       sizeof sep50->se50_path);
    789 				sep = (struct swapent *)(sep50 + 1);
    790 #endif
    791 #if defined(COMPAT_13) || defined(COMPAT_50)
    792 			}
    793 #endif
    794 			count++;
    795 		}
    796 	}
    797 
    798 	*retval = count;
    799 	return;
    800 }
    801 
    802 /*
    803  * swap_on: attempt to enable a swapdev for swapping.   note that the
    804  *	swapdev is already on the global list, but disabled (marked
    805  *	SWF_FAKE).
    806  *
    807  * => we avoid the start of the disk (to protect disk labels)
    808  * => we also avoid the miniroot, if we are swapping to root.
    809  * => caller should leave uvm_swap_data_lock unlocked, we may lock it
    810  *	if needed.
    811  */
    812 static int
    813 swap_on(struct lwp *l, struct swapdev *sdp)
    814 {
    815 	struct vnode *vp;
    816 	int error, npages, nblocks, size;
    817 	long addr;
    818 	vmem_addr_t result;
    819 	struct vattr va;
    820 	dev_t dev;
    821 	UVMHIST_FUNC("swap_on"); UVMHIST_CALLED(pdhist);
    822 
    823 	/*
    824 	 * we want to enable swapping on sdp.   the swd_vp contains
    825 	 * the vnode we want (locked and ref'd), and the swd_dev
    826 	 * contains the dev_t of the file, if it is a block device.
    827 	 */
    828 
    829 	vp = sdp->swd_vp;
    830 	dev = sdp->swd_dev;
    831 
    832 	/*
    833 	 * open the swap file (mostly useful for block device files to
    834 	 * let the device driver know what is up).
    835 	 *
    836 	 * we skip the open/close for root on swap because the root
    837 	 * has already been opened when root was mounted (mountroot).
    838 	 */
    839 	if (vp != rootvp) {
    840 		if ((error = VOP_OPEN(vp, FREAD|FWRITE, l->l_cred)))
    841 			return (error);
    842 	}
    843 
    844 	/* XXX this only works for block devices */
    845 	UVMHIST_LOG(pdhist, "  dev=%d, major(dev)=%d", dev, major(dev), 0,0);
    846 
    847 	/*
    848 	 * we now need to determine the size of the swap area.   for
    849 	 * block specials we can call the d_psize function.
    850 	 * for normal files, we must stat [get attrs].
    851 	 *
    852 	 * we put the result in nblocks.
    853 	 * for normal files, we also want the filesystem block size,
    854 	 * which we take from the vnode's mount point (mnt_fs_bshift).
    855 	 */
    856 	switch (vp->v_type) {
    857 	case VBLK:
    858 		if ((nblocks = bdev_size(dev)) == -1) {
    859 			error = ENXIO;
    860 			goto bad;
    861 		}
    862 		break;
    863 
    864 	case VREG:
    865 		if ((error = VOP_GETATTR(vp, &va, l->l_cred)))
    866 			goto bad;
    867 		nblocks = (int)btodb(va.va_size);
    868 		sdp->swd_bsize = 1 << vp->v_mount->mnt_fs_bshift;
    869 		/*
    870 		 * limit the max # of outstanding I/O requests we issue
    871 		 * at any one time.   take it easy on NFS servers.
    872 		 */
    873 		if (vp->v_tag == VT_NFS)
    874 			sdp->swd_maxactive = 2; /* XXX */
    875 		else
    876 			sdp->swd_maxactive = 8; /* XXX */
    877 		break;
    878 
    879 	default:
    880 		error = ENXIO;
    881 		goto bad;
    882 	}
    883 
    884 	/*
    885 	 * save nblocks in a safe place and convert to pages.
    886 	 */
    887 
    888 	sdp->swd_nblks = nblocks;
    889 	npages = dbtob((uint64_t)nblocks) >> PAGE_SHIFT;
    890 
    891 	/*
    892 	 * for block special files, we want to make sure that we leave
    893 	 * the disklabel and bootblocks alone, so we arrange to skip
    894 	 * over them (arbitrarily choosing to skip PAGE_SIZE bytes).
    895 	 * note that because of this the "size" can be less than the
    896 	 * actual number of blocks on the device.
    897 	 */
    898 	if (vp->v_type == VBLK) {
    899 		/* we use pages 1 to (size - 1) [inclusive] */
    900 		size = npages - 1;
    901 		addr = 1;
    902 	} else {
    903 		/* we use pages 0 to (size - 1) [inclusive] */
    904 		size = npages;
    905 		addr = 0;
    906 	}
    907 
    908 	/*
    909 	 * make sure we have enough blocks for a reasonable sized swap
    910 	 * area.   we want at least one page.
    911 	 */
    912 
    913 	if (size < 1) {
    914 		UVMHIST_LOG(pdhist, "  size <= 1!!", 0, 0, 0, 0);
    915 		error = EINVAL;
    916 		goto bad;
    917 	}
    918 
    919 	UVMHIST_LOG(pdhist, "  dev=%x: size=%d addr=%ld\n", dev, size, addr, 0);
    920 
    921 	/*
    922 	 * now we need to allocate a blist to manage this swap device
    923 	 */
    924 
    925 	sdp->swd_blist = blist_create(npages);
    926 	/* mark all except the `saved' region free. */
    927 	blist_free(sdp->swd_blist, addr, size);
    928 
    929 	/*
    930 	 * if the vnode we are swapping to is the root vnode
    931 	 * (i.e. we are swapping to the miniroot) then we want
    932 	 * to make sure we don't overwrite it.   do a statfs to
    933 	 * find its size and skip over it.
    934 	 */
    935 	if (vp == rootvp) {
    936 		struct mount *mp;
    937 		struct statvfs *sp;
    938 		int rootblocks, rootpages;
    939 
    940 		mp = rootvnode->v_mount;
    941 		sp = &mp->mnt_stat;
    942 		rootblocks = sp->f_blocks * btodb(sp->f_frsize);
    943 		/*
    944 		 * XXX: sp->f_blocks isn't the total number of
    945 		 * blocks in the filesystem, it's the number of
    946 		 * data blocks.  so, our rootblocks almost
    947 		 * definitely underestimates the total size
    948 		 * of the filesystem - how badly depends on the
    949 		 * details of the filesystem type.  there isn't
    950 		 * an obvious way to deal with this cleanly
    951 		 * and perfectly, so for now we just pad our
    952 		 * rootblocks estimate with an extra 5 percent or so (1/32+1/64+1/128).
    953 		 */
    954 		rootblocks += (rootblocks >> 5) +
    955 			(rootblocks >> 6) +
    956 			(rootblocks >> 7);
    957 		rootpages = round_page(dbtob(rootblocks)) >> PAGE_SHIFT;
    958 		if (rootpages > size)
    959 			panic("swap_on: miniroot larger than swap?");
    960 
    961 		if (rootpages != blist_fill(sdp->swd_blist, addr, rootpages)) {
    962 			panic("swap_on: unable to preserve miniroot");
    963 		}
    964 
    965 		size -= rootpages;
    966 		printf("Preserved %d pages of miniroot ", rootpages);
    967 		printf("leaving %d pages of swap\n", size);
    968 	}
    969 
    970 	/*
    971 	 * add a ref to vp to reflect usage as a swap device.
    972 	 */
    973 	vref(vp);
    974 
    975 	/*
    976 	 * now add the new swapdev to the drum and enable.
    977 	 */
    978 	error = vmem_alloc(swapmap, npages, VM_BESTFIT | VM_SLEEP, &result);
    979 	if (error != 0)
    980 		panic("swapdrum_add");
    981 	/*
    982 	 * If this is the first regular swap, create the workqueue.
    983 	 * => Protected by swap_syscall_lock.
    984 	 */
    985 	if (vp->v_type != VBLK) {
    986 		if (sw_reg_count++ == 0) {
    987 			KASSERT(sw_reg_workqueue == NULL);
    988 			if (workqueue_create(&sw_reg_workqueue, "swapiod",
    989 			    sw_reg_iodone, NULL, PRIBIO, IPL_BIO, 0) != 0)
    990 				panic("%s: workqueue_create failed", __func__);
    991 		}
    992 	}
    993 
    994 	sdp->swd_drumoffset = (int)result;
    995 	sdp->swd_drumsize = npages;
    996 	sdp->swd_npages = size;
    997 	mutex_enter(&uvm_swap_data_lock);
    998 	sdp->swd_flags &= ~SWF_FAKE;	/* going live */
    999 	sdp->swd_flags |= (SWF_INUSE|SWF_ENABLE);
   1000 	uvmexp.swpages += size;
   1001 	uvmexp.swpgavail += size;
   1002 	mutex_exit(&uvm_swap_data_lock);
   1003 	return (0);
   1004 
   1005 	/*
   1006 	 * failure: clean up and return error.
   1007 	 */
   1008 
   1009 bad:
   1010 	if (sdp->swd_blist) {
   1011 		blist_destroy(sdp->swd_blist);
   1012 	}
   1013 	if (vp != rootvp) {
   1014 		(void)VOP_CLOSE(vp, FREAD|FWRITE, l->l_cred);
   1015 	}
   1016 	return (error);
   1017 }
   1018 
   1019 /*
   1020  * swap_off: stop swapping on swapdev
   1021  *
   1022  * => uvm_swap_data_lock should be held on entry; we will unlock it.
   1023  */
   1024 static int
   1025 swap_off(struct lwp *l, struct swapdev *sdp)
   1026 {
   1027 	int npages = sdp->swd_npages;
   1028 	int error = 0;
   1029 
   1030 	UVMHIST_FUNC("swap_off"); UVMHIST_CALLED(pdhist);
   1031 	UVMHIST_LOG(pdhist, "  dev=%x, npages=%d", sdp->swd_dev,npages,0,0);
   1032 
   1033 	/* disable the swap area being removed */
   1034 	sdp->swd_flags &= ~SWF_ENABLE;
   1035 	uvmexp.swpgavail -= npages;
   1036 	mutex_exit(&uvm_swap_data_lock);
   1037 
   1038 	/*
   1039 	 * the idea is to find all the pages that are paged out to this
   1040 	 * device, and page them all in.  in uvm, swap-backed pageable
   1041 	 * memory can take two forms: aobjs and anons.  call the
   1042 	 * swapoff hook for each subsystem to bring in pages.
   1043 	 */
   1044 
   1045 	if (uao_swap_off(sdp->swd_drumoffset,
   1046 			 sdp->swd_drumoffset + sdp->swd_drumsize) ||
   1047 	    amap_swap_off(sdp->swd_drumoffset,
   1048 			  sdp->swd_drumoffset + sdp->swd_drumsize)) {
   1049 		error = ENOMEM;
   1050 	} else if (sdp->swd_npginuse > sdp->swd_npgbad) {
   1051 		error = EBUSY;
   1052 	}
   1053 
   1054 	if (error) {
   1055 		mutex_enter(&uvm_swap_data_lock);
   1056 		sdp->swd_flags |= SWF_ENABLE;
   1057 		uvmexp.swpgavail += npages;
   1058 		mutex_exit(&uvm_swap_data_lock);
   1059 
   1060 		return error;
   1061 	}
   1062 
   1063 	/*
   1064 	 * If this is the last regular swap, destroy the workqueue.
   1065 	 * => Protected by swap_syscall_lock.
   1066 	 */
   1067 	if (sdp->swd_vp->v_type != VBLK) {
   1068 		KASSERT(sw_reg_count > 0);
   1069 		KASSERT(sw_reg_workqueue != NULL);
   1070 		if (--sw_reg_count == 0) {
   1071 			workqueue_destroy(sw_reg_workqueue);
   1072 			sw_reg_workqueue = NULL;
   1073 		}
   1074 	}
   1075 
   1076 	/*
   1077 	 * done with the vnode.
   1078 	 * drop our ref on the vnode before calling VOP_CLOSE()
   1079 	 * so that spec_close() can tell if this is the last close.
   1080 	 */
   1081 	vrele(sdp->swd_vp);
   1082 	if (sdp->swd_vp != rootvp) {
   1083 		(void) VOP_CLOSE(sdp->swd_vp, FREAD|FWRITE, l->l_cred);
   1084 	}
   1085 
   1086 	mutex_enter(&uvm_swap_data_lock);
   1087 	uvmexp.swpages -= npages;
   1088 	uvmexp.swpginuse -= sdp->swd_npgbad;
   1089 
   1090 	if (swaplist_find(sdp->swd_vp, true) == NULL)
   1091 		panic("%s: swapdev not in list", __func__);
   1092 	swaplist_trim();
   1093 	mutex_exit(&uvm_swap_data_lock);
   1094 
   1095 	/*
   1096 	 * free all resources!
   1097 	 */
   1098 	vmem_free(swapmap, sdp->swd_drumoffset, sdp->swd_drumsize);
   1099 	blist_destroy(sdp->swd_blist);
   1100 	bufq_free(sdp->swd_tab);
   1101 	kmem_free(sdp, sizeof(*sdp));
   1102 	return (0);
   1103 }
   1104 
   1105 /*
   1106  * /dev/drum interface and i/o functions
   1107  */
   1108 
   1109 /*
   1110  * swstrategy: perform I/O on the drum
   1111  *
   1112  * => we must map the i/o request from the drum to the correct swapdev.
   1113  */
   1114 static void
   1115 swstrategy(struct buf *bp)
   1116 {
   1117 	struct swapdev *sdp;
   1118 	struct vnode *vp;
   1119 	int pageno, bn;
   1120 	UVMHIST_FUNC("swstrategy"); UVMHIST_CALLED(pdhist);
   1121 
   1122 	/*
   1123 	 * convert block number to swapdev.   note that swapdev can't
   1124 	 * be yanked out from under us because we are holding resources
   1125 	 * in it (i.e. the blocks we are doing I/O on).
   1126 	 */
   1127 	pageno = dbtob((int64_t)bp->b_blkno) >> PAGE_SHIFT;
   1128 	mutex_enter(&uvm_swap_data_lock);
   1129 	sdp = swapdrum_getsdp(pageno);
   1130 	mutex_exit(&uvm_swap_data_lock);
   1131 	if (sdp == NULL) {
   1132 		bp->b_error = EINVAL;
   1133 		biodone(bp);
   1134 		UVMHIST_LOG(pdhist, "  failed to get swap device", 0, 0, 0, 0);
   1135 		return;
   1136 	}
   1137 
   1138 	/*
   1139 	 * convert drum page number to block number on this swapdev.
   1140 	 */
   1141 
   1142 	pageno -= sdp->swd_drumoffset;	/* page # on swapdev */
   1143 	bn = btodb((uint64_t)pageno << PAGE_SHIFT); /* convert to diskblock */
   1144 
   1145 	UVMHIST_LOG(pdhist, "  %s: mapoff=%x bn=%x bcount=%ld",
   1146 		((bp->b_flags & B_READ) == 0) ? "write" : "read",
   1147 		sdp->swd_drumoffset, bn, bp->b_bcount);
   1148 
   1149 	/*
   1150 	 * for block devices we finish up here.
   1151 	 * for regular files we have to do more work which we delegate
   1152 	 * to sw_reg_strategy().
   1153 	 */
   1154 
   1155 	vp = sdp->swd_vp;		/* swapdev vnode pointer */
   1156 	switch (vp->v_type) {
   1157 	default:
   1158 		panic("%s: vnode type 0x%x", __func__, vp->v_type);
   1159 
   1160 	case VBLK:
   1161 
   1162 		/*
   1163 		 * must convert "bp" from an I/O on /dev/drum to an I/O
   1164 		 * on the swapdev (sdp).
   1165 		 */
   1166 		bp->b_blkno = bn;		/* swapdev block number */
   1167 		bp->b_dev = sdp->swd_dev;	/* swapdev dev_t */
   1168 
   1169 		/*
   1170 		 * if we are doing a write, we have to redirect the i/o
   1171 		 * accounting from the drum's v_numoutput counter to the swapdev's.
   1172 		 */
   1173 		if ((bp->b_flags & B_READ) == 0) {
   1174 			mutex_enter(bp->b_objlock);
   1175 			vwakeup(bp);	/* kills one 'v_numoutput' on drum */
   1176 			mutex_exit(bp->b_objlock);
   1177 			mutex_enter(vp->v_interlock);
   1178 			vp->v_numoutput++;	/* put it on swapdev */
   1179 			mutex_exit(vp->v_interlock);
   1180 		}
   1181 
   1182 		/*
   1183 		 * finally plug in swapdev vnode and start I/O
   1184 		 */
   1185 		bp->b_vp = vp;
   1186 		bp->b_objlock = vp->v_interlock;
   1187 		VOP_STRATEGY(vp, bp);
   1188 		return;
   1189 
   1190 	case VREG:
   1191 		/*
   1192 		 * delegate to sw_reg_strategy function.
   1193 		 */
   1194 		sw_reg_strategy(sdp, bp, bn);
   1195 		return;
   1196 	}
   1197 	/* NOTREACHED */
   1198 }
   1199 
   1200 /*
   1201  * swread: the read function for the drum (just a call to physio)
   1202  */
   1203 /*ARGSUSED*/
   1204 static int
   1205 swread(dev_t dev, struct uio *uio, int ioflag)
   1206 {
   1207 	UVMHIST_FUNC("swread"); UVMHIST_CALLED(pdhist);
   1208 
   1209 	UVMHIST_LOG(pdhist, "  dev=%x offset=%qx", dev, uio->uio_offset, 0, 0);
   1210 	return (physio(swstrategy, NULL, dev, B_READ, minphys, uio));
   1211 }
   1212 
   1213 /*
   1214  * swwrite: the write function for the drum (just a call to physio)
   1215  */
   1216 /*ARGSUSED*/
   1217 static int
   1218 swwrite(dev_t dev, struct uio *uio, int ioflag)
   1219 {
   1220 	UVMHIST_FUNC("swwrite"); UVMHIST_CALLED(pdhist);
   1221 
   1222 	UVMHIST_LOG(pdhist, "  dev=%x offset=%qx", dev, uio->uio_offset, 0, 0);
   1223 	return (physio(swstrategy, NULL, dev, B_WRITE, minphys, uio));
   1224 }
   1225 
   1226 const struct bdevsw swap_bdevsw = {
   1227 	nullopen, nullclose, swstrategy, noioctl, nodump, nosize, D_OTHER,
   1228 };
   1229 
   1230 const struct cdevsw swap_cdevsw = {
   1231 	nullopen, nullclose, swread, swwrite, noioctl,
   1232 	nostop, notty, nopoll, nommap, nokqfilter, D_OTHER,
   1233 };
   1234 
   1235 /*
   1236  * sw_reg_strategy: handle swap i/o to regular files
   1237  */
   1238 static void
   1239 sw_reg_strategy(struct swapdev *sdp, struct buf *bp, int bn)
   1240 {
   1241 	struct vnode	*vp;
   1242 	struct vndxfer	*vnx;
   1243 	daddr_t		nbn;
   1244 	char 		*addr;
   1245 	off_t		byteoff;
   1246 	int		s, off, nra, error, sz, resid;
   1247 	UVMHIST_FUNC("sw_reg_strategy"); UVMHIST_CALLED(pdhist);
   1248 
   1249 	/*
   1250 	 * allocate a vndxfer head for this transfer and point it to
   1251 	 * our buffer.
   1252 	 */
   1253 	vnx = pool_get(&vndxfer_pool, PR_WAITOK);
   1254 	vnx->vx_flags = VX_BUSY;
   1255 	vnx->vx_error = 0;
   1256 	vnx->vx_pending = 0;
   1257 	vnx->vx_bp = bp;
   1258 	vnx->vx_sdp = sdp;
   1259 
   1260 	/*
   1261 	 * setup for main loop where we read filesystem blocks into
   1262 	 * our buffer.
   1263 	 */
   1264 	error = 0;
   1265 	bp->b_resid = bp->b_bcount;	/* nothing transferred yet! */
   1266 	addr = bp->b_data;		/* current position in buffer */
   1267 	byteoff = dbtob((uint64_t)bn);
   1268 
   1269 	for (resid = bp->b_resid; resid; resid -= sz) {
   1270 		struct vndbuf	*nbp;
   1271 
   1272 		/*
   1273 		 * translate byteoffset into block number.  return values:
   1274 		 *   vp = vnode of underlying device
   1275 		 *  nbn = new block number (on underlying vnode dev)
   1276 		 *  nra = num blocks we can read-ahead (excludes requested
   1277 		 *	block)
   1278 		 */
   1279 		nra = 0;
   1280 		error = VOP_BMAP(sdp->swd_vp, byteoff / sdp->swd_bsize,
   1281 				 	&vp, &nbn, &nra);
   1282 
   1283 		if (error == 0 && nbn == (daddr_t)-1) {
   1284 			/*
   1285 			 * this used to just set error, but that doesn't
   1286 			 * do the right thing.  Instead, it causes random
   1287 			 * memory errors.  The panic() should remain until
   1288 			 * this condition doesn't destabilize the system.
   1289 			 */
   1290 #if 1
   1291 			panic("%s: swap to sparse file", __func__);
   1292 #else
   1293 			error = EIO;	/* failure */
   1294 #endif
   1295 		}
   1296 
   1297 		/*
   1298 		 * punt if there was an error or a hole in the file.
   1299 		 * we must wait for any i/o ops we have already started
   1300 		 * to finish before returning.
   1301 		 *
   1302 		 * XXX we could deal with holes here but it would be
   1303 		 * a hassle (in the write case).
   1304 		 */
   1305 		if (error) {
   1306 			s = splbio();
   1307 			vnx->vx_error = error;	/* pass error up */
   1308 			goto out;
   1309 		}
   1310 
   1311 		/*
   1312 		 * compute the size ("sz") of this transfer (in bytes).
   1313 		 */
   1314 		off = byteoff % sdp->swd_bsize;
   1315 		sz = (1 + nra) * sdp->swd_bsize - off;
   1316 		if (sz > resid)
   1317 			sz = resid;
   1318 
   1319 		UVMHIST_LOG(pdhist, "sw_reg_strategy: "
   1320 			    "vp %p/%p offset 0x%x/0x%x",
   1321 			    sdp->swd_vp, vp, byteoff, nbn);
   1322 
   1323 		/*
   1324 		 * now get a buf structure.   note that the vb_buf is
   1325 		 * at the front of the nbp structure so that you can
   1326 		 * cast pointers between the two structures easily.
   1327 		 */
   1328 		nbp = pool_get(&vndbuf_pool, PR_WAITOK);
   1329 		buf_init(&nbp->vb_buf);
   1330 		nbp->vb_buf.b_flags    = bp->b_flags;
   1331 		nbp->vb_buf.b_cflags   = bp->b_cflags;
   1332 		nbp->vb_buf.b_oflags   = bp->b_oflags;
   1333 		nbp->vb_buf.b_bcount   = sz;
   1334 		nbp->vb_buf.b_bufsize  = sz;
   1335 		nbp->vb_buf.b_error    = 0;
   1336 		nbp->vb_buf.b_data     = addr;
   1337 		nbp->vb_buf.b_lblkno   = 0;
   1338 		nbp->vb_buf.b_blkno    = nbn + btodb(off);
   1339 		nbp->vb_buf.b_rawblkno = nbp->vb_buf.b_blkno;
   1340 		nbp->vb_buf.b_iodone   = sw_reg_biodone;
   1341 		nbp->vb_buf.b_vp       = vp;
   1342 		nbp->vb_buf.b_objlock  = vp->v_interlock;
   1343 		if (vp->v_type == VBLK) {
   1344 			nbp->vb_buf.b_dev = vp->v_rdev;
   1345 		}
   1346 
   1347 		nbp->vb_xfer = vnx;	/* patch it back in to vnx */
   1348 
   1349 		/*
   1350 		 * Just sort by block number
   1351 		 */
   1352 		s = splbio();
   1353 		if (vnx->vx_error != 0) {
   1354 			buf_destroy(&nbp->vb_buf);
   1355 			pool_put(&vndbuf_pool, nbp);
   1356 			goto out;
   1357 		}
   1358 		vnx->vx_pending++;
   1359 
   1360 		/* sort it in and start I/O if we are not over our limit */
   1361 		/* XXXAD locking */
   1362 		bufq_put(sdp->swd_tab, &nbp->vb_buf);
   1363 		sw_reg_start(sdp);
   1364 		splx(s);
   1365 
   1366 		/*
   1367 		 * advance to the next I/O
   1368 		 */
   1369 		byteoff += sz;
   1370 		addr += sz;
   1371 	}
   1372 
   1373 	s = splbio();
   1374 
   1375 out: /* Arrive here at splbio */
   1376 	vnx->vx_flags &= ~VX_BUSY;
   1377 	if (vnx->vx_pending == 0) {
   1378 		error = vnx->vx_error;
   1379 		pool_put(&vndxfer_pool, vnx);
   1380 		bp->b_error = error;
   1381 		biodone(bp);
   1382 	}
   1383 	splx(s);
   1384 }
   1385 
   1386 /*
   1387  * sw_reg_start: start an I/O request on the requested swapdev
   1388  *
   1389  * => reqs are sorted by b_rawblkno (above)
   1390  */
   1391 static void
   1392 sw_reg_start(struct swapdev *sdp)
   1393 {
   1394 	struct buf	*bp;
   1395 	struct vnode	*vp;
   1396 	UVMHIST_FUNC("sw_reg_start"); UVMHIST_CALLED(pdhist);
   1397 
   1398 	/* recursion control */
   1399 	if ((sdp->swd_flags & SWF_BUSY) != 0)
   1400 		return;
   1401 
   1402 	sdp->swd_flags |= SWF_BUSY;
   1403 
   1404 	while (sdp->swd_active < sdp->swd_maxactive) {
   1405 		bp = bufq_get(sdp->swd_tab);
   1406 		if (bp == NULL)
   1407 			break;
   1408 		sdp->swd_active++;
   1409 
   1410 		UVMHIST_LOG(pdhist,
   1411 		    "sw_reg_start:  bp %p vp %p blkno %p cnt %lx",
   1412 		    bp, bp->b_vp, bp->b_blkno, bp->b_bcount);
   1413 		vp = bp->b_vp;
   1414 		KASSERT(bp->b_objlock == vp->v_interlock);
   1415 		if ((bp->b_flags & B_READ) == 0) {
   1416 			mutex_enter(vp->v_interlock);
   1417 			vp->v_numoutput++;
   1418 			mutex_exit(vp->v_interlock);
   1419 		}
   1420 		VOP_STRATEGY(vp, bp);
   1421 	}
   1422 	sdp->swd_flags &= ~SWF_BUSY;
   1423 }
   1424 
   1425 /*
   1426  * sw_reg_biodone: one of our i/o's has completed
   1427  */
   1428 static void
   1429 sw_reg_biodone(struct buf *bp)
   1430 {
   1431 	workqueue_enqueue(sw_reg_workqueue, &bp->b_work, NULL);
   1432 }
   1433 
   1434 /*
   1435  * sw_reg_iodone: one of our i/o's has completed and needs post-i/o cleanup
   1436  *
   1437  * => note that we can recover the vndbuf struct by casting the buf ptr
   1438  */
   1439 static void
   1440 sw_reg_iodone(struct work *wk, void *dummy)
   1441 {
   1442 	struct vndbuf *vbp = (void *)wk;
   1443 	struct vndxfer *vnx = vbp->vb_xfer;
   1444 	struct buf *pbp = vnx->vx_bp;		/* parent buffer */
   1445 	struct swapdev	*sdp = vnx->vx_sdp;
   1446 	int s, resid, error;
   1447 	KASSERT(&vbp->vb_buf.b_work == wk);
   1448 	UVMHIST_FUNC("sw_reg_iodone"); UVMHIST_CALLED(pdhist);
   1449 
   1450 	UVMHIST_LOG(pdhist, "  vbp=%p vp=%p blkno=%x addr=%p",
   1451 	    vbp, vbp->vb_buf.b_vp, vbp->vb_buf.b_blkno, vbp->vb_buf.b_data);
   1452 	UVMHIST_LOG(pdhist, "  cnt=%lx resid=%lx",
   1453 	    vbp->vb_buf.b_bcount, vbp->vb_buf.b_resid, 0, 0);
   1454 
   1455 	/*
   1456 	 * protect vbp at splbio and update.
   1457 	 */
   1458 
   1459 	s = splbio();
   1460 	resid = vbp->vb_buf.b_bcount - vbp->vb_buf.b_resid;
   1461 	pbp->b_resid -= resid;
   1462 	vnx->vx_pending--;
   1463 
   1464 	if (vbp->vb_buf.b_error != 0) {
   1465 		/* pass error upward */
   1466 		error = vbp->vb_buf.b_error ? vbp->vb_buf.b_error : EIO;
   1467 		UVMHIST_LOG(pdhist, "  got error=%d !", error, 0, 0, 0);
   1468 		vnx->vx_error = error;
   1469 	}
   1470 
   1471 	/*
   1472 	 * kill vbp structure
   1473 	 */
   1474 	buf_destroy(&vbp->vb_buf);
   1475 	pool_put(&vndbuf_pool, vbp);
   1476 
   1477 	/*
   1478 	 * wrap up this transaction if it has run to completion or, in
   1479 	 * case of an error, when all auxiliary buffers have returned.
   1480 	 */
   1481 	if (vnx->vx_error != 0) {
   1482 		/* pass error upward */
   1483 		error = vnx->vx_error;
   1484 		if ((vnx->vx_flags & VX_BUSY) == 0 && vnx->vx_pending == 0) {
   1485 			pbp->b_error = error;
   1486 			biodone(pbp);
   1487 			pool_put(&vndxfer_pool, vnx);
   1488 		}
   1489 	} else if (pbp->b_resid == 0) {
   1490 		KASSERT(vnx->vx_pending == 0);
   1491 		if ((vnx->vx_flags & VX_BUSY) == 0) {
   1492 			UVMHIST_LOG(pdhist, "  iodone, pbp=%p error=%d !",
   1493 			    pbp, vnx->vx_error, 0, 0);
   1494 			biodone(pbp);
   1495 			pool_put(&vndxfer_pool, vnx);
   1496 		}
   1497 	}
   1498 
   1499 	/*
   1500 	 * done!   start next swapdev I/O if one is pending
   1501 	 */
   1502 	sdp->swd_active--;
   1503 	sw_reg_start(sdp);
   1504 	splx(s);
   1505 }
   1506 
   1507 
   1508 /*
   1509  * uvm_swap_alloc: allocate space on swap
   1510  *
   1511  * => allocation is done "round robin" down the priority list, as we
   1512  *	allocate in a priority we "rotate" the circle queue.
   1513  * => space can be freed with uvm_swap_free
   1514  * => we return the page slot number in /dev/drum (0 == invalid slot)
   1515  * => we lock uvm_swap_data_lock
   1516  * => XXXMRG: "LESSOK" INTERFACE NEEDED TO EXTENT SYSTEM
   1517  */
   1518 int
   1519 uvm_swap_alloc(int *nslots /* IN/OUT */, bool lessok)
   1520 {
   1521 	struct swapdev *sdp;
   1522 	struct swappri *spp;
   1523 	UVMHIST_FUNC("uvm_swap_alloc"); UVMHIST_CALLED(pdhist);
   1524 
   1525 	/*
   1526 	 * no swap devices configured yet?   definite failure.
   1527 	 */
   1528 	if (uvmexp.nswapdev < 1)
   1529 		return 0;
   1530 
   1531 	/*
   1532 	 * lock data lock, convert slots into blocks, and enter loop
   1533 	 */
   1534 	mutex_enter(&uvm_swap_data_lock);
   1535 
   1536 ReTry:	/* XXXMRG */
   1537 	LIST_FOREACH(spp, &swap_priority, spi_swappri) {
   1538 		CIRCLEQ_FOREACH(sdp, &spp->spi_swapdev, swd_next) {
   1539 			uint64_t result;
   1540 
   1541 			/* if it's not enabled, then we can't swap from it */
   1542 			if ((sdp->swd_flags & SWF_ENABLE) == 0)
   1543 				continue;
   1544 			if (sdp->swd_npginuse + *nslots > sdp->swd_npages)
   1545 				continue;
   1546 			result = blist_alloc(sdp->swd_blist, *nslots);
   1547 			if (result == BLIST_NONE) {
   1548 				continue;
   1549 			}
   1550 			KASSERT(result < sdp->swd_drumsize);
   1551 
   1552 			/*
   1553 			 * successful allocation!  now rotate the circleq.
   1554 			 */
   1555 			CIRCLEQ_REMOVE(&spp->spi_swapdev, sdp, swd_next);
   1556 			CIRCLEQ_INSERT_TAIL(&spp->spi_swapdev, sdp, swd_next);
   1557 			sdp->swd_npginuse += *nslots;
   1558 			uvmexp.swpginuse += *nslots;
   1559 			mutex_exit(&uvm_swap_data_lock);
   1560 			/* done!  return drum slot number */
   1561 			UVMHIST_LOG(pdhist,
   1562 			    "success!  returning %d slots starting at %d",
   1563 			    *nslots, result + sdp->swd_drumoffset, 0, 0);
   1564 			return (result + sdp->swd_drumoffset);
   1565 		}
   1566 	}
   1567 
   1568 	/* XXXMRG: BEGIN HACK */
   1569 	if (*nslots > 1 && lessok) {
   1570 		*nslots = 1;
   1571 		/* XXXMRG: ugh!  blist should support this for us */
   1572 		goto ReTry;
   1573 	}
   1574 	/* XXXMRG: END HACK */
   1575 
   1576 	mutex_exit(&uvm_swap_data_lock);
   1577 	return 0;
   1578 }
   1579 
   1580 /*
   1581  * uvm_swapisfull: return true if most of available swap is allocated
   1582  * and in use.  we don't count some small portion as it may be inaccessible
   1583  * to us at any given moment, for example if there is lock contention or if
   1584  * pages are busy.
   1585  */
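        /*
         * worked example (illustrative numbers only): with the default
         * uvm_swapisfull_factor of 99, swpgavail = 25600 and
         * swpgonly = 25400, the computation below gives
         * 25400 * 100 / 99 = 25656 >= 25600, so we report "full";
         * with swpgonly = 25300 it gives 25555 < 25600 and we do not.
         */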
   1586 bool
   1587 uvm_swapisfull(void)
   1588 {
   1589 	int swpgonly;
   1590 	bool rv;
   1591 
   1592 	mutex_enter(&uvm_swap_data_lock);
   1593 	KASSERT(uvmexp.swpgonly <= uvmexp.swpages);
   1594 	swpgonly = (int)((uint64_t)uvmexp.swpgonly * 100 /
   1595 	    uvm_swapisfull_factor);
   1596 	rv = (swpgonly >= uvmexp.swpgavail);
   1597 	mutex_exit(&uvm_swap_data_lock);
   1598 
   1599 	return (rv);
   1600 }
   1601 
   1602 /*
   1603  * uvm_swap_markbad: keep track of swap ranges where we've had i/o errors
   1604  *
   1605  * => we lock uvm_swap_data_lock
   1606  */
   1607 void
   1608 uvm_swap_markbad(int startslot, int nslots)
   1609 {
   1610 	struct swapdev *sdp;
   1611 	UVMHIST_FUNC("uvm_swap_markbad"); UVMHIST_CALLED(pdhist);
   1612 
   1613 	mutex_enter(&uvm_swap_data_lock);
   1614 	sdp = swapdrum_getsdp(startslot);
   1615 	KASSERT(sdp != NULL);
   1616 
   1617 	/*
   1618 	 * we just keep track of how many pages have been marked bad
   1619 	 * in this device, to make everything add up in swap_off().
   1620 	 * we assume here that the range of slots will all be within
   1621 	 * one swap device.
   1622 	 */
   1623 
   1624 	KASSERT(uvmexp.swpgonly >= nslots);
   1625 	uvmexp.swpgonly -= nslots;
   1626 	sdp->swd_npgbad += nslots;
   1627 	UVMHIST_LOG(pdhist, "now %d bad", sdp->swd_npgbad, 0,0,0);
   1628 	mutex_exit(&uvm_swap_data_lock);
   1629 }
   1630 
   1631 /*
   1632  * uvm_swap_free: free swap slots
   1633  *
   1634  * => this can be all or part of an allocation made by uvm_swap_alloc
   1635  * => we lock uvm_swap_data_lock
   1636  */
   1637 void
   1638 uvm_swap_free(int startslot, int nslots)
   1639 {
   1640 	struct swapdev *sdp;
   1641 	UVMHIST_FUNC("uvm_swap_free"); UVMHIST_CALLED(pdhist);
   1642 
   1643 	UVMHIST_LOG(pdhist, "freeing %d slots starting at %d", nslots,
   1644 	    startslot, 0, 0);
   1645 
   1646 	/*
   1647 	 * ignore attempts to free the "bad" slot.
   1648 	 */
   1649 
   1650 	if (startslot == SWSLOT_BAD) {
   1651 		return;
   1652 	}
   1653 
   1654 	/*
   1655 	 * convert drum slot offset back to sdp, free the blocks
   1656 	 * in the blist, and return.   must hold uvm_swap_data_lock to do
   1657 	 * the lookup and access the blist.
   1658 	 */
   1659 
   1660 	mutex_enter(&uvm_swap_data_lock);
   1661 	sdp = swapdrum_getsdp(startslot);
   1662 	KASSERT(uvmexp.nswapdev >= 1);
   1663 	KASSERT(sdp != NULL);
   1664 	KASSERT(sdp->swd_npginuse >= nslots);
   1665 	blist_free(sdp->swd_blist, startslot - sdp->swd_drumoffset, nslots);
   1666 	sdp->swd_npginuse -= nslots;
   1667 	uvmexp.swpginuse -= nslots;
   1668 	mutex_exit(&uvm_swap_data_lock);
   1669 }
   1670 
   1671 /*
   1672  * uvm_swap_put: put any number of pages into a contig place on swap
   1673  *
   1674  * => can be sync or async
   1675  */
   1676 
   1677 int
   1678 uvm_swap_put(int swslot, struct vm_page **ppsp, int npages, int flags)
   1679 {
   1680 	int error;
   1681 
   1682 	error = uvm_swap_io(ppsp, swslot, npages, B_WRITE |
   1683 	    ((flags & PGO_SYNCIO) ? 0 : B_ASYNC));
   1684 	return error;
   1685 }
   1686 
   1687 /*
   1688  * uvm_swap_get: get a single page from swap
   1689  *
   1690  * => usually a sync op (from fault)
   1691  */
   1692 
   1693 int
   1694 uvm_swap_get(struct vm_page *page, int swslot, int flags)
   1695 {
   1696 	int error;
   1697 
   1698 	uvmexp.nswget++;
   1699 	KASSERT(flags & PGO_SYNCIO);
   1700 	if (swslot == SWSLOT_BAD) {
   1701 		return EIO;
   1702 	}
   1703 
   1704 	error = uvm_swap_io(&page, swslot, 1, B_READ |
   1705 	    ((flags & PGO_SYNCIO) ? 0 : B_ASYNC));
   1706 	if (error == 0) {
   1707 
   1708 		/*
   1709 		 * this page is no longer only in swap.
   1710 		 */
   1711 
   1712 		mutex_enter(&uvm_swap_data_lock);
   1713 		KASSERT(uvmexp.swpgonly > 0);
   1714 		uvmexp.swpgonly--;
   1715 		mutex_exit(&uvm_swap_data_lock);
   1716 	}
   1717 	return error;
   1718 }
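
        /*
         * illustrative only: a hedged sketch (not part of this file) of how
         * a caller might drive the allocation and i/o entry points above
         * (uvm_swap_alloc, uvm_swap_put, uvm_swap_get, uvm_swap_free).
         * "pgs" and "npages" are hypothetical caller state and error
         * handling is abbreviated:
         *
         *	int nslots = npages;
         *	int slot = uvm_swap_alloc(&nslots, true); // may shrink nslots
         *	if (slot == 0)
         *		return ENOMEM;			  // no swap space available
         *	error = uvm_swap_put(slot, pgs, nslots, PGO_SYNCIO);
         *	...
         *	error = uvm_swap_get(pgs[0], slot, PGO_SYNCIO); // page it back in
         *	...
         *	uvm_swap_free(slot, nslots);		  // done with the slots
         */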
   1719 
   1720 /*
   1721  * uvm_swap_io: do an i/o operation to swap
   1722  */
   1723 
   1724 static int
   1725 uvm_swap_io(struct vm_page **pps, int startslot, int npages, int flags)
   1726 {
   1727 	daddr_t startblk;
   1728 	struct	buf *bp;
   1729 	vaddr_t kva;
   1730 	int	error, mapinflags;
   1731 	bool write, async;
   1732 	UVMHIST_FUNC("uvm_swap_io"); UVMHIST_CALLED(pdhist);
   1733 
   1734 	UVMHIST_LOG(pdhist, "<- called, startslot=%d, npages=%d, flags=%d",
   1735 	    startslot, npages, flags, 0);
   1736 
   1737 	write = (flags & B_READ) == 0;
   1738 	async = (flags & B_ASYNC) != 0;
   1739 
   1740 	/*
   1741 	 * allocate a buf for the i/o.
   1742 	 */
   1743 
   1744 	KASSERT(curlwp != uvm.pagedaemon_lwp || (write && async));
   1745 	bp = getiobuf(swapdev_vp, curlwp != uvm.pagedaemon_lwp);
   1746 	if (bp == NULL) {
   1747 		uvm_aio_aiodone_pages(pps, npages, true, ENOMEM);
   1748 		return ENOMEM;
   1749 	}
   1750 
   1751 	/*
   1752 	 * convert starting drum slot to block number
   1753 	 */
   1754 
   1755 	startblk = btodb((uint64_t)startslot << PAGE_SHIFT);
   1756 
   1757 	/*
   1758 	 * first, map the pages into the kernel.
   1759 	 */
   1760 
   1761 	mapinflags = !write ?
   1762 		UVMPAGER_MAPIN_WAITOK|UVMPAGER_MAPIN_READ :
   1763 		UVMPAGER_MAPIN_WAITOK|UVMPAGER_MAPIN_WRITE;
   1764 	kva = uvm_pagermapin(pps, npages, mapinflags);
   1765 
   1766 	/*
   1767 	 * fill in the bp/sbp.   we currently route our i/o through
   1768 	 * /dev/drum's vnode [swapdev_vp].
   1769 	 */
   1770 
   1771 	bp->b_cflags = BC_BUSY | BC_NOCACHE;
   1772 	bp->b_flags = (flags & (B_READ|B_ASYNC));
   1773 	bp->b_proc = &proc0;	/* XXX */
   1774 	bp->b_vnbufs.le_next = NOLIST;
   1775 	bp->b_data = (void *)kva;
   1776 	bp->b_blkno = startblk;
   1777 	bp->b_bufsize = bp->b_bcount = npages << PAGE_SHIFT;
   1778 
   1779 	/*
   1780 	 * bump v_numoutput (counter of number of active outputs).
   1781 	 */
   1782 
   1783 	if (write) {
   1784 		mutex_enter(swapdev_vp->v_interlock);
   1785 		swapdev_vp->v_numoutput++;
   1786 		mutex_exit(swapdev_vp->v_interlock);
   1787 	}
   1788 
   1789 	/*
   1790 	 * for async ops we must set up the iodone handler.
   1791 	 */
   1792 
   1793 	if (async) {
   1794 		bp->b_iodone = uvm_aio_biodone;
   1795 		UVMHIST_LOG(pdhist, "doing async!", 0, 0, 0, 0);
   1796 		if (curlwp == uvm.pagedaemon_lwp)
   1797 			BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
   1798 		else
   1799 			BIO_SETPRIO(bp, BPRIO_TIMELIMITED);
   1800 	} else {
   1801 		bp->b_iodone = NULL;
   1802 		BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
   1803 	}
   1804 	UVMHIST_LOG(pdhist,
   1805 	    "about to start io: data = %p blkno = 0x%x, bcount = %ld",
   1806 	    bp->b_data, bp->b_blkno, bp->b_bcount, 0);
   1807 
   1808 	/*
   1809 	 * now we start the I/O, and if async, return.
   1810 	 */
   1811 
   1812 	VOP_STRATEGY(swapdev_vp, bp);
   1813 	if (async)
   1814 		return 0;
   1815 
   1816 	/*
   1817 	 * must be sync i/o.   wait for it to finish
   1818 	 */
   1819 
   1820 	error = biowait(bp);
   1821 
   1822 	/*
   1823 	 * kill the pager mapping
   1824 	 */
   1825 
   1826 	uvm_pagermapout(kva, npages);
   1827 
   1828 	/*
   1829 	 * now dispose of the buf and we're done.
   1830 	 */
   1831 
   1832 	if (write) {
   1833 		mutex_enter(swapdev_vp->v_interlock);
   1834 		vwakeup(bp);
   1835 		mutex_exit(swapdev_vp->v_interlock);
   1836 	}
   1837 	putiobuf(bp);
   1838 	UVMHIST_LOG(pdhist, "<- done (sync)  error=%d", error, 0, 0, 0);
   1839 
   1840 	return (error);
   1841 }
   1842