uvm_swap.c revision 1.46.2.8
      1 /*	$NetBSD: uvm_swap.c,v 1.46.2.8 2002/04/17 00:06:33 nathanw Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 1995, 1996, 1997 Matthew R. Green
      5  * All rights reserved.
      6  *
      7  * Redistribution and use in source and binary forms, with or without
      8  * modification, are permitted provided that the following conditions
      9  * are met:
     10  * 1. Redistributions of source code must retain the above copyright
     11  *    notice, this list of conditions and the following disclaimer.
     12  * 2. Redistributions in binary form must reproduce the above copyright
     13  *    notice, this list of conditions and the following disclaimer in the
     14  *    documentation and/or other materials provided with the distribution.
     15  * 3. The name of the author may not be used to endorse or promote products
     16  *    derived from this software without specific prior written permission.
     17  *
     18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     19  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     21  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     22  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
     23  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
     24  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
     25  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
     26  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     28  * SUCH DAMAGE.
     29  *
     30  * from: NetBSD: vm_swap.c,v 1.52 1997/12/02 13:47:37 pk Exp
     31  * from: Id: uvm_swap.c,v 1.1.2.42 1998/02/02 20:38:06 chuck Exp
     32  */
     33 
     34 #include <sys/cdefs.h>
     35 __KERNEL_RCSID(0, "$NetBSD: uvm_swap.c,v 1.46.2.8 2002/04/17 00:06:33 nathanw Exp $");
     36 
     37 #include "fs_nfs.h"
     38 #include "opt_uvmhist.h"
     39 #include "opt_compat_netbsd.h"
     40 #include "opt_ddb.h"
     41 
     42 #include <sys/param.h>
     43 #include <sys/systm.h>
     44 #include <sys/buf.h>
     45 #include <sys/conf.h>
     46 #include <sys/lwp.h>
     47 #include <sys/proc.h>
     48 #include <sys/namei.h>
     49 #include <sys/disklabel.h>
     50 #include <sys/errno.h>
     51 #include <sys/kernel.h>
     52 #include <sys/malloc.h>
     53 #include <sys/vnode.h>
     54 #include <sys/file.h>
     55 #include <sys/extent.h>
     56 #include <sys/mount.h>
     57 #include <sys/pool.h>
     58 #include <sys/syscallargs.h>
     59 #include <sys/swap.h>
     60 
     61 #include <uvm/uvm.h>
     62 
     63 #include <miscfs/specfs/specdev.h>
     64 
     65 /*
     66  * uvm_swap.c: manage configuration and i/o to swap space.
     67  */
     68 
     69 /*
     70  * swap space is managed in the following way:
     71  *
     72  * each swap partition or file is described by a "swapdev" structure.
     73  * each "swapdev" structure contains a "swapent" structure which contains
     74  * information that is passed up to the user (via system calls).
     75  *
     76  * each swap partition is assigned a "priority" (int) which controls
      77  * swap partition usage.
     78  *
     79  * the system maintains a global data structure describing all swap
     80  * partitions/files.   there is a sorted LIST of "swappri" structures
     81  * which describe "swapdev"'s at that priority.   this LIST is headed
     82  * by the "swap_priority" global var.    each "swappri" contains a
     83  * CIRCLEQ of "swapdev" structures at that priority.
     84  *
     85  * locking:
     86  *  - swap_syscall_lock (sleep lock): this lock serializes the swapctl
     87  *    system call and prevents the swap priority list from changing
     88  *    while we are in the middle of a system call (e.g. SWAP_STATS).
     89  *  - uvm.swap_data_lock (simple_lock): this lock protects all swap data
     90  *    structures including the priority list, the swapdev structures,
     91  *    and the swapmap extent.
     92  *
     93  * each swap device has the following info:
     94  *  - swap device in use (could be disabled, preventing future use)
     95  *  - swap enabled (allows new allocations on swap)
     96  *  - map info in /dev/drum
     97  *  - vnode pointer
     98  * for swap files only:
     99  *  - block size
    100  *  - max byte count in buffer
    101  *  - buffer
    102  *
    103  * userland controls and configures swap with the swapctl(2) system call.
     104  * sys_swapctl() performs the following operations:
    105  *  [1] SWAP_NSWAP: returns the number of swap devices currently configured
    106  *  [2] SWAP_STATS: given a pointer to an array of swapent structures
    107  *	(passed in via "arg") of a size passed in via "misc" ... we load
    108  *	the current swap config into the array. The actual work is done
    109  *	in the uvm_swap_stats(9) function.
    110  *  [3] SWAP_ON: given a pathname in arg (could be device or file) and a
    111  *	priority in "misc", start swapping on it.
    112  *  [4] SWAP_OFF: as SWAP_ON, but stops swapping to a device
    113  *  [5] SWAP_CTL: changes the priority of a swap device (new priority in
    114  *	"misc")
    115  */
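/*
 * Illustrative sketch (not part of this file): how a userland program
 * might drive the SWAP_NSWAP and SWAP_STATS operations described above.
 * error handling is minimal, and it assumes the device count does not
 * change between the two swapctl(2) calls.
 */
#if 0
#include <sys/swap.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <err.h>

static void
print_swap_config(void)
{
	struct swapent *sep;
	int i, n, nswap;

	/* [1] SWAP_NSWAP: how many swap devices are configured? */
	nswap = swapctl(SWAP_NSWAP, NULL, 0);
	if (nswap < 1)
		return;

	/* [2] SWAP_STATS: fetch one swapent per configured device */
	sep = calloc(nswap, sizeof(*sep));
	if (sep == NULL)
		err(1, "calloc");
	n = swapctl(SWAP_STATS, sep, nswap);
	if (n == -1)
		err(1, "swapctl");
	for (i = 0; i < n; i++)
		printf("%s: %d blocks, %d in use, priority %d\n",
		    sep[i].se_path, sep[i].se_nblks, sep[i].se_inuse,
		    sep[i].se_priority);
	free(sep);
}
#endif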
    116 
    117 /*
    118  * swapdev: describes a single swap partition/file
    119  *
    120  * note the following should be true:
    121  * swd_inuse <= swd_nblks  [number of blocks in use is <= total blocks]
    122  * swd_nblks <= swd_mapsize [because mapsize includes miniroot+disklabel]
    123  */
    124 struct swapdev {
    125 	struct oswapent swd_ose;
    126 #define	swd_dev		swd_ose.ose_dev		/* device id */
    127 #define	swd_flags	swd_ose.ose_flags	/* flags:inuse/enable/fake */
    128 #define	swd_priority	swd_ose.ose_priority	/* our priority */
    129 	/* also: swd_ose.ose_nblks, swd_ose.ose_inuse */
    130 	char			*swd_path;	/* saved pathname of device */
    131 	int			swd_pathlen;	/* length of pathname */
    132 	int			swd_npages;	/* #pages we can use */
    133 	int			swd_npginuse;	/* #pages in use */
    134 	int			swd_npgbad;	/* #pages bad */
    135 	int			swd_drumoffset;	/* page0 offset in drum */
    136 	int			swd_drumsize;	/* #pages in drum */
    137 	struct extent		*swd_ex;	/* extent for this swapdev */
    138 	char			swd_exname[12];	/* name of extent above */
    139 	struct vnode		*swd_vp;	/* backing vnode */
    140 	CIRCLEQ_ENTRY(swapdev)	swd_next;	/* priority circleq */
    141 
    142 	int			swd_bsize;	/* blocksize (bytes) */
    143 	int			swd_maxactive;	/* max active i/o reqs */
    144 	struct buf_queue	swd_tab;	/* buffer list */
    145 	int			swd_active;	/* number of active buffers */
    146 };
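/*
 * illustrative only (not present in this file): the invariants noted
 * above could be spot-checked where the counters change, e.g.:
 *
 *	KASSERT(sdp->swd_ose.ose_inuse <= sdp->swd_ose.ose_nblks);
 *	KASSERT(sdp->swd_npgbad <= sdp->swd_npginuse);
 */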
    147 
    148 /*
    149  * swap device priority entry; the list is kept sorted on `spi_priority'.
    150  */
    151 struct swappri {
    152 	int			spi_priority;     /* priority */
    153 	CIRCLEQ_HEAD(spi_swapdev, swapdev)	spi_swapdev;
    154 	/* circleq of swapdevs at this priority */
    155 	LIST_ENTRY(swappri)	spi_swappri;      /* global list of pri's */
    156 };
    157 
    158 /*
    159  * The following two structures are used to keep track of data transfers
    160  * on swap devices associated with regular files.
    161  * NOTE: this code is more or less a copy of vnd.c; we use the same
     162  * structure names here to ease porting.
    163  */
    164 struct vndxfer {
    165 	struct buf	*vx_bp;		/* Pointer to parent buffer */
    166 	struct swapdev	*vx_sdp;
    167 	int		vx_error;
    168 	int		vx_pending;	/* # of pending aux buffers */
    169 	int		vx_flags;
    170 #define VX_BUSY		1
    171 #define VX_DEAD		2
    172 };
    173 
    174 struct vndbuf {
    175 	struct buf	vb_buf;
    176 	struct vndxfer	*vb_xfer;
    177 };
    178 
    179 
    180 /*
    181  * We keep a of pool vndbuf's and vndxfer structures.
    182  */
    183 static struct pool vndxfer_pool;
    184 static struct pool vndbuf_pool;
    185 
    186 #define	getvndxfer(vnx)	do {						\
    187 	int s = splbio();						\
    188 	vnx = pool_get(&vndxfer_pool, PR_WAITOK);			\
    189 	splx(s);							\
    190 } while (0)
    191 
    192 #define putvndxfer(vnx) {						\
    193 	pool_put(&vndxfer_pool, (void *)(vnx));				\
    194 }
    195 
    196 #define	getvndbuf(vbp)	do {						\
    197 	int s = splbio();						\
    198 	vbp = pool_get(&vndbuf_pool, PR_WAITOK);			\
    199 	splx(s);							\
    200 } while (0)
    201 
    202 #define putvndbuf(vbp) {						\
    203 	pool_put(&vndbuf_pool, (void *)(vbp));				\
    204 }
    205 
    206 /* /dev/drum */
    207 bdev_decl(sw);
    208 cdev_decl(sw);
    209 
    210 /*
    211  * local variables
    212  */
    213 static struct extent *swapmap;		/* controls the mapping of /dev/drum */
    214 
    215 /* list of all active swap devices [by priority] */
    216 LIST_HEAD(swap_priority, swappri);
    217 static struct swap_priority swap_priority;
    218 
    219 /* locks */
    220 struct lock swap_syscall_lock;
    221 
    222 /*
    223  * prototypes
    224  */
    225 static struct swapdev	*swapdrum_getsdp __P((int));
    226 
    227 static struct swapdev	*swaplist_find __P((struct vnode *, int));
    228 static void		 swaplist_insert __P((struct swapdev *,
    229 					     struct swappri *, int));
    230 static void		 swaplist_trim __P((void));
    231 
    232 static int swap_on __P((struct proc *, struct swapdev *));
    233 static int swap_off __P((struct proc *, struct swapdev *));
    234 
    235 static void sw_reg_strategy __P((struct swapdev *, struct buf *, int));
    236 static void sw_reg_iodone __P((struct buf *));
    237 static void sw_reg_start __P((struct swapdev *));
    238 
    239 static int uvm_swap_io __P((struct vm_page **, int, int, int));
    240 
    241 /*
    242  * uvm_swap_init: init the swap system data structures and locks
    243  *
    244  * => called at boot time from init_main.c after the filesystems
    245  *	are brought up (which happens after uvm_init())
    246  */
    247 void
    248 uvm_swap_init()
    249 {
    250 	UVMHIST_FUNC("uvm_swap_init");
    251 
    252 	UVMHIST_CALLED(pdhist);
    253 	/*
    254 	 * first, init the swap list, its counter, and its lock.
    255 	 * then get a handle on the vnode for /dev/drum by using
     256 	 * its dev_t number ("swapdev", from MD conf.c).
    257 	 */
    258 
    259 	LIST_INIT(&swap_priority);
    260 	uvmexp.nswapdev = 0;
    261 	lockinit(&swap_syscall_lock, PVM, "swapsys", 0, 0);
    262 	simple_lock_init(&uvm.swap_data_lock);
    263 
    264 	if (bdevvp(swapdev, &swapdev_vp))
    265 		panic("uvm_swap_init: can't get vnode for swap device");
    266 
    267 	/*
    268 	 * create swap block resource map to map /dev/drum.   the range
    269 	 * from 1 to INT_MAX allows 2 gigablocks of swap space.  note
    270 	 * that block 0 is reserved (used to indicate an allocation
    271 	 * failure, or no allocation).
    272 	 */
    273 	swapmap = extent_create("swapmap", 1, INT_MAX,
    274 				M_VMSWAP, 0, 0, EX_NOWAIT);
    275 	if (swapmap == 0)
    276 		panic("uvm_swap_init: extent_create failed");
    277 
    278 	/*
    279 	 * allocate pools for structures used for swapping to files.
    280 	 */
    281 
    282 	pool_init(&vndxfer_pool, sizeof(struct vndxfer), 0, 0, 0,
    283 	    "swp vnx", NULL);
    284 
    285 	pool_init(&vndbuf_pool, sizeof(struct vndbuf), 0, 0, 0,
    286 	    "swp vnd", NULL);
    287 
    288 	/*
    289 	 * done!
    290 	 */
    291 	UVMHIST_LOG(pdhist, "<- done", 0, 0, 0, 0);
    292 }
    293 
    294 /*
    295  * swaplist functions: functions that operate on the list of swap
    296  * devices on the system.
    297  */
    298 
    299 /*
    300  * swaplist_insert: insert swap device "sdp" into the global list
    301  *
    302  * => caller must hold both swap_syscall_lock and uvm.swap_data_lock
    303  * => caller must provide a newly malloc'd swappri structure (we will
     304 	 *	FREE it if we don't need it... this is to prevent malloc from blocking
    305  *	here while adding swap)
    306  */
    307 static void
    308 swaplist_insert(sdp, newspp, priority)
    309 	struct swapdev *sdp;
    310 	struct swappri *newspp;
    311 	int priority;
    312 {
    313 	struct swappri *spp, *pspp;
    314 	UVMHIST_FUNC("swaplist_insert"); UVMHIST_CALLED(pdhist);
    315 
    316 	/*
    317 	 * find entry at or after which to insert the new device.
    318 	 */
    319 	pspp = NULL;
    320 	LIST_FOREACH(spp, &swap_priority, spi_swappri) {
    321 		if (priority <= spp->spi_priority)
    322 			break;
    323 		pspp = spp;
    324 	}
    325 
    326 	/*
    327 	 * new priority?
    328 	 */
    329 	if (spp == NULL || spp->spi_priority != priority) {
    330 		spp = newspp;  /* use newspp! */
    331 		UVMHIST_LOG(pdhist, "created new swappri = %d",
    332 			    priority, 0, 0, 0);
    333 
    334 		spp->spi_priority = priority;
    335 		CIRCLEQ_INIT(&spp->spi_swapdev);
    336 
    337 		if (pspp)
    338 			LIST_INSERT_AFTER(pspp, spp, spi_swappri);
    339 		else
    340 			LIST_INSERT_HEAD(&swap_priority, spp, spi_swappri);
    341 	} else {
    342 	  	/* we don't need a new priority structure, free it */
    343 		FREE(newspp, M_VMSWAP);
    344 	}
    345 
    346 	/*
    347 	 * priority found (or created).   now insert on the priority's
    348 	 * circleq list and bump the total number of swapdevs.
    349 	 */
    350 	sdp->swd_priority = priority;
    351 	CIRCLEQ_INSERT_TAIL(&spp->spi_swapdev, sdp, swd_next);
    352 	uvmexp.nswapdev++;
    353 }
    354 
    355 /*
    356  * swaplist_find: find and optionally remove a swap device from the
    357  *	global list.
    358  *
    359  * => caller must hold both swap_syscall_lock and uvm.swap_data_lock
    360  * => we return the swapdev we found (and removed)
    361  */
    362 static struct swapdev *
    363 swaplist_find(vp, remove)
    364 	struct vnode *vp;
    365 	boolean_t remove;
    366 {
    367 	struct swapdev *sdp;
    368 	struct swappri *spp;
    369 
    370 	/*
    371 	 * search the lists for the requested vp
    372 	 */
    373 
    374 	LIST_FOREACH(spp, &swap_priority, spi_swappri) {
    375 		CIRCLEQ_FOREACH(sdp, &spp->spi_swapdev, swd_next) {
    376 			if (sdp->swd_vp == vp) {
    377 				if (remove) {
    378 					CIRCLEQ_REMOVE(&spp->spi_swapdev,
    379 					    sdp, swd_next);
    380 					uvmexp.nswapdev--;
    381 				}
    382 				return(sdp);
    383 			}
    384 		}
    385 	}
    386 	return (NULL);
    387 }
    388 
    389 
    390 /*
    391  * swaplist_trim: scan priority list for empty priority entries and kill
    392  *	them.
    393  *
    394  * => caller must hold both swap_syscall_lock and uvm.swap_data_lock
    395  */
    396 static void
    397 swaplist_trim()
    398 {
    399 	struct swappri *spp, *nextspp;
    400 
    401 	for (spp = LIST_FIRST(&swap_priority); spp != NULL; spp = nextspp) {
    402 		nextspp = LIST_NEXT(spp, spi_swappri);
    403 		if (CIRCLEQ_FIRST(&spp->spi_swapdev) !=
    404 		    (void *)&spp->spi_swapdev)
    405 			continue;
    406 		LIST_REMOVE(spp, spi_swappri);
    407 		free(spp, M_VMSWAP);
    408 	}
    409 }
    410 
    411 /*
    412  * swapdrum_getsdp: given a page offset in /dev/drum, convert it back
    413  *	to the "swapdev" that maps that section of the drum.
    414  *
    415  * => each swapdev takes one big contig chunk of the drum
    416  * => caller must hold uvm.swap_data_lock
    417  */
    418 static struct swapdev *
    419 swapdrum_getsdp(pgno)
    420 	int pgno;
    421 {
    422 	struct swapdev *sdp;
    423 	struct swappri *spp;
    424 
    425 	LIST_FOREACH(spp, &swap_priority, spi_swappri) {
    426 		CIRCLEQ_FOREACH(sdp, &spp->spi_swapdev, swd_next) {
    427 			if (sdp->swd_flags & SWF_FAKE)
    428 				continue;
    429 			if (pgno >= sdp->swd_drumoffset &&
    430 			    pgno < (sdp->swd_drumoffset + sdp->swd_drumsize)) {
    431 				return sdp;
    432 			}
    433 		}
    434 	}
    435 	return NULL;
    436 }
    437 
    438 
    439 /*
    440  * sys_swapctl: main entry point for swapctl(2) system call
    441  * 	[with two helper functions: swap_on and swap_off]
    442  */
    443 int
    444 sys_swapctl(l, v, retval)
    445 	struct lwp *l;
    446 	void *v;
    447 	register_t *retval;
    448 {
    449 	struct sys_swapctl_args /* {
    450 		syscallarg(int) cmd;
    451 		syscallarg(void *) arg;
    452 		syscallarg(int) misc;
    453 	} */ *uap = (struct sys_swapctl_args *)v;
    454 	struct proc *p = l->l_proc;
    455 	struct vnode *vp;
    456 	struct nameidata nd;
    457 	struct swappri *spp;
    458 	struct swapdev *sdp;
    459 	struct swapent *sep;
    460 	char	userpath[PATH_MAX + 1];
    461 	size_t	len;
    462 	int	error, misc;
    463 	int	priority;
    464 	UVMHIST_FUNC("sys_swapctl"); UVMHIST_CALLED(pdhist);
    465 
    466 	misc = SCARG(uap, misc);
    467 
    468 	/*
    469 	 * ensure serialized syscall access by grabbing the swap_syscall_lock
    470 	 */
    471 	lockmgr(&swap_syscall_lock, LK_EXCLUSIVE, NULL);
    472 
    473 	/*
     474 	 * we handle the non-privileged NSWAP and STATS requests first.
    475 	 *
    476 	 * SWAP_NSWAP: return number of config'd swap devices
    477 	 * [can also be obtained with uvmexp sysctl]
    478 	 */
    479 	if (SCARG(uap, cmd) == SWAP_NSWAP) {
    480 		UVMHIST_LOG(pdhist, "<- done SWAP_NSWAP=%d", uvmexp.nswapdev,
    481 		    0, 0, 0);
    482 		*retval = uvmexp.nswapdev;
    483 		error = 0;
    484 		goto out;
    485 	}
    486 
    487 	/*
    488 	 * SWAP_STATS: get stats on current # of configured swap devs
    489 	 *
    490 	 * note that the swap_priority list can't change as long
    491 	 * as we are holding the swap_syscall_lock.  we don't want
    492 	 * to grab the uvm.swap_data_lock because we may fault&sleep during
    493 	 * copyout() and we don't want to be holding that lock then!
    494 	 */
    495 	if (SCARG(uap, cmd) == SWAP_STATS
    496 #if defined(COMPAT_13)
    497 	    || SCARG(uap, cmd) == SWAP_OSTATS
    498 #endif
    499 	    ) {
    500 		misc = MIN(uvmexp.nswapdev, misc);
    501 #if defined(COMPAT_13)
    502 		if (SCARG(uap, cmd) == SWAP_OSTATS)
    503 			len = sizeof(struct oswapent) * misc;
    504 		else
    505 #endif
    506 			len = sizeof(struct swapent) * misc;
    507 		sep = (struct swapent *)malloc(len, M_TEMP, M_WAITOK);
    508 
    509 		uvm_swap_stats(SCARG(uap, cmd), sep, misc, retval);
    510 		error = copyout(sep, (void *)SCARG(uap, arg), len);
    511 
    512 		free(sep, M_TEMP);
    513 		UVMHIST_LOG(pdhist, "<- done SWAP_STATS", 0, 0, 0, 0);
    514 		goto out;
    515 	}
    516 	if (SCARG(uap, cmd) == SWAP_GETDUMPDEV) {
    517 		dev_t	*devp = (dev_t *)SCARG(uap, arg);
    518 
    519 		error = copyout(&dumpdev, devp, sizeof(dumpdev));
    520 		goto out;
    521 	}
    522 
    523 	/*
    524 	 * all other requests require superuser privs.   verify.
    525 	 */
    526 	if ((error = suser(p->p_ucred, &p->p_acflag)))
    527 		goto out;
    528 
    529 	/*
    530 	 * at this point we expect a path name in arg.   we will
    531 	 * use namei() to gain a vnode reference (vref), and lock
    532 	 * the vnode (VOP_LOCK).
    533 	 *
    534 	 * XXX: a NULL arg means use the root vnode pointer (e.g. for
    535 	 * miniroot)
    536 	 */
    537 	if (SCARG(uap, arg) == NULL) {
    538 		vp = rootvp;		/* miniroot */
    539 		if (vget(vp, LK_EXCLUSIVE)) {
    540 			error = EBUSY;
    541 			goto out;
    542 		}
    543 		if (SCARG(uap, cmd) == SWAP_ON &&
    544 		    copystr("miniroot", userpath, sizeof userpath, &len))
    545 			panic("swapctl: miniroot copy failed");
    546 	} else {
    547 		int	space;
    548 		char	*where;
    549 
    550 		if (SCARG(uap, cmd) == SWAP_ON) {
    551 			if ((error = copyinstr(SCARG(uap, arg), userpath,
    552 			    sizeof userpath, &len)))
    553 				goto out;
    554 			space = UIO_SYSSPACE;
    555 			where = userpath;
    556 		} else {
    557 			space = UIO_USERSPACE;
    558 			where = (char *)SCARG(uap, arg);
    559 		}
    560 		NDINIT(&nd, LOOKUP, FOLLOW|LOCKLEAF, space, where, p);
    561 		if ((error = namei(&nd)))
    562 			goto out;
    563 		vp = nd.ni_vp;
    564 	}
    565 	/* note: "vp" is referenced and locked */
    566 
    567 	error = 0;		/* assume no error */
    568 	switch(SCARG(uap, cmd)) {
    569 
    570 	case SWAP_DUMPDEV:
    571 		if (vp->v_type != VBLK) {
    572 			error = ENOTBLK;
    573 			break;
    574 		}
    575 		dumpdev = vp->v_rdev;
    576 		break;
    577 
    578 	case SWAP_CTL:
    579 		/*
    580 		 * get new priority, remove old entry (if any) and then
    581 		 * reinsert it in the correct place.  finally, prune out
    582 		 * any empty priority structures.
    583 		 */
    584 		priority = SCARG(uap, misc);
    585 		spp = malloc(sizeof *spp, M_VMSWAP, M_WAITOK);
    586 		simple_lock(&uvm.swap_data_lock);
    587 		if ((sdp = swaplist_find(vp, 1)) == NULL) {
    588 			error = ENOENT;
    589 		} else {
    590 			swaplist_insert(sdp, spp, priority);
    591 			swaplist_trim();
    592 		}
    593 		simple_unlock(&uvm.swap_data_lock);
    594 		if (error)
    595 			free(spp, M_VMSWAP);
    596 		break;
    597 
    598 	case SWAP_ON:
    599 
    600 		/*
    601 		 * check for duplicates.   if none found, then insert a
    602 		 * dummy entry on the list to prevent someone else from
    603 		 * trying to enable this device while we are working on
    604 		 * it.
    605 		 */
    606 
    607 		priority = SCARG(uap, misc);
    608 		sdp = malloc(sizeof *sdp, M_VMSWAP, M_WAITOK);
    609 		spp = malloc(sizeof *spp, M_VMSWAP, M_WAITOK);
    610 		simple_lock(&uvm.swap_data_lock);
    611 		if (swaplist_find(vp, 0) != NULL) {
    612 			error = EBUSY;
    613 			simple_unlock(&uvm.swap_data_lock);
    614 			free(sdp, M_VMSWAP);
    615 			free(spp, M_VMSWAP);
    616 			break;
    617 		}
    618 		memset(sdp, 0, sizeof(*sdp));
    619 		sdp->swd_flags = SWF_FAKE;	/* placeholder only */
    620 		sdp->swd_vp = vp;
    621 		sdp->swd_dev = (vp->v_type == VBLK) ? vp->v_rdev : NODEV;
    622 		BUFQ_INIT(&sdp->swd_tab);
    623 
    624 		swaplist_insert(sdp, spp, priority);
    625 		simple_unlock(&uvm.swap_data_lock);
    626 
    627 		sdp->swd_pathlen = len;
    628 		sdp->swd_path = malloc(sdp->swd_pathlen, M_VMSWAP, M_WAITOK);
    629 		if (copystr(userpath, sdp->swd_path, sdp->swd_pathlen, 0) != 0)
    630 			panic("swapctl: copystr");
    631 
    632 		/*
    633 		 * we've now got a FAKE placeholder in the swap list.
    634 		 * now attempt to enable swap on it.  if we fail, undo
    635 		 * what we've done and kill the fake entry we just inserted.
    636 		 * if swap_on is a success, it will clear the SWF_FAKE flag
    637 		 */
    638 
    639 		if ((error = swap_on(p, sdp)) != 0) {
    640 			simple_lock(&uvm.swap_data_lock);
    641 			(void) swaplist_find(vp, 1);  /* kill fake entry */
    642 			swaplist_trim();
    643 			simple_unlock(&uvm.swap_data_lock);
    644 			free(sdp->swd_path, M_VMSWAP);
    645 			free(sdp, M_VMSWAP);
    646 			break;
    647 		}
    648 		break;
    649 
    650 	case SWAP_OFF:
    651 		simple_lock(&uvm.swap_data_lock);
    652 		if ((sdp = swaplist_find(vp, 0)) == NULL) {
    653 			simple_unlock(&uvm.swap_data_lock);
    654 			error = ENXIO;
    655 			break;
    656 		}
    657 
    658 		/*
    659 		 * If a device isn't in use or enabled, we
    660 		 * can't stop swapping from it (again).
    661 		 */
    662 		if ((sdp->swd_flags & (SWF_INUSE|SWF_ENABLE)) == 0) {
    663 			simple_unlock(&uvm.swap_data_lock);
    664 			error = EBUSY;
    665 			break;
    666 		}
    667 
    668 		/*
    669 		 * do the real work.
    670 		 */
    671 		error = swap_off(p, sdp);
    672 		break;
    673 
    674 	default:
    675 		error = EINVAL;
    676 	}
    677 
    678 	/*
    679 	 * done!  release the ref gained by namei() and unlock.
    680 	 */
    681 	vput(vp);
    682 
    683 out:
    684 	lockmgr(&swap_syscall_lock, LK_RELEASE, NULL);
    685 
    686 	UVMHIST_LOG(pdhist, "<- done!  error=%d", error, 0, 0, 0);
    687 	return (error);
    688 }
    689 
    690 /*
     691  * uvm_swap_stats: implements swapctl(SWAP_STATS). The function is kept
    692  * away from sys_swapctl() in order to allow COMPAT_* swapctl()
    693  * emulation to use it directly without going through sys_swapctl().
    694  * The problem with using sys_swapctl() there is that it involves
    695  * copying the swapent array to the stackgap, and this array's size
    696  * is not known at build time. Hence it would not be possible to
    697  * ensure it would fit in the stackgap in any case.
    698  */
    699 void
    700 uvm_swap_stats(cmd, sep, sec, retval)
    701 	int cmd;
    702 	struct swapent *sep;
    703 	int sec;
    704 	register_t *retval;
    705 {
    706 	struct swappri *spp;
    707 	struct swapdev *sdp;
    708 	int count = 0;
    709 
    710 	LIST_FOREACH(spp, &swap_priority, spi_swappri) {
    711 		for (sdp = CIRCLEQ_FIRST(&spp->spi_swapdev);
    712 		     sdp != (void *)&spp->spi_swapdev && sec-- > 0;
    713 		     sdp = CIRCLEQ_NEXT(sdp, swd_next)) {
    714 		  	/*
    715 			 * backwards compatibility for system call.
    716 			 * note that we use 'struct oswapent' as an
    717 			 * overlay into both 'struct swapdev' and
    718 			 * the userland 'struct swapent', as we
    719 			 * want to retain backwards compatibility
    720 			 * with NetBSD 1.3.
    721 			 */
    722 			sdp->swd_ose.ose_inuse =
    723 			    btodb((u_int64_t)sdp->swd_npginuse <<
    724 			    PAGE_SHIFT);
    725 			(void)memcpy(sep, &sdp->swd_ose,
    726 			    sizeof(struct oswapent));
    727 
    728 			/* now copy out the path if necessary */
    729 #if defined(COMPAT_13)
    730 			if (cmd == SWAP_STATS)
    731 #endif
    732 				(void)memcpy(&sep->se_path, sdp->swd_path,
    733 				    sdp->swd_pathlen);
    734 
    735 			count++;
    736 #if defined(COMPAT_13)
    737 			if (cmd == SWAP_OSTATS)
    738 				sep = (struct swapent *)
    739 				    ((struct oswapent *)sep + 1);
    740 			else
    741 #endif
    742 				sep++;
    743 		}
    744 	}
    745 
    746 	*retval = count;
    747 	return;
    748 }
    749 
    750 /*
    751  * swap_on: attempt to enable a swapdev for swapping.   note that the
    752  *	swapdev is already on the global list, but disabled (marked
    753  *	SWF_FAKE).
    754  *
    755  * => we avoid the start of the disk (to protect disk labels)
    756  * => we also avoid the miniroot, if we are swapping to root.
    757  * => caller should leave uvm.swap_data_lock unlocked, we may lock it
    758  *	if needed.
    759  */
    760 static int
    761 swap_on(p, sdp)
    762 	struct proc *p;
    763 	struct swapdev *sdp;
    764 {
    765 	static int count = 0;	/* static */
    766 	struct vnode *vp;
    767 	int error, npages, nblocks, size;
    768 	long addr;
    769 	u_long result;
    770 	struct vattr va;
    771 #ifdef NFS
    772 	extern int (**nfsv2_vnodeop_p) __P((void *));
    773 #endif /* NFS */
    774 	dev_t dev;
    775 	UVMHIST_FUNC("swap_on"); UVMHIST_CALLED(pdhist);
    776 
    777 	/*
    778 	 * we want to enable swapping on sdp.   the swd_vp contains
    779 	 * the vnode we want (locked and ref'd), and the swd_dev
     780 	 * contains the dev_t of the file, if it is a block device.
    781 	 */
    782 
    783 	vp = sdp->swd_vp;
    784 	dev = sdp->swd_dev;
    785 
    786 	/*
    787 	 * open the swap file (mostly useful for block device files to
    788 	 * let device driver know what is up).
    789 	 *
    790 	 * we skip the open/close for root on swap because the root
    791 	 * has already been opened when root was mounted (mountroot).
    792 	 */
    793 	if (vp != rootvp) {
    794 		if ((error = VOP_OPEN(vp, FREAD|FWRITE, p->p_ucred, p)))
    795 			return (error);
    796 	}
    797 
    798 	/* XXX this only works for block devices */
    799 	UVMHIST_LOG(pdhist, "  dev=%d, major(dev)=%d", dev, major(dev), 0,0);
    800 
    801 	/*
    802 	 * we now need to determine the size of the swap area.   for
    803 	 * block specials we can call the d_psize function.
    804 	 * for normal files, we must stat [get attrs].
    805 	 *
     806 	 * we put the result in nblocks.
    807 	 * for normal files, we also want the filesystem block size
    808 	 * (which we get with statfs).
    809 	 */
    810 	switch (vp->v_type) {
    811 	case VBLK:
    812 		if (bdevsw[major(dev)].d_psize == 0 ||
    813 		    (nblocks = (*bdevsw[major(dev)].d_psize)(dev)) == -1) {
    814 			error = ENXIO;
    815 			goto bad;
    816 		}
    817 		break;
    818 
    819 	case VREG:
    820 		if ((error = VOP_GETATTR(vp, &va, p->p_ucred, p)))
    821 			goto bad;
    822 		nblocks = (int)btodb(va.va_size);
    823 		if ((error =
    824 		     VFS_STATFS(vp->v_mount, &vp->v_mount->mnt_stat, p)) != 0)
    825 			goto bad;
    826 
    827 		sdp->swd_bsize = vp->v_mount->mnt_stat.f_iosize;
    828 		/*
    829 		 * limit the max # of outstanding I/O requests we issue
    830 		 * at any one time.   take it easy on NFS servers.
    831 		 */
    832 #ifdef NFS
    833 		if (vp->v_op == nfsv2_vnodeop_p)
    834 			sdp->swd_maxactive = 2; /* XXX */
    835 		else
    836 #endif /* NFS */
    837 			sdp->swd_maxactive = 8; /* XXX */
    838 		break;
    839 
    840 	default:
    841 		error = ENXIO;
    842 		goto bad;
    843 	}
    844 
    845 	/*
    846 	 * save nblocks in a safe place and convert to pages.
    847 	 */
    848 
    849 	sdp->swd_ose.ose_nblks = nblocks;
    850 	npages = dbtob((u_int64_t)nblocks) >> PAGE_SHIFT;
    851 
    852 	/*
     853 	 * for block special files, we want to make sure that we leave
    854 	 * the disklabel and bootblocks alone, so we arrange to skip
    855 	 * over them (arbitrarily choosing to skip PAGE_SIZE bytes).
    856 	 * note that because of this the "size" can be less than the
    857 	 * actual number of blocks on the device.
    858 	 */
    859 	if (vp->v_type == VBLK) {
    860 		/* we use pages 1 to (size - 1) [inclusive] */
    861 		size = npages - 1;
    862 		addr = 1;
    863 	} else {
    864 		/* we use pages 0 to (size - 1) [inclusive] */
    865 		size = npages;
    866 		addr = 0;
    867 	}
    868 
    869 	/*
    870 	 * make sure we have enough blocks for a reasonable sized swap
    871 	 * area.   we want at least one page.
    872 	 */
    873 
    874 	if (size < 1) {
     875 		UVMHIST_LOG(pdhist, "  size < 1!!", 0, 0, 0, 0);
    876 		error = EINVAL;
    877 		goto bad;
    878 	}
    879 
    880 	UVMHIST_LOG(pdhist, "  dev=%x: size=%d addr=%ld\n", dev, size, addr, 0);
    881 
    882 	/*
    883 	 * now we need to allocate an extent to manage this swap device
    884 	 */
    885 	snprintf(sdp->swd_exname, sizeof(sdp->swd_exname), "swap0x%04x",
    886 	    count++);
    887 
    888 	/* note that extent_create's 3rd arg is inclusive, thus "- 1" */
    889 	sdp->swd_ex = extent_create(sdp->swd_exname, 0, npages - 1, M_VMSWAP,
    890 				    0, 0, EX_WAITOK);
    891 	/* allocate the `saved' region from the extent so it won't be used */
    892 	if (addr) {
    893 		if (extent_alloc_region(sdp->swd_ex, 0, addr, EX_WAITOK))
    894 			panic("disklabel region");
    895 	}
    896 
    897 	/*
    898 	 * if the vnode we are swapping to is the root vnode
    899 	 * (i.e. we are swapping to the miniroot) then we want
    900 	 * to make sure we don't overwrite it.   do a statfs to
    901 	 * find its size and skip over it.
    902 	 */
    903 	if (vp == rootvp) {
    904 		struct mount *mp;
    905 		struct statfs *sp;
    906 		int rootblocks, rootpages;
    907 
    908 		mp = rootvnode->v_mount;
    909 		sp = &mp->mnt_stat;
    910 		rootblocks = sp->f_blocks * btodb(sp->f_bsize);
    911 		rootpages = round_page(dbtob(rootblocks)) >> PAGE_SHIFT;
    912 		if (rootpages > size)
    913 			panic("swap_on: miniroot larger than swap?");
    914 
    915 		if (extent_alloc_region(sdp->swd_ex, addr,
    916 					rootpages, EX_WAITOK))
    917 			panic("swap_on: unable to preserve miniroot");
    918 
    919 		size -= rootpages;
    920 		printf("Preserved %d pages of miniroot ", rootpages);
    921 		printf("leaving %d pages of swap\n", size);
    922 	}
    923 
    924   	/*
    925 	 * try to add anons to reflect the new swap space.
    926 	 */
    927 
    928 	error = uvm_anon_add(size);
    929 	if (error) {
    930 		goto bad;
    931 	}
    932 
    933 	/*
    934 	 * add a ref to vp to reflect usage as a swap device.
    935 	 */
    936 	vref(vp);
    937 
    938 	/*
    939 	 * now add the new swapdev to the drum and enable.
    940 	 */
    941 	if (extent_alloc(swapmap, npages, EX_NOALIGN, EX_NOBOUNDARY,
    942 	    EX_WAITOK, &result))
    943 		panic("swapdrum_add");
    944 
    945 	sdp->swd_drumoffset = (int)result;
    946 	sdp->swd_drumsize = npages;
    947 	sdp->swd_npages = size;
    948 	simple_lock(&uvm.swap_data_lock);
    949 	sdp->swd_flags &= ~SWF_FAKE;	/* going live */
    950 	sdp->swd_flags |= (SWF_INUSE|SWF_ENABLE);
    951 	uvmexp.swpages += size;
    952 	simple_unlock(&uvm.swap_data_lock);
    953 	return (0);
    954 
    955 	/*
    956 	 * failure: clean up and return error.
    957 	 */
    958 
    959 bad:
    960 	if (sdp->swd_ex) {
    961 		extent_destroy(sdp->swd_ex);
    962 	}
    963 	if (vp != rootvp) {
    964 		(void)VOP_CLOSE(vp, FREAD|FWRITE, p->p_ucred, p);
    965 	}
    966 	return (error);
    967 }
    968 
    969 /*
    970  * swap_off: stop swapping on swapdev
    971  *
    972  * => swap data should be locked, we will unlock.
    973  */
    974 static int
    975 swap_off(p, sdp)
    976 	struct proc *p;
    977 	struct swapdev *sdp;
    978 {
    979 	UVMHIST_FUNC("swap_off"); UVMHIST_CALLED(pdhist);
    980 	UVMHIST_LOG(pdhist, "  dev=%x", sdp->swd_dev,0,0,0);
    981 
    982 	/* disable the swap area being removed */
    983 	sdp->swd_flags &= ~SWF_ENABLE;
    984 	simple_unlock(&uvm.swap_data_lock);
    985 
    986 	/*
    987 	 * the idea is to find all the pages that are paged out to this
    988 	 * device, and page them all in.  in uvm, swap-backed pageable
    989 	 * memory can take two forms: aobjs and anons.  call the
    990 	 * swapoff hook for each subsystem to bring in pages.
    991 	 */
    992 
    993 	if (uao_swap_off(sdp->swd_drumoffset,
    994 			 sdp->swd_drumoffset + sdp->swd_drumsize) ||
    995 	    anon_swap_off(sdp->swd_drumoffset,
    996 			  sdp->swd_drumoffset + sdp->swd_drumsize)) {
    997 
    998 		simple_lock(&uvm.swap_data_lock);
    999 		sdp->swd_flags |= SWF_ENABLE;
   1000 		simple_unlock(&uvm.swap_data_lock);
   1001 		return ENOMEM;
   1002 	}
   1003 	KASSERT(sdp->swd_npginuse == sdp->swd_npgbad);
   1004 
   1005 	/*
   1006 	 * done with the vnode.
   1007 	 * drop our ref on the vnode before calling VOP_CLOSE()
   1008 	 * so that spec_close() can tell if this is the last close.
   1009 	 */
   1010 	vrele(sdp->swd_vp);
   1011 	if (sdp->swd_vp != rootvp) {
   1012 		(void) VOP_CLOSE(sdp->swd_vp, FREAD|FWRITE, p->p_ucred, p);
   1013 	}
   1014 
   1015 	/* remove anons from the system */
   1016 	uvm_anon_remove(sdp->swd_npages);
   1017 
   1018 	simple_lock(&uvm.swap_data_lock);
   1019 	uvmexp.swpages -= sdp->swd_npages;
   1020 
   1021 	if (swaplist_find(sdp->swd_vp, 1) == NULL)
    1022 		panic("swap_off: swapdev not in list");
   1023 	swaplist_trim();
   1024 	simple_unlock(&uvm.swap_data_lock);
   1025 
   1026 	/*
   1027 	 * free all resources!
   1028 	 */
   1029 	extent_free(swapmap, sdp->swd_drumoffset, sdp->swd_drumsize,
   1030 		    EX_WAITOK);
   1031 	extent_destroy(sdp->swd_ex);
   1032 	free(sdp, M_VMSWAP);
   1033 	return (0);
   1034 }
   1035 
   1036 /*
   1037  * /dev/drum interface and i/o functions
   1038  */
   1039 
   1040 /*
   1041  * swread: the read function for the drum (just a call to physio)
   1042  */
   1043 /*ARGSUSED*/
   1044 int
   1045 swread(dev, uio, ioflag)
   1046 	dev_t dev;
   1047 	struct uio *uio;
   1048 	int ioflag;
   1049 {
   1050 	UVMHIST_FUNC("swread"); UVMHIST_CALLED(pdhist);
   1051 
   1052 	UVMHIST_LOG(pdhist, "  dev=%x offset=%qx", dev, uio->uio_offset, 0, 0);
   1053 	return (physio(swstrategy, NULL, dev, B_READ, minphys, uio));
   1054 }
   1055 
   1056 /*
   1057  * swwrite: the write function for the drum (just a call to physio)
   1058  */
   1059 /*ARGSUSED*/
   1060 int
   1061 swwrite(dev, uio, ioflag)
   1062 	dev_t dev;
   1063 	struct uio *uio;
   1064 	int ioflag;
   1065 {
   1066 	UVMHIST_FUNC("swwrite"); UVMHIST_CALLED(pdhist);
   1067 
   1068 	UVMHIST_LOG(pdhist, "  dev=%x offset=%qx", dev, uio->uio_offset, 0, 0);
   1069 	return (physio(swstrategy, NULL, dev, B_WRITE, minphys, uio));
   1070 }
   1071 
   1072 /*
   1073  * swstrategy: perform I/O on the drum
   1074  *
   1075  * => we must map the i/o request from the drum to the correct swapdev.
   1076  */
   1077 void
   1078 swstrategy(bp)
   1079 	struct buf *bp;
   1080 {
   1081 	struct swapdev *sdp;
   1082 	struct vnode *vp;
   1083 	int s, pageno, bn;
   1084 	UVMHIST_FUNC("swstrategy"); UVMHIST_CALLED(pdhist);
   1085 
   1086 	/*
   1087 	 * convert block number to swapdev.   note that swapdev can't
   1088 	 * be yanked out from under us because we are holding resources
   1089 	 * in it (i.e. the blocks we are doing I/O on).
   1090 	 */
   1091 	pageno = dbtob((int64_t)bp->b_blkno) >> PAGE_SHIFT;
   1092 	simple_lock(&uvm.swap_data_lock);
   1093 	sdp = swapdrum_getsdp(pageno);
   1094 	simple_unlock(&uvm.swap_data_lock);
   1095 	if (sdp == NULL) {
   1096 		bp->b_error = EINVAL;
   1097 		bp->b_flags |= B_ERROR;
   1098 		biodone(bp);
   1099 		UVMHIST_LOG(pdhist, "  failed to get swap device", 0, 0, 0, 0);
   1100 		return;
   1101 	}
   1102 
   1103 	/*
   1104 	 * convert drum page number to block number on this swapdev.
   1105 	 */
   1106 
   1107 	pageno -= sdp->swd_drumoffset;	/* page # on swapdev */
   1108 	bn = btodb((u_int64_t)pageno << PAGE_SHIFT); /* convert to diskblock */
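	/*
	 * worked example (illustrative; actual values depend on the port):
	 * with PAGE_SIZE = 4096 and DEV_BSIZE = 512, each drum page covers
	 * 8 disk blocks, so a swapdev-relative pageno of 3 maps to
	 * bn = btodb(3 << PAGE_SHIFT) = 24.
	 */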
   1109 
   1110 	UVMHIST_LOG(pdhist, "  %s: mapoff=%x bn=%x bcount=%ld",
   1111 		((bp->b_flags & B_READ) == 0) ? "write" : "read",
   1112 		sdp->swd_drumoffset, bn, bp->b_bcount);
   1113 
   1114 	/*
   1115 	 * for block devices we finish up here.
   1116 	 * for regular files we have to do more work which we delegate
   1117 	 * to sw_reg_strategy().
   1118 	 */
   1119 
   1120 	switch (sdp->swd_vp->v_type) {
   1121 	default:
   1122 		panic("swstrategy: vnode type 0x%x", sdp->swd_vp->v_type);
   1123 
   1124 	case VBLK:
   1125 
   1126 		/*
   1127 		 * must convert "bp" from an I/O on /dev/drum to an I/O
   1128 		 * on the swapdev (sdp).
   1129 		 */
   1130 		s = splbio();
   1131 		bp->b_blkno = bn;		/* swapdev block number */
   1132 		vp = sdp->swd_vp;		/* swapdev vnode pointer */
   1133 		bp->b_dev = sdp->swd_dev;	/* swapdev dev_t */
   1134 
   1135 		/*
   1136 		 * if we are doing a write, we have to redirect the i/o on
    1137 		 * if we are doing a write, we have to shift the i/o accounting
    1138 		 * from the drum's v_numoutput counter to the swapdev's.
   1139 		if ((bp->b_flags & B_READ) == 0) {
   1140 			vwakeup(bp);	/* kills one 'v_numoutput' on drum */
   1141 			vp->v_numoutput++;	/* put it on swapdev */
   1142 		}
   1143 
   1144 		/*
   1145 		 * finally plug in swapdev vnode and start I/O
   1146 		 */
   1147 		bp->b_vp = vp;
   1148 		splx(s);
   1149 		VOP_STRATEGY(bp);
   1150 		return;
   1151 
   1152 	case VREG:
   1153 		/*
   1154 		 * delegate to sw_reg_strategy function.
   1155 		 */
   1156 		sw_reg_strategy(sdp, bp, bn);
   1157 		return;
   1158 	}
   1159 	/* NOTREACHED */
   1160 }
   1161 
   1162 /*
   1163  * sw_reg_strategy: handle swap i/o to regular files
   1164  */
   1165 static void
   1166 sw_reg_strategy(sdp, bp, bn)
   1167 	struct swapdev	*sdp;
   1168 	struct buf	*bp;
   1169 	int		bn;
   1170 {
   1171 	struct vnode	*vp;
   1172 	struct vndxfer	*vnx;
   1173 	daddr_t		nbn;
   1174 	caddr_t		addr;
   1175 	off_t		byteoff;
   1176 	int		s, off, nra, error, sz, resid;
   1177 	UVMHIST_FUNC("sw_reg_strategy"); UVMHIST_CALLED(pdhist);
   1178 
   1179 	/*
   1180 	 * allocate a vndxfer head for this transfer and point it to
   1181 	 * our buffer.
   1182 	 */
   1183 	getvndxfer(vnx);
   1184 	vnx->vx_flags = VX_BUSY;
   1185 	vnx->vx_error = 0;
   1186 	vnx->vx_pending = 0;
   1187 	vnx->vx_bp = bp;
   1188 	vnx->vx_sdp = sdp;
   1189 
   1190 	/*
   1191 	 * setup for main loop where we read filesystem blocks into
   1192 	 * our buffer.
   1193 	 */
   1194 	error = 0;
    1195 	bp->b_resid = bp->b_bcount;	/* nothing transferred yet! */
   1196 	addr = bp->b_data;		/* current position in buffer */
   1197 	byteoff = dbtob((u_int64_t)bn);
   1198 
   1199 	for (resid = bp->b_resid; resid; resid -= sz) {
   1200 		struct vndbuf	*nbp;
   1201 
   1202 		/*
   1203 		 * translate byteoffset into block number.  return values:
   1204 		 *   vp = vnode of underlying device
   1205 		 *  nbn = new block number (on underlying vnode dev)
   1206 		 *  nra = num blocks we can read-ahead (excludes requested
   1207 		 *	block)
   1208 		 */
   1209 		nra = 0;
   1210 		error = VOP_BMAP(sdp->swd_vp, byteoff / sdp->swd_bsize,
   1211 				 	&vp, &nbn, &nra);
   1212 
   1213 		if (error == 0 && nbn == (daddr_t)-1) {
   1214 			/*
   1215 			 * this used to just set error, but that doesn't
   1216 			 * do the right thing.  Instead, it causes random
   1217 			 * memory errors.  The panic() should remain until
   1218 			 * this condition doesn't destabilize the system.
   1219 			 */
   1220 #if 1
   1221 			panic("sw_reg_strategy: swap to sparse file");
   1222 #else
   1223 			error = EIO;	/* failure */
   1224 #endif
   1225 		}
   1226 
   1227 		/*
   1228 		 * punt if there was an error or a hole in the file.
   1229 		 * we must wait for any i/o ops we have already started
   1230 		 * to finish before returning.
   1231 		 *
   1232 		 * XXX we could deal with holes here but it would be
   1233 		 * a hassle (in the write case).
   1234 		 */
   1235 		if (error) {
   1236 			s = splbio();
   1237 			vnx->vx_error = error;	/* pass error up */
   1238 			goto out;
   1239 		}
   1240 
   1241 		/*
   1242 		 * compute the size ("sz") of this transfer (in bytes).
   1243 		 */
   1244 		off = byteoff % sdp->swd_bsize;
   1245 		sz = (1 + nra) * sdp->swd_bsize - off;
   1246 		if (sz > resid)
   1247 			sz = resid;
   1248 
   1249 		UVMHIST_LOG(pdhist, "sw_reg_strategy: "
   1250 			    "vp %p/%p offset 0x%x/0x%x",
   1251 			    sdp->swd_vp, vp, byteoff, nbn);
   1252 
   1253 		/*
   1254 		 * now get a buf structure.   note that the vb_buf is
   1255 		 * at the front of the nbp structure so that you can
    1256 		 * cast pointers between the two structures easily.
   1257 		 */
   1258 		getvndbuf(nbp);
   1259 		nbp->vb_buf.b_flags    = bp->b_flags | B_CALL;
   1260 		nbp->vb_buf.b_bcount   = sz;
   1261 		nbp->vb_buf.b_bufsize  = sz;
   1262 		nbp->vb_buf.b_error    = 0;
   1263 		nbp->vb_buf.b_data     = addr;
   1264 		nbp->vb_buf.b_lblkno   = 0;
   1265 		nbp->vb_buf.b_blkno    = nbn + btodb(off);
   1266 		nbp->vb_buf.b_rawblkno = nbp->vb_buf.b_blkno;
   1267 		nbp->vb_buf.b_iodone   = sw_reg_iodone;
   1268 		nbp->vb_buf.b_vp       = vp;
   1269 		if (vp->v_type == VBLK) {
   1270 			nbp->vb_buf.b_dev = vp->v_rdev;
   1271 		}
   1272 		LIST_INIT(&nbp->vb_buf.b_dep);
   1273 
   1274 		nbp->vb_xfer = vnx;	/* patch it back in to vnx */
   1275 
   1276 		/*
   1277 		 * Just sort by block number
   1278 		 */
   1279 		s = splbio();
   1280 		if (vnx->vx_error != 0) {
   1281 			putvndbuf(nbp);
   1282 			goto out;
   1283 		}
   1284 		vnx->vx_pending++;
   1285 
   1286 		/* sort it in and start I/O if we are not over our limit */
   1287 		disksort_blkno(&sdp->swd_tab, &nbp->vb_buf);
   1288 		sw_reg_start(sdp);
   1289 		splx(s);
   1290 
   1291 		/*
   1292 		 * advance to the next I/O
   1293 		 */
   1294 		byteoff += sz;
   1295 		addr += sz;
   1296 	}
   1297 
   1298 	s = splbio();
   1299 
   1300 out: /* Arrive here at splbio */
   1301 	vnx->vx_flags &= ~VX_BUSY;
   1302 	if (vnx->vx_pending == 0) {
   1303 		if (vnx->vx_error != 0) {
   1304 			bp->b_error = vnx->vx_error;
   1305 			bp->b_flags |= B_ERROR;
   1306 		}
   1307 		putvndxfer(vnx);
   1308 		biodone(bp);
   1309 	}
   1310 	splx(s);
   1311 }
   1312 
   1313 /*
   1314  * sw_reg_start: start an I/O request on the requested swapdev
   1315  *
   1316  * => reqs are sorted by disksort (above)
   1317  */
   1318 static void
   1319 sw_reg_start(sdp)
   1320 	struct swapdev	*sdp;
   1321 {
   1322 	struct buf	*bp;
   1323 	UVMHIST_FUNC("sw_reg_start"); UVMHIST_CALLED(pdhist);
   1324 
   1325 	/* recursion control */
   1326 	if ((sdp->swd_flags & SWF_BUSY) != 0)
   1327 		return;
   1328 
   1329 	sdp->swd_flags |= SWF_BUSY;
   1330 
   1331 	while (sdp->swd_active < sdp->swd_maxactive) {
   1332 		bp = BUFQ_FIRST(&sdp->swd_tab);
   1333 		if (bp == NULL)
   1334 			break;
   1335 		BUFQ_REMOVE(&sdp->swd_tab, bp);
   1336 		sdp->swd_active++;
   1337 
   1338 		UVMHIST_LOG(pdhist,
   1339 		    "sw_reg_start:  bp %p vp %p blkno %p cnt %lx",
   1340 		    bp, bp->b_vp, bp->b_blkno, bp->b_bcount);
   1341 		if ((bp->b_flags & B_READ) == 0)
   1342 			bp->b_vp->v_numoutput++;
   1343 
   1344 		VOP_STRATEGY(bp);
   1345 	}
   1346 	sdp->swd_flags &= ~SWF_BUSY;
   1347 }
   1348 
   1349 /*
   1350  * sw_reg_iodone: one of our i/o's has completed and needs post-i/o cleanup
   1351  *
   1352  * => note that we can recover the vndbuf struct by casting the buf ptr
   1353  */
   1354 static void
   1355 sw_reg_iodone(bp)
   1356 	struct buf *bp;
   1357 {
   1358 	struct vndbuf *vbp = (struct vndbuf *) bp;
   1359 	struct vndxfer *vnx = vbp->vb_xfer;
   1360 	struct buf *pbp = vnx->vx_bp;		/* parent buffer */
   1361 	struct swapdev	*sdp = vnx->vx_sdp;
   1362 	int		s, resid;
   1363 	UVMHIST_FUNC("sw_reg_iodone"); UVMHIST_CALLED(pdhist);
   1364 
   1365 	UVMHIST_LOG(pdhist, "  vbp=%p vp=%p blkno=%x addr=%p",
   1366 	    vbp, vbp->vb_buf.b_vp, vbp->vb_buf.b_blkno, vbp->vb_buf.b_data);
   1367 	UVMHIST_LOG(pdhist, "  cnt=%lx resid=%lx",
   1368 	    vbp->vb_buf.b_bcount, vbp->vb_buf.b_resid, 0, 0);
   1369 
   1370 	/*
   1371 	 * protect vbp at splbio and update.
   1372 	 */
   1373 
   1374 	s = splbio();
   1375 	resid = vbp->vb_buf.b_bcount - vbp->vb_buf.b_resid;
   1376 	pbp->b_resid -= resid;
   1377 	vnx->vx_pending--;
   1378 
   1379 	if (vbp->vb_buf.b_error) {
   1380 		UVMHIST_LOG(pdhist, "  got error=%d !",
   1381 		    vbp->vb_buf.b_error, 0, 0, 0);
   1382 
   1383 		/* pass error upward */
   1384 		vnx->vx_error = vbp->vb_buf.b_error;
   1385 	}
   1386 
   1387 	/*
   1388 	 * kill vbp structure
   1389 	 */
   1390 	putvndbuf(vbp);
   1391 
   1392 	/*
   1393 	 * wrap up this transaction if it has run to completion or, in
   1394 	 * case of an error, when all auxiliary buffers have returned.
   1395 	 */
   1396 	if (vnx->vx_error != 0) {
   1397 		/* pass error upward */
   1398 		pbp->b_flags |= B_ERROR;
   1399 		pbp->b_error = vnx->vx_error;
   1400 		if ((vnx->vx_flags & VX_BUSY) == 0 && vnx->vx_pending == 0) {
   1401 			putvndxfer(vnx);
   1402 			biodone(pbp);
   1403 		}
   1404 	} else if (pbp->b_resid == 0) {
   1405 		KASSERT(vnx->vx_pending == 0);
   1406 		if ((vnx->vx_flags & VX_BUSY) == 0) {
    1407 			UVMHIST_LOG(pdhist, "  iodone, pbp=%p error=%d !",
    1408 			    pbp, vnx->vx_error, 0, 0);
   1409 			putvndxfer(vnx);
   1410 			biodone(pbp);
   1411 		}
   1412 	}
   1413 
   1414 	/*
   1415 	 * done!   start next swapdev I/O if one is pending
   1416 	 */
   1417 	sdp->swd_active--;
   1418 	sw_reg_start(sdp);
   1419 	splx(s);
   1420 }
   1421 
   1422 
   1423 /*
   1424  * uvm_swap_alloc: allocate space on swap
   1425  *
    1426  * => allocation is done "round robin" down the priority list; as we
    1427  *	allocate within a priority we "rotate" the circle queue.
   1428  * => space can be freed with uvm_swap_free
   1429  * => we return the page slot number in /dev/drum (0 == invalid slot)
   1430  * => we lock uvm.swap_data_lock
   1431  * => XXXMRG: "LESSOK" INTERFACE NEEDED TO EXTENT SYSTEM
   1432  */
   1433 int
   1434 uvm_swap_alloc(nslots, lessok)
   1435 	int *nslots;	/* IN/OUT */
   1436 	boolean_t lessok;
   1437 {
   1438 	struct swapdev *sdp;
   1439 	struct swappri *spp;
   1440 	u_long	result;
   1441 	UVMHIST_FUNC("uvm_swap_alloc"); UVMHIST_CALLED(pdhist);
   1442 
   1443 	/*
   1444 	 * no swap devices configured yet?   definite failure.
   1445 	 */
   1446 	if (uvmexp.nswapdev < 1)
   1447 		return 0;
   1448 
   1449 	/*
   1450 	 * lock data lock, convert slots into blocks, and enter loop
   1451 	 */
   1452 	simple_lock(&uvm.swap_data_lock);
   1453 
   1454 ReTry:	/* XXXMRG */
   1455 	LIST_FOREACH(spp, &swap_priority, spi_swappri) {
   1456 		CIRCLEQ_FOREACH(sdp, &spp->spi_swapdev, swd_next) {
   1457 			/* if it's not enabled, then we can't swap from it */
   1458 			if ((sdp->swd_flags & SWF_ENABLE) == 0)
   1459 				continue;
   1460 			if (sdp->swd_npginuse + *nslots > sdp->swd_npages)
   1461 				continue;
   1462 			if (extent_alloc(sdp->swd_ex, *nslots, EX_NOALIGN,
   1463 					 EX_NOBOUNDARY, EX_MALLOCOK|EX_NOWAIT,
   1464 					 &result) != 0) {
   1465 				continue;
   1466 			}
   1467 
   1468 			/*
   1469 			 * successful allocation!  now rotate the circleq.
   1470 			 */
   1471 			CIRCLEQ_REMOVE(&spp->spi_swapdev, sdp, swd_next);
   1472 			CIRCLEQ_INSERT_TAIL(&spp->spi_swapdev, sdp, swd_next);
   1473 			sdp->swd_npginuse += *nslots;
   1474 			uvmexp.swpginuse += *nslots;
   1475 			simple_unlock(&uvm.swap_data_lock);
   1476 			/* done!  return drum slot number */
   1477 			UVMHIST_LOG(pdhist,
   1478 			    "success!  returning %d slots starting at %d",
   1479 			    *nslots, result + sdp->swd_drumoffset, 0, 0);
   1480 			return (result + sdp->swd_drumoffset);
   1481 		}
   1482 	}
   1483 
   1484 	/* XXXMRG: BEGIN HACK */
   1485 	if (*nslots > 1 && lessok) {
   1486 		*nslots = 1;
   1487 		goto ReTry;	/* XXXMRG: ugh!  extent should support this for us */
   1488 	}
   1489 	/* XXXMRG: END HACK */
   1490 
   1491 	simple_unlock(&uvm.swap_data_lock);
   1492 	return 0;
   1493 }
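/*
 * illustrative caller sketch (not part of this file): pairing
 * uvm_swap_alloc() with uvm_swap_free() as described above.  a return
 * value of 0 means no drum space could be found.
 */
#if 0
	int nslots = npages;			/* hypothetical caller's count */
	int slot = uvm_swap_alloc(&nslots, TRUE);	/* may shrink nslots */

	if (slot == 0)
		return ENOMEM;			/* no space on any swapdev */
	/* ... do i/o via uvm_swap_put() / uvm_swap_get() ... */
	uvm_swap_free(slot, nslots);
#endif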
   1494 
   1495 /*
   1496  * uvm_swap_markbad: keep track of swap ranges where we've had i/o errors
   1497  *
   1498  * => we lock uvm.swap_data_lock
   1499  */
   1500 void
   1501 uvm_swap_markbad(startslot, nslots)
   1502 	int startslot;
   1503 	int nslots;
   1504 {
   1505 	struct swapdev *sdp;
   1506 	UVMHIST_FUNC("uvm_swap_markbad"); UVMHIST_CALLED(pdhist);
   1507 
   1508 	simple_lock(&uvm.swap_data_lock);
   1509 	sdp = swapdrum_getsdp(startslot);
   1510 
   1511 	/*
   1512 	 * we just keep track of how many pages have been marked bad
   1513 	 * in this device, to make everything add up in swap_off().
   1514 	 * we assume here that the range of slots will all be within
   1515 	 * one swap device.
   1516 	 */
   1517 
   1518 	sdp->swd_npgbad += nslots;
   1519 	UVMHIST_LOG(pdhist, "now %d bad", sdp->swd_npgbad, 0,0,0);
   1520 	simple_unlock(&uvm.swap_data_lock);
   1521 }
   1522 
   1523 /*
   1524  * uvm_swap_free: free swap slots
   1525  *
   1526  * => this can be all or part of an allocation made by uvm_swap_alloc
   1527  * => we lock uvm.swap_data_lock
   1528  */
   1529 void
   1530 uvm_swap_free(startslot, nslots)
   1531 	int startslot;
   1532 	int nslots;
   1533 {
   1534 	struct swapdev *sdp;
   1535 	UVMHIST_FUNC("uvm_swap_free"); UVMHIST_CALLED(pdhist);
   1536 
   1537 	UVMHIST_LOG(pdhist, "freeing %d slots starting at %d", nslots,
   1538 	    startslot, 0, 0);
   1539 
   1540 	/*
   1541 	 * ignore attempts to free the "bad" slot.
   1542 	 */
   1543 
   1544 	if (startslot == SWSLOT_BAD) {
   1545 		return;
   1546 	}
   1547 
   1548 	/*
   1549 	 * convert drum slot offset back to sdp, free the blocks
    1550 	 * in the extent, and return.   must hold uvm.swap_data_lock
    1551 	 * to do the lookup and access the extent.
   1552 	 */
   1553 
   1554 	simple_lock(&uvm.swap_data_lock);
   1555 	sdp = swapdrum_getsdp(startslot);
   1556 	KASSERT(uvmexp.nswapdev >= 1);
   1557 	KASSERT(sdp != NULL);
   1558 	KASSERT(sdp->swd_npginuse >= nslots);
   1559 	if (extent_free(sdp->swd_ex, startslot - sdp->swd_drumoffset, nslots,
   1560 			EX_MALLOCOK|EX_NOWAIT) != 0) {
   1561 		printf("warning: resource shortage: %d pages of swap lost\n",
   1562 			nslots);
   1563 	}
   1564 	sdp->swd_npginuse -= nslots;
   1565 	uvmexp.swpginuse -= nslots;
   1566 	simple_unlock(&uvm.swap_data_lock);
   1567 }
   1568 
   1569 /*
   1570  * uvm_swap_put: put any number of pages into a contig place on swap
   1571  *
   1572  * => can be sync or async
   1573  */
   1574 
   1575 int
   1576 uvm_swap_put(swslot, ppsp, npages, flags)
   1577 	int swslot;
   1578 	struct vm_page **ppsp;
   1579 	int npages;
   1580 	int flags;
   1581 {
   1582 	int error;
   1583 
   1584 	error = uvm_swap_io(ppsp, swslot, npages, B_WRITE |
   1585 	    ((flags & PGO_SYNCIO) ? 0 : B_ASYNC));
   1586 	return error;
   1587 }
   1588 
   1589 /*
   1590  * uvm_swap_get: get a single page from swap
   1591  *
   1592  * => usually a sync op (from fault)
   1593  */
   1594 
   1595 int
   1596 uvm_swap_get(page, swslot, flags)
   1597 	struct vm_page *page;
   1598 	int swslot, flags;
   1599 {
   1600 	int error;
   1601 
   1602 	uvmexp.nswget++;
   1603 	KASSERT(flags & PGO_SYNCIO);
   1604 	if (swslot == SWSLOT_BAD) {
   1605 		return EIO;
   1606 	}
   1607 	error = uvm_swap_io(&page, swslot, 1, B_READ |
   1608 	    ((flags & PGO_SYNCIO) ? 0 : B_ASYNC));
   1609 	if (error == 0) {
   1610 
   1611 		/*
   1612 		 * this page is no longer only in swap.
   1613 		 */
   1614 
   1615 		simple_lock(&uvm.swap_data_lock);
   1616 		KASSERT(uvmexp.swpgonly > 0);
   1617 		uvmexp.swpgonly--;
   1618 		simple_unlock(&uvm.swap_data_lock);
   1619 	}
   1620 	return error;
   1621 }
   1622 
   1623 /*
   1624  * uvm_swap_io: do an i/o operation to swap
   1625  */
   1626 
   1627 static int
   1628 uvm_swap_io(pps, startslot, npages, flags)
   1629 	struct vm_page **pps;
   1630 	int startslot, npages, flags;
   1631 {
   1632 	daddr_t startblk;
   1633 	struct	buf *bp;
   1634 	vaddr_t kva;
   1635 	int	error, s, mapinflags;
   1636 	boolean_t write, async;
   1637 	UVMHIST_FUNC("uvm_swap_io"); UVMHIST_CALLED(pdhist);
   1638 
   1639 	UVMHIST_LOG(pdhist, "<- called, startslot=%d, npages=%d, flags=%d",
   1640 	    startslot, npages, flags, 0);
   1641 
   1642 	write = (flags & B_READ) == 0;
   1643 	async = (flags & B_ASYNC) != 0;
   1644 
   1645 	/*
   1646 	 * convert starting drum slot to block number
   1647 	 */
   1648 
   1649 	startblk = btodb((u_int64_t)startslot << PAGE_SHIFT);
   1650 
   1651 	/*
   1652 	 * first, map the pages into the kernel.
   1653 	 */
   1654 
   1655 	mapinflags = !write ?
   1656 		UVMPAGER_MAPIN_WAITOK|UVMPAGER_MAPIN_READ :
   1657 		UVMPAGER_MAPIN_WAITOK|UVMPAGER_MAPIN_WRITE;
   1658 	kva = uvm_pagermapin(pps, npages, mapinflags);
   1659 
   1660 	/*
   1661 	 * now allocate a buf for the i/o.
   1662 	 */
   1663 
   1664 	s = splbio();
   1665 	bp = pool_get(&bufpool, PR_WAITOK);
   1666 	splx(s);
   1667 
   1668 	/*
    1669 	 * fill in the bp.   we currently route our i/o through
   1670 	 * /dev/drum's vnode [swapdev_vp].
   1671 	 */
   1672 
   1673 	bp->b_flags = B_BUSY | B_NOCACHE | (flags & (B_READ|B_ASYNC));
   1674 	bp->b_proc = &proc0;	/* XXX */
   1675 	bp->b_vnbufs.le_next = NOLIST;
   1676 	bp->b_data = (caddr_t)kva;
   1677 	bp->b_blkno = startblk;
   1678 	bp->b_vp = swapdev_vp;
   1679 	bp->b_dev = swapdev_vp->v_rdev;
   1680 	bp->b_bufsize = bp->b_bcount = npages << PAGE_SHIFT;
   1681 	LIST_INIT(&bp->b_dep);
   1682 
   1683 	/*
   1684 	 * bump v_numoutput (counter of number of active outputs).
   1685 	 */
   1686 
   1687 	if (write) {
   1688 		s = splbio();
   1689 		swapdev_vp->v_numoutput++;
   1690 		splx(s);
   1691 	}
   1692 
   1693 	/*
   1694 	 * for async ops we must set up the iodone handler.
   1695 	 */
   1696 
   1697 	if (async) {
   1698 		bp->b_flags |= B_CALL;
   1699 		bp->b_iodone = uvm_aio_biodone;
   1700 		UVMHIST_LOG(pdhist, "doing async!", 0, 0, 0, 0);
   1701 	}
   1702 	UVMHIST_LOG(pdhist,
   1703 	    "about to start io: data = %p blkno = 0x%x, bcount = %ld",
   1704 	    bp->b_data, bp->b_blkno, bp->b_bcount, 0);
   1705 
   1706 	/*
   1707 	 * now we start the I/O, and if async, return.
   1708 	 */
   1709 
   1710 	VOP_STRATEGY(bp);
   1711 	if (async)
   1712 		return 0;
   1713 
   1714 	/*
   1715 	 * must be sync i/o.   wait for it to finish
   1716 	 */
   1717 
   1718 	error = biowait(bp);
   1719 
   1720 	/*
   1721 	 * kill the pager mapping
   1722 	 */
   1723 
   1724 	uvm_pagermapout(kva, npages);
   1725 
   1726 	/*
   1727 	 * now dispose of the buf and we're done.
   1728 	 */
   1729 
   1730 	s = splbio();
   1731 	if (write)
   1732 		vwakeup(bp);
   1733 	pool_put(&bufpool, bp);
   1734 	splx(s);
   1735 	UVMHIST_LOG(pdhist, "<- done (sync)  error=%d", error, 0, 0, 0);
   1736 	return (error);
   1737 }
   1738