/*	$NetBSD: uvm_swap.c,v 1.22 1998/11/08 19:41:49 mycroft Exp $	*/

/*
 * Copyright (c) 1995, 1996, 1997 Matthew R. Green
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: NetBSD: vm_swap.c,v 1.52 1997/12/02 13:47:37 pk Exp
 * from: Id: uvm_swap.c,v 1.1.2.42 1998/02/02 20:38:06 chuck Exp
 */

#include "fs_nfs.h"
#include "opt_uvmhist.h"
#include "opt_compat_netbsd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/disklabel.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/extent.h>
#include <sys/mount.h>
#include <sys/pool.h>
#include <sys/syscallargs.h>
#include <sys/swap.h>

#include <vm/vm.h>
#include <vm/vm_conf.h>

#include <uvm/uvm.h>

#include <miscfs/specfs/specdev.h>

/*
 * uvm_swap.c: manage configuration and i/o to swap space.
 */

/*
 * swap space is managed in the following way:
 *
 * each swap partition or file is described by a "swapdev" structure.
 * each "swapdev" structure contains a "swapent" structure which contains
 * information that is passed up to the user (via system calls).
 *
 * each swap partition is assigned a "priority" (int) which controls
 * swap partition usage.
 *
 * the system maintains a global data structure describing all swap
 * partitions/files.   there is a sorted LIST of "swappri" structures
 * which describe "swapdev"'s at that priority.   this LIST is headed
 * by the "swap_priority" global var.    each "swappri" contains a
 * CIRCLEQ of "swapdev" structures at that priority.
 *
 * the system maintains a fixed pool of "swapbuf" structures for use
 * at swap i/o time.  a swapbuf includes a "buf" structure and an
 * "aiodesc" [we want to avoid malloc()'ing anything at swapout time
 * since memory may be low].
 *
 * locking:
 *  - swap_syscall_lock (sleep lock): this lock serializes the swapctl
 *    system call and prevents the swap priority list from changing
 *    while we are in the middle of a system call (e.g. SWAP_STATS).
 *  - swap_data_lock (simple_lock): this lock protects all swap data
 *    structures including the priority list, the swapdev structures,
 *    and the swapmap extent.
 *  - swap_buf_lock (simple_lock): this lock protects the free swapbuf
 *    pool.
 *
 * each swap device has the following info:
 *  - swap device in use (could be disabled, preventing future use)
 *  - swap enabled (allows new allocations on swap)
 *  - map info in /dev/drum
 *  - vnode pointer
 * for swap files only:
 *  - block size
 *  - max byte count in buffer
 *  - buffer
 *  - credentials to use when doing i/o to file
 *
 * userland controls and configures swap with the swapctl(2) system call.
 * sys_swapctl() performs the following operations:
 *  [1] SWAP_NSWAP: returns the number of swap devices currently configured
 *  [2] SWAP_STATS: given a pointer to an array of swapent structures
 *	(passed in via "arg") of a size passed in via "misc" ... we load
 *	the current swap config into the array.
 *  [3] SWAP_ON: given a pathname in arg (could be device or file) and a
 *	priority in "misc", start swapping on it.
 *  [4] SWAP_OFF: as SWAP_ON, but stops swapping to a device
 *  [5] SWAP_CTL: changes the priority of a swap device (new priority in
 *	"misc")
 *
 * [an illustrative userland usage sketch follows below]
 */

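/*
 * illustrative sketch [not part of the original file]: how a userland
 * program might drive the swapctl(2) operations listed above.  this is
 * a minimal example, assuming the usual userland declarations of
 * swapctl(), SWAP_NSWAP, SWAP_STATS and "struct swapent"; error
 * handling is omitted for brevity.
 */
#if 0
#include <sys/swap.h>
#include <stdio.h>
#include <stdlib.h>

int
main()
{
	struct swapent *sep;
	int i, nswap;

	/* [1] SWAP_NSWAP: number of configured swap devices */
	nswap = swapctl(SWAP_NSWAP, NULL, 0);

	/* [2] SWAP_STATS: load the current config into an array */
	sep = malloc(nswap * sizeof(*sep));
	nswap = swapctl(SWAP_STATS, (void *)sep, nswap);

	for (i = 0; i < nswap; i++)
		printf("%s: %d blocks, %d in use, priority %d\n",
		    sep[i].se_path, sep[i].se_nblks, sep[i].se_inuse,
		    sep[i].se_priority);
	free(sep);
	return (0);
}
#endif
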
/*
 * SWAP_TO_FILES: allows swapping to plain files.
 */

#define SWAP_TO_FILES

/*
 * swapdev: describes a single swap partition/file
 *
 * note the following should be true:
 * swd_inuse <= swd_nblks  [number of blocks in use is <= total blocks]
 * swd_nblks <= swd_mapsize [because mapsize includes miniroot+disklabel]
 */
struct swapdev {
	struct oswapent swd_ose;
#define	swd_dev		swd_ose.ose_dev		/* device id */
#define	swd_flags	swd_ose.ose_flags	/* flags:inuse/enable/fake */
#define	swd_priority	swd_ose.ose_priority	/* our priority */
	/* also: swd_ose.ose_nblks, swd_ose.ose_inuse */
	char			*swd_path;	/* saved pathname of device */
	int			swd_pathlen;	/* length of pathname */
	int			swd_npages;	/* #pages we can use */
	int			swd_npginuse;	/* #pages in use */
	int			swd_drumoffset;	/* page0 offset in drum */
	int			swd_drumsize;	/* #pages in drum */
	struct extent		*swd_ex;	/* extent for this swapdev */
	struct vnode		*swd_vp;	/* backing vnode */
	CIRCLEQ_ENTRY(swapdev)	swd_next;	/* priority circleq */

#ifdef SWAP_TO_FILES
	int			swd_bsize;	/* blocksize (bytes) */
	int			swd_maxactive;	/* max active i/o reqs */
	struct buf		swd_tab;	/* buffer list */
	struct ucred		*swd_cred;	/* cred for file access */
#endif
};

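/*
 * illustrative sketch [not part of the original file]: the swapdev
 * invariants noted above, spelled out as DIAGNOSTIC-style checks.
 * "swapdev_sanity" is a hypothetical helper shown only to make the
 * field relationships explicit; nothing in this file calls it.
 */
#if 0
static void
swapdev_sanity(sdp)
	struct swapdev *sdp;
{
	/* pages in use can never exceed the pages the device provides */
	if (sdp->swd_npginuse < 0 || sdp->swd_npginuse > sdp->swd_npages)
		panic("swapdev_sanity: npginuse out of range");
	/* the device's drum mapping covers all of its usable pages */
	if (sdp->swd_drumsize < sdp->swd_npages)
		panic("swapdev_sanity: drum mapping too small");
}
#endif
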
/*
 * swap device priority entry; the list is kept sorted on `spi_priority'.
 */
struct swappri {
	int			spi_priority;     /* priority */
	CIRCLEQ_HEAD(spi_swapdev, swapdev)	spi_swapdev;
	/* circleq of swapdevs at this priority */
	LIST_ENTRY(swappri)	spi_swappri;      /* global list of pri's */
};

/*
 * swapbuf: swap buffer plus async i/o info
 */
struct swapbuf {
	struct buf sw_buf;		/* a buffer structure */
	struct uvm_aiodesc sw_aio;	/* aiodesc structure, used if ASYNC */
	SIMPLEQ_ENTRY(swapbuf) sw_sq;	/* free list pointer */
};

/*
 * The following two structures are used to keep track of data transfers
 * on swap devices associated with regular files.
 * NOTE: this code is more or less a copy of vnd.c; we use the same
 * structure names here to ease porting.
 */
struct vndxfer {
	struct buf	*vx_bp;		/* Pointer to parent buffer */
	struct swapdev	*vx_sdp;
	int		vx_error;
	int		vx_pending;	/* # of pending aux buffers */
	int		vx_flags;
#define VX_BUSY		1
#define VX_DEAD		2
};

struct vndbuf {
	struct buf	vb_buf;
	struct vndxfer	*vb_xfer;
};


/*
 * We keep a pool of vndbuf's and vndxfer structures.
 */
struct pool *vndxfer_pool;
struct pool *vndbuf_pool;

#define	getvndxfer(vnx)	do {						\
	int s = splbio();						\
	vnx = (struct vndxfer *)					\
		pool_get(vndxfer_pool, PR_MALLOCOK|PR_WAITOK);		\
	splx(s);							\
} while (0)

#define putvndxfer(vnx) {						\
	pool_put(vndxfer_pool, (void *)(vnx));				\
}

#define	getvndbuf(vbp)	do {						\
	int s = splbio();						\
	vbp = (struct vndbuf *)						\
		pool_get(vndbuf_pool, PR_MALLOCOK|PR_WAITOK);		\
	splx(s);							\
} while (0)

#define putvndbuf(vbp) {						\
	pool_put(vndbuf_pool, (void *)(vbp));				\
}

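/*
 * illustrative sketch [not part of the original file]: the intended
 * pairing of the get/put macros above.  the "get" macros raise splbio
 * around pool_get() and may sleep (PR_WAITOK); a completion handler
 * later releases the structures with the matching "put" macros, as
 * sw_reg_strategy/sw_reg_iodone below actually do.
 */
#if 0
	struct vndxfer *vnx;
	struct vndbuf *vbp;

	getvndxfer(vnx);	/* allocate a transfer head (may sleep) */
	getvndbuf(vbp);		/* allocate an aux buffer (may sleep) */
	vbp->vb_xfer = vnx;	/* link the buffer back to its transfer */
	/* ... issue the i/o; on completion: ... */
	putvndbuf(vbp);
	putvndxfer(vnx);
#endif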

/*
 * local variables
 */
static struct extent *swapmap;		/* controls the mapping of /dev/drum */
SIMPLEQ_HEAD(swapbufhead, swapbuf);
struct pool *swapbuf_pool;

/* list of all active swap devices [by priority] */
LIST_HEAD(swap_priority, swappri);
static struct swap_priority swap_priority;

/* locks */
lock_data_t swap_syscall_lock;
static simple_lock_data_t swap_data_lock;

/*
 * prototypes
 */
static void		 swapdrum_add __P((struct swapdev *, int));
static struct swapdev	*swapdrum_getsdp __P((int));

static struct swapdev	*swaplist_find __P((struct vnode *, int));
static void		 swaplist_insert __P((struct swapdev *,
					     struct swappri *, int));
static void		 swaplist_trim __P((void));

static int swap_on __P((struct proc *, struct swapdev *));
#ifdef SWAP_OFF_WORKS
static int swap_off __P((struct proc *, struct swapdev *));
#endif

#ifdef SWAP_TO_FILES
static void sw_reg_strategy __P((struct swapdev *, struct buf *, int));
static void sw_reg_iodone __P((struct buf *));
static void sw_reg_start __P((struct swapdev *));
#endif

static void uvm_swap_aiodone __P((struct uvm_aiodesc *));
static void uvm_swap_bufdone __P((struct buf *));
static int uvm_swap_io __P((struct vm_page **, int, int, int));

/*
 * uvm_swap_init: init the swap system data structures and locks
 *
 * => called at boot time from init_main.c after the filesystems
 *	are brought up (which happens after uvm_init())
 */
void
uvm_swap_init()
{
	UVMHIST_FUNC("uvm_swap_init");

	UVMHIST_CALLED(pdhist);
	/*
	 * first, init the swap list, its counter, and its lock.
	 * then get a handle on the vnode for /dev/drum by using
	 * its dev_t number ("swapdev", from MD conf.c).
	 */

	LIST_INIT(&swap_priority);
	uvmexp.nswapdev = 0;
	lockinit(&swap_syscall_lock, PVM, "swapsys", 0, 0);
	simple_lock_init(&swap_data_lock);

	if (bdevvp(swapdev, &swapdev_vp))
		panic("uvm_swap_init: can't get vnode for swap device");

	/*
	 * create swap block resource map to map /dev/drum.   the range
	 * from 1 to INT_MAX allows 2 gigablocks of swap space.  note
	 * that block 0 is reserved (used to indicate an allocation
	 * failure, or no allocation).
	 */
	swapmap = extent_create("swapmap", 1, INT_MAX,
				M_VMSWAP, 0, 0, EX_NOWAIT);
	if (swapmap == 0)
		panic("uvm_swap_init: extent_create failed");

	/*
	 * allocate our private pool of "swapbuf" structures (includes
	 * a "buf" structure).  ["nswbuf" comes from param.c and can
	 * be adjusted by MD code before we get here].
	 */

	swapbuf_pool =
		pool_create(sizeof(struct swapbuf), 0, 0, 0, "swp buf", 0,
			    NULL, NULL, 0);
	if (swapbuf_pool == NULL)
		panic("swapinit: pool_create failed");
	/* XXX - set a maximum on swapbuf_pool? */

	vndxfer_pool =
		pool_create(sizeof(struct vndxfer), 0, 0, 0, "swp vnx", 0,
			    NULL, NULL, 0);
	if (vndxfer_pool == NULL)
		panic("swapinit: pool_create failed");

	vndbuf_pool =
		pool_create(sizeof(struct vndbuf), 0, 0, 0, "swp vnd", 0,
			    NULL, NULL, 0);
	if (vndbuf_pool == NULL)
		panic("swapinit: pool_create failed");
	/*
	 * done!
	 */
	UVMHIST_LOG(pdhist, "<- done", 0, 0, 0, 0);
}

/*
 * swaplist functions: functions that operate on the list of swap
 * devices on the system.
 */

/*
 * swaplist_insert: insert swap device "sdp" into the global list
 *
 * => caller must hold both swap_syscall_lock and swap_data_lock
 * => caller must provide a newly malloc'd swappri structure (we will
 *	FREE it if we don't need it... this is to prevent malloc from
 *	blocking here while adding swap)
 */
static void
swaplist_insert(sdp, newspp, priority)
	struct swapdev *sdp;
	struct swappri *newspp;
	int priority;
{
	struct swappri *spp, *pspp;
	UVMHIST_FUNC("swaplist_insert"); UVMHIST_CALLED(pdhist);

	/*
	 * find entry at or after which to insert the new device.
	 */
	for (pspp = NULL, spp = swap_priority.lh_first; spp != NULL;
	     spp = spp->spi_swappri.le_next) {
		if (priority <= spp->spi_priority)
			break;
		pspp = spp;
	}

	/*
	 * new priority?
	 */
	if (spp == NULL || spp->spi_priority != priority) {
		spp = newspp;  /* use newspp! */
		UVMHIST_LOG(pdhist, "created new swappri = %d", priority, 0, 0, 0);

		spp->spi_priority = priority;
		CIRCLEQ_INIT(&spp->spi_swapdev);

		if (pspp)
			LIST_INSERT_AFTER(pspp, spp, spi_swappri);
		else
			LIST_INSERT_HEAD(&swap_priority, spp, spi_swappri);
	} else {
	  	/* we don't need a new priority structure, free it */
		FREE(newspp, M_VMSWAP);
	}

	/*
	 * priority found (or created).   now insert on the priority's
	 * circleq list and bump the total number of swapdevs.
	 */
	sdp->swd_priority = priority;
	CIRCLEQ_INSERT_TAIL(&spp->spi_swapdev, sdp, swd_next);
	uvmexp.nswapdev++;

	/*
	 * done!
	 */
}

/*
 * swaplist_find: find and optionally remove a swap device from the
 *	global list.
 *
 * => caller must hold both swap_syscall_lock and swap_data_lock
 * => we return the swapdev we found (and removed)
 */
static struct swapdev *
swaplist_find(vp, remove)
	struct vnode *vp;
	boolean_t remove;
{
	struct swapdev *sdp;
	struct swappri *spp;

	/*
	 * search the lists for the requested vp
	 */
	for (spp = swap_priority.lh_first; spp != NULL;
	     spp = spp->spi_swappri.le_next) {
		for (sdp = spp->spi_swapdev.cqh_first;
		     sdp != (void *)&spp->spi_swapdev;
		     sdp = sdp->swd_next.cqe_next)
			if (sdp->swd_vp == vp) {
				if (remove) {
					CIRCLEQ_REMOVE(&spp->spi_swapdev,
					    sdp, swd_next);
					uvmexp.nswapdev--;
				}
				return(sdp);
			}
	}
	return (NULL);
}


/*
 * swaplist_trim: scan priority list for empty priority entries and kill
 *	them.
 *
 * => caller must hold both swap_syscall_lock and swap_data_lock
 */
static void
swaplist_trim()
{
	struct swappri *spp, *nextspp;

	for (spp = swap_priority.lh_first; spp != NULL; spp = nextspp) {
		nextspp = spp->spi_swappri.le_next;
		if (spp->spi_swapdev.cqh_first != (void *)&spp->spi_swapdev)
			continue;
		LIST_REMOVE(spp, spi_swappri);
		free((caddr_t)spp, M_VMSWAP);
	}
}

/*
 * swapdrum_add: add a "swapdev"'s blocks into /dev/drum's area.
 *
 * => caller must hold swap_syscall_lock
 * => swap_data_lock should be unlocked (we may sleep)
 */
static void
swapdrum_add(sdp, npages)
	struct swapdev *sdp;
	int	npages;
{
	u_long result;

	if (extent_alloc(swapmap, npages, EX_NOALIGN, EX_NOBOUNDARY,
	    EX_WAITOK, &result))
		panic("swapdrum_add");

	sdp->swd_drumoffset = result;
	sdp->swd_drumsize = npages;
}

/*
 * swapdrum_getsdp: given a page offset in /dev/drum, convert it back
 *	to the "swapdev" that maps that section of the drum.
 *
 * => each swapdev takes one big contig chunk of the drum
 * => caller must hold swap_data_lock
 */
static struct swapdev *
swapdrum_getsdp(pgno)
	int pgno;
{
	struct swapdev *sdp;
	struct swappri *spp;

	for (spp = swap_priority.lh_first; spp != NULL;
	     spp = spp->spi_swappri.le_next)
		for (sdp = spp->spi_swapdev.cqh_first;
		     sdp != (void *)&spp->spi_swapdev;
		     sdp = sdp->swd_next.cqe_next)
			if (pgno >= sdp->swd_drumoffset &&
			    pgno < (sdp->swd_drumoffset + sdp->swd_drumsize)) {
				return sdp;
			}
	return NULL;
}

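/*
 * illustrative sketch [not part of the original file]: the drum address
 * arithmetic that swapdrum_getsdp() inverts.  a /dev/drum page number
 * maps to a (swapdev, device-relative page) pair and from there to a
 * disk block, exactly as swstrategy() does below.
 */
#if 0
	int pgno;			/* page number in /dev/drum */
	struct swapdev *sdp;
	int devpage, bn;

	sdp = swapdrum_getsdp(pgno);		/* which swapdev? */
	devpage = pgno - sdp->swd_drumoffset;	/* page # on that device */
	bn = btodb(devpage << PAGE_SHIFT);	/* disk block on device */
#endif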

/*
 * sys_swapctl: main entry point for swapctl(2) system call
 * 	[with two helper functions: swap_on and swap_off]
 */
int
sys_swapctl(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_swapctl_args /* {
		syscallarg(int) cmd;
		syscallarg(void *) arg;
		syscallarg(int) misc;
	} */ *uap = (struct sys_swapctl_args *)v;
	struct vnode *vp;
	struct nameidata nd;
	struct swappri *spp;
	struct swapdev *sdp;
	struct swapent *sep;
	char	userpath[PATH_MAX + 1];
	size_t	len;
	int	count, error, misc;
	int	priority;
	UVMHIST_FUNC("sys_swapctl"); UVMHIST_CALLED(pdhist);

	misc = SCARG(uap, misc);

	/*
	 * ensure serialized syscall access by grabbing the swap_syscall_lock
	 */
	lockmgr(&swap_syscall_lock, LK_EXCLUSIVE, (void *)0);

	/*
	 * we handle the non-priv NSWAP and STATS requests first.
	 *
	 * SWAP_NSWAP: return number of config'd swap devices
	 * [can also be obtained with uvmexp sysctl]
	 */
	if (SCARG(uap, cmd) == SWAP_NSWAP) {
		UVMHIST_LOG(pdhist, "<- done SWAP_NSWAP=%d", uvmexp.nswapdev,
		    0, 0, 0);
		*retval = uvmexp.nswapdev;
		error = 0;
		goto out;
	}

	/*
	 * SWAP_STATS: get stats on current # of configured swap devs
	 *
	 * note that the swap_priority list can't change as long
	 * as we are holding the swap_syscall_lock.  we don't want
	 * to grab the swap_data_lock because we may fault&sleep during
	 * copyout() and we don't want to be holding that lock then!
	 */
	if (SCARG(uap, cmd) == SWAP_STATS
#if defined(COMPAT_13)
	    || SCARG(uap, cmd) == SWAP_OSTATS
#endif
	    ) {
		sep = (struct swapent *)SCARG(uap, arg);
		count = 0;

		for (spp = swap_priority.lh_first; spp != NULL;
		    spp = spp->spi_swappri.le_next) {
			for (sdp = spp->spi_swapdev.cqh_first;
			     sdp != (void *)&spp->spi_swapdev && misc-- > 0;
			     sdp = sdp->swd_next.cqe_next) {
			  	/*
				 * backwards compatibility for system call.
				 * note that we use 'struct oswapent' as an
				 * overlay into both 'struct swapdev' and
				 * the userland 'struct swapent', as we
				 * want to retain backwards compatibility
				 * with NetBSD 1.3.
				 */
				sdp->swd_ose.ose_inuse =
				    btodb(sdp->swd_npginuse << PAGE_SHIFT);
				error = copyout((caddr_t)&sdp->swd_ose,
				    (caddr_t)sep, sizeof(struct oswapent));

				/* now copy out the path if necessary */
#if defined(COMPAT_13)
				if (error == 0 && SCARG(uap, cmd) == SWAP_STATS)
#else
				if (error == 0)
#endif
					error = copyout((caddr_t)sdp->swd_path,
					    (caddr_t)&sep->se_path,
					    sdp->swd_pathlen);

				if (error)
					goto out;
				count++;
#if defined(COMPAT_13)
				if (SCARG(uap, cmd) == SWAP_OSTATS)
					((struct oswapent *)sep)++;
				else
#endif
					sep++;
			}
		}

		UVMHIST_LOG(pdhist, "<- done SWAP_STATS", 0, 0, 0, 0);

		*retval = count;
		error = 0;
		goto out;
	}

	/*
	 * all other requests require superuser privs.   verify.
	 */
	if ((error = suser(p->p_ucred, &p->p_acflag)))
		goto out;

	/*
	 * at this point we expect a path name in arg.   we will
	 * use namei() to gain a vnode reference (vref), and lock
	 * the vnode (VOP_LOCK).
	 *
	 * XXX: a NULL arg means use the root vnode pointer (e.g. for
	 * miniroot)
	 */
	if (SCARG(uap, arg) == NULL) {
		vp = rootvp;		/* miniroot */
		if (vget(vp, LK_EXCLUSIVE)) {
			error = EBUSY;
			goto out;
		}
		if (SCARG(uap, cmd) == SWAP_ON &&
		    copystr("miniroot", userpath, sizeof userpath, &len))
			panic("swapctl: miniroot copy failed");
	} else {
		int	space;
		char	*where;

		if (SCARG(uap, cmd) == SWAP_ON) {
			if ((error = copyinstr(SCARG(uap, arg), userpath,
			    sizeof userpath, &len)))
				goto out;
			space = UIO_SYSSPACE;
			where = userpath;
		} else {
			space = UIO_USERSPACE;
			where = (char *)SCARG(uap, arg);
		}
		NDINIT(&nd, LOOKUP, FOLLOW|LOCKLEAF, space, where, p);
		if ((error = namei(&nd)))
			goto out;
		vp = nd.ni_vp;
	}
	/* note: "vp" is referenced and locked */

	error = 0;		/* assume no error */
	switch (SCARG(uap, cmd)) {
	case SWAP_CTL:
		/*
		 * get new priority, remove old entry (if any) and then
		 * reinsert it in the correct place.  finally, prune out
		 * any empty priority structures.
		 */
		priority = SCARG(uap, misc);
		spp = (struct swappri *)
			malloc(sizeof *spp, M_VMSWAP, M_WAITOK);
		simple_lock(&swap_data_lock);
		if ((sdp = swaplist_find(vp, 1)) == NULL) {
			error = ENOENT;
		} else {
			swaplist_insert(sdp, spp, priority);
			swaplist_trim();
		}
		simple_unlock(&swap_data_lock);
		if (error)
			free(spp, M_VMSWAP);
		break;

	case SWAP_ON:
		/*
		 * check for duplicates.   if none found, then insert a
		 * dummy entry on the list to prevent someone else from
		 * trying to enable this device while we are working on
		 * it.
		 */
		priority = SCARG(uap, misc);
		simple_lock(&swap_data_lock);
		if ((sdp = swaplist_find(vp, 0)) != NULL) {
			error = EBUSY;
			simple_unlock(&swap_data_lock);
			break;
		}
		sdp = (struct swapdev *)
			malloc(sizeof *sdp, M_VMSWAP, M_WAITOK);
		spp = (struct swappri *)
			malloc(sizeof *spp, M_VMSWAP, M_WAITOK);
		memset(sdp, 0, sizeof(*sdp));
		sdp->swd_flags = SWF_FAKE;	/* placeholder only */
		sdp->swd_vp = vp;
		sdp->swd_dev = (vp->v_type == VBLK) ? vp->v_rdev : NODEV;
#ifdef SWAP_TO_FILES
		/*
		 * XXX Is NFS elaboration necessary?
		 */
		if (vp->v_type == VREG)
			sdp->swd_cred = crdup(p->p_ucred);
#endif
		swaplist_insert(sdp, spp, priority);
		simple_unlock(&swap_data_lock);

		sdp->swd_pathlen = len;
		sdp->swd_path = malloc(sdp->swd_pathlen, M_VMSWAP, M_WAITOK);
		if (copystr(userpath, sdp->swd_path, sdp->swd_pathlen, 0) != 0)
			panic("swapctl: copystr");
		/*
		 * we've now got a FAKE placeholder in the swap list.
		 * now attempt to enable swap on it.  if we fail, undo
		 * what we've done and kill the fake entry we just inserted.
		 * if swap_on is a success, it will clear the SWF_FAKE flag
		 */
		if ((error = swap_on(p, sdp)) != 0) {
			simple_lock(&swap_data_lock);
			(void) swaplist_find(vp, 1);  /* kill fake entry */
			swaplist_trim();
			simple_unlock(&swap_data_lock);
#ifdef SWAP_TO_FILES
			if (vp->v_type == VREG)
				crfree(sdp->swd_cred);
#endif
			free(sdp->swd_path, M_VMSWAP);
			free((caddr_t)sdp, M_VMSWAP);
			break;
		}

		/*
		 * got it!   now add a second reference to vp so that
		 * we keep a reference to the vnode after we return.
		 */
		vref(vp);
		break;

	case SWAP_OFF:
		UVMHIST_LOG(pdhist, "someone is using SWAP_OFF...??", 0,0,0,0);
#ifdef SWAP_OFF_WORKS
		/*
		 * find the entry of interest and ensure it is enabled.
		 */
		simple_lock(&swap_data_lock);
		if ((sdp = swaplist_find(vp, 0)) == NULL) {
			simple_unlock(&swap_data_lock);
			error = ENXIO;
			break;
		}
		/*
		 * If a device isn't in use or enabled, we
		 * can't stop swapping from it (again).
		 */
		if ((sdp->swd_flags & (SWF_INUSE|SWF_ENABLE)) == 0) {
			simple_unlock(&swap_data_lock);
			error = EBUSY;
			break;
		}
		/* XXXCDC: should we call with list locked or unlocked? */
		if ((error = swap_off(p, sdp)) != 0)
			break;
		/* XXXCDC: might need relock here */

		/*
		 * now we can kill the entry.
		 */
		if ((sdp = swaplist_find(vp, 1)) == NULL) {
			error = ENXIO;
			break;
		}
		simple_unlock(&swap_data_lock);
		free((caddr_t)sdp, M_VMSWAP);
#else
		error = EINVAL;
#endif
		break;

	default:
		UVMHIST_LOG(pdhist, "unhandled command: %#x",
		    SCARG(uap, cmd), 0, 0, 0);
		error = EINVAL;
	}

	/*
	 * done!   use vput to drop our reference and unlock
	 */
	vput(vp);
out:
	lockmgr(&swap_syscall_lock, LK_RELEASE, (void *)0);

	UVMHIST_LOG(pdhist, "<- done!  error=%d", error, 0, 0, 0);
	return (error);
}

/*
 * swap_on: attempt to enable a swapdev for swapping.   note that the
 *	swapdev is already on the global list, but disabled (marked
 *	SWF_FAKE).
 *
 * => we avoid the start of the disk (to protect disk labels)
 * => we also avoid the miniroot, if we are swapping to root.
 * => caller should leave swap_data_lock unlocked, we may lock it
 *	if needed.
 */
static int
swap_on(p, sdp)
	struct proc *p;
	struct swapdev *sdp;
{
	static int count = 0;	/* static */
	struct vnode *vp;
	int error, npages, nblocks, size;
	long addr;
#ifdef SWAP_TO_FILES
	struct vattr va;
#endif
#ifdef NFS
	extern int (**nfsv2_vnodeop_p) __P((void *));
#endif /* NFS */
	dev_t dev;
	char *name;
	UVMHIST_FUNC("swap_on"); UVMHIST_CALLED(pdhist);

	/*
	 * we want to enable swapping on sdp.   the swd_vp contains
	 * the vnode we want (locked and ref'd), and the swd_dev
	 * contains the dev_t of the file, if it is a block device.
	 */

	vp = sdp->swd_vp;
	dev = sdp->swd_dev;

	/*
	 * open the swap file (mostly useful for block device files to
	 * let device driver know what is up).
	 *
	 * we skip the open/close for root on swap because the root
	 * has already been opened when root was mounted (mountroot).
	 */
	if (vp != rootvp) {
		if ((error = VOP_OPEN(vp, FREAD|FWRITE, p->p_ucred, p)))
			return (error);
	}

	/* XXX this only works for block devices */
	UVMHIST_LOG(pdhist, "  dev=%d, major(dev)=%d", dev, major(dev), 0,0);

	/*
	 * we now need to determine the size of the swap area.   for
	 * block specials we can call the d_psize function.
	 * for normal files, we must stat [get attrs].
	 *
	 * we put the result in nblocks.
	 * for normal files, we also want the filesystem block size
	 * (which we get with statfs).
	 */
	switch (vp->v_type) {
	case VBLK:
		if (bdevsw[major(dev)].d_psize == 0 ||
		    (nblocks = (*bdevsw[major(dev)].d_psize)(dev)) == -1) {
			error = ENXIO;
			goto bad;
		}
		break;

#ifdef SWAP_TO_FILES
	case VREG:
		if ((error = VOP_GETATTR(vp, &va, p->p_ucred, p)))
			goto bad;
		nblocks = (int)btodb(va.va_size);
		if ((error =
		     VFS_STATFS(vp->v_mount, &vp->v_mount->mnt_stat, p)) != 0)
			goto bad;

		sdp->swd_bsize = vp->v_mount->mnt_stat.f_iosize;
		/*
		 * limit the max # of outstanding I/O requests we issue
		 * at any one time.   take it easy on NFS servers.
		 */
#ifdef NFS
		if (vp->v_op == nfsv2_vnodeop_p)
			sdp->swd_maxactive = 2; /* XXX */
		else
#endif /* NFS */
			sdp->swd_maxactive = 8; /* XXX */
		break;
#endif

	default:
		error = ENXIO;
		goto bad;
	}

	/*
	 * save nblocks in a safe place and convert to pages.
	 */

	sdp->swd_ose.ose_nblks = nblocks;
	npages = dbtob((u_int64_t)nblocks) >> PAGE_SHIFT;

	/*
	 * for block special files, we want to make sure that we leave
	 * the disklabel and bootblocks alone, so we arrange to skip
	 * over them (randomly choosing to skip PAGE_SIZE bytes).
	 * note that because of this the "size" can be less than the
	 * actual number of blocks on the device.
	 */
	if (vp->v_type == VBLK) {
		/* we use pages 1 to (size - 1) [inclusive] */
		size = npages - 1;
		addr = 1;
	} else {
		/* we use pages 0 to (size - 1) [inclusive] */
		size = npages;
		addr = 0;
	}

	/*
	 * make sure we have enough blocks for a reasonable sized swap
	 * area.   we want at least one page.
	 */

	if (size < 1) {
		UVMHIST_LOG(pdhist, "  size < 1!!", 0, 0, 0, 0);
		error = EINVAL;
		goto bad;
	}

	UVMHIST_LOG(pdhist, "  dev=%x: size=%d addr=%ld\n", dev, size, addr, 0);

	/*
	 * now we need to allocate an extent to manage this swap device
	 */
	name = malloc(12, M_VMSWAP, M_WAITOK);
	sprintf(name, "swap0x%04x", count++);

	/* note that extent_create's 3rd arg is inclusive, thus "- 1" */
	sdp->swd_ex = extent_create(name, 0, npages - 1, M_VMSWAP,
				    0, 0, EX_WAITOK);
	/* allocate the `saved' region from the extent so it won't be used */
	if (addr) {
		if (extent_alloc_region(sdp->swd_ex, 0, addr, EX_WAITOK))
			panic("disklabel region");
		sdp->swd_npginuse += addr;
		uvmexp.swpginuse += addr;
	}


	/*
	 * if the vnode we are swapping to is the root vnode
	 * (i.e. we are swapping to the miniroot) then we want
	 * to make sure we don't overwrite it.   do a statfs to
	 * find its size and skip over it.
	 */
	if (vp == rootvp) {
		struct mount *mp;
		struct statfs *sp;
		int rootblocks, rootpages;

		mp = rootvnode->v_mount;
		sp = &mp->mnt_stat;
		rootblocks = sp->f_blocks * btodb(sp->f_bsize);
		rootpages = round_page(dbtob(rootblocks)) >> PAGE_SHIFT;
		if (rootpages > npages)
			panic("swap_on: miniroot larger than swap?");

		if (extent_alloc_region(sdp->swd_ex, addr,
					rootpages, EX_WAITOK))
			panic("swap_on: unable to preserve miniroot");

		sdp->swd_npginuse += (rootpages - addr);
		uvmexp.swpginuse += (rootpages - addr);

		printf("Preserved %d pages of miniroot ", rootpages);
		printf("leaving %d pages of swap\n", size - rootpages);
	}

	/*
	 * now add the new swapdev to the drum and enable.
	 */
	simple_lock(&swap_data_lock);
	swapdrum_add(sdp, npages);
	sdp->swd_npages = npages;
	sdp->swd_flags &= ~SWF_FAKE;	/* going live */
	sdp->swd_flags |= (SWF_INUSE|SWF_ENABLE);
	simple_unlock(&swap_data_lock);
	uvmexp.swpages += npages;

	/*
	 * add anon's to reflect the swap space we added
	 */
	uvm_anon_add(size);

#if 0
	/*
	 * At this point we could arrange to reserve memory for the
	 * swap buffer pools.
	 *
	 * I don't think this is necessary, since swapping starts well
	 * ahead of serious memory deprivation and the memory resource
	 * pools hold on to actively used memory. This should ensure
	 * we always have some resources to continue operation.
	 */

	int s = splbio();
	int n = 8 * sdp->swd_maxactive;

	(void)pool_prime(swapbuf_pool, n, 0);

	if (vp->v_type == VREG) {
		/* Allocate additional vnx and vnd buffers */
		/*
		 * Allocation Policy:
		 *	(8  * swd_maxactive) vnx headers per swap dev
		 *	(16 * swd_maxactive) vnd buffers per swap dev
		 */

		n = 8 * sdp->swd_maxactive;
		(void)pool_prime(vndxfer_pool, n, 0);

		n = 16 * sdp->swd_maxactive;
		(void)pool_prime(vndbuf_pool, n, 0);
	}
	splx(s);
#endif

	return (0);

bad:
	/*
	 * failure: close device if necessary and return error.
	 */
	if (vp != rootvp)
		(void)VOP_CLOSE(vp, FREAD|FWRITE, p->p_ucred, p);
	return (error);
}

#ifdef SWAP_OFF_WORKS
/*
 * swap_off: stop swapping on swapdev
 *
 * XXXCDC: what conditions go here?
 */
static int
swap_off(p, sdp)
	struct proc *p;
	struct swapdev *sdp;
{
	char	*name;
	UVMHIST_FUNC("swap_off"); UVMHIST_CALLED(pdhist);

	/* turn off the enable flag */
	sdp->swd_flags &= ~SWF_ENABLE;

	UVMHIST_LOG(pdhist, "  dev=%x", sdp->swd_dev, 0, 0, 0);

	/*
	 * XXX write me
	 *
	 * the idea is to find out which processes are using this swap
	 * device, and page them all in.
	 *
	 * eventually, we should try to move them out to other swap areas
	 * if available.
	 *
	 * The alternative is to create a redirection map for this swap
	 * device.  This should work by moving all the pages of data from
	 * the ex-swap device to another one, and making an entry in the
	 * redirection map for it.  locking is going to be important for
	 * this!
	 *
	 * XXXCDC: also need to shrink anon pool
	 */

	/* until the above code is written, we must ENODEV */
	return ENODEV;

	extent_free(swapmap, sdp->swd_drumoffset, sdp->swd_drumsize, EX_WAITOK);
	name = sdp->swd_ex->ex_name;
	extent_destroy(sdp->swd_ex);
	free(name, M_VMSWAP);
	free((caddr_t)sdp->swd_ex, M_VMSWAP);
	if (sdp->swd_vp != rootvp)
		(void) VOP_CLOSE(sdp->swd_vp, FREAD|FWRITE, p->p_ucred, p);
	if (sdp->swd_vp)
		vrele(sdp->swd_vp);
	free((caddr_t)sdp, M_VMSWAP);
	return (0);
}
#endif

/*
 * /dev/drum interface and i/o functions
 */

/*
 * swread: the read function for the drum (just a call to physio)
 */
/*ARGSUSED*/
int
swread(dev, uio, ioflag)
	dev_t dev;
	struct uio *uio;
	int ioflag;
{
	UVMHIST_FUNC("swread"); UVMHIST_CALLED(pdhist);

	UVMHIST_LOG(pdhist, "  dev=%x offset=%qx", dev, uio->uio_offset, 0, 0);
	return (physio(swstrategy, NULL, dev, B_READ, minphys, uio));
}

/*
 * swwrite: the write function for the drum (just a call to physio)
 */
/*ARGSUSED*/
int
swwrite(dev, uio, ioflag)
	dev_t dev;
	struct uio *uio;
	int ioflag;
{
	UVMHIST_FUNC("swwrite"); UVMHIST_CALLED(pdhist);

	UVMHIST_LOG(pdhist, "  dev=%x offset=%qx", dev, uio->uio_offset, 0, 0);
	return (physio(swstrategy, NULL, dev, B_WRITE, minphys, uio));
}

/*
 * swstrategy: perform I/O on the drum
 *
 * => we must map the i/o request from the drum to the correct swapdev.
 */
void
swstrategy(bp)
	struct buf *bp;
{
	struct swapdev *sdp;
	struct vnode *vp;
	int	pageno;
	int	bn;
	UVMHIST_FUNC("swstrategy"); UVMHIST_CALLED(pdhist);

	/*
	 * convert block number to swapdev.   note that swapdev can't
	 * be yanked out from under us because we are holding resources
	 * in it (i.e. the blocks we are doing I/O on).
	 */
	pageno = dbtob(bp->b_blkno) >> PAGE_SHIFT;
	simple_lock(&swap_data_lock);
	sdp = swapdrum_getsdp(pageno);
	simple_unlock(&swap_data_lock);
	if (sdp == NULL) {
		bp->b_error = EINVAL;
		bp->b_flags |= B_ERROR;
		biodone(bp);
		UVMHIST_LOG(pdhist, "  failed to get swap device", 0, 0, 0, 0);
		return;
	}

	/*
	 * convert drum page number to block number on this swapdev.
	 */

	pageno = pageno - sdp->swd_drumoffset;	/* page # on swapdev */
	bn = btodb(pageno << PAGE_SHIFT);	/* convert to diskblock */

	UVMHIST_LOG(pdhist, "  %s: mapoff=%x bn=%x bcount=%ld\n",
		((bp->b_flags & B_READ) == 0) ? "write" : "read",
		sdp->swd_drumoffset, bn, bp->b_bcount);


	/*
	 * for block devices we finish up here.
	 * for regular files we have to do more work which we delegate
	 * to sw_reg_strategy().
	 */

	switch (sdp->swd_vp->v_type) {
	default:
		panic("swstrategy: vnode type 0x%x", sdp->swd_vp->v_type);
	case VBLK:

		/*
		 * must convert "bp" from an I/O on /dev/drum to an I/O
		 * on the swapdev (sdp).
		 */
		bp->b_blkno = bn;		/* swapdev block number */
		vp = sdp->swd_vp;		/* swapdev vnode pointer */
		bp->b_dev = sdp->swd_dev;	/* swapdev dev_t */
		VHOLD(vp);			/* "hold" swapdev vp for i/o */

		/*
		 * if we are doing a write, we have to redirect the i/o on
		 * drum's v_numoutput counter to the swapdevs.
		 */
		if ((bp->b_flags & B_READ) == 0) {
			int s = splbio();
			vwakeup(bp);	/* kills one 'v_numoutput' on drum */
			vp->v_numoutput++;	/* put it on swapdev */
			splx(s);
		}

		/*
		 * disassociate buffer from /dev/drum vnode
		 * [could be null if buf was from physio]
		 */
		if (bp->b_vp != NULLVP)
			brelvp(bp);

		/*
		 * finally plug in swapdev vnode and start I/O
		 */
		bp->b_vp = vp;
		VOP_STRATEGY(bp);
		return;
#ifdef SWAP_TO_FILES
	case VREG:
		/*
		 * delegate to sw_reg_strategy function.
		 */
		sw_reg_strategy(sdp, bp, bn);
		return;
#endif
	}
	/* NOTREACHED */
}

#ifdef SWAP_TO_FILES
/*
 * sw_reg_strategy: handle swap i/o to regular files
 */
static void
sw_reg_strategy(sdp, bp, bn)
	struct swapdev	*sdp;
	struct buf	*bp;
	int		bn;
{
	struct vnode	*vp;
	struct vndxfer	*vnx;
	daddr_t		nbn, byteoff;
	caddr_t		addr;
	int		s, off, nra, error, sz, resid;
	UVMHIST_FUNC("sw_reg_strategy"); UVMHIST_CALLED(pdhist);

	/*
	 * allocate a vndxfer head for this transfer and point it to
	 * our buffer.
	 */
	getvndxfer(vnx);
	vnx->vx_flags = VX_BUSY;
	vnx->vx_error = 0;
	vnx->vx_pending = 0;
	vnx->vx_bp = bp;
	vnx->vx_sdp = sdp;

	/*
	 * setup for main loop where we read filesystem blocks into
	 * our buffer.
	 */
	error = 0;
	bp->b_resid = bp->b_bcount;	/* nothing transferred yet! */
	addr = bp->b_data;		/* current position in buffer */
	byteoff = dbtob(bn);

	for (resid = bp->b_resid; resid; resid -= sz) {
		struct vndbuf	*nbp;

		/*
		 * translate byteoffset into block number.  return values:
		 *   vp = vnode of underlying device
		 *  nbn = new block number (on underlying vnode dev)
		 *  nra = num blocks we can read-ahead (excludes requested
		 *	block)
		 */
		nra = 0;
		error = VOP_BMAP(sdp->swd_vp, byteoff / sdp->swd_bsize,
				 	&vp, &nbn, &nra);

		if (error == 0 && (long)nbn == -1)
			error = EIO;	/* failure */

		/*
		 * punt if there was an error or a hole in the file.
		 * we must wait for any i/o ops we have already started
		 * to finish before returning.
		 *
		 * XXX we could deal with holes here but it would be
		 * a hassle (in the write case).
		 */
		if (error) {
			s = splbio();
			vnx->vx_error = error;	/* pass error up */
			goto out;
		}

		/*
		 * compute the size ("sz") of this transfer (in bytes).
		 * XXXCDC: ignores read-ahead for non-zero offset
		 */
		if ((off = (byteoff % sdp->swd_bsize)) != 0)
			sz = sdp->swd_bsize - off;
		else
			sz = (1 + nra) * sdp->swd_bsize;

		if (resid < sz)
			sz = resid;

		UVMHIST_LOG(pdhist, "sw_reg_strategy: vp %p/%p offset 0x%x/0x%x",
				sdp->swd_vp, vp, byteoff, nbn);

		/*
		 * now get a buf structure.   note that the vb_buf is
		 * at the front of the nbp structure so that you can
		 * cast pointers between the two structures easily.
		 */
		getvndbuf(nbp);
		nbp->vb_buf.b_flags    = bp->b_flags | B_CALL;
		nbp->vb_buf.b_bcount   = sz;
#if 0
		nbp->vb_buf.b_bufsize  = bp->b_bufsize; /* XXXCDC: really? */
#endif
		nbp->vb_buf.b_bufsize  = sz;
		nbp->vb_buf.b_error    = 0;
		nbp->vb_buf.b_data     = addr;
		nbp->vb_buf.b_blkno    = nbn + btodb(off);
		nbp->vb_buf.b_proc     = bp->b_proc;
		nbp->vb_buf.b_iodone   = sw_reg_iodone;
		nbp->vb_buf.b_vp       = NULLVP;
		nbp->vb_buf.b_vnbufs.le_next = NOLIST;
		nbp->vb_buf.b_rcred    = sdp->swd_cred;
		nbp->vb_buf.b_wcred    = sdp->swd_cred;

		/*
		 * set b_dirtyoff/end and b_validoff/end.   this is
		 * required by the NFS client code (otherwise it will
		 * just discard our I/O request).
		 */
		if (bp->b_dirtyend == 0) {
			nbp->vb_buf.b_dirtyoff = 0;
			nbp->vb_buf.b_dirtyend = sz;
		} else {
			nbp->vb_buf.b_dirtyoff =
			    max(0, bp->b_dirtyoff - (bp->b_bcount-resid));
			nbp->vb_buf.b_dirtyend =
			    min(sz,
				max(0, bp->b_dirtyend - (bp->b_bcount-resid)));
		}
		if (bp->b_validend == 0) {
			nbp->vb_buf.b_validoff = 0;
			nbp->vb_buf.b_validend = sz;
		} else {
			nbp->vb_buf.b_validoff =
			    max(0, bp->b_validoff - (bp->b_bcount-resid));
			nbp->vb_buf.b_validend =
			    min(sz,
				max(0, bp->b_validend - (bp->b_bcount-resid)));
		}

		nbp->vb_xfer = vnx;	/* patch it back in to vnx */

		/*
		 * Just sort by block number
		 */
		nbp->vb_buf.b_cylinder = nbp->vb_buf.b_blkno;
		s = splbio();
		if (vnx->vx_error != 0) {
			putvndbuf(nbp);
			goto out;
		}
		vnx->vx_pending++;

		/* assoc new buffer with underlying vnode */
		bgetvp(vp, &nbp->vb_buf);

		/* sort it in and start I/O if we are not over our limit */
		disksort(&sdp->swd_tab, &nbp->vb_buf);
		sw_reg_start(sdp);
		splx(s);

		/*
		 * advance to the next I/O
		 */
		byteoff += sz;
		addr += sz;
	}

	s = splbio();

out: /* Arrive here at splbio */
	vnx->vx_flags &= ~VX_BUSY;
	if (vnx->vx_pending == 0) {
		if (vnx->vx_error != 0) {
			bp->b_error = vnx->vx_error;
			bp->b_flags |= B_ERROR;
		}
		putvndxfer(vnx);
		biodone(bp);
	}
	splx(s);
}

/*
 * sw_reg_start: start an I/O request on the requested swapdev
 *
 * => reqs are sorted by disksort (above)
 */
static void
sw_reg_start(sdp)
	struct swapdev	*sdp;
{
	struct buf	*bp;
	UVMHIST_FUNC("sw_reg_start"); UVMHIST_CALLED(pdhist);

	/* recursion control */
	if ((sdp->swd_flags & SWF_BUSY) != 0)
		return;

	sdp->swd_flags |= SWF_BUSY;

	while (sdp->swd_tab.b_active < sdp->swd_maxactive) {
		bp = sdp->swd_tab.b_actf;
		if (bp == NULL)
			break;
		sdp->swd_tab.b_actf = bp->b_actf;
		sdp->swd_tab.b_active++;

		UVMHIST_LOG(pdhist,
		    "sw_reg_start:  bp %p vp %p blkno %p cnt %lx",
		    bp, bp->b_vp, bp->b_blkno, bp->b_bcount);
		if ((bp->b_flags & B_READ) == 0)
			bp->b_vp->v_numoutput++;
		VOP_STRATEGY(bp);
	}
	sdp->swd_flags &= ~SWF_BUSY;
}

/*
 * sw_reg_iodone: one of our i/o's has completed and needs post-i/o cleanup
 *
 * => note that we can recover the vndbuf struct by casting the buf ptr
 */
static void
sw_reg_iodone(bp)
	struct buf *bp;
{
	struct vndbuf *vbp = (struct vndbuf *) bp;
	struct vndxfer *vnx = vbp->vb_xfer;
	struct buf *pbp = vnx->vx_bp;		/* parent buffer */
	struct swapdev	*sdp = vnx->vx_sdp;
	int		s, resid;
	UVMHIST_FUNC("sw_reg_iodone"); UVMHIST_CALLED(pdhist);

	UVMHIST_LOG(pdhist, "  vbp=%p vp=%p blkno=%x addr=%p",
	    vbp, vbp->vb_buf.b_vp, vbp->vb_buf.b_blkno, vbp->vb_buf.b_data);
	UVMHIST_LOG(pdhist, "  cnt=%lx resid=%lx",
	    vbp->vb_buf.b_bcount, vbp->vb_buf.b_resid, 0, 0);

	/*
	 * protect vbp at splbio and update.
	 */

	s = splbio();
	resid = vbp->vb_buf.b_bcount - vbp->vb_buf.b_resid;
	pbp->b_resid -= resid;
	vnx->vx_pending--;

	if (vbp->vb_buf.b_error) {
		UVMHIST_LOG(pdhist, "  got error=%d !",
		    vbp->vb_buf.b_error, 0, 0, 0);

		/* pass error upward */
		vnx->vx_error = vbp->vb_buf.b_error;
	}

	/*
	 * drop "hold" reference to vnode (if one)
	 * XXXCDC: always set to NULLVP, this is useless, right?
	 */
	if (vbp->vb_buf.b_vp != NULLVP)
		brelvp(&vbp->vb_buf);

	/*
	 * kill vbp structure
	 */
	putvndbuf(vbp);

	/*
	 * wrap up this transaction if it has run to completion or, in
	 * case of an error, when all auxiliary buffers have returned.
	 */
	if (vnx->vx_error != 0) {
		/* pass error upward */
		pbp->b_flags |= B_ERROR;
		pbp->b_error = vnx->vx_error;
		if ((vnx->vx_flags & VX_BUSY) == 0 && vnx->vx_pending == 0) {
			putvndxfer(vnx);
			biodone(pbp);
		}
	} else if (pbp->b_resid == 0) {
#ifdef DIAGNOSTIC
		if (vnx->vx_pending != 0)
			panic("sw_reg_iodone: vnx pending: %d",vnx->vx_pending);
#endif

		if ((vnx->vx_flags & VX_BUSY) == 0) {
			UVMHIST_LOG(pdhist, "  iodone: pbp=%p error=%d !",
			    pbp, vnx->vx_error, 0, 0);
			putvndxfer(vnx);
			biodone(pbp);
		}
	}

	/*
	 * done!   start next swapdev I/O if one is pending
	 */
	sdp->swd_tab.b_active--;
	sw_reg_start(sdp);

	splx(s);
}
#endif /* SWAP_TO_FILES */


/*
 * uvm_swap_alloc: allocate space on swap
 *
 * => allocation is done "round robin" down the priority list, as we
 *	allocate in a priority we "rotate" the circle queue.
 * => space can be freed with uvm_swap_free
 * => we return the page slot number in /dev/drum (0 == invalid slot)
 * => we lock swap_data_lock
 * => XXXMRG: "LESSOK" INTERFACE NEEDED TO EXTENT SYSTEM
 */
int
uvm_swap_alloc(nslots, lessok)
	int *nslots;	/* IN/OUT */
	boolean_t lessok;
{
	struct swapdev *sdp;
	struct swappri *spp;
	u_long	result;
	UVMHIST_FUNC("uvm_swap_alloc"); UVMHIST_CALLED(pdhist);

	/*
	 * no swap devices configured yet?   definite failure.
	 */
	if (uvmexp.nswapdev < 1)
		return 0;

	/*
	 * lock data lock, convert slots into blocks, and enter loop
	 */
	simple_lock(&swap_data_lock);

ReTry:	/* XXXMRG */
	for (spp = swap_priority.lh_first; spp != NULL;
	     spp = spp->spi_swappri.le_next) {
		for (sdp = spp->spi_swapdev.cqh_first;
		     sdp != (void *)&spp->spi_swapdev;
		     sdp = sdp->swd_next.cqe_next) {
			/* if it's not enabled, then we can't swap from it */
			if ((sdp->swd_flags & SWF_ENABLE) == 0)
				continue;
			if (sdp->swd_npginuse + *nslots > sdp->swd_npages)
				continue;
			if (extent_alloc(sdp->swd_ex, *nslots, EX_NOALIGN,
					 EX_NOBOUNDARY, EX_MALLOCOK|EX_NOWAIT,
					 &result) != 0) {
				continue;
			}

			/*
			 * successful allocation!  now rotate the circleq.
			 */
			CIRCLEQ_REMOVE(&spp->spi_swapdev, sdp, swd_next);
			CIRCLEQ_INSERT_TAIL(&spp->spi_swapdev, sdp, swd_next);
			sdp->swd_npginuse += *nslots;
			uvmexp.swpginuse += *nslots;
			simple_unlock(&swap_data_lock);
			/* done!  return drum slot number */
			UVMHIST_LOG(pdhist,
			    "success!  returning %d slots starting at %d",
			    *nslots, result + sdp->swd_drumoffset, 0, 0);
#if 0
{
	struct swapdev *sdp2;

	sdp2 = swapdrum_getsdp(result + sdp->swd_drumoffset);
	if (sdp2 == NULL) {
printf("uvm_swap_alloc:  nslots=%d, dev=%x, drumoff=%d, result=%ld",
    *nslots, sdp->swd_dev, sdp->swd_drumoffset, result);
panic("uvm_swap_alloc:  allocating unmapped swap block!");
	}
}
#endif
			return(result + sdp->swd_drumoffset);
		}
	}

	/* XXXMRG: BEGIN HACK */
	if (*nslots > 1 && lessok) {
		*nslots = 1;
		goto ReTry;	/* XXXMRG: ugh!  extent should support this for us */
	}
	/* XXXMRG: END HACK */

	simple_unlock(&swap_data_lock);
	return 0;		/* failed */
}

/*
 * uvm_swap_free: free swap slots
 *
 * => this can be all or part of an allocation made by uvm_swap_alloc
 * => we lock swap_data_lock
 */
void
uvm_swap_free(startslot, nslots)
	int startslot;
	int nslots;
{
	struct swapdev *sdp;
	UVMHIST_FUNC("uvm_swap_free"); UVMHIST_CALLED(pdhist);

	UVMHIST_LOG(pdhist, "freeing %d slots starting at %d", nslots,
	    startslot, 0, 0);
	/*
	 * convert drum slot offset back to sdp, free the blocks
	 * in the extent, and return.   must hold swap_data_lock
	 * to do the lookup and access the extent.
	 */
	simple_lock(&swap_data_lock);
	sdp = swapdrum_getsdp(startslot);

#ifdef DIAGNOSTIC
	if (uvmexp.nswapdev < 1)
		panic("uvm_swap_free: uvmexp.nswapdev < 1\n");
	if (sdp == NULL) {
		printf("uvm_swap_free: startslot %d, nslots %d\n", startslot,
		    nslots);
		panic("uvm_swap_free: unmapped address\n");
	}
#endif
	if (extent_free(sdp->swd_ex, startslot - sdp->swd_drumoffset, nslots,
			EX_MALLOCOK|EX_NOWAIT) != 0)
		printf("warning: resource shortage: %d slots of swap lost\n",
			nslots);

	sdp->swd_npginuse -= nslots;
	uvmexp.swpginuse -= nslots;
#ifdef DIAGNOSTIC
	if (sdp->swd_npginuse < 0)
		panic("uvm_swap_free: inuse < 0");
#endif
	simple_unlock(&swap_data_lock);
}

   1664 /*
   1665  * uvm_swap_put: put any number of pages into a contig place on swap
   1666  *
   1667  * => can be sync or async
   1668  * => XXXMRG: consider making it an inline or macro
   1669  */
   1670 int
   1671 uvm_swap_put(swslot, ppsp, npages, flags)
   1672 	int swslot;
   1673 	struct vm_page **ppsp;
   1674 	int	npages;
   1675 	int	flags;
   1676 {
   1677 	int	result;
   1678 
   1679 #if 0
   1680 	flags |= PGO_SYNCIO; /* XXXMRG: tmp, force sync */
   1681 #endif
   1682 
   1683 	result = uvm_swap_io(ppsp, swslot, npages, B_WRITE |
   1684 	    ((flags & PGO_SYNCIO) ? 0 : B_ASYNC));
   1685 
   1686 	return (result);
   1687 }
   1688 
   1689 /*
   1690  * uvm_swap_get: get a single page from swap
   1691  *
   1692  * => usually a sync op (from fault)
   1693  * => XXXMRG: consider making it an inline or macro
   1694  */
   1695 int
   1696 uvm_swap_get(page, swslot, flags)
   1697 	struct vm_page *page;
   1698 	int swslot, flags;
   1699 {
   1700 	int	result;
   1701 
   1702 	uvmexp.nswget++;
   1703 #ifdef DIAGNOSTIC
   1704 	if ((flags & PGO_SYNCIO) == 0)
   1705 		printf("uvm_swap_get: ASYNC get requested?\n");
   1706 #endif
   1707 
   1708 	result = uvm_swap_io(&page, swslot, 1, B_READ |
   1709 	    ((flags & PGO_SYNCIO) ? 0 : B_ASYNC));
   1710 
   1711 	return (result);
   1712 }
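/*
 * a sketch (not verbatim from the pageout code) of how the alloc/put/get
 * calls above fit together: allocate a run of slots, write the cluster
 * out, and on a later fault read a single page back:
 *
 *	slot = uvm_swap_alloc(&npages, TRUE);
 *	if (slot != 0)
 *		result = uvm_swap_put(slot, ppsp, npages, 0);	(async put)
 *	...
 *	result = uvm_swap_get(pg, slot + pageidx, PGO_SYNCIO);	(sync get)
 *
 * the "slot + pageidx" addressing works because uvm_swap_put stores the
 * cluster in contiguous drum slots.
 */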
   1713 
   1714 /*
   1715  * uvm_swap_io: do an i/o operation to swap
   1716  */
   1717 
   1718 static int
   1719 uvm_swap_io(pps, startslot, npages, flags)
   1720 	struct vm_page **pps;
   1721 	int startslot, npages, flags;
   1722 {
   1723 	daddr_t startblk;
   1724 	struct swapbuf *sbp;
   1725 	struct	buf *bp;
   1726 	vaddr_t kva;
   1727 	int	result, s, waitf, pflag;
   1728 	UVMHIST_FUNC("uvm_swap_io"); UVMHIST_CALLED(pdhist);
   1729 
   1730 	UVMHIST_LOG(pdhist, "<- called, startslot=%d, npages=%d, flags=%d",
   1731 	    startslot, npages, flags, 0);
   1732 	/*
   1733 	 * convert starting drum slot to block number
   1734 	 */
   1735 	startblk = btodb(startslot << PAGE_SHIFT);
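	/*
	 * e.g. (assuming 4k pages and 512-byte device blocks): drum slot 3
	 * is byte offset 3 << PAGE_SHIFT = 12288, which btodb converts to
	 * disk block 24.
	 */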
   1736 
   1737 	/*
   1738 	 * first, map the pages into the kernel (XXX: currently required
   1739 	 * by buffer system).   note that we don't let pagermapin alloc
   1740 	 * an aiodesc structure because we don't want to chance a malloc.
   1741 	 * we've got our own pool of aiodesc structures (in swapbuf).
   1742 	 */
   1743 	waitf = (flags & B_ASYNC) ? M_NOWAIT : M_WAITOK;
   1744 	kva = uvm_pagermapin(pps, npages, NULL, waitf);
	if (kva == 0)
   1746 		return (VM_PAGER_AGAIN);
   1747 
	/*
	 * now allocate a swap buffer from the swapbuf pool
	 * [make sure we don't put the pagedaemon to sleep...]
	 */
   1752 	s = splbio();
   1753 	pflag = ((flags & B_ASYNC) != 0 || curproc == uvm.pagedaemon_proc)
   1754 		? 0
   1755 		: PR_WAITOK;
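	/*
	 * note: async callers get a non-sleeping allocation too, since by
	 * passing B_ASYNC they have declared that they cannot block; a
	 * failed pool_get is reported below as VM_PAGER_AGAIN rather than
	 * waited out.
	 */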
   1756 	sbp = pool_get(swapbuf_pool, pflag);
   1757 	splx(s);		/* drop splbio */
   1758 
	/*
	 * if we failed to get a swapbuf, drop the pager mapping we
	 * established above (it would otherwise leak) and return
	 * "try again"
	 */
	if (sbp == NULL) {
		uvm_pagermapout(kva, npages);
		return (VM_PAGER_AGAIN);
	}
   1764 
   1765 	/*
   1766 	 * fill in the bp/sbp.   we currently route our i/o through
   1767 	 * /dev/drum's vnode [swapdev_vp].
   1768 	 */
   1769 	bp = &sbp->sw_buf;
   1770 	bp->b_flags = B_BUSY | B_NOCACHE | (flags & (B_READ|B_ASYNC));
   1771 	bp->b_proc = &proc0;	/* XXX */
   1772 	bp->b_rcred = bp->b_wcred = proc0.p_ucred;
   1773 	bp->b_vnbufs.le_next = NOLIST;
   1774 	bp->b_data = (caddr_t)kva;
   1775 	bp->b_blkno = startblk;
   1776 	VHOLD(swapdev_vp);
   1777 	bp->b_vp = swapdev_vp;
   1778 	/* XXXCDC: isn't swapdev_vp always a VCHR? */
   1779 	/* XXXMRG: probably -- this is obviously something inherited... */
   1780 	if (swapdev_vp->v_type == VBLK)
   1781 		bp->b_dev = swapdev_vp->v_rdev;
   1782 	bp->b_bcount = npages << PAGE_SHIFT;
   1783 
	/*
	 * for pageouts we must set "dirtyoff" and "dirtyend" [the NFS
	 * client code needs them], and we must bump v_numoutput (the
	 * counter of active outputs on the vnode).
	 */
   1788 	if ((bp->b_flags & B_READ) == 0) {
   1789 		bp->b_dirtyoff = 0;
   1790 		bp->b_dirtyend = npages << PAGE_SHIFT;
   1791 		s = splbio();
   1792 		swapdev_vp->v_numoutput++;
   1793 		splx(s);
   1794 	}
   1795 
	/*
	 * for async ops we must set up the aiodesc and set up the callback.
	 * XXX: we expect no async-reads, but we don't prevent them here.
	 */
   1800 	if (flags & B_ASYNC) {
   1801 		sbp->sw_aio.aiodone = uvm_swap_aiodone;
   1802 		sbp->sw_aio.kva = kva;
   1803 		sbp->sw_aio.npages = npages;
   1804 		sbp->sw_aio.pd_ptr = sbp;	/* backpointer */
   1805 		bp->b_flags |= B_CALL;		/* set callback */
   1806 		bp->b_iodone = uvm_swap_bufdone;/* "buf" iodone function */
   1807 		UVMHIST_LOG(pdhist, "doing async!", 0, 0, 0, 0);
   1808 	}
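	/*
	 * the async completion path from here: biodone() invokes
	 * uvm_swap_bufdone [via B_CALL] at splbio, which queues our
	 * aiodesc on uvm.aio_done and wakes the pagedaemon; the
	 * pagedaemon then runs uvm_swap_aiodone in its own context to
	 * unmap the pages and free the swapbuf.
	 */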
   1809 	UVMHIST_LOG(pdhist,
   1810 	    "about to start io: data = 0x%p blkno = 0x%x, bcount = %ld",
   1811 	    bp->b_data, bp->b_blkno, bp->b_bcount, 0);
   1812 
   1813 	/*
   1814 	 * now we start the I/O, and if async, return.
   1815 	 */
   1816 	VOP_STRATEGY(bp);
   1817 	if (flags & B_ASYNC)
   1818 		return (VM_PAGER_PEND);
   1819 
   1820 	/*
   1821 	 * must be sync i/o.   wait for it to finish
   1822 	 */
   1823 	bp->b_error = biowait(bp);
   1824 	result = (bp->b_flags & B_ERROR) ? VM_PAGER_ERROR : VM_PAGER_OK;
   1825 
   1826 	/*
   1827 	 * kill the pager mapping
   1828 	 */
   1829 	uvm_pagermapout(kva, npages);
   1830 
   1831 	/*
   1832 	 * now dispose of the swap buffer
   1833 	 */
   1834 	s = splbio();
	bp->b_flags &= ~(B_BUSY|B_WANTED|B_PHYS|B_PAGET|B_UAREA|B_DIRTY|
	    B_NOCACHE);
   1836 	if (bp->b_vp)
   1837 		brelvp(bp);
   1838 
   1839 	pool_put(swapbuf_pool, sbp);
   1840 	splx(s);
   1841 
   1842 	/*
   1843 	 * finally return.
   1844 	 */
   1845 	UVMHIST_LOG(pdhist, "<- done (sync)  result=%d", result, 0, 0, 0);
   1846 	return (result);
   1847 }
   1848 
   1849 /*
   1850  * uvm_swap_bufdone: called from the buffer system when the i/o is done
   1851  */
   1852 static void
   1853 uvm_swap_bufdone(bp)
   1854 	struct buf *bp;
   1855 {
   1856 	struct swapbuf *sbp = (struct swapbuf *) bp;
   1857 	int	s = splbio();
   1858 	UVMHIST_FUNC("uvm_swap_bufdone"); UVMHIST_CALLED(pdhist);
   1859 
	UVMHIST_LOG(pdhist, "cleaning buf %p", bp, 0, 0, 0);
   1861 #ifdef DIAGNOSTIC
   1862 	/*
   1863 	 * sanity check: swapbufs are private, so they shouldn't be wanted
   1864 	 */
   1865 	if (bp->b_flags & B_WANTED)
   1866 		panic("uvm_swap_bufdone: private buf wanted");
   1867 #endif
   1868 
	/*
	 * drop the buffer's reference to the vnode and clear its i/o
	 * flags.
	 */
	bp->b_flags &= ~(B_BUSY|B_WANTED|B_PHYS|B_PAGET|B_UAREA|B_DIRTY|
	    B_NOCACHE);
   1873 	if (bp->b_vp)
   1874 		brelvp(bp);
   1875 
   1876 	/*
   1877 	 * now put the aio on the uvm.aio_done list and wake the
   1878 	 * pagedaemon (which will finish up our job in its context).
   1879 	 */
   1880 	simple_lock(&uvm.pagedaemon_lock);	/* locks uvm.aio_done */
   1881 	TAILQ_INSERT_TAIL(&uvm.aio_done, &sbp->sw_aio, aioq);
   1882 	simple_unlock(&uvm.pagedaemon_lock);
   1883 
   1884 	thread_wakeup(&uvm.pagedaemon);
   1885 	splx(s);
   1886 }
   1887 
   1888 /*
   1889  * uvm_swap_aiodone: aiodone function for anonymous memory
   1890  *
   1891  * => this is called in the context of the pagedaemon (but with the
   1892  *	page queues unlocked!)
   1893  * => our "aio" structure must be part of a "swapbuf"
   1894  */
   1895 static void
   1896 uvm_swap_aiodone(aio)
   1897 	struct uvm_aiodesc *aio;
   1898 {
   1899 	struct swapbuf *sbp = aio->pd_ptr;
   1900 	struct vm_page *pps[MAXBSIZE >> PAGE_SHIFT];
   1901 	int lcv, s;
   1902 	vaddr_t addr;
   1903 	UVMHIST_FUNC("uvm_swap_aiodone"); UVMHIST_CALLED(pdhist);
   1904 
   1905 	UVMHIST_LOG(pdhist, "done with aio %p", aio, 0, 0, 0);
   1906 #ifdef DIAGNOSTIC
   1907 	/*
   1908 	 * sanity check
   1909 	 */
   1910 	if (aio->npages > (MAXBSIZE >> PAGE_SHIFT))
   1911 		panic("uvm_swap_aiodone: aio too big!");
   1912 #endif
   1913 
   1914 	/*
   1915 	 * first, we have to recover the page pointers (pps) by poking in the
   1916 	 * kernel pmap (XXX: should be saved in the buf structure).
   1917 	 */
   1918 	for (addr = aio->kva, lcv = 0 ; lcv < aio->npages ;
   1919 		addr += PAGE_SIZE, lcv++) {
   1920 		pps[lcv] = uvm_pageratop(addr);
   1921 	}
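	/*
	 * [uvm_pageratop translates a pager-map kva back to its vm_page
	 * by looking the physical address up in the kernel pmap; this is
	 * why the kernel mapping must still be intact here, and why
	 * uvm_pagermapout is only called afterwards.]
	 */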
   1922 
   1923 	/*
   1924 	 * now we can dispose of the kernel mappings of the buffer
   1925 	 */
   1926 	uvm_pagermapout(aio->kva, aio->npages);
   1927 
   1928 	/*
   1929 	 * now we can dispose of the pages by using the dropcluster function
   1930 	 * [note that we have no "page of interest" so we pass in null]
   1931 	 */
   1932 	uvm_pager_dropcluster(NULL, NULL, pps, &aio->npages,
   1933 				PGO_PDFREECLUST, 0);
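	/*
	 * [PGO_PDFREECLUST asks the pager code to release the cluster
	 * pages the way the pagedaemon would after a completed pageout;
	 * since we pass NULL for the "page of interest", every page in
	 * pps is treated as a cluster page.]
	 */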
   1934 
   1935 	/*
   1936 	 * finally, we can dispose of the swapbuf
   1937 	 */
   1938 	s = splbio();
   1939 	pool_put(swapbuf_pool, sbp);
   1940 	splx(s);
   1941 
   1942 	/*
   1943 	 * done!
   1944 	 */
   1945 }
   1946