/*	$NetBSD: uvm_swap.c,v 1.46.2.7 2002/04/01 07:49:24 nathanw Exp $	*/

/*
 * Copyright (c) 1995, 1996, 1997 Matthew R. Green
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: NetBSD: vm_swap.c,v 1.52 1997/12/02 13:47:37 pk Exp
 * from: Id: uvm_swap.c,v 1.1.2.42 1998/02/02 20:38:06 chuck Exp
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_swap.c,v 1.46.2.7 2002/04/01 07:49:24 nathanw Exp $");

#include "fs_nfs.h"
#include "opt_uvmhist.h"
#include "opt_compat_netbsd.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/lwp.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/disklabel.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/extent.h>
#include <sys/mount.h>
#include <sys/pool.h>
#include <sys/syscallargs.h>
#include <sys/swap.h>

#include <uvm/uvm.h>

#include <miscfs/specfs/specdev.h>
/*
 * uvm_swap.c: manage configuration and i/o to swap space.
 */

/*
 * swap space is managed in the following way:
 *
 * each swap partition or file is described by a "swapdev" structure.
 * each "swapdev" structure contains a "swapent" structure which contains
 * information that is passed up to the user (via system calls).
 *
 * each swap partition is assigned a "priority" (int) which controls
 * swap partition usage.
 *
 * the system maintains a global data structure describing all swap
 * partitions/files.  there is a sorted LIST of "swappri" structures
 * which describe "swapdev"'s at that priority.  this LIST is headed
 * by the "swap_priority" global var.  each "swappri" contains a
 * CIRCLEQ of "swapdev" structures at that priority.
 *
 * locking:
 *  - swap_syscall_lock (sleep lock): this lock serializes the swapctl
 *    system call and prevents the swap priority list from changing
 *    while we are in the middle of a system call (e.g. SWAP_STATS).
 *  - uvm.swap_data_lock (simple_lock): this lock protects all swap data
 *    structures including the priority list, the swapdev structures,
 *    and the swapmap extent.
 *
 * each swap device has the following info:
 *  - swap device in use (could be disabled, preventing future use)
 *  - swap enabled (allows new allocations on swap)
 *  - map info in /dev/drum
 *  - vnode pointer
 * for swap files only:
 *  - block size
 *  - max byte count in buffer
 *  - buffer
 *
 * userland controls and configures swap with the swapctl(2) system call.
 * the sys_swapctl performs the following operations:
 *  [1] SWAP_NSWAP: returns the number of swap devices currently configured
 *  [2] SWAP_STATS: given a pointer to an array of swapent structures
 *	(passed in via "arg") of a size passed in via "misc" ... we load
 *	the current swap config into the array.
 *  [3] SWAP_ON: given a pathname in arg (could be device or file) and a
 *	priority in "misc", start swapping on it.
 *  [4] SWAP_OFF: as SWAP_ON, but stops swapping to a device
 *  [5] SWAP_CTL: changes the priority of a swap device (new priority in
 *	"misc")
 */
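
/*
 * a minimal sketch of the userland side of this interface, assuming
 * the swapctl(2) signature "int swapctl(int cmd, void *arg, int misc)"
 * and a hypothetical device path; SWAP_ON takes the priority in "misc":
 *
 *	if (swapctl(SWAP_ON, "/dev/sd0b", 0) == -1)
 *		err(1, "swapctl");
 */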

/*
 * swapdev: describes a single swap partition/file
 *
 * note the following should be true:
 * swd_inuse <= swd_nblks  [number of blocks in use is <= total blocks]
 * swd_nblks <= swd_mapsize [because mapsize includes miniroot+disklabel]
 */
struct swapdev {
	struct oswapent swd_ose;
#define	swd_dev		swd_ose.ose_dev		/* device id */
#define	swd_flags	swd_ose.ose_flags	/* flags:inuse/enable/fake */
#define	swd_priority	swd_ose.ose_priority	/* our priority */
	/* also: swd_ose.ose_nblks, swd_ose.ose_inuse */
	char			*swd_path;	/* saved pathname of device */
	int			swd_pathlen;	/* length of pathname */
	int			swd_npages;	/* #pages we can use */
	int			swd_npginuse;	/* #pages in use */
	int			swd_npgbad;	/* #pages bad */
	int			swd_drumoffset;	/* page0 offset in drum */
	int			swd_drumsize;	/* #pages in drum */
	struct extent		*swd_ex;	/* extent for this swapdev */
	char			swd_exname[12];	/* name of extent above */
	struct vnode		*swd_vp;	/* backing vnode */
	CIRCLEQ_ENTRY(swapdev)	swd_next;	/* priority circleq */

	int			swd_bsize;	/* blocksize (bytes) */
	int			swd_maxactive;	/* max active i/o reqs */
	struct buf_queue	swd_tab;	/* buffer list */
	int			swd_active;	/* number of active buffers */
};

/*
 * swap device priority entry; the list is kept sorted on `spi_priority'.
 */
struct swappri {
	int			spi_priority;	/* priority */
	CIRCLEQ_HEAD(spi_swapdev, swapdev)	spi_swapdev;
	/* circleq of swapdevs at this priority */
	LIST_ENTRY(swappri)	spi_swappri;	/* global list of pri's */
};

/*
 * The following two structures are used to keep track of data transfers
 * on swap devices associated with regular files.
 * NOTE: this code is more or less a copy of vnd.c; we use the same
 * structure names here to ease porting.
 */
struct vndxfer {
	struct buf	*vx_bp;		/* Pointer to parent buffer */
	struct swapdev	*vx_sdp;
	int		vx_error;
	int		vx_pending;	/* # of pending aux buffers */
	int		vx_flags;
#define VX_BUSY		1
#define VX_DEAD		2
};

struct vndbuf {
	struct buf	vb_buf;
	struct vndxfer	*vb_xfer;
};
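
/*
 * note that vb_buf is deliberately the first member of struct vndbuf:
 * when the i/o completes, the "struct buf *" handed to the b_iodone
 * callback can be cast back to the containing vndbuf to recover the
 * transfer state (the pattern sw_reg_iodone uses below):
 *
 *	struct vndbuf *vbp = (struct vndbuf *)bp;
 *	struct vndxfer *vnx = vbp->vb_xfer;
 */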

/*
 * We keep a pool of vndbuf's and vndxfer structures.
 */
static struct pool vndxfer_pool;
static struct pool vndbuf_pool;

#define	getvndxfer(vnx)	do {						\
	int s = splbio();						\
	vnx = pool_get(&vndxfer_pool, PR_WAITOK);			\
	splx(s);							\
} while (0)

#define putvndxfer(vnx) {						\
	pool_put(&vndxfer_pool, (void *)(vnx));				\
}

#define	getvndbuf(vbp)	do {						\
	int s = splbio();						\
	vbp = pool_get(&vndbuf_pool, PR_WAITOK);			\
	splx(s);							\
} while (0)

#define putvndbuf(vbp) {						\
	pool_put(&vndbuf_pool, (void *)(vbp));				\
}
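
/*
 * the splbio() bracketing in the get macros matters because these
 * pools are also used from i/o completion context: sw_reg_iodone
 * below raises splbio before calling putvndbuf/putvndxfer, so
 * allocation must exclude that interrupt-level activity as well.
 */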

/* /dev/drum */
bdev_decl(sw);
cdev_decl(sw);

/*
 * local variables
 */
static struct extent *swapmap;		/* controls the mapping of /dev/drum */

/* list of all active swap devices [by priority] */
LIST_HEAD(swap_priority, swappri);
static struct swap_priority swap_priority;

/* locks */
struct lock swap_syscall_lock;

/*
 * prototypes
 */
static struct swapdev	*swapdrum_getsdp __P((int));

static struct swapdev	*swaplist_find __P((struct vnode *, int));
static void		 swaplist_insert __P((struct swapdev *,
					      struct swappri *, int));
static void		 swaplist_trim __P((void));

static int swap_on __P((struct proc *, struct swapdev *));
static int swap_off __P((struct proc *, struct swapdev *));

static void sw_reg_strategy __P((struct swapdev *, struct buf *, int));
static void sw_reg_iodone __P((struct buf *));
static void sw_reg_start __P((struct swapdev *));

static int uvm_swap_io __P((struct vm_page **, int, int, int));

/*
 * uvm_swap_init: init the swap system data structures and locks
 *
 * => called at boot time from init_main.c after the filesystems
 *	are brought up (which happens after uvm_init())
 */
void
uvm_swap_init()
{
	UVMHIST_FUNC("uvm_swap_init");

	UVMHIST_CALLED(pdhist);
	/*
	 * first, init the swap list, its counter, and its lock.
	 * then get a handle on the vnode for /dev/drum by using
	 * its dev_t number ("swapdev", from MD conf.c).
	 */

	LIST_INIT(&swap_priority);
	uvmexp.nswapdev = 0;
	lockinit(&swap_syscall_lock, PVM, "swapsys", 0, 0);
	simple_lock_init(&uvm.swap_data_lock);

	if (bdevvp(swapdev, &swapdev_vp))
		panic("uvm_swap_init: can't get vnode for swap device");

	/*
	 * create swap block resource map to map /dev/drum.  the range
	 * from 1 to INT_MAX allows 2 gigablocks of swap space.  note
	 * that block 0 is reserved (used to indicate an allocation
	 * failure, or no allocation).
	 */
	swapmap = extent_create("swapmap", 1, INT_MAX,
				M_VMSWAP, 0, 0, EX_NOWAIT);
	if (swapmap == 0)
		panic("uvm_swap_init: extent_create failed");

	/*
	 * allocate pools for structures used for swapping to files.
	 */

	pool_init(&vndxfer_pool, sizeof(struct vndxfer), 0, 0, 0,
	    "swp vnx", NULL);

	pool_init(&vndbuf_pool, sizeof(struct vndbuf), 0, 0, 0,
	    "swp vnd", NULL);

	/*
	 * done!
	 */
	UVMHIST_LOG(pdhist, "<- done", 0, 0, 0, 0);
}

/*
 * swaplist functions: functions that operate on the list of swap
 * devices on the system.
 */

/*
 * swaplist_insert: insert swap device "sdp" into the global list
 *
 * => caller must hold both swap_syscall_lock and uvm.swap_data_lock
 * => caller must provide a newly malloc'd swappri structure (we will
 *	FREE it if we don't need it... this is to prevent malloc blocking
 *	here while adding swap)
 */
static void
swaplist_insert(sdp, newspp, priority)
	struct swapdev *sdp;
	struct swappri *newspp;
	int priority;
{
	struct swappri *spp, *pspp;
	UVMHIST_FUNC("swaplist_insert"); UVMHIST_CALLED(pdhist);

	/*
	 * find entry at or after which to insert the new device.
	 */
	pspp = NULL;
	LIST_FOREACH(spp, &swap_priority, spi_swappri) {
		if (priority <= spp->spi_priority)
			break;
		pspp = spp;
	}

	/*
	 * new priority?
	 */
	if (spp == NULL || spp->spi_priority != priority) {
		spp = newspp;	/* use newspp! */
		UVMHIST_LOG(pdhist, "created new swappri = %d",
			    priority, 0, 0, 0);

		spp->spi_priority = priority;
		CIRCLEQ_INIT(&spp->spi_swapdev);

		if (pspp)
			LIST_INSERT_AFTER(pspp, spp, spi_swappri);
		else
			LIST_INSERT_HEAD(&swap_priority, spp, spi_swappri);
	} else {
		/* we don't need a new priority structure, free it */
		FREE(newspp, M_VMSWAP);
	}

	/*
	 * priority found (or created).  now insert on the priority's
	 * circleq list and bump the total number of swapdevs.
	 */
	sdp->swd_priority = priority;
	CIRCLEQ_INSERT_TAIL(&spp->spi_swapdev, sdp, swd_next);
	uvmexp.nswapdev++;
}

/*
 * swaplist_find: find and optionally remove a swap device from the
 *	global list.
 *
 * => caller must hold both swap_syscall_lock and uvm.swap_data_lock
 * => we return the swapdev we found (and removed)
 */
static struct swapdev *
swaplist_find(vp, remove)
	struct vnode *vp;
	boolean_t remove;
{
	struct swapdev *sdp;
	struct swappri *spp;

	/*
	 * search the lists for the requested vp
	 */

	LIST_FOREACH(spp, &swap_priority, spi_swappri) {
		CIRCLEQ_FOREACH(sdp, &spp->spi_swapdev, swd_next) {
			if (sdp->swd_vp == vp) {
				if (remove) {
					CIRCLEQ_REMOVE(&spp->spi_swapdev,
					    sdp, swd_next);
					uvmexp.nswapdev--;
				}
				return(sdp);
			}
		}
	}
	return (NULL);
}


/*
 * swaplist_trim: scan priority list for empty priority entries and kill
 *	them.
 *
 * => caller must hold both swap_syscall_lock and uvm.swap_data_lock
 */
static void
swaplist_trim()
{
	struct swappri *spp, *nextspp;

	for (spp = LIST_FIRST(&swap_priority); spp != NULL; spp = nextspp) {
		nextspp = LIST_NEXT(spp, spi_swappri);
		if (CIRCLEQ_FIRST(&spp->spi_swapdev) !=
		    (void *)&spp->spi_swapdev)
			continue;
		LIST_REMOVE(spp, spi_swappri);
		free(spp, M_VMSWAP);
	}
}

/*
 * swapdrum_getsdp: given a page offset in /dev/drum, convert it back
 *	to the "swapdev" that maps that section of the drum.
 *
 * => each swapdev takes one big contig chunk of the drum
 * => caller must hold uvm.swap_data_lock
 */
static struct swapdev *
swapdrum_getsdp(pgno)
	int pgno;
{
	struct swapdev *sdp;
	struct swappri *spp;

	LIST_FOREACH(spp, &swap_priority, spi_swappri) {
		CIRCLEQ_FOREACH(sdp, &spp->spi_swapdev, swd_next) {
			if (sdp->swd_flags & SWF_FAKE)
				continue;
			if (pgno >= sdp->swd_drumoffset &&
			    pgno < (sdp->swd_drumoffset + sdp->swd_drumsize)) {
				return sdp;
			}
		}
	}
	return NULL;
}


/*
 * sys_swapctl: main entry point for swapctl(2) system call
 *	[with two helper functions: swap_on and swap_off]
 */
int
sys_swapctl(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
	struct sys_swapctl_args /* {
		syscallarg(int) cmd;
		syscallarg(void *) arg;
		syscallarg(int) misc;
	} */ *uap = (struct sys_swapctl_args *)v;
	struct proc *p = l->l_proc;
	struct vnode *vp;
	struct nameidata nd;
	struct swappri *spp;
	struct swapdev *sdp;
	struct swapent *sep;
	char	userpath[PATH_MAX + 1];
	size_t	len;
	int	error, misc;
	int	priority;
	UVMHIST_FUNC("sys_swapctl"); UVMHIST_CALLED(pdhist);

	misc = SCARG(uap, misc);

	/*
	 * ensure serialized syscall access by grabbing the swap_syscall_lock
	 */
	lockmgr(&swap_syscall_lock, LK_EXCLUSIVE, NULL);

	/*
	 * we handle the non-priv NSWAP and STATS requests first.
	 *
	 * SWAP_NSWAP: return number of config'd swap devices
	 * [can also be obtained with uvmexp sysctl]
	 */
	if (SCARG(uap, cmd) == SWAP_NSWAP) {
		UVMHIST_LOG(pdhist, "<- done SWAP_NSWAP=%d", uvmexp.nswapdev,
		    0, 0, 0);
		*retval = uvmexp.nswapdev;
		error = 0;
		goto out;
	}

	/*
	 * SWAP_STATS: get stats on current # of configured swap devs
	 *
	 * note that the swap_priority list can't change as long
	 * as we are holding the swap_syscall_lock.  we don't want
	 * to grab the uvm.swap_data_lock because we may fault&sleep during
	 * copyout() and we don't want to be holding that lock then!
	 */
	if (SCARG(uap, cmd) == SWAP_STATS
#if defined(COMPAT_13)
	    || SCARG(uap, cmd) == SWAP_OSTATS
#endif
	    ) {
		misc = MIN(uvmexp.nswapdev, misc);
#if defined(COMPAT_13)
		if (SCARG(uap, cmd) == SWAP_OSTATS)
			len = sizeof(struct oswapent) * misc;
		else
#endif
			len = sizeof(struct swapent) * misc;
		sep = (struct swapent *)malloc(len, M_TEMP, M_WAITOK);

		uvm_swap_stats(SCARG(uap, cmd), sep, misc, retval);
		error = copyout(sep, (void *)SCARG(uap, arg), len);

		free(sep, M_TEMP);
		UVMHIST_LOG(pdhist, "<- done SWAP_STATS", 0, 0, 0, 0);
		goto out;
	}
	if (SCARG(uap, cmd) == SWAP_GETDUMPDEV) {
		dev_t	*devp = (dev_t *)SCARG(uap, arg);

		error = copyout(&dumpdev, devp, sizeof(dumpdev));
		goto out;
	}

	/*
	 * all other requests require superuser privs.  verify.
	 */
	if ((error = suser(p->p_ucred, &p->p_acflag)))
		goto out;

	/*
	 * at this point we expect a path name in arg.  we will
	 * use namei() to gain a vnode reference (vref), and lock
	 * the vnode (VOP_LOCK).
	 *
	 * XXX: a NULL arg means use the root vnode pointer (e.g. for
	 * miniroot)
	 */
	if (SCARG(uap, arg) == NULL) {
		vp = rootvp;		/* miniroot */
		if (vget(vp, LK_EXCLUSIVE)) {
			error = EBUSY;
			goto out;
		}
		if (SCARG(uap, cmd) == SWAP_ON &&
		    copystr("miniroot", userpath, sizeof userpath, &len))
			panic("swapctl: miniroot copy failed");
	} else {
		int	space;
		char	*where;

		if (SCARG(uap, cmd) == SWAP_ON) {
			if ((error = copyinstr(SCARG(uap, arg), userpath,
			    sizeof userpath, &len)))
				goto out;
			space = UIO_SYSSPACE;
			where = userpath;
		} else {
			space = UIO_USERSPACE;
			where = (char *)SCARG(uap, arg);
		}
		NDINIT(&nd, LOOKUP, FOLLOW|LOCKLEAF, space, where, p);
		if ((error = namei(&nd)))
			goto out;
		vp = nd.ni_vp;
	}
	/* note: "vp" is referenced and locked */

	error = 0;		/* assume no error */
	switch(SCARG(uap, cmd)) {

	case SWAP_DUMPDEV:
		if (vp->v_type != VBLK) {
			error = ENOTBLK;
			break;
		}
		dumpdev = vp->v_rdev;
		break;

	case SWAP_CTL:
		/*
		 * get new priority, remove old entry (if any) and then
		 * reinsert it in the correct place.  finally, prune out
		 * any empty priority structures.
		 */
		priority = SCARG(uap, misc);
		spp = malloc(sizeof *spp, M_VMSWAP, M_WAITOK);
		simple_lock(&uvm.swap_data_lock);
		if ((sdp = swaplist_find(vp, 1)) == NULL) {
			error = ENOENT;
		} else {
			swaplist_insert(sdp, spp, priority);
			swaplist_trim();
		}
		simple_unlock(&uvm.swap_data_lock);
		if (error)
			free(spp, M_VMSWAP);
		break;

	case SWAP_ON:

		/*
		 * check for duplicates.  if none found, then insert a
		 * dummy entry on the list to prevent someone else from
		 * trying to enable this device while we are working on
		 * it.
		 */

		priority = SCARG(uap, misc);
		sdp = malloc(sizeof *sdp, M_VMSWAP, M_WAITOK);
		spp = malloc(sizeof *spp, M_VMSWAP, M_WAITOK);
		simple_lock(&uvm.swap_data_lock);
		if (swaplist_find(vp, 0) != NULL) {
			error = EBUSY;
			simple_unlock(&uvm.swap_data_lock);
			free(sdp, M_VMSWAP);
			free(spp, M_VMSWAP);
			break;
		}
		memset(sdp, 0, sizeof(*sdp));
		sdp->swd_flags = SWF_FAKE;	/* placeholder only */
		sdp->swd_vp = vp;
		sdp->swd_dev = (vp->v_type == VBLK) ? vp->v_rdev : NODEV;
		BUFQ_INIT(&sdp->swd_tab);

		swaplist_insert(sdp, spp, priority);
		simple_unlock(&uvm.swap_data_lock);

		sdp->swd_pathlen = len;
		sdp->swd_path = malloc(sdp->swd_pathlen, M_VMSWAP, M_WAITOK);
		if (copystr(userpath, sdp->swd_path, sdp->swd_pathlen, 0) != 0)
			panic("swapctl: copystr");

		/*
		 * we've now got a FAKE placeholder in the swap list.
		 * now attempt to enable swap on it.  if we fail, undo
		 * what we've done and kill the fake entry we just inserted.
		 * if swap_on is a success, it will clear the SWF_FAKE flag
		 */

		if ((error = swap_on(p, sdp)) != 0) {
			simple_lock(&uvm.swap_data_lock);
			(void) swaplist_find(vp, 1);	/* kill fake entry */
			swaplist_trim();
			simple_unlock(&uvm.swap_data_lock);
			free(sdp->swd_path, M_VMSWAP);
			free(sdp, M_VMSWAP);
			break;
		}
		break;

	case SWAP_OFF:
		simple_lock(&uvm.swap_data_lock);
		if ((sdp = swaplist_find(vp, 0)) == NULL) {
			simple_unlock(&uvm.swap_data_lock);
			error = ENXIO;
			break;
		}

		/*
		 * If a device isn't in use or enabled, we
		 * can't stop swapping from it (again).
		 */
		if ((sdp->swd_flags & (SWF_INUSE|SWF_ENABLE)) == 0) {
			simple_unlock(&uvm.swap_data_lock);
			error = EBUSY;
			break;
		}

		/*
		 * do the real work.
		 */
		error = swap_off(p, sdp);
		break;

	default:
		error = EINVAL;
	}

	/*
	 * done!  release the ref gained by namei() and unlock.
	 */
	vput(vp);

out:
	lockmgr(&swap_syscall_lock, LK_RELEASE, NULL);

	UVMHIST_LOG(pdhist, "<- done!  error=%d", error, 0, 0, 0);
	return (error);
}

/*
 * uvm_swap_stats: implements swapctl(SWAP_STATS).  The function is kept
 * away from sys_swapctl() in order to allow COMPAT_* swapctl()
 * emulation to use it directly without going through sys_swapctl().
 * The problem with using sys_swapctl() there is that it involves
 * copying the swapent array to the stackgap, and this array's size
 * is not known at build time.  Hence it would not be possible to
 * ensure it would fit in the stackgap in any case.
 */
void
uvm_swap_stats(cmd, sep, sec, retval)
	int cmd;
	struct swapent *sep;
	int sec;
	register_t *retval;
{
	struct swappri *spp;
	struct swapdev *sdp;
	int count = 0;

	LIST_FOREACH(spp, &swap_priority, spi_swappri) {
		for (sdp = CIRCLEQ_FIRST(&spp->spi_swapdev);
		     sdp != (void *)&spp->spi_swapdev && sec-- > 0;
		     sdp = CIRCLEQ_NEXT(sdp, swd_next)) {
			/*
			 * backwards compatibility for system call.
			 * note that we use 'struct oswapent' as an
			 * overlay into both 'struct swapdev' and
			 * the userland 'struct swapent', as we
			 * want to retain backwards compatibility
			 * with NetBSD 1.3.
			 */
			sdp->swd_ose.ose_inuse =
			    btodb((u_int64_t)sdp->swd_npginuse <<
			    PAGE_SHIFT);
			(void)memcpy(sep, &sdp->swd_ose,
			    sizeof(struct oswapent));

			/* now copy out the path if necessary */
#if defined(COMPAT_13)
			if (cmd == SWAP_STATS)
#endif
				(void)memcpy(&sep->se_path, sdp->swd_path,
				    sdp->swd_pathlen);

			count++;
#if defined(COMPAT_13)
			if (cmd == SWAP_OSTATS)
				sep = (struct swapent *)
				    ((struct oswapent *)sep + 1);
			else
#endif
				sep++;
		}
	}

	*retval = count;
	return;
}

/*
 * swap_on: attempt to enable a swapdev for swapping.  note that the
 *	swapdev is already on the global list, but disabled (marked
 *	SWF_FAKE).
 *
 * => we avoid the start of the disk (to protect disk labels)
 * => we also avoid the miniroot, if we are swapping to root.
 * => caller should leave uvm.swap_data_lock unlocked, we may lock it
 *	if needed.
 */
static int
swap_on(p, sdp)
	struct proc *p;
	struct swapdev *sdp;
{
	static int count = 0;	/* static */
	struct vnode *vp;
	int error, npages, nblocks, size;
	long addr;
	u_long result;
	struct vattr va;
#ifdef NFS
	extern int (**nfsv2_vnodeop_p) __P((void *));
#endif /* NFS */
	dev_t dev;
	UVMHIST_FUNC("swap_on"); UVMHIST_CALLED(pdhist);

	/*
	 * we want to enable swapping on sdp.  the swd_vp contains
	 * the vnode we want (locked and ref'd), and the swd_dev
	 * contains the dev_t of the file, if it is a block device.
	 */

	vp = sdp->swd_vp;
	dev = sdp->swd_dev;

	/*
	 * open the swap file (mostly useful for block device files to
	 * let device driver know what is up).
	 *
	 * we skip the open/close for root on swap because the root
	 * has already been opened when root was mounted (mountroot).
	 */
	if (vp != rootvp) {
		if ((error = VOP_OPEN(vp, FREAD|FWRITE, p->p_ucred, p)))
			return (error);
	}

	/* XXX this only works for block devices */
	UVMHIST_LOG(pdhist, "  dev=%d, major(dev)=%d", dev, major(dev), 0,0);

	/*
	 * we now need to determine the size of the swap area.  for
	 * block specials we can call the d_psize function.
	 * for normal files, we must stat [get attrs].
	 *
	 * we put the result in nblks.
	 * for normal files, we also want the filesystem block size
	 * (which we get with statfs).
	 */
	switch (vp->v_type) {
	case VBLK:
		if (bdevsw[major(dev)].d_psize == 0 ||
		    (nblocks = (*bdevsw[major(dev)].d_psize)(dev)) == -1) {
			error = ENXIO;
			goto bad;
		}
		break;

	case VREG:
		if ((error = VOP_GETATTR(vp, &va, p->p_ucred, p)))
			goto bad;
		nblocks = (int)btodb(va.va_size);
		if ((error =
		     VFS_STATFS(vp->v_mount, &vp->v_mount->mnt_stat, p)) != 0)
			goto bad;

		sdp->swd_bsize = vp->v_mount->mnt_stat.f_iosize;
		/*
		 * limit the max # of outstanding I/O requests we issue
		 * at any one time.  take it easy on NFS servers.
		 */
#ifdef NFS
		if (vp->v_op == nfsv2_vnodeop_p)
			sdp->swd_maxactive = 2; /* XXX */
		else
#endif /* NFS */
			sdp->swd_maxactive = 8; /* XXX */
		break;

	default:
		error = ENXIO;
		goto bad;
	}

	/*
	 * save nblocks in a safe place and convert to pages.
	 */

	sdp->swd_ose.ose_nblks = nblocks;
	npages = dbtob((u_int64_t)nblocks) >> PAGE_SHIFT;

	/*
	 * for block special files, we want to make sure that we leave
	 * the disklabel and bootblocks alone, so we arrange to skip
	 * over them (arbitrarily choosing to skip PAGE_SIZE bytes).
	 * note that because of this the "size" can be less than the
	 * actual number of blocks on the device.
	 */
	if (vp->v_type == VBLK) {
		/* we use pages 1 to (npages - 1) [inclusive] */
		size = npages - 1;
		addr = 1;
	} else {
		/* we use pages 0 to (npages - 1) [inclusive] */
		size = npages;
		addr = 0;
	}

	/*
	 * make sure we have enough blocks for a reasonable sized swap
	 * area.  we want at least one page.
	 */

	if (size < 1) {
		UVMHIST_LOG(pdhist, "  size <= 1!!", 0, 0, 0, 0);
		error = EINVAL;
		goto bad;
	}

	UVMHIST_LOG(pdhist, "  dev=%x: size=%d addr=%ld\n", dev, size, addr, 0);

	/*
	 * now we need to allocate an extent to manage this swap device
	 */
	snprintf(sdp->swd_exname, sizeof(sdp->swd_exname), "swap0x%04x",
	    count++);

	/* note that extent_create's 3rd arg is inclusive, thus "- 1" */
	sdp->swd_ex = extent_create(sdp->swd_exname, 0, npages - 1, M_VMSWAP,
				    0, 0, EX_WAITOK);
	/* allocate the `saved' region from the extent so it won't be used */
	if (addr) {
		if (extent_alloc_region(sdp->swd_ex, 0, addr, EX_WAITOK))
			panic("disklabel region");
	}

	/*
	 * if the vnode we are swapping to is the root vnode
	 * (i.e. we are swapping to the miniroot) then we want
	 * to make sure we don't overwrite it.  do a statfs to
	 * find its size and skip over it.
	 */
	if (vp == rootvp) {
		struct mount *mp;
		struct statfs *sp;
		int rootblocks, rootpages;

		mp = rootvnode->v_mount;
		sp = &mp->mnt_stat;
		rootblocks = sp->f_blocks * btodb(sp->f_bsize);
		rootpages = round_page(dbtob(rootblocks)) >> PAGE_SHIFT;
		if (rootpages > size)
			panic("swap_on: miniroot larger than swap?");

		if (extent_alloc_region(sdp->swd_ex, addr,
					rootpages, EX_WAITOK))
			panic("swap_on: unable to preserve miniroot");

		size -= rootpages;
		printf("Preserved %d pages of miniroot ", rootpages);
		printf("leaving %d pages of swap\n", size);
	}

	/*
	 * try to add anons to reflect the new swap space.
	 */

	error = uvm_anon_add(size);
	if (error) {
		goto bad;
	}

	/*
	 * add a ref to vp to reflect usage as a swap device.
	 */
	vref(vp);

	/*
	 * now add the new swapdev to the drum and enable.
	 */
	if (extent_alloc(swapmap, npages, EX_NOALIGN, EX_NOBOUNDARY,
	    EX_WAITOK, &result))
		panic("swapdrum_add");

	sdp->swd_drumoffset = (int)result;
	sdp->swd_drumsize = npages;
	sdp->swd_npages = size;
	simple_lock(&uvm.swap_data_lock);
	sdp->swd_flags &= ~SWF_FAKE;	/* going live */
	sdp->swd_flags |= (SWF_INUSE|SWF_ENABLE);
	uvmexp.swpages += size;
	simple_unlock(&uvm.swap_data_lock);
	return (0);

	/*
	 * failure: clean up and return error.
	 */

bad:
	if (sdp->swd_ex) {
		extent_destroy(sdp->swd_ex);
	}
	if (vp != rootvp) {
		(void)VOP_CLOSE(vp, FREAD|FWRITE, p->p_ucred, p);
	}
	return (error);
}

/*
 * swap_off: stop swapping on swapdev
 *
 * => swap data should be locked, we will unlock.
 */
static int
swap_off(p, sdp)
	struct proc *p;
	struct swapdev *sdp;
{
	UVMHIST_FUNC("swap_off"); UVMHIST_CALLED(pdhist);
	UVMHIST_LOG(pdhist, "  dev=%x", sdp->swd_dev,0,0,0);

	/* disable the swap area being removed */
	sdp->swd_flags &= ~SWF_ENABLE;
	simple_unlock(&uvm.swap_data_lock);

	/*
	 * the idea is to find all the pages that are paged out to this
	 * device, and page them all in.  in uvm, swap-backed pageable
	 * memory can take two forms: aobjs and anons.  call the
	 * swapoff hook for each subsystem to bring in pages.
	 */

	if (uao_swap_off(sdp->swd_drumoffset,
			 sdp->swd_drumoffset + sdp->swd_drumsize) ||
	    anon_swap_off(sdp->swd_drumoffset,
			  sdp->swd_drumoffset + sdp->swd_drumsize)) {

		simple_lock(&uvm.swap_data_lock);
		sdp->swd_flags |= SWF_ENABLE;
		simple_unlock(&uvm.swap_data_lock);
		return ENOMEM;
	}
	KASSERT(sdp->swd_npginuse == sdp->swd_npgbad);

	/*
	 * done with the vnode.
	 * drop our ref on the vnode before calling VOP_CLOSE()
	 * so that spec_close() can tell if this is the last close.
	 */
	vrele(sdp->swd_vp);
	if (sdp->swd_vp != rootvp) {
		(void) VOP_CLOSE(sdp->swd_vp, FREAD|FWRITE, p->p_ucred, p);
	}

	/* remove anons from the system */
	uvm_anon_remove(sdp->swd_npages);

	simple_lock(&uvm.swap_data_lock);
	uvmexp.swpages -= sdp->swd_npages;

	if (swaplist_find(sdp->swd_vp, 1) == NULL)
		panic("swap_off: swapdev not in list");
	swaplist_trim();
	simple_unlock(&uvm.swap_data_lock);

	/*
	 * free all resources!
	 */
	extent_free(swapmap, sdp->swd_drumoffset, sdp->swd_drumsize,
		    EX_WAITOK);
	extent_destroy(sdp->swd_ex);
	free(sdp, M_VMSWAP);
	return (0);
}

/*
 * /dev/drum interface and i/o functions
 */

/*
 * swread: the read function for the drum (just a call to physio)
 */
/*ARGSUSED*/
int
swread(dev, uio, ioflag)
	dev_t dev;
	struct uio *uio;
	int ioflag;
{
	UVMHIST_FUNC("swread"); UVMHIST_CALLED(pdhist);

	UVMHIST_LOG(pdhist, "  dev=%x offset=%qx", dev, uio->uio_offset, 0, 0);
	return (physio(swstrategy, NULL, dev, B_READ, minphys, uio));
}

/*
 * swwrite: the write function for the drum (just a call to physio)
 */
/*ARGSUSED*/
int
swwrite(dev, uio, ioflag)
	dev_t dev;
	struct uio *uio;
	int ioflag;
{
	UVMHIST_FUNC("swwrite"); UVMHIST_CALLED(pdhist);

	UVMHIST_LOG(pdhist, "  dev=%x offset=%qx", dev, uio->uio_offset, 0, 0);
	return (physio(swstrategy, NULL, dev, B_WRITE, minphys, uio));
}

/*
 * swstrategy: perform I/O on the drum
 *
 * => we must map the i/o request from the drum to the correct swapdev.
 */
void
swstrategy(bp)
	struct buf *bp;
{
	struct swapdev *sdp;
	struct vnode *vp;
	int s, pageno, bn;
	UVMHIST_FUNC("swstrategy"); UVMHIST_CALLED(pdhist);

	/*
	 * convert block number to swapdev.  note that swapdev can't
	 * be yanked out from under us because we are holding resources
	 * in it (i.e. the blocks we are doing I/O on).
	 */
	pageno = dbtob((int64_t)bp->b_blkno) >> PAGE_SHIFT;
	simple_lock(&uvm.swap_data_lock);
	sdp = swapdrum_getsdp(pageno);
	simple_unlock(&uvm.swap_data_lock);
	if (sdp == NULL) {
		bp->b_error = EINVAL;
		bp->b_flags |= B_ERROR;
		biodone(bp);
		UVMHIST_LOG(pdhist, "  failed to get swap device", 0, 0, 0, 0);
		return;
	}

	/*
	 * convert drum page number to block number on this swapdev.
	 */

	pageno -= sdp->swd_drumoffset;	/* page # on swapdev */
	bn = btodb((u_int64_t)pageno << PAGE_SHIFT); /* convert to diskblock */
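
	/*
	 * as a worked example, assuming the common (but machine
	 * dependent) case of 4096-byte pages and 512-byte disk blocks:
	 * drum page 3 on a swapdev with swd_drumoffset 0 stays page 3,
	 * i.e. byte offset 12288, which btodb() maps to disk block 24.
	 */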

	UVMHIST_LOG(pdhist, " %s: mapoff=%x bn=%x bcount=%ld",
	    ((bp->b_flags & B_READ) == 0) ? "write" : "read",
	    sdp->swd_drumoffset, bn, bp->b_bcount);

	/*
	 * for block devices we finish up here.
	 * for regular files we have to do more work which we delegate
	 * to sw_reg_strategy().
	 */

	switch (sdp->swd_vp->v_type) {
	default:
		panic("swstrategy: vnode type 0x%x", sdp->swd_vp->v_type);

	case VBLK:

		/*
		 * must convert "bp" from an I/O on /dev/drum to an I/O
		 * on the swapdev (sdp).
		 */
		s = splbio();
		bp->b_blkno = bn;		/* swapdev block number */
		vp = sdp->swd_vp;		/* swapdev vnode pointer */
		bp->b_dev = sdp->swd_dev;	/* swapdev dev_t */

		/*
		 * if we are doing a write, we have to redirect the i/o on
		 * drum's v_numoutput counter to the swapdevs.
		 */
		if ((bp->b_flags & B_READ) == 0) {
			vwakeup(bp);	/* kills one 'v_numoutput' on drum */
			vp->v_numoutput++;	/* put it on swapdev */
		}

		/*
		 * finally plug in swapdev vnode and start I/O
		 */
		bp->b_vp = vp;
		splx(s);
		VOP_STRATEGY(bp);
		return;

	case VREG:
		/*
		 * delegate to sw_reg_strategy function.
		 */
		sw_reg_strategy(sdp, bp, bn);
		return;
	}
	/* NOTREACHED */
}

/*
 * sw_reg_strategy: handle swap i/o to regular files
 */
static void
sw_reg_strategy(sdp, bp, bn)
	struct swapdev	*sdp;
	struct buf	*bp;
	int		bn;
{
	struct vnode	*vp;
	struct vndxfer	*vnx;
	daddr_t		nbn;
	caddr_t		addr;
	off_t		byteoff;
	int		s, off, nra, error, sz, resid;
	UVMHIST_FUNC("sw_reg_strategy"); UVMHIST_CALLED(pdhist);

	/*
	 * allocate a vndxfer head for this transfer and point it to
	 * our buffer.
	 */
	getvndxfer(vnx);
	vnx->vx_flags = VX_BUSY;
	vnx->vx_error = 0;
	vnx->vx_pending = 0;
	vnx->vx_bp = bp;
	vnx->vx_sdp = sdp;

	/*
	 * setup for main loop where we read filesystem blocks into
	 * our buffer.
	 */
	error = 0;
	bp->b_resid = bp->b_bcount;	/* nothing transferred yet! */
	addr = bp->b_data;		/* current position in buffer */
	byteoff = dbtob((u_int64_t)bn);

	for (resid = bp->b_resid; resid; resid -= sz) {
		struct vndbuf	*nbp;

		/*
		 * translate byteoffset into block number.  return values:
		 *   vp = vnode of underlying device
		 *  nbn = new block number (on underlying vnode dev)
		 *  nra = num blocks we can read-ahead (excludes requested
		 *	block)
		 */
		nra = 0;
		error = VOP_BMAP(sdp->swd_vp, byteoff / sdp->swd_bsize,
				 &vp, &nbn, &nra);

		if (error == 0 && nbn == (daddr_t)-1) {
			/*
			 * this used to just set error, but that doesn't
			 * do the right thing.  Instead, it causes random
			 * memory errors.  The panic() should remain until
			 * this condition doesn't destabilize the system.
			 */
#if 1
			panic("sw_reg_strategy: swap to sparse file");
#else
			error = EIO;	/* failure */
#endif
		}

		/*
		 * punt if there was an error or a hole in the file.
		 * we must wait for any i/o ops we have already started
		 * to finish before returning.
		 *
		 * XXX we could deal with holes here but it would be
		 * a hassle (in the write case).
		 */
		if (error) {
			s = splbio();
			vnx->vx_error = error;	/* pass error up */
			goto out;
		}

		/*
		 * compute the size ("sz") of this transfer (in bytes).
		 */
		off = byteoff % sdp->swd_bsize;
		sz = (1 + nra) * sdp->swd_bsize - off;
		if (sz > resid)
			sz = resid;

		UVMHIST_LOG(pdhist, "sw_reg_strategy: "
			    "vp %p/%p offset 0x%x/0x%x",
			    sdp->swd_vp, vp, byteoff, nbn);

		/*
		 * now get a buf structure.  note that the vb_buf is
		 * at the front of the nbp structure so that you can
		 * cast pointers between the two structures easily.
		 */
		getvndbuf(nbp);
		nbp->vb_buf.b_flags    = bp->b_flags | B_CALL;
		nbp->vb_buf.b_bcount   = sz;
		nbp->vb_buf.b_bufsize  = sz;
		nbp->vb_buf.b_error    = 0;
		nbp->vb_buf.b_data     = addr;
		nbp->vb_buf.b_lblkno   = 0;
		nbp->vb_buf.b_blkno    = nbn + btodb(off);
		nbp->vb_buf.b_rawblkno = nbp->vb_buf.b_blkno;
		nbp->vb_buf.b_iodone   = sw_reg_iodone;
		nbp->vb_buf.b_vp       = vp;
		if (vp->v_type == VBLK) {
			nbp->vb_buf.b_dev = vp->v_rdev;
		}
		LIST_INIT(&nbp->vb_buf.b_dep);

		nbp->vb_xfer = vnx;	/* patch it back in to vnx */

		/*
		 * Just sort by block number
		 */
		s = splbio();
		if (vnx->vx_error != 0) {
			putvndbuf(nbp);
			goto out;
		}
		vnx->vx_pending++;

		/* sort it in and start I/O if we are not over our limit */
		disksort_blkno(&sdp->swd_tab, &nbp->vb_buf);
		sw_reg_start(sdp);
		splx(s);

		/*
		 * advance to the next I/O
		 */
		byteoff += sz;
		addr += sz;
	}

	s = splbio();

out: /* Arrive here at splbio */
	vnx->vx_flags &= ~VX_BUSY;
	if (vnx->vx_pending == 0) {
		if (vnx->vx_error != 0) {
			bp->b_error = vnx->vx_error;
			bp->b_flags |= B_ERROR;
		}
		putvndxfer(vnx);
		biodone(bp);
	}
	splx(s);
}

/*
 * sw_reg_start: start an I/O request on the requested swapdev
 *
 * => reqs are sorted by disksort (above)
 */
static void
sw_reg_start(sdp)
	struct swapdev	*sdp;
{
	struct buf	*bp;
	UVMHIST_FUNC("sw_reg_start"); UVMHIST_CALLED(pdhist);

	/* recursion control */
	if ((sdp->swd_flags & SWF_BUSY) != 0)
		return;

	sdp->swd_flags |= SWF_BUSY;

	while (sdp->swd_active < sdp->swd_maxactive) {
		bp = BUFQ_FIRST(&sdp->swd_tab);
		if (bp == NULL)
			break;
		BUFQ_REMOVE(&sdp->swd_tab, bp);
		sdp->swd_active++;

		UVMHIST_LOG(pdhist,
		    "sw_reg_start:  bp %p vp %p blkno %p cnt %lx",
		    bp, bp->b_vp, bp->b_blkno, bp->b_bcount);
		if ((bp->b_flags & B_READ) == 0)
			bp->b_vp->v_numoutput++;

		VOP_STRATEGY(bp);
	}
	sdp->swd_flags &= ~SWF_BUSY;
}

/*
 * sw_reg_iodone: one of our i/o's has completed and needs post-i/o cleanup
 *
 * => note that we can recover the vndbuf struct by casting the buf ptr
 */
static void
sw_reg_iodone(bp)
	struct buf *bp;
{
	struct vndbuf *vbp = (struct vndbuf *) bp;
	struct vndxfer *vnx = vbp->vb_xfer;
	struct buf *pbp = vnx->vx_bp;		/* parent buffer */
	struct swapdev	*sdp = vnx->vx_sdp;
	int s, resid;
	UVMHIST_FUNC("sw_reg_iodone"); UVMHIST_CALLED(pdhist);

	UVMHIST_LOG(pdhist, "  vbp=%p vp=%p blkno=%x addr=%p",
	    vbp, vbp->vb_buf.b_vp, vbp->vb_buf.b_blkno, vbp->vb_buf.b_data);
	UVMHIST_LOG(pdhist, "  cnt=%lx resid=%lx",
	    vbp->vb_buf.b_bcount, vbp->vb_buf.b_resid, 0, 0);

	/*
	 * protect vbp at splbio and update.
	 */

	s = splbio();
	resid = vbp->vb_buf.b_bcount - vbp->vb_buf.b_resid;
	pbp->b_resid -= resid;
	vnx->vx_pending--;

	if (vbp->vb_buf.b_error) {
		UVMHIST_LOG(pdhist, "  got error=%d !",
		    vbp->vb_buf.b_error, 0, 0, 0);

		/* pass error upward */
		vnx->vx_error = vbp->vb_buf.b_error;
	}

	/*
	 * kill vbp structure
	 */
	putvndbuf(vbp);

	/*
	 * wrap up this transaction if it has run to completion or, in
	 * case of an error, when all auxiliary buffers have returned.
	 */
	if (vnx->vx_error != 0) {
		/* pass error upward */
		pbp->b_flags |= B_ERROR;
		pbp->b_error = vnx->vx_error;
		if ((vnx->vx_flags & VX_BUSY) == 0 && vnx->vx_pending == 0) {
			putvndxfer(vnx);
			biodone(pbp);
		}
	} else if (pbp->b_resid == 0) {
		KASSERT(vnx->vx_pending == 0);
		if ((vnx->vx_flags & VX_BUSY) == 0) {
			UVMHIST_LOG(pdhist, "  iodone error=%d !",
			    pbp, vnx->vx_error, 0, 0);
			putvndxfer(vnx);
			biodone(pbp);
		}
	}

	/*
	 * done!  start next swapdev I/O if one is pending
	 */
	sdp->swd_active--;
	sw_reg_start(sdp);
	splx(s);
}


/*
 * uvm_swap_alloc: allocate space on swap
 *
 * => allocation is done "round robin" down the priority list, as we
 *	allocate in a priority we "rotate" the circle queue.
 * => space can be freed with uvm_swap_free
 * => we return the page slot number in /dev/drum (0 == invalid slot)
 * => we lock uvm.swap_data_lock
 * => XXXMRG: "LESSOK" INTERFACE NEEDED TO EXTENT SYSTEM
 */
int
uvm_swap_alloc(nslots, lessok)
	int *nslots;	/* IN/OUT */
	boolean_t lessok;
{
	struct swapdev *sdp;
	struct swappri *spp;
	u_long	result;
	UVMHIST_FUNC("uvm_swap_alloc"); UVMHIST_CALLED(pdhist);

	/*
	 * no swap devices configured yet?  definite failure.
	 */
	if (uvmexp.nswapdev < 1)
		return 0;

	/*
	 * lock data lock, convert slots into blocks, and enter loop
	 */
	simple_lock(&uvm.swap_data_lock);

ReTry:	/* XXXMRG */
	LIST_FOREACH(spp, &swap_priority, spi_swappri) {
		CIRCLEQ_FOREACH(sdp, &spp->spi_swapdev, swd_next) {
			/* if it's not enabled, then we can't swap from it */
			if ((sdp->swd_flags & SWF_ENABLE) == 0)
				continue;
			if (sdp->swd_npginuse + *nslots > sdp->swd_npages)
				continue;
			if (extent_alloc(sdp->swd_ex, *nslots, EX_NOALIGN,
					 EX_NOBOUNDARY, EX_MALLOCOK|EX_NOWAIT,
					 &result) != 0) {
				continue;
			}

			/*
			 * successful allocation!  now rotate the circleq.
			 */
			CIRCLEQ_REMOVE(&spp->spi_swapdev, sdp, swd_next);
			CIRCLEQ_INSERT_TAIL(&spp->spi_swapdev, sdp, swd_next);
			sdp->swd_npginuse += *nslots;
			uvmexp.swpginuse += *nslots;
			simple_unlock(&uvm.swap_data_lock);
			/* done!  return drum slot number */
			UVMHIST_LOG(pdhist,
			    "success!  returning %d slots starting at %d",
			    *nslots, result + sdp->swd_drumoffset, 0, 0);
			return (result + sdp->swd_drumoffset);
		}
	}

	/* XXXMRG: BEGIN HACK */
	if (*nslots > 1 && lessok) {
		*nslots = 1;
		goto ReTry;	/* XXXMRG: ugh!  extent should support this for us */
	}
	/* XXXMRG: END HACK */

	simple_unlock(&uvm.swap_data_lock);
	return 0;
}
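
/*
 * a sketch of the expected caller pattern (the caller shown is
 * hypothetical): ask for a contiguous range but accept fewer slots if
 * the extents are fragmented, and treat a return of 0 as failure:
 *
 *	int nslots = npages;
 *	int startslot = uvm_swap_alloc(&nslots, TRUE);
 *	if (startslot == 0)
 *		return ENOMEM;
 *	(on success, nslots may have been reduced to 1 when lessok)
 */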

/*
 * uvm_swap_markbad: keep track of swap ranges where we've had i/o errors
 *
 * => we lock uvm.swap_data_lock
 */
void
uvm_swap_markbad(startslot, nslots)
	int startslot;
	int nslots;
{
	struct swapdev *sdp;
	UVMHIST_FUNC("uvm_swap_markbad"); UVMHIST_CALLED(pdhist);

	simple_lock(&uvm.swap_data_lock);
	sdp = swapdrum_getsdp(startslot);

	/*
	 * we just keep track of how many pages have been marked bad
	 * in this device, to make everything add up in swap_off().
	 * we assume here that the range of slots will all be within
	 * one swap device.
	 */

	sdp->swd_npgbad += nslots;
	UVMHIST_LOG(pdhist, "now %d bad", sdp->swd_npgbad, 0,0,0);
	simple_unlock(&uvm.swap_data_lock);
}

/*
 * uvm_swap_free: free swap slots
 *
 * => this can be all or part of an allocation made by uvm_swap_alloc
 * => we lock uvm.swap_data_lock
 */
void
uvm_swap_free(startslot, nslots)
	int startslot;
	int nslots;
{
	struct swapdev *sdp;
	UVMHIST_FUNC("uvm_swap_free"); UVMHIST_CALLED(pdhist);

	UVMHIST_LOG(pdhist, "freeing %d slots starting at %d", nslots,
	    startslot, 0, 0);

	/*
	 * ignore attempts to free the "bad" slot.
	 */

	if (startslot == SWSLOT_BAD) {
		return;
	}

	/*
	 * convert drum slot offset back to sdp, free the blocks
	 * in the extent, and return.  must hold pri lock to do
	 * lookup and access the extent.
	 */

	simple_lock(&uvm.swap_data_lock);
	sdp = swapdrum_getsdp(startslot);
	KASSERT(uvmexp.nswapdev >= 1);
	KASSERT(sdp != NULL);
	KASSERT(sdp->swd_npginuse >= nslots);
	if (extent_free(sdp->swd_ex, startslot - sdp->swd_drumoffset, nslots,
			EX_MALLOCOK|EX_NOWAIT) != 0) {
		printf("warning: resource shortage: %d pages of swap lost\n",
		       nslots);
	}
	sdp->swd_npginuse -= nslots;
	uvmexp.swpginuse -= nslots;
	simple_unlock(&uvm.swap_data_lock);
}

/*
 * uvm_swap_put: put any number of pages into a contig place on swap
 *
 * => can be sync or async
 */

int
uvm_swap_put(swslot, ppsp, npages, flags)
	int swslot;
	struct vm_page **ppsp;
	int npages;
	int flags;
{
	int error;

	error = uvm_swap_io(ppsp, swslot, npages, B_WRITE |
	    ((flags & PGO_SYNCIO) ? 0 : B_ASYNC));
	return error;
}

/*
 * uvm_swap_get: get a single page from swap
 *
 * => usually a sync op (from fault)
 */

int
uvm_swap_get(page, swslot, flags)
	struct vm_page *page;
	int swslot, flags;
{
	int error;

	uvmexp.nswget++;
	KASSERT(flags & PGO_SYNCIO);
	if (swslot == SWSLOT_BAD) {
		return EIO;
	}
	error = uvm_swap_io(&page, swslot, 1, B_READ |
	    ((flags & PGO_SYNCIO) ? 0 : B_ASYNC));
	if (error == 0) {

		/*
		 * this page is no longer only in swap.
		 */

		simple_lock(&uvm.swap_data_lock);
		KASSERT(uvmexp.swpgonly > 0);
		uvmexp.swpgonly--;
		simple_unlock(&uvm.swap_data_lock);
	}
	return error;
}

/*
 * uvm_swap_io: do an i/o operation to swap
 */

static int
uvm_swap_io(pps, startslot, npages, flags)
	struct vm_page **pps;
	int startslot, npages, flags;
{
	daddr_t startblk;
	struct	buf *bp;
	vaddr_t kva;
	int	error, s, mapinflags;
	boolean_t write, async;
	UVMHIST_FUNC("uvm_swap_io"); UVMHIST_CALLED(pdhist);

	UVMHIST_LOG(pdhist, "<- called, startslot=%d, npages=%d, flags=%d",
	    startslot, npages, flags, 0);

	write = (flags & B_READ) == 0;
	async = (flags & B_ASYNC) != 0;

	/*
	 * convert starting drum slot to block number
	 */

	startblk = btodb((u_int64_t)startslot << PAGE_SHIFT);

	/*
	 * first, map the pages into the kernel.
	 */

	mapinflags = !write ?
		UVMPAGER_MAPIN_WAITOK|UVMPAGER_MAPIN_READ :
		UVMPAGER_MAPIN_WAITOK|UVMPAGER_MAPIN_WRITE;
	kva = uvm_pagermapin(pps, npages, mapinflags);

	/*
	 * now allocate a buf for the i/o.
	 */

	s = splbio();
	bp = pool_get(&bufpool, PR_WAITOK);
	splx(s);

	/*
	 * fill in the bp/sbp.  we currently route our i/o through
	 * /dev/drum's vnode [swapdev_vp].
	 */

	bp->b_flags = B_BUSY | B_NOCACHE | (flags & (B_READ|B_ASYNC));
	bp->b_proc = &proc0;	/* XXX */
	bp->b_vnbufs.le_next = NOLIST;
	bp->b_data = (caddr_t)kva;
	bp->b_blkno = startblk;
	bp->b_vp = swapdev_vp;
	bp->b_dev = swapdev_vp->v_rdev;
	bp->b_bufsize = bp->b_bcount = npages << PAGE_SHIFT;
	LIST_INIT(&bp->b_dep);

	/*
	 * bump v_numoutput (counter of number of active outputs).
	 */

	if (write) {
		s = splbio();
		swapdev_vp->v_numoutput++;
		splx(s);
	}

	/*
	 * for async ops we must set up the iodone handler.
	 */

	if (async) {
		bp->b_flags |= B_CALL;
		bp->b_iodone = uvm_aio_biodone;
		UVMHIST_LOG(pdhist, "doing async!", 0, 0, 0, 0);
	}
	UVMHIST_LOG(pdhist,
	    "about to start io: data = %p blkno = 0x%x, bcount = %ld",
	    bp->b_data, bp->b_blkno, bp->b_bcount, 0);

	/*
	 * now we start the I/O, and if async, return.
	 */

	VOP_STRATEGY(bp);
	if (async)
		return 0;

	/*
	 * must be sync i/o.  wait for it to finish
	 */

	error = biowait(bp);

	/*
	 * kill the pager mapping
	 */

	uvm_pagermapout(kva, npages);

	/*
	 * now dispose of the buf and we're done.
	 */

	s = splbio();
	if (write)
		vwakeup(bp);
	pool_put(&bufpool, bp);
	splx(s);
	UVMHIST_LOG(pdhist, "<- done (sync)  error=%d", error, 0, 0, 0);
	return (error);
}