uvm_swap.c revision 1.94

1 1.94 thorpej /* $NetBSD: uvm_swap.c,v 1.94 2005/06/27 02:29:32 thorpej Exp $ */
2 1.1 mrg
3 1.1 mrg /*
4 1.1 mrg * Copyright (c) 1995, 1996, 1997 Matthew R. Green
5 1.1 mrg * All rights reserved.
6 1.1 mrg *
7 1.1 mrg * Redistribution and use in source and binary forms, with or without
8 1.1 mrg * modification, are permitted provided that the following conditions
9 1.1 mrg * are met:
10 1.1 mrg * 1. Redistributions of source code must retain the above copyright
11 1.1 mrg * notice, this list of conditions and the following disclaimer.
12 1.1 mrg * 2. Redistributions in binary form must reproduce the above copyright
13 1.1 mrg * notice, this list of conditions and the following disclaimer in the
14 1.1 mrg * documentation and/or other materials provided with the distribution.
15 1.1 mrg * 3. The name of the author may not be used to endorse or promote products
16 1.1 mrg * derived from this software without specific prior written permission.
17 1.1 mrg *
18 1.1 mrg * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 1.1 mrg * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 1.1 mrg * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 1.1 mrg * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 1.1 mrg * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
23 1.1 mrg * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
24 1.1 mrg * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
25 1.1 mrg * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
26 1.1 mrg * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 1.1 mrg * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 1.1 mrg * SUCH DAMAGE.
29 1.3 mrg *
30 1.3 mrg * from: NetBSD: vm_swap.c,v 1.52 1997/12/02 13:47:37 pk Exp
31 1.3 mrg * from: Id: uvm_swap.c,v 1.1.2.42 1998/02/02 20:38:06 chuck Exp
32 1.1 mrg */
33 1.57 lukem
34 1.57 lukem #include <sys/cdefs.h>
35 1.94 thorpej __KERNEL_RCSID(0, "$NetBSD: uvm_swap.c,v 1.94 2005/06/27 02:29:32 thorpej Exp $");
36 1.5 mrg
37 1.6 thorpej #include "fs_nfs.h"
38 1.5 mrg #include "opt_uvmhist.h"
39 1.16 mrg #include "opt_compat_netbsd.h"
40 1.41 chs #include "opt_ddb.h"
41 1.1 mrg
42 1.1 mrg #include <sys/param.h>
43 1.1 mrg #include <sys/systm.h>
44 1.1 mrg #include <sys/buf.h>
45 1.89 yamt #include <sys/bufq.h>
46 1.36 mrg #include <sys/conf.h>
47 1.1 mrg #include <sys/proc.h>
48 1.1 mrg #include <sys/namei.h>
49 1.1 mrg #include <sys/disklabel.h>
50 1.1 mrg #include <sys/errno.h>
51 1.1 mrg #include <sys/kernel.h>
52 1.1 mrg #include <sys/malloc.h>
53 1.1 mrg #include <sys/vnode.h>
54 1.1 mrg #include <sys/file.h>
55 1.1 mrg #include <sys/extent.h>
56 1.90 yamt #include <sys/blist.h>
57 1.1 mrg #include <sys/mount.h>
58 1.12 pk #include <sys/pool.h>
59 1.74 thorpej #include <sys/sa.h>
60 1.1 mrg #include <sys/syscallargs.h>
61 1.17 mrg #include <sys/swap.h>
62 1.1 mrg
63 1.1 mrg #include <uvm/uvm.h>
64 1.1 mrg
65 1.1 mrg #include <miscfs/specfs/specdev.h>
66 1.1 mrg
67 1.1 mrg /*
68 1.1 mrg * uvm_swap.c: manage configuration and i/o to swap space.
69 1.1 mrg */
70 1.1 mrg
71 1.1 mrg /*
72 1.1 mrg * swap space is managed in the following way:
73 1.51 chs *
74 1.1 mrg * each swap partition or file is described by a "swapdev" structure.
75 1.1 mrg * each "swapdev" structure contains a "swapent" structure which contains
76 1.1 mrg * information that is passed up to the user (via system calls).
77 1.1 mrg *
78 1.1 mrg * each swap partition is assigned a "priority" (int) which controls
79 1.1 mrg * swap partition usage.
80 1.1 mrg *
81 1.1 mrg * the system maintains a global data structure describing all swap
82 1.1 mrg * partitions/files. there is a sorted LIST of "swappri" structures
83 1.1 mrg * which describe "swapdev"'s at that priority. this LIST is headed
84 1.51 chs * by the "swap_priority" global var. each "swappri" contains a
85 1.1 mrg * CIRCLEQ of "swapdev" structures at that priority.
86 1.1 mrg *
87 1.1 mrg * locking:
88 1.1 mrg * - swap_syscall_lock (sleep lock): this lock serializes the swapctl
89 1.1 mrg * system call and prevents the swap priority list from changing
90 1.1 mrg * while we are in the middle of a system call (e.g. SWAP_STATS).
91 1.26 chs * - uvm.swap_data_lock (simple_lock): this lock protects all swap data
92 1.1 mrg * structures including the priority list, the swapdev structures,
93 1.1 mrg * and the swapmap extent.
94 1.1 mrg *
95 1.1 mrg * each swap device has the following info:
96 1.1 mrg * - swap device in use (could be disabled, preventing future use)
97 1.1 mrg * - swap enabled (allows new allocations on swap)
98 1.1 mrg * - map info in /dev/drum
99 1.1 mrg * - vnode pointer
100 1.1 mrg * for swap files only:
101 1.1 mrg * - block size
102 1.1 mrg * - max byte count in buffer
103 1.1 mrg * - buffer
104 1.1 mrg *
105 1.1 mrg * userland controls and configures swap with the swapctl(2) system call.
106 1.1 mrg * sys_swapctl() performs the following operations:
107 1.1 mrg * [1] SWAP_NSWAP: returns the number of swap devices currently configured
108 1.51 chs * [2] SWAP_STATS: given a pointer to an array of swapent structures
109 1.1 mrg * (passed in via "arg") of a size passed in via "misc" ... we load
110 1.85 junyoung * the current swap config into the array. The actual work is done
111 1.63 manu * in the uvm_swap_stats(9) function.
112 1.1 mrg * [3] SWAP_ON: given a pathname in arg (could be device or file) and a
113 1.1 mrg * priority in "misc", start swapping on it.
114 1.1 mrg * [4] SWAP_OFF: as SWAP_ON, but stops swapping to a device
115 1.1 mrg * [5] SWAP_CTL: changes the priority of a swap device (new priority in
116 1.1 mrg * "misc")
117 1.1 mrg */
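/*
 * for illustration, a userland caller drives the operations above
 * with swapctl(2). a minimal sketch, not part of the original file;
 * the device path "/dev/sd0b" and priority 0 are made-up values:
 *
 *	#include <unistd.h>
 *	#include <err.h>
 *	#include <sys/swap.h>
 *
 *	if (swapctl(SWAP_ON, "/dev/sd0b", 0) == -1)
 *		err(1, "swapctl(SWAP_ON)");
 */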
118 1.1 mrg
119 1.1 mrg /*
120 1.1 mrg * swapdev: describes a single swap partition/file
121 1.1 mrg *
122 1.1 mrg * note the following should be true:
123 1.1 mrg * swd_inuse <= swd_nblks [number of blocks in use is <= total blocks]
124 1.1 mrg * swd_nblks <= swd_mapsize [because mapsize includes miniroot+disklabel]
125 1.1 mrg */
126 1.1 mrg struct swapdev {
127 1.16 mrg struct oswapent swd_ose;
128 1.16 mrg #define swd_dev swd_ose.ose_dev /* device id */
129 1.16 mrg #define swd_flags swd_ose.ose_flags /* flags:inuse/enable/fake */
130 1.16 mrg #define swd_priority swd_ose.ose_priority /* our priority */
131 1.16 mrg /* also: swd_ose.ose_nblks, swd_ose.ose_inuse */
132 1.16 mrg char *swd_path; /* saved pathname of device */
133 1.16 mrg int swd_pathlen; /* length of pathname */
134 1.16 mrg int swd_npages; /* #pages we can use */
135 1.16 mrg int swd_npginuse; /* #pages in use */
136 1.32 chs int swd_npgbad; /* #pages bad */
137 1.16 mrg int swd_drumoffset; /* page0 offset in drum */
138 1.16 mrg int swd_drumsize; /* #pages in drum */
139 1.90 yamt blist_t swd_blist; /* blist for this swapdev */
140 1.16 mrg struct vnode *swd_vp; /* backing vnode */
141 1.16 mrg CIRCLEQ_ENTRY(swapdev) swd_next; /* priority circleq */
142 1.1 mrg
143 1.16 mrg int swd_bsize; /* blocksize (bytes) */
144 1.16 mrg int swd_maxactive; /* max active i/o reqs */
145 1.65 hannken struct bufq_state swd_tab; /* buffer list */
146 1.33 thorpej int swd_active; /* number of active buffers */
147 1.1 mrg };
148 1.1 mrg
149 1.1 mrg /*
150 1.1 mrg * swap device priority entry; the list is kept sorted on `spi_priority'.
151 1.1 mrg */
152 1.1 mrg struct swappri {
153 1.1 mrg int spi_priority; /* priority */
154 1.1 mrg CIRCLEQ_HEAD(spi_swapdev, swapdev) spi_swapdev;
155 1.1 mrg /* circleq of swapdevs at this priority */
156 1.1 mrg LIST_ENTRY(swappri) spi_swappri; /* global list of pri's */
157 1.1 mrg };
158 1.1 mrg
159 1.1 mrg /*
160 1.1 mrg * The following two structures are used to keep track of data transfers
161 1.1 mrg * on swap devices associated with regular files.
162 1.1 mrg * NOTE: this code is more or less a copy of vnd.c; we use the same
163 1.1 mrg * structure names here to ease porting.
164 1.1 mrg */
165 1.1 mrg struct vndxfer {
166 1.1 mrg struct buf *vx_bp; /* Pointer to parent buffer */
167 1.1 mrg struct swapdev *vx_sdp;
168 1.1 mrg int vx_error;
169 1.1 mrg int vx_pending; /* # of pending aux buffers */
170 1.1 mrg int vx_flags;
171 1.1 mrg #define VX_BUSY 1
172 1.1 mrg #define VX_DEAD 2
173 1.1 mrg };
174 1.1 mrg
175 1.1 mrg struct vndbuf {
176 1.1 mrg struct buf vb_buf;
177 1.1 mrg struct vndxfer *vb_xfer;
178 1.1 mrg };
179 1.1 mrg
180 1.12 pk
181 1.1 mrg /*
182 1.12 pk * We keep a pool of vndbuf's and vndxfer structures.
183 1.1 mrg */
184 1.87 simonb POOL_INIT(vndxfer_pool, sizeof(struct vndxfer), 0, 0, 0, "swp vnx", NULL);
185 1.87 simonb POOL_INIT(vndbuf_pool, sizeof(struct vndbuf), 0, 0, 0, "swp vnd", NULL);
186 1.1 mrg
187 1.12 pk #define getvndxfer(vnx) do { \
188 1.92 christos int sp = splbio(); \
189 1.60 thorpej vnx = pool_get(&vndxfer_pool, PR_WAITOK); \
190 1.92 christos splx(sp); \
191 1.73 perry } while (/*CONSTCOND*/ 0)
192 1.12 pk
193 1.12 pk #define putvndxfer(vnx) { \
194 1.49 thorpej pool_put(&vndxfer_pool, (void *)(vnx)); \
195 1.12 pk }
196 1.12 pk
197 1.12 pk #define getvndbuf(vbp) do { \
198 1.92 christos int sp = splbio(); \
199 1.60 thorpej vbp = pool_get(&vndbuf_pool, PR_WAITOK); \
200 1.92 christos splx(sp); \
201 1.73 perry } while (/*CONSTCOND*/ 0)
202 1.1 mrg
203 1.12 pk #define putvndbuf(vbp) { \
204 1.49 thorpej pool_put(&vndbuf_pool, (void *)(vbp)); \
205 1.12 pk }
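/*
 * note: the splbio() bracketing in the get macros above is needed
 * because the same pools are also released via pool_put() from
 * sw_reg_iodone(), which runs in interrupt context at splbio.
 */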
206 1.1 mrg
207 1.1 mrg /*
208 1.1 mrg * local variables
209 1.1 mrg */
210 1.1 mrg static struct extent *swapmap; /* controls the mapping of /dev/drum */
211 1.75 thorpej
212 1.75 thorpej MALLOC_DEFINE(M_VMSWAP, "VM swap", "VM swap structures");
213 1.1 mrg
214 1.1 mrg /* list of all active swap devices [by priority] */
215 1.1 mrg LIST_HEAD(swap_priority, swappri);
216 1.1 mrg static struct swap_priority swap_priority;
217 1.1 mrg
218 1.1 mrg /* locks */
219 1.52 chs struct lock swap_syscall_lock;
220 1.1 mrg
221 1.1 mrg /*
222 1.1 mrg * prototypes
223 1.1 mrg */
224 1.85 junyoung static struct swapdev *swapdrum_getsdp(int);
225 1.1 mrg
226 1.85 junyoung static struct swapdev *swaplist_find(struct vnode *, int);
227 1.85 junyoung static void swaplist_insert(struct swapdev *,
228 1.85 junyoung struct swappri *, int);
229 1.85 junyoung static void swaplist_trim(void);
230 1.1 mrg
231 1.85 junyoung static int swap_on(struct proc *, struct swapdev *);
232 1.85 junyoung static int swap_off(struct proc *, struct swapdev *);
233 1.1 mrg
234 1.85 junyoung static void sw_reg_strategy(struct swapdev *, struct buf *, int);
235 1.85 junyoung static void sw_reg_iodone(struct buf *);
236 1.85 junyoung static void sw_reg_start(struct swapdev *);
237 1.1 mrg
238 1.85 junyoung static int uvm_swap_io(struct vm_page **, int, int, int);
239 1.1 mrg
240 1.1 mrg /*
241 1.1 mrg * uvm_swap_init: init the swap system data structures and locks
242 1.1 mrg *
243 1.51 chs * => called at boot time from init_main.c after the filesystems
244 1.1 mrg * are brought up (which happens after uvm_init())
245 1.1 mrg */
246 1.1 mrg void
247 1.93 thorpej uvm_swap_init(void)
248 1.1 mrg {
249 1.1 mrg UVMHIST_FUNC("uvm_swap_init");
250 1.1 mrg
251 1.1 mrg UVMHIST_CALLED(pdhist);
252 1.1 mrg /*
253 1.1 mrg * first, init the swap list, its counter, and its lock.
254 1.1 mrg * then get a handle on the vnode for /dev/drum by using
255 1.1 mrg * its dev_t number ("swapdev", from MD conf.c).
256 1.1 mrg */
257 1.1 mrg
258 1.1 mrg LIST_INIT(&swap_priority);
259 1.1 mrg uvmexp.nswapdev = 0;
260 1.1 mrg lockinit(&swap_syscall_lock, PVM, "swapsys", 0, 0);
261 1.26 chs simple_lock_init(&uvm.swap_data_lock);
262 1.12 pk
263 1.1 mrg if (bdevvp(swapdev, &swapdev_vp))
264 1.1 mrg panic("uvm_swap_init: can't get vnode for swap device");
265 1.1 mrg
266 1.1 mrg /*
267 1.1 mrg * create swap block resource map to map /dev/drum. the range
268 1.1 mrg * from 1 to INT_MAX allows 2 gigablocks (2^31 page slots; with 4 KB pages, about 8 terabytes) of swap space. note
269 1.51 chs * that block 0 is reserved (used to indicate an allocation
270 1.1 mrg * failure, or no allocation).
271 1.1 mrg */
272 1.1 mrg swapmap = extent_create("swapmap", 1, INT_MAX,
273 1.1 mrg M_VMSWAP, 0, 0, EX_NOWAIT);
274 1.1 mrg if (swapmap == 0)
275 1.1 mrg panic("uvm_swap_init: extent_create failed");
276 1.1 mrg
277 1.1 mrg /*
278 1.1 mrg * done!
279 1.1 mrg */
280 1.1 mrg UVMHIST_LOG(pdhist, "<- done", 0, 0, 0, 0);
281 1.1 mrg }
282 1.1 mrg
283 1.1 mrg /*
284 1.1 mrg * swaplist functions: functions that operate on the list of swap
285 1.1 mrg * devices on the system.
286 1.1 mrg */
287 1.1 mrg
288 1.1 mrg /*
289 1.1 mrg * swaplist_insert: insert swap device "sdp" into the global list
290 1.1 mrg *
291 1.26 chs * => caller must hold both swap_syscall_lock and uvm.swap_data_lock
292 1.1 mrg * => caller must provide a newly malloc'd swappri structure (we will
293 1.1 mrg * FREE it if we don't need it... this is to prevent malloc blocking
294 1.1 mrg * here while adding swap)
295 1.1 mrg */
296 1.1 mrg static void
297 1.93 thorpej swaplist_insert(struct swapdev *sdp, struct swappri *newspp, int priority)
298 1.1 mrg {
299 1.1 mrg struct swappri *spp, *pspp;
300 1.1 mrg UVMHIST_FUNC("swaplist_insert"); UVMHIST_CALLED(pdhist);
301 1.1 mrg
302 1.1 mrg /*
303 1.1 mrg * find entry at or after which to insert the new device.
304 1.1 mrg */
305 1.55 chs pspp = NULL;
306 1.55 chs LIST_FOREACH(spp, &swap_priority, spi_swappri) {
307 1.1 mrg if (priority <= spp->spi_priority)
308 1.1 mrg break;
309 1.1 mrg pspp = spp;
310 1.1 mrg }
311 1.1 mrg
312 1.1 mrg /*
313 1.1 mrg * new priority?
314 1.1 mrg */
315 1.1 mrg if (spp == NULL || spp->spi_priority != priority) {
316 1.1 mrg spp = newspp; /* use newspp! */
317 1.32 chs UVMHIST_LOG(pdhist, "created new swappri = %d",
318 1.32 chs priority, 0, 0, 0);
319 1.1 mrg
320 1.1 mrg spp->spi_priority = priority;
321 1.1 mrg CIRCLEQ_INIT(&spp->spi_swapdev);
322 1.1 mrg
323 1.1 mrg if (pspp)
324 1.1 mrg LIST_INSERT_AFTER(pspp, spp, spi_swappri);
325 1.1 mrg else
326 1.1 mrg LIST_INSERT_HEAD(&swap_priority, spp, spi_swappri);
327 1.1 mrg } else {
328 1.1 mrg /* we don't need a new priority structure, free it */
329 1.1 mrg FREE(newspp, M_VMSWAP);
330 1.1 mrg }
331 1.1 mrg
332 1.1 mrg /*
333 1.1 mrg * priority found (or created). now insert on the priority's
334 1.1 mrg * circleq list and bump the total number of swapdevs.
335 1.1 mrg */
336 1.1 mrg sdp->swd_priority = priority;
337 1.1 mrg CIRCLEQ_INSERT_TAIL(&spp->spi_swapdev, sdp, swd_next);
338 1.1 mrg uvmexp.nswapdev++;
339 1.1 mrg }
340 1.1 mrg
341 1.1 mrg /*
342 1.1 mrg * swaplist_find: find and optionally remove a swap device from the
343 1.1 mrg * global list.
344 1.1 mrg *
345 1.26 chs * => caller must hold both swap_syscall_lock and uvm.swap_data_lock
346 1.1 mrg * => we return the swapdev we found (and removed)
347 1.1 mrg */
348 1.1 mrg static struct swapdev *
349 1.93 thorpej swaplist_find(struct vnode *vp, boolean_t remove)
350 1.1 mrg {
351 1.1 mrg struct swapdev *sdp;
352 1.1 mrg struct swappri *spp;
353 1.1 mrg
354 1.1 mrg /*
355 1.1 mrg * search the lists for the requested vp
356 1.1 mrg */
357 1.55 chs
358 1.55 chs LIST_FOREACH(spp, &swap_priority, spi_swappri) {
359 1.55 chs CIRCLEQ_FOREACH(sdp, &spp->spi_swapdev, swd_next) {
360 1.1 mrg if (sdp->swd_vp == vp) {
361 1.1 mrg if (remove) {
362 1.1 mrg CIRCLEQ_REMOVE(&spp->spi_swapdev,
363 1.1 mrg sdp, swd_next);
364 1.1 mrg uvmexp.nswapdev--;
365 1.1 mrg }
366 1.1 mrg return(sdp);
367 1.1 mrg }
368 1.55 chs }
369 1.1 mrg }
370 1.1 mrg return (NULL);
371 1.1 mrg }
372 1.1 mrg
373 1.1 mrg
374 1.1 mrg /*
375 1.1 mrg * swaplist_trim: scan priority list for empty priority entries and kill
376 1.1 mrg * them.
377 1.1 mrg *
378 1.26 chs * => caller must hold both swap_syscall_lock and uvm.swap_data_lock
379 1.1 mrg */
380 1.1 mrg static void
381 1.93 thorpej swaplist_trim(void)
382 1.1 mrg {
383 1.1 mrg struct swappri *spp, *nextspp;
384 1.1 mrg
385 1.32 chs for (spp = LIST_FIRST(&swap_priority); spp != NULL; spp = nextspp) {
386 1.32 chs nextspp = LIST_NEXT(spp, spi_swappri);
387 1.32 chs if (CIRCLEQ_FIRST(&spp->spi_swapdev) !=
388 1.32 chs (void *)&spp->spi_swapdev)
389 1.1 mrg continue;
390 1.1 mrg LIST_REMOVE(spp, spi_swappri);
391 1.32 chs free(spp, M_VMSWAP);
392 1.1 mrg }
393 1.1 mrg }
394 1.1 mrg
395 1.1 mrg /*
396 1.1 mrg * swapdrum_getsdp: given a page offset in /dev/drum, convert it back
397 1.1 mrg * to the "swapdev" that maps that section of the drum.
398 1.1 mrg *
399 1.1 mrg * => each swapdev takes one big contig chunk of the drum
400 1.26 chs * => caller must hold uvm.swap_data_lock
401 1.1 mrg */
402 1.1 mrg static struct swapdev *
403 1.93 thorpej swapdrum_getsdp(int pgno)
404 1.1 mrg {
405 1.1 mrg struct swapdev *sdp;
406 1.1 mrg struct swappri *spp;
407 1.51 chs
408 1.55 chs LIST_FOREACH(spp, &swap_priority, spi_swappri) {
409 1.55 chs CIRCLEQ_FOREACH(sdp, &spp->spi_swapdev, swd_next) {
410 1.48 fvdl if (sdp->swd_flags & SWF_FAKE)
411 1.48 fvdl continue;
412 1.1 mrg if (pgno >= sdp->swd_drumoffset &&
413 1.1 mrg pgno < (sdp->swd_drumoffset + sdp->swd_drumsize)) {
414 1.1 mrg return sdp;
415 1.1 mrg }
416 1.48 fvdl }
417 1.55 chs }
418 1.1 mrg return NULL;
419 1.1 mrg }
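/*
 * example (made-up numbers): a device added with swd_drumoffset == 1
 * and swd_drumsize == 4096 owns drum pages [1, 4097), so a lookup
 * such as swapdrum_getsdp(2048) returns that swapdev.
 */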
420 1.1 mrg
421 1.1 mrg
422 1.1 mrg /*
423 1.1 mrg * sys_swapctl: main entry point for swapctl(2) system call
424 1.1 mrg * [with two helper functions: swap_on and swap_off]
425 1.1 mrg */
426 1.1 mrg int
427 1.93 thorpej sys_swapctl(struct lwp *l, void *v, register_t *retval)
428 1.1 mrg {
429 1.1 mrg struct sys_swapctl_args /* {
430 1.1 mrg syscallarg(int) cmd;
431 1.1 mrg syscallarg(void *) arg;
432 1.1 mrg syscallarg(int) misc;
433 1.1 mrg } */ *uap = (struct sys_swapctl_args *)v;
434 1.74 thorpej struct proc *p = l->l_proc;
435 1.1 mrg struct vnode *vp;
436 1.1 mrg struct nameidata nd;
437 1.1 mrg struct swappri *spp;
438 1.1 mrg struct swapdev *sdp;
439 1.1 mrg struct swapent *sep;
440 1.16 mrg char userpath[PATH_MAX + 1];
441 1.18 enami size_t len;
442 1.61 manu int error, misc;
443 1.1 mrg int priority;
444 1.1 mrg UVMHIST_FUNC("sys_swapctl"); UVMHIST_CALLED(pdhist);
445 1.1 mrg
446 1.1 mrg misc = SCARG(uap, misc);
447 1.1 mrg
448 1.1 mrg /*
449 1.1 mrg * ensure serialized syscall access by grabbing the swap_syscall_lock
450 1.1 mrg */
451 1.32 chs lockmgr(&swap_syscall_lock, LK_EXCLUSIVE, NULL);
452 1.24 mrg
453 1.1 mrg /*
454 1.1 mrg * we handle the non-priv NSWAP and STATS requests first.
455 1.1 mrg *
456 1.51 chs * SWAP_NSWAP: return number of config'd swap devices
457 1.1 mrg * [can also be obtained with uvmexp sysctl]
458 1.1 mrg */
459 1.1 mrg if (SCARG(uap, cmd) == SWAP_NSWAP) {
460 1.8 mrg UVMHIST_LOG(pdhist, "<- done SWAP_NSWAP=%d", uvmexp.nswapdev,
461 1.8 mrg 0, 0, 0);
462 1.1 mrg *retval = uvmexp.nswapdev;
463 1.16 mrg error = 0;
464 1.16 mrg goto out;
465 1.1 mrg }
466 1.1 mrg
467 1.1 mrg /*
468 1.1 mrg * SWAP_STATS: get stats on current # of configured swap devs
469 1.1 mrg *
470 1.51 chs * note that the swap_priority list can't change as long
471 1.1 mrg * as we are holding the swap_syscall_lock. we don't want
472 1.51 chs * to grab the uvm.swap_data_lock because we may fault&sleep during
473 1.1 mrg * copyout() and we don't want to be holding that lock then!
474 1.1 mrg */
475 1.16 mrg if (SCARG(uap, cmd) == SWAP_STATS
476 1.16 mrg #if defined(COMPAT_13)
477 1.16 mrg || SCARG(uap, cmd) == SWAP_OSTATS
478 1.16 mrg #endif
479 1.16 mrg ) {
480 1.88 christos if ((size_t)misc > (size_t)uvmexp.nswapdev)
481 1.88 christos misc = uvmexp.nswapdev;
482 1.16 mrg #if defined(COMPAT_13)
483 1.61 manu if (SCARG(uap, cmd) == SWAP_OSTATS)
484 1.61 manu len = sizeof(struct oswapent) * misc;
485 1.62 manu else
486 1.16 mrg #endif
487 1.62 manu len = sizeof(struct swapent) * misc;
488 1.62 manu sep = (struct swapent *)malloc(len, M_TEMP, M_WAITOK);
489 1.62 manu
490 1.62 manu uvm_swap_stats(SCARG(uap, cmd), sep, misc, retval);
491 1.92 christos error = copyout(sep, SCARG(uap, arg), len);
492 1.1 mrg
493 1.61 manu free(sep, M_TEMP);
494 1.16 mrg UVMHIST_LOG(pdhist, "<- done SWAP_STATS", 0, 0, 0, 0);
495 1.16 mrg goto out;
496 1.51 chs }
497 1.55 chs if (SCARG(uap, cmd) == SWAP_GETDUMPDEV) {
498 1.55 chs dev_t *devp = (dev_t *)SCARG(uap, arg);
499 1.55 chs
500 1.55 chs error = copyout(&dumpdev, devp, sizeof(dumpdev));
501 1.55 chs goto out;
502 1.55 chs }
503 1.1 mrg
504 1.1 mrg /*
505 1.1 mrg * all other requests require superuser privs. verify.
506 1.1 mrg */
507 1.16 mrg if ((error = suser(p->p_ucred, &p->p_acflag)))
508 1.16 mrg goto out;
509 1.1 mrg
510 1.1 mrg /*
511 1.1 mrg * at this point we expect a path name in arg. we will
512 1.1 mrg * use namei() to gain a vnode reference (vref), and lock
513 1.1 mrg * the vnode (VOP_LOCK).
514 1.1 mrg *
515 1.1 mrg * XXX: a NULL arg means use the root vnode pointer (e.g. for
516 1.16 mrg * miniroot)
517 1.1 mrg */
518 1.1 mrg if (SCARG(uap, arg) == NULL) {
519 1.1 mrg vp = rootvp; /* miniroot */
520 1.79 thorpej if (vget(vp, LK_EXCLUSIVE)) {
521 1.16 mrg error = EBUSY;
522 1.16 mrg goto out;
523 1.1 mrg }
524 1.16 mrg if (SCARG(uap, cmd) == SWAP_ON &&
525 1.16 mrg copystr("miniroot", userpath, sizeof userpath, &len))
526 1.16 mrg panic("swapctl: miniroot copy failed");
527 1.1 mrg } else {
528 1.16 mrg int space;
529 1.16 mrg char *where;
530 1.16 mrg
531 1.16 mrg if (SCARG(uap, cmd) == SWAP_ON) {
532 1.16 mrg if ((error = copyinstr(SCARG(uap, arg), userpath,
533 1.16 mrg sizeof userpath, &len)))
534 1.16 mrg goto out;
535 1.16 mrg space = UIO_SYSSPACE;
536 1.16 mrg where = userpath;
537 1.16 mrg } else {
538 1.16 mrg space = UIO_USERSPACE;
539 1.16 mrg where = (char *)SCARG(uap, arg);
540 1.1 mrg }
541 1.80 fvdl NDINIT(&nd, LOOKUP, FOLLOW|LOCKLEAF, space, where, p);
542 1.16 mrg if ((error = namei(&nd)))
543 1.16 mrg goto out;
544 1.1 mrg vp = nd.ni_vp;
545 1.1 mrg }
546 1.1 mrg /* note: "vp" is referenced and locked */
547 1.1 mrg
548 1.1 mrg error = 0; /* assume no error */
549 1.1 mrg switch(SCARG(uap, cmd)) {
550 1.40 mrg
551 1.24 mrg case SWAP_DUMPDEV:
552 1.24 mrg if (vp->v_type != VBLK) {
553 1.24 mrg error = ENOTBLK;
554 1.45 pk break;
555 1.24 mrg }
556 1.24 mrg dumpdev = vp->v_rdev;
557 1.68 drochner cpu_dumpconf();
558 1.24 mrg break;
559 1.24 mrg
560 1.1 mrg case SWAP_CTL:
561 1.1 mrg /*
562 1.1 mrg * get new priority, remove old entry (if any) and then
563 1.1 mrg * reinsert it in the correct place. finally, prune out
564 1.1 mrg * any empty priority structures.
565 1.1 mrg */
566 1.1 mrg priority = SCARG(uap, misc);
567 1.32 chs spp = malloc(sizeof *spp, M_VMSWAP, M_WAITOK);
568 1.26 chs simple_lock(&uvm.swap_data_lock);
569 1.1 mrg if ((sdp = swaplist_find(vp, 1)) == NULL) {
570 1.1 mrg error = ENOENT;
571 1.1 mrg } else {
572 1.1 mrg swaplist_insert(sdp, spp, priority);
573 1.1 mrg swaplist_trim();
574 1.1 mrg }
575 1.26 chs simple_unlock(&uvm.swap_data_lock);
576 1.1 mrg if (error)
577 1.1 mrg free(spp, M_VMSWAP);
578 1.1 mrg break;
579 1.1 mrg
580 1.1 mrg case SWAP_ON:
581 1.32 chs
582 1.1 mrg /*
583 1.1 mrg * check for duplicates. if none found, then insert a
584 1.1 mrg * dummy entry on the list to prevent someone else from
585 1.1 mrg * trying to enable this device while we are working on
586 1.1 mrg * it.
587 1.1 mrg */
588 1.32 chs
589 1.1 mrg priority = SCARG(uap, misc);
590 1.48 fvdl sdp = malloc(sizeof *sdp, M_VMSWAP, M_WAITOK);
591 1.48 fvdl spp = malloc(sizeof *spp, M_VMSWAP, M_WAITOK);
592 1.67 chs memset(sdp, 0, sizeof(*sdp));
593 1.67 chs sdp->swd_flags = SWF_FAKE;
594 1.67 chs sdp->swd_vp = vp;
595 1.67 chs sdp->swd_dev = (vp->v_type == VBLK) ? vp->v_rdev : NODEV;
596 1.66 hannken bufq_alloc(&sdp->swd_tab, BUFQ_DISKSORT|BUFQ_SORT_RAWBLOCK);
597 1.26 chs simple_lock(&uvm.swap_data_lock);
598 1.48 fvdl if (swaplist_find(vp, 0) != NULL) {
599 1.1 mrg error = EBUSY;
600 1.26 chs simple_unlock(&uvm.swap_data_lock);
601 1.66 hannken bufq_free(&sdp->swd_tab);
602 1.48 fvdl free(sdp, M_VMSWAP);
603 1.48 fvdl free(spp, M_VMSWAP);
604 1.16 mrg break;
605 1.1 mrg }
606 1.1 mrg swaplist_insert(sdp, spp, priority);
607 1.26 chs simple_unlock(&uvm.swap_data_lock);
608 1.1 mrg
609 1.16 mrg sdp->swd_pathlen = len;
610 1.16 mrg sdp->swd_path = malloc(sdp->swd_pathlen, M_VMSWAP, M_WAITOK);
611 1.19 pk if (copystr(userpath, sdp->swd_path, sdp->swd_pathlen, 0) != 0)
612 1.19 pk panic("swapctl: copystr");
613 1.32 chs
614 1.1 mrg /*
615 1.1 mrg * we've now got a FAKE placeholder in the swap list.
616 1.1 mrg * now attempt to enable swap on it. if we fail, undo
617 1.1 mrg * what we've done and kill the fake entry we just inserted.
618 1.1 mrg * if swap_on is a success, it will clear the SWF_FAKE flag
619 1.1 mrg */
620 1.32 chs
621 1.80 fvdl if ((error = swap_on(p, sdp)) != 0) {
622 1.26 chs simple_lock(&uvm.swap_data_lock);
623 1.8 mrg (void) swaplist_find(vp, 1); /* kill fake entry */
624 1.1 mrg swaplist_trim();
625 1.26 chs simple_unlock(&uvm.swap_data_lock);
626 1.66 hannken bufq_free(&sdp->swd_tab);
627 1.19 pk free(sdp->swd_path, M_VMSWAP);
628 1.32 chs free(sdp, M_VMSWAP);
629 1.1 mrg break;
630 1.1 mrg }
631 1.1 mrg break;
632 1.1 mrg
633 1.1 mrg case SWAP_OFF:
634 1.26 chs simple_lock(&uvm.swap_data_lock);
635 1.1 mrg if ((sdp = swaplist_find(vp, 0)) == NULL) {
636 1.26 chs simple_unlock(&uvm.swap_data_lock);
637 1.1 mrg error = ENXIO;
638 1.1 mrg break;
639 1.1 mrg }
640 1.32 chs
641 1.1 mrg /*
642 1.1 mrg * If a device isn't in use or enabled, we
643 1.1 mrg * can't stop swapping from it (again).
644 1.1 mrg */
645 1.1 mrg if ((sdp->swd_flags & (SWF_INUSE|SWF_ENABLE)) == 0) {
646 1.26 chs simple_unlock(&uvm.swap_data_lock);
647 1.1 mrg error = EBUSY;
648 1.16 mrg break;
649 1.1 mrg }
650 1.1 mrg
651 1.1 mrg /*
652 1.32 chs * do the real work.
653 1.1 mrg */
654 1.80 fvdl error = swap_off(p, sdp);
655 1.1 mrg break;
656 1.1 mrg
657 1.1 mrg default:
658 1.1 mrg error = EINVAL;
659 1.1 mrg }
660 1.1 mrg
661 1.1 mrg /*
662 1.39 chs * done! release the ref gained by namei() and unlock.
663 1.1 mrg */
664 1.1 mrg vput(vp);
665 1.39 chs
666 1.16 mrg out:
667 1.32 chs lockmgr(&swap_syscall_lock, LK_RELEASE, NULL);
668 1.1 mrg
669 1.1 mrg UVMHIST_LOG(pdhist, "<- done! error=%d", error, 0, 0, 0);
670 1.1 mrg return (error);
671 1.61 manu }
672 1.61 manu
673 1.85 junyoung /*
674 1.61 manu * uvm_swap_stats: implements swapctl(SWAP_STATS). The function is kept
675 1.85 junyoung * away from sys_swapctl() in order to allow COMPAT_* swapctl()
676 1.61 manu * emulation to use it directly without going through sys_swapctl().
677 1.61 manu * The problem with using sys_swapctl() there is that it involves
678 1.61 manu * copying the swapent array to the stackgap, and this array's size
679 1.85 junyoung * is not known at build time. Hence it would not be possible to
680 1.61 manu * ensure it would fit in the stackgap in any case.
681 1.61 manu */
682 1.61 manu void
683 1.93 thorpej uvm_swap_stats(int cmd, struct swapent *sep, int sec, register_t *retval)
684 1.61 manu {
685 1.61 manu struct swappri *spp;
686 1.61 manu struct swapdev *sdp;
687 1.61 manu int count = 0;
688 1.61 manu
689 1.61 manu LIST_FOREACH(spp, &swap_priority, spi_swappri) {
690 1.61 manu for (sdp = CIRCLEQ_FIRST(&spp->spi_swapdev);
691 1.61 manu sdp != (void *)&spp->spi_swapdev && sec-- > 0;
692 1.61 manu sdp = CIRCLEQ_NEXT(sdp, swd_next)) {
693 1.61 manu /*
694 1.61 manu * backwards compatibility for system call.
695 1.61 manu * note that we use 'struct oswapent' as an
696 1.61 manu * overlay into both 'struct swapdev' and
697 1.61 manu * the userland 'struct swapent', as we
698 1.61 manu * want to retain backwards compatibility
699 1.61 manu * with NetBSD 1.3.
700 1.61 manu */
701 1.61 manu sdp->swd_ose.ose_inuse =
702 1.61 manu btodb((u_int64_t)sdp->swd_npginuse <<
703 1.61 manu PAGE_SHIFT);
704 1.85 junyoung (void)memcpy(sep, &sdp->swd_ose,
705 1.61 manu sizeof(struct oswapent));
706 1.85 junyoung
707 1.61 manu /* now copy out the path if necessary */
708 1.61 manu #if defined(COMPAT_13)
709 1.61 manu if (cmd == SWAP_STATS)
710 1.61 manu #endif
711 1.61 manu (void)memcpy(&sep->se_path, sdp->swd_path,
712 1.61 manu sdp->swd_pathlen);
713 1.61 manu
714 1.61 manu count++;
715 1.61 manu #if defined(COMPAT_13)
716 1.61 manu if (cmd == SWAP_OSTATS)
717 1.61 manu sep = (struct swapent *)
718 1.61 manu ((struct oswapent *)sep + 1);
719 1.61 manu else
720 1.61 manu #endif
721 1.61 manu sep++;
722 1.61 manu }
723 1.61 manu }
724 1.61 manu
725 1.61 manu *retval = count;
726 1.61 manu return;
727 1.1 mrg }
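/*
 * for instance, a COMPAT_* swapctl emulation could call the function
 * above directly (a sketch; "ksep" is a hypothetical kernel buffer the
 * emulation would allocate itself):
 *
 *	uvm_swap_stats(SWAP_STATS, ksep, count, retval);
 *
 * avoiding the stackgap sizing problem described above.
 */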
728 1.1 mrg
729 1.1 mrg /*
730 1.1 mrg * swap_on: attempt to enable a swapdev for swapping. note that the
731 1.1 mrg * swapdev is already on the global list, but disabled (marked
732 1.1 mrg * SWF_FAKE).
733 1.1 mrg *
734 1.1 mrg * => we avoid the start of the disk (to protect disk labels)
735 1.1 mrg * => we also avoid the miniroot, if we are swapping to root.
736 1.26 chs * => caller should leave uvm.swap_data_lock unlocked, we may lock it
737 1.1 mrg * if needed.
738 1.1 mrg */
739 1.1 mrg static int
740 1.93 thorpej swap_on(struct proc *p, struct swapdev *sdp)
741 1.1 mrg {
742 1.1 mrg struct vnode *vp;
743 1.1 mrg int error, npages, nblocks, size;
744 1.1 mrg long addr;
745 1.48 fvdl u_long result;
746 1.1 mrg struct vattr va;
747 1.1 mrg #ifdef NFS
748 1.85 junyoung extern int (**nfsv2_vnodeop_p)(void *);
749 1.1 mrg #endif /* NFS */
750 1.69 gehenna const struct bdevsw *bdev;
751 1.1 mrg dev_t dev;
752 1.1 mrg UVMHIST_FUNC("swap_on"); UVMHIST_CALLED(pdhist);
753 1.1 mrg
754 1.1 mrg /*
755 1.1 mrg * we want to enable swapping on sdp. the swd_vp contains
756 1.1 mrg * the vnode we want (locked and ref'd), and the swd_dev
757 1.1 mrg * contains the dev_t of the file, if it is a block device.
758 1.1 mrg */
759 1.1 mrg
760 1.1 mrg vp = sdp->swd_vp;
761 1.1 mrg dev = sdp->swd_dev;
762 1.1 mrg
763 1.1 mrg /*
764 1.1 mrg * open the swap file (mostly useful for block device files to
765 1.1 mrg * let the device driver know what is up).
766 1.1 mrg *
767 1.1 mrg * we skip the open/close for root on swap because the root
768 1.1 mrg * has already been opened when root was mounted (mountroot).
769 1.1 mrg */
770 1.1 mrg if (vp != rootvp) {
771 1.80 fvdl if ((error = VOP_OPEN(vp, FREAD|FWRITE, p->p_ucred, p)))
772 1.1 mrg return (error);
773 1.1 mrg }
774 1.1 mrg
775 1.1 mrg /* XXX this only works for block devices */
776 1.1 mrg UVMHIST_LOG(pdhist, " dev=%d, major(dev)=%d", dev, major(dev), 0,0);
777 1.1 mrg
778 1.1 mrg /*
779 1.1 mrg * we now need to determine the size of the swap area. for
780 1.1 mrg * block specials we can call the d_psize function.
781 1.1 mrg * for normal files, we must stat [get attrs].
782 1.1 mrg *
783 1.1 mrg * we put the result in nblocks.
784 1.1 mrg * for normal files, we also want the filesystem block size
785 1.1 mrg * (which we get with statvfs).
786 1.1 mrg */
787 1.1 mrg switch (vp->v_type) {
788 1.1 mrg case VBLK:
789 1.69 gehenna bdev = bdevsw_lookup(dev);
790 1.69 gehenna if (bdev == NULL || bdev->d_psize == NULL ||
791 1.69 gehenna (nblocks = (*bdev->d_psize)(dev)) == -1) {
792 1.1 mrg error = ENXIO;
793 1.1 mrg goto bad;
794 1.1 mrg }
795 1.1 mrg break;
796 1.1 mrg
797 1.1 mrg case VREG:
798 1.80 fvdl if ((error = VOP_GETATTR(vp, &va, p->p_ucred, p)))
799 1.1 mrg goto bad;
800 1.1 mrg nblocks = (int)btodb(va.va_size);
801 1.1 mrg if ((error =
802 1.86 christos VFS_STATVFS(vp->v_mount, &vp->v_mount->mnt_stat, p)) != 0)
803 1.1 mrg goto bad;
804 1.1 mrg
805 1.1 mrg sdp->swd_bsize = vp->v_mount->mnt_stat.f_iosize;
806 1.1 mrg /*
807 1.1 mrg * limit the max # of outstanding I/O requests we issue
808 1.1 mrg * at any one time. take it easy on NFS servers.
809 1.1 mrg */
810 1.1 mrg #ifdef NFS
811 1.1 mrg if (vp->v_op == nfsv2_vnodeop_p)
812 1.1 mrg sdp->swd_maxactive = 2; /* XXX */
813 1.1 mrg else
814 1.1 mrg #endif /* NFS */
815 1.1 mrg sdp->swd_maxactive = 8; /* XXX */
816 1.1 mrg break;
817 1.1 mrg
818 1.1 mrg default:
819 1.1 mrg error = ENXIO;
820 1.1 mrg goto bad;
821 1.1 mrg }
822 1.1 mrg
823 1.1 mrg /*
824 1.1 mrg * save nblocks in a safe place and convert to pages.
825 1.1 mrg */
826 1.1 mrg
827 1.16 mrg sdp->swd_ose.ose_nblks = nblocks;
828 1.20 chs npages = dbtob((u_int64_t)nblocks) >> PAGE_SHIFT;
829 1.1 mrg
830 1.1 mrg /*
831 1.1 mrg * for block special files, we want to make sure that we leave
832 1.1 mrg * the disklabel and bootblocks alone, so we arrange to skip
833 1.32 chs * over them (arbitrarily choosing to skip PAGE_SIZE bytes).
834 1.1 mrg * note that because of this the "size" can be less than the
835 1.1 mrg * actual number of blocks on the device.
836 1.1 mrg */
837 1.1 mrg if (vp->v_type == VBLK) {
838 1.1 mrg /* we use pages 1 to (size - 1) [inclusive] */
839 1.1 mrg size = npages - 1;
840 1.1 mrg addr = 1;
841 1.1 mrg } else {
842 1.1 mrg /* we use pages 0 to (size - 1) [inclusive] */
843 1.1 mrg size = npages;
844 1.1 mrg addr = 0;
845 1.1 mrg }
846 1.1 mrg
847 1.1 mrg /*
848 1.1 mrg * make sure we have enough blocks for a reasonably sized swap
849 1.1 mrg * area. we want at least one page.
850 1.1 mrg */
851 1.1 mrg
852 1.1 mrg if (size < 1) {
853 1.1 mrg UVMHIST_LOG(pdhist, " size <= 1!!", 0, 0, 0, 0);
854 1.1 mrg error = EINVAL;
855 1.1 mrg goto bad;
856 1.1 mrg }
857 1.1 mrg
858 1.1 mrg UVMHIST_LOG(pdhist, " dev=%x: size=%d addr=%ld\n", dev, size, addr, 0);
859 1.1 mrg
860 1.1 mrg /*
861 1.1 mrg * now we need to allocate an extent to manage this swap device
862 1.1 mrg */
863 1.1 mrg
864 1.90 yamt sdp->swd_blist = blist_create(npages);
865 1.90 yamt /* mark all except the `saved' region free. */
866 1.90 yamt blist_free(sdp->swd_blist, addr, size);
867 1.1 mrg
868 1.1 mrg /*
869 1.51 chs * if the vnode we are swapping to is the root vnode
870 1.1 mrg * (i.e. we are swapping to the miniroot) then we want
871 1.51 chs * to make sure we don't overwrite it. do a statvfs to
872 1.1 mrg * find its size and skip over it.
873 1.1 mrg */
874 1.1 mrg if (vp == rootvp) {
875 1.1 mrg struct mount *mp;
876 1.86 christos struct statvfs *sp;
877 1.1 mrg int rootblocks, rootpages;
878 1.1 mrg
879 1.1 mrg mp = rootvnode->v_mount;
880 1.1 mrg sp = &mp->mnt_stat;
881 1.86 christos rootblocks = sp->f_blocks * btodb(sp->f_frsize);
882 1.64 fredette /*
883 1.64 fredette * XXX: sp->f_blocks isn't the total number of
884 1.64 fredette * blocks in the filesystem, it's the number of
885 1.64 fredette * data blocks. so, our rootblocks almost
886 1.85 junyoung * definitely underestimates the total size
887 1.64 fredette * of the filesystem - how badly depends on the
888 1.85 junyoung * details of the filesystem type. there isn't
889 1.64 fredette * an obvious way to deal with this cleanly
890 1.85 junyoung * and perfectly, so for now we just pad our
891 1.64 fredette * rootblocks estimate with an extra 5 percent.
892 1.64 fredette */
893 1.64 fredette rootblocks += (rootblocks >> 5) +
894 1.64 fredette (rootblocks >> 6) +
895 1.64 fredette (rootblocks >> 7);
896 1.20 chs rootpages = round_page(dbtob(rootblocks)) >> PAGE_SHIFT;
897 1.32 chs if (rootpages > size)
898 1.1 mrg panic("swap_on: miniroot larger than swap?");
899 1.1 mrg
900 1.90 yamt if (rootpages != blist_fill(sdp->swd_blist, addr, rootpages)) {
901 1.1 mrg panic("swap_on: unable to preserve miniroot");
902 1.90 yamt }
903 1.1 mrg
904 1.32 chs size -= rootpages;
905 1.1 mrg printf("Preserved %d pages of miniroot ", rootpages);
906 1.32 chs printf("leaving %d pages of swap\n", size);
907 1.1 mrg }
908 1.1 mrg
909 1.39 chs /*
910 1.39 chs * add a ref to vp to reflect usage as a swap device.
911 1.39 chs */
912 1.39 chs vref(vp);
913 1.39 chs
914 1.1 mrg /*
915 1.1 mrg * now add the new swapdev to the drum and enable.
916 1.1 mrg */
917 1.48 fvdl if (extent_alloc(swapmap, npages, EX_NOALIGN, EX_NOBOUNDARY,
918 1.48 fvdl EX_WAITOK, &result))
919 1.48 fvdl panic("swapdrum_add");
920 1.48 fvdl
921 1.48 fvdl sdp->swd_drumoffset = (int)result;
922 1.48 fvdl sdp->swd_drumsize = npages;
923 1.48 fvdl sdp->swd_npages = size;
924 1.26 chs simple_lock(&uvm.swap_data_lock);
925 1.1 mrg sdp->swd_flags &= ~SWF_FAKE; /* going live */
926 1.1 mrg sdp->swd_flags |= (SWF_INUSE|SWF_ENABLE);
927 1.32 chs uvmexp.swpages += size;
928 1.81 pk uvmexp.swpgavail += size;
929 1.26 chs simple_unlock(&uvm.swap_data_lock);
930 1.1 mrg return (0);
931 1.1 mrg
932 1.1 mrg /*
933 1.43 chs * failure: clean up and return error.
934 1.1 mrg */
935 1.43 chs
936 1.43 chs bad:
937 1.90 yamt if (sdp->swd_blist) {
938 1.90 yamt blist_destroy(sdp->swd_blist);
939 1.43 chs }
940 1.43 chs if (vp != rootvp) {
941 1.80 fvdl (void)VOP_CLOSE(vp, FREAD|FWRITE, p->p_ucred, p);
942 1.43 chs }
943 1.1 mrg return (error);
944 1.1 mrg }
945 1.1 mrg
946 1.1 mrg /*
947 1.1 mrg * swap_off: stop swapping on swapdev
948 1.1 mrg *
949 1.32 chs * => swap data should be locked, we will unlock.
950 1.1 mrg */
951 1.1 mrg static int
952 1.93 thorpej swap_off(struct proc *p, struct swapdev *sdp)
953 1.1 mrg {
954 1.91 yamt int npages = sdp->swd_npages;
955 1.91 yamt int error = 0;
956 1.81 pk
957 1.1 mrg UVMHIST_FUNC("swap_off"); UVMHIST_CALLED(pdhist);
958 1.81 pk UVMHIST_LOG(pdhist, " dev=%x, npages=%d", sdp->swd_dev,npages,0,0);
959 1.1 mrg
960 1.32 chs /* disable the swap area being removed */
961 1.1 mrg sdp->swd_flags &= ~SWF_ENABLE;
962 1.81 pk uvmexp.swpgavail -= npages;
963 1.32 chs simple_unlock(&uvm.swap_data_lock);
964 1.32 chs
965 1.32 chs /*
966 1.32 chs * the idea is to find all the pages that are paged out to this
967 1.32 chs * device, and page them all in. in uvm, swap-backed pageable
968 1.32 chs * memory can take two forms: aobjs and anons. call the
969 1.32 chs * swapoff hook for each subsystem to bring in pages.
970 1.32 chs */
971 1.1 mrg
972 1.32 chs if (uao_swap_off(sdp->swd_drumoffset,
973 1.32 chs sdp->swd_drumoffset + sdp->swd_drumsize) ||
974 1.91 yamt amap_swap_off(sdp->swd_drumoffset,
975 1.32 chs sdp->swd_drumoffset + sdp->swd_drumsize)) {
976 1.91 yamt error = ENOMEM;
977 1.91 yamt } else if (sdp->swd_npginuse > sdp->swd_npgbad) {
978 1.91 yamt error = EBUSY;
979 1.91 yamt }
980 1.51 chs
981 1.91 yamt if (error) {
982 1.32 chs simple_lock(&uvm.swap_data_lock);
983 1.32 chs sdp->swd_flags |= SWF_ENABLE;
984 1.81 pk uvmexp.swpgavail += npages;
985 1.32 chs simple_unlock(&uvm.swap_data_lock);
986 1.91 yamt
987 1.91 yamt return error;
988 1.32 chs }
989 1.1 mrg
990 1.1 mrg /*
991 1.58 enami * done with the vnode.
992 1.39 chs * drop our ref on the vnode before calling VOP_CLOSE()
993 1.39 chs * so that spec_close() can tell if this is the last close.
994 1.1 mrg */
995 1.39 chs vrele(sdp->swd_vp);
996 1.32 chs if (sdp->swd_vp != rootvp) {
997 1.80 fvdl (void) VOP_CLOSE(sdp->swd_vp, FREAD|FWRITE, p->p_ucred, p);
998 1.32 chs }
999 1.32 chs
1000 1.32 chs simple_lock(&uvm.swap_data_lock);
1001 1.81 pk uvmexp.swpages -= npages;
1002 1.82 pk uvmexp.swpginuse -= sdp->swd_npgbad;
1003 1.1 mrg
1004 1.32 chs if (swaplist_find(sdp->swd_vp, 1) == NULL)
1005 1.70 provos panic("swap_off: swapdev not in list");
1006 1.32 chs swaplist_trim();
1007 1.48 fvdl simple_unlock(&uvm.swap_data_lock);
1008 1.1 mrg
1009 1.32 chs /*
1010 1.32 chs * free all resources!
1011 1.32 chs */
1012 1.32 chs extent_free(swapmap, sdp->swd_drumoffset, sdp->swd_drumsize,
1013 1.32 chs EX_WAITOK);
1014 1.90 yamt blist_destroy(sdp->swd_blist);
1015 1.66 hannken bufq_free(&sdp->swd_tab);
1016 1.32 chs free(sdp, M_VMSWAP);
1017 1.1 mrg return (0);
1018 1.1 mrg }
1019 1.1 mrg
1020 1.1 mrg /*
1021 1.1 mrg * /dev/drum interface and i/o functions
1022 1.1 mrg */
1023 1.1 mrg
1024 1.1 mrg /*
1025 1.1 mrg * swstrategy: perform I/O on the drum
1026 1.1 mrg *
1027 1.1 mrg * => we must map the i/o request from the drum to the correct swapdev.
1028 1.1 mrg */
1029 1.94 thorpej static void
1030 1.93 thorpej swstrategy(struct buf *bp)
1031 1.1 mrg {
1032 1.1 mrg struct swapdev *sdp;
1033 1.1 mrg struct vnode *vp;
1034 1.25 chs int s, pageno, bn;
1035 1.1 mrg UVMHIST_FUNC("swstrategy"); UVMHIST_CALLED(pdhist);
1036 1.1 mrg
1037 1.1 mrg /*
1038 1.1 mrg * convert block number to swapdev. note that swapdev can't
1039 1.1 mrg * be yanked out from under us because we are holding resources
1040 1.1 mrg * in it (i.e. the blocks we are doing I/O on).
1041 1.1 mrg */
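	/*
	 * (with DEV_BSIZE 512 and 4 KB pages, the conversion below
	 * reduces to pageno = bp->b_blkno / 8.)
	 */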
1042 1.41 chs pageno = dbtob((int64_t)bp->b_blkno) >> PAGE_SHIFT;
1043 1.26 chs simple_lock(&uvm.swap_data_lock);
1044 1.1 mrg sdp = swapdrum_getsdp(pageno);
1045 1.26 chs simple_unlock(&uvm.swap_data_lock);
1046 1.1 mrg if (sdp == NULL) {
1047 1.1 mrg bp->b_error = EINVAL;
1048 1.1 mrg bp->b_flags |= B_ERROR;
1049 1.1 mrg biodone(bp);
1050 1.1 mrg UVMHIST_LOG(pdhist, " failed to get swap device", 0, 0, 0, 0);
1051 1.1 mrg return;
1052 1.1 mrg }
1053 1.1 mrg
1054 1.1 mrg /*
1055 1.1 mrg * convert drum page number to block number on this swapdev.
1056 1.1 mrg */
1057 1.1 mrg
1058 1.32 chs pageno -= sdp->swd_drumoffset; /* page # on swapdev */
1059 1.44 enami bn = btodb((u_int64_t)pageno << PAGE_SHIFT); /* convert to diskblock */
1060 1.1 mrg
1061 1.41 chs UVMHIST_LOG(pdhist, " %s: mapoff=%x bn=%x bcount=%ld",
1062 1.1 mrg ((bp->b_flags & B_READ) == 0) ? "write" : "read",
1063 1.1 mrg sdp->swd_drumoffset, bn, bp->b_bcount);
1064 1.1 mrg
1065 1.1 mrg /*
1066 1.1 mrg * for block devices we finish up here.
1067 1.32 chs * for regular files we have to do more work which we delegate
1068 1.1 mrg * to sw_reg_strategy().
1069 1.1 mrg */
1070 1.1 mrg
1071 1.1 mrg switch (sdp->swd_vp->v_type) {
1072 1.1 mrg default:
1073 1.1 mrg panic("swstrategy: vnode type 0x%x", sdp->swd_vp->v_type);
1074 1.32 chs
1075 1.1 mrg case VBLK:
1076 1.1 mrg
1077 1.1 mrg /*
1078 1.1 mrg * must convert "bp" from an I/O on /dev/drum to an I/O
1079 1.1 mrg * on the swapdev (sdp).
1080 1.1 mrg */
1081 1.25 chs s = splbio();
1082 1.1 mrg bp->b_blkno = bn; /* swapdev block number */
1083 1.1 mrg vp = sdp->swd_vp; /* swapdev vnode pointer */
1084 1.1 mrg bp->b_dev = sdp->swd_dev; /* swapdev dev_t */
1085 1.1 mrg
1086 1.1 mrg /*
1087 1.1 mrg * if we are doing a write, we have to redirect the i/o on
1088 1.1 mrg * drum's v_numoutput counter to the swapdevs.
1089 1.1 mrg */
1090 1.1 mrg if ((bp->b_flags & B_READ) == 0) {
1091 1.1 mrg vwakeup(bp); /* kills one 'v_numoutput' on drum */
1092 1.76 pk V_INCR_NUMOUTPUT(vp); /* put it on swapdev */
1093 1.1 mrg }
1094 1.1 mrg
1095 1.41 chs /*
1096 1.1 mrg * finally plug in swapdev vnode and start I/O
1097 1.1 mrg */
1098 1.1 mrg bp->b_vp = vp;
1099 1.25 chs splx(s);
1100 1.84 hannken VOP_STRATEGY(vp, bp);
1101 1.1 mrg return;
1102 1.32 chs
1103 1.1 mrg case VREG:
1104 1.1 mrg /*
1105 1.32 chs * delegate to sw_reg_strategy function.
1106 1.1 mrg */
1107 1.1 mrg sw_reg_strategy(sdp, bp, bn);
1108 1.1 mrg return;
1109 1.1 mrg }
1110 1.1 mrg /* NOTREACHED */
1111 1.1 mrg }
1112 1.1 mrg
1113 1.1 mrg /*
1114 1.94 thorpej * swread: the read function for the drum (just a call to physio)
1115 1.94 thorpej */
1116 1.94 thorpej /*ARGSUSED*/
1117 1.94 thorpej static int
1118 1.94 thorpej swread(dev_t dev, struct uio *uio, int ioflag)
1119 1.94 thorpej {
1120 1.94 thorpej UVMHIST_FUNC("swread"); UVMHIST_CALLED(pdhist);
1121 1.94 thorpej
1122 1.94 thorpej UVMHIST_LOG(pdhist, " dev=%x offset=%qx", dev, uio->uio_offset, 0, 0);
1123 1.94 thorpej return (physio(swstrategy, NULL, dev, B_READ, minphys, uio));
1124 1.94 thorpej }
1125 1.94 thorpej
1126 1.94 thorpej /*
1127 1.94 thorpej * swwrite: the write function for the drum (just a call to physio)
1128 1.94 thorpej */
1129 1.94 thorpej /*ARGSUSED*/
1130 1.94 thorpej static int
1131 1.94 thorpej swwrite(dev_t dev, struct uio *uio, int ioflag)
1132 1.94 thorpej {
1133 1.94 thorpej UVMHIST_FUNC("swwrite"); UVMHIST_CALLED(pdhist);
1134 1.94 thorpej
1135 1.94 thorpej UVMHIST_LOG(pdhist, " dev=%x offset=%qx", dev, uio->uio_offset, 0, 0);
1136 1.94 thorpej return (physio(swstrategy, NULL, dev, B_WRITE, minphys, uio));
1137 1.94 thorpej }
1138 1.94 thorpej
1139 1.94 thorpej const struct bdevsw swap_bdevsw = {
1140 1.94 thorpej noopen, noclose, swstrategy, noioctl, nodump, nosize,
1141 1.94 thorpej };
1142 1.94 thorpej
1143 1.94 thorpej const struct cdevsw swap_cdevsw = {
1144 1.94 thorpej nullopen, nullclose, swread, swwrite, noioctl,
1145 1.94 thorpej nostop, notty, nopoll, nommap, nokqfilter
1146 1.94 thorpej };
1147 1.94 thorpej
1148 1.94 thorpej /*
1149 1.1 mrg * sw_reg_strategy: handle swap i/o to regular files
1150 1.1 mrg */
1151 1.1 mrg static void
1152 1.93 thorpej sw_reg_strategy(struct swapdev *sdp, struct buf *bp, int bn)
1153 1.1 mrg {
1154 1.1 mrg struct vnode *vp;
1155 1.1 mrg struct vndxfer *vnx;
1156 1.44 enami daddr_t nbn;
1157 1.1 mrg caddr_t addr;
1158 1.44 enami off_t byteoff;
1159 1.9 mrg int s, off, nra, error, sz, resid;
1160 1.1 mrg UVMHIST_FUNC("sw_reg_strategy"); UVMHIST_CALLED(pdhist);
1161 1.1 mrg
1162 1.1 mrg /*
1163 1.1 mrg * allocate a vndxfer head for this transfer and point it to
1164 1.1 mrg * our buffer.
1165 1.1 mrg */
1166 1.12 pk getvndxfer(vnx);
1167 1.1 mrg vnx->vx_flags = VX_BUSY;
1168 1.1 mrg vnx->vx_error = 0;
1169 1.1 mrg vnx->vx_pending = 0;
1170 1.1 mrg vnx->vx_bp = bp;
1171 1.1 mrg vnx->vx_sdp = sdp;
1172 1.1 mrg
1173 1.1 mrg /*
1174 1.1 mrg * setup for main loop where we read filesystem blocks into
1175 1.1 mrg * our buffer.
1176 1.1 mrg */
1177 1.1 mrg error = 0;
1178 1.1 mrg bp->b_resid = bp->b_bcount; /* nothing transferred yet! */
1179 1.1 mrg addr = bp->b_data; /* current position in buffer */
1180 1.44 enami byteoff = dbtob((u_int64_t)bn);
1181 1.1 mrg
1182 1.1 mrg for (resid = bp->b_resid; resid; resid -= sz) {
1183 1.1 mrg struct vndbuf *nbp;
1184 1.1 mrg
1185 1.1 mrg /*
1186 1.1 mrg * translate byteoffset into block number. return values:
1187 1.1 mrg * vp = vnode of underlying device
1188 1.1 mrg * nbn = new block number (on underlying vnode dev)
1189 1.1 mrg * nra = num blocks we can read-ahead (excludes requested
1190 1.1 mrg * block)
1191 1.1 mrg */
1192 1.1 mrg nra = 0;
1193 1.1 mrg error = VOP_BMAP(sdp->swd_vp, byteoff / sdp->swd_bsize,
1194 1.1 mrg &vp, &nbn, &nra);
1195 1.1 mrg
1196 1.32 chs if (error == 0 && nbn == (daddr_t)-1) {
1197 1.51 chs /*
1198 1.23 marc * this used to just set error, but that doesn't
1199 1.23 marc * do the right thing. Instead, it causes random
1200 1.23 marc * memory errors. The panic() should remain until
1201 1.23 marc * this condition doesn't destabilize the system.
1202 1.23 marc */
1203 1.23 marc #if 1
1204 1.23 marc panic("sw_reg_strategy: swap to sparse file");
1205 1.23 marc #else
1206 1.1 mrg error = EIO; /* failure */
1207 1.23 marc #endif
1208 1.23 marc }
1209 1.1 mrg
1210 1.1 mrg /*
1211 1.1 mrg * punt if there was an error or a hole in the file.
1212 1.1 mrg * we must wait for any i/o ops we have already started
1213 1.1 mrg * to finish before returning.
1214 1.1 mrg *
1215 1.1 mrg * XXX we could deal with holes here but it would be
1216 1.1 mrg * a hassle (in the write case).
1217 1.1 mrg */
1218 1.1 mrg if (error) {
1219 1.1 mrg s = splbio();
1220 1.1 mrg vnx->vx_error = error; /* pass error up */
1221 1.1 mrg goto out;
1222 1.1 mrg }
1223 1.1 mrg
1224 1.1 mrg /*
1225 1.1 mrg * compute the size ("sz") of this transfer (in bytes).
1226 1.1 mrg */
1227 1.41 chs off = byteoff % sdp->swd_bsize;
1228 1.41 chs sz = (1 + nra) * sdp->swd_bsize - off;
1229 1.41 chs if (sz > resid)
1230 1.1 mrg sz = resid;
1231 1.1 mrg
1232 1.41 chs UVMHIST_LOG(pdhist, "sw_reg_strategy: "
1233 1.41 chs "vp %p/%p offset 0x%x/0x%x",
1234 1.41 chs sdp->swd_vp, vp, byteoff, nbn);
1235 1.1 mrg
1236 1.1 mrg /*
1237 1.1 mrg * now get a buf structure. note that the vb_buf is
1238 1.1 mrg * at the front of the nbp structure so that you can
1239 1.1 mrg * cast pointers between the two structures easily.
1240 1.1 mrg */
1241 1.12 pk getvndbuf(nbp);
1242 1.77 thorpej BUF_INIT(&nbp->vb_buf);
1243 1.1 mrg nbp->vb_buf.b_flags = bp->b_flags | B_CALL;
1244 1.1 mrg nbp->vb_buf.b_bcount = sz;
1245 1.12 pk nbp->vb_buf.b_bufsize = sz;
1246 1.1 mrg nbp->vb_buf.b_error = 0;
1247 1.1 mrg nbp->vb_buf.b_data = addr;
1248 1.41 chs nbp->vb_buf.b_lblkno = 0;
1249 1.1 mrg nbp->vb_buf.b_blkno = nbn + btodb(off);
1250 1.34 thorpej nbp->vb_buf.b_rawblkno = nbp->vb_buf.b_blkno;
1251 1.1 mrg nbp->vb_buf.b_iodone = sw_reg_iodone;
1252 1.53 chs nbp->vb_buf.b_vp = vp;
1253 1.53 chs if (vp->v_type == VBLK) {
1254 1.53 chs nbp->vb_buf.b_dev = vp->v_rdev;
1255 1.53 chs }
1256 1.1 mrg
1257 1.1 mrg nbp->vb_xfer = vnx; /* patch it back in to vnx */
1258 1.1 mrg
1259 1.1 mrg /*
1260 1.1 mrg * Just sort by block number
1261 1.1 mrg */
1262 1.1 mrg s = splbio();
1263 1.1 mrg if (vnx->vx_error != 0) {
1264 1.1 mrg putvndbuf(nbp);
1265 1.1 mrg goto out;
1266 1.1 mrg }
1267 1.1 mrg vnx->vx_pending++;
1268 1.1 mrg
1269 1.1 mrg /* sort it in and start I/O if we are not over our limit */
1270 1.65 hannken BUFQ_PUT(&sdp->swd_tab, &nbp->vb_buf);
1271 1.1 mrg sw_reg_start(sdp);
1272 1.1 mrg splx(s);
1273 1.1 mrg
1274 1.1 mrg /*
1275 1.1 mrg * advance to the next I/O
1276 1.1 mrg */
1277 1.9 mrg byteoff += sz;
1278 1.1 mrg addr += sz;
1279 1.1 mrg }
1280 1.1 mrg
1281 1.1 mrg s = splbio();
1282 1.1 mrg
1283 1.1 mrg out: /* Arrive here at splbio */
1284 1.1 mrg vnx->vx_flags &= ~VX_BUSY;
1285 1.1 mrg if (vnx->vx_pending == 0) {
1286 1.1 mrg if (vnx->vx_error != 0) {
1287 1.1 mrg bp->b_error = vnx->vx_error;
1288 1.1 mrg bp->b_flags |= B_ERROR;
1289 1.1 mrg }
1290 1.1 mrg putvndxfer(vnx);
1291 1.1 mrg biodone(bp);
1292 1.1 mrg }
1293 1.1 mrg splx(s);
1294 1.1 mrg }
1295 1.1 mrg
1296 1.1 mrg /*
1297 1.1 mrg * sw_reg_start: start an I/O request on the requested swapdev
1298 1.1 mrg *
1299 1.65 hannken * => reqs are sorted by b_rawblkno (above)
1300 1.1 mrg */
1301 1.1 mrg static void
1302 1.93 thorpej sw_reg_start(struct swapdev *sdp)
1303 1.1 mrg {
1304 1.1 mrg struct buf *bp;
1305 1.1 mrg UVMHIST_FUNC("sw_reg_start"); UVMHIST_CALLED(pdhist);
1306 1.1 mrg
1307 1.8 mrg /* recursion control */
1308 1.1 mrg if ((sdp->swd_flags & SWF_BUSY) != 0)
1309 1.1 mrg return;
1310 1.1 mrg
1311 1.1 mrg sdp->swd_flags |= SWF_BUSY;
1312 1.1 mrg
1313 1.33 thorpej while (sdp->swd_active < sdp->swd_maxactive) {
1314 1.65 hannken bp = BUFQ_GET(&sdp->swd_tab);
1315 1.1 mrg if (bp == NULL)
1316 1.1 mrg break;
1317 1.33 thorpej sdp->swd_active++;
1318 1.1 mrg
1319 1.1 mrg UVMHIST_LOG(pdhist,
1320 1.1 mrg "sw_reg_start: bp %p vp %p blkno %p cnt %lx",
1321 1.1 mrg bp, bp->b_vp, bp->b_blkno, bp->b_bcount);
1322 1.1 mrg if ((bp->b_flags & B_READ) == 0)
1323 1.76 pk V_INCR_NUMOUTPUT(bp->b_vp);
1324 1.41 chs
1325 1.84 hannken VOP_STRATEGY(bp->b_vp, bp);
1326 1.1 mrg }
1327 1.1 mrg sdp->swd_flags &= ~SWF_BUSY;
1328 1.1 mrg }
1329 1.1 mrg
1330 1.1 mrg /*
1331 1.1 mrg * sw_reg_iodone: one of our i/o's has completed and needs post-i/o cleanup
1332 1.1 mrg *
1333 1.1 mrg * => note that we can recover the vndbuf struct by casting the buf ptr
1334 1.1 mrg */
1335 1.1 mrg static void
1336 1.93 thorpej sw_reg_iodone(struct buf *bp)
1337 1.1 mrg {
1338 1.1 mrg struct vndbuf *vbp = (struct vndbuf *) bp;
1339 1.1 mrg struct vndxfer *vnx = vbp->vb_xfer;
1340 1.1 mrg struct buf *pbp = vnx->vx_bp; /* parent buffer */
1341 1.1 mrg struct swapdev *sdp = vnx->vx_sdp;
1342 1.72 chs int s, resid, error;
1343 1.1 mrg UVMHIST_FUNC("sw_reg_iodone"); UVMHIST_CALLED(pdhist);
1344 1.1 mrg
1345 1.1 mrg UVMHIST_LOG(pdhist, " vbp=%p vp=%p blkno=%x addr=%p",
1346 1.1 mrg vbp, vbp->vb_buf.b_vp, vbp->vb_buf.b_blkno, vbp->vb_buf.b_data);
1347 1.1 mrg UVMHIST_LOG(pdhist, " cnt=%lx resid=%lx",
1348 1.1 mrg vbp->vb_buf.b_bcount, vbp->vb_buf.b_resid, 0, 0);
1349 1.1 mrg
1350 1.1 mrg /*
1351 1.1 mrg * protect vbp at splbio and update.
1352 1.1 mrg */
1353 1.1 mrg
1354 1.1 mrg s = splbio();
1355 1.1 mrg resid = vbp->vb_buf.b_bcount - vbp->vb_buf.b_resid;
1356 1.1 mrg pbp->b_resid -= resid;
1357 1.1 mrg vnx->vx_pending--;
1358 1.1 mrg
1359 1.72 chs if (vbp->vb_buf.b_flags & B_ERROR) {
1360 1.1 mrg /* pass error upward */
1361 1.72 chs error = vbp->vb_buf.b_error ? vbp->vb_buf.b_error : EIO;
1362 1.72 chs UVMHIST_LOG(pdhist, " got error=%d !", error, 0, 0, 0);
1363 1.72 chs vnx->vx_error = error;
1364 1.35 chs }
1365 1.35 chs
1366 1.35 chs /*
1367 1.1 mrg * kill vbp structure
1368 1.1 mrg */
1369 1.1 mrg putvndbuf(vbp);
1370 1.1 mrg
1371 1.1 mrg /*
1372 1.1 mrg * wrap up this transaction if it has run to completion or, in
1373 1.1 mrg * case of an error, when all auxiliary buffers have returned.
1374 1.1 mrg */
1375 1.1 mrg if (vnx->vx_error != 0) {
1376 1.1 mrg /* pass error upward */
1377 1.1 mrg pbp->b_flags |= B_ERROR;
1378 1.1 mrg pbp->b_error = vnx->vx_error;
1379 1.1 mrg if ((vnx->vx_flags & VX_BUSY) == 0 && vnx->vx_pending == 0) {
1380 1.1 mrg putvndxfer(vnx);
1381 1.1 mrg biodone(pbp);
1382 1.1 mrg }
1383 1.11 pk } else if (pbp->b_resid == 0) {
1384 1.46 chs KASSERT(vnx->vx_pending == 0);
1385 1.1 mrg if ((vnx->vx_flags & VX_BUSY) == 0) {
1386 1.8 mrg UVMHIST_LOG(pdhist, " pbp=%p iodone error=%d !",
1387 1.8 mrg pbp, vnx->vx_error, 0, 0);
1388 1.8 mrg putvndxfer(vnx);
1389 1.1 mrg biodone(pbp);
1390 1.1 mrg }
1391 1.1 mrg }
1392 1.1 mrg
1393 1.1 mrg /*
1394 1.1 mrg * done! start next swapdev I/O if one is pending
1395 1.1 mrg */
1396 1.33 thorpej sdp->swd_active--;
1397 1.1 mrg sw_reg_start(sdp);
1398 1.1 mrg splx(s);
1399 1.1 mrg }
1400 1.1 mrg
1401 1.1 mrg
1402 1.1 mrg /*
1403 1.1 mrg * uvm_swap_alloc: allocate space on swap
1404 1.1 mrg *
1405 1.1 mrg * => allocation is done "round robin" down the priority list; as we
1406 1.1 mrg * allocate within a priority we "rotate" the circle queue.
1407 1.1 mrg * => space can be freed with uvm_swap_free
1408 1.1 mrg * => we return the page slot number in /dev/drum (0 == invalid slot)
1409 1.26 chs * => we lock uvm.swap_data_lock
1410 1.1 mrg * => XXXMRG: "LESSOK" INTERFACE NEEDED TO EXTENT SYSTEM
1411 1.1 mrg */
1412 1.1 mrg int
1413 1.93 thorpej uvm_swap_alloc(int *nslots /* IN/OUT */, boolean_t lessok)
1414 1.1 mrg {
1415 1.1 mrg struct swapdev *sdp;
1416 1.1 mrg struct swappri *spp;
1417 1.1 mrg UVMHIST_FUNC("uvm_swap_alloc"); UVMHIST_CALLED(pdhist);
1418 1.1 mrg
1419 1.1 mrg /*
1420 1.1 mrg * no swap devices configured yet? definite failure.
1421 1.1 mrg */
1422 1.1 mrg if (uvmexp.nswapdev < 1)
1423 1.1 mrg return 0;
1424 1.51 chs
1425 1.1 mrg /*
1426 1.1 mrg * lock data lock, convert slots into blocks, and enter loop
1427 1.1 mrg */
1428 1.26 chs simple_lock(&uvm.swap_data_lock);
1429 1.1 mrg
1430 1.1 mrg ReTry: /* XXXMRG */
1431 1.55 chs LIST_FOREACH(spp, &swap_priority, spi_swappri) {
1432 1.55 chs CIRCLEQ_FOREACH(sdp, &spp->spi_swapdev, swd_next) {
1433 1.90 yamt uint64_t result;
1434 1.90 yamt
1435 1.1 mrg /* if it's not enabled, then we can't swap from it */
1436 1.1 mrg if ((sdp->swd_flags & SWF_ENABLE) == 0)
1437 1.1 mrg continue;
1438 1.1 mrg if (sdp->swd_npginuse + *nslots > sdp->swd_npages)
1439 1.1 mrg continue;
1440 1.90 yamt result = blist_alloc(sdp->swd_blist, *nslots);
1441 1.90 yamt if (result == BLIST_NONE) {
1442 1.1 mrg continue;
1443 1.1 mrg }
1444 1.90 yamt KASSERT(result < sdp->swd_drumsize);
1445 1.1 mrg
1446 1.1 mrg /*
1447 1.1 mrg * successful allocation! now rotate the circleq.
1448 1.1 mrg */
1449 1.1 mrg CIRCLEQ_REMOVE(&spp->spi_swapdev, sdp, swd_next);
1450 1.1 mrg CIRCLEQ_INSERT_TAIL(&spp->spi_swapdev, sdp, swd_next);
1451 1.1 mrg sdp->swd_npginuse += *nslots;
1452 1.1 mrg uvmexp.swpginuse += *nslots;
1453 1.26 chs simple_unlock(&uvm.swap_data_lock);
1454 1.1 mrg /* done! return drum slot number */
1455 1.1 mrg UVMHIST_LOG(pdhist,
1456 1.1 mrg "success! returning %d slots starting at %d",
1457 1.1 mrg *nslots, result + sdp->swd_drumoffset, 0, 0);
1458 1.55 chs return (result + sdp->swd_drumoffset);
1459 1.1 mrg }
1460 1.1 mrg }
1461 1.1 mrg
1462 1.1 mrg /* XXXMRG: BEGIN HACK */
1463 1.1 mrg if (*nslots > 1 && lessok) {
1464 1.1 mrg *nslots = 1;
1465 1.90 yamt /* XXXMRG: ugh! blist should support this for us */
1466 1.90 yamt goto ReTry;
1467 1.1 mrg }
1468 1.1 mrg /* XXXMRG: END HACK */
1469 1.1 mrg
1470 1.26 chs simple_unlock(&uvm.swap_data_lock);
1471 1.55 chs return 0;
1472 1.1 mrg }
1473 1.1 mrg
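/*
 * uvm_swapisfull: return true if the count of swap-only pages has
 * reached the number of usable swap pages, i.e. no more swap space
 * is effectively available.
 *
 * => we lock uvm.swap_data_lock
 */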
1474 1.81 pk boolean_t
1475 1.81 pk uvm_swapisfull(void)
1476 1.81 pk {
1477 1.81 pk boolean_t rv;
1478 1.81 pk
1479 1.81 pk simple_lock(&uvm.swap_data_lock);
1480 1.81 pk KASSERT(uvmexp.swpgonly <= uvmexp.swpages);
1481 1.81 pk rv = (uvmexp.swpgonly >= uvmexp.swpgavail);
1482 1.81 pk simple_unlock(&uvm.swap_data_lock);
1483 1.81 pk
1484 1.81 pk return (rv);
1485 1.81 pk }
1486 1.81 pk
1487 1.1 mrg /*
1488 1.32 chs * uvm_swap_markbad: keep track of swap ranges where we've had i/o errors
1489 1.32 chs *
1490 1.32 chs * => we lock uvm.swap_data_lock
1491 1.32 chs */
1492 1.32 chs void
1493 1.93 thorpej uvm_swap_markbad(int startslot, int nslots)
1494 1.32 chs {
1495 1.32 chs struct swapdev *sdp;
1496 1.32 chs UVMHIST_FUNC("uvm_swap_markbad"); UVMHIST_CALLED(pdhist);
1497 1.32 chs
1498 1.32 chs simple_lock(&uvm.swap_data_lock);
1499 1.32 chs sdp = swapdrum_getsdp(startslot);
1500 1.82 pk KASSERT(sdp != NULL);
1501 1.32 chs
1502 1.32 chs /*
1503 1.32 chs * we just keep track of how many pages have been marked bad
1504 1.32 chs * in this device, to make everything add up in swap_off().
1505 1.32 chs * we assume here that the range of slots will all be within
1506 1.32 chs * one swap device.
1507 1.32 chs */
1508 1.41 chs
1509 1.82 pk KASSERT(uvmexp.swpgonly >= nslots);
1510 1.82 pk uvmexp.swpgonly -= nslots;
1511 1.32 chs sdp->swd_npgbad += nslots;
1512 1.41 chs UVMHIST_LOG(pdhist, "now %d bad", sdp->swd_npgbad, 0,0,0);
1513 1.32 chs simple_unlock(&uvm.swap_data_lock);
1514 1.32 chs }
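/*
 * Illustrative sketch, not part of the original source: roughly how an
 * i/o error path might retire a failing swap range.  The slots stay
 * charged to the device (swd_npgbad), and the caller should remember
 * SWSLOT_BAD instead of the old slot number so that later frees of the
 * range become no-ops (see the SWSLOT_BAD check in uvm_swap_free()).
 * example_swap_io_failed() is not a real kernel function.
 */
static void
example_swap_io_failed(int startslot, int npages)
{
	uvm_swap_markbad(startslot, npages);
	/* from here on, record SWSLOT_BAD in place of startslot */
}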
1515 1.32 chs
1516 1.32 chs /*
1517 1.1 mrg * uvm_swap_free: free swap slots
1518 1.1 mrg *
1519 1.1 mrg * => this can be all or part of an allocation made by uvm_swap_alloc
1520 1.26 chs * => we lock uvm.swap_data_lock
1521 1.1 mrg */
1522 1.1 mrg void
1523 1.93 thorpej uvm_swap_free(int startslot, int nslots)
1524 1.1 mrg {
1525 1.1 mrg struct swapdev *sdp;
1526 1.1 mrg UVMHIST_FUNC("uvm_swap_free"); UVMHIST_CALLED(pdhist);
1527 1.1 mrg
1528 1.1 mrg UVMHIST_LOG(pdhist, "freeing %d slots starting at %d", nslots,
1529 1.1 mrg startslot, 0, 0);
1530 1.32 chs
1531 1.32 chs /*
1532 1.32 chs * ignore attempts to free the "bad" slot.
1533 1.32 chs */
1534 1.46 chs
1535 1.32 chs if (startslot == SWSLOT_BAD) {
1536 1.32 chs return;
1537 1.32 chs }
1538 1.32 chs
1539 1.1 mrg /*
1540 1.51 chs * convert drum slot offset back to sdp, free the blocks
1541 1.51 chs * in the blist, and return. must hold the swap data lock
1542 1.1 mrg * to do the lookup and access the blist.
1543 1.1 mrg */
1544 1.46 chs
1545 1.26 chs simple_lock(&uvm.swap_data_lock);
1546 1.1 mrg sdp = swapdrum_getsdp(startslot);
1547 1.46 chs KASSERT(uvmexp.nswapdev >= 1);
1548 1.46 chs KASSERT(sdp != NULL);
1549 1.46 chs KASSERT(sdp->swd_npginuse >= nslots);
1550 1.90 yamt blist_free(sdp->swd_blist, startslot - sdp->swd_drumoffset, nslots);
1551 1.1 mrg sdp->swd_npginuse -= nslots;
1552 1.1 mrg uvmexp.swpginuse -= nslots;
1553 1.26 chs simple_unlock(&uvm.swap_data_lock);
1554 1.1 mrg }
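/*
 * Illustrative sketch, not part of the original source: a partial free
 * of an earlier uvm_swap_alloc() allocation, e.g. dropping the unused
 * tail of a cluster.  example_free_tail() is not a real kernel function.
 */
static void
example_free_tail(int startslot, int allocated, int used)
{
	if (startslot == SWSLOT_BAD)
		return;		/* uvm_swap_free() ignores the bad slot anyway */
	if (allocated > used)
		uvm_swap_free(startslot + used, allocated - used);
}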
1555 1.1 mrg
1556 1.1 mrg /*
1557 1.1 mrg * uvm_swap_put: put any number of pages into a contig place on swap
1558 1.1 mrg *
1559 1.1 mrg * => can be sync or async
1560 1.1 mrg */
1561 1.54 chs
1562 1.1 mrg int
1563 1.93 thorpej uvm_swap_put(int swslot, struct vm_page **ppsp, int npages, int flags)
1564 1.1 mrg {
1565 1.56 chs int error;
1566 1.1 mrg
1567 1.56 chs error = uvm_swap_io(ppsp, swslot, npages, B_WRITE |
1568 1.1 mrg ((flags & PGO_SYNCIO) ? 0 : B_ASYNC));
1569 1.56 chs return error;
1570 1.1 mrg }
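/*
 * Illustrative sketch, not part of the original source: pushing a
 * cluster to a previously allocated drum range.  Without PGO_SYNCIO the
 * write is started B_ASYNC and a return of 0 only means the i/o was
 * queued; completion is handled by the aiodone path.
 * example_cluster_out() is not a real kernel function.
 */
static int
example_cluster_out(int swslot, struct vm_page **pps, int npages)
{
	return uvm_swap_put(swslot, pps, npages, 0);	/* async write */
}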
1571 1.1 mrg
1572 1.1 mrg /*
1573 1.1 mrg * uvm_swap_get: get a single page from swap
1574 1.1 mrg *
1575 1.1 mrg * => usually a sync op (from fault)
1576 1.1 mrg */
1577 1.54 chs
1578 1.1 mrg int
1579 1.93 thorpej uvm_swap_get(struct vm_page *page, int swslot, int flags)
1580 1.1 mrg {
1581 1.56 chs int error;
1582 1.1 mrg
1583 1.1 mrg uvmexp.nswget++;
1584 1.46 chs KASSERT(flags & PGO_SYNCIO);
1585 1.32 chs if (swslot == SWSLOT_BAD) {
1586 1.47 chs return EIO;
1587 1.32 chs }
1588 1.81 pk
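	/*
	 * note (added commentary): PGO_SYNCIO is asserted above, so the
	 * B_ASYNC case below is currently unreachable.
	 */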
1589 1.56 chs error = uvm_swap_io(&page, swslot, 1, B_READ |
1590 1.1 mrg ((flags & PGO_SYNCIO) ? 0 : B_ASYNC));
1591 1.56 chs if (error == 0) {
1592 1.47 chs
1593 1.26 chs /*
1594 1.54 chs * this page is no longer only in swap.
1595 1.26 chs */
1596 1.47 chs
1597 1.26 chs simple_lock(&uvm.swap_data_lock);
1598 1.56 chs KASSERT(uvmexp.swpgonly > 0);
1599 1.54 chs uvmexp.swpgonly--;
1600 1.26 chs simple_unlock(&uvm.swap_data_lock);
1601 1.26 chs }
1602 1.56 chs return error;
1603 1.1 mrg }
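/*
 * Illustrative sketch, not part of the original source: a fault-time
 * pagein.  uvm_swap_get() must be called with PGO_SYNCIO (it asserts
 * this), and a slot retired by uvm_swap_markbad() fails immediately
 * with EIO.  example_pagein() is not a real kernel function.
 */
static int
example_pagein(struct vm_page *pg, int swslot)
{
	return uvm_swap_get(pg, swslot, PGO_SYNCIO);
}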
1604 1.1 mrg
1605 1.1 mrg /*
1606 1.1 mrg * uvm_swap_io: do an i/o operation to swap
1607 1.1 mrg */
1608 1.1 mrg
1609 1.1 mrg static int
1610 1.93 thorpej uvm_swap_io(struct vm_page **pps, int startslot, int npages, int flags)
1611 1.1 mrg {
1612 1.1 mrg daddr_t startblk;
1613 1.1 mrg struct buf *bp;
1614 1.15 eeh vaddr_t kva;
1615 1.54 chs int error, s, mapinflags;
1616 1.41 chs boolean_t write, async;
1617 1.1 mrg UVMHIST_FUNC("uvm_swap_io"); UVMHIST_CALLED(pdhist);
1618 1.1 mrg
1619 1.1 mrg UVMHIST_LOG(pdhist, "<- called, startslot=%d, npages=%d, flags=%d",
1620 1.1 mrg startslot, npages, flags, 0);
1621 1.32 chs
1622 1.41 chs write = (flags & B_READ) == 0;
1623 1.41 chs async = (flags & B_ASYNC) != 0;
1624 1.41 chs
1625 1.1 mrg /*
1626 1.1 mrg * convert starting drum slot (a page index) to a DEV_BSIZE block number
1627 1.1 mrg */
1628 1.54 chs
1629 1.44 enami startblk = btodb((u_int64_t)startslot << PAGE_SHIFT);
1630 1.1 mrg
1631 1.1 mrg /*
1632 1.54 chs * first, map the pages into the kernel.
1633 1.41 chs */
1634 1.41 chs
1635 1.54 chs mapinflags = !write ?
1636 1.54 chs UVMPAGER_MAPIN_WAITOK|UVMPAGER_MAPIN_READ :
1637 1.54 chs UVMPAGER_MAPIN_WAITOK|UVMPAGER_MAPIN_WRITE;
1638 1.41 chs kva = uvm_pagermapin(pps, npages, mapinflags);
1639 1.1 mrg
1640 1.51 chs /*
1641 1.41 chs * now allocate a buf for the i/o.
1642 1.1 mrg */
1643 1.54 chs
1644 1.1 mrg s = splbio();
1645 1.54 chs bp = pool_get(&bufpool, PR_WAITOK);
1646 1.41 chs splx(s);
1647 1.1 mrg
1648 1.1 mrg /*
1649 1.1 mrg * fill in the bp. we currently route our i/o through
1650 1.1 mrg * /dev/drum's vnode [swapdev_vp].
1651 1.1 mrg */
1652 1.54 chs
1653 1.77 thorpej BUF_INIT(bp);
1654 1.21 mycroft bp->b_flags = B_BUSY | B_NOCACHE | (flags & (B_READ|B_ASYNC));
1655 1.1 mrg bp->b_proc = &proc0; /* XXX */
1656 1.12 pk bp->b_vnbufs.le_next = NOLIST;
1657 1.1 mrg bp->b_data = (caddr_t)kva;
1658 1.1 mrg bp->b_blkno = startblk;
1659 1.1 mrg bp->b_vp = swapdev_vp;
1660 1.41 chs bp->b_bufsize = bp->b_bcount = npages << PAGE_SHIFT;
1661 1.1 mrg
1662 1.51 chs /*
1663 1.41 chs * bump v_numoutput (counter of number of active outputs).
1664 1.1 mrg */
1665 1.54 chs
1666 1.41 chs if (write) {
1667 1.1 mrg s = splbio();
1668 1.76 pk V_INCR_NUMOUTPUT(swapdev_vp);
1669 1.1 mrg splx(s);
1670 1.1 mrg }
1671 1.1 mrg
1672 1.1 mrg /*
1673 1.41 chs * for async ops we must set up the iodone handler.
1674 1.1 mrg */
1675 1.54 chs
1676 1.41 chs if (async) {
1677 1.54 chs bp->b_flags |= B_CALL;
1678 1.41 chs bp->b_iodone = uvm_aio_biodone;
1679 1.1 mrg UVMHIST_LOG(pdhist, "doing async!", 0, 0, 0, 0);
1680 1.83 yamt if (curproc == uvm.pagedaemon_proc)
1681 1.83 yamt BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
1682 1.83 yamt else
1683 1.83 yamt BIO_SETPRIO(bp, BPRIO_TIMELIMITED);
1684 1.83 yamt } else {
1685 1.83 yamt BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
1686 1.1 mrg }
1687 1.1 mrg UVMHIST_LOG(pdhist,
1688 1.41 chs "about to start io: data = %p blkno = 0x%x, bcount = %ld",
1689 1.1 mrg bp->b_data, bp->b_blkno, bp->b_bcount, 0);
1690 1.1 mrg
1691 1.1 mrg /*
1692 1.1 mrg * now we start the I/O, and if async, return.
1693 1.1 mrg */
1694 1.54 chs
1695 1.84 hannken VOP_STRATEGY(swapdev_vp, bp);
1696 1.41 chs if (async)
1697 1.47 chs return 0;
1698 1.1 mrg
1699 1.1 mrg /*
1700 1.1 mrg * must be sync i/o. wait for it to finish.
1701 1.1 mrg */
1702 1.54 chs
1703 1.47 chs error = biowait(bp);
1704 1.1 mrg
1705 1.1 mrg /*
1706 1.1 mrg * kill the pager mapping
1707 1.1 mrg */
1708 1.54 chs
1709 1.1 mrg uvm_pagermapout(kva, npages);
1710 1.1 mrg
1711 1.1 mrg /*
1712 1.54 chs * now dispose of the buf and we're done.
1713 1.1 mrg */
1714 1.54 chs
1715 1.1 mrg s = splbio();
1716 1.41 chs if (write)
1717 1.41 chs vwakeup(bp);
1718 1.41 chs pool_put(&bufpool, bp);
1719 1.1 mrg splx(s);
1720 1.47 chs UVMHIST_LOG(pdhist, "<- done (sync) error=%d", error, 0, 0, 0);
1721 1.47 chs return (error);
1722 1.1 mrg }
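/*
 * note (added commentary): uvm_swap_io() has two completion contracts.
 * for sync i/o it waits in biowait(), unmaps the pager mapping and
 * frees the buf itself (above); for async i/o the B_CALL/b_iodone hook
 * routes completion to uvm_aio_biodone(), which must do the equivalent
 * cleanup after the i/o finishes.
 */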
1723