uvm_swap.c revision 1.73

1 1.73 perry /* $NetBSD: uvm_swap.c,v 1.73 2002/11/02 07:40:49 perry Exp $ */
2 1.1 mrg
3 1.1 mrg /*
4 1.1 mrg * Copyright (c) 1995, 1996, 1997 Matthew R. Green
5 1.1 mrg * All rights reserved.
6 1.1 mrg *
7 1.1 mrg * Redistribution and use in source and binary forms, with or without
8 1.1 mrg * modification, are permitted provided that the following conditions
9 1.1 mrg * are met:
10 1.1 mrg * 1. Redistributions of source code must retain the above copyright
11 1.1 mrg * notice, this list of conditions and the following disclaimer.
12 1.1 mrg * 2. Redistributions in binary form must reproduce the above copyright
13 1.1 mrg * notice, this list of conditions and the following disclaimer in the
14 1.1 mrg * documentation and/or other materials provided with the distribution.
15 1.1 mrg * 3. The name of the author may not be used to endorse or promote products
16 1.1 mrg * derived from this software without specific prior written permission.
17 1.1 mrg *
18 1.1 mrg * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 1.1 mrg * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 1.1 mrg * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 1.1 mrg * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 1.1 mrg * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
23 1.1 mrg * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
24 1.1 mrg * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
25 1.1 mrg * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
26 1.1 mrg * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 1.1 mrg * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 1.1 mrg * SUCH DAMAGE.
29 1.3 mrg *
30 1.3 mrg * from: NetBSD: vm_swap.c,v 1.52 1997/12/02 13:47:37 pk Exp
31 1.3 mrg * from: Id: uvm_swap.c,v 1.1.2.42 1998/02/02 20:38:06 chuck Exp
32 1.1 mrg */
33 1.57 lukem
34 1.57 lukem #include <sys/cdefs.h>
35 1.73 perry __KERNEL_RCSID(0, "$NetBSD: uvm_swap.c,v 1.73 2002/11/02 07:40:49 perry Exp $");
36 1.5 mrg
37 1.6 thorpej #include "fs_nfs.h"
38 1.5 mrg #include "opt_uvmhist.h"
39 1.16 mrg #include "opt_compat_netbsd.h"
40 1.41 chs #include "opt_ddb.h"
41 1.1 mrg
42 1.1 mrg #include <sys/param.h>
43 1.1 mrg #include <sys/systm.h>
44 1.1 mrg #include <sys/buf.h>
45 1.36 mrg #include <sys/conf.h>
46 1.1 mrg #include <sys/proc.h>
47 1.1 mrg #include <sys/namei.h>
48 1.1 mrg #include <sys/disklabel.h>
49 1.1 mrg #include <sys/errno.h>
50 1.1 mrg #include <sys/kernel.h>
51 1.1 mrg #include <sys/malloc.h>
52 1.1 mrg #include <sys/vnode.h>
53 1.1 mrg #include <sys/file.h>
54 1.1 mrg #include <sys/extent.h>
55 1.1 mrg #include <sys/mount.h>
56 1.12 pk #include <sys/pool.h>
57 1.1 mrg #include <sys/syscallargs.h>
58 1.17 mrg #include <sys/swap.h>
59 1.1 mrg
60 1.1 mrg #include <uvm/uvm.h>
61 1.1 mrg
62 1.1 mrg #include <miscfs/specfs/specdev.h>
63 1.1 mrg
64 1.1 mrg /*
65 1.1 mrg * uvm_swap.c: manage configuration and i/o to swap space.
66 1.1 mrg */
67 1.1 mrg
68 1.1 mrg /*
69 1.1 mrg * swap space is managed in the following way:
70 1.51 chs *
71 1.1 mrg * each swap partition or file is described by a "swapdev" structure.
72 1.1 mrg * each "swapdev" structure contains a "swapent" structure which contains
73 1.1 mrg * information that is passed up to the user (via system calls).
74 1.1 mrg *
75 1.1 mrg * each swap partition is assigned a "priority" (int) which controls
76 1.1 mrg * swap partition usage.
77 1.1 mrg *
78 1.1 mrg * the system maintains a global data structure describing all swap
79 1.1 mrg * partitions/files. there is a sorted LIST of "swappri" structures
80 1.1 mrg * which describe "swapdev"'s at that priority. this LIST is headed
81 1.51 chs * by the "swap_priority" global var. each "swappri" contains a
82 1.1 mrg * CIRCLEQ of "swapdev" structures at that priority.
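 *
 *	an illustrative sketch of that layout:
 *
 *	  swap_priority --> [swappri pri=0] --> [swappri pri=5] --> ...
 *	                          |                    |
 *	                     CIRCLEQ of           CIRCLEQ of
 *	                     swapdev's            swapdev's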
83 1.1 mrg *
84 1.1 mrg * locking:
85 1.1 mrg * - swap_syscall_lock (sleep lock): this lock serializes the swapctl
86 1.1 mrg * system call and prevents the swap priority list from changing
87 1.1 mrg * while we are in the middle of a system call (e.g. SWAP_STATS).
88 1.26 chs * - uvm.swap_data_lock (simple_lock): this lock protects all swap data
89 1.1 mrg * structures including the priority list, the swapdev structures,
90 1.1 mrg * and the swapmap extent.
91 1.1 mrg *
92 1.1 mrg * each swap device has the following info:
93 1.1 mrg * - swap device in use (could be disabled, preventing future use)
94 1.1 mrg * - swap enabled (allows new allocations on swap)
95 1.1 mrg * - map info in /dev/drum
96 1.1 mrg * - vnode pointer
97 1.1 mrg * for swap files only:
98 1.1 mrg * - block size
99 1.1 mrg * - max byte count in buffer
100 1.1 mrg * - buffer
101 1.1 mrg *
102 1.1 mrg * userland controls and configures swap with the swapctl(2) system call.
103 1.1 mrg * the sys_swapctl performs the following operations:
104 1.1 mrg * [1] SWAP_NSWAP: returns the number of swap devices currently configured
105 1.51 chs * [2] SWAP_STATS: given a pointer to an array of swapent structures
106 1.1 mrg * (passed in via "arg") of a size passed in via "misc" ... we load
107 1.63 manu * the current swap config into the array. The actual work is done
108 1.63 manu * in the uvm_swap_stats(9) function.
109 1.1 mrg * [3] SWAP_ON: given a pathname in arg (could be device or file) and a
110 1.1 mrg * priority in "misc", start swapping on it.
111 1.1 mrg * [4] SWAP_OFF: as SWAP_ON, but stops swapping to a device
112 1.1 mrg * [5] SWAP_CTL: changes the priority of a swap device (new priority in
113 1.1 mrg * "misc")
114 1.1 mrg */
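
/*
 * purely as an illustration of the interface described above (not part
 * of this file's logic): a userland caller would drive these operations
 * through the swapctl(2) system call roughly as follows, assuming
 * <unistd.h> and <sys/swap.h>; the device path and priority 0 are
 * made-up example values:
 *
 *	if (swapctl(SWAP_ON, "/dev/wd0b", 0) == -1)
 *		err(1, "swapctl(SWAP_ON)");
 *	nswap = swapctl(SWAP_NSWAP, NULL, 0);
 *
 * see swapctl(2) for the authoritative description of each command.
 */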
115 1.1 mrg
116 1.1 mrg /*
117 1.1 mrg * swapdev: describes a single swap partition/file
118 1.1 mrg *
119 1.1 mrg * note the following should be true:
120 1.1 mrg * swd_inuse <= swd_nblks [number of blocks in use is <= total blocks]
121 1.1 mrg * swd_nblks <= swd_mapsize [because mapsize includes miniroot+disklabel]
122 1.1 mrg */
123 1.1 mrg struct swapdev {
124 1.16 mrg struct oswapent swd_ose;
125 1.16 mrg #define swd_dev swd_ose.ose_dev /* device id */
126 1.16 mrg #define swd_flags swd_ose.ose_flags /* flags:inuse/enable/fake */
127 1.16 mrg #define swd_priority swd_ose.ose_priority /* our priority */
128 1.16 mrg /* also: swd_ose.ose_nblks, swd_ose.ose_inuse */
129 1.16 mrg char *swd_path; /* saved pathname of device */
130 1.16 mrg int swd_pathlen; /* length of pathname */
131 1.16 mrg int swd_npages; /* #pages we can use */
132 1.16 mrg int swd_npginuse; /* #pages in use */
133 1.32 chs int swd_npgbad; /* #pages bad */
134 1.16 mrg int swd_drumoffset; /* page0 offset in drum */
135 1.16 mrg int swd_drumsize; /* #pages in drum */
136 1.16 mrg struct extent *swd_ex; /* extent for this swapdev */
137 1.42 enami char swd_exname[12]; /* name of extent above */
138 1.16 mrg struct vnode *swd_vp; /* backing vnode */
139 1.16 mrg CIRCLEQ_ENTRY(swapdev) swd_next; /* priority circleq */
140 1.1 mrg
141 1.16 mrg int swd_bsize; /* blocksize (bytes) */
142 1.16 mrg int swd_maxactive; /* max active i/o reqs */
143 1.65 hannken struct bufq_state swd_tab; /* buffer list */
144 1.33 thorpej int swd_active; /* number of active buffers */
145 1.1 mrg };
146 1.1 mrg
147 1.1 mrg /*
148 1.1 mrg * swap device priority entry; the list is kept sorted on `spi_priority'.
149 1.1 mrg */
150 1.1 mrg struct swappri {
151 1.1 mrg int spi_priority; /* priority */
152 1.1 mrg CIRCLEQ_HEAD(spi_swapdev, swapdev) spi_swapdev;
153 1.1 mrg /* circleq of swapdevs at this priority */
154 1.1 mrg LIST_ENTRY(swappri) spi_swappri; /* global list of pri's */
155 1.1 mrg };
156 1.1 mrg
157 1.1 mrg /*
158 1.1 mrg * The following two structures are used to keep track of data transfers
159 1.1 mrg * on swap devices associated with regular files.
160 1.1 mrg * NOTE: this code is more or less a copy of vnd.c; we use the same
161 1.1 mrg * structure names here to ease porting.
162 1.1 mrg */
163 1.1 mrg struct vndxfer {
164 1.1 mrg struct buf *vx_bp; /* Pointer to parent buffer */
165 1.1 mrg struct swapdev *vx_sdp;
166 1.1 mrg int vx_error;
167 1.1 mrg int vx_pending; /* # of pending aux buffers */
168 1.1 mrg int vx_flags;
169 1.1 mrg #define VX_BUSY 1
170 1.1 mrg #define VX_DEAD 2
171 1.1 mrg };
172 1.1 mrg
173 1.1 mrg struct vndbuf {
174 1.1 mrg struct buf vb_buf;
175 1.1 mrg struct vndxfer *vb_xfer;
176 1.1 mrg };
177 1.1 mrg
178 1.12 pk
179 1.1 mrg /*
180 1.12 pk * We keep a pool of vndbuf's and a pool of vndxfer structures.
181 1.1 mrg */
182 1.49 thorpej static struct pool vndxfer_pool;
183 1.49 thorpej static struct pool vndbuf_pool;
184 1.1 mrg
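/*
 * the allocation macros below raise splbio() around pool_get() because
 * the corresponding pool_put() calls (putvndbuf/putvndxfer) are made
 * from sw_reg_iodone(), which runs at interrupt time at splbio().
 */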
185 1.12 pk #define getvndxfer(vnx) do { \
186 1.12 pk int s = splbio(); \
187 1.60 thorpej vnx = pool_get(&vndxfer_pool, PR_WAITOK); \
188 1.12 pk splx(s); \
189 1.73 perry } while (/*CONSTCOND*/ 0)
190 1.12 pk
191 1.12 pk #define putvndxfer(vnx) { \
192 1.49 thorpej pool_put(&vndxfer_pool, (void *)(vnx)); \
193 1.12 pk }
194 1.12 pk
195 1.12 pk #define getvndbuf(vbp) do { \
196 1.12 pk int s = splbio(); \
197 1.60 thorpej vbp = pool_get(&vndbuf_pool, PR_WAITOK); \
198 1.12 pk splx(s); \
199 1.73 perry } while (/*CONSTCOND*/ 0)
200 1.1 mrg
201 1.12 pk #define putvndbuf(vbp) { \
202 1.49 thorpej pool_put(&vndbuf_pool, (void *)(vbp)); \
203 1.12 pk }
204 1.1 mrg
205 1.1 mrg /*
206 1.1 mrg * local variables
207 1.1 mrg */
208 1.1 mrg static struct extent *swapmap; /* controls the mapping of /dev/drum */
209 1.1 mrg
210 1.1 mrg /* list of all active swap devices [by priority] */
211 1.1 mrg LIST_HEAD(swap_priority, swappri);
212 1.1 mrg static struct swap_priority swap_priority;
213 1.1 mrg
214 1.1 mrg /* locks */
215 1.52 chs struct lock swap_syscall_lock;
216 1.1 mrg
217 1.1 mrg /*
218 1.1 mrg * prototypes
219 1.1 mrg */
220 1.1 mrg static struct swapdev *swapdrum_getsdp __P((int));
221 1.1 mrg
222 1.1 mrg static struct swapdev *swaplist_find __P((struct vnode *, int));
223 1.51 chs static void swaplist_insert __P((struct swapdev *,
224 1.1 mrg struct swappri *, int));
225 1.1 mrg static void swaplist_trim __P((void));
226 1.1 mrg
227 1.1 mrg static int swap_on __P((struct proc *, struct swapdev *));
228 1.1 mrg static int swap_off __P((struct proc *, struct swapdev *));
229 1.1 mrg
230 1.1 mrg static void sw_reg_strategy __P((struct swapdev *, struct buf *, int));
231 1.1 mrg static void sw_reg_iodone __P((struct buf *));
232 1.1 mrg static void sw_reg_start __P((struct swapdev *));
233 1.1 mrg
234 1.1 mrg static int uvm_swap_io __P((struct vm_page **, int, int, int));
235 1.1 mrg
236 1.69 gehenna dev_type_read(swread);
237 1.69 gehenna dev_type_write(swwrite);
238 1.69 gehenna dev_type_strategy(swstrategy);
239 1.69 gehenna
240 1.69 gehenna const struct bdevsw swap_bdevsw = {
241 1.69 gehenna noopen, noclose, swstrategy, noioctl, nodump, nosize,
242 1.69 gehenna };
243 1.69 gehenna
244 1.69 gehenna const struct cdevsw swap_cdevsw = {
245 1.69 gehenna nullopen, nullclose, swread, swwrite, noioctl,
246 1.71 jdolecek nostop, notty, nopoll, nommap, nokqfilter
247 1.69 gehenna };
248 1.69 gehenna
249 1.1 mrg /*
250 1.1 mrg * uvm_swap_init: init the swap system data structures and locks
251 1.1 mrg *
252 1.51 chs * => called at boot time from init_main.c after the filesystems
253 1.1 mrg * are brought up (which happens after uvm_init())
254 1.1 mrg */
255 1.1 mrg void
256 1.1 mrg uvm_swap_init()
257 1.1 mrg {
258 1.1 mrg UVMHIST_FUNC("uvm_swap_init");
259 1.1 mrg
260 1.1 mrg UVMHIST_CALLED(pdhist);
261 1.1 mrg /*
262 1.1 mrg * first, init the swap list, its counter, and its lock.
263 1.1 mrg * then get a handle on the vnode for /dev/drum by using
264 1.1 mrg * its dev_t number ("swapdev", from MD conf.c).
265 1.1 mrg */
266 1.1 mrg
267 1.1 mrg LIST_INIT(&swap_priority);
268 1.1 mrg uvmexp.nswapdev = 0;
269 1.1 mrg lockinit(&swap_syscall_lock, PVM, "swapsys", 0, 0);
270 1.26 chs simple_lock_init(&uvm.swap_data_lock);
271 1.12 pk
272 1.1 mrg if (bdevvp(swapdev, &swapdev_vp))
273 1.1 mrg panic("uvm_swap_init: can't get vnode for swap device");
274 1.1 mrg
275 1.1 mrg /*
276 1.1 mrg * create swap block resource map to map /dev/drum. the range
277 1.1 mrg * from 1 to INT_MAX allows 2 gigablocks of swap space. note
278 1.51 chs * that block 0 is reserved (used to indicate an allocation
279 1.1 mrg * failure, or no allocation).
280 1.1 mrg */
281 1.1 mrg swapmap = extent_create("swapmap", 1, INT_MAX,
282 1.1 mrg M_VMSWAP, 0, 0, EX_NOWAIT);
283 1.1 mrg if (swapmap == 0)
284 1.1 mrg panic("uvm_swap_init: extent_create failed");
285 1.1 mrg
286 1.1 mrg /*
287 1.41 chs * allocate pools for structures used for swapping to files.
288 1.1 mrg */
289 1.1 mrg
290 1.49 thorpej pool_init(&vndxfer_pool, sizeof(struct vndxfer), 0, 0, 0,
291 1.59 thorpej "swp vnx", NULL);
292 1.49 thorpej
293 1.49 thorpej pool_init(&vndbuf_pool, sizeof(struct vndbuf), 0, 0, 0,
294 1.59 thorpej "swp vnd", NULL);
295 1.49 thorpej
296 1.1 mrg /*
297 1.1 mrg * done!
298 1.1 mrg */
299 1.1 mrg UVMHIST_LOG(pdhist, "<- done", 0, 0, 0, 0);
300 1.1 mrg }
301 1.1 mrg
302 1.1 mrg /*
303 1.1 mrg * swaplist functions: functions that operate on the list of swap
304 1.1 mrg * devices on the system.
305 1.1 mrg */
306 1.1 mrg
307 1.1 mrg /*
308 1.1 mrg * swaplist_insert: insert swap device "sdp" into the global list
309 1.1 mrg *
310 1.26 chs * => caller must hold both swap_syscall_lock and uvm.swap_data_lock
311 1.1 mrg * => caller must provide a newly malloc'd swappri structure (we will
312 1.1 mrg * FREE it if we don't need it... this is to prevent malloc from blocking
313 1.1 mrg * here while adding swap)
314 1.1 mrg */
315 1.1 mrg static void
316 1.1 mrg swaplist_insert(sdp, newspp, priority)
317 1.1 mrg struct swapdev *sdp;
318 1.1 mrg struct swappri *newspp;
319 1.1 mrg int priority;
320 1.1 mrg {
321 1.1 mrg struct swappri *spp, *pspp;
322 1.1 mrg UVMHIST_FUNC("swaplist_insert"); UVMHIST_CALLED(pdhist);
323 1.1 mrg
324 1.1 mrg /*
325 1.1 mrg * find entry at or after which to insert the new device.
326 1.1 mrg */
327 1.55 chs pspp = NULL;
328 1.55 chs LIST_FOREACH(spp, &swap_priority, spi_swappri) {
329 1.1 mrg if (priority <= spp->spi_priority)
330 1.1 mrg break;
331 1.1 mrg pspp = spp;
332 1.1 mrg }
333 1.1 mrg
334 1.1 mrg /*
335 1.1 mrg * new priority?
336 1.1 mrg */
337 1.1 mrg if (spp == NULL || spp->spi_priority != priority) {
338 1.1 mrg spp = newspp; /* use newspp! */
339 1.32 chs UVMHIST_LOG(pdhist, "created new swappri = %d",
340 1.32 chs priority, 0, 0, 0);
341 1.1 mrg
342 1.1 mrg spp->spi_priority = priority;
343 1.1 mrg CIRCLEQ_INIT(&spp->spi_swapdev);
344 1.1 mrg
345 1.1 mrg if (pspp)
346 1.1 mrg LIST_INSERT_AFTER(pspp, spp, spi_swappri);
347 1.1 mrg else
348 1.1 mrg LIST_INSERT_HEAD(&swap_priority, spp, spi_swappri);
349 1.1 mrg } else {
350 1.1 mrg /* we don't need a new priority structure, free it */
351 1.1 mrg FREE(newspp, M_VMSWAP);
352 1.1 mrg }
353 1.1 mrg
354 1.1 mrg /*
355 1.1 mrg * priority found (or created). now insert on the priority's
356 1.1 mrg * circleq list and bump the total number of swapdevs.
357 1.1 mrg */
358 1.1 mrg sdp->swd_priority = priority;
359 1.1 mrg CIRCLEQ_INSERT_TAIL(&spp->spi_swapdev, sdp, swd_next);
360 1.1 mrg uvmexp.nswapdev++;
361 1.1 mrg }
362 1.1 mrg
363 1.1 mrg /*
364 1.1 mrg * swaplist_find: find and optionally remove a swap device from the
365 1.1 mrg * global list.
366 1.1 mrg *
367 1.26 chs * => caller must hold both swap_syscall_lock and uvm.swap_data_lock
368 1.1 mrg * => we return the swapdev we found (and removed)
369 1.1 mrg */
370 1.1 mrg static struct swapdev *
371 1.1 mrg swaplist_find(vp, remove)
372 1.1 mrg struct vnode *vp;
373 1.1 mrg boolean_t remove;
374 1.1 mrg {
375 1.1 mrg struct swapdev *sdp;
376 1.1 mrg struct swappri *spp;
377 1.1 mrg
378 1.1 mrg /*
379 1.1 mrg * search the lists for the requested vp
380 1.1 mrg */
381 1.55 chs
382 1.55 chs LIST_FOREACH(spp, &swap_priority, spi_swappri) {
383 1.55 chs CIRCLEQ_FOREACH(sdp, &spp->spi_swapdev, swd_next) {
384 1.1 mrg if (sdp->swd_vp == vp) {
385 1.1 mrg if (remove) {
386 1.1 mrg CIRCLEQ_REMOVE(&spp->spi_swapdev,
387 1.1 mrg sdp, swd_next);
388 1.1 mrg uvmexp.nswapdev--;
389 1.1 mrg }
390 1.1 mrg return(sdp);
391 1.1 mrg }
392 1.55 chs }
393 1.1 mrg }
394 1.1 mrg return (NULL);
395 1.1 mrg }
396 1.1 mrg
397 1.1 mrg
398 1.1 mrg /*
399 1.1 mrg * swaplist_trim: scan priority list for empty priority entries and kill
400 1.1 mrg * them.
401 1.1 mrg *
402 1.26 chs * => caller must hold both swap_syscall_lock and uvm.swap_data_lock
403 1.1 mrg */
404 1.1 mrg static void
405 1.1 mrg swaplist_trim()
406 1.1 mrg {
407 1.1 mrg struct swappri *spp, *nextspp;
408 1.1 mrg
409 1.32 chs for (spp = LIST_FIRST(&swap_priority); spp != NULL; spp = nextspp) {
410 1.32 chs nextspp = LIST_NEXT(spp, spi_swappri);
411 1.32 chs if (CIRCLEQ_FIRST(&spp->spi_swapdev) !=
412 1.32 chs (void *)&spp->spi_swapdev)
413 1.1 mrg continue;
414 1.1 mrg LIST_REMOVE(spp, spi_swappri);
415 1.32 chs free(spp, M_VMSWAP);
416 1.1 mrg }
417 1.1 mrg }
418 1.1 mrg
419 1.1 mrg /*
420 1.1 mrg * swapdrum_getsdp: given a page offset in /dev/drum, convert it back
421 1.1 mrg * to the "swapdev" that maps that section of the drum.
422 1.1 mrg *
423 1.1 mrg * => each swapdev takes one big contig chunk of the drum
424 1.26 chs * => caller must hold uvm.swap_data_lock
425 1.1 mrg */
426 1.1 mrg static struct swapdev *
427 1.1 mrg swapdrum_getsdp(pgno)
428 1.1 mrg int pgno;
429 1.1 mrg {
430 1.1 mrg struct swapdev *sdp;
431 1.1 mrg struct swappri *spp;
432 1.51 chs
433 1.55 chs LIST_FOREACH(spp, &swap_priority, spi_swappri) {
434 1.55 chs CIRCLEQ_FOREACH(sdp, &spp->spi_swapdev, swd_next) {
435 1.48 fvdl if (sdp->swd_flags & SWF_FAKE)
436 1.48 fvdl continue;
437 1.1 mrg if (pgno >= sdp->swd_drumoffset &&
438 1.1 mrg pgno < (sdp->swd_drumoffset + sdp->swd_drumsize)) {
439 1.1 mrg return sdp;
440 1.1 mrg }
441 1.48 fvdl }
442 1.55 chs }
443 1.1 mrg return NULL;
444 1.1 mrg }
445 1.1 mrg
446 1.1 mrg
447 1.1 mrg /*
448 1.1 mrg * sys_swapctl: main entry point for swapctl(2) system call
449 1.1 mrg * [with two helper functions: swap_on and swap_off]
450 1.1 mrg */
451 1.1 mrg int
452 1.1 mrg sys_swapctl(p, v, retval)
453 1.1 mrg struct proc *p;
454 1.1 mrg void *v;
455 1.1 mrg register_t *retval;
456 1.1 mrg {
457 1.1 mrg struct sys_swapctl_args /* {
458 1.1 mrg syscallarg(int) cmd;
459 1.1 mrg syscallarg(void *) arg;
460 1.1 mrg syscallarg(int) misc;
461 1.1 mrg } */ *uap = (struct sys_swapctl_args *)v;
462 1.1 mrg struct vnode *vp;
463 1.1 mrg struct nameidata nd;
464 1.1 mrg struct swappri *spp;
465 1.1 mrg struct swapdev *sdp;
466 1.1 mrg struct swapent *sep;
467 1.16 mrg char userpath[PATH_MAX + 1];
468 1.18 enami size_t len;
469 1.61 manu int error, misc;
470 1.1 mrg int priority;
471 1.1 mrg UVMHIST_FUNC("sys_swapctl"); UVMHIST_CALLED(pdhist);
472 1.1 mrg
473 1.1 mrg misc = SCARG(uap, misc);
474 1.1 mrg
475 1.1 mrg /*
476 1.1 mrg * ensure serialized syscall access by grabbing the swap_syscall_lock
477 1.1 mrg */
478 1.32 chs lockmgr(&swap_syscall_lock, LK_EXCLUSIVE, NULL);
479 1.24 mrg
480 1.1 mrg /*
481 1.1 mrg * we handle the non-priv NSWAP and STATS requests first.
482 1.1 mrg *
483 1.51 chs * SWAP_NSWAP: return number of config'd swap devices
484 1.1 mrg * [can also be obtained with uvmexp sysctl]
485 1.1 mrg */
486 1.1 mrg if (SCARG(uap, cmd) == SWAP_NSWAP) {
487 1.8 mrg UVMHIST_LOG(pdhist, "<- done SWAP_NSWAP=%d", uvmexp.nswapdev,
488 1.8 mrg 0, 0, 0);
489 1.1 mrg *retval = uvmexp.nswapdev;
490 1.16 mrg error = 0;
491 1.16 mrg goto out;
492 1.1 mrg }
493 1.1 mrg
494 1.1 mrg /*
495 1.1 mrg * SWAP_STATS: get stats on current # of configured swap devs
496 1.1 mrg *
497 1.51 chs * note that the swap_priority list can't change as long
498 1.1 mrg * as we are holding the swap_syscall_lock. we don't want
499 1.51 chs * to grab the uvm.swap_data_lock because we may fault&sleep during
500 1.1 mrg * copyout() and we don't want to be holding that lock then!
501 1.1 mrg */
502 1.16 mrg if (SCARG(uap, cmd) == SWAP_STATS
503 1.16 mrg #if defined(COMPAT_13)
504 1.16 mrg || SCARG(uap, cmd) == SWAP_OSTATS
505 1.16 mrg #endif
506 1.16 mrg ) {
507 1.61 manu misc = MIN(uvmexp.nswapdev, misc);
508 1.16 mrg #if defined(COMPAT_13)
509 1.61 manu if (SCARG(uap, cmd) == SWAP_OSTATS)
510 1.61 manu len = sizeof(struct oswapent) * misc;
511 1.62 manu else
512 1.16 mrg #endif
513 1.62 manu len = sizeof(struct swapent) * misc;
514 1.62 manu sep = (struct swapent *)malloc(len, M_TEMP, M_WAITOK);
515 1.62 manu
516 1.62 manu uvm_swap_stats(SCARG(uap, cmd), sep, misc, retval);
517 1.61 manu error = copyout(sep, (void *)SCARG(uap, arg), len);
518 1.1 mrg
519 1.61 manu free(sep, M_TEMP);
520 1.16 mrg UVMHIST_LOG(pdhist, "<- done SWAP_STATS", 0, 0, 0, 0);
521 1.16 mrg goto out;
522 1.51 chs }
523 1.55 chs if (SCARG(uap, cmd) == SWAP_GETDUMPDEV) {
524 1.55 chs dev_t *devp = (dev_t *)SCARG(uap, arg);
525 1.55 chs
526 1.55 chs error = copyout(&dumpdev, devp, sizeof(dumpdev));
527 1.55 chs goto out;
528 1.55 chs }
529 1.1 mrg
530 1.1 mrg /*
531 1.1 mrg * all other requests require superuser privs. verify.
532 1.1 mrg */
533 1.16 mrg if ((error = suser(p->p_ucred, &p->p_acflag)))
534 1.16 mrg goto out;
535 1.1 mrg
536 1.1 mrg /*
537 1.1 mrg * at this point we expect a path name in arg. we will
538 1.1 mrg * use namei() to gain a vnode reference (vref), and lock
539 1.1 mrg * the vnode (VOP_LOCK).
540 1.1 mrg *
541 1.1 mrg * XXX: a NULL arg means use the root vnode pointer (e.g. for
542 1.16 mrg * miniroot)
543 1.1 mrg */
544 1.1 mrg if (SCARG(uap, arg) == NULL) {
545 1.1 mrg vp = rootvp; /* miniroot */
546 1.7 fvdl if (vget(vp, LK_EXCLUSIVE)) {
547 1.16 mrg error = EBUSY;
548 1.16 mrg goto out;
549 1.1 mrg }
550 1.16 mrg if (SCARG(uap, cmd) == SWAP_ON &&
551 1.16 mrg copystr("miniroot", userpath, sizeof userpath, &len))
552 1.16 mrg panic("swapctl: miniroot copy failed");
553 1.1 mrg } else {
554 1.16 mrg int space;
555 1.16 mrg char *where;
556 1.16 mrg
557 1.16 mrg if (SCARG(uap, cmd) == SWAP_ON) {
558 1.16 mrg if ((error = copyinstr(SCARG(uap, arg), userpath,
559 1.16 mrg sizeof userpath, &len)))
560 1.16 mrg goto out;
561 1.16 mrg space = UIO_SYSSPACE;
562 1.16 mrg where = userpath;
563 1.16 mrg } else {
564 1.16 mrg space = UIO_USERSPACE;
565 1.16 mrg where = (char *)SCARG(uap, arg);
566 1.1 mrg }
567 1.16 mrg NDINIT(&nd, LOOKUP, FOLLOW|LOCKLEAF, space, where, p);
568 1.16 mrg if ((error = namei(&nd)))
569 1.16 mrg goto out;
570 1.1 mrg vp = nd.ni_vp;
571 1.1 mrg }
572 1.1 mrg /* note: "vp" is referenced and locked */
573 1.1 mrg
574 1.1 mrg error = 0; /* assume no error */
575 1.1 mrg switch(SCARG(uap, cmd)) {
576 1.40 mrg
577 1.24 mrg case SWAP_DUMPDEV:
578 1.24 mrg if (vp->v_type != VBLK) {
579 1.24 mrg error = ENOTBLK;
580 1.45 pk break;
581 1.24 mrg }
582 1.24 mrg dumpdev = vp->v_rdev;
583 1.68 drochner cpu_dumpconf();
584 1.24 mrg break;
585 1.24 mrg
586 1.1 mrg case SWAP_CTL:
587 1.1 mrg /*
588 1.1 mrg * get new priority, remove old entry (if any) and then
589 1.1 mrg * reinsert it in the correct place. finally, prune out
590 1.1 mrg * any empty priority structures.
591 1.1 mrg */
592 1.1 mrg priority = SCARG(uap, misc);
593 1.32 chs spp = malloc(sizeof *spp, M_VMSWAP, M_WAITOK);
594 1.26 chs simple_lock(&uvm.swap_data_lock);
595 1.1 mrg if ((sdp = swaplist_find(vp, 1)) == NULL) {
596 1.1 mrg error = ENOENT;
597 1.1 mrg } else {
598 1.1 mrg swaplist_insert(sdp, spp, priority);
599 1.1 mrg swaplist_trim();
600 1.1 mrg }
601 1.26 chs simple_unlock(&uvm.swap_data_lock);
602 1.1 mrg if (error)
603 1.1 mrg free(spp, M_VMSWAP);
604 1.1 mrg break;
605 1.1 mrg
606 1.1 mrg case SWAP_ON:
607 1.32 chs
608 1.1 mrg /*
609 1.1 mrg * check for duplicates. if none found, then insert a
610 1.1 mrg * dummy entry on the list to prevent someone else from
611 1.1 mrg * trying to enable this device while we are working on
612 1.1 mrg * it.
613 1.1 mrg */
614 1.32 chs
615 1.1 mrg priority = SCARG(uap, misc);
616 1.48 fvdl sdp = malloc(sizeof *sdp, M_VMSWAP, M_WAITOK);
617 1.48 fvdl spp = malloc(sizeof *spp, M_VMSWAP, M_WAITOK);
618 1.67 chs memset(sdp, 0, sizeof(*sdp));
619 1.67 chs sdp->swd_flags = SWF_FAKE;
620 1.67 chs sdp->swd_vp = vp;
621 1.67 chs sdp->swd_dev = (vp->v_type == VBLK) ? vp->v_rdev : NODEV;
622 1.66 hannken bufq_alloc(&sdp->swd_tab, BUFQ_DISKSORT|BUFQ_SORT_RAWBLOCK);
623 1.26 chs simple_lock(&uvm.swap_data_lock);
624 1.48 fvdl if (swaplist_find(vp, 0) != NULL) {
625 1.1 mrg error = EBUSY;
626 1.26 chs simple_unlock(&uvm.swap_data_lock);
627 1.66 hannken bufq_free(&sdp->swd_tab);
628 1.48 fvdl free(sdp, M_VMSWAP);
629 1.48 fvdl free(spp, M_VMSWAP);
630 1.16 mrg break;
631 1.1 mrg }
632 1.1 mrg swaplist_insert(sdp, spp, priority);
633 1.26 chs simple_unlock(&uvm.swap_data_lock);
634 1.1 mrg
635 1.16 mrg sdp->swd_pathlen = len;
636 1.16 mrg sdp->swd_path = malloc(sdp->swd_pathlen, M_VMSWAP, M_WAITOK);
637 1.19 pk if (copystr(userpath, sdp->swd_path, sdp->swd_pathlen, 0) != 0)
638 1.19 pk panic("swapctl: copystr");
639 1.32 chs
640 1.1 mrg /*
641 1.1 mrg * we've now got a FAKE placeholder in the swap list.
642 1.1 mrg * now attempt to enable swap on it. if we fail, undo
643 1.1 mrg * what we've done and kill the fake entry we just inserted.
644 1.1 mrg * if swap_on is a success, it will clear the SWF_FAKE flag
645 1.1 mrg */
646 1.32 chs
647 1.1 mrg if ((error = swap_on(p, sdp)) != 0) {
648 1.26 chs simple_lock(&uvm.swap_data_lock);
649 1.8 mrg (void) swaplist_find(vp, 1); /* kill fake entry */
650 1.1 mrg swaplist_trim();
651 1.26 chs simple_unlock(&uvm.swap_data_lock);
652 1.66 hannken bufq_free(&sdp->swd_tab);
653 1.19 pk free(sdp->swd_path, M_VMSWAP);
654 1.32 chs free(sdp, M_VMSWAP);
655 1.1 mrg break;
656 1.1 mrg }
657 1.1 mrg break;
658 1.1 mrg
659 1.1 mrg case SWAP_OFF:
660 1.26 chs simple_lock(&uvm.swap_data_lock);
661 1.1 mrg if ((sdp = swaplist_find(vp, 0)) == NULL) {
662 1.26 chs simple_unlock(&uvm.swap_data_lock);
663 1.1 mrg error = ENXIO;
664 1.1 mrg break;
665 1.1 mrg }
666 1.32 chs
667 1.1 mrg /*
668 1.1 mrg * If a device isn't in use or enabled, we
669 1.1 mrg * can't stop swapping from it (again).
670 1.1 mrg */
671 1.1 mrg if ((sdp->swd_flags & (SWF_INUSE|SWF_ENABLE)) == 0) {
672 1.26 chs simple_unlock(&uvm.swap_data_lock);
673 1.1 mrg error = EBUSY;
674 1.16 mrg break;
675 1.1 mrg }
676 1.1 mrg
677 1.1 mrg /*
678 1.32 chs * do the real work.
679 1.1 mrg */
680 1.45 pk error = swap_off(p, sdp);
681 1.1 mrg break;
682 1.1 mrg
683 1.1 mrg default:
684 1.1 mrg error = EINVAL;
685 1.1 mrg }
686 1.1 mrg
687 1.1 mrg /*
688 1.39 chs * done! release the ref gained by namei() and unlock.
689 1.1 mrg */
690 1.1 mrg vput(vp);
691 1.39 chs
692 1.16 mrg out:
693 1.32 chs lockmgr(&swap_syscall_lock, LK_RELEASE, NULL);
694 1.1 mrg
695 1.1 mrg UVMHIST_LOG(pdhist, "<- done! error=%d", error, 0, 0, 0);
696 1.1 mrg return (error);
697 1.61 manu }
698 1.61 manu
699 1.61 manu /*
700 1.61 manu * uvm_swap_stats: implements swapctl(SWAP_STATS). The function is kept
701 1.61 manu * away from sys_swapctl() in order to allow COMPAT_* swapctl()
702 1.61 manu * emulation to use it directly without going through sys_swapctl().
703 1.61 manu * The problem with using sys_swapctl() there is that it involves
704 1.61 manu * copying the swapent array to the stackgap, and this array's size
705 1.61 manu * is not known at build time. Hence it would not be possible to
706 1.61 manu * ensure it would fit in the stackgap in any case.
707 1.61 manu */
708 1.61 manu void
709 1.61 manu uvm_swap_stats(cmd, sep, sec, retval)
710 1.61 manu int cmd;
711 1.61 manu struct swapent *sep;
712 1.61 manu int sec;
713 1.61 manu register_t *retval;
714 1.61 manu {
715 1.61 manu struct swappri *spp;
716 1.61 manu struct swapdev *sdp;
717 1.61 manu int count = 0;
718 1.61 manu
719 1.61 manu LIST_FOREACH(spp, &swap_priority, spi_swappri) {
720 1.61 manu for (sdp = CIRCLEQ_FIRST(&spp->spi_swapdev);
721 1.61 manu sdp != (void *)&spp->spi_swapdev && sec-- > 0;
722 1.61 manu sdp = CIRCLEQ_NEXT(sdp, swd_next)) {
723 1.61 manu /*
724 1.61 manu * backwards compatibility for system call.
725 1.61 manu * note that we use 'struct oswapent' as an
726 1.61 manu * overlay into both 'struct swapdev' and
727 1.61 manu * the userland 'struct swapent', as we
728 1.61 manu * want to retain backwards compatibility
729 1.61 manu * with NetBSD 1.3.
730 1.61 manu */
731 1.61 manu sdp->swd_ose.ose_inuse =
732 1.61 manu btodb((u_int64_t)sdp->swd_npginuse <<
733 1.61 manu PAGE_SHIFT);
734 1.61 manu (void)memcpy(sep, &sdp->swd_ose,
735 1.61 manu sizeof(struct oswapent));
736 1.61 manu
737 1.61 manu /* now copy out the path if necessary */
738 1.61 manu #if defined(COMPAT_13)
739 1.61 manu if (cmd == SWAP_STATS)
740 1.61 manu #endif
741 1.61 manu (void)memcpy(&sep->se_path, sdp->swd_path,
742 1.61 manu sdp->swd_pathlen);
743 1.61 manu
744 1.61 manu count++;
745 1.61 manu #if defined(COMPAT_13)
746 1.61 manu if (cmd == SWAP_OSTATS)
747 1.61 manu sep = (struct swapent *)
748 1.61 manu ((struct oswapent *)sep + 1);
749 1.61 manu else
750 1.61 manu #endif
751 1.61 manu sep++;
752 1.61 manu }
753 1.61 manu }
754 1.61 manu
755 1.61 manu *retval = count;
756 1.61 manu return;
757 1.1 mrg }
758 1.1 mrg
759 1.1 mrg /*
760 1.1 mrg * swap_on: attempt to enable a swapdev for swapping. note that the
761 1.1 mrg * swapdev is already on the global list, but disabled (marked
762 1.1 mrg * SWF_FAKE).
763 1.1 mrg *
764 1.1 mrg * => we avoid the start of the disk (to protect disk labels)
765 1.1 mrg * => we also avoid the miniroot, if we are swapping to root.
766 1.26 chs * => caller should leave uvm.swap_data_lock unlocked, we may lock it
767 1.1 mrg * if needed.
768 1.1 mrg */
769 1.1 mrg static int
770 1.1 mrg swap_on(p, sdp)
771 1.1 mrg struct proc *p;
772 1.1 mrg struct swapdev *sdp;
773 1.1 mrg {
774 1.1 mrg static int count = 0; /* static */
775 1.1 mrg struct vnode *vp;
776 1.1 mrg int error, npages, nblocks, size;
777 1.1 mrg long addr;
778 1.48 fvdl u_long result;
779 1.1 mrg struct vattr va;
780 1.1 mrg #ifdef NFS
781 1.1 mrg extern int (**nfsv2_vnodeop_p) __P((void *));
782 1.1 mrg #endif /* NFS */
783 1.69 gehenna const struct bdevsw *bdev;
784 1.1 mrg dev_t dev;
785 1.1 mrg UVMHIST_FUNC("swap_on"); UVMHIST_CALLED(pdhist);
786 1.1 mrg
787 1.1 mrg /*
788 1.1 mrg * we want to enable swapping on sdp. the swd_vp contains
789 1.1 mrg * the vnode we want (locked and ref'd), and the swd_dev
790 1.1 mrg * contains the dev_t of the file, if it is a block device.
791 1.1 mrg */
792 1.1 mrg
793 1.1 mrg vp = sdp->swd_vp;
794 1.1 mrg dev = sdp->swd_dev;
795 1.1 mrg
796 1.1 mrg /*
797 1.1 mrg * open the swap file (mostly useful for block device files to
798 1.1 mrg * let the device driver know what is up).
799 1.1 mrg *
800 1.1 mrg * we skip the open/close for root on swap because the root
801 1.1 mrg * has already been opened when root was mounted (mountroot).
802 1.1 mrg */
803 1.1 mrg if (vp != rootvp) {
804 1.1 mrg if ((error = VOP_OPEN(vp, FREAD|FWRITE, p->p_ucred, p)))
805 1.1 mrg return (error);
806 1.1 mrg }
807 1.1 mrg
808 1.1 mrg /* XXX this only works for block devices */
809 1.1 mrg UVMHIST_LOG(pdhist, " dev=%d, major(dev)=%d", dev, major(dev), 0,0);
810 1.1 mrg
811 1.1 mrg /*
812 1.1 mrg * we now need to determine the size of the swap area. for
813 1.1 mrg * block specials we can call the d_psize function.
814 1.1 mrg * for normal files, we must stat [get attrs].
815 1.1 mrg *
816 1.1 mrg * we put the result in nblocks.
817 1.1 mrg * for normal files, we also want the filesystem block size
818 1.1 mrg * (which we get with statfs).
819 1.1 mrg */
820 1.1 mrg switch (vp->v_type) {
821 1.1 mrg case VBLK:
822 1.69 gehenna bdev = bdevsw_lookup(dev);
823 1.69 gehenna if (bdev == NULL || bdev->d_psize == NULL ||
824 1.69 gehenna (nblocks = (*bdev->d_psize)(dev)) == -1) {
825 1.1 mrg error = ENXIO;
826 1.1 mrg goto bad;
827 1.1 mrg }
828 1.1 mrg break;
829 1.1 mrg
830 1.1 mrg case VREG:
831 1.1 mrg if ((error = VOP_GETATTR(vp, &va, p->p_ucred, p)))
832 1.1 mrg goto bad;
833 1.1 mrg nblocks = (int)btodb(va.va_size);
834 1.1 mrg if ((error =
835 1.1 mrg VFS_STATFS(vp->v_mount, &vp->v_mount->mnt_stat, p)) != 0)
836 1.1 mrg goto bad;
837 1.1 mrg
838 1.1 mrg sdp->swd_bsize = vp->v_mount->mnt_stat.f_iosize;
839 1.1 mrg /*
840 1.1 mrg * limit the max # of outstanding I/O requests we issue
841 1.1 mrg * at any one time. take it easy on NFS servers.
842 1.1 mrg */
843 1.1 mrg #ifdef NFS
844 1.1 mrg if (vp->v_op == nfsv2_vnodeop_p)
845 1.1 mrg sdp->swd_maxactive = 2; /* XXX */
846 1.1 mrg else
847 1.1 mrg #endif /* NFS */
848 1.1 mrg sdp->swd_maxactive = 8; /* XXX */
849 1.1 mrg break;
850 1.1 mrg
851 1.1 mrg default:
852 1.1 mrg error = ENXIO;
853 1.1 mrg goto bad;
854 1.1 mrg }
855 1.1 mrg
856 1.1 mrg /*
857 1.1 mrg * save nblocks in a safe place and convert to pages.
858 1.1 mrg */
859 1.1 mrg
860 1.16 mrg sdp->swd_ose.ose_nblks = nblocks;
861 1.20 chs npages = dbtob((u_int64_t)nblocks) >> PAGE_SHIFT;
862 1.1 mrg
863 1.1 mrg /*
864 1.1 mrg * for block special files, we want to make sure that we leave
865 1.1 mrg * the disklabel and bootblocks alone, so we arrange to skip
866 1.32 chs * over them (arbitrarily choosing to skip PAGE_SIZE bytes).
867 1.1 mrg * note that because of this the "size" can be less than the
868 1.1 mrg * actual number of blocks on the device.
869 1.1 mrg */
870 1.1 mrg if (vp->v_type == VBLK) {
871 1.1 mrg /* we use pages 1 to (size - 1) [inclusive] */
872 1.1 mrg size = npages - 1;
873 1.1 mrg addr = 1;
874 1.1 mrg } else {
875 1.1 mrg /* we use pages 0 to (size - 1) [inclusive] */
876 1.1 mrg size = npages;
877 1.1 mrg addr = 0;
878 1.1 mrg }
879 1.1 mrg
880 1.1 mrg /*
881 1.1 mrg * make sure we have enough blocks for a reasonable sized swap
882 1.1 mrg * area. we want at least one page.
883 1.1 mrg */
884 1.1 mrg
885 1.1 mrg if (size < 1) {
886 1.1 mrg UVMHIST_LOG(pdhist, " size <= 1!!", 0, 0, 0, 0);
887 1.1 mrg error = EINVAL;
888 1.1 mrg goto bad;
889 1.1 mrg }
890 1.1 mrg
891 1.1 mrg UVMHIST_LOG(pdhist, " dev=%x: size=%d addr=%ld\n", dev, size, addr, 0);
892 1.1 mrg
893 1.1 mrg /*
894 1.1 mrg * now we need to allocate an extent to manage this swap device
895 1.1 mrg */
896 1.42 enami snprintf(sdp->swd_exname, sizeof(sdp->swd_exname), "swap0x%04x",
897 1.42 enami count++);
898 1.1 mrg
899 1.1 mrg /* note that extent_create's 3rd arg is inclusive, thus "- 1" */
900 1.42 enami sdp->swd_ex = extent_create(sdp->swd_exname, 0, npages - 1, M_VMSWAP,
901 1.12 pk 0, 0, EX_WAITOK);
902 1.1 mrg /* allocate the `saved' region from the extent so it won't be used */
903 1.1 mrg if (addr) {
904 1.1 mrg if (extent_alloc_region(sdp->swd_ex, 0, addr, EX_WAITOK))
905 1.1 mrg panic("disklabel region");
906 1.1 mrg }
907 1.1 mrg
908 1.1 mrg /*
909 1.51 chs * if the vnode we are swapping to is the root vnode
910 1.1 mrg * (i.e. we are swapping to the miniroot) then we want
911 1.51 chs * to make sure we don't overwrite it. do a statfs to
912 1.1 mrg * find its size and skip over it.
913 1.1 mrg */
914 1.1 mrg if (vp == rootvp) {
915 1.1 mrg struct mount *mp;
916 1.1 mrg struct statfs *sp;
917 1.1 mrg int rootblocks, rootpages;
918 1.1 mrg
919 1.1 mrg mp = rootvnode->v_mount;
920 1.1 mrg sp = &mp->mnt_stat;
921 1.1 mrg rootblocks = sp->f_blocks * btodb(sp->f_bsize);
922 1.64 fredette /*
923 1.64 fredette * XXX: sp->f_blocks isn't the total number of
924 1.64 fredette * blocks in the filesystem, it's the number of
925 1.64 fredette * data blocks. so, our rootblocks almost
926 1.64 fredette * definitely underestimates the total size
927 1.64 fredette * of the filesystem - how badly depends on the
928 1.64 fredette * details of the filesystem type. there isn't
929 1.64 fredette * an obvious way to deal with this cleanly
930 1.64 fredette * and perfectly, so for now we just pad our
931 1.64 fredette * rootblocks estimate with an extra 5 percent.
932 1.64 fredette */
933 1.64 fredette rootblocks += (rootblocks >> 5) +
934 1.64 fredette (rootblocks >> 6) +
935 1.64 fredette (rootblocks >> 7);
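		/*
		 * (1/32 + 1/64 + 1/128 of rootblocks is roughly 5.5%,
		 * i.e. the "extra 5 percent" mentioned above.)
		 */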
936 1.20 chs rootpages = round_page(dbtob(rootblocks)) >> PAGE_SHIFT;
937 1.32 chs if (rootpages > size)
938 1.1 mrg panic("swap_on: miniroot larger than swap?");
939 1.1 mrg
940 1.51 chs if (extent_alloc_region(sdp->swd_ex, addr,
941 1.1 mrg rootpages, EX_WAITOK))
942 1.1 mrg panic("swap_on: unable to preserve miniroot");
943 1.1 mrg
944 1.32 chs size -= rootpages;
945 1.1 mrg printf("Preserved %d pages of miniroot ", rootpages);
946 1.32 chs printf("leaving %d pages of swap\n", size);
947 1.1 mrg }
948 1.1 mrg
949 1.43 chs /*
950 1.43 chs * try to add anons to reflect the new swap space.
951 1.43 chs */
952 1.43 chs
953 1.43 chs error = uvm_anon_add(size);
954 1.43 chs if (error) {
955 1.43 chs goto bad;
956 1.43 chs }
957 1.43 chs
958 1.39 chs /*
959 1.39 chs * add a ref to vp to reflect usage as a swap device.
960 1.39 chs */
961 1.39 chs vref(vp);
962 1.39 chs
963 1.1 mrg /*
964 1.1 mrg * now add the new swapdev to the drum and enable.
965 1.1 mrg */
966 1.48 fvdl if (extent_alloc(swapmap, npages, EX_NOALIGN, EX_NOBOUNDARY,
967 1.48 fvdl EX_WAITOK, &result))
968 1.48 fvdl panic("swapdrum_add");
969 1.48 fvdl
970 1.48 fvdl sdp->swd_drumoffset = (int)result;
971 1.48 fvdl sdp->swd_drumsize = npages;
972 1.48 fvdl sdp->swd_npages = size;
973 1.26 chs simple_lock(&uvm.swap_data_lock);
974 1.1 mrg sdp->swd_flags &= ~SWF_FAKE; /* going live */
975 1.1 mrg sdp->swd_flags |= (SWF_INUSE|SWF_ENABLE);
976 1.32 chs uvmexp.swpages += size;
977 1.26 chs simple_unlock(&uvm.swap_data_lock);
978 1.1 mrg return (0);
979 1.1 mrg
980 1.1 mrg /*
981 1.43 chs * failure: clean up and return error.
982 1.1 mrg */
983 1.43 chs
984 1.43 chs bad:
985 1.43 chs if (sdp->swd_ex) {
986 1.43 chs extent_destroy(sdp->swd_ex);
987 1.43 chs }
988 1.43 chs if (vp != rootvp) {
989 1.1 mrg (void)VOP_CLOSE(vp, FREAD|FWRITE, p->p_ucred, p);
990 1.43 chs }
991 1.1 mrg return (error);
992 1.1 mrg }
993 1.1 mrg
994 1.1 mrg /*
995 1.1 mrg * swap_off: stop swapping on swapdev
996 1.1 mrg *
997 1.32 chs * => swap data should be locked, we will unlock.
998 1.1 mrg */
999 1.1 mrg static int
1000 1.1 mrg swap_off(p, sdp)
1001 1.1 mrg struct proc *p;
1002 1.1 mrg struct swapdev *sdp;
1003 1.1 mrg {
1004 1.1 mrg UVMHIST_FUNC("swap_off"); UVMHIST_CALLED(pdhist);
1005 1.32 chs UVMHIST_LOG(pdhist, " dev=%x", sdp->swd_dev,0,0,0);
1006 1.1 mrg
1007 1.32 chs /* disable the swap area being removed */
1008 1.1 mrg sdp->swd_flags &= ~SWF_ENABLE;
1009 1.32 chs simple_unlock(&uvm.swap_data_lock);
1010 1.32 chs
1011 1.32 chs /*
1012 1.32 chs * the idea is to find all the pages that are paged out to this
1013 1.32 chs * device, and page them all in. in uvm, swap-backed pageable
1014 1.32 chs * memory can take two forms: aobjs and anons. call the
1015 1.32 chs * swapoff hook for each subsystem to bring in pages.
1016 1.32 chs */
1017 1.1 mrg
1018 1.32 chs if (uao_swap_off(sdp->swd_drumoffset,
1019 1.32 chs sdp->swd_drumoffset + sdp->swd_drumsize) ||
1020 1.32 chs anon_swap_off(sdp->swd_drumoffset,
1021 1.32 chs sdp->swd_drumoffset + sdp->swd_drumsize)) {
1022 1.51 chs
1023 1.32 chs simple_lock(&uvm.swap_data_lock);
1024 1.32 chs sdp->swd_flags |= SWF_ENABLE;
1025 1.32 chs simple_unlock(&uvm.swap_data_lock);
1026 1.32 chs return ENOMEM;
1027 1.32 chs }
1028 1.46 chs KASSERT(sdp->swd_npginuse == sdp->swd_npgbad);
1029 1.1 mrg
1030 1.1 mrg /*
1031 1.58 enami * done with the vnode.
1032 1.39 chs * drop our ref on the vnode before calling VOP_CLOSE()
1033 1.39 chs * so that spec_close() can tell if this is the last close.
1034 1.1 mrg */
1035 1.39 chs vrele(sdp->swd_vp);
1036 1.32 chs if (sdp->swd_vp != rootvp) {
1037 1.32 chs (void) VOP_CLOSE(sdp->swd_vp, FREAD|FWRITE, p->p_ucred, p);
1038 1.32 chs }
1039 1.32 chs
1040 1.32 chs /* remove anons from the system */
1041 1.32 chs uvm_anon_remove(sdp->swd_npages);
1042 1.32 chs
1043 1.32 chs simple_lock(&uvm.swap_data_lock);
1044 1.32 chs uvmexp.swpages -= sdp->swd_npages;
1045 1.1 mrg
1046 1.32 chs if (swaplist_find(sdp->swd_vp, 1) == NULL)
1047 1.70 provos panic("swap_off: swapdev not in list");
1048 1.32 chs swaplist_trim();
1049 1.48 fvdl simple_unlock(&uvm.swap_data_lock);
1050 1.1 mrg
1051 1.32 chs /*
1052 1.32 chs * free all resources!
1053 1.32 chs */
1054 1.32 chs extent_free(swapmap, sdp->swd_drumoffset, sdp->swd_drumsize,
1055 1.32 chs EX_WAITOK);
1056 1.1 mrg extent_destroy(sdp->swd_ex);
1057 1.66 hannken bufq_free(&sdp->swd_tab);
1058 1.32 chs free(sdp, M_VMSWAP);
1059 1.1 mrg return (0);
1060 1.1 mrg }
1061 1.1 mrg
1062 1.1 mrg /*
1063 1.1 mrg * /dev/drum interface and i/o functions
1064 1.1 mrg */
1065 1.1 mrg
1066 1.1 mrg /*
1067 1.1 mrg * swread: the read function for the drum (just a call to physio)
1068 1.1 mrg */
1069 1.1 mrg /*ARGSUSED*/
1070 1.1 mrg int
1071 1.1 mrg swread(dev, uio, ioflag)
1072 1.1 mrg dev_t dev;
1073 1.1 mrg struct uio *uio;
1074 1.1 mrg int ioflag;
1075 1.1 mrg {
1076 1.1 mrg UVMHIST_FUNC("swread"); UVMHIST_CALLED(pdhist);
1077 1.1 mrg
1078 1.1 mrg UVMHIST_LOG(pdhist, " dev=%x offset=%qx", dev, uio->uio_offset, 0, 0);
1079 1.1 mrg return (physio(swstrategy, NULL, dev, B_READ, minphys, uio));
1080 1.1 mrg }
1081 1.1 mrg
1082 1.1 mrg /*
1083 1.1 mrg * swwrite: the write function for the drum (just a call to physio)
1084 1.1 mrg */
1085 1.1 mrg /*ARGSUSED*/
1086 1.1 mrg int
1087 1.1 mrg swwrite(dev, uio, ioflag)
1088 1.1 mrg dev_t dev;
1089 1.1 mrg struct uio *uio;
1090 1.1 mrg int ioflag;
1091 1.1 mrg {
1092 1.1 mrg UVMHIST_FUNC("swwrite"); UVMHIST_CALLED(pdhist);
1093 1.1 mrg
1094 1.1 mrg UVMHIST_LOG(pdhist, " dev=%x offset=%qx", dev, uio->uio_offset, 0, 0);
1095 1.1 mrg return (physio(swstrategy, NULL, dev, B_WRITE, minphys, uio));
1096 1.1 mrg }
1097 1.1 mrg
1098 1.1 mrg /*
1099 1.1 mrg * swstrategy: perform I/O on the drum
1100 1.1 mrg *
1101 1.1 mrg * => we must map the i/o request from the drum to the correct swapdev.
1102 1.1 mrg */
1103 1.1 mrg void
1104 1.1 mrg swstrategy(bp)
1105 1.1 mrg struct buf *bp;
1106 1.1 mrg {
1107 1.1 mrg struct swapdev *sdp;
1108 1.1 mrg struct vnode *vp;
1109 1.25 chs int s, pageno, bn;
1110 1.1 mrg UVMHIST_FUNC("swstrategy"); UVMHIST_CALLED(pdhist);
1111 1.1 mrg
1112 1.1 mrg /*
1113 1.1 mrg * convert block number to swapdev. note that swapdev can't
1114 1.1 mrg * be yanked out from under us because we are holding resources
1115 1.1 mrg * in it (i.e. the blocks we are doing I/O on).
1116 1.1 mrg */
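	/*
	 * e.g. with 512-byte disk blocks and 4096-byte pages, a b_blkno
	 * of 64 is 32768 bytes into the drum, i.e. drum page 8.
	 */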
1117 1.41 chs pageno = dbtob((int64_t)bp->b_blkno) >> PAGE_SHIFT;
1118 1.26 chs simple_lock(&uvm.swap_data_lock);
1119 1.1 mrg sdp = swapdrum_getsdp(pageno);
1120 1.26 chs simple_unlock(&uvm.swap_data_lock);
1121 1.1 mrg if (sdp == NULL) {
1122 1.1 mrg bp->b_error = EINVAL;
1123 1.1 mrg bp->b_flags |= B_ERROR;
1124 1.1 mrg biodone(bp);
1125 1.1 mrg UVMHIST_LOG(pdhist, " failed to get swap device", 0, 0, 0, 0);
1126 1.1 mrg return;
1127 1.1 mrg }
1128 1.1 mrg
1129 1.1 mrg /*
1130 1.1 mrg * convert drum page number to block number on this swapdev.
1131 1.1 mrg */
1132 1.1 mrg
1133 1.32 chs pageno -= sdp->swd_drumoffset; /* page # on swapdev */
1134 1.44 enami bn = btodb((u_int64_t)pageno << PAGE_SHIFT); /* convert to diskblock */
1135 1.1 mrg
1136 1.41 chs UVMHIST_LOG(pdhist, " %s: mapoff=%x bn=%x bcount=%ld",
1137 1.1 mrg ((bp->b_flags & B_READ) == 0) ? "write" : "read",
1138 1.1 mrg sdp->swd_drumoffset, bn, bp->b_bcount);
1139 1.1 mrg
1140 1.1 mrg /*
1141 1.1 mrg * for block devices we finish up here.
1142 1.32 chs * for regular files we have to do more work which we delegate
1143 1.1 mrg * to sw_reg_strategy().
1144 1.1 mrg */
1145 1.1 mrg
1146 1.1 mrg switch (sdp->swd_vp->v_type) {
1147 1.1 mrg default:
1148 1.1 mrg panic("swstrategy: vnode type 0x%x", sdp->swd_vp->v_type);
1149 1.32 chs
1150 1.1 mrg case VBLK:
1151 1.1 mrg
1152 1.1 mrg /*
1153 1.1 mrg * must convert "bp" from an I/O on /dev/drum to an I/O
1154 1.1 mrg * on the swapdev (sdp).
1155 1.1 mrg */
1156 1.25 chs s = splbio();
1157 1.1 mrg bp->b_blkno = bn; /* swapdev block number */
1158 1.1 mrg vp = sdp->swd_vp; /* swapdev vnode pointer */
1159 1.1 mrg bp->b_dev = sdp->swd_dev; /* swapdev dev_t */
1160 1.1 mrg
1161 1.1 mrg /*
1162 1.1 mrg * if we are doing a write, we have to redirect the i/o from the
1163 1.1 mrg * drum's v_numoutput counter to the swapdev's.
1164 1.1 mrg */
1165 1.1 mrg if ((bp->b_flags & B_READ) == 0) {
1166 1.1 mrg vwakeup(bp); /* kills one 'v_numoutput' on drum */
1167 1.1 mrg vp->v_numoutput++; /* put it on swapdev */
1168 1.1 mrg }
1169 1.1 mrg
1170 1.41 chs /*
1171 1.1 mrg * finally plug in swapdev vnode and start I/O
1172 1.1 mrg */
1173 1.1 mrg bp->b_vp = vp;
1174 1.25 chs splx(s);
1175 1.1 mrg VOP_STRATEGY(bp);
1176 1.1 mrg return;
1177 1.32 chs
1178 1.1 mrg case VREG:
1179 1.1 mrg /*
1180 1.32 chs * delegate to sw_reg_strategy function.
1181 1.1 mrg */
1182 1.1 mrg sw_reg_strategy(sdp, bp, bn);
1183 1.1 mrg return;
1184 1.1 mrg }
1185 1.1 mrg /* NOTREACHED */
1186 1.1 mrg }
1187 1.1 mrg
1188 1.1 mrg /*
1189 1.1 mrg * sw_reg_strategy: handle swap i/o to regular files
1190 1.1 mrg */
1191 1.1 mrg static void
1192 1.1 mrg sw_reg_strategy(sdp, bp, bn)
1193 1.1 mrg struct swapdev *sdp;
1194 1.1 mrg struct buf *bp;
1195 1.1 mrg int bn;
1196 1.1 mrg {
1197 1.1 mrg struct vnode *vp;
1198 1.1 mrg struct vndxfer *vnx;
1199 1.44 enami daddr_t nbn;
1200 1.1 mrg caddr_t addr;
1201 1.44 enami off_t byteoff;
1202 1.9 mrg int s, off, nra, error, sz, resid;
1203 1.1 mrg UVMHIST_FUNC("sw_reg_strategy"); UVMHIST_CALLED(pdhist);
1204 1.1 mrg
1205 1.1 mrg /*
1206 1.1 mrg * allocate a vndxfer head for this transfer and point it to
1207 1.1 mrg * our buffer.
1208 1.1 mrg */
1209 1.12 pk getvndxfer(vnx);
1210 1.1 mrg vnx->vx_flags = VX_BUSY;
1211 1.1 mrg vnx->vx_error = 0;
1212 1.1 mrg vnx->vx_pending = 0;
1213 1.1 mrg vnx->vx_bp = bp;
1214 1.1 mrg vnx->vx_sdp = sdp;
1215 1.1 mrg
1216 1.1 mrg /*
1217 1.1 mrg * setup for main loop where we read filesystem blocks into
1218 1.1 mrg * our buffer.
1219 1.1 mrg */
1220 1.1 mrg error = 0;
1221 1.1 mrg bp->b_resid = bp->b_bcount; /* nothing transferred yet! */
1222 1.1 mrg addr = bp->b_data; /* current position in buffer */
1223 1.44 enami byteoff = dbtob((u_int64_t)bn);
1224 1.1 mrg
1225 1.1 mrg for (resid = bp->b_resid; resid; resid -= sz) {
1226 1.1 mrg struct vndbuf *nbp;
1227 1.1 mrg
1228 1.1 mrg /*
1229 1.1 mrg * translate byteoffset into block number. return values:
1230 1.1 mrg * vp = vnode of underlying device
1231 1.1 mrg * nbn = new block number (on underlying vnode dev)
1232 1.1 mrg * nra = num blocks we can read-ahead (excludes requested
1233 1.1 mrg * block)
1234 1.1 mrg */
1235 1.1 mrg nra = 0;
1236 1.1 mrg error = VOP_BMAP(sdp->swd_vp, byteoff / sdp->swd_bsize,
1237 1.1 mrg &vp, &nbn, &nra);
1238 1.1 mrg
1239 1.32 chs if (error == 0 && nbn == (daddr_t)-1) {
1240 1.51 chs /*
1241 1.23 marc * this used to just set error, but that doesn't
1242 1.23 marc * do the right thing. Instead, it causes random
1243 1.23 marc * memory errors. The panic() should remain until
1244 1.23 marc * this condition doesn't destabilize the system.
1245 1.23 marc */
1246 1.23 marc #if 1
1247 1.23 marc panic("sw_reg_strategy: swap to sparse file");
1248 1.23 marc #else
1249 1.1 mrg error = EIO; /* failure */
1250 1.23 marc #endif
1251 1.23 marc }
1252 1.1 mrg
1253 1.1 mrg /*
1254 1.1 mrg * punt if there was an error or a hole in the file.
1255 1.1 mrg * we must wait for any i/o ops we have already started
1256 1.1 mrg * to finish before returning.
1257 1.1 mrg *
1258 1.1 mrg * XXX we could deal with holes here but it would be
1259 1.1 mrg * a hassle (in the write case).
1260 1.1 mrg */
1261 1.1 mrg if (error) {
1262 1.1 mrg s = splbio();
1263 1.1 mrg vnx->vx_error = error; /* pass error up */
1264 1.1 mrg goto out;
1265 1.1 mrg }
1266 1.1 mrg
1267 1.1 mrg /*
1268 1.1 mrg * compute the size ("sz") of this transfer (in bytes).
1269 1.1 mrg */
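		/*
		 * e.g. with swd_bsize of 8192, nra of 0 and byteoff of
		 * 10000, off is 1808 and sz is 6384 (before being
		 * clipped to resid below).
		 */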
1270 1.41 chs off = byteoff % sdp->swd_bsize;
1271 1.41 chs sz = (1 + nra) * sdp->swd_bsize - off;
1272 1.41 chs if (sz > resid)
1273 1.1 mrg sz = resid;
1274 1.1 mrg
1275 1.41 chs UVMHIST_LOG(pdhist, "sw_reg_strategy: "
1276 1.41 chs "vp %p/%p offset 0x%x/0x%x",
1277 1.41 chs sdp->swd_vp, vp, byteoff, nbn);
1278 1.1 mrg
1279 1.1 mrg /*
1280 1.1 mrg * now get a buf structure. note that the vb_buf is
1281 1.1 mrg * at the front of the nbp structure so that you can
1282 1.1 mrg * cast pointers between the two structures easily.
1283 1.1 mrg */
1284 1.12 pk getvndbuf(nbp);
1285 1.1 mrg nbp->vb_buf.b_flags = bp->b_flags | B_CALL;
1286 1.1 mrg nbp->vb_buf.b_bcount = sz;
1287 1.12 pk nbp->vb_buf.b_bufsize = sz;
1288 1.1 mrg nbp->vb_buf.b_error = 0;
1289 1.1 mrg nbp->vb_buf.b_data = addr;
1290 1.41 chs nbp->vb_buf.b_lblkno = 0;
1291 1.1 mrg nbp->vb_buf.b_blkno = nbn + btodb(off);
1292 1.34 thorpej nbp->vb_buf.b_rawblkno = nbp->vb_buf.b_blkno;
1293 1.1 mrg nbp->vb_buf.b_iodone = sw_reg_iodone;
1294 1.53 chs nbp->vb_buf.b_vp = vp;
1295 1.53 chs if (vp->v_type == VBLK) {
1296 1.53 chs nbp->vb_buf.b_dev = vp->v_rdev;
1297 1.53 chs }
1298 1.30 fvdl LIST_INIT(&nbp->vb_buf.b_dep);
1299 1.1 mrg
1300 1.1 mrg nbp->vb_xfer = vnx; /* patch it back in to vnx */
1301 1.1 mrg
1302 1.1 mrg /*
1303 1.1 mrg * Just sort by block number
1304 1.1 mrg */
1305 1.1 mrg s = splbio();
1306 1.1 mrg if (vnx->vx_error != 0) {
1307 1.1 mrg putvndbuf(nbp);
1308 1.1 mrg goto out;
1309 1.1 mrg }
1310 1.1 mrg vnx->vx_pending++;
1311 1.1 mrg
1312 1.1 mrg /* sort it in and start I/O if we are not over our limit */
1313 1.65 hannken BUFQ_PUT(&sdp->swd_tab, &nbp->vb_buf);
1314 1.1 mrg sw_reg_start(sdp);
1315 1.1 mrg splx(s);
1316 1.1 mrg
1317 1.1 mrg /*
1318 1.1 mrg * advance to the next I/O
1319 1.1 mrg */
1320 1.9 mrg byteoff += sz;
1321 1.1 mrg addr += sz;
1322 1.1 mrg }
1323 1.1 mrg
1324 1.1 mrg s = splbio();
1325 1.1 mrg
1326 1.1 mrg out: /* Arrive here at splbio */
1327 1.1 mrg vnx->vx_flags &= ~VX_BUSY;
1328 1.1 mrg if (vnx->vx_pending == 0) {
1329 1.1 mrg if (vnx->vx_error != 0) {
1330 1.1 mrg bp->b_error = vnx->vx_error;
1331 1.1 mrg bp->b_flags |= B_ERROR;
1332 1.1 mrg }
1333 1.1 mrg putvndxfer(vnx);
1334 1.1 mrg biodone(bp);
1335 1.1 mrg }
1336 1.1 mrg splx(s);
1337 1.1 mrg }
1338 1.1 mrg
1339 1.1 mrg /*
1340 1.1 mrg * sw_reg_start: start an I/O request on the requested swapdev
1341 1.1 mrg *
1342 1.65 hannken * => reqs are sorted by b_rawblkno (above)
1343 1.1 mrg */
1344 1.1 mrg static void
1345 1.1 mrg sw_reg_start(sdp)
1346 1.1 mrg struct swapdev *sdp;
1347 1.1 mrg {
1348 1.1 mrg struct buf *bp;
1349 1.1 mrg UVMHIST_FUNC("sw_reg_start"); UVMHIST_CALLED(pdhist);
1350 1.1 mrg
1351 1.8 mrg /* recursion control */
1352 1.1 mrg if ((sdp->swd_flags & SWF_BUSY) != 0)
1353 1.1 mrg return;
1354 1.1 mrg
1355 1.1 mrg sdp->swd_flags |= SWF_BUSY;
1356 1.1 mrg
1357 1.33 thorpej while (sdp->swd_active < sdp->swd_maxactive) {
1358 1.65 hannken bp = BUFQ_GET(&sdp->swd_tab);
1359 1.1 mrg if (bp == NULL)
1360 1.1 mrg break;
1361 1.33 thorpej sdp->swd_active++;
1362 1.1 mrg
1363 1.1 mrg UVMHIST_LOG(pdhist,
1364 1.1 mrg "sw_reg_start: bp %p vp %p blkno %p cnt %lx",
1365 1.1 mrg bp, bp->b_vp, bp->b_blkno, bp->b_bcount);
1366 1.1 mrg if ((bp->b_flags & B_READ) == 0)
1367 1.1 mrg bp->b_vp->v_numoutput++;
1368 1.41 chs
1369 1.1 mrg VOP_STRATEGY(bp);
1370 1.1 mrg }
1371 1.1 mrg sdp->swd_flags &= ~SWF_BUSY;
1372 1.1 mrg }
1373 1.1 mrg
1374 1.1 mrg /*
1375 1.1 mrg * sw_reg_iodone: one of our i/o's has completed and needs post-i/o cleanup
1376 1.1 mrg *
1377 1.1 mrg * => note that we can recover the vndbuf struct by casting the buf ptr
1378 1.1 mrg */
1379 1.1 mrg static void
1380 1.1 mrg sw_reg_iodone(bp)
1381 1.1 mrg struct buf *bp;
1382 1.1 mrg {
1383 1.1 mrg struct vndbuf *vbp = (struct vndbuf *) bp;
1384 1.1 mrg struct vndxfer *vnx = vbp->vb_xfer;
1385 1.1 mrg struct buf *pbp = vnx->vx_bp; /* parent buffer */
1386 1.1 mrg struct swapdev *sdp = vnx->vx_sdp;
1387 1.72 chs int s, resid, error;
1388 1.1 mrg UVMHIST_FUNC("sw_reg_iodone"); UVMHIST_CALLED(pdhist);
1389 1.1 mrg
1390 1.1 mrg UVMHIST_LOG(pdhist, " vbp=%p vp=%p blkno=%x addr=%p",
1391 1.1 mrg vbp, vbp->vb_buf.b_vp, vbp->vb_buf.b_blkno, vbp->vb_buf.b_data);
1392 1.1 mrg UVMHIST_LOG(pdhist, " cnt=%lx resid=%lx",
1393 1.1 mrg vbp->vb_buf.b_bcount, vbp->vb_buf.b_resid, 0, 0);
1394 1.1 mrg
1395 1.1 mrg /*
1396 1.1 mrg * protect vbp at splbio and update.
1397 1.1 mrg */
1398 1.1 mrg
1399 1.1 mrg s = splbio();
1400 1.1 mrg resid = vbp->vb_buf.b_bcount - vbp->vb_buf.b_resid;
1401 1.1 mrg pbp->b_resid -= resid;
1402 1.1 mrg vnx->vx_pending--;
1403 1.1 mrg
1404 1.72 chs if (vbp->vb_buf.b_flags & B_ERROR) {
1405 1.1 mrg /* pass error upward */
1406 1.72 chs error = vbp->vb_buf.b_error ? vbp->vb_buf.b_error : EIO;
1407 1.72 chs UVMHIST_LOG(pdhist, " got error=%d !", error, 0, 0, 0);
1408 1.72 chs vnx->vx_error = error;
1409 1.35 chs }
1410 1.35 chs
1411 1.35 chs /*
1412 1.1 mrg * kill vbp structure
1413 1.1 mrg */
1414 1.1 mrg putvndbuf(vbp);
1415 1.1 mrg
1416 1.1 mrg /*
1417 1.1 mrg * wrap up this transaction if it has run to completion or, in
1418 1.1 mrg * case of an error, when all auxiliary buffers have returned.
1419 1.1 mrg */
1420 1.1 mrg if (vnx->vx_error != 0) {
1421 1.1 mrg /* pass error upward */
1422 1.1 mrg pbp->b_flags |= B_ERROR;
1423 1.1 mrg pbp->b_error = vnx->vx_error;
1424 1.1 mrg if ((vnx->vx_flags & VX_BUSY) == 0 && vnx->vx_pending == 0) {
1425 1.1 mrg putvndxfer(vnx);
1426 1.1 mrg biodone(pbp);
1427 1.1 mrg }
1428 1.11 pk } else if (pbp->b_resid == 0) {
1429 1.46 chs KASSERT(vnx->vx_pending == 0);
1430 1.1 mrg if ((vnx->vx_flags & VX_BUSY) == 0) {
1431 1.8 mrg UVMHIST_LOG(pdhist, " iodone pbp=%p error=%d !",
1432 1.8 mrg pbp, vnx->vx_error, 0, 0);
1433 1.8 mrg putvndxfer(vnx);
1434 1.1 mrg biodone(pbp);
1435 1.1 mrg }
1436 1.1 mrg }
1437 1.1 mrg
1438 1.1 mrg /*
1439 1.1 mrg * done! start next swapdev I/O if one is pending
1440 1.1 mrg */
1441 1.33 thorpej sdp->swd_active--;
1442 1.1 mrg sw_reg_start(sdp);
1443 1.1 mrg splx(s);
1444 1.1 mrg }
1445 1.1 mrg
1446 1.1 mrg
1447 1.1 mrg /*
1448 1.1 mrg * uvm_swap_alloc: allocate space on swap
1449 1.1 mrg *
1450 1.1 mrg * => allocation is done "round robin" down the priority list, as we
1451 1.1 mrg * allocate in a priority we "rotate" the circle queue.
1452 1.1 mrg * => space can be freed with uvm_swap_free
1453 1.1 mrg * => we return the page slot number in /dev/drum (0 == invalid slot)
1454 1.26 chs * => we lock uvm.swap_data_lock
1455 1.1 mrg * => XXXMRG: "LESSOK" INTERFACE NEEDED TO EXTENT SYSTEM
1456 1.1 mrg */
1457 1.1 mrg int
1458 1.1 mrg uvm_swap_alloc(nslots, lessok)
1459 1.1 mrg int *nslots; /* IN/OUT */
1460 1.1 mrg boolean_t lessok;
1461 1.1 mrg {
1462 1.1 mrg struct swapdev *sdp;
1463 1.1 mrg struct swappri *spp;
1464 1.1 mrg u_long result;
1465 1.1 mrg UVMHIST_FUNC("uvm_swap_alloc"); UVMHIST_CALLED(pdhist);
1466 1.1 mrg
1467 1.1 mrg /*
1468 1.1 mrg * no swap devices configured yet? definite failure.
1469 1.1 mrg */
1470 1.1 mrg if (uvmexp.nswapdev < 1)
1471 1.1 mrg return 0;
1472 1.51 chs
1473 1.1 mrg /*
1474 1.1 mrg * lock data lock, convert slots into blocks, and enter loop
1475 1.1 mrg */
1476 1.26 chs simple_lock(&uvm.swap_data_lock);
1477 1.1 mrg
1478 1.1 mrg ReTry: /* XXXMRG */
1479 1.55 chs LIST_FOREACH(spp, &swap_priority, spi_swappri) {
1480 1.55 chs CIRCLEQ_FOREACH(sdp, &spp->spi_swapdev, swd_next) {
1481 1.1 mrg /* if it's not enabled, then we can't swap from it */
1482 1.1 mrg if ((sdp->swd_flags & SWF_ENABLE) == 0)
1483 1.1 mrg continue;
1484 1.1 mrg if (sdp->swd_npginuse + *nslots > sdp->swd_npages)
1485 1.1 mrg continue;
1486 1.1 mrg if (extent_alloc(sdp->swd_ex, *nslots, EX_NOALIGN,
1487 1.1 mrg EX_NOBOUNDARY, EX_MALLOCOK|EX_NOWAIT,
1488 1.1 mrg &result) != 0) {
1489 1.1 mrg continue;
1490 1.1 mrg }
1491 1.1 mrg
1492 1.1 mrg /*
1493 1.1 mrg * successful allocation! now rotate the circleq.
1494 1.1 mrg */
1495 1.1 mrg CIRCLEQ_REMOVE(&spp->spi_swapdev, sdp, swd_next);
1496 1.1 mrg CIRCLEQ_INSERT_TAIL(&spp->spi_swapdev, sdp, swd_next);
1497 1.1 mrg sdp->swd_npginuse += *nslots;
1498 1.1 mrg uvmexp.swpginuse += *nslots;
1499 1.26 chs simple_unlock(&uvm.swap_data_lock);
1500 1.1 mrg /* done! return drum slot number */
1501 1.1 mrg UVMHIST_LOG(pdhist,
1502 1.1 mrg "success! returning %d slots starting at %d",
1503 1.1 mrg *nslots, result + sdp->swd_drumoffset, 0, 0);
1504 1.55 chs return (result + sdp->swd_drumoffset);
1505 1.1 mrg }
1506 1.1 mrg }
1507 1.1 mrg
1508 1.1 mrg /* XXXMRG: BEGIN HACK */
1509 1.1 mrg if (*nslots > 1 && lessok) {
1510 1.1 mrg *nslots = 1;
1511 1.1 mrg goto ReTry; /* XXXMRG: ugh! extent should support this for us */
1512 1.1 mrg }
1513 1.1 mrg /* XXXMRG: END HACK */
1514 1.1 mrg
1515 1.26 chs simple_unlock(&uvm.swap_data_lock);
1516 1.55 chs return 0;
1517 1.1 mrg }
1518 1.1 mrg
1519 1.1 mrg /*
1520 1.32 chs * uvm_swap_markbad: keep track of swap ranges where we've had i/o errors
1521 1.32 chs *
1522 1.32 chs * => we lock uvm.swap_data_lock
1523 1.32 chs */
1524 1.32 chs void
1525 1.32 chs uvm_swap_markbad(startslot, nslots)
1526 1.32 chs int startslot;
1527 1.32 chs int nslots;
1528 1.32 chs {
1529 1.32 chs struct swapdev *sdp;
1530 1.32 chs UVMHIST_FUNC("uvm_swap_markbad"); UVMHIST_CALLED(pdhist);
1531 1.32 chs
1532 1.32 chs simple_lock(&uvm.swap_data_lock);
1533 1.32 chs sdp = swapdrum_getsdp(startslot);
1534 1.32 chs
1535 1.32 chs /*
1536 1.32 chs * we just keep track of how many pages have been marked bad
1537 1.32 chs * in this device, to make everything add up in swap_off().
1538 1.32 chs * we assume here that the range of slots will all be within
1539 1.32 chs * one swap device.
1540 1.32 chs */
1541 1.41 chs
1542 1.32 chs sdp->swd_npgbad += nslots;
1543 1.41 chs UVMHIST_LOG(pdhist, "now %d bad", sdp->swd_npgbad, 0,0,0);
1544 1.32 chs simple_unlock(&uvm.swap_data_lock);
1545 1.32 chs }
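/*
 * illustrative sketch (editor's addition, not part of the original
 * source): how an error path might use uvm_swap_markbad().  after a
 * failed swap i/o the slots are counted as bad so the accounting in
 * swap_off() still adds up; the caller then records SWSLOT_BAD in
 * place of the slot number, and uvm_swap_free() below silently
 * ignores attempts to free that value.
 */
#if 0
static void
example_swap_io_failed(slotp, npages)	/* hypothetical helper */
	int *slotp, npages;
{
	uvm_swap_markbad(*slotp, npages);
	*slotp = SWSLOT_BAD;	/* never freed; uvm_swap_free() ignores it */
}
#endif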
1546 1.32 chs
1547 1.32 chs /*
1548 1.1 mrg * uvm_swap_free: free swap slots
1549 1.1 mrg *
1550 1.1 mrg * => this can be all or part of an allocation made by uvm_swap_alloc
1551 1.26 chs * => we lock uvm.swap_data_lock
1552 1.1 mrg */
1553 1.1 mrg void
1554 1.1 mrg uvm_swap_free(startslot, nslots)
1555 1.1 mrg int startslot;
1556 1.1 mrg int nslots;
1557 1.1 mrg {
1558 1.1 mrg struct swapdev *sdp;
1559 1.1 mrg UVMHIST_FUNC("uvm_swap_free"); UVMHIST_CALLED(pdhist);
1560 1.1 mrg
1561 1.1 mrg UVMHIST_LOG(pdhist, "freeing %d slots starting at %d", nslots,
1562 1.1 mrg startslot, 0, 0);
1563 1.32 chs
1564 1.32 chs /*
1565 1.32 chs * ignore attempts to free the "bad" slot.
1566 1.32 chs */
1567 1.46 chs
1568 1.32 chs if (startslot == SWSLOT_BAD) {
1569 1.32 chs return;
1570 1.32 chs }
1571 1.32 chs
1572 1.1 mrg /*
1573 1.51 chs * convert drum slot offset back to sdp, free the blocks
1574 1.51 chs * in the extent, and return. must hold uvm.swap_data_lock
1575 1.1 mrg * to do the lookup and access the extent.
1576 1.1 mrg */
1577 1.46 chs
1578 1.26 chs simple_lock(&uvm.swap_data_lock);
1579 1.1 mrg sdp = swapdrum_getsdp(startslot);
1580 1.46 chs KASSERT(uvmexp.nswapdev >= 1);
1581 1.46 chs KASSERT(sdp != NULL);
1582 1.46 chs KASSERT(sdp->swd_npginuse >= nslots);
1583 1.12 pk if (extent_free(sdp->swd_ex, startslot - sdp->swd_drumoffset, nslots,
1584 1.32 chs EX_MALLOCOK|EX_NOWAIT) != 0) {
1585 1.32 chs printf("warning: resource shortage: %d pages of swap lost\n",
1586 1.12 pk nslots);
1587 1.32 chs }
1588 1.1 mrg sdp->swd_npginuse -= nslots;
1589 1.1 mrg uvmexp.swpginuse -= nslots;
1590 1.26 chs simple_unlock(&uvm.swap_data_lock);
1591 1.1 mrg }
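/*
 * illustrative sketch (editor's addition, not part of the original
 * source): uvm_swap_free() may release just part of an earlier
 * allocation, since it frees an arbitrary
 * [startslot, startslot + nslots) range back to the owning device's
 * extent.
 */
#if 0
static void
example_partial_free()			/* hypothetical helper */
{
	int nslots = 4;
	int base;

	base = uvm_swap_alloc(&nslots, FALSE);
	if (base == 0)
		return;

	/* give back the tail of the cluster but keep the first slot */
	uvm_swap_free(base + 1, nslots - 1);

	/* ... later, release the remaining slot ... */
	uvm_swap_free(base, 1);
}
#endif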
1592 1.1 mrg
1593 1.1 mrg /*
1594 1.1 mrg * uvm_swap_put: put any number of pages into a contig place on swap
1595 1.1 mrg *
1596 1.1 mrg * => can be sync or async
1597 1.1 mrg */
1598 1.54 chs
1599 1.1 mrg int
1600 1.1 mrg uvm_swap_put(swslot, ppsp, npages, flags)
1601 1.1 mrg int swslot;
1602 1.1 mrg struct vm_page **ppsp;
1603 1.54 chs int npages;
1604 1.54 chs int flags;
1605 1.1 mrg {
1606 1.56 chs int error;
1607 1.1 mrg
1608 1.56 chs error = uvm_swap_io(ppsp, swslot, npages, B_WRITE |
1609 1.1 mrg ((flags & PGO_SYNCIO) ? 0 : B_ASYNC));
1610 1.56 chs return error;
1611 1.1 mrg }
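/*
 * illustrative sketch (editor's addition, not part of the original
 * source): writing a page cluster to swap with uvm_swap_put().
 * PGO_SYNCIO makes the underlying uvm_swap_io() wait in biowait();
 * without it the write goes out B_ASYNC and completes through the
 * b_iodone handler.
 */
#if 0
static int
example_swap_out(pps, npages, swslot, sync)	/* hypothetical helper */
	struct vm_page **pps;
	int npages, swslot;
	boolean_t sync;
{
	return uvm_swap_put(swslot, pps, npages, sync ? PGO_SYNCIO : 0);
}
#endif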
1612 1.1 mrg
1613 1.1 mrg /*
1614 1.1 mrg * uvm_swap_get: get a single page from swap
1615 1.1 mrg *
1616 1.1 mrg * => usually a sync op (from fault)
1617 1.1 mrg */
1618 1.54 chs
1619 1.1 mrg int
1620 1.1 mrg uvm_swap_get(page, swslot, flags)
1621 1.1 mrg struct vm_page *page;
1622 1.1 mrg int swslot, flags;
1623 1.1 mrg {
1624 1.56 chs int error;
1625 1.1 mrg
1626 1.1 mrg uvmexp.nswget++;
1627 1.46 chs KASSERT(flags & PGO_SYNCIO);
1628 1.32 chs if (swslot == SWSLOT_BAD) {
1629 1.47 chs return EIO;
1630 1.32 chs }
1631 1.56 chs error = uvm_swap_io(&page, swslot, 1, B_READ |
1632 1.1 mrg ((flags & PGO_SYNCIO) ? 0 : B_ASYNC));
1633 1.56 chs if (error == 0) {
1634 1.47 chs
1635 1.26 chs /*
1636 1.54 chs * this page is no longer only in swap.
1637 1.26 chs */
1638 1.47 chs
1639 1.26 chs simple_lock(&uvm.swap_data_lock);
1640 1.56 chs KASSERT(uvmexp.swpgonly > 0);
1641 1.54 chs uvmexp.swpgonly--;
1642 1.26 chs simple_unlock(&uvm.swap_data_lock);
1643 1.26 chs }
1644 1.56 chs return error;
1645 1.1 mrg }
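/*
 * illustrative sketch (editor's addition, not part of the original
 * source): a fault-time read of one page back from swap.  a slot
 * previously passed to uvm_swap_markbad() and recorded as SWSLOT_BAD
 * comes back as EIO, and on success uvm_swap_get() itself drops
 * uvmexp.swpgonly since the page is no longer only in swap.
 */
#if 0
static int
example_swap_in(pg, swslot)		/* hypothetical helper */
	struct vm_page *pg;
	int swslot;
{
	return uvm_swap_get(pg, swslot, PGO_SYNCIO);
}
#endif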
1646 1.1 mrg
1647 1.1 mrg /*
1648 1.1 mrg * uvm_swap_io: do an i/o operation to swap
1649 1.1 mrg */
1650 1.1 mrg
1651 1.1 mrg static int
1652 1.1 mrg uvm_swap_io(pps, startslot, npages, flags)
1653 1.1 mrg struct vm_page **pps;
1654 1.1 mrg int startslot, npages, flags;
1655 1.1 mrg {
1656 1.1 mrg daddr_t startblk;
1657 1.1 mrg struct buf *bp;
1658 1.15 eeh vaddr_t kva;
1659 1.54 chs int error, s, mapinflags;
1660 1.41 chs boolean_t write, async;
1661 1.1 mrg UVMHIST_FUNC("uvm_swap_io"); UVMHIST_CALLED(pdhist);
1662 1.1 mrg
1663 1.1 mrg UVMHIST_LOG(pdhist, "<- called, startslot=%d, npages=%d, flags=%d",
1664 1.1 mrg startslot, npages, flags, 0);
1665 1.32 chs
1666 1.41 chs write = (flags & B_READ) == 0;
1667 1.41 chs async = (flags & B_ASYNC) != 0;
1668 1.41 chs
1669 1.1 mrg /*
1670 1.1 mrg * convert starting drum slot to block number
1671 1.1 mrg */
1672 1.54 chs
1673 1.44 enami startblk = btodb((u_int64_t)startslot << PAGE_SHIFT);
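	/*
	 * (editor's note, illustrative example: with 4k pages PAGE_SHIFT
	 * is 12 and DEV_BSIZE is 512, so drum slot 10 becomes byte
	 * offset 10 << 12 = 40960, i.e. startblk = btodb(40960) = 80.)
	 */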
1674 1.1 mrg
1675 1.1 mrg /*
1676 1.54 chs * first, map the pages into the kernel.
1677 1.41 chs */
1678 1.41 chs
1679 1.54 chs mapinflags = !write ?
1680 1.54 chs UVMPAGER_MAPIN_WAITOK|UVMPAGER_MAPIN_READ :
1681 1.54 chs UVMPAGER_MAPIN_WAITOK|UVMPAGER_MAPIN_WRITE;
1682 1.41 chs kva = uvm_pagermapin(pps, npages, mapinflags);
1683 1.1 mrg
1684 1.51 chs /*
1685 1.41 chs * now allocate a buf for the i/o.
1686 1.1 mrg */
1687 1.54 chs
1688 1.1 mrg s = splbio();
1689 1.54 chs bp = pool_get(&bufpool, PR_WAITOK);
1690 1.41 chs splx(s);
1691 1.1 mrg
1692 1.1 mrg /*
1693 1.1 mrg * fill in the buf. we currently route our i/o through
1694 1.1 mrg * /dev/drum's vnode [swapdev_vp].
1695 1.1 mrg */
1696 1.54 chs
1697 1.21 mycroft bp->b_flags = B_BUSY | B_NOCACHE | (flags & (B_READ|B_ASYNC));
1698 1.1 mrg bp->b_proc = &proc0; /* XXX */
1699 1.12 pk bp->b_vnbufs.le_next = NOLIST;
1700 1.1 mrg bp->b_data = (caddr_t)kva;
1701 1.1 mrg bp->b_blkno = startblk;
1702 1.1 mrg bp->b_vp = swapdev_vp;
1703 1.53 chs bp->b_dev = swapdev_vp->v_rdev;
1704 1.41 chs bp->b_bufsize = bp->b_bcount = npages << PAGE_SHIFT;
1705 1.30 fvdl LIST_INIT(&bp->b_dep);
1706 1.1 mrg
1707 1.51 chs /*
1708 1.41 chs * bump v_numoutput (counter of number of active outputs).
1709 1.1 mrg */
1710 1.54 chs
1711 1.41 chs if (write) {
1712 1.1 mrg s = splbio();
1713 1.1 mrg swapdev_vp->v_numoutput++;
1714 1.1 mrg splx(s);
1715 1.1 mrg }
1716 1.1 mrg
1717 1.1 mrg /*
1718 1.41 chs * for async ops we must set up the iodone handler.
1719 1.1 mrg */
1720 1.54 chs
1721 1.41 chs if (async) {
1722 1.54 chs bp->b_flags |= B_CALL;
1723 1.41 chs bp->b_iodone = uvm_aio_biodone;
1724 1.1 mrg UVMHIST_LOG(pdhist, "doing async!", 0, 0, 0, 0);
1725 1.1 mrg }
1726 1.1 mrg UVMHIST_LOG(pdhist,
1727 1.41 chs "about to start io: data = %p blkno = 0x%x, bcount = %ld",
1728 1.1 mrg bp->b_data, bp->b_blkno, bp->b_bcount, 0);
1729 1.1 mrg
1730 1.1 mrg /*
1731 1.1 mrg * now we start the I/O, and if async, return.
1732 1.1 mrg */
1733 1.54 chs
1734 1.1 mrg VOP_STRATEGY(bp);
1735 1.41 chs if (async)
1736 1.47 chs return 0;
1737 1.1 mrg
1738 1.1 mrg /*
1739 1.1 mrg * must be sync i/o. wait for it to finish
1740 1.1 mrg */
1741 1.54 chs
1742 1.47 chs error = biowait(bp);
1743 1.1 mrg
1744 1.1 mrg /*
1745 1.1 mrg * kill the pager mapping
1746 1.1 mrg */
1747 1.54 chs
1748 1.1 mrg uvm_pagermapout(kva, npages);
1749 1.1 mrg
1750 1.1 mrg /*
1751 1.54 chs * now dispose of the buf and we're done.
1752 1.1 mrg */
1753 1.54 chs
1754 1.1 mrg s = splbio();
1755 1.41 chs if (write)
1756 1.41 chs vwakeup(bp);
1757 1.41 chs pool_put(&bufpool, bp);
1758 1.1 mrg splx(s);
1759 1.47 chs UVMHIST_LOG(pdhist, "<- done (sync) error=%d", error, 0, 0, 0);
1760 1.47 chs return (error);
1761 1.1 mrg }
1762