uvm_swap.c revision 1.52
1 1.52 chs /* $NetBSD: uvm_swap.c,v 1.52 2001/05/26 16:32:47 chs Exp $ */
2 1.1 mrg
3 1.1 mrg /*
4 1.1 mrg * Copyright (c) 1995, 1996, 1997 Matthew R. Green
5 1.1 mrg * All rights reserved.
6 1.1 mrg *
7 1.1 mrg * Redistribution and use in source and binary forms, with or without
8 1.1 mrg * modification, are permitted provided that the following conditions
9 1.1 mrg * are met:
10 1.1 mrg * 1. Redistributions of source code must retain the above copyright
11 1.1 mrg * notice, this list of conditions and the following disclaimer.
12 1.1 mrg * 2. Redistributions in binary form must reproduce the above copyright
13 1.1 mrg * notice, this list of conditions and the following disclaimer in the
14 1.1 mrg * documentation and/or other materials provided with the distribution.
15 1.1 mrg * 3. The name of the author may not be used to endorse or promote products
16 1.1 mrg * derived from this software without specific prior written permission.
17 1.1 mrg *
18 1.1 mrg * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 1.1 mrg * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 1.1 mrg * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 1.1 mrg * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 1.1 mrg * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
23 1.1 mrg * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
24 1.1 mrg * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
25 1.1 mrg * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
26 1.1 mrg * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 1.1 mrg * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 1.1 mrg * SUCH DAMAGE.
29 1.3 mrg *
30 1.3 mrg * from: NetBSD: vm_swap.c,v 1.52 1997/12/02 13:47:37 pk Exp
31 1.3 mrg * from: Id: uvm_swap.c,v 1.1.2.42 1998/02/02 20:38:06 chuck Exp
32 1.1 mrg */
33 1.5 mrg
34 1.6 thorpej #include "fs_nfs.h"
35 1.5 mrg #include "opt_uvmhist.h"
36 1.16 mrg #include "opt_compat_netbsd.h"
37 1.41 chs #include "opt_ddb.h"
38 1.1 mrg
39 1.1 mrg #include <sys/param.h>
40 1.1 mrg #include <sys/systm.h>
41 1.1 mrg #include <sys/buf.h>
42 1.36 mrg #include <sys/conf.h>
43 1.1 mrg #include <sys/proc.h>
44 1.1 mrg #include <sys/namei.h>
45 1.1 mrg #include <sys/disklabel.h>
46 1.1 mrg #include <sys/errno.h>
47 1.1 mrg #include <sys/kernel.h>
48 1.1 mrg #include <sys/malloc.h>
49 1.1 mrg #include <sys/vnode.h>
50 1.1 mrg #include <sys/file.h>
51 1.1 mrg #include <sys/extent.h>
52 1.1 mrg #include <sys/mount.h>
53 1.12 pk #include <sys/pool.h>
54 1.1 mrg #include <sys/syscallargs.h>
55 1.17 mrg #include <sys/swap.h>
56 1.1 mrg
57 1.1 mrg #include <uvm/uvm.h>
58 1.1 mrg
59 1.1 mrg #include <miscfs/specfs/specdev.h>
60 1.1 mrg
61 1.1 mrg /*
62 1.1 mrg * uvm_swap.c: manage configuration and i/o to swap space.
63 1.1 mrg */
64 1.1 mrg
65 1.1 mrg /*
66 1.1 mrg * swap space is managed in the following way:
67 1.51 chs *
68 1.1 mrg * each swap partition or file is described by a "swapdev" structure.
69 1.1 mrg * each "swapdev" structure contains a "swapent" structure which contains
70 1.1 mrg * information that is passed up to the user (via system calls).
71 1.1 mrg *
72 1.1 mrg * each swap partition is assigned a "priority" (int) which controls
73 1.1 mrg * swap partition usage.
74 1.1 mrg *
75 1.1 mrg * the system maintains a global data structure describing all swap
76 1.1 mrg * partitions/files. there is a sorted LIST of "swappri" structures
77 1.1 mrg * which describe "swapdev"'s at that priority. this LIST is headed
78 1.51 chs * by the "swap_priority" global var. each "swappri" contains a
79 1.1 mrg * CIRCLEQ of "swapdev" structures at that priority.
80 1.1 mrg *
81 1.1 mrg * locking:
82 1.1 mrg * - swap_syscall_lock (sleep lock): this lock serializes the swapctl
83 1.1 mrg * system call and prevents the swap priority list from changing
84 1.1 mrg * while we are in the middle of a system call (e.g. SWAP_STATS).
85 1.26 chs * - uvm.swap_data_lock (simple_lock): this lock protects all swap data
86 1.1 mrg * structures including the priority list, the swapdev structures,
87 1.1 mrg * and the swapmap extent.
88 1.1 mrg *
89 1.1 mrg * each swap device has the following info:
90 1.1 mrg * - swap device in use (could be disabled, preventing future use)
91 1.1 mrg * - swap enabled (allows new allocations on swap)
92 1.1 mrg * - map info in /dev/drum
93 1.1 mrg * - vnode pointer
94 1.1 mrg * for swap files only:
95 1.1 mrg * - block size
96 1.1 mrg * - max byte count in buffer
97 1.1 mrg * - buffer
98 1.1 mrg * - credentials to use when doing i/o to file
99 1.1 mrg *
100 1.1 mrg * userland controls and configures swap with the swapctl(2) system call.
101 1.1 mrg * the sys_swapctl performs the following operations:
102 1.1 mrg * [1] SWAP_NSWAP: returns the number of swap devices currently configured
103 1.51 chs * [2] SWAP_STATS: given a pointer to an array of swapent structures
104 1.1 mrg * (passed in via "arg") of a size passed in via "misc" ... we load
105 1.1 mrg * the current swap config into the array.
106 1.1 mrg * [3] SWAP_ON: given a pathname in arg (could be device or file) and a
107 1.1 mrg * priority in "misc", start swapping on it.
108 1.1 mrg * [4] SWAP_OFF: as SWAP_ON, but stops swapping to a device
109 1.1 mrg * [5] SWAP_CTL: changes the priority of a swap device (new priority in
110 1.1 mrg * "misc")
111 1.1 mrg */
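/*
 * a rough sketch of how userland might drive this interface (not part
 * of this file; see swapctl(2) and swapctl(8) for the real consumers,
 * and note the device path below is only an example):
 *
 *	nsw = swapctl(SWAP_NSWAP, NULL, 0);
 *	sep = malloc(nsw * sizeof(struct swapent));
 *	nsw = swapctl(SWAP_STATS, (void *)sep, nsw);    (fills nsw entries)
 *	...
 *	swapctl(SWAP_ON, "/dev/wd0b", 0);               (enable at priority 0)
 */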
112 1.1 mrg
113 1.1 mrg /*
114 1.1 mrg * swapdev: describes a single swap partition/file
115 1.1 mrg *
116 1.1 mrg * note the following should be true:
117 1.1 mrg * swd_inuse <= swd_nblks [number of blocks in use is <= total blocks]
118 1.1 mrg * swd_nblks <= swd_mapsize [because mapsize includes miniroot+disklabel]
119 1.1 mrg */
120 1.1 mrg struct swapdev {
121 1.16 mrg struct oswapent swd_ose;
122 1.16 mrg #define swd_dev swd_ose.ose_dev /* device id */
123 1.16 mrg #define swd_flags swd_ose.ose_flags /* flags:inuse/enable/fake */
124 1.16 mrg #define swd_priority swd_ose.ose_priority /* our priority */
125 1.16 mrg /* also: swd_ose.ose_nblks, swd_ose.ose_inuse */
126 1.16 mrg char *swd_path; /* saved pathname of device */
127 1.16 mrg int swd_pathlen; /* length of pathname */
128 1.16 mrg int swd_npages; /* #pages we can use */
129 1.16 mrg int swd_npginuse; /* #pages in use */
130 1.32 chs int swd_npgbad; /* #pages bad */
131 1.16 mrg int swd_drumoffset; /* page0 offset in drum */
132 1.16 mrg int swd_drumsize; /* #pages in drum */
133 1.16 mrg struct extent *swd_ex; /* extent for this swapdev */
134 1.42 enami char swd_exname[12]; /* name of extent above */
135 1.16 mrg struct vnode *swd_vp; /* backing vnode */
136 1.16 mrg CIRCLEQ_ENTRY(swapdev) swd_next; /* priority circleq */
137 1.1 mrg
138 1.16 mrg int swd_bsize; /* blocksize (bytes) */
139 1.16 mrg int swd_maxactive; /* max active i/o reqs */
140 1.33 thorpej struct buf_queue swd_tab; /* buffer list */
141 1.33 thorpej int swd_active; /* number of active buffers */
142 1.16 mrg struct ucred *swd_cred; /* cred for file access */
143 1.1 mrg };
144 1.1 mrg
145 1.1 mrg /*
146 1.1 mrg * swap device priority entry; the list is kept sorted on `spi_priority'.
147 1.1 mrg */
148 1.1 mrg struct swappri {
149 1.1 mrg int spi_priority; /* priority */
150 1.1 mrg CIRCLEQ_HEAD(spi_swapdev, swapdev) spi_swapdev;
151 1.1 mrg /* circleq of swapdevs at this priority */
152 1.1 mrg LIST_ENTRY(swappri) spi_swappri; /* global list of pri's */
153 1.1 mrg };
154 1.1 mrg
155 1.1 mrg /*
156 1.1 mrg * The following two structures are used to keep track of data transfers
157 1.1 mrg * on swap devices associated with regular files.
158 1.1 mrg * NOTE: this code is more or less a copy of vnd.c; we use the same
159 1.1 mrg * structure names here to ease porting..
160 1.1 mrg */
161 1.1 mrg struct vndxfer {
162 1.1 mrg struct buf *vx_bp; /* Pointer to parent buffer */
163 1.1 mrg struct swapdev *vx_sdp;
164 1.1 mrg int vx_error;
165 1.1 mrg int vx_pending; /* # of pending aux buffers */
166 1.1 mrg int vx_flags;
167 1.1 mrg #define VX_BUSY 1
168 1.1 mrg #define VX_DEAD 2
169 1.1 mrg };
170 1.1 mrg
171 1.1 mrg struct vndbuf {
172 1.1 mrg struct buf vb_buf;
173 1.1 mrg struct vndxfer *vb_xfer;
174 1.1 mrg };
175 1.1 mrg
176 1.12 pk
177 1.1 mrg /*
178 1.12 pk * We keep a pool of vndbuf's and vndxfer structures.
179 1.1 mrg */
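/*
 * a note on the splbio dance in the macros below: vndbufs and vndxfers
 * are handed back to these pools from sw_reg_iodone(), which is run via
 * biodone() at interrupt time, so the get side presumably raises splbio
 * around pool_get() to stay consistent with those interrupt-time puts.
 */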
180 1.49 thorpej static struct pool vndxfer_pool;
181 1.49 thorpej static struct pool vndbuf_pool;
182 1.1 mrg
183 1.12 pk #define getvndxfer(vnx) do { \
184 1.12 pk int s = splbio(); \
185 1.49 thorpej vnx = pool_get(&vndxfer_pool, PR_MALLOCOK|PR_WAITOK); \
186 1.12 pk splx(s); \
187 1.12 pk } while (0)
188 1.12 pk
189 1.12 pk #define putvndxfer(vnx) { \
190 1.49 thorpej pool_put(&vndxfer_pool, (void *)(vnx)); \
191 1.12 pk }
192 1.12 pk
193 1.12 pk #define getvndbuf(vbp) do { \
194 1.12 pk int s = splbio(); \
195 1.49 thorpej vbp = pool_get(&vndbuf_pool, PR_MALLOCOK|PR_WAITOK); \
196 1.12 pk splx(s); \
197 1.12 pk } while (0)
198 1.1 mrg
199 1.12 pk #define putvndbuf(vbp) { \
200 1.49 thorpej pool_put(&vndbuf_pool, (void *)(vbp)); \
201 1.12 pk }
202 1.1 mrg
203 1.36 mrg /* /dev/drum */
204 1.36 mrg bdev_decl(sw);
205 1.36 mrg cdev_decl(sw);
206 1.1 mrg
207 1.1 mrg /*
208 1.1 mrg * local variables
209 1.1 mrg */
210 1.1 mrg static struct extent *swapmap; /* controls the mapping of /dev/drum */
211 1.1 mrg
212 1.1 mrg /* list of all active swap devices [by priority] */
213 1.1 mrg LIST_HEAD(swap_priority, swappri);
214 1.1 mrg static struct swap_priority swap_priority;
215 1.1 mrg
216 1.1 mrg /* locks */
217 1.52 chs struct lock swap_syscall_lock;
218 1.1 mrg
219 1.1 mrg /*
220 1.1 mrg * prototypes
221 1.1 mrg */
222 1.1 mrg static struct swapdev *swapdrum_getsdp __P((int));
223 1.1 mrg
224 1.1 mrg static struct swapdev *swaplist_find __P((struct vnode *, int));
225 1.51 chs static void swaplist_insert __P((struct swapdev *,
226 1.1 mrg struct swappri *, int));
227 1.1 mrg static void swaplist_trim __P((void));
228 1.1 mrg
229 1.1 mrg static int swap_on __P((struct proc *, struct swapdev *));
230 1.1 mrg static int swap_off __P((struct proc *, struct swapdev *));
231 1.1 mrg
232 1.1 mrg static void sw_reg_strategy __P((struct swapdev *, struct buf *, int));
233 1.1 mrg static void sw_reg_iodone __P((struct buf *));
234 1.1 mrg static void sw_reg_start __P((struct swapdev *));
235 1.1 mrg
236 1.1 mrg static int uvm_swap_io __P((struct vm_page **, int, int, int));
237 1.1 mrg
238 1.1 mrg /*
239 1.1 mrg * uvm_swap_init: init the swap system data structures and locks
240 1.1 mrg *
241 1.51 chs * => called at boot time from init_main.c after the filesystems
242 1.1 mrg * are brought up (which happens after uvm_init())
243 1.1 mrg */
244 1.1 mrg void
245 1.1 mrg uvm_swap_init()
246 1.1 mrg {
247 1.1 mrg UVMHIST_FUNC("uvm_swap_init");
248 1.1 mrg
249 1.1 mrg UVMHIST_CALLED(pdhist);
250 1.1 mrg /*
251 1.1 mrg * first, init the swap list, its counter, and its lock.
252 1.1 mrg * then get a handle on the vnode for /dev/drum by using
253 1.1 mrg * its dev_t number ("swapdev", from MD conf.c).
254 1.1 mrg */
255 1.1 mrg
256 1.1 mrg LIST_INIT(&swap_priority);
257 1.1 mrg uvmexp.nswapdev = 0;
258 1.1 mrg lockinit(&swap_syscall_lock, PVM, "swapsys", 0, 0);
259 1.26 chs simple_lock_init(&uvm.swap_data_lock);
260 1.12 pk
261 1.1 mrg if (bdevvp(swapdev, &swapdev_vp))
262 1.1 mrg panic("uvm_swap_init: can't get vnode for swap device");
263 1.1 mrg
264 1.1 mrg /*
265 1.1 mrg * create swap block resource map to map /dev/drum. the range
266 1.1 mrg * from 1 to INT_MAX allows 2 gigablocks of swap space. note
267 1.51 chs * that block 0 is reserved (used to indicate an allocation
268 1.1 mrg * failure, or no allocation).
269 1.1 mrg */
270 1.1 mrg swapmap = extent_create("swapmap", 1, INT_MAX,
271 1.1 mrg M_VMSWAP, 0, 0, EX_NOWAIT);
272 1.1 mrg if (swapmap == 0)
273 1.1 mrg panic("uvm_swap_init: extent_create failed");
274 1.1 mrg
275 1.1 mrg /*
276 1.41 chs * allocate pools for structures used for swapping to files.
277 1.1 mrg */
278 1.1 mrg
279 1.49 thorpej pool_init(&vndxfer_pool, sizeof(struct vndxfer), 0, 0, 0,
280 1.49 thorpej "swp vnx", 0, NULL, NULL, 0);
281 1.49 thorpej
282 1.49 thorpej pool_init(&vndbuf_pool, sizeof(struct vndbuf), 0, 0, 0,
283 1.49 thorpej "swp vnd", 0, NULL, NULL, 0);
284 1.49 thorpej
285 1.1 mrg /*
286 1.1 mrg * done!
287 1.1 mrg */
288 1.1 mrg UVMHIST_LOG(pdhist, "<- done", 0, 0, 0, 0);
289 1.1 mrg }
290 1.1 mrg
291 1.1 mrg /*
292 1.1 mrg * swaplist functions: functions that operate on the list of swap
293 1.1 mrg * devices on the system.
294 1.1 mrg */
295 1.1 mrg
296 1.1 mrg /*
297 1.1 mrg * swaplist_insert: insert swap device "sdp" into the global list
298 1.1 mrg *
299 1.26 chs * => caller must hold both swap_syscall_lock and uvm.swap_data_lock
300 1.1 mrg * => caller must provide a newly malloc'd swappri structure (we will
301 1.1 mrg * FREE it if we don't need it... this is to prevent malloc from blocking
302 1.1 mrg * here while adding swap)
303 1.1 mrg */
304 1.1 mrg static void
305 1.1 mrg swaplist_insert(sdp, newspp, priority)
306 1.1 mrg struct swapdev *sdp;
307 1.1 mrg struct swappri *newspp;
308 1.1 mrg int priority;
309 1.1 mrg {
310 1.1 mrg struct swappri *spp, *pspp;
311 1.1 mrg UVMHIST_FUNC("swaplist_insert"); UVMHIST_CALLED(pdhist);
312 1.1 mrg
313 1.1 mrg /*
314 1.1 mrg * find entry at or after which to insert the new device.
315 1.1 mrg */
316 1.32 chs for (pspp = NULL, spp = LIST_FIRST(&swap_priority); spp != NULL;
317 1.32 chs spp = LIST_NEXT(spp, spi_swappri)) {
318 1.1 mrg if (priority <= spp->spi_priority)
319 1.1 mrg break;
320 1.1 mrg pspp = spp;
321 1.1 mrg }
322 1.1 mrg
323 1.1 mrg /*
324 1.1 mrg * new priority?
325 1.1 mrg */
326 1.1 mrg if (spp == NULL || spp->spi_priority != priority) {
327 1.1 mrg spp = newspp; /* use newspp! */
328 1.32 chs UVMHIST_LOG(pdhist, "created new swappri = %d",
329 1.32 chs priority, 0, 0, 0);
330 1.1 mrg
331 1.1 mrg spp->spi_priority = priority;
332 1.1 mrg CIRCLEQ_INIT(&spp->spi_swapdev);
333 1.1 mrg
334 1.1 mrg if (pspp)
335 1.1 mrg LIST_INSERT_AFTER(pspp, spp, spi_swappri);
336 1.1 mrg else
337 1.1 mrg LIST_INSERT_HEAD(&swap_priority, spp, spi_swappri);
338 1.1 mrg } else {
339 1.1 mrg /* we don't need a new priority structure, free it */
340 1.1 mrg FREE(newspp, M_VMSWAP);
341 1.1 mrg }
342 1.1 mrg
343 1.1 mrg /*
344 1.1 mrg * priority found (or created). now insert on the priority's
345 1.1 mrg * circleq list and bump the total number of swapdevs.
346 1.1 mrg */
347 1.1 mrg sdp->swd_priority = priority;
348 1.1 mrg CIRCLEQ_INSERT_TAIL(&spp->spi_swapdev, sdp, swd_next);
349 1.1 mrg uvmexp.nswapdev++;
350 1.1 mrg }
351 1.1 mrg
352 1.1 mrg /*
353 1.1 mrg * swaplist_find: find and optionally remove a swap device from the
354 1.1 mrg * global list.
355 1.1 mrg *
356 1.26 chs * => caller must hold both swap_syscall_lock and uvm.swap_data_lock
357 1.1 mrg * => we return the swapdev we found (and removed)
358 1.1 mrg */
359 1.1 mrg static struct swapdev *
360 1.1 mrg swaplist_find(vp, remove)
361 1.1 mrg struct vnode *vp;
362 1.1 mrg boolean_t remove;
363 1.1 mrg {
364 1.1 mrg struct swapdev *sdp;
365 1.1 mrg struct swappri *spp;
366 1.1 mrg
367 1.1 mrg /*
368 1.1 mrg * search the lists for the requested vp
369 1.1 mrg */
370 1.32 chs for (spp = LIST_FIRST(&swap_priority); spp != NULL;
371 1.32 chs spp = LIST_NEXT(spp, spi_swappri)) {
372 1.32 chs for (sdp = CIRCLEQ_FIRST(&spp->spi_swapdev);
373 1.1 mrg sdp != (void *)&spp->spi_swapdev;
374 1.32 chs sdp = CIRCLEQ_NEXT(sdp, swd_next))
375 1.1 mrg if (sdp->swd_vp == vp) {
376 1.1 mrg if (remove) {
377 1.1 mrg CIRCLEQ_REMOVE(&spp->spi_swapdev,
378 1.1 mrg sdp, swd_next);
379 1.1 mrg uvmexp.nswapdev--;
380 1.1 mrg }
381 1.1 mrg return(sdp);
382 1.1 mrg }
383 1.1 mrg }
384 1.1 mrg return (NULL);
385 1.1 mrg }
386 1.1 mrg
387 1.1 mrg
388 1.1 mrg /*
389 1.1 mrg * swaplist_trim: scan priority list for empty priority entries and kill
390 1.1 mrg * them.
391 1.1 mrg *
392 1.26 chs * => caller must hold both swap_syscall_lock and uvm.swap_data_lock
393 1.1 mrg */
394 1.1 mrg static void
395 1.1 mrg swaplist_trim()
396 1.1 mrg {
397 1.1 mrg struct swappri *spp, *nextspp;
398 1.1 mrg
399 1.32 chs for (spp = LIST_FIRST(&swap_priority); spp != NULL; spp = nextspp) {
400 1.32 chs nextspp = LIST_NEXT(spp, spi_swappri);
401 1.32 chs if (CIRCLEQ_FIRST(&spp->spi_swapdev) !=
402 1.32 chs (void *)&spp->spi_swapdev)
403 1.1 mrg continue;
404 1.1 mrg LIST_REMOVE(spp, spi_swappri);
405 1.32 chs free(spp, M_VMSWAP);
406 1.1 mrg }
407 1.1 mrg }
408 1.1 mrg
409 1.1 mrg /*
410 1.1 mrg * swapdrum_getsdp: given a page offset in /dev/drum, convert it back
411 1.1 mrg * to the "swapdev" that maps that section of the drum.
412 1.1 mrg *
413 1.1 mrg * => each swapdev takes one big contig chunk of the drum
414 1.26 chs * => caller must hold uvm.swap_data_lock
415 1.1 mrg */
416 1.1 mrg static struct swapdev *
417 1.1 mrg swapdrum_getsdp(pgno)
418 1.1 mrg int pgno;
419 1.1 mrg {
420 1.1 mrg struct swapdev *sdp;
421 1.1 mrg struct swappri *spp;
422 1.51 chs
423 1.32 chs for (spp = LIST_FIRST(&swap_priority); spp != NULL;
424 1.32 chs spp = LIST_NEXT(spp, spi_swappri))
425 1.32 chs for (sdp = CIRCLEQ_FIRST(&spp->spi_swapdev);
426 1.1 mrg sdp != (void *)&spp->spi_swapdev;
427 1.48 fvdl sdp = CIRCLEQ_NEXT(sdp, swd_next)) {
428 1.48 fvdl if (sdp->swd_flags & SWF_FAKE)
429 1.48 fvdl continue;
430 1.1 mrg if (pgno >= sdp->swd_drumoffset &&
431 1.1 mrg pgno < (sdp->swd_drumoffset + sdp->swd_drumsize)) {
432 1.1 mrg return sdp;
433 1.1 mrg }
434 1.48 fvdl }
435 1.1 mrg return NULL;
436 1.1 mrg }
437 1.1 mrg
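/*
 * a worked example of the drum addressing (assuming the usual 4k pages
 * and 512-byte DEV_BSIZE blocks, i.e. 8 disk blocks per page): a swapdev
 * with swd_drumoffset = 16 and swd_drumsize = 1024 covers drum pages
 * 16..1039.  an i/o to drum page 20 is page 4 of that device, which
 * swstrategy() below converts to device block btodb(4 << PAGE_SHIFT) = 32.
 */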
438 1.1 mrg
439 1.1 mrg /*
440 1.1 mrg * sys_swapctl: main entry point for swapctl(2) system call
441 1.1 mrg * [with two helper functions: swap_on and swap_off]
442 1.1 mrg */
443 1.1 mrg int
444 1.1 mrg sys_swapctl(p, v, retval)
445 1.1 mrg struct proc *p;
446 1.1 mrg void *v;
447 1.1 mrg register_t *retval;
448 1.1 mrg {
449 1.1 mrg struct sys_swapctl_args /* {
450 1.1 mrg syscallarg(int) cmd;
451 1.1 mrg syscallarg(void *) arg;
452 1.1 mrg syscallarg(int) misc;
453 1.1 mrg } */ *uap = (struct sys_swapctl_args *)v;
454 1.1 mrg struct vnode *vp;
455 1.1 mrg struct nameidata nd;
456 1.1 mrg struct swappri *spp;
457 1.1 mrg struct swapdev *sdp;
458 1.1 mrg struct swapent *sep;
459 1.16 mrg char userpath[PATH_MAX + 1];
460 1.18 enami size_t len;
461 1.18 enami int count, error, misc;
462 1.1 mrg int priority;
463 1.1 mrg UVMHIST_FUNC("sys_swapctl"); UVMHIST_CALLED(pdhist);
464 1.1 mrg
465 1.1 mrg misc = SCARG(uap, misc);
466 1.1 mrg
467 1.1 mrg /*
468 1.1 mrg * ensure serialized syscall access by grabbing the swap_syscall_lock
469 1.1 mrg */
470 1.32 chs lockmgr(&swap_syscall_lock, LK_EXCLUSIVE, NULL);
471 1.24 mrg
472 1.1 mrg /*
473 1.1 mrg * we handle the non-priv NSWAP and STATS requests first.
474 1.1 mrg *
475 1.51 chs * SWAP_NSWAP: return number of config'd swap devices
476 1.1 mrg * [can also be obtained with uvmexp sysctl]
477 1.1 mrg */
478 1.1 mrg if (SCARG(uap, cmd) == SWAP_NSWAP) {
479 1.8 mrg UVMHIST_LOG(pdhist, "<- done SWAP_NSWAP=%d", uvmexp.nswapdev,
480 1.8 mrg 0, 0, 0);
481 1.1 mrg *retval = uvmexp.nswapdev;
482 1.16 mrg error = 0;
483 1.16 mrg goto out;
484 1.1 mrg }
485 1.1 mrg
486 1.1 mrg /*
487 1.1 mrg * SWAP_STATS: get stats on current # of configured swap devs
488 1.1 mrg *
489 1.51 chs * note that the swap_priority list can't change as long
490 1.1 mrg * as we are holding the swap_syscall_lock. we don't want
491 1.51 chs * to grab the uvm.swap_data_lock because we may fault&sleep during
492 1.1 mrg * copyout() and we don't want to be holding that lock then!
493 1.1 mrg */
494 1.16 mrg if (SCARG(uap, cmd) == SWAP_STATS
495 1.16 mrg #if defined(COMPAT_13)
496 1.16 mrg || SCARG(uap, cmd) == SWAP_OSTATS
497 1.16 mrg #endif
498 1.16 mrg ) {
499 1.1 mrg sep = (struct swapent *)SCARG(uap, arg);
500 1.1 mrg count = 0;
501 1.1 mrg
502 1.32 chs for (spp = LIST_FIRST(&swap_priority); spp != NULL;
503 1.32 chs spp = LIST_NEXT(spp, spi_swappri)) {
504 1.32 chs for (sdp = CIRCLEQ_FIRST(&spp->spi_swapdev);
505 1.1 mrg sdp != (void *)&spp->spi_swapdev && misc-- > 0;
506 1.32 chs sdp = CIRCLEQ_NEXT(sdp, swd_next)) {
507 1.16 mrg /*
508 1.16 mrg * backwards compatibility for system call.
509 1.16 mrg * note that we use 'struct oswapent' as an
510 1.16 mrg * overlay into both 'struct swapdev' and
511 1.16 mrg * the userland 'struct swapent', as we
512 1.16 mrg * want to retain backwards compatibility
513 1.16 mrg * with NetBSD 1.3.
514 1.16 mrg */
515 1.51 chs sdp->swd_ose.ose_inuse =
516 1.44 enami btodb((u_int64_t)sdp->swd_npginuse <<
517 1.44 enami PAGE_SHIFT);
518 1.32 chs error = copyout(&sdp->swd_ose, sep,
519 1.32 chs sizeof(struct oswapent));
520 1.16 mrg
521 1.16 mrg /* now copy out the path if necessary */
522 1.16 mrg #if defined(COMPAT_13)
523 1.16 mrg if (error == 0 && SCARG(uap, cmd) == SWAP_STATS)
524 1.16 mrg #else
525 1.16 mrg if (error == 0)
526 1.16 mrg #endif
527 1.32 chs error = copyout(sdp->swd_path,
528 1.32 chs &sep->se_path, sdp->swd_pathlen);
529 1.16 mrg
530 1.16 mrg if (error)
531 1.16 mrg goto out;
532 1.1 mrg count++;
533 1.16 mrg #if defined(COMPAT_13)
534 1.16 mrg if (SCARG(uap, cmd) == SWAP_OSTATS)
535 1.50 ross sep = (struct swapent *)
536 1.50 ross ((struct oswapent *)sep + 1);
537 1.16 mrg else
538 1.16 mrg #endif
539 1.16 mrg sep++;
540 1.1 mrg }
541 1.1 mrg }
542 1.1 mrg
543 1.16 mrg UVMHIST_LOG(pdhist, "<- done SWAP_STATS", 0, 0, 0, 0);
544 1.1 mrg
545 1.1 mrg *retval = count;
546 1.16 mrg error = 0;
547 1.16 mrg goto out;
548 1.51 chs }
549 1.1 mrg
550 1.1 mrg /*
551 1.1 mrg * all other requests require superuser privs. verify.
552 1.1 mrg */
553 1.16 mrg if ((error = suser(p->p_ucred, &p->p_acflag)))
554 1.16 mrg goto out;
555 1.1 mrg
556 1.40 mrg if (SCARG(uap, cmd) == SWAP_GETDUMPDEV) {
557 1.40 mrg dev_t *devp = (dev_t *)SCARG(uap, arg);
558 1.40 mrg
559 1.40 mrg error = copyout(&dumpdev, devp, sizeof(dumpdev));
560 1.40 mrg goto out;
561 1.40 mrg }
562 1.40 mrg
563 1.1 mrg /*
564 1.1 mrg * at this point we expect a path name in arg. we will
565 1.1 mrg * use namei() to gain a vnode reference (vref), and lock
566 1.1 mrg * the vnode (VOP_LOCK).
567 1.1 mrg *
568 1.1 mrg * XXX: a NULL arg means use the root vnode pointer (e.g. for
569 1.16 mrg * miniroot)
570 1.1 mrg */
571 1.1 mrg if (SCARG(uap, arg) == NULL) {
572 1.1 mrg vp = rootvp; /* miniroot */
573 1.7 fvdl if (vget(vp, LK_EXCLUSIVE)) {
574 1.16 mrg error = EBUSY;
575 1.16 mrg goto out;
576 1.1 mrg }
577 1.16 mrg if (SCARG(uap, cmd) == SWAP_ON &&
578 1.16 mrg copystr("miniroot", userpath, sizeof userpath, &len))
579 1.16 mrg panic("swapctl: miniroot copy failed");
580 1.1 mrg } else {
581 1.16 mrg int space;
582 1.16 mrg char *where;
583 1.16 mrg
584 1.16 mrg if (SCARG(uap, cmd) == SWAP_ON) {
585 1.16 mrg if ((error = copyinstr(SCARG(uap, arg), userpath,
586 1.16 mrg sizeof userpath, &len)))
587 1.16 mrg goto out;
588 1.16 mrg space = UIO_SYSSPACE;
589 1.16 mrg where = userpath;
590 1.16 mrg } else {
591 1.16 mrg space = UIO_USERSPACE;
592 1.16 mrg where = (char *)SCARG(uap, arg);
593 1.1 mrg }
594 1.16 mrg NDINIT(&nd, LOOKUP, FOLLOW|LOCKLEAF, space, where, p);
595 1.16 mrg if ((error = namei(&nd)))
596 1.16 mrg goto out;
597 1.1 mrg vp = nd.ni_vp;
598 1.1 mrg }
599 1.1 mrg /* note: "vp" is referenced and locked */
600 1.1 mrg
601 1.1 mrg error = 0; /* assume no error */
602 1.1 mrg switch(SCARG(uap, cmd)) {
603 1.40 mrg
604 1.24 mrg case SWAP_DUMPDEV:
605 1.24 mrg if (vp->v_type != VBLK) {
606 1.24 mrg error = ENOTBLK;
607 1.45 pk break;
608 1.24 mrg }
609 1.24 mrg dumpdev = vp->v_rdev;
610 1.24 mrg break;
611 1.24 mrg
612 1.1 mrg case SWAP_CTL:
613 1.1 mrg /*
614 1.1 mrg * get new priority, remove old entry (if any) and then
615 1.1 mrg * reinsert it in the correct place. finally, prune out
616 1.1 mrg * any empty priority structures.
617 1.1 mrg */
618 1.1 mrg priority = SCARG(uap, misc);
619 1.32 chs spp = malloc(sizeof *spp, M_VMSWAP, M_WAITOK);
620 1.26 chs simple_lock(&uvm.swap_data_lock);
621 1.1 mrg if ((sdp = swaplist_find(vp, 1)) == NULL) {
622 1.1 mrg error = ENOENT;
623 1.1 mrg } else {
624 1.1 mrg swaplist_insert(sdp, spp, priority);
625 1.1 mrg swaplist_trim();
626 1.1 mrg }
627 1.26 chs simple_unlock(&uvm.swap_data_lock);
628 1.1 mrg if (error)
629 1.1 mrg free(spp, M_VMSWAP);
630 1.1 mrg break;
631 1.1 mrg
632 1.1 mrg case SWAP_ON:
633 1.32 chs
634 1.1 mrg /*
635 1.1 mrg * check for duplicates. if none found, then insert a
636 1.1 mrg * dummy entry on the list to prevent someone else from
637 1.1 mrg * trying to enable this device while we are working on
638 1.1 mrg * it.
639 1.1 mrg */
640 1.32 chs
641 1.1 mrg priority = SCARG(uap, misc);
642 1.48 fvdl sdp = malloc(sizeof *sdp, M_VMSWAP, M_WAITOK);
643 1.48 fvdl spp = malloc(sizeof *spp, M_VMSWAP, M_WAITOK);
644 1.26 chs simple_lock(&uvm.swap_data_lock);
645 1.48 fvdl if (swaplist_find(vp, 0) != NULL) {
646 1.1 mrg error = EBUSY;
647 1.26 chs simple_unlock(&uvm.swap_data_lock);
648 1.48 fvdl free(sdp, M_VMSWAP);
649 1.48 fvdl free(spp, M_VMSWAP);
650 1.16 mrg break;
651 1.1 mrg }
652 1.14 perry memset(sdp, 0, sizeof(*sdp));
653 1.1 mrg sdp->swd_flags = SWF_FAKE; /* placeholder only */
654 1.1 mrg sdp->swd_vp = vp;
655 1.1 mrg sdp->swd_dev = (vp->v_type == VBLK) ? vp->v_rdev : NODEV;
656 1.33 thorpej BUFQ_INIT(&sdp->swd_tab);
657 1.32 chs
658 1.1 mrg /*
659 1.1 mrg * XXX Is NFS elaboration necessary?
660 1.1 mrg */
661 1.32 chs if (vp->v_type == VREG) {
662 1.1 mrg sdp->swd_cred = crdup(p->p_ucred);
663 1.32 chs }
664 1.32 chs
665 1.1 mrg swaplist_insert(sdp, spp, priority);
666 1.26 chs simple_unlock(&uvm.swap_data_lock);
667 1.1 mrg
668 1.16 mrg sdp->swd_pathlen = len;
669 1.16 mrg sdp->swd_path = malloc(sdp->swd_pathlen, M_VMSWAP, M_WAITOK);
670 1.19 pk if (copystr(userpath, sdp->swd_path, sdp->swd_pathlen, 0) != 0)
671 1.19 pk panic("swapctl: copystr");
672 1.32 chs
673 1.1 mrg /*
674 1.1 mrg * we've now got a FAKE placeholder in the swap list.
675 1.1 mrg * now attempt to enable swap on it. if we fail, undo
676 1.1 mrg * what we've done and kill the fake entry we just inserted.
677 1.1 mrg * if swap_on is a success, it will clear the SWF_FAKE flag
678 1.1 mrg */
679 1.32 chs
680 1.1 mrg if ((error = swap_on(p, sdp)) != 0) {
681 1.26 chs simple_lock(&uvm.swap_data_lock);
682 1.8 mrg (void) swaplist_find(vp, 1); /* kill fake entry */
683 1.1 mrg swaplist_trim();
684 1.26 chs simple_unlock(&uvm.swap_data_lock);
685 1.32 chs if (vp->v_type == VREG) {
686 1.1 mrg crfree(sdp->swd_cred);
687 1.32 chs }
688 1.19 pk free(sdp->swd_path, M_VMSWAP);
689 1.32 chs free(sdp, M_VMSWAP);
690 1.1 mrg break;
691 1.1 mrg }
692 1.1 mrg break;
693 1.1 mrg
694 1.1 mrg case SWAP_OFF:
695 1.26 chs simple_lock(&uvm.swap_data_lock);
696 1.1 mrg if ((sdp = swaplist_find(vp, 0)) == NULL) {
697 1.26 chs simple_unlock(&uvm.swap_data_lock);
698 1.1 mrg error = ENXIO;
699 1.1 mrg break;
700 1.1 mrg }
701 1.32 chs
702 1.1 mrg /*
703 1.1 mrg * If a device isn't in use or enabled, we
704 1.1 mrg * can't stop swapping from it (again).
705 1.1 mrg */
706 1.1 mrg if ((sdp->swd_flags & (SWF_INUSE|SWF_ENABLE)) == 0) {
707 1.26 chs simple_unlock(&uvm.swap_data_lock);
708 1.1 mrg error = EBUSY;
709 1.16 mrg break;
710 1.1 mrg }
711 1.1 mrg
712 1.1 mrg /*
713 1.32 chs * do the real work.
714 1.1 mrg */
715 1.45 pk error = swap_off(p, sdp);
716 1.1 mrg break;
717 1.1 mrg
718 1.1 mrg default:
719 1.1 mrg error = EINVAL;
720 1.1 mrg }
721 1.1 mrg
722 1.1 mrg /*
723 1.39 chs * done! release the ref gained by namei() and unlock.
724 1.1 mrg */
725 1.1 mrg vput(vp);
726 1.39 chs
727 1.16 mrg out:
728 1.32 chs lockmgr(&swap_syscall_lock, LK_RELEASE, NULL);
729 1.1 mrg
730 1.1 mrg UVMHIST_LOG(pdhist, "<- done! error=%d", error, 0, 0, 0);
731 1.1 mrg return (error);
732 1.1 mrg }
733 1.1 mrg
734 1.1 mrg /*
735 1.1 mrg * swap_on: attempt to enable a swapdev for swapping. note that the
736 1.1 mrg * swapdev is already on the global list, but disabled (marked
737 1.1 mrg * SWF_FAKE).
738 1.1 mrg *
739 1.1 mrg * => we avoid the start of the disk (to protect disk labels)
740 1.1 mrg * => we also avoid the miniroot, if we are swapping to root.
741 1.26 chs * => caller should leave uvm.swap_data_lock unlocked, we may lock it
742 1.1 mrg * if needed.
743 1.1 mrg */
744 1.1 mrg static int
745 1.1 mrg swap_on(p, sdp)
746 1.1 mrg struct proc *p;
747 1.1 mrg struct swapdev *sdp;
748 1.1 mrg {
749 1.1 mrg static int count = 0; /* static */
750 1.1 mrg struct vnode *vp;
751 1.1 mrg int error, npages, nblocks, size;
752 1.1 mrg long addr;
753 1.48 fvdl u_long result;
754 1.1 mrg struct vattr va;
755 1.1 mrg #ifdef NFS
756 1.1 mrg extern int (**nfsv2_vnodeop_p) __P((void *));
757 1.1 mrg #endif /* NFS */
758 1.1 mrg dev_t dev;
759 1.1 mrg UVMHIST_FUNC("swap_on"); UVMHIST_CALLED(pdhist);
760 1.1 mrg
761 1.1 mrg /*
762 1.1 mrg * we want to enable swapping on sdp. the swd_vp contains
763 1.1 mrg * the vnode we want (locked and ref'd), and the swd_dev
764 1.1 mrg * contains the dev_t of the file, if it is a block device.
765 1.1 mrg */
766 1.1 mrg
767 1.1 mrg vp = sdp->swd_vp;
768 1.1 mrg dev = sdp->swd_dev;
769 1.1 mrg
770 1.1 mrg /*
771 1.1 mrg * open the swap file (mostly useful for block device files to
772 1.1 mrg * let device driver know what is up).
773 1.1 mrg *
774 1.1 mrg * we skip the open/close for root on swap because the root
775 1.1 mrg * has already been opened when root was mounted (mountroot).
776 1.1 mrg */
777 1.1 mrg if (vp != rootvp) {
778 1.1 mrg if ((error = VOP_OPEN(vp, FREAD|FWRITE, p->p_ucred, p)))
779 1.1 mrg return (error);
780 1.1 mrg }
781 1.1 mrg
782 1.1 mrg /* XXX this only works for block devices */
783 1.1 mrg UVMHIST_LOG(pdhist, " dev=%d, major(dev)=%d", dev, major(dev), 0,0);
784 1.1 mrg
785 1.1 mrg /*
786 1.1 mrg * we now need to determine the size of the swap area. for
787 1.1 mrg * block specials we can call the d_psize function.
788 1.1 mrg * for normal files, we must stat [get attrs].
789 1.1 mrg *
790 1.1 mrg * we put the result in nblks.
791 1.1 mrg * for normal files, we also want the filesystem block size
792 1.1 mrg * (which we get with statfs).
793 1.1 mrg */
794 1.1 mrg switch (vp->v_type) {
795 1.1 mrg case VBLK:
796 1.1 mrg if (bdevsw[major(dev)].d_psize == 0 ||
797 1.1 mrg (nblocks = (*bdevsw[major(dev)].d_psize)(dev)) == -1) {
798 1.1 mrg error = ENXIO;
799 1.1 mrg goto bad;
800 1.1 mrg }
801 1.1 mrg break;
802 1.1 mrg
803 1.1 mrg case VREG:
804 1.1 mrg if ((error = VOP_GETATTR(vp, &va, p->p_ucred, p)))
805 1.1 mrg goto bad;
806 1.1 mrg nblocks = (int)btodb(va.va_size);
807 1.1 mrg if ((error =
808 1.1 mrg VFS_STATFS(vp->v_mount, &vp->v_mount->mnt_stat, p)) != 0)
809 1.1 mrg goto bad;
810 1.1 mrg
811 1.1 mrg sdp->swd_bsize = vp->v_mount->mnt_stat.f_iosize;
812 1.1 mrg /*
813 1.1 mrg * limit the max # of outstanding I/O requests we issue
814 1.1 mrg * at any one time. take it easy on NFS servers.
815 1.1 mrg */
816 1.1 mrg #ifdef NFS
817 1.1 mrg if (vp->v_op == nfsv2_vnodeop_p)
818 1.1 mrg sdp->swd_maxactive = 2; /* XXX */
819 1.1 mrg else
820 1.1 mrg #endif /* NFS */
821 1.1 mrg sdp->swd_maxactive = 8; /* XXX */
822 1.1 mrg break;
823 1.1 mrg
824 1.1 mrg default:
825 1.1 mrg error = ENXIO;
826 1.1 mrg goto bad;
827 1.1 mrg }
828 1.1 mrg
829 1.1 mrg /*
830 1.1 mrg * save nblocks in a safe place and convert to pages.
831 1.1 mrg */
832 1.1 mrg
833 1.16 mrg sdp->swd_ose.ose_nblks = nblocks;
834 1.20 chs npages = dbtob((u_int64_t)nblocks) >> PAGE_SHIFT;
835 1.1 mrg
836 1.1 mrg /*
837 1.1 mrg * for block special files, we want to make sure that we leave
838 1.1 mrg * the disklabel and bootblocks alone, so we arrange to skip
839 1.32 chs * over them (arbitrarily choosing to skip PAGE_SIZE bytes).
840 1.1 mrg * note that because of this the "size" can be less than the
841 1.1 mrg * actual number of blocks on the device.
842 1.1 mrg */
843 1.1 mrg if (vp->v_type == VBLK) {
844 1.1 mrg /* we use pages 1 to (size - 1) [inclusive] */
845 1.1 mrg size = npages - 1;
846 1.1 mrg addr = 1;
847 1.1 mrg } else {
848 1.1 mrg /* we use pages 0 to (size - 1) [inclusive] */
849 1.1 mrg size = npages;
850 1.1 mrg addr = 0;
851 1.1 mrg }
852 1.1 mrg
853 1.1 mrg /*
854 1.1 mrg * make sure we have enough blocks for a reasonable sized swap
855 1.1 mrg * area. we want at least one page.
856 1.1 mrg */
857 1.1 mrg
858 1.1 mrg if (size < 1) {
859 1.1 mrg UVMHIST_LOG(pdhist, " size <= 1!!", 0, 0, 0, 0);
860 1.1 mrg error = EINVAL;
861 1.1 mrg goto bad;
862 1.1 mrg }
863 1.1 mrg
864 1.1 mrg UVMHIST_LOG(pdhist, " dev=%x: size=%d addr=%ld\n", dev, size, addr, 0);
865 1.1 mrg
866 1.1 mrg /*
867 1.1 mrg * now we need to allocate an extent to manage this swap device
868 1.1 mrg */
869 1.42 enami snprintf(sdp->swd_exname, sizeof(sdp->swd_exname), "swap0x%04x",
870 1.42 enami count++);
871 1.1 mrg
872 1.1 mrg /* note that extent_create's 3rd arg is inclusive, thus "- 1" */
873 1.42 enami sdp->swd_ex = extent_create(sdp->swd_exname, 0, npages - 1, M_VMSWAP,
874 1.12 pk 0, 0, EX_WAITOK);
875 1.1 mrg /* allocate the `saved' region from the extent so it won't be used */
876 1.1 mrg if (addr) {
877 1.1 mrg if (extent_alloc_region(sdp->swd_ex, 0, addr, EX_WAITOK))
878 1.1 mrg panic("disklabel region");
879 1.1 mrg }
880 1.1 mrg
881 1.1 mrg /*
882 1.51 chs * if the vnode we are swapping to is the root vnode
883 1.1 mrg * (i.e. we are swapping to the miniroot) then we want
884 1.51 chs * to make sure we don't overwrite it. do a statfs to
885 1.1 mrg * find its size and skip over it.
886 1.1 mrg */
887 1.1 mrg if (vp == rootvp) {
888 1.1 mrg struct mount *mp;
889 1.1 mrg struct statfs *sp;
890 1.1 mrg int rootblocks, rootpages;
891 1.1 mrg
892 1.1 mrg mp = rootvnode->v_mount;
893 1.1 mrg sp = &mp->mnt_stat;
894 1.1 mrg rootblocks = sp->f_blocks * btodb(sp->f_bsize);
895 1.20 chs rootpages = round_page(dbtob(rootblocks)) >> PAGE_SHIFT;
896 1.32 chs if (rootpages > size)
897 1.1 mrg panic("swap_on: miniroot larger than swap?");
898 1.1 mrg
899 1.51 chs if (extent_alloc_region(sdp->swd_ex, addr,
900 1.1 mrg rootpages, EX_WAITOK))
901 1.1 mrg panic("swap_on: unable to preserve miniroot");
902 1.1 mrg
903 1.32 chs size -= rootpages;
904 1.1 mrg printf("Preserved %d pages of miniroot ", rootpages);
905 1.32 chs printf("leaving %d pages of swap\n", size);
906 1.1 mrg }
907 1.1 mrg
908 1.43 chs /*
909 1.43 chs * try to add anons to reflect the new swap space.
910 1.43 chs */
911 1.43 chs
912 1.43 chs error = uvm_anon_add(size);
913 1.43 chs if (error) {
914 1.43 chs goto bad;
915 1.43 chs }
916 1.43 chs
917 1.39 chs /*
918 1.39 chs * add a ref to vp to reflect usage as a swap device.
919 1.39 chs */
920 1.39 chs vref(vp);
921 1.39 chs
922 1.1 mrg /*
923 1.1 mrg * now add the new swapdev to the drum and enable.
924 1.1 mrg */
925 1.48 fvdl if (extent_alloc(swapmap, npages, EX_NOALIGN, EX_NOBOUNDARY,
926 1.48 fvdl EX_WAITOK, &result))
927 1.48 fvdl panic("swapdrum_add");
928 1.48 fvdl
929 1.48 fvdl sdp->swd_drumoffset = (int)result;
930 1.48 fvdl sdp->swd_drumsize = npages;
931 1.48 fvdl sdp->swd_npages = size;
932 1.26 chs simple_lock(&uvm.swap_data_lock);
933 1.1 mrg sdp->swd_flags &= ~SWF_FAKE; /* going live */
934 1.1 mrg sdp->swd_flags |= (SWF_INUSE|SWF_ENABLE);
935 1.32 chs uvmexp.swpages += size;
936 1.26 chs simple_unlock(&uvm.swap_data_lock);
937 1.1 mrg return (0);
938 1.1 mrg
939 1.1 mrg /*
940 1.43 chs * failure: clean up and return error.
941 1.1 mrg */
942 1.43 chs
943 1.43 chs bad:
944 1.43 chs if (sdp->swd_ex) {
945 1.43 chs extent_destroy(sdp->swd_ex);
946 1.43 chs }
947 1.43 chs if (vp != rootvp) {
948 1.1 mrg (void)VOP_CLOSE(vp, FREAD|FWRITE, p->p_ucred, p);
949 1.43 chs }
950 1.1 mrg return (error);
951 1.1 mrg }
952 1.1 mrg
953 1.1 mrg /*
954 1.1 mrg * swap_off: stop swapping on swapdev
955 1.1 mrg *
956 1.32 chs * => swap data should be locked, we will unlock.
957 1.1 mrg */
958 1.1 mrg static int
959 1.1 mrg swap_off(p, sdp)
960 1.1 mrg struct proc *p;
961 1.1 mrg struct swapdev *sdp;
962 1.1 mrg {
963 1.1 mrg UVMHIST_FUNC("swap_off"); UVMHIST_CALLED(pdhist);
964 1.32 chs UVMHIST_LOG(pdhist, " dev=%x", sdp->swd_dev,0,0,0);
965 1.1 mrg
966 1.32 chs /* disable the swap area being removed */
967 1.1 mrg sdp->swd_flags &= ~SWF_ENABLE;
968 1.32 chs simple_unlock(&uvm.swap_data_lock);
969 1.32 chs
970 1.32 chs /*
971 1.32 chs * the idea is to find all the pages that are paged out to this
972 1.32 chs * device, and page them all in. in uvm, swap-backed pageable
973 1.32 chs * memory can take two forms: aobjs and anons. call the
974 1.32 chs * swapoff hook for each subsystem to bring in pages.
975 1.32 chs */
976 1.1 mrg
977 1.32 chs if (uao_swap_off(sdp->swd_drumoffset,
978 1.32 chs sdp->swd_drumoffset + sdp->swd_drumsize) ||
979 1.32 chs anon_swap_off(sdp->swd_drumoffset,
980 1.32 chs sdp->swd_drumoffset + sdp->swd_drumsize)) {
981 1.51 chs
982 1.32 chs simple_lock(&uvm.swap_data_lock);
983 1.32 chs sdp->swd_flags |= SWF_ENABLE;
984 1.32 chs simple_unlock(&uvm.swap_data_lock);
985 1.32 chs return ENOMEM;
986 1.32 chs }
987 1.46 chs KASSERT(sdp->swd_npginuse == sdp->swd_npgbad);
988 1.1 mrg
989 1.1 mrg /*
990 1.39 chs * done with the vnode and saved creds.
991 1.39 chs * drop our ref on the vnode before calling VOP_CLOSE()
992 1.39 chs * so that spec_close() can tell if this is the last close.
993 1.1 mrg */
994 1.32 chs if (sdp->swd_vp->v_type == VREG) {
995 1.32 chs crfree(sdp->swd_cred);
996 1.32 chs }
997 1.39 chs vrele(sdp->swd_vp);
998 1.32 chs if (sdp->swd_vp != rootvp) {
999 1.32 chs (void) VOP_CLOSE(sdp->swd_vp, FREAD|FWRITE, p->p_ucred, p);
1000 1.32 chs }
1001 1.32 chs
1002 1.32 chs /* remove anons from the system */
1003 1.32 chs uvm_anon_remove(sdp->swd_npages);
1004 1.32 chs
1005 1.32 chs simple_lock(&uvm.swap_data_lock);
1006 1.32 chs uvmexp.swpages -= sdp->swd_npages;
1007 1.1 mrg
1008 1.32 chs if (swaplist_find(sdp->swd_vp, 1) == NULL)
1009 1.32 chs panic("swap_off: swapdev not in list\n");
1010 1.32 chs swaplist_trim();
1011 1.48 fvdl simple_unlock(&uvm.swap_data_lock);
1012 1.1 mrg
1013 1.32 chs /*
1014 1.32 chs * free all resources!
1015 1.32 chs */
1016 1.32 chs extent_free(swapmap, sdp->swd_drumoffset, sdp->swd_drumsize,
1017 1.32 chs EX_WAITOK);
1018 1.1 mrg extent_destroy(sdp->swd_ex);
1019 1.32 chs free(sdp, M_VMSWAP);
1020 1.1 mrg return (0);
1021 1.1 mrg }
1022 1.1 mrg
1023 1.1 mrg /*
1024 1.1 mrg * /dev/drum interface and i/o functions
1025 1.1 mrg */
1026 1.1 mrg
1027 1.1 mrg /*
1028 1.1 mrg * swread: the read function for the drum (just a call to physio)
1029 1.1 mrg */
1030 1.1 mrg /*ARGSUSED*/
1031 1.1 mrg int
1032 1.1 mrg swread(dev, uio, ioflag)
1033 1.1 mrg dev_t dev;
1034 1.1 mrg struct uio *uio;
1035 1.1 mrg int ioflag;
1036 1.1 mrg {
1037 1.1 mrg UVMHIST_FUNC("swread"); UVMHIST_CALLED(pdhist);
1038 1.1 mrg
1039 1.1 mrg UVMHIST_LOG(pdhist, " dev=%x offset=%qx", dev, uio->uio_offset, 0, 0);
1040 1.1 mrg return (physio(swstrategy, NULL, dev, B_READ, minphys, uio));
1041 1.1 mrg }
1042 1.1 mrg
1043 1.1 mrg /*
1044 1.1 mrg * swwrite: the write function for the drum (just a call to physio)
1045 1.1 mrg */
1046 1.1 mrg /*ARGSUSED*/
1047 1.1 mrg int
1048 1.1 mrg swwrite(dev, uio, ioflag)
1049 1.1 mrg dev_t dev;
1050 1.1 mrg struct uio *uio;
1051 1.1 mrg int ioflag;
1052 1.1 mrg {
1053 1.1 mrg UVMHIST_FUNC("swwrite"); UVMHIST_CALLED(pdhist);
1054 1.1 mrg
1055 1.1 mrg UVMHIST_LOG(pdhist, " dev=%x offset=%qx", dev, uio->uio_offset, 0, 0);
1056 1.1 mrg return (physio(swstrategy, NULL, dev, B_WRITE, minphys, uio));
1057 1.1 mrg }
1058 1.1 mrg
1059 1.1 mrg /*
1060 1.1 mrg * swstrategy: perform I/O on the drum
1061 1.1 mrg *
1062 1.1 mrg * => we must map the i/o request from the drum to the correct swapdev.
1063 1.1 mrg */
1064 1.1 mrg void
1065 1.1 mrg swstrategy(bp)
1066 1.1 mrg struct buf *bp;
1067 1.1 mrg {
1068 1.1 mrg struct swapdev *sdp;
1069 1.1 mrg struct vnode *vp;
1070 1.25 chs int s, pageno, bn;
1071 1.1 mrg UVMHIST_FUNC("swstrategy"); UVMHIST_CALLED(pdhist);
1072 1.1 mrg
1073 1.1 mrg /*
1074 1.1 mrg * convert block number to swapdev. note that swapdev can't
1075 1.1 mrg * be yanked out from under us because we are holding resources
1076 1.1 mrg * in it (i.e. the blocks we are doing I/O on).
1077 1.1 mrg */
1078 1.41 chs pageno = dbtob((int64_t)bp->b_blkno) >> PAGE_SHIFT;
1079 1.26 chs simple_lock(&uvm.swap_data_lock);
1080 1.1 mrg sdp = swapdrum_getsdp(pageno);
1081 1.26 chs simple_unlock(&uvm.swap_data_lock);
1082 1.1 mrg if (sdp == NULL) {
1083 1.1 mrg bp->b_error = EINVAL;
1084 1.1 mrg bp->b_flags |= B_ERROR;
1085 1.1 mrg biodone(bp);
1086 1.1 mrg UVMHIST_LOG(pdhist, " failed to get swap device", 0, 0, 0, 0);
1087 1.1 mrg return;
1088 1.1 mrg }
1089 1.1 mrg
1090 1.1 mrg /*
1091 1.1 mrg * convert drum page number to block number on this swapdev.
1092 1.1 mrg */
1093 1.1 mrg
1094 1.32 chs pageno -= sdp->swd_drumoffset; /* page # on swapdev */
1095 1.44 enami bn = btodb((u_int64_t)pageno << PAGE_SHIFT); /* convert to diskblock */
1096 1.1 mrg
1097 1.41 chs UVMHIST_LOG(pdhist, " %s: mapoff=%x bn=%x bcount=%ld",
1098 1.1 mrg ((bp->b_flags & B_READ) == 0) ? "write" : "read",
1099 1.1 mrg sdp->swd_drumoffset, bn, bp->b_bcount);
1100 1.1 mrg
1101 1.1 mrg /*
1102 1.1 mrg * for block devices we finish up here.
1103 1.32 chs * for regular files we have to do more work which we delegate
1104 1.1 mrg * to sw_reg_strategy().
1105 1.1 mrg */
1106 1.1 mrg
1107 1.1 mrg switch (sdp->swd_vp->v_type) {
1108 1.1 mrg default:
1109 1.1 mrg panic("swstrategy: vnode type 0x%x", sdp->swd_vp->v_type);
1110 1.32 chs
1111 1.1 mrg case VBLK:
1112 1.1 mrg
1113 1.1 mrg /*
1114 1.1 mrg * must convert "bp" from an I/O on /dev/drum to an I/O
1115 1.1 mrg * on the swapdev (sdp).
1116 1.1 mrg */
1117 1.25 chs s = splbio();
1118 1.1 mrg bp->b_blkno = bn; /* swapdev block number */
1119 1.1 mrg vp = sdp->swd_vp; /* swapdev vnode pointer */
1120 1.1 mrg bp->b_dev = sdp->swd_dev; /* swapdev dev_t */
1121 1.1 mrg VHOLD(vp); /* "hold" swapdev vp for i/o */
1122 1.1 mrg
1123 1.1 mrg /*
1124 1.1 mrg * if we are doing a write, we have to redirect the i/o on
1125 1.1 mrg * drum's v_numoutput counter to the swapdevs.
1126 1.1 mrg */
1127 1.1 mrg if ((bp->b_flags & B_READ) == 0) {
1128 1.1 mrg vwakeup(bp); /* kills one 'v_numoutput' on drum */
1129 1.1 mrg vp->v_numoutput++; /* put it on swapdev */
1130 1.1 mrg }
1131 1.1 mrg
1132 1.41 chs /*
1133 1.51 chs * disassociate buffer from /dev/drum vnode
1134 1.1 mrg * [could be null if buf was from physio]
1135 1.1 mrg */
1136 1.41 chs if (bp->b_vp != NULL)
1137 1.1 mrg brelvp(bp);
1138 1.1 mrg
1139 1.41 chs /*
1140 1.1 mrg * finally plug in swapdev vnode and start I/O
1141 1.1 mrg */
1142 1.1 mrg bp->b_vp = vp;
1143 1.25 chs splx(s);
1144 1.1 mrg VOP_STRATEGY(bp);
1145 1.1 mrg return;
1146 1.32 chs
1147 1.1 mrg case VREG:
1148 1.1 mrg /*
1149 1.32 chs * delegate to sw_reg_strategy function.
1150 1.1 mrg */
1151 1.1 mrg sw_reg_strategy(sdp, bp, bn);
1152 1.1 mrg return;
1153 1.1 mrg }
1154 1.1 mrg /* NOTREACHED */
1155 1.1 mrg }
1156 1.1 mrg
1157 1.1 mrg /*
1158 1.1 mrg * sw_reg_strategy: handle swap i/o to regular files
1159 1.1 mrg */
1160 1.1 mrg static void
1161 1.1 mrg sw_reg_strategy(sdp, bp, bn)
1162 1.1 mrg struct swapdev *sdp;
1163 1.1 mrg struct buf *bp;
1164 1.1 mrg int bn;
1165 1.1 mrg {
1166 1.1 mrg struct vnode *vp;
1167 1.1 mrg struct vndxfer *vnx;
1168 1.44 enami daddr_t nbn;
1169 1.1 mrg caddr_t addr;
1170 1.44 enami off_t byteoff;
1171 1.9 mrg int s, off, nra, error, sz, resid;
1172 1.1 mrg UVMHIST_FUNC("sw_reg_strategy"); UVMHIST_CALLED(pdhist);
1173 1.1 mrg
1174 1.1 mrg /*
1175 1.1 mrg * allocate a vndxfer head for this transfer and point it to
1176 1.1 mrg * our buffer.
1177 1.1 mrg */
1178 1.12 pk getvndxfer(vnx);
1179 1.1 mrg vnx->vx_flags = VX_BUSY;
1180 1.1 mrg vnx->vx_error = 0;
1181 1.1 mrg vnx->vx_pending = 0;
1182 1.1 mrg vnx->vx_bp = bp;
1183 1.1 mrg vnx->vx_sdp = sdp;
1184 1.1 mrg
1185 1.1 mrg /*
1186 1.1 mrg * setup for main loop where we read filesystem blocks into
1187 1.1 mrg * our buffer.
1188 1.1 mrg */
1189 1.1 mrg error = 0;
1190 1.1 mrg bp->b_resid = bp->b_bcount; /* nothing transferred yet! */
1191 1.1 mrg addr = bp->b_data; /* current position in buffer */
1192 1.44 enami byteoff = dbtob((u_int64_t)bn);
1193 1.1 mrg
1194 1.1 mrg for (resid = bp->b_resid; resid; resid -= sz) {
1195 1.1 mrg struct vndbuf *nbp;
1196 1.1 mrg
1197 1.1 mrg /*
1198 1.1 mrg * translate byteoffset into block number. return values:
1199 1.1 mrg * vp = vnode of underlying device
1200 1.1 mrg * nbn = new block number (on underlying vnode dev)
1201 1.1 mrg * nra = num blocks we can read-ahead (excludes requested
1202 1.1 mrg * block)
1203 1.1 mrg */
1204 1.1 mrg nra = 0;
1205 1.1 mrg error = VOP_BMAP(sdp->swd_vp, byteoff / sdp->swd_bsize,
1206 1.1 mrg &vp, &nbn, &nra);
1207 1.1 mrg
1208 1.32 chs if (error == 0 && nbn == (daddr_t)-1) {
1209 1.51 chs /*
1210 1.23 marc * this used to just set error, but that doesn't
1211 1.23 marc * do the right thing. Instead, it causes random
1212 1.23 marc * memory errors. The panic() should remain until
1213 1.23 marc * this condition doesn't destabilize the system.
1214 1.23 marc */
1215 1.23 marc #if 1
1216 1.23 marc panic("sw_reg_strategy: swap to sparse file");
1217 1.23 marc #else
1218 1.1 mrg error = EIO; /* failure */
1219 1.23 marc #endif
1220 1.23 marc }
1221 1.1 mrg
1222 1.1 mrg /*
1223 1.1 mrg * punt if there was an error or a hole in the file.
1224 1.1 mrg * we must wait for any i/o ops we have already started
1225 1.1 mrg * to finish before returning.
1226 1.1 mrg *
1227 1.1 mrg * XXX we could deal with holes here but it would be
1228 1.1 mrg * a hassle (in the write case).
1229 1.1 mrg */
1230 1.1 mrg if (error) {
1231 1.1 mrg s = splbio();
1232 1.1 mrg vnx->vx_error = error; /* pass error up */
1233 1.1 mrg goto out;
1234 1.1 mrg }
1235 1.1 mrg
1236 1.1 mrg /*
1237 1.1 mrg * compute the size ("sz") of this transfer (in bytes).
1238 1.1 mrg */
1239 1.41 chs off = byteoff % sdp->swd_bsize;
1240 1.41 chs sz = (1 + nra) * sdp->swd_bsize - off;
1241 1.41 chs if (sz > resid)
1242 1.1 mrg sz = resid;
1243 1.1 mrg
1244 1.41 chs UVMHIST_LOG(pdhist, "sw_reg_strategy: "
1245 1.41 chs "vp %p/%p offset 0x%x/0x%x",
1246 1.41 chs sdp->swd_vp, vp, byteoff, nbn);
1247 1.1 mrg
1248 1.1 mrg /*
1249 1.1 mrg * now get a buf structure. note that the vb_buf is
1250 1.1 mrg * at the front of the nbp structure so that you can
1251 1.1 mrg * cast pointers between the two structures easily.
1252 1.1 mrg */
1253 1.12 pk getvndbuf(nbp);
1254 1.1 mrg nbp->vb_buf.b_flags = bp->b_flags | B_CALL;
1255 1.1 mrg nbp->vb_buf.b_bcount = sz;
1256 1.12 pk nbp->vb_buf.b_bufsize = sz;
1257 1.1 mrg nbp->vb_buf.b_error = 0;
1258 1.1 mrg nbp->vb_buf.b_data = addr;
1259 1.41 chs nbp->vb_buf.b_lblkno = 0;
1260 1.1 mrg nbp->vb_buf.b_blkno = nbn + btodb(off);
1261 1.34 thorpej nbp->vb_buf.b_rawblkno = nbp->vb_buf.b_blkno;
1262 1.1 mrg nbp->vb_buf.b_iodone = sw_reg_iodone;
1263 1.41 chs nbp->vb_buf.b_vp = NULL;
1264 1.30 fvdl LIST_INIT(&nbp->vb_buf.b_dep);
1265 1.1 mrg
1266 1.1 mrg nbp->vb_xfer = vnx; /* patch it back in to vnx */
1267 1.1 mrg
1268 1.1 mrg /*
1269 1.1 mrg * Just sort by block number
1270 1.1 mrg */
1271 1.1 mrg s = splbio();
1272 1.1 mrg if (vnx->vx_error != 0) {
1273 1.1 mrg putvndbuf(nbp);
1274 1.1 mrg goto out;
1275 1.1 mrg }
1276 1.1 mrg vnx->vx_pending++;
1277 1.1 mrg
1278 1.1 mrg /* assoc new buffer with underlying vnode */
1279 1.41 chs bgetvp(vp, &nbp->vb_buf);
1280 1.1 mrg
1281 1.1 mrg /* sort it in and start I/O if we are not over our limit */
1282 1.33 thorpej disksort_blkno(&sdp->swd_tab, &nbp->vb_buf);
1283 1.1 mrg sw_reg_start(sdp);
1284 1.1 mrg splx(s);
1285 1.1 mrg
1286 1.1 mrg /*
1287 1.1 mrg * advance to the next I/O
1288 1.1 mrg */
1289 1.9 mrg byteoff += sz;
1290 1.1 mrg addr += sz;
1291 1.1 mrg }
1292 1.1 mrg
1293 1.1 mrg s = splbio();
1294 1.1 mrg
1295 1.1 mrg out: /* Arrive here at splbio */
1296 1.1 mrg vnx->vx_flags &= ~VX_BUSY;
1297 1.1 mrg if (vnx->vx_pending == 0) {
1298 1.1 mrg if (vnx->vx_error != 0) {
1299 1.1 mrg bp->b_error = vnx->vx_error;
1300 1.1 mrg bp->b_flags |= B_ERROR;
1301 1.1 mrg }
1302 1.1 mrg putvndxfer(vnx);
1303 1.1 mrg biodone(bp);
1304 1.1 mrg }
1305 1.1 mrg splx(s);
1306 1.1 mrg }
1307 1.1 mrg
1308 1.1 mrg /*
1309 1.1 mrg * sw_reg_start: start an I/O request on the requested swapdev
1310 1.1 mrg *
1311 1.1 mrg * => reqs are sorted by disksort (above)
1312 1.1 mrg */
1313 1.1 mrg static void
1314 1.1 mrg sw_reg_start(sdp)
1315 1.1 mrg struct swapdev *sdp;
1316 1.1 mrg {
1317 1.1 mrg struct buf *bp;
1318 1.1 mrg UVMHIST_FUNC("sw_reg_start"); UVMHIST_CALLED(pdhist);
1319 1.1 mrg
1320 1.8 mrg /* recursion control */
1321 1.1 mrg if ((sdp->swd_flags & SWF_BUSY) != 0)
1322 1.1 mrg return;
1323 1.1 mrg
1324 1.1 mrg sdp->swd_flags |= SWF_BUSY;
1325 1.1 mrg
1326 1.33 thorpej while (sdp->swd_active < sdp->swd_maxactive) {
1327 1.33 thorpej bp = BUFQ_FIRST(&sdp->swd_tab);
1328 1.1 mrg if (bp == NULL)
1329 1.1 mrg break;
1330 1.33 thorpej BUFQ_REMOVE(&sdp->swd_tab, bp);
1331 1.33 thorpej sdp->swd_active++;
1332 1.1 mrg
1333 1.1 mrg UVMHIST_LOG(pdhist,
1334 1.1 mrg "sw_reg_start: bp %p vp %p blkno %p cnt %lx",
1335 1.1 mrg bp, bp->b_vp, bp->b_blkno, bp->b_bcount);
1336 1.1 mrg if ((bp->b_flags & B_READ) == 0)
1337 1.1 mrg bp->b_vp->v_numoutput++;
1338 1.41 chs
1339 1.1 mrg VOP_STRATEGY(bp);
1340 1.1 mrg }
1341 1.1 mrg sdp->swd_flags &= ~SWF_BUSY;
1342 1.1 mrg }
1343 1.1 mrg
1344 1.1 mrg /*
1345 1.1 mrg * sw_reg_iodone: one of our i/o's has completed and needs post-i/o cleanup
1346 1.1 mrg *
1347 1.1 mrg * => note that we can recover the vndbuf struct by casting the buf ptr
1348 1.1 mrg */
1349 1.1 mrg static void
1350 1.1 mrg sw_reg_iodone(bp)
1351 1.1 mrg struct buf *bp;
1352 1.1 mrg {
1353 1.1 mrg struct vndbuf *vbp = (struct vndbuf *) bp;
1354 1.1 mrg struct vndxfer *vnx = vbp->vb_xfer;
1355 1.1 mrg struct buf *pbp = vnx->vx_bp; /* parent buffer */
1356 1.1 mrg struct swapdev *sdp = vnx->vx_sdp;
1357 1.1 mrg int s, resid;
1358 1.1 mrg UVMHIST_FUNC("sw_reg_iodone"); UVMHIST_CALLED(pdhist);
1359 1.1 mrg
1360 1.1 mrg UVMHIST_LOG(pdhist, " vbp=%p vp=%p blkno=%x addr=%p",
1361 1.1 mrg vbp, vbp->vb_buf.b_vp, vbp->vb_buf.b_blkno, vbp->vb_buf.b_data);
1362 1.1 mrg UVMHIST_LOG(pdhist, " cnt=%lx resid=%lx",
1363 1.1 mrg vbp->vb_buf.b_bcount, vbp->vb_buf.b_resid, 0, 0);
1364 1.1 mrg
1365 1.1 mrg /*
1366 1.1 mrg * protect vbp at splbio and update.
1367 1.1 mrg */
1368 1.1 mrg
1369 1.1 mrg s = splbio();
1370 1.1 mrg resid = vbp->vb_buf.b_bcount - vbp->vb_buf.b_resid;
1371 1.1 mrg pbp->b_resid -= resid;
1372 1.1 mrg vnx->vx_pending--;
1373 1.1 mrg
1374 1.1 mrg if (vbp->vb_buf.b_error) {
1375 1.1 mrg UVMHIST_LOG(pdhist, " got error=%d !",
1376 1.1 mrg vbp->vb_buf.b_error, 0, 0, 0);
1377 1.1 mrg
1378 1.1 mrg /* pass error upward */
1379 1.1 mrg vnx->vx_error = vbp->vb_buf.b_error;
1380 1.35 chs }
1381 1.35 chs
1382 1.35 chs /*
1383 1.41 chs * disassociate this buffer from the vnode.
1384 1.35 chs */
1385 1.41 chs brelvp(&vbp->vb_buf);
1386 1.1 mrg
1387 1.1 mrg /*
1388 1.1 mrg * kill vbp structure
1389 1.1 mrg */
1390 1.1 mrg putvndbuf(vbp);
1391 1.1 mrg
1392 1.1 mrg /*
1393 1.1 mrg * wrap up this transaction if it has run to completion or, in
1394 1.1 mrg * case of an error, when all auxiliary buffers have returned.
1395 1.1 mrg */
1396 1.1 mrg if (vnx->vx_error != 0) {
1397 1.1 mrg /* pass error upward */
1398 1.1 mrg pbp->b_flags |= B_ERROR;
1399 1.1 mrg pbp->b_error = vnx->vx_error;
1400 1.1 mrg if ((vnx->vx_flags & VX_BUSY) == 0 && vnx->vx_pending == 0) {
1401 1.1 mrg putvndxfer(vnx);
1402 1.1 mrg biodone(pbp);
1403 1.1 mrg }
1404 1.11 pk } else if (pbp->b_resid == 0) {
1405 1.46 chs KASSERT(vnx->vx_pending == 0);
1406 1.1 mrg if ((vnx->vx_flags & VX_BUSY) == 0) {
1407 1.8 mrg UVMHIST_LOG(pdhist, " iodone error=%d !",
1408 1.8 mrg pbp, vnx->vx_error, 0, 0);
1409 1.8 mrg putvndxfer(vnx);
1410 1.1 mrg biodone(pbp);
1411 1.1 mrg }
1412 1.1 mrg }
1413 1.1 mrg
1414 1.1 mrg /*
1415 1.1 mrg * done! start next swapdev I/O if one is pending
1416 1.1 mrg */
1417 1.33 thorpej sdp->swd_active--;
1418 1.1 mrg sw_reg_start(sdp);
1419 1.1 mrg splx(s);
1420 1.1 mrg }
1421 1.1 mrg
1422 1.1 mrg
1423 1.1 mrg /*
1424 1.1 mrg * uvm_swap_alloc: allocate space on swap
1425 1.1 mrg *
1426 1.1 mrg * => allocation is done "round robin" down the priority list, as we
1427 1.1 mrg * allocate in a priority we "rotate" the circle queue.
1428 1.1 mrg * => space can be freed with uvm_swap_free
1429 1.1 mrg * => we return the page slot number in /dev/drum (0 == invalid slot)
1430 1.26 chs * => we lock uvm.swap_data_lock
1431 1.1 mrg * => XXXMRG: "LESSOK" INTERFACE NEEDED TO EXTENT SYSTEM
1432 1.1 mrg */
1433 1.1 mrg int
1434 1.1 mrg uvm_swap_alloc(nslots, lessok)
1435 1.1 mrg int *nslots; /* IN/OUT */
1436 1.1 mrg boolean_t lessok;
1437 1.1 mrg {
1438 1.1 mrg struct swapdev *sdp;
1439 1.1 mrg struct swappri *spp;
1440 1.1 mrg u_long result;
1441 1.1 mrg UVMHIST_FUNC("uvm_swap_alloc"); UVMHIST_CALLED(pdhist);
1442 1.1 mrg
1443 1.1 mrg /*
1444 1.1 mrg * no swap devices configured yet? definite failure.
1445 1.1 mrg */
1446 1.1 mrg if (uvmexp.nswapdev < 1)
1447 1.1 mrg return 0;
1448 1.51 chs
1449 1.1 mrg /*
1450 1.1 mrg * lock data lock, convert slots into blocks, and enter loop
1451 1.1 mrg */
1452 1.26 chs simple_lock(&uvm.swap_data_lock);
1453 1.1 mrg
1454 1.1 mrg ReTry: /* XXXMRG */
1455 1.32 chs for (spp = LIST_FIRST(&swap_priority); spp != NULL;
1456 1.32 chs spp = LIST_NEXT(spp, spi_swappri)) {
1457 1.32 chs for (sdp = CIRCLEQ_FIRST(&spp->spi_swapdev);
1458 1.1 mrg sdp != (void *)&spp->spi_swapdev;
1459 1.32 chs sdp = CIRCLEQ_NEXT(sdp,swd_next)) {
1460 1.1 mrg /* if it's not enabled, then we can't swap from it */
1461 1.1 mrg if ((sdp->swd_flags & SWF_ENABLE) == 0)
1462 1.1 mrg continue;
1463 1.1 mrg if (sdp->swd_npginuse + *nslots > sdp->swd_npages)
1464 1.1 mrg continue;
1465 1.1 mrg if (extent_alloc(sdp->swd_ex, *nslots, EX_NOALIGN,
1466 1.1 mrg EX_NOBOUNDARY, EX_MALLOCOK|EX_NOWAIT,
1467 1.1 mrg &result) != 0) {
1468 1.1 mrg continue;
1469 1.1 mrg }
1470 1.1 mrg
1471 1.1 mrg /*
1472 1.1 mrg * successful allocation! now rotate the circleq.
1473 1.1 mrg */
1474 1.1 mrg CIRCLEQ_REMOVE(&spp->spi_swapdev, sdp, swd_next);
1475 1.1 mrg CIRCLEQ_INSERT_TAIL(&spp->spi_swapdev, sdp, swd_next);
1476 1.1 mrg sdp->swd_npginuse += *nslots;
1477 1.1 mrg uvmexp.swpginuse += *nslots;
1478 1.26 chs simple_unlock(&uvm.swap_data_lock);
1479 1.1 mrg /* done! return drum slot number */
1480 1.1 mrg UVMHIST_LOG(pdhist,
1481 1.1 mrg "success! returning %d slots starting at %d",
1482 1.1 mrg *nslots, result + sdp->swd_drumoffset, 0, 0);
1483 1.1 mrg return(result + sdp->swd_drumoffset);
1484 1.1 mrg }
1485 1.1 mrg }
1486 1.1 mrg
1487 1.1 mrg /* XXXMRG: BEGIN HACK */
1488 1.1 mrg if (*nslots > 1 && lessok) {
1489 1.1 mrg *nslots = 1;
1490 1.1 mrg goto ReTry; /* XXXMRG: ugh! extent should support this for us */
1491 1.1 mrg }
1492 1.1 mrg /* XXXMRG: END HACK */
1493 1.1 mrg
1494 1.26 chs simple_unlock(&uvm.swap_data_lock);
1495 1.1 mrg return 0; /* failed */
1496 1.1 mrg }
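/*
 * a rough sketch of the slot lifecycle as seen by the rest of uvm
 * (the real callers are the anon and aobj pagers, not shown here):
 *
 *	slots = npages;
 *	slot = uvm_swap_alloc(&slots, TRUE);            (0 means no space)
 *	error = uvm_swap_put(slot, pps, slots, PGO_SYNCIO);
 *	...
 *	error = uvm_swap_get(pg, slot, PGO_SYNCIO);
 *	uvm_swap_free(slot, slots);
 */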
1497 1.1 mrg
1498 1.1 mrg /*
1499 1.32 chs * uvm_swap_markbad: keep track of swap ranges where we've had i/o errors
1500 1.32 chs *
1501 1.32 chs * => we lock uvm.swap_data_lock
1502 1.32 chs */
1503 1.32 chs void
1504 1.32 chs uvm_swap_markbad(startslot, nslots)
1505 1.32 chs int startslot;
1506 1.32 chs int nslots;
1507 1.32 chs {
1508 1.32 chs struct swapdev *sdp;
1509 1.32 chs UVMHIST_FUNC("uvm_swap_markbad"); UVMHIST_CALLED(pdhist);
1510 1.32 chs
1511 1.32 chs simple_lock(&uvm.swap_data_lock);
1512 1.32 chs sdp = swapdrum_getsdp(startslot);
1513 1.32 chs
1514 1.32 chs /*
1515 1.32 chs * we just keep track of how many pages have been marked bad
1516 1.32 chs * in this device, to make everything add up in swap_off().
1517 1.32 chs * we assume here that the range of slots will all be within
1518 1.32 chs * one swap device.
1519 1.32 chs */
1520 1.41 chs
1521 1.32 chs sdp->swd_npgbad += nslots;
1522 1.41 chs UVMHIST_LOG(pdhist, "now %d bad", sdp->swd_npgbad, 0,0,0);
1523 1.32 chs simple_unlock(&uvm.swap_data_lock);
1524 1.32 chs }
1525 1.32 chs
1526 1.32 chs /*
1527 1.1 mrg * uvm_swap_free: free swap slots
1528 1.1 mrg *
1529 1.1 mrg * => this can be all or part of an allocation made by uvm_swap_alloc
1530 1.26 chs * => we lock uvm.swap_data_lock
1531 1.1 mrg */
1532 1.1 mrg void
1533 1.1 mrg uvm_swap_free(startslot, nslots)
1534 1.1 mrg int startslot;
1535 1.1 mrg int nslots;
1536 1.1 mrg {
1537 1.1 mrg struct swapdev *sdp;
1538 1.1 mrg UVMHIST_FUNC("uvm_swap_free"); UVMHIST_CALLED(pdhist);
1539 1.1 mrg
1540 1.1 mrg UVMHIST_LOG(pdhist, "freeing %d slots starting at %d", nslots,
1541 1.1 mrg startslot, 0, 0);
1542 1.32 chs
1543 1.32 chs /*
1544 1.32 chs * ignore attempts to free the "bad" slot.
1545 1.32 chs */
1546 1.46 chs
1547 1.32 chs if (startslot == SWSLOT_BAD) {
1548 1.32 chs return;
1549 1.32 chs }
1550 1.32 chs
1551 1.1 mrg /*
1552 1.51 chs * convert drum slot offset back to sdp, free the blocks
1553 1.51 chs 	 * in the extent, and return.   must hold uvm.swap_data_lock to do the
1554 1.1 mrg * lookup and access the extent.
1555 1.1 mrg */
1556 1.46 chs
1557 1.26 chs simple_lock(&uvm.swap_data_lock);
1558 1.1 mrg sdp = swapdrum_getsdp(startslot);
1559 1.46 chs KASSERT(uvmexp.nswapdev >= 1);
1560 1.46 chs KASSERT(sdp != NULL);
1561 1.46 chs KASSERT(sdp->swd_npginuse >= nslots);
1562 1.12 pk if (extent_free(sdp->swd_ex, startslot - sdp->swd_drumoffset, nslots,
1563 1.32 chs EX_MALLOCOK|EX_NOWAIT) != 0) {
1564 1.32 chs printf("warning: resource shortage: %d pages of swap lost\n",
1565 1.12 pk nslots);
1566 1.32 chs }
1567 1.1 mrg sdp->swd_npginuse -= nslots;
1568 1.1 mrg uvmexp.swpginuse -= nslots;
1569 1.26 chs simple_unlock(&uvm.swap_data_lock);
1570 1.1 mrg }
1571 1.1 mrg
1572 1.1 mrg /*
1573 1.1 mrg * uvm_swap_put: put any number of pages into a contig place on swap
1574 1.1 mrg *
1575 1.1 mrg * => can be sync or async
1576 1.1 mrg * => XXXMRG: consider making it an inline or macro
1577 1.1 mrg */
1578 1.1 mrg int
1579 1.1 mrg uvm_swap_put(swslot, ppsp, npages, flags)
1580 1.1 mrg int swslot;
1581 1.1 mrg struct vm_page **ppsp;
1582 1.1 mrg int npages;
1583 1.1 mrg int flags;
1584 1.1 mrg {
1585 1.1 mrg int result;
1586 1.1 mrg
1587 1.1 mrg result = uvm_swap_io(ppsp, swslot, npages, B_WRITE |
1588 1.1 mrg ((flags & PGO_SYNCIO) ? 0 : B_ASYNC));
1589 1.1 mrg
1590 1.1 mrg return (result);
1591 1.1 mrg }
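
/*
 * illustrative sketch only (not lifted from the real callers elsewhere
 * in uvm): a hypothetical writer might pair uvm_swap_alloc() with
 * uvm_swap_put() along these lines:
 *
 *	nslots = npages;
 *	slot = uvm_swap_alloc(&nslots, FALSE);	(want all npages slots)
 *	if (slot == 0)
 *		(no swap space left: keep the pages in core)
 *	else if (uvm_swap_put(slot, pps, npages, PGO_SYNCIO) != 0)
 *		uvm_swap_markbad(slot, npages);	(i/o error on those slots)
 */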
1592 1.1 mrg
1593 1.1 mrg /*
1594 1.1 mrg * uvm_swap_get: get a single page from swap
1595 1.1 mrg *
1596 1.1 mrg * => usually a sync op (from fault)
1597 1.1 mrg * => XXXMRG: consider making it an inline or macro
1598 1.1 mrg */
1599 1.1 mrg int
1600 1.1 mrg uvm_swap_get(page, swslot, flags)
1601 1.1 mrg struct vm_page *page;
1602 1.1 mrg int swslot, flags;
1603 1.1 mrg {
1604 1.1 mrg int result;
1605 1.1 mrg
1606 1.1 mrg uvmexp.nswget++;
1607 1.46 chs KASSERT(flags & PGO_SYNCIO);
1608 1.32 chs if (swslot == SWSLOT_BAD) {
1609 1.47 chs return EIO;
1610 1.32 chs }
1611 1.32 chs
1612 1.26 chs /*
1613 1.26 chs * this page is (about to be) no longer only in swap.
1614 1.26 chs */
1615 1.47 chs
1616 1.26 chs simple_lock(&uvm.swap_data_lock);
1617 1.26 chs uvmexp.swpgonly--;
1618 1.26 chs simple_unlock(&uvm.swap_data_lock);
1619 1.26 chs
1620 1.51 chs result = uvm_swap_io(&page, swslot, 1, B_READ |
1621 1.1 mrg ((flags & PGO_SYNCIO) ? 0 : B_ASYNC));
1622 1.26 chs
1623 1.47 chs if (result != 0) {
1624 1.47 chs
1625 1.26 chs /*
1626 1.26 chs * oops, the read failed so it really is still only in swap.
1627 1.26 chs */
1628 1.47 chs
1629 1.26 chs simple_lock(&uvm.swap_data_lock);
1630 1.26 chs uvmexp.swpgonly++;
1631 1.26 chs simple_unlock(&uvm.swap_data_lock);
1632 1.26 chs }
1633 1.1 mrg
1634 1.1 mrg return (result);
1635 1.1 mrg }
1636 1.1 mrg
1637 1.1 mrg /*
1638 1.1 mrg * uvm_swap_io: do an i/o operation to swap
1639 1.1 mrg */
1640 1.1 mrg
1641 1.1 mrg static int
1642 1.1 mrg uvm_swap_io(pps, startslot, npages, flags)
1643 1.1 mrg struct vm_page **pps;
1644 1.1 mrg int startslot, npages, flags;
1645 1.1 mrg {
1646 1.1 mrg daddr_t startblk;
1647 1.1 mrg struct buf *bp;
1648 1.15 eeh vaddr_t kva;
1649 1.47 chs int error, s, mapinflags, pflag;
1650 1.41 chs boolean_t write, async;
1651 1.1 mrg UVMHIST_FUNC("uvm_swap_io"); UVMHIST_CALLED(pdhist);
1652 1.1 mrg
1653 1.1 mrg UVMHIST_LOG(pdhist, "<- called, startslot=%d, npages=%d, flags=%d",
1654 1.1 mrg startslot, npages, flags, 0);
1655 1.32 chs
1656 1.41 chs write = (flags & B_READ) == 0;
1657 1.41 chs async = (flags & B_ASYNC) != 0;
1658 1.41 chs
1659 1.1 mrg 	/*
1660 1.1 mrg 	 * convert starting drum slot to block number.  each slot is one
 	 * page, so the byte offset is startslot << PAGE_SHIFT (the cast
 	 * keeps the shift from overflowing for large swap areas), and
 	 * btodb() turns bytes into DEV_BSIZE blocks.
1661 1.1 mrg 	 */
1662 1.44 enami startblk = btodb((u_int64_t)startslot << PAGE_SHIFT);
1663 1.1 mrg
1664 1.1 mrg /*
1665 1.1 mrg * first, map the pages into the kernel (XXX: currently required
1666 1.41 chs * by buffer system).
1667 1.41 chs */
1668 1.41 chs
1669 1.41 chs mapinflags = !write ? UVMPAGER_MAPIN_READ : UVMPAGER_MAPIN_WRITE;
1670 1.41 chs if (!async)
1671 1.37 thorpej mapinflags |= UVMPAGER_MAPIN_WAITOK;
1672 1.41 chs kva = uvm_pagermapin(pps, npages, mapinflags);
1673 1.37 thorpej if (kva == 0)
1674 1.47 chs return (EAGAIN);
1675 1.1 mrg
1676 1.51 chs /*
1677 1.41 chs * now allocate a buf for the i/o.
1678 1.1 mrg * [make sure we don't put the pagedaemon to sleep...]
1679 1.1 mrg */
1680 1.1 mrg s = splbio();
1681 1.41 chs pflag = (async || curproc == uvm.pagedaemon_proc) ? 0 : PR_WAITOK;
1682 1.41 chs bp = pool_get(&bufpool, pflag);
1683 1.41 chs splx(s);
1684 1.1 mrg
1685 1.1 mrg /*
1686 1.41 chs 	 * if we failed to get a buf, undo the pager mapping and
 	 * return "try again"
1687 1.1 mrg 	 */
1688 1.41 chs 	if (bp == NULL) {
 		uvm_pagermapout(kva, npages);
1689 1.47 chs 		return (EAGAIN);
 	}
1690 1.1 mrg
1691 1.1 mrg /*
1692 1.1 mrg 	 * fill in the bp.   we currently route our i/o through
1693 1.1 mrg * /dev/drum's vnode [swapdev_vp].
1694 1.1 mrg */
1695 1.21 mycroft bp->b_flags = B_BUSY | B_NOCACHE | (flags & (B_READ|B_ASYNC));
1696 1.1 mrg bp->b_proc = &proc0; /* XXX */
1697 1.12 pk bp->b_vnbufs.le_next = NOLIST;
1698 1.1 mrg bp->b_data = (caddr_t)kva;
1699 1.1 mrg bp->b_blkno = startblk;
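	/*
	 * take a hold on swapdev_vp so the vnode can't be recycled
	 * while this buf references it.
	 */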
1700 1.25 chs s = splbio();
1701 1.1 mrg VHOLD(swapdev_vp);
1702 1.1 mrg bp->b_vp = swapdev_vp;
1703 1.25 chs splx(s);
1704 1.1 mrg /* XXXCDC: isn't swapdev_vp always a VCHR? */
1705 1.1 mrg /* XXXMRG: probably -- this is obviously something inherited... */
1706 1.1 mrg if (swapdev_vp->v_type == VBLK)
1707 1.1 mrg bp->b_dev = swapdev_vp->v_rdev;
1708 1.41 chs bp->b_bufsize = bp->b_bcount = npages << PAGE_SHIFT;
1709 1.30 fvdl LIST_INIT(&bp->b_dep);
1710 1.1 mrg
1711 1.51 chs /*
1712 1.41 chs * bump v_numoutput (counter of number of active outputs).
1713 1.1 mrg */
1714 1.41 chs if (write) {
1715 1.1 mrg s = splbio();
1716 1.1 mrg swapdev_vp->v_numoutput++;
1717 1.1 mrg splx(s);
1718 1.1 mrg }
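	/*
	 * (v_numoutput is dropped again via vwakeup() once the write
	 * completes, waking anyone waiting for the vnode's output to
	 * drain.)
	 */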
1719 1.1 mrg
1720 1.1 mrg /*
1721 1.41 chs * for async ops we must set up the iodone handler.
1722 1.1 mrg */
1723 1.41 chs if (async) {
1724 1.41 chs /* XXXUBC pagedaemon */
1725 1.41 chs bp->b_flags |= B_CALL | (curproc == uvm.pagedaemon_proc ?
1726 1.41 chs B_PDAEMON : 0);
1727 1.41 chs bp->b_iodone = uvm_aio_biodone;
1728 1.1 mrg UVMHIST_LOG(pdhist, "doing async!", 0, 0, 0, 0);
1729 1.1 mrg }
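	/*
	 * (in the async case, uvm_aio_biodone hands the finished buf to
	 * the aiodone daemon, which does the pager-unmap and buf cleanup
	 * that the sync path does below.)
	 */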
1730 1.1 mrg UVMHIST_LOG(pdhist,
1731 1.41 chs "about to start io: data = %p blkno = 0x%x, bcount = %ld",
1732 1.1 mrg bp->b_data, bp->b_blkno, bp->b_bcount, 0);
1733 1.1 mrg
1734 1.1 mrg /*
1735 1.1 mrg * now we start the I/O, and if async, return.
1736 1.1 mrg */
1737 1.1 mrg VOP_STRATEGY(bp);
1738 1.41 chs if (async)
1739 1.47 chs return 0;
1740 1.1 mrg
1741 1.1 mrg /*
1742 1.1 mrg * must be sync i/o. wait for it to finish
1743 1.1 mrg */
1744 1.47 chs error = biowait(bp);
1745 1.1 mrg
1746 1.1 mrg /*
1747 1.1 mrg * kill the pager mapping
1748 1.1 mrg */
1749 1.1 mrg uvm_pagermapout(kva, npages);
1750 1.1 mrg
1751 1.1 mrg /*
1752 1.41 chs * now dispose of the buf
1753 1.1 mrg */
1754 1.1 mrg s = splbio();
1755 1.1 mrg if (bp->b_vp)
1756 1.1 mrg brelvp(bp);
1757 1.41 chs if (write)
1758 1.41 chs vwakeup(bp);
1759 1.41 chs pool_put(&bufpool, bp);
1760 1.1 mrg splx(s);
1761 1.1 mrg
1762 1.1 mrg /*
1763 1.1 mrg * finally return.
1764 1.1 mrg */
1765 1.47 chs UVMHIST_LOG(pdhist, "<- done (sync) error=%d", error, 0, 0, 0);
1766 1.47 chs return (error);
1767 1.1 mrg }
1768