/*	$NetBSD: dksubr.c,v 1.77 2015/10/21 21:43:46 christos Exp $ */

/*-
 * Copyright (c) 1996, 1997, 1998, 1999, 2002, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe and Roland C. Dowdeswell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: dksubr.c,v 1.77 2015/10/21 21:43:46 christos Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/stat.h>
#include <sys/proc.h>
#include <sys/ioctl.h>
#include <sys/device.h>
#include <sys/disk.h>
#include <sys/disklabel.h>
#include <sys/buf.h>
#include <sys/bufq.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/namei.h>
#include <sys/module.h>
#include <sys/syslog.h>

#include <dev/dkvar.h>
#include <miscfs/specfs/specdev.h> /* for v_rdev */

int	dkdebug = 0;

#ifdef DEBUG
#define DKDB_FOLLOW	0x1
#define DKDB_INIT	0x2
#define DKDB_VNODE	0x4
#define DKDB_DUMP	0x8

#define IFDEBUG(x,y)		if (dkdebug & (x)) y
#define DPRINTF(x,y)		IFDEBUG(x, printf y)
#define DPRINTF_FOLLOW(y)	DPRINTF(DKDB_FOLLOW, y)
#else
#define IFDEBUG(x,y)
#define DPRINTF(x,y)
#define DPRINTF_FOLLOW(y)
#endif

#define DKF_READYFORDUMP	(DKF_INITED|DKF_TAKEDUMP)

static int dk_subr_modcmd(modcmd_t, void *);

#define DKLABELDEV(dev)	\
	(MAKEDISKDEV(major((dev)), DISKUNIT((dev)), RAW_PART))

static void	dk_makedisklabel(struct dk_softc *);
static int	dk_translate(struct dk_softc *, struct buf *);
static void	dk_done1(struct dk_softc *, struct buf *, bool);

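/*
 * dk_init: initialize the common dk_softc state for a driver instance
 * and hook the unit's name up to the generic disk structure.
 */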
void
dk_init(struct dk_softc *dksc, device_t dev, int dtype)
{

	memset(dksc, 0x0, sizeof(*dksc));
	dksc->sc_dtype = dtype;
	dksc->sc_dev = dev;

	strlcpy(dksc->sc_xname, device_xname(dev), DK_XNAME_SIZE);
	dksc->sc_dkdev.dk_name = dksc->sc_xname;
}

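/*
 * dk_attach: set up the I/O lock, mark the unit as ready for dumping
 * and register it as an entropy source.
 */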
void
dk_attach(struct dk_softc *dksc)
{
	mutex_init(&dksc->sc_iolock, MUTEX_DEFAULT, IPL_VM);
	dksc->sc_flags |= DKF_READYFORDUMP;
#ifdef DIAGNOSTIC
	dksc->sc_flags |= DKF_WARNLABEL | DKF_LABELSANITY;
#endif

	/* Attach the device into the rnd source list. */
	rnd_attach_source(&dksc->sc_rnd_source, dksc->sc_xname,
	    RND_TYPE_DISK, RND_FLAG_DEFAULT);
}

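/*
 * dk_detach: undo dk_attach; unhook the entropy source and destroy
 * the I/O lock.
 */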
void
dk_detach(struct dk_softc *dksc)
{
	/* Unhook the entropy source. */
	rnd_detach_source(&dksc->sc_rnd_source);

	dksc->sc_flags &= ~DKF_READYFORDUMP;
	mutex_destroy(&dksc->sc_iolock);
}

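/*
 * dk_open: generic open routine; validates the partition against the
 * in-core disklabel (reading it first if necessary) and records the
 * open in the disk's open masks.  Serialized by dk_openlock.
 */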
/* ARGSUSED */
int
dk_open(struct dk_softc *dksc, dev_t dev,
    int flags, int fmt, struct lwp *l)
{
	struct disklabel *lp = dksc->sc_dkdev.dk_label;
	int part = DISKPART(dev);
	int pmask = 1 << part;
	int ret = 0;
	struct disk *dk = &dksc->sc_dkdev;

	DPRINTF_FOLLOW(("%s(%s, %p, 0x%"PRIx64", 0x%x)\n", __func__,
	    dksc->sc_xname, dksc, dev, flags));

	mutex_enter(&dk->dk_openlock);

	/*
	 * If there are wedges, and this is not RAW_PART, then we
	 * need to fail.
	 */
	if (dk->dk_nwedges != 0 && part != RAW_PART) {
		ret = EBUSY;
		goto done;
	}

	/*
	 * If we're init'ed and there are no other open partitions then
	 * update the in-core disklabel.
	 */
	if ((dksc->sc_flags & DKF_INITED)) {
		if ((dksc->sc_flags & DKF_VLABEL) == 0) {
			dksc->sc_flags |= DKF_VLABEL;
			dk_getdisklabel(dksc, dev);
		}
	}

	/* Fail if we can't find the partition. */
	if (part != RAW_PART &&
	    ((dksc->sc_flags & DKF_VLABEL) == 0 ||
	     part >= lp->d_npartitions ||
	     lp->d_partitions[part].p_fstype == FS_UNUSED)) {
		ret = ENXIO;
		goto done;
	}

	/* Mark our unit as open. */
	switch (fmt) {
	case S_IFCHR:
		dk->dk_copenmask |= pmask;
		break;
	case S_IFBLK:
		dk->dk_bopenmask |= pmask;
		break;
	}

	dk->dk_openmask = dk->dk_copenmask | dk->dk_bopenmask;

done:
	mutex_exit(&dk->dk_openlock);
	return ret;
}

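/*
 * dk_close: generic close routine; clears the open masks and, on last
 * close, calls the driver's d_lastclose hook and invalidates the
 * in-core label unless DIOCKLABEL asked to keep it.
 */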
/* ARGSUSED */
int
dk_close(struct dk_softc *dksc, dev_t dev,
    int flags, int fmt, struct lwp *l)
{
	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
	int part = DISKPART(dev);
	int pmask = 1 << part;
	struct disk *dk = &dksc->sc_dkdev;

	DPRINTF_FOLLOW(("%s(%s, %p, 0x%"PRIx64", 0x%x)\n", __func__,
	    dksc->sc_xname, dksc, dev, flags));

	mutex_enter(&dk->dk_openlock);

	switch (fmt) {
	case S_IFCHR:
		dk->dk_copenmask &= ~pmask;
		break;
	case S_IFBLK:
		dk->dk_bopenmask &= ~pmask;
		break;
	}
	dk->dk_openmask = dk->dk_copenmask | dk->dk_bopenmask;

	if (dk->dk_openmask == 0) {
		if (dkd->d_lastclose != NULL)
			(*dkd->d_lastclose)(dksc->sc_dev);
		if ((dksc->sc_flags & DKF_KLABEL) == 0)
			dksc->sc_flags &= ~DKF_VLABEL;
	}

	mutex_exit(&dk->dk_openlock);
	return 0;
}

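/*
 * dk_translate: validate a buffer against the disklabel and convert
 * its block number to an absolute b_rawblkno in device-sized blocks.
 * Returns -1 if the buffer should be queued; otherwise the I/O is
 * already finished (possibly with b_error set) and the caller just
 * completes it.
 */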
static int
dk_translate(struct dk_softc *dksc, struct buf *bp)
{
	int part;
	int wlabel;
	daddr_t blkno;
	struct disklabel *lp;
	struct disk *dk;
	uint64_t numsecs;
	unsigned secsize;

	lp = dksc->sc_dkdev.dk_label;
	dk = &dksc->sc_dkdev;

	part = DISKPART(bp->b_dev);
	numsecs = dk->dk_geom.dg_secperunit;
	secsize = dk->dk_geom.dg_secsize;

	/*
	 * The transfer must be a whole number of blocks and the offset must
	 * not be negative.
	 */
	if ((bp->b_bcount % secsize) != 0 || bp->b_blkno < 0) {
		bp->b_error = EINVAL;
		goto done;
	}

	/* If there is nothing to do, then we are done */
	if (bp->b_bcount == 0)
		goto done;

	wlabel = dksc->sc_flags & (DKF_WLABEL|DKF_LABELLING);
	if (part == RAW_PART) {
		if (bounds_check_with_mediasize(bp, DEV_BSIZE, numsecs) <= 0)
			goto done;
	} else {
		if (bounds_check_with_label(&dksc->sc_dkdev, bp, wlabel) <= 0)
			goto done;
	}

	/*
	 * Convert the block number to absolute and put it in terms
	 * of the device's logical block size.
	 */
	if (secsize >= DEV_BSIZE)
		blkno = bp->b_blkno / (secsize / DEV_BSIZE);
	else
		blkno = bp->b_blkno * (DEV_BSIZE / secsize);

	if (part != RAW_PART)
		blkno += lp->d_partitions[DISKPART(bp->b_dev)].p_offset;
	bp->b_rawblkno = blkno;

	return -1;

done:
	bp->b_resid = bp->b_bcount;
	return bp->b_error;
}

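/*
 * dk_strategy: generic strategy routine; translates the buffer and
 * either completes it immediately or queues it and starts the unit.
 */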
void
dk_strategy(struct dk_softc *dksc, struct buf *bp)
{
	int error;

	DPRINTF_FOLLOW(("%s(%s, %p, %p)\n", __func__,
	    dksc->sc_xname, dksc, bp));

	if (!(dksc->sc_flags & DKF_INITED)) {
		DPRINTF_FOLLOW(("%s: not inited\n", __func__));
		bp->b_error = ENXIO;
		biodone(bp);
		return;
	}

	error = dk_translate(dksc, bp);
	if (error >= 0) {
		biodone(bp);
		return;
	}

	/*
	 * Queue buffer and start unit
	 */
	dk_start(dksc, bp);
}

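/*
 * dk_start: queue an optional buffer and submit queued buffers to the
 * driver's d_diskstart routine until it reports EAGAIN or the queue
 * is empty.  Only one caller at a time runs the loop (sc_busy).
 */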
void
dk_start(struct dk_softc *dksc, struct buf *bp)
{
	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
	int error;

	mutex_enter(&dksc->sc_iolock);

	if (bp != NULL)
		bufq_put(dksc->sc_bufq, bp);

	if (dksc->sc_busy)
		goto done;
	dksc->sc_busy = true;

	/*
	 * Peeking at the buffer queue and committing the operation
	 * only after success isn't atomic.
	 *
	 * So when a diskstart fails, the buffer is saved in sc_deferred
	 * and retried before the next buffer is fetched from the queue.
	 * dk_drain() handles flushing of a saved buffer.
	 *
	 * This preserves the order of I/O operations, which re-queueing
	 * the buffer with bufq_put() would not.
	 */

	bp = dksc->sc_deferred;
	dksc->sc_deferred = NULL;

	if (bp == NULL)
		bp = bufq_get(dksc->sc_bufq);

	while (bp != NULL) {

		disk_busy(&dksc->sc_dkdev);
		mutex_exit(&dksc->sc_iolock);
		error = dkd->d_diskstart(dksc->sc_dev, bp);
		mutex_enter(&dksc->sc_iolock);
		if (error == EAGAIN) {
			dksc->sc_deferred = bp;
			disk_unbusy(&dksc->sc_dkdev, 0, (bp->b_flags & B_READ));
			break;
		}

		if (error != 0) {
			bp->b_error = error;
			bp->b_resid = bp->b_bcount;
			dk_done1(dksc, bp, false);
		}

		bp = bufq_get(dksc->sc_bufq);
	}

	dksc->sc_busy = false;
done:
	mutex_exit(&dksc->sc_iolock);
}

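/*
 * dk_done1: common I/O completion; logs errors, updates the disk
 * statistics (taking the I/O lock if the caller does not hold it),
 * feeds the random pool and calls biodone().
 */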
static void
dk_done1(struct dk_softc *dksc, struct buf *bp, bool lock)
{
	struct disk *dk = &dksc->sc_dkdev;

	if (bp->b_error != 0) {
		struct cfdriver *cd = device_cfdriver(dksc->sc_dev);

		diskerr(bp, cd->cd_name, "error", LOG_PRINTF, 0,
			dk->dk_label);
		printf("\n");
	}

	if (lock)
		mutex_enter(&dksc->sc_iolock);
	disk_unbusy(dk, bp->b_bcount - bp->b_resid, (bp->b_flags & B_READ));
	if (lock)
		mutex_exit(&dksc->sc_iolock);

	rnd_add_uint32(&dksc->sc_rnd_source, bp->b_rawblkno);

	biodone(bp);
}

void
dk_done(struct dk_softc *dksc, struct buf *bp)
{
	dk_done1(dksc, bp, true);
}

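/*
 * dk_drain: abort the deferred buffer, if any, and every buffer still
 * sitting on the bufq.
 */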
void
dk_drain(struct dk_softc *dksc)
{
	struct buf *bp;

	mutex_enter(&dksc->sc_iolock);
	bp = dksc->sc_deferred;
	if (bp != NULL) {
		/* Forget the deferred buffer before completing it. */
		dksc->sc_deferred = NULL;
		bp->b_error = EIO;
		bp->b_resid = bp->b_bcount;
		biodone(bp);
	}
	bufq_drain(dksc->sc_bufq);
	mutex_exit(&dksc->sc_iolock);
}

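/*
 * dk_discard: translate a byte range into device blocks and pass it
 * to the driver's d_discard routine.
 */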
int
dk_discard(struct dk_softc *dksc, dev_t dev, off_t pos, off_t len)
{
	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
	unsigned secsize = dksc->sc_dkdev.dk_geom.dg_secsize;
	struct buf tmp, *bp = &tmp;
	int error;

	DPRINTF_FOLLOW(("%s(%s, %p, 0x%"PRIx64", %jd, %jd)\n", __func__,
	    dksc->sc_xname, dksc, dev, (intmax_t)pos, (intmax_t)len));

	if (!(dksc->sc_flags & DKF_INITED)) {
		DPRINTF_FOLLOW(("%s: not inited\n", __func__));
		return ENXIO;
	}

	if (secsize == 0 || (pos % secsize) != 0)
		return EINVAL;

	/* enough data to please the bounds checking code */
	bp->b_dev = dev;
	bp->b_blkno = (daddr_t)(pos / secsize);
	bp->b_bcount = len;
	bp->b_flags = B_WRITE;

	error = dk_translate(dksc, bp);
	if (error >= 0)
		return error;

	error = dkd->d_discard(dksc->sc_dev,
	    (off_t)bp->b_rawblkno * secsize,
	    (off_t)bp->b_bcount);

	return error;
}

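/*
 * dk_size: return the size of the given partition in DEV_BSIZE units,
 * or -1 if the unit is not initialized or the partition is not swap.
 * Opens and closes the device temporarily if it is not already open.
 */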
int
dk_size(struct dk_softc *dksc, dev_t dev)
{
	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
	struct disklabel *lp;
	int is_open;
	int part;
	int size;

	if ((dksc->sc_flags & DKF_INITED) == 0)
		return -1;

	part = DISKPART(dev);
	is_open = dksc->sc_dkdev.dk_openmask & (1 << part);

	if (!is_open && dkd->d_open(dev, 0, S_IFBLK, curlwp))
		return -1;

	lp = dksc->sc_dkdev.dk_label;
	if (lp->d_partitions[part].p_fstype != FS_SWAP)
		size = -1;
	else
		size = lp->d_partitions[part].p_size *
		    (lp->d_secsize / DEV_BSIZE);

	if (!is_open && dkd->d_close(dev, 0, S_IFBLK, curlwp))
		return -1;

	return size;
}

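/*
 * dk_ioctl: common ioctl handler for disklabel, wedge and buffer
 * queue strategy requests.  Returns ENOTTY for anything it does not
 * recognize.
 */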
int
dk_ioctl(struct dk_softc *dksc, dev_t dev,
	    u_long cmd, void *data, int flag, struct lwp *l)
{
	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
	struct disklabel *lp;
	struct disk *dk = &dksc->sc_dkdev;
#ifdef __HAVE_OLD_DISKLABEL
	struct disklabel newlabel;
#endif
	int error;

	DPRINTF_FOLLOW(("%s(%s, %p, 0x%"PRIx64", 0x%lx)\n", __func__,
	    dksc->sc_xname, dksc, dev, cmd));

	/* ensure that the pseudo disk is open for writes for these commands */
	switch (cmd) {
	case DIOCSDINFO:
	case DIOCWDINFO:
#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCSDINFO:
	case ODIOCWDINFO:
#endif
	case DIOCKLABEL:
	case DIOCWLABEL:
	case DIOCAWEDGE:
	case DIOCDWEDGE:
	case DIOCSSTRATEGY:
		if ((flag & FWRITE) == 0)
			return EBADF;
	}

	/* ensure that the pseudo-disk is initialized for these */
	switch (cmd) {
	case DIOCGDINFO:
	case DIOCSDINFO:
	case DIOCWDINFO:
	case DIOCGPART:
	case DIOCKLABEL:
	case DIOCWLABEL:
	case DIOCGDEFLABEL:
	case DIOCAWEDGE:
	case DIOCDWEDGE:
	case DIOCLWEDGES:
	case DIOCMWEDGES:
	case DIOCCACHESYNC:
#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCGDINFO:
	case ODIOCSDINFO:
	case ODIOCWDINFO:
	case ODIOCGDEFLABEL:
#endif
		if ((dksc->sc_flags & DKF_INITED) == 0)
			return ENXIO;
	}

	error = disk_ioctl(dk, dev, cmd, data, flag, l);
	if (error != EPASSTHROUGH)
		return error;
	else
		error = 0;

	switch (cmd) {
	case DIOCWDINFO:
	case DIOCSDINFO:
#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCWDINFO:
	case ODIOCSDINFO:
#endif
#ifdef __HAVE_OLD_DISKLABEL
		if (cmd == ODIOCSDINFO || cmd == ODIOCWDINFO) {
			memset(&newlabel, 0, sizeof newlabel);
			memcpy(&newlabel, data, sizeof (struct olddisklabel));
			lp = &newlabel;
		} else
#endif
		lp = (struct disklabel *)data;

		mutex_enter(&dk->dk_openlock);
		dksc->sc_flags |= DKF_LABELLING;

		error = setdisklabel(dksc->sc_dkdev.dk_label,
		    lp, 0, dksc->sc_dkdev.dk_cpulabel);
		if (error == 0) {
			if (cmd == DIOCWDINFO
#ifdef __HAVE_OLD_DISKLABEL
			    || cmd == ODIOCWDINFO
#endif
			   )
				error = writedisklabel(DKLABELDEV(dev),
				    dkd->d_strategy, dksc->sc_dkdev.dk_label,
				    dksc->sc_dkdev.dk_cpulabel);
		}

		dksc->sc_flags &= ~DKF_LABELLING;
		mutex_exit(&dk->dk_openlock);
		break;

	case DIOCKLABEL:
		if (*(int *)data != 0)
			dksc->sc_flags |= DKF_KLABEL;
		else
			dksc->sc_flags &= ~DKF_KLABEL;
		break;

	case DIOCWLABEL:
		if (*(int *)data != 0)
			dksc->sc_flags |= DKF_WLABEL;
		else
			dksc->sc_flags &= ~DKF_WLABEL;
		break;

	case DIOCGDEFLABEL:
		dk_getdefaultlabel(dksc, (struct disklabel *)data);
		break;

#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCGDEFLABEL:
		dk_getdefaultlabel(dksc, &newlabel);
		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
			return ENOTTY;
		memcpy(data, &newlabel, sizeof (struct olddisklabel));
		break;
#endif

	case DIOCGSTRATEGY:
	    {
		struct disk_strategy *dks = (void *)data;

		mutex_enter(&dksc->sc_iolock);
		strlcpy(dks->dks_name, bufq_getstrategyname(dksc->sc_bufq),
		    sizeof(dks->dks_name));
		mutex_exit(&dksc->sc_iolock);
		dks->dks_paramlen = 0;

		return 0;
	    }

	case DIOCSSTRATEGY:
	    {
		struct disk_strategy *dks = (void *)data;
		struct bufq_state *new;
		struct bufq_state *old;

		if (dks->dks_param != NULL) {
			return EINVAL;
		}
		dks->dks_name[sizeof(dks->dks_name) - 1] = 0; /* ensure term */
		error = bufq_alloc(&new, dks->dks_name,
		    BUFQ_EXACT|BUFQ_SORT_RAWBLOCK);
		if (error) {
			return error;
		}
		mutex_enter(&dksc->sc_iolock);
		old = dksc->sc_bufq;
		bufq_move(new, old);
		dksc->sc_bufq = new;
		mutex_exit(&dksc->sc_iolock);
		bufq_free(old);

		return 0;
	    }

	default:
		error = ENOTTY;
	}

	return error;
}

/*
 * dk_dump dumps all of physical memory into the partition specified.
 * This requires substantially more framework than {s,w}ddump, and hence
 * is probably much more fragile.
 */

#define DKFF_READYFORDUMP(x)	(((x) & DKF_READYFORDUMP) == DKF_READYFORDUMP)
static volatile int	dk_dumping = 0;

/* ARGSUSED */
int
dk_dump(struct dk_softc *dksc, dev_t dev,
    daddr_t blkno, void *vav, size_t size)
{
	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
	char *va = vav;
	struct disklabel *lp;
	struct partition *p;
	int part, towrt, nsects, sectoff, maxblkcnt, nblk;
	int maxxfer, rv = 0;

	/*
	 * ensure that we consider this device to be safe for dumping,
	 * and that the device is configured.
	 */
	if (!DKFF_READYFORDUMP(dksc->sc_flags)) {
		DPRINTF(DKDB_DUMP, ("%s: bad dump flags 0x%x\n", __func__,
		    dksc->sc_flags));
		return ENXIO;
	}

	/* ensure that we are not already dumping */
	if (dk_dumping)
		return EFAULT;
	dk_dumping = 1;

	if (dkd->d_dumpblocks == NULL) {
		DPRINTF(DKDB_DUMP, ("%s: no dumpblocks\n", __func__));
		dk_dumping = 0;
		return ENXIO;
	}

	/* device specific max transfer size */
	maxxfer = MAXPHYS;
	if (dkd->d_iosize != NULL)
		(*dkd->d_iosize)(dksc->sc_dev, &maxxfer);

	/* Convert to disk sectors.  Request must be a multiple of size. */
	part = DISKPART(dev);
	lp = dksc->sc_dkdev.dk_label;
	if ((size % lp->d_secsize) != 0) {
		DPRINTF(DKDB_DUMP, ("%s: odd size %zu\n", __func__, size));
		dk_dumping = 0;
		return EFAULT;
	}
	towrt = size / lp->d_secsize;
	blkno = dbtob(blkno) / lp->d_secsize;	/* blkno in secsize units */

	p = &lp->d_partitions[part];
	if (p->p_fstype != FS_SWAP) {
		DPRINTF(DKDB_DUMP, ("%s: bad fstype %d\n", __func__,
		    p->p_fstype));
		dk_dumping = 0;
		return ENXIO;
	}
	nsects = p->p_size;
	sectoff = p->p_offset;

	/* Check transfer bounds against partition size. */
	if ((blkno < 0) || ((blkno + towrt) > nsects)) {
		DPRINTF(DKDB_DUMP, ("%s: out of bounds blkno=%jd, towrt=%d, "
		    "nsects=%d\n", __func__, (intmax_t)blkno, towrt, nsects));
		dk_dumping = 0;
		return EINVAL;
	}

	/* Offset block number to start of partition. */
	blkno += sectoff;

	/* Start dumping and return when done. */
	maxblkcnt = howmany(maxxfer, lp->d_secsize);
	while (towrt > 0) {
		nblk = min(maxblkcnt, towrt);

		if ((rv = (*dkd->d_dumpblocks)(dksc->sc_dev, va, blkno, nblk))
		    != 0) {
			DPRINTF(DKDB_DUMP, ("%s: dumpblocks %d\n", __func__,
			    rv));
			dk_dumping = 0;
			return rv;
		}

		towrt -= nblk;
		blkno += nblk;
		va += nblk * lp->d_secsize;
	}

	dk_dumping = 0;

	return 0;
}

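/*
 * dk_getdefaultlabel: construct a fictitious disklabel from the disk
 * geometry, with a single raw partition covering the whole unit.
 */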
/* ARGSUSED */
void
dk_getdefaultlabel(struct dk_softc *dksc, struct disklabel *lp)
{
	struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;

	memset(lp, 0, sizeof(*lp));

	if (dg->dg_secperunit > UINT32_MAX)
		lp->d_secperunit = UINT32_MAX;
	else
		lp->d_secperunit = dg->dg_secperunit;
	lp->d_secsize = dg->dg_secsize;
	lp->d_nsectors = dg->dg_nsectors;
	lp->d_ntracks = dg->dg_ntracks;
	lp->d_ncylinders = dg->dg_ncylinders;
	lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;

	strlcpy(lp->d_typename, dksc->sc_xname, sizeof(lp->d_typename));
	lp->d_type = dksc->sc_dtype;
	strlcpy(lp->d_packname, "fictitious", sizeof(lp->d_packname));
	lp->d_rpm = 3600;
	lp->d_interleave = 1;
	lp->d_flags = 0;

	lp->d_partitions[RAW_PART].p_offset = 0;
	lp->d_partitions[RAW_PART].p_size = lp->d_secperunit;
	lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED;
	lp->d_npartitions = RAW_PART + 1;

	lp->d_magic = DISKMAGIC;
	lp->d_magic2 = DISKMAGIC;
	lp->d_checksum = dkcksum(dksc->sc_dkdev.dk_label);
}

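/*
 * dk_getdisklabel: read the on-disk label into the in-core label,
 * falling back to a default label if none is found, and optionally
 * sanity-check it against the disk geometry.
 */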
/* ARGSUSED */
void
dk_getdisklabel(struct dk_softc *dksc, dev_t dev)
{
	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
	struct disklabel *lp = dksc->sc_dkdev.dk_label;
	struct cpu_disklabel *clp = dksc->sc_dkdev.dk_cpulabel;
	struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
	struct partition *pp;
	int i;
	const char *errstring;

	memset(clp, 0x0, sizeof(*clp));
	dk_getdefaultlabel(dksc, lp);
	errstring = readdisklabel(DKLABELDEV(dev), dkd->d_strategy,
	    dksc->sc_dkdev.dk_label, dksc->sc_dkdev.dk_cpulabel);
	if (errstring) {
		dk_makedisklabel(dksc);
		if (dksc->sc_flags & DKF_WARNLABEL)
			printf("%s: %s\n", dksc->sc_xname, errstring);
		return;
	}

	if ((dksc->sc_flags & DKF_LABELSANITY) == 0)
		return;

	/* Sanity check */
	if (lp->d_secperunit < UINT32_MAX ?
	    lp->d_secperunit != dg->dg_secperunit :
	    lp->d_secperunit > dg->dg_secperunit)
		printf("WARNING: %s: total sector size in disklabel (%ju) "
		    "!= the size of %s (%ju)\n", dksc->sc_xname,
		    (uintmax_t)lp->d_secperunit, dksc->sc_xname,
		    (uintmax_t)dg->dg_secperunit);

	for (i=0; i < lp->d_npartitions; i++) {
		pp = &lp->d_partitions[i];
		if (pp->p_offset + pp->p_size > dg->dg_secperunit)
			printf("WARNING: %s: end of partition `%c' exceeds "
			    "the size of %s (%ju)\n", dksc->sc_xname,
			    'a' + i, dksc->sc_xname,
			    (uintmax_t)dg->dg_secperunit);
	}
}

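/*
 * dk_makedisklabel: finish off the default label used when no valid
 * on-disk label was found; marks the raw partition FS_BSDFFS, names
 * the pack and recomputes the checksum.
 */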
/* ARGSUSED */
static void
dk_makedisklabel(struct dk_softc *dksc)
{
	struct disklabel *lp = dksc->sc_dkdev.dk_label;

	lp->d_partitions[RAW_PART].p_fstype = FS_BSDFFS;
	strlcpy(lp->d_packname, "default label", sizeof(lp->d_packname));
	lp->d_checksum = dkcksum(lp);
}

/* This function is taken from ccd.c:1.76  --rcd */

/*
 * XXX this function looks too generic for dksubr.c, shouldn't we
 * put it somewhere better?
 */

/*
 * Lookup the provided name in the filesystem.  If the file exists,
 * is a valid block device, and isn't being used by anyone else,
 * set *vpp to the file's vnode.
 */
int
dk_lookup(struct pathbuf *pb, struct lwp *l, struct vnode **vpp)
{
	struct nameidata nd;
	struct vnode *vp;
	int error;

	if (l == NULL)
		return ESRCH;	/* Is ESRCH the best choice? */

	NDINIT(&nd, LOOKUP, FOLLOW, pb);
	if ((error = vn_open(&nd, FREAD | FWRITE, 0)) != 0) {
		DPRINTF((DKDB_FOLLOW|DKDB_INIT),
		    ("%s: vn_open error = %d\n", __func__, error));
		return error;
	}

	vp = nd.ni_vp;
	if (vp->v_type != VBLK) {
		error = ENOTBLK;
		goto out;
	}

	/* Reopen as anonymous vnode to protect against forced unmount. */
	if ((error = bdevvp(vp->v_rdev, vpp)) != 0)
		goto out;
	VOP_UNLOCK(vp);
	if ((error = vn_close(vp, FREAD | FWRITE, l->l_cred)) != 0) {
		vrele(*vpp);
		return error;
	}
	if ((error = VOP_OPEN(*vpp, FREAD | FWRITE, l->l_cred)) != 0) {
		vrele(*vpp);
		return error;
	}
	mutex_enter((*vpp)->v_interlock);
	(*vpp)->v_writecount++;
	mutex_exit((*vpp)->v_interlock);

	IFDEBUG(DKDB_VNODE, vprint("dk_lookup: vnode info", *vpp));

	return 0;
out:
	VOP_UNLOCK(vp);
	(void) vn_close(vp, FREAD | FWRITE, l->l_cred);
	return error;
}

MODULE(MODULE_CLASS_MISC, dk_subr, NULL);

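/*
 * dk_subr_modcmd: module control; nothing to set up or tear down.
 */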
static int
dk_subr_modcmd(modcmd_t cmd, void *arg)
{
	switch (cmd) {
	case MODULE_CMD_INIT:
	case MODULE_CMD_FINI:
		return 0;
	case MODULE_CMD_STAT:
	case MODULE_CMD_AUTOUNLOAD:
	default:
		return ENOTTY;
	}
}