/*	$NetBSD: dksubr.c,v 1.88.2.3 2017/03/20 06:57:27 pgoyette Exp $	*/

/*-
 * Copyright (c) 1996, 1997, 1998, 1999, 2002, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe and Roland C. Dowdeswell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: dksubr.c,v 1.88.2.3 2017/03/20 06:57:27 pgoyette Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/stat.h>
#include <sys/proc.h>
#include <sys/ioctl.h>
#include <sys/device.h>
#include <sys/disk.h>
#include <sys/disklabel.h>
#include <sys/buf.h>
#include <sys/bufq.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/namei.h>
#include <sys/module.h>
#include <sys/syslog.h>

#include <dev/dkvar.h>
#include <miscfs/specfs/specdev.h> /* for v_rdev */

int	dkdebug = 0;

#ifdef DEBUG
#define DKDB_FOLLOW	0x1
#define DKDB_INIT	0x2
#define DKDB_VNODE	0x4
#define DKDB_DUMP	0x8

#define IFDEBUG(x,y)		if (dkdebug & (x)) y
#define DPRINTF(x,y)		IFDEBUG(x, printf y)
#define DPRINTF_FOLLOW(y)	DPRINTF(DKDB_FOLLOW, y)
#else
#define IFDEBUG(x,y)
#define DPRINTF(x,y)
#define DPRINTF_FOLLOW(y)
#endif

#define DKF_READYFORDUMP	(DKF_INITED|DKF_TAKEDUMP)

static int dk_subr_modcmd(modcmd_t, void *);

#define DKLABELDEV(dev)	\
	(MAKEDISKDEV(major((dev)), DISKUNIT((dev)), RAW_PART))

static void	dk_makedisklabel(struct dk_softc *);
static int	dk_translate(struct dk_softc *, struct buf *);
static void	dk_done1(struct dk_softc *, struct buf *, bool);

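/*
 * Initialize the dk_softc: zero it and record the owning device, the
 * disk type and the external name.  Must run before any other dk_*
 * helper is used on this softc.
 */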
void
dk_init(struct dk_softc *dksc, device_t dev, int dtype)
{

	memset(dksc, 0x0, sizeof(*dksc));
	dksc->sc_dtype = dtype;
	dksc->sc_dev = dev;

	strlcpy(dksc->sc_xname, device_xname(dev), DK_XNAME_SIZE);
	dksc->sc_dkdev.dk_name = dksc->sc_xname;
}

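/*
 * Attach the generic disk framework state for this unit.  A client
 * driver typically wires the framework up at attach time roughly as
 * follows (an illustrative sketch only; the "xx" names are
 * placeholders and the exact calls and order vary per driver):
 *
 *	dk_init(dksc, self, DKTYPE_UNKNOWN);
 *	disk_init(&dksc->sc_dkdev, dksc->sc_xname, &xx_dkdriver);
 *	bufq_alloc(&dksc->sc_bufq, "fcfs", BUFQ_SORT_RAWBLOCK);
 *	dk_attach(dksc);
 *	disk_attach(&dksc->sc_dkdev);
 */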
void
dk_attach(struct dk_softc *dksc)
{
	KASSERT(dksc->sc_dev != NULL);

	mutex_init(&dksc->sc_iolock, MUTEX_DEFAULT, IPL_VM);
	dksc->sc_flags |= DKF_READYFORDUMP;
#ifdef DIAGNOSTIC
	dksc->sc_flags |= DKF_WARNLABEL | DKF_LABELSANITY;
#endif

	/* Attach the device into the rnd source list. */
	rnd_attach_source(&dksc->sc_rnd_source, dksc->sc_xname,
	    RND_TYPE_DISK, RND_FLAG_DEFAULT);
}

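/*
 * Undo dk_attach(): unhook the entropy source and destroy the
 * I/O lock.
 */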
void
dk_detach(struct dk_softc *dksc)
{
	/* Unhook the entropy source. */
	rnd_detach_source(&dksc->sc_rnd_source);

	dksc->sc_flags &= ~DKF_READYFORDUMP;
	mutex_destroy(&dksc->sc_iolock);
}

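/*
 * Open a partition: run the driver's first-open hook for the first
 * opener, read the disklabel if it is not valid yet, verify that the
 * partition exists and record it in the open masks.
 */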
/* ARGSUSED */
int
dk_open(struct dk_softc *dksc, dev_t dev,
    int flags, int fmt, struct lwp *l)
{
	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
	struct disklabel *lp = dksc->sc_dkdev.dk_label;
	int part = DISKPART(dev);
	int pmask = 1 << part;
	int ret = 0;
	struct disk *dk = &dksc->sc_dkdev;

	DPRINTF_FOLLOW(("%s(%s, %p, 0x%"PRIx64", 0x%x)\n", __func__,
	    dksc->sc_xname, dksc, dev, flags));

	mutex_enter(&dk->dk_openlock);

	/*
	 * If there are wedges, and this is not RAW_PART, then we
	 * need to fail.
	 */
	if (dk->dk_nwedges != 0 && part != RAW_PART) {
		ret = EBUSY;
		goto done;
	}

	/*
	 * initialize driver for the first opener
	 */
	if (dk->dk_openmask == 0 && dkd->d_firstopen != NULL) {
		ret = (*dkd->d_firstopen)(dksc->sc_dev, dev, flags, fmt);
		if (ret)
			goto done;
	}

	/*
	 * If we're init'ed and there are no other open partitions then
	 * update the in-core disklabel.
	 */
	if ((dksc->sc_flags & DKF_INITED)) {
		if ((dksc->sc_flags & DKF_VLABEL) == 0) {
			dksc->sc_flags |= DKF_VLABEL;
			dk_getdisklabel(dksc, dev);
		}
	}

	/* Fail if we can't find the partition. */
	if (part != RAW_PART &&
	    ((dksc->sc_flags & DKF_VLABEL) == 0 ||
	     part >= lp->d_npartitions ||
	     lp->d_partitions[part].p_fstype == FS_UNUSED)) {
		ret = ENXIO;
		goto done;
	}

	/* Mark our unit as open. */
	switch (fmt) {
	case S_IFCHR:
		dk->dk_copenmask |= pmask;
		break;
	case S_IFBLK:
		dk->dk_bopenmask |= pmask;
		break;
	}

	dk->dk_openmask = dk->dk_copenmask | dk->dk_bopenmask;

done:
	mutex_exit(&dk->dk_openlock);
	return ret;
}

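/*
 * Close a partition: clear it from the open masks and, on last close,
 * run the driver's last-close hook and forget the in-core label unless
 * DIOCKLABEL asked us to keep it.
 */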
/* ARGSUSED */
int
dk_close(struct dk_softc *dksc, dev_t dev,
    int flags, int fmt, struct lwp *l)
{
	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
	int part = DISKPART(dev);
	int pmask = 1 << part;
	struct disk *dk = &dksc->sc_dkdev;

	DPRINTF_FOLLOW(("%s(%s, %p, 0x%"PRIx64", 0x%x)\n", __func__,
	    dksc->sc_xname, dksc, dev, flags));

	mutex_enter(&dk->dk_openlock);

	switch (fmt) {
	case S_IFCHR:
		dk->dk_copenmask &= ~pmask;
		break;
	case S_IFBLK:
		dk->dk_bopenmask &= ~pmask;
		break;
	}
	dk->dk_openmask = dk->dk_copenmask | dk->dk_bopenmask;

	if (dk->dk_openmask == 0) {
		if (dkd->d_lastclose != NULL)
			(*dkd->d_lastclose)(dksc->sc_dev);
		if ((dksc->sc_flags & DKF_KLABEL) == 0)
			dksc->sc_flags &= ~DKF_VLABEL;
	}

	mutex_exit(&dk->dk_openlock);
	return 0;
}

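/*
 * Validate a buffer and translate its block number into an absolute
 * device block stored in bp->b_rawblkno.  Returns -1 if the buffer
 * should be queued, or a value >= 0 (bp->b_error, possibly 0) if it
 * has been completed here and only needs biodone().
 */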
static int
dk_translate(struct dk_softc *dksc, struct buf *bp)
{
	int part;
	int wlabel;
	daddr_t blkno;
	struct disklabel *lp;
	struct disk *dk;
	uint64_t numsecs;
	unsigned secsize;

	lp = dksc->sc_dkdev.dk_label;
	dk = &dksc->sc_dkdev;

	part = DISKPART(bp->b_dev);
	numsecs = dk->dk_geom.dg_secperunit;
	secsize = dk->dk_geom.dg_secsize;

	/*
	 * The transfer must be a whole number of blocks and the offset must
	 * not be negative.
	 */
	if ((bp->b_bcount % secsize) != 0 || bp->b_blkno < 0) {
		bp->b_error = EINVAL;
		goto done;
	}

	/* If there is nothing to do, then we are done */
	if (bp->b_bcount == 0)
		goto done;

	wlabel = dksc->sc_flags & (DKF_WLABEL|DKF_LABELLING);
	if (part == RAW_PART) {
		uint64_t numblocks = btodb(numsecs * secsize);
		if (bounds_check_with_mediasize(bp, DEV_BSIZE, numblocks) <= 0)
			goto done;
	} else {
		if (bounds_check_with_label(&dksc->sc_dkdev, bp, wlabel) <= 0)
			goto done;
	}

	/*
	 * Convert the block number to absolute and put it in terms
	 * of the device's logical block size.
	 */
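	/*
	 * For example, with 2048-byte sectors a transfer starting at
	 * DEV_BSIZE block 8 maps to device block 8 / (2048 / 512) = 2,
	 * while with 256-byte sectors it maps to 8 * (512 / 256) = 16.
	 */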
	if (secsize >= DEV_BSIZE)
		blkno = bp->b_blkno / (secsize / DEV_BSIZE);
	else
		blkno = bp->b_blkno * (DEV_BSIZE / secsize);

	if (part != RAW_PART)
		blkno += lp->d_partitions[DISKPART(bp->b_dev)].p_offset;
	bp->b_rawblkno = blkno;

	return -1;

done:
	bp->b_resid = bp->b_bcount;
	return bp->b_error;
}

static int
dk_strategy1(struct dk_softc *dksc, struct buf *bp)
{
	int error;

	DPRINTF_FOLLOW(("%s(%s, %p, %p)\n", __func__,
	    dksc->sc_xname, dksc, bp));

	if (!(dksc->sc_flags & DKF_INITED)) {
		DPRINTF_FOLLOW(("%s: not inited\n", __func__));
		bp->b_error = ENXIO;
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return 1;
	}

	error = dk_translate(dksc, bp);
	if (error >= 0) {
		biodone(bp);
		return 1;
	}

	return 0;
}

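/*
 * Normal strategy entry point: validate and translate the buffer,
 * then queue it and kick the driver via dk_start().
 */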
void
dk_strategy(struct dk_softc *dksc, struct buf *bp)
{
	int error;

	error = dk_strategy1(dksc, bp);
	if (error)
		return;

	/*
	 * Queue buffer and start unit
	 */
	dk_start(dksc, bp);
}

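/*
 * Like dk_strategy(), but only queue the buffer; the caller is
 * expected to start the queue later with dk_start(dksc, NULL),
 * e.g. from a worker thread.
 */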
int
dk_strategy_defer(struct dk_softc *dksc, struct buf *bp)
{
	int error;

	error = dk_strategy1(dksc, bp);
	if (error)
		return error;

	/*
	 * Queue buffer only
	 */
	mutex_enter(&dksc->sc_iolock);
	disk_wait(&dksc->sc_dkdev);
	bufq_put(dksc->sc_bufq, bp);
	mutex_exit(&dksc->sc_iolock);

	return 0;
}

int
dk_strategy_pending(struct dk_softc *dksc)
{
	struct buf *bp;

	if (!(dksc->sc_flags & DKF_INITED)) {
		DPRINTF_FOLLOW(("%s: not inited\n", __func__));
		return 0;
	}

	mutex_enter(&dksc->sc_iolock);
	bp = bufq_peek(dksc->sc_bufq);
	mutex_exit(&dksc->sc_iolock);

	return bp != NULL;
}

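/*
 * Queue an optional buffer and feed the queue to the driver's
 * d_diskstart() routine until it reports EAGAIN or the queue is
 * empty.  Safe to call concurrently; sc_iolock and sc_busy ensure
 * that only one caller drains the queue at a time.
 */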
void
dk_start(struct dk_softc *dksc, struct buf *bp)
{
	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
	int error;

	if (!(dksc->sc_flags & DKF_INITED)) {
		DPRINTF_FOLLOW(("%s: not inited\n", __func__));
		return;
	}

	mutex_enter(&dksc->sc_iolock);

	if (bp != NULL) {
		disk_wait(&dksc->sc_dkdev);
		bufq_put(dksc->sc_bufq, bp);
	}

	/*
	 * If another thread is running the queue, increment
	 * busy counter to 2 so that the queue is retried,
	 * because the driver may now accept additional
	 * requests.
	 */
	if (dksc->sc_busy < 2)
		dksc->sc_busy++;
	if (dksc->sc_busy > 1)
		goto done;

	/*
	 * Peeking at the buffer queue and committing the operation
	 * only after success isn't atomic.
	 *
	 * So when a diskstart fails, the buffer is saved
	 * and tried again before the next buffer is fetched.
	 * dk_drain() handles flushing of a saved buffer.
	 *
	 * This keeps the order of I/O operations; re-inserting the
	 * buffer with bufq_put could reorder it.
	 */

	while (dksc->sc_busy > 0) {

		bp = dksc->sc_deferred;
		dksc->sc_deferred = NULL;

		if (bp == NULL)
			bp = bufq_get(dksc->sc_bufq);

		while (bp != NULL) {

			disk_busy(&dksc->sc_dkdev);
			mutex_exit(&dksc->sc_iolock);
			error = dkd->d_diskstart(dksc->sc_dev, bp);
			mutex_enter(&dksc->sc_iolock);
			if (error == EAGAIN) {
				dksc->sc_deferred = bp;
				disk_unbusy(&dksc->sc_dkdev, 0, (bp->b_flags & B_READ));
				disk_wait(&dksc->sc_dkdev);
				break;
			}

			if (error != 0) {
				bp->b_error = error;
				bp->b_resid = bp->b_bcount;
				dk_done1(dksc, bp, false);
			}

			bp = bufq_get(dksc->sc_bufq);
		}

		dksc->sc_busy--;
	}
done:
	mutex_exit(&dksc->sc_iolock);
}

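/*
 * Common I/O completion: report errors, update disk statistics, feed
 * the entropy pool and call biodone().  'lock' tells whether sc_iolock
 * still needs to be taken around disk_unbusy().
 */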
static void
dk_done1(struct dk_softc *dksc, struct buf *bp, bool lock)
{
	struct disk *dk = &dksc->sc_dkdev;

	if (bp->b_error != 0) {
		struct cfdriver *cd = device_cfdriver(dksc->sc_dev);

		diskerr(bp, cd->cd_name, "error", LOG_PRINTF, 0,
		    dk->dk_label);
		printf("\n");
	}

	if (lock)
		mutex_enter(&dksc->sc_iolock);
	disk_unbusy(dk, bp->b_bcount - bp->b_resid, (bp->b_flags & B_READ));
	if (lock)
		mutex_exit(&dksc->sc_iolock);

	rnd_add_uint32(&dksc->sc_rnd_source, bp->b_rawblkno);

	biodone(bp);
}

void
dk_done(struct dk_softc *dksc, struct buf *bp)
{
	dk_done1(dksc, bp, true);
}

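/*
 * Abort any deferred buffer with EIO and drain the buffer queue.
 */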
void
dk_drain(struct dk_softc *dksc)
{
	struct buf *bp;

	mutex_enter(&dksc->sc_iolock);
	bp = dksc->sc_deferred;
	dksc->sc_deferred = NULL;
	if (bp != NULL) {
		bp->b_error = EIO;
		bp->b_resid = bp->b_bcount;
		biodone(bp);
	}
	bufq_drain(dksc->sc_bufq);
	mutex_exit(&dksc->sc_iolock);
}

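/*
 * Pass a discard ("trim") request for [pos, pos + len) to the driver's
 * d_discard() hook, after running it through dk_translate() with a
 * fake write buffer so the usual bounds checks apply.
 */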
int
dk_discard(struct dk_softc *dksc, dev_t dev, off_t pos, off_t len)
{
	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
	unsigned secsize = dksc->sc_dkdev.dk_geom.dg_secsize;
	struct buf tmp, *bp = &tmp;
	int error;

	DPRINTF_FOLLOW(("%s(%s, %p, 0x%"PRIx64", %jd, %jd)\n", __func__,
	    dksc->sc_xname, dksc, dev, (intmax_t)pos, (intmax_t)len));

	if (!(dksc->sc_flags & DKF_INITED)) {
		DPRINTF_FOLLOW(("%s: not inited\n", __func__));
		return ENXIO;
	}

	if (secsize == 0 || (pos % secsize) != 0)
		return EINVAL;

	/* enough data to please the bounds checking code */
	bp->b_dev = dev;
	bp->b_blkno = (daddr_t)(pos / secsize);
	bp->b_bcount = len;
	bp->b_flags = B_WRITE;

	error = dk_translate(dksc, bp);
	if (error >= 0)
		return error;

	error = dkd->d_discard(dksc->sc_dev,
	    (off_t)bp->b_rawblkno * secsize,
	    (off_t)bp->b_bcount);

	return error;
}

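/*
 * Return the size of the given partition in DEV_BSIZE units, or -1 if
 * the unit is not initialized or the partition is not of type FS_SWAP.
 */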
int
dk_size(struct dk_softc *dksc, dev_t dev)
{
	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
	struct disklabel *lp;
	int is_open;
	int part;
	int size;

	if ((dksc->sc_flags & DKF_INITED) == 0)
		return -1;

	part = DISKPART(dev);
	is_open = dksc->sc_dkdev.dk_openmask & (1 << part);

	if (!is_open && dkd->d_open(dev, 0, S_IFBLK, curlwp))
		return -1;

	lp = dksc->sc_dkdev.dk_label;
	if (lp->d_partitions[part].p_fstype != FS_SWAP)
		size = -1;
	else
		size = lp->d_partitions[part].p_size *
		    (lp->d_secsize / DEV_BSIZE);

	if (!is_open && dkd->d_close(dev, 0, S_IFBLK, curlwp))
		return -1;

	return size;
}

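/*
 * Generic disk ioctl handler.  disk_ioctl() gets the first shot; the
 * requests it passes through (EPASSTHROUGH) are handled here:
 * disklabel get/set, label write protection, default labels and bufq
 * strategy selection.  Returns ENOTTY for requests it does not
 * recognize.
 */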
int
dk_ioctl(struct dk_softc *dksc, dev_t dev,
    u_long cmd, void *data, int flag, struct lwp *l)
{
	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
	struct disklabel *lp;
	struct disk *dk = &dksc->sc_dkdev;
#ifdef __HAVE_OLD_DISKLABEL
	struct disklabel newlabel;
#endif
	int error;

	DPRINTF_FOLLOW(("%s(%s, %p, 0x%"PRIx64", 0x%lx)\n", __func__,
	    dksc->sc_xname, dksc, dev, cmd));

	/* ensure that the pseudo disk is open for writes for these commands */
	switch (cmd) {
	case DIOCSDINFO:
	case DIOCWDINFO:
#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCSDINFO:
	case ODIOCWDINFO:
#endif
	case DIOCKLABEL:
	case DIOCWLABEL:
	case DIOCAWEDGE:
	case DIOCDWEDGE:
	case DIOCSSTRATEGY:
		if ((flag & FWRITE) == 0)
			return EBADF;
	}

	/* ensure that the pseudo-disk is initialized for these */
	switch (cmd) {
	case DIOCGDINFO:
	case DIOCSDINFO:
	case DIOCWDINFO:
	case DIOCGPARTINFO:
	case DIOCKLABEL:
	case DIOCWLABEL:
	case DIOCGDEFLABEL:
	case DIOCAWEDGE:
	case DIOCDWEDGE:
	case DIOCLWEDGES:
	case DIOCMWEDGES:
	case DIOCCACHESYNC:
#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCGDINFO:
	case ODIOCSDINFO:
	case ODIOCWDINFO:
	case ODIOCGDEFLABEL:
#endif
		if ((dksc->sc_flags & DKF_INITED) == 0)
			return ENXIO;
	}

	error = disk_ioctl(dk, dev, cmd, data, flag, l);
	if (error != EPASSTHROUGH)
		return error;
	else
		error = 0;

	switch (cmd) {
	case DIOCWDINFO:
	case DIOCSDINFO:
#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCWDINFO:
	case ODIOCSDINFO:
#endif
#ifdef __HAVE_OLD_DISKLABEL
		if (cmd == ODIOCSDINFO || cmd == ODIOCWDINFO) {
			memset(&newlabel, 0, sizeof newlabel);
			memcpy(&newlabel, data, sizeof (struct olddisklabel));
			lp = &newlabel;
		} else
#endif
		lp = (struct disklabel *)data;

		mutex_enter(&dk->dk_openlock);
		dksc->sc_flags |= DKF_LABELLING;

		error = setdisklabel(dksc->sc_dkdev.dk_label,
		    lp, 0, dksc->sc_dkdev.dk_cpulabel);
		if (error == 0) {
			if (cmd == DIOCWDINFO
#ifdef __HAVE_OLD_DISKLABEL
			    || cmd == ODIOCWDINFO
#endif
			   )
				error = writedisklabel(DKLABELDEV(dev),
				    dkd->d_strategy, dksc->sc_dkdev.dk_label,
				    dksc->sc_dkdev.dk_cpulabel);
		}

		dksc->sc_flags &= ~DKF_LABELLING;
		mutex_exit(&dk->dk_openlock);
		break;

	case DIOCKLABEL:
		if (*(int *)data != 0)
			dksc->sc_flags |= DKF_KLABEL;
		else
			dksc->sc_flags &= ~DKF_KLABEL;
		break;

	case DIOCWLABEL:
		if (*(int *)data != 0)
			dksc->sc_flags |= DKF_WLABEL;
		else
			dksc->sc_flags &= ~DKF_WLABEL;
		break;

	case DIOCGDEFLABEL:
		dk_getdefaultlabel(dksc, (struct disklabel *)data);
		break;

#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCGDEFLABEL:
		dk_getdefaultlabel(dksc, &newlabel);
		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
			return ENOTTY;
		memcpy(data, &newlabel, sizeof (struct olddisklabel));
		break;
#endif

	case DIOCGSTRATEGY:
	    {
		struct disk_strategy *dks = (void *)data;

		mutex_enter(&dksc->sc_iolock);
		if (dksc->sc_bufq != NULL)
			strlcpy(dks->dks_name,
			    bufq_getstrategyname(dksc->sc_bufq),
			    sizeof(dks->dks_name));
		else
			error = EINVAL;
		mutex_exit(&dksc->sc_iolock);
		dks->dks_paramlen = 0;
		break;
	    }

	case DIOCSSTRATEGY:
	    {
		struct disk_strategy *dks = (void *)data;
		struct bufq_state *new;
		struct bufq_state *old;

		if (dks->dks_param != NULL) {
			return EINVAL;
		}
		dks->dks_name[sizeof(dks->dks_name) - 1] = 0; /* ensure term */
		error = bufq_alloc(&new, dks->dks_name,
		    BUFQ_EXACT|BUFQ_SORT_RAWBLOCK);
		if (error) {
			return error;
		}
		mutex_enter(&dksc->sc_iolock);
		old = dksc->sc_bufq;
		if (old)
			bufq_move(new, old);
		dksc->sc_bufq = new;
		mutex_exit(&dksc->sc_iolock);
		if (old)
			bufq_free(old);
		break;
	    }

	default:
		error = ENOTTY;
	}

	return error;
}

/*
 * dk_dump dumps all of physical memory into the partition specified.
 * This requires substantially more framework than {s,w}ddump, and hence
 * is probably much more fragile.
 */

#define DKFF_READYFORDUMP(x)	(((x) & DKF_READYFORDUMP) == DKF_READYFORDUMP)
static volatile int	dk_dumping = 0;

/* ARGSUSED */
int
dk_dump(struct dk_softc *dksc, dev_t dev,
    daddr_t blkno, void *vav, size_t size)
{
	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
	char *va = vav;
	struct disklabel *lp;
	struct partition *p;
	int part, towrt, nsects, sectoff, maxblkcnt, nblk;
	int maxxfer, rv = 0;

	/*
	 * ensure that we consider this device to be safe for dumping,
	 * and that the device is configured.
	 */
	if (!DKFF_READYFORDUMP(dksc->sc_flags)) {
		DPRINTF(DKDB_DUMP, ("%s: bad dump flags 0x%x\n", __func__,
		    dksc->sc_flags));
		return ENXIO;
	}

	/* ensure that we are not already dumping */
	if (dk_dumping)
		return EFAULT;
	dk_dumping = 1;

	if (dkd->d_dumpblocks == NULL) {
		DPRINTF(DKDB_DUMP, ("%s: no dumpblocks\n", __func__));
		dk_dumping = 0;
		return ENXIO;
	}

	/* device specific max transfer size */
	maxxfer = MAXPHYS;
	if (dkd->d_iosize != NULL)
		(*dkd->d_iosize)(dksc->sc_dev, &maxxfer);

	/* Convert to disk sectors.  Request must be a multiple of size. */
	part = DISKPART(dev);
	lp = dksc->sc_dkdev.dk_label;
	if ((size % lp->d_secsize) != 0) {
		DPRINTF(DKDB_DUMP, ("%s: odd size %zu\n", __func__, size));
		dk_dumping = 0;
		return EFAULT;
	}
	towrt = size / lp->d_secsize;
	blkno = dbtob(blkno) / lp->d_secsize;	/* blkno in secsize units */

	p = &lp->d_partitions[part];
	if (p->p_fstype != FS_SWAP) {
		DPRINTF(DKDB_DUMP, ("%s: bad fstype %d\n", __func__,
		    p->p_fstype));
		dk_dumping = 0;
		return ENXIO;
	}
	nsects = p->p_size;
	sectoff = p->p_offset;

	/* Check transfer bounds against partition size. */
	if ((blkno < 0) || ((blkno + towrt) > nsects)) {
		DPRINTF(DKDB_DUMP, ("%s: out of bounds blkno=%jd, towrt=%d, "
		    "nsects=%d\n", __func__, (intmax_t)blkno, towrt, nsects));
		dk_dumping = 0;
		return EINVAL;
	}

	/* Offset block number to start of partition. */
	blkno += sectoff;

	/* Start dumping and return when done. */
	maxblkcnt = howmany(maxxfer, lp->d_secsize);
	while (towrt > 0) {
		nblk = min(maxblkcnt, towrt);

		if ((rv = (*dkd->d_dumpblocks)(dksc->sc_dev, va, blkno, nblk))
		    != 0) {
			DPRINTF(DKDB_DUMP, ("%s: dumpblocks %d\n", __func__,
			    rv));
			dk_dumping = 0;
			return rv;
		}

		towrt -= nblk;
		blkno += nblk;
		va += nblk * lp->d_secsize;
	}

	dk_dumping = 0;

	return 0;
}

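/*
 * Build a default in-core disklabel from the disk geometry, with a
 * single RAW_PART partition covering the whole unit, and give the
 * driver's d_label() hook a chance to adjust it.
 */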
/* ARGSUSED */
void
dk_getdefaultlabel(struct dk_softc *dksc, struct disklabel *lp)
{
	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
	struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;

	memset(lp, 0, sizeof(*lp));

	if (dg->dg_secperunit > UINT32_MAX)
		lp->d_secperunit = UINT32_MAX;
	else
		lp->d_secperunit = dg->dg_secperunit;
	lp->d_secsize = dg->dg_secsize;
	lp->d_nsectors = dg->dg_nsectors;
	lp->d_ntracks = dg->dg_ntracks;
	lp->d_ncylinders = dg->dg_ncylinders;
	lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;

	strlcpy(lp->d_typename, dksc->sc_xname, sizeof(lp->d_typename));
	lp->d_type = dksc->sc_dtype;
	strlcpy(lp->d_packname, "fictitious", sizeof(lp->d_packname));
	lp->d_rpm = 3600;
	lp->d_interleave = 1;
	lp->d_flags = 0;

	lp->d_partitions[RAW_PART].p_offset = 0;
	lp->d_partitions[RAW_PART].p_size = lp->d_secperunit;
	lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED;
	lp->d_npartitions = RAW_PART + 1;

	lp->d_magic = DISKMAGIC;
	lp->d_magic2 = DISKMAGIC;

	if (dkd->d_label)
		dkd->d_label(dksc->sc_dev, lp);

	lp->d_checksum = dkcksum(lp);
}

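/*
 * Read the on-disk label into the in-core label, falling back to a
 * fabricated default if that fails, and optionally sanity-check the
 * result against the device geometry.
 */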
/* ARGSUSED */
void
dk_getdisklabel(struct dk_softc *dksc, dev_t dev)
{
	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
	struct disklabel *lp = dksc->sc_dkdev.dk_label;
	struct cpu_disklabel *clp = dksc->sc_dkdev.dk_cpulabel;
	struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
	struct partition *pp;
	int i;
	const char *errstring;

	memset(clp, 0x0, sizeof(*clp));
	dk_getdefaultlabel(dksc, lp);
	errstring = readdisklabel(DKLABELDEV(dev), dkd->d_strategy,
	    dksc->sc_dkdev.dk_label, dksc->sc_dkdev.dk_cpulabel);
	if (errstring) {
		dk_makedisklabel(dksc);
		if (dksc->sc_flags & DKF_WARNLABEL)
			printf("%s: %s\n", dksc->sc_xname, errstring);
		return;
	}

	if ((dksc->sc_flags & DKF_LABELSANITY) == 0)
		return;

	/* Sanity check */
	if (lp->d_secperunit > dg->dg_secperunit)
		printf("WARNING: %s: total sector size in disklabel (%ju) "
		    "!= the size of %s (%ju)\n", dksc->sc_xname,
		    (uintmax_t)lp->d_secperunit, dksc->sc_xname,
		    (uintmax_t)dg->dg_secperunit);
	else if (lp->d_secperunit < UINT32_MAX &&
	    lp->d_secperunit < dg->dg_secperunit)
		printf("%s: %ju trailing sectors not covered by disklabel\n",
		    dksc->sc_xname,
		    (uintmax_t)dg->dg_secperunit - lp->d_secperunit);

	for (i = 0; i < lp->d_npartitions; i++) {
		pp = &lp->d_partitions[i];
		if (pp->p_offset + pp->p_size > dg->dg_secperunit)
			printf("WARNING: %s: end of partition `%c' exceeds "
			    "the size of %s (%ju)\n", dksc->sc_xname,
			    'a' + i, dksc->sc_xname,
			    (uintmax_t)dg->dg_secperunit);
	}
}

/* ARGSUSED */
static void
dk_makedisklabel(struct dk_softc *dksc)
{
	struct disklabel *lp = dksc->sc_dkdev.dk_label;

	lp->d_partitions[RAW_PART].p_fstype = FS_BSDFFS;
	strlcpy(lp->d_packname, "default label", sizeof(lp->d_packname));
	lp->d_checksum = dkcksum(lp);
}

/* This function is taken from ccd.c:1.76  --rcd */

/*
 * XXX this function looks too generic for dksubr.c, shouldn't we
 * put it somewhere better?
 */

/*
 * Lookup the provided name in the filesystem.  If the file exists,
 * is a valid block device, and isn't being used by anyone else,
 * set *vpp to the file's vnode.
 */
int
dk_lookup(struct pathbuf *pb, struct lwp *l, struct vnode **vpp)
{
	struct nameidata nd;
	struct vnode *vp;
	int error;

	if (l == NULL)
		return ESRCH;	/* Is ESRCH the best choice? */

	NDINIT(&nd, LOOKUP, FOLLOW, pb);
	if ((error = vn_open(&nd, FREAD | FWRITE, 0)) != 0) {
		DPRINTF((DKDB_FOLLOW|DKDB_INIT),
		    ("%s: vn_open error = %d\n", __func__, error));
		return error;
	}

	vp = nd.ni_vp;
	if (vp->v_type != VBLK) {
		error = ENOTBLK;
		goto out;
	}

	/* Reopen as anonymous vnode to protect against forced unmount. */
	if ((error = bdevvp(vp->v_rdev, vpp)) != 0)
		goto out;
	VOP_UNLOCK(vp);
	if ((error = vn_close(vp, FREAD | FWRITE, l->l_cred)) != 0) {
		vrele(*vpp);
		return error;
	}
	if ((error = VOP_OPEN(*vpp, FREAD | FWRITE, l->l_cred)) != 0) {
		vrele(*vpp);
		return error;
	}
	mutex_enter((*vpp)->v_interlock);
	(*vpp)->v_writecount++;
	mutex_exit((*vpp)->v_interlock);

	IFDEBUG(DKDB_VNODE, vprint("dk_lookup: vnode info", *vpp));

	return 0;
out:
	VOP_UNLOCK(vp);
	(void) vn_close(vp, FREAD | FWRITE, l->l_cred);
	return error;
}

MODULE(MODULE_CLASS_MISC, dk_subr, NULL);

static int
dk_subr_modcmd(modcmd_t cmd, void *arg)
{
	switch (cmd) {
	case MODULE_CMD_INIT:
	case MODULE_CMD_FINI:
		return 0;
	case MODULE_CMD_STAT:
	case MODULE_CMD_AUTOUNLOAD:
	default:
		return ENOTTY;
	}
}
