/*	$NetBSD: dk.c,v 1.64.2.3 2013/06/23 06:20:16 tls Exp $	*/
2
3 /*-
4 * Copyright (c) 2004, 2005, 2006, 2007 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: dk.c,v 1.64.2.3 2013/06/23 06:20:16 tls Exp $");
34
35 #ifdef _KERNEL_OPT
36 #include "opt_dkwedge.h"
37 #endif
38
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/proc.h>
42 #include <sys/errno.h>
43 #include <sys/pool.h>
44 #include <sys/ioctl.h>
45 #include <sys/disklabel.h>
46 #include <sys/disk.h>
47 #include <sys/fcntl.h>
48 #include <sys/buf.h>
49 #include <sys/bufq.h>
50 #include <sys/vnode.h>
51 #include <sys/stat.h>
52 #include <sys/conf.h>
53 #include <sys/callout.h>
54 #include <sys/kernel.h>
55 #include <sys/malloc.h>
56 #include <sys/device.h>
57 #include <sys/kauth.h>
58
59 #include <miscfs/specfs/specdev.h>
60
61 MALLOC_DEFINE(M_DKWEDGE, "dkwedge", "Disk wedge structures");
62
/*
 * Life-cycle states of a wedge.  A wedge is created LARVAL, becomes
 * RUNNING once its pseudo-device has attached, is marked DYING when a
 * detach begins, and is set DEAD just before its softc is freed.
 */
typedef enum {
	DKW_STATE_LARVAL	= 0,
	DKW_STATE_RUNNING	= 1,
	DKW_STATE_DYING		= 2,
	DKW_STATE_DEAD		= 666
} dkwedge_state_t;

/*
 * Per-wedge software state.  A wedge is a pseudo-disk that maps a
 * contiguous LBA range of a parent disk.  Fields are protected by a
 * mix of the parent's dk_openlock/dk_rawlock, the global dkwedges_lock,
 * and splbio (see individual comments).
 */
struct dkwedge_softc {
	device_t	sc_dev;	/* pointer to our pseudo-device */
	struct cfdata	sc_cfdata;	/* our cfdata structure */
	uint8_t		sc_wname[128];	/* wedge name (Unicode, UTF-8) */

	dkwedge_state_t sc_state;	/* state this wedge is in */

	struct disk	*sc_parent;	/* parent disk */
	daddr_t		sc_offset;	/* LBA offset of wedge in parent */
	uint64_t	sc_size;	/* size of wedge in blocks */
	char		sc_ptype[32];	/* partition type */
	dev_t		sc_pdev;	/* cached parent's dev_t */
					/* link on parent's wedge list */
	LIST_ENTRY(dkwedge_softc) sc_plink;

	struct disk	sc_dk;		/* our own disk structure */
	struct bufq_state *sc_bufq;	/* buffer queue */
	struct callout	sc_restart_ch;	/* callout to restart I/O */

	u_int		sc_iopend;	/* I/Os pending */
	int		sc_flags;	/* flags (splbio) */
};

/* sc_flags: someone is sleeping in dkwedge_wait_drain() for sc_iopend==0 */
#define	DK_F_WAIT_DRAIN		0x0001	/* waiting for I/O to drain */
94
/* Internal I/O path helpers. */
static void	dkstart(struct dkwedge_softc *);
static void	dkiodone(struct buf *);
static void	dkrestart(void *);
static void	dkminphys(struct buf *);

static int	dklastclose(struct dkwedge_softc *);
static int	dkwedge_detach(device_t, int);

/* Character/block device entry points (see dk_bdevsw/dk_cdevsw below). */
static dev_type_open(dkopen);
static dev_type_close(dkclose);
static dev_type_read(dkread);
static dev_type_write(dkwrite);
static dev_type_ioctl(dkioctl);
static dev_type_strategy(dkstrategy);
static dev_type_dump(dkdump);
static dev_type_size(dksize);

/* Block device switch for wedges. */
const struct bdevsw dk_bdevsw = {
	dkopen, dkclose, dkstrategy, dkioctl, dkdump, dksize, D_DISK
};

/* Character device switch for wedges. */
const struct cdevsw dk_cdevsw = {
	dkopen, dkclose, dkread, dkwrite, dkioctl,
	    nostop, notty, nopoll, nommap, nokqfilter, D_DISK
};

/* Disk driver hooks used by the generic disk layer. */
const struct dkdriver dk_dkdriver = { dkstrategy, dkminphys };

/*
 * Global table of all wedges, indexed by pseudo-device unit number.
 * dkwedges/ndkwedges are protected by dkwedges_lock.
 */
static struct dkwedge_softc **dkwedges;
static u_int ndkwedges;
static krwlock_t dkwedges_lock;

/* Registered partition-map discovery methods, sorted by priority. */
static LIST_HEAD(, dkwedge_discovery_method) dkwedge_discovery_methods;
static krwlock_t dkwedge_discovery_methods_lock;
129
130 /*
131 * dkwedge_match:
132 *
133 * Autoconfiguration match function for pseudo-device glue.
134 */
135 static int
136 dkwedge_match(device_t parent, cfdata_t match,
137 void *aux)
138 {
139
140 /* Pseudo-device; always present. */
141 return (1);
142 }
143
144 /*
145 * dkwedge_attach:
146 *
147 * Autoconfiguration attach function for pseudo-device glue.
148 */
149 static void
150 dkwedge_attach(device_t parent, device_t self,
151 void *aux)
152 {
153
154 if (!pmf_device_register(self, NULL, NULL))
155 aprint_error_dev(self, "couldn't establish power handler\n");
156 }
157
/* Autoconfiguration glue: driver + attachment for the "dk" pseudo-device. */
CFDRIVER_DECL(dk, DV_DISK, NULL);
CFATTACH_DECL3_NEW(dk, 0,
    dkwedge_match, dkwedge_attach, dkwedge_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);
162
163 /*
164 * dkwedge_wait_drain:
165 *
166 * Wait for I/O on the wedge to drain.
167 * NOTE: Must be called at splbio()!
168 */
169 static void
170 dkwedge_wait_drain(struct dkwedge_softc *sc)
171 {
172
173 while (sc->sc_iopend != 0) {
174 sc->sc_flags |= DK_F_WAIT_DRAIN;
175 (void) tsleep(&sc->sc_iopend, PRIBIO, "dkdrn", 0);
176 }
177 }
178
179 /*
180 * dkwedge_compute_pdev:
181 *
182 * Compute the parent disk's dev_t.
183 */
184 static int
185 dkwedge_compute_pdev(const char *pname, dev_t *pdevp)
186 {
187 const char *name, *cp;
188 devmajor_t pmaj;
189 int punit;
190 char devname[16];
191
192 name = pname;
193 if ((pmaj = devsw_name2blk(name, devname, sizeof(devname))) == -1)
194 return (ENODEV);
195
196 name += strlen(devname);
197 for (cp = name, punit = 0; *cp >= '0' && *cp <= '9'; cp++)
198 punit = (punit * 10) + (*cp - '0');
199 if (cp == name) {
200 /* Invalid parent disk name. */
201 return (ENODEV);
202 }
203
204 *pdevp = MAKEDISKDEV(pmaj, punit, RAW_PART);
205
206 return (0);
207 }
208
209 /*
210 * dkwedge_array_expand:
211 *
212 * Expand the dkwedges array.
213 */
214 static void
215 dkwedge_array_expand(void)
216 {
217 int newcnt = ndkwedges + 16;
218 struct dkwedge_softc **newarray, **oldarray;
219
220 newarray = malloc(newcnt * sizeof(*newarray), M_DKWEDGE,
221 M_WAITOK|M_ZERO);
222 if ((oldarray = dkwedges) != NULL)
223 memcpy(newarray, dkwedges, ndkwedges * sizeof(*newarray));
224 dkwedges = newarray;
225 ndkwedges = newcnt;
226 if (oldarray != NULL)
227 free(oldarray, M_DKWEDGE);
228 }
229
/*
 * dkgetproperties:
 *
 *	Fabricate disk geometry for the wedge and publish it via
 *	disk_set_info().  The sectors/tracks values are made up
 *	(32 x 64), as wedges have no physical geometry of their own.
 */
static void
dkgetproperties(struct disk *disk, struct dkwedge_info *dkw)
{
	struct disk_geom *dg = &disk->dk_geom;

	memset(dg, 0, sizeof(*dg));

	dg->dg_secperunit = dkw->dkw_size >> disk->dk_blkshift;
	dg->dg_secsize = DEV_BSIZE << disk->dk_blkshift;
	dg->dg_nsectors = 32;
	dg->dg_ntracks = 64;
	/* XXX: why is that dkw->dkw_size instead of secperunit?!?! */
	dg->dg_ncylinders = dkw->dkw_size / (dg->dg_nsectors * dg->dg_ntracks);

	disk_set_info(NULL, disk, "ESDI");
}
246
/*
 * dkwedge_add:		[exported function]
 *
 *	Add a disk wedge based on the provided information.
 *
 *	The incoming dkw_devname[] is ignored, instead being
 *	filled in and returned to the caller.
 *
 *	Returns 0 on success; ENODEV if the parent disk is unknown,
 *	EINVAL for a negative offset or an overlap with an existing
 *	wedge, EBUSY if a non-raw partition of the parent is open,
 *	EEXIST for a duplicate wedge name, ENOMEM if the pseudo-device
 *	cannot be attached.
 */
int
dkwedge_add(struct dkwedge_info *dkw)
{
	struct dkwedge_softc *sc, *lsc;
	struct disk *pdk;
	u_int unit;
	int error;
	dev_t pdev;

	/* Look up the parent disk; it must already be attached. */
	dkw->dkw_parent[sizeof(dkw->dkw_parent) - 1] = '\0';
	pdk = disk_find(dkw->dkw_parent);
	if (pdk == NULL)
		return (ENODEV);

	error = dkwedge_compute_pdev(pdk->dk_name, &pdev);
	if (error)
		return (error);

	if (dkw->dkw_offset < 0)
		return (EINVAL);

	/* Allocate and initialize the larval softc. */
	sc = malloc(sizeof(*sc), M_DKWEDGE, M_WAITOK|M_ZERO);
	sc->sc_state = DKW_STATE_LARVAL;
	sc->sc_parent = pdk;
	sc->sc_pdev = pdev;
	sc->sc_offset = dkw->dkw_offset;
	sc->sc_size = dkw->dkw_size;

	/* Copy the (possibly unterminated) names and force NUL termination. */
	memcpy(sc->sc_wname, dkw->dkw_wname, sizeof(sc->sc_wname));
	sc->sc_wname[sizeof(sc->sc_wname) - 1] = '\0';

	memcpy(sc->sc_ptype, dkw->dkw_ptype, sizeof(sc->sc_ptype));
	sc->sc_ptype[sizeof(sc->sc_ptype) - 1] = '\0';

	bufq_alloc(&sc->sc_bufq, "fcfs", 0);

	callout_init(&sc->sc_restart_ch, 0);
	callout_setfunc(&sc->sc_restart_ch, dkrestart, sc);

	/*
	 * Wedge will be added; increment the wedge count for the parent.
	 * Only allow this to happen if RAW_PART is the only thing open.
	 */
	mutex_enter(&pdk->dk_openlock);
	if (pdk->dk_openmask & ~(1 << RAW_PART))
		error = EBUSY;
	else {
		/* Check for wedge overlap. */
		LIST_FOREACH(lsc, &pdk->dk_wedges, sc_plink) {
			daddr_t lastblk = sc->sc_offset + sc->sc_size - 1;
			daddr_t llastblk = lsc->sc_offset + lsc->sc_size - 1;

			if (sc->sc_offset >= lsc->sc_offset &&
			    sc->sc_offset <= llastblk) {
				/* Overlaps the tail of the existing wedge. */
				break;
			}
			if (lastblk >= lsc->sc_offset &&
			    lastblk <= llastblk) {
				/* Overlaps the head of the existing wedge. */
				break;
			}
		}
		if (lsc != NULL)
			error = EINVAL;
		else {
			pdk->dk_nwedges++;
			LIST_INSERT_HEAD(&pdk->dk_wedges, sc, sc_plink);
		}
	}
	mutex_exit(&pdk->dk_openlock);
	if (error) {
		bufq_free(sc->sc_bufq);
		free(sc, M_DKWEDGE);
		return (error);
	}

	/* Fill in our cfdata for the pseudo-device glue. */
	sc->sc_cfdata.cf_name = dk_cd.cd_name;
	sc->sc_cfdata.cf_atname = dk_ca.ca_name;
	/* sc->sc_cfdata.cf_unit set below */
	sc->sc_cfdata.cf_fstate = FSTATE_STAR;

	/* Insert the larval wedge into the array. */
	rw_enter(&dkwedges_lock, RW_WRITER);
	for (error = 0;;) {
		struct dkwedge_softc **scpp;

		/*
		 * Check for a duplicate wname while searching for
		 * a slot.
		 */
		for (scpp = NULL, unit = 0; unit < ndkwedges; unit++) {
			if (dkwedges[unit] == NULL) {
				if (scpp == NULL) {
					/* Remember the first free slot. */
					scpp = &dkwedges[unit];
					sc->sc_cfdata.cf_unit = unit;
				}
			} else {
				/* XXX Unicode. */
				if (strcmp(dkwedges[unit]->sc_wname,
					   sc->sc_wname) == 0) {
					error = EEXIST;
					break;
				}
			}
		}
		if (error)
			break;
		KASSERT(unit == ndkwedges);
		if (scpp == NULL)
			/* No free slot; grow the array and rescan. */
			dkwedge_array_expand();
		else {
			KASSERT(scpp == &dkwedges[sc->sc_cfdata.cf_unit]);
			*scpp = sc;
			break;
		}
	}
	rw_exit(&dkwedges_lock);
	if (error) {
		/* Undo the linkage onto the parent's wedge list. */
		mutex_enter(&pdk->dk_openlock);
		pdk->dk_nwedges--;
		LIST_REMOVE(sc, sc_plink);
		mutex_exit(&pdk->dk_openlock);

		bufq_free(sc->sc_bufq);
		free(sc, M_DKWEDGE);
		return (error);
	}

	/*
	 * Now that we know the unit #, attach a pseudo-device for
	 * this wedge instance.  This will provide us with the
	 * device_t necessary for glue to other parts of the system.
	 *
	 * This should never fail, unless we're almost totally out of
	 * memory.
	 */
	if ((sc->sc_dev = config_attach_pseudo(&sc->sc_cfdata)) == NULL) {
		aprint_error("%s%u: unable to attach pseudo-device\n",
		    sc->sc_cfdata.cf_name, sc->sc_cfdata.cf_unit);

		rw_enter(&dkwedges_lock, RW_WRITER);
		dkwedges[sc->sc_cfdata.cf_unit] = NULL;
		rw_exit(&dkwedges_lock);

		mutex_enter(&pdk->dk_openlock);
		pdk->dk_nwedges--;
		LIST_REMOVE(sc, sc_plink);
		mutex_exit(&pdk->dk_openlock);

		bufq_free(sc->sc_bufq);
		free(sc, M_DKWEDGE);
		return (ENOMEM);
	}

	/* Return the devname to the caller. */
	strlcpy(dkw->dkw_devname, device_xname(sc->sc_dev),
	    sizeof(dkw->dkw_devname));

	/*
	 * XXX Really ought to make the disk_attach() and the changing
	 * of state to RUNNING atomic.
	 */

	disk_init(&sc->sc_dk, device_xname(sc->sc_dev), &dk_dkdriver);
	disk_blocksize(&sc->sc_dk, DEV_BSIZE << pdk->dk_blkshift);
	dkgetproperties(&sc->sc_dk, dkw);
	disk_attach(&sc->sc_dk);

	/* Disk wedge is ready for use! */
	sc->sc_state = DKW_STATE_RUNNING;

	/* Announce our arrival. */
	aprint_normal("%s at %s: %s\n", device_xname(sc->sc_dev), pdk->dk_name,
	    sc->sc_wname);	/* XXX Unicode */
	aprint_normal("%s: %"PRIu64" blocks at %"PRId64", type: %s\n",
	    device_xname(sc->sc_dev), sc->sc_size, sc->sc_offset, sc->sc_ptype);

	return (0);
}
436
437 /*
438 * dkwedge_find:
439 *
440 * Lookup a disk wedge based on the provided information.
441 * NOTE: We look up the wedge based on the wedge devname,
442 * not wname.
443 *
444 * Return NULL if the wedge is not found, otherwise return
445 * the wedge's softc. Assign the wedge's unit number to unitp
446 * if unitp is not NULL.
447 */
448 static struct dkwedge_softc *
449 dkwedge_find(struct dkwedge_info *dkw, u_int *unitp)
450 {
451 struct dkwedge_softc *sc = NULL;
452 u_int unit;
453
454 /* Find our softc. */
455 dkw->dkw_devname[sizeof(dkw->dkw_devname) - 1] = '\0';
456 rw_enter(&dkwedges_lock, RW_READER);
457 for (unit = 0; unit < ndkwedges; unit++) {
458 if ((sc = dkwedges[unit]) != NULL &&
459 strcmp(device_xname(sc->sc_dev), dkw->dkw_devname) == 0 &&
460 strcmp(sc->sc_parent->dk_name, dkw->dkw_parent) == 0) {
461 break;
462 }
463 }
464 rw_exit(&dkwedges_lock);
465 if (unit == ndkwedges)
466 return NULL;
467
468 if (unitp != NULL)
469 *unitp = unit;
470
471 return sc;
472 }
473
474 /*
475 * dkwedge_del: [exported function]
476 *
477 * Delete a disk wedge based on the provided information.
478 * NOTE: We look up the wedge based on the wedge devname,
479 * not wname.
480 */
481 int
482 dkwedge_del(struct dkwedge_info *dkw)
483 {
484 struct dkwedge_softc *sc = NULL;
485
486 /* Find our softc. */
487 if ((sc = dkwedge_find(dkw, NULL)) == NULL)
488 return (ESRCH);
489
490 return config_detach(sc->sc_dev, DETACH_FORCE | DETACH_QUIET);
491 }
492
/*
 * dkwedge_begindetach:
 *
 *	Decide whether the wedge may be detached.  If the wedge is not
 *	open, there is nothing to do.  If it is open and DETACH_FORCE
 *	was not requested, refuse with EBUSY.  Otherwise force the
 *	last-close path so the parent's raw vnode is released.
 */
static int
dkwedge_begindetach(struct dkwedge_softc *sc, int flags)
{
	struct disk *dk = &sc->sc_dk;
	int rc;

	rc = 0;
	mutex_enter(&dk->dk_openlock);
	if (dk->dk_openmask == 0)
		;	/* nothing to do */
	else if ((flags & DETACH_FORCE) == 0)
		rc = EBUSY;
	else {
		/* dklastclose() drops dk_rawlock for us. */
		mutex_enter(&sc->sc_parent->dk_rawlock);
		rc = dklastclose(sc); /* releases dk_rawlock */
	}
	mutex_exit(&dk->dk_openlock);

	return rc;
}
513
/*
 * dkwedge_detach:
 *
 *	Autoconfiguration detach function for pseudo-device glue.
 *	Marks the wedge DYING, drains its I/O queue, revokes any open
 *	vnodes, unhooks it from the parent disk and the global array,
 *	and finally frees the softc.
 */
static int
dkwedge_detach(device_t self, int flags)
{
	struct dkwedge_softc *sc = NULL;
	u_int unit;
	int bmaj, cmaj, rc, s;

	/* Locate our slot in the global array by device_t. */
	rw_enter(&dkwedges_lock, RW_WRITER);
	for (unit = 0; unit < ndkwedges; unit++) {
		if ((sc = dkwedges[unit]) != NULL && sc->sc_dev == self)
			break;
	}
	if (unit == ndkwedges)
		rc = ENXIO;
	else if ((rc = dkwedge_begindetach(sc, flags)) == 0) {
		/* Mark the wedge as dying. */
		sc->sc_state = DKW_STATE_DYING;
	}
	rw_exit(&dkwedges_lock);

	if (rc != 0)
		return rc;

	pmf_device_deregister(self);

	/* Locate the wedge major numbers. */
	bmaj = bdevsw_lookup_major(&dk_bdevsw);
	cmaj = cdevsw_lookup_major(&dk_cdevsw);

	/* Kill any pending restart. */
	callout_stop(&sc->sc_restart_ch);

	/*
	 * dkstart() will kill any queued buffers now that the
	 * state of the wedge is not RUNNING.  Once we've done
	 * that, wait for any other pending I/O to complete.
	 */
	s = splbio();
	dkstart(sc);
	dkwedge_wait_drain(sc);
	splx(s);

	/* Nuke the vnodes for any open instances. */
	vdevgone(bmaj, unit, unit, VBLK);
	vdevgone(cmaj, unit, unit, VCHR);

	/*
	 * Clean up the parent: drop our reference on the parent's raw
	 * vnode, closing it if we held the last reference.
	 */
	mutex_enter(&sc->sc_dk.dk_openlock);
	if (sc->sc_dk.dk_openmask) {
		mutex_enter(&sc->sc_parent->dk_rawlock);
		if (sc->sc_parent->dk_rawopens-- == 1) {
			KASSERT(sc->sc_parent->dk_rawvp != NULL);
			mutex_exit(&sc->sc_parent->dk_rawlock);
			(void) vn_close(sc->sc_parent->dk_rawvp, FREAD | FWRITE,
			    NOCRED);
			sc->sc_parent->dk_rawvp = NULL;
		} else
			mutex_exit(&sc->sc_parent->dk_rawlock);
		sc->sc_dk.dk_openmask = 0;
	}
	mutex_exit(&sc->sc_dk.dk_openlock);

	/* Announce our departure. */
	aprint_normal("%s at %s (%s) deleted\n", device_xname(sc->sc_dev),
	    sc->sc_parent->dk_name,
	    sc->sc_wname);	/* XXX Unicode */

	/* Unlink from the parent's wedge list. */
	mutex_enter(&sc->sc_parent->dk_openlock);
	sc->sc_parent->dk_nwedges--;
	LIST_REMOVE(sc, sc_plink);
	mutex_exit(&sc->sc_parent->dk_openlock);

	/* Delete our buffer queue. */
	bufq_free(sc->sc_bufq);

	/* Detach from the disk list. */
	disk_detach(&sc->sc_dk);
	disk_destroy(&sc->sc_dk);

	/* Poof. */
	rw_enter(&dkwedges_lock, RW_WRITER);
	dkwedges[unit] = NULL;
	sc->sc_state = DKW_STATE_DEAD;
	rw_exit(&dkwedges_lock);

	free(sc, M_DKWEDGE);

	return 0;
}
608
/*
 * dkwedge_delall:	[exported function]
 *
 *	Delete all of the wedges on the specified disk.  Used when
 *	a disk is being detached.  Loops until the parent's wedge
 *	list is empty, deleting the head wedge each iteration.
 */
void
dkwedge_delall(struct disk *pdk)
{
	struct dkwedge_info dkw;
	struct dkwedge_softc *sc;

	for (;;) {
		mutex_enter(&pdk->dk_openlock);
		if ((sc = LIST_FIRST(&pdk->dk_wedges)) == NULL) {
			KASSERT(pdk->dk_nwedges == 0);
			mutex_exit(&pdk->dk_openlock);
			return;
		}
		/* Snapshot the names under the lock, then delete unlocked. */
		strcpy(dkw.dkw_parent, pdk->dk_name);
		strlcpy(dkw.dkw_devname, device_xname(sc->sc_dev),
			sizeof(dkw.dkw_devname));
		mutex_exit(&pdk->dk_openlock);
		(void) dkwedge_del(&dkw);
	}
}
635
/*
 * dkwedge_list:	[exported function]
 *
 *	List all of the wedges on a particular disk, copying one
 *	struct dkwedge_info per RUNNING wedge into the caller's
 *	buffer via uiomove().  dkwl_ncopied is set to the number of
 *	entries copied and dkwl_nwedges to the total wedge count.
 *	The uio targets the address space of lwp l (must be curlwp).
 */
int
dkwedge_list(struct disk *pdk, struct dkwedge_list *dkwl, struct lwp *l)
{
	struct uio uio;
	struct iovec iov;
	struct dkwedge_softc *sc;
	struct dkwedge_info dkw;
	int error = 0;

	iov.iov_base = dkwl->dkwl_buf;
	iov.iov_len = dkwl->dkwl_bufsize;

	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = 0;
	uio.uio_resid = dkwl->dkwl_bufsize;
	uio.uio_rw = UIO_READ;
	KASSERT(l == curlwp);
	uio.uio_vmspace = l->l_proc->p_vmspace;

	dkwl->dkwl_ncopied = 0;

	mutex_enter(&pdk->dk_openlock);
	LIST_FOREACH(sc, &pdk->dk_wedges, sc_plink) {
		/* Stop once the caller's buffer cannot hold another entry. */
		if (uio.uio_resid < sizeof(dkw))
			break;

		/* Skip wedges that are not fully alive. */
		if (sc->sc_state != DKW_STATE_RUNNING)
			continue;

		strlcpy(dkw.dkw_devname, device_xname(sc->sc_dev),
			sizeof(dkw.dkw_devname));
		memcpy(dkw.dkw_wname, sc->sc_wname, sizeof(dkw.dkw_wname));
		dkw.dkw_wname[sizeof(dkw.dkw_wname) - 1] = '\0';
		strcpy(dkw.dkw_parent, sc->sc_parent->dk_name);
		dkw.dkw_offset = sc->sc_offset;
		dkw.dkw_size = sc->sc_size;
		strcpy(dkw.dkw_ptype, sc->sc_ptype);

		error = uiomove(&dkw, sizeof(dkw), &uio);
		if (error)
			break;
		dkwl->dkwl_ncopied++;
	}
	dkwl->dkwl_nwedges = pdk->dk_nwedges;
	mutex_exit(&pdk->dk_openlock);

	return (error);
}
692
693 device_t
694 dkwedge_find_by_wname(const char *wname)
695 {
696 device_t dv = NULL;
697 struct dkwedge_softc *sc;
698 int i;
699
700 rw_enter(&dkwedges_lock, RW_WRITER);
701 for (i = 0; i < ndkwedges; i++) {
702 if ((sc = dkwedges[i]) == NULL)
703 continue;
704 if (strcmp(sc->sc_wname, wname) == 0) {
705 if (dv != NULL) {
706 printf(
707 "WARNING: double match for wedge name %s "
708 "(%s, %s)\n", wname, device_xname(dv),
709 device_xname(sc->sc_dev));
710 continue;
711 }
712 dv = sc->sc_dev;
713 }
714 }
715 rw_exit(&dkwedges_lock);
716 return dv;
717 }
718
719 void
720 dkwedge_print_wnames(void)
721 {
722 struct dkwedge_softc *sc;
723 int i;
724
725 rw_enter(&dkwedges_lock, RW_WRITER);
726 for (i = 0; i < ndkwedges; i++) {
727 if ((sc = dkwedges[i]) == NULL)
728 continue;
729 printf(" wedge:%s", sc->sc_wname);
730 }
731 rw_exit(&dkwedges_lock);
732 }
733
/*
 * We need a dummy object to stuff into the dkwedge discovery method link
 * set to ensure that there is always at least one object in the set.
 * It is skipped explicitly in dkwedge_init().
 */
static struct dkwedge_discovery_method dummy_discovery_method;
__link_set_add_bss(dkwedge_methods, dummy_discovery_method);
740
/*
 * dkwedge_init:
 *
 *	Initialize the disk wedge subsystem: set up the global locks,
 *	register the "dk" cfdriver/cfattach, and build the discovery
 *	method list from the dkwedge_methods link set, sorted by
 *	ascending ddm_priority (lower value = tried first).
 */
void
dkwedge_init(void)
{
	__link_set_decl(dkwedge_methods, struct dkwedge_discovery_method);
	struct dkwedge_discovery_method * const *ddmp;
	struct dkwedge_discovery_method *lddm, *ddm;

	rw_init(&dkwedges_lock);
	rw_init(&dkwedge_discovery_methods_lock);

	if (config_cfdriver_attach(&dk_cd) != 0)
		panic("dkwedge: unable to attach cfdriver");
	if (config_cfattach_attach(dk_cd.cd_name, &dk_ca) != 0)
		panic("dkwedge: unable to attach cfattach");

	rw_enter(&dkwedge_discovery_methods_lock, RW_WRITER);

	LIST_INIT(&dkwedge_discovery_methods);

	__link_set_foreach(ddmp, dkwedge_methods) {
		ddm = *ddmp;
		/* Skip the placeholder that keeps the link set non-empty. */
		if (ddm == &dummy_discovery_method)
			continue;
		if (LIST_EMPTY(&dkwedge_discovery_methods)) {
			LIST_INSERT_HEAD(&dkwedge_discovery_methods,
					 ddm, ddm_list);
			continue;
		}
		/* Insertion sort by priority; duplicates are rejected. */
		LIST_FOREACH(lddm, &dkwedge_discovery_methods, ddm_list) {
			if (ddm->ddm_priority == lddm->ddm_priority) {
				aprint_error("dk-method-%s: method \"%s\" "
				    "already exists at priority %d\n",
				    ddm->ddm_name, lddm->ddm_name,
				    lddm->ddm_priority);
				/* Not inserted. */
				break;
			}
			if (ddm->ddm_priority < lddm->ddm_priority) {
				/* Higher priority; insert before. */
				LIST_INSERT_BEFORE(lddm, ddm, ddm_list);
				break;
			}
			if (LIST_NEXT(lddm, ddm_list) == NULL) {
				/* Last one; insert after. */
				KASSERT(lddm->ddm_priority < ddm->ddm_priority);
				LIST_INSERT_AFTER(lddm, ddm, ddm_list);
				break;
			}
		}
	}

	rw_exit(&dkwedge_discovery_methods_lock);
}
799
800 #ifdef DKWEDGE_AUTODISCOVER
801 int dkwedge_autodiscover = 1;
802 #else
803 int dkwedge_autodiscover = 0;
804 #endif
805
/*
 * dkwedge_discover:	[exported function]
 *
 *	Discover the wedges on a newly attached disk.  Opens the
 *	parent's raw partition read-only and runs each registered
 *	discovery method in priority order until one succeeds.
 *	A no-op unless dkwedge_autodiscover is enabled.
 */
void
dkwedge_discover(struct disk *pdk)
{
	struct dkwedge_discovery_method *ddm;
	struct vnode *vp;
	int error;
	dev_t pdev;

	/*
	 * Require people playing with wedges to enable this explicitly.
	 */
	if (dkwedge_autodiscover == 0)
		return;

	rw_enter(&dkwedge_discovery_methods_lock, RW_READER);

	error = dkwedge_compute_pdev(pdk->dk_name, &pdev);
	if (error) {
		aprint_error("%s: unable to compute pdev, error = %d\n",
		    pdk->dk_name, error);
		goto out;
	}

	error = bdevvp(pdev, &vp);
	if (error) {
		aprint_error("%s: unable to find vnode for pdev, error = %d\n",
		    pdk->dk_name, error);
		goto out;
	}

	error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	if (error) {
		aprint_error("%s: unable to lock vnode for pdev, error = %d\n",
		    pdk->dk_name, error);
		vrele(vp);
		goto out;
	}

	/* FSILENT: don't complain if the device has no media, etc. */
	error = VOP_OPEN(vp, FREAD | FSILENT, NOCRED);
	if (error) {
		aprint_error("%s: unable to open device, error = %d\n",
		    pdk->dk_name, error);
		vput(vp);
		goto out;
	}
	VOP_UNLOCK(vp);

	/*
	 * For each supported partition map type, look to see if
	 * this map type exists.  If so, parse it and add the
	 * corresponding wedges.
	 */
	LIST_FOREACH(ddm, &dkwedge_discovery_methods, ddm_list) {
		error = (*ddm->ddm_discover)(pdk, vp);
		if (error == 0) {
			/* Successfully created wedges; we're done. */
			break;
		}
	}

	error = vn_close(vp, FREAD, NOCRED);
	if (error) {
		aprint_error("%s: unable to close device, error = %d\n",
		    pdk->dk_name, error);
		/* We'll just assume the vnode has been cleaned up. */
	}
 out:
	rw_exit(&dkwedge_discovery_methods_lock);
}
880
881 /*
882 * dkwedge_read:
883 *
884 * Read some data from the specified disk, used for
885 * partition discovery.
886 */
887 int
888 dkwedge_read(struct disk *pdk, struct vnode *vp, daddr_t blkno,
889 void *tbuf, size_t len)
890 {
891 struct buf *bp;
892 int result;
893
894 bp = getiobuf(vp, true);
895
896 bp->b_dev = vp->v_rdev;
897 bp->b_blkno = blkno;
898 bp->b_bcount = len;
899 bp->b_resid = len;
900 bp->b_flags = B_READ;
901 bp->b_data = tbuf;
902 SET(bp->b_cflags, BC_BUSY); /* mark buffer busy */
903
904 VOP_STRATEGY(vp, bp);
905 result = biowait(bp);
906 putiobuf(bp);
907
908 return result;
909 }
910
/*
 * dkwedge_lookup:
 *
 *	Look up a dkwedge_softc based on the provided dev_t.
 *	The minor number is the unit index into dkwedges[].
 *	NOTE(review): dkwedges[]/ndkwedges are read here without
 *	taking dkwedges_lock — presumably safe because slots are
 *	only cleared under drain; confirm against callers.
 */
static struct dkwedge_softc *
dkwedge_lookup(dev_t dev)
{
	int unit = minor(dev);

	if (unit >= ndkwedges)
		return (NULL);

	KASSERT(dkwedges != NULL);

	return (dkwedges[unit]);
}
928
/*
 * dkopen:		[devsw entry point]
 *
 *	Open a wedge.  The first open of any wedge on a parent disk
 *	opens the parent's raw vnode (read/write); subsequent opens
 *	just bump the shared reference count dk_rawopens.
 */
static int
dkopen(dev_t dev, int flags, int fmt, struct lwp *l)
{
	struct dkwedge_softc *sc = dkwedge_lookup(dev);
	struct vnode *vp;
	int error = 0;

	if (sc == NULL)
		return (ENODEV);
	if (sc->sc_state != DKW_STATE_RUNNING)
		return (ENXIO);

	/*
	 * We go through a complicated little dance to only open the parent
	 * vnode once per wedge, no matter how many times the wedge is
	 * opened.  The reason?  We see one dkopen() per open call, but
	 * only dkclose() on the last close.
	 */
	mutex_enter(&sc->sc_dk.dk_openlock);
	mutex_enter(&sc->sc_parent->dk_rawlock);
	if (sc->sc_dk.dk_openmask == 0) {
		/* First open of this wedge. */
		if (sc->sc_parent->dk_rawopens == 0) {
			/* First open of any wedge on this parent. */
			KASSERT(sc->sc_parent->dk_rawvp == NULL);
			error = bdevvp(sc->sc_pdev, &vp);
			if (error)
				goto popen_fail;
			error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
			if (error) {
				vrele(vp);
				goto popen_fail;
			}
			error = VOP_OPEN(vp, FREAD | FWRITE, NOCRED);
			if (error) {
				vput(vp);
				goto popen_fail;
			}
			/* VOP_OPEN() doesn't do this for us. */
			mutex_enter(vp->v_interlock);
			vp->v_writecount++;
			mutex_exit(vp->v_interlock);
			VOP_UNLOCK(vp);
			sc->sc_parent->dk_rawvp = vp;
		}
		sc->sc_parent->dk_rawopens++;
	}
	/* Record this open in the per-format open masks. */
	if (fmt == S_IFCHR)
		sc->sc_dk.dk_copenmask |= 1;
	else
		sc->sc_dk.dk_bopenmask |= 1;
	sc->sc_dk.dk_openmask =
	    sc->sc_dk.dk_copenmask | sc->sc_dk.dk_bopenmask;

 popen_fail:
	mutex_exit(&sc->sc_parent->dk_rawlock);
	mutex_exit(&sc->sc_dk.dk_openlock);
	return (error);
}
991
/*
 * dklastclose:
 *
 *	Release this wedge's reference on the parent's raw vnode,
 *	closing the vnode if it was the last reference.
 *
 *	Caller must hold sc->sc_dk.dk_openlock and sc->sc_parent->dk_rawlock.
 *	dk_rawlock is ALWAYS released before returning.
 */
static int
dklastclose(struct dkwedge_softc *sc)
{
	int error = 0;

	if (sc->sc_parent->dk_rawopens-- == 1) {
		/* Last reference: close the parent's raw vnode. */
		KASSERT(sc->sc_parent->dk_rawvp != NULL);
		mutex_exit(&sc->sc_parent->dk_rawlock);
		error = vn_close(sc->sc_parent->dk_rawvp,
		    FREAD | FWRITE, NOCRED);
		sc->sc_parent->dk_rawvp = NULL;
	} else
		mutex_exit(&sc->sc_parent->dk_rawlock);
	return error;
}
1010
/*
 * dkclose:		[devsw entry point]
 *
 *	Close a wedge.  Called only on the LAST close of each format
 *	(char/block); when both masks go to zero we drop our reference
 *	on the parent's raw vnode via dklastclose().
 */
static int
dkclose(dev_t dev, int flags, int fmt, struct lwp *l)
{
	struct dkwedge_softc *sc = dkwedge_lookup(dev);
	int error = 0;

	if (sc == NULL)
		return (ENODEV);
	if (sc->sc_state != DKW_STATE_RUNNING)
		return (ENXIO);

	KASSERT(sc->sc_dk.dk_openmask != 0);

	mutex_enter(&sc->sc_dk.dk_openlock);
	mutex_enter(&sc->sc_parent->dk_rawlock);

	if (fmt == S_IFCHR)
		sc->sc_dk.dk_copenmask &= ~1;
	else
		sc->sc_dk.dk_bopenmask &= ~1;
	sc->sc_dk.dk_openmask =
	    sc->sc_dk.dk_copenmask | sc->sc_dk.dk_bopenmask;

	if (sc->sc_dk.dk_openmask == 0)
		error = dklastclose(sc); /* releases dk_rawlock */
	else
		mutex_exit(&sc->sc_parent->dk_rawlock);

	mutex_exit(&sc->sc_dk.dk_openlock);

	return (error);
}
1048
/*
 * dkstrategy:		[devsw entry point]
 *
 *	Perform I/O based on the wedge I/O strategy.  Validates the
 *	transfer against the wedge's extent, translates the block
 *	number into the parent's raw LBA space, enqueues the buffer,
 *	and kicks the queue.  Failed/empty requests are completed
 *	immediately via biodone().
 */
static void
dkstrategy(struct buf *bp)
{
	struct dkwedge_softc *sc = dkwedge_lookup(bp->b_dev);
	uint64_t p_size, p_offset;
	int s;

	if (sc == NULL) {
		bp->b_error = ENODEV;
		goto done;
	}

	if (sc->sc_state != DKW_STATE_RUNNING ||
	    sc->sc_parent->dk_rawvp == NULL) {
		bp->b_error = ENXIO;
		goto done;
	}

	/* If it's an empty transfer, wake up the top half now. */
	if (bp->b_bcount == 0)
		goto done;

	/* Wedge offset/size expressed in DEV_BSIZE units of the parent. */
	p_offset = sc->sc_offset << sc->sc_parent->dk_blkshift;
	p_size = sc->sc_size << sc->sc_parent->dk_blkshift;

	/* Make sure it's in-range. */
	if (bounds_check_with_mediasize(bp, DEV_BSIZE, p_size) <= 0)
		goto done;

	/* Translate it to the parent's raw LBA. */
	bp->b_rawblkno = bp->b_blkno + p_offset;

	/* Place it in the queue and start I/O on the unit. */
	s = splbio();
	sc->sc_iopend++;
	bufq_put(sc->sc_bufq, bp);
	dkstart(sc);
	splx(s);
	return;

 done:
	bp->b_resid = bp->b_bcount;
	biodone(bp);
}
1098
1099 /*
1100 * dkstart:
1101 *
1102 * Start I/O that has been enqueued on the wedge.
1103 * NOTE: Must be called at splbio()!
1104 */
1105 static void
1106 dkstart(struct dkwedge_softc *sc)
1107 {
1108 struct vnode *vp;
1109 struct buf *bp, *nbp;
1110
1111 /* Do as much work as has been enqueued. */
1112 while ((bp = bufq_peek(sc->sc_bufq)) != NULL) {
1113 if (sc->sc_state != DKW_STATE_RUNNING) {
1114 (void) bufq_get(sc->sc_bufq);
1115 if (sc->sc_iopend-- == 1 &&
1116 (sc->sc_flags & DK_F_WAIT_DRAIN) != 0) {
1117 sc->sc_flags &= ~DK_F_WAIT_DRAIN;
1118 wakeup(&sc->sc_iopend);
1119 }
1120 bp->b_error = ENXIO;
1121 bp->b_resid = bp->b_bcount;
1122 biodone(bp);
1123 }
1124
1125 /* Instrumentation. */
1126 disk_busy(&sc->sc_dk);
1127
1128 nbp = getiobuf(sc->sc_parent->dk_rawvp, false);
1129 if (nbp == NULL) {
1130 /*
1131 * No resources to run this request; leave the
1132 * buffer queued up, and schedule a timer to
1133 * restart the queue in 1/2 a second.
1134 */
1135 disk_unbusy(&sc->sc_dk, 0, bp->b_flags & B_READ);
1136 callout_schedule(&sc->sc_restart_ch, hz / 2);
1137 return;
1138 }
1139
1140 (void) bufq_get(sc->sc_bufq);
1141
1142 nbp->b_data = bp->b_data;
1143 nbp->b_flags = bp->b_flags;
1144 nbp->b_oflags = bp->b_oflags;
1145 nbp->b_cflags = bp->b_cflags;
1146 nbp->b_iodone = dkiodone;
1147 nbp->b_proc = bp->b_proc;
1148 nbp->b_blkno = bp->b_rawblkno;
1149 nbp->b_dev = sc->sc_parent->dk_rawvp->v_rdev;
1150 nbp->b_bcount = bp->b_bcount;
1151 nbp->b_private = bp;
1152 BIO_COPYPRIO(nbp, bp);
1153
1154 vp = nbp->b_vp;
1155 if ((nbp->b_flags & B_READ) == 0) {
1156 mutex_enter(vp->v_interlock);
1157 vp->v_numoutput++;
1158 mutex_exit(vp->v_interlock);
1159 }
1160 VOP_STRATEGY(vp, nbp);
1161 }
1162 }
1163
/*
 * dkiodone:
 *
 *	I/O to a wedge has completed; alert the top half.
 *	Propagates error/residual from the cloned buffer back to the
 *	original, wakes any drain waiter, completes the original
 *	request, and restarts the queue.
 */
static void
dkiodone(struct buf *bp)
{
	struct buf *obp = bp->b_private;	/* original request */
	struct dkwedge_softc *sc = dkwedge_lookup(obp->b_dev);

	int s = splbio();

	if (bp->b_error != 0)
		obp->b_error = bp->b_error;
	obp->b_resid = bp->b_resid;
	putiobuf(bp);

	/* Last pending I/O gone: wake dkwedge_wait_drain() if waiting. */
	if (sc->sc_iopend-- == 1 && (sc->sc_flags & DK_F_WAIT_DRAIN) != 0) {
		sc->sc_flags &= ~DK_F_WAIT_DRAIN;
		wakeup(&sc->sc_iopend);
	}

	disk_unbusy(&sc->sc_dk, obp->b_bcount - obp->b_resid,
	    obp->b_flags & B_READ);

	biodone(obp);

	/* Kick the queue in case there is more work we can do. */
	dkstart(sc);
	splx(s);
}
1196
/*
 * dkrestart:
 *
 *	Restart the work queue after it was stalled due to
 *	a resource shortage.  Invoked via a callout.
 */
static void
dkrestart(void *v)
{
	struct dkwedge_softc *sc = v;
	int s = splbio();

	dkstart(sc);
	splx(s);
}
1213
1214 /*
1215 * dkminphys:
1216 *
1217 * Call parent's minphys function.
1218 */
1219 static void
1220 dkminphys(struct buf *bp)
1221 {
1222 struct dkwedge_softc *sc = dkwedge_lookup(bp->b_dev);
1223 dev_t dev;
1224
1225 dev = bp->b_dev;
1226 bp->b_dev = sc->sc_pdev;
1227 (*sc->sc_parent->dk_driver->d_minphys)(bp);
1228 bp->b_dev = dev;
1229 }
1230
1231 /*
1232 * dkread: [devsw entry point]
1233 *
1234 * Read from a wedge.
1235 */
1236 static int
1237 dkread(dev_t dev, struct uio *uio, int flags)
1238 {
1239 struct dkwedge_softc *sc = dkwedge_lookup(dev);
1240
1241 if (sc == NULL)
1242 return (ENODEV);
1243 if (sc->sc_state != DKW_STATE_RUNNING)
1244 return (ENXIO);
1245
1246 return (physio(dkstrategy, NULL, dev, B_READ, dkminphys, uio));
1247 }
1248
1249 /*
1250 * dkwrite: [devsw entry point]
1251 *
1252 * Write to a wedge.
1253 */
1254 static int
1255 dkwrite(dev_t dev, struct uio *uio, int flags)
1256 {
1257 struct dkwedge_softc *sc = dkwedge_lookup(dev);
1258
1259 if (sc == NULL)
1260 return (ENODEV);
1261 if (sc->sc_state != DKW_STATE_RUNNING)
1262 return (ENXIO);
1263
1264 return (physio(dkstrategy, NULL, dev, B_WRITE, dkminphys, uio));
1265 }
1266
1267 /*
1268 * dkioctl: [devsw entry point]
1269 *
1270 * Perform an ioctl request on a wedge.
1271 */
1272 static int
1273 dkioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
1274 {
1275 struct dkwedge_softc *sc = dkwedge_lookup(dev);
1276 int error = 0;
1277
1278 if (sc == NULL)
1279 return (ENODEV);
1280 if (sc->sc_state != DKW_STATE_RUNNING)
1281 return (ENXIO);
1282 if (sc->sc_parent->dk_rawvp == NULL)
1283 return (ENXIO);
1284
1285 error = disk_ioctl(&sc->sc_dk, cmd, data, flag, l);
1286 if (error != EPASSTHROUGH)
1287 return (error);
1288
1289 error = 0;
1290
1291 switch (cmd) {
1292 case DIOCCACHESYNC:
1293 /*
1294 * XXX Do we really need to care about having a writable
1295 * file descriptor here?
1296 */
1297 if ((flag & FWRITE) == 0)
1298 error = EBADF;
1299 else
1300 error = VOP_IOCTL(sc->sc_parent->dk_rawvp,
1301 cmd, data, flag,
1302 l != NULL ? l->l_cred : NOCRED);
1303 break;
1304 case DIOCGWEDGEINFO:
1305 {
1306 struct dkwedge_info *dkw = (void *) data;
1307
1308 strlcpy(dkw->dkw_devname, device_xname(sc->sc_dev),
1309 sizeof(dkw->dkw_devname));
1310 memcpy(dkw->dkw_wname, sc->sc_wname, sizeof(dkw->dkw_wname));
1311 dkw->dkw_wname[sizeof(dkw->dkw_wname) - 1] = '\0';
1312 strcpy(dkw->dkw_parent, sc->sc_parent->dk_name);
1313 dkw->dkw_offset = sc->sc_offset;
1314 dkw->dkw_size = sc->sc_size;
1315 strcpy(dkw->dkw_ptype, sc->sc_ptype);
1316
1317 break;
1318 }
1319
1320 default:
1321 error = ENOTTY;
1322 }
1323
1324 return (error);
1325 }
1326
1327 /*
1328 * dksize: [devsw entry point]
1329 *
1330 * Query the size of a wedge for the purpose of performing a dump
1331 * or for swapping to.
1332 */
1333 static int
1334 dksize(dev_t dev)
1335 {
1336 struct dkwedge_softc *sc = dkwedge_lookup(dev);
1337 int rv = -1;
1338
1339 if (sc == NULL)
1340 return (-1);
1341 if (sc->sc_state != DKW_STATE_RUNNING)
1342 return (-1);
1343
1344 mutex_enter(&sc->sc_dk.dk_openlock);
1345 mutex_enter(&sc->sc_parent->dk_rawlock);
1346
1347 /* Our content type is static, no need to open the device. */
1348
1349 if (strcmp(sc->sc_ptype, DKW_PTYPE_SWAP) == 0) {
1350 /* Saturate if we are larger than INT_MAX. */
1351 if (sc->sc_size > INT_MAX)
1352 rv = INT_MAX;
1353 else
1354 rv = (int) sc->sc_size;
1355 }
1356
1357 mutex_exit(&sc->sc_parent->dk_rawlock);
1358 mutex_exit(&sc->sc_dk.dk_openlock);
1359
1360 return (rv);
1361 }
1362
1363 /*
1364 * dkdump: [devsw entry point]
1365 *
1366 * Perform a crash dump to a wedge.
1367 */
1368 static int
1369 dkdump(dev_t dev, daddr_t blkno, void *va, size_t size)
1370 {
1371 struct dkwedge_softc *sc = dkwedge_lookup(dev);
1372 const struct bdevsw *bdev;
1373 int rv = 0;
1374
1375 if (sc == NULL)
1376 return (ENODEV);
1377 if (sc->sc_state != DKW_STATE_RUNNING)
1378 return (ENXIO);
1379
1380 mutex_enter(&sc->sc_dk.dk_openlock);
1381 mutex_enter(&sc->sc_parent->dk_rawlock);
1382
1383 /* Our content type is static, no need to open the device. */
1384
1385 if (strcmp(sc->sc_ptype, DKW_PTYPE_SWAP) != 0) {
1386 rv = ENXIO;
1387 goto out;
1388 }
1389 if (size % DEV_BSIZE != 0) {
1390 rv = EINVAL;
1391 goto out;
1392 }
1393 if (blkno + size / DEV_BSIZE > sc->sc_size) {
1394 printf("%s: blkno (%" PRIu64 ") + size / DEV_BSIZE (%zu) > "
1395 "sc->sc_size (%" PRIu64 ")\n", __func__, blkno,
1396 size / DEV_BSIZE, sc->sc_size);
1397 rv = EINVAL;
1398 goto out;
1399 }
1400
1401 bdev = bdevsw_lookup(sc->sc_pdev);
1402 rv = (*bdev->d_dump)(sc->sc_pdev, blkno + sc->sc_offset, va, size);
1403
1404 out:
1405 mutex_exit(&sc->sc_parent->dk_rawlock);
1406 mutex_exit(&sc->sc_dk.dk_openlock);
1407
1408 return rv;
1409 }
1410
1411 /*
1412 * config glue
1413 */
1414
1415 /*
1416 * dkwedge_find_partition
1417 *
1418 * Find wedge corresponding to the specified parent name
1419 * and offset/length.
1420 */
1421 device_t
1422 dkwedge_find_partition(device_t parent, daddr_t startblk, uint64_t nblks)
1423 {
1424 struct dkwedge_softc *sc;
1425 int i;
1426 device_t wedge = NULL;
1427
1428 rw_enter(&dkwedges_lock, RW_READER);
1429 for (i = 0; i < ndkwedges; i++) {
1430 if ((sc = dkwedges[i]) == NULL)
1431 continue;
1432 if (strcmp(sc->sc_parent->dk_name, device_xname(parent)) == 0 &&
1433 sc->sc_offset == startblk &&
1434 sc->sc_size == nblks) {
1435 if (wedge) {
1436 printf("WARNING: double match for boot wedge "
1437 "(%s, %s)\n",
1438 device_xname(wedge),
1439 device_xname(sc->sc_dev));
1440 continue;
1441 }
1442 wedge = sc->sc_dev;
1443 }
1444 }
1445 rw_exit(&dkwedges_lock);
1446
1447 return wedge;
1448 }
1449
1450