1 /* $NetBSD: sd.c,v 1.232 2004/12/07 23:16:40 thorpej Exp $ */
2
3 /*-
4 * Copyright (c) 1998, 2003, 2004 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Charles M. Hannum.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*
40 * Originally written by Julian Elischer (julian (at) dialix.oz.au)
41 * for TRW Financial Systems for use under the MACH(2.5) operating system.
42 *
43 * TRW Financial Systems, in accordance with their agreement with Carnegie
44 * Mellon University, makes this software available to CMU to distribute
45 * or use in any manner that they see fit as long as this message is kept with
46 * the software. For this reason TFS also grants any other persons or
47 * organisations permission to use or modify this software.
48 *
49 * TFS supplies this software to be publicly redistributed
50 * on the understanding that TFS is not responsible for the correct
51 * functioning of this software in any circumstances.
52 *
53 * Ported to run under 386BSD by Julian Elischer (julian (at) dialix.oz.au) Sept 1992
54 */
55
56 #include <sys/cdefs.h>
57 __KERNEL_RCSID(0, "$NetBSD: sd.c,v 1.232 2004/12/07 23:16:40 thorpej Exp $");
58
59 #include "opt_scsi.h"
60 #include "rnd.h"
61
62 #include <sys/param.h>
63 #include <sys/systm.h>
64 #include <sys/kernel.h>
65 #include <sys/file.h>
66 #include <sys/stat.h>
67 #include <sys/ioctl.h>
68 #include <sys/scsiio.h>
69 #include <sys/buf.h>
70 #include <sys/bufq.h>
71 #include <sys/uio.h>
72 #include <sys/malloc.h>
73 #include <sys/errno.h>
74 #include <sys/device.h>
75 #include <sys/disklabel.h>
76 #include <sys/disk.h>
77 #include <sys/proc.h>
78 #include <sys/conf.h>
79 #include <sys/vnode.h>
80 #if NRND > 0
81 #include <sys/rnd.h>
82 #endif
83
84 #include <dev/scsipi/scsipi_all.h>
85 #include <dev/scsipi/scsi_all.h>
86 #include <dev/scsipi/scsipi_disk.h>
87 #include <dev/scsipi/scsi_disk.h>
88 #include <dev/scsipi/scsiconf.h>
89 #include <dev/scsipi/scsipi_base.h>
90 #include <dev/scsipi/sdvar.h>
91
92 #define SDUNIT(dev) DISKUNIT(dev)
93 #define SDPART(dev) DISKPART(dev)
94 #define SDMINOR(unit, part) DISKMINOR(unit, part)
95 #define MAKESDDEV(maj, unit, part) MAKEDISKDEV(maj, unit, part)
96
97 #define SDLABELDEV(dev) (MAKESDDEV(major(dev), SDUNIT(dev), RAW_PART))
98
99 static void sdminphys(struct buf *);
100 static void sdgetdefaultlabel(struct sd_softc *, struct disklabel *);
101 static void sdgetdisklabel(struct sd_softc *);
102 static void sdstart(struct scsipi_periph *);
103 static void sdrestart(void *);
104 static void sddone(struct scsipi_xfer *, int);
105 static void sd_shutdown(void *);
106 static int sd_interpret_sense(struct scsipi_xfer *);
107
108 static int sd_mode_sense(struct sd_softc *, u_int8_t, void *, size_t, int,
109 int, int *);
110 static int sd_mode_select(struct sd_softc *, u_int8_t, void *, size_t, int,
111 int);
112 static int sd_get_simplifiedparms(struct sd_softc *, struct disk_parms *,
113 int);
114 static int sd_get_capacity(struct sd_softc *, struct disk_parms *, int);
115 static int sd_get_parms(struct sd_softc *, struct disk_parms *, int);
116 static int sd_get_parms_page4(struct sd_softc *, struct disk_parms *,
117 int);
118 static int sd_get_parms_page5(struct sd_softc *, struct disk_parms *,
119 int);
120
121 static int sd_flush(struct sd_softc *, int);
122 static int sd_getcache(struct sd_softc *, int *);
123 static int sd_setcache(struct sd_softc *, int);
124
125 static int sdmatch(struct device *, struct cfdata *, void *);
126 static void sdattach(struct device *, struct device *, void *);
127 static int sdactivate(struct device *, enum devact);
128 static int sddetach(struct device *, int);
129
130 CFATTACH_DECL(sd, sizeof(struct sd_softc), sdmatch, sdattach, sddetach,
131 sdactivate);
132
133 extern struct cfdriver sd_cd;
134
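/*
 * Inquiry patterns accepted by sdmatch().  The empty vendor, product
 * and revision strings act as wildcards, so any direct-access,
 * optical or simplified direct-access device, fixed or removable,
 * is claimed by this driver.
 */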
135 static const struct scsipi_inquiry_pattern sd_patterns[] = {
136 {T_DIRECT, T_FIXED,
137 "", "", ""},
138 {T_DIRECT, T_REMOV,
139 "", "", ""},
140 {T_OPTICAL, T_FIXED,
141 "", "", ""},
142 {T_OPTICAL, T_REMOV,
143 "", "", ""},
144 {T_SIMPLE_DIRECT, T_FIXED,
145 "", "", ""},
146 {T_SIMPLE_DIRECT, T_REMOV,
147 "", "", ""},
148 };
149
150 static dev_type_open(sdopen);
151 static dev_type_close(sdclose);
152 static dev_type_read(sdread);
153 static dev_type_write(sdwrite);
154 static dev_type_ioctl(sdioctl);
155 static dev_type_strategy(sdstrategy);
156 static dev_type_dump(sddump);
157 static dev_type_size(sdsize);
158
159 const struct bdevsw sd_bdevsw = {
160 sdopen, sdclose, sdstrategy, sdioctl, sddump, sdsize, D_DISK
161 };
162
163 const struct cdevsw sd_cdevsw = {
164 sdopen, sdclose, sdread, sdwrite, sdioctl,
165 nostop, notty, nopoll, nommap, nokqfilter, D_DISK
166 };
167
168 static struct dkdriver sddkdriver = { sdstrategy, sdminphys };
169
170 static const struct scsipi_periphsw sd_switch = {
171 sd_interpret_sense, /* check our error handler first */
172 sdstart, /* have a queue, served by this */
173 NULL, /* have no async handler */
174 sddone, /* deal with stats at interrupt time */
175 };
176
177 struct sd_mode_sense_data {
178 /*
179 * XXX
180 * We are not going to parse this as-is -- it just has to be large
181 * enough.
182 */
183 union {
184 struct scsipi_mode_header small;
185 struct scsipi_mode_header_big big;
186 } header;
187 struct scsi_blk_desc blk_desc;
188 union scsi_disk_pages pages;
189 };
190
191 /*
192 * The routine called by the low level scsi routine when it discovers
193 * a device suitable for this driver.
194 */
195 static int
196 sdmatch(struct device *parent, struct cfdata *match, void *aux)
197 {
198 struct scsipibus_attach_args *sa = aux;
199 int priority;
200
201 (void)scsipi_inqmatch(&sa->sa_inqbuf,
202 (caddr_t)sd_patterns, sizeof(sd_patterns) / sizeof(sd_patterns[0]),
203 sizeof(sd_patterns[0]), &priority);
204
205 return (priority);
206 }
207
208 /*
209 * Attach routine common to atapi & scsi.
210 */
211 static void
212 sdattach(struct device *parent, struct device *self, void *aux)
213 {
214 struct sd_softc *sd = (void *)self;
215 struct scsipibus_attach_args *sa = aux;
216 struct scsipi_periph *periph = sa->sa_periph;
217 int error, result;
218 struct disk_parms *dp = &sd->params;
219 char pbuf[9];
220
221 SC_DEBUG(periph, SCSIPI_DB2, ("sdattach: "));
222
223 sd->type = (sa->sa_inqbuf.type & SID_TYPE);
224 if (sd->type == T_SIMPLE_DIRECT)
225 periph->periph_quirks |= PQUIRK_ONLYBIG | PQUIRK_NOBIGMODESENSE;
226
227 if (scsipi_periph_bustype(sa->sa_periph) == SCSIPI_BUSTYPE_SCSI &&
228 periph->periph_version == 0)
229 sd->flags |= SDF_ANCIENT;
230
231 bufq_alloc(&sd->buf_queue,
232 BUFQ_DISK_DEFAULT_STRAT()|BUFQ_SORT_RAWBLOCK);
233
234 callout_init(&sd->sc_callout);
235
236 /*
237 * Store information needed to contact our base driver
238 */
239 sd->sc_periph = periph;
240
241 periph->periph_dev = &sd->sc_dev;
242 periph->periph_switch = &sd_switch;
243
244 /*
245 * Increase our openings to the maximum-per-periph
246 * supported by the adapter. This will either be
247 * clamped down or grown by the adapter if necessary.
248 */
249 periph->periph_openings =
250 SCSIPI_CHAN_MAX_PERIPH(periph->periph_channel);
251 periph->periph_flags |= PERIPH_GROW_OPENINGS;
252
253 /*
254 * Initialize and attach the disk structure.
255 */
256 sd->sc_dk.dk_driver = &sddkdriver;
257 sd->sc_dk.dk_name = sd->sc_dev.dv_xname;
258 disk_attach(&sd->sc_dk);
259
260 /*
261 * Use the subdriver to request information regarding the drive.
262 */
263 aprint_naive("\n");
264 aprint_normal("\n");
265
266 error = scsipi_test_unit_ready(periph,
267 XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
268 XS_CTL_IGNORE_MEDIA_CHANGE | XS_CTL_SILENT_NODEV);
269
270 if (error)
271 result = SDGP_RESULT_OFFLINE;
272 else
273 result = sd_get_parms(sd, &sd->params, XS_CTL_DISCOVERY);
274 aprint_normal("%s: ", sd->sc_dev.dv_xname);
275 switch (result) {
276 case SDGP_RESULT_OK:
277 format_bytes(pbuf, sizeof(pbuf),
278 (u_int64_t)dp->disksize * dp->blksize);
279 aprint_normal(
280 "%s, %ld cyl, %ld head, %ld sec, %ld bytes/sect x %llu sectors",
281 pbuf, dp->cyls, dp->heads, dp->sectors, dp->blksize,
282 (unsigned long long)dp->disksize);
283 break;
284
285 case SDGP_RESULT_OFFLINE:
286 aprint_normal("drive offline");
287 break;
288
289 case SDGP_RESULT_UNFORMATTED:
290 aprint_normal("unformatted media");
291 break;
292
293 #ifdef DIAGNOSTIC
294 default:
295 panic("sdattach: unknown result from get_parms");
296 break;
297 #endif
298 }
299 aprint_normal("\n");
300
301 /*
302 * Establish a shutdown hook so that we can ensure that
303 * our data has actually made it onto the platter at
304 * shutdown time. Note that this relies on the fact
305 * that the shutdown hook code puts us at the head of
306 * the list (thus guaranteeing that our hook runs before
307 * our ancestors').
308 */
309 if ((sd->sc_sdhook =
310 shutdownhook_establish(sd_shutdown, sd)) == NULL)
311 aprint_error("%s: WARNING: unable to establish shutdown hook\n",
312 sd->sc_dev.dv_xname);
313
314 #if NRND > 0
315 /*
316 * attach the device into the random source list
317 */
318 rnd_attach_source(&sd->rnd_source, sd->sc_dev.dv_xname,
319 RND_TYPE_DISK, 0);
320 #endif
321
322 /* Discover wedges on this disk. */
323 dkwedge_discover(&sd->sc_dk);
324 }
325
326 static int
327 sdactivate(struct device *self, enum devact act)
328 {
329 int rv = 0;
330
331 switch (act) {
332 case DVACT_ACTIVATE:
333 rv = EOPNOTSUPP;
334 break;
335
336 case DVACT_DEACTIVATE:
337 /*
338 * Nothing to do; we key off the device's DVF_ACTIVE.
339 */
340 break;
341 }
342 return (rv);
343 }
344
345 static int
346 sddetach(struct device *self, int flags)
347 {
348 struct sd_softc *sd = (struct sd_softc *) self;
349 struct buf *bp;
350 int s, bmaj, cmaj, i, mn;
351
352 /* locate the major number */
353 bmaj = bdevsw_lookup_major(&sd_bdevsw);
354 cmaj = cdevsw_lookup_major(&sd_cdevsw);
355
356 /* Nuke the vnodes for any open instances */
357 for (i = 0; i < MAXPARTITIONS; i++) {
358 mn = SDMINOR(self->dv_unit, i);
359 vdevgone(bmaj, mn, mn, VBLK);
360 vdevgone(cmaj, mn, mn, VCHR);
361 }
362
363 /* kill any pending restart */
364 callout_stop(&sd->sc_callout);
365
366 /* Delete all of our wedges. */
367 dkwedge_delall(&sd->sc_dk);
368
369 s = splbio();
370
371 /* Kill off any queued buffers. */
372 while ((bp = BUFQ_GET(&sd->buf_queue)) != NULL) {
373 bp->b_error = EIO;
374 bp->b_flags |= B_ERROR;
375 bp->b_resid = bp->b_bcount;
376 biodone(bp);
377 }
378
379 bufq_free(&sd->buf_queue);
380
381 /* Kill off any pending commands. */
382 scsipi_kill_pending(sd->sc_periph);
383
384 splx(s);
385
386 /* Detach from the disk list. */
387 disk_detach(&sd->sc_dk);
388
389 /* Get rid of the shutdown hook. */
390 shutdownhook_disestablish(sd->sc_sdhook);
391
392 #if NRND > 0
393 /* Unhook the entropy source. */
394 rnd_detach_source(&sd->rnd_source);
395 #endif
396
397 return (0);
398 }
399
400 /*
401 * Open the device. Make sure the partition info is as up-to-date as can be.
402 */
403 static int
404 sdopen(dev_t dev, int flag, int fmt, struct proc *p)
405 {
406 struct sd_softc *sd;
407 struct scsipi_periph *periph;
408 struct scsipi_adapter *adapt;
409 int unit, part;
410 int error;
411
412 unit = SDUNIT(dev);
413 if (unit >= sd_cd.cd_ndevs)
414 return (ENXIO);
415 sd = sd_cd.cd_devs[unit];
416 if (sd == NULL)
417 return (ENXIO);
418
419 if ((sd->sc_dev.dv_flags & DVF_ACTIVE) == 0)
420 return (ENODEV);
421
422 part = SDPART(dev);
423
424 if ((error = lockmgr(&sd->sc_dk.dk_openlock, LK_EXCLUSIVE, NULL)) != 0)
425 return (error);
426
427 /*
428 * If there are wedges, and this is not RAW_PART, then we
429 * need to fail.
430 */
431 if (sd->sc_dk.dk_nwedges != 0 && part != RAW_PART) {
432 error = EBUSY;
433 goto bad1;
434 }
435
436 periph = sd->sc_periph;
437 adapt = periph->periph_channel->chan_adapter;
438
439 SC_DEBUG(periph, SCSIPI_DB1,
440 ("sdopen: dev=0x%x (unit %d (of %d), partition %d)\n", dev, unit,
441 sd_cd.cd_ndevs, part));
442
443 /*
444 * If this is the first open of this device, add a reference
445 * to the adapter.
446 */
447 if (sd->sc_dk.dk_openmask == 0 &&
448 (error = scsipi_adapter_addref(adapt)) != 0)
449 goto bad1;
450
451 if ((periph->periph_flags & PERIPH_OPEN) != 0) {
452 /*
453 * If any partition is open, but the disk has been invalidated,
454 * disallow further opens of non-raw partition
455 */
456 if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0 &&
457 (part != RAW_PART || fmt != S_IFCHR)) {
458 error = EIO;
459 goto bad2;
460 }
461 } else {
462 int silent;
463
464 if (part == RAW_PART && fmt == S_IFCHR)
465 silent = XS_CTL_SILENT;
466 else
467 silent = 0;
468
469 /* Check that it is still responding and ok. */
470 error = scsipi_test_unit_ready(periph,
471 XS_CTL_IGNORE_ILLEGAL_REQUEST | XS_CTL_IGNORE_MEDIA_CHANGE |
472 silent);
473
474 /*
475 * Start the pack spinning if necessary. Always allow the
476 * raw partition to be opened, for raw IOCTLs. Data transfers
477 * will check for PERIPH_MEDIA_LOADED.
478 */
479 if (error == EIO) {
480 int error2;
481
482 error2 = scsipi_start(periph, SSS_START, silent);
483 switch (error2) {
484 case 0:
485 error = 0;
486 break;
487 case EIO:
488 case EINVAL:
489 break;
490 default:
491 error = error2;
492 break;
493 }
494 }
495 if (error) {
496 if (silent)
497 goto out;
498 goto bad2;
499 }
500
501 periph->periph_flags |= PERIPH_OPEN;
502
503 if (periph->periph_flags & PERIPH_REMOVABLE) {
504 /* Lock the pack in. */
505 error = scsipi_prevent(periph, PR_PREVENT,
506 XS_CTL_IGNORE_ILLEGAL_REQUEST |
507 XS_CTL_IGNORE_MEDIA_CHANGE);
508 if (error)
509 goto bad3;
510 }
511
512 if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0) {
513 int param_error;
514 periph->periph_flags |= PERIPH_MEDIA_LOADED;
515
516 /*
517 * Load the physical device parameters.
518 *
519 * Note that if media is present but unformatted,
520 * we allow the open (so that it can be formatted!).
521 * The drive should refuse real I/O, if the media is
522 * unformatted.
523 */
524 if ((param_error = sd_get_parms(sd, &sd->params, 0))
525 == SDGP_RESULT_OFFLINE) {
526 error = ENXIO;
527 periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
528 goto bad3;
529 }
530 SC_DEBUG(periph, SCSIPI_DB3, ("Params loaded "));
531
532 /* Load the partition info if not already loaded. */
533 if (param_error == 0) {
534 sdgetdisklabel(sd);
535 SC_DEBUG(periph, SCSIPI_DB3,
536 ("Disklabel loaded "));
537 }
538 }
539 }
540
541 /* Check that the partition exists. */
542 if (part != RAW_PART &&
543 (part >= sd->sc_dk.dk_label->d_npartitions ||
544 sd->sc_dk.dk_label->d_partitions[part].p_fstype == FS_UNUSED)) {
545 error = ENXIO;
546 goto bad3;
547 }
548
549 out: /* Insure only one open at a time. */
550 switch (fmt) {
551 case S_IFCHR:
552 sd->sc_dk.dk_copenmask |= (1 << part);
553 break;
554 case S_IFBLK:
555 sd->sc_dk.dk_bopenmask |= (1 << part);
556 break;
557 }
558 sd->sc_dk.dk_openmask =
559 sd->sc_dk.dk_copenmask | sd->sc_dk.dk_bopenmask;
560
561 SC_DEBUG(periph, SCSIPI_DB3, ("open complete\n"));
562 (void) lockmgr(&sd->sc_dk.dk_openlock, LK_RELEASE, NULL);
563 return (0);
564
565 bad3:
566 if (sd->sc_dk.dk_openmask == 0) {
567 if (periph->periph_flags & PERIPH_REMOVABLE)
568 scsipi_prevent(periph, PR_ALLOW,
569 XS_CTL_IGNORE_ILLEGAL_REQUEST |
570 XS_CTL_IGNORE_MEDIA_CHANGE);
571 periph->periph_flags &= ~PERIPH_OPEN;
572 }
573
574 bad2:
575 if (sd->sc_dk.dk_openmask == 0)
576 scsipi_adapter_delref(adapt);
577
578 bad1:
579 (void) lockmgr(&sd->sc_dk.dk_openlock, LK_RELEASE, NULL);
580 return (error);
581 }
582
583 /*
584 * Close the device. Only called if we are the LAST occurrence of an open
585 * device. Convenient now but usually a pain.
586 */
587 static int
588 sdclose(dev_t dev, int flag, int fmt, struct proc *p)
589 {
590 struct sd_softc *sd = sd_cd.cd_devs[SDUNIT(dev)];
591 struct scsipi_periph *periph = sd->sc_periph;
592 struct scsipi_adapter *adapt = periph->periph_channel->chan_adapter;
593 int part = SDPART(dev);
594 int error;
595
596 if ((error = lockmgr(&sd->sc_dk.dk_openlock, LK_EXCLUSIVE, NULL)) != 0)
597 return (error);
598
599 switch (fmt) {
600 case S_IFCHR:
601 sd->sc_dk.dk_copenmask &= ~(1 << part);
602 break;
603 case S_IFBLK:
604 sd->sc_dk.dk_bopenmask &= ~(1 << part);
605 break;
606 }
607 sd->sc_dk.dk_openmask =
608 sd->sc_dk.dk_copenmask | sd->sc_dk.dk_bopenmask;
609
610 if (sd->sc_dk.dk_openmask == 0) {
611 /*
612 * If the disk cache needs flushing, and the disk supports
613 * it, do it now.
614 */
615 if ((sd->flags & SDF_DIRTY) != 0) {
616 if (sd_flush(sd, 0)) {
617 printf("%s: cache synchronization failed\n",
618 sd->sc_dev.dv_xname);
619 sd->flags &= ~SDF_FLUSHING;
620 } else
621 sd->flags &= ~(SDF_FLUSHING|SDF_DIRTY);
622 }
623
624 if (! (periph->periph_flags & PERIPH_KEEP_LABEL))
625 periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
626
627 scsipi_wait_drain(periph);
628
629 if (periph->periph_flags & PERIPH_REMOVABLE)
630 scsipi_prevent(periph, PR_ALLOW,
631 XS_CTL_IGNORE_ILLEGAL_REQUEST |
632 XS_CTL_IGNORE_NOT_READY);
633 periph->periph_flags &= ~PERIPH_OPEN;
634
635 scsipi_wait_drain(periph);
636
637 scsipi_adapter_delref(adapt);
638 }
639
640 (void) lockmgr(&sd->sc_dk.dk_openlock, LK_RELEASE, NULL);
641 return (0);
642 }
643
644 /*
645 * Actually translate the requested transfer into one the physical driver
646 * can understand. The transfer is described by a buf and will include
647 * only one physical transfer.
648 */
649 static void
650 sdstrategy(struct buf *bp)
651 {
652 struct sd_softc *sd = sd_cd.cd_devs[SDUNIT(bp->b_dev)];
653 struct scsipi_periph *periph = sd->sc_periph;
654 struct disklabel *lp;
655 daddr_t blkno;
656 int s;
657 boolean_t sector_aligned;
658
659 SC_DEBUG(sd->sc_periph, SCSIPI_DB2, ("sdstrategy "));
660 SC_DEBUG(sd->sc_periph, SCSIPI_DB1,
661 ("%d bytes @ blk %" PRId64 "\n", bp->b_bcount, bp->b_blkno));
662 /*
663 * If the device has been made invalid, error out
664 */
665 if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0 ||
666 (sd->sc_dev.dv_flags & DVF_ACTIVE) == 0) {
667 if (periph->periph_flags & PERIPH_OPEN)
668 bp->b_error = EIO;
669 else
670 bp->b_error = ENODEV;
671 goto bad;
672 }
673
674 lp = sd->sc_dk.dk_label;
675
676 /*
677 * The transfer must be a whole number of blocks, offset must not be
678 * negative.
679 */
680 if (lp->d_secsize == DEV_BSIZE) {
681 sector_aligned = (bp->b_bcount & (DEV_BSIZE - 1)) == 0;
682 } else {
683 sector_aligned = (bp->b_bcount % lp->d_secsize) == 0;
684 }
685 if (!sector_aligned || bp->b_blkno < 0) {
686 bp->b_error = EINVAL;
687 goto bad;
688 }
689 /*
690 * If it's a null transfer, return immediately.
691 */
692 if (bp->b_bcount == 0)
693 goto done;
694
695 /*
696 * Do bounds checking, adjust transfer. if error, process.
697 * If end of partition, just return.
698 */
699 if (SDPART(bp->b_dev) == RAW_PART) {
700 if (bounds_check_with_mediasize(bp, DEV_BSIZE,
701 sd->params.disksize512) <= 0)
702 goto done;
703 } else {
704 if (bounds_check_with_label(&sd->sc_dk, bp,
705 (sd->flags & (SDF_WLABEL|SDF_LABELLING)) != 0) <= 0)
706 goto done;
707 }
708
709 /*
710 * Now convert the block number to absolute and put it in
711 * terms of the device's logical block size.
712 */
713 if (lp->d_secsize == DEV_BSIZE)
714 blkno = bp->b_blkno;
715 else if (lp->d_secsize > DEV_BSIZE)
716 blkno = bp->b_blkno / (lp->d_secsize / DEV_BSIZE);
717 else
718 blkno = bp->b_blkno * (DEV_BSIZE / lp->d_secsize);
719
720 if (SDPART(bp->b_dev) != RAW_PART)
721 blkno += lp->d_partitions[SDPART(bp->b_dev)].p_offset;
722
723 bp->b_rawblkno = blkno;
724
725 s = splbio();
726
727 /*
728 * Place it in the queue of disk activities for this disk.
729 *
730 * XXX Only do disksort() if the current operating mode does not
731 * XXX include tagged queueing.
732 */
733 BUFQ_PUT(&sd->buf_queue, bp);
734
735 /*
736 * Tell the device to get going on the transfer if it's
737 * not doing anything, otherwise just wait for completion
738 */
739 sdstart(sd->sc_periph);
740
741 splx(s);
742 return;
743
744 bad:
745 bp->b_flags |= B_ERROR;
746 done:
747 /*
748 * Correctly set the buf to indicate a completed xfer
749 */
750 bp->b_resid = bp->b_bcount;
751 biodone(bp);
752 }
753
754 /*
755 * sdstart looks to see if there is a buf waiting for the device
756 * and that the device is not already busy. If both are true,
757 * it dequeues the buf and creates a scsi command to perform the
758 * transfer in the buf. The transfer request will call scsipi_done
759 * on completion, which will in turn call this routine again
760 * so that the next queued transfer is performed.
761 * The bufs are queued by the strategy routine (sdstrategy)
762 *
763 * This routine is also called after other non-queued requests
764 * have been made of the scsi driver, to ensure that the queue
765 * continues to be drained.
766 *
767 * must be called at the correct (highish) spl level
768 * sdstart() is called at splbio from sdstrategy, sdrestart and scsipi_done
769 */
770 static void
771 sdstart(struct scsipi_periph *periph)
772 {
773 struct sd_softc *sd = (void *)periph->periph_dev;
774 struct disklabel *lp = sd->sc_dk.dk_label;
775 struct buf *bp = 0;
776 struct scsipi_rw_16 cmd16;
777 struct scsipi_rw_10 cmd_big;
778 struct scsi_rw_6 cmd_small;
779 struct scsipi_generic *cmdp;
780 struct scsipi_xfer *xs;
781 int nblks, cmdlen, error, flags;
782
783 SC_DEBUG(periph, SCSIPI_DB2, ("sdstart "));
784 /*
785 * Check if the device has room for another command
786 */
787 while (periph->periph_active < periph->periph_openings) {
788 /*
789 * There is excess capacity, but a special command is waiting.
790 * It'll need the adapter as soon as we clear out of the
791 * way and let it run (user-level wait).
792 */
793 if (periph->periph_flags & PERIPH_WAITING) {
794 periph->periph_flags &= ~PERIPH_WAITING;
795 wakeup((caddr_t)periph);
796 return;
797 }
798
799 /*
800 * If the device has become invalid, abort all the
801 * reads and writes until all files have been closed and
802 * re-opened
803 */
804 if (__predict_false(
805 (periph->periph_flags & PERIPH_MEDIA_LOADED) == 0)) {
806 if ((bp = BUFQ_GET(&sd->buf_queue)) != NULL) {
807 bp->b_error = EIO;
808 bp->b_flags |= B_ERROR;
809 bp->b_resid = bp->b_bcount;
810 biodone(bp);
811 continue;
812 } else {
813 return;
814 }
815 }
816
817 /*
818 * See if there is a buf with work for us to do..
819 */
820 if ((bp = BUFQ_PEEK(&sd->buf_queue)) == NULL)
821 return;
822
823 /*
824 * We have a buf, now we should make a command.
825 */
826
827 if (lp->d_secsize == DEV_BSIZE)
828 nblks = bp->b_bcount >> DEV_BSHIFT;
829 else
830 nblks = howmany(bp->b_bcount, lp->d_secsize);
831
832 /*
833 * Fill out the scsi command. Use the smallest CDB possible
834 * (6-byte, 10-byte, or 16-byte).
835 */
836 if (((bp->b_rawblkno & 0x1fffff) == bp->b_rawblkno) &&
837 ((nblks & 0xff) == nblks) &&
838 !(periph->periph_quirks & PQUIRK_ONLYBIG)) {
839 /* 6-byte CDB */
840 memset(&cmd_small, 0, sizeof(cmd_small));
841 cmd_small.opcode = (bp->b_flags & B_READ) ?
842 SCSI_READ_6_COMMAND : SCSI_WRITE_6_COMMAND;
843 _lto3b(bp->b_rawblkno, cmd_small.addr);
844 cmd_small.length = nblks & 0xff;
845 cmdlen = sizeof(cmd_small);
846 cmdp = (struct scsipi_generic *)&cmd_small;
847 } else if ((bp->b_rawblkno & 0xffffffff) == bp->b_rawblkno) {
848 /* 10-byte CDB */
849 memset(&cmd_big, 0, sizeof(cmd_big));
850 cmd_big.opcode = (bp->b_flags & B_READ) ?
851 READ_10 : WRITE_10;
852 _lto4b(bp->b_rawblkno, cmd_big.addr);
853 _lto2b(nblks, cmd_big.length);
854 cmdlen = sizeof(cmd_big);
855 cmdp = (struct scsipi_generic *)&cmd_big;
856 } else {
857 /* 16-byte CDB */
858 memset(&cmd16, 0, sizeof(cmd16));
859 cmd16.opcode = (bp->b_flags & B_READ) ?
860 READ_16 : WRITE_16;
861 _lto8b(bp->b_rawblkno, cmd16.addr);
862 _lto4b(nblks, cmd16.length);
863 cmdlen = sizeof(cmd16);
864 cmdp = (struct scsipi_generic *)&cmd16;
865 }
866
867 /* Instrumentation. */
868 disk_busy(&sd->sc_dk);
869
870 /*
871 * Mark the disk dirty so that the cache will be
872 * flushed on close.
873 */
874 if ((bp->b_flags & B_READ) == 0)
875 sd->flags |= SDF_DIRTY;
876
877 /*
878 * Figure out what flags to use.
879 */
880 flags = XS_CTL_NOSLEEP|XS_CTL_ASYNC|XS_CTL_SIMPLE_TAG;
881 if (bp->b_flags & B_READ)
882 flags |= XS_CTL_DATA_IN;
883 else
884 flags |= XS_CTL_DATA_OUT;
885
886 /*
887 * Call the routine that chats with the adapter.
888 * Note: we cannot sleep as we may be called from interrupt context.
889 */
890 xs = scsipi_make_xs(periph, cmdp, cmdlen,
891 (u_char *)bp->b_data, bp->b_bcount,
892 SDRETRIES, SD_IO_TIMEOUT, bp, flags);
893 if (__predict_false(xs == NULL)) {
894 /*
895 * out of memory. Keep this buffer in the queue, and
896 * retry later.
897 */
898 callout_reset(&sd->sc_callout, hz / 2, sdrestart,
899 periph);
900 return;
901 }
902 /*
903 * We need to dequeue the buffer before queueing the command,
904 * because sdstart may be called recursively from the
905 * HBA driver.
906 */
907 #ifdef DIAGNOSTIC
908 if (BUFQ_GET(&sd->buf_queue) != bp)
909 panic("sdstart(): dequeued wrong buf");
910 #else
911 BUFQ_GET(&sd->buf_queue);
912 #endif
913 error = scsipi_execute_xs(xs);
914 /* with a scsipi_xfer preallocated, scsipi_command can't fail */
915 KASSERT(error == 0);
916 }
917 }
918
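/*
 * Callout handler used when sdstart() could not allocate a transfer;
 * retry the queue at splbio.
 */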
919 static void
920 sdrestart(void *v)
921 {
922 int s = splbio();
923 sdstart((struct scsipi_periph *)v);
924 splx(s);
925 }
926
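/*
 * Command completion handler: note a finished cache flush, copy the
 * error and residual count into the buf, update the disk statistics
 * and the entropy pool, and complete the I/O.
 */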
927 static void
928 sddone(struct scsipi_xfer *xs, int error)
929 {
930 struct sd_softc *sd = (void *)xs->xs_periph->periph_dev;
931 struct buf *bp = xs->bp;
932
933 if (sd->flags & SDF_FLUSHING) {
934 /* Flush completed, no longer dirty. */
935 sd->flags &= ~(SDF_FLUSHING|SDF_DIRTY);
936 }
937
938 if (bp) {
939 bp->b_error = error;
940 bp->b_resid = xs->resid;
941 if (error)
942 bp->b_flags |= B_ERROR;
943
944 disk_unbusy(&sd->sc_dk, bp->b_bcount - bp->b_resid,
945 (bp->b_flags & B_READ));
946 #if NRND > 0
947 rnd_add_uint32(&sd->rnd_source, bp->b_rawblkno);
948 #endif
949
950 biodone(bp);
951 }
952 }
953
954 static void
955 sdminphys(struct buf *bp)
956 {
957 struct sd_softc *sd = sd_cd.cd_devs[SDUNIT(bp->b_dev)];
958 long max;
959
960 /*
961 * If the device is ancient, we want to make sure that
962 * the transfer fits into a 6-byte cdb.
963 *
964 * XXX Note that the SCSI-I spec says that 256-block transfers
965 * are allowed in a 6-byte read/write, and are specified
966 * by setting the "length" to 0. However, we're conservative
967 * here, allowing only 255-block transfers in case an
968 * ancient device gets confused by length == 0. A length of 0
969 * in a 10-byte read/write actually means 0 blocks.
970 */
971 if ((sd->flags & SDF_ANCIENT) &&
972 ((sd->sc_periph->periph_flags &
973 (PERIPH_REMOVABLE | PERIPH_MEDIA_LOADED)) != PERIPH_REMOVABLE)) {
974 max = sd->sc_dk.dk_label->d_secsize * 0xff;
975
976 if (bp->b_bcount > max)
977 bp->b_bcount = max;
978 }
979
980 scsipi_adapter_minphys(sd->sc_periph->periph_channel, bp);
981 }
982
983 static int
984 sdread(dev_t dev, struct uio *uio, int ioflag)
985 {
986
987 return (physio(sdstrategy, NULL, dev, B_READ, sdminphys, uio));
988 }
989
990 static int
991 sdwrite(dev_t dev, struct uio *uio, int ioflag)
992 {
993
994 return (physio(sdstrategy, NULL, dev, B_WRITE, sdminphys, uio));
995 }
996
997 /*
998 * Perform special action on behalf of the user.
999 * Knows about the internals of this device.
1000 */
1001 static int
1002 sdioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
1003 {
1004 struct sd_softc *sd = sd_cd.cd_devs[SDUNIT(dev)];
1005 struct scsipi_periph *periph = sd->sc_periph;
1006 int part = SDPART(dev);
1007 int error = 0;
1008 #ifdef __HAVE_OLD_DISKLABEL
1009 struct disklabel *newlabel = NULL;
1010 #endif
1011
1012 SC_DEBUG(sd->sc_periph, SCSIPI_DB2, ("sdioctl 0x%lx ", cmd));
1013
1014 /*
1015 * If the device is not valid, some IOCTLs can still be
1016 * handled on the raw partition. Check this here.
1017 */
1018 if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0) {
1019 switch (cmd) {
1020 case DIOCKLABEL:
1021 case DIOCWLABEL:
1022 case DIOCLOCK:
1023 case DIOCEJECT:
1024 case ODIOCEJECT:
1025 case DIOCGCACHE:
1026 case DIOCSCACHE:
1027 case SCIOCIDENTIFY:
1028 case OSCIOCIDENTIFY:
1029 case SCIOCCOMMAND:
1030 case SCIOCDEBUG:
1031 if (part == RAW_PART)
1032 break;
1033 /* FALLTHROUGH */
1034 default:
1035 if ((periph->periph_flags & PERIPH_OPEN) == 0)
1036 return (ENODEV);
1037 else
1038 return (EIO);
1039 }
1040 }
1041
1042 switch (cmd) {
1043 case DIOCGDINFO:
1044 *(struct disklabel *)addr = *(sd->sc_dk.dk_label);
1045 return (0);
1046
1047 #ifdef __HAVE_OLD_DISKLABEL
1048 case ODIOCGDINFO:
1049 newlabel = malloc(sizeof *newlabel, M_TEMP, M_WAITOK);
1050 if (newlabel == NULL)
1051 return EIO;
1052 memcpy(newlabel, sd->sc_dk.dk_label, sizeof (*newlabel));
1053 if (newlabel->d_npartitions <= OLDMAXPARTITIONS)
1054 memcpy(addr, newlabel, sizeof (struct olddisklabel));
1055 else
1056 error = ENOTTY;
1057 free(newlabel, M_TEMP);
1058 return error;
1059 #endif
1060
1061 case DIOCGPART:
1062 ((struct partinfo *)addr)->disklab = sd->sc_dk.dk_label;
1063 ((struct partinfo *)addr)->part =
1064 &sd->sc_dk.dk_label->d_partitions[part];
1065 return (0);
1066
1067 case DIOCWDINFO:
1068 case DIOCSDINFO:
1069 #ifdef __HAVE_OLD_DISKLABEL
1070 case ODIOCWDINFO:
1071 case ODIOCSDINFO:
1072 #endif
1073 {
1074 struct disklabel *lp;
1075
1076 if ((flag & FWRITE) == 0)
1077 return (EBADF);
1078
1079 #ifdef __HAVE_OLD_DISKLABEL
1080 if (cmd == ODIOCSDINFO || cmd == ODIOCWDINFO) {
1081 newlabel = malloc(sizeof *newlabel, M_TEMP, M_WAITOK);
1082 if (newlabel == NULL)
1083 return EIO;
1084 memset(newlabel, 0, sizeof *newlabel);
1085 memcpy(newlabel, addr, sizeof (struct olddisklabel));
1086 lp = newlabel;
1087 } else
1088 #endif
1089 lp = (struct disklabel *)addr;
1090
1091 if ((error = lockmgr(&sd->sc_dk.dk_openlock,
1092 LK_EXCLUSIVE, NULL)) != 0)
1093 goto bad;
1094 sd->flags |= SDF_LABELLING;
1095
1096 error = setdisklabel(sd->sc_dk.dk_label,
1097 lp, /*sd->sc_dk.dk_openmask : */0,
1098 sd->sc_dk.dk_cpulabel);
1099 if (error == 0) {
1100 if (cmd == DIOCWDINFO
1101 #ifdef __HAVE_OLD_DISKLABEL
1102 || cmd == ODIOCWDINFO
1103 #endif
1104 )
1105 error = writedisklabel(SDLABELDEV(dev),
1106 sdstrategy, sd->sc_dk.dk_label,
1107 sd->sc_dk.dk_cpulabel);
1108 }
1109
1110 sd->flags &= ~SDF_LABELLING;
1111 (void) lockmgr(&sd->sc_dk.dk_openlock, LK_RELEASE, NULL);
1112 bad:
1113 #ifdef __HAVE_OLD_DISKLABEL
1114 if (newlabel != NULL)
1115 free(newlabel, M_TEMP);
1116 #endif
1117 return (error);
1118 }
1119
1120 case DIOCKLABEL:
1121 if (*(int *)addr)
1122 periph->periph_flags |= PERIPH_KEEP_LABEL;
1123 else
1124 periph->periph_flags &= ~PERIPH_KEEP_LABEL;
1125 return (0);
1126
1127 case DIOCWLABEL:
1128 if ((flag & FWRITE) == 0)
1129 return (EBADF);
1130 if (*(int *)addr)
1131 sd->flags |= SDF_WLABEL;
1132 else
1133 sd->flags &= ~SDF_WLABEL;
1134 return (0);
1135
1136 case DIOCLOCK:
1137 return (scsipi_prevent(periph,
1138 (*(int *)addr) ? PR_PREVENT : PR_ALLOW, 0));
1139
1140 case DIOCEJECT:
1141 if ((periph->periph_flags & PERIPH_REMOVABLE) == 0)
1142 return (ENOTTY);
1143 if (*(int *)addr == 0) {
1144 /*
1145 * Don't force eject: check that we are the only
1146 * partition open. If so, unlock it.
1147 */
1148 if ((sd->sc_dk.dk_openmask & ~(1 << part)) == 0 &&
1149 sd->sc_dk.dk_bopenmask + sd->sc_dk.dk_copenmask ==
1150 sd->sc_dk.dk_openmask) {
1151 error = scsipi_prevent(periph, PR_ALLOW,
1152 XS_CTL_IGNORE_NOT_READY);
1153 if (error)
1154 return (error);
1155 } else {
1156 return (EBUSY);
1157 }
1158 }
1159 /* FALLTHROUGH */
1160 case ODIOCEJECT:
1161 return ((periph->periph_flags & PERIPH_REMOVABLE) == 0 ?
1162 ENOTTY : scsipi_start(periph, SSS_STOP|SSS_LOEJ, 0));
1163
1164 case DIOCGDEFLABEL:
1165 sdgetdefaultlabel(sd, (struct disklabel *)addr);
1166 return (0);
1167
1168 #ifdef __HAVE_OLD_DISKLABEL
1169 case ODIOCGDEFLABEL:
1170 newlabel = malloc(sizeof *newlabel, M_TEMP, M_WAITOK);
1171 if (newlabel == NULL)
1172 return EIO;
1173 sdgetdefaultlabel(sd, newlabel);
1174 if (newlabel->d_npartitions <= OLDMAXPARTITIONS)
1175 memcpy(addr, newlabel, sizeof (struct olddisklabel));
1176 else
1177 error = ENOTTY;
1178 free(newlabel, M_TEMP);
1179 return error;
1180 #endif
1181
1182 case DIOCGCACHE:
1183 return (sd_getcache(sd, (int *) addr));
1184
1185 case DIOCSCACHE:
1186 if ((flag & FWRITE) == 0)
1187 return (EBADF);
1188 return (sd_setcache(sd, *(int *) addr));
1189
1190 case DIOCCACHESYNC:
1191 /*
1192 * XXX Do we really need to care about having a writable
1193 * file descriptor here?
1194 */
1195 if ((flag & FWRITE) == 0)
1196 return (EBADF);
1197 if (((sd->flags & SDF_DIRTY) != 0 || *(int *)addr != 0)) {
1198 error = sd_flush(sd, 0);
1199 if (error)
1200 sd->flags &= ~SDF_FLUSHING;
1201 else
1202 sd->flags &= ~(SDF_FLUSHING|SDF_DIRTY);
1203 } else
1204 error = 0;
1205 return (error);
1206
1207 case DIOCAWEDGE:
1208 {
1209 struct dkwedge_info *dkw = (void *) addr;
1210
1211 if ((flag & FWRITE) == 0)
1212 return (EBADF);
1213
1214 /* If the ioctl happens here, the parent is us. */
1215 strcpy(dkw->dkw_parent, sd->sc_dev.dv_xname);
1216 return (dkwedge_add(dkw));
1217 }
1218
1219 case DIOCDWEDGE:
1220 {
1221 struct dkwedge_info *dkw = (void *) addr;
1222
1223 if ((flag & FWRITE) == 0)
1224 return (EBADF);
1225
1226 /* If the ioctl happens here, the parent is us. */
1227 strcpy(dkw->dkw_parent, sd->sc_dev.dv_xname);
1228 return (dkwedge_del(dkw));
1229 }
1230
1231 case DIOCLWEDGES:
1232 {
1233 struct dkwedge_list *dkwl = (void *) addr;
1234
1235 return (dkwedge_list(&sd->sc_dk, dkwl, p));
1236 }
1237
1238 default:
1239 if (part != RAW_PART)
1240 return (ENOTTY);
1241 return (scsipi_do_ioctl(periph, dev, cmd, addr, flag, p));
1242 }
1243
1244 #ifdef DIAGNOSTIC
1245 panic("sdioctl: impossible");
1246 #endif
1247 }
1248
1249 static void
1250 sdgetdefaultlabel(struct sd_softc *sd, struct disklabel *lp)
1251 {
1252
1253 memset(lp, 0, sizeof(struct disklabel));
1254
1255 lp->d_secsize = sd->params.blksize;
1256 lp->d_ntracks = sd->params.heads;
1257 lp->d_nsectors = sd->params.sectors;
1258 lp->d_ncylinders = sd->params.cyls;
1259 lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
1260
1261 switch (scsipi_periph_bustype(sd->sc_periph)) {
1262 case SCSIPI_BUSTYPE_SCSI:
1263 lp->d_type = DTYPE_SCSI;
1264 break;
1265 case SCSIPI_BUSTYPE_ATAPI:
1266 lp->d_type = DTYPE_ATAPI;
1267 break;
1268 }
1269 /*
1270 * XXX
1271 * We could probe the mode pages to figure out what kind of disc it is.
1272 * Is this worthwhile?
1273 */
1274 strncpy(lp->d_typename, "mydisk", 16);
1275 strncpy(lp->d_packname, "fictitious", 16);
1276 lp->d_secperunit = sd->params.disksize;
1277 lp->d_rpm = sd->params.rot_rate;
1278 lp->d_interleave = 1;
1279 lp->d_flags = sd->sc_periph->periph_flags & PERIPH_REMOVABLE ?
1280 D_REMOVABLE : 0;
1281
1282 lp->d_partitions[RAW_PART].p_offset = 0;
1283 lp->d_partitions[RAW_PART].p_size =
1284 lp->d_secperunit * (lp->d_secsize / DEV_BSIZE);
1285 lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED;
1286 lp->d_npartitions = RAW_PART + 1;
1287
1288 lp->d_magic = DISKMAGIC;
1289 lp->d_magic2 = DISKMAGIC;
1290 lp->d_checksum = dkcksum(lp);
1291 }
1292
1293
1294 /*
1295 * Load the label information on the named device
1296 */
1297 static void
1298 sdgetdisklabel(struct sd_softc *sd)
1299 {
1300 struct disklabel *lp = sd->sc_dk.dk_label;
1301 const char *errstring;
1302
1303 memset(sd->sc_dk.dk_cpulabel, 0, sizeof(struct cpu_disklabel));
1304
1305 sdgetdefaultlabel(sd, lp);
1306
1307 if (lp->d_secpercyl == 0) {
1308 lp->d_secpercyl = 100;
1309 /* as long as it's not 0 - readdisklabel divides by it (?) */
1310 }
1311
1312 /*
1313 * Call the generic disklabel extraction routine
1314 */
1315 errstring = readdisklabel(MAKESDDEV(0, sd->sc_dev.dv_unit, RAW_PART),
1316 sdstrategy, lp, sd->sc_dk.dk_cpulabel);
1317 if (errstring) {
1318 printf("%s: %s\n", sd->sc_dev.dv_xname, errstring);
1319 return;
1320 }
1321 }
1322
1323 static void
1324 sd_shutdown(void *arg)
1325 {
1326 struct sd_softc *sd = arg;
1327
1328 /*
1329 * If the disk cache needs to be flushed, and the disk supports
1330 * it, flush it. We're cold at this point, so we poll for
1331 * completion.
1332 */
1333 if ((sd->flags & SDF_DIRTY) != 0) {
1334 if (sd_flush(sd, XS_CTL_NOSLEEP|XS_CTL_POLL)) {
1335 printf("%s: cache synchronization failed\n",
1336 sd->sc_dev.dv_xname);
1337 sd->flags &= ~SDF_FLUSHING;
1338 } else
1339 sd->flags &= ~(SDF_FLUSHING|SDF_DIRTY);
1340 }
1341 }
1342
1343 /*
1344 * Check Errors
1345 */
1346 static int
1347 sd_interpret_sense(struct scsipi_xfer *xs)
1348 {
1349 struct scsipi_periph *periph = xs->xs_periph;
1350 struct scsipi_sense_data *sense = &xs->sense.scsi_sense;
1351 struct sd_softc *sd = (void *)periph->periph_dev;
1352 int s, error, retval = EJUSTRETURN;
1353
1354 /*
1355 * If the periph is already recovering, just do the normal
1356 * error processing.
1357 */
1358 if (periph->periph_flags & PERIPH_RECOVERING)
1359 return (retval);
1360
1361 /*
1362 * If the device is not open yet, let the generic code handle it.
1363 */
1364 if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0)
1365 return (retval);
1366
1367 /*
1368 * If it isn't an extended or extended/deferred error, let
1369 * the generic code handle it.
1370 */
1371 if ((sense->error_code & SSD_ERRCODE) != 0x70 &&
1372 (sense->error_code & SSD_ERRCODE) != 0x71)
1373 return (retval);
1374
1375 if ((sense->flags & SSD_KEY) == SKEY_NOT_READY &&
1376 sense->add_sense_code == 0x4) {
1377 if (sense->add_sense_code_qual == 0x01) {
1378 /*
1379 * Unit In The Process Of Becoming Ready.
1380 */
1381 printf("%s: waiting for pack to spin up...\n",
1382 sd->sc_dev.dv_xname);
1383 if (!callout_pending(&periph->periph_callout))
1384 scsipi_periph_freeze(periph, 1);
1385 callout_reset(&periph->periph_callout,
1386 5 * hz, scsipi_periph_timed_thaw, periph);
1387 retval = ERESTART;
1388 } else if (sense->add_sense_code_qual == 0x02) {
1389 printf("%s: pack is stopped, restarting...\n",
1390 sd->sc_dev.dv_xname);
1391 s = splbio();
1392 periph->periph_flags |= PERIPH_RECOVERING;
1393 splx(s);
1394 error = scsipi_start(periph, SSS_START,
1395 XS_CTL_URGENT|XS_CTL_HEAD_TAG|
1396 XS_CTL_THAW_PERIPH|XS_CTL_FREEZE_PERIPH);
1397 if (error) {
1398 printf("%s: unable to restart pack\n",
1399 sd->sc_dev.dv_xname);
1400 retval = error;
1401 } else
1402 retval = ERESTART;
1403 s = splbio();
1404 periph->periph_flags &= ~PERIPH_RECOVERING;
1405 splx(s);
1406 }
1407 }
1408 if ((sense->flags & SSD_KEY) == SKEY_MEDIUM_ERROR &&
1409 sense->add_sense_code == 0x31 &&
1410 sense->add_sense_code_qual == 0x00) { /* maybe for any asq ? */
1411 /* Medium Format Corrupted */
1412 retval = EFTYPE;
1413 }
1414 return (retval);
1415 }
1416
1417
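/*
 * Return the size of the given partition in DEV_BSIZE units for
 * swapping/dumping, or -1 if it cannot be determined.  Only FS_SWAP
 * partitions are considered.
 */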
1418 static int
1419 sdsize(dev_t dev)
1420 {
1421 struct sd_softc *sd;
1422 int part, unit, omask;
1423 int size;
1424
1425 unit = SDUNIT(dev);
1426 if (unit >= sd_cd.cd_ndevs)
1427 return (-1);
1428 sd = sd_cd.cd_devs[unit];
1429 if (sd == NULL)
1430 return (-1);
1431
1432 if ((sd->sc_dev.dv_flags & DVF_ACTIVE) == 0)
1433 return (-1);
1434
1435 part = SDPART(dev);
1436 omask = sd->sc_dk.dk_openmask & (1 << part);
1437
1438 if (omask == 0 && sdopen(dev, 0, S_IFBLK, NULL) != 0)
1439 return (-1);
1440 if ((sd->sc_periph->periph_flags & PERIPH_MEDIA_LOADED) == 0)
1441 size = -1;
1442 else if (sd->sc_dk.dk_label->d_partitions[part].p_fstype != FS_SWAP)
1443 size = -1;
1444 else
1445 size = sd->sc_dk.dk_label->d_partitions[part].p_size *
1446 (sd->sc_dk.dk_label->d_secsize / DEV_BSIZE);
1447 if (omask == 0 && sdclose(dev, 0, S_IFBLK, NULL) != 0)
1448 return (-1);
1449 return (size);
1450 }
1451
1452 /* #define SD_DUMP_NOT_TRUSTED if you just want to watch */
1453 static struct scsipi_xfer sx;
1454 static int sddoingadump;
1455
1456 /*
1457 * dump all of physical memory into the partition specified, starting
1458 * at offset 'dumplo' into the partition.
1459 */
1460 static int
1461 sddump(dev_t dev, daddr_t blkno, caddr_t va, size_t size)
1462 {
1463 struct sd_softc *sd; /* disk unit to do the I/O */
1464 struct disklabel *lp; /* disk's disklabel */
1465 int unit, part;
1466 int sectorsize; /* size of a disk sector */
1467 int nsects; /* number of sectors in partition */
1468 int sectoff; /* sector offset of partition */
1469 int totwrt; /* total number of sectors left to write */
1470 int nwrt; /* current number of sectors to write */
1471 struct scsipi_rw_10 cmd; /* write command */
1472 struct scsipi_xfer *xs; /* ... convenience */
1473 struct scsipi_periph *periph;
1474 struct scsipi_channel *chan;
1475
1476 /* Check if recursive dump; if so, punt. */
1477 if (sddoingadump)
1478 return (EFAULT);
1479
1480 /* Mark as active early. */
1481 sddoingadump = 1;
1482
1483 unit = SDUNIT(dev); /* Decompose unit & partition. */
1484 part = SDPART(dev);
1485
1486 /* Check for acceptable drive number. */
1487 if (unit >= sd_cd.cd_ndevs || (sd = sd_cd.cd_devs[unit]) == NULL)
1488 return (ENXIO);
1489
1490 if ((sd->sc_dev.dv_flags & DVF_ACTIVE) == 0)
1491 return (ENODEV);
1492
1493 periph = sd->sc_periph;
1494 chan = periph->periph_channel;
1495
1496 /* Make sure it was initialized. */
1497 if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0)
1498 return (ENXIO);
1499
1500 /* Convert to disk sectors. Request must be a multiple of size. */
1501 lp = sd->sc_dk.dk_label;
1502 sectorsize = lp->d_secsize;
1503 if ((size % sectorsize) != 0)
1504 return (EFAULT);
1505 totwrt = size / sectorsize;
1506 blkno = dbtob(blkno) / sectorsize; /* blkno in DEV_BSIZE units */
1507
1508 nsects = lp->d_partitions[part].p_size;
1509 sectoff = lp->d_partitions[part].p_offset;
1510
1511 /* Check transfer bounds against partition size. */
1512 if ((blkno < 0) || ((blkno + totwrt) > nsects))
1513 return (EINVAL);
1514
1515 /* Offset block number to start of partition. */
1516 blkno += sectoff;
1517
1518 xs = &sx;
1519
1520 while (totwrt > 0) {
1521 nwrt = totwrt; /* XXX */
1522 #ifndef SD_DUMP_NOT_TRUSTED
1523 /*
1524 * Fill out the scsi command
1525 */
1526 memset(&cmd, 0, sizeof(cmd));
1527 cmd.opcode = WRITE_10;
1528 _lto4b(blkno, cmd.addr);
1529 _lto2b(nwrt, cmd.length);
1530 /*
1531 * Fill out the scsipi_xfer structure.
1532 * Note: we cannot sleep as we may be called from interrupt context;
1533 * don't use scsipi_command() as it may want to wait
1534 * for an xs.
1535 */
1536 memset(xs, 0, sizeof(sx));
1537 xs->xs_control |= XS_CTL_NOSLEEP | XS_CTL_POLL |
1538 XS_CTL_DATA_OUT;
1539 xs->xs_status = 0;
1540 xs->xs_periph = periph;
1541 xs->xs_retries = SDRETRIES;
1542 xs->timeout = 10000; /* 10000 millisecs for a disk ! */
1543 xs->cmd = (struct scsipi_generic *)&cmd;
1544 xs->cmdlen = sizeof(cmd);
1545 xs->resid = nwrt * sectorsize;
1546 xs->error = XS_NOERROR;
1547 xs->bp = 0;
1548 xs->data = va;
1549 xs->datalen = nwrt * sectorsize;
1550
1551 /*
1552 * Pass all this info to the scsi driver.
1553 */
1554 scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
1555 if ((xs->xs_status & XS_STS_DONE) == 0 ||
1556 xs->error != XS_NOERROR)
1557 return (EIO);
1558 #else /* SD_DUMP_NOT_TRUSTED */
1559 /* Let's just talk about this first... */
1560 printf("sd%d: dump addr 0x%x, blk %d\n", unit, va, blkno);
1561 delay(500 * 1000); /* half a second */
1562 #endif /* SD_DUMP_NOT_TRUSTED */
1563
1564 /* update block count */
1565 totwrt -= nwrt;
1566 blkno += nwrt;
1567 va += sectorsize * nwrt;
1568 }
1569 sddoingadump = 0;
1570 return (0);
1571 }
1572
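/*
 * Issue a MODE SENSE for the given page, using the big (10-byte) form
 * if the periph requires it and can handle it, and the 6-byte form
 * otherwise.  *big tells the caller which header precedes the data.
 */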
1573 static int
1574 sd_mode_sense(struct sd_softc *sd, u_int8_t byte2, void *sense, size_t size,
1575 int page, int flags, int *big)
1576 {
1577
1578 if ((sd->sc_periph->periph_quirks & PQUIRK_ONLYBIG) &&
1579 !(sd->sc_periph->periph_quirks & PQUIRK_NOBIGMODESENSE)) {
1580 *big = 1;
1581 return scsipi_mode_sense_big(sd->sc_periph, byte2, page, sense,
1582 size + sizeof(struct scsipi_mode_header_big),
1583 flags | XS_CTL_DATA_ONSTACK, SDRETRIES, 6000);
1584 } else {
1585 *big = 0;
1586 return scsipi_mode_sense(sd->sc_periph, byte2, page, sense,
1587 size + sizeof(struct scsipi_mode_header),
1588 flags | XS_CTL_DATA_ONSTACK, SDRETRIES, 6000);
1589 }
1590 }
1591
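/*
 * Issue the matching MODE SELECT, using the same (big or small) form
 * that the preceding sd_mode_sense() reported via "big", with the
 * mode data length field zeroed as the command requires.
 */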
1592 static int
1593 sd_mode_select(struct sd_softc *sd, u_int8_t byte2, void *sense, size_t size,
1594 int flags, int big)
1595 {
1596
1597 if (big) {
1598 struct scsipi_mode_header_big *header = sense;
1599
1600 _lto2b(0, header->data_length);
1601 return scsipi_mode_select_big(sd->sc_periph, byte2, sense,
1602 size + sizeof(struct scsipi_mode_header_big),
1603 flags | XS_CTL_DATA_ONSTACK, SDRETRIES, 6000);
1604 } else {
1605 struct scsipi_mode_header *header = sense;
1606
1607 header->data_length = 0;
1608 return scsipi_mode_select(sd->sc_periph, byte2, sense,
1609 size + sizeof(struct scsipi_mode_header),
1610 flags | XS_CTL_DATA_ONSTACK, SDRETRIES, 6000);
1611 }
1612 }
1613
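/*
 * Parameter strategy for simplified direct-access (RBC) devices:
 * combine READ CAPACITY with the RBC device parameters page (page 6)
 * and fabricate a 64-head, 32-sector pseudo-geometry.
 */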
1614 static int
1615 sd_get_simplifiedparms(struct sd_softc *sd, struct disk_parms *dp, int flags)
1616 {
1617 struct {
1618 struct scsipi_mode_header header;
1619 /* no block descriptor */
1620 u_int8_t pg_code; /* page code (should be 6) */
1621 u_int8_t pg_length; /* page length (should be 11) */
1622 u_int8_t wcd; /* bit0: cache disable */
1623 u_int8_t lbs[2]; /* logical block size */
1624 u_int8_t size[5]; /* number of log. blocks */
1625 u_int8_t pp; /* power/performance */
1626 u_int8_t flags;
1627 u_int8_t resvd;
1628 } scsipi_sense;
1629 u_int64_t sectors;
1630 int error;
1631
1632 /*
1633 * scsipi_size (i.e. "read capacity") and mode sense page 6
1634 * give the same information. Do both for now, and check
1635 * for consistency.
1636 * XXX probably differs for removable media
1637 */
1638 dp->blksize = 512;
1639 if ((sectors = scsipi_size(sd->sc_periph, flags)) == 0)
1640 return (SDGP_RESULT_OFFLINE); /* XXX? */
1641
1642 error = scsipi_mode_sense(sd->sc_periph, SMS_DBD, 6,
1643 &scsipi_sense.header, sizeof(scsipi_sense),
1644 flags | XS_CTL_DATA_ONSTACK, SDRETRIES, 6000);
1645
1646 if (error != 0)
1647 return (SDGP_RESULT_OFFLINE); /* XXX? */
1648
1649 dp->blksize = _2btol(scsipi_sense.lbs);
1650 if (dp->blksize == 0)
1651 dp->blksize = 512;
1652
1653 /*
1654 * Create a pseudo-geometry.
1655 */
1656 dp->heads = 64;
1657 dp->sectors = 32;
1658 dp->cyls = sectors / (dp->heads * dp->sectors);
1659 dp->disksize = _5btol(scsipi_sense.size);
1660 if (dp->disksize <= UINT32_MAX && dp->disksize != sectors) {
1661 printf("RBC size: mode sense=%llu, get cap=%llu\n",
1662 (unsigned long long)dp->disksize,
1663 (unsigned long long)sectors);
1664 dp->disksize = sectors;
1665 }
1666 dp->disksize512 = (dp->disksize * dp->blksize) / DEV_BSIZE;
1667
1668 return (SDGP_RESULT_OK);
1669 }
1670
1671 /*
1672 * Get the scsi driver to send a full inquiry to the device and use the
1673 * results to fill out the disk parameter structure.
1674 */
1675 static int
1676 sd_get_capacity(struct sd_softc *sd, struct disk_parms *dp, int flags)
1677 {
1678 u_int64_t sectors;
1679 int error;
1680 #if 0
1681 int i;
1682 u_int8_t *p;
1683 #endif
1684
1685 dp->disksize = sectors = scsipi_size(sd->sc_periph, flags);
1686 if (sectors == 0) {
1687 struct scsipi_read_format_capacities cmd;
1688 struct {
1689 struct scsipi_capacity_list_header header;
1690 struct scsipi_capacity_descriptor desc;
1691 } __attribute__((packed)) data;
1692
1693 memset(&cmd, 0, sizeof(cmd));
1694 memset(&data, 0, sizeof(data));
1695 cmd.opcode = READ_FORMAT_CAPACITIES;
1696 _lto2b(sizeof(data), cmd.length);
1697
1698 error = scsipi_command(sd->sc_periph,
1699 (void *)&cmd, sizeof(cmd), (void *)&data, sizeof(data),
1700 SDRETRIES, 20000, NULL,
1701 flags | XS_CTL_DATA_IN | XS_CTL_DATA_ONSTACK);
1702 if (error == EFTYPE) {
1703 /* Medium Format Corrupted, handle as not formatted */
1704 return (SDGP_RESULT_UNFORMATTED);
1705 }
1706 if (error || data.header.length == 0)
1707 return (SDGP_RESULT_OFFLINE);
1708
1709 #if 0
1710 printf("rfc: length=%d\n", data.header.length);
1711 printf("rfc result:"); for (i = sizeof(struct scsipi_capacity_list_header) + data.header.length, p = (void *)&data; i; i--, p++) printf(" %02x", *p); printf("\n");
1712 #endif
1713 switch (data.desc.byte5 & SCSIPI_CAP_DESC_CODE_MASK) {
1714 case SCSIPI_CAP_DESC_CODE_RESERVED:
1715 case SCSIPI_CAP_DESC_CODE_FORMATTED:
1716 break;
1717
1718 case SCSIPI_CAP_DESC_CODE_UNFORMATTED:
1719 return (SDGP_RESULT_UNFORMATTED);
1720
1721 case SCSIPI_CAP_DESC_CODE_NONE:
1722 return (SDGP_RESULT_OFFLINE);
1723 }
1724
1725 dp->disksize = sectors = _4btol(data.desc.nblks);
1726 if (sectors == 0)
1727 return (SDGP_RESULT_OFFLINE); /* XXX? */
1728
1729 dp->blksize = _3btol(data.desc.blklen);
1730 if (dp->blksize == 0)
1731 dp->blksize = 512;
1732 } else {
1733 struct sd_mode_sense_data scsipi_sense;
1734 int big, bsize;
1735 struct scsi_blk_desc *bdesc;
1736
1737 memset(&scsipi_sense, 0, sizeof(scsipi_sense));
1738 error = sd_mode_sense(sd, 0, &scsipi_sense,
1739 sizeof(scsipi_sense.blk_desc), 0, flags | XS_CTL_SILENT, &big);
1740 dp->blksize = 512;
1741 if (!error) {
1742 if (big) {
1743 bdesc = (void *)(&scsipi_sense.header.big + 1);
1744 bsize = _2btol(scsipi_sense.header.big.blk_desc_len);
1745 } else {
1746 bdesc = (void *)(&scsipi_sense.header.small + 1);
1747 bsize = scsipi_sense.header.small.blk_desc_len;
1748 }
1749
1750 #if 0
1751 printf("page 0 sense:"); for (i = sizeof(scsipi_sense), p = (void *)&scsipi_sense; i; i--, p++) printf(" %02x", *p); printf("\n");
1752 printf("page 0 bsize=%d\n", bsize);
1753 printf("page 0 ok\n");
1754 #endif
1755
1756 if (bsize >= 8) {
1757 dp->blksize = _3btol(bdesc->blklen);
1758 if (dp->blksize == 0)
1759 dp->blksize = 512;
1760 }
1761 }
1762 }
1763
1764 dp->disksize512 = (sectors * dp->blksize) / DEV_BSIZE;
1765 return (0);
1766 }
1767
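/*
 * Derive a geometry from the rigid disk geometry mode page (page 4):
 * heads and cylinders come from the page, sectors per track are
 * back-calculated from the capacity.
 */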
1768 static int
1769 sd_get_parms_page4(struct sd_softc *sd, struct disk_parms *dp, int flags)
1770 {
1771 struct sd_mode_sense_data scsipi_sense;
1772 int error;
1773 int big, poffset, byte2;
1774 union scsi_disk_pages *pages;
1775 #if 0
1776 int i;
1777 u_int8_t *p;
1778 #endif
1779
1780 byte2 = SMS_DBD;
1781 again:
1782 memset(&scsipi_sense, 0, sizeof(scsipi_sense));
1783 error = sd_mode_sense(sd, byte2, &scsipi_sense,
1784 (byte2 ? 0 : sizeof(scsipi_sense.blk_desc)) +
1785 sizeof(scsipi_sense.pages.rigid_geometry), 4,
1786 flags | XS_CTL_SILENT, &big);
1787 if (error) {
1788 if (byte2 == SMS_DBD) {
1789 /* No result; try once more with DBD off */
1790 byte2 = 0;
1791 goto again;
1792 }
1793 return (error);
1794 }
1795
1796 if (big) {
1797 poffset = sizeof scsipi_sense.header.big;
1798 poffset += _2btol(scsipi_sense.header.big.blk_desc_len);
1799 } else {
1800 poffset = sizeof scsipi_sense.header.small;
1801 poffset += scsipi_sense.header.small.blk_desc_len;
1802 }
1803
1804 pages = (void *)((u_long)&scsipi_sense + poffset);
1805 #if 0
1806 printf("page 4 sense:"); for (i = sizeof(scsipi_sense), p = (void *)&scsipi_sense; i; i--, p++) printf(" %02x", *p); printf("\n");
1807 printf("page 4 pg_code=%d sense=%p/%p\n", pages->rigid_geometry.pg_code, &scsipi_sense, pages);
1808 #endif
1809
1810 if ((pages->rigid_geometry.pg_code & PGCODE_MASK) != 4)
1811 return (ERESTART);
1812
1813 SC_DEBUG(sd->sc_periph, SCSIPI_DB3,
1814 ("%d cyls, %d heads, %d precomp, %d red_write, %d land_zone\n",
1815 _3btol(pages->rigid_geometry.ncyl),
1816 pages->rigid_geometry.nheads,
1817 _2btol(pages->rigid_geometry.st_cyl_wp),
1818 _2btol(pages->rigid_geometry.st_cyl_rwc),
1819 _2btol(pages->rigid_geometry.land_zone)));
1820
1821 /*
1822 * KLUDGE!! (for zone-recorded disks)
1823 * Give a number of sectors so that sec * trks * cyls
1824 * is <= disk_size.
1825 * This can lead to wasted space! THINK ABOUT THIS!
1826 */
1827 dp->heads = pages->rigid_geometry.nheads;
1828 dp->cyls = _3btol(pages->rigid_geometry.ncyl);
1829 if (dp->heads == 0 || dp->cyls == 0)
1830 return (ERESTART);
1831 dp->sectors = dp->disksize / (dp->heads * dp->cyls); /* XXX */
1832
1833 dp->rot_rate = _2btol(pages->rigid_geometry.rpm);
1834 if (dp->rot_rate == 0)
1835 dp->rot_rate = 3600;
1836
1837 #if 0
1838 printf("page 4 ok\n");
1839 #endif
1840 return (0);
1841 }
1842
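/*
 * Derive a geometry from the flexible disk geometry mode page
 * (page 5); sd_get_parms() tries this page first for removable
 * devices.
 */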
1843 static int
1844 sd_get_parms_page5(struct sd_softc *sd, struct disk_parms *dp, int flags)
1845 {
1846 struct sd_mode_sense_data scsipi_sense;
1847 int error;
1848 int big, poffset, byte2;
1849 union scsi_disk_pages *pages;
1850 #if 0
1851 int i;
1852 u_int8_t *p;
1853 #endif
1854
1855 byte2 = SMS_DBD;
1856 again:
1857 memset(&scsipi_sense, 0, sizeof(scsipi_sense));
1858 error = sd_mode_sense(sd, byte2, &scsipi_sense,
1859 (byte2 ? 0 : sizeof(scsipi_sense.blk_desc)) +
1860 sizeof(scsipi_sense.pages.flex_geometry), 5,
1861 flags | XS_CTL_SILENT, &big);
1862 if (error) {
1863 if (byte2 == SMS_DBD) {
1864 /* No result; try once more with DBD off */
1865 byte2 = 0;
1866 goto again;
1867 }
1868 return (error);
1869 }
1870
1871 if (big) {
1872 poffset = sizeof scsipi_sense.header.big;
1873 poffset += _2btol(scsipi_sense.header.big.blk_desc_len);
1874 } else {
1875 poffset = sizeof scsipi_sense.header.small;
1876 poffset += scsipi_sense.header.small.blk_desc_len;
1877 }
1878
1879 pages = (void *)((u_long)&scsipi_sense + poffset);
1880 #if 0
1881 printf("page 5 sense:"); for (i = sizeof(scsipi_sense), p = (void *)&scsipi_sense; i; i--, p++) printf(" %02x", *p); printf("\n");
1882 printf("page 5 pg_code=%d sense=%p/%p\n", pages->flex_geometry.pg_code, &scsipi_sense, pages);
1883 #endif
1884
1885 if ((pages->flex_geometry.pg_code & PGCODE_MASK) != 5)
1886 return (ERESTART);
1887
1888 SC_DEBUG(sd->sc_periph, SCSIPI_DB3,
1889 ("%d cyls, %d heads, %d sec, %d bytes/sec\n",
1890 _3btol(pages->flex_geometry.ncyl),
1891 pages->flex_geometry.nheads,
1892 pages->flex_geometry.ph_sec_tr,
1893 _2btol(pages->flex_geometry.bytes_s)));
1894
1895 dp->heads = pages->flex_geometry.nheads;
1896 dp->cyls = _2btol(pages->flex_geometry.ncyl);
1897 dp->sectors = pages->flex_geometry.ph_sec_tr;
1898 if (dp->heads == 0 || dp->cyls == 0 || dp->sectors == 0)
1899 return (ERESTART);
1900
1901 dp->rot_rate = _2btol(pages->rigid_geometry.rpm);
1902 if (dp->rot_rate == 0)
1903 dp->rot_rate = 3600;
1904
1905 #if 0
1906 printf("page 5 ok\n");
1907 #endif
1908 return (0);
1909 }
1910
1911 static int
1912 sd_get_parms(struct sd_softc *sd, struct disk_parms *dp, int flags)
1913 {
1914 int error;
1915
1916 /*
1917 * If offline, the PERIPH_MEDIA_LOADED flag will be
1918 * cleared by the caller if necessary.
1919 */
1920 if (sd->type == T_SIMPLE_DIRECT)
1921 return (sd_get_simplifiedparms(sd, dp, flags));
1922
1923 error = sd_get_capacity(sd, dp, flags);
1924 if (error)
1925 return (error);
1926
1927 if (sd->type == T_OPTICAL)
1928 goto page0;
1929
1930 if (sd->sc_periph->periph_flags & PERIPH_REMOVABLE) {
1931 if (!sd_get_parms_page5(sd, dp, flags) ||
1932 !sd_get_parms_page4(sd, dp, flags))
1933 return (SDGP_RESULT_OK);
1934 } else {
1935 if (!sd_get_parms_page4(sd, dp, flags) ||
1936 !sd_get_parms_page5(sd, dp, flags))
1937 return (SDGP_RESULT_OK);
1938 }
1939
1940 page0:
1941 printf("%s: fabricating a geometry\n", sd->sc_dev.dv_xname);
1942 /* Try calling driver's method for figuring out geometry. */
1943 if (!sd->sc_periph->periph_channel->chan_adapter->adapt_getgeom ||
1944 !(*sd->sc_periph->periph_channel->chan_adapter->adapt_getgeom)
1945 (sd->sc_periph, dp, dp->disksize)) {
1946 /*
1947 * Use the Adaptec standard fictitious geometry.
1948 * This depends on which controller (e.g. the 1542C is
1949 * different), but we have to put SOMETHING here.
1950 */
1951 dp->heads = 64;
1952 dp->sectors = 32;
1953 dp->cyls = dp->disksize / (64 * 32);
1954 }
1955 dp->rot_rate = 3600;
1956 return (SDGP_RESULT_OK);
1957 }
1958
1959 static int
1960 sd_flush(struct sd_softc *sd, int flags)
1961 {
1962 struct scsipi_periph *periph = sd->sc_periph;
1963 struct scsi_synchronize_cache_10 cmd;
1964
1965 /*
1966 * If the device is SCSI-2, issue a SYNCHRONIZE CACHE.
1967 * We issue with address 0 length 0, which should be
1968 * interpreted by the device as "all remaining blocks
1969 * starting at address 0". We ignore ILLEGAL REQUEST
1970 * in the event that the command is not supported by
1971 * the device, and poll for completion so that we know
1972 * that the cache has actually been flushed.
1973 *
1974 * Unless, that is, the device can't handle the SYNCHRONIZE CACHE
1975 * command, as indicated by our quirks flags.
1976 *
1977 * XXX What about older devices?
1978 */
1979 if (periph->periph_version < 2 ||
1980 (periph->periph_quirks & PQUIRK_NOSYNCCACHE))
1981 return (0);
1982
1983 sd->flags |= SDF_FLUSHING;
1984 memset(&cmd, 0, sizeof(cmd));
1985 cmd.opcode = SCSI_SYNCHRONIZE_CACHE_10;
1986
1987 return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
1988 SDRETRIES, 100000, NULL, flags | XS_CTL_IGNORE_ILLEGAL_REQUEST));
1989 }
1990
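/*
 * Read the caching mode page (page 8) and translate the RCD/WCE bits,
 * and whether they are changeable, into DKCACHE_* flags for
 * DIOCGCACHE.
 */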
1991 static int
1992 sd_getcache(struct sd_softc *sd, int *bitsp)
1993 {
1994 struct scsipi_periph *periph = sd->sc_periph;
1995 struct sd_mode_sense_data scsipi_sense;
1996 int error, bits = 0;
1997 int big;
1998 union scsi_disk_pages *pages;
1999
2000 if (periph->periph_version < 2)
2001 return (EOPNOTSUPP);
2002
2003 memset(&scsipi_sense, 0, sizeof(scsipi_sense));
2004 error = sd_mode_sense(sd, SMS_DBD, &scsipi_sense,
2005 sizeof(scsipi_sense.pages.caching_params), 8, 0, &big);
2006 if (error)
2007 return (error);
2008
2009 if (big)
2010 pages = (void *)(&scsipi_sense.header.big + 1);
2011 else
2012 pages = (void *)(&scsipi_sense.header.small + 1);
2013
2014 if ((pages->caching_params.flags & CACHING_RCD) == 0)
2015 bits |= DKCACHE_READ;
2016 if (pages->caching_params.flags & CACHING_WCE)
2017 bits |= DKCACHE_WRITE;
2018 if (pages->caching_params.pg_code & PGCODE_PS)
2019 bits |= DKCACHE_SAVE;
2020
2021 memset(&scsipi_sense, 0, sizeof(scsipi_sense));
2022 error = sd_mode_sense(sd, SMS_DBD, &scsipi_sense,
2023 sizeof(scsipi_sense.pages.caching_params),
2024 SMS_PAGE_CTRL_CHANGEABLE|8, 0, &big);
2025 if (error == 0) {
2026 if (big)
2027 pages = (void *)(&scsipi_sense.header.big + 1);
2028 else
2029 pages = (void *)(&scsipi_sense.header.small + 1);
2030
2031 if (pages->caching_params.flags & CACHING_RCD)
2032 bits |= DKCACHE_RCHANGE;
2033 if (pages->caching_params.flags & CACHING_WCE)
2034 bits |= DKCACHE_WCHANGE;
2035 }
2036
2037 *bitsp = bits;
2038
2039 return (0);
2040 }
2041
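/*
 * Rewrite the caching mode page to reflect the requested DKCACHE_*
 * bits, saving the page if DKCACHE_SAVE was given.  The MODE SELECT
 * is skipped entirely if nothing would change.
 */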
2042 static int
2043 sd_setcache(struct sd_softc *sd, int bits)
2044 {
2045 struct scsipi_periph *periph = sd->sc_periph;
2046 struct sd_mode_sense_data scsipi_sense;
2047 int error;
2048 uint8_t oflags, byte2 = 0;
2049 int big;
2050 union scsi_disk_pages *pages;
2051
2052 if (periph->periph_version < 2)
2053 return (EOPNOTSUPP);
2054
2055 memset(&scsipi_sense, 0, sizeof(scsipi_sense));
2056 error = sd_mode_sense(sd, SMS_DBD, &scsipi_sense,
2057 sizeof(scsipi_sense.pages.caching_params), 8, 0, &big);
2058 if (error)
2059 return (error);
2060
2061 if (big)
2062 pages = (void *)(&scsipi_sense.header.big + 1);
2063 else
2064 pages = (void *)(&scsipi_sense.header.small + 1);
2065
2066 oflags = pages->caching_params.flags;
2067
2068 if (bits & DKCACHE_READ)
2069 pages->caching_params.flags &= ~CACHING_RCD;
2070 else
2071 pages->caching_params.flags |= CACHING_RCD;
2072
2073 if (bits & DKCACHE_WRITE)
2074 pages->caching_params.flags |= CACHING_WCE;
2075 else
2076 pages->caching_params.flags &= ~CACHING_WCE;
2077
2078 if (oflags == pages->caching_params.flags)
2079 return (0);
2080
2081 pages->caching_params.pg_code &= PGCODE_MASK;
2082
2083 if (bits & DKCACHE_SAVE)
2084 byte2 |= SMS_SP;
2085
2086 return (sd_mode_select(sd, byte2|SMS_PF, &scsipi_sense,
2087 sizeof(struct scsipi_mode_page_header) +
2088 pages->caching_params.pg_length, 0, big));
2089 }
2090