1 /*	$NetBSD: sd.c,v 1.223 2004/09/09 19:35:32 bouyer Exp $	*/
2
3 /*-
4 * Copyright (c) 1998, 2003 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Charles M. Hannum.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*
40  * Originally written by Julian Elischer (julian@dialix.oz.au)
41 * for TRW Financial Systems for use under the MACH(2.5) operating system.
42 *
43 * TRW Financial Systems, in accordance with their agreement with Carnegie
44 * Mellon University, makes this software available to CMU to distribute
45 * or use in any manner that they see fit as long as this message is kept with
46 * the software. For this reason TFS also grants any other persons or
47 * organisations permission to use or modify this software.
48 *
49 * TFS supplies this software to be publicly redistributed
50 * on the understanding that TFS is not responsible for the correct
51 * functioning of this software in any circumstances.
52 *
53  * Ported to run under 386BSD by Julian Elischer (julian@dialix.oz.au) Sept 1992
54 */
55
56 #include <sys/cdefs.h>
57 __KERNEL_RCSID(0, "$NetBSD: sd.c,v 1.223 2004/09/09 19:35:32 bouyer Exp $");
58
59 #include "opt_scsi.h"
60 #include "rnd.h"
61
62 #include <sys/param.h>
63 #include <sys/systm.h>
64 #include <sys/kernel.h>
65 #include <sys/file.h>
66 #include <sys/stat.h>
67 #include <sys/ioctl.h>
68 #include <sys/scsiio.h>
69 #include <sys/buf.h>
70 #include <sys/uio.h>
71 #include <sys/malloc.h>
72 #include <sys/errno.h>
73 #include <sys/device.h>
74 #include <sys/disklabel.h>
75 #include <sys/disk.h>
76 #include <sys/proc.h>
77 #include <sys/conf.h>
78 #include <sys/vnode.h>
79 #if NRND > 0
80 #include <sys/rnd.h>
81 #endif
82
83 #include <dev/scsipi/scsipi_all.h>
84 #include <dev/scsipi/scsi_all.h>
85 #include <dev/scsipi/scsipi_disk.h>
86 #include <dev/scsipi/scsi_disk.h>
87 #include <dev/scsipi/scsiconf.h>
88 #include <dev/scsipi/scsipi_base.h>
89 #include <dev/scsipi/sdvar.h>
90
91 #define SDUNIT(dev) DISKUNIT(dev)
92 #define SDPART(dev) DISKPART(dev)
93 #define SDMINOR(unit, part) DISKMINOR(unit, part)
94 #define MAKESDDEV(maj, unit, part) MAKEDISKDEV(maj, unit, part)
95
96 #define SDLABELDEV(dev) (MAKESDDEV(major(dev), SDUNIT(dev), RAW_PART))
97
98 static int sdlock(struct sd_softc *);
99 static void sdunlock(struct sd_softc *);
100 static void sdminphys(struct buf *);
101 static void sdgetdefaultlabel(struct sd_softc *, struct disklabel *);
102 static void sdgetdisklabel(struct sd_softc *);
103 static void sdstart(struct scsipi_periph *);
104 static void sdrestart(void *);
105 static void sddone(struct scsipi_xfer *);
106 static void sd_shutdown(void *);
107 static int sd_interpret_sense(struct scsipi_xfer *);
108
109 static int sd_mode_sense(struct sd_softc *, u_int8_t, void *, size_t, int,
110 int, int *);
111 static int sd_mode_select(struct sd_softc *, u_int8_t, void *, size_t, int,
112 int);
113 static int sd_get_simplifiedparms(struct sd_softc *, struct disk_parms *,
114 int);
115 static int sd_get_capacity(struct sd_softc *, struct disk_parms *, int);
116 static int sd_get_parms(struct sd_softc *, struct disk_parms *, int);
117 static int sd_get_parms_page4(struct sd_softc *, struct disk_parms *,
118 int);
119 static int sd_get_parms_page5(struct sd_softc *, struct disk_parms *,
120 int);
121
122 static int sd_flush(struct sd_softc *, int);
123 static int sd_getcache(struct sd_softc *, int *);
124 static int sd_setcache(struct sd_softc *, int);
125
126 static int sdmatch(struct device *, struct cfdata *, void *);
127 static void sdattach(struct device *, struct device *, void *);
128 static int sdactivate(struct device *, enum devact);
129 static int sddetach(struct device *, int);
130
131 CFATTACH_DECL(sd, sizeof(struct sd_softc), sdmatch, sdattach, sddetach,
132 sdactivate);
133
134 extern struct cfdriver sd_cd;
135
136 static const struct scsipi_inquiry_pattern sd_patterns[] = {
137 {T_DIRECT, T_FIXED,
138 "", "", ""},
139 {T_DIRECT, T_REMOV,
140 "", "", ""},
141 {T_OPTICAL, T_FIXED,
142 "", "", ""},
143 {T_OPTICAL, T_REMOV,
144 "", "", ""},
145 {T_SIMPLE_DIRECT, T_FIXED,
146 "", "", ""},
147 {T_SIMPLE_DIRECT, T_REMOV,
148 "", "", ""},
149 };
150
151 static dev_type_open(sdopen);
152 static dev_type_close(sdclose);
153 static dev_type_read(sdread);
154 static dev_type_write(sdwrite);
155 static dev_type_ioctl(sdioctl);
156 static dev_type_strategy(sdstrategy);
157 static dev_type_dump(sddump);
158 static dev_type_size(sdsize);
159
160 const struct bdevsw sd_bdevsw = {
161 sdopen, sdclose, sdstrategy, sdioctl, sddump, sdsize, D_DISK
162 };
163
164 const struct cdevsw sd_cdevsw = {
165 sdopen, sdclose, sdread, sdwrite, sdioctl,
166 nostop, notty, nopoll, nommap, nokqfilter, D_DISK
167 };
168
169 static struct dkdriver sddkdriver = { sdstrategy };
170
171 static const struct scsipi_periphsw sd_switch = {
172 sd_interpret_sense, /* check our error handler first */
173 sdstart, /* have a queue, served by this */
174 NULL, /* have no async handler */
175 sddone, /* deal with stats at interrupt time */
176 };
177
178 struct sd_mode_sense_data {
179 /*
180 * XXX
181 * We are not going to parse this as-is -- it just has to be large
182 * enough.
183 */
184 union {
185 struct scsipi_mode_header small;
186 struct scsipi_mode_header_big big;
187 } header;
188 struct scsi_blk_desc blk_desc;
189 union scsi_disk_pages pages;
190 };
191
192 /*
193  * The routine called by the low level scsi code when it discovers
194  * a device suitable for this driver.
195 */
196 static int
197 sdmatch(struct device *parent, struct cfdata *match, void *aux)
198 {
199 struct scsipibus_attach_args *sa = aux;
200 int priority;
201
202 (void)scsipi_inqmatch(&sa->sa_inqbuf,
203 (caddr_t)sd_patterns, sizeof(sd_patterns) / sizeof(sd_patterns[0]),
204 sizeof(sd_patterns[0]), &priority);
205
206 return (priority);
207 }
208
209 /*
210 * Attach routine common to atapi & scsi.
211 */
212 static void
213 sdattach(struct device *parent, struct device *self, void *aux)
214 {
215 struct sd_softc *sd = (void *)self;
216 struct scsipibus_attach_args *sa = aux;
217 struct scsipi_periph *periph = sa->sa_periph;
218 int error, result;
219 struct disk_parms *dp = &sd->params;
220 char pbuf[9];
221
222 SC_DEBUG(periph, SCSIPI_DB2, ("sdattach: "));
223
224 sd->type = (sa->sa_inqbuf.type & SID_TYPE);
225 if (sd->type == T_SIMPLE_DIRECT)
226 periph->periph_quirks |= PQUIRK_ONLYBIG | PQUIRK_NOBIGMODESENSE;
227
228 if (scsipi_periph_bustype(sa->sa_periph) == SCSIPI_BUSTYPE_SCSI &&
229 periph->periph_version == 0)
230 sd->flags |= SDF_ANCIENT;
231
232 bufq_alloc(&sd->buf_queue,
233 BUFQ_DISK_DEFAULT_STRAT()|BUFQ_SORT_RAWBLOCK);
234
235 callout_init(&sd->sc_callout);
236
237 /*
238 * Store information needed to contact our base driver
239 */
240 sd->sc_periph = periph;
241
242 periph->periph_dev = &sd->sc_dev;
243 periph->periph_switch = &sd_switch;
244
245 /*
246 * Increase our openings to the maximum-per-periph
247 * supported by the adapter. This will either be
248 * clamped down or grown by the adapter if necessary.
249 */
250 periph->periph_openings =
251 SCSIPI_CHAN_MAX_PERIPH(periph->periph_channel);
252 periph->periph_flags |= PERIPH_GROW_OPENINGS;
253
254 /*
255 * Initialize and attach the disk structure.
256 */
257 sd->sc_dk.dk_driver = &sddkdriver;
258 sd->sc_dk.dk_name = sd->sc_dev.dv_xname;
259 disk_attach(&sd->sc_dk);
260
261 /*
262 * Use the subdriver to request information regarding the drive.
263 */
264 aprint_naive("\n");
265 aprint_normal("\n");
266
267 error = scsipi_test_unit_ready(periph,
268 XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
269 XS_CTL_IGNORE_MEDIA_CHANGE | XS_CTL_SILENT_NODEV);
270
271 if (error)
272 result = SDGP_RESULT_OFFLINE;
273 else
274 result = sd_get_parms(sd, &sd->params, XS_CTL_DISCOVERY);
275 aprint_normal("%s: ", sd->sc_dev.dv_xname);
276 switch (result) {
277 case SDGP_RESULT_OK:
278 format_bytes(pbuf, sizeof(pbuf),
279 (u_int64_t)dp->disksize * dp->blksize);
280 aprint_normal(
281 "%s, %ld cyl, %ld head, %ld sec, %ld bytes/sect x %llu sectors",
282 pbuf, dp->cyls, dp->heads, dp->sectors, dp->blksize,
283 (unsigned long long)dp->disksize);
284 break;
285
286 case SDGP_RESULT_OFFLINE:
287 aprint_normal("drive offline");
288 break;
289
290 case SDGP_RESULT_UNFORMATTED:
291 aprint_normal("unformatted media");
292 break;
293
294 #ifdef DIAGNOSTIC
295 default:
296 panic("sdattach: unknown result from get_parms");
297 break;
298 #endif
299 }
300 aprint_normal("\n");
301
302 /*
303 * Establish a shutdown hook so that we can ensure that
304 * our data has actually made it onto the platter at
305 * shutdown time. Note that this relies on the fact
306 * that the shutdown hook code puts us at the head of
307 * the list (thus guaranteeing that our hook runs before
308 * our ancestors').
309 */
310 if ((sd->sc_sdhook =
311 shutdownhook_establish(sd_shutdown, sd)) == NULL)
312 aprint_error("%s: WARNING: unable to establish shutdown hook\n",
313 sd->sc_dev.dv_xname);
314
315 #if NRND > 0
316 /*
317 * attach the device into the random source list
318 */
319 rnd_attach_source(&sd->rnd_source, sd->sc_dev.dv_xname,
320 RND_TYPE_DISK, 0);
321 #endif
322 }
323
324 static int
325 sdactivate(struct device *self, enum devact act)
326 {
327 int rv = 0;
328
329 switch (act) {
330 case DVACT_ACTIVATE:
331 rv = EOPNOTSUPP;
332 break;
333
334 case DVACT_DEACTIVATE:
335 /*
336 * Nothing to do; we key off the device's DVF_ACTIVE.
337 */
338 break;
339 }
340 return (rv);
341 }
342
343 static int
344 sddetach(struct device *self, int flags)
345 {
346 struct sd_softc *sd = (struct sd_softc *) self;
347 struct buf *bp;
348 int s, bmaj, cmaj, i, mn;
349
350 /* locate the major number */
351 bmaj = bdevsw_lookup_major(&sd_bdevsw);
352 cmaj = cdevsw_lookup_major(&sd_cdevsw);
353
354 /* kill any pending restart */
355 callout_stop(&sd->sc_callout);
356
357 s = splbio();
358
359 /* Kill off any queued buffers. */
360 while ((bp = BUFQ_GET(&sd->buf_queue)) != NULL) {
361 bp->b_error = EIO;
362 bp->b_flags |= B_ERROR;
363 bp->b_resid = bp->b_bcount;
364 biodone(bp);
365 }
366
367 bufq_free(&sd->buf_queue);
368
369 /* Kill off any pending commands. */
370 scsipi_kill_pending(sd->sc_periph);
371
372 splx(s);
373
374 /* Nuke the vnodes for any open instances */
375 for (i = 0; i < MAXPARTITIONS; i++) {
376 mn = SDMINOR(self->dv_unit, i);
377 vdevgone(bmaj, mn, mn, VBLK);
378 vdevgone(cmaj, mn, mn, VCHR);
379 }
380
381 /* Detach from the disk list. */
382 disk_detach(&sd->sc_dk);
383
384 /* Get rid of the shutdown hook. */
385 shutdownhook_disestablish(sd->sc_sdhook);
386
387 #if NRND > 0
388 /* Unhook the entropy source. */
389 rnd_detach_source(&sd->rnd_source);
390 #endif
391
392 return (0);
393 }
394
395 /*
396 * Wait interruptibly for an exclusive lock.
397 *
398 * XXX
399 * Several drivers do this; it should be abstracted and made MP-safe.
400 */
401 static int
402 sdlock(struct sd_softc *sd)
403 {
404 int error;
405
406 while ((sd->flags & SDF_LOCKED) != 0) {
407 sd->flags |= SDF_WANTED;
408 if ((error = tsleep(sd, PRIBIO | PCATCH, "sdlck", 0)) != 0)
409 return (error);
410 }
411 sd->flags |= SDF_LOCKED;
412 return (0);
413 }
414
415 /*
416 * Unlock and wake up any waiters.
417 */
418 static void
419 sdunlock(struct sd_softc *sd)
420 {
421
422 sd->flags &= ~SDF_LOCKED;
423 if ((sd->flags & SDF_WANTED) != 0) {
424 sd->flags &= ~SDF_WANTED;
425 wakeup(sd);
426 }
427 }
428
429 /*
430  * Open the device. Make sure the partition info is as up-to-date as can be.
431 */
432 static int
433 sdopen(dev_t dev, int flag, int fmt, struct proc *p)
434 {
435 struct sd_softc *sd;
436 struct scsipi_periph *periph;
437 struct scsipi_adapter *adapt;
438 int unit, part;
439 int error;
440
441 unit = SDUNIT(dev);
442 if (unit >= sd_cd.cd_ndevs)
443 return (ENXIO);
444 sd = sd_cd.cd_devs[unit];
445 if (sd == NULL)
446 return (ENXIO);
447
448 if ((sd->sc_dev.dv_flags & DVF_ACTIVE) == 0)
449 return (ENODEV);
450
451 periph = sd->sc_periph;
452 adapt = periph->periph_channel->chan_adapter;
453 part = SDPART(dev);
454
455 SC_DEBUG(periph, SCSIPI_DB1,
456 ("sdopen: dev=0x%x (unit %d (of %d), partition %d)\n", dev, unit,
457 sd_cd.cd_ndevs, part));
458
459 /*
460 * If this is the first open of this device, add a reference
461 * to the adapter.
462 */
463 if (sd->sc_dk.dk_openmask == 0 &&
464 (error = scsipi_adapter_addref(adapt)) != 0)
465 return (error);
466
467 if ((error = sdlock(sd)) != 0)
468 goto bad4;
469
470 if ((periph->periph_flags & PERIPH_OPEN) != 0) {
471 /*
472 * If any partition is open, but the disk has been invalidated,
473 			 * disallow further opens of non-raw partitions.
474 */
475 if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0 &&
476 (part != RAW_PART || fmt != S_IFCHR)) {
477 error = EIO;
478 goto bad3;
479 }
480 } else {
481 int silent;
482
483 if (part == RAW_PART && fmt == S_IFCHR)
484 silent = XS_CTL_SILENT;
485 else
486 silent = 0;
487
488 /* Check that it is still responding and ok. */
489 error = scsipi_test_unit_ready(periph,
490 XS_CTL_IGNORE_ILLEGAL_REQUEST | XS_CTL_IGNORE_MEDIA_CHANGE |
491 silent);
492
493 /*
494 * Start the pack spinning if necessary. Always allow the
495 		 * raw partition to be opened, for raw IOCTLs. Data transfers
496 		 * will check for PERIPH_MEDIA_LOADED.
497 */
498 if (error == EIO) {
499 int error2;
500
501 error2 = scsipi_start(periph, SSS_START, silent);
502 switch (error2) {
503 case 0:
504 error = 0;
505 break;
506 case EIO:
507 case EINVAL:
508 break;
509 default:
510 error = error2;
511 break;
512 }
513 }
514 if (error) {
515 if (silent)
516 goto out;
517 goto bad3;
518 }
519
520 periph->periph_flags |= PERIPH_OPEN;
521
522 if (periph->periph_flags & PERIPH_REMOVABLE) {
523 /* Lock the pack in. */
524 error = scsipi_prevent(periph, PR_PREVENT,
525 XS_CTL_IGNORE_ILLEGAL_REQUEST |
526 XS_CTL_IGNORE_MEDIA_CHANGE);
527 if (error)
528 goto bad;
529 }
530
531 if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0) {
532 int param_error;
533 periph->periph_flags |= PERIPH_MEDIA_LOADED;
534
535 /*
536 * Load the physical device parameters.
537 *
538 * Note that if media is present but unformatted,
539 * we allow the open (so that it can be formatted!).
540 * The drive should refuse real I/O, if the media is
541 * unformatted.
542 */
543 if ((param_error = sd_get_parms(sd, &sd->params, 0))
544 == SDGP_RESULT_OFFLINE) {
545 error = ENXIO;
546 goto bad2;
547 }
548 SC_DEBUG(periph, SCSIPI_DB3, ("Params loaded "));
549
550 /* Load the partition info if not already loaded. */
551 if (param_error == 0) {
552 sdgetdisklabel(sd);
553 SC_DEBUG(periph, SCSIPI_DB3,
554 ("Disklabel loaded "));
555 }
556 }
557 }
558
559 /* Check that the partition exists. */
560 if (part != RAW_PART &&
561 (part >= sd->sc_dk.dk_label->d_npartitions ||
562 sd->sc_dk.dk_label->d_partitions[part].p_fstype == FS_UNUSED)) {
563 error = ENXIO;
564 goto bad;
565 }
566
567 out:	/* Ensure only one open at a time. */
568 switch (fmt) {
569 case S_IFCHR:
570 sd->sc_dk.dk_copenmask |= (1 << part);
571 break;
572 case S_IFBLK:
573 sd->sc_dk.dk_bopenmask |= (1 << part);
574 break;
575 }
576 sd->sc_dk.dk_openmask =
577 sd->sc_dk.dk_copenmask | sd->sc_dk.dk_bopenmask;
578
579 SC_DEBUG(periph, SCSIPI_DB3, ("open complete\n"));
580 sdunlock(sd);
581 return (0);
582
583 bad2:
584 periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
585
586 bad:
587 if (sd->sc_dk.dk_openmask == 0) {
588 if (periph->periph_flags & PERIPH_REMOVABLE)
589 scsipi_prevent(periph, PR_ALLOW,
590 XS_CTL_IGNORE_ILLEGAL_REQUEST |
591 XS_CTL_IGNORE_MEDIA_CHANGE);
592 periph->periph_flags &= ~PERIPH_OPEN;
593 }
594
595 bad3:
596 sdunlock(sd);
597 bad4:
598 if (sd->sc_dk.dk_openmask == 0)
599 scsipi_adapter_delref(adapt);
600 return (error);
601 }
602
603 /*
604  * Close the device. Only called if we are the LAST occurrence of an open
605 * device. Convenient now but usually a pain.
606 */
607 static int
608 sdclose(dev_t dev, int flag, int fmt, struct proc *p)
609 {
610 struct sd_softc *sd = sd_cd.cd_devs[SDUNIT(dev)];
611 struct scsipi_periph *periph = sd->sc_periph;
612 struct scsipi_adapter *adapt = periph->periph_channel->chan_adapter;
613 int part = SDPART(dev);
614 int error;
615
616 if ((error = sdlock(sd)) != 0)
617 return (error);
618
619 switch (fmt) {
620 case S_IFCHR:
621 sd->sc_dk.dk_copenmask &= ~(1 << part);
622 break;
623 case S_IFBLK:
624 sd->sc_dk.dk_bopenmask &= ~(1 << part);
625 break;
626 }
627 sd->sc_dk.dk_openmask =
628 sd->sc_dk.dk_copenmask | sd->sc_dk.dk_bopenmask;
629
630 if (sd->sc_dk.dk_openmask == 0) {
631 /*
632 * If the disk cache needs flushing, and the disk supports
633 * it, do it now.
634 */
635 if ((sd->flags & SDF_DIRTY) != 0) {
636 if (sd_flush(sd, 0)) {
637 printf("%s: cache synchronization failed\n",
638 sd->sc_dev.dv_xname);
639 sd->flags &= ~SDF_FLUSHING;
640 } else
641 sd->flags &= ~(SDF_FLUSHING|SDF_DIRTY);
642 }
643
644 if (! (periph->periph_flags & PERIPH_KEEP_LABEL))
645 periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
646
647 scsipi_wait_drain(periph);
648
649 if (periph->periph_flags & PERIPH_REMOVABLE)
650 scsipi_prevent(periph, PR_ALLOW,
651 XS_CTL_IGNORE_ILLEGAL_REQUEST |
652 XS_CTL_IGNORE_NOT_READY);
653 periph->periph_flags &= ~PERIPH_OPEN;
654
655 scsipi_wait_drain(periph);
656
657 scsipi_adapter_delref(adapt);
658 }
659
660 sdunlock(sd);
661 return (0);
662 }
663
664 /*
665 * Actually translate the requested transfer into one the physical driver
666 * can understand. The transfer is described by a buf and will include
667 * only one physical transfer.
668 */
669 static void
670 sdstrategy(struct buf *bp)
671 {
672 struct sd_softc *sd = sd_cd.cd_devs[SDUNIT(bp->b_dev)];
673 struct scsipi_periph *periph = sd->sc_periph;
674 struct disklabel *lp;
675 daddr_t blkno;
676 int s;
677 boolean_t sector_aligned;
678
679 SC_DEBUG(sd->sc_periph, SCSIPI_DB2, ("sdstrategy "));
680 SC_DEBUG(sd->sc_periph, SCSIPI_DB1,
681 ("%ld bytes @ blk %" PRId64 "\n", bp->b_bcount, bp->b_blkno));
682 /*
683 * If the device has been made invalid, error out
684 */
685 if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0 ||
686 (sd->sc_dev.dv_flags & DVF_ACTIVE) == 0) {
687 if (periph->periph_flags & PERIPH_OPEN)
688 bp->b_error = EIO;
689 else
690 bp->b_error = ENODEV;
691 goto bad;
692 }
693
694 lp = sd->sc_dk.dk_label;
695
696 /*
697 * The transfer must be a whole number of blocks, offset must not be
698 * negative.
699 */
700 if (lp->d_secsize == DEV_BSIZE) {
701 sector_aligned = (bp->b_bcount & (DEV_BSIZE - 1)) == 0;
702 } else {
703 sector_aligned = (bp->b_bcount % lp->d_secsize) == 0;
704 }
705 if (!sector_aligned || bp->b_blkno < 0) {
706 bp->b_error = EINVAL;
707 goto bad;
708 }
709 /*
710 	 * If it's a null transfer, return immediately.
711 */
712 if (bp->b_bcount == 0)
713 goto done;
714
715 /*
716 	 * Do bounds checking and adjust the transfer. If error, process it.
717 * If end of partition, just return.
718 */
719 if (SDPART(bp->b_dev) == RAW_PART) {
720 if (bounds_check_with_mediasize(bp, DEV_BSIZE,
721 sd->params.disksize512) <= 0)
722 goto done;
723 } else {
724 if (bounds_check_with_label(&sd->sc_dk, bp,
725 (sd->flags & (SDF_WLABEL|SDF_LABELLING)) != 0) <= 0)
726 goto done;
727 }
728
729 /*
730 * Now convert the block number to absolute and put it in
731 * terms of the device's logical block size.
732 */
733 if (lp->d_secsize == DEV_BSIZE)
734 blkno = bp->b_blkno;
735 else if (lp->d_secsize > DEV_BSIZE)
736 blkno = bp->b_blkno / (lp->d_secsize / DEV_BSIZE);
737 else
738 blkno = bp->b_blkno * (DEV_BSIZE / lp->d_secsize);
739
740 if (SDPART(bp->b_dev) != RAW_PART)
741 blkno += lp->d_partitions[SDPART(bp->b_dev)].p_offset;
742
743 bp->b_rawblkno = blkno;
744
745 s = splbio();
746
747 /*
748 * Place it in the queue of disk activities for this disk.
749 *
750 * XXX Only do disksort() if the current operating mode does not
751 * XXX include tagged queueing.
752 */
753 BUFQ_PUT(&sd->buf_queue, bp);
754
755 /*
756 * Tell the device to get going on the transfer if it's
757 * not doing anything, otherwise just wait for completion
758 */
759 sdstart(sd->sc_periph);
760
761 splx(s);
762 return;
763
764 bad:
765 bp->b_flags |= B_ERROR;
766 done:
767 /*
768 * Correctly set the buf to indicate a completed xfer
769 */
770 bp->b_resid = bp->b_bcount;
771 biodone(bp);
772 }
773
774 /*
775 * sdstart looks to see if there is a buf waiting for the device
776 * and that the device is not already busy. If both are true,
777  * it dequeues the buf and creates a scsi command to perform the
778 * transfer in the buf. The transfer request will call scsipi_done
779 * on completion, which will in turn call this routine again
780 * so that the next queued transfer is performed.
781 * The bufs are queued by the strategy routine (sdstrategy)
782 *
783 * This routine is also called after other non-queued requests
784 * have been made of the scsi driver, to ensure that the queue
785 * continues to be drained.
786 *
787  * Must be called at the correct (highish) spl level.
788  * sdstart() is called at splbio from sdstrategy, sdrestart and scsipi_done.
789 */
790 static void
791 sdstart(struct scsipi_periph *periph)
792 {
793 struct sd_softc *sd = (void *)periph->periph_dev;
794 struct disklabel *lp = sd->sc_dk.dk_label;
795 struct buf *bp = 0;
796 struct scsipi_rw_big cmd_big;
797 struct scsi_rw cmd_small;
798 struct scsipi_generic *cmdp;
799 struct scsipi_xfer *xs;
800 int nblks, cmdlen, error, flags;
801
802 SC_DEBUG(periph, SCSIPI_DB2, ("sdstart "));
803 /*
804 * Check if the device has room for another command
805 */
806 while (periph->periph_active < periph->periph_openings) {
807 /*
808 		 * There is excess capacity, but a special command is waiting.
809 		 * It'll need the adapter as soon as we clear out of the
810 		 * way and let it run (user level wait).
811 */
812 if (periph->periph_flags & PERIPH_WAITING) {
813 periph->periph_flags &= ~PERIPH_WAITING;
814 wakeup((caddr_t)periph);
815 return;
816 }
817
818 /*
819 * If the device has become invalid, abort all the
820 * reads and writes until all files have been closed and
821 * re-opened
822 */
823 if (__predict_false(
824 (periph->periph_flags & PERIPH_MEDIA_LOADED) == 0)) {
825 if ((bp = BUFQ_GET(&sd->buf_queue)) != NULL) {
826 bp->b_error = EIO;
827 bp->b_flags |= B_ERROR;
828 bp->b_resid = bp->b_bcount;
829 biodone(bp);
830 continue;
831 } else {
832 return;
833 }
834 }
835
836 /*
837 * See if there is a buf with work for us to do..
838 */
839 if ((bp = BUFQ_PEEK(&sd->buf_queue)) == NULL)
840 return;
841
842 /*
843 * We have a buf, now we should make a command.
844 */
845
846 if (lp->d_secsize == DEV_BSIZE)
847 nblks = bp->b_bcount >> DEV_BSHIFT;
848 else
849 nblks = howmany(bp->b_bcount, lp->d_secsize);
850
851 /*
852 * Fill out the scsi command. If the transfer will
853 * fit in a "small" cdb, use it.
854 */
855 if (((bp->b_rawblkno & 0x1fffff) == bp->b_rawblkno) &&
856 ((nblks & 0xff) == nblks) &&
857 !(periph->periph_quirks & PQUIRK_ONLYBIG)) {
858 /*
859 * We can fit in a small cdb.
860 */
861 memset(&cmd_small, 0, sizeof(cmd_small));
862 cmd_small.opcode = (bp->b_flags & B_READ) ?
863 SCSI_READ_COMMAND : SCSI_WRITE_COMMAND;
864 _lto3b(bp->b_rawblkno, cmd_small.addr);
865 cmd_small.length = nblks & 0xff;
866 cmdlen = sizeof(cmd_small);
867 cmdp = (struct scsipi_generic *)&cmd_small;
868 } else {
869 /*
870 * Need a large cdb.
871 */
872 memset(&cmd_big, 0, sizeof(cmd_big));
873 cmd_big.opcode = (bp->b_flags & B_READ) ?
874 READ_BIG : WRITE_BIG;
875 _lto4b(bp->b_rawblkno, cmd_big.addr);
876 _lto2b(nblks, cmd_big.length);
877 cmdlen = sizeof(cmd_big);
878 cmdp = (struct scsipi_generic *)&cmd_big;
879 }
880
881 /* Instrumentation. */
882 disk_busy(&sd->sc_dk);
883
884 /*
885 * Mark the disk dirty so that the cache will be
886 * flushed on close.
887 */
888 if ((bp->b_flags & B_READ) == 0)
889 sd->flags |= SDF_DIRTY;
890
891 /*
892 * Figure out what flags to use.
893 */
894 flags = XS_CTL_NOSLEEP|XS_CTL_ASYNC|XS_CTL_SIMPLE_TAG;
895 if (bp->b_flags & B_READ)
896 flags |= XS_CTL_DATA_IN;
897 else
898 flags |= XS_CTL_DATA_OUT;
899
900 /*
901 * Call the routine that chats with the adapter.
902 * Note: we cannot sleep as we may be an interrupt
903 */
904 xs = scsipi_make_xs(periph, cmdp, cmdlen,
905 (u_char *)bp->b_data, bp->b_bcount,
906 SDRETRIES, SD_IO_TIMEOUT, bp, flags);
907 if (__predict_false(xs == NULL)) {
908 /*
909 			 * Out of memory. Keep this buffer in the queue, and
910 * retry later.
911 */
912 callout_reset(&sd->sc_callout, hz / 2, sdrestart,
913 periph);
914 return;
915 }
916 /*
917 		 * We need to dequeue the buffer before queuing the command,
918 		 * because sdstart may be called recursively from the
919 		 * HBA driver.
920 */
921 #ifdef DIAGNOSTIC
922 if (BUFQ_GET(&sd->buf_queue) != bp)
923 panic("sdstart(): dequeued wrong buf");
924 #else
925 BUFQ_GET(&sd->buf_queue);
926 #endif
927 error = scsipi_command(periph, xs, cmdp, cmdlen,
928 (u_char *)bp->b_data, bp->b_bcount,
929 SDRETRIES, SD_IO_TIMEOUT, bp, flags);
930 /* with a scsipi_xfer preallocated, scsipi_command can't fail */
931 KASSERT(error == 0);
932 }
933 }
934
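/*
 * Callout handler used when sdstart() could not allocate a transfer:
 * re-run the transfer queue at splbio.
 */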
935 static void
936 sdrestart(void *v)
937 {
938 int s = splbio();
939 sdstart((struct scsipi_periph *)v);
940 splx(s);
941 }
942
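/*
 * Completion handler called by the scsipi layer when a transfer finishes:
 * clear the cache-flush state and, for buffer-based transfers, record
 * disk statistics (and entropy).
 */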
943 static void
944 sddone(struct scsipi_xfer *xs)
945 {
946 struct sd_softc *sd = (void *)xs->xs_periph->periph_dev;
947
948 if (sd->flags & SDF_FLUSHING) {
949 /* Flush completed, no longer dirty. */
950 sd->flags &= ~(SDF_FLUSHING|SDF_DIRTY);
951 }
952
953 if (xs->bp != NULL) {
954 disk_unbusy(&sd->sc_dk, xs->bp->b_bcount - xs->bp->b_resid,
955 (xs->bp->b_flags & B_READ));
956 #if NRND > 0
957 rnd_add_uint32(&sd->rnd_source, xs->bp->b_rawblkno);
958 #endif
959 }
960 }
961
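/*
 * Clamp a transfer to what this device can handle (ancient devices are
 * limited to 255-block 6-byte commands) before deferring to the
 * adapter's minphys routine.
 */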
962 static void
963 sdminphys(struct buf *bp)
964 {
965 struct sd_softc *sd = sd_cd.cd_devs[SDUNIT(bp->b_dev)];
966 long max;
967
968 /*
969 * If the device is ancient, we want to make sure that
970 * the transfer fits into a 6-byte cdb.
971 *
972 * XXX Note that the SCSI-I spec says that 256-block transfers
973 * are allowed in a 6-byte read/write, and are specified
974 	 * by setting the "length" to 0. However, we're conservative
975 * here, allowing only 255-block transfers in case an
976 * ancient device gets confused by length == 0. A length of 0
977 * in a 10-byte read/write actually means 0 blocks.
978 */
979 if ((sd->flags & SDF_ANCIENT) &&
980 ((sd->sc_periph->periph_flags &
981 (PERIPH_REMOVABLE | PERIPH_MEDIA_LOADED)) != PERIPH_REMOVABLE)) {
982 max = sd->sc_dk.dk_label->d_secsize * 0xff;
983
984 if (bp->b_bcount > max)
985 bp->b_bcount = max;
986 }
987
988 scsipi_adapter_minphys(sd->sc_periph->periph_channel, bp);
989 }
990
991 static int
992 sdread(dev_t dev, struct uio *uio, int ioflag)
993 {
994
995 return (physio(sdstrategy, NULL, dev, B_READ, sdminphys, uio));
996 }
997
998 static int
999 sdwrite(dev_t dev, struct uio *uio, int ioflag)
1000 {
1001
1002 return (physio(sdstrategy, NULL, dev, B_WRITE, sdminphys, uio));
1003 }
1004
1005 /*
1006 * Perform special action on behalf of the user
1007 * Knows about the internals of this device
1008 */
1009 static int
1010 sdioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
1011 {
1012 struct sd_softc *sd = sd_cd.cd_devs[SDUNIT(dev)];
1013 struct scsipi_periph *periph = sd->sc_periph;
1014 int part = SDPART(dev);
1015 int error = 0;
1016 #ifdef __HAVE_OLD_DISKLABEL
1017 struct disklabel *newlabel = NULL;
1018 #endif
1019
1020 SC_DEBUG(sd->sc_periph, SCSIPI_DB2, ("sdioctl 0x%lx ", cmd));
1021
1022 /*
1023 * If the device is not valid, some IOCTLs can still be
1024 * handled on the raw partition. Check this here.
1025 */
1026 if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0) {
1027 switch (cmd) {
1028 case DIOCKLABEL:
1029 case DIOCWLABEL:
1030 case DIOCLOCK:
1031 case DIOCEJECT:
1032 case ODIOCEJECT:
1033 case DIOCGCACHE:
1034 case DIOCSCACHE:
1035 case SCIOCIDENTIFY:
1036 case OSCIOCIDENTIFY:
1037 case SCIOCCOMMAND:
1038 case SCIOCDEBUG:
1039 if (part == RAW_PART)
1040 break;
1041 /* FALLTHROUGH */
1042 default:
1043 if ((periph->periph_flags & PERIPH_OPEN) == 0)
1044 return (ENODEV);
1045 else
1046 return (EIO);
1047 }
1048 }
1049
1050 switch (cmd) {
1051 case DIOCGDINFO:
1052 *(struct disklabel *)addr = *(sd->sc_dk.dk_label);
1053 return (0);
1054
1055 #ifdef __HAVE_OLD_DISKLABEL
1056 case ODIOCGDINFO:
1057 newlabel = malloc(sizeof *newlabel, M_TEMP, M_WAITOK);
1058 if (newlabel == NULL)
1059 return EIO;
1060 memcpy(newlabel, sd->sc_dk.dk_label, sizeof (*newlabel));
1061 if (newlabel->d_npartitions <= OLDMAXPARTITIONS)
1062 memcpy(addr, newlabel, sizeof (struct olddisklabel));
1063 else
1064 error = ENOTTY;
1065 free(newlabel, M_TEMP);
1066 return error;
1067 #endif
1068
1069 case DIOCGPART:
1070 ((struct partinfo *)addr)->disklab = sd->sc_dk.dk_label;
1071 ((struct partinfo *)addr)->part =
1072 &sd->sc_dk.dk_label->d_partitions[part];
1073 return (0);
1074
1075 case DIOCWDINFO:
1076 case DIOCSDINFO:
1077 #ifdef __HAVE_OLD_DISKLABEL
1078 case ODIOCWDINFO:
1079 case ODIOCSDINFO:
1080 #endif
1081 {
1082 struct disklabel *lp;
1083
1084 if ((flag & FWRITE) == 0)
1085 return (EBADF);
1086
1087 #ifdef __HAVE_OLD_DISKLABEL
1088 if (cmd == ODIOCSDINFO || cmd == ODIOCWDINFO) {
1089 newlabel = malloc(sizeof *newlabel, M_TEMP, M_WAITOK);
1090 if (newlabel == NULL)
1091 return EIO;
1092 			memset(newlabel, 0, sizeof(*newlabel));
1093 memcpy(newlabel, addr, sizeof (struct olddisklabel));
1094 lp = newlabel;
1095 } else
1096 #endif
1097 lp = (struct disklabel *)addr;
1098
1099 if ((error = sdlock(sd)) != 0)
1100 goto bad;
1101 sd->flags |= SDF_LABELLING;
1102
1103 error = setdisklabel(sd->sc_dk.dk_label,
1104 lp, /*sd->sc_dk.dk_openmask : */0,
1105 sd->sc_dk.dk_cpulabel);
1106 if (error == 0) {
1107 if (cmd == DIOCWDINFO
1108 #ifdef __HAVE_OLD_DISKLABEL
1109 || cmd == ODIOCWDINFO
1110 #endif
1111 )
1112 error = writedisklabel(SDLABELDEV(dev),
1113 sdstrategy, sd->sc_dk.dk_label,
1114 sd->sc_dk.dk_cpulabel);
1115 }
1116
1117 sd->flags &= ~SDF_LABELLING;
1118 sdunlock(sd);
1119 bad:
1120 #ifdef __HAVE_OLD_DISKLABEL
1121 if (newlabel != NULL)
1122 free(newlabel, M_TEMP);
1123 #endif
1124 return (error);
1125 }
1126
1127 case DIOCKLABEL:
1128 if (*(int *)addr)
1129 periph->periph_flags |= PERIPH_KEEP_LABEL;
1130 else
1131 periph->periph_flags &= ~PERIPH_KEEP_LABEL;
1132 return (0);
1133
1134 case DIOCWLABEL:
1135 if ((flag & FWRITE) == 0)
1136 return (EBADF);
1137 if (*(int *)addr)
1138 sd->flags |= SDF_WLABEL;
1139 else
1140 sd->flags &= ~SDF_WLABEL;
1141 return (0);
1142
1143 case DIOCLOCK:
1144 return (scsipi_prevent(periph,
1145 (*(int *)addr) ? PR_PREVENT : PR_ALLOW, 0));
1146
1147 case DIOCEJECT:
1148 if ((periph->periph_flags & PERIPH_REMOVABLE) == 0)
1149 return (ENOTTY);
1150 if (*(int *)addr == 0) {
1151 /*
1152 * Don't force eject: check that we are the only
1153 * partition open. If so, unlock it.
1154 */
1155 if ((sd->sc_dk.dk_openmask & ~(1 << part)) == 0 &&
1156 sd->sc_dk.dk_bopenmask + sd->sc_dk.dk_copenmask ==
1157 sd->sc_dk.dk_openmask) {
1158 error = scsipi_prevent(periph, PR_ALLOW,
1159 XS_CTL_IGNORE_NOT_READY);
1160 if (error)
1161 return (error);
1162 } else {
1163 return (EBUSY);
1164 }
1165 }
1166 /* FALLTHROUGH */
1167 case ODIOCEJECT:
1168 return ((periph->periph_flags & PERIPH_REMOVABLE) == 0 ?
1169 ENOTTY : scsipi_start(periph, SSS_STOP|SSS_LOEJ, 0));
1170
1171 case DIOCGDEFLABEL:
1172 sdgetdefaultlabel(sd, (struct disklabel *)addr);
1173 return (0);
1174
1175 #ifdef __HAVE_OLD_DISKLABEL
1176 case ODIOCGDEFLABEL:
1177 newlabel = malloc(sizeof *newlabel, M_TEMP, M_WAITOK);
1178 if (newlabel == NULL)
1179 return EIO;
1180 sdgetdefaultlabel(sd, newlabel);
1181 if (newlabel->d_npartitions <= OLDMAXPARTITIONS)
1182 memcpy(addr, newlabel, sizeof (struct olddisklabel));
1183 else
1184 error = ENOTTY;
1185 free(newlabel, M_TEMP);
1186 return error;
1187 #endif
1188
1189 case DIOCGCACHE:
1190 return (sd_getcache(sd, (int *) addr));
1191
1192 case DIOCSCACHE:
1193 if ((flag & FWRITE) == 0)
1194 return (EBADF);
1195 return (sd_setcache(sd, *(int *) addr));
1196
1197 case DIOCCACHESYNC:
1198 /*
1199 * XXX Do we really need to care about having a writable
1200 * file descriptor here?
1201 */
1202 if ((flag & FWRITE) == 0)
1203 return (EBADF);
1204 if (((sd->flags & SDF_DIRTY) != 0 || *(int *)addr != 0)) {
1205 error = sd_flush(sd, 0);
1206 if (error)
1207 sd->flags &= ~SDF_FLUSHING;
1208 else
1209 sd->flags &= ~(SDF_FLUSHING|SDF_DIRTY);
1210 } else
1211 error = 0;
1212 return (error);
1213
1214 default:
1215 if (part != RAW_PART)
1216 return (ENOTTY);
1217 return (scsipi_do_ioctl(periph, dev, cmd, addr, flag, p));
1218 }
1219
1220 #ifdef DIAGNOSTIC
1221 panic("sdioctl: impossible");
1222 #endif
1223 }
1224
1225 static void
1226 sdgetdefaultlabel(struct sd_softc *sd, struct disklabel *lp)
1227 {
1228
1229 memset(lp, 0, sizeof(struct disklabel));
1230
1231 lp->d_secsize = sd->params.blksize;
1232 lp->d_ntracks = sd->params.heads;
1233 lp->d_nsectors = sd->params.sectors;
1234 lp->d_ncylinders = sd->params.cyls;
1235 lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
1236
1237 switch (scsipi_periph_bustype(sd->sc_periph)) {
1238 case SCSIPI_BUSTYPE_SCSI:
1239 lp->d_type = DTYPE_SCSI;
1240 break;
1241 case SCSIPI_BUSTYPE_ATAPI:
1242 lp->d_type = DTYPE_ATAPI;
1243 break;
1244 }
1245 /*
1246 * XXX
1247 * We could probe the mode pages to figure out what kind of disc it is.
1248 * Is this worthwhile?
1249 */
1250 strncpy(lp->d_typename, "mydisk", 16);
1251 strncpy(lp->d_packname, "fictitious", 16);
1252 lp->d_secperunit = sd->params.disksize;
1253 lp->d_rpm = sd->params.rot_rate;
1254 lp->d_interleave = 1;
1255 lp->d_flags = sd->sc_periph->periph_flags & PERIPH_REMOVABLE ?
1256 D_REMOVABLE : 0;
1257
1258 lp->d_partitions[RAW_PART].p_offset = 0;
1259 lp->d_partitions[RAW_PART].p_size =
1260 lp->d_secperunit * (lp->d_secsize / DEV_BSIZE);
1261 lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED;
1262 lp->d_npartitions = RAW_PART + 1;
1263
1264 lp->d_magic = DISKMAGIC;
1265 lp->d_magic2 = DISKMAGIC;
1266 lp->d_checksum = dkcksum(lp);
1267 }
1268
1269
1270 /*
1271 * Load the label information on the named device
1272 */
1273 static void
1274 sdgetdisklabel(struct sd_softc *sd)
1275 {
1276 struct disklabel *lp = sd->sc_dk.dk_label;
1277 const char *errstring;
1278
1279 memset(sd->sc_dk.dk_cpulabel, 0, sizeof(struct cpu_disklabel));
1280
1281 sdgetdefaultlabel(sd, lp);
1282
1283 if (lp->d_secpercyl == 0) {
1284 lp->d_secpercyl = 100;
1285 /* as long as it's not 0 - readdisklabel divides by it (?) */
1286 }
1287
1288 /*
1289 * Call the generic disklabel extraction routine
1290 */
1291 errstring = readdisklabel(MAKESDDEV(0, sd->sc_dev.dv_unit, RAW_PART),
1292 sdstrategy, lp, sd->sc_dk.dk_cpulabel);
1293 if (errstring) {
1294 printf("%s: %s\n", sd->sc_dev.dv_xname, errstring);
1295 return;
1296 }
1297 }
1298
1299 static void
1300 sd_shutdown(void *arg)
1301 {
1302 struct sd_softc *sd = arg;
1303
1304 /*
1305 * If the disk cache needs to be flushed, and the disk supports
1306 * it, flush it. We're cold at this point, so we poll for
1307 * completion.
1308 */
1309 if ((sd->flags & SDF_DIRTY) != 0) {
1310 if (sd_flush(sd, XS_CTL_NOSLEEP|XS_CTL_POLL)) {
1311 printf("%s: cache synchronization failed\n",
1312 sd->sc_dev.dv_xname);
1313 sd->flags &= ~SDF_FLUSHING;
1314 } else
1315 sd->flags &= ~(SDF_FLUSHING|SDF_DIRTY);
1316 }
1317 }
1318
1319 /*
1320 * Check Errors
1321 */
1322 static int
1323 sd_interpret_sense(struct scsipi_xfer *xs)
1324 {
1325 struct scsipi_periph *periph = xs->xs_periph;
1326 struct scsipi_sense_data *sense = &xs->sense.scsi_sense;
1327 struct sd_softc *sd = (void *)periph->periph_dev;
1328 int s, error, retval = EJUSTRETURN;
1329
1330 /*
1331 * If the periph is already recovering, just do the normal
1332 * error processing.
1333 */
1334 if (periph->periph_flags & PERIPH_RECOVERING)
1335 return (retval);
1336
1337 /*
1338 * If the device is not open yet, let the generic code handle it.
1339 */
1340 if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0)
1341 return (retval);
1342
1343 /*
1344 	 * If it isn't an extended or extended/deferred error, let
1345 * the generic code handle it.
1346 */
1347 if ((sense->error_code & SSD_ERRCODE) != 0x70 &&
1348 (sense->error_code & SSD_ERRCODE) != 0x71)
1349 return (retval);
1350
1351 if ((sense->flags & SSD_KEY) == SKEY_NOT_READY &&
1352 sense->add_sense_code == 0x4) {
1353 if (sense->add_sense_code_qual == 0x01) {
1354 /*
1355 * Unit In The Process Of Becoming Ready.
1356 */
1357 printf("%s: waiting for pack to spin up...\n",
1358 sd->sc_dev.dv_xname);
1359 if (!callout_pending(&periph->periph_callout))
1360 scsipi_periph_freeze(periph, 1);
1361 callout_reset(&periph->periph_callout,
1362 5 * hz, scsipi_periph_timed_thaw, periph);
1363 retval = ERESTART;
1364 } else if (sense->add_sense_code_qual == 0x02) {
1365 printf("%s: pack is stopped, restarting...\n",
1366 sd->sc_dev.dv_xname);
1367 s = splbio();
1368 periph->periph_flags |= PERIPH_RECOVERING;
1369 splx(s);
1370 error = scsipi_start(periph, SSS_START,
1371 XS_CTL_URGENT|XS_CTL_HEAD_TAG|
1372 XS_CTL_THAW_PERIPH|XS_CTL_FREEZE_PERIPH);
1373 if (error) {
1374 printf("%s: unable to restart pack\n",
1375 sd->sc_dev.dv_xname);
1376 retval = error;
1377 } else
1378 retval = ERESTART;
1379 s = splbio();
1380 periph->periph_flags &= ~PERIPH_RECOVERING;
1381 splx(s);
1382 }
1383 }
1384 if ((sense->flags & SSD_KEY) == SKEY_MEDIUM_ERROR &&
1385 sense->add_sense_code == 0x31 &&
1386 sense->add_sense_code_qual == 0x00) { /* maybe for any asq ? */
1387 /* Medium Format Corrupted */
1388 retval = EFTYPE;
1389 }
1390 return (retval);
1391 }
1392
1393
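/*
 * Return the size of the given partition in DEV_BSIZE units, for
 * swapping/dumping; -1 if the unit is unavailable or the partition
 * is not configured as swap.
 */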
1394 static int
1395 sdsize(dev_t dev)
1396 {
1397 struct sd_softc *sd;
1398 int part, unit, omask;
1399 int size;
1400
1401 unit = SDUNIT(dev);
1402 if (unit >= sd_cd.cd_ndevs)
1403 return (-1);
1404 sd = sd_cd.cd_devs[unit];
1405 if (sd == NULL)
1406 return (-1);
1407
1408 if ((sd->sc_dev.dv_flags & DVF_ACTIVE) == 0)
1409 return (-1);
1410
1411 part = SDPART(dev);
1412 omask = sd->sc_dk.dk_openmask & (1 << part);
1413
1414 if (omask == 0 && sdopen(dev, 0, S_IFBLK, NULL) != 0)
1415 return (-1);
1416 if ((sd->sc_periph->periph_flags & PERIPH_MEDIA_LOADED) == 0)
1417 size = -1;
1418 else if (sd->sc_dk.dk_label->d_partitions[part].p_fstype != FS_SWAP)
1419 size = -1;
1420 else
1421 size = sd->sc_dk.dk_label->d_partitions[part].p_size *
1422 (sd->sc_dk.dk_label->d_secsize / DEV_BSIZE);
1423 if (omask == 0 && sdclose(dev, 0, S_IFBLK, NULL) != 0)
1424 return (-1);
1425 return (size);
1426 }
1427
1428 /* #define SD_DUMP_NOT_TRUSTED if you just want to watch */
1429 static struct scsipi_xfer sx;
1430 static int sddoingadump;
1431
1432 /*
1433 * dump all of physical memory into the partition specified, starting
1434 * at offset 'dumplo' into the partition.
1435 */
1436 static int
1437 sddump(dev_t dev, daddr_t blkno, caddr_t va, size_t size)
1438 {
1439 struct sd_softc *sd; /* disk unit to do the I/O */
1440 struct disklabel *lp; /* disk's disklabel */
1441 int unit, part;
1442 int sectorsize; /* size of a disk sector */
1443 int nsects; /* number of sectors in partition */
1444 int sectoff; /* sector offset of partition */
1445 int totwrt; /* total number of sectors left to write */
1446 int nwrt; /* current number of sectors to write */
1447 struct scsipi_rw_big cmd; /* write command */
1448 struct scsipi_xfer *xs; /* ... convenience */
1449 struct scsipi_periph *periph;
1450 struct scsipi_channel *chan;
1451
1452 /* Check if recursive dump; if so, punt. */
1453 if (sddoingadump)
1454 return (EFAULT);
1455
1456 /* Mark as active early. */
1457 sddoingadump = 1;
1458
1459 unit = SDUNIT(dev); /* Decompose unit & partition. */
1460 part = SDPART(dev);
1461
1462 /* Check for acceptable drive number. */
1463 if (unit >= sd_cd.cd_ndevs || (sd = sd_cd.cd_devs[unit]) == NULL)
1464 return (ENXIO);
1465
1466 if ((sd->sc_dev.dv_flags & DVF_ACTIVE) == 0)
1467 return (ENODEV);
1468
1469 periph = sd->sc_periph;
1470 chan = periph->periph_channel;
1471
1472 /* Make sure it was initialized. */
1473 if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0)
1474 return (ENXIO);
1475
1476 /* Convert to disk sectors. Request must be a multiple of size. */
1477 lp = sd->sc_dk.dk_label;
1478 sectorsize = lp->d_secsize;
1479 if ((size % sectorsize) != 0)
1480 return (EFAULT);
1481 totwrt = size / sectorsize;
1482 blkno = dbtob(blkno) / sectorsize; /* blkno in DEV_BSIZE units */
1483
1484 nsects = lp->d_partitions[part].p_size;
1485 sectoff = lp->d_partitions[part].p_offset;
1486
1487 /* Check transfer bounds against partition size. */
1488 if ((blkno < 0) || ((blkno + totwrt) > nsects))
1489 return (EINVAL);
1490
1491 /* Offset block number to start of partition. */
1492 blkno += sectoff;
1493
1494 xs = &sx;
1495
1496 while (totwrt > 0) {
1497 nwrt = totwrt; /* XXX */
1498 #ifndef SD_DUMP_NOT_TRUSTED
1499 /*
1500 * Fill out the scsi command
1501 */
1502 memset(&cmd, 0, sizeof(cmd));
1503 cmd.opcode = WRITE_BIG;
1504 _lto4b(blkno, cmd.addr);
1505 _lto2b(nwrt, cmd.length);
1506 /*
1507 * Fill out the scsipi_xfer structure
1508 * Note: we cannot sleep as we may be an interrupt
1509 * don't use scsipi_command() as it may want to wait
1510 * for an xs.
1511 */
1512 memset(xs, 0, sizeof(sx));
1513 xs->xs_control |= XS_CTL_NOSLEEP | XS_CTL_POLL |
1514 XS_CTL_DATA_OUT;
1515 xs->xs_status = 0;
1516 xs->xs_periph = periph;
1517 xs->xs_retries = SDRETRIES;
1518 xs->timeout = 10000; /* 10000 millisecs for a disk ! */
1519 xs->cmd = (struct scsipi_generic *)&cmd;
1520 xs->cmdlen = sizeof(cmd);
1521 xs->resid = nwrt * sectorsize;
1522 xs->error = XS_NOERROR;
1523 xs->bp = 0;
1524 xs->data = va;
1525 xs->datalen = nwrt * sectorsize;
1526
1527 /*
1528 * Pass all this info to the scsi driver.
1529 */
1530 scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
1531 if ((xs->xs_status & XS_STS_DONE) == 0 ||
1532 xs->error != XS_NOERROR)
1533 return (EIO);
1534 #else /* SD_DUMP_NOT_TRUSTED */
1535 /* Let's just talk about this first... */
1536 printf("sd%d: dump addr 0x%x, blk %d\n", unit, va, blkno);
1537 delay(500 * 1000); /* half a second */
1538 #endif /* SD_DUMP_NOT_TRUSTED */
1539
1540 /* update block count */
1541 totwrt -= nwrt;
1542 blkno += nwrt;
1543 va += sectorsize * nwrt;
1544 }
1545 sddoingadump = 0;
1546 return (0);
1547 }
1548
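/*
 * Issue a MODE SENSE for the given page, using the 10-byte (big) form
 * when the periph's quirks call for it, and report which form was used
 * through *big.
 */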
1549 static int
1550 sd_mode_sense(struct sd_softc *sd, u_int8_t byte2, void *sense, size_t size,
1551 int page, int flags, int *big)
1552 {
1553
1554 if ((sd->sc_periph->periph_quirks & PQUIRK_ONLYBIG) &&
1555 !(sd->sc_periph->periph_quirks & PQUIRK_NOBIGMODESENSE)) {
1556 *big = 1;
1557 return scsipi_mode_sense_big(sd->sc_periph, byte2, page, sense,
1558 size + sizeof(struct scsipi_mode_header_big),
1559 flags | XS_CTL_DATA_ONSTACK, SDRETRIES, 6000);
1560 } else {
1561 *big = 0;
1562 return scsipi_mode_sense(sd->sc_periph, byte2, page, sense,
1563 size + sizeof(struct scsipi_mode_header),
1564 flags | XS_CTL_DATA_ONSTACK, SDRETRIES, 6000);
1565 }
1566 }
1567
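/*
 * Issue the matching MODE SELECT, zeroing the header's data length field
 * and using the same (big or small) form that the preceding MODE SENSE
 * used.
 */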
1568 static int
1569 sd_mode_select(struct sd_softc *sd, u_int8_t byte2, void *sense, size_t size,
1570 int flags, int big)
1571 {
1572
1573 if (big) {
1574 struct scsipi_mode_header_big *header = sense;
1575
1576 _lto2b(0, header->data_length);
1577 return scsipi_mode_select_big(sd->sc_periph, byte2, sense,
1578 size + sizeof(struct scsipi_mode_header_big),
1579 flags | XS_CTL_DATA_ONSTACK, SDRETRIES, 6000);
1580 } else {
1581 struct scsipi_mode_header *header = sense;
1582
1583 header->data_length = 0;
1584 return scsipi_mode_select(sd->sc_periph, byte2, sense,
1585 size + sizeof(struct scsipi_mode_header),
1586 flags | XS_CTL_DATA_ONSTACK, SDRETRIES, 6000);
1587 }
1588 }
1589
1590 static int
1591 sd_get_simplifiedparms(struct sd_softc *sd, struct disk_parms *dp, int flags)
1592 {
1593 struct {
1594 struct scsipi_mode_header header;
1595 /* no block descriptor */
1596 u_int8_t pg_code; /* page code (should be 6) */
1597 u_int8_t pg_length; /* page length (should be 11) */
1598 u_int8_t wcd; /* bit0: cache disable */
1599 u_int8_t lbs[2]; /* logical block size */
1600 u_int8_t size[5]; /* number of log. blocks */
1601 u_int8_t pp; /* power/performance */
1602 u_int8_t flags;
1603 u_int8_t resvd;
1604 } scsipi_sense;
1605 u_int64_t sectors;
1606 int error;
1607
1608 /*
1609 	 * scsipi_size (i.e. "read capacity") and mode sense page 6
1610 * give the same information. Do both for now, and check
1611 * for consistency.
1612 * XXX probably differs for removable media
1613 */
1614 dp->blksize = 512;
1615 if ((sectors = scsipi_size(sd->sc_periph, flags)) == 0)
1616 return (SDGP_RESULT_OFFLINE); /* XXX? */
1617
1618 error = scsipi_mode_sense(sd->sc_periph, SMS_DBD, 6,
1619 &scsipi_sense.header, sizeof(scsipi_sense),
1620 flags | XS_CTL_DATA_ONSTACK, SDRETRIES, 6000);
1621
1622 if (error != 0)
1623 return (SDGP_RESULT_OFFLINE); /* XXX? */
1624
1625 dp->blksize = _2btol(scsipi_sense.lbs);
1626 if (dp->blksize == 0)
1627 dp->blksize = 512;
1628
1629 /*
1630 * Create a pseudo-geometry.
1631 */
1632 dp->heads = 64;
1633 dp->sectors = 32;
1634 dp->cyls = sectors / (dp->heads * dp->sectors);
1635 dp->disksize = _5btol(scsipi_sense.size);
1636 if (dp->disksize <= UINT32_MAX && dp->disksize != sectors) {
1637 printf("RBC size: mode sense=%llu, get cap=%llu\n",
1638 (unsigned long long)dp->disksize,
1639 (unsigned long long)sectors);
1640 dp->disksize = sectors;
1641 }
1642 dp->disksize512 = (dp->disksize * dp->blksize) / DEV_BSIZE;
1643
1644 return (SDGP_RESULT_OK);
1645 }
1646
1647 /*
1648  * Get the scsi driver to send a full inquiry to the device and use the
1649 * results to fill out the disk parameter structure.
1650 */
1651 static int
1652 sd_get_capacity(struct sd_softc *sd, struct disk_parms *dp, int flags)
1653 {
1654 u_int64_t sectors;
1655 int error;
1656 #if 0
1657 int i;
1658 u_int8_t *p;
1659 #endif
1660
1661 dp->disksize = sectors = scsipi_size(sd->sc_periph, flags);
1662 if (sectors == 0) {
1663 struct scsipi_read_format_capacities cmd;
1664 struct {
1665 struct scsipi_capacity_list_header header;
1666 struct scsipi_capacity_descriptor desc;
1667 } __attribute__((packed)) data;
1668
1669 memset(&cmd, 0, sizeof(cmd));
1670 memset(&data, 0, sizeof(data));
1671 cmd.opcode = READ_FORMAT_CAPACITIES;
1672 _lto2b(sizeof(data), cmd.length);
1673
1674 error = scsipi_command(sd->sc_periph, NULL,
1675 (void *)&cmd, sizeof(cmd), (void *)&data, sizeof(data),
1676 SDRETRIES, 20000, NULL,
1677 flags | XS_CTL_DATA_IN | XS_CTL_DATA_ONSTACK);
1678 if (error == EFTYPE) {
1679 /* Medium Format Corrupted, handle as not formatted */
1680 return (SDGP_RESULT_UNFORMATTED);
1681 }
1682 if (error || data.header.length == 0)
1683 return (SDGP_RESULT_OFFLINE);
1684
1685 #if 0
1686 printf("rfc: length=%d\n", data.header.length);
1687 printf("rfc result:"); for (i = sizeof(struct scsipi_capacity_list_header) + data.header.length, p = (void *)&data; i; i--, p++) printf(" %02x", *p); printf("\n");
1688 #endif
1689 switch (data.desc.byte5 & SCSIPI_CAP_DESC_CODE_MASK) {
1690 case SCSIPI_CAP_DESC_CODE_RESERVED:
1691 case SCSIPI_CAP_DESC_CODE_FORMATTED:
1692 break;
1693
1694 case SCSIPI_CAP_DESC_CODE_UNFORMATTED:
1695 return (SDGP_RESULT_UNFORMATTED);
1696
1697 case SCSIPI_CAP_DESC_CODE_NONE:
1698 return (SDGP_RESULT_OFFLINE);
1699 }
1700
1701 dp->disksize = sectors = _4btol(data.desc.nblks);
1702 if (sectors == 0)
1703 return (SDGP_RESULT_OFFLINE); /* XXX? */
1704
1705 dp->blksize = _3btol(data.desc.blklen);
1706 if (dp->blksize == 0)
1707 dp->blksize = 512;
1708 } else {
1709 struct sd_mode_sense_data scsipi_sense;
1710 int big, bsize;
1711 struct scsi_blk_desc *bdesc;
1712
1713 memset(&scsipi_sense, 0, sizeof(scsipi_sense));
1714 error = sd_mode_sense(sd, 0, &scsipi_sense,
1715 sizeof(scsipi_sense.blk_desc), 0, flags | XS_CTL_SILENT, &big);
1716 dp->blksize = 512;
1717 if (!error) {
1718 if (big) {
1719 bdesc = (void *)(&scsipi_sense.header.big + 1);
1720 bsize = _2btol(scsipi_sense.header.big.blk_desc_len);
1721 } else {
1722 bdesc = (void *)(&scsipi_sense.header.small + 1);
1723 bsize = scsipi_sense.header.small.blk_desc_len;
1724 }
1725
1726 #if 0
1727 printf("page 0 sense:"); for (i = sizeof(scsipi_sense), p = (void *)&scsipi_sense; i; i--, p++) printf(" %02x", *p); printf("\n");
1728 printf("page 0 bsize=%d\n", bsize);
1729 printf("page 0 ok\n");
1730 #endif
1731
1732 if (bsize >= 8) {
1733 dp->blksize = _3btol(bdesc->blklen);
1734 if (dp->blksize == 0)
1735 dp->blksize = 512;
1736 }
1737 }
1738 }
1739
1740 dp->disksize512 = (sectors * dp->blksize) / DEV_BSIZE;
1741 return (0);
1742 }
1743
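/*
 * Derive the disk geometry from the rigid disk geometry mode page
 * (page 4), retrying with DBD cleared if the first attempt fails.
 */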
1744 static int
1745 sd_get_parms_page4(struct sd_softc *sd, struct disk_parms *dp, int flags)
1746 {
1747 struct sd_mode_sense_data scsipi_sense;
1748 int error;
1749 int big, poffset, byte2;
1750 union scsi_disk_pages *pages;
1751 #if 0
1752 int i;
1753 u_int8_t *p;
1754 #endif
1755
1756 byte2 = SMS_DBD;
1757 again:
1758 memset(&scsipi_sense, 0, sizeof(scsipi_sense));
1759 error = sd_mode_sense(sd, byte2, &scsipi_sense,
1760 (byte2 ? 0 : sizeof(scsipi_sense.blk_desc)) +
1761 sizeof(scsipi_sense.pages.rigid_geometry), 4,
1762 flags | XS_CTL_SILENT, &big);
1763 if (error) {
1764 if (byte2 == SMS_DBD) {
1765 /* No result; try once more with DBD off */
1766 byte2 = 0;
1767 goto again;
1768 }
1769 return (error);
1770 }
1771
1772 if (big) {
1773 poffset = sizeof scsipi_sense.header.big;
1774 poffset += _2btol(scsipi_sense.header.big.blk_desc_len);
1775 } else {
1776 poffset = sizeof scsipi_sense.header.small;
1777 poffset += scsipi_sense.header.small.blk_desc_len;
1778 }
1779
1780 pages = (void *)((u_long)&scsipi_sense + poffset);
1781 #if 0
1782 printf("page 4 sense:"); for (i = sizeof(scsipi_sense), p = (void *)&scsipi_sense; i; i--, p++) printf(" %02x", *p); printf("\n");
1783 printf("page 4 pg_code=%d sense=%p/%p\n", pages->rigid_geometry.pg_code, &scsipi_sense, pages);
1784 #endif
1785
1786 if ((pages->rigid_geometry.pg_code & PGCODE_MASK) != 4)
1787 return (ERESTART);
1788
1789 SC_DEBUG(sd->sc_periph, SCSIPI_DB3,
1790 ("%d cyls, %d heads, %d precomp, %d red_write, %d land_zone\n",
1791 _3btol(pages->rigid_geometry.ncyl),
1792 pages->rigid_geometry.nheads,
1793 _2btol(pages->rigid_geometry.st_cyl_wp),
1794 _2btol(pages->rigid_geometry.st_cyl_rwc),
1795 _2btol(pages->rigid_geometry.land_zone)));
1796
1797 /*
1798 * KLUDGE!! (for zone recorded disks)
1799 	 * give a number of sectors so that sec * trks * cyls
1800 	 * is <= disk_size.
1801 	 * Can lead to wasted space! THINK ABOUT THIS!
1802 */
1803 dp->heads = pages->rigid_geometry.nheads;
1804 dp->cyls = _3btol(pages->rigid_geometry.ncyl);
1805 if (dp->heads == 0 || dp->cyls == 0)
1806 return (ERESTART);
1807 dp->sectors = dp->disksize / (dp->heads * dp->cyls); /* XXX */
1808
1809 dp->rot_rate = _2btol(pages->rigid_geometry.rpm);
1810 if (dp->rot_rate == 0)
1811 dp->rot_rate = 3600;
1812
1813 #if 0
1814 printf("page 4 ok\n");
1815 #endif
1816 return (0);
1817 }
1818
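/*
 * Derive the disk geometry from the flexible disk geometry mode page
 * (page 5), retrying with DBD cleared if the first attempt fails.
 */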
1819 static int
1820 sd_get_parms_page5(struct sd_softc *sd, struct disk_parms *dp, int flags)
1821 {
1822 struct sd_mode_sense_data scsipi_sense;
1823 int error;
1824 int big, poffset, byte2;
1825 union scsi_disk_pages *pages;
1826 #if 0
1827 int i;
1828 u_int8_t *p;
1829 #endif
1830
1831 byte2 = SMS_DBD;
1832 again:
1833 memset(&scsipi_sense, 0, sizeof(scsipi_sense));
1834 	error = sd_mode_sense(sd, byte2, &scsipi_sense,
1835 (byte2 ? 0 : sizeof(scsipi_sense.blk_desc)) +
1836 sizeof(scsipi_sense.pages.flex_geometry), 5,
1837 flags | XS_CTL_SILENT, &big);
1838 if (error) {
1839 if (byte2 == SMS_DBD) {
1840 /* No result; try once more with DBD off */
1841 byte2 = 0;
1842 goto again;
1843 }
1844 return (error);
1845 }
1846
1847 if (big) {
1848 poffset = sizeof scsipi_sense.header.big;
1849 poffset += _2btol(scsipi_sense.header.big.blk_desc_len);
1850 } else {
1851 poffset = sizeof scsipi_sense.header.small;
1852 poffset += scsipi_sense.header.small.blk_desc_len;
1853 }
1854
1855 pages = (void *)((u_long)&scsipi_sense + poffset);
1856 #if 0
1857 printf("page 5 sense:"); for (i = sizeof(scsipi_sense), p = (void *)&scsipi_sense; i; i--, p++) printf(" %02x", *p); printf("\n");
1858 printf("page 5 pg_code=%d sense=%p/%p\n", pages->flex_geometry.pg_code, &scsipi_sense, pages);
1859 #endif
1860
1861 if ((pages->flex_geometry.pg_code & PGCODE_MASK) != 5)
1862 return (ERESTART);
1863
1864 SC_DEBUG(sd->sc_periph, SCSIPI_DB3,
1865 ("%d cyls, %d heads, %d sec, %d bytes/sec\n",
1866 _3btol(pages->flex_geometry.ncyl),
1867 pages->flex_geometry.nheads,
1868 pages->flex_geometry.ph_sec_tr,
1869 _2btol(pages->flex_geometry.bytes_s)));
1870
1871 dp->heads = pages->flex_geometry.nheads;
1872 dp->cyls = _2btol(pages->flex_geometry.ncyl);
1873 dp->sectors = pages->flex_geometry.ph_sec_tr;
1874 if (dp->heads == 0 || dp->cyls == 0 || dp->sectors == 0)
1875 return (ERESTART);
1876
1877 dp->rot_rate = _2btol(pages->rigid_geometry.rpm);
1878 if (dp->rot_rate == 0)
1879 dp->rot_rate = 3600;
1880
1881 #if 0
1882 printf("page 5 ok\n");
1883 #endif
1884 return (0);
1885 }
1886
1887 static int
1888 sd_get_parms(struct sd_softc *sd, struct disk_parms *dp, int flags)
1889 {
1890 int error;
1891
1892 /*
1893 	 * If offline, the PERIPH_MEDIA_LOADED flag will be
1894 * cleared by the caller if necessary.
1895 */
1896 if (sd->type == T_SIMPLE_DIRECT)
1897 return (sd_get_simplifiedparms(sd, dp, flags));
1898
1899 error = sd_get_capacity(sd, dp, flags);
1900 if (error)
1901 return (error);
1902
1903 if (sd->type == T_OPTICAL)
1904 goto page0;
1905
1906 if (sd->sc_periph->periph_flags & PERIPH_REMOVABLE) {
1907 if (!sd_get_parms_page5(sd, dp, flags) ||
1908 !sd_get_parms_page4(sd, dp, flags))
1909 return (SDGP_RESULT_OK);
1910 } else {
1911 if (!sd_get_parms_page4(sd, dp, flags) ||
1912 !sd_get_parms_page5(sd, dp, flags))
1913 return (SDGP_RESULT_OK);
1914 }
1915
1916 page0:
1917 printf("%s: fabricating a geometry\n", sd->sc_dev.dv_xname);
1918 /* Try calling driver's method for figuring out geometry. */
1919 if (!sd->sc_periph->periph_channel->chan_adapter->adapt_getgeom ||
1920 !(*sd->sc_periph->periph_channel->chan_adapter->adapt_getgeom)
1921 (sd->sc_periph, dp, dp->disksize)) {
1922 /*
1923 		 * Use the Adaptec standard fictitious geometry;
1924 		 * this depends on which controller (e.g. the 1542C is
1925 		 * different), but we have to put SOMETHING here.
1926 */
1927 dp->heads = 64;
1928 dp->sectors = 32;
1929 dp->cyls = dp->disksize / (64 * 32);
1930 }
1931 dp->rot_rate = 3600;
1932 return (SDGP_RESULT_OK);
1933 }
1934
1935 static int
1936 sd_flush(struct sd_softc *sd, int flags)
1937 {
1938 struct scsipi_periph *periph = sd->sc_periph;
1939 struct scsi_synchronize_cache cmd;
1940
1941 /*
1942 * If the device is SCSI-2, issue a SYNCHRONIZE CACHE.
1943 * We issue with address 0 length 0, which should be
1944 * interpreted by the device as "all remaining blocks
1945 * starting at address 0". We ignore ILLEGAL REQUEST
1946 * in the event that the command is not supported by
1947 * the device, and poll for completion so that we know
1948 * that the cache has actually been flushed.
1949 *
1950 * Unless, that is, the device can't handle the SYNCHRONIZE CACHE
1951 * command, as indicated by our quirks flags.
1952 *
1953 * XXX What about older devices?
1954 */
1955 if (periph->periph_version < 2 ||
1956 (periph->periph_quirks & PQUIRK_NOSYNCCACHE))
1957 return (0);
1958
1959 sd->flags |= SDF_FLUSHING;
1960 memset(&cmd, 0, sizeof(cmd));
1961 cmd.opcode = SCSI_SYNCHRONIZE_CACHE;
1962
1963 return (scsipi_command(periph, NULL, (void *)&cmd, sizeof(cmd), 0, 0,
1964 SDRETRIES, 100000, NULL, flags | XS_CTL_IGNORE_ILLEGAL_REQUEST));
1965 }
1966
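/*
 * Read the caching mode page (page 8) and translate its settings into
 * DKCACHE_* bits, including which bits the device reports as changeable.
 */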
1967 static int
1968 sd_getcache(struct sd_softc *sd, int *bitsp)
1969 {
1970 struct scsipi_periph *periph = sd->sc_periph;
1971 struct sd_mode_sense_data scsipi_sense;
1972 int error, bits = 0;
1973 int big;
1974 union scsi_disk_pages *pages;
1975
1976 if (periph->periph_version < 2)
1977 return (EOPNOTSUPP);
1978
1979 memset(&scsipi_sense, 0, sizeof(scsipi_sense));
1980 error = sd_mode_sense(sd, SMS_DBD, &scsipi_sense,
1981 sizeof(scsipi_sense.pages.caching_params), 8, 0, &big);
1982 if (error)
1983 return (error);
1984
1985 if (big)
1986 pages = (void *)(&scsipi_sense.header.big + 1);
1987 else
1988 pages = (void *)(&scsipi_sense.header.small + 1);
1989
1990 if ((pages->caching_params.flags & CACHING_RCD) == 0)
1991 bits |= DKCACHE_READ;
1992 if (pages->caching_params.flags & CACHING_WCE)
1993 bits |= DKCACHE_WRITE;
1994 if (pages->caching_params.pg_code & PGCODE_PS)
1995 bits |= DKCACHE_SAVE;
1996
1997 memset(&scsipi_sense, 0, sizeof(scsipi_sense));
1998 error = sd_mode_sense(sd, SMS_DBD, &scsipi_sense,
1999 sizeof(scsipi_sense.pages.caching_params),
2000 SMS_PAGE_CTRL_CHANGEABLE|8, 0, &big);
2001 if (error == 0) {
2002 if (big)
2003 pages = (void *)(&scsipi_sense.header.big + 1);
2004 else
2005 pages = (void *)(&scsipi_sense.header.small + 1);
2006
2007 if (pages->caching_params.flags & CACHING_RCD)
2008 bits |= DKCACHE_RCHANGE;
2009 if (pages->caching_params.flags & CACHING_WCE)
2010 bits |= DKCACHE_WCHANGE;
2011 }
2012
2013 *bitsp = bits;
2014
2015 return (0);
2016 }
2017
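/*
 * Update the caching mode page to reflect the requested DKCACHE_* bits,
 * saving the page if DKCACHE_SAVE is set; a no-op if nothing changes.
 */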
2018 static int
2019 sd_setcache(struct sd_softc *sd, int bits)
2020 {
2021 struct scsipi_periph *periph = sd->sc_periph;
2022 struct sd_mode_sense_data scsipi_sense;
2023 int error;
2024 uint8_t oflags, byte2 = 0;
2025 int big;
2026 union scsi_disk_pages *pages;
2027
2028 if (periph->periph_version < 2)
2029 return (EOPNOTSUPP);
2030
2031 memset(&scsipi_sense, 0, sizeof(scsipi_sense));
2032 error = sd_mode_sense(sd, SMS_DBD, &scsipi_sense,
2033 sizeof(scsipi_sense.pages.caching_params), 8, 0, &big);
2034 if (error)
2035 return (error);
2036
2037 if (big)
2038 pages = (void *)(&scsipi_sense.header.big + 1);
2039 else
2040 pages = (void *)(&scsipi_sense.header.small + 1);
2041
2042 oflags = pages->caching_params.flags;
2043
2044 if (bits & DKCACHE_READ)
2045 pages->caching_params.flags &= ~CACHING_RCD;
2046 else
2047 pages->caching_params.flags |= CACHING_RCD;
2048
2049 if (bits & DKCACHE_WRITE)
2050 pages->caching_params.flags |= CACHING_WCE;
2051 else
2052 pages->caching_params.flags &= ~CACHING_WCE;
2053
2054 if (oflags == pages->caching_params.flags)
2055 return (0);
2056
2057 pages->caching_params.pg_code &= PGCODE_MASK;
2058
2059 if (bits & DKCACHE_SAVE)
2060 byte2 |= SMS_SP;
2061
2062 return (sd_mode_select(sd, byte2|SMS_PF, &scsipi_sense,
2063 sizeof(struct scsipi_mode_page_header) +
2064 pages->caching_params.pg_length, 0, big));
2065 }
2066