sd.c revision 1.214
     1 /* $NetBSD: sd.c,v 1.214 2003/12/23 13:12:25 pk Exp $ */
2
3 /*-
4 * Copyright (c) 1998, 2003 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Charles M. Hannum.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*
40 * Originally written by Julian Elischer (julian (at) dialix.oz.au)
41 * for TRW Financial Systems for use under the MACH(2.5) operating system.
42 *
43 * TRW Financial Systems, in accordance with their agreement with Carnegie
44 * Mellon University, makes this software available to CMU to distribute
45 * or use in any manner that they see fit as long as this message is kept with
46 * the software. For this reason TFS also grants any other persons or
47 * organisations permission to use or modify this software.
48 *
49 * TFS supplies this software to be publicly redistributed
50 * on the understanding that TFS is not responsible for the correct
51 * functioning of this software in any circumstances.
52 *
53 * Ported to run under 386BSD by Julian Elischer (julian (at) dialix.oz.au) Sept 1992
54 */
55
56 #include <sys/cdefs.h>
57 __KERNEL_RCSID(0, "$NetBSD: sd.c,v 1.214 2003/12/23 13:12:25 pk Exp $");
58
59 #include "opt_scsi.h"
60 #include "opt_bufq.h"
61 #include "rnd.h"
62
63 #include <sys/param.h>
64 #include <sys/systm.h>
65 #include <sys/kernel.h>
66 #include <sys/file.h>
67 #include <sys/stat.h>
68 #include <sys/ioctl.h>
69 #include <sys/scsiio.h>
70 #include <sys/buf.h>
71 #include <sys/uio.h>
72 #include <sys/malloc.h>
73 #include <sys/errno.h>
74 #include <sys/device.h>
75 #include <sys/disklabel.h>
76 #include <sys/disk.h>
77 #include <sys/proc.h>
78 #include <sys/conf.h>
79 #include <sys/vnode.h>
80 #if NRND > 0
81 #include <sys/rnd.h>
82 #endif
83
84 #include <dev/scsipi/scsipi_all.h>
85 #include <dev/scsipi/scsi_all.h>
86 #include <dev/scsipi/scsipi_disk.h>
87 #include <dev/scsipi/scsi_disk.h>
88 #include <dev/scsipi/scsiconf.h>
89 #include <dev/scsipi/sdvar.h>
90
91 #define SDUNIT(dev) DISKUNIT(dev)
92 #define SDPART(dev) DISKPART(dev)
93 #define SDMINOR(unit, part) DISKMINOR(unit, part)
94 #define MAKESDDEV(maj, unit, part) MAKEDISKDEV(maj, unit, part)
95
96 #define SDLABELDEV(dev) (MAKESDDEV(major(dev), SDUNIT(dev), RAW_PART))
97
98 int sdlock __P((struct sd_softc *));
99 void sdunlock __P((struct sd_softc *));
100 void sdminphys __P((struct buf *));
101 void sdgetdefaultlabel __P((struct sd_softc *, struct disklabel *));
102 void sdgetdisklabel __P((struct sd_softc *));
103 void sdstart __P((struct scsipi_periph *));
104 void sddone __P((struct scsipi_xfer *));
105 void sd_shutdown __P((void *));
106 int sd_reassign_blocks __P((struct sd_softc *, u_long));
107 int sd_interpret_sense __P((struct scsipi_xfer *));
108
109 int sd_mode_sense __P((struct sd_softc *, u_int8_t, void *, size_t, int,
110 int, int *));
111 int sd_mode_select __P((struct sd_softc *, u_int8_t, void *, size_t, int,
112 int));
113 int sd_get_simplifiedparms __P((struct sd_softc *, struct disk_parms *,
114 int));
115 int sd_get_capacity __P((struct sd_softc *, struct disk_parms *, int));
116 int sd_get_parms __P((struct sd_softc *, struct disk_parms *, int));
117 int sd_flush __P((struct sd_softc *, int));
118 int sd_getcache __P((struct sd_softc *, int *));
119 int sd_setcache __P((struct sd_softc *, int));
120
121 int sdmatch __P((struct device *, struct cfdata *, void *));
122 void sdattach __P((struct device *, struct device *, void *));
123 int sdactivate __P((struct device *, enum devact));
124 int sddetach __P((struct device *, int));
125
126 CFATTACH_DECL(sd, sizeof(struct sd_softc), sdmatch, sdattach, sddetach,
127 sdactivate);
128
129 extern struct cfdriver sd_cd;
130
131 const struct scsipi_inquiry_pattern sd_patterns[] = {
132 {T_DIRECT, T_FIXED,
133 "", "", ""},
134 {T_DIRECT, T_REMOV,
135 "", "", ""},
136 {T_OPTICAL, T_FIXED,
137 "", "", ""},
138 {T_OPTICAL, T_REMOV,
139 "", "", ""},
140 {T_SIMPLE_DIRECT, T_FIXED,
141 "", "", ""},
142 {T_SIMPLE_DIRECT, T_REMOV,
143 "", "", ""},
144 };
145
146 dev_type_open(sdopen);
147 dev_type_close(sdclose);
148 dev_type_read(sdread);
149 dev_type_write(sdwrite);
150 dev_type_ioctl(sdioctl);
151 dev_type_strategy(sdstrategy);
152 dev_type_dump(sddump);
153 dev_type_size(sdsize);
154
155 const struct bdevsw sd_bdevsw = {
156 sdopen, sdclose, sdstrategy, sdioctl, sddump, sdsize, D_DISK
157 };
158
159 const struct cdevsw sd_cdevsw = {
160 sdopen, sdclose, sdread, sdwrite, sdioctl,
161 nostop, notty, nopoll, nommap, nokqfilter, D_DISK
162 };
163
164 struct dkdriver sddkdriver = { sdstrategy };
165
166 const struct scsipi_periphsw sd_switch = {
167 sd_interpret_sense, /* check our error handler first */
168 sdstart, /* have a queue, served by this */
169 NULL, /* have no async handler */
170 sddone, /* deal with stats at interrupt time */
171 };
172
173 struct sd_mode_sense_data {
174 /*
175 * XXX
176 * We are not going to parse this as-is -- it just has to be large
177 * enough.
178 */
179 union {
180 struct scsipi_mode_header small;
181 struct scsipi_mode_header_big big;
182 } header;
183 struct scsi_blk_desc blk_desc;
184 union scsi_disk_pages pages;
185 };
186
187 /*
188 * The routine called by the low level scsi routine when it discovers
189  * a device suitable for this driver.
190 */
191 int
192 sdmatch(parent, match, aux)
193 struct device *parent;
194 struct cfdata *match;
195 void *aux;
196 {
197 struct scsipibus_attach_args *sa = aux;
198 int priority;
199
200 (void)scsipi_inqmatch(&sa->sa_inqbuf,
201 (caddr_t)sd_patterns, sizeof(sd_patterns) / sizeof(sd_patterns[0]),
202 sizeof(sd_patterns[0]), &priority);
203
204 return (priority);
205 }
206
207 /*
208 * Attach routine common to atapi & scsi.
209 */
210 void
211 sdattach(parent, self, aux)
212 struct device *parent, *self;
213 void *aux;
214 {
215 struct sd_softc *sd = (void *)self;
216 struct scsipibus_attach_args *sa = aux;
217 struct scsipi_periph *periph = sa->sa_periph;
218 int error, result;
219 struct disk_parms *dp = &sd->params;
220 char pbuf[9];
221
222 SC_DEBUG(periph, SCSIPI_DB2, ("sdattach: "));
223
224 sd->type = (sa->sa_inqbuf.type & SID_TYPE);
225 if (sd->type == T_SIMPLE_DIRECT)
226 periph->periph_quirks |= PQUIRK_ONLYBIG | PQUIRK_NOBIGMODESENSE;
227
228 if (scsipi_periph_bustype(sa->sa_periph) == SCSIPI_BUSTYPE_SCSI &&
229 periph->periph_version == 0)
230 sd->flags |= SDF_ANCIENT;
231
232 #ifdef NEW_BUFQ_STRATEGY
233 bufq_alloc(&sd->buf_queue, BUFQ_READ_PRIO|BUFQ_SORT_RAWBLOCK);
234 #else
235 bufq_alloc(&sd->buf_queue, BUFQ_DISKSORT|BUFQ_SORT_RAWBLOCK);
236 #endif
237
238 /*
239 * Store information needed to contact our base driver
240 */
241 sd->sc_periph = periph;
242
243 periph->periph_dev = &sd->sc_dev;
244 periph->periph_switch = &sd_switch;
245
246 /*
247 * Increase our openings to the maximum-per-periph
248 * supported by the adapter. This will either be
249 * clamped down or grown by the adapter if necessary.
250 */
251 periph->periph_openings =
252 SCSIPI_CHAN_MAX_PERIPH(periph->periph_channel);
253 periph->periph_flags |= PERIPH_GROW_OPENINGS;
254
255 /*
256 * Initialize and attach the disk structure.
257 */
258 sd->sc_dk.dk_driver = &sddkdriver;
259 sd->sc_dk.dk_name = sd->sc_dev.dv_xname;
260 disk_attach(&sd->sc_dk);
261
262 /*
263 * Use the subdriver to request information regarding the drive.
264 */
265 aprint_naive("\n");
266 aprint_normal("\n");
267
268 error = scsipi_test_unit_ready(periph,
269 XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
270 XS_CTL_IGNORE_MEDIA_CHANGE | XS_CTL_SILENT_NODEV);
271
272 if (error)
273 result = SDGP_RESULT_OFFLINE;
274 else
275 result = sd_get_parms(sd, &sd->params, XS_CTL_DISCOVERY);
276 aprint_normal("%s: ", sd->sc_dev.dv_xname);
277 switch (result) {
278 case SDGP_RESULT_OK:
279 format_bytes(pbuf, sizeof(pbuf),
280 (u_int64_t)dp->disksize * dp->blksize);
281 aprint_normal(
282 "%s, %ld cyl, %ld head, %ld sec, %ld bytes/sect x %llu sectors",
283 pbuf, dp->cyls, dp->heads, dp->sectors, dp->blksize,
284 (unsigned long long)dp->disksize);
285 break;
286
287 case SDGP_RESULT_OFFLINE:
288 aprint_normal("drive offline");
289 break;
290
291 case SDGP_RESULT_UNFORMATTED:
292 aprint_normal("unformatted media");
293 break;
294
295 #ifdef DIAGNOSTIC
296 default:
297 panic("sdattach: unknown result from get_parms");
298 break;
299 #endif
300 }
301 aprint_normal("\n");
302
303 /*
304 * Establish a shutdown hook so that we can ensure that
305 * our data has actually made it onto the platter at
306 * shutdown time. Note that this relies on the fact
307 * that the shutdown hook code puts us at the head of
308 * the list (thus guaranteeing that our hook runs before
309 * our ancestors').
310 */
311 if ((sd->sc_sdhook =
312 shutdownhook_establish(sd_shutdown, sd)) == NULL)
313 aprint_error("%s: WARNING: unable to establish shutdown hook\n",
314 sd->sc_dev.dv_xname);
315
316 #if NRND > 0
317 /*
318 * attach the device into the random source list
319 */
320 rnd_attach_source(&sd->rnd_source, sd->sc_dev.dv_xname,
321 RND_TYPE_DISK, 0);
322 #endif
323 }
324
325 int
326 sdactivate(self, act)
327 struct device *self;
328 enum devact act;
329 {
330 int rv = 0;
331
332 switch (act) {
333 case DVACT_ACTIVATE:
334 rv = EOPNOTSUPP;
335 break;
336
337 case DVACT_DEACTIVATE:
338 /*
339 * Nothing to do; we key off the device's DVF_ACTIVE.
340 */
341 break;
342 }
343 return (rv);
344 }
345
346 int
347 sddetach(self, flags)
348 struct device *self;
349 int flags;
350 {
351 struct sd_softc *sd = (struct sd_softc *) self;
352 struct buf *bp;
353 int s, bmaj, cmaj, i, mn;
354
355 /* locate the major number */
356 bmaj = bdevsw_lookup_major(&sd_bdevsw);
357 cmaj = cdevsw_lookup_major(&sd_cdevsw);
358
359 s = splbio();
360
361 /* Kill off any queued buffers. */
362 while ((bp = BUFQ_GET(&sd->buf_queue)) != NULL) {
363 bp->b_error = EIO;
364 bp->b_flags |= B_ERROR;
365 bp->b_resid = bp->b_bcount;
366 biodone(bp);
367 }
368
369 bufq_free(&sd->buf_queue);
370
371 /* Kill off any pending commands. */
372 scsipi_kill_pending(sd->sc_periph);
373
374 splx(s);
375
376 /* Nuke the vnodes for any open instances */
377 for (i = 0; i < MAXPARTITIONS; i++) {
378 mn = SDMINOR(self->dv_unit, i);
379 vdevgone(bmaj, mn, mn, VBLK);
380 vdevgone(cmaj, mn, mn, VCHR);
381 }
382
383 /* Detach from the disk list. */
384 disk_detach(&sd->sc_dk);
385
386 /* Get rid of the shutdown hook. */
387 shutdownhook_disestablish(sd->sc_sdhook);
388
389 #if NRND > 0
390 /* Unhook the entropy source. */
391 rnd_detach_source(&sd->rnd_source);
392 #endif
393
394 return (0);
395 }
396
397 /*
398 * Wait interruptibly for an exclusive lock.
399 *
400 * XXX
401 * Several drivers do this; it should be abstracted and made MP-safe.
402 */
403 int
404 sdlock(sd)
405 struct sd_softc *sd;
406 {
407 int error;
408
409 while ((sd->flags & SDF_LOCKED) != 0) {
410 sd->flags |= SDF_WANTED;
411 if ((error = tsleep(sd, PRIBIO | PCATCH, "sdlck", 0)) != 0)
412 return (error);
413 }
414 sd->flags |= SDF_LOCKED;
415 return (0);
416 }
417
418 /*
419 * Unlock and wake up any waiters.
420 */
421 void
422 sdunlock(sd)
423 struct sd_softc *sd;
424 {
425
426 sd->flags &= ~SDF_LOCKED;
427 if ((sd->flags & SDF_WANTED) != 0) {
428 sd->flags &= ~SDF_WANTED;
429 wakeup(sd);
430 }
431 }
432
433 /*
434  * Open the device.  Make sure the partition info is as up-to-date as can be.
435 */
436 int
437 sdopen(dev, flag, fmt, p)
438 dev_t dev;
439 int flag, fmt;
440 struct proc *p;
441 {
442 struct sd_softc *sd;
443 struct scsipi_periph *periph;
444 struct scsipi_adapter *adapt;
445 int unit, part;
446 int error;
447
448 unit = SDUNIT(dev);
449 if (unit >= sd_cd.cd_ndevs)
450 return (ENXIO);
451 sd = sd_cd.cd_devs[unit];
452 if (sd == NULL)
453 return (ENXIO);
454
455 if ((sd->sc_dev.dv_flags & DVF_ACTIVE) == 0)
456 return (ENODEV);
457
458 periph = sd->sc_periph;
459 adapt = periph->periph_channel->chan_adapter;
460 part = SDPART(dev);
461
462 SC_DEBUG(periph, SCSIPI_DB1,
463 ("sdopen: dev=0x%x (unit %d (of %d), partition %d)\n", dev, unit,
464 sd_cd.cd_ndevs, part));
465
466 /*
467 * If this is the first open of this device, add a reference
468 * to the adapter.
469 */
470 if (sd->sc_dk.dk_openmask == 0 &&
471 (error = scsipi_adapter_addref(adapt)) != 0)
472 return (error);
473
474 if ((error = sdlock(sd)) != 0)
475 goto bad4;
476
477 if ((periph->periph_flags & PERIPH_OPEN) != 0) {
478 /*
479 * If any partition is open, but the disk has been invalidated,
480 		 * disallow further opens of the non-raw partitions.
481 */
482 if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0 &&
483 (part != RAW_PART || fmt != S_IFCHR)) {
484 error = EIO;
485 goto bad3;
486 }
487 } else {
488 int silent;
489
490 if (part == RAW_PART && fmt == S_IFCHR)
491 silent = XS_CTL_SILENT;
492 else
493 silent = 0;
494
495 /* Check that it is still responding and ok. */
496 error = scsipi_test_unit_ready(periph,
497 XS_CTL_IGNORE_ILLEGAL_REQUEST | XS_CTL_IGNORE_MEDIA_CHANGE |
498 silent);
499
500 /*
501 * Start the pack spinning if necessary. Always allow the
502 		 * raw partition to be opened, for raw IOCTLs.  Data transfers
503 * will check for SDEV_MEDIA_LOADED.
504 */
505 if (error == EIO) {
506 int error2;
507
508 error2 = scsipi_start(periph, SSS_START, silent);
509 switch (error2) {
510 case 0:
511 error = 0;
512 break;
513 case EIO:
514 case EINVAL:
515 break;
516 default:
517 error = error2;
518 break;
519 }
520 }
521 if (error) {
522 if (silent)
523 goto out;
524 goto bad3;
525 }
526
527 periph->periph_flags |= PERIPH_OPEN;
528
529 if (periph->periph_flags & PERIPH_REMOVABLE) {
530 /* Lock the pack in. */
531 error = scsipi_prevent(periph, PR_PREVENT,
532 XS_CTL_IGNORE_ILLEGAL_REQUEST |
533 XS_CTL_IGNORE_MEDIA_CHANGE);
534 if (error)
535 goto bad;
536 }
537
538 if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0) {
539 periph->periph_flags |= PERIPH_MEDIA_LOADED;
540
541 /*
542 * Load the physical device parameters.
543 *
544 * Note that if media is present but unformatted,
545 * we allow the open (so that it can be formatted!).
546 * The drive should refuse real I/O, if the media is
547 * unformatted.
548 */
549 if (sd_get_parms(sd, &sd->params,
550 0) == SDGP_RESULT_OFFLINE) {
551 error = ENXIO;
552 goto bad2;
553 }
554 SC_DEBUG(periph, SCSIPI_DB3, ("Params loaded "));
555
556 /* Load the partition info if not already loaded. */
557 sdgetdisklabel(sd);
558 SC_DEBUG(periph, SCSIPI_DB3, ("Disklabel loaded "));
559 }
560 }
561
562 /* Check that the partition exists. */
563 if (part != RAW_PART &&
564 (part >= sd->sc_dk.dk_label->d_npartitions ||
565 sd->sc_dk.dk_label->d_partitions[part].p_fstype == FS_UNUSED)) {
566 error = ENXIO;
567 goto bad;
568 }
569
570 out:	/* Record this open in the appropriate open mask. */
571 switch (fmt) {
572 case S_IFCHR:
573 sd->sc_dk.dk_copenmask |= (1 << part);
574 break;
575 case S_IFBLK:
576 sd->sc_dk.dk_bopenmask |= (1 << part);
577 break;
578 }
579 sd->sc_dk.dk_openmask =
580 sd->sc_dk.dk_copenmask | sd->sc_dk.dk_bopenmask;
581
582 SC_DEBUG(periph, SCSIPI_DB3, ("open complete\n"));
583 sdunlock(sd);
584 return (0);
585
586 bad2:
587 periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
588
589 bad:
590 if (sd->sc_dk.dk_openmask == 0) {
591 if (periph->periph_flags & PERIPH_REMOVABLE)
592 scsipi_prevent(periph, PR_ALLOW,
593 XS_CTL_IGNORE_ILLEGAL_REQUEST |
594 XS_CTL_IGNORE_MEDIA_CHANGE);
595 periph->periph_flags &= ~PERIPH_OPEN;
596 }
597
598 bad3:
599 sdunlock(sd);
600 bad4:
601 if (sd->sc_dk.dk_openmask == 0)
602 scsipi_adapter_delref(adapt);
603 return (error);
604 }
605
606 /*
607  * Close the device.  Only called if we are the LAST occurrence of an open
608 * device. Convenient now but usually a pain.
609 */
610 int
611 sdclose(dev, flag, fmt, p)
612 dev_t dev;
613 int flag, fmt;
614 struct proc *p;
615 {
616 struct sd_softc *sd = sd_cd.cd_devs[SDUNIT(dev)];
617 struct scsipi_periph *periph = sd->sc_periph;
618 struct scsipi_adapter *adapt = periph->periph_channel->chan_adapter;
619 int part = SDPART(dev);
620 int error;
621
622 if ((error = sdlock(sd)) != 0)
623 return (error);
624
625 switch (fmt) {
626 case S_IFCHR:
627 sd->sc_dk.dk_copenmask &= ~(1 << part);
628 break;
629 case S_IFBLK:
630 sd->sc_dk.dk_bopenmask &= ~(1 << part);
631 break;
632 }
633 sd->sc_dk.dk_openmask =
634 sd->sc_dk.dk_copenmask | sd->sc_dk.dk_bopenmask;
635
636 if (sd->sc_dk.dk_openmask == 0) {
637 /*
638 * If the disk cache needs flushing, and the disk supports
639 * it, do it now.
640 */
641 if ((sd->flags & SDF_DIRTY) != 0) {
642 if (sd_flush(sd, 0)) {
643 printf("%s: cache synchronization failed\n",
644 sd->sc_dev.dv_xname);
645 sd->flags &= ~SDF_FLUSHING;
646 } else
647 sd->flags &= ~(SDF_FLUSHING|SDF_DIRTY);
648 }
649
650 if (! (periph->periph_flags & PERIPH_KEEP_LABEL))
651 periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
652
653 scsipi_wait_drain(periph);
654
655 if (periph->periph_flags & PERIPH_REMOVABLE)
656 scsipi_prevent(periph, PR_ALLOW,
657 XS_CTL_IGNORE_ILLEGAL_REQUEST |
658 XS_CTL_IGNORE_NOT_READY);
659 periph->periph_flags &= ~PERIPH_OPEN;
660
661 scsipi_wait_drain(periph);
662
663 scsipi_adapter_delref(adapt);
664 }
665
666 sdunlock(sd);
667 return (0);
668 }
669
670 /*
671 * Actually translate the requested transfer into one the physical driver
672 * can understand. The transfer is described by a buf and will include
673 * only one physical transfer.
674 */
675 void
676 sdstrategy(bp)
677 struct buf *bp;
678 {
679 struct sd_softc *sd = sd_cd.cd_devs[SDUNIT(bp->b_dev)];
680 struct scsipi_periph *periph = sd->sc_periph;
681 struct disklabel *lp;
682 daddr_t blkno;
683 int s;
684 boolean_t sector_aligned;
685
686 SC_DEBUG(sd->sc_periph, SCSIPI_DB2, ("sdstrategy "));
687 SC_DEBUG(sd->sc_periph, SCSIPI_DB1,
688 ("%ld bytes @ blk %" PRId64 "\n", bp->b_bcount, bp->b_blkno));
689 /*
690 * If the device has been made invalid, error out
691 */
692 if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0 ||
693 (sd->sc_dev.dv_flags & DVF_ACTIVE) == 0) {
694 if (periph->periph_flags & PERIPH_OPEN)
695 bp->b_error = EIO;
696 else
697 bp->b_error = ENODEV;
698 goto bad;
699 }
700
701 lp = sd->sc_dk.dk_label;
702
703 /*
704 * The transfer must be a whole number of blocks, offset must not be
705 * negative.
706 */
707 if (lp->d_secsize == DEV_BSIZE) {
708 sector_aligned = (bp->b_bcount & (DEV_BSIZE - 1)) == 0;
709 } else {
710 sector_aligned = (bp->b_bcount % lp->d_secsize) == 0;
711 }
712 if (!sector_aligned || bp->b_blkno < 0) {
713 bp->b_error = EINVAL;
714 goto bad;
715 }
716 /*
717 	 * If it's a null transfer, return immediately.
718 */
719 if (bp->b_bcount == 0)
720 goto done;
721
722 /*
723 	 * Do bounds checking and adjust the transfer; if there's an error,
724 	 * process it.  If it's past the end of the partition, just return.
725 */
726 if (SDPART(bp->b_dev) == RAW_PART) {
727 if (bounds_check_with_mediasize(bp, DEV_BSIZE,
728 sd->params.disksize512) <= 0)
729 goto done;
730 } else {
731 if (bounds_check_with_label(&sd->sc_dk, bp,
732 (sd->flags & (SDF_WLABEL|SDF_LABELLING)) != 0) <= 0)
733 goto done;
734 }
735
736 /*
737 * Now convert the block number to absolute and put it in
738 * terms of the device's logical block size.
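	 * For example, with 2048-byte sectors (DEV_BSIZE is 512) the block
	 * number is divided by 4, so DEV_BSIZE block 64 becomes logical block 16.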
739 */
740 if (lp->d_secsize == DEV_BSIZE)
741 blkno = bp->b_blkno;
742 else if (lp->d_secsize > DEV_BSIZE)
743 blkno = bp->b_blkno / (lp->d_secsize / DEV_BSIZE);
744 else
745 blkno = bp->b_blkno * (DEV_BSIZE / lp->d_secsize);
746
747 if (SDPART(bp->b_dev) != RAW_PART)
748 blkno += lp->d_partitions[SDPART(bp->b_dev)].p_offset;
749
750 bp->b_rawblkno = blkno;
751
752 s = splbio();
753
754 /*
755 * Place it in the queue of disk activities for this disk.
756 *
757 * XXX Only do disksort() if the current operating mode does not
758 * XXX include tagged queueing.
759 */
760 BUFQ_PUT(&sd->buf_queue, bp);
761
762 /*
763 * Tell the device to get going on the transfer if it's
764 * not doing anything, otherwise just wait for completion
765 */
766 sdstart(sd->sc_periph);
767
768 splx(s);
769 return;
770
771 bad:
772 bp->b_flags |= B_ERROR;
773 done:
774 /*
775 * Correctly set the buf to indicate a completed xfer
776 */
777 bp->b_resid = bp->b_bcount;
778 biodone(bp);
779 }
780
781 /*
782 * sdstart looks to see if there is a buf waiting for the device
783 * and that the device is not already busy. If both are true,
784  * it dequeues the buf and creates a scsi command to perform the
785 * transfer in the buf. The transfer request will call scsipi_done
786 * on completion, which will in turn call this routine again
787 * so that the next queued transfer is performed.
788 * The bufs are queued by the strategy routine (sdstrategy)
789 *
790 * This routine is also called after other non-queued requests
791 * have been made of the scsi driver, to ensure that the queue
792 * continues to be drained.
793 *
794 * must be called at the correct (highish) spl level
795 * sdstart() is called at splbio from sdstrategy and scsipi_done
796 */
797 void
798 sdstart(periph)
799 struct scsipi_periph *periph;
800 {
801 struct sd_softc *sd = (void *)periph->periph_dev;
802 struct disklabel *lp = sd->sc_dk.dk_label;
803 struct buf *bp = 0;
804 struct scsipi_rw_big cmd_big;
805 struct scsi_rw cmd_small;
806 struct scsipi_generic *cmdp;
807 int nblks, cmdlen, error, flags;
808
809 SC_DEBUG(periph, SCSIPI_DB2, ("sdstart "));
810 /*
811 * Check if the device has room for another command
812 */
813 while (periph->periph_active < periph->periph_openings) {
814 /*
815 		 * there is excess capacity, but a special command waits;
816 		 * it'll need the adapter as soon as we clear out of the
817 		 * way and let it run (user-level wait).
818 */
819 if (periph->periph_flags & PERIPH_WAITING) {
820 periph->periph_flags &= ~PERIPH_WAITING;
821 wakeup((caddr_t)periph);
822 return;
823 }
824
825 /*
826 * See if there is a buf with work for us to do..
827 */
828 if ((bp = BUFQ_GET(&sd->buf_queue)) == NULL)
829 return;
830
831 /*
832 * If the device has become invalid, abort all the
833 * reads and writes until all files have been closed and
834 * re-opened
835 */
836 if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0) {
837 bp->b_error = EIO;
838 bp->b_flags |= B_ERROR;
839 bp->b_resid = bp->b_bcount;
840 biodone(bp);
841 continue;
842 }
843
844 /*
845 * We have a buf, now we should make a command.
846 */
847
848 if (lp->d_secsize == DEV_BSIZE)
849 nblks = bp->b_bcount >> DEV_BSHIFT;
850 else
851 nblks = howmany(bp->b_bcount, lp->d_secsize);
852
853 /*
854 * Fill out the scsi command. If the transfer will
855 * fit in a "small" cdb, use it.
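		 * A 6-byte READ/WRITE carries a 21-bit block address and an
		 * 8-bit transfer length, hence the 0x1fffff and 0xff checks below.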
856 */
857 if (((bp->b_rawblkno & 0x1fffff) == bp->b_rawblkno) &&
858 ((nblks & 0xff) == nblks) &&
859 !(periph->periph_quirks & PQUIRK_ONLYBIG)) {
860 /*
861 * We can fit in a small cdb.
862 */
863 memset(&cmd_small, 0, sizeof(cmd_small));
864 cmd_small.opcode = (bp->b_flags & B_READ) ?
865 SCSI_READ_COMMAND : SCSI_WRITE_COMMAND;
866 _lto3b(bp->b_rawblkno, cmd_small.addr);
867 cmd_small.length = nblks & 0xff;
868 cmdlen = sizeof(cmd_small);
869 cmdp = (struct scsipi_generic *)&cmd_small;
870 } else {
871 /*
872 * Need a large cdb.
873 */
874 memset(&cmd_big, 0, sizeof(cmd_big));
875 cmd_big.opcode = (bp->b_flags & B_READ) ?
876 READ_BIG : WRITE_BIG;
877 _lto4b(bp->b_rawblkno, cmd_big.addr);
878 _lto2b(nblks, cmd_big.length);
879 cmdlen = sizeof(cmd_big);
880 cmdp = (struct scsipi_generic *)&cmd_big;
881 }
882
883 /* Instrumentation. */
884 disk_busy(&sd->sc_dk);
885
886 /*
887 * Mark the disk dirty so that the cache will be
888 * flushed on close.
889 */
890 if ((bp->b_flags & B_READ) == 0)
891 sd->flags |= SDF_DIRTY;
892
893 /*
894 * Figure out what flags to use.
895 */
896 flags = XS_CTL_NOSLEEP|XS_CTL_ASYNC|XS_CTL_SIMPLE_TAG;
897 if (bp->b_flags & B_READ)
898 flags |= XS_CTL_DATA_IN;
899 else
900 flags |= XS_CTL_DATA_OUT;
901
902 /*
903 * Call the routine that chats with the adapter.
904 		 * Note: we cannot sleep as we may be in interrupt context.
905 */
906 error = scsipi_command(periph, cmdp, cmdlen,
907 (u_char *)bp->b_data, bp->b_bcount,
908 SDRETRIES, SD_IO_TIMEOUT, bp, flags);
909 if (error) {
910 disk_unbusy(&sd->sc_dk, 0, 0);
911 printf("%s: not queued, error %d\n",
912 sd->sc_dev.dv_xname, error);
913 }
914 }
915 }
916
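/*
 * Completion callback: note a finished cache flush, record the I/O
 * statistics for buf transfers, and feed the block number to the
 * random-number pool.
 */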
917 void
918 sddone(xs)
919 struct scsipi_xfer *xs;
920 {
921 struct sd_softc *sd = (void *)xs->xs_periph->periph_dev;
922
923 if (sd->flags & SDF_FLUSHING) {
924 /* Flush completed, no longer dirty. */
925 sd->flags &= ~(SDF_FLUSHING|SDF_DIRTY);
926 }
927
928 if (xs->bp != NULL) {
929 disk_unbusy(&sd->sc_dk, xs->bp->b_bcount - xs->bp->b_resid,
930 (xs->bp->b_flags & B_READ));
931 #if NRND > 0
932 rnd_add_uint32(&sd->rnd_source, xs->bp->b_rawblkno);
933 #endif
934 }
935 }
936
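/*
 * Clamp the size of a transfer to what the drive can handle, then
 * apply the adapter's own limit.
 */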
937 void
938 sdminphys(bp)
939 struct buf *bp;
940 {
941 struct sd_softc *sd = sd_cd.cd_devs[SDUNIT(bp->b_dev)];
942 long max;
943
944 /*
945 * If the device is ancient, we want to make sure that
946 * the transfer fits into a 6-byte cdb.
947 *
948 * XXX Note that the SCSI-I spec says that 256-block transfers
949 * are allowed in a 6-byte read/write, and are specified
950 	 * by setting the "length" to 0.  However, we're conservative
951 * here, allowing only 255-block transfers in case an
952 * ancient device gets confused by length == 0. A length of 0
953 * in a 10-byte read/write actually means 0 blocks.
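	 * With 512-byte sectors this caps a single transfer at
	 * 255 * 512 = 130560 bytes.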
954 */
955 if ((sd->flags & SDF_ANCIENT) &&
956 ((sd->sc_periph->periph_flags &
957 (PERIPH_REMOVABLE | PERIPH_MEDIA_LOADED)) != PERIPH_REMOVABLE)) {
958 max = sd->sc_dk.dk_label->d_secsize * 0xff;
959
960 if (bp->b_bcount > max)
961 bp->b_bcount = max;
962 }
963
964 (*sd->sc_periph->periph_channel->chan_adapter->adapt_minphys)(bp);
965 }
966
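/*
 * Character-device read and write entry points; physio() builds bufs
 * and pushes them through sdstrategy().
 */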
967 int
968 sdread(dev, uio, ioflag)
969 dev_t dev;
970 struct uio *uio;
971 int ioflag;
972 {
973
974 return (physio(sdstrategy, NULL, dev, B_READ, sdminphys, uio));
975 }
976
977 int
978 sdwrite(dev, uio, ioflag)
979 dev_t dev;
980 struct uio *uio;
981 int ioflag;
982 {
983
984 return (physio(sdstrategy, NULL, dev, B_WRITE, sdminphys, uio));
985 }
986
987 /*
988  * Perform special action on behalf of the user.
989  * Knows about the internals of this device.
990 */
991 int
992 sdioctl(dev, cmd, addr, flag, p)
993 dev_t dev;
994 u_long cmd;
995 caddr_t addr;
996 int flag;
997 struct proc *p;
998 {
999 struct sd_softc *sd = sd_cd.cd_devs[SDUNIT(dev)];
1000 struct scsipi_periph *periph = sd->sc_periph;
1001 int part = SDPART(dev);
1002 int error = 0;
1003 #ifdef __HAVE_OLD_DISKLABEL
1004 struct disklabel *newlabel = NULL;
1005 #endif
1006
1007 SC_DEBUG(sd->sc_periph, SCSIPI_DB2, ("sdioctl 0x%lx ", cmd));
1008
1009 /*
1010 * If the device is not valid, some IOCTLs can still be
1011 * handled on the raw partition. Check this here.
1012 */
1013 if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0) {
1014 switch (cmd) {
1015 case DIOCKLABEL:
1016 case DIOCWLABEL:
1017 case DIOCLOCK:
1018 case DIOCEJECT:
1019 case ODIOCEJECT:
1020 case DIOCGCACHE:
1021 case DIOCSCACHE:
1022 case SCIOCIDENTIFY:
1023 case OSCIOCIDENTIFY:
1024 case SCIOCCOMMAND:
1025 case SCIOCDEBUG:
1026 if (part == RAW_PART)
1027 break;
1028 /* FALLTHROUGH */
1029 default:
1030 if ((periph->periph_flags & PERIPH_OPEN) == 0)
1031 return (ENODEV);
1032 else
1033 return (EIO);
1034 }
1035 }
1036
1037 switch (cmd) {
1038 case DIOCGDINFO:
1039 *(struct disklabel *)addr = *(sd->sc_dk.dk_label);
1040 return (0);
1041
1042 #ifdef __HAVE_OLD_DISKLABEL
1043 case ODIOCGDINFO:
1044 newlabel = malloc(sizeof *newlabel, M_TEMP, M_WAITOK);
1045 if (newlabel == NULL)
1046 return EIO;
1047 memcpy(newlabel, sd->sc_dk.dk_label, sizeof (*newlabel));
1048 if (newlabel->d_npartitions <= OLDMAXPARTITIONS)
1049 memcpy(addr, newlabel, sizeof (struct olddisklabel));
1050 else
1051 error = ENOTTY;
1052 free(newlabel, M_TEMP);
1053 return error;
1054 #endif
1055
1056 case DIOCGPART:
1057 ((struct partinfo *)addr)->disklab = sd->sc_dk.dk_label;
1058 ((struct partinfo *)addr)->part =
1059 &sd->sc_dk.dk_label->d_partitions[part];
1060 return (0);
1061
1062 case DIOCWDINFO:
1063 case DIOCSDINFO:
1064 #ifdef __HAVE_OLD_DISKLABEL
1065 case ODIOCWDINFO:
1066 case ODIOCSDINFO:
1067 #endif
1068 {
1069 struct disklabel *lp;
1070
1071 if ((flag & FWRITE) == 0)
1072 return (EBADF);
1073
1074 #ifdef __HAVE_OLD_DISKLABEL
1075 if (cmd == ODIOCSDINFO || cmd == ODIOCWDINFO) {
1076 newlabel = malloc(sizeof *newlabel, M_TEMP, M_WAITOK);
1077 if (newlabel == NULL)
1078 return EIO;
1079 			memset(newlabel, 0, sizeof *newlabel);
1080 memcpy(newlabel, addr, sizeof (struct olddisklabel));
1081 lp = newlabel;
1082 } else
1083 #endif
1084 lp = (struct disklabel *)addr;
1085
1086 if ((error = sdlock(sd)) != 0)
1087 goto bad;
1088 sd->flags |= SDF_LABELLING;
1089
1090 error = setdisklabel(sd->sc_dk.dk_label,
1091 lp, /*sd->sc_dk.dk_openmask : */0,
1092 sd->sc_dk.dk_cpulabel);
1093 if (error == 0) {
1094 if (cmd == DIOCWDINFO
1095 #ifdef __HAVE_OLD_DISKLABEL
1096 || cmd == ODIOCWDINFO
1097 #endif
1098 )
1099 error = writedisklabel(SDLABELDEV(dev),
1100 sdstrategy, sd->sc_dk.dk_label,
1101 sd->sc_dk.dk_cpulabel);
1102 }
1103
1104 sd->flags &= ~SDF_LABELLING;
1105 sdunlock(sd);
1106 bad:
1107 #ifdef __HAVE_OLD_DISKLABEL
1108 if (newlabel != NULL)
1109 free(newlabel, M_TEMP);
1110 #endif
1111 return (error);
1112 }
1113
1114 case DIOCKLABEL:
1115 if (*(int *)addr)
1116 periph->periph_flags |= PERIPH_KEEP_LABEL;
1117 else
1118 periph->periph_flags &= ~PERIPH_KEEP_LABEL;
1119 return (0);
1120
1121 case DIOCWLABEL:
1122 if ((flag & FWRITE) == 0)
1123 return (EBADF);
1124 if (*(int *)addr)
1125 sd->flags |= SDF_WLABEL;
1126 else
1127 sd->flags &= ~SDF_WLABEL;
1128 return (0);
1129
1130 case DIOCLOCK:
1131 return (scsipi_prevent(periph,
1132 (*(int *)addr) ? PR_PREVENT : PR_ALLOW, 0));
1133
1134 case DIOCEJECT:
1135 if ((periph->periph_flags & PERIPH_REMOVABLE) == 0)
1136 return (ENOTTY);
1137 if (*(int *)addr == 0) {
1138 /*
1139 * Don't force eject: check that we are the only
1140 * partition open. If so, unlock it.
1141 */
1142 if ((sd->sc_dk.dk_openmask & ~(1 << part)) == 0 &&
1143 sd->sc_dk.dk_bopenmask + sd->sc_dk.dk_copenmask ==
1144 sd->sc_dk.dk_openmask) {
1145 error = scsipi_prevent(periph, PR_ALLOW,
1146 XS_CTL_IGNORE_NOT_READY);
1147 if (error)
1148 return (error);
1149 } else {
1150 return (EBUSY);
1151 }
1152 }
1153 /* FALLTHROUGH */
1154 case ODIOCEJECT:
1155 return ((periph->periph_flags & PERIPH_REMOVABLE) == 0 ?
1156 ENOTTY : scsipi_start(periph, SSS_STOP|SSS_LOEJ, 0));
1157
1158 case DIOCGDEFLABEL:
1159 sdgetdefaultlabel(sd, (struct disklabel *)addr);
1160 return (0);
1161
1162 #ifdef __HAVE_OLD_DISKLABEL
1163 case ODIOCGDEFLABEL:
1164 newlabel = malloc(sizeof *newlabel, M_TEMP, M_WAITOK);
1165 if (newlabel == NULL)
1166 return EIO;
1167 sdgetdefaultlabel(sd, newlabel);
1168 if (newlabel->d_npartitions <= OLDMAXPARTITIONS)
1169 memcpy(addr, newlabel, sizeof (struct olddisklabel));
1170 else
1171 error = ENOTTY;
1172 free(newlabel, M_TEMP);
1173 return error;
1174 #endif
1175
1176 case DIOCGCACHE:
1177 return (sd_getcache(sd, (int *) addr));
1178
1179 case DIOCSCACHE:
1180 if ((flag & FWRITE) == 0)
1181 return (EBADF);
1182 return (sd_setcache(sd, *(int *) addr));
1183
1184 case DIOCCACHESYNC:
1185 /*
1186 * XXX Do we really need to care about having a writable
1187 * file descriptor here?
1188 */
1189 if ((flag & FWRITE) == 0)
1190 return (EBADF);
1191 if (((sd->flags & SDF_DIRTY) != 0 || *(int *)addr != 0)) {
1192 error = sd_flush(sd, 0);
1193 if (error)
1194 sd->flags &= ~SDF_FLUSHING;
1195 else
1196 sd->flags &= ~(SDF_FLUSHING|SDF_DIRTY);
1197 } else
1198 error = 0;
1199 return (error);
1200
1201 default:
1202 if (part != RAW_PART)
1203 return (ENOTTY);
1204 return (scsipi_do_ioctl(periph, dev, cmd, addr, flag, p));
1205 }
1206
1207 #ifdef DIAGNOSTIC
1208 panic("sdioctl: impossible");
1209 #endif
1210 }
1211
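/*
 * Build a default disklabel from the probed disk parameters, with a
 * single raw partition covering the whole unit.
 */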
1212 void
1213 sdgetdefaultlabel(sd, lp)
1214 struct sd_softc *sd;
1215 struct disklabel *lp;
1216 {
1217
1218 memset(lp, 0, sizeof(struct disklabel));
1219
1220 lp->d_secsize = sd->params.blksize;
1221 lp->d_ntracks = sd->params.heads;
1222 lp->d_nsectors = sd->params.sectors;
1223 lp->d_ncylinders = sd->params.cyls;
1224 lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
1225
1226 switch (scsipi_periph_bustype(sd->sc_periph)) {
1227 case SCSIPI_BUSTYPE_SCSI:
1228 lp->d_type = DTYPE_SCSI;
1229 break;
1230 case SCSIPI_BUSTYPE_ATAPI:
1231 lp->d_type = DTYPE_ATAPI;
1232 break;
1233 }
1234 /*
1235 * XXX
1236 * We could probe the mode pages to figure out what kind of disc it is.
1237 * Is this worthwhile?
1238 */
1239 strncpy(lp->d_typename, "mydisk", 16);
1240 strncpy(lp->d_packname, "fictitious", 16);
1241 lp->d_secperunit = sd->params.disksize;
1242 lp->d_rpm = sd->params.rot_rate;
1243 lp->d_interleave = 1;
1244 lp->d_flags = sd->sc_periph->periph_flags & PERIPH_REMOVABLE ?
1245 D_REMOVABLE : 0;
1246
1247 lp->d_partitions[RAW_PART].p_offset = 0;
1248 lp->d_partitions[RAW_PART].p_size =
1249 lp->d_secperunit * (lp->d_secsize / DEV_BSIZE);
1250 lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED;
1251 lp->d_npartitions = RAW_PART + 1;
1252
1253 lp->d_magic = DISKMAGIC;
1254 lp->d_magic2 = DISKMAGIC;
1255 lp->d_checksum = dkcksum(lp);
1256 }
1257
1258
1259 /*
1260 * Load the label information on the named device
1261 */
1262 void
1263 sdgetdisklabel(sd)
1264 struct sd_softc *sd;
1265 {
1266 struct disklabel *lp = sd->sc_dk.dk_label;
1267 const char *errstring;
1268
1269 memset(sd->sc_dk.dk_cpulabel, 0, sizeof(struct cpu_disklabel));
1270
1271 sdgetdefaultlabel(sd, lp);
1272
1273 if (lp->d_secpercyl == 0) {
1274 lp->d_secpercyl = 100;
1275 /* as long as it's not 0 - readdisklabel divides by it (?) */
1276 }
1277
1278 /*
1279 * Call the generic disklabel extraction routine
1280 */
1281 errstring = readdisklabel(MAKESDDEV(0, sd->sc_dev.dv_unit, RAW_PART),
1282 sdstrategy, lp, sd->sc_dk.dk_cpulabel);
1283 if (errstring) {
1284 printf("%s: %s\n", sd->sc_dev.dv_xname, errstring);
1285 return;
1286 }
1287 }
1288
1289 void
1290 sd_shutdown(arg)
1291 void *arg;
1292 {
1293 struct sd_softc *sd = arg;
1294
1295 /*
1296 * If the disk cache needs to be flushed, and the disk supports
1297 * it, flush it. We're cold at this point, so we poll for
1298 * completion.
1299 */
1300 if ((sd->flags & SDF_DIRTY) != 0) {
1301 if (sd_flush(sd, XS_CTL_NOSLEEP|XS_CTL_POLL)) {
1302 printf("%s: cache synchronization failed\n",
1303 sd->sc_dev.dv_xname);
1304 sd->flags &= ~SDF_FLUSHING;
1305 } else
1306 sd->flags &= ~(SDF_FLUSHING|SDF_DIRTY);
1307 }
1308 }
1309
1310 /*
1311 * Tell the device to map out a defective block
1312 */
1313 int
1314 sd_reassign_blocks(sd, blkno)
1315 struct sd_softc *sd;
1316 u_long blkno;
1317 {
1318 struct scsi_reassign_blocks scsipi_cmd;
1319 struct scsi_reassign_blocks_data rbdata;
1320
1321 memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
1322 memset(&rbdata, 0, sizeof(rbdata));
1323 scsipi_cmd.opcode = SCSI_REASSIGN_BLOCKS;
1324
1325 _lto2b(sizeof(rbdata.defect_descriptor[0]), rbdata.length);
1326 _lto4b(blkno, rbdata.defect_descriptor[0].dlbaddr);
1327
1328 return (scsipi_command(sd->sc_periph,
1329 (struct scsipi_generic *)&scsipi_cmd, sizeof(scsipi_cmd),
1330 (u_char *)&rbdata, sizeof(rbdata), SDRETRIES, 5000, NULL,
1331 XS_CTL_DATA_OUT | XS_CTL_DATA_ONSTACK));
1332 }
1333
1334 /*
1335  * Check errors: handle "unit becoming ready" and "unit stopped" sense here.
1336 */
1337 int
1338 sd_interpret_sense(xs)
1339 struct scsipi_xfer *xs;
1340 {
1341 struct scsipi_periph *periph = xs->xs_periph;
1342 struct scsipi_sense_data *sense = &xs->sense.scsi_sense;
1343 struct sd_softc *sd = (void *)periph->periph_dev;
1344 int s, error, retval = EJUSTRETURN;
1345
1346 /*
1347 * If the periph is already recovering, just do the normal
1348 * error processing.
1349 */
1350 if (periph->periph_flags & PERIPH_RECOVERING)
1351 return (retval);
1352
1353 /*
1354 	 * If the media is not loaded yet, let the generic code handle it.
1355 */
1356 if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0)
1357 return (retval);
1358
1359 /*
1360 	 * If it isn't an extended or extended/deferred error, let
1361 * the generic code handle it.
1362 */
1363 if ((sense->error_code & SSD_ERRCODE) != 0x70 &&
1364 (sense->error_code & SSD_ERRCODE) != 0x71)
1365 return (retval);
1366
1367 if ((sense->flags & SSD_KEY) == SKEY_NOT_READY &&
1368 sense->add_sense_code == 0x4) {
1369 if (sense->add_sense_code_qual == 0x01) {
1370 /*
1371 * Unit In The Process Of Becoming Ready.
1372 */
1373 printf("%s: waiting for pack to spin up...\n",
1374 sd->sc_dev.dv_xname);
1375 if (!callout_pending(&periph->periph_callout))
1376 scsipi_periph_freeze(periph, 1);
1377 callout_reset(&periph->periph_callout,
1378 5 * hz, scsipi_periph_timed_thaw, periph);
1379 retval = ERESTART;
1380 } else if (sense->add_sense_code_qual == 0x02) {
1381 printf("%s: pack is stopped, restarting...\n",
1382 sd->sc_dev.dv_xname);
1383 s = splbio();
1384 periph->periph_flags |= PERIPH_RECOVERING;
1385 splx(s);
1386 error = scsipi_start(periph, SSS_START,
1387 XS_CTL_URGENT|XS_CTL_HEAD_TAG|
1388 XS_CTL_THAW_PERIPH|XS_CTL_FREEZE_PERIPH);
1389 if (error) {
1390 printf("%s: unable to restart pack\n",
1391 sd->sc_dev.dv_xname);
1392 retval = error;
1393 } else
1394 retval = ERESTART;
1395 s = splbio();
1396 periph->periph_flags &= ~PERIPH_RECOVERING;
1397 splx(s);
1398 }
1399 }
1400 return (retval);
1401 }
1402
1403
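/*
 * Return the size of the given partition in DEV_BSIZE units, for
 * swapping/dumping; the device is opened temporarily if it isn't
 * already open.  Returns -1 if the unit, media, or partition is
 * unusable.
 */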
1404 int
1405 sdsize(dev)
1406 dev_t dev;
1407 {
1408 struct sd_softc *sd;
1409 int part, unit, omask;
1410 int size;
1411
1412 unit = SDUNIT(dev);
1413 if (unit >= sd_cd.cd_ndevs)
1414 return (-1);
1415 sd = sd_cd.cd_devs[unit];
1416 if (sd == NULL)
1417 return (-1);
1418
1419 if ((sd->sc_dev.dv_flags & DVF_ACTIVE) == 0)
1420 return (-1);
1421
1422 part = SDPART(dev);
1423 omask = sd->sc_dk.dk_openmask & (1 << part);
1424
1425 if (omask == 0 && sdopen(dev, 0, S_IFBLK, NULL) != 0)
1426 return (-1);
1427 if ((sd->sc_periph->periph_flags & PERIPH_MEDIA_LOADED) == 0)
1428 size = -1;
1429 else if (sd->sc_dk.dk_label->d_partitions[part].p_fstype != FS_SWAP)
1430 size = -1;
1431 else
1432 size = sd->sc_dk.dk_label->d_partitions[part].p_size *
1433 (sd->sc_dk.dk_label->d_secsize / DEV_BSIZE);
1434 if (omask == 0 && sdclose(dev, 0, S_IFBLK, NULL) != 0)
1435 return (-1);
1436 return (size);
1437 }
1438
1439 /* #define SD_DUMP_NOT_TRUSTED if you just want to watch */
1440 static struct scsipi_xfer sx;
1441 static int sddoingadump;
1442
1443 /*
1444 * dump all of physical memory into the partition specified, starting
1445 * at offset 'dumplo' into the partition.
1446 */
1447 int
1448 sddump(dev, blkno, va, size)
1449 dev_t dev;
1450 daddr_t blkno;
1451 caddr_t va;
1452 size_t size;
1453 {
1454 struct sd_softc *sd; /* disk unit to do the I/O */
1455 struct disklabel *lp; /* disk's disklabel */
1456 int unit, part;
1457 int sectorsize; /* size of a disk sector */
1458 int nsects; /* number of sectors in partition */
1459 int sectoff; /* sector offset of partition */
1460 int totwrt; /* total number of sectors left to write */
1461 int nwrt; /* current number of sectors to write */
1462 struct scsipi_rw_big cmd; /* write command */
1463 struct scsipi_xfer *xs; /* ... convenience */
1464 struct scsipi_periph *periph;
1465 struct scsipi_channel *chan;
1466
1467 /* Check if recursive dump; if so, punt. */
1468 if (sddoingadump)
1469 return (EFAULT);
1470
1471 /* Mark as active early. */
1472 sddoingadump = 1;
1473
1474 unit = SDUNIT(dev); /* Decompose unit & partition. */
1475 part = SDPART(dev);
1476
1477 /* Check for acceptable drive number. */
1478 if (unit >= sd_cd.cd_ndevs || (sd = sd_cd.cd_devs[unit]) == NULL)
1479 return (ENXIO);
1480
1481 if ((sd->sc_dev.dv_flags & DVF_ACTIVE) == 0)
1482 return (ENODEV);
1483
1484 periph = sd->sc_periph;
1485 chan = periph->periph_channel;
1486
1487 /* Make sure it was initialized. */
1488 if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0)
1489 return (ENXIO);
1490
1491 	/* Convert to disk sectors; the request must be a multiple of the sector size. */
1492 lp = sd->sc_dk.dk_label;
1493 sectorsize = lp->d_secsize;
1494 if ((size % sectorsize) != 0)
1495 return (EFAULT);
1496 totwrt = size / sectorsize;
1497 blkno = dbtob(blkno) / sectorsize; /* blkno in DEV_BSIZE units */
1498
1499 nsects = lp->d_partitions[part].p_size;
1500 sectoff = lp->d_partitions[part].p_offset;
1501
1502 /* Check transfer bounds against partition size. */
1503 if ((blkno < 0) || ((blkno + totwrt) > nsects))
1504 return (EINVAL);
1505
1506 /* Offset block number to start of partition. */
1507 blkno += sectoff;
1508
1509 xs = &sx;
1510
1511 while (totwrt > 0) {
1512 nwrt = totwrt; /* XXX */
1513 #ifndef SD_DUMP_NOT_TRUSTED
1514 /*
1515 * Fill out the scsi command
1516 */
1517 memset(&cmd, 0, sizeof(cmd));
1518 cmd.opcode = WRITE_BIG;
1519 _lto4b(blkno, cmd.addr);
1520 _lto2b(nwrt, cmd.length);
1521 /*
1522 * Fill out the scsipi_xfer structure
1523 		 * Note: we cannot sleep as we may be in interrupt context;
1524 		 * don't use scsipi_command() as it may want to wait
1525 		 * for an xs.
1526 */
1527 memset(xs, 0, sizeof(sx));
1528 xs->xs_control |= XS_CTL_NOSLEEP | XS_CTL_POLL |
1529 XS_CTL_DATA_OUT;
1530 xs->xs_status = 0;
1531 xs->xs_periph = periph;
1532 xs->xs_retries = SDRETRIES;
1533 xs->timeout = 10000; /* 10000 millisecs for a disk ! */
1534 xs->cmd = (struct scsipi_generic *)&cmd;
1535 xs->cmdlen = sizeof(cmd);
1536 xs->resid = nwrt * sectorsize;
1537 xs->error = XS_NOERROR;
1538 xs->bp = 0;
1539 xs->data = va;
1540 xs->datalen = nwrt * sectorsize;
1541
1542 /*
1543 * Pass all this info to the scsi driver.
1544 */
1545 scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
1546 if ((xs->xs_status & XS_STS_DONE) == 0 ||
1547 xs->error != XS_NOERROR)
1548 return (EIO);
1549 #else /* SD_DUMP_NOT_TRUSTED */
1550 /* Let's just talk about this first... */
1551 printf("sd%d: dump addr 0x%x, blk %d\n", unit, va, blkno);
1552 delay(500 * 1000); /* half a second */
1553 #endif /* SD_DUMP_NOT_TRUSTED */
1554
1555 /* update block count */
1556 totwrt -= nwrt;
1557 blkno += nwrt;
1558 va += sectorsize * nwrt;
1559 }
1560 sddoingadump = 0;
1561 return (0);
1562 }
1563
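/*
 * Issue a MODE SENSE for the given page, using the 10-byte form for
 * devices that require big commands (and can handle big MODE SENSE)
 * and the 6-byte form otherwise.  *big tells the caller which header
 * format to parse.
 */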
1564 int
1565 sd_mode_sense(sd, byte2, sense, size, page, flags, big)
1566 struct sd_softc *sd;
1567 u_int8_t byte2;
1568 void *sense;
1569 size_t size;
1570 int page, flags;
1571 int *big;
1572 {
1573
1574 if ((sd->sc_periph->periph_quirks & PQUIRK_ONLYBIG) &&
1575 !(sd->sc_periph->periph_quirks & PQUIRK_NOBIGMODESENSE)) {
1576 *big = 1;
1577 return scsipi_mode_sense_big(sd->sc_periph, byte2, page, sense,
1578 size + sizeof(struct scsipi_mode_header_big),
1579 flags | XS_CTL_DATA_ONSTACK, SDRETRIES, 6000);
1580 } else {
1581 *big = 0;
1582 return scsipi_mode_sense(sd->sc_periph, byte2, page, sense,
1583 size + sizeof(struct scsipi_mode_header),
1584 flags | XS_CTL_DATA_ONSTACK, SDRETRIES, 6000);
1585 }
1586 }
1587
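/*
 * Issue a MODE SELECT using the same (6- or 10-byte) form that the
 * matching sd_mode_sense() used, zeroing the header's data length
 * field as the command requires.
 */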
1588 int
1589 sd_mode_select(sd, byte2, sense, size, flags, big)
1590 struct sd_softc *sd;
1591 u_int8_t byte2;
1592 void *sense;
1593 size_t size;
1594 int flags, big;
1595 {
1596
1597 if (big) {
1598 struct scsipi_mode_header_big *header = sense;
1599
1600 _lto2b(0, header->data_length);
1601 return scsipi_mode_select_big(sd->sc_periph, byte2, sense,
1602 size + sizeof(struct scsipi_mode_header_big),
1603 flags | XS_CTL_DATA_ONSTACK, SDRETRIES, 6000);
1604 } else {
1605 struct scsipi_mode_header *header = sense;
1606
1607 header->data_length = 0;
1608 return scsipi_mode_select(sd->sc_periph, byte2, sense,
1609 size + sizeof(struct scsipi_mode_header),
1610 flags | XS_CTL_DATA_ONSTACK, SDRETRIES, 6000);
1611 }
1612 }
1613
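/*
 * Simplified parameter fetch for RBC (T_SIMPLE_DIRECT) devices:
 * cross-check READ CAPACITY against the RBC device parameters page
 * (page 6) and fabricate a 64-head, 32-sector pseudo-geometry.
 */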
1614 int
1615 sd_get_simplifiedparms(sd, dp, flags)
1616 struct sd_softc *sd;
1617 struct disk_parms *dp;
1618 int flags;
1619 {
1620 struct {
1621 struct scsipi_mode_header header;
1622 /* no block descriptor */
1623 u_int8_t pg_code; /* page code (should be 6) */
1624 u_int8_t pg_length; /* page length (should be 11) */
1625 u_int8_t wcd; /* bit0: cache disable */
1626 u_int8_t lbs[2]; /* logical block size */
1627 u_int8_t size[5]; /* number of log. blocks */
1628 u_int8_t pp; /* power/performance */
1629 u_int8_t flags;
1630 u_int8_t resvd;
1631 } scsipi_sense;
1632 u_int64_t sectors;
1633 int error;
1634
1635 /*
1636 * scsipi_size (ie "read capacity") and mode sense page 6
1637 * give the same information. Do both for now, and check
1638 * for consistency.
1639 * XXX probably differs for removable media
1640 */
1641 dp->blksize = 512;
1642 if ((sectors = scsipi_size(sd->sc_periph, flags)) == 0)
1643 return (SDGP_RESULT_OFFLINE); /* XXX? */
1644
1645 error = scsipi_mode_sense(sd->sc_periph, SMS_DBD, 6,
1646 &scsipi_sense.header, sizeof(scsipi_sense),
1647 flags | XS_CTL_DATA_ONSTACK, SDRETRIES, 6000);
1648
1649 if (error != 0)
1650 return (SDGP_RESULT_OFFLINE); /* XXX? */
1651
1652 dp->blksize = _2btol(scsipi_sense.lbs);
1653 if (dp->blksize == 0)
1654 dp->blksize = 512;
1655
1656 /*
1657 * Create a pseudo-geometry.
1658 */
1659 dp->heads = 64;
1660 dp->sectors = 32;
1661 dp->cyls = sectors / (dp->heads * dp->sectors);
1662 dp->disksize = _5btol(scsipi_sense.size);
1663 if (dp->disksize <= UINT32_MAX && dp->disksize != sectors) {
1664 printf("RBC size: mode sense=%llu, get cap=%llu\n",
1665 (unsigned long long)dp->disksize,
1666 (unsigned long long)sectors);
1667 dp->disksize = sectors;
1668 }
1669 dp->disksize512 = (dp->disksize * dp->blksize) / DEV_BSIZE;
1670
1671 return (SDGP_RESULT_OK);
1672 }
1673
1674 /*
1675  * Get the scsi driver to send a full inquiry to the device and use the
1676 * results to fill out the disk parameter structure.
1677 */
1678 int
1679 sd_get_capacity(sd, dp, flags)
1680 struct sd_softc *sd;
1681 struct disk_parms *dp;
1682 int flags;
1683 {
1684 u_int64_t sectors;
1685 int error;
1686 #if 0
1687 int i;
1688 u_int8_t *p;
1689 #endif
1690
1691 dp->disksize = sectors = scsipi_size(sd->sc_periph, flags);
1692 if (sectors == 0) {
1693 struct scsipi_read_format_capacities scsipi_cmd;
1694 struct {
1695 struct scsipi_capacity_list_header header;
1696 struct scsipi_capacity_descriptor desc;
1697 } __attribute__((packed)) scsipi_result;
1698
1699 memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
1700 memset(&scsipi_result, 0, sizeof(scsipi_result));
1701 scsipi_cmd.opcode = READ_FORMAT_CAPACITIES;
1702 _lto2b(sizeof(scsipi_result), scsipi_cmd.length);
1703 error = scsipi_command(sd->sc_periph, (void *)&scsipi_cmd,
1704 sizeof(scsipi_cmd), (void *)&scsipi_result,
1705 sizeof(scsipi_result), SDRETRIES, 20000,
1706 NULL, flags | XS_CTL_DATA_IN | XS_CTL_DATA_ONSTACK /*|
1707 XS_CTL_IGNORE_ILLEGAL_REQUEST*/);
1708 if (error || scsipi_result.header.length == 0)
1709 return (SDGP_RESULT_OFFLINE);
1710
1711 #if 0
1712 printf("rfc: length=%d\n", scsipi_result.header.length);
1713 printf("rfc result:"); for (i = sizeof(struct scsipi_capacity_list_header) + scsipi_result.header.length, p = (void *)&scsipi_result; i; i--, p++) printf(" %02x", *p); printf("\n");
1714 #endif
1715 switch (scsipi_result.desc.byte5 & SCSIPI_CAP_DESC_CODE_MASK) {
1716 case SCSIPI_CAP_DESC_CODE_RESERVED:
1717 case SCSIPI_CAP_DESC_CODE_FORMATTED:
1718 break;
1719
1720 case SCSIPI_CAP_DESC_CODE_UNFORMATTED:
1721 return (SDGP_RESULT_UNFORMATTED);
1722
1723 case SCSIPI_CAP_DESC_CODE_NONE:
1724 return (SDGP_RESULT_OFFLINE);
1725 }
1726
1727 dp->disksize = sectors = _4btol(scsipi_result.desc.nblks);
1728 if (sectors == 0)
1729 return (SDGP_RESULT_OFFLINE); /* XXX? */
1730
1731 dp->blksize = _3btol(scsipi_result.desc.blklen);
1732 if (dp->blksize == 0)
1733 dp->blksize = 512;
1734 } else {
1735 struct sd_mode_sense_data scsipi_sense;
1736 int big, bsize;
1737 struct scsi_blk_desc *bdesc;
1738
1739 memset(&scsipi_sense, 0, sizeof(scsipi_sense));
1740 error = sd_mode_sense(sd, 0, &scsipi_sense,
1741 sizeof(scsipi_sense.blk_desc), 0, flags | XS_CTL_SILENT, &big);
1742 dp->blksize = 512;
1743 if (!error) {
1744 if (big) {
1745 bdesc = (void *)(&scsipi_sense.header.big + 1);
1746 bsize = _2btol(scsipi_sense.header.big.blk_desc_len);
1747 } else {
1748 bdesc = (void *)(&scsipi_sense.header.small + 1);
1749 bsize = scsipi_sense.header.small.blk_desc_len;
1750 }
1751
1752 #if 0
1753 printf("page 0 sense:"); for (i = sizeof(scsipi_sense), p = (void *)&scsipi_sense; i; i--, p++) printf(" %02x", *p); printf("\n");
1754 printf("page 0 bsize=%d\n", bsize);
1755 printf("page 0 ok\n");
1756 #endif
1757
1758 if (bsize >= 8) {
1759 dp->blksize = _3btol(bdesc->blklen);
1760 if (dp->blksize == 0)
1761 dp->blksize = 512;
1762 }
1763 }
1764 }
1765
1766 dp->disksize512 = (sectors * dp->blksize) / DEV_BSIZE;
1767 return (0);
1768 }
1769
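/*
 * Fill in the disk parameter structure: get the capacity and block
 * size, then derive a geometry from the rigid geometry page (4), the
 * flexible geometry page (5), or, failing those, a fabricated one.
 */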
1770 int
1771 sd_get_parms(sd, dp, flags)
1772 struct sd_softc *sd;
1773 struct disk_parms *dp;
1774 int flags;
1775 {
1776 struct sd_mode_sense_data scsipi_sense;
1777 int error;
1778 int big;
1779 int byte2;
1780 union scsi_disk_pages *pages;
1781 #if 0
1782 int i;
1783 u_int8_t *p;
1784 #endif
1785
1786 /*
1787 * If offline, the SDEV_MEDIA_LOADED flag will be
1788 * cleared by the caller if necessary.
1789 */
1790 if (sd->type == T_SIMPLE_DIRECT)
1791 return (sd_get_simplifiedparms(sd, dp, flags));
1792
1793 error = sd_get_capacity(sd, dp, flags);
1794 if (error)
1795 return (error);
1796
1797 if (sd->type == T_OPTICAL)
1798 goto page0;
1799
1800 /* Try MODE SENSE with `disable block descriptors' first */
1801 byte2 = SMS_DBD;
1802 do_ms_again:
1803 memset(&scsipi_sense, 0, sizeof(scsipi_sense));
1804 error = sd_mode_sense(sd, byte2, &scsipi_sense,
1805 sizeof(scsipi_sense.blk_desc) +
1806 sizeof(scsipi_sense.pages.rigid_geometry), 4,
1807 flags | XS_CTL_SILENT, &big);
1808 if (error != 0 && byte2 == SMS_DBD) {
1809 /* No result; try once more with DBD off */
1810 byte2 = 0;
1811 goto do_ms_again;
1812 }
1813
1814 if (!error) {
1815 int poffset;
1816 if (big) {
1817 poffset = sizeof scsipi_sense.header.big;
1818 poffset += _2btol(scsipi_sense.header.big.blk_desc_len);
1819 } else {
1820 poffset = sizeof scsipi_sense.header.small;
1821 poffset += scsipi_sense.header.small.blk_desc_len;
1822 }
1823
1824 pages = (void *)((u_long)&scsipi_sense + poffset);
1825 #if 0
1826 printf("page 4 sense:"); for (i = sizeof(scsipi_sense), p = (void *)&scsipi_sense; i; i--, p++) printf(" %02x", *p); printf("\n");
1827 printf("page 4 pg_code=%d sense=%p/%p\n", pages->rigid_geometry.pg_code, &scsipi_sense, pages);
1828 #endif
1829
1830 if ((pages->rigid_geometry.pg_code & PGCODE_MASK) != 4)
1831 goto page5;
1832
1833 SC_DEBUG(sd->sc_periph, SCSIPI_DB3,
1834 ("%d cyls, %d heads, %d precomp, %d red_write, %d land_zone\n",
1835 _3btol(pages->rigid_geometry.ncyl),
1836 pages->rigid_geometry.nheads,
1837 _2btol(pages->rigid_geometry.st_cyl_wp),
1838 _2btol(pages->rigid_geometry.st_cyl_rwc),
1839 _2btol(pages->rigid_geometry.land_zone)));
1840
1841 /*
1842 * KLUDGE!! (for zone recorded disks)
1843 * give a number of sectors so that sec * trks * cyls
1844 * is <= disk_size
1845 * can lead to wasted space! THINK ABOUT THIS !
1846 */
1847 dp->heads = pages->rigid_geometry.nheads;
1848 dp->cyls = _3btol(pages->rigid_geometry.ncyl);
1849 if (dp->heads == 0 || dp->cyls == 0)
1850 goto page5;
1851 dp->sectors = dp->disksize / (dp->heads * dp->cyls); /* XXX */
1852
1853 dp->rot_rate = _2btol(pages->rigid_geometry.rpm);
1854 if (dp->rot_rate == 0)
1855 dp->rot_rate = 3600;
1856
1857 #if 0
1858 printf("page 4 ok\n");
1859 #endif
1860 goto blksize;
1861 }
1862
1863 page5:
1864 /* XXX - Try with SMS_DBD first, like in the page 4 case? */
1865 memset(&scsipi_sense, 0, sizeof(scsipi_sense));
1866 error = sd_mode_sense(sd, 0, &scsipi_sense,
1867 sizeof(scsipi_sense.blk_desc) +
1868 sizeof(scsipi_sense.pages.flex_geometry), 5,
1869 flags | XS_CTL_SILENT, &big);
1870 if (!error) {
1871 int poffset;
1872 if (big) {
1873 poffset = sizeof scsipi_sense.header.big;
1874 poffset += _2btol(scsipi_sense.header.big.blk_desc_len);
1875 } else {
1876 poffset = sizeof scsipi_sense.header.small;
1877 poffset += scsipi_sense.header.small.blk_desc_len;
1878 }
1879
1880 pages = (void *)((u_long)&scsipi_sense + poffset);
1881
1882 #if 0
1883 printf("page 5 sense:"); for (i = sizeof(scsipi_sense), p = (void *)&scsipi_sense; i; i--, p++) printf(" %02x", *p); printf("\n");
1884 printf("page 5 pg_code=%d sense=%p/%p\n", pages->flex_geometry.pg_code, &scsipi_sense, pages);
1885 #endif
1886
1887 if ((pages->flex_geometry.pg_code & PGCODE_MASK) != 5)
1888 goto page0;
1889
1890 SC_DEBUG(sd->sc_periph, SCSIPI_DB3,
1891 ("%d cyls, %d heads, %d sec, %d bytes/sec\n",
1892 _3btol(pages->flex_geometry.ncyl),
1893 pages->flex_geometry.nheads,
1894 pages->flex_geometry.ph_sec_tr,
1895 _2btol(pages->flex_geometry.bytes_s)));
1896
1897 dp->heads = pages->flex_geometry.nheads;
1898 dp->cyls = _2btol(pages->flex_geometry.ncyl);
1899 dp->sectors = pages->flex_geometry.ph_sec_tr;
1900 if (dp->heads == 0 || dp->cyls == 0 || dp->sectors == 0)
1901 goto page0;
1902
1903 dp->rot_rate = _2btol(pages->rigid_geometry.rpm);
1904 if (dp->rot_rate == 0)
1905 dp->rot_rate = 3600;
1906
1907 #if 0
1908 printf("page 5 ok\n");
1909 #endif
1910 goto blksize;
1911 }
1912
1913 page0:
1914 printf("%s: fabricating a geometry\n", sd->sc_dev.dv_xname);
1915 /* Try calling driver's method for figuring out geometry. */
1916 if (!sd->sc_periph->periph_channel->chan_adapter->adapt_getgeom ||
1917 !(*sd->sc_periph->periph_channel->chan_adapter->adapt_getgeom)
1918 (sd->sc_periph, dp, dp->disksize)) {
1919 /*
1920 		 * Use the Adaptec standard fictitious geometry; this
1921 		 * depends on which controller (e.g. the 1542C is different),
1922 		 * but we have to put SOMETHING here...
1923 */
1924 dp->heads = 64;
1925 dp->sectors = 32;
1926 dp->cyls = dp->disksize / (64 * 32);
1927 }
1928 dp->rot_rate = 3600;
1929
1930 blksize:
1931 return (SDGP_RESULT_OK);
1932 }
1933
1934 int
1935 sd_flush(sd, flags)
1936 struct sd_softc *sd;
1937 int flags;
1938 {
1939 struct scsipi_periph *periph = sd->sc_periph;
1940 struct scsi_synchronize_cache sync_cmd;
1941
1942 /*
1943 * If the device is SCSI-2, issue a SYNCHRONIZE CACHE.
1944 * We issue with address 0 length 0, which should be
1945 * interpreted by the device as "all remaining blocks
1946 * starting at address 0". We ignore ILLEGAL REQUEST
1947 * in the event that the command is not supported by
1948 * the device, and poll for completion so that we know
1949 * that the cache has actually been flushed.
1950 *
1951 * Unless, that is, the device can't handle the SYNCHRONIZE CACHE
1952 * command, as indicated by our quirks flags.
1953 *
1954 * XXX What about older devices?
1955 */
1956 if (periph->periph_version >= 2 &&
1957 (periph->periph_quirks & PQUIRK_NOSYNCCACHE) == 0) {
1958 sd->flags |= SDF_FLUSHING;
1959 memset(&sync_cmd, 0, sizeof(sync_cmd));
1960 sync_cmd.opcode = SCSI_SYNCHRONIZE_CACHE;
1961
1962 return(scsipi_command(periph,
1963 (struct scsipi_generic *)&sync_cmd, sizeof(sync_cmd),
1964 NULL, 0, SDRETRIES, 100000, NULL,
1965 flags|XS_CTL_IGNORE_ILLEGAL_REQUEST));
1966 } else
1967 return (0);
1968 }
1969
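/*
 * Read the caching mode page (page 8) and report the state of the
 * read and write caches, and whether they can be changed or saved,
 * as DKCACHE_* bits.
 */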
1970 int
1971 sd_getcache(sd, bitsp)
1972 struct sd_softc *sd;
1973 int *bitsp;
1974 {
1975 struct scsipi_periph *periph = sd->sc_periph;
1976 struct sd_mode_sense_data scsipi_sense;
1977 int error, bits = 0;
1978 int big;
1979 union scsi_disk_pages *pages;
1980
1981 if (periph->periph_version < 2)
1982 return (EOPNOTSUPP);
1983
1984 memset(&scsipi_sense, 0, sizeof(scsipi_sense));
1985 error = sd_mode_sense(sd, SMS_DBD, &scsipi_sense,
1986 sizeof(scsipi_sense.pages.caching_params), 8, 0, &big);
1987 if (error)
1988 return (error);
1989
1990 if (big)
1991 pages = (void *)(&scsipi_sense.header.big + 1);
1992 else
1993 pages = (void *)(&scsipi_sense.header.small + 1);
1994
1995 if ((pages->caching_params.flags & CACHING_RCD) == 0)
1996 bits |= DKCACHE_READ;
1997 if (pages->caching_params.flags & CACHING_WCE)
1998 bits |= DKCACHE_WRITE;
1999 if (pages->caching_params.pg_code & PGCODE_PS)
2000 bits |= DKCACHE_SAVE;
2001
2002 memset(&scsipi_sense, 0, sizeof(scsipi_sense));
2003 error = sd_mode_sense(sd, SMS_DBD, &scsipi_sense,
2004 sizeof(scsipi_sense.pages.caching_params),
2005 SMS_PAGE_CTRL_CHANGEABLE|8, 0, &big);
2006 if (error == 0) {
2007 if (big)
2008 pages = (void *)(&scsipi_sense.header.big + 1);
2009 else
2010 pages = (void *)(&scsipi_sense.header.small + 1);
2011
2012 if (pages->caching_params.flags & CACHING_RCD)
2013 bits |= DKCACHE_RCHANGE;
2014 if (pages->caching_params.flags & CACHING_WCE)
2015 bits |= DKCACHE_WCHANGE;
2016 }
2017
2018 *bitsp = bits;
2019
2020 return (0);
2021 }
2022
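/*
 * Update the RCD and WCE bits in the caching mode page to match the
 * requested DKCACHE_* bits and write the page back with MODE SELECT,
 * saving it if DKCACHE_SAVE is requested.
 */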
2023 int
2024 sd_setcache(sd, bits)
2025 struct sd_softc *sd;
2026 int bits;
2027 {
2028 struct scsipi_periph *periph = sd->sc_periph;
2029 struct sd_mode_sense_data scsipi_sense;
2030 int error;
2031 uint8_t oflags, byte2 = 0;
2032 int big;
2033 union scsi_disk_pages *pages;
2034
2035 if (periph->periph_version < 2)
2036 return (EOPNOTSUPP);
2037
2038 memset(&scsipi_sense, 0, sizeof(scsipi_sense));
2039 error = sd_mode_sense(sd, SMS_DBD, &scsipi_sense,
2040 sizeof(scsipi_sense.pages.caching_params), 8, 0, &big);
2041 if (error)
2042 return (error);
2043
2044 if (big)
2045 pages = (void *)(&scsipi_sense.header.big + 1);
2046 else
2047 pages = (void *)(&scsipi_sense.header.small + 1);
2048
2049 oflags = pages->caching_params.flags;
2050
2051 if (bits & DKCACHE_READ)
2052 pages->caching_params.flags &= ~CACHING_RCD;
2053 else
2054 pages->caching_params.flags |= CACHING_RCD;
2055
2056 if (bits & DKCACHE_WRITE)
2057 pages->caching_params.flags |= CACHING_WCE;
2058 else
2059 pages->caching_params.flags &= ~CACHING_WCE;
2060
2061 if (oflags == pages->caching_params.flags)
2062 return (0);
2063
2064 pages->caching_params.pg_code &= PGCODE_MASK;
2065
2066 if (bits & DKCACHE_SAVE)
2067 byte2 |= SMS_SP;
2068
2069 return (sd_mode_select(sd, byte2|SMS_PF, &scsipi_sense,
2070 sizeof(struct scsipi_mode_page_header) +
2071 pages->caching_params.pg_length, 0, big));
2072 }
2073