1 /*	$NetBSD: sd.c,v 1.211 2003/10/27 23:03:05 fredb Exp $	*/
2
3 /*-
4 * Copyright (c) 1998, 2003 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Charles M. Hannum.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*
40 * Originally written by Julian Elischer (julian (at) dialix.oz.au)
41 * for TRW Financial Systems for use under the MACH(2.5) operating system.
42 *
43 * TRW Financial Systems, in accordance with their agreement with Carnegie
44 * Mellon University, makes this software available to CMU to distribute
45 * or use in any manner that they see fit as long as this message is kept with
46 * the software. For this reason TFS also grants any other persons or
47 * organisations permission to use or modify this software.
48 *
49 * TFS supplies this software to be publicly redistributed
50 * on the understanding that TFS is not responsible for the correct
51 * functioning of this software in any circumstances.
52 *
53 * Ported to run under 386BSD by Julian Elischer (julian (at) dialix.oz.au) Sept 1992
54 */
55
56 #include <sys/cdefs.h>
57 __KERNEL_RCSID(0, "$NetBSD: sd.c,v 1.211 2003/10/27 23:03:05 fredb Exp $");
58
59 #include "opt_scsi.h"
60 #include "opt_bufq.h"
61 #include "rnd.h"
62
63 #include <sys/param.h>
64 #include <sys/systm.h>
65 #include <sys/kernel.h>
66 #include <sys/file.h>
67 #include <sys/stat.h>
68 #include <sys/ioctl.h>
69 #include <sys/scsiio.h>
70 #include <sys/buf.h>
71 #include <sys/uio.h>
72 #include <sys/malloc.h>
73 #include <sys/errno.h>
74 #include <sys/device.h>
75 #include <sys/disklabel.h>
76 #include <sys/disk.h>
77 #include <sys/proc.h>
78 #include <sys/conf.h>
79 #include <sys/vnode.h>
80 #if NRND > 0
81 #include <sys/rnd.h>
82 #endif
83
84 #include <dev/scsipi/scsipi_all.h>
85 #include <dev/scsipi/scsi_all.h>
86 #include <dev/scsipi/scsipi_disk.h>
87 #include <dev/scsipi/scsi_disk.h>
88 #include <dev/scsipi/scsiconf.h>
89 #include <dev/scsipi/sdvar.h>
90
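/*
 * The dev_t minor number encodes a unit and a partition.  SDUNIT() and
 * SDPART() take them apart, SDMINOR()/MAKESDDEV() put them back together,
 * and SDLABELDEV() maps any partition device to the raw partition of the
 * same unit, which is where the disklabel is read and written.
 */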
91 #define SDUNIT(dev) DISKUNIT(dev)
92 #define SDPART(dev) DISKPART(dev)
93 #define SDMINOR(unit, part) DISKMINOR(unit, part)
94 #define MAKESDDEV(maj, unit, part) MAKEDISKDEV(maj, unit, part)
95
96 #define SDLABELDEV(dev) (MAKESDDEV(major(dev), SDUNIT(dev), RAW_PART))
97
98 int sdlock __P((struct sd_softc *));
99 void sdunlock __P((struct sd_softc *));
100 void sdminphys __P((struct buf *));
101 void sdgetdefaultlabel __P((struct sd_softc *, struct disklabel *));
102 void sdgetdisklabel __P((struct sd_softc *));
103 void sdstart __P((struct scsipi_periph *));
104 void sddone __P((struct scsipi_xfer *));
105 void sd_shutdown __P((void *));
106 int sd_reassign_blocks __P((struct sd_softc *, u_long));
107 int sd_interpret_sense __P((struct scsipi_xfer *));
108
109 int sd_mode_sense __P((struct sd_softc *, u_int8_t, void *, size_t, int,
110 int, int *));
111 int sd_mode_select __P((struct sd_softc *, u_int8_t, void *, size_t, int,
112 int));
113 int sd_get_simplifiedparms __P((struct sd_softc *, struct disk_parms *,
114 int));
115 int sd_get_capacity __P((struct sd_softc *, struct disk_parms *, int));
116 int sd_get_parms __P((struct sd_softc *, struct disk_parms *, int));
117 int sd_flush __P((struct sd_softc *, int));
118 int sd_getcache __P((struct sd_softc *, int *));
119 int sd_setcache __P((struct sd_softc *, int));
120
121 int sdmatch __P((struct device *, struct cfdata *, void *));
122 void sdattach __P((struct device *, struct device *, void *));
123 int sdactivate __P((struct device *, enum devact));
124 int sddetach __P((struct device *, int));
125
126 CFATTACH_DECL(sd, sizeof(struct sd_softc), sdmatch, sdattach, sddetach,
127 sdactivate);
128
129 extern struct cfdriver sd_cd;
130
131 const struct scsipi_inquiry_pattern sd_patterns[] = {
132 {T_DIRECT, T_FIXED,
133 "", "", ""},
134 {T_DIRECT, T_REMOV,
135 "", "", ""},
136 {T_OPTICAL, T_FIXED,
137 "", "", ""},
138 {T_OPTICAL, T_REMOV,
139 "", "", ""},
140 {T_SIMPLE_DIRECT, T_FIXED,
141 "", "", ""},
142 {T_SIMPLE_DIRECT, T_REMOV,
143 "", "", ""},
144 };
145
146 dev_type_open(sdopen);
147 dev_type_close(sdclose);
148 dev_type_read(sdread);
149 dev_type_write(sdwrite);
150 dev_type_ioctl(sdioctl);
151 dev_type_strategy(sdstrategy);
152 dev_type_dump(sddump);
153 dev_type_size(sdsize);
154
155 const struct bdevsw sd_bdevsw = {
156 sdopen, sdclose, sdstrategy, sdioctl, sddump, sdsize, D_DISK
157 };
158
159 const struct cdevsw sd_cdevsw = {
160 sdopen, sdclose, sdread, sdwrite, sdioctl,
161 nostop, notty, nopoll, nommap, nokqfilter, D_DISK
162 };
163
164 struct dkdriver sddkdriver = { sdstrategy };
165
166 const struct scsipi_periphsw sd_switch = {
167 sd_interpret_sense, /* check our error handler first */
168 sdstart, /* have a queue, served by this */
169 NULL, /* have no async handler */
170 sddone, /* deal with stats at interrupt time */
171 };
172
173 struct sd_mode_sense_data {
174 /*
175 * XXX
176 * We are not going to parse this as-is -- it just has to be large
177 * enough.
178 */
179 union {
180 struct scsipi_mode_header small;
181 struct scsipi_mode_header_big big;
182 } header;
183 struct scsi_blk_desc blk_desc;
184 union scsi_disk_pages pages;
185 };
186
187 /*
188  * The routine called by the low-level scsi code when it discovers
189  * a device suitable for this driver.
190 */
191 int
192 sdmatch(parent, match, aux)
193 struct device *parent;
194 struct cfdata *match;
195 void *aux;
196 {
197 struct scsipibus_attach_args *sa = aux;
198 int priority;
199
200 (void)scsipi_inqmatch(&sa->sa_inqbuf,
201 (caddr_t)sd_patterns, sizeof(sd_patterns) / sizeof(sd_patterns[0]),
202 sizeof(sd_patterns[0]), &priority);
203
204 return (priority);
205 }
206
207 /*
208 * Attach routine common to atapi & scsi.
209 */
210 void
211 sdattach(parent, self, aux)
212 struct device *parent, *self;
213 void *aux;
214 {
215 struct sd_softc *sd = (void *)self;
216 struct scsipibus_attach_args *sa = aux;
217 struct scsipi_periph *periph = sa->sa_periph;
218 int error, result;
219 struct disk_parms *dp = &sd->params;
220 char pbuf[9];
221
222 SC_DEBUG(periph, SCSIPI_DB2, ("sdattach: "));
223
224 sd->type = (sa->sa_inqbuf.type & SID_TYPE);
225 if (sd->type == T_SIMPLE_DIRECT)
226 periph->periph_quirks |= PQUIRK_ONLYBIG | PQUIRK_NOBIGMODESENSE;
227
228 if (scsipi_periph_bustype(sa->sa_periph) == SCSIPI_BUSTYPE_SCSI &&
229 periph->periph_version == 0)
230 sd->flags |= SDF_ANCIENT;
231
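	/*
	 * Allocate the buffer queue, sorted by raw (absolute) block number.
	 * With NEW_BUFQ_STRATEGY reads are also given priority over writes;
	 * otherwise the classic disksort ordering is used.
	 */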
232 #ifdef NEW_BUFQ_STRATEGY
233 bufq_alloc(&sd->buf_queue, BUFQ_READ_PRIO|BUFQ_SORT_RAWBLOCK);
234 #else
235 bufq_alloc(&sd->buf_queue, BUFQ_DISKSORT|BUFQ_SORT_RAWBLOCK);
236 #endif
237
238 /*
239 * Store information needed to contact our base driver
240 */
241 sd->sc_periph = periph;
242
243 periph->periph_dev = &sd->sc_dev;
244 periph->periph_switch = &sd_switch;
245
246 /*
247 * Increase our openings to the maximum-per-periph
248 * supported by the adapter. This will either be
249 * clamped down or grown by the adapter if necessary.
250 */
251 periph->periph_openings =
252 SCSIPI_CHAN_MAX_PERIPH(periph->periph_channel);
253 periph->periph_flags |= PERIPH_GROW_OPENINGS;
254
255 /*
256 * Initialize and attach the disk structure.
257 */
258 sd->sc_dk.dk_driver = &sddkdriver;
259 sd->sc_dk.dk_name = sd->sc_dev.dv_xname;
260 disk_attach(&sd->sc_dk);
261
262 /*
263 * Use the subdriver to request information regarding the drive.
264 */
265 aprint_naive("\n");
266 aprint_normal("\n");
267
268 error = scsipi_test_unit_ready(periph,
269 XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
270 XS_CTL_IGNORE_MEDIA_CHANGE | XS_CTL_SILENT_NODEV);
271
272 if (error)
273 result = SDGP_RESULT_OFFLINE;
274 else
275 result = sd_get_parms(sd, &sd->params, XS_CTL_DISCOVERY);
276 aprint_normal("%s: ", sd->sc_dev.dv_xname);
277 switch (result) {
278 case SDGP_RESULT_OK:
279 format_bytes(pbuf, sizeof(pbuf),
280 (u_int64_t)dp->disksize * dp->blksize);
281 aprint_normal(
282 "%s, %ld cyl, %ld head, %ld sec, %ld bytes/sect x %llu sectors",
283 pbuf, dp->cyls, dp->heads, dp->sectors, dp->blksize,
284 (unsigned long long)dp->disksize);
285 break;
286
287 case SDGP_RESULT_OFFLINE:
288 aprint_normal("drive offline");
289 break;
290
291 case SDGP_RESULT_UNFORMATTED:
292 aprint_normal("unformatted media");
293 break;
294
295 #ifdef DIAGNOSTIC
296 default:
297 panic("sdattach: unknown result from get_parms");
298 break;
299 #endif
300 }
301 aprint_normal("\n");
302
303 /*
304 * Establish a shutdown hook so that we can ensure that
305 * our data has actually made it onto the platter at
306 * shutdown time. Note that this relies on the fact
307 * that the shutdown hook code puts us at the head of
308 * the list (thus guaranteeing that our hook runs before
309 * our ancestors').
310 */
311 if ((sd->sc_sdhook =
312 shutdownhook_establish(sd_shutdown, sd)) == NULL)
313 aprint_error("%s: WARNING: unable to establish shutdown hook\n",
314 sd->sc_dev.dv_xname);
315
316 #if NRND > 0
317 /*
318 * attach the device into the random source list
319 */
320 rnd_attach_source(&sd->rnd_source, sd->sc_dev.dv_xname,
321 RND_TYPE_DISK, 0);
322 #endif
323 }
324
325 int
326 sdactivate(self, act)
327 struct device *self;
328 enum devact act;
329 {
330 int rv = 0;
331
332 switch (act) {
333 case DVACT_ACTIVATE:
334 rv = EOPNOTSUPP;
335 break;
336
337 case DVACT_DEACTIVATE:
338 /*
339 * Nothing to do; we key off the device's DVF_ACTIVE.
340 */
341 break;
342 }
343 return (rv);
344 }
345
346 int
347 sddetach(self, flags)
348 struct device *self;
349 int flags;
350 {
351 struct sd_softc *sd = (struct sd_softc *) self;
352 struct buf *bp;
353 int s, bmaj, cmaj, i, mn;
354
355 /* locate the major number */
356 bmaj = bdevsw_lookup_major(&sd_bdevsw);
357 cmaj = cdevsw_lookup_major(&sd_cdevsw);
358
359 s = splbio();
360
361 /* Kill off any queued buffers. */
362 while ((bp = BUFQ_GET(&sd->buf_queue)) != NULL) {
363 bp->b_error = EIO;
364 bp->b_flags |= B_ERROR;
365 bp->b_resid = bp->b_bcount;
366 biodone(bp);
367 }
368
369 bufq_free(&sd->buf_queue);
370
371 /* Kill off any pending commands. */
372 scsipi_kill_pending(sd->sc_periph);
373
374 splx(s);
375
376 /* Nuke the vnodes for any open instances */
377 for (i = 0; i < MAXPARTITIONS; i++) {
378 mn = SDMINOR(self->dv_unit, i);
379 vdevgone(bmaj, mn, mn, VBLK);
380 vdevgone(cmaj, mn, mn, VCHR);
381 }
382
383 /* Detach from the disk list. */
384 disk_detach(&sd->sc_dk);
385
386 /* Get rid of the shutdown hook. */
387 shutdownhook_disestablish(sd->sc_sdhook);
388
389 #if NRND > 0
390 /* Unhook the entropy source. */
391 rnd_detach_source(&sd->rnd_source);
392 #endif
393
394 return (0);
395 }
396
397 /*
398 * Wait interruptibly for an exclusive lock.
399 *
400 * XXX
401 * Several drivers do this; it should be abstracted and made MP-safe.
402 */
403 int
404 sdlock(sd)
405 struct sd_softc *sd;
406 {
407 int error;
408
409 while ((sd->flags & SDF_LOCKED) != 0) {
410 sd->flags |= SDF_WANTED;
411 if ((error = tsleep(sd, PRIBIO | PCATCH, "sdlck", 0)) != 0)
412 return (error);
413 }
414 sd->flags |= SDF_LOCKED;
415 return (0);
416 }
417
418 /*
419 * Unlock and wake up any waiters.
420 */
421 void
422 sdunlock(sd)
423 struct sd_softc *sd;
424 {
425
426 sd->flags &= ~SDF_LOCKED;
427 if ((sd->flags & SDF_WANTED) != 0) {
428 sd->flags &= ~SDF_WANTED;
429 wakeup(sd);
430 }
431 }
432
433 /*
434  * open the device. Make sure the partition info is as up-to-date as can be.
435 */
436 int
437 sdopen(dev, flag, fmt, p)
438 dev_t dev;
439 int flag, fmt;
440 struct proc *p;
441 {
442 struct sd_softc *sd;
443 struct scsipi_periph *periph;
444 struct scsipi_adapter *adapt;
445 int unit, part;
446 int error;
447
448 unit = SDUNIT(dev);
449 if (unit >= sd_cd.cd_ndevs)
450 return (ENXIO);
451 sd = sd_cd.cd_devs[unit];
452 if (sd == NULL)
453 return (ENXIO);
454
455 if ((sd->sc_dev.dv_flags & DVF_ACTIVE) == 0)
456 return (ENODEV);
457
458 periph = sd->sc_periph;
459 adapt = periph->periph_channel->chan_adapter;
460 part = SDPART(dev);
461
462 SC_DEBUG(periph, SCSIPI_DB1,
463 ("sdopen: dev=0x%x (unit %d (of %d), partition %d)\n", dev, unit,
464 sd_cd.cd_ndevs, part));
465
466 /*
467 * If this is the first open of this device, add a reference
468 * to the adapter.
469 */
470 if (sd->sc_dk.dk_openmask == 0 &&
471 (error = scsipi_adapter_addref(adapt)) != 0)
472 return (error);
473
474 if ((error = sdlock(sd)) != 0)
475 goto bad4;
476
477 if ((periph->periph_flags & PERIPH_OPEN) != 0) {
478 /*
479 * If any partition is open, but the disk has been invalidated,
480 * disallow further opens of non-raw partition
481 */
482 if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0 &&
483 (part != RAW_PART || fmt != S_IFCHR)) {
484 error = EIO;
485 goto bad3;
486 }
487 } else {
488 int silent;
489
490 if (part == RAW_PART && fmt == S_IFCHR)
491 silent = XS_CTL_SILENT;
492 else
493 silent = 0;
494
495 /* Check that it is still responding and ok. */
496 error = scsipi_test_unit_ready(periph,
497 XS_CTL_IGNORE_ILLEGAL_REQUEST | XS_CTL_IGNORE_MEDIA_CHANGE |
498 silent);
499
500 /*
501 * Start the pack spinning if necessary. Always allow the
502 		 * raw partition to be opened, for raw IOCTLs. Data transfers
503 		 * will check for PERIPH_MEDIA_LOADED.
504 */
505 if (error == EIO) {
506 int error2;
507
508 error2 = scsipi_start(periph, SSS_START, silent);
509 switch (error2) {
510 case 0:
511 error = 0;
512 break;
513 case EIO:
514 case EINVAL:
515 break;
516 default:
517 error = error2;
518 break;
519 }
520 }
521 if (error) {
522 if (silent)
523 goto out;
524 goto bad3;
525 }
526
527 periph->periph_flags |= PERIPH_OPEN;
528
529 if (periph->periph_flags & PERIPH_REMOVABLE) {
530 /* Lock the pack in. */
531 error = scsipi_prevent(periph, PR_PREVENT,
532 XS_CTL_IGNORE_ILLEGAL_REQUEST |
533 XS_CTL_IGNORE_MEDIA_CHANGE);
534 if (error)
535 goto bad;
536 }
537
538 if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0) {
539 periph->periph_flags |= PERIPH_MEDIA_LOADED;
540
541 /*
542 * Load the physical device parameters.
543 *
544 * Note that if media is present but unformatted,
545 * we allow the open (so that it can be formatted!).
546 * The drive should refuse real I/O, if the media is
547 * unformatted.
548 */
549 if (sd_get_parms(sd, &sd->params,
550 0) == SDGP_RESULT_OFFLINE) {
551 error = ENXIO;
552 goto bad2;
553 }
554 SC_DEBUG(periph, SCSIPI_DB3, ("Params loaded "));
555
556 /* Load the partition info if not already loaded. */
557 sdgetdisklabel(sd);
558 SC_DEBUG(periph, SCSIPI_DB3, ("Disklabel loaded "));
559 }
560 }
561
562 /* Check that the partition exists. */
563 if (part != RAW_PART &&
564 (part >= sd->sc_dk.dk_label->d_npartitions ||
565 sd->sc_dk.dk_label->d_partitions[part].p_fstype == FS_UNUSED)) {
566 error = ENXIO;
567 goto bad;
568 }
569
570 out:	/* Ensure only one open at a time. */
571 switch (fmt) {
572 case S_IFCHR:
573 sd->sc_dk.dk_copenmask |= (1 << part);
574 break;
575 case S_IFBLK:
576 sd->sc_dk.dk_bopenmask |= (1 << part);
577 break;
578 }
579 sd->sc_dk.dk_openmask =
580 sd->sc_dk.dk_copenmask | sd->sc_dk.dk_bopenmask;
581
582 SC_DEBUG(periph, SCSIPI_DB3, ("open complete\n"));
583 sdunlock(sd);
584 return (0);
585
586 bad2:
587 periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
588
589 bad:
590 if (sd->sc_dk.dk_openmask == 0) {
591 if (periph->periph_flags & PERIPH_REMOVABLE)
592 scsipi_prevent(periph, PR_ALLOW,
593 XS_CTL_IGNORE_ILLEGAL_REQUEST |
594 XS_CTL_IGNORE_MEDIA_CHANGE);
595 periph->periph_flags &= ~PERIPH_OPEN;
596 }
597
598 bad3:
599 sdunlock(sd);
600 bad4:
601 if (sd->sc_dk.dk_openmask == 0)
602 scsipi_adapter_delref(adapt);
603 return (error);
604 }
605
606 /*
607  * close the device. Only called if we are the LAST occurrence of an open
608 * device. Convenient now but usually a pain.
609 */
610 int
611 sdclose(dev, flag, fmt, p)
612 dev_t dev;
613 int flag, fmt;
614 struct proc *p;
615 {
616 struct sd_softc *sd = sd_cd.cd_devs[SDUNIT(dev)];
617 struct scsipi_periph *periph = sd->sc_periph;
618 struct scsipi_adapter *adapt = periph->periph_channel->chan_adapter;
619 int part = SDPART(dev);
620 int error;
621
622 if ((error = sdlock(sd)) != 0)
623 return (error);
624
625 switch (fmt) {
626 case S_IFCHR:
627 sd->sc_dk.dk_copenmask &= ~(1 << part);
628 break;
629 case S_IFBLK:
630 sd->sc_dk.dk_bopenmask &= ~(1 << part);
631 break;
632 }
633 sd->sc_dk.dk_openmask =
634 sd->sc_dk.dk_copenmask | sd->sc_dk.dk_bopenmask;
635
636 if (sd->sc_dk.dk_openmask == 0) {
637 /*
638 * If the disk cache needs flushing, and the disk supports
639 * it, do it now.
640 */
641 if ((sd->flags & SDF_DIRTY) != 0) {
642 if (sd_flush(sd, 0)) {
643 printf("%s: cache synchronization failed\n",
644 sd->sc_dev.dv_xname);
645 sd->flags &= ~SDF_FLUSHING;
646 } else
647 sd->flags &= ~(SDF_FLUSHING|SDF_DIRTY);
648 }
649
650 if (! (periph->periph_flags & PERIPH_KEEP_LABEL))
651 periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
652
653 scsipi_wait_drain(periph);
654
655 if (periph->periph_flags & PERIPH_REMOVABLE)
656 scsipi_prevent(periph, PR_ALLOW,
657 XS_CTL_IGNORE_ILLEGAL_REQUEST |
658 XS_CTL_IGNORE_NOT_READY);
659 periph->periph_flags &= ~PERIPH_OPEN;
660
661 scsipi_wait_drain(periph);
662
663 scsipi_adapter_delref(adapt);
664 }
665
666 sdunlock(sd);
667 return (0);
668 }
669
670 /*
671 * Actually translate the requested transfer into one the physical driver
672 * can understand. The transfer is described by a buf and will include
673 * only one physical transfer.
674 */
675 void
676 sdstrategy(bp)
677 struct buf *bp;
678 {
679 struct sd_softc *sd = sd_cd.cd_devs[SDUNIT(bp->b_dev)];
680 struct scsipi_periph *periph = sd->sc_periph;
681 struct disklabel *lp;
682 daddr_t blkno;
683 int s;
684 boolean_t sector_aligned;
685
686 blkno = 0; /* XXX to appease gcc3 */
687
688 SC_DEBUG(sd->sc_periph, SCSIPI_DB2, ("sdstrategy "));
689 SC_DEBUG(sd->sc_periph, SCSIPI_DB1,
690 ("%ld bytes @ blk %" PRId64 "\n", bp->b_bcount, bp->b_blkno));
691 /*
692 * If the device has been made invalid, error out
693 */
694 if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0 ||
695 (sd->sc_dev.dv_flags & DVF_ACTIVE) == 0) {
696 if (periph->periph_flags & PERIPH_OPEN)
697 bp->b_error = EIO;
698 else
699 bp->b_error = ENODEV;
700 goto bad;
701 }
702
703 lp = sd->sc_dk.dk_label;
704
705 /*
706 * The transfer must be a whole number of blocks, offset must not be
707 * negative.
708 */
709 if (lp->d_secsize == DEV_BSIZE) {
710 sector_aligned = (bp->b_bcount & (DEV_BSIZE - 1)) == 0;
711 } else {
712 sector_aligned = (bp->b_bcount % lp->d_secsize) == 0;
713 }
714 if (!sector_aligned || bp->b_blkno < 0) {
715 bp->b_error = EINVAL;
716 goto bad;
717 }
718 /*
719 	 * If it's a null transfer, return immediately.
720 */
721 if (bp->b_bcount == 0)
722 goto done;
723
724 /*
725 	 * Do bounds checking and adjust the transfer. If error, process.
726 * If end of partition, just return.
727 */
728 if (SDPART(bp->b_dev) == RAW_PART) {
729 if (bounds_check_with_mediasize(bp, DEV_BSIZE,
730 sd->params.disksize512) <= 0)
731 goto done;
732 } else {
733 if (bounds_check_with_label(&sd->sc_dk, bp,
734 (sd->flags & (SDF_WLABEL|SDF_LABELLING)) != 0) <= 0)
735 goto done;
736 }
737
738 /*
739 * Now convert the block number to absolute and put it in
740 * terms of the device's logical block size.
741 */
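	/*
	 * For example, with 2048-byte sectors a b_blkno counted in
	 * DEV_BSIZE (512-byte) units is divided by 4.
	 */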
742 if (lp->d_secsize == DEV_BSIZE)
743 blkno = bp->b_blkno;
744 else if (lp->d_secsize > DEV_BSIZE)
745 blkno = bp->b_blkno / (lp->d_secsize / DEV_BSIZE);
746 else
747 blkno = bp->b_blkno * (DEV_BSIZE / lp->d_secsize);
748
749 if (SDPART(bp->b_dev) != RAW_PART)
750 blkno += lp->d_partitions[SDPART(bp->b_dev)].p_offset;
751
752 bp->b_rawblkno = blkno;
753
754 s = splbio();
755
756 /*
757 * Place it in the queue of disk activities for this disk.
758 *
759 * XXX Only do disksort() if the current operating mode does not
760 * XXX include tagged queueing.
761 */
762 BUFQ_PUT(&sd->buf_queue, bp);
763
764 /*
765 * Tell the device to get going on the transfer if it's
766 * not doing anything, otherwise just wait for completion
767 */
768 sdstart(sd->sc_periph);
769
770 splx(s);
771 return;
772
773 bad:
774 bp->b_flags |= B_ERROR;
775 done:
776 /*
777 * Correctly set the buf to indicate a completed xfer
778 */
779 bp->b_resid = bp->b_bcount;
780 biodone(bp);
781 }
782
783 /*
784 * sdstart looks to see if there is a buf waiting for the device
785 * and that the device is not already busy. If both are true,
786  * it dequeues the buf and creates a scsi command to perform the
787 * transfer in the buf. The transfer request will call scsipi_done
788 * on completion, which will in turn call this routine again
789 * so that the next queued transfer is performed.
790 * The bufs are queued by the strategy routine (sdstrategy)
791 *
792 * This routine is also called after other non-queued requests
793 * have been made of the scsi driver, to ensure that the queue
794 * continues to be drained.
795 *
796 * must be called at the correct (highish) spl level
797 * sdstart() is called at splbio from sdstrategy and scsipi_done
798 */
799 void
800 sdstart(periph)
801 struct scsipi_periph *periph;
802 {
803 struct sd_softc *sd = (void *)periph->periph_dev;
804 struct disklabel *lp = sd->sc_dk.dk_label;
805 struct buf *bp = 0;
806 struct scsipi_rw_big cmd_big;
807 struct scsi_rw cmd_small;
808 struct scsipi_generic *cmdp;
809 int nblks, cmdlen, error, flags;
810
811 SC_DEBUG(periph, SCSIPI_DB2, ("sdstart "));
812 /*
813 * Check if the device has room for another command
814 */
815 while (periph->periph_active < periph->periph_openings) {
816 /*
817 		 * There is excess capacity, but a special command is waiting.
818 		 * It'll need the adapter as soon as we clear out of the
819 		 * way and let it run (user-level wait).
820 */
821 if (periph->periph_flags & PERIPH_WAITING) {
822 periph->periph_flags &= ~PERIPH_WAITING;
823 wakeup((caddr_t)periph);
824 return;
825 }
826
827 /*
828 * See if there is a buf with work for us to do..
829 */
830 if ((bp = BUFQ_GET(&sd->buf_queue)) == NULL)
831 return;
832
833 /*
834 * If the device has become invalid, abort all the
835 * reads and writes until all files have been closed and
836 * re-opened
837 */
838 if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0) {
839 bp->b_error = EIO;
840 bp->b_flags |= B_ERROR;
841 bp->b_resid = bp->b_bcount;
842 biodone(bp);
843 continue;
844 }
845
846 /*
847 * We have a buf, now we should make a command.
848 */
849
850 if (lp->d_secsize == DEV_BSIZE)
851 nblks = bp->b_bcount >> DEV_BSHIFT;
852 else
853 nblks = howmany(bp->b_bcount, lp->d_secsize);
854
855 /*
856 * Fill out the scsi command. If the transfer will
857 * fit in a "small" cdb, use it.
858 */
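		/*
		 * The 6-byte READ(6)/WRITE(6) CDB carries a 21-bit block
		 * address (<= 0x1fffff) and an 8-bit transfer length, so at
		 * most 255 blocks; anything larger falls back to the 10-byte
		 * form with a 32-bit address and a 16-bit block count.
		 */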
859 if (((bp->b_rawblkno & 0x1fffff) == bp->b_rawblkno) &&
860 ((nblks & 0xff) == nblks) &&
861 !(periph->periph_quirks & PQUIRK_ONLYBIG)) {
862 /*
863 * We can fit in a small cdb.
864 */
865 memset(&cmd_small, 0, sizeof(cmd_small));
866 cmd_small.opcode = (bp->b_flags & B_READ) ?
867 SCSI_READ_COMMAND : SCSI_WRITE_COMMAND;
868 _lto3b(bp->b_rawblkno, cmd_small.addr);
869 cmd_small.length = nblks & 0xff;
870 cmdlen = sizeof(cmd_small);
871 cmdp = (struct scsipi_generic *)&cmd_small;
872 } else {
873 /*
874 * Need a large cdb.
875 */
876 memset(&cmd_big, 0, sizeof(cmd_big));
877 cmd_big.opcode = (bp->b_flags & B_READ) ?
878 READ_BIG : WRITE_BIG;
879 _lto4b(bp->b_rawblkno, cmd_big.addr);
880 _lto2b(nblks, cmd_big.length);
881 cmdlen = sizeof(cmd_big);
882 cmdp = (struct scsipi_generic *)&cmd_big;
883 }
884
885 /* Instrumentation. */
886 disk_busy(&sd->sc_dk);
887
888 /*
889 * Mark the disk dirty so that the cache will be
890 * flushed on close.
891 */
892 if ((bp->b_flags & B_READ) == 0)
893 sd->flags |= SDF_DIRTY;
894
895 /*
896 * Figure out what flags to use.
897 */
898 flags = XS_CTL_NOSLEEP|XS_CTL_ASYNC|XS_CTL_SIMPLE_TAG;
899 if (bp->b_flags & B_READ)
900 flags |= XS_CTL_DATA_IN;
901 else
902 flags |= XS_CTL_DATA_OUT;
903
904 /*
905 * Call the routine that chats with the adapter.
906 * Note: we cannot sleep as we may be an interrupt
907 */
908 error = scsipi_command(periph, cmdp, cmdlen,
909 (u_char *)bp->b_data, bp->b_bcount,
910 SDRETRIES, SD_IO_TIMEOUT, bp, flags);
911 if (error) {
912 disk_unbusy(&sd->sc_dk, 0, 0);
913 printf("%s: not queued, error %d\n",
914 sd->sc_dev.dv_xname, error);
915 }
916 }
917 }
918
919 void
920 sddone(xs)
921 struct scsipi_xfer *xs;
922 {
923 struct sd_softc *sd = (void *)xs->xs_periph->periph_dev;
924
925 if (sd->flags & SDF_FLUSHING) {
926 /* Flush completed, no longer dirty. */
927 sd->flags &= ~(SDF_FLUSHING|SDF_DIRTY);
928 }
929
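	/*
	 * Account the completed transfer (bytes actually moved and the
	 * direction) for the disk statistics, and feed the block number
	 * to the random-number source if one is configured.
	 */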
930 if (xs->bp != NULL) {
931 disk_unbusy(&sd->sc_dk, xs->bp->b_bcount - xs->bp->b_resid,
932 (xs->bp->b_flags & B_READ));
933 #if NRND > 0
934 rnd_add_uint32(&sd->rnd_source, xs->bp->b_rawblkno);
935 #endif
936 }
937 }
938
939 void
940 sdminphys(bp)
941 struct buf *bp;
942 {
943 struct sd_softc *sd = sd_cd.cd_devs[SDUNIT(bp->b_dev)];
944 long max;
945
946 /*
947 * If the device is ancient, we want to make sure that
948 * the transfer fits into a 6-byte cdb.
949 *
950 * XXX Note that the SCSI-I spec says that 256-block transfers
951 * are allowed in a 6-byte read/write, and are specified
952 	 * by setting the "length" to 0. However, we're conservative
953 * here, allowing only 255-block transfers in case an
954 * ancient device gets confused by length == 0. A length of 0
955 * in a 10-byte read/write actually means 0 blocks.
956 */
957 if ((sd->flags & SDF_ANCIENT) &&
958 ((sd->sc_periph->periph_flags &
959 (PERIPH_REMOVABLE | PERIPH_MEDIA_LOADED)) != PERIPH_REMOVABLE)) {
960 max = sd->sc_dk.dk_label->d_secsize * 0xff;
961
962 if (bp->b_bcount > max)
963 bp->b_bcount = max;
964 }
965
966 (*sd->sc_periph->periph_channel->chan_adapter->adapt_minphys)(bp);
967 }
968
969 int
970 sdread(dev, uio, ioflag)
971 dev_t dev;
972 struct uio *uio;
973 int ioflag;
974 {
975
976 return (physio(sdstrategy, NULL, dev, B_READ, sdminphys, uio));
977 }
978
979 int
980 sdwrite(dev, uio, ioflag)
981 dev_t dev;
982 struct uio *uio;
983 int ioflag;
984 {
985
986 return (physio(sdstrategy, NULL, dev, B_WRITE, sdminphys, uio));
987 }
988
989 /*
990 * Perform special action on behalf of the user
991 * Knows about the internals of this device
992 */
993 int
994 sdioctl(dev, cmd, addr, flag, p)
995 dev_t dev;
996 u_long cmd;
997 caddr_t addr;
998 int flag;
999 struct proc *p;
1000 {
1001 struct sd_softc *sd = sd_cd.cd_devs[SDUNIT(dev)];
1002 struct scsipi_periph *periph = sd->sc_periph;
1003 int part = SDPART(dev);
1004 int error = 0;
1005 #ifdef __HAVE_OLD_DISKLABEL
1006 struct disklabel *newlabel = NULL;
1007 #endif
1008
1009 SC_DEBUG(sd->sc_periph, SCSIPI_DB2, ("sdioctl 0x%lx ", cmd));
1010
1011 /*
1012 * If the device is not valid, some IOCTLs can still be
1013 * handled on the raw partition. Check this here.
1014 */
1015 if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0) {
1016 switch (cmd) {
1017 case DIOCKLABEL:
1018 case DIOCWLABEL:
1019 case DIOCLOCK:
1020 case DIOCEJECT:
1021 case ODIOCEJECT:
1022 case DIOCGCACHE:
1023 case DIOCSCACHE:
1024 case SCIOCIDENTIFY:
1025 case OSCIOCIDENTIFY:
1026 case SCIOCCOMMAND:
1027 case SCIOCDEBUG:
1028 if (part == RAW_PART)
1029 break;
1030 /* FALLTHROUGH */
1031 default:
1032 if ((periph->periph_flags & PERIPH_OPEN) == 0)
1033 return (ENODEV);
1034 else
1035 return (EIO);
1036 }
1037 }
1038
1039 switch (cmd) {
1040 case DIOCGDINFO:
1041 *(struct disklabel *)addr = *(sd->sc_dk.dk_label);
1042 return (0);
1043
1044 #ifdef __HAVE_OLD_DISKLABEL
1045 case ODIOCGDINFO:
1046 newlabel = malloc(sizeof *newlabel, M_TEMP, M_WAITOK);
1047 if (newlabel == NULL)
1048 return EIO;
1049 memcpy(newlabel, sd->sc_dk.dk_label, sizeof (*newlabel));
1050 if (newlabel->d_npartitions <= OLDMAXPARTITIONS)
1051 memcpy(addr, newlabel, sizeof (struct olddisklabel));
1052 else
1053 error = ENOTTY;
1054 free(newlabel, M_TEMP);
1055 return error;
1056 #endif
1057
1058 case DIOCGPART:
1059 ((struct partinfo *)addr)->disklab = sd->sc_dk.dk_label;
1060 ((struct partinfo *)addr)->part =
1061 &sd->sc_dk.dk_label->d_partitions[part];
1062 return (0);
1063
1064 case DIOCWDINFO:
1065 case DIOCSDINFO:
1066 #ifdef __HAVE_OLD_DISKLABEL
1067 case ODIOCWDINFO:
1068 case ODIOCSDINFO:
1069 #endif
1070 {
1071 struct disklabel *lp;
1072
1073 if ((flag & FWRITE) == 0)
1074 return (EBADF);
1075
1076 #ifdef __HAVE_OLD_DISKLABEL
1077 if (cmd == ODIOCSDINFO || cmd == ODIOCWDINFO) {
1078 newlabel = malloc(sizeof *newlabel, M_TEMP, M_WAITOK);
1079 if (newlabel == NULL)
1080 return EIO;
1081 			memset(newlabel, 0, sizeof *newlabel);
1082 memcpy(newlabel, addr, sizeof (struct olddisklabel));
1083 lp = newlabel;
1084 } else
1085 #endif
1086 lp = (struct disklabel *)addr;
1087
1088 if ((error = sdlock(sd)) != 0)
1089 goto bad;
1090 sd->flags |= SDF_LABELLING;
1091
1092 error = setdisklabel(sd->sc_dk.dk_label,
1093 lp, /*sd->sc_dk.dk_openmask : */0,
1094 sd->sc_dk.dk_cpulabel);
1095 if (error == 0) {
1096 if (cmd == DIOCWDINFO
1097 #ifdef __HAVE_OLD_DISKLABEL
1098 || cmd == ODIOCWDINFO
1099 #endif
1100 )
1101 error = writedisklabel(SDLABELDEV(dev),
1102 sdstrategy, sd->sc_dk.dk_label,
1103 sd->sc_dk.dk_cpulabel);
1104 }
1105
1106 sd->flags &= ~SDF_LABELLING;
1107 sdunlock(sd);
1108 bad:
1109 #ifdef __HAVE_OLD_DISKLABEL
1110 if (newlabel != NULL)
1111 free(newlabel, M_TEMP);
1112 #endif
1113 return (error);
1114 }
1115
1116 case DIOCKLABEL:
1117 if (*(int *)addr)
1118 periph->periph_flags |= PERIPH_KEEP_LABEL;
1119 else
1120 periph->periph_flags &= ~PERIPH_KEEP_LABEL;
1121 return (0);
1122
1123 case DIOCWLABEL:
1124 if ((flag & FWRITE) == 0)
1125 return (EBADF);
1126 if (*(int *)addr)
1127 sd->flags |= SDF_WLABEL;
1128 else
1129 sd->flags &= ~SDF_WLABEL;
1130 return (0);
1131
1132 case DIOCLOCK:
1133 return (scsipi_prevent(periph,
1134 (*(int *)addr) ? PR_PREVENT : PR_ALLOW, 0));
1135
1136 case DIOCEJECT:
1137 if ((periph->periph_flags & PERIPH_REMOVABLE) == 0)
1138 return (ENOTTY);
1139 if (*(int *)addr == 0) {
1140 /*
1141 * Don't force eject: check that we are the only
1142 * partition open. If so, unlock it.
1143 */
1144 if ((sd->sc_dk.dk_openmask & ~(1 << part)) == 0 &&
1145 sd->sc_dk.dk_bopenmask + sd->sc_dk.dk_copenmask ==
1146 sd->sc_dk.dk_openmask) {
1147 error = scsipi_prevent(periph, PR_ALLOW,
1148 XS_CTL_IGNORE_NOT_READY);
1149 if (error)
1150 return (error);
1151 } else {
1152 return (EBUSY);
1153 }
1154 }
1155 /* FALLTHROUGH */
1156 case ODIOCEJECT:
1157 return ((periph->periph_flags & PERIPH_REMOVABLE) == 0 ?
1158 ENOTTY : scsipi_start(periph, SSS_STOP|SSS_LOEJ, 0));
1159
1160 case DIOCGDEFLABEL:
1161 sdgetdefaultlabel(sd, (struct disklabel *)addr);
1162 return (0);
1163
1164 #ifdef __HAVE_OLD_DISKLABEL
1165 case ODIOCGDEFLABEL:
1166 newlabel = malloc(sizeof *newlabel, M_TEMP, M_WAITOK);
1167 if (newlabel == NULL)
1168 return EIO;
1169 sdgetdefaultlabel(sd, newlabel);
1170 if (newlabel->d_npartitions <= OLDMAXPARTITIONS)
1171 memcpy(addr, newlabel, sizeof (struct olddisklabel));
1172 else
1173 error = ENOTTY;
1174 free(newlabel, M_TEMP);
1175 return error;
1176 #endif
1177
1178 case DIOCGCACHE:
1179 return (sd_getcache(sd, (int *) addr));
1180
1181 case DIOCSCACHE:
1182 if ((flag & FWRITE) == 0)
1183 return (EBADF);
1184 return (sd_setcache(sd, *(int *) addr));
1185
1186 case DIOCCACHESYNC:
1187 /*
1188 * XXX Do we really need to care about having a writable
1189 * file descriptor here?
1190 */
1191 if ((flag & FWRITE) == 0)
1192 return (EBADF);
1193 if (((sd->flags & SDF_DIRTY) != 0 || *(int *)addr != 0)) {
1194 error = sd_flush(sd, 0);
1195 if (error)
1196 sd->flags &= ~SDF_FLUSHING;
1197 else
1198 sd->flags &= ~(SDF_FLUSHING|SDF_DIRTY);
1199 } else
1200 error = 0;
1201 return (error);
1202
1203 default:
1204 if (part != RAW_PART)
1205 return (ENOTTY);
1206 return (scsipi_do_ioctl(periph, dev, cmd, addr, flag, p));
1207 }
1208
1209 #ifdef DIAGNOSTIC
1210 panic("sdioctl: impossible");
1211 #endif
1212 }
1213
1214 void
1215 sdgetdefaultlabel(sd, lp)
1216 struct sd_softc *sd;
1217 struct disklabel *lp;
1218 {
1219
1220 memset(lp, 0, sizeof(struct disklabel));
1221
1222 lp->d_secsize = sd->params.blksize;
1223 lp->d_ntracks = sd->params.heads;
1224 lp->d_nsectors = sd->params.sectors;
1225 lp->d_ncylinders = sd->params.cyls;
1226 lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
1227
1228 switch (scsipi_periph_bustype(sd->sc_periph)) {
1229 case SCSIPI_BUSTYPE_SCSI:
1230 lp->d_type = DTYPE_SCSI;
1231 break;
1232 case SCSIPI_BUSTYPE_ATAPI:
1233 lp->d_type = DTYPE_ATAPI;
1234 break;
1235 }
1236 /*
1237 * XXX
1238 * We could probe the mode pages to figure out what kind of disc it is.
1239 * Is this worthwhile?
1240 */
1241 strncpy(lp->d_typename, "mydisk", 16);
1242 strncpy(lp->d_packname, "fictitious", 16);
1243 lp->d_secperunit = sd->params.disksize;
1244 lp->d_rpm = sd->params.rot_rate;
1245 lp->d_interleave = 1;
1246 lp->d_flags = sd->sc_periph->periph_flags & PERIPH_REMOVABLE ?
1247 D_REMOVABLE : 0;
1248
1249 lp->d_partitions[RAW_PART].p_offset = 0;
1250 lp->d_partitions[RAW_PART].p_size =
1251 lp->d_secperunit * (lp->d_secsize / DEV_BSIZE);
1252 lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED;
1253 lp->d_npartitions = RAW_PART + 1;
1254
1255 lp->d_magic = DISKMAGIC;
1256 lp->d_magic2 = DISKMAGIC;
1257 lp->d_checksum = dkcksum(lp);
1258 }
1259
1260
1261 /*
1262 * Load the label information on the named device
1263 */
1264 void
1265 sdgetdisklabel(sd)
1266 struct sd_softc *sd;
1267 {
1268 struct disklabel *lp = sd->sc_dk.dk_label;
1269 const char *errstring;
1270
1271 memset(sd->sc_dk.dk_cpulabel, 0, sizeof(struct cpu_disklabel));
1272
1273 sdgetdefaultlabel(sd, lp);
1274
1275 if (lp->d_secpercyl == 0) {
1276 lp->d_secpercyl = 100;
1277 /* as long as it's not 0 - readdisklabel divides by it (?) */
1278 }
1279
1280 /*
1281 * Call the generic disklabel extraction routine
1282 */
1283 errstring = readdisklabel(MAKESDDEV(0, sd->sc_dev.dv_unit, RAW_PART),
1284 sdstrategy, lp, sd->sc_dk.dk_cpulabel);
1285 if (errstring) {
1286 printf("%s: %s\n", sd->sc_dev.dv_xname, errstring);
1287 return;
1288 }
1289 }
1290
1291 void
1292 sd_shutdown(arg)
1293 void *arg;
1294 {
1295 struct sd_softc *sd = arg;
1296
1297 /*
1298 * If the disk cache needs to be flushed, and the disk supports
1299 * it, flush it. We're cold at this point, so we poll for
1300 * completion.
1301 */
1302 if ((sd->flags & SDF_DIRTY) != 0) {
1303 if (sd_flush(sd, XS_CTL_NOSLEEP|XS_CTL_POLL)) {
1304 printf("%s: cache synchronization failed\n",
1305 sd->sc_dev.dv_xname);
1306 sd->flags &= ~SDF_FLUSHING;
1307 } else
1308 sd->flags &= ~(SDF_FLUSHING|SDF_DIRTY);
1309 }
1310 }
1311
1312 /*
1313 * Tell the device to map out a defective block
1314 */
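/*
 * REASSIGN BLOCKS is sent with a single-entry defect list holding the
 * 4-byte logical block address; the drive remaps that block to a spare.
 */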
1315 int
1316 sd_reassign_blocks(sd, blkno)
1317 struct sd_softc *sd;
1318 u_long blkno;
1319 {
1320 struct scsi_reassign_blocks scsipi_cmd;
1321 struct scsi_reassign_blocks_data rbdata;
1322
1323 memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
1324 memset(&rbdata, 0, sizeof(rbdata));
1325 scsipi_cmd.opcode = SCSI_REASSIGN_BLOCKS;
1326
1327 _lto2b(sizeof(rbdata.defect_descriptor[0]), rbdata.length);
1328 _lto4b(blkno, rbdata.defect_descriptor[0].dlbaddr);
1329
1330 return (scsipi_command(sd->sc_periph,
1331 (struct scsipi_generic *)&scsipi_cmd, sizeof(scsipi_cmd),
1332 (u_char *)&rbdata, sizeof(rbdata), SDRETRIES, 5000, NULL,
1333 XS_CTL_DATA_OUT | XS_CTL_DATA_ONSTACK));
1334 }
1335
1336 /*
1337 * Check Errors
1338 */
1339 int
1340 sd_interpret_sense(xs)
1341 struct scsipi_xfer *xs;
1342 {
1343 struct scsipi_periph *periph = xs->xs_periph;
1344 struct scsipi_sense_data *sense = &xs->sense.scsi_sense;
1345 struct sd_softc *sd = (void *)periph->periph_dev;
1346 int s, error, retval = EJUSTRETURN;
1347
1348 /*
1349 * If the periph is already recovering, just do the normal
1350 * error processing.
1351 */
1352 if (periph->periph_flags & PERIPH_RECOVERING)
1353 return (retval);
1354
1355 /*
1356 * If the device is not open yet, let the generic code handle it.
1357 */
1358 if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0)
1359 return (retval);
1360
1361 /*
1362 	 * If it isn't an extended or extended/deferred error, let
1363 * the generic code handle it.
1364 */
1365 if ((sense->error_code & SSD_ERRCODE) != 0x70 &&
1366 (sense->error_code & SSD_ERRCODE) != 0x71)
1367 return (retval);
1368
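	/*
	 * NOT READY with ASC 0x04 (logical unit not ready): ASCQ 0x01 means
	 * the unit is in the process of becoming ready, so wait and retry;
	 * ASCQ 0x02 means an initializing (START UNIT) command is required.
	 */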
1369 if ((sense->flags & SSD_KEY) == SKEY_NOT_READY &&
1370 sense->add_sense_code == 0x4) {
1371 if (sense->add_sense_code_qual == 0x01) {
1372 /*
1373 * Unit In The Process Of Becoming Ready.
1374 */
1375 printf("%s: waiting for pack to spin up...\n",
1376 sd->sc_dev.dv_xname);
1377 if (!callout_pending(&periph->periph_callout))
1378 scsipi_periph_freeze(periph, 1);
1379 callout_reset(&periph->periph_callout,
1380 5 * hz, scsipi_periph_timed_thaw, periph);
1381 retval = ERESTART;
1382 } else if (sense->add_sense_code_qual == 0x02) {
1383 printf("%s: pack is stopped, restarting...\n",
1384 sd->sc_dev.dv_xname);
1385 s = splbio();
1386 periph->periph_flags |= PERIPH_RECOVERING;
1387 splx(s);
1388 error = scsipi_start(periph, SSS_START,
1389 XS_CTL_URGENT|XS_CTL_HEAD_TAG|
1390 XS_CTL_THAW_PERIPH|XS_CTL_FREEZE_PERIPH);
1391 if (error) {
1392 printf("%s: unable to restart pack\n",
1393 sd->sc_dev.dv_xname);
1394 retval = error;
1395 } else
1396 retval = ERESTART;
1397 s = splbio();
1398 periph->periph_flags &= ~PERIPH_RECOVERING;
1399 splx(s);
1400 }
1401 }
1402 return (retval);
1403 }
1404
1405
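/*
 * Return the size of the given partition in DEV_BSIZE units, for swap and
 * dump purposes, or -1 if the device or partition cannot be used.
 */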
1406 int
1407 sdsize(dev)
1408 dev_t dev;
1409 {
1410 struct sd_softc *sd;
1411 int part, unit, omask;
1412 int size;
1413
1414 unit = SDUNIT(dev);
1415 if (unit >= sd_cd.cd_ndevs)
1416 return (-1);
1417 sd = sd_cd.cd_devs[unit];
1418 if (sd == NULL)
1419 return (-1);
1420
1421 if ((sd->sc_dev.dv_flags & DVF_ACTIVE) == 0)
1422 return (-1);
1423
1424 part = SDPART(dev);
1425 omask = sd->sc_dk.dk_openmask & (1 << part);
1426
1427 if (omask == 0 && sdopen(dev, 0, S_IFBLK, NULL) != 0)
1428 return (-1);
1429 if ((sd->sc_periph->periph_flags & PERIPH_MEDIA_LOADED) == 0)
1430 size = -1;
1431 else if (sd->sc_dk.dk_label->d_partitions[part].p_fstype != FS_SWAP)
1432 size = -1;
1433 else
1434 size = sd->sc_dk.dk_label->d_partitions[part].p_size *
1435 (sd->sc_dk.dk_label->d_secsize / DEV_BSIZE);
1436 if (omask == 0 && sdclose(dev, 0, S_IFBLK, NULL) != 0)
1437 return (-1);
1438 return (size);
1439 }
1440
1441 /* #define SD_DUMP_NOT_TRUSTED if you just want to watch */
1442 static struct scsipi_xfer sx;
1443 static int sddoingadump;
1444
1445 /*
1446 * dump all of physical memory into the partition specified, starting
1447 * at offset 'dumplo' into the partition.
1448 */
1449 int
1450 sddump(dev, blkno, va, size)
1451 dev_t dev;
1452 daddr_t blkno;
1453 caddr_t va;
1454 size_t size;
1455 {
1456 struct sd_softc *sd; /* disk unit to do the I/O */
1457 struct disklabel *lp; /* disk's disklabel */
1458 int unit, part;
1459 int sectorsize; /* size of a disk sector */
1460 int nsects; /* number of sectors in partition */
1461 int sectoff; /* sector offset of partition */
1462 int totwrt; /* total number of sectors left to write */
1463 int nwrt; /* current number of sectors to write */
1464 struct scsipi_rw_big cmd; /* write command */
1465 struct scsipi_xfer *xs; /* ... convenience */
1466 struct scsipi_periph *periph;
1467 struct scsipi_channel *chan;
1468
1469 /* Check if recursive dump; if so, punt. */
1470 if (sddoingadump)
1471 return (EFAULT);
1472
1473 /* Mark as active early. */
1474 sddoingadump = 1;
1475
1476 unit = SDUNIT(dev); /* Decompose unit & partition. */
1477 part = SDPART(dev);
1478
1479 /* Check for acceptable drive number. */
1480 if (unit >= sd_cd.cd_ndevs || (sd = sd_cd.cd_devs[unit]) == NULL)
1481 return (ENXIO);
1482
1483 if ((sd->sc_dev.dv_flags & DVF_ACTIVE) == 0)
1484 return (ENODEV);
1485
1486 periph = sd->sc_periph;
1487 chan = periph->periph_channel;
1488
1489 /* Make sure it was initialized. */
1490 if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0)
1491 return (ENXIO);
1492
1493 /* Convert to disk sectors. Request must be a multiple of size. */
1494 lp = sd->sc_dk.dk_label;
1495 sectorsize = lp->d_secsize;
1496 if ((size % sectorsize) != 0)
1497 return (EFAULT);
1498 totwrt = size / sectorsize;
1499 blkno = dbtob(blkno) / sectorsize; /* blkno in DEV_BSIZE units */
1500
1501 nsects = lp->d_partitions[part].p_size;
1502 sectoff = lp->d_partitions[part].p_offset;
1503
1504 /* Check transfer bounds against partition size. */
1505 if ((blkno < 0) || ((blkno + totwrt) > nsects))
1506 return (EINVAL);
1507
1508 /* Offset block number to start of partition. */
1509 blkno += sectoff;
1510
1511 xs = &sx;
1512
1513 while (totwrt > 0) {
1514 nwrt = totwrt; /* XXX */
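		/*
		 * XXX nwrt is not clamped here; a dump segment larger than
		 * the 16-bit length field of the 10-byte WRITE (or than the
		 * adapter can take in one transfer) would not be split up.
		 */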
1515 #ifndef SD_DUMP_NOT_TRUSTED
1516 /*
1517 * Fill out the scsi command
1518 */
1519 memset(&cmd, 0, sizeof(cmd));
1520 cmd.opcode = WRITE_BIG;
1521 _lto4b(blkno, cmd.addr);
1522 _lto2b(nwrt, cmd.length);
1523 /*
1524 		 * Fill out the scsipi_xfer structure.
1525 		 * Note: we cannot sleep as we may be an interrupt;
1526 * don't use scsipi_command() as it may want to wait
1527 * for an xs.
1528 */
1529 memset(xs, 0, sizeof(sx));
1530 xs->xs_control |= XS_CTL_NOSLEEP | XS_CTL_POLL |
1531 XS_CTL_DATA_OUT;
1532 xs->xs_status = 0;
1533 xs->xs_periph = periph;
1534 xs->xs_retries = SDRETRIES;
1535 xs->timeout = 10000; /* 10000 millisecs for a disk ! */
1536 xs->cmd = (struct scsipi_generic *)&cmd;
1537 xs->cmdlen = sizeof(cmd);
1538 xs->resid = nwrt * sectorsize;
1539 xs->error = XS_NOERROR;
1540 xs->bp = 0;
1541 xs->data = va;
1542 xs->datalen = nwrt * sectorsize;
1543
1544 /*
1545 * Pass all this info to the scsi driver.
1546 */
1547 scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
1548 if ((xs->xs_status & XS_STS_DONE) == 0 ||
1549 xs->error != XS_NOERROR)
1550 return (EIO);
1551 #else /* SD_DUMP_NOT_TRUSTED */
1552 /* Let's just talk about this first... */
1553 printf("sd%d: dump addr 0x%x, blk %d\n", unit, va, blkno);
1554 delay(500 * 1000); /* half a second */
1555 #endif /* SD_DUMP_NOT_TRUSTED */
1556
1557 /* update block count */
1558 totwrt -= nwrt;
1559 blkno += nwrt;
1560 va += sectorsize * nwrt;
1561 }
1562 sddoingadump = 0;
1563 return (0);
1564 }
1565
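/*
 * MODE SENSE/MODE SELECT helpers.  Devices flagged PQUIRK_ONLYBIG (and not
 * PQUIRK_NOBIGMODESENSE) get the 10-byte "big" command forms, everything
 * else the 6-byte forms; *big tells the caller which header type precedes
 * the returned data.
 */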
1566 int
1567 sd_mode_sense(sd, byte2, sense, size, page, flags, big)
1568 struct sd_softc *sd;
1569 u_int8_t byte2;
1570 void *sense;
1571 size_t size;
1572 int page, flags;
1573 int *big;
1574 {
1575
1576 if ((sd->sc_periph->periph_quirks & PQUIRK_ONLYBIG) &&
1577 !(sd->sc_periph->periph_quirks & PQUIRK_NOBIGMODESENSE)) {
1578 *big = 1;
1579 return scsipi_mode_sense_big(sd->sc_periph, byte2, page, sense,
1580 size + sizeof(struct scsipi_mode_header_big),
1581 flags | XS_CTL_DATA_ONSTACK, SDRETRIES, 6000);
1582 } else {
1583 *big = 0;
1584 return scsipi_mode_sense(sd->sc_periph, byte2, page, sense,
1585 size + sizeof(struct scsipi_mode_header),
1586 flags | XS_CTL_DATA_ONSTACK, SDRETRIES, 6000);
1587 }
1588 }
1589
1590 int
1591 sd_mode_select(sd, byte2, sense, size, flags, big)
1592 struct sd_softc *sd;
1593 u_int8_t byte2;
1594 void *sense;
1595 size_t size;
1596 int flags, big;
1597 {
1598
1599 if (big) {
1600 struct scsipi_mode_header_big *header = sense;
1601
1602 _lto2b(0, header->data_length);
1603 return scsipi_mode_select_big(sd->sc_periph, byte2, sense,
1604 size + sizeof(struct scsipi_mode_header_big),
1605 flags | XS_CTL_DATA_ONSTACK, SDRETRIES, 6000);
1606 } else {
1607 struct scsipi_mode_header *header = sense;
1608
1609 header->data_length = 0;
1610 return scsipi_mode_select(sd->sc_periph, byte2, sense,
1611 size + sizeof(struct scsipi_mode_header),
1612 flags | XS_CTL_DATA_ONSTACK, SDRETRIES, 6000);
1613 }
1614 }
1615
1616 int
1617 sd_get_simplifiedparms(sd, dp, flags)
1618 struct sd_softc *sd;
1619 struct disk_parms *dp;
1620 int flags;
1621 {
1622 struct {
1623 struct scsipi_mode_header header;
1624 /* no block descriptor */
1625 u_int8_t pg_code; /* page code (should be 6) */
1626 u_int8_t pg_length; /* page length (should be 11) */
1627 u_int8_t wcd; /* bit0: cache disable */
1628 u_int8_t lbs[2]; /* logical block size */
1629 u_int8_t size[5]; /* number of log. blocks */
1630 u_int8_t pp; /* power/performance */
1631 u_int8_t flags;
1632 u_int8_t resvd;
1633 } scsipi_sense;
1634 u_int64_t sectors;
1635 int error;
1636
1637 /*
1638 * scsipi_size (ie "read capacity") and mode sense page 6
1639 * give the same information. Do both for now, and check
1640 * for consistency.
1641 * XXX probably differs for removable media
1642 */
1643 dp->blksize = 512;
1644 if ((sectors = scsipi_size(sd->sc_periph, flags)) == 0)
1645 return (SDGP_RESULT_OFFLINE); /* XXX? */
1646
1647 error = scsipi_mode_sense(sd->sc_periph, SMS_DBD, 6,
1648 &scsipi_sense.header, sizeof(scsipi_sense),
1649 flags | XS_CTL_DATA_ONSTACK, SDRETRIES, 6000);
1650
1651 if (error != 0)
1652 return (SDGP_RESULT_OFFLINE); /* XXX? */
1653
1654 dp->blksize = _2btol(scsipi_sense.lbs);
1655 if (dp->blksize == 0)
1656 dp->blksize = 512;
1657
1658 /*
1659 * Create a pseudo-geometry.
1660 */
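	/* 64 heads x 32 sectors is 2048 sectors (1MB at 512 bytes) per cylinder. */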
1661 dp->heads = 64;
1662 dp->sectors = 32;
1663 dp->cyls = sectors / (dp->heads * dp->sectors);
1664 dp->disksize = _5btol(scsipi_sense.size);
1665 if (dp->disksize <= UINT32_MAX && dp->disksize != sectors) {
1666 printf("RBC size: mode sense=%llu, get cap=%llu\n",
1667 (unsigned long long)dp->disksize,
1668 (unsigned long long)sectors);
1669 dp->disksize = sectors;
1670 }
1671 dp->disksize512 = (dp->disksize * dp->blksize) / DEV_BSIZE;
1672
1673 return (SDGP_RESULT_OK);
1674 }
1675
1676 /*
1677  * Get the scsi driver to send a full inquiry to the device and use the
1678 * results to fill out the disk parameter structure.
1679 */
1680 int
1681 sd_get_capacity(sd, dp, flags)
1682 struct sd_softc *sd;
1683 struct disk_parms *dp;
1684 int flags;
1685 {
1686 u_int64_t sectors;
1687 int error;
1688 #if 0
1689 int i;
1690 u_int8_t *p;
1691 #endif
1692
1693 dp->disksize = sectors = scsipi_size(sd->sc_periph, flags);
1694 if (sectors == 0) {
1695 struct scsipi_read_format_capacities scsipi_cmd;
1696 struct {
1697 struct scsipi_capacity_list_header header;
1698 struct scsipi_capacity_descriptor desc;
1699 } __attribute__((packed)) scsipi_result;
1700
1701 memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
1702 memset(&scsipi_result, 0, sizeof(scsipi_result));
1703 scsipi_cmd.opcode = READ_FORMAT_CAPACITIES;
1704 _lto2b(sizeof(scsipi_result), scsipi_cmd.length);
1705 error = scsipi_command(sd->sc_periph, (void *)&scsipi_cmd,
1706 sizeof(scsipi_cmd), (void *)&scsipi_result,
1707 sizeof(scsipi_result), SDRETRIES, 20000,
1708 NULL, flags | XS_CTL_DATA_IN | XS_CTL_DATA_ONSTACK /*|
1709 XS_CTL_IGNORE_ILLEGAL_REQUEST*/);
1710 if (error || scsipi_result.header.length == 0)
1711 return (SDGP_RESULT_OFFLINE);
1712
1713 #if 0
1714 printf("rfc: length=%d\n", scsipi_result.header.length);
1715 printf("rfc result:"); for (i = sizeof(struct scsipi_capacity_list_header) + scsipi_result.header.length, p = (void *)&scsipi_result; i; i--, p++) printf(" %02x", *p); printf("\n");
1716 #endif
1717 switch (scsipi_result.desc.byte5 & SCSIPI_CAP_DESC_CODE_MASK) {
1718 case SCSIPI_CAP_DESC_CODE_RESERVED:
1719 case SCSIPI_CAP_DESC_CODE_FORMATTED:
1720 break;
1721
1722 case SCSIPI_CAP_DESC_CODE_UNFORMATTED:
1723 return (SDGP_RESULT_UNFORMATTED);
1724
1725 case SCSIPI_CAP_DESC_CODE_NONE:
1726 return (SDGP_RESULT_OFFLINE);
1727 }
1728
1729 dp->disksize = sectors = _4btol(scsipi_result.desc.nblks);
1730 if (sectors == 0)
1731 return (SDGP_RESULT_OFFLINE); /* XXX? */
1732
1733 dp->blksize = _3btol(scsipi_result.desc.blklen);
1734 if (dp->blksize == 0)
1735 dp->blksize = 512;
1736 } else {
1737 struct sd_mode_sense_data scsipi_sense;
1738 int big, bsize;
1739 struct scsi_blk_desc *bdesc;
1740
1741 memset(&scsipi_sense, 0, sizeof(scsipi_sense));
1742 error = sd_mode_sense(sd, 0, &scsipi_sense,
1743 sizeof(scsipi_sense.blk_desc), 0, flags | XS_CTL_SILENT, &big);
1744 dp->blksize = 512;
1745 if (!error) {
1746 if (big) {
1747 bdesc = (void *)(&scsipi_sense.header.big + 1);
1748 bsize = _2btol(scsipi_sense.header.big.blk_desc_len);
1749 } else {
1750 bdesc = (void *)(&scsipi_sense.header.small + 1);
1751 bsize = scsipi_sense.header.small.blk_desc_len;
1752 }
1753
1754 #if 0
1755 printf("page 0 sense:"); for (i = sizeof(scsipi_sense), p = (void *)&scsipi_sense; i; i--, p++) printf(" %02x", *p); printf("\n");
1756 printf("page 0 bsize=%d\n", bsize);
1757 printf("page 0 ok\n");
1758 #endif
1759
1760 if (bsize >= 8) {
1761 dp->blksize = _3btol(bdesc->blklen);
1762 if (dp->blksize == 0)
1763 dp->blksize = 512;
1764 }
1765 }
1766 }
1767
1768 dp->disksize512 = (sectors * dp->blksize) / DEV_BSIZE;
1769 return (0);
1770 }
1771
1772 int
1773 sd_get_parms(sd, dp, flags)
1774 struct sd_softc *sd;
1775 struct disk_parms *dp;
1776 int flags;
1777 {
1778 struct sd_mode_sense_data scsipi_sense;
1779 int error;
1780 int big;
1781 union scsi_disk_pages *pages;
1782 #if 0
1783 int i;
1784 u_int8_t *p;
1785 #endif
1786
1787 /*
1788 	 * If offline, the PERIPH_MEDIA_LOADED flag will be
1789 * cleared by the caller if necessary.
1790 */
1791 if (sd->type == T_SIMPLE_DIRECT)
1792 return (sd_get_simplifiedparms(sd, dp, flags));
1793
1794 error = sd_get_capacity(sd, dp, flags);
1795 if (error)
1796 return (error);
1797
1798 if (sd->type == T_OPTICAL)
1799 goto page0;
1800
1801 memset(&scsipi_sense, 0, sizeof(scsipi_sense));
1802 error = sd_mode_sense(sd, SMS_DBD, &scsipi_sense,
1803 sizeof(scsipi_sense.blk_desc) +
1804 sizeof(scsipi_sense.pages.rigid_geometry), 4,
1805 flags | XS_CTL_SILENT, &big);
1806 if (!error) {
1807 if (big)
1808 pages = (void *)(&scsipi_sense.header.big + 1);
1809 else
1810 pages = (void *)(&scsipi_sense.header.small + 1);
1811
1812 #if 0
1813 printf("page 4 sense:"); for (i = sizeof(scsipi_sense), p = (void *)&scsipi_sense; i; i--, p++) printf(" %02x", *p); printf("\n");
1814 printf("page 4 pg_code=%d sense=%p/%p\n", pages->rigid_geometry.pg_code, &scsipi_sense, pages);
1815 #endif
1816
1817 if ((pages->rigid_geometry.pg_code & PGCODE_MASK) != 4)
1818 goto page5;
1819
1820 SC_DEBUG(sd->sc_periph, SCSIPI_DB3,
1821 ("%d cyls, %d heads, %d precomp, %d red_write, %d land_zone\n",
1822 _3btol(pages->rigid_geometry.ncyl),
1823 pages->rigid_geometry.nheads,
1824 _2btol(pages->rigid_geometry.st_cyl_wp),
1825 _2btol(pages->rigid_geometry.st_cyl_rwc),
1826 _2btol(pages->rigid_geometry.land_zone)));
1827
1828 /*
1829 * KLUDGE!! (for zone recorded disks)
1830 * give a number of sectors so that sec * trks * cyls
1831 * is <= disk_size
1832 * can lead to wasted space! THINK ABOUT THIS !
1833 */
1834 dp->heads = pages->rigid_geometry.nheads;
1835 dp->cyls = _3btol(pages->rigid_geometry.ncyl);
1836 if (dp->heads == 0 || dp->cyls == 0)
1837 goto page5;
1838 dp->sectors = dp->disksize / (dp->heads * dp->cyls); /* XXX */
1839
1840 dp->rot_rate = _2btol(pages->rigid_geometry.rpm);
1841 if (dp->rot_rate == 0)
1842 dp->rot_rate = 3600;
1843
1844 #if 0
1845 printf("page 4 ok\n");
1846 #endif
1847 goto blksize;
1848 }
1849
1850 page5:
1851 	memset(&scsipi_sense, 0, sizeof(scsipi_sense));
1852 	error = sd_mode_sense(sd, SMS_DBD, &scsipi_sense,
1853 sizeof(scsipi_sense.blk_desc) +
1854 sizeof(scsipi_sense.pages.flex_geometry), 5,
1855 flags | XS_CTL_SILENT, &big);
1856 if (!error) {
1857 if (big)
1858 pages = (void *)(&scsipi_sense.header.big + 1);
1859 else
1860 pages = (void *)(&scsipi_sense.header.small + 1);
1861
1862 #if 0
1863 printf("page 5 sense:"); for (i = sizeof(scsipi_sense), p = (void *)&scsipi_sense; i; i--, p++) printf(" %02x", *p); printf("\n");
1864 printf("page 5 pg_code=%d sense=%p/%p\n", pages->flex_geometry.pg_code, &scsipi_sense, pages);
1865 #endif
1866
1867 if ((pages->flex_geometry.pg_code & PGCODE_MASK) != 5)
1868 goto page0;
1869
1870 SC_DEBUG(sd->sc_periph, SCSIPI_DB3,
1871 ("%d cyls, %d heads, %d sec, %d bytes/sec\n",
1872 _3btol(pages->flex_geometry.ncyl),
1873 pages->flex_geometry.nheads,
1874 pages->flex_geometry.ph_sec_tr,
1875 _2btol(pages->flex_geometry.bytes_s)));
1876
1877 dp->heads = pages->flex_geometry.nheads;
1878 dp->cyls = _2btol(pages->flex_geometry.ncyl);
1879 dp->sectors = pages->flex_geometry.ph_sec_tr;
1880 if (dp->heads == 0 || dp->cyls == 0 || dp->sectors == 0)
1881 goto page0;
1882
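		/*
		 * XXX The rotation rate is read through the rigid_geometry
		 * member of the union, but the flexible geometry page keeps
		 * that field at a different offset.
		 */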
1883 dp->rot_rate = _2btol(pages->rigid_geometry.rpm);
1884 if (dp->rot_rate == 0)
1885 dp->rot_rate = 3600;
1886
1887 #if 0
1888 printf("page 5 ok\n");
1889 #endif
1890 goto blksize;
1891 }
1892
1893 page0:
1894 printf("%s: fabricating a geometry\n", sd->sc_dev.dv_xname);
1895 /* Try calling driver's method for figuring out geometry. */
1896 if (!sd->sc_periph->periph_channel->chan_adapter->adapt_getgeom ||
1897 !(*sd->sc_periph->periph_channel->chan_adapter->adapt_getgeom)
1898 (sd->sc_periph, dp, dp->disksize)) {
1899 /*
1900 		 * Use the Adaptec standard fictitious geometry.
1901 		 * This depends on which controller (e.g. the 1542C is
1902 		 * different), but we have to put SOMETHING here.
1903 */
1904 dp->heads = 64;
1905 dp->sectors = 32;
1906 dp->cyls = dp->disksize / (64 * 32);
1907 }
1908 dp->rot_rate = 3600;
1909
1910 blksize:
1911 return (SDGP_RESULT_OK);
1912 }
1913
1914 int
1915 sd_flush(sd, flags)
1916 struct sd_softc *sd;
1917 int flags;
1918 {
1919 struct scsipi_periph *periph = sd->sc_periph;
1920 struct scsi_synchronize_cache sync_cmd;
1921
1922 /*
1923 * If the device is SCSI-2, issue a SYNCHRONIZE CACHE.
1924 * We issue with address 0 length 0, which should be
1925 * interpreted by the device as "all remaining blocks
1926 * starting at address 0". We ignore ILLEGAL REQUEST
1927 * in the event that the command is not supported by
1928 * the device, and poll for completion so that we know
1929 * that the cache has actually been flushed.
1930 *
1931 * Unless, that is, the device can't handle the SYNCHRONIZE CACHE
1932 * command, as indicated by our quirks flags.
1933 *
1934 * XXX What about older devices?
1935 */
1936 if (periph->periph_version >= 2 &&
1937 (periph->periph_quirks & PQUIRK_NOSYNCCACHE) == 0) {
1938 sd->flags |= SDF_FLUSHING;
1939 memset(&sync_cmd, 0, sizeof(sync_cmd));
1940 sync_cmd.opcode = SCSI_SYNCHRONIZE_CACHE;
1941
1942 return(scsipi_command(periph,
1943 (struct scsipi_generic *)&sync_cmd, sizeof(sync_cmd),
1944 NULL, 0, SDRETRIES, 100000, NULL,
1945 flags|XS_CTL_IGNORE_ILLEGAL_REQUEST));
1946 } else
1947 return (0);
1948 }
1949
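/*
 * Report the state of the drive's caching mode page (page 8): whether the
 * read cache is enabled (RCD clear), whether the write cache is enabled
 * (WCE set), whether the page is savable, and, via a second MODE SENSE
 * asking for the changeable values, which of those bits can be modified.
 */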
1950 int
1951 sd_getcache(sd, bitsp)
1952 struct sd_softc *sd;
1953 int *bitsp;
1954 {
1955 struct scsipi_periph *periph = sd->sc_periph;
1956 struct sd_mode_sense_data scsipi_sense;
1957 int error, bits = 0;
1958 int big;
1959 union scsi_disk_pages *pages;
1960
1961 if (periph->periph_version < 2)
1962 return (EOPNOTSUPP);
1963
1964 memset(&scsipi_sense, 0, sizeof(scsipi_sense));
1965 error = sd_mode_sense(sd, SMS_DBD, &scsipi_sense,
1966 sizeof(scsipi_sense.pages.caching_params), 8, 0, &big);
1967 if (error)
1968 return (error);
1969
1970 if (big)
1971 pages = (void *)(&scsipi_sense.header.big + 1);
1972 else
1973 pages = (void *)(&scsipi_sense.header.small + 1);
1974
1975 if ((pages->caching_params.flags & CACHING_RCD) == 0)
1976 bits |= DKCACHE_READ;
1977 if (pages->caching_params.flags & CACHING_WCE)
1978 bits |= DKCACHE_WRITE;
1979 if (pages->caching_params.pg_code & PGCODE_PS)
1980 bits |= DKCACHE_SAVE;
1981
1982 memset(&scsipi_sense, 0, sizeof(scsipi_sense));
1983 error = sd_mode_sense(sd, SMS_DBD, &scsipi_sense,
1984 sizeof(scsipi_sense.pages.caching_params),
1985 SMS_PAGE_CTRL_CHANGEABLE|8, 0, &big);
1986 if (error == 0) {
1987 if (big)
1988 pages = (void *)(&scsipi_sense.header.big + 1);
1989 else
1990 pages = (void *)(&scsipi_sense.header.small + 1);
1991
1992 if (pages->caching_params.flags & CACHING_RCD)
1993 bits |= DKCACHE_RCHANGE;
1994 if (pages->caching_params.flags & CACHING_WCE)
1995 bits |= DKCACHE_WCHANGE;
1996 }
1997
1998 *bitsp = bits;
1999
2000 return (0);
2001 }
2002
2003 int
2004 sd_setcache(sd, bits)
2005 struct sd_softc *sd;
2006 int bits;
2007 {
2008 struct scsipi_periph *periph = sd->sc_periph;
2009 struct sd_mode_sense_data scsipi_sense;
2010 int error;
2011 uint8_t oflags, byte2 = 0;
2012 int big;
2013 union scsi_disk_pages *pages;
2014
2015 if (periph->periph_version < 2)
2016 return (EOPNOTSUPP);
2017
2018 memset(&scsipi_sense, 0, sizeof(scsipi_sense));
2019 error = sd_mode_sense(sd, SMS_DBD, &scsipi_sense,
2020 sizeof(scsipi_sense.pages.caching_params), 8, 0, &big);
2021 if (error)
2022 return (error);
2023
2024 if (big)
2025 pages = (void *)(&scsipi_sense.header.big + 1);
2026 else
2027 pages = (void *)(&scsipi_sense.header.small + 1);
2028
2029 oflags = pages->caching_params.flags;
2030
2031 if (bits & DKCACHE_READ)
2032 pages->caching_params.flags &= ~CACHING_RCD;
2033 else
2034 pages->caching_params.flags |= CACHING_RCD;
2035
2036 if (bits & DKCACHE_WRITE)
2037 pages->caching_params.flags |= CACHING_WCE;
2038 else
2039 pages->caching_params.flags &= ~CACHING_WCE;
2040
2041 if (oflags == pages->caching_params.flags)
2042 return (0);
2043
2044 pages->caching_params.pg_code &= PGCODE_MASK;
2045
2046 if (bits & DKCACHE_SAVE)
2047 byte2 |= SMS_SP;
2048
2049 return (sd_mode_select(sd, byte2|SMS_PF, &scsipi_sense,
2050 sizeof(struct scsipi_mode_page_header) +
2051 pages->caching_params.pg_length, 0, big));
2052 }
2053