sd.c revision 1.209 1 /* $NetBSD: sd.c,v 1.209 2003/09/18 00:06:36 mycroft Exp $ */
2
3 /*-
4 * Copyright (c) 1998, 2003 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Charles M. Hannum.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*
40 * Originally written by Julian Elischer (julian (at) dialix.oz.au)
41 * for TRW Financial Systems for use under the MACH(2.5) operating system.
42 *
43 * TRW Financial Systems, in accordance with their agreement with Carnegie
44 * Mellon University, makes this software available to CMU to distribute
45 * or use in any manner that they see fit as long as this message is kept with
46 * the software. For this reason TFS also grants any other persons or
47 * organisations permission to use or modify this software.
48 *
49 * TFS supplies this software to be publicly redistributed
50 * on the understanding that TFS is not responsible for the correct
51 * functioning of this software in any circumstances.
52 *
53 * Ported to run under 386BSD by Julian Elischer (julian (at) dialix.oz.au) Sept 1992
54 */
55
56 #include <sys/cdefs.h>
57 __KERNEL_RCSID(0, "$NetBSD: sd.c,v 1.209 2003/09/18 00:06:36 mycroft Exp $");
58
59 #include "opt_scsi.h"
60 #include "opt_bufq.h"
61 #include "rnd.h"
62
63 #include <sys/param.h>
64 #include <sys/systm.h>
65 #include <sys/kernel.h>
66 #include <sys/file.h>
67 #include <sys/stat.h>
68 #include <sys/ioctl.h>
69 #include <sys/scsiio.h>
70 #include <sys/buf.h>
71 #include <sys/uio.h>
72 #include <sys/malloc.h>
73 #include <sys/errno.h>
74 #include <sys/device.h>
75 #include <sys/disklabel.h>
76 #include <sys/disk.h>
77 #include <sys/proc.h>
78 #include <sys/conf.h>
79 #include <sys/vnode.h>
80 #if NRND > 0
81 #include <sys/rnd.h>
82 #endif
83
84 #include <dev/scsipi/scsipi_all.h>
85 #include <dev/scsipi/scsi_all.h>
86 #include <dev/scsipi/scsipi_disk.h>
87 #include <dev/scsipi/scsi_disk.h>
88 #include <dev/scsipi/scsiconf.h>
89 #include <dev/scsipi/sdvar.h>
90
91 #include "sd.h" /* NSD_SCSIBUS and NSD_ATAPIBUS come from here */
92
93 #define SDUNIT(dev) DISKUNIT(dev)
94 #define SDPART(dev) DISKPART(dev)
95 #define SDMINOR(unit, part) DISKMINOR(unit, part)
96 #define MAKESDDEV(maj, unit, part) MAKEDISKDEV(maj, unit, part)
97
98 #define SDLABELDEV(dev) (MAKESDDEV(major(dev), SDUNIT(dev), RAW_PART))
99
100 int sdlock __P((struct sd_softc *));
101 void sdunlock __P((struct sd_softc *));
102 void sdminphys __P((struct buf *));
103 void sdgetdefaultlabel __P((struct sd_softc *, struct disklabel *));
104 void sdgetdisklabel __P((struct sd_softc *));
105 void sdstart __P((struct scsipi_periph *));
106 void sddone __P((struct scsipi_xfer *));
107 void sd_shutdown __P((void *));
108 int sd_reassign_blocks __P((struct sd_softc *, u_long));
109 int sd_interpret_sense __P((struct scsipi_xfer *));
110
111 int sd_mode_sense __P((struct sd_softc *, u_int8_t, void *, size_t, int,
112 int, int *));
113 int sd_mode_select __P((struct sd_softc *, u_int8_t, void *, size_t, int,
114 int));
115 int sd_get_simplifiedparms __P((struct sd_softc *, struct disk_parms *,
116 int));
117 int sd_get_capacity __P((struct sd_softc *, struct disk_parms *, int));
118 int sd_get_parms __P((struct sd_softc *, struct disk_parms *, int));
119 int sd_flush __P((struct sd_softc *, int));
120 int sd_getcache __P((struct sd_softc *, int *));
121 int sd_setcache __P((struct sd_softc *, int));
122
123 int sdmatch __P((struct device *, struct cfdata *, void *));
124 void sdattach __P((struct device *, struct device *, void *));
125 int sdactivate __P((struct device *, enum devact));
126 int sddetach __P((struct device *, int));
127
128 CFATTACH_DECL(sd, sizeof(struct sd_softc), sdmatch, sdattach, sddetach,
129 sdactivate);
130
131 extern struct cfdriver sd_cd;
132
/*
 * INQUIRY match patterns: we claim direct-access, optical, and
 * simplified-direct-access devices, both fixed and removable.
 * Empty vendor/product/revision strings match any device of that type.
 */
const struct scsipi_inquiry_pattern sd_patterns[] = {
	{T_DIRECT, T_FIXED,
	 "",         "",                 ""},
	{T_DIRECT, T_REMOV,
	 "",         "",                 ""},
	{T_OPTICAL, T_FIXED,
	 "",         "",                 ""},
	{T_OPTICAL, T_REMOV,
	 "",         "",                 ""},
	{T_SIMPLE_DIRECT, T_FIXED,
	 "",         "",                 ""},
	{T_SIMPLE_DIRECT, T_REMOV,
	 "",         "",                 ""},
};
147
/* Prototype the devsw entry points via the dev_type_* helper macros. */
dev_type_open(sdopen);
dev_type_close(sdclose);
dev_type_read(sdread);
dev_type_write(sdwrite);
dev_type_ioctl(sdioctl);
dev_type_strategy(sdstrategy);
dev_type_dump(sddump);
dev_type_size(sdsize);

/* Block-device switch: entry points for the block (buffered) device. */
const struct bdevsw sd_bdevsw = {
	sdopen, sdclose, sdstrategy, sdioctl, sddump, sdsize, D_DISK
};

/* Character-device switch: raw I/O goes through sdread/sdwrite -> physio. */
const struct cdevsw sd_cdevsw = {
	sdopen, sdclose, sdread, sdwrite, sdioctl,
	nostop, notty, nopoll, nommap, nokqfilter, D_DISK
};

/* Hook our strategy routine into the generic disk framework. */
struct dkdriver sddkdriver = { sdstrategy };

/* Periph callbacks used by the scsipi midlayer for this device. */
const struct scsipi_periphsw sd_switch = {
	sd_interpret_sense,	/* check our error handler first */
	sdstart,		/* have a queue, served by this */
	NULL,			/* have no async handler */
	sddone,			/* deal with stats at interrupt time */
};
174
/*
 * Scratch buffer layout for MODE SENSE/MODE SELECT exchanges.
 * Sized to hold either a small (6-byte CDB) or big (10-byte CDB)
 * mode header, one block descriptor, and any of the disk mode pages.
 */
struct sd_mode_sense_data {
	/*
	 * XXX
	 * We are not going to parse this as-is -- it just has to be large
	 * enough.
	 */
	union {
		struct scsipi_mode_header small;
		struct scsipi_mode_header_big big;
	} header;
	struct scsi_blk_desc blk_desc;
	union scsi_disk_pages pages;
};
188
189 /*
190 * The routine called by the low level scsi routine when it discovers
191 * A device suitable for this driver
192 */
193 int
194 sdmatch(parent, match, aux)
195 struct device *parent;
196 struct cfdata *match;
197 void *aux;
198 {
199 struct scsipibus_attach_args *sa = aux;
200 int priority;
201
202 (void)scsipi_inqmatch(&sa->sa_inqbuf,
203 (caddr_t)sd_patterns, sizeof(sd_patterns) / sizeof(sd_patterns[0]),
204 sizeof(sd_patterns[0]), &priority);
205
206 return (priority);
207 }
208
209 /*
210 * Attach routine common to atapi & scsi.
211 */
/*
 * Attach routine common to atapi & scsi.
 *
 * Records device type/quirks, allocates the buffer queue, registers
 * with the disk(9) framework, probes the drive geometry, prints the
 * attach banner, and establishes a shutdown hook (and, if configured,
 * an entropy source).
 */
void
sdattach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct sd_softc *sd = (void *)self;
	struct scsipibus_attach_args *sa = aux;
	struct scsipi_periph *periph = sa->sa_periph;
	int error, result;
	struct disk_parms *dp = &sd->params;
	char pbuf[9];		/* formatted capacity, e.g. "8192 MB" */

	SC_DEBUG(periph, SCSIPI_DB2, ("sdattach: "));

	sd->type = (sa->sa_inqbuf.type & SID_TYPE);
	/* Simplified direct-access devices only grok 10-byte R/W CDBs. */
	if (sd->type == T_SIMPLE_DIRECT)
		periph->periph_quirks |= PQUIRK_ONLYBIG | PQUIRK_NOBIGMODESENSE;

	/* SCSI-1 devices (INQUIRY version 0) need special handling later. */
	if (scsipi_periph_bustype(sa->sa_periph) == SCSIPI_BUSTYPE_SCSI &&
	    periph->periph_version == 0)
		sd->flags |= SDF_ANCIENT;

#ifdef NEW_BUFQ_STRATEGY
	bufq_alloc(&sd->buf_queue, BUFQ_READ_PRIO|BUFQ_SORT_RAWBLOCK);
#else
	bufq_alloc(&sd->buf_queue, BUFQ_DISKSORT|BUFQ_SORT_RAWBLOCK);
#endif

	/*
	 * Store information needed to contact our base driver
	 */
	sd->sc_periph = periph;

	periph->periph_dev = &sd->sc_dev;
	periph->periph_switch = &sd_switch;

	/*
	 * Increase our openings to the maximum-per-periph
	 * supported by the adapter.  This will either be
	 * clamped down or grown by the adapter if necessary.
	 */
	periph->periph_openings =
	    SCSIPI_CHAN_MAX_PERIPH(periph->periph_channel);
	periph->periph_flags |= PERIPH_GROW_OPENINGS;

	/*
	 * Initialize and attach the disk structure.
	 */
	sd->sc_dk.dk_driver = &sddkdriver;
	sd->sc_dk.dk_name = sd->sc_dev.dv_xname;
	disk_attach(&sd->sc_dk);

	/*
	 * Use the subdriver to request information regarding the drive.
	 */
	aprint_naive("\n");
	aprint_normal("\n");

	error = scsipi_test_unit_ready(periph,
	    XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
	    XS_CTL_IGNORE_MEDIA_CHANGE | XS_CTL_SILENT_NODEV);

	if (error)
		result = SDGP_RESULT_OFFLINE;
	else
		result = sd_get_parms(sd, &sd->params, XS_CTL_DISCOVERY);
	aprint_normal("%s: ", sd->sc_dev.dv_xname);
	switch (result) {
	case SDGP_RESULT_OK:
		format_bytes(pbuf, sizeof(pbuf),
		    (u_int64_t)dp->disksize * dp->blksize);
		aprint_normal(
		"%s, %ld cyl, %ld head, %ld sec, %ld bytes/sect x %llu sectors",
		    pbuf, dp->cyls, dp->heads, dp->sectors, dp->blksize,
		    (unsigned long long)dp->disksize);
		break;

	case SDGP_RESULT_OFFLINE:
		aprint_normal("drive offline");
		break;

	case SDGP_RESULT_UNFORMATTED:
		aprint_normal("unformatted media");
		break;

#ifdef DIAGNOSTIC
	default:
		panic("sdattach: unknown result from get_parms");
		break;
#endif
	}
	aprint_normal("\n");

	/*
	 * Establish a shutdown hook so that we can ensure that
	 * our data has actually made it onto the platter at
	 * shutdown time.  Note that this relies on the fact
	 * that the shutdown hook code puts us at the head of
	 * the list (thus guaranteeing that our hook runs before
	 * our ancestors').
	 */
	if ((sd->sc_sdhook =
	    shutdownhook_establish(sd_shutdown, sd)) == NULL)
		aprint_error("%s: WARNING: unable to establish shutdown hook\n",
		    sd->sc_dev.dv_xname);

#if NRND > 0
	/*
	 * attach the device into the random source list
	 */
	rnd_attach_source(&sd->rnd_source, sd->sc_dev.dv_xname,
	    RND_TYPE_DISK, 0);
#endif
}
326
327 int
328 sdactivate(self, act)
329 struct device *self;
330 enum devact act;
331 {
332 int rv = 0;
333
334 switch (act) {
335 case DVACT_ACTIVATE:
336 rv = EOPNOTSUPP;
337 break;
338
339 case DVACT_DEACTIVATE:
340 /*
341 * Nothing to do; we key off the device's DVF_ACTIVE.
342 */
343 break;
344 }
345 return (rv);
346 }
347
/*
 * Autoconf detach hook: abort all queued and in-flight I/O, revoke any
 * open vnodes referring to this unit, and unregister from the disk
 * framework, the shutdown hook list, and (if configured) the entropy
 * pool.  Always succeeds.
 */
int
sddetach(self, flags)
	struct device *self;
	int flags;
{
	struct sd_softc *sd = (struct sd_softc *) self;
	struct buf *bp;
	int s, bmaj, cmaj, i, mn;

	/* locate the major number */
	bmaj = bdevsw_lookup_major(&sd_bdevsw);
	cmaj = cdevsw_lookup_major(&sd_cdevsw);

	/* Block interrupts while we tear down the queue. */
	s = splbio();

	/* Kill off any queued buffers. */
	while ((bp = BUFQ_GET(&sd->buf_queue)) != NULL) {
		bp->b_error = EIO;
		bp->b_flags |= B_ERROR;
		bp->b_resid = bp->b_bcount;
		biodone(bp);
	}

	bufq_free(&sd->buf_queue);

	/* Kill off any pending commands. */
	scsipi_kill_pending(sd->sc_periph);

	splx(s);

	/* Nuke the vnodes for any open instances */
	for (i = 0; i < MAXPARTITIONS; i++) {
		mn = SDMINOR(self->dv_unit, i);
		vdevgone(bmaj, mn, mn, VBLK);
		vdevgone(cmaj, mn, mn, VCHR);
	}

	/* Detach from the disk list. */
	disk_detach(&sd->sc_dk);

	/* Get rid of the shutdown hook. */
	shutdownhook_disestablish(sd->sc_sdhook);

#if NRND > 0
	/* Unhook the entropy source. */
	rnd_detach_source(&sd->rnd_source);
#endif

	return (0);
}
398
399 /*
400 * Wait interruptibly for an exclusive lock.
401 *
402 * XXX
403 * Several drivers do this; it should be abstracted and made MP-safe.
404 */
405 int
406 sdlock(sd)
407 struct sd_softc *sd;
408 {
409 int error;
410
411 while ((sd->flags & SDF_LOCKED) != 0) {
412 sd->flags |= SDF_WANTED;
413 if ((error = tsleep(sd, PRIBIO | PCATCH, "sdlck", 0)) != 0)
414 return (error);
415 }
416 sd->flags |= SDF_LOCKED;
417 return (0);
418 }
419
420 /*
421 * Unlock and wake up any waiters.
422 */
423 void
424 sdunlock(sd)
425 struct sd_softc *sd;
426 {
427
428 sd->flags &= ~SDF_LOCKED;
429 if ((sd->flags & SDF_WANTED) != 0) {
430 sd->flags &= ~SDF_WANTED;
431 wakeup(sd);
432 }
433 }
434
435 /*
436 * open the device. Make sure the partition info is a up-to-date as can be.
437 */
/*
 * open the device. Make sure the partition info is a up-to-date as can be.
 *
 * First open of the unit takes an adapter reference, spins the pack up,
 * locks removable media in, loads geometry and the disklabel; subsequent
 * opens just validate the partition.  Error unwinding mirrors the setup
 * order via the bad/bad2/bad3/bad4 labels.
 */
int
sdopen(dev, flag, fmt, p)
	dev_t dev;
	int flag, fmt;
	struct proc *p;
{
	struct sd_softc *sd;
	struct scsipi_periph *periph;
	struct scsipi_adapter *adapt;
	int unit, part;
	int error;

	unit = SDUNIT(dev);
	if (unit >= sd_cd.cd_ndevs)
		return (ENXIO);
	sd = sd_cd.cd_devs[unit];
	if (sd == NULL)
		return (ENXIO);

	/* Refuse opens once the device has been deactivated. */
	if ((sd->sc_dev.dv_flags & DVF_ACTIVE) == 0)
		return (ENODEV);

	periph = sd->sc_periph;
	adapt = periph->periph_channel->chan_adapter;
	part = SDPART(dev);

	SC_DEBUG(periph, SCSIPI_DB1,
	    ("sdopen: dev=0x%x (unit %d (of %d), partition %d)\n", dev, unit,
	    sd_cd.cd_ndevs, part));

	/*
	 * If this is the first open of this device, add a reference
	 * to the adapter.
	 */
	if (sd->sc_dk.dk_openmask == 0 &&
	    (error = scsipi_adapter_addref(adapt)) != 0)
		return (error);

	if ((error = sdlock(sd)) != 0)
		goto bad4;

	if ((periph->periph_flags & PERIPH_OPEN) != 0) {
		/*
		 * If any partition is open, but the disk has been invalidated,
		 * disallow further opens of non-raw partition
		 */
		if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0 &&
		    (part != RAW_PART || fmt != S_IFCHR)) {
			error = EIO;
			goto bad3;
		}
	} else {
		int silent;

		/* Opening the raw char partition probes quietly. */
		if (part == RAW_PART && fmt == S_IFCHR)
			silent = XS_CTL_SILENT;
		else
			silent = 0;

		/* Check that it is still responding and ok. */
		error = scsipi_test_unit_ready(periph,
		    XS_CTL_IGNORE_ILLEGAL_REQUEST | XS_CTL_IGNORE_MEDIA_CHANGE |
		    silent);

		/*
		 * Start the pack spinning if necessary. Always allow the
		 * raw parition to be opened, for raw IOCTLs. Data transfers
		 * will check for SDEV_MEDIA_LOADED.
		 */
		if (error == EIO) {
			int error2;

			error2 = scsipi_start(periph, SSS_START, silent);
			switch (error2) {
			case 0:
				/* Spin-up cured the not-ready condition. */
				error = 0;
				break;
			case EIO:
			case EINVAL:
				/* Keep the original EIO from TUR. */
				break;
			default:
				error = error2;
				break;
			}
		}
		if (error) {
			/* Silent raw opens succeed even without media. */
			if (silent)
				goto out;
			goto bad3;
		}

		periph->periph_flags |= PERIPH_OPEN;

		if (periph->periph_flags & PERIPH_REMOVABLE) {
			/* Lock the pack in. */
			error = scsipi_prevent(periph, PR_PREVENT,
			    XS_CTL_IGNORE_ILLEGAL_REQUEST |
			    XS_CTL_IGNORE_MEDIA_CHANGE);
			if (error)
				goto bad;
		}

		if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0) {
			periph->periph_flags |= PERIPH_MEDIA_LOADED;

			/*
			 * Load the physical device parameters.
			 *
			 * Note that if media is present but unformatted,
			 * we allow the open (so that it can be formatted!).
			 * The drive should refuse real I/O, if the media is
			 * unformatted.
			 */
			if (sd_get_parms(sd, &sd->params,
			    0) == SDGP_RESULT_OFFLINE) {
				error = ENXIO;
				goto bad2;
			}
			SC_DEBUG(periph, SCSIPI_DB3, ("Params loaded "));

			/* Load the partition info if not already loaded. */
			sdgetdisklabel(sd);
			SC_DEBUG(periph, SCSIPI_DB3, ("Disklabel loaded "));
		}
	}

	/* Check that the partition exists. */
	if (part != RAW_PART &&
	    (part >= sd->sc_dk.dk_label->d_npartitions ||
	     sd->sc_dk.dk_label->d_partitions[part].p_fstype == FS_UNUSED)) {
		error = ENXIO;
		goto bad;
	}

out:	/* Insure only one open at a time. */
	switch (fmt) {
	case S_IFCHR:
		sd->sc_dk.dk_copenmask |= (1 << part);
		break;
	case S_IFBLK:
		sd->sc_dk.dk_bopenmask |= (1 << part);
		break;
	}
	sd->sc_dk.dk_openmask =
	    sd->sc_dk.dk_copenmask | sd->sc_dk.dk_bopenmask;

	SC_DEBUG(periph, SCSIPI_DB3, ("open complete\n"));
	sdunlock(sd);
	return (0);

bad2:
	periph->periph_flags &= ~PERIPH_MEDIA_LOADED;

bad:
	if (sd->sc_dk.dk_openmask == 0) {
		/* Last failed first-open: undo the media lock and OPEN flag. */
		if (periph->periph_flags & PERIPH_REMOVABLE)
			scsipi_prevent(periph, PR_ALLOW,
			    XS_CTL_IGNORE_ILLEGAL_REQUEST |
			    XS_CTL_IGNORE_MEDIA_CHANGE);
		periph->periph_flags &= ~PERIPH_OPEN;
	}

bad3:
	sdunlock(sd);
bad4:
	if (sd->sc_dk.dk_openmask == 0)
		scsipi_adapter_delref(adapt);
	return (error);
}
607
608 /*
609 * close the device.. only called if we are the LAST occurence of an open
610 * device. Convenient now but usually a pain.
611 */
/*
 * close the device.. only called if we are the LAST occurence of an open
 * device. Convenient now but usually a pain.
 *
 * On last close of the unit: flush the write cache if dirty, forget the
 * disklabel (unless PERIPH_KEEP_LABEL), unlock removable media, and drop
 * the adapter reference taken in sdopen().
 */
int
sdclose(dev, flag, fmt, p)
	dev_t dev;
	int flag, fmt;
	struct proc *p;
{
	struct sd_softc *sd = sd_cd.cd_devs[SDUNIT(dev)];
	struct scsipi_periph *periph = sd->sc_periph;
	struct scsipi_adapter *adapt = periph->periph_channel->chan_adapter;
	int part = SDPART(dev);
	int error;

	if ((error = sdlock(sd)) != 0)
		return (error);

	switch (fmt) {
	case S_IFCHR:
		sd->sc_dk.dk_copenmask &= ~(1 << part);
		break;
	case S_IFBLK:
		sd->sc_dk.dk_bopenmask &= ~(1 << part);
		break;
	}
	sd->sc_dk.dk_openmask =
	    sd->sc_dk.dk_copenmask | sd->sc_dk.dk_bopenmask;

	if (sd->sc_dk.dk_openmask == 0) {
		/*
		 * If the disk cache needs flushing, and the disk supports
		 * it, do it now.
		 */
		if ((sd->flags & SDF_DIRTY) != 0) {
			if (sd_flush(sd, 0)) {
				printf("%s: cache synchronization failed\n",
				    sd->sc_dev.dv_xname);
				sd->flags &= ~SDF_FLUSHING;
			} else
				sd->flags &= ~(SDF_FLUSHING|SDF_DIRTY);
		}

		if (! (periph->periph_flags & PERIPH_KEEP_LABEL))
			periph->periph_flags &= ~PERIPH_MEDIA_LOADED;

		scsipi_wait_drain(periph);

		if (periph->periph_flags & PERIPH_REMOVABLE)
			scsipi_prevent(periph, PR_ALLOW,
			    XS_CTL_IGNORE_ILLEGAL_REQUEST |
			    XS_CTL_IGNORE_NOT_READY);
		periph->periph_flags &= ~PERIPH_OPEN;

		/*
		 * NOTE(review): scsipi_wait_drain() is called a second time
		 * here, after the PR_ALLOW above -- presumably to drain that
		 * command too, but possibly redundant; confirm intent.
		 */
		scsipi_wait_drain(periph);

		scsipi_adapter_delref(adapt);
	}

	sdunlock(sd);
	return (0);
}
671
672 /*
673 * Actually translate the requested transfer into one the physical driver
674 * can understand. The transfer is described by a buf and will include
675 * only one physical transfer.
676 */
/*
 * Actually translate the requested transfer into one the physical driver
 * can understand.  The transfer is described by a buf and will include
 * only one physical transfer.
 *
 * Validates the request (media present, sector-aligned, in bounds),
 * converts the DEV_BSIZE block number into an absolute device-block
 * address in bp->b_rawblkno, queues the buf, and kicks sdstart().
 * Errors complete the buf immediately via biodone().
 */
void
sdstrategy(bp)
	struct buf *bp;
{
	struct sd_softc *sd = sd_cd.cd_devs[SDUNIT(bp->b_dev)];
	struct scsipi_periph *periph = sd->sc_periph;
	struct disklabel *lp;
	daddr_t blkno;
	int s;
	boolean_t sector_aligned;

	SC_DEBUG(sd->sc_periph, SCSIPI_DB2, ("sdstrategy "));
	SC_DEBUG(sd->sc_periph, SCSIPI_DB1,
	    ("%ld bytes @ blk %" PRId64 "\n", bp->b_bcount, bp->b_blkno));
	/*
	 * If the device has been made invalid, error out
	 */
	if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0 ||
	    (sd->sc_dev.dv_flags & DVF_ACTIVE) == 0) {
		if (periph->periph_flags & PERIPH_OPEN)
			bp->b_error = EIO;
		else
			bp->b_error = ENODEV;
		goto bad;
	}

	lp = sd->sc_dk.dk_label;

	/*
	 * The transfer must be a whole number of blocks, offset must not be
	 * negative.
	 */
	if (lp->d_secsize == DEV_BSIZE) {
		/* Power-of-two fast path: mask instead of modulo. */
		sector_aligned = (bp->b_bcount & (DEV_BSIZE - 1)) == 0;
	} else {
		sector_aligned = (bp->b_bcount % lp->d_secsize) == 0;
	}
	if (!sector_aligned || bp->b_blkno < 0) {
		bp->b_error = EINVAL;
		goto bad;
	}
	/*
	 * If it's a null transfer, return immediatly
	 */
	if (bp->b_bcount == 0)
		goto done;

	/*
	 * Do bounds checking, adjust transfer. if error, process.
	 * If end of partition, just return.
	 */
	if (SDPART(bp->b_dev) == RAW_PART) {
		if (bounds_check_with_mediasize(bp, DEV_BSIZE,
		    sd->params.disksize512) <= 0)
			goto done;
	} else {
		if (bounds_check_with_label(&sd->sc_dk, bp,
		    (sd->flags & (SDF_WLABEL|SDF_LABELLING)) != 0) <= 0)
			goto done;
	}

	/*
	 * Now convert the block number to absolute and put it in
	 * terms of the device's logical block size.
	 */
	if (lp->d_secsize == DEV_BSIZE)
		blkno = bp->b_blkno;
	else if (lp->d_secsize > DEV_BSIZE)
		blkno = bp->b_blkno / (lp->d_secsize / DEV_BSIZE);
	else
		blkno = bp->b_blkno * (DEV_BSIZE / lp->d_secsize);

	/* Non-raw partitions are offset by the partition start. */
	if (SDPART(bp->b_dev) != RAW_PART)
		blkno += lp->d_partitions[SDPART(bp->b_dev)].p_offset;

	bp->b_rawblkno = blkno;

	s = splbio();

	/*
	 * Place it in the queue of disk activities for this disk.
	 *
	 * XXX Only do disksort() if the current operating mode does not
	 * XXX include tagged queueing.
	 */
	BUFQ_PUT(&sd->buf_queue, bp);

	/*
	 * Tell the device to get going on the transfer if it's
	 * not doing anything, otherwise just wait for completion
	 */
	sdstart(sd->sc_periph);

	splx(s);
	return;

bad:
	bp->b_flags |= B_ERROR;
done:
	/*
	 * Correctly set the buf to indicate a completed xfer
	 */
	bp->b_resid = bp->b_bcount;
	biodone(bp);
}
782
783 /*
784 * sdstart looks to see if there is a buf waiting for the device
785 * and that the device is not already busy. If both are true,
786 * It dequeues the buf and creates a scsi command to perform the
787 * transfer in the buf. The transfer request will call scsipi_done
788 * on completion, which will in turn call this routine again
789 * so that the next queued transfer is performed.
790 * The bufs are queued by the strategy routine (sdstrategy)
791 *
792 * This routine is also called after other non-queued requests
793 * have been made of the scsi driver, to ensure that the queue
794 * continues to be drained.
795 *
796 * must be called at the correct (highish) spl level
797 * sdstart() is called at splbio from sdstrategy and scsipi_done
798 */
/*
 * sdstart looks to see if there is a buf waiting for the device
 * and that the device is not already busy. If both are true,
 * It dequeues the buf and creates a scsi command to perform the
 * transfer in the buf. The transfer request will call scsipi_done
 * on completion, which will in turn call this routine again
 * so that the next queued transfer is performed.
 * The bufs are queued by the strategy routine (sdstrategy)
 *
 * This routine is also called after other non-queued requests
 * have been made of the scsi driver, to ensure that the queue
 * continues to be drained.
 *
 * must be called at the correct (highish) spl level
 * sdstart() is called at splbio from sdstrategy and scsipi_done
 */
void
sdstart(periph)
	struct scsipi_periph *periph;
{
	struct sd_softc *sd = (void *)periph->periph_dev;
	struct disklabel *lp = sd->sc_dk.dk_label;
	struct buf *bp = 0;
	struct scsipi_rw_big cmd_big;
#if NSD_SCSIBUS > 0
	struct scsi_rw cmd_small;
#endif
	struct scsipi_generic *cmdp;	/* points at whichever cmd we built */
	int nblks, cmdlen, error, flags;

	SC_DEBUG(periph, SCSIPI_DB2, ("sdstart "));
	/*
	 * Check if the device has room for another command
	 */
	while (periph->periph_active < periph->periph_openings) {
		/*
		 * there is excess capacity, but a special waits
		 * It'll need the adapter as soon as we clear out of the
		 * way and let it run (user level wait).
		 */
		if (periph->periph_flags & PERIPH_WAITING) {
			periph->periph_flags &= ~PERIPH_WAITING;
			wakeup((caddr_t)periph);
			return;
		}

		/*
		 * See if there is a buf with work for us to do..
		 */
		if ((bp = BUFQ_GET(&sd->buf_queue)) == NULL)
			return;

		/*
		 * If the device has become invalid, abort all the
		 * reads and writes until all files have been closed and
		 * re-opened
		 */
		if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0) {
			bp->b_error = EIO;
			bp->b_flags |= B_ERROR;
			bp->b_resid = bp->b_bcount;
			biodone(bp);
			continue;
		}

		/*
		 * We have a buf, now we should make a command.
		 */

		/* Transfer length in device blocks. */
		if (lp->d_secsize == DEV_BSIZE)
			nblks = bp->b_bcount >> DEV_BSHIFT;
		else
			nblks = howmany(bp->b_bcount, lp->d_secsize);

#if NSD_SCSIBUS > 0
		/*
		 * Fill out the scsi command.  If the transfer will
		 * fit in a "small" cdb, use it.
		 * (block number <= 21 bits, count <= 8 bits, and the
		 * device not quirked to require 10-byte CDBs.)
		 */
		if (((bp->b_rawblkno & 0x1fffff) == bp->b_rawblkno) &&
		    ((nblks & 0xff) == nblks) &&
		    !(periph->periph_quirks & PQUIRK_ONLYBIG)) {
			/*
			 * We can fit in a small cdb.
			 */
			memset(&cmd_small, 0, sizeof(cmd_small));
			cmd_small.opcode = (bp->b_flags & B_READ) ?
			    SCSI_READ_COMMAND : SCSI_WRITE_COMMAND;
			_lto3b(bp->b_rawblkno, cmd_small.addr);
			cmd_small.length = nblks & 0xff;
			cmdlen = sizeof(cmd_small);
			cmdp = (struct scsipi_generic *)&cmd_small;
		} else
#endif /* NSD_SCSIBUS > 0 */
		{
			/*
			 * Need a large cdb.
			 */
			memset(&cmd_big, 0, sizeof(cmd_big));
			cmd_big.opcode = (bp->b_flags & B_READ) ?
			    READ_BIG : WRITE_BIG;
			_lto4b(bp->b_rawblkno, cmd_big.addr);
			_lto2b(nblks, cmd_big.length);
			cmdlen = sizeof(cmd_big);
			cmdp = (struct scsipi_generic *)&cmd_big;
		}

		/* Instrumentation. */
		disk_busy(&sd->sc_dk);

		/*
		 * Mark the disk dirty so that the cache will be
		 * flushed on close.
		 */
		if ((bp->b_flags & B_READ) == 0)
			sd->flags |= SDF_DIRTY;

		/*
		 * Figure out what flags to use.
		 */
		flags = XS_CTL_NOSLEEP|XS_CTL_ASYNC|XS_CTL_SIMPLE_TAG;
		if (bp->b_flags & B_READ)
			flags |= XS_CTL_DATA_IN;
		else
			flags |= XS_CTL_DATA_OUT;

		/*
		 * Call the routine that chats with the adapter.
		 * Note: we cannot sleep as we may be an interrupt
		 */
		error = scsipi_command(periph, cmdp, cmdlen,
		    (u_char *)bp->b_data, bp->b_bcount,
		    SDRETRIES, SD_IO_TIMEOUT, bp, flags);
		if (error) {
			/* Undo the disk_busy() accounting for this buf. */
			disk_unbusy(&sd->sc_dk, 0, 0);
			printf("%s: not queued, error %d\n",
			    sd->sc_dev.dv_xname, error);
		}
	}
}
923
/*
 * Per-xfer completion callback (runs at interrupt time via sd_switch).
 * Clears the dirty/flushing flags once a flush completes, updates the
 * disk(9) busy accounting, and feeds the block number to the entropy
 * pool when rnd(4) is configured.
 */
void
sddone(xs)
	struct scsipi_xfer *xs;
{
	struct sd_softc *sd = (void *)xs->xs_periph->periph_dev;

	if (sd->flags & SDF_FLUSHING) {
		/* Flush completed, no longer dirty. */
		sd->flags &= ~(SDF_FLUSHING|SDF_DIRTY);
	}

	if (xs->bp != NULL) {
		/* Account the bytes actually transferred for this buf. */
		disk_unbusy(&sd->sc_dk, xs->bp->b_bcount - xs->bp->b_resid,
		    (xs->bp->b_flags & B_READ));
#if NRND > 0
		rnd_add_uint32(&sd->rnd_source, xs->bp->b_rawblkno);
#endif
	}
}
943
/*
 * Clamp a transfer to what the device and adapter can handle:
 * for ancient (SCSI-1) drives limit to what fits in a 6-byte CDB,
 * then let the adapter's minphys impose its own limit.
 */
void
sdminphys(bp)
	struct buf *bp;
{
	struct sd_softc *sd = sd_cd.cd_devs[SDUNIT(bp->b_dev)];
	long max;

	/*
	 * If the device is ancient, we want to make sure that
	 * the transfer fits into a 6-byte cdb.
	 *
	 * XXX Note that the SCSI-I spec says that 256-block transfers
	 * are allowed in a 6-byte read/write, and are specified
	 * by settng the "length" to 0.  However, we're conservative
	 * here, allowing only 255-block transfers in case an
	 * ancient device gets confused by length == 0.  A length of 0
	 * in a 10-byte read/write actually means 0 blocks.
	 */
	if ((sd->flags & SDF_ANCIENT) &&
	    ((sd->sc_periph->periph_flags &
	    (PERIPH_REMOVABLE | PERIPH_MEDIA_LOADED)) != PERIPH_REMOVABLE)) {
		max = sd->sc_dk.dk_label->d_secsize * 0xff;

		if (bp->b_bcount > max)
			bp->b_bcount = max;
	}

	(*sd->sc_periph->periph_channel->chan_adapter->adapt_minphys)(bp);
}
973
974 int
975 sdread(dev, uio, ioflag)
976 dev_t dev;
977 struct uio *uio;
978 int ioflag;
979 {
980
981 return (physio(sdstrategy, NULL, dev, B_READ, sdminphys, uio));
982 }
983
984 int
985 sdwrite(dev, uio, ioflag)
986 dev_t dev;
987 struct uio *uio;
988 int ioflag;
989 {
990
991 return (physio(sdstrategy, NULL, dev, B_WRITE, sdminphys, uio));
992 }
993
994 /*
995 * Perform special action on behalf of the user
996 * Knows about the internals of this device
997 */
998 int
999 sdioctl(dev, cmd, addr, flag, p)
1000 dev_t dev;
1001 u_long cmd;
1002 caddr_t addr;
1003 int flag;
1004 struct proc *p;
1005 {
1006 struct sd_softc *sd = sd_cd.cd_devs[SDUNIT(dev)];
1007 struct scsipi_periph *periph = sd->sc_periph;
1008 int part = SDPART(dev);
1009 int error = 0;
1010 #ifdef __HAVE_OLD_DISKLABEL
1011 struct disklabel *newlabel = NULL;
1012 #endif
1013
1014 SC_DEBUG(sd->sc_periph, SCSIPI_DB2, ("sdioctl 0x%lx ", cmd));
1015
1016 /*
1017 * If the device is not valid, some IOCTLs can still be
1018 * handled on the raw partition. Check this here.
1019 */
1020 if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0) {
1021 switch (cmd) {
1022 case DIOCKLABEL:
1023 case DIOCWLABEL:
1024 case DIOCLOCK:
1025 case DIOCEJECT:
1026 case ODIOCEJECT:
1027 case DIOCGCACHE:
1028 case DIOCSCACHE:
1029 case SCIOCIDENTIFY:
1030 case OSCIOCIDENTIFY:
1031 case SCIOCCOMMAND:
1032 case SCIOCDEBUG:
1033 if (part == RAW_PART)
1034 break;
1035 /* FALLTHROUGH */
1036 default:
1037 if ((periph->periph_flags & PERIPH_OPEN) == 0)
1038 return (ENODEV);
1039 else
1040 return (EIO);
1041 }
1042 }
1043
1044 switch (cmd) {
1045 case DIOCGDINFO:
1046 *(struct disklabel *)addr = *(sd->sc_dk.dk_label);
1047 return (0);
1048
1049 #ifdef __HAVE_OLD_DISKLABEL
1050 case ODIOCGDINFO:
1051 newlabel = malloc(sizeof *newlabel, M_TEMP, M_WAITOK);
1052 if (newlabel == NULL)
1053 return EIO;
1054 memcpy(newlabel, sd->sc_dk.dk_label, sizeof (*newlabel));
1055 if (newlabel->d_npartitions <= OLDMAXPARTITIONS)
1056 memcpy(addr, newlabel, sizeof (struct olddisklabel));
1057 else
1058 error = ENOTTY;
1059 free(newlabel, M_TEMP);
1060 return error;
1061 #endif
1062
1063 case DIOCGPART:
1064 ((struct partinfo *)addr)->disklab = sd->sc_dk.dk_label;
1065 ((struct partinfo *)addr)->part =
1066 &sd->sc_dk.dk_label->d_partitions[part];
1067 return (0);
1068
1069 case DIOCWDINFO:
1070 case DIOCSDINFO:
1071 #ifdef __HAVE_OLD_DISKLABEL
1072 case ODIOCWDINFO:
1073 case ODIOCSDINFO:
1074 #endif
1075 {
1076 struct disklabel *lp;
1077
1078 if ((flag & FWRITE) == 0)
1079 return (EBADF);
1080
1081 #ifdef __HAVE_OLD_DISKLABEL
1082 if (cmd == ODIOCSDINFO || cmd == ODIOCWDINFO) {
1083 newlabel = malloc(sizeof *newlabel, M_TEMP, M_WAITOK);
1084 if (newlabel == NULL)
1085 return EIO;
1086 memset(newlabel, 0, sizeof newlabel);
1087 memcpy(newlabel, addr, sizeof (struct olddisklabel));
1088 lp = newlabel;
1089 } else
1090 #endif
1091 lp = (struct disklabel *)addr;
1092
1093 if ((error = sdlock(sd)) != 0)
1094 goto bad;
1095 sd->flags |= SDF_LABELLING;
1096
1097 error = setdisklabel(sd->sc_dk.dk_label,
1098 lp, /*sd->sc_dk.dk_openmask : */0,
1099 sd->sc_dk.dk_cpulabel);
1100 if (error == 0) {
1101 if (cmd == DIOCWDINFO
1102 #ifdef __HAVE_OLD_DISKLABEL
1103 || cmd == ODIOCWDINFO
1104 #endif
1105 )
1106 error = writedisklabel(SDLABELDEV(dev),
1107 sdstrategy, sd->sc_dk.dk_label,
1108 sd->sc_dk.dk_cpulabel);
1109 }
1110
1111 sd->flags &= ~SDF_LABELLING;
1112 sdunlock(sd);
1113 bad:
1114 #ifdef __HAVE_OLD_DISKLABEL
1115 if (newlabel != NULL)
1116 free(newlabel, M_TEMP);
1117 #endif
1118 return (error);
1119 }
1120
1121 case DIOCKLABEL:
1122 if (*(int *)addr)
1123 periph->periph_flags |= PERIPH_KEEP_LABEL;
1124 else
1125 periph->periph_flags &= ~PERIPH_KEEP_LABEL;
1126 return (0);
1127
1128 case DIOCWLABEL:
1129 if ((flag & FWRITE) == 0)
1130 return (EBADF);
1131 if (*(int *)addr)
1132 sd->flags |= SDF_WLABEL;
1133 else
1134 sd->flags &= ~SDF_WLABEL;
1135 return (0);
1136
1137 case DIOCLOCK:
1138 return (scsipi_prevent(periph,
1139 (*(int *)addr) ? PR_PREVENT : PR_ALLOW, 0));
1140
1141 case DIOCEJECT:
1142 if ((periph->periph_flags & PERIPH_REMOVABLE) == 0)
1143 return (ENOTTY);
1144 if (*(int *)addr == 0) {
1145 /*
1146 * Don't force eject: check that we are the only
1147 * partition open. If so, unlock it.
1148 */
1149 if ((sd->sc_dk.dk_openmask & ~(1 << part)) == 0 &&
1150 sd->sc_dk.dk_bopenmask + sd->sc_dk.dk_copenmask ==
1151 sd->sc_dk.dk_openmask) {
1152 error = scsipi_prevent(periph, PR_ALLOW,
1153 XS_CTL_IGNORE_NOT_READY);
1154 if (error)
1155 return (error);
1156 } else {
1157 return (EBUSY);
1158 }
1159 }
1160 /* FALLTHROUGH */
1161 case ODIOCEJECT:
1162 return ((periph->periph_flags & PERIPH_REMOVABLE) == 0 ?
1163 ENOTTY : scsipi_start(periph, SSS_STOP|SSS_LOEJ, 0));
1164
1165 case DIOCGDEFLABEL:
1166 sdgetdefaultlabel(sd, (struct disklabel *)addr);
1167 return (0);
1168
1169 #ifdef __HAVE_OLD_DISKLABEL
1170 case ODIOCGDEFLABEL:
1171 newlabel = malloc(sizeof *newlabel, M_TEMP, M_WAITOK);
1172 if (newlabel == NULL)
1173 return EIO;
1174 sdgetdefaultlabel(sd, newlabel);
1175 if (newlabel->d_npartitions <= OLDMAXPARTITIONS)
1176 memcpy(addr, newlabel, sizeof (struct olddisklabel));
1177 else
1178 error = ENOTTY;
1179 free(newlabel, M_TEMP);
1180 return error;
1181 #endif
1182
1183 case DIOCGCACHE:
1184 return (sd_getcache(sd, (int *) addr));
1185
1186 case DIOCSCACHE:
1187 if ((flag & FWRITE) == 0)
1188 return (EBADF);
1189 return (sd_setcache(sd, *(int *) addr));
1190
1191 case DIOCCACHESYNC:
1192 /*
1193 * XXX Do we really need to care about having a writable
1194 * file descriptor here?
1195 */
1196 if ((flag & FWRITE) == 0)
1197 return (EBADF);
1198 if (((sd->flags & SDF_DIRTY) != 0 || *(int *)addr != 0)) {
1199 error = sd_flush(sd, 0);
1200 if (error)
1201 sd->flags &= ~SDF_FLUSHING;
1202 else
1203 sd->flags &= ~(SDF_FLUSHING|SDF_DIRTY);
1204 } else
1205 error = 0;
1206 return (error);
1207
1208 default:
1209 if (part != RAW_PART)
1210 return (ENOTTY);
1211 return (scsipi_do_ioctl(periph, dev, cmd, addr, flag, p));
1212 }
1213
1214 #ifdef DIAGNOSTIC
1215 panic("sdioctl: impossible");
1216 #endif
1217 }
1218
1219 void
1220 sdgetdefaultlabel(sd, lp)
1221 struct sd_softc *sd;
1222 struct disklabel *lp;
1223 {
1224
1225 memset(lp, 0, sizeof(struct disklabel));
1226
1227 lp->d_secsize = sd->params.blksize;
1228 lp->d_ntracks = sd->params.heads;
1229 lp->d_nsectors = sd->params.sectors;
1230 lp->d_ncylinders = sd->params.cyls;
1231 lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
1232
1233 switch (scsipi_periph_bustype(sd->sc_periph)) {
1234 #if NSD_SCSIBUS > 0
1235 case SCSIPI_BUSTYPE_SCSI:
1236 lp->d_type = DTYPE_SCSI;
1237 break;
1238 #endif
1239 #if NSD_ATAPIBUS > 0
1240 case SCSIPI_BUSTYPE_ATAPI:
1241 lp->d_type = DTYPE_ATAPI;
1242 break;
1243 #endif
1244 }
1245 strncpy(lp->d_typename, "mydisk", 16);
1246 strncpy(lp->d_packname, "fictitious", 16);
1247 lp->d_secperunit = sd->params.disksize;
1248 lp->d_rpm = sd->params.rot_rate;
1249 lp->d_interleave = 1;
1250 lp->d_flags = 0;
1251
1252 lp->d_partitions[RAW_PART].p_offset = 0;
1253 lp->d_partitions[RAW_PART].p_size =
1254 lp->d_secperunit * (lp->d_secsize / DEV_BSIZE);
1255 lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED;
1256 lp->d_npartitions = RAW_PART + 1;
1257
1258 lp->d_magic = DISKMAGIC;
1259 lp->d_magic2 = DISKMAGIC;
1260 lp->d_checksum = dkcksum(lp);
1261 }
1262
1263
1264 /*
1265 * Load the label information on the named device
1266 */
1267 void
1268 sdgetdisklabel(sd)
1269 struct sd_softc *sd;
1270 {
1271 struct disklabel *lp = sd->sc_dk.dk_label;
1272 const char *errstring;
1273
1274 memset(sd->sc_dk.dk_cpulabel, 0, sizeof(struct cpu_disklabel));
1275
1276 sdgetdefaultlabel(sd, lp);
1277
1278 if (lp->d_secpercyl == 0) {
1279 lp->d_secpercyl = 100;
1280 /* as long as it's not 0 - readdisklabel divides by it (?) */
1281 }
1282
1283 /*
1284 * Call the generic disklabel extraction routine
1285 */
1286 errstring = readdisklabel(MAKESDDEV(0, sd->sc_dev.dv_unit, RAW_PART),
1287 sdstrategy, lp, sd->sc_dk.dk_cpulabel);
1288 if (errstring) {
1289 printf("%s: %s\n", sd->sc_dev.dv_xname, errstring);
1290 return;
1291 }
1292 }
1293
1294 void
1295 sd_shutdown(arg)
1296 void *arg;
1297 {
1298 struct sd_softc *sd = arg;
1299
1300 /*
1301 * If the disk cache needs to be flushed, and the disk supports
1302 * it, flush it. We're cold at this point, so we poll for
1303 * completion.
1304 */
1305 if ((sd->flags & SDF_DIRTY) != 0) {
1306 if (sd_flush(sd, XS_CTL_NOSLEEP|XS_CTL_POLL)) {
1307 printf("%s: cache synchronization failed\n",
1308 sd->sc_dev.dv_xname);
1309 sd->flags &= ~SDF_FLUSHING;
1310 } else
1311 sd->flags &= ~(SDF_FLUSHING|SDF_DIRTY);
1312 }
1313 }
1314
1315 /*
1316 * Tell the device to map out a defective block
1317 */
1318 int
1319 sd_reassign_blocks(sd, blkno)
1320 struct sd_softc *sd;
1321 u_long blkno;
1322 {
1323 struct scsi_reassign_blocks scsipi_cmd;
1324 struct scsi_reassign_blocks_data rbdata;
1325
1326 memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
1327 memset(&rbdata, 0, sizeof(rbdata));
1328 scsipi_cmd.opcode = SCSI_REASSIGN_BLOCKS;
1329
1330 _lto2b(sizeof(rbdata.defect_descriptor[0]), rbdata.length);
1331 _lto4b(blkno, rbdata.defect_descriptor[0].dlbaddr);
1332
1333 return (scsipi_command(sd->sc_periph,
1334 (struct scsipi_generic *)&scsipi_cmd, sizeof(scsipi_cmd),
1335 (u_char *)&rbdata, sizeof(rbdata), SDRETRIES, 5000, NULL,
1336 XS_CTL_DATA_OUT | XS_CTL_DATA_ONSTACK));
1337 }
1338
1339 /*
1340 * Check Errors
1341 */
/*
 * Examine sense data from a failed transfer and attempt recovery for
 * not-ready conditions.  Returns ERESTART to have the command retried,
 * EJUSTRETURN to fall through to the generic scsipi error handling, or
 * an errno when recovery itself failed.
 */
int
sd_interpret_sense(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_sense_data *sense = &xs->sense.scsi_sense;
	struct sd_softc *sd = (void *)periph->periph_dev;
	int s, error, retval = EJUSTRETURN;

	/*
	 * If the periph is already recovering, just do the normal
	 * error processing.
	 */
	if (periph->periph_flags & PERIPH_RECOVERING)
		return (retval);

	/*
	 * If the device is not open yet, let the generic code handle it.
	 */
	if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0)
		return (retval);

	/*
	 * If it isn't a extended or extended/deferred error, let
	 * the generic code handle it.  (0x70 = current error,
	 * 0x71 = deferred error.)
	 */
	if ((sense->error_code & SSD_ERRCODE) != 0x70 &&
	    (sense->error_code & SSD_ERRCODE) != 0x71)
		return (retval);

	/*
	 * NOT READY with additional sense code 0x04; the qualifier
	 * distinguishes the sub-cases handled below.
	 */
	if ((sense->flags & SSD_KEY) == SKEY_NOT_READY &&
	    sense->add_sense_code == 0x4) {
		if (sense->add_sense_code_qual == 0x01) {
			/*
			 * Unit In The Process Of Becoming Ready.
			 * Freeze the queue once (don't stack freezes if
			 * the thaw callout is already pending), thaw it
			 * again in 5 seconds, and retry the command.
			 */
			printf("%s: waiting for pack to spin up...\n",
			    sd->sc_dev.dv_xname);
			if (!callout_pending(&periph->periph_callout))
				scsipi_periph_freeze(periph, 1);
			callout_reset(&periph->periph_callout,
			    5 * hz, scsipi_periph_timed_thaw, periph);
			retval = ERESTART;
		} else if (sense->add_sense_code_qual == 0x02) {
			/*
			 * Unit is stopped: issue START UNIT to spin it
			 * up.  PERIPH_RECOVERING makes a re-entry of
			 * this routine take the early out above while
			 * the start command is in flight.
			 */
			printf("%s: pack is stopped, restarting...\n",
			    sd->sc_dev.dv_xname);
			s = splbio();
			periph->periph_flags |= PERIPH_RECOVERING;
			splx(s);
			error = scsipi_start(periph, SSS_START,
			    XS_CTL_URGENT|XS_CTL_HEAD_TAG|
			    XS_CTL_THAW_PERIPH|XS_CTL_FREEZE_PERIPH);
			if (error) {
				printf("%s: unable to restart pack\n",
				    sd->sc_dev.dv_xname);
				retval = error;
			} else
				retval = ERESTART;
			s = splbio();
			periph->periph_flags &= ~PERIPH_RECOVERING;
			splx(s);
		}
	}
	return (retval);
}
1407
1408
1409 int
1410 sdsize(dev)
1411 dev_t dev;
1412 {
1413 struct sd_softc *sd;
1414 int part, unit, omask;
1415 int size;
1416
1417 unit = SDUNIT(dev);
1418 if (unit >= sd_cd.cd_ndevs)
1419 return (-1);
1420 sd = sd_cd.cd_devs[unit];
1421 if (sd == NULL)
1422 return (-1);
1423
1424 if ((sd->sc_dev.dv_flags & DVF_ACTIVE) == 0)
1425 return (-1);
1426
1427 part = SDPART(dev);
1428 omask = sd->sc_dk.dk_openmask & (1 << part);
1429
1430 if (omask == 0 && sdopen(dev, 0, S_IFBLK, NULL) != 0)
1431 return (-1);
1432 if ((sd->sc_periph->periph_flags & PERIPH_MEDIA_LOADED) == 0)
1433 size = -1;
1434 else if (sd->sc_dk.dk_label->d_partitions[part].p_fstype != FS_SWAP)
1435 size = -1;
1436 else
1437 size = sd->sc_dk.dk_label->d_partitions[part].p_size *
1438 (sd->sc_dk.dk_label->d_secsize / DEV_BSIZE);
1439 if (omask == 0 && sdclose(dev, 0, S_IFBLK, NULL) != 0)
1440 return (-1);
1441 return (size);
1442 }
1443
/* #define SD_DUMP_NOT_TRUSTED if you just want to watch */
/* Statically allocated transfer for sddump(); it may not sleep/allocate. */
static struct scsipi_xfer sx;
static int sddoingadump;	/* non-zero while a dump is in progress */
1447
1448 /*
1449 * dump all of physical memory into the partition specified, starting
1450 * at offset 'dumplo' into the partition.
1451 */
1452 int
1453 sddump(dev, blkno, va, size)
1454 dev_t dev;
1455 daddr_t blkno;
1456 caddr_t va;
1457 size_t size;
1458 {
1459 struct sd_softc *sd; /* disk unit to do the I/O */
1460 struct disklabel *lp; /* disk's disklabel */
1461 int unit, part;
1462 int sectorsize; /* size of a disk sector */
1463 int nsects; /* number of sectors in partition */
1464 int sectoff; /* sector offset of partition */
1465 int totwrt; /* total number of sectors left to write */
1466 int nwrt; /* current number of sectors to write */
1467 struct scsipi_rw_big cmd; /* write command */
1468 struct scsipi_xfer *xs; /* ... convenience */
1469 struct scsipi_periph *periph;
1470 struct scsipi_channel *chan;
1471
1472 /* Check if recursive dump; if so, punt. */
1473 if (sddoingadump)
1474 return (EFAULT);
1475
1476 /* Mark as active early. */
1477 sddoingadump = 1;
1478
1479 unit = SDUNIT(dev); /* Decompose unit & partition. */
1480 part = SDPART(dev);
1481
1482 /* Check for acceptable drive number. */
1483 if (unit >= sd_cd.cd_ndevs || (sd = sd_cd.cd_devs[unit]) == NULL)
1484 return (ENXIO);
1485
1486 if ((sd->sc_dev.dv_flags & DVF_ACTIVE) == 0)
1487 return (ENODEV);
1488
1489 periph = sd->sc_periph;
1490 chan = periph->periph_channel;
1491
1492 /* Make sure it was initialized. */
1493 if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0)
1494 return (ENXIO);
1495
1496 /* Convert to disk sectors. Request must be a multiple of size. */
1497 lp = sd->sc_dk.dk_label;
1498 sectorsize = lp->d_secsize;
1499 if ((size % sectorsize) != 0)
1500 return (EFAULT);
1501 totwrt = size / sectorsize;
1502 blkno = dbtob(blkno) / sectorsize; /* blkno in DEV_BSIZE units */
1503
1504 nsects = lp->d_partitions[part].p_size;
1505 sectoff = lp->d_partitions[part].p_offset;
1506
1507 /* Check transfer bounds against partition size. */
1508 if ((blkno < 0) || ((blkno + totwrt) > nsects))
1509 return (EINVAL);
1510
1511 /* Offset block number to start of partition. */
1512 blkno += sectoff;
1513
1514 xs = &sx;
1515
1516 while (totwrt > 0) {
1517 nwrt = totwrt; /* XXX */
1518 #ifndef SD_DUMP_NOT_TRUSTED
1519 /*
1520 * Fill out the scsi command
1521 */
1522 memset(&cmd, 0, sizeof(cmd));
1523 cmd.opcode = WRITE_BIG;
1524 _lto4b(blkno, cmd.addr);
1525 _lto2b(nwrt, cmd.length);
1526 /*
1527 * Fill out the scsipi_xfer structure
1528 * Note: we cannot sleep as we may be an interrupt
1529 * don't use scsipi_command() as it may want to wait
1530 * for an xs.
1531 */
1532 memset(xs, 0, sizeof(sx));
1533 xs->xs_control |= XS_CTL_NOSLEEP | XS_CTL_POLL |
1534 XS_CTL_DATA_OUT;
1535 xs->xs_status = 0;
1536 xs->xs_periph = periph;
1537 xs->xs_retries = SDRETRIES;
1538 xs->timeout = 10000; /* 10000 millisecs for a disk ! */
1539 xs->cmd = (struct scsipi_generic *)&cmd;
1540 xs->cmdlen = sizeof(cmd);
1541 xs->resid = nwrt * sectorsize;
1542 xs->error = XS_NOERROR;
1543 xs->bp = 0;
1544 xs->data = va;
1545 xs->datalen = nwrt * sectorsize;
1546
1547 /*
1548 * Pass all this info to the scsi driver.
1549 */
1550 scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
1551 if ((xs->xs_status & XS_STS_DONE) == 0 ||
1552 xs->error != XS_NOERROR)
1553 return (EIO);
1554 #else /* SD_DUMP_NOT_TRUSTED */
1555 /* Let's just talk about this first... */
1556 printf("sd%d: dump addr 0x%x, blk %d\n", unit, va, blkno);
1557 delay(500 * 1000); /* half a second */
1558 #endif /* SD_DUMP_NOT_TRUSTED */
1559
1560 /* update block count */
1561 totwrt -= nwrt;
1562 blkno += nwrt;
1563 va += sectorsize * nwrt;
1564 }
1565 sddoingadump = 0;
1566 return (0);
1567 }
1568
1569 int
1570 sd_mode_sense(sd, byte2, sense, size, page, flags, big)
1571 struct sd_softc *sd;
1572 u_int8_t byte2;
1573 void *sense;
1574 size_t size;
1575 int page, flags;
1576 int *big;
1577 {
1578
1579 if ((sd->sc_periph->periph_quirks & PQUIRK_ONLYBIG) &&
1580 !(sd->sc_periph->periph_quirks & PQUIRK_NOBIGMODESENSE)) {
1581 *big = 1;
1582 return scsipi_mode_sense_big(sd->sc_periph, byte2, page, sense,
1583 size + sizeof(struct scsipi_mode_header_big),
1584 flags | XS_CTL_DATA_ONSTACK, SDRETRIES, 6000);
1585 } else {
1586 *big = 0;
1587 return scsipi_mode_sense(sd->sc_periph, byte2, page, sense,
1588 size + sizeof(struct scsipi_mode_header),
1589 flags | XS_CTL_DATA_ONSTACK, SDRETRIES, 6000);
1590 }
1591 }
1592
1593 int
1594 sd_mode_select(sd, byte2, sense, size, flags, big)
1595 struct sd_softc *sd;
1596 u_int8_t byte2;
1597 void *sense;
1598 size_t size;
1599 int flags, big;
1600 {
1601
1602 if (big) {
1603 struct scsipi_mode_header_big *header = sense;
1604
1605 _lto2b(0, header->data_length);
1606 return scsipi_mode_select_big(sd->sc_periph, byte2, sense,
1607 size + sizeof(struct scsipi_mode_header_big),
1608 flags | XS_CTL_DATA_ONSTACK, SDRETRIES, 6000);
1609 } else {
1610 struct scsipi_mode_header *header = sense;
1611
1612 header->data_length = 0;
1613 return scsipi_mode_select(sd->sc_periph, byte2, sense,
1614 size + sizeof(struct scsipi_mode_header),
1615 flags | XS_CTL_DATA_ONSTACK, SDRETRIES, 6000);
1616 }
1617 }
1618
/*
 * Determine disk parameters for simplified (RBC) direct-access
 * devices: READ CAPACITY plus mode page 6, with a fabricated
 * 64-head / 32-sector pseudo-geometry.  Returns an SDGP_RESULT_*
 * code.
 */
int
sd_get_simplifiedparms(sd, dp, flags)
	struct sd_softc *sd;
	struct disk_parms *dp;
	int flags;
{
	/* Layout of the returned header + device parameters page. */
	struct {
		struct scsipi_mode_header header;
		/* no block descriptor */
		u_int8_t pg_code; /* page code (should be 6) */
		u_int8_t pg_length; /* page length (should be 11) */
		u_int8_t wcd; /* bit0: cache disable */
		u_int8_t lbs[2]; /* logical block size */
		u_int8_t size[5]; /* number of log. blocks */
		u_int8_t pp; /* power/performance */
		u_int8_t flags;
		u_int8_t resvd;
	} scsipi_sense;
	u_int64_t sectors;
	int error;

	/*
	 * scsipi_size (ie "read capacity") and mode sense page 6
	 * give the same information. Do both for now, and check
	 * for consistency.
	 * XXX probably differs for removable media
	 */
	dp->blksize = 512;
	if ((sectors = scsipi_size(sd->sc_periph, flags)) == 0)
		return (SDGP_RESULT_OFFLINE);		/* XXX? */

	error = scsipi_mode_sense(sd->sc_periph, SMS_DBD, 6,
	    &scsipi_sense.header, sizeof(scsipi_sense),
	    flags | XS_CTL_DATA_ONSTACK, SDRETRIES, 6000);

	if (error != 0)
		return (SDGP_RESULT_OFFLINE);		/* XXX? */

	dp->blksize = _2btol(scsipi_sense.lbs);
	if (dp->blksize == 0)
		dp->blksize = 512;	/* sane default if the page says 0 */

	/*
	 * Create a pseudo-geometry.
	 */
	dp->heads = 64;
	dp->sectors = 32;
	dp->cyls = sectors / (dp->heads * dp->sectors);
	dp->disksize = _5btol(scsipi_sense.size);
	/* On disagreement, trust READ CAPACITY over the mode page. */
	if (dp->disksize <= UINT32_MAX && dp->disksize != sectors) {
		printf("RBC size: mode sense=%llu, get cap=%llu\n",
		    (unsigned long long)dp->disksize,
		    (unsigned long long)sectors);
		dp->disksize = sectors;
	}
	dp->disksize512 = (dp->disksize * dp->blksize) / DEV_BSIZE;

	return (SDGP_RESULT_OK);
}
1678
/*
 * Get the scsi driver to send a full inquiry to the device and use the
 * results to fill out the disk parameter structure.
 */
/*
 * Determine the device's capacity (dp->disksize) and logical block
 * size (dp->blksize).  Primary source is READ CAPACITY; devices that
 * report zero there are probed with READ FORMAT CAPACITIES instead,
 * which also reveals unformatted/no-medium states.  Returns 0 on
 * success or an SDGP_RESULT_* code.
 */
int
sd_get_capacity(sd, dp, flags)
	struct sd_softc *sd;
	struct disk_parms *dp;
	int flags;
{
	u_int64_t sectors;
	int error;
#if 0
	int i;
	u_int8_t *p;
#endif

	dp->disksize = sectors = scsipi_size(sd->sc_periph, flags);
	if (sectors == 0) {
		/* READ CAPACITY gave nothing; fall back to
		 * READ FORMAT CAPACITIES (one descriptor's worth). */
		struct scsipi_read_format_capacities scsipi_cmd;
		struct {
			struct scsipi_capacity_list_header header;
			struct scsipi_capacity_descriptor desc;
		} __attribute__((packed)) scsipi_result;

		memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
		memset(&scsipi_result, 0, sizeof(scsipi_result));
		scsipi_cmd.opcode = READ_FORMAT_CAPACITIES;
		_lto2b(sizeof(scsipi_result), scsipi_cmd.length);
		error = scsipi_command(sd->sc_periph, (void *)&scsipi_cmd,
		    sizeof(scsipi_cmd), (void *)&scsipi_result,
		    sizeof(scsipi_result), SDRETRIES, 20000,
		    NULL, flags | XS_CTL_DATA_IN | XS_CTL_DATA_ONSTACK /*|
		    XS_CTL_IGNORE_ILLEGAL_REQUEST*/);
		if (error || scsipi_result.header.length == 0)
			return (SDGP_RESULT_OFFLINE);

#if 0
	printf("rfc: length=%d\n", scsipi_result.header.length);
	printf("rfc result:"); for (i = sizeof(struct scsipi_capacity_list_header) + scsipi_result.header.length, p = (void *)&scsipi_result; i; i--, p++) printf(" %02x", *p); printf("\n");
#endif
		/* Descriptor code reports the medium's format state. */
		switch (scsipi_result.desc.byte5 & SCSIPI_CAP_DESC_CODE_MASK) {
		case SCSIPI_CAP_DESC_CODE_RESERVED:
		case SCSIPI_CAP_DESC_CODE_FORMATTED:
			break;

		case SCSIPI_CAP_DESC_CODE_UNFORMATTED:
			return (SDGP_RESULT_UNFORMATTED);

		case SCSIPI_CAP_DESC_CODE_NONE:
			return (SDGP_RESULT_OFFLINE);
		}

		dp->disksize = sectors = _4btol(scsipi_result.desc.nblks);
		if (sectors == 0)
			return (SDGP_RESULT_OFFLINE);		/* XXX? */

		dp->blksize = _3btol(scsipi_result.desc.blklen);
		if (dp->blksize == 0)
			dp->blksize = 512;
	} else {
		/*
		 * READ CAPACITY worked; get the block size from the
		 * block descriptor of a page-0 MODE SENSE, defaulting
		 * to 512 if the descriptor is absent or too short.
		 */
		struct sd_mode_sense_data scsipi_sense;
		int big, bsize;
		struct scsi_blk_desc *bdesc;

		memset(&scsipi_sense, 0, sizeof(scsipi_sense));
		error = sd_mode_sense(sd, 0, &scsipi_sense,
		    sizeof(scsipi_sense.blk_desc), 0, flags | XS_CTL_SILENT, &big);
		dp->blksize = 512;
		if (!error) {
			/* Block descriptor follows whichever header form
			 * sd_mode_sense used (see *big). */
			if (big) {
				bdesc = (void *)(&scsipi_sense.header.big + 1);
				bsize = _2btol(scsipi_sense.header.big.blk_desc_len);
			} else {
				bdesc = (void *)(&scsipi_sense.header.small + 1);
				bsize = scsipi_sense.header.small.blk_desc_len;
			}

#if 0
printf("page 0 sense:"); for (i = sizeof(scsipi_sense), p = (void *)&scsipi_sense; i; i--, p++) printf(" %02x", *p); printf("\n");
	printf("page 0 bsize=%d\n", bsize);
	printf("page 0 ok\n");
#endif

			if (bsize >= 8) {
				dp->blksize = _3btol(bdesc->blklen);
				if (dp->blksize == 0)
					dp->blksize = 512;
			}
		}
	}

	dp->disksize512 = (sectors * dp->blksize) / DEV_BSIZE;
	return (0);
}
1774
1775 int
1776 sd_get_parms(sd, dp, flags)
1777 struct sd_softc *sd;
1778 struct disk_parms *dp;
1779 int flags;
1780 {
1781 struct sd_mode_sense_data scsipi_sense;
1782 int error;
1783 int big;
1784 union scsi_disk_pages *pages;
1785 #if 0
1786 int i;
1787 u_int8_t *p;
1788 #endif
1789
1790 /*
1791 * If offline, the SDEV_MEDIA_LOADED flag will be
1792 * cleared by the caller if necessary.
1793 */
1794 if (sd->type == T_SIMPLE_DIRECT)
1795 return (sd_get_simplifiedparms(sd, dp, flags));
1796
1797 error = sd_get_capacity(sd, dp, flags);
1798 if (error)
1799 return (error);
1800
1801 if (sd->type == T_OPTICAL)
1802 goto page0;
1803
1804 memset(&scsipi_sense, 0, sizeof(scsipi_sense));
1805 error = sd_mode_sense(sd, SMS_DBD, &scsipi_sense,
1806 sizeof(scsipi_sense.blk_desc) +
1807 sizeof(scsipi_sense.pages.rigid_geometry), 4,
1808 flags | XS_CTL_SILENT, &big);
1809 if (!error) {
1810 if (big)
1811 pages = (void *)(&scsipi_sense.header.big + 1);
1812 else
1813 pages = (void *)(&scsipi_sense.header.small + 1);
1814
1815 #if 0
1816 printf("page 4 sense:"); for (i = sizeof(scsipi_sense), p = (void *)&scsipi_sense; i; i--, p++) printf(" %02x", *p); printf("\n");
1817 printf("page 4 pg_code=%d sense=%p/%p\n", pages->rigid_geometry.pg_code, &scsipi_sense, pages);
1818 #endif
1819
1820 if ((pages->rigid_geometry.pg_code & PGCODE_MASK) != 4)
1821 goto page5;
1822
1823 SC_DEBUG(sd->sc_periph, SCSIPI_DB3,
1824 ("%d cyls, %d heads, %d precomp, %d red_write, %d land_zone\n",
1825 _3btol(pages->rigid_geometry.ncyl),
1826 pages->rigid_geometry.nheads,
1827 _2btol(pages->rigid_geometry.st_cyl_wp),
1828 _2btol(pages->rigid_geometry.st_cyl_rwc),
1829 _2btol(pages->rigid_geometry.land_zone)));
1830
1831 /*
1832 * KLUDGE!! (for zone recorded disks)
1833 * give a number of sectors so that sec * trks * cyls
1834 * is <= disk_size
1835 * can lead to wasted space! THINK ABOUT THIS !
1836 */
1837 dp->heads = pages->rigid_geometry.nheads;
1838 dp->cyls = _3btol(pages->rigid_geometry.ncyl);
1839 if (dp->heads == 0 || dp->cyls == 0)
1840 goto page5;
1841 dp->sectors = dp->disksize / (dp->heads * dp->cyls); /* XXX */
1842
1843 dp->rot_rate = _2btol(pages->rigid_geometry.rpm);
1844 if (dp->rot_rate == 0)
1845 dp->rot_rate = 3600;
1846
1847 #if 0
1848 printf("page 4 ok\n");
1849 #endif
1850 goto blksize;
1851 }
1852
1853 page5:
1854 memset(&scsipi_sense, SMS_DBD, sizeof(scsipi_sense));
1855 error = sd_mode_sense(sd, 0, &scsipi_sense,
1856 sizeof(scsipi_sense.blk_desc) +
1857 sizeof(scsipi_sense.pages.flex_geometry), 5,
1858 flags | XS_CTL_SILENT, &big);
1859 if (!error) {
1860 if (big)
1861 pages = (void *)(&scsipi_sense.header.big + 1);
1862 else
1863 pages = (void *)(&scsipi_sense.header.small + 1);
1864
1865 #if 0
1866 printf("page 5 sense:"); for (i = sizeof(scsipi_sense), p = (void *)&scsipi_sense; i; i--, p++) printf(" %02x", *p); printf("\n");
1867 printf("page 5 pg_code=%d sense=%p/%p\n", pages->flex_geometry.pg_code, &scsipi_sense, pages);
1868 #endif
1869
1870 if ((pages->flex_geometry.pg_code & PGCODE_MASK) != 5)
1871 goto page0;
1872
1873 SC_DEBUG(sd->sc_periph, SCSIPI_DB3,
1874 ("%d cyls, %d heads, %d sec, %d bytes/sec\n",
1875 _3btol(pages->flex_geometry.ncyl),
1876 pages->flex_geometry.nheads,
1877 pages->flex_geometry.ph_sec_tr,
1878 _2btol(pages->flex_geometry.bytes_s)));
1879
1880 dp->heads = pages->flex_geometry.nheads;
1881 dp->cyls = _2btol(pages->flex_geometry.ncyl);
1882 dp->sectors = pages->flex_geometry.ph_sec_tr;
1883 if (dp->heads == 0 || dp->cyls == 0 || dp->sectors == 0)
1884 goto page0;
1885
1886 dp->rot_rate = _2btol(pages->rigid_geometry.rpm);
1887 if (dp->rot_rate == 0)
1888 dp->rot_rate = 3600;
1889
1890 #if 0
1891 printf("page 5 ok\n");
1892 #endif
1893 goto blksize;
1894 }
1895
1896 page0:
1897 printf("%s: fabricating a geometry\n", sd->sc_dev.dv_xname);
1898 /* Try calling driver's method for figuring out geometry. */
1899 if (!sd->sc_periph->periph_channel->chan_adapter->adapt_getgeom ||
1900 !(*sd->sc_periph->periph_channel->chan_adapter->adapt_getgeom)
1901 (sd->sc_periph, dp, dp->disksize)) {
1902 /*
1903 * Use adaptec standard fictitious geometry
1904 * this depends on which controller (e.g. 1542C is
1905 * different. but we have to put SOMETHING here..)
1906 */
1907 dp->heads = 64;
1908 dp->sectors = 32;
1909 dp->cyls = dp->disksize / (64 * 32);
1910 }
1911 dp->rot_rate = 3600;
1912
1913 blksize:
1914 return (SDGP_RESULT_OK);
1915 }
1916
1917 int
1918 sd_flush(sd, flags)
1919 struct sd_softc *sd;
1920 int flags;
1921 {
1922 struct scsipi_periph *periph = sd->sc_periph;
1923 struct scsi_synchronize_cache sync_cmd;
1924
1925 /*
1926 * If the device is SCSI-2, issue a SYNCHRONIZE CACHE.
1927 * We issue with address 0 length 0, which should be
1928 * interpreted by the device as "all remaining blocks
1929 * starting at address 0". We ignore ILLEGAL REQUEST
1930 * in the event that the command is not supported by
1931 * the device, and poll for completion so that we know
1932 * that the cache has actually been flushed.
1933 *
1934 * Unless, that is, the device can't handle the SYNCHRONIZE CACHE
1935 * command, as indicated by our quirks flags.
1936 *
1937 * XXX What about older devices?
1938 */
1939 if (periph->periph_version >= 2 &&
1940 (periph->periph_quirks & PQUIRK_NOSYNCCACHE) == 0) {
1941 sd->flags |= SDF_FLUSHING;
1942 memset(&sync_cmd, 0, sizeof(sync_cmd));
1943 sync_cmd.opcode = SCSI_SYNCHRONIZE_CACHE;
1944
1945 return(scsipi_command(periph,
1946 (struct scsipi_generic *)&sync_cmd, sizeof(sync_cmd),
1947 NULL, 0, SDRETRIES, 100000, NULL,
1948 flags|XS_CTL_IGNORE_ILLEGAL_REQUEST));
1949 } else
1950 return(0);
1951 }
1952
/*
 * Report the drive's cache configuration (DIOCGCACHE): derive the
 * DKCACHE_* bits in *bitsp from the caching mode page (page 8).
 * Returns 0 on success or an errno.
 */
int
sd_getcache(sd, bitsp)
	struct sd_softc *sd;
	int *bitsp;
{
	struct scsipi_periph *periph = sd->sc_periph;
	struct sd_mode_sense_data scsipi_sense;
	int error, bits = 0;
	int big;
	union scsi_disk_pages *pages;

	/* Mode pages require at least a SCSI-2 device. */
	if (periph->periph_version < 2)
		return (EOPNOTSUPP);

	/* Fetch the current values of the caching page. */
	memset(&scsipi_sense, 0, sizeof(scsipi_sense));
	error = sd_mode_sense(sd, SMS_DBD, &scsipi_sense,
	    sizeof(scsipi_sense.pages.caching_params), 8, 0, &big);
	if (error)
		return (error);

	/* Page data follows whichever header form sd_mode_sense used. */
	if (big)
		pages = (void *)(&scsipi_sense.header.big + 1);
	else
		pages = (void *)(&scsipi_sense.header.small + 1);

	/* RCD is a disable bit: clear means the read cache is on. */
	if ((pages->caching_params.flags & CACHING_RCD) == 0)
		bits |= DKCACHE_READ;
	if (pages->caching_params.flags & CACHING_WCE)
		bits |= DKCACHE_WRITE;
	if (pages->caching_params.pg_code & PGCODE_PS)
		bits |= DKCACHE_SAVE;

	/*
	 * Fetch the changeable-values form of the same page to learn
	 * which cache bits the device lets us modify.
	 */
	memset(&scsipi_sense, 0, sizeof(scsipi_sense));
	error = sd_mode_sense(sd, SMS_DBD, &scsipi_sense,
	    sizeof(scsipi_sense.pages.caching_params),
	    SMS_PAGE_CTRL_CHANGEABLE|8, 0, &big);
	if (error == 0) {
		if (big)
			pages = (void *)(&scsipi_sense.header.big + 1);
		else
			pages = (void *)(&scsipi_sense.header.small + 1);

		if (pages->caching_params.flags & CACHING_RCD)
			bits |= DKCACHE_RCHANGE;
		if (pages->caching_params.flags & CACHING_WCE)
			bits |= DKCACHE_WCHANGE;
	}

	*bitsp = bits;

	return (0);
}
2005
/*
 * Change the drive's cache configuration (DIOCSCACHE): read the
 * current caching page (page 8), update its RCD/WCE bits to match
 * the requested DKCACHE_* bits, and write it back via MODE SELECT.
 * Returns 0 on success or an errno.
 */
int
sd_setcache(sd, bits)
	struct sd_softc *sd;
	int bits;
{
	struct scsipi_periph *periph = sd->sc_periph;
	struct sd_mode_sense_data scsipi_sense;
	int error;
	uint8_t oflags, byte2 = 0;
	int big;
	union scsi_disk_pages *pages;

	/* Mode pages require at least a SCSI-2 device. */
	if (periph->periph_version < 2)
		return (EOPNOTSUPP);

	/* Read the current page to use as the template we modify. */
	memset(&scsipi_sense, 0, sizeof(scsipi_sense));
	error = sd_mode_sense(sd, SMS_DBD, &scsipi_sense,
	    sizeof(scsipi_sense.pages.caching_params), 8, 0, &big);
	if (error)
		return (error);

	if (big)
		pages = (void *)(&scsipi_sense.header.big + 1);
	else
		pages = (void *)(&scsipi_sense.header.small + 1);

	/* Remember the original flags so we can skip a no-op select. */
	oflags = pages->caching_params.flags;

	/* RCD is a *disable* bit: clear it to enable the read cache. */
	if (bits & DKCACHE_READ)
		pages->caching_params.flags &= ~CACHING_RCD;
	else
		pages->caching_params.flags |= CACHING_RCD;

	if (bits & DKCACHE_WRITE)
		pages->caching_params.flags |= CACHING_WCE;
	else
		pages->caching_params.flags &= ~CACHING_WCE;

	if (oflags == pages->caching_params.flags)
		return (0);

	/* Strip the PS bit before sending the page back down. */
	pages->caching_params.pg_code &= PGCODE_MASK;

	if (bits & DKCACHE_SAVE)
		byte2 |= SMS_SP;	/* ask the device to save the page */

	return (sd_mode_select(sd, byte2|SMS_PF, &scsipi_sense,
	    sizeof(struct scsipi_mode_page_header) +
	    pages->caching_params.pg_length, 0, big));
}
2056