/*	$NetBSD: sd.c,v 1.269.6.7 2009/01/17 13:29:08 mjf Exp $	*/
2
3 /*-
4 * Copyright (c) 1998, 2003, 2004 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Charles M. Hannum.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
33 * Originally written by Julian Elischer (julian (at) dialix.oz.au)
34 * for TRW Financial Systems for use under the MACH(2.5) operating system.
35 *
36 * TRW Financial Systems, in accordance with their agreement with Carnegie
37 * Mellon University, makes this software available to CMU to distribute
38 * or use in any manner that they see fit as long as this message is kept with
39 * the software. For this reason TFS also grants any other persons or
40 * organisations permission to use or modify this software.
41 *
42 * TFS supplies this software to be publicly redistributed
43 * on the understanding that TFS is not responsible for the correct
44 * functioning of this software in any circumstances.
45 *
46 * Ported to run under 386BSD by Julian Elischer (julian (at) dialix.oz.au) Sept 1992
47 */
48
49 #include <sys/cdefs.h>
50 __KERNEL_RCSID(0, "$NetBSD: sd.c,v 1.269.6.7 2009/01/17 13:29:08 mjf Exp $");
51
52 #include "opt_scsi.h"
53 #include "rnd.h"
54
55 #include <sys/param.h>
56 #include <sys/systm.h>
57 #include <sys/kernel.h>
58 #include <sys/file.h>
59 #include <sys/stat.h>
60 #include <sys/ioctl.h>
61 #include <sys/scsiio.h>
62 #include <sys/buf.h>
63 #include <sys/bufq.h>
64 #include <sys/uio.h>
65 #include <sys/malloc.h>
66 #include <sys/errno.h>
67 #include <sys/device.h>
68 #include <sys/disklabel.h>
69 #include <sys/disk.h>
70 #include <sys/proc.h>
71 #include <sys/conf.h>
72 #include <sys/vnode.h>
73 #if NRND > 0
74 #include <sys/rnd.h>
75 #endif
76
77 #include <dev/scsipi/scsi_spc.h>
78 #include <dev/scsipi/scsipi_all.h>
79 #include <dev/scsipi/scsi_all.h>
80 #include <dev/scsipi/scsipi_disk.h>
81 #include <dev/scsipi/scsi_disk.h>
82 #include <dev/scsipi/scsiconf.h>
83 #include <dev/scsipi/scsipi_base.h>
84 #include <dev/scsipi/sdvar.h>
85
86 #include <prop/proplib.h>
87
88 #define SDUNIT(dev) DISKUNIT(dev)
89 #define SDPART(dev) DISKPART(dev)
90 #define SDMINOR(unit, part) DISKMINOR(unit, part)
91 #define MAKESDDEV(maj, unit, part) MAKEDISKDEV(maj, unit, part)
92
93 #define SDLABELDEV(dev) (MAKESDDEV(major(dev), SDUNIT(dev), RAW_PART))
94
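/*
 * Fallback logical block size, used when a device reports a sector
 * size that sd_validate_blksize() rejects.
 */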
95 #define SD_DEFAULT_BLKSIZE 512
96
97 static void sdminphys(struct buf *);
98 static void sdgetdefaultlabel(struct sd_softc *, struct disklabel *);
99 static int sdgetdisklabel(struct sd_softc *);
100 static void sdstart(struct scsipi_periph *);
101 static void sdrestart(void *);
102 static void sddone(struct scsipi_xfer *, int);
103 static bool sd_suspend(device_t PMF_FN_PROTO);
104 static void sd_shutdown(void *);
105 static int sd_interpret_sense(struct scsipi_xfer *);
106
107 static int sd_mode_sense(struct sd_softc *, u_int8_t, void *, size_t, int,
108 int, int *);
109 static int sd_mode_select(struct sd_softc *, u_int8_t, void *, size_t, int,
110 int);
111 static int sd_validate_blksize(struct scsipi_periph *, int);
112 static u_int64_t sd_read_capacity(struct scsipi_periph *, int *, int flags);
113 static int sd_get_simplifiedparms(struct sd_softc *, struct disk_parms *,
114 int);
115 static int sd_get_capacity(struct sd_softc *, struct disk_parms *, int);
116 static int sd_get_parms(struct sd_softc *, struct disk_parms *, int);
117 static int sd_get_parms_page4(struct sd_softc *, struct disk_parms *,
118 int);
119 static int sd_get_parms_page5(struct sd_softc *, struct disk_parms *,
120 int);
121
122 static int sd_flush(struct sd_softc *, int);
123 static int sd_getcache(struct sd_softc *, int *);
124 static int sd_setcache(struct sd_softc *, int);
125
126 static int sdmatch(struct device *, struct cfdata *, void *);
127 static void sdattach(struct device *, struct device *, void *);
128 static int sdactivate(struct device *, enum devact);
129 static int sddetach(struct device *, int);
130 static void sd_set_properties(struct sd_softc *);
131
132 CFATTACH_DECL_NEW(sd, sizeof(struct sd_softc), sdmatch, sdattach, sddetach,
133 sdactivate);
134
135 extern struct cfdriver sd_cd;
136
137 static const struct scsipi_inquiry_pattern sd_patterns[] = {
138 {T_DIRECT, T_FIXED,
139 "", "", ""},
140 {T_DIRECT, T_REMOV,
141 "", "", ""},
142 {T_OPTICAL, T_FIXED,
143 "", "", ""},
144 {T_OPTICAL, T_REMOV,
145 "", "", ""},
146 {T_SIMPLE_DIRECT, T_FIXED,
147 "", "", ""},
148 {T_SIMPLE_DIRECT, T_REMOV,
149 "", "", ""},
150 };
151
152 static dev_type_open(sdopen);
153 static dev_type_close(sdclose);
154 static dev_type_read(sdread);
155 static dev_type_write(sdwrite);
156 static dev_type_ioctl(sdioctl);
157 static dev_type_strategy(sdstrategy);
158 static dev_type_dump(sddump);
159 static dev_type_size(sdsize);
160
161 const struct bdevsw sd_bdevsw = {
162 sdopen, sdclose, sdstrategy, sdioctl, sddump, sdsize, D_DISK
163 };
164
165 const struct cdevsw sd_cdevsw = {
166 sdopen, sdclose, sdread, sdwrite, sdioctl,
167 nostop, notty, nopoll, nommap, nokqfilter, D_DISK
168 };
169
170 static struct dkdriver sddkdriver = { sdstrategy, sdminphys };
171
172 static const struct scsipi_periphsw sd_switch = {
173 sd_interpret_sense, /* check our error handler first */
174 sdstart, /* have a queue, served by this */
175 NULL, /* have no async handler */
176 sddone, /* deal with stats at interrupt time */
177 };
178
179 struct sd_mode_sense_data {
180 /*
181 * XXX
182 * We are not going to parse this as-is -- it just has to be large
183 * enough.
184 */
185 union {
186 struct scsi_mode_parameter_header_6 small;
187 struct scsi_mode_parameter_header_10 big;
188 } header;
189 struct scsi_general_block_descriptor blk_desc;
190 union scsi_disk_pages pages;
191 };
192
193 /*
 * The routine called by the low level scsi routine when it discovers
 * a device suitable for this driver.
196 */
197 static int
198 sdmatch(struct device *parent, struct cfdata *match,
199 void *aux)
200 {
201 struct scsipibus_attach_args *sa = aux;
202 int priority;
203
204 (void)scsipi_inqmatch(&sa->sa_inqbuf,
205 sd_patterns, sizeof(sd_patterns) / sizeof(sd_patterns[0]),
206 sizeof(sd_patterns[0]), &priority);
207
208 return (priority);
209 }
210
211 /*
212 * Attach routine common to atapi & scsi.
213 */
214 static void
215 sdattach(struct device *parent, struct device *self, void *aux)
216 {
217 struct sd_softc *sd = device_private(self);
218 struct scsipibus_attach_args *sa = aux;
219 struct scsipi_periph *periph = sa->sa_periph;
220 int error, result;
221 struct disk_parms *dp = &sd->params;
222 char pbuf[9];
223 int i;
224 uint16_t np;
225 int cmajor, bmajor;
226
227 SC_DEBUG(periph, SCSIPI_DB2, ("sdattach: "));
228
229 sd->sc_dev = self;
230 sd->type = (sa->sa_inqbuf.type & SID_TYPE);
231 strncpy(sd->name, sa->sa_inqbuf.product, sizeof(sd->name));
232 if (sd->type == T_SIMPLE_DIRECT)
233 periph->periph_quirks |= PQUIRK_ONLYBIG | PQUIRK_NOBIGMODESENSE;
234
235 if (scsipi_periph_bustype(sa->sa_periph) == SCSIPI_BUSTYPE_SCSI &&
236 periph->periph_version == 0)
237 sd->flags |= SDF_ANCIENT;
238
239 bufq_alloc(&sd->buf_queue, BUFQ_DISK_DEFAULT_STRAT, BUFQ_SORT_RAWBLOCK);
240
241 callout_init(&sd->sc_callout, 0);
242
243 /*
244 * Store information needed to contact our base driver
245 */
246 sd->sc_periph = periph;
247
248 periph->periph_dev = sd->sc_dev;
249 periph->periph_switch = &sd_switch;
250
251 /*
252 * Increase our openings to the maximum-per-periph
253 * supported by the adapter. This will either be
254 * clamped down or grown by the adapter if necessary.
255 */
256 periph->periph_openings =
257 SCSIPI_CHAN_MAX_PERIPH(periph->periph_channel);
258 periph->periph_flags |= PERIPH_GROW_OPENINGS;
259
260 /*
261 * Initialize and attach the disk structure.
262 */
263 disk_init(&sd->sc_dk, device_xname(sd->sc_dev), &sddkdriver);
264 disk_attach(&sd->sc_dk);
265
266 /*
267 * Use the subdriver to request information regarding the drive.
268 */
269 aprint_naive("\n");
270 aprint_normal("\n");
271
272 error = scsipi_test_unit_ready(periph,
273 XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
274 XS_CTL_IGNORE_MEDIA_CHANGE | XS_CTL_SILENT_NODEV);
275
276 if (error)
277 result = SDGP_RESULT_OFFLINE;
278 else
279 result = sd_get_parms(sd, &sd->params, XS_CTL_DISCOVERY);
280 aprint_normal_dev(sd->sc_dev, "");
281 switch (result) {
282 case SDGP_RESULT_OK:
283 format_bytes(pbuf, sizeof(pbuf),
284 (u_int64_t)dp->disksize * dp->blksize);
285 aprint_normal(
286 "%s, %ld cyl, %ld head, %ld sec, %ld bytes/sect x %llu sectors",
287 pbuf, dp->cyls, dp->heads, dp->sectors, dp->blksize,
288 (unsigned long long)dp->disksize);
289 break;
290
291 case SDGP_RESULT_OFFLINE:
292 aprint_normal("drive offline");
293 break;
294
295 case SDGP_RESULT_UNFORMATTED:
296 aprint_normal("unformatted media");
297 break;
298
299 #ifdef DIAGNOSTIC
300 default:
301 panic("sdattach: unknown result from get_parms");
302 break;
303 #endif
304 }
305 aprint_normal("\n");
306
307 /*
308 * Establish a shutdown hook so that we can ensure that
309 * our data has actually made it onto the platter at
310 * shutdown time. Note that this relies on the fact
311 * that the shutdown hook code puts us at the head of
312 * the list (thus guaranteeing that our hook runs before
313 * our ancestors').
314 */
315 if ((sd->sc_sdhook =
316 shutdownhook_establish(sd_shutdown, sd)) == NULL)
317 aprint_error_dev(sd->sc_dev,
318 "WARNING: unable to establish shutdown hook\n");
319
320 if (!pmf_device_register(self, sd_suspend, NULL))
321 aprint_error_dev(self, "couldn't establish power handler\n");
322
323 #if NRND > 0
324 /*
325 * attach the device into the random source list
326 */
327 rnd_attach_source(&sd->rnd_source, device_xname(sd->sc_dev),
328 RND_TYPE_DISK, 0);
329 #endif
330
331 /* Discover wedges on this disk. */
332 dkwedge_discover(&sd->sc_dk);
333
334 sd_set_properties(sd);
335
336 np = sd->sc_dk.dk_label->d_npartitions;
337
338 /* locate the major numbers */
339 bmajor = bdevsw_lookup_major(&sd_bdevsw);
340 cmajor = cdevsw_lookup_major(&sd_cdevsw);
341 for (i = 0; i < 16; i++) {
342 device_register_name(
343 MAKEDISKDEV(bmajor, device_unit(&sd->sc_dev), i),
344 &sd->sc_dev, false, DEV_DISK,
345 "sd%d%c", device_unit(&sd->sc_dev), 'a'+i);
346
347 device_register_name(
348 MAKEDISKDEV(cmajor, device_unit(&sd->sc_dev), i),
349 &sd->sc_dev, true, DEV_DISK,
350 "rsd%d%c", device_unit(&sd->sc_dev), 'a' + i);
351 }
352 }
353
354 static int
355 sdactivate(struct device *self, enum devact act)
356 {
357 int rv = 0;
358
359 switch (act) {
360 case DVACT_ACTIVATE:
361 rv = EOPNOTSUPP;
362 break;
363
364 case DVACT_DEACTIVATE:
365 /*
366 * Nothing to do; we key off the device's DVF_ACTIVE.
367 */
368 break;
369 }
370 return (rv);
371 }
372
373 static int
374 sddetach(struct device *self, int flags)
375 {
376 struct sd_softc *sd = device_private(self);
377 int s, bmaj, cmaj, i, mn;
378
379 device_deregister_all(self);
380
381 /* locate the major number */
382 bmaj = bdevsw_lookup_major(&sd_bdevsw);
383 cmaj = cdevsw_lookup_major(&sd_cdevsw);
384
385 /*
386 * Nuke the vnodes for any open instances and deregister
387 * any device node names.
388 */
389 for (i = 0; i < MAXPARTITIONS; i++) {
390 mn = SDMINOR(device_unit(self), i);
391 vdevgone(bmaj, mn, mn, VBLK);
392 vdevgone(cmaj, mn, mn, VCHR);
393 }
394
395 /* kill any pending restart */
396 callout_stop(&sd->sc_callout);
397
398 /* Delete all of our wedges. */
399 dkwedge_delall(&sd->sc_dk);
400
401 s = splbio();
402
403 /* Kill off any queued buffers. */
404 bufq_drain(sd->buf_queue);
405
406 bufq_free(sd->buf_queue);
407
408 /* Kill off any pending commands. */
409 scsipi_kill_pending(sd->sc_periph);
410
411 splx(s);
412
413 /* Detach from the disk list. */
414 disk_detach(&sd->sc_dk);
415 disk_destroy(&sd->sc_dk);
416
417 pmf_device_deregister(self);
418 shutdownhook_disestablish(sd->sc_sdhook);
419
420 #if NRND > 0
421 /* Unhook the entropy source. */
422 rnd_detach_source(&sd->rnd_source);
423 #endif
424
425 return (0);
426 }
427
428 /*
 * Open the device. Make sure the partition info is as up-to-date as can be.
430 */
431 static int
432 sdopen(dev_t dev, int flag, int fmt, struct lwp *l)
433 {
434 struct sd_softc *sd;
435 struct scsipi_periph *periph;
436 struct scsipi_adapter *adapt;
437 int unit, part;
438 int error;
439
440 unit = SDUNIT(dev);
441 sd = device_lookup_private(&sd_cd, unit);
442 if (sd == NULL)
443 return (ENXIO);
444
445 if (!device_is_active(sd->sc_dev))
446 return (ENODEV);
447
448 part = SDPART(dev);
449
450 mutex_enter(&sd->sc_dk.dk_openlock);
451
452 /*
453 * If there are wedges, and this is not RAW_PART, then we
454 * need to fail.
455 */
456 if (sd->sc_dk.dk_nwedges != 0 && part != RAW_PART) {
457 error = EBUSY;
458 goto bad1;
459 }
460
461 periph = sd->sc_periph;
462 adapt = periph->periph_channel->chan_adapter;
463
464 SC_DEBUG(periph, SCSIPI_DB1,
465 ("sdopen: dev=0x%"PRIx64" (unit %d (of %d), partition %d)\n", dev, unit,
466 sd_cd.cd_ndevs, part));
467
468 /*
469 * If this is the first open of this device, add a reference
470 * to the adapter.
471 */
472 if (sd->sc_dk.dk_openmask == 0 &&
473 (error = scsipi_adapter_addref(adapt)) != 0)
474 goto bad1;
475
476 if ((periph->periph_flags & PERIPH_OPEN) != 0) {
477 /*
478 * If any partition is open, but the disk has been invalidated,
		 * disallow further opens of non-raw partitions.
480 */
481 if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0 &&
482 (part != RAW_PART || fmt != S_IFCHR)) {
483 error = EIO;
484 goto bad2;
485 }
486 } else {
487 int silent;
488
489 if (part == RAW_PART && fmt == S_IFCHR)
490 silent = XS_CTL_SILENT;
491 else
492 silent = 0;
493
494 /* Check that it is still responding and ok. */
495 error = scsipi_test_unit_ready(periph,
496 XS_CTL_IGNORE_ILLEGAL_REQUEST | XS_CTL_IGNORE_MEDIA_CHANGE |
497 silent);
498
499 /*
500 * Start the pack spinning if necessary. Always allow the
		 * raw partition to be opened, for raw IOCTLs. Data transfers
502 * will check for SDEV_MEDIA_LOADED.
503 */
504 if (error == EIO) {
505 int error2;
506
507 error2 = scsipi_start(periph, SSS_START, silent);
508 switch (error2) {
509 case 0:
510 error = 0;
511 break;
512 case EIO:
513 case EINVAL:
514 break;
515 default:
516 error = error2;
517 break;
518 }
519 }
520 if (error) {
521 if (silent)
522 goto out;
523 goto bad2;
524 }
525
526 periph->periph_flags |= PERIPH_OPEN;
527
528 if (periph->periph_flags & PERIPH_REMOVABLE) {
529 /* Lock the pack in. */
530 error = scsipi_prevent(periph, SPAMR_PREVENT_DT,
531 XS_CTL_IGNORE_ILLEGAL_REQUEST |
532 XS_CTL_IGNORE_MEDIA_CHANGE |
533 XS_CTL_SILENT);
534 if (error)
535 goto bad3;
536 }
537
538 if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0) {
539 int param_error;
540 periph->periph_flags |= PERIPH_MEDIA_LOADED;
541
542 /*
543 * Load the physical device parameters.
544 *
545 * Note that if media is present but unformatted,
546 * we allow the open (so that it can be formatted!).
547 * The drive should refuse real I/O, if the media is
548 * unformatted.
549 */
550 if ((param_error = sd_get_parms(sd, &sd->params, 0))
551 == SDGP_RESULT_OFFLINE) {
552 error = ENXIO;
553 periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
554 goto bad3;
555 }
556 SC_DEBUG(periph, SCSIPI_DB3, ("Params loaded "));
557
558 /* Load the partition info if not already loaded. */
559 if (param_error == 0) {
560 if ((sdgetdisklabel(sd) != 0) && (part != RAW_PART)) {
561 error = EIO;
562 goto bad3;
563 }
564 SC_DEBUG(periph, SCSIPI_DB3,
565 ("Disklabel loaded "));
566 }
567 }
568 }
569
570 /* Check that the partition exists. */
571 if (part != RAW_PART &&
572 (part >= sd->sc_dk.dk_label->d_npartitions ||
573 sd->sc_dk.dk_label->d_partitions[part].p_fstype == FS_UNUSED)) {
574 error = ENXIO;
575 goto bad3;
576 }
577
out:	/* Ensure only one open at a time. */
579 switch (fmt) {
580 case S_IFCHR:
581 sd->sc_dk.dk_copenmask |= (1 << part);
582 break;
583 case S_IFBLK:
584 sd->sc_dk.dk_bopenmask |= (1 << part);
585 break;
586 }
587 sd->sc_dk.dk_openmask =
588 sd->sc_dk.dk_copenmask | sd->sc_dk.dk_bopenmask;
589
590 SC_DEBUG(periph, SCSIPI_DB3, ("open complete\n"));
591 mutex_exit(&sd->sc_dk.dk_openlock);
592 return (0);
593
594 bad3:
595 if (sd->sc_dk.dk_openmask == 0) {
596 if (periph->periph_flags & PERIPH_REMOVABLE)
597 scsipi_prevent(periph, SPAMR_ALLOW,
598 XS_CTL_IGNORE_ILLEGAL_REQUEST |
599 XS_CTL_IGNORE_MEDIA_CHANGE |
600 XS_CTL_SILENT);
601 periph->periph_flags &= ~PERIPH_OPEN;
602 }
603
604 bad2:
605 if (sd->sc_dk.dk_openmask == 0)
606 scsipi_adapter_delref(adapt);
607
608 bad1:
609 mutex_exit(&sd->sc_dk.dk_openlock);
610 return (error);
611 }
612
613 /*
 * Close the device. Only called if we are the LAST occurrence of an open
 * device. Convenient now but usually a pain.
616 */
617 static int
618 sdclose(dev_t dev, int flag, int fmt, struct lwp *l)
619 {
620 struct sd_softc *sd = device_lookup_private(&sd_cd, SDUNIT(dev));
621 struct scsipi_periph *periph = sd->sc_periph;
622 struct scsipi_adapter *adapt = periph->periph_channel->chan_adapter;
623 int part = SDPART(dev);
624
625 mutex_enter(&sd->sc_dk.dk_openlock);
626 switch (fmt) {
627 case S_IFCHR:
628 sd->sc_dk.dk_copenmask &= ~(1 << part);
629 break;
630 case S_IFBLK:
631 sd->sc_dk.dk_bopenmask &= ~(1 << part);
632 break;
633 }
634 sd->sc_dk.dk_openmask =
635 sd->sc_dk.dk_copenmask | sd->sc_dk.dk_bopenmask;
636
637 if (sd->sc_dk.dk_openmask == 0) {
638 /*
639 * If the disk cache needs flushing, and the disk supports
640 * it, do it now.
641 */
642 if ((sd->flags & SDF_DIRTY) != 0) {
643 if (sd_flush(sd, 0)) {
644 aprint_error_dev(sd->sc_dev,
645 "cache synchronization failed\n");
646 sd->flags &= ~SDF_FLUSHING;
647 } else
648 sd->flags &= ~(SDF_FLUSHING|SDF_DIRTY);
649 }
650
651 scsipi_wait_drain(periph);
652
653 if (periph->periph_flags & PERIPH_REMOVABLE)
654 scsipi_prevent(periph, SPAMR_ALLOW,
655 XS_CTL_IGNORE_ILLEGAL_REQUEST |
656 XS_CTL_IGNORE_NOT_READY |
657 XS_CTL_SILENT);
658 periph->periph_flags &= ~PERIPH_OPEN;
659
660 scsipi_wait_drain(periph);
661
662 scsipi_adapter_delref(adapt);
663 }
664
665 mutex_exit(&sd->sc_dk.dk_openlock);
666 return (0);
667 }
668
669 /*
670 * Actually translate the requested transfer into one the physical driver
671 * can understand. The transfer is described by a buf and will include
672 * only one physical transfer.
673 */
674 static void
675 sdstrategy(struct buf *bp)
676 {
677 struct sd_softc *sd = device_lookup_private(&sd_cd, SDUNIT(bp->b_dev));
678 struct scsipi_periph *periph = sd->sc_periph;
679 struct disklabel *lp;
680 daddr_t blkno;
681 int s;
682 bool sector_aligned;
683
684 SC_DEBUG(sd->sc_periph, SCSIPI_DB2, ("sdstrategy "));
685 SC_DEBUG(sd->sc_periph, SCSIPI_DB1,
686 ("%d bytes @ blk %" PRId64 "\n", bp->b_bcount, bp->b_blkno));
687 /*
688 * If the device has been made invalid, error out
689 */
690 if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0 ||
691 !device_is_active(sd->sc_dev)) {
692 if (periph->periph_flags & PERIPH_OPEN)
693 bp->b_error = EIO;
694 else
695 bp->b_error = ENODEV;
696 goto done;
697 }
698
699 lp = sd->sc_dk.dk_label;
700
701 /*
	 * The transfer must be a whole number of blocks, and the offset must
	 * not be negative.
704 */
705 if (lp->d_secsize == DEV_BSIZE) {
706 sector_aligned = (bp->b_bcount & (DEV_BSIZE - 1)) == 0;
707 } else {
708 sector_aligned = (bp->b_bcount % lp->d_secsize) == 0;
709 }
710 if (!sector_aligned || bp->b_blkno < 0) {
711 bp->b_error = EINVAL;
712 goto done;
713 }
714 /*
	 * If it's a null transfer, return immediately.
716 */
717 if (bp->b_bcount == 0)
718 goto done;
719
720 /*
	 * Do bounds checking and adjust the transfer. If there's an error,
	 * process it; if we're at the end of the partition, just return.
723 */
724 if (SDPART(bp->b_dev) == RAW_PART) {
725 if (bounds_check_with_mediasize(bp, DEV_BSIZE,
726 sd->params.disksize512) <= 0)
727 goto done;
728 } else {
729 if (bounds_check_with_label(&sd->sc_dk, bp,
730 (sd->flags & (SDF_WLABEL|SDF_LABELLING)) != 0) <= 0)
731 goto done;
732 }
733
734 /*
735 * Now convert the block number to absolute and put it in
736 * terms of the device's logical block size.
737 */
738 if (lp->d_secsize == DEV_BSIZE)
739 blkno = bp->b_blkno;
740 else if (lp->d_secsize > DEV_BSIZE)
741 blkno = bp->b_blkno / (lp->d_secsize / DEV_BSIZE);
742 else
743 blkno = bp->b_blkno * (DEV_BSIZE / lp->d_secsize);
744
745 if (SDPART(bp->b_dev) != RAW_PART)
746 blkno += lp->d_partitions[SDPART(bp->b_dev)].p_offset;
747
748 bp->b_rawblkno = blkno;
749
750 s = splbio();
751
752 /*
753 * Place it in the queue of disk activities for this disk.
754 *
755 * XXX Only do disksort() if the current operating mode does not
756 * XXX include tagged queueing.
757 */
758 bufq_put(sd->buf_queue, bp);
759
760 /*
761 * Tell the device to get going on the transfer if it's
762 * not doing anything, otherwise just wait for completion
763 */
764 sdstart(sd->sc_periph);
765
766 splx(s);
767 return;
768
769 done:
770 /*
771 * Correctly set the buf to indicate a completed xfer
772 */
773 bp->b_resid = bp->b_bcount;
774 biodone(bp);
775 }
776
777 /*
778 * sdstart looks to see if there is a buf waiting for the device
779 * and that the device is not already busy. If both are true,
 * it dequeues the buf and creates a scsi command to perform the
781 * transfer in the buf. The transfer request will call scsipi_done
782 * on completion, which will in turn call this routine again
783 * so that the next queued transfer is performed.
784 * The bufs are queued by the strategy routine (sdstrategy)
785 *
786 * This routine is also called after other non-queued requests
787 * have been made of the scsi driver, to ensure that the queue
788 * continues to be drained.
789 *
790 * must be called at the correct (highish) spl level
791 * sdstart() is called at splbio from sdstrategy, sdrestart and scsipi_done
792 */
793 static void
794 sdstart(struct scsipi_periph *periph)
795 {
796 struct sd_softc *sd = device_private(periph->periph_dev);
797 struct disklabel *lp = sd->sc_dk.dk_label;
798 struct buf *bp = 0;
799 struct scsipi_rw_16 cmd16;
800 struct scsipi_rw_10 cmd_big;
801 struct scsi_rw_6 cmd_small;
802 struct scsipi_generic *cmdp;
803 struct scsipi_xfer *xs;
804 int nblks, cmdlen, error, flags;
805
806 SC_DEBUG(periph, SCSIPI_DB2, ("sdstart "));
807 /*
808 * Check if the device has room for another command
809 */
810 while (periph->periph_active < periph->periph_openings) {
811 /*
		 * there is excess capacity, but a special request is waiting.
813 * It'll need the adapter as soon as we clear out of the
814 * way and let it run (user level wait).
815 */
816 if (periph->periph_flags & PERIPH_WAITING) {
817 periph->periph_flags &= ~PERIPH_WAITING;
818 wakeup((void *)periph);
819 return;
820 }
821
822 /*
823 * If the device has become invalid, abort all the
824 * reads and writes until all files have been closed and
825 * re-opened
826 */
827 if (__predict_false(
828 (periph->periph_flags & PERIPH_MEDIA_LOADED) == 0)) {
829 if ((bp = bufq_get(sd->buf_queue)) != NULL) {
830 bp->b_error = EIO;
831 bp->b_resid = bp->b_bcount;
832 biodone(bp);
833 continue;
834 } else {
835 return;
836 }
837 }
838
839 /*
		 * See if there is a buf with work for us to do.
841 */
842 if ((bp = bufq_peek(sd->buf_queue)) == NULL)
843 return;
844
845 /*
846 * We have a buf, now we should make a command.
847 */
848
849 if (lp->d_secsize == DEV_BSIZE)
850 nblks = bp->b_bcount >> DEV_BSHIFT;
851 else
852 nblks = howmany(bp->b_bcount, lp->d_secsize);
853
854 /*
855 * Fill out the scsi command. Use the smallest CDB possible
856 * (6-byte, 10-byte, or 16-byte).
857 */
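		/*
		 * A 6-byte CDB can only carry a 21-bit block address and an
		 * 8-bit transfer length; a 10-byte CDB a 32-bit address and
		 * a 16-bit length. Anything larger needs the 16-byte form.
		 */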
858 if (((bp->b_rawblkno & 0x1fffff) == bp->b_rawblkno) &&
859 ((nblks & 0xff) == nblks) &&
860 !(periph->periph_quirks & PQUIRK_ONLYBIG)) {
861 /* 6-byte CDB */
862 memset(&cmd_small, 0, sizeof(cmd_small));
863 cmd_small.opcode = (bp->b_flags & B_READ) ?
864 SCSI_READ_6_COMMAND : SCSI_WRITE_6_COMMAND;
865 _lto3b(bp->b_rawblkno, cmd_small.addr);
866 cmd_small.length = nblks & 0xff;
867 cmdlen = sizeof(cmd_small);
868 cmdp = (struct scsipi_generic *)&cmd_small;
869 } else if ((bp->b_rawblkno & 0xffffffff) == bp->b_rawblkno) {
870 /* 10-byte CDB */
871 memset(&cmd_big, 0, sizeof(cmd_big));
872 cmd_big.opcode = (bp->b_flags & B_READ) ?
873 READ_10 : WRITE_10;
874 _lto4b(bp->b_rawblkno, cmd_big.addr);
875 _lto2b(nblks, cmd_big.length);
876 cmdlen = sizeof(cmd_big);
877 cmdp = (struct scsipi_generic *)&cmd_big;
878 } else {
879 /* 16-byte CDB */
880 memset(&cmd16, 0, sizeof(cmd16));
881 cmd16.opcode = (bp->b_flags & B_READ) ?
882 READ_16 : WRITE_16;
883 _lto8b(bp->b_rawblkno, cmd16.addr);
884 _lto4b(nblks, cmd16.length);
885 cmdlen = sizeof(cmd16);
886 cmdp = (struct scsipi_generic *)&cmd16;
887 }
888
889 /* Instrumentation. */
890 disk_busy(&sd->sc_dk);
891
892 /*
893 * Mark the disk dirty so that the cache will be
894 * flushed on close.
895 */
896 if ((bp->b_flags & B_READ) == 0)
897 sd->flags |= SDF_DIRTY;
898
899 /*
900 * Figure out what flags to use.
901 */
902 flags = XS_CTL_NOSLEEP|XS_CTL_ASYNC|XS_CTL_SIMPLE_TAG;
903 if (bp->b_flags & B_READ)
904 flags |= XS_CTL_DATA_IN;
905 else
906 flags |= XS_CTL_DATA_OUT;
907
908 /*
909 * Call the routine that chats with the adapter.
910 * Note: we cannot sleep as we may be an interrupt
911 */
912 xs = scsipi_make_xs(periph, cmdp, cmdlen,
913 (u_char *)bp->b_data, bp->b_bcount,
914 SDRETRIES, SD_IO_TIMEOUT, bp, flags);
915 if (__predict_false(xs == NULL)) {
916 /*
917 * out of memory. Keep this buffer in the queue, and
918 * retry later.
919 */
920 callout_reset(&sd->sc_callout, hz / 2, sdrestart,
921 periph);
922 return;
923 }
924 /*
925 * need to dequeue the buffer before queuing the command,
		 * because sdstart may be called recursively from the
927 * HBA driver
928 */
929 #ifdef DIAGNOSTIC
930 if (bufq_get(sd->buf_queue) != bp)
931 panic("sdstart(): dequeued wrong buf");
932 #else
933 bufq_get(sd->buf_queue);
934 #endif
935 error = scsipi_execute_xs(xs);
936 /* with a scsipi_xfer preallocated, scsipi_command can't fail */
937 KASSERT(error == 0);
938 }
939 }
940
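/*
 * Callout handler: retry sdstart() at splbio after a transient
 * scsipi_xfer allocation failure (scheduled from sdstart()).
 */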
941 static void
942 sdrestart(void *v)
943 {
944 int s = splbio();
945 sdstart((struct scsipi_periph *)v);
946 splx(s);
947 }
948
949 static void
950 sddone(struct scsipi_xfer *xs, int error)
951 {
952 struct sd_softc *sd = device_private(xs->xs_periph->periph_dev);
953 struct buf *bp = xs->bp;
954
955 if (sd->flags & SDF_FLUSHING) {
956 /* Flush completed, no longer dirty. */
957 sd->flags &= ~(SDF_FLUSHING|SDF_DIRTY);
958 }
959
960 if (bp) {
961 bp->b_error = error;
962 bp->b_resid = xs->resid;
963 if (error) {
964 /* on a read/write error bp->b_resid is zero, so fix */
965 bp->b_resid = bp->b_bcount;
966 }
967
968 disk_unbusy(&sd->sc_dk, bp->b_bcount - bp->b_resid,
969 (bp->b_flags & B_READ));
970 #if NRND > 0
971 rnd_add_uint32(&sd->rnd_source, bp->b_rawblkno);
972 #endif
973
974 biodone(bp);
975 }
976 }
977
978 static void
979 sdminphys(struct buf *bp)
980 {
981 struct sd_softc *sd = device_lookup_private(&sd_cd, SDUNIT(bp->b_dev));
982 long xmax;
983
984 /*
985 * If the device is ancient, we want to make sure that
986 * the transfer fits into a 6-byte cdb.
987 *
988 * XXX Note that the SCSI-I spec says that 256-block transfers
989 * are allowed in a 6-byte read/write, and are specified
	 * by setting the "length" to 0. However, we're conservative
991 * here, allowing only 255-block transfers in case an
992 * ancient device gets confused by length == 0. A length of 0
993 * in a 10-byte read/write actually means 0 blocks.
994 */
995 if ((sd->flags & SDF_ANCIENT) &&
996 ((sd->sc_periph->periph_flags &
997 (PERIPH_REMOVABLE | PERIPH_MEDIA_LOADED)) != PERIPH_REMOVABLE)) {
998 xmax = sd->sc_dk.dk_label->d_secsize * 0xff;
999
1000 if (bp->b_bcount > xmax)
1001 bp->b_bcount = xmax;
1002 }
1003
1004 scsipi_adapter_minphys(sd->sc_periph->periph_channel, bp);
1005 }
1006
1007 static int
1008 sdread(dev_t dev, struct uio *uio, int ioflag)
1009 {
1010
1011 return (physio(sdstrategy, NULL, dev, B_READ, sdminphys, uio));
1012 }
1013
1014 static int
1015 sdwrite(dev_t dev, struct uio *uio, int ioflag)
1016 {
1017
1018 return (physio(sdstrategy, NULL, dev, B_WRITE, sdminphys, uio));
1019 }
1020
1021 /*
1022 * Perform special action on behalf of the user
1023 * Knows about the internals of this device
1024 */
1025 static int
1026 sdioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
1027 {
1028 struct sd_softc *sd = device_lookup_private(&sd_cd, SDUNIT(dev));
1029 struct scsipi_periph *periph = sd->sc_periph;
1030 int part = SDPART(dev);
1031 int error = 0;
1032 #ifdef __HAVE_OLD_DISKLABEL
1033 struct disklabel *newlabel = NULL;
1034 #endif
1035
1036 SC_DEBUG(sd->sc_periph, SCSIPI_DB2, ("sdioctl 0x%lx ", cmd));
1037
1038 /*
1039 * If the device is not valid, some IOCTLs can still be
1040 * handled on the raw partition. Check this here.
1041 */
1042 if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0) {
1043 switch (cmd) {
1044 case DIOCKLABEL:
1045 case DIOCWLABEL:
1046 case DIOCLOCK:
1047 case DIOCEJECT:
1048 case ODIOCEJECT:
1049 case DIOCGCACHE:
1050 case DIOCSCACHE:
1051 case SCIOCIDENTIFY:
1052 case OSCIOCIDENTIFY:
1053 case SCIOCCOMMAND:
1054 case SCIOCDEBUG:
1055 if (part == RAW_PART)
1056 break;
1057 /* FALLTHROUGH */
1058 default:
1059 if ((periph->periph_flags & PERIPH_OPEN) == 0)
1060 return (ENODEV);
1061 else
1062 return (EIO);
1063 }
1064 }
1065
1066 switch (cmd) {
1067 case DIOCGDINFO:
1068 *(struct disklabel *)addr = *(sd->sc_dk.dk_label);
1069 return (0);
1070
1071 #ifdef __HAVE_OLD_DISKLABEL
1072 case ODIOCGDINFO:
1073 newlabel = malloc(sizeof *newlabel, M_TEMP, M_WAITOK);
1074 if (newlabel == NULL)
1075 return EIO;
1076 memcpy(newlabel, sd->sc_dk.dk_label, sizeof (*newlabel));
1077 if (newlabel->d_npartitions <= OLDMAXPARTITIONS)
1078 memcpy(addr, newlabel, sizeof (struct olddisklabel));
1079 else
1080 error = ENOTTY;
1081 free(newlabel, M_TEMP);
1082 return error;
1083 #endif
1084
1085 case DIOCGPART:
1086 ((struct partinfo *)addr)->disklab = sd->sc_dk.dk_label;
1087 ((struct partinfo *)addr)->part =
1088 &sd->sc_dk.dk_label->d_partitions[part];
1089 return (0);
1090
1091 case DIOCWDINFO:
1092 case DIOCSDINFO:
1093 #ifdef __HAVE_OLD_DISKLABEL
1094 case ODIOCWDINFO:
1095 case ODIOCSDINFO:
1096 #endif
1097 {
1098 struct disklabel *lp;
1099
1100 if ((flag & FWRITE) == 0)
1101 return (EBADF);
1102
1103 #ifdef __HAVE_OLD_DISKLABEL
1104 if (cmd == ODIOCSDINFO || cmd == ODIOCWDINFO) {
1105 newlabel = malloc(sizeof *newlabel, M_TEMP, M_WAITOK);
1106 if (newlabel == NULL)
1107 return EIO;
			memset(newlabel, 0, sizeof *newlabel);
1109 memcpy(newlabel, addr, sizeof (struct olddisklabel));
1110 lp = newlabel;
1111 } else
1112 #endif
1113 lp = (struct disklabel *)addr;
1114
1115 mutex_enter(&sd->sc_dk.dk_openlock);
1116 sd->flags |= SDF_LABELLING;
1117
1118 error = setdisklabel(sd->sc_dk.dk_label,
1119 lp, /*sd->sc_dk.dk_openmask : */0,
1120 sd->sc_dk.dk_cpulabel);
1121 if (error == 0) {
1122 if (cmd == DIOCWDINFO
1123 #ifdef __HAVE_OLD_DISKLABEL
1124 || cmd == ODIOCWDINFO
1125 #endif
1126 )
1127 error = writedisklabel(SDLABELDEV(dev),
1128 sdstrategy, sd->sc_dk.dk_label,
1129 sd->sc_dk.dk_cpulabel);
1130 }
1131
1132 sd->flags &= ~SDF_LABELLING;
1133 mutex_exit(&sd->sc_dk.dk_openlock);
1134 #ifdef __HAVE_OLD_DISKLABEL
1135 if (newlabel != NULL)
1136 free(newlabel, M_TEMP);
1137 #endif
1138 return (error);
1139 }
1140
1141 case DIOCKLABEL:
1142 if (*(int *)addr)
1143 periph->periph_flags |= PERIPH_KEEP_LABEL;
1144 else
1145 periph->periph_flags &= ~PERIPH_KEEP_LABEL;
1146 return (0);
1147
1148 case DIOCWLABEL:
1149 if ((flag & FWRITE) == 0)
1150 return (EBADF);
1151 if (*(int *)addr)
1152 sd->flags |= SDF_WLABEL;
1153 else
1154 sd->flags &= ~SDF_WLABEL;
1155 return (0);
1156
1157 case DIOCLOCK:
1158 if (periph->periph_flags & PERIPH_REMOVABLE)
1159 return (scsipi_prevent(periph,
1160 (*(int *)addr) ?
1161 SPAMR_PREVENT_DT : SPAMR_ALLOW, 0));
1162 else
1163 return (ENOTTY);
1164
1165 case DIOCEJECT:
1166 if ((periph->periph_flags & PERIPH_REMOVABLE) == 0)
1167 return (ENOTTY);
1168 if (*(int *)addr == 0) {
1169 /*
1170 * Don't force eject: check that we are the only
1171 * partition open. If so, unlock it.
1172 */
1173 if ((sd->sc_dk.dk_openmask & ~(1 << part)) == 0 &&
1174 sd->sc_dk.dk_bopenmask + sd->sc_dk.dk_copenmask ==
1175 sd->sc_dk.dk_openmask) {
1176 error = scsipi_prevent(periph, SPAMR_ALLOW,
1177 XS_CTL_IGNORE_NOT_READY);
1178 if (error)
1179 return (error);
1180 } else {
1181 return (EBUSY);
1182 }
1183 }
1184 /* FALLTHROUGH */
1185 case ODIOCEJECT:
1186 return ((periph->periph_flags & PERIPH_REMOVABLE) == 0 ?
1187 ENOTTY : scsipi_start(periph, SSS_STOP|SSS_LOEJ, 0));
1188
1189 case DIOCGDEFLABEL:
1190 sdgetdefaultlabel(sd, (struct disklabel *)addr);
1191 return (0);
1192
1193 #ifdef __HAVE_OLD_DISKLABEL
1194 case ODIOCGDEFLABEL:
1195 newlabel = malloc(sizeof *newlabel, M_TEMP, M_WAITOK);
1196 if (newlabel == NULL)
1197 return EIO;
1198 sdgetdefaultlabel(sd, newlabel);
1199 if (newlabel->d_npartitions <= OLDMAXPARTITIONS)
1200 memcpy(addr, newlabel, sizeof (struct olddisklabel));
1201 else
1202 error = ENOTTY;
1203 free(newlabel, M_TEMP);
1204 return error;
1205 #endif
1206
1207 case DIOCGCACHE:
1208 return (sd_getcache(sd, (int *) addr));
1209
1210 case DIOCSCACHE:
1211 if ((flag & FWRITE) == 0)
1212 return (EBADF);
1213 return (sd_setcache(sd, *(int *) addr));
1214
1215 case DIOCCACHESYNC:
1216 /*
1217 * XXX Do we really need to care about having a writable
1218 * file descriptor here?
1219 */
1220 if ((flag & FWRITE) == 0)
1221 return (EBADF);
1222 if (((sd->flags & SDF_DIRTY) != 0 || *(int *)addr != 0)) {
1223 error = sd_flush(sd, 0);
1224 if (error)
1225 sd->flags &= ~SDF_FLUSHING;
1226 else
1227 sd->flags &= ~(SDF_FLUSHING|SDF_DIRTY);
1228 } else
1229 error = 0;
1230 return (error);
1231
1232 case DIOCAWEDGE:
1233 {
1234 struct dkwedge_info *dkw = (void *) addr;
1235
1236 if ((flag & FWRITE) == 0)
1237 return (EBADF);
1238
1239 /* If the ioctl happens here, the parent is us. */
1240 strlcpy(dkw->dkw_parent, device_xname(sd->sc_dev),
1241 sizeof(dkw->dkw_parent));
1242 return (dkwedge_add(dkw));
1243 }
1244
1245 case DIOCDWEDGE:
1246 {
1247 struct dkwedge_info *dkw = (void *) addr;
1248
1249 if ((flag & FWRITE) == 0)
1250 return (EBADF);
1251
1252 /* If the ioctl happens here, the parent is us. */
1253 strlcpy(dkw->dkw_parent, device_xname(sd->sc_dev),
1254 sizeof(dkw->dkw_parent));
1255 return (dkwedge_del(dkw));
1256 }
1257
1258 case DIOCLWEDGES:
1259 {
1260 struct dkwedge_list *dkwl = (void *) addr;
1261
1262 return (dkwedge_list(&sd->sc_dk, dkwl, l));
1263 }
1264
1265 default:
1266 if (part != RAW_PART)
1267 return (ENOTTY);
1268 return (scsipi_do_ioctl(periph, dev, cmd, addr, flag, l));
1269 }
1270
1271 #ifdef DIAGNOSTIC
1272 panic("sdioctl: impossible");
1273 #endif
1274 }
1275
1276 static void
1277 sdgetdefaultlabel(struct sd_softc *sd, struct disklabel *lp)
1278 {
1279
1280 memset(lp, 0, sizeof(struct disklabel));
1281
1282 lp->d_secsize = sd->params.blksize;
1283 lp->d_ntracks = sd->params.heads;
1284 lp->d_nsectors = sd->params.sectors;
1285 lp->d_ncylinders = sd->params.cyls;
1286 lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
1287
1288 switch (scsipi_periph_bustype(sd->sc_periph)) {
1289 case SCSIPI_BUSTYPE_SCSI:
1290 lp->d_type = DTYPE_SCSI;
1291 break;
1292 case SCSIPI_BUSTYPE_ATAPI:
1293 lp->d_type = DTYPE_ATAPI;
1294 break;
1295 }
1296 /*
1297 * XXX
1298 * We could probe the mode pages to figure out what kind of disc it is.
1299 * Is this worthwhile?
1300 */
1301 strncpy(lp->d_typename, sd->name, 16);
1302 strncpy(lp->d_packname, "fictitious", 16);
1303 lp->d_secperunit = sd->params.disksize;
1304 lp->d_rpm = sd->params.rot_rate;
1305 lp->d_interleave = 1;
1306 lp->d_flags = sd->sc_periph->periph_flags & PERIPH_REMOVABLE ?
1307 D_REMOVABLE : 0;
1308
1309 lp->d_partitions[RAW_PART].p_offset = 0;
1310 lp->d_partitions[RAW_PART].p_size = lp->d_secperunit;
1311 lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED;
1312 lp->d_npartitions = RAW_PART + 1;
1313
1314 lp->d_magic = DISKMAGIC;
1315 lp->d_magic2 = DISKMAGIC;
1316 lp->d_checksum = dkcksum(lp);
1317 }
1318
1319
1320 /*
1321 * Load the label information on the named device
1322 */
1323 static int
1324 sdgetdisklabel(struct sd_softc *sd)
1325 {
1326 struct disklabel *lp = sd->sc_dk.dk_label;
1327 const char *errstring;
1328
1329 memset(sd->sc_dk.dk_cpulabel, 0, sizeof(struct cpu_disklabel));
1330
1331 sdgetdefaultlabel(sd, lp);
1332
1333 if (lp->d_secpercyl == 0) {
1334 lp->d_secpercyl = 100;
1335 /* as long as it's not 0 - readdisklabel divides by it (?) */
1336 }
1337
1338 /*
1339 * Call the generic disklabel extraction routine
1340 */
1341 errstring = readdisklabel(MAKESDDEV(0, device_unit(sd->sc_dev),
1342 RAW_PART), sdstrategy, lp, sd->sc_dk.dk_cpulabel);
1343 if (errstring) {
1344 aprint_error_dev(sd->sc_dev, "%s\n", errstring);
1345 return EIO;
1346 }
1347 return 0;
1348 }
1349
1350 static void
1351 sd_shutdown(void *arg)
1352 {
1353 struct sd_softc *sd = arg;
1354
1355 /*
1356 * If the disk cache needs to be flushed, and the disk supports
1357 * it, flush it. We're cold at this point, so we poll for
1358 * completion.
1359 */
1360 if ((sd->flags & SDF_DIRTY) != 0) {
1361 if (sd_flush(sd, XS_CTL_NOSLEEP|XS_CTL_POLL)) {
1362 aprint_error_dev(sd->sc_dev,
1363 "cache synchronization failed\n");
1364 sd->flags &= ~SDF_FLUSHING;
1365 } else
1366 sd->flags &= ~(SDF_FLUSHING|SDF_DIRTY);
1367 }
1368 }
1369
1370 static bool
1371 sd_suspend(device_t dv PMF_FN_ARGS)
1372 {
1373 struct sd_softc *sd = device_private(dv);
1374
1375 sd_shutdown(sd); /* XXX no need to poll */
1376 return true;
1377 }
1378
1379 /*
1380 * Check Errors
1381 */
1382 static int
1383 sd_interpret_sense(struct scsipi_xfer *xs)
1384 {
1385 struct scsipi_periph *periph = xs->xs_periph;
1386 struct scsi_sense_data *sense = &xs->sense.scsi_sense;
1387 struct sd_softc *sd = device_private(periph->periph_dev);
1388 int s, error, retval = EJUSTRETURN;
1389
1390 /*
1391 * If the periph is already recovering, just do the normal
1392 * error processing.
1393 */
1394 if (periph->periph_flags & PERIPH_RECOVERING)
1395 return (retval);
1396
1397 /*
1398 * Ignore errors from accessing illegal fields (e.g. trying to
1399 * lock the door of a digicam, which doesn't have a door that
1400 * can be locked) for the SCSI_PREVENT_ALLOW_MEDIUM_REMOVAL command.
1401 */
1402 if (xs->cmd->opcode == SCSI_PREVENT_ALLOW_MEDIUM_REMOVAL &&
1403 SSD_SENSE_KEY(sense->flags) == SKEY_ILLEGAL_REQUEST &&
1404 sense->asc == 0x24 &&
1405 sense->ascq == 0x00) { /* Illegal field in CDB */
1406 if (!(xs->xs_control & XS_CTL_SILENT)) {
1407 scsipi_printaddr(periph);
1408 printf("no door lock\n");
1409 }
1410 xs->xs_control |= XS_CTL_IGNORE_ILLEGAL_REQUEST;
1411 return (retval);
1412 }
1413
1414
1415
1416 /*
1417 * If the device is not open yet, let the generic code handle it.
1418 */
1419 if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0)
1420 return (retval);
1421
1422 /*
	 * If it isn't an extended or extended/deferred error, let
1424 * the generic code handle it.
1425 */
1426 if (SSD_RCODE(sense->response_code) != SSD_RCODE_CURRENT &&
1427 SSD_RCODE(sense->response_code) != SSD_RCODE_DEFERRED)
1428 return (retval);
1429
1430 if (SSD_SENSE_KEY(sense->flags) == SKEY_NOT_READY &&
1431 sense->asc == 0x4) {
1432 if (sense->ascq == 0x01) {
1433 /*
1434 * Unit In The Process Of Becoming Ready.
1435 */
1436 printf("%s: waiting for pack to spin up...\n",
1437 device_xname(sd->sc_dev));
1438 if (!callout_pending(&periph->periph_callout))
1439 scsipi_periph_freeze(periph, 1);
1440 callout_reset(&periph->periph_callout,
1441 5 * hz, scsipi_periph_timed_thaw, periph);
1442 retval = ERESTART;
1443 } else if (sense->ascq == 0x02) {
1444 printf("%s: pack is stopped, restarting...\n",
1445 device_xname(sd->sc_dev));
1446 s = splbio();
1447 periph->periph_flags |= PERIPH_RECOVERING;
1448 splx(s);
1449 error = scsipi_start(periph, SSS_START,
1450 XS_CTL_URGENT|XS_CTL_HEAD_TAG|
1451 XS_CTL_THAW_PERIPH|XS_CTL_FREEZE_PERIPH);
1452 if (error) {
1453 aprint_error_dev(sd->sc_dev,
1454 "unable to restart pack\n");
1455 retval = error;
1456 } else
1457 retval = ERESTART;
1458 s = splbio();
1459 periph->periph_flags &= ~PERIPH_RECOVERING;
1460 splx(s);
1461 }
1462 }
1463 if (SSD_SENSE_KEY(sense->flags) == SKEY_MEDIUM_ERROR &&
1464 sense->asc == 0x31 &&
1465 sense->ascq == 0x00) { /* maybe for any asq ? */
1466 /* Medium Format Corrupted */
1467 retval = EFTYPE;
1468 }
1469 return (retval);
1470 }
1471
1472
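/*
 * Return the size of the given partition in DEV_BSIZE units,
 * or -1 if it cannot be determined.
 */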
1473 static int
1474 sdsize(dev_t dev)
1475 {
1476 struct sd_softc *sd;
1477 int part, unit, omask;
1478 int size;
1479
1480 unit = SDUNIT(dev);
1481 sd = device_lookup_private(&sd_cd, unit);
1482 if (sd == NULL)
1483 return (-1);
1484
1485 if (!device_is_active(sd->sc_dev))
1486 return (-1);
1487
1488 part = SDPART(dev);
1489 omask = sd->sc_dk.dk_openmask & (1 << part);
1490
1491 if (omask == 0 && sdopen(dev, 0, S_IFBLK, NULL) != 0)
1492 return (-1);
1493 if ((sd->sc_periph->periph_flags & PERIPH_MEDIA_LOADED) == 0)
1494 size = -1;
1495 else if (sd->sc_dk.dk_label->d_partitions[part].p_fstype != FS_SWAP)
1496 size = -1;
1497 else
1498 size = sd->sc_dk.dk_label->d_partitions[part].p_size *
1499 (sd->sc_dk.dk_label->d_secsize / DEV_BSIZE);
1500 if (omask == 0 && sdclose(dev, 0, S_IFBLK, NULL) != 0)
1501 return (-1);
1502 return (size);
1503 }
1504
1505 /* #define SD_DUMP_NOT_TRUSTED if you just want to watch */
1506 static struct scsipi_xfer sx;
1507 static int sddoingadump;
1508
1509 /*
1510 * dump all of physical memory into the partition specified, starting
1511 * at offset 'dumplo' into the partition.
1512 */
1513 static int
1514 sddump(dev_t dev, daddr_t blkno, void *va, size_t size)
1515 {
1516 struct sd_softc *sd; /* disk unit to do the I/O */
1517 struct disklabel *lp; /* disk's disklabel */
1518 int unit, part;
1519 int sectorsize; /* size of a disk sector */
1520 int nsects; /* number of sectors in partition */
1521 int sectoff; /* sector offset of partition */
1522 int totwrt; /* total number of sectors left to write */
1523 int nwrt; /* current number of sectors to write */
1524 struct scsipi_rw_10 cmd; /* write command */
1525 struct scsipi_xfer *xs; /* ... convenience */
1526 struct scsipi_periph *periph;
1527 struct scsipi_channel *chan;
1528
1529 /* Check if recursive dump; if so, punt. */
1530 if (sddoingadump)
1531 return (EFAULT);
1532
1533 /* Mark as active early. */
1534 sddoingadump = 1;
1535
1536 unit = SDUNIT(dev); /* Decompose unit & partition. */
1537 part = SDPART(dev);
1538
1539 /* Check for acceptable drive number. */
1540 sd = device_lookup_private(&sd_cd, unit);
1541 if (sd == NULL)
1542 return (ENXIO);
1543
1544 if (!device_is_active(sd->sc_dev))
1545 return (ENODEV);
1546
1547 periph = sd->sc_periph;
1548 chan = periph->periph_channel;
1549
1550 /* Make sure it was initialized. */
1551 if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0)
1552 return (ENXIO);
1553
1554 /* Convert to disk sectors. Request must be a multiple of size. */
1555 lp = sd->sc_dk.dk_label;
1556 sectorsize = lp->d_secsize;
1557 if ((size % sectorsize) != 0)
1558 return (EFAULT);
1559 totwrt = size / sectorsize;
1560 blkno = dbtob(blkno) / sectorsize; /* blkno in DEV_BSIZE units */
1561
1562 nsects = lp->d_partitions[part].p_size;
1563 sectoff = lp->d_partitions[part].p_offset;
1564
1565 /* Check transfer bounds against partition size. */
1566 if ((blkno < 0) || ((blkno + totwrt) > nsects))
1567 return (EINVAL);
1568
1569 /* Offset block number to start of partition. */
1570 blkno += sectoff;
1571
1572 xs = &sx;
1573
1574 while (totwrt > 0) {
1575 nwrt = totwrt; /* XXX */
1576 #ifndef SD_DUMP_NOT_TRUSTED
1577 /*
1578 * Fill out the scsi command
1579 */
1580 memset(&cmd, 0, sizeof(cmd));
1581 cmd.opcode = WRITE_10;
1582 _lto4b(blkno, cmd.addr);
1583 _lto2b(nwrt, cmd.length);
1584 /*
		 * Fill out the scsipi_xfer structure.
		 * Note: we cannot sleep as we may be an interrupt;
		 * don't use scsipi_command() as it may want to wait
1588 * for an xs.
1589 */
1590 memset(xs, 0, sizeof(sx));
1591 xs->xs_control |= XS_CTL_NOSLEEP | XS_CTL_POLL |
1592 XS_CTL_DATA_OUT;
1593 xs->xs_status = 0;
1594 xs->xs_periph = periph;
1595 xs->xs_retries = SDRETRIES;
1596 xs->timeout = 10000; /* 10000 millisecs for a disk ! */
1597 xs->cmd = (struct scsipi_generic *)&cmd;
1598 xs->cmdlen = sizeof(cmd);
1599 xs->resid = nwrt * sectorsize;
1600 xs->error = XS_NOERROR;
1601 xs->bp = 0;
1602 xs->data = va;
1603 xs->datalen = nwrt * sectorsize;
1604 callout_init(&xs->xs_callout, 0);
1605
1606 /*
1607 * Pass all this info to the scsi driver.
1608 */
1609 scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
1610 if ((xs->xs_status & XS_STS_DONE) == 0 ||
1611 xs->error != XS_NOERROR)
1612 return (EIO);
1613 #else /* SD_DUMP_NOT_TRUSTED */
1614 /* Let's just talk about this first... */
1615 printf("sd%d: dump addr 0x%x, blk %d\n", unit, va, blkno);
1616 delay(500 * 1000); /* half a second */
1617 #endif /* SD_DUMP_NOT_TRUSTED */
1618
1619 /* update block count */
1620 totwrt -= nwrt;
1621 blkno += nwrt;
1622 va = (char *)va + sectorsize * nwrt;
1623 }
1624 sddoingadump = 0;
1625 return (0);
1626 }
1627
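/*
 * Issue a MODE SENSE, using the 10-byte opcode for devices that require
 * big CDBs (unless the big MODE SENSE is itself quirk-disabled) and the
 * 6-byte opcode otherwise; *big tells the caller which parameter header
 * format to expect.
 */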
1628 static int
1629 sd_mode_sense(struct sd_softc *sd, u_int8_t byte2, void *sense, size_t size,
1630 int page, int flags, int *big)
1631 {
1632
1633 if ((sd->sc_periph->periph_quirks & PQUIRK_ONLYBIG) &&
1634 !(sd->sc_periph->periph_quirks & PQUIRK_NOBIGMODESENSE)) {
1635 *big = 1;
1636 return scsipi_mode_sense_big(sd->sc_periph, byte2, page, sense,
1637 size + sizeof(struct scsi_mode_parameter_header_10),
1638 flags | XS_CTL_DATA_ONSTACK, SDRETRIES, 6000);
1639 } else {
1640 *big = 0;
1641 return scsipi_mode_sense(sd->sc_periph, byte2, page, sense,
1642 size + sizeof(struct scsi_mode_parameter_header_6),
1643 flags | XS_CTL_DATA_ONSTACK, SDRETRIES, 6000);
1644 }
1645 }
1646
1647 static int
1648 sd_mode_select(struct sd_softc *sd, u_int8_t byte2, void *sense, size_t size,
1649 int flags, int big)
1650 {
1651
1652 if (big) {
1653 struct scsi_mode_parameter_header_10 *header = sense;
1654
1655 _lto2b(0, header->data_length);
1656 return scsipi_mode_select_big(sd->sc_periph, byte2, sense,
1657 size + sizeof(struct scsi_mode_parameter_header_10),
1658 flags | XS_CTL_DATA_ONSTACK, SDRETRIES, 6000);
1659 } else {
1660 struct scsi_mode_parameter_header_6 *header = sense;
1661
1662 header->data_length = 0;
1663 return scsipi_mode_select(sd->sc_periph, byte2, sense,
1664 size + sizeof(struct scsi_mode_parameter_header_6),
1665 flags | XS_CTL_DATA_ONSTACK, SDRETRIES, 6000);
1666 }
1667 }
1668
1669 /*
1670 * sd_validate_blksize:
1671 *
 *	Validate the block size. Print an error if periph is specified.
1673 */
1674 static int
1675 sd_validate_blksize(struct scsipi_periph *periph, int len)
1676 {
1677
1678 switch (len) {
1679 case 256:
1680 case 512:
1681 case 1024:
1682 case 2048:
1683 case 4096:
1684 return 1;
1685 }
1686
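	/*
	 * Unsupported size: a value that is not a power of two is reported
	 * as "preposterous", an unlisted power of two merely as
	 * "unsupported"; callers then fall back to SD_DEFAULT_BLKSIZE.
	 */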
1687 if (periph) {
1688 scsipi_printaddr(periph);
1689 printf("%s sector size: 0x%x. Defaulting to %d bytes.\n",
1690 (len ^ (1 << (ffs(len) - 1))) ?
1691 "preposterous" : "unsupported",
1692 len, SD_DEFAULT_BLKSIZE);
1693 }
1694
1695 return 0;
1696 }
1697
1698 /*
1699 * sd_read_capacity:
1700 *
1701 * Find out from the device what its capacity is.
1702 */
1703 static u_int64_t
1704 sd_read_capacity(struct scsipi_periph *periph, int *blksize, int flags)
1705 {
1706 union {
1707 struct scsipi_read_capacity_10 cmd;
1708 struct scsipi_read_capacity_16 cmd16;
1709 } cmd;
1710 union {
1711 struct scsipi_read_capacity_10_data data;
1712 struct scsipi_read_capacity_16_data data16;
1713 } *datap;
1714 uint64_t rv;
1715
1716 memset(&cmd, 0, sizeof(cmd));
1717 cmd.cmd.opcode = READ_CAPACITY_10;
1718
1719 /*
	 * Don't allocate the data buffer on the stack; the lower driver
	 * layer might use the same stack, and if it uses a region that is
	 * in the same cacheline, cache flush ops against the data buffer
	 * won't work properly.
1724 */
1725 datap = malloc(sizeof(*datap), M_TEMP, M_WAITOK);
1726 if (datap == NULL)
1727 return 0;
1728
1729 /*
1730 * If the command works, interpret the result as a 4 byte
1731 * number of blocks
1732 */
1733 rv = 0;
1734 memset(datap, 0, sizeof(datap->data));
1735 if (scsipi_command(periph, (void *)&cmd.cmd, sizeof(cmd.cmd),
1736 (void *)datap, sizeof(datap->data), SCSIPIRETRIES, 20000, NULL,
1737 flags | XS_CTL_DATA_IN | XS_CTL_SILENT) != 0)
1738 goto out;
1739
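	/*
	 * A returned address of 0xffffffff means the capacity does not fit
	 * in 32 bits, so fall through to READ CAPACITY (16) below.
	 */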
1740 if (_4btol(datap->data.addr) != 0xffffffff) {
1741 *blksize = _4btol(datap->data.length);
1742 rv = _4btol(datap->data.addr) + 1;
1743 goto out;
1744 }
1745
1746 /*
1747 * Device is larger than can be reflected by READ CAPACITY (10).
1748 * Try READ CAPACITY (16).
1749 */
1750
1751 memset(&cmd, 0, sizeof(cmd));
1752 cmd.cmd16.opcode = READ_CAPACITY_16;
1753 cmd.cmd16.byte2 = SRC16_SERVICE_ACTION;
1754 _lto4b(sizeof(datap->data16), cmd.cmd16.len);
1755
1756 memset(datap, 0, sizeof(datap->data16));
1757 if (scsipi_command(periph, (void *)&cmd.cmd16, sizeof(cmd.cmd16),
1758 (void *)datap, sizeof(datap->data16), SCSIPIRETRIES, 20000, NULL,
1759 flags | XS_CTL_DATA_IN | XS_CTL_SILENT) != 0)
1760 goto out;
1761
1762 *blksize = _4btol(datap->data16.length);
1763 rv = _8btol(datap->data16.addr) + 1;
1764
1765 out:
1766 free(datap, M_TEMP);
1767 return rv;
1768 }
1769
1770 static int
1771 sd_get_simplifiedparms(struct sd_softc *sd, struct disk_parms *dp, int flags)
1772 {
1773 struct {
1774 struct scsi_mode_parameter_header_6 header;
1775 /* no block descriptor */
1776 u_int8_t pg_code; /* page code (should be 6) */
1777 u_int8_t pg_length; /* page length (should be 11) */
1778 u_int8_t wcd; /* bit0: cache disable */
1779 u_int8_t lbs[2]; /* logical block size */
1780 u_int8_t size[5]; /* number of log. blocks */
1781 u_int8_t pp; /* power/performance */
1782 u_int8_t flags;
1783 u_int8_t resvd;
1784 } scsipi_sense;
1785 u_int64_t blocks;
1786 int error, blksize;
1787
1788 /*
1789 * sd_read_capacity (ie "read capacity") and mode sense page 6
1790 * give the same information. Do both for now, and check
1791 * for consistency.
1792 * XXX probably differs for removable media
1793 */
1794 dp->blksize = SD_DEFAULT_BLKSIZE;
1795 if ((blocks = sd_read_capacity(sd->sc_periph, &blksize, flags)) == 0)
1796 return (SDGP_RESULT_OFFLINE); /* XXX? */
1797
1798 error = scsipi_mode_sense(sd->sc_periph, SMS_DBD, 6,
1799 &scsipi_sense.header, sizeof(scsipi_sense),
1800 flags | XS_CTL_DATA_ONSTACK, SDRETRIES, 6000);
1801
1802 if (error != 0)
1803 return (SDGP_RESULT_OFFLINE); /* XXX? */
1804
1805 dp->blksize = blksize;
1806 if (!sd_validate_blksize(NULL, dp->blksize))
1807 dp->blksize = _2btol(scsipi_sense.lbs);
1808 if (!sd_validate_blksize(sd->sc_periph, dp->blksize))
1809 dp->blksize = SD_DEFAULT_BLKSIZE;
1810
1811 /*
1812 * Create a pseudo-geometry.
1813 */
1814 dp->heads = 64;
1815 dp->sectors = 32;
1816 dp->cyls = blocks / (dp->heads * dp->sectors);
1817 dp->disksize = _5btol(scsipi_sense.size);
1818 if (dp->disksize <= UINT32_MAX && dp->disksize != blocks) {
1819 printf("RBC size: mode sense=%llu, get cap=%llu\n",
1820 (unsigned long long)dp->disksize,
1821 (unsigned long long)blocks);
1822 dp->disksize = blocks;
1823 }
1824 dp->disksize512 = (dp->disksize * dp->blksize) / DEV_BSIZE;
1825
1826 return (SDGP_RESULT_OK);
1827 }
1828
1829 /*
 * Get the scsi driver to send a full inquiry to the device and use the
1831 * results to fill out the disk parameter structure.
1832 */
1833 static int
1834 sd_get_capacity(struct sd_softc *sd, struct disk_parms *dp, int flags)
1835 {
1836 u_int64_t blocks;
1837 int error, blksize;
1838 #if 0
1839 int i;
1840 u_int8_t *p;
1841 #endif
1842
1843 dp->disksize = blocks = sd_read_capacity(sd->sc_periph, &blksize,
1844 flags);
1845 if (blocks == 0) {
1846 struct scsipi_read_format_capacities cmd;
1847 struct {
1848 struct scsipi_capacity_list_header header;
1849 struct scsipi_capacity_descriptor desc;
1850 } __packed data;
1851
1852 memset(&cmd, 0, sizeof(cmd));
1853 memset(&data, 0, sizeof(data));
1854 cmd.opcode = READ_FORMAT_CAPACITIES;
1855 _lto2b(sizeof(data), cmd.length);
1856
1857 error = scsipi_command(sd->sc_periph,
1858 (void *)&cmd, sizeof(cmd), (void *)&data, sizeof(data),
1859 SDRETRIES, 20000, NULL,
1860 flags | XS_CTL_DATA_IN | XS_CTL_DATA_ONSTACK);
1861 if (error == EFTYPE) {
1862 /* Medium Format Corrupted, handle as not formatted */
1863 return (SDGP_RESULT_UNFORMATTED);
1864 }
1865 if (error || data.header.length == 0)
1866 return (SDGP_RESULT_OFFLINE);
1867
1868 #if 0
1869 printf("rfc: length=%d\n", data.header.length);
1870 printf("rfc result:"); for (i = sizeof(struct scsipi_capacity_list_header) + data.header.length, p = (void *)&data; i; i--, p++) printf(" %02x", *p); printf("\n");
1871 #endif
1872 switch (data.desc.byte5 & SCSIPI_CAP_DESC_CODE_MASK) {
1873 case SCSIPI_CAP_DESC_CODE_RESERVED:
1874 case SCSIPI_CAP_DESC_CODE_FORMATTED:
1875 break;
1876
1877 case SCSIPI_CAP_DESC_CODE_UNFORMATTED:
1878 return (SDGP_RESULT_UNFORMATTED);
1879
1880 case SCSIPI_CAP_DESC_CODE_NONE:
1881 return (SDGP_RESULT_OFFLINE);
1882 }
1883
1884 dp->disksize = blocks = _4btol(data.desc.nblks);
1885 if (blocks == 0)
1886 return (SDGP_RESULT_OFFLINE); /* XXX? */
1887
1888 blksize = _3btol(data.desc.blklen);
1889
1890 } else if (!sd_validate_blksize(NULL, blksize)) {
1891 struct sd_mode_sense_data scsipi_sense;
1892 int big, bsize;
1893 struct scsi_general_block_descriptor *bdesc;
1894
1895 memset(&scsipi_sense, 0, sizeof(scsipi_sense));
1896 error = sd_mode_sense(sd, 0, &scsipi_sense,
1897 sizeof(scsipi_sense.blk_desc), 0, flags | XS_CTL_SILENT, &big);
1898 if (!error) {
1899 if (big) {
1900 bdesc = (void *)(&scsipi_sense.header.big + 1);
1901 bsize = _2btol(scsipi_sense.header.big.blk_desc_len);
1902 } else {
1903 bdesc = (void *)(&scsipi_sense.header.small + 1);
1904 bsize = scsipi_sense.header.small.blk_desc_len;
1905 }
1906
1907 #if 0
1908 printf("page 0 sense:"); for (i = sizeof(scsipi_sense), p = (void *)&scsipi_sense; i; i--, p++) printf(" %02x", *p); printf("\n");
1909 printf("page 0 bsize=%d\n", bsize);
1910 printf("page 0 ok\n");
1911 #endif
1912
1913 if (bsize >= 8) {
1914 blksize = _3btol(bdesc->blklen);
1915 }
1916 }
1917 }
1918
1919 if (!sd_validate_blksize(sd->sc_periph, blksize))
1920 blksize = SD_DEFAULT_BLKSIZE;
1921
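	/*
	 * disksize512 is the capacity expressed in DEV_BSIZE units; it is
	 * what sdstrategy() uses for bounds checking on the raw partition.
	 */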
1922 dp->blksize = blksize;
1923 dp->disksize512 = (blocks * dp->blksize) / DEV_BSIZE;
1924 return (0);
1925 }
1926
1927 static int
1928 sd_get_parms_page4(struct sd_softc *sd, struct disk_parms *dp, int flags)
1929 {
1930 struct sd_mode_sense_data scsipi_sense;
1931 int error;
1932 int big, byte2;
1933 size_t poffset;
1934 union scsi_disk_pages *pages;
1935
1936 byte2 = SMS_DBD;
1937 again:
1938 memset(&scsipi_sense, 0, sizeof(scsipi_sense));
1939 error = sd_mode_sense(sd, byte2, &scsipi_sense,
1940 (byte2 ? 0 : sizeof(scsipi_sense.blk_desc)) +
1941 sizeof(scsipi_sense.pages.rigid_geometry), 4,
1942 flags | XS_CTL_SILENT, &big);
1943 if (error) {
1944 if (byte2 == SMS_DBD) {
1945 /* No result; try once more with DBD off */
1946 byte2 = 0;
1947 goto again;
1948 }
1949 return (error);
1950 }
1951
1952 if (big) {
1953 poffset = sizeof scsipi_sense.header.big;
1954 poffset += _2btol(scsipi_sense.header.big.blk_desc_len);
1955 } else {
1956 poffset = sizeof scsipi_sense.header.small;
1957 poffset += scsipi_sense.header.small.blk_desc_len;
1958 }
1959
1960 if (poffset > sizeof(scsipi_sense) - sizeof(pages->rigid_geometry))
1961 return ERESTART;
1962
1963 pages = (void *)((u_long)&scsipi_sense + poffset);
1964 #if 0
1965 {
1966 size_t i;
1967 u_int8_t *p;
1968
1969 printf("page 4 sense:");
1970 for (i = sizeof(scsipi_sense), p = (void *)&scsipi_sense; i;
1971 i--, p++)
1972 printf(" %02x", *p);
1973 printf("\n");
1974 printf("page 4 pg_code=%d sense=%p/%p\n",
1975 pages->rigid_geometry.pg_code, &scsipi_sense, pages);
1976 }
1977 #endif
1978
1979 if ((pages->rigid_geometry.pg_code & PGCODE_MASK) != 4)
1980 return (ERESTART);
1981
1982 SC_DEBUG(sd->sc_periph, SCSIPI_DB3,
1983 ("%d cyls, %d heads, %d precomp, %d red_write, %d land_zone\n",
1984 _3btol(pages->rigid_geometry.ncyl),
1985 pages->rigid_geometry.nheads,
1986 _2btol(pages->rigid_geometry.st_cyl_wp),
1987 _2btol(pages->rigid_geometry.st_cyl_rwc),
1988 _2btol(pages->rigid_geometry.land_zone)));
1989
1990 /*
1991 * KLUDGE!! (for zone-recorded disks): pick a sectors-per-track
1992 * value such that sectors * heads * cylinders is <= the disk
1993 * size. This can leave some blocks outside the fabricated
1994 * geometry, i.e. wasted space. THINK ABOUT THIS!
1995 */
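/*
 * Illustrative example: a 2097152-sector disk reporting 16 heads and
 * 4096 cylinders gets 2097152 / (16 * 4096) = 32 sectors per track;
 * when that division truncates, the blocks beyond sec * heads * cyls
 * simply fall outside the fabricated geometry.
 */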
1996 dp->heads = pages->rigid_geometry.nheads;
1997 dp->cyls = _3btol(pages->rigid_geometry.ncyl);
1998 if (dp->heads == 0 || dp->cyls == 0)
1999 return (ERESTART);
2000 dp->sectors = dp->disksize / (dp->heads * dp->cyls); /* XXX */
2001
2002 dp->rot_rate = _2btol(pages->rigid_geometry.rpm);
2003 if (dp->rot_rate == 0)
2004 dp->rot_rate = 3600;
2005
2006 #if 0
2007 printf("page 4 ok\n");
2008 #endif
2009 return (0);
2010 }
2011
2012 static int
2013 sd_get_parms_page5(struct sd_softc *sd, struct disk_parms *dp, int flags)
2014 {
2015 struct sd_mode_sense_data scsipi_sense;
2016 int error;
2017 int big, byte2;
2018 size_t poffset;
2019 union scsi_disk_pages *pages;
2020
2021 byte2 = SMS_DBD;
2022 again:
2023 memset(&scsipi_sense, 0, sizeof(scsipi_sense));
2024 error = sd_mode_sense(sd, byte2, &scsipi_sense,
2025 (byte2 ? 0 : sizeof(scsipi_sense.blk_desc)) +
2026 sizeof(scsipi_sense.pages.flex_geometry), 5,
2027 flags | XS_CTL_SILENT, &big);
2028 if (error) {
2029 if (byte2 == SMS_DBD) {
2030 /* No result; try once more with DBD off */
2031 byte2 = 0;
2032 goto again;
2033 }
2034 return (error);
2035 }
2036
2037 if (big) {
2038 poffset = sizeof scsipi_sense.header.big;
2039 poffset += _2btol(scsipi_sense.header.big.blk_desc_len);
2040 } else {
2041 poffset = sizeof scsipi_sense.header.small;
2042 poffset += scsipi_sense.header.small.blk_desc_len;
2043 }
2044
2045 if (poffset > sizeof(scsipi_sense) - sizeof(pages->flex_geometry))
2046 return (ERESTART);
2047
2048 pages = (void *)((u_long)&scsipi_sense + poffset);
2049 #if 0
2050 {
2051 size_t i;
2052 u_int8_t *p;
2053
2054 printf("page 5 sense:");
2055 for (i = sizeof(scsipi_sense), p = (void *)&scsipi_sense; i;
2056 i--, p++)
2057 printf(" %02x", *p);
2058 printf("\n");
2059 printf("page 5 pg_code=%d sense=%p/%p\n",
2060 pages->flex_geometry.pg_code, &scsipi_sense, pages);
2061 }
2062 #endif
2063
2064 if ((pages->flex_geometry.pg_code & PGCODE_MASK) != 5)
2065 return (ERESTART);
2066
2067 SC_DEBUG(sd->sc_periph, SCSIPI_DB3,
2068 ("%d cyls, %d heads, %d sec, %d bytes/sec\n",
2069 _2btol(pages->flex_geometry.ncyl),
2070 pages->flex_geometry.nheads,
2071 pages->flex_geometry.ph_sec_tr,
2072 _2btol(pages->flex_geometry.bytes_s)));
2073
2074 dp->heads = pages->flex_geometry.nheads;
2075 dp->cyls = _2btol(pages->flex_geometry.ncyl);
2076 dp->sectors = pages->flex_geometry.ph_sec_tr;
2077 if (dp->heads == 0 || dp->cyls == 0 || dp->sectors == 0)
2078 return (ERESTART);
2079
2080 dp->rot_rate = _2btol(pages->flex_geometry.rpm);
2081 if (dp->rot_rate == 0)
2082 dp->rot_rate = 3600;
2083
2084 #if 0
2085 printf("page 5 ok\n");
2086 #endif
2087 return (0);
2088 }
2089
2090 static int
2091 sd_get_parms(struct sd_softc *sd, struct disk_parms *dp, int flags)
2092 {
2093 int error;
2094
2095 /*
2096 * If offline, the SDEV_MEDIA_LOADED flag will be
2097 * cleared by the caller if necessary.
2098 */
2099 if (sd->type == T_SIMPLE_DIRECT) {
2100 error = sd_get_simplifiedparms(sd, dp, flags);
2101 if (!error)
2102 disk_blocksize(&sd->sc_dk, dp->blksize);
2103 return (error);
2104 }
2105
2106 error = sd_get_capacity(sd, dp, flags);
2107 if (error)
2108 return (error);
2109
2110 disk_blocksize(&sd->sc_dk, dp->blksize);
2111
2112 if (sd->type == T_OPTICAL)
2113 goto page0;
2114
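/*
 * Removable devices try the flexible-geometry page (5) before the
 * rigid-geometry page (4); fixed disks try them in the opposite order,
 * presumably because removable media are more often floppy-like.
 */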
2115 if (sd->sc_periph->periph_flags & PERIPH_REMOVABLE) {
2116 if (!sd_get_parms_page5(sd, dp, flags) ||
2117 !sd_get_parms_page4(sd, dp, flags))
2118 return (SDGP_RESULT_OK);
2119 } else {
2120 if (!sd_get_parms_page4(sd, dp, flags) ||
2121 !sd_get_parms_page5(sd, dp, flags))
2122 return (SDGP_RESULT_OK);
2123 }
2124
2125 page0:
2126 printf("%s: fabricating a geometry\n", device_xname(sd->sc_dev));
2127 /* Try calling driver's method for figuring out geometry. */
2128 if (!sd->sc_periph->periph_channel->chan_adapter->adapt_getgeom ||
2129 !(*sd->sc_periph->periph_channel->chan_adapter->adapt_getgeom)
2130 (sd->sc_periph, dp, dp->disksize)) {
2131 /*
2132 * Use the Adaptec standard fictitious geometry. What is right
2133 * really depends on the controller (e.g. the 1542C is
2134 * different), but we have to put SOMETHING here.
2135 */
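/*
 * Illustrative: a 2097152-sector disk fabricates
 * 2097152 / (64 * 32) = 1024 cylinders.
 */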
2136 dp->heads = 64;
2137 dp->sectors = 32;
2138 dp->cyls = dp->disksize / (64 * 32);
2139 }
2140 dp->rot_rate = 3600;
2141 return (SDGP_RESULT_OK);
2142 }
2143
2144 static int
2145 sd_flush(struct sd_softc *sd, int flags)
2146 {
2147 struct scsipi_periph *periph = sd->sc_periph;
2148 struct scsi_synchronize_cache_10 cmd;
2149
2150 /*
2151 * If the device is SCSI-2, issue a SYNCHRONIZE CACHE.
2152 * We issue with address 0 length 0, which should be
2153 * interpreted by the device as "all remaining blocks
2154 * starting at address 0". We ignore ILLEGAL REQUEST
2155 * in the event that the command is not supported by
2156 * the device, and poll for completion so that we know
2157 * that the cache has actually been flushed.
2158 *
2159 * Unless, that is, the device can't handle the SYNCHRONIZE CACHE
2160 * command, as indicated by our quirks flags.
2161 *
2162 * XXX What about older devices?
2163 */
2164 if (periph->periph_version < 2 ||
2165 (periph->periph_quirks & PQUIRK_NOSYNCCACHE))
2166 return (0);
2167
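/*
 * The all-zero CDB built below (only the opcode is filled in) encodes
 * LBA 0 and transfer length 0, i.e. "flush everything".
 */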
2168 sd->flags |= SDF_FLUSHING;
2169 memset(&cmd, 0, sizeof(cmd));
2170 cmd.opcode = SCSI_SYNCHRONIZE_CACHE_10;
2171
2172 return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
2173 SDRETRIES, 100000, NULL, flags | XS_CTL_IGNORE_ILLEGAL_REQUEST));
2174 }
2175
2176 static int
2177 sd_getcache(struct sd_softc *sd, int *bitsp)
2178 {
2179 struct scsipi_periph *periph = sd->sc_periph;
2180 struct sd_mode_sense_data scsipi_sense;
2181 int error, bits = 0;
2182 int big;
2183 union scsi_disk_pages *pages;
2184
2185 if (periph->periph_version < 2)
2186 return (EOPNOTSUPP);
2187
2188 memset(&scsipi_sense, 0, sizeof(scsipi_sense));
2189 error = sd_mode_sense(sd, SMS_DBD, &scsipi_sense,
2190 sizeof(scsipi_sense.pages.caching_params), 8, 0, &big);
2191 if (error)
2192 return (error);
2193
2194 if (big)
2195 pages = (void *)(&scsipi_sense.header.big + 1);
2196 else
2197 pages = (void *)(&scsipi_sense.header.small + 1);
2198
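/*
 * In the caching page, a clear RCD (read cache disable) bit means the
 * read cache is enabled, WCE set means the write cache is enabled, and
 * PS in the page-code byte means the page can be saved.
 */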
2199 if ((pages->caching_params.flags & CACHING_RCD) == 0)
2200 bits |= DKCACHE_READ;
2201 if (pages->caching_params.flags & CACHING_WCE)
2202 bits |= DKCACHE_WRITE;
2203 if (pages->caching_params.pg_code & PGCODE_PS)
2204 bits |= DKCACHE_SAVE;
2205
2206 memset(&scsipi_sense, 0, sizeof(scsipi_sense));
2207 error = sd_mode_sense(sd, SMS_DBD, &scsipi_sense,
2208 sizeof(scsipi_sense.pages.caching_params),
2209 SMS_PCTRL_CHANGEABLE|8, 0, &big);
2210 if (error == 0) {
2211 if (big)
2212 pages = (void *)(&scsipi_sense.header.big + 1);
2213 else
2214 pages = (void *)(&scsipi_sense.header.small + 1);
2215
2216 if (pages->caching_params.flags & CACHING_RCD)
2217 bits |= DKCACHE_RCHANGE;
2218 if (pages->caching_params.flags & CACHING_WCE)
2219 bits |= DKCACHE_WCHANGE;
2220 }
2221
2222 *bitsp = bits;
2223
2224 return (0);
2225 }
2226
2227 static int
2228 sd_setcache(struct sd_softc *sd, int bits)
2229 {
2230 struct scsipi_periph *periph = sd->sc_periph;
2231 struct sd_mode_sense_data scsipi_sense;
2232 int error;
2233 uint8_t oflags, byte2 = 0;
2234 int big;
2235 union scsi_disk_pages *pages;
2236
2237 if (periph->periph_version < 2)
2238 return (EOPNOTSUPP);
2239
2240 memset(&scsipi_sense, 0, sizeof(scsipi_sense));
2241 error = sd_mode_sense(sd, SMS_DBD, &scsipi_sense,
2242 sizeof(scsipi_sense.pages.caching_params), 8, 0, &big);
2243 if (error)
2244 return (error);
2245
2246 if (big)
2247 pages = (void *)(&scsipi_sense.header.big + 1);
2248 else
2249 pages = (void *)(&scsipi_sense.header.small + 1);
2250
2251 oflags = pages->caching_params.flags;
2252
2253 if (bits & DKCACHE_READ)
2254 pages->caching_params.flags &= ~CACHING_RCD;
2255 else
2256 pages->caching_params.flags |= CACHING_RCD;
2257
2258 if (bits & DKCACHE_WRITE)
2259 pages->caching_params.flags |= CACHING_WCE;
2260 else
2261 pages->caching_params.flags &= ~CACHING_WCE;
2262
2263 if (oflags == pages->caching_params.flags)
2264 return (0);
2265
2266 pages->caching_params.pg_code &= PGCODE_MASK;
2267
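/*
 * SP (save pages) asks the device to keep the new settings across
 * power cycles; PF marks the parameter list as page-formatted.
 */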
2268 if (bits & DKCACHE_SAVE)
2269 byte2 |= SMS_SP;
2270
2271 return (sd_mode_select(sd, byte2|SMS_PF, &scsipi_sense,
2272 sizeof(struct scsi_mode_page_header) +
2273 pages->caching_params.pg_length, 0, big));
2274 }
2275
2276 static void
2277 sd_set_properties(struct sd_softc *sd)
2278 {
2279 prop_dictionary_t disk_info, odisk_info, geom;
2280
2281 disk_info = prop_dictionary_create();
2282
2283 geom = prop_dictionary_create();
2284
2285 prop_dictionary_set_uint64(geom, "sectors-per-unit",
2286 sd->params.disksize);
2287
2288 prop_dictionary_set_uint32(geom, "sector-size",
2289 sd->params.blksize);
2290
2291 prop_dictionary_set_uint16(geom, "sectors-per-track",
2292 sd->params.sectors);
2293
2294 prop_dictionary_set_uint16(geom, "tracks-per-cylinder",
2295 sd->params.heads);
2296
2297 prop_dictionary_set_uint64(geom, "cylinders-per-unit",
2298 sd->params.cyls);
2299
2300 prop_dictionary_set(disk_info, "geometry", geom);
2301 prop_object_release(geom);
2302
2303 prop_dictionary_set(device_properties(sd->sc_dev),
2304 "disk-info", disk_info);
2305
2306 /*
2307 * Don't release disk_info here; we keep a reference to it.
2308 * disk_detach() will release it when we go away.
2309 */
2310
2311 odisk_info = sd->sc_dk.dk_info;
2312 sd->sc_dk.dk_info = disk_info;
2313 if (odisk_info)
2314 prop_object_release(odisk_info);
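#if 0
/*
 * Illustrative sketch only, not part of the driver: a consumer could
 * read the geometry back out of the dictionary attached above roughly
 * like this.
 */
{
	prop_dictionary_t info, g;
	uint64_t spu;
	uint32_t ssize;

	info = prop_dictionary_get(
	    device_properties(sd->sc_dev), "disk-info");
	g = prop_dictionary_get(info, "geometry");
	if (g != NULL &&
	    prop_dictionary_get_uint64(g, "sectors-per-unit", &spu) &&
	    prop_dictionary_get_uint32(g, "sector-size", &ssize))
		aprint_debug("%llu sectors of %u bytes\n",
		    (unsigned long long)spu, ssize);
}
#endif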
2315 }
2316