1 /* $NetBSD: sd.c,v 1.269.6.1 2008/03/29 16:17:57 mjf Exp $ */
2
3 /*-
4 * Copyright (c) 1998, 2003, 2004 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Charles M. Hannum.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*
40 * Originally written by Julian Elischer (julian (at) dialix.oz.au)
41 * for TRW Financial Systems for use under the MACH(2.5) operating system.
42 *
43 * TRW Financial Systems, in accordance with their agreement with Carnegie
44 * Mellon University, makes this software available to CMU to distribute
45 * or use in any manner that they see fit as long as this message is kept with
46 * the software. For this reason TFS also grants any other persons or
47 * organisations permission to use or modify this software.
48 *
49 * TFS supplies this software to be publicly redistributed
50 * on the understanding that TFS is not responsible for the correct
51 * functioning of this software in any circumstances.
52 *
53 * Ported to run under 386BSD by Julian Elischer (julian (at) dialix.oz.au) Sept 1992
54 */
55
56 #include <sys/cdefs.h>
57 __KERNEL_RCSID(0, "$NetBSD: sd.c,v 1.269.6.1 2008/03/29 16:17:57 mjf Exp $");
58
59 #include "opt_scsi.h"
60 #include "rnd.h"
61
62 #include <sys/param.h>
63 #include <sys/systm.h>
64 #include <sys/kernel.h>
65 #include <sys/file.h>
66 #include <sys/stat.h>
67 #include <sys/ioctl.h>
68 #include <sys/scsiio.h>
69 #include <sys/buf.h>
70 #include <sys/bufq.h>
71 #include <sys/uio.h>
72 #include <sys/malloc.h>
73 #include <sys/errno.h>
74 #include <sys/device.h>
75 #include <sys/disklabel.h>
76 #include <sys/disk.h>
77 #include <sys/proc.h>
78 #include <sys/conf.h>
79 #include <sys/vnode.h>
80 #if NRND > 0
81 #include <sys/rnd.h>
82 #endif
83
84 #include <dev/scsipi/scsi_spc.h>
85 #include <dev/scsipi/scsipi_all.h>
86 #include <dev/scsipi/scsi_all.h>
87 #include <dev/scsipi/scsipi_disk.h>
88 #include <dev/scsipi/scsi_disk.h>
89 #include <dev/scsipi/scsiconf.h>
90 #include <dev/scsipi/scsipi_base.h>
91 #include <dev/scsipi/sdvar.h>
92
93 #include <prop/proplib.h>
94
95 #define SDUNIT(dev) DISKUNIT(dev)
96 #define SDPART(dev) DISKPART(dev)
97 #define SDMINOR(unit, part) DISKMINOR(unit, part)
98 #define MAKESDDEV(maj, unit, part) MAKEDISKDEV(maj, unit, part)
99
100 #define SDLABELDEV(dev) (MAKESDDEV(major(dev), SDUNIT(dev), RAW_PART))
101
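/* Fallback logical block size used when the device reports none or an invalid one. */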
102 #define SD_DEFAULT_BLKSIZE 512
103
104 static void sdminphys(struct buf *);
105 static void sdgetdefaultlabel(struct sd_softc *, struct disklabel *);
106 static int sdgetdisklabel(struct sd_softc *);
107 static void sdstart(struct scsipi_periph *);
108 static void sdrestart(void *);
109 static void sddone(struct scsipi_xfer *, int);
110 static bool sd_suspend(device_t);
111 static int sd_interpret_sense(struct scsipi_xfer *);
112
113 static int sd_mode_sense(struct sd_softc *, u_int8_t, void *, size_t, int,
114 int, int *);
115 static int sd_mode_select(struct sd_softc *, u_int8_t, void *, size_t, int,
116 int);
117 static int sd_validate_blksize(struct scsipi_periph *, int);
118 static u_int64_t sd_read_capacity(struct scsipi_periph *, int *, int flags);
119 static int sd_get_simplifiedparms(struct sd_softc *, struct disk_parms *,
120 int);
121 static int sd_get_capacity(struct sd_softc *, struct disk_parms *, int);
122 static int sd_get_parms(struct sd_softc *, struct disk_parms *, int);
123 static int sd_get_parms_page4(struct sd_softc *, struct disk_parms *,
124 int);
125 static int sd_get_parms_page5(struct sd_softc *, struct disk_parms *,
126 int);
127
128 static int sd_flush(struct sd_softc *, int);
129 static int sd_getcache(struct sd_softc *, int *);
130 static int sd_setcache(struct sd_softc *, int);
131
132 static int sdmatch(struct device *, struct cfdata *, void *);
133 static void sdattach(struct device *, struct device *, void *);
134 static int sdactivate(struct device *, enum devact);
135 static int sddetach(struct device *, int);
136 static void sd_set_properties(struct sd_softc *);
137
138 CFATTACH_DECL(sd, sizeof(struct sd_softc), sdmatch, sdattach, sddetach,
139 sdactivate);
140
141 extern struct cfdriver sd_cd;
142
143 static const struct scsipi_inquiry_pattern sd_patterns[] = {
144 {T_DIRECT, T_FIXED,
145 "", "", ""},
146 {T_DIRECT, T_REMOV,
147 "", "", ""},
148 {T_OPTICAL, T_FIXED,
149 "", "", ""},
150 {T_OPTICAL, T_REMOV,
151 "", "", ""},
152 {T_SIMPLE_DIRECT, T_FIXED,
153 "", "", ""},
154 {T_SIMPLE_DIRECT, T_REMOV,
155 "", "", ""},
156 };
157
158 static dev_type_open(sdopen);
159 static dev_type_close(sdclose);
160 static dev_type_read(sdread);
161 static dev_type_write(sdwrite);
162 static dev_type_ioctl(sdioctl);
163 static dev_type_strategy(sdstrategy);
164 static dev_type_dump(sddump);
165 static dev_type_size(sdsize);
166
167 const struct bdevsw sd_bdevsw = {
168 sdopen, sdclose, sdstrategy, sdioctl, sddump, sdsize, D_DISK
169 };
170
171 const struct cdevsw sd_cdevsw = {
172 sdopen, sdclose, sdread, sdwrite, sdioctl,
173 nostop, notty, nopoll, nommap, nokqfilter, D_DISK
174 };
175
176 static struct dkdriver sddkdriver = { sdstrategy, sdminphys };
177
178 static const struct scsipi_periphsw sd_switch = {
179 sd_interpret_sense, /* check our error handler first */
180 sdstart, /* have a queue, served by this */
181 NULL, /* have no async handler */
182 sddone, /* deal with stats at interrupt time */
183 };
184
185 struct sd_mode_sense_data {
186 /*
187 * XXX
188 * We are not going to parse this as-is -- it just has to be large
189 * enough.
190 */
191 union {
192 struct scsi_mode_parameter_header_6 small;
193 struct scsi_mode_parameter_header_10 big;
194 } header;
195 struct scsi_general_block_descriptor blk_desc;
196 union scsi_disk_pages pages;
197 };
198
199 /*
200 * The routine called by the low level scsi routine when it discovers
201 * a device suitable for this driver.
202 */
203 static int
204 sdmatch(struct device *parent, struct cfdata *match,
205 void *aux)
206 {
207 struct scsipibus_attach_args *sa = aux;
208 int priority;
209
210 (void)scsipi_inqmatch(&sa->sa_inqbuf,
211 sd_patterns, sizeof(sd_patterns) / sizeof(sd_patterns[0]),
212 sizeof(sd_patterns[0]), &priority);
213
214 return (priority);
215 }
216
217 /*
218 * Attach routine common to atapi & scsi.
219 */
220 static void
221 sdattach(struct device *parent, struct device *self, void *aux)
222 {
223 struct sd_softc *sd = device_private(self);
224 struct scsipibus_attach_args *sa = aux;
225 struct scsipi_periph *periph = sa->sa_periph;
226 int error, result;
227 struct disk_parms *dp = &sd->params;
228 char pbuf[9];
229 int i;
230 uint16_t np;
231 int cmajor, bmajor;
232
233 SC_DEBUG(periph, SCSIPI_DB2, ("sdattach: "));
234
235 sd->type = (sa->sa_inqbuf.type & SID_TYPE);
236 strncpy(sd->name, sa->sa_inqbuf.product, sizeof(sd->name));
237 if (sd->type == T_SIMPLE_DIRECT)
238 periph->periph_quirks |= PQUIRK_ONLYBIG | PQUIRK_NOBIGMODESENSE;
239
240 if (scsipi_periph_bustype(sa->sa_periph) == SCSIPI_BUSTYPE_SCSI &&
241 periph->periph_version == 0)
242 sd->flags |= SDF_ANCIENT;
243
244 bufq_alloc(&sd->buf_queue, BUFQ_DISK_DEFAULT_STRAT, BUFQ_SORT_RAWBLOCK);
245
246 callout_init(&sd->sc_callout, 0);
247
248 /*
249 * Store information needed to contact our base driver
250 */
251 sd->sc_periph = periph;
252
253 periph->periph_dev = &sd->sc_dev;
254 periph->periph_switch = &sd_switch;
255
256 /*
257 * Increase our openings to the maximum-per-periph
258 * supported by the adapter. This will either be
259 * clamped down or grown by the adapter if necessary.
260 */
261 periph->periph_openings =
262 SCSIPI_CHAN_MAX_PERIPH(periph->periph_channel);
263 periph->periph_flags |= PERIPH_GROW_OPENINGS;
264
265 /*
266 * Initialize and attach the disk structure.
267 */
268 disk_init(&sd->sc_dk, sd->sc_dev.dv_xname, &sddkdriver);
269 disk_attach(&sd->sc_dk);
270
271 /*
272 * Use the subdriver to request information regarding the drive.
273 */
274 aprint_naive("\n");
275 aprint_normal("\n");
276
277 error = scsipi_test_unit_ready(periph,
278 XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
279 XS_CTL_IGNORE_MEDIA_CHANGE | XS_CTL_SILENT_NODEV);
280
281 if (error)
282 result = SDGP_RESULT_OFFLINE;
283 else
284 result = sd_get_parms(sd, &sd->params, XS_CTL_DISCOVERY);
285 aprint_normal("%s: ", sd->sc_dev.dv_xname);
286 switch (result) {
287 case SDGP_RESULT_OK:
288 format_bytes(pbuf, sizeof(pbuf),
289 (u_int64_t)dp->disksize * dp->blksize);
290 aprint_normal(
291 "%s, %ld cyl, %ld head, %ld sec, %ld bytes/sect x %llu sectors",
292 pbuf, dp->cyls, dp->heads, dp->sectors, dp->blksize,
293 (unsigned long long)dp->disksize);
294 break;
295
296 case SDGP_RESULT_OFFLINE:
297 aprint_normal("drive offline");
298 break;
299
300 case SDGP_RESULT_UNFORMATTED:
301 aprint_normal("unformatted media");
302 break;
303
304 #ifdef DIAGNOSTIC
305 default:
306 panic("sdattach: unknown result from get_parms");
307 break;
308 #endif
309 }
310 aprint_normal("\n");
311
312 if (!pmf_device_register(self, sd_suspend, NULL))
313 aprint_error_dev(self, "couldn't establish power handler\n");
314
315 #if NRND > 0
316 /*
317 * attach the device into the random source list
318 */
319 rnd_attach_source(&sd->rnd_source, sd->sc_dev.dv_xname,
320 RND_TYPE_DISK, 0);
321 #endif
322
323 /* Discover wedges on this disk. */
324 dkwedge_discover(&sd->sc_dk);
325
326 sd_set_properties(sd);
327
328 np = sd->sc_dk.dk_label->d_npartitions;
329
330 /* locate the major numbers */
331 bmajor = bdevsw_lookup_major(&sd_bdevsw);
332 cmajor = cdevsw_lookup_major(&sd_cdevsw);
333 for (i = 0; i < 16; i++) {
334 device_register_name(
335 MAKEDISKDEV(bmajor, device_unit(&sd->sc_dev), i),
336 &sd->sc_dev, false, DEV_DISK,
337 "sd%d%c", device_unit(&sd->sc_dev), 'a'+i);
338
339 device_register_name(
340 MAKEDISKDEV(cmajor, device_unit(&sd->sc_dev), i),
341 &sd->sc_dev, true, DEV_DISK,
342 "rsd%d%c", device_unit(&sd->sc_dev), 'a' + i);
343 }
344 }
345
346 static int
347 sdactivate(struct device *self, enum devact act)
348 {
349 int rv = 0;
350
351 switch (act) {
352 case DVACT_ACTIVATE:
353 rv = EOPNOTSUPP;
354 break;
355
356 case DVACT_DEACTIVATE:
357 /*
358 * Nothing to do; we key off the device's DVF_ACTIVE.
359 */
360 break;
361 }
362 return (rv);
363 }
364
365 static int
366 sddetach(struct device *self, int flags)
367 {
368 struct sd_softc *sd = device_private(self);
369 int s, bmaj, cmaj, i, mn;
370 int error;
371
372 /* locate the major number */
373 bmaj = bdevsw_lookup_major(&sd_bdevsw);
374 cmaj = cdevsw_lookup_major(&sd_cdevsw);
375
376 /*
377 * Nuke the vnodes for any open instances and deregister
378 * any device node names.
379 */
380 for (i = 0; i < MAXPARTITIONS; i++) {
381 mn = SDMINOR(device_unit(self), i);
382 vdevgone(bmaj, mn, mn, VBLK);
383 vdevgone(cmaj, mn, mn, VCHR);
384
385 error = device_unregister_name(makedev(bmaj, mn),
386 "sd%d%c", device_unit(self), 'a' + i);
387 #ifdef DIAGNOSTIC
388 if (error != 0)
389 panic("could not unregister block device name");
390 #endif
391 error = device_unregister_name(makedev(cmaj, mn),
392 "rsd%d%c", device_unit(self), 'a' + i);
393 #ifdef DIAGNOSTIC
394 if (error != 0)
395 panic("could not unregister char device name");
396 #endif
397 }
398
399 /* kill any pending restart */
400 callout_stop(&sd->sc_callout);
401
402 /* Delete all of our wedges. */
403 dkwedge_delall(&sd->sc_dk);
404
405 s = splbio();
406
407 /* Kill off any queued buffers. */
408 bufq_drain(sd->buf_queue);
409
410 bufq_free(sd->buf_queue);
411
412 /* Kill off any pending commands. */
413 scsipi_kill_pending(sd->sc_periph);
414
415 splx(s);
416
417 /* Detach from the disk list. */
418 disk_detach(&sd->sc_dk);
419 disk_destroy(&sd->sc_dk);
420
421 pmf_device_deregister(self);
422
423 #if NRND > 0
424 /* Unhook the entropy source. */
425 rnd_detach_source(&sd->rnd_source);
426 #endif
427
428 return (0);
429 }
430
431 /*
432 * Open the device. Make sure the partition info is as up-to-date as can be.
433 */
434 static int
435 sdopen(dev_t dev, int flag, int fmt, struct lwp *l)
436 {
437 struct sd_softc *sd;
438 struct scsipi_periph *periph;
439 struct scsipi_adapter *adapt;
440 int unit, part;
441 int error;
442
443 unit = SDUNIT(dev);
444 if (unit >= sd_cd.cd_ndevs)
445 return (ENXIO);
446 sd = sd_cd.cd_devs[unit];
447 if (sd == NULL)
448 return (ENXIO);
449
450 if (!device_is_active(&sd->sc_dev))
451 return (ENODEV);
452
453 part = SDPART(dev);
454
455 mutex_enter(&sd->sc_dk.dk_openlock);
456
457 /*
458 * If there are wedges, and this is not RAW_PART, then we
459 * need to fail.
460 */
461 if (sd->sc_dk.dk_nwedges != 0 && part != RAW_PART) {
462 error = EBUSY;
463 goto bad1;
464 }
465
466 periph = sd->sc_periph;
467 adapt = periph->periph_channel->chan_adapter;
468
469 SC_DEBUG(periph, SCSIPI_DB1,
470 ("sdopen: dev=0x%x (unit %d (of %d), partition %d)\n", dev, unit,
471 sd_cd.cd_ndevs, part));
472
473 /*
474 * If this is the first open of this device, add a reference
475 * to the adapter.
476 */
477 if (sd->sc_dk.dk_openmask == 0 &&
478 (error = scsipi_adapter_addref(adapt)) != 0)
479 goto bad1;
480
481 if ((periph->periph_flags & PERIPH_OPEN) != 0) {
482 /*
483 * If any partition is open, but the disk has been invalidated,
484 * disallow further opens of the non-raw partitions.
485 */
486 if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0 &&
487 (part != RAW_PART || fmt != S_IFCHR)) {
488 error = EIO;
489 goto bad2;
490 }
491 } else {
492 int silent;
493
494 if (part == RAW_PART && fmt == S_IFCHR)
495 silent = XS_CTL_SILENT;
496 else
497 silent = 0;
498
499 /* Check that it is still responding and ok. */
500 error = scsipi_test_unit_ready(periph,
501 XS_CTL_IGNORE_ILLEGAL_REQUEST | XS_CTL_IGNORE_MEDIA_CHANGE |
502 silent);
503
504 /*
505 * Start the pack spinning if necessary. Always allow the
506 * raw partition to be opened, for raw IOCTLs. Data transfers
507 * will check for SDEV_MEDIA_LOADED.
508 */
509 if (error == EIO) {
510 int error2;
511
512 error2 = scsipi_start(periph, SSS_START, silent);
513 switch (error2) {
514 case 0:
515 error = 0;
516 break;
517 case EIO:
518 case EINVAL:
519 break;
520 default:
521 error = error2;
522 break;
523 }
524 }
525 if (error) {
526 if (silent)
527 goto out;
528 goto bad2;
529 }
530
531 periph->periph_flags |= PERIPH_OPEN;
532
533 if (periph->periph_flags & PERIPH_REMOVABLE) {
534 /* Lock the pack in. */
535 error = scsipi_prevent(periph, SPAMR_PREVENT_DT,
536 XS_CTL_IGNORE_ILLEGAL_REQUEST |
537 XS_CTL_IGNORE_MEDIA_CHANGE |
538 XS_CTL_SILENT);
539 if (error)
540 goto bad3;
541 }
542
543 if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0) {
544 int param_error;
545 periph->periph_flags |= PERIPH_MEDIA_LOADED;
546
547 /*
548 * Load the physical device parameters.
549 *
550 * Note that if media is present but unformatted,
551 * we allow the open (so that it can be formatted!).
552 * The drive should refuse real I/O, if the media is
553 * unformatted.
554 */
555 if ((param_error = sd_get_parms(sd, &sd->params, 0))
556 == SDGP_RESULT_OFFLINE) {
557 error = ENXIO;
558 periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
559 goto bad3;
560 }
561 SC_DEBUG(periph, SCSIPI_DB3, ("Params loaded "));
562
563 /* Load the partition info if not already loaded. */
564 if (param_error == 0) {
565 if ((sdgetdisklabel(sd) != 0) && (part != RAW_PART)) {
566 error = EIO;
567 goto bad3;
568 }
569 SC_DEBUG(periph, SCSIPI_DB3,
570 ("Disklabel loaded "));
571 }
572 }
573 }
574
575 /* Check that the partition exists. */
576 if (part != RAW_PART &&
577 (part >= sd->sc_dk.dk_label->d_npartitions ||
578 sd->sc_dk.dk_label->d_partitions[part].p_fstype == FS_UNUSED)) {
579 error = ENXIO;
580 goto bad3;
581 }
582
583 out: /* Ensure only one open at a time. */
584 switch (fmt) {
585 case S_IFCHR:
586 sd->sc_dk.dk_copenmask |= (1 << part);
587 break;
588 case S_IFBLK:
589 sd->sc_dk.dk_bopenmask |= (1 << part);
590 break;
591 }
592 sd->sc_dk.dk_openmask =
593 sd->sc_dk.dk_copenmask | sd->sc_dk.dk_bopenmask;
594
595 SC_DEBUG(periph, SCSIPI_DB3, ("open complete\n"));
596 mutex_exit(&sd->sc_dk.dk_openlock);
597 return (0);
598
599 bad3:
600 if (sd->sc_dk.dk_openmask == 0) {
601 if (periph->periph_flags & PERIPH_REMOVABLE)
602 scsipi_prevent(periph, SPAMR_ALLOW,
603 XS_CTL_IGNORE_ILLEGAL_REQUEST |
604 XS_CTL_IGNORE_MEDIA_CHANGE |
605 XS_CTL_SILENT);
606 periph->periph_flags &= ~PERIPH_OPEN;
607 }
608
609 bad2:
610 if (sd->sc_dk.dk_openmask == 0)
611 scsipi_adapter_delref(adapt);
612
613 bad1:
614 mutex_exit(&sd->sc_dk.dk_openlock);
615 return (error);
616 }
617
618 /*
619 * Close the device. Only called if we are the LAST occurrence of an open
620 * device. Convenient now but usually a pain.
621 */
622 static int
623 sdclose(dev_t dev, int flag, int fmt, struct lwp *l)
624 {
625 struct sd_softc *sd = sd_cd.cd_devs[SDUNIT(dev)];
626 struct scsipi_periph *periph = sd->sc_periph;
627 struct scsipi_adapter *adapt = periph->periph_channel->chan_adapter;
628 int part = SDPART(dev);
629
630 mutex_enter(&sd->sc_dk.dk_openlock);
631 switch (fmt) {
632 case S_IFCHR:
633 sd->sc_dk.dk_copenmask &= ~(1 << part);
634 break;
635 case S_IFBLK:
636 sd->sc_dk.dk_bopenmask &= ~(1 << part);
637 break;
638 }
639 sd->sc_dk.dk_openmask =
640 sd->sc_dk.dk_copenmask | sd->sc_dk.dk_bopenmask;
641
642 if (sd->sc_dk.dk_openmask == 0) {
643 /*
644 * If the disk cache needs flushing, and the disk supports
645 * it, do it now.
646 */
647 if ((sd->flags & SDF_DIRTY) != 0) {
648 if (sd_flush(sd, 0)) {
649 printf("%s: cache synchronization failed\n",
650 sd->sc_dev.dv_xname);
651 sd->flags &= ~SDF_FLUSHING;
652 } else
653 sd->flags &= ~(SDF_FLUSHING|SDF_DIRTY);
654 }
655
656 scsipi_wait_drain(periph);
657
658 if (periph->periph_flags & PERIPH_REMOVABLE)
659 scsipi_prevent(periph, SPAMR_ALLOW,
660 XS_CTL_IGNORE_ILLEGAL_REQUEST |
661 XS_CTL_IGNORE_NOT_READY |
662 XS_CTL_SILENT);
663 periph->periph_flags &= ~PERIPH_OPEN;
664
665 scsipi_wait_drain(periph);
666
667 scsipi_adapter_delref(adapt);
668 }
669
670 mutex_exit(&sd->sc_dk.dk_openlock);
671 return (0);
672 }
673
674 /*
675 * Actually translate the requested transfer into one the physical driver
676 * can understand. The transfer is described by a buf and will include
677 * only one physical transfer.
678 */
679 static void
680 sdstrategy(struct buf *bp)
681 {
682 struct sd_softc *sd = sd_cd.cd_devs[SDUNIT(bp->b_dev)];
683 struct scsipi_periph *periph = sd->sc_periph;
684 struct disklabel *lp;
685 daddr_t blkno;
686 int s;
687 bool sector_aligned;
688
689 SC_DEBUG(sd->sc_periph, SCSIPI_DB2, ("sdstrategy "));
690 SC_DEBUG(sd->sc_periph, SCSIPI_DB1,
691 ("%d bytes @ blk %" PRId64 "\n", bp->b_bcount, bp->b_blkno));
692 /*
693 * If the device has been made invalid, error out
694 */
695 if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0 ||
696 !device_is_active(&sd->sc_dev)) {
697 if (periph->periph_flags & PERIPH_OPEN)
698 bp->b_error = EIO;
699 else
700 bp->b_error = ENODEV;
701 goto done;
702 }
703
704 lp = sd->sc_dk.dk_label;
705
706 /*
707 * The transfer must be a whole number of blocks, offset must not be
708 * negative.
709 */
710 if (lp->d_secsize == DEV_BSIZE) {
711 sector_aligned = (bp->b_bcount & (DEV_BSIZE - 1)) == 0;
712 } else {
713 sector_aligned = (bp->b_bcount % lp->d_secsize) == 0;
714 }
715 if (!sector_aligned || bp->b_blkno < 0) {
716 bp->b_error = EINVAL;
717 goto done;
718 }
719 /*
720 * If it's a null transfer, return immediately.
721 */
722 if (bp->b_bcount == 0)
723 goto done;
724
725 /*
726 * Do bounds checking, adjust transfer. if error, process.
727 * If end of partition, just return.
728 */
729 if (SDPART(bp->b_dev) == RAW_PART) {
730 if (bounds_check_with_mediasize(bp, DEV_BSIZE,
731 sd->params.disksize512) <= 0)
732 goto done;
733 } else {
734 if (bounds_check_with_label(&sd->sc_dk, bp,
735 (sd->flags & (SDF_WLABEL|SDF_LABELLING)) != 0) <= 0)
736 goto done;
737 }
738
739 /*
740 * Now convert the block number to absolute and put it in
741 * terms of the device's logical block size.
742 */
743 if (lp->d_secsize == DEV_BSIZE)
744 blkno = bp->b_blkno;
745 else if (lp->d_secsize > DEV_BSIZE)
746 blkno = bp->b_blkno / (lp->d_secsize / DEV_BSIZE);
747 else
748 blkno = bp->b_blkno * (DEV_BSIZE / lp->d_secsize);
749
750 if (SDPART(bp->b_dev) != RAW_PART)
751 blkno += lp->d_partitions[SDPART(bp->b_dev)].p_offset;
752
753 bp->b_rawblkno = blkno;
754
755 s = splbio();
756
757 /*
758 * Place it in the queue of disk activities for this disk.
759 *
760 * XXX Only do disksort() if the current operating mode does not
761 * XXX include tagged queueing.
762 */
763 BUFQ_PUT(sd->buf_queue, bp);
764
765 /*
766 * Tell the device to get going on the transfer if it's
767 * not doing anything, otherwise just wait for completion
768 */
769 sdstart(sd->sc_periph);
770
771 splx(s);
772 return;
773
774 done:
775 /*
776 * Correctly set the buf to indicate a completed xfer
777 */
778 bp->b_resid = bp->b_bcount;
779 biodone(bp);
780 }
781
782 /*
783 * sdstart looks to see if there is a buf waiting for the device
784 * and that the device is not already busy. If both are true,
785 * it dequeues the buf and creates a scsi command to perform the
786 * transfer in the buf. The transfer request will call scsipi_done
787 * on completion, which will in turn call this routine again
788 * so that the next queued transfer is performed.
789 * The bufs are queued by the strategy routine (sdstrategy)
790 *
791 * This routine is also called after other non-queued requests
792 * have been made of the scsi driver, to ensure that the queue
793 * continues to be drained.
794 *
795 * Must be called at the correct (highish) spl level.
796 * sdstart() is called at splbio from sdstrategy, sdrestart and scsipi_done.
797 */
798 static void
799 sdstart(struct scsipi_periph *periph)
800 {
801 struct sd_softc *sd = (void *)periph->periph_dev;
802 struct disklabel *lp = sd->sc_dk.dk_label;
803 struct buf *bp = 0;
804 struct scsipi_rw_16 cmd16;
805 struct scsipi_rw_10 cmd_big;
806 struct scsi_rw_6 cmd_small;
807 struct scsipi_generic *cmdp;
808 struct scsipi_xfer *xs;
809 int nblks, cmdlen, error, flags;
810
811 SC_DEBUG(periph, SCSIPI_DB2, ("sdstart "));
812 /*
813 * Check if the device has room for another command
814 */
815 while (periph->periph_active < periph->periph_openings) {
816 /*
817 * There is excess capacity, but a special command waits;
818 * it'll need the adapter as soon as we clear out of the
819 * way and let it run (user level wait).
820 */
821 if (periph->periph_flags & PERIPH_WAITING) {
822 periph->periph_flags &= ~PERIPH_WAITING;
823 wakeup((void *)periph);
824 return;
825 }
826
827 /*
828 * If the device has become invalid, abort all the
829 * reads and writes until all files have been closed and
830 * re-opened
831 */
832 if (__predict_false(
833 (periph->periph_flags & PERIPH_MEDIA_LOADED) == 0)) {
834 if ((bp = BUFQ_GET(sd->buf_queue)) != NULL) {
835 bp->b_error = EIO;
836 bp->b_resid = bp->b_bcount;
837 biodone(bp);
838 continue;
839 } else {
840 return;
841 }
842 }
843
844 /*
845 * See if there is a buf with work for us to do..
846 */
847 if ((bp = BUFQ_PEEK(sd->buf_queue)) == NULL)
848 return;
849
850 /*
851 * We have a buf, now we should make a command.
852 */
853
854 if (lp->d_secsize == DEV_BSIZE)
855 nblks = bp->b_bcount >> DEV_BSHIFT;
856 else
857 nblks = howmany(bp->b_bcount, lp->d_secsize);
858
859 /*
860 * Fill out the scsi command. Use the smallest CDB possible
861 * (6-byte, 10-byte, or 16-byte).
862 */
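/*
 * READ/WRITE(6) can address only a 21-bit LBA and an 8-bit transfer
 * length, hence the 0x1fffff and 0xff masks below; READ/WRITE(10)
 * covers 32-bit LBAs, and anything larger needs the 16-byte CDBs.
 */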
863 if (((bp->b_rawblkno & 0x1fffff) == bp->b_rawblkno) &&
864 ((nblks & 0xff) == nblks) &&
865 !(periph->periph_quirks & PQUIRK_ONLYBIG)) {
866 /* 6-byte CDB */
867 memset(&cmd_small, 0, sizeof(cmd_small));
868 cmd_small.opcode = (bp->b_flags & B_READ) ?
869 SCSI_READ_6_COMMAND : SCSI_WRITE_6_COMMAND;
870 _lto3b(bp->b_rawblkno, cmd_small.addr);
871 cmd_small.length = nblks & 0xff;
872 cmdlen = sizeof(cmd_small);
873 cmdp = (struct scsipi_generic *)&cmd_small;
874 } else if ((bp->b_rawblkno & 0xffffffff) == bp->b_rawblkno) {
875 /* 10-byte CDB */
876 memset(&cmd_big, 0, sizeof(cmd_big));
877 cmd_big.opcode = (bp->b_flags & B_READ) ?
878 READ_10 : WRITE_10;
879 _lto4b(bp->b_rawblkno, cmd_big.addr);
880 _lto2b(nblks, cmd_big.length);
881 cmdlen = sizeof(cmd_big);
882 cmdp = (struct scsipi_generic *)&cmd_big;
883 } else {
884 /* 16-byte CDB */
885 memset(&cmd16, 0, sizeof(cmd16));
886 cmd16.opcode = (bp->b_flags & B_READ) ?
887 READ_16 : WRITE_16;
888 _lto8b(bp->b_rawblkno, cmd16.addr);
889 _lto4b(nblks, cmd16.length);
890 cmdlen = sizeof(cmd16);
891 cmdp = (struct scsipi_generic *)&cmd16;
892 }
893
894 /* Instrumentation. */
895 disk_busy(&sd->sc_dk);
896
897 /*
898 * Mark the disk dirty so that the cache will be
899 * flushed on close.
900 */
901 if ((bp->b_flags & B_READ) == 0)
902 sd->flags |= SDF_DIRTY;
903
904 /*
905 * Figure out what flags to use.
906 */
907 flags = XS_CTL_NOSLEEP|XS_CTL_ASYNC|XS_CTL_SIMPLE_TAG;
908 if (bp->b_flags & B_READ)
909 flags |= XS_CTL_DATA_IN;
910 else
911 flags |= XS_CTL_DATA_OUT;
912
913 /*
914 * Call the routine that chats with the adapter.
915 * Note: we cannot sleep as we may be an interrupt
916 */
917 xs = scsipi_make_xs(periph, cmdp, cmdlen,
918 (u_char *)bp->b_data, bp->b_bcount,
919 SDRETRIES, SD_IO_TIMEOUT, bp, flags);
920 if (__predict_false(xs == NULL)) {
921 /*
922 * out of memory. Keep this buffer in the queue, and
923 * retry later.
924 */
925 callout_reset(&sd->sc_callout, hz / 2, sdrestart,
926 periph);
927 return;
928 }
929 /*
930 * need to dequeue the buffer before queuing the command,
931 * because sdstart may be called recursively from the
932 * HBA driver
933 */
934 #ifdef DIAGNOSTIC
935 if (BUFQ_GET(sd->buf_queue) != bp)
936 panic("sdstart(): dequeued wrong buf");
937 #else
938 BUFQ_GET(sd->buf_queue);
939 #endif
940 error = scsipi_execute_xs(xs);
941 /* with a scsipi_xfer preallocated, scsipi_command can't fail */
942 KASSERT(error == 0);
943 }
944 }
945
946 static void
947 sdrestart(void *v)
948 {
949 int s = splbio();
950 sdstart((struct scsipi_periph *)v);
951 splx(s);
952 }
953
954 static void
955 sddone(struct scsipi_xfer *xs, int error)
956 {
957 struct sd_softc *sd = (void *)xs->xs_periph->periph_dev;
958 struct buf *bp = xs->bp;
959
960 if (sd->flags & SDF_FLUSHING) {
961 /* Flush completed, no longer dirty. */
962 sd->flags &= ~(SDF_FLUSHING|SDF_DIRTY);
963 }
964
965 if (bp) {
966 bp->b_error = error;
967 bp->b_resid = xs->resid;
968 if (error) {
969 /* on a read/write error bp->b_resid is zero, so fix */
970 bp->b_resid = bp->b_bcount;
971 }
972
973 disk_unbusy(&sd->sc_dk, bp->b_bcount - bp->b_resid,
974 (bp->b_flags & B_READ));
975 #if NRND > 0
976 rnd_add_uint32(&sd->rnd_source, bp->b_rawblkno);
977 #endif
978
979 biodone(bp);
980 }
981 }
982
983 static void
984 sdminphys(struct buf *bp)
985 {
986 struct sd_softc *sd = sd_cd.cd_devs[SDUNIT(bp->b_dev)];
987 long xmax;
988
989 /*
990 * If the device is ancient, we want to make sure that
991 * the transfer fits into a 6-byte cdb.
992 *
993 * XXX Note that the SCSI-I spec says that 256-block transfers
994 * are allowed in a 6-byte read/write, and are specified
995 * by setting the "length" to 0. However, we're conservative
996 * here, allowing only 255-block transfers in case an
997 * ancient device gets confused by length == 0. A length of 0
998 * in a 10-byte read/write actually means 0 blocks.
999 */
1000 if ((sd->flags & SDF_ANCIENT) &&
1001 ((sd->sc_periph->periph_flags &
1002 (PERIPH_REMOVABLE | PERIPH_MEDIA_LOADED)) != PERIPH_REMOVABLE)) {
1003 xmax = sd->sc_dk.dk_label->d_secsize * 0xff;
1004
1005 if (bp->b_bcount > xmax)
1006 bp->b_bcount = xmax;
1007 }
1008
1009 scsipi_adapter_minphys(sd->sc_periph->periph_channel, bp);
1010 }
1011
1012 static int
1013 sdread(dev_t dev, struct uio *uio, int ioflag)
1014 {
1015
1016 return (physio(sdstrategy, NULL, dev, B_READ, sdminphys, uio));
1017 }
1018
1019 static int
1020 sdwrite(dev_t dev, struct uio *uio, int ioflag)
1021 {
1022
1023 return (physio(sdstrategy, NULL, dev, B_WRITE, sdminphys, uio));
1024 }
1025
1026 /*
1027 * Perform special action on behalf of the user
1028 * Knows about the internals of this device
1029 */
1030 static int
1031 sdioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
1032 {
1033 struct sd_softc *sd = sd_cd.cd_devs[SDUNIT(dev)];
1034 struct scsipi_periph *periph = sd->sc_periph;
1035 int part = SDPART(dev);
1036 int error = 0;
1037 #ifdef __HAVE_OLD_DISKLABEL
1038 struct disklabel *newlabel = NULL;
1039 #endif
1040
1041 SC_DEBUG(sd->sc_periph, SCSIPI_DB2, ("sdioctl 0x%lx ", cmd));
1042
1043 /*
1044 * If the device is not valid, some IOCTLs can still be
1045 * handled on the raw partition. Check this here.
1046 */
1047 if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0) {
1048 switch (cmd) {
1049 case DIOCKLABEL:
1050 case DIOCWLABEL:
1051 case DIOCLOCK:
1052 case DIOCEJECT:
1053 case ODIOCEJECT:
1054 case DIOCGCACHE:
1055 case DIOCSCACHE:
1056 case SCIOCIDENTIFY:
1057 case OSCIOCIDENTIFY:
1058 case SCIOCCOMMAND:
1059 case SCIOCDEBUG:
1060 if (part == RAW_PART)
1061 break;
1062 /* FALLTHROUGH */
1063 default:
1064 if ((periph->periph_flags & PERIPH_OPEN) == 0)
1065 return (ENODEV);
1066 else
1067 return (EIO);
1068 }
1069 }
1070
1071 switch (cmd) {
1072 case DIOCGDINFO:
1073 *(struct disklabel *)addr = *(sd->sc_dk.dk_label);
1074 return (0);
1075
1076 #ifdef __HAVE_OLD_DISKLABEL
1077 case ODIOCGDINFO:
1078 newlabel = malloc(sizeof *newlabel, M_TEMP, M_WAITOK);
1079 if (newlabel == NULL)
1080 return EIO;
1081 memcpy(newlabel, sd->sc_dk.dk_label, sizeof (*newlabel));
1082 if (newlabel->d_npartitions <= OLDMAXPARTITIONS)
1083 memcpy(addr, newlabel, sizeof (struct olddisklabel));
1084 else
1085 error = ENOTTY;
1086 free(newlabel, M_TEMP);
1087 return error;
1088 #endif
1089
1090 case DIOCGPART:
1091 ((struct partinfo *)addr)->disklab = sd->sc_dk.dk_label;
1092 ((struct partinfo *)addr)->part =
1093 &sd->sc_dk.dk_label->d_partitions[part];
1094 return (0);
1095
1096 case DIOCWDINFO:
1097 case DIOCSDINFO:
1098 #ifdef __HAVE_OLD_DISKLABEL
1099 case ODIOCWDINFO:
1100 case ODIOCSDINFO:
1101 #endif
1102 {
1103 struct disklabel *lp;
1104
1105 if ((flag & FWRITE) == 0)
1106 return (EBADF);
1107
1108 #ifdef __HAVE_OLD_DISKLABEL
1109 if (cmd == ODIOCSDINFO || cmd == ODIOCWDINFO) {
1110 newlabel = malloc(sizeof *newlabel, M_TEMP, M_WAITOK);
1111 if (newlabel == NULL)
1112 return EIO;
1113 memset(newlabel, 0, sizeof *newlabel);
1114 memcpy(newlabel, addr, sizeof (struct olddisklabel));
1115 lp = newlabel;
1116 } else
1117 #endif
1118 lp = (struct disklabel *)addr;
1119
1120 mutex_enter(&sd->sc_dk.dk_openlock);
1121 sd->flags |= SDF_LABELLING;
1122
1123 error = setdisklabel(sd->sc_dk.dk_label,
1124 lp, /*sd->sc_dk.dk_openmask : */0,
1125 sd->sc_dk.dk_cpulabel);
1126 if (error == 0) {
1127 if (cmd == DIOCWDINFO
1128 #ifdef __HAVE_OLD_DISKLABEL
1129 || cmd == ODIOCWDINFO
1130 #endif
1131 )
1132 error = writedisklabel(SDLABELDEV(dev),
1133 sdstrategy, sd->sc_dk.dk_label,
1134 sd->sc_dk.dk_cpulabel);
1135 }
1136
1137 sd->flags &= ~SDF_LABELLING;
1138 mutex_exit(&sd->sc_dk.dk_openlock);
1139 #ifdef __HAVE_OLD_DISKLABEL
1140 if (newlabel != NULL)
1141 free(newlabel, M_TEMP);
1142 #endif
1143 return (error);
1144 }
1145
1146 case DIOCKLABEL:
1147 if (*(int *)addr)
1148 periph->periph_flags |= PERIPH_KEEP_LABEL;
1149 else
1150 periph->periph_flags &= ~PERIPH_KEEP_LABEL;
1151 return (0);
1152
1153 case DIOCWLABEL:
1154 if ((flag & FWRITE) == 0)
1155 return (EBADF);
1156 if (*(int *)addr)
1157 sd->flags |= SDF_WLABEL;
1158 else
1159 sd->flags &= ~SDF_WLABEL;
1160 return (0);
1161
1162 case DIOCLOCK:
1163 if (periph->periph_flags & PERIPH_REMOVABLE)
1164 return (scsipi_prevent(periph,
1165 (*(int *)addr) ?
1166 SPAMR_PREVENT_DT : SPAMR_ALLOW, 0));
1167 else
1168 return (ENOTTY);
1169
1170 case DIOCEJECT:
1171 if ((periph->periph_flags & PERIPH_REMOVABLE) == 0)
1172 return (ENOTTY);
1173 if (*(int *)addr == 0) {
1174 /*
1175 * Don't force eject: check that we are the only
1176 * partition open. If so, unlock it.
1177 */
1178 if ((sd->sc_dk.dk_openmask & ~(1 << part)) == 0 &&
1179 sd->sc_dk.dk_bopenmask + sd->sc_dk.dk_copenmask ==
1180 sd->sc_dk.dk_openmask) {
1181 error = scsipi_prevent(periph, SPAMR_ALLOW,
1182 XS_CTL_IGNORE_NOT_READY);
1183 if (error)
1184 return (error);
1185 } else {
1186 return (EBUSY);
1187 }
1188 }
1189 /* FALLTHROUGH */
1190 case ODIOCEJECT:
1191 return ((periph->periph_flags & PERIPH_REMOVABLE) == 0 ?
1192 ENOTTY : scsipi_start(periph, SSS_STOP|SSS_LOEJ, 0));
1193
1194 case DIOCGDEFLABEL:
1195 sdgetdefaultlabel(sd, (struct disklabel *)addr);
1196 return (0);
1197
1198 #ifdef __HAVE_OLD_DISKLABEL
1199 case ODIOCGDEFLABEL:
1200 newlabel = malloc(sizeof *newlabel, M_TEMP, M_WAITOK);
1201 if (newlabel == NULL)
1202 return EIO;
1203 sdgetdefaultlabel(sd, newlabel);
1204 if (newlabel->d_npartitions <= OLDMAXPARTITIONS)
1205 memcpy(addr, newlabel, sizeof (struct olddisklabel));
1206 else
1207 error = ENOTTY;
1208 free(newlabel, M_TEMP);
1209 return error;
1210 #endif
1211
1212 case DIOCGCACHE:
1213 return (sd_getcache(sd, (int *) addr));
1214
1215 case DIOCSCACHE:
1216 if ((flag & FWRITE) == 0)
1217 return (EBADF);
1218 return (sd_setcache(sd, *(int *) addr));
1219
1220 case DIOCCACHESYNC:
1221 /*
1222 * XXX Do we really need to care about having a writable
1223 * file descriptor here?
1224 */
1225 if ((flag & FWRITE) == 0)
1226 return (EBADF);
1227 if (((sd->flags & SDF_DIRTY) != 0 || *(int *)addr != 0)) {
1228 error = sd_flush(sd, 0);
1229 if (error)
1230 sd->flags &= ~SDF_FLUSHING;
1231 else
1232 sd->flags &= ~(SDF_FLUSHING|SDF_DIRTY);
1233 } else
1234 error = 0;
1235 return (error);
1236
1237 case DIOCAWEDGE:
1238 {
1239 struct dkwedge_info *dkw = (void *) addr;
1240
1241 if ((flag & FWRITE) == 0)
1242 return (EBADF);
1243
1244 /* If the ioctl happens here, the parent is us. */
1245 strcpy(dkw->dkw_parent, sd->sc_dev.dv_xname);
1246 return (dkwedge_add(dkw));
1247 }
1248
1249 case DIOCDWEDGE:
1250 {
1251 struct dkwedge_info *dkw = (void *) addr;
1252
1253 if ((flag & FWRITE) == 0)
1254 return (EBADF);
1255
1256 /* If the ioctl happens here, the parent is us. */
1257 strcpy(dkw->dkw_parent, sd->sc_dev.dv_xname);
1258 return (dkwedge_del(dkw));
1259 }
1260
1261 case DIOCLWEDGES:
1262 {
1263 struct dkwedge_list *dkwl = (void *) addr;
1264
1265 return (dkwedge_list(&sd->sc_dk, dkwl, l));
1266 }
1267
1268 default:
1269 if (part != RAW_PART)
1270 return (ENOTTY);
1271 return (scsipi_do_ioctl(periph, dev, cmd, addr, flag, l));
1272 }
1273
1274 #ifdef DIAGNOSTIC
1275 panic("sdioctl: impossible");
1276 #endif
1277 }
1278
1279 static void
1280 sdgetdefaultlabel(struct sd_softc *sd, struct disklabel *lp)
1281 {
1282
1283 memset(lp, 0, sizeof(struct disklabel));
1284
1285 lp->d_secsize = sd->params.blksize;
1286 lp->d_ntracks = sd->params.heads;
1287 lp->d_nsectors = sd->params.sectors;
1288 lp->d_ncylinders = sd->params.cyls;
1289 lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
1290
1291 switch (scsipi_periph_bustype(sd->sc_periph)) {
1292 case SCSIPI_BUSTYPE_SCSI:
1293 lp->d_type = DTYPE_SCSI;
1294 break;
1295 case SCSIPI_BUSTYPE_ATAPI:
1296 lp->d_type = DTYPE_ATAPI;
1297 break;
1298 }
1299 /*
1300 * XXX
1301 * We could probe the mode pages to figure out what kind of disc it is.
1302 * Is this worthwhile?
1303 */
1304 strncpy(lp->d_typename, sd->name, 16);
1305 strncpy(lp->d_packname, "fictitious", 16);
1306 lp->d_secperunit = sd->params.disksize;
1307 lp->d_rpm = sd->params.rot_rate;
1308 lp->d_interleave = 1;
1309 lp->d_flags = sd->sc_periph->periph_flags & PERIPH_REMOVABLE ?
1310 D_REMOVABLE : 0;
1311
1312 lp->d_partitions[RAW_PART].p_offset = 0;
1313 lp->d_partitions[RAW_PART].p_size = lp->d_secperunit;
1314 lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED;
1315 lp->d_npartitions = RAW_PART + 1;
1316
1317 lp->d_magic = DISKMAGIC;
1318 lp->d_magic2 = DISKMAGIC;
1319 lp->d_checksum = dkcksum(lp);
1320 }
1321
1322
1323 /*
1324 * Load the label information on the named device
1325 */
1326 static int
1327 sdgetdisklabel(struct sd_softc *sd)
1328 {
1329 struct disklabel *lp = sd->sc_dk.dk_label;
1330 const char *errstring;
1331
1332 memset(sd->sc_dk.dk_cpulabel, 0, sizeof(struct cpu_disklabel));
1333
1334 sdgetdefaultlabel(sd, lp);
1335
1336 if (lp->d_secpercyl == 0) {
1337 lp->d_secpercyl = 100;
1338 /* as long as it's not 0 - readdisklabel divides by it (?) */
1339 }
1340
1341 /*
1342 * Call the generic disklabel extraction routine
1343 */
1344 errstring = readdisklabel(MAKESDDEV(0, device_unit(&sd->sc_dev),
1345 RAW_PART), sdstrategy, lp, sd->sc_dk.dk_cpulabel);
1346 if (errstring) {
1347 printf("%s: %s\n", sd->sc_dev.dv_xname, errstring);
1348 return EIO;
1349 }
1350 return 0;
1351 }
1352
1353 static bool
1354 sd_suspend(device_t dv)
1355 {
1356 struct sd_softc *sd = device_private(dv);
1357
1358 /*
1359 * If the disk cache needs to be flushed, and the disk supports
1360 * it, flush it. We're cold at this point, so we poll for
1361 * completion.
1362 */
1363 if ((sd->flags & SDF_DIRTY) != 0) {
1364 if (sd_flush(sd, XS_CTL_NOSLEEP|XS_CTL_POLL)) {
1365 printf("%s: cache synchronization failed\n",
1366 sd->sc_dev.dv_xname);
1367 sd->flags &= ~SDF_FLUSHING;
1368 } else
1369 sd->flags &= ~(SDF_FLUSHING|SDF_DIRTY);
1370 }
1371
1372 return true;
1373 }
1374
1375 /*
1376 * Check Errors
1377 */
1378 static int
1379 sd_interpret_sense(struct scsipi_xfer *xs)
1380 {
1381 struct scsipi_periph *periph = xs->xs_periph;
1382 struct scsi_sense_data *sense = &xs->sense.scsi_sense;
1383 struct sd_softc *sd = (void *)periph->periph_dev;
1384 int s, error, retval = EJUSTRETURN;
1385
1386 /*
1387 * If the periph is already recovering, just do the normal
1388 * error processing.
1389 */
1390 if (periph->periph_flags & PERIPH_RECOVERING)
1391 return (retval);
1392
1393 /*
1394 * Ignore errors from accessing illegal fields (e.g. trying to
1395 * lock the door of a digicam, which doesn't have a door that
1396 * can be locked) for the SCSI_PREVENT_ALLOW_MEDIUM_REMOVAL command.
1397 */
1398 if (xs->cmd->opcode == SCSI_PREVENT_ALLOW_MEDIUM_REMOVAL &&
1399 SSD_SENSE_KEY(sense->flags) == SKEY_ILLEGAL_REQUEST &&
1400 sense->asc == 0x24 &&
1401 sense->ascq == 0x00) { /* Illegal field in CDB */
1402 if (!(xs->xs_control & XS_CTL_SILENT)) {
1403 scsipi_printaddr(periph);
1404 printf("no door lock\n");
1405 }
1406 xs->xs_control |= XS_CTL_IGNORE_ILLEGAL_REQUEST;
1407 return (retval);
1408 }
1409
1410
1411
1412 /*
1413 * If the device is not open yet, let the generic code handle it.
1414 */
1415 if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0)
1416 return (retval);
1417
1418 /*
1419 * If it isn't an extended or extended/deferred error, let
1420 * the generic code handle it.
1421 */
1422 if (SSD_RCODE(sense->response_code) != SSD_RCODE_CURRENT &&
1423 SSD_RCODE(sense->response_code) != SSD_RCODE_DEFERRED)
1424 return (retval);
1425
1426 if (SSD_SENSE_KEY(sense->flags) == SKEY_NOT_READY &&
1427 sense->asc == 0x4) {
1428 if (sense->ascq == 0x01) {
1429 /*
1430 * Unit In The Process Of Becoming Ready.
1431 */
1432 printf("%s: waiting for pack to spin up...\n",
1433 sd->sc_dev.dv_xname);
1434 if (!callout_pending(&periph->periph_callout))
1435 scsipi_periph_freeze(periph, 1);
1436 callout_reset(&periph->periph_callout,
1437 5 * hz, scsipi_periph_timed_thaw, periph);
1438 retval = ERESTART;
1439 } else if (sense->ascq == 0x02) {
1440 printf("%s: pack is stopped, restarting...\n",
1441 sd->sc_dev.dv_xname);
1442 s = splbio();
1443 periph->periph_flags |= PERIPH_RECOVERING;
1444 splx(s);
1445 error = scsipi_start(periph, SSS_START,
1446 XS_CTL_URGENT|XS_CTL_HEAD_TAG|
1447 XS_CTL_THAW_PERIPH|XS_CTL_FREEZE_PERIPH);
1448 if (error) {
1449 printf("%s: unable to restart pack\n",
1450 sd->sc_dev.dv_xname);
1451 retval = error;
1452 } else
1453 retval = ERESTART;
1454 s = splbio();
1455 periph->periph_flags &= ~PERIPH_RECOVERING;
1456 splx(s);
1457 }
1458 }
1459 if (SSD_SENSE_KEY(sense->flags) == SKEY_MEDIUM_ERROR &&
1460 sense->asc == 0x31 &&
1461 sense->ascq == 0x00) { /* maybe for any asq ? */
1462 /* Medium Format Corrupted */
1463 retval = EFTYPE;
1464 }
1465 return (retval);
1466 }
1467
1468
1469 static int
1470 sdsize(dev_t dev)
1471 {
1472 struct sd_softc *sd;
1473 int part, unit, omask;
1474 int size;
1475
1476 unit = SDUNIT(dev);
1477 if (unit >= sd_cd.cd_ndevs)
1478 return (-1);
1479 sd = sd_cd.cd_devs[unit];
1480 if (sd == NULL)
1481 return (-1);
1482
1483 if (!device_is_active(&sd->sc_dev))
1484 return (-1);
1485
1486 part = SDPART(dev);
1487 omask = sd->sc_dk.dk_openmask & (1 << part);
1488
1489 if (omask == 0 && sdopen(dev, 0, S_IFBLK, NULL) != 0)
1490 return (-1);
1491 if ((sd->sc_periph->periph_flags & PERIPH_MEDIA_LOADED) == 0)
1492 size = -1;
1493 else if (sd->sc_dk.dk_label->d_partitions[part].p_fstype != FS_SWAP)
1494 size = -1;
1495 else
1496 size = sd->sc_dk.dk_label->d_partitions[part].p_size *
1497 (sd->sc_dk.dk_label->d_secsize / DEV_BSIZE);
1498 if (omask == 0 && sdclose(dev, 0, S_IFBLK, NULL) != 0)
1499 return (-1);
1500 return (size);
1501 }
1502
1503 /* #define SD_DUMP_NOT_TRUSTED if you just want to watch */
1504 static struct scsipi_xfer sx;
1505 static int sddoingadump;
1506
1507 /*
1508 * dump all of physical memory into the partition specified, starting
1509 * at offset 'dumplo' into the partition.
1510 */
1511 static int
1512 sddump(dev_t dev, daddr_t blkno, void *va, size_t size)
1513 {
1514 struct sd_softc *sd; /* disk unit to do the I/O */
1515 struct disklabel *lp; /* disk's disklabel */
1516 int unit, part;
1517 int sectorsize; /* size of a disk sector */
1518 int nsects; /* number of sectors in partition */
1519 int sectoff; /* sector offset of partition */
1520 int totwrt; /* total number of sectors left to write */
1521 int nwrt; /* current number of sectors to write */
1522 struct scsipi_rw_10 cmd; /* write command */
1523 struct scsipi_xfer *xs; /* ... convenience */
1524 struct scsipi_periph *periph;
1525 struct scsipi_channel *chan;
1526
1527 /* Check if recursive dump; if so, punt. */
1528 if (sddoingadump)
1529 return (EFAULT);
1530
1531 /* Mark as active early. */
1532 sddoingadump = 1;
1533
1534 unit = SDUNIT(dev); /* Decompose unit & partition. */
1535 part = SDPART(dev);
1536
1537 /* Check for acceptable drive number. */
1538 if (unit >= sd_cd.cd_ndevs || (sd = sd_cd.cd_devs[unit]) == NULL)
1539 return (ENXIO);
1540
1541 if (!device_is_active(&sd->sc_dev))
1542 return (ENODEV);
1543
1544 periph = sd->sc_periph;
1545 chan = periph->periph_channel;
1546
1547 /* Make sure it was initialized. */
1548 if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0)
1549 return (ENXIO);
1550
1551 /* Convert to disk sectors. Request must be a multiple of size. */
1552 lp = sd->sc_dk.dk_label;
1553 sectorsize = lp->d_secsize;
1554 if ((size % sectorsize) != 0)
1555 return (EFAULT);
1556 totwrt = size / sectorsize;
1557 blkno = dbtob(blkno) / sectorsize; /* convert from DEV_BSIZE units to sectors */
1558
1559 nsects = lp->d_partitions[part].p_size;
1560 sectoff = lp->d_partitions[part].p_offset;
1561
1562 /* Check transfer bounds against partition size. */
1563 if ((blkno < 0) || ((blkno + totwrt) > nsects))
1564 return (EINVAL);
1565
1566 /* Offset block number to start of partition. */
1567 blkno += sectoff;
1568
1569 xs = &sx;
1570
1571 while (totwrt > 0) {
1572 nwrt = totwrt; /* XXX */
1573 #ifndef SD_DUMP_NOT_TRUSTED
1574 /*
1575 * Fill out the scsi command
1576 */
1577 memset(&cmd, 0, sizeof(cmd));
1578 cmd.opcode = WRITE_10;
1579 _lto4b(blkno, cmd.addr);
1580 _lto2b(nwrt, cmd.length);
1581 /*
1582 * Fill out the scsipi_xfer structure
1583 * Note: we cannot sleep as we may be an interrupt
1584 * don't use scsipi_command() as it may want to wait
1585 * for an xs.
1586 */
1587 memset(xs, 0, sizeof(sx));
1588 xs->xs_control |= XS_CTL_NOSLEEP | XS_CTL_POLL |
1589 XS_CTL_DATA_OUT;
1590 xs->xs_status = 0;
1591 xs->xs_periph = periph;
1592 xs->xs_retries = SDRETRIES;
1593 xs->timeout = 10000; /* 10000 millisecs for a disk ! */
1594 xs->cmd = (struct scsipi_generic *)&cmd;
1595 xs->cmdlen = sizeof(cmd);
1596 xs->resid = nwrt * sectorsize;
1597 xs->error = XS_NOERROR;
1598 xs->bp = 0;
1599 xs->data = va;
1600 xs->datalen = nwrt * sectorsize;
1601 callout_init(&xs->xs_callout, 0);
1602
1603 /*
1604 * Pass all this info to the scsi driver.
1605 */
1606 scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
1607 if ((xs->xs_status & XS_STS_DONE) == 0 ||
1608 xs->error != XS_NOERROR)
1609 return (EIO);
1610 #else /* SD_DUMP_NOT_TRUSTED */
1611 /* Let's just talk about this first... */
1612 printf("sd%d: dump addr 0x%x, blk %d\n", unit, va, blkno);
1613 delay(500 * 1000); /* half a second */
1614 #endif /* SD_DUMP_NOT_TRUSTED */
1615
1616 /* update block count */
1617 totwrt -= nwrt;
1618 blkno += nwrt;
1619 va = (char *)va + sectorsize * nwrt;
1620 }
1621 sddoingadump = 0;
1622 return (0);
1623 }
1624
1625 static int
1626 sd_mode_sense(struct sd_softc *sd, u_int8_t byte2, void *sense, size_t size,
1627 int page, int flags, int *big)
1628 {
1629
1630 if ((sd->sc_periph->periph_quirks & PQUIRK_ONLYBIG) &&
1631 !(sd->sc_periph->periph_quirks & PQUIRK_NOBIGMODESENSE)) {
1632 *big = 1;
1633 return scsipi_mode_sense_big(sd->sc_periph, byte2, page, sense,
1634 size + sizeof(struct scsi_mode_parameter_header_10),
1635 flags | XS_CTL_DATA_ONSTACK, SDRETRIES, 6000);
1636 } else {
1637 *big = 0;
1638 return scsipi_mode_sense(sd->sc_periph, byte2, page, sense,
1639 size + sizeof(struct scsi_mode_parameter_header_6),
1640 flags | XS_CTL_DATA_ONSTACK, SDRETRIES, 6000);
1641 }
1642 }
1643
1644 static int
1645 sd_mode_select(struct sd_softc *sd, u_int8_t byte2, void *sense, size_t size,
1646 int flags, int big)
1647 {
1648
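/*
 * The mode data length field in the parameter header is reserved
 * for MODE SELECT and must be sent as zero, so clear it before
 * handing the parameter list back to the device.
 */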
1649 if (big) {
1650 struct scsi_mode_parameter_header_10 *header = sense;
1651
1652 _lto2b(0, header->data_length);
1653 return scsipi_mode_select_big(sd->sc_periph, byte2, sense,
1654 size + sizeof(struct scsi_mode_parameter_header_10),
1655 flags | XS_CTL_DATA_ONSTACK, SDRETRIES, 6000);
1656 } else {
1657 struct scsi_mode_parameter_header_6 *header = sense;
1658
1659 header->data_length = 0;
1660 return scsipi_mode_select(sd->sc_periph, byte2, sense,
1661 size + sizeof(struct scsi_mode_parameter_header_6),
1662 flags | XS_CTL_DATA_ONSTACK, SDRETRIES, 6000);
1663 }
1664 }
1665
1666 /*
1667 * sd_validate_blksize:
1668 *
1669 * Validate the block size. Print an error if periph is specified.
1670 */
1671 static int
1672 sd_validate_blksize(struct scsipi_periph *periph, int len)
1673 {
1674
1675 switch (len) {
1676 case 256:
1677 case 512:
1678 case 1024:
1679 case 2048:
1680 case 4096:
1681 return 1;
1682 }
1683
1684 if (periph) {
1685 scsipi_printaddr(periph);
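/*
 * A block size that is not a power of two is reported as
 * "preposterous": (len ^ (1 << (ffs(len) - 1))) clears the lowest
 * set bit, so it is non-zero exactly when more than one bit is set.
 */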
1686 printf("%s sector size: 0x%x. Defaulting to %d bytes.\n",
1687 (len ^ (1 << (ffs(len) - 1))) ?
1688 "preposterous" : "unsupported",
1689 len, SD_DEFAULT_BLKSIZE);
1690 }
1691
1692 return 0;
1693 }
1694
1695 /*
1696 * sd_read_capacity:
1697 *
1698 * Find out from the device what its capacity is.
1699 */
1700 static u_int64_t
1701 sd_read_capacity(struct scsipi_periph *periph, int *blksize, int flags)
1702 {
1703 union {
1704 struct scsipi_read_capacity_10 cmd;
1705 struct scsipi_read_capacity_16 cmd16;
1706 } cmd;
1707 union {
1708 struct scsipi_read_capacity_10_data data;
1709 struct scsipi_read_capacity_16_data data16;
1710 } *datap;
1711 uint64_t rv;
1712
1713 memset(&cmd, 0, sizeof(cmd));
1714 cmd.cmd.opcode = READ_CAPACITY_10;
1715
1716 /*
1717 * Don't allocate data buffer on stack;
1718 * The lower driver layer might use the same stack and
1719 * if it uses region which is in the same cacheline,
1720 * cache flush ops against the data buffer won't work properly.
1721 */
1722 datap = malloc(sizeof(*datap), M_TEMP, M_WAITOK);
1723 if (datap == NULL)
1724 return 0;
1725
1726 /*
1727 * If the command works, interpret the result as a 4 byte
1728 * number of blocks
1729 */
1730 rv = 0;
1731 memset(datap, 0, sizeof(datap->data));
1732 if (scsipi_command(periph, (void *)&cmd.cmd, sizeof(cmd.cmd),
1733 (void *)datap, sizeof(datap->data), SCSIPIRETRIES, 20000, NULL,
1734 flags | XS_CTL_DATA_IN | XS_CTL_SILENT) != 0)
1735 goto out;
1736
1737 if (_4btol(datap->data.addr) != 0xffffffff) {
1738 *blksize = _4btol(datap->data.length);
1739 rv = _4btol(datap->data.addr) + 1;
1740 goto out;
1741 }
1742
1743 /*
1744 * Device is larger than can be reflected by READ CAPACITY (10).
1745 * Try READ CAPACITY (16).
1746 */
1747
1748 memset(&cmd, 0, sizeof(cmd));
1749 cmd.cmd16.opcode = READ_CAPACITY_16;
1750 cmd.cmd16.byte2 = SRC16_SERVICE_ACTION;
1751 _lto4b(sizeof(datap->data16), cmd.cmd16.len);
1752
1753 memset(datap, 0, sizeof(datap->data16));
1754 if (scsipi_command(periph, (void *)&cmd.cmd16, sizeof(cmd.cmd16),
1755 (void *)datap, sizeof(datap->data16), SCSIPIRETRIES, 20000, NULL,
1756 flags | XS_CTL_DATA_IN | XS_CTL_SILENT) != 0)
1757 goto out;
1758
1759 *blksize = _4btol(datap->data16.length);
1760 rv = _8btol(datap->data16.addr) + 1;
1761
1762 out:
1763 free(datap, M_TEMP);
1764 return rv;
1765 }
1766
1767 static int
1768 sd_get_simplifiedparms(struct sd_softc *sd, struct disk_parms *dp, int flags)
1769 {
1770 struct {
1771 struct scsi_mode_parameter_header_6 header;
1772 /* no block descriptor */
1773 u_int8_t pg_code; /* page code (should be 6) */
1774 u_int8_t pg_length; /* page length (should be 11) */
1775 u_int8_t wcd; /* bit0: cache disable */
1776 u_int8_t lbs[2]; /* logical block size */
1777 u_int8_t size[5]; /* number of log. blocks */
1778 u_int8_t pp; /* power/performance */
1779 u_int8_t flags;
1780 u_int8_t resvd;
1781 } scsipi_sense;
1782 u_int64_t blocks;
1783 int error, blksize;
1784
1785 /*
1786 * sd_read_capacity (i.e. READ CAPACITY) and mode sense page 6
1787 * give the same information. Do both for now, and check
1788 * for consistency.
1789 * XXX probably differs for removable media
1790 */
1791 dp->blksize = SD_DEFAULT_BLKSIZE;
1792 if ((blocks = sd_read_capacity(sd->sc_periph, &blksize, flags)) == 0)
1793 return (SDGP_RESULT_OFFLINE); /* XXX? */
1794
1795 error = scsipi_mode_sense(sd->sc_periph, SMS_DBD, 6,
1796 &scsipi_sense.header, sizeof(scsipi_sense),
1797 flags | XS_CTL_DATA_ONSTACK, SDRETRIES, 6000);
1798
1799 if (error != 0)
1800 return (SDGP_RESULT_OFFLINE); /* XXX? */
1801
1802 dp->blksize = blksize;
1803 if (!sd_validate_blksize(NULL, dp->blksize))
1804 dp->blksize = _2btol(scsipi_sense.lbs);
1805 if (!sd_validate_blksize(sd->sc_periph, dp->blksize))
1806 dp->blksize = SD_DEFAULT_BLKSIZE;
1807
1808 /*
1809 * Create a pseudo-geometry.
1810 */
1811 dp->heads = 64;
1812 dp->sectors = 32;
1813 dp->cyls = blocks / (dp->heads * dp->sectors);
1814 dp->disksize = _5btol(scsipi_sense.size);
1815 if (dp->disksize <= UINT32_MAX && dp->disksize != blocks) {
1816 printf("RBC size: mode sense=%llu, get cap=%llu\n",
1817 (unsigned long long)dp->disksize,
1818 (unsigned long long)blocks);
1819 dp->disksize = blocks;
1820 }
1821 dp->disksize512 = (dp->disksize * dp->blksize) / DEV_BSIZE;
1822
1823 return (SDGP_RESULT_OK);
1824 }
1825
1826 /*
1827 * Get the scsi driver to send a full inquiry to the device and use the
1828 * results to fill out the disk parameter structure.
1829 */
1830 static int
1831 sd_get_capacity(struct sd_softc *sd, struct disk_parms *dp, int flags)
1832 {
1833 u_int64_t blocks;
1834 int error, blksize;
1835 #if 0
1836 int i;
1837 u_int8_t *p;
1838 #endif
1839
1840 dp->disksize = blocks = sd_read_capacity(sd->sc_periph, &blksize,
1841 flags);
1842 if (blocks == 0) {
1843 struct scsipi_read_format_capacities cmd;
1844 struct {
1845 struct scsipi_capacity_list_header header;
1846 struct scsipi_capacity_descriptor desc;
1847 } __packed data;
1848
1849 memset(&cmd, 0, sizeof(cmd));
1850 memset(&data, 0, sizeof(data));
1851 cmd.opcode = READ_FORMAT_CAPACITIES;
1852 _lto2b(sizeof(data), cmd.length);
1853
1854 error = scsipi_command(sd->sc_periph,
1855 (void *)&cmd, sizeof(cmd), (void *)&data, sizeof(data),
1856 SDRETRIES, 20000, NULL,
1857 flags | XS_CTL_DATA_IN | XS_CTL_DATA_ONSTACK);
1858 if (error == EFTYPE) {
1859 /* Medium Format Corrupted, handle as not formatted */
1860 return (SDGP_RESULT_UNFORMATTED);
1861 }
1862 if (error || data.header.length == 0)
1863 return (SDGP_RESULT_OFFLINE);
1864
1865 #if 0
1866 printf("rfc: length=%d\n", data.header.length);
1867 printf("rfc result:"); for (i = sizeof(struct scsipi_capacity_list_header) + data.header.length, p = (void *)&data; i; i--, p++) printf(" %02x", *p); printf("\n");
1868 #endif
1869 switch (data.desc.byte5 & SCSIPI_CAP_DESC_CODE_MASK) {
1870 case SCSIPI_CAP_DESC_CODE_RESERVED:
1871 case SCSIPI_CAP_DESC_CODE_FORMATTED:
1872 break;
1873
1874 case SCSIPI_CAP_DESC_CODE_UNFORMATTED:
1875 return (SDGP_RESULT_UNFORMATTED);
1876
1877 case SCSIPI_CAP_DESC_CODE_NONE:
1878 return (SDGP_RESULT_OFFLINE);
1879 }
1880
1881 dp->disksize = blocks = _4btol(data.desc.nblks);
1882 if (blocks == 0)
1883 return (SDGP_RESULT_OFFLINE); /* XXX? */
1884
1885 blksize = _3btol(data.desc.blklen);
1886
1887 } else if (!sd_validate_blksize(NULL, blksize)) {
1888 struct sd_mode_sense_data scsipi_sense;
1889 int big, bsize;
1890 struct scsi_general_block_descriptor *bdesc;
1891
1892 memset(&scsipi_sense, 0, sizeof(scsipi_sense));
1893 error = sd_mode_sense(sd, 0, &scsipi_sense,
1894 sizeof(scsipi_sense.blk_desc), 0, flags | XS_CTL_SILENT, &big);
1895 if (!error) {
1896 if (big) {
1897 bdesc = (void *)(&scsipi_sense.header.big + 1);
1898 bsize = _2btol(scsipi_sense.header.big.blk_desc_len);
1899 } else {
1900 bdesc = (void *)(&scsipi_sense.header.small + 1);
1901 bsize = scsipi_sense.header.small.blk_desc_len;
1902 }
1903
1904 #if 0
1905 printf("page 0 sense:"); for (i = sizeof(scsipi_sense), p = (void *)&scsipi_sense; i; i--, p++) printf(" %02x", *p); printf("\n");
1906 printf("page 0 bsize=%d\n", bsize);
1907 printf("page 0 ok\n");
1908 #endif
1909
1910 if (bsize >= 8) {
1911 blksize = _3btol(bdesc->blklen);
1912 }
1913 }
1914 }
1915
1916 if (!sd_validate_blksize(sd->sc_periph, blksize))
1917 blksize = SD_DEFAULT_BLKSIZE;
1918
1919 dp->blksize = blksize;
1920 dp->disksize512 = (blocks * dp->blksize) / DEV_BSIZE;
1921 return (0);
1922 }
1923
1924 static int
1925 sd_get_parms_page4(struct sd_softc *sd, struct disk_parms *dp, int flags)
1926 {
1927 struct sd_mode_sense_data scsipi_sense;
1928 int error;
1929 int big, byte2;
1930 size_t poffset;
1931 union scsi_disk_pages *pages;
1932
1933 byte2 = SMS_DBD;
1934 again:
1935 memset(&scsipi_sense, 0, sizeof(scsipi_sense));
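/*
 * With DBD (disable block descriptors) set, the target returns no
 * block descriptor, so only ask for the geometry page itself.
 */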
1936 error = sd_mode_sense(sd, byte2, &scsipi_sense,
1937 (byte2 ? 0 : sizeof(scsipi_sense.blk_desc)) +
1938 sizeof(scsipi_sense.pages.rigid_geometry), 4,
1939 flags | XS_CTL_SILENT, &big);
1940 if (error) {
1941 if (byte2 == SMS_DBD) {
1942 /* No result; try once more with DBD off */
1943 byte2 = 0;
1944 goto again;
1945 }
1946 return (error);
1947 }
1948
1949 if (big) {
1950 poffset = sizeof scsipi_sense.header.big;
1951 poffset += _2btol(scsipi_sense.header.big.blk_desc_len);
1952 } else {
1953 poffset = sizeof scsipi_sense.header.small;
1954 poffset += scsipi_sense.header.small.blk_desc_len;
1955 }
1956
1957 if (poffset > sizeof(scsipi_sense) - sizeof(pages->rigid_geometry))
1958 return ERESTART;
1959
1960 pages = (void *)((u_long)&scsipi_sense + poffset);
1961 #if 0
1962 {
1963 size_t i;
1964 u_int8_t *p;
1965
1966 printf("page 4 sense:");
1967 for (i = sizeof(scsipi_sense), p = (void *)&scsipi_sense; i;
1968 i--, p++)
1969 printf(" %02x", *p);
1970 printf("\n");
1971 printf("page 4 pg_code=%d sense=%p/%p\n",
1972 pages->rigid_geometry.pg_code, &scsipi_sense, pages);
1973 }
1974 #endif
1975
1976 if ((pages->rigid_geometry.pg_code & PGCODE_MASK) != 4)
1977 return (ERESTART);
1978
1979 SC_DEBUG(sd->sc_periph, SCSIPI_DB3,
1980 ("%d cyls, %d heads, %d precomp, %d red_write, %d land_zone\n",
1981 _3btol(pages->rigid_geometry.ncyl),
1982 pages->rigid_geometry.nheads,
1983 _2btol(pages->rigid_geometry.st_cyl_wp),
1984 _2btol(pages->rigid_geometry.st_cyl_rwc),
1985 _2btol(pages->rigid_geometry.land_zone)));
1986
1987 /*
1988 * KLUDGE!! (for zone-recorded disks)
1989 * Pick a number of sectors per track so that sec * trks * cyls
1990 * is <= disk_size.  This can lead to wasted space!
1991 * THINK ABOUT THIS!
1992 */
1993 dp->heads = pages->rigid_geometry.nheads;
1994 dp->cyls = _3btol(pages->rigid_geometry.ncyl);
1995 if (dp->heads == 0 || dp->cyls == 0)
1996 return (ERESTART);
1997 dp->sectors = dp->disksize / (dp->heads * dp->cyls); /* XXX */
1998
1999 dp->rot_rate = _2btol(pages->rigid_geometry.rpm);
2000 if (dp->rot_rate == 0)
2001 dp->rot_rate = 3600;
2002
2003 #if 0
2004 printf("page 4 ok\n");
2005 #endif
2006 return (0);
2007 }
2008
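/*
 * sd_get_parms_page5:
 *
 *	Read MODE SENSE page 5 (flexible disk geometry) and derive the
 *	cylinder, head and sector counts from it, as above.
 */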
2009 static int
2010 sd_get_parms_page5(struct sd_softc *sd, struct disk_parms *dp, int flags)
2011 {
2012 struct sd_mode_sense_data scsipi_sense;
2013 int error;
2014 int big, byte2;
2015 size_t poffset;
2016 union scsi_disk_pages *pages;
2017
2018 byte2 = SMS_DBD;
2019 again:
2020 memset(&scsipi_sense, 0, sizeof(scsipi_sense));
2021 error = sd_mode_sense(sd, byte2, &scsipi_sense,
2022 (byte2 ? 0 : sizeof(scsipi_sense.blk_desc)) +
2023 sizeof(scsipi_sense.pages.flex_geometry), 5,
2024 flags | XS_CTL_SILENT, &big);
2025 if (error) {
2026 if (byte2 == SMS_DBD) {
2027 /* No result; try once more with DBD off */
2028 byte2 = 0;
2029 goto again;
2030 }
2031 return (error);
2032 }
2033
2034 if (big) {
2035 poffset = sizeof scsipi_sense.header.big;
2036 poffset += _2btol(scsipi_sense.header.big.blk_desc_len);
2037 } else {
2038 poffset = sizeof scsipi_sense.header.small;
2039 poffset += scsipi_sense.header.small.blk_desc_len;
2040 }
2041
2042 if (poffset > sizeof(scsipi_sense) - sizeof(pages->flex_geometry))
2043 return (ERESTART);
2044
2045 pages = (void *)((u_long)&scsipi_sense + poffset);
2046 #if 0
2047 {
2048 size_t i;
2049 u_int8_t *p;
2050
2051 printf("page 5 sense:");
2052 for (i = sizeof(scsipi_sense), p = (void *)&scsipi_sense; i;
2053 i--, p++)
2054 printf(" %02x", *p);
2055 printf("\n");
2056 printf("page 5 pg_code=%d sense=%p/%p\n",
2057 pages->flex_geometry.pg_code, &scsipi_sense, pages);
2058 }
2059 #endif
2060
2061 if ((pages->flex_geometry.pg_code & PGCODE_MASK) != 5)
2062 return (ERESTART);
2063
2064 SC_DEBUG(sd->sc_periph, SCSIPI_DB3,
2065 ("%d cyls, %d heads, %d sec, %d bytes/sec\n",
2066 _3btol(pages->flex_geometry.ncyl),
2067 pages->flex_geometry.nheads,
2068 pages->flex_geometry.ph_sec_tr,
2069 _2btol(pages->flex_geometry.bytes_s)));
2070
2071 dp->heads = pages->flex_geometry.nheads;
2072 dp->cyls = _2btol(pages->flex_geometry.ncyl);
2073 dp->sectors = pages->flex_geometry.ph_sec_tr;
2074 if (dp->heads == 0 || dp->cyls == 0 || dp->sectors == 0)
2075 return (ERESTART);
2076
2077 dp->rot_rate = _2btol(pages->flex_geometry.rpm);
2078 if (dp->rot_rate == 0)
2079 dp->rot_rate = 3600;
2080
2081 #if 0
2082 printf("page 5 ok\n");
2083 #endif
2084 return (0);
2085 }
2086
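/*
 * sd_get_parms:
 *
 *	Determine the device's block size, capacity and geometry.
 *	Simple direct-access devices take the simplified path;
 *	everything else reads the capacity and then the geometry mode
 *	pages, falling back to a fabricated geometry if necessary.
 */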
2087 static int
2088 sd_get_parms(struct sd_softc *sd, struct disk_parms *dp, int flags)
2089 {
2090 int error;
2091
2092 /*
2093 * If offline, the SDEV_MEDIA_LOADED flag will be
2094 * cleared by the caller if necessary.
2095 */
2096 if (sd->type == T_SIMPLE_DIRECT) {
2097 error = sd_get_simplifiedparms(sd, dp, flags);
2098 if (!error)
2099 disk_blocksize(&sd->sc_dk, dp->blksize);
2100 return (error);
2101 }
2102
2103 error = sd_get_capacity(sd, dp, flags);
2104 if (error)
2105 return (error);
2106
2107 disk_blocksize(&sd->sc_dk, dp->blksize);
2108
2109 if (sd->type == T_OPTICAL)
2110 goto page0;
2111
2112 if (sd->sc_periph->periph_flags & PERIPH_REMOVABLE) {
2113 if (!sd_get_parms_page5(sd, dp, flags) ||
2114 !sd_get_parms_page4(sd, dp, flags))
2115 return (SDGP_RESULT_OK);
2116 } else {
2117 if (!sd_get_parms_page4(sd, dp, flags) ||
2118 !sd_get_parms_page5(sd, dp, flags))
2119 return (SDGP_RESULT_OK);
2120 }
2121
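/* Neither geometry page gave usable values, or this is an optical device; fabricate a plausible geometry instead. */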
2122 page0:
2123 printf("%s: fabricating a geometry\n", sd->sc_dev.dv_xname);
2124 /* Try calling driver's method for figuring out geometry. */
2125 if (!sd->sc_periph->periph_channel->chan_adapter->adapt_getgeom ||
2126 !(*sd->sc_periph->periph_channel->chan_adapter->adapt_getgeom)
2127 (sd->sc_periph, dp, dp->disksize)) {
2128 /*
2129 * Use the Adaptec standard fictitious geometry.
2130 * This depends on which controller is in use (e.g. the
2131 * 1542C is different), but we have to put SOMETHING here.
2132 */
2133 dp->heads = 64;
2134 dp->sectors = 32;
2135 dp->cyls = dp->disksize / (64 * 32);
2136 }
2137 dp->rot_rate = 3600;
2138 return (SDGP_RESULT_OK);
2139 }
2140
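/*
 * sd_flush:
 *
 *	Ask the device to flush its write cache; see the comment
 *	below for the details and caveats.
 */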
2141 static int
2142 sd_flush(struct sd_softc *sd, int flags)
2143 {
2144 struct scsipi_periph *periph = sd->sc_periph;
2145 struct scsi_synchronize_cache_10 cmd;
2146
2147 /*
2148 * If the device is SCSI-2, issue a SYNCHRONIZE CACHE.
2149 * We issue with address 0 length 0, which should be
2150 * interpreted by the device as "all remaining blocks
2151 * starting at address 0". We ignore ILLEGAL REQUEST
2152 * in the event that the command is not supported by
2153 * the device, and poll for completion so that we know
2154 * that the cache has actually been flushed.
2155 *
2156 * Unless, that is, the device can't handle the SYNCHRONIZE CACHE
2157 * command, as indicated by our quirks flags.
2158 *
2159 * XXX What about older devices?
2160 */
2161 if (periph->periph_version < 2 ||
2162 (periph->periph_quirks & PQUIRK_NOSYNCCACHE))
2163 return (0);
2164
2165 sd->flags |= SDF_FLUSHING;
2166 memset(&cmd, 0, sizeof(cmd));
2167 cmd.opcode = SCSI_SYNCHRONIZE_CACHE_10;
2168
2169 return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
2170 SDRETRIES, 100000, NULL, flags | XS_CTL_IGNORE_ILLEGAL_REQUEST));
2171 }
2172
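/*
 * sd_getcache:
 *
 *	Read the caching mode page (page 8) and translate it into
 *	DKCACHE_* bits: current read/write cache state, whether the
 *	page can be saved, and which bits are changeable.
 */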
2173 static int
2174 sd_getcache(struct sd_softc *sd, int *bitsp)
2175 {
2176 struct scsipi_periph *periph = sd->sc_periph;
2177 struct sd_mode_sense_data scsipi_sense;
2178 int error, bits = 0;
2179 int big;
2180 union scsi_disk_pages *pages;
2181
2182 if (periph->periph_version < 2)
2183 return (EOPNOTSUPP);
2184
2185 memset(&scsipi_sense, 0, sizeof(scsipi_sense));
2186 error = sd_mode_sense(sd, SMS_DBD, &scsipi_sense,
2187 sizeof(scsipi_sense.pages.caching_params), 8, 0, &big);
2188 if (error)
2189 return (error);
2190
2191 if (big)
2192 pages = (void *)(&scsipi_sense.header.big + 1);
2193 else
2194 pages = (void *)(&scsipi_sense.header.small + 1);
2195
2196 if ((pages->caching_params.flags & CACHING_RCD) == 0)
2197 bits |= DKCACHE_READ;
2198 if (pages->caching_params.flags & CACHING_WCE)
2199 bits |= DKCACHE_WRITE;
2200 if (pages->caching_params.pg_code & PGCODE_PS)
2201 bits |= DKCACHE_SAVE;
2202
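/* Fetch the changeable-values version of the page so we can report which cache bits may actually be modified. */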
2203 memset(&scsipi_sense, 0, sizeof(scsipi_sense));
2204 error = sd_mode_sense(sd, SMS_DBD, &scsipi_sense,
2205 sizeof(scsipi_sense.pages.caching_params),
2206 SMS_PCTRL_CHANGEABLE|8, 0, &big);
2207 if (error == 0) {
2208 if (big)
2209 pages = (void *)(&scsipi_sense.header.big + 1);
2210 else
2211 pages = (void *)(&scsipi_sense.header.small + 1);
2212
2213 if (pages->caching_params.flags & CACHING_RCD)
2214 bits |= DKCACHE_RCHANGE;
2215 if (pages->caching_params.flags & CACHING_WCE)
2216 bits |= DKCACHE_WCHANGE;
2217 }
2218
2219 *bitsp = bits;
2220
2221 return (0);
2222 }
2223
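/*
 * sd_setcache:
 *
 *	Rewrite the caching mode page to enable or disable the read
 *	and write caches as requested, optionally saving the page.
 */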
2224 static int
2225 sd_setcache(struct sd_softc *sd, int bits)
2226 {
2227 struct scsipi_periph *periph = sd->sc_periph;
2228 struct sd_mode_sense_data scsipi_sense;
2229 int error;
2230 uint8_t oflags, byte2 = 0;
2231 int big;
2232 union scsi_disk_pages *pages;
2233
2234 if (periph->periph_version < 2)
2235 return (EOPNOTSUPP);
2236
2237 memset(&scsipi_sense, 0, sizeof(scsipi_sense));
2238 error = sd_mode_sense(sd, SMS_DBD, &scsipi_sense,
2239 sizeof(scsipi_sense.pages.caching_params), 8, 0, &big);
2240 if (error)
2241 return (error);
2242
2243 if (big)
2244 pages = (void *)(&scsipi_sense.header.big + 1);
2245 else
2246 pages = (void *)(&scsipi_sense.header.small + 1);
2247
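/* Remember the current flags so we can avoid a needless MODE SELECT if nothing changes. */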
2248 oflags = pages->caching_params.flags;
2249
2250 if (bits & DKCACHE_READ)
2251 pages->caching_params.flags &= ~CACHING_RCD;
2252 else
2253 pages->caching_params.flags |= CACHING_RCD;
2254
2255 if (bits & DKCACHE_WRITE)
2256 pages->caching_params.flags |= CACHING_WCE;
2257 else
2258 pages->caching_params.flags &= ~CACHING_WCE;
2259
2260 if (oflags == pages->caching_params.flags)
2261 return (0);
2262
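/* The PS bit is reserved in MODE SELECT parameter data; mask it off before sending the page back. */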
2263 pages->caching_params.pg_code &= PGCODE_MASK;
2264
2265 if (bits & DKCACHE_SAVE)
2266 byte2 |= SMS_SP;
2267
2268 return (sd_mode_select(sd, byte2|SMS_PF, &scsipi_sense,
2269 sizeof(struct scsi_mode_page_header) +
2270 pages->caching_params.pg_length, 0, big));
2271 }
2272
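/*
 * sd_set_properties:
 *
 *	Publish the probed disk geometry as a "disk-info" property
 *	dictionary on the device and hang it off the disk structure.
 */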
2273 static void
2274 sd_set_properties(struct sd_softc *sd)
2275 {
2276 prop_dictionary_t disk_info, odisk_info, geom;
2277
2278 disk_info = prop_dictionary_create();
2279
2280 geom = prop_dictionary_create();
2281
2282 prop_dictionary_set_uint64(geom, "sectors-per-unit",
2283 sd->params.disksize);
2284
2285 prop_dictionary_set_uint32(geom, "sector-size",
2286 sd->params.blksize);
2287
2288 prop_dictionary_set_uint16(geom, "sectors-per-track",
2289 sd->params.sectors);
2290
2291 prop_dictionary_set_uint16(geom, "tracks-per-cylinder",
2292 sd->params.heads);
2293
2294 prop_dictionary_set_uint64(geom, "cylinders-per-unit",
2295 sd->params.cyls);
2296
2297 prop_dictionary_set(disk_info, "geometry", geom);
2298 prop_object_release(geom);
2299
2300 prop_dictionary_set(device_properties(&sd->sc_dev),
2301 "disk-info", disk_info);
2302
2303 /*
2304 * Don't release disk_info here; we keep a reference to it.
2305 * disk_detach() will release it when we go away.
2306 */
2307
2308 odisk_info = sd->sc_dk.dk_info;
2309 sd->sc_dk.dk_info = disk_info;
2310 if (odisk_info)
2311 prop_object_release(odisk_info);
2312 }
2313