sd.c revision 1.151.2.2
      1 /*	$NetBSD: sd.c,v 1.151.2.2 1999/10/20 20:39:29 thorpej Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 1998 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Charles M. Hannum.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  * 3. All advertising materials mentioning features or use of this software
     19  *    must display the following acknowledgement:
     20  *        This product includes software developed by the NetBSD
     21  *        Foundation, Inc. and its contributors.
     22  * 4. Neither the name of The NetBSD Foundation nor the names of its
     23  *    contributors may be used to endorse or promote products derived
     24  *    from this software without specific prior written permission.
     25  *
     26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     36  * POSSIBILITY OF SUCH DAMAGE.
     37  */
     38 
     39 /*
     40  * Originally written by Julian Elischer (julian@dialix.oz.au)
     41  * for TRW Financial Systems for use under the MACH(2.5) operating system.
     42  *
     43  * TRW Financial Systems, in accordance with their agreement with Carnegie
     44  * Mellon University, makes this software available to CMU to distribute
     45  * or use in any manner that they see fit as long as this message is kept with
     46  * the software. For this reason TFS also grants any other persons or
     47  * organisations permission to use or modify this software.
     48  *
     49  * TFS supplies this software to be publicly redistributed
     50  * on the understanding that TFS is not responsible for the correct
     51  * functioning of this software in any circumstances.
     52  *
     53  * Ported to run under 386BSD by Julian Elischer (julian@dialix.oz.au) Sept 1992
     54  */
     55 
     56 #include "opt_scsi.h"
     57 #include "rnd.h"
     58 
     59 #include <sys/types.h>
     60 #include <sys/param.h>
     61 #include <sys/systm.h>
     62 #include <sys/kernel.h>
     63 #include <sys/file.h>
     64 #include <sys/stat.h>
     65 #include <sys/ioctl.h>
     66 #include <sys/scsiio.h>
     67 #include <sys/buf.h>
     68 #include <sys/uio.h>
     69 #include <sys/malloc.h>
     70 #include <sys/errno.h>
     71 #include <sys/device.h>
     72 #include <sys/disklabel.h>
     73 #include <sys/disk.h>
     74 #include <sys/proc.h>
     75 #include <sys/conf.h>
     76 #include <sys/vnode.h>
     77 #if NRND > 0
     78 #include <sys/rnd.h>
     79 #endif
     80 
     81 #include <dev/scsipi/scsipi_all.h>
     82 #include <dev/scsipi/scsi_all.h>
     83 #include <dev/scsipi/scsipi_disk.h>
     84 #include <dev/scsipi/scsi_disk.h>
     85 #include <dev/scsipi/scsiconf.h>
     86 #include <dev/scsipi/sdvar.h>
     87 
     88 #include "sd.h"		/* NSD_SCSIBUS and NSD_ATAPIBUS come from here */
     89 
     90 #define	SDUNIT(dev)			DISKUNIT(dev)
     91 #define	SDPART(dev)			DISKPART(dev)
     92 #define	SDMINOR(unit, part)		DISKMINOR(unit, part)
     93 #define	MAKESDDEV(maj, unit, part)	MAKEDISKDEV(maj, unit, part)
     94 
     95 #define	SDLABELDEV(dev)	(MAKESDDEV(major(dev), SDUNIT(dev), RAW_PART))
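/*
 * SDLABELDEV() maps any sd dev_t onto the raw partition of the same unit;
 * that is the device used for reading and writing the disklabel.
 */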
     96 
     97 int	sdlock __P((struct sd_softc *));
     98 void	sdunlock __P((struct sd_softc *));
     99 void	sdminphys __P((struct buf *));
    100 void	sdgetdefaultlabel __P((struct sd_softc *, struct disklabel *));
    101 void	sdgetdisklabel __P((struct sd_softc *));
    102 void	sdstart __P((struct scsipi_periph *));
    103 void	sddone __P((struct scsipi_xfer *));
    104 void	sd_shutdown __P((void *));
    105 int	sd_reassign_blocks __P((struct sd_softc *, u_long));
    106 int	sd_interpret_sense __P((struct scsipi_xfer *));
    107 
    108 extern struct cfdriver sd_cd;
    109 
    110 struct dkdriver sddkdriver = { sdstrategy };
    111 
    112 const struct scsipi_periphsw sd_switch = {
    113 	sd_interpret_sense,	/* check our error handler first */
    114 	sdstart,		/* have a queue, served by this */
    115 	NULL,			/* have no async handler */
    116 	sddone,			/* deal with stats at interrupt time */
    117 };
    118 
    119 /*
    120  * The routine called by the low level scsi routine when it discovers
    121  * a device suitable for this driver.
    122  */
    123 void
    124 sdattach(parent, sd, periph, ops)
    125 	struct device *parent;
    126 	struct sd_softc *sd;
    127 	struct scsipi_periph *periph;
    128 	const struct sd_ops *ops;
    129 {
    130 	int error, result;
    131 	struct disk_parms *dp = &sd->params;
    132 	char pbuf[9];
    133 
    134 	SC_DEBUG(sc_link, SDEV_DB2, ("sdattach: "));
    135 
    136 	/*
    137 	 * Store information needed to contact our base driver
    138 	 */
    139 	sd->sc_periph = periph;
    140 	sd->sc_ops = ops;
    141 
    142 	periph->periph_dev = &sd->sc_dev;
    143 	periph->periph_switch = &sd_switch;
    144 
     145 	/*
     146 	 * Increase our openings to the maximum-per-periph
     147 	 * supported by the adapter.  This will either be
     148 	 * clamped down or grown by the adapter if necessary.
     149 	 */
    150 	periph->periph_openings =
    151 	    SCSIPI_CHAN_MAX_PERIPH(periph->periph_channel);
    152 	periph->periph_flags |= PERIPH_GROW_OPENINGS;
    153 
    154 	/*
    155 	 * Initialize and attach the disk structure.
    156 	 */
    157 	sd->sc_dk.dk_driver = &sddkdriver;
    158 	sd->sc_dk.dk_name = sd->sc_dev.dv_xname;
    159 	disk_attach(&sd->sc_dk);
    160 
    161 #if !defined(i386) && !defined(vax)
    162 	dk_establish(&sd->sc_dk, &sd->sc_dev);		/* XXX */
    163 #endif
    164 
    165 	/*
    166 	 * Use the subdriver to request information regarding
    167 	 * the drive. We cannot use interrupts yet, so the
    168 	 * request must specify this.
    169 	 */
    170 	printf("\n");
    171 
    172 	error = scsipi_start(periph, SSS_START,
    173 	    XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
    174 	    XS_CTL_IGNORE_MEDIA_CHANGE | XS_CTL_SILENT);
    175 
    176 	if (error)
    177 		result = SDGP_RESULT_OFFLINE;
    178 	else
    179 		result = (*sd->sc_ops->sdo_get_parms)(sd, &sd->params,
    180 		    XS_CTL_DISCOVERY);
    181 	printf("%s: ", sd->sc_dev.dv_xname);
    182 	switch (result) {
    183 	case SDGP_RESULT_OK:
    184 		format_bytes(pbuf, sizeof(pbuf),
    185 		    (u_int64_t)dp->disksize * dp->blksize);
    186 	        printf(
    187 		"%s, %ld cyl, %ld head, %ld sec, %ld bytes/sect x %ld sectors",
    188 		    pbuf, dp->cyls, dp->heads, dp->sectors, dp->blksize,
    189 		    dp->disksize);
    190 		break;
    191 
    192 	case SDGP_RESULT_OFFLINE:
    193 		printf("drive offline");
    194 		break;
    195 
    196 	case SDGP_RESULT_UNFORMATTED:
    197 		printf("unformatted media");
    198 		break;
    199 
    200 #ifdef DIAGNOSTIC
    201 	default:
    202 		panic("sdattach: unknown result from get_parms");
    203 		break;
    204 #endif
    205 	}
    206 	printf("\n");
    207 
    208 	/*
    209 	 * Establish a shutdown hook so that we can ensure that
    210 	 * our data has actually made it onto the platter at
    211 	 * shutdown time.  Note that this relies on the fact
    212 	 * that the shutdown hook code puts us at the head of
    213 	 * the list (thus guaranteeing that our hook runs before
    214 	 * our ancestors').
    215 	 */
    216 	if ((sd->sc_sdhook =
    217 	    shutdownhook_establish(sd_shutdown, sd)) == NULL)
    218 		printf("%s: WARNING: unable to establish shutdown hook\n",
    219 		    sd->sc_dev.dv_xname);
    220 
    221 #if NRND > 0
    222 	/*
    223 	 * attach the device into the random source list
    224 	 */
    225 	rnd_attach_source(&sd->rnd_source, sd->sc_dev.dv_xname,
    226 			  RND_TYPE_DISK, 0);
    227 #endif
    228 }
    229 
    230 int
    231 sdactivate(self, act)
    232 	struct device *self;
    233 	enum devact act;
    234 {
    235 	int rv = 0;
    236 
    237 	switch (act) {
    238 	case DVACT_ACTIVATE:
    239 		rv = EOPNOTSUPP;
    240 		break;
    241 
    242 	case DVACT_DEACTIVATE:
    243 		/*
    244 		 * Nothing to do; we key off the device's DVF_ACTIVE.
    245 		 */
    246 		break;
    247 	}
    248 	return (rv);
    249 }
    250 
    251 int
    252 sddetach(self, flags)
    253 	struct device *self;
    254 	int flags;
    255 {
    256 	struct sd_softc *sd = (struct sd_softc *) self;
    257 	struct buf *bp;
    258 	int s, bmaj, cmaj, mn;
    259 
    260 	/* locate the major number */
     261 	for (bmaj = 0; bmaj < nblkdev; bmaj++)
    262 		if (bdevsw[bmaj].d_open == sdopen)
    263 			break;
     264 	for (cmaj = 0; cmaj < nchrdev; cmaj++)
    265 		if (cdevsw[cmaj].d_open == sdopen)
    266 			break;
    267 
    268 	s = splbio();
    269 
    270 	/* Kill off any queued buffers. */
    271 	while ((bp = sd->buf_queue.b_actf) != NULL) {
    272 		sd->buf_queue.b_actf = bp->b_actf;
    273 		bp->b_error = EIO;
    274 		bp->b_flags |= B_ERROR;
    275 		bp->b_resid = bp->b_bcount;
    276 		biodone(bp);
    277 	}
    278 
    279 	/* Kill off any pending commands. */
    280 	scsipi_kill_pending(sd->sc_periph);
    281 
    282 	splx(s);
    283 
     284 	/* Nuke the vnodes for any open instances */
    285 	mn = SDMINOR(self->dv_unit, 0);
    286 	vdevgone(bmaj, mn, mn + (MAXPARTITIONS - 1), VBLK);
    287 	vdevgone(cmaj, mn, mn + (MAXPARTITIONS - 1), VCHR);
    288 
    289 	/* Detach from the disk list. */
    290 	disk_detach(&sd->sc_dk);
    291 
    292 	/* Get rid of the shutdown hook. */
    293 	shutdownhook_disestablish(sd->sc_sdhook);
    294 
    295 #if NRND > 0
    296 	/* Unhook the entropy source. */
    297 	rnd_detach_source(&sd->rnd_source);
    298 #endif
    299 
    300 	return (0);
    301 }
    302 
    303 /*
    304  * Wait interruptibly for an exclusive lock.
    305  *
    306  * XXX
    307  * Several drivers do this; it should be abstracted and made MP-safe.
    308  */
    309 int
    310 sdlock(sd)
    311 	struct sd_softc *sd;
    312 {
    313 	int error;
    314 
    315 	while ((sd->flags & SDF_LOCKED) != 0) {
    316 		sd->flags |= SDF_WANTED;
    317 		if ((error = tsleep(sd, PRIBIO | PCATCH, "sdlck", 0)) != 0)
    318 			return (error);
    319 	}
    320 	sd->flags |= SDF_LOCKED;
    321 	return (0);
    322 }
    323 
    324 /*
    325  * Unlock and wake up any waiters.
    326  */
    327 void
    328 sdunlock(sd)
    329 	struct sd_softc *sd;
    330 {
    331 
    332 	sd->flags &= ~SDF_LOCKED;
    333 	if ((sd->flags & SDF_WANTED) != 0) {
    334 		sd->flags &= ~SDF_WANTED;
    335 		wakeup(sd);
    336 	}
    337 }
    338 
    339 /*
     340  * open the device. Make sure the partition info is as up-to-date as can be.
    341  */
    342 int
    343 sdopen(dev, flag, fmt, p)
    344 	dev_t dev;
    345 	int flag, fmt;
    346 	struct proc *p;
    347 {
    348 	struct sd_softc *sd;
    349 	struct scsipi_periph *periph;
    350 	struct scsipi_adapter *adapt;
    351 	int unit, part;
    352 	int error;
    353 
    354 	unit = SDUNIT(dev);
    355 	if (unit >= sd_cd.cd_ndevs)
    356 		return (ENXIO);
    357 	sd = sd_cd.cd_devs[unit];
    358 	if (sd == NULL)
    359 		return (ENXIO);
    360 
    361 	if ((sd->sc_dev.dv_flags & DVF_ACTIVE) == 0)
    362 		return (ENODEV);
    363 
    364 	periph = sd->sc_periph;
    365 	adapt = periph->periph_channel->chan_adapter;
    366 	part = SDPART(dev);
    367 
    368 	SC_DEBUG(sc_link, SDEV_DB1,
    369 	    ("sdopen: dev=0x%x (unit %d (of %d), partition %d)\n", dev, unit,
    370 	    sd_cd.cd_ndevs, part));
    371 
    372 	/*
    373 	 * If this is the first open of this device, add a reference
    374 	 * to the adapter.
    375 	 */
    376 	if (sd->sc_dk.dk_openmask == 0 &&
    377 	    (error = scsipi_adapter_addref(adapt)) != 0)
    378 		return (error);
    379 
    380 	if ((error = sdlock(sd)) != 0)
    381 		goto bad4;
    382 
    383 	if ((periph->periph_flags & PERIPH_OPEN) != 0) {
    384 		/*
    385 		 * If any partition is open, but the disk has been invalidated,
    386 		 * disallow further opens of non-raw partition
    387 		 */
    388 		if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0 &&
    389 		    (part != RAW_PART || fmt != S_IFCHR)) {
    390 			error = EIO;
    391 			goto bad3;
    392 		}
    393 	} else {
    394 		/* Check that it is still responding and ok. */
    395 		error = scsipi_test_unit_ready(periph,
    396 		    XS_CTL_IGNORE_ILLEGAL_REQUEST | XS_CTL_IGNORE_MEDIA_CHANGE |
    397 		    XS_CTL_IGNORE_NOT_READY);
    398 		if (error)
    399 			goto bad3;
    400 
    401 		/*
    402 		 * Start the pack spinning if necessary. Always allow the
     403 		 * raw partition to be opened, for raw IOCTLs. Data transfers
     404 		 * will check for PERIPH_MEDIA_LOADED.
    405 		 */
    406 		error = scsipi_start(periph, SSS_START,
    407 		    XS_CTL_IGNORE_ILLEGAL_REQUEST |
    408 		    XS_CTL_IGNORE_MEDIA_CHANGE | XS_CTL_SILENT);
    409 		if (error) {
    410 			if (part != RAW_PART || fmt != S_IFCHR)
    411 				goto bad3;
    412 			else
    413 				goto out;
    414 		}
    415 
    416 		periph->periph_flags |= PERIPH_OPEN;
    417 
    418 		/* Lock the pack in. */
    419 		error = scsipi_prevent(periph, PR_PREVENT,
    420 		    XS_CTL_IGNORE_ILLEGAL_REQUEST | XS_CTL_IGNORE_MEDIA_CHANGE);
    421 		if (error)
    422 			goto bad;
    423 
    424 		if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0) {
    425 			periph->periph_flags |= PERIPH_MEDIA_LOADED;
    426 
    427 			/*
    428 			 * Load the physical device parameters.
    429 			 *
    430 			 * Note that if media is present but unformatted,
    431 			 * we allow the open (so that it can be formatted!).
    432 			 * The drive should refuse real I/O, if the media is
    433 			 * unformatted.
    434 			 */
    435 			if ((*sd->sc_ops->sdo_get_parms)(sd, &sd->params,
    436 			    0) == SDGP_RESULT_OFFLINE) {
    437 				error = ENXIO;
    438 				goto bad2;
    439 			}
    440 			SC_DEBUG(sc_link, SDEV_DB3, ("Params loaded "));
    441 
    442 			/* Load the partition info if not already loaded. */
    443 			sdgetdisklabel(sd);
    444 			SC_DEBUG(sc_link, SDEV_DB3, ("Disklabel loaded "));
    445 		}
    446 	}
    447 
    448 	/* Check that the partition exists. */
    449 	if (part != RAW_PART &&
    450 	    (part >= sd->sc_dk.dk_label->d_npartitions ||
    451 	     sd->sc_dk.dk_label->d_partitions[part].p_fstype == FS_UNUSED)) {
    452 		error = ENXIO;
    453 		goto bad;
    454 	}
    455 
    456 out:	/* Insure only one open at a time. */
    457 	switch (fmt) {
    458 	case S_IFCHR:
    459 		sd->sc_dk.dk_copenmask |= (1 << part);
    460 		break;
    461 	case S_IFBLK:
    462 		sd->sc_dk.dk_bopenmask |= (1 << part);
    463 		break;
    464 	}
    465 	sd->sc_dk.dk_openmask =
    466 	    sd->sc_dk.dk_copenmask | sd->sc_dk.dk_bopenmask;
    467 
    468 	SC_DEBUG(sc_link, SDEV_DB3, ("open complete\n"));
    469 	sdunlock(sd);
    470 	return (0);
    471 
    472 bad2:
    473 	periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
    474 
    475 bad:
    476 	if (sd->sc_dk.dk_openmask == 0) {
    477 		scsipi_prevent(periph, PR_ALLOW,
    478 		    XS_CTL_IGNORE_ILLEGAL_REQUEST | XS_CTL_IGNORE_MEDIA_CHANGE);
    479 		periph->periph_flags &= ~PERIPH_OPEN;
    480 	}
    481 
    482 bad3:
    483 	sdunlock(sd);
    484 bad4:
    485 	if (sd->sc_dk.dk_openmask == 0)
    486 		scsipi_adapter_delref(adapt);
    487 	return (error);
    488 }
    489 
    490 /*
     491  * close the device.  Only called if we are the LAST occurrence of an open
    492  * device.  Convenient now but usually a pain.
    493  */
    494 int
    495 sdclose(dev, flag, fmt, p)
    496 	dev_t dev;
    497 	int flag, fmt;
    498 	struct proc *p;
    499 {
    500 	struct sd_softc *sd = sd_cd.cd_devs[SDUNIT(dev)];
    501 	struct scsipi_periph *periph = sd->sc_periph;
    502 	struct scsipi_adapter *adapt = periph->periph_channel->chan_adapter;
    503 	int part = SDPART(dev);
    504 	int error;
    505 
    506 	if ((error = sdlock(sd)) != 0)
    507 		return (error);
    508 
    509 	switch (fmt) {
    510 	case S_IFCHR:
    511 		sd->sc_dk.dk_copenmask &= ~(1 << part);
    512 		break;
    513 	case S_IFBLK:
    514 		sd->sc_dk.dk_bopenmask &= ~(1 << part);
    515 		break;
    516 	}
    517 	sd->sc_dk.dk_openmask =
    518 	    sd->sc_dk.dk_copenmask | sd->sc_dk.dk_bopenmask;
    519 
    520 	if (sd->sc_dk.dk_openmask == 0) {
    521 		/*
    522 		 * If the disk cache needs flushing, and the disk supports
    523 		 * it, do it now.
    524 		 */
    525 		if ((sd->flags & SDF_DIRTY) != 0 &&
    526 		    sd->sc_ops->sdo_flush != NULL) {
    527 			if ((*sd->sc_ops->sdo_flush)(sd, 0)) {
    528 				printf("%s: cache synchronization failed\n",
    529 				    sd->sc_dev.dv_xname);
    530 				sd->flags &= ~SDF_FLUSHING;
    531 			} else
    532 				sd->flags &= ~(SDF_FLUSHING|SDF_DIRTY);
    533 		}
    534 
    535 		scsipi_wait_drain(periph);
    536 
    537 		scsipi_prevent(periph, PR_ALLOW,
    538 		    XS_CTL_IGNORE_ILLEGAL_REQUEST | XS_CTL_IGNORE_NOT_READY);
    539 		periph->periph_flags &= ~PERIPH_OPEN;
    540 
    541 		scsipi_wait_drain(periph);
    542 
    543 		scsipi_adapter_delref(adapt);
    544 	}
    545 
    546 	sdunlock(sd);
    547 	return (0);
    548 }
    549 
    550 /*
    551  * Actually translate the requested transfer into one the physical driver
    552  * can understand.  The transfer is described by a buf and will include
    553  * only one physical transfer.
    554  */
    555 void
    556 sdstrategy(bp)
    557 	struct buf *bp;
    558 {
    559 	struct sd_softc *sd = sd_cd.cd_devs[SDUNIT(bp->b_dev)];
    560 	struct scsipi_periph *periph = sd->sc_periph;
    561 	int s;
    562 
    563 	SC_DEBUG(sd->sc_link, SDEV_DB2, ("sdstrategy "));
    564 	SC_DEBUG(sd->sc_link, SDEV_DB1,
    565 	    ("%ld bytes @ blk %d\n", bp->b_bcount, bp->b_blkno));
    566 	/*
    567 	 * If the device has been made invalid, error out
    568 	 */
    569 	if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0 ||
    570 	    (sd->sc_dev.dv_flags & DVF_ACTIVE) == 0) {
    571 		if (periph->periph_flags & PERIPH_OPEN)
    572 			bp->b_error = EIO;
    573 		else
    574 			bp->b_error = ENODEV;
    575 		goto bad;
    576 	}
    577 	/*
    578 	 * The transfer must be a whole number of blocks, offset must not be
    579 	 * negative.
    580 	 */
    581 	if ((bp->b_bcount % sd->sc_dk.dk_label->d_secsize) != 0 ||
    582 	    bp->b_blkno < 0) {
    583 		bp->b_error = EINVAL;
    584 		goto bad;
    585 	}
    586 	/*
     587 	 * If it's a null transfer, return immediately
    588 	 */
    589 	if (bp->b_bcount == 0)
    590 		goto done;
    591 
    592 	/*
     593 	 * Do bounds checking, adjust transfer.  If error, process.
    594 	 * If end of partition, just return.
    595 	 */
    596 	if (SDPART(bp->b_dev) != RAW_PART &&
    597 	    bounds_check_with_label(bp, sd->sc_dk.dk_label,
    598 	    (sd->flags & (SDF_WLABEL|SDF_LABELLING)) != 0) <= 0)
    599 		goto done;
    600 
    601 	s = splbio();
    602 
    603 	/*
    604 	 * Place it in the queue of disk activities for this disk.
    605 	 *
    606 	 * XXX Only do disksort() if the current operating mode does not
    607 	 * XXX include tagged queueing.
    608 	 */
    609 	disksort(&sd->buf_queue, bp);
    610 
    611 	/*
    612 	 * Tell the device to get going on the transfer if it's
    613 	 * not doing anything, otherwise just wait for completion
    614 	 */
    615 	sdstart(sd->sc_periph);
    616 
    617 	splx(s);
    618 	return;
    619 
    620 bad:
    621 	bp->b_flags |= B_ERROR;
    622 done:
    623 	/*
    624 	 * Correctly set the buf to indicate a completed xfer
    625 	 */
    626 	bp->b_resid = bp->b_bcount;
    627 	biodone(bp);
    628 }
    629 
    630 /*
    631  * sdstart looks to see if there is a buf waiting for the device
    632  * and that the device is not already busy. If both are true,
    633  * It dequeues the buf and creates a scsi command to perform the
    634  * transfer in the buf. The transfer request will call scsipi_done
    635  * on completion, which will in turn call this routine again
    636  * so that the next queued transfer is performed.
    637  * The bufs are queued by the strategy routine (sdstrategy)
    638  *
    639  * This routine is also called after other non-queued requests
    640  * have been made of the scsi driver, to ensure that the queue
    641  * continues to be drained.
    642  *
    643  * must be called at the correct (highish) spl level
    644  * sdstart() is called at splbio from sdstrategy and scsipi_done
    645  */
    646 void
    647 sdstart(periph)
    648 	struct scsipi_periph *periph;
    649 {
    650 	struct sd_softc *sd = (void *)periph->periph_dev;
    651 	struct disklabel *lp = sd->sc_dk.dk_label;
    652 	struct buf *bp = 0;
    653 	struct buf *dp;
    654 	struct scsipi_rw_big cmd_big;
    655 #if NSD_SCSIBUS > 0
    656 	struct scsi_rw cmd_small;
    657 #endif
    658 	struct scsipi_generic *cmdp;
    659 	int flags, blkno, nblks, cmdlen, error;
    660 	struct partition *p;
    661 
    662 	SC_DEBUG(sc_link, SDEV_DB2, ("sdstart "));
    663 	/*
    664 	 * Check if the device has room for another command
    665 	 */
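	/*
	 * Each pass through the loop below dequeues one buf and issues one
	 * asynchronous command; we stop when the queue is empty or every
	 * opening (outstanding-command slot) on the periph is in use.
	 */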
    666 	while (periph->periph_active < periph->periph_openings) {
    667 		/*
     668 		 * There is excess capacity, but a special command waits.
    669 		 * It'll need the adapter as soon as we clear out of the
    670 		 * way and let it run (user level wait).
    671 		 */
    672 		if (periph->periph_flags & PERIPH_WAITING) {
    673 			periph->periph_flags &= ~PERIPH_WAITING;
    674 			wakeup((caddr_t)periph);
    675 			return;
    676 		}
    677 
    678 		/*
    679 		 * See if there is a buf with work for us to do..
    680 		 */
    681 		dp = &sd->buf_queue;
    682 		if ((bp = dp->b_actf) == NULL)	/* yes, an assign */
    683 			return;
    684 		dp->b_actf = bp->b_actf;
    685 
    686 		/*
    687 		 * If the device has become invalid, abort all the
    688 		 * reads and writes until all files have been closed and
    689 		 * re-opened
    690 		 */
    691 		if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0) {
    692 			bp->b_error = EIO;
    693 			bp->b_flags |= B_ERROR;
    694 			bp->b_resid = bp->b_bcount;
    695 			biodone(bp);
    696 			continue;
    697 		}
    698 
    699 		/*
    700 		 * We have a buf, now we should make a command
    701 		 *
    702 		 * First, translate the block to absolute and put it in terms
    703 		 * of the logical blocksize of the device.
    704 		 */
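		/* b_blkno is counted in DEV_BSIZE (512-byte) units. */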
    705 		blkno = bp->b_blkno / (lp->d_secsize / DEV_BSIZE);
    706 		if (SDPART(bp->b_dev) != RAW_PART) {
    707 			p = &lp->d_partitions[SDPART(bp->b_dev)];
    708 			blkno += p->p_offset;
    709 		}
    710 		nblks = howmany(bp->b_bcount, lp->d_secsize);
    711 
    712 #if NSD_SCSIBUS > 0
    713 		/*
    714 		 *  Fill out the scsi command.  If the transfer will
    715 		 *  fit in a "small" cdb, use it.
    716 		 */
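		/*
		 * A 6-byte READ/WRITE CDB carries only a 21-bit block address
		 * and an 8-bit transfer length, hence the 0x1fffff and 0xff
		 * checks below.
		 */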
    717 		if (((blkno & 0x1fffff) == blkno) &&
    718 		    ((nblks & 0xff) == nblks) &&
    719 		    scsipi_periph_bustype(periph) == SCSIPI_BUSTYPE_SCSI) {
    720 			/*
    721 			 * We can fit in a small cdb.
    722 			 */
    723 			bzero(&cmd_small, sizeof(cmd_small));
    724 			cmd_small.opcode = (bp->b_flags & B_READ) ?
    725 			    SCSI_READ_COMMAND : SCSI_WRITE_COMMAND;
    726 			_lto3b(blkno, cmd_small.addr);
    727 			cmd_small.length = nblks & 0xff;
    728 			cmdlen = sizeof(cmd_small);
    729 			cmdp = (struct scsipi_generic *)&cmd_small;
    730 		} else
    731 #endif
    732 		{
    733 			/*
    734 			 * Need a large cdb.
    735 			 */
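			/* 10-byte CDB: 32-bit block address, 16-bit length. */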
    736 			bzero(&cmd_big, sizeof(cmd_big));
    737 			cmd_big.opcode = (bp->b_flags & B_READ) ?
    738 			    READ_BIG : WRITE_BIG;
    739 			_lto4b(blkno, cmd_big.addr);
    740 			_lto2b(nblks, cmd_big.length);
    741 			cmdlen = sizeof(cmd_big);
    742 			cmdp = (struct scsipi_generic *)&cmd_big;
    743 		}
    744 
    745 		/* Instrumentation. */
    746 		disk_busy(&sd->sc_dk);
    747 
    748 		/*
    749 		 * Mark the disk dirty so that the cache will be
    750 		 * flushed on close.
    751 		 */
    752 		if ((bp->b_flags & B_READ) == 0)
    753 			sd->flags |= SDF_DIRTY;
    754 
    755 		/*
    756 		 * Figure out what flags to use.
    757 		 * XXX Need a B_ORDERED.
    758 		 */
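		/*
		 * Reads are sent with simple tags (the drive may reorder
		 * them); writes use ordered tags so they complete in the
		 * order they were issued.
		 */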
    759 		flags = XS_CTL_NOSLEEP|XS_CTL_ASYNC;
    760 		if (bp->b_flags & B_READ)
    761 			flags |= XS_CTL_DATA_IN | XS_CTL_SIMPLE_TAG;
    762 		else
    763 			flags |= XS_CTL_DATA_OUT | XS_CTL_ORDERED_TAG;
    764 
    765 		/*
    766 		 * Call the routine that chats with the adapter.
    767 		 * Note: we cannot sleep as we may be an interrupt
    768 		 */
    769 		error = scsipi_command(periph, cmdp, cmdlen,
    770 		    (u_char *)bp->b_data, bp->b_bcount,
    771 		    SDRETRIES, 60000, bp, flags);
    772 		if (error) {
    773 			disk_unbusy(&sd->sc_dk, 0);
    774 			printf("%s: not queued, error %d\n",
    775 			    sd->sc_dev.dv_xname, error);
    776 		}
    777 	}
    778 }
    779 
    780 void
    781 sddone(xs)
    782 	struct scsipi_xfer *xs;
    783 {
    784 	struct sd_softc *sd = (void *)xs->xs_periph->periph_dev;
    785 
    786 	if (sd->flags & SDF_FLUSHING) {
    787 		/* Flush completed, no longer dirty. */
    788 		sd->flags &= ~(SDF_FLUSHING|SDF_DIRTY);
    789 	}
    790 
    791 	if (xs->bp != NULL) {
    792 		disk_unbusy(&sd->sc_dk, xs->bp->b_bcount - xs->bp->b_resid);
    793 #if NRND > 0
    794 		rnd_add_uint32(&sd->rnd_source, xs->bp->b_blkno);
    795 #endif
    796 	}
    797 }
    798 
    799 void
    800 sdminphys(bp)
    801 	struct buf *bp;
    802 {
    803 	struct sd_softc *sd = sd_cd.cd_devs[SDUNIT(bp->b_dev)];
    804 	long max;
    805 
    806 	/*
    807 	 * If the device is ancient, we want to make sure that
    808 	 * the transfer fits into a 6-byte cdb.
    809 	 *
    810 	 * XXX Note that the SCSI-I spec says that 256-block transfers
    811 	 * are allowed in a 6-byte read/write, and are specified
     812 	 * by setting the "length" to 0.  However, we're conservative
    813 	 * here, allowing only 255-block transfers in case an
    814 	 * ancient device gets confused by length == 0.  A length of 0
    815 	 * in a 10-byte read/write actually means 0 blocks.
    816 	 */
    817 	if (sd->flags & SDF_ANCIENT) {
    818 		max = sd->sc_dk.dk_label->d_secsize * 0xff;
    819 
    820 		if (bp->b_bcount > max)
    821 			bp->b_bcount = max;
    822 	}
    823 
    824 	(*sd->sc_periph->periph_channel->chan_adapter->adapt_minphys)(bp);
    825 }
    826 
    827 int
    828 sdread(dev, uio, ioflag)
    829 	dev_t dev;
    830 	struct uio *uio;
    831 	int ioflag;
    832 {
    833 
    834 	return (physio(sdstrategy, NULL, dev, B_READ, sdminphys, uio));
    835 }
    836 
    837 int
    838 sdwrite(dev, uio, ioflag)
    839 	dev_t dev;
    840 	struct uio *uio;
    841 	int ioflag;
    842 {
    843 
    844 	return (physio(sdstrategy, NULL, dev, B_WRITE, sdminphys, uio));
    845 }
    846 
    847 /*
    848  * Perform special action on behalf of the user
    849  * Knows about the internals of this device
    850  */
    851 int
    852 sdioctl(dev, cmd, addr, flag, p)
    853 	dev_t dev;
    854 	u_long cmd;
    855 	caddr_t addr;
    856 	int flag;
    857 	struct proc *p;
    858 {
    859 	struct sd_softc *sd = sd_cd.cd_devs[SDUNIT(dev)];
    860 	struct scsipi_periph *periph = sd->sc_periph;
    861 	int part = SDPART(dev);
    862 	int error;
    863 
    864 	SC_DEBUG(sd->sc_link, SDEV_DB2, ("sdioctl 0x%lx ", cmd));
    865 
    866 	if ((sd->sc_dev.dv_flags & DVF_ACTIVE) == 0)
    867 		return (ENODEV);
    868 
    869 	/*
    870 	 * If the device is not valid, some IOCTLs can still be
    871 	 * handled on the raw partition. Check this here.
    872 	 */
    873 	if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0) {
    874 		switch (cmd) {
    875 		case DIOCWLABEL:
    876 		case DIOCLOCK:
    877 		case DIOCEJECT:
    878 		case ODIOCEJECT:
    879 		case SCIOCIDENTIFY:
    880 		case OSCIOCIDENTIFY:
    881 		case SCIOCCOMMAND:
    882 		case SCIOCDEBUG:
    883 			if (part == RAW_PART)
    884 				break;
    885 		/* FALLTHROUGH */
    886 		default:
    887 			if ((periph->periph_flags & PERIPH_OPEN) == 0)
    888 				return (ENODEV);
    889 			else
    890 				return (EIO);
    891 		}
    892 	}
    893 
    894 	switch (cmd) {
    895 	case DIOCGDINFO:
    896 		*(struct disklabel *)addr = *(sd->sc_dk.dk_label);
    897 		return (0);
    898 
    899 	case DIOCGPART:
    900 		((struct partinfo *)addr)->disklab = sd->sc_dk.dk_label;
    901 		((struct partinfo *)addr)->part =
    902 		    &sd->sc_dk.dk_label->d_partitions[part];
    903 		return (0);
    904 
    905 	case DIOCWDINFO:
    906 	case DIOCSDINFO:
    907 		if ((flag & FWRITE) == 0)
    908 			return (EBADF);
    909 
    910 		if ((error = sdlock(sd)) != 0)
    911 			return (error);
    912 		sd->flags |= SDF_LABELLING;
    913 
    914 		error = setdisklabel(sd->sc_dk.dk_label,
    915 		    (struct disklabel *)addr, /*sd->sc_dk.dk_openmask : */0,
    916 		    sd->sc_dk.dk_cpulabel);
    917 		if (error == 0) {
    918 			if (cmd == DIOCWDINFO)
    919 				error = writedisklabel(SDLABELDEV(dev),
    920 				    sdstrategy, sd->sc_dk.dk_label,
    921 				    sd->sc_dk.dk_cpulabel);
    922 		}
    923 
    924 		sd->flags &= ~SDF_LABELLING;
    925 		sdunlock(sd);
    926 		return (error);
    927 
    928 	case DIOCWLABEL:
    929 		if ((flag & FWRITE) == 0)
    930 			return (EBADF);
    931 		if (*(int *)addr)
    932 			sd->flags |= SDF_WLABEL;
    933 		else
    934 			sd->flags &= ~SDF_WLABEL;
    935 		return (0);
    936 
    937 	case DIOCLOCK:
    938 		return (scsipi_prevent(periph,
    939 		    (*(int *)addr) ? PR_PREVENT : PR_ALLOW, 0));
    940 
    941 	case DIOCEJECT:
    942 		if ((periph->periph_flags & PERIPH_REMOVABLE) == 0)
    943 			return (ENOTTY);
    944 		if (*(int *)addr == 0) {
    945 			/*
    946 			 * Don't force eject: check that we are the only
    947 			 * partition open. If so, unlock it.
    948 			 */
    949 			if ((sd->sc_dk.dk_openmask & ~(1 << part)) == 0 &&
    950 			    sd->sc_dk.dk_bopenmask + sd->sc_dk.dk_copenmask ==
    951 			    sd->sc_dk.dk_openmask) {
    952 				error = scsipi_prevent(periph, PR_ALLOW,
    953 				    XS_CTL_IGNORE_NOT_READY);
    954 				if (error)
    955 					return (error);
    956 			} else {
    957 				return (EBUSY);
    958 			}
    959 		}
    960 		/* FALLTHROUGH */
    961 	case ODIOCEJECT:
    962 		return ((periph->periph_flags & PERIPH_REMOVABLE) == 0 ?
    963 		    ENOTTY : scsipi_start(periph, SSS_STOP|SSS_LOEJ, 0));
    964 
    965 	case DIOCGDEFLABEL:
    966 		sdgetdefaultlabel(sd, (struct disklabel *)addr);
    967 		return (0);
    968 
    969 	default:
    970 		if (part != RAW_PART)
    971 			return (ENOTTY);
    972 		return (scsipi_do_ioctl(periph, dev, cmd, addr, flag, p));
    973 	}
    974 
    975 #ifdef DIAGNOSTIC
    976 	panic("sdioctl: impossible");
    977 #endif
    978 }
    979 
    980 void
    981 sdgetdefaultlabel(sd, lp)
    982 	struct sd_softc *sd;
    983 	struct disklabel *lp;
    984 {
    985 
    986 	bzero(lp, sizeof(struct disklabel));
    987 
    988 	lp->d_secsize = sd->params.blksize;
    989 	lp->d_ntracks = sd->params.heads;
    990 	lp->d_nsectors = sd->params.sectors;
    991 	lp->d_ncylinders = sd->params.cyls;
    992 	lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
    993 
    994 	switch (scsipi_periph_bustype(sd->sc_periph)) {
    995 #if NSD_SCSIBUS > 0
    996 	case SCSIPI_BUSTYPE_SCSI:
    997 		lp->d_type = DTYPE_SCSI;
    998 		break;
    999 #endif
   1000 #if NSD_ATAPIBUS > 0
   1001 	case SCSIPI_BUSTYPE_ATAPI:
   1002 		lp->d_type = DTYPE_ATAPI;
   1003 		break;
   1004 #endif
   1005 	}
   1006 	strncpy(lp->d_typename, sd->name, 16);
   1007 	strncpy(lp->d_packname, "fictitious", 16);
   1008 	lp->d_secperunit = sd->params.disksize;
   1009 	lp->d_rpm = sd->params.rot_rate;
   1010 	lp->d_interleave = 1;
   1011 	lp->d_flags = 0;
   1012 
   1013 	lp->d_partitions[RAW_PART].p_offset = 0;
   1014 	lp->d_partitions[RAW_PART].p_size =
   1015 	    lp->d_secperunit * (lp->d_secsize / DEV_BSIZE);
   1016 	lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED;
   1017 	lp->d_npartitions = RAW_PART + 1;
   1018 
   1019 	lp->d_magic = DISKMAGIC;
   1020 	lp->d_magic2 = DISKMAGIC;
   1021 	lp->d_checksum = dkcksum(lp);
   1022 }
   1023 
   1024 
   1025 /*
   1026  * Load the label information on the named device
   1027  */
   1028 void
   1029 sdgetdisklabel(sd)
   1030 	struct sd_softc *sd;
   1031 {
   1032 	struct disklabel *lp = sd->sc_dk.dk_label;
   1033 	char *errstring;
   1034 
   1035 	bzero(sd->sc_dk.dk_cpulabel, sizeof(struct cpu_disklabel));
   1036 
   1037 	sdgetdefaultlabel(sd, lp);
   1038 
   1039 	if (lp->d_secpercyl == 0) {
   1040 		lp->d_secpercyl = 100;
   1041 		/* as long as it's not 0 - readdisklabel divides by it (?) */
   1042 	}
   1043 
   1044 	/*
   1045 	 * Call the generic disklabel extraction routine
   1046 	 */
   1047 	errstring = readdisklabel(MAKESDDEV(0, sd->sc_dev.dv_unit, RAW_PART),
   1048 	    sdstrategy, lp, sd->sc_dk.dk_cpulabel);
   1049 	if (errstring) {
   1050 		printf("%s: %s\n", sd->sc_dev.dv_xname, errstring);
   1051 		return;
   1052 	}
   1053 }
   1054 
   1055 void
   1056 sd_shutdown(arg)
   1057 	void *arg;
   1058 {
   1059 	struct sd_softc *sd = arg;
   1060 
   1061 	/*
   1062 	 * If the disk cache needs to be flushed, and the disk supports
   1063 	 * it, flush it.  We're cold at this point, so we poll for
   1064 	 * completion.
   1065 	 */
   1066 	if ((sd->flags & SDF_DIRTY) != 0 && sd->sc_ops->sdo_flush != NULL) {
   1067 		if ((*sd->sc_ops->sdo_flush)(sd, XS_CTL_NOSLEEP|XS_CTL_POLL)) {
   1068 			printf("%s: cache synchronization failed\n",
   1069 			    sd->sc_dev.dv_xname);
   1070 			sd->flags &= ~SDF_FLUSHING;
   1071 		} else
   1072 			sd->flags &= ~(SDF_FLUSHING|SDF_DIRTY);
   1073 	}
   1074 }
   1075 
   1076 /*
   1077  * Tell the device to map out a defective block
   1078  */
   1079 int
   1080 sd_reassign_blocks(sd, blkno)
   1081 	struct sd_softc *sd;
   1082 	u_long blkno;
   1083 {
   1084 	struct scsi_reassign_blocks scsipi_cmd;
   1085 	struct scsi_reassign_blocks_data rbdata;
   1086 
   1087 	bzero(&scsipi_cmd, sizeof(scsipi_cmd));
   1088 	bzero(&rbdata, sizeof(rbdata));
   1089 	scsipi_cmd.opcode = SCSI_REASSIGN_BLOCKS;
   1090 
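	/*
	 * The REASSIGN BLOCKS parameter list is a small header whose length
	 * field covers the defect descriptors that follow; here a single
	 * 4-byte logical block address is sent.
	 */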
   1091 	_lto2b(sizeof(rbdata.defect_descriptor[0]), rbdata.length);
   1092 	_lto4b(blkno, rbdata.defect_descriptor[0].dlbaddr);
   1093 
   1094 	return (scsipi_command(sd->sc_periph,
   1095 	    (struct scsipi_generic *)&scsipi_cmd, sizeof(scsipi_cmd),
   1096 	    (u_char *)&rbdata, sizeof(rbdata), SDRETRIES, 5000, NULL,
   1097 	    XS_CTL_DATA_OUT));
   1098 }
   1099 
   1100 /*
   1101  * Check Errors
   1102  */
   1103 int
   1104 sd_interpret_sense(xs)
   1105 	struct scsipi_xfer *xs;
   1106 {
   1107 	struct scsipi_periph *periph = xs->xs_periph;
   1108 	struct scsipi_sense_data *sense = &xs->sense.scsi_sense;
   1109 	struct sd_softc *sd = (void *)periph->periph_dev;
   1110 	int s, error, retval = EJUSTRETURN;
   1111 
   1112 	/*
   1113 	 * If the periph is already recovering, just do the normal
   1114 	 * error processing.
   1115 	 */
   1116 	if (periph->periph_flags & PERIPH_RECOVERING)
   1117 		return (retval);
   1118 
   1119 	/*
   1120 	 * If the device is not open yet, let the generic code handle it.
   1121 	 */
   1122 	if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0)
   1123 		return (retval);
   1124 
   1125 	/*
    1126 	 * If it isn't an extended or extended/deferred error, let
   1127 	 * the generic code handle it.
   1128 	 */
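	/* Response codes 0x70 and 0x71 are current and deferred sense data. */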
   1129 	if ((sense->error_code & SSD_ERRCODE) != 0x70 &&
   1130 	    (sense->error_code & SSD_ERRCODE) != 0x71)
   1131 		return (retval);
   1132 
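	/*
	 * ASC 0x04 is "logical unit not ready"; ASCQ 0x01 means it is
	 * becoming ready on its own, ASCQ 0x02 means it needs a START UNIT
	 * command before it will become ready.
	 */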
   1133 	if ((sense->flags & SSD_KEY) == SKEY_NOT_READY &&
   1134 	    sense->add_sense_code == 0x4) {
   1135 		if (sense->add_sense_code_qual == 0x01)	{
   1136 			/*
   1137 			 * Unit In The Process Of Becoming Ready.
   1138 			 */
   1139 			printf("%s: waiting for pack to spin up...\n",
   1140 			    sd->sc_dev.dv_xname);
   1141 			scsipi_periph_freeze(periph, 1);
   1142 			timeout(scsipi_periph_timed_thaw, periph, 5 * hz);
   1143 			retval = ERESTART;
   1144 		} else if ((sense->add_sense_code_qual == 0x2) &&
   1145 		    (periph->periph_quirks & PQUIRK_NOSTARTUNIT) == 0) {
   1146 			printf("%s: pack is stopped, restarting...\n",
   1147 			    sd->sc_dev.dv_xname);
   1148 			s = splbio();
   1149 			periph->periph_flags |= PERIPH_RECOVERING;
   1150 			splx(s);
   1151 			error = scsipi_start(periph, SSS_START,
   1152 			    XS_CTL_URGENT|XS_CTL_HEAD_TAG|
   1153 			    XS_CTL_THAW_PERIPH|XS_CTL_FREEZE_PERIPH);
   1154 			if (error) {
   1155 				printf("%s: unable to restart pack\n",
   1156 				    sd->sc_dev.dv_xname);
   1157 				retval = error;
   1158 			} else
   1159 				retval = ERESTART;
   1160 			s = splbio();
   1161 			periph->periph_flags &= ~PERIPH_RECOVERING;
   1162 			splx(s);
   1163 		}
   1164 	}
   1165 	return (retval);
   1166 }
   1167 
   1168 
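/*
 * Return the size of the given partition in DEV_BSIZE blocks, for use
 * when sizing a swap or dump area; -1 if it can't be used.
 */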
   1169 int
   1170 sdsize(dev)
   1171 	dev_t dev;
   1172 {
   1173 	struct sd_softc *sd;
   1174 	int part, unit, omask;
   1175 	int size;
   1176 
   1177 	unit = SDUNIT(dev);
   1178 	if (unit >= sd_cd.cd_ndevs)
   1179 		return (-1);
   1180 	sd = sd_cd.cd_devs[unit];
   1181 	if (sd == NULL)
   1182 		return (-1);
   1183 
   1184 	if ((sd->sc_dev.dv_flags & DVF_ACTIVE) == 0)
   1185 		return (-1);
   1186 
   1187 	part = SDPART(dev);
   1188 	omask = sd->sc_dk.dk_openmask & (1 << part);
   1189 
   1190 	if (omask == 0 && sdopen(dev, 0, S_IFBLK, NULL) != 0)
   1191 		return (-1);
   1192 	if ((sd->sc_periph->periph_flags & PERIPH_MEDIA_LOADED) == 0)
   1193 		size = -1;
   1194 	else if (sd->sc_dk.dk_label->d_partitions[part].p_fstype != FS_SWAP)
   1195 		size = -1;
   1196 	else
   1197 		size = sd->sc_dk.dk_label->d_partitions[part].p_size *
   1198 		    (sd->sc_dk.dk_label->d_secsize / DEV_BSIZE);
   1199 	if (omask == 0 && sdclose(dev, 0, S_IFBLK, NULL) != 0)
   1200 		return (-1);
   1201 	return (size);
   1202 }
   1203 
   1204 #ifndef __BDEVSW_DUMP_OLD_TYPE
   1205 /* #define SD_DUMP_NOT_TRUSTED if you just want to watch */
   1206 static struct scsipi_xfer sx;
   1207 static int sddoingadump;
   1208 
   1209 /*
   1210  * dump all of physical memory into the partition specified, starting
   1211  * at offset 'dumplo' into the partition.
   1212  */
   1213 int
   1214 sddump(dev, blkno, va, size)
   1215 	dev_t dev;
   1216 	daddr_t blkno;
   1217 	caddr_t va;
   1218 	size_t size;
   1219 {
   1220 	struct sd_softc *sd;	/* disk unit to do the I/O */
   1221 	struct disklabel *lp;	/* disk's disklabel */
   1222 	int	unit, part;
   1223 	int	sectorsize;	/* size of a disk sector */
   1224 	int	nsects;		/* number of sectors in partition */
   1225 	int	sectoff;	/* sector offset of partition */
   1226 	int	totwrt;		/* total number of sectors left to write */
   1227 	int	nwrt;		/* current number of sectors to write */
   1228 	struct scsipi_rw_big cmd;	/* write command */
   1229 	struct scsipi_xfer *xs;	/* ... convenience */
    1230 	struct scsipi_periph *periph;
    1231 	struct scsipi_channel *chan;
    1232 
    1233 	/* Check if recursive dump; if so, punt. */
    1234 	if (sddoingadump)
    1235 		return (EFAULT);
    1236 	/* Mark as active early. */
    1237 	sddoingadump = 1;
    1238 	unit = SDUNIT(dev);	/* Decompose unit & partition. */
    1239 	part = SDPART(dev);
    1240 
    1241 	/* Check for acceptable drive number. */
    1242 	if (unit >= sd_cd.cd_ndevs || (sd = sd_cd.cd_devs[unit]) == NULL)
    1243 		return (ENXIO);
    1244 	if ((sd->sc_dev.dv_flags & DVF_ACTIVE) == 0)
    1245 		return (ENODEV);
    1246 
    1247 	periph = sd->sc_periph;
    1248 	chan = periph->periph_channel;
    1249 
    1250 	/* Make sure it was initialized. */
    1251 	if ((periph->periph_flags & PERIPH_MEDIA_LOADED) == 0)
    1252 		return (ENXIO);
   1253 
   1254 	/* Convert to disk sectors.  Request must be a multiple of size. */
   1255 	lp = sd->sc_dk.dk_label;
   1256 	sectorsize = lp->d_secsize;
   1257 	if ((size % sectorsize) != 0)
   1258 		return (EFAULT);
   1259 	totwrt = size / sectorsize;
    1260 	blkno = dbtob(blkno) / sectorsize;	/* convert DEV_BSIZE units to sectors */
   1261 
   1262 	nsects = lp->d_partitions[part].p_size;
   1263 	sectoff = lp->d_partitions[part].p_offset;
   1264 
   1265 	/* Check transfer bounds against partition size. */
   1266 	if ((blkno < 0) || ((blkno + totwrt) > nsects))
   1267 		return (EINVAL);
   1268 
   1269 	/* Offset block number to start of partition. */
   1270 	blkno += sectoff;
   1271 
   1272 	xs = &sx;
   1273 
   1274 	while (totwrt > 0) {
   1275 		nwrt = totwrt;		/* XXX */
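		/*
		 * XXX The whole remaining dump is issued as a single write;
		 * a more careful version would presumably clamp nwrt to what
		 * the adapter can transfer in one command.
		 */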
   1276 #ifndef	SD_DUMP_NOT_TRUSTED
   1277 		/*
   1278 		 *  Fill out the scsi command
   1279 		 */
   1280 		bzero(&cmd, sizeof(cmd));
   1281 		cmd.opcode = WRITE_BIG;
   1282 		_lto4b(blkno, cmd.addr);
   1283 		_lto2b(nwrt, cmd.length);
   1284 		/*
   1285 		 * Fill out the scsipi_xfer structure
   1286 		 *    Note: we cannot sleep as we may be an interrupt
   1287 		 * don't use scsipi_command() as it may want to wait
   1288 		 * for an xs.
   1289 		 */
   1290 		bzero(xs, sizeof(sx));
   1291 		xs->xs_control |= XS_CTL_NOSLEEP | XS_CTL_POLL |
   1292 		    XS_CTL_DATA_OUT;
   1293 		xs->xs_status = 0;
   1294 		xs->xs_periph = periph;
   1295 		xs->xs_retries = SDRETRIES;
   1296 		xs->timeout = 10000;	/* 10000 millisecs for a disk ! */
   1297 		xs->cmd = (struct scsipi_generic *)&cmd;
   1298 		xs->cmdlen = sizeof(cmd);
   1299 		xs->resid = nwrt * sectorsize;
   1300 		xs->error = XS_NOERROR;
   1301 		xs->bp = 0;
   1302 		xs->data = va;
   1303 		xs->datalen = nwrt * sectorsize;
   1304 
   1305 		/*
   1306 		 * Pass all this info to the scsi driver.
   1307 		 */
   1308 		scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
   1309 		if ((xs->xs_status & XS_STS_DONE) == 0 ||
   1310 		    xs->error != XS_NOERROR)
   1311 			return (EIO);
   1312 #else	/* SD_DUMP_NOT_TRUSTED */
   1313 		/* Let's just talk about this first... */
   1314 		printf("sd%d: dump addr 0x%x, blk %d\n", unit, va, blkno);
   1315 		delay(500 * 1000);	/* half a second */
   1316 #endif	/* SD_DUMP_NOT_TRUSTED */
   1317 
   1318 		/* update block count */
   1319 		totwrt -= nwrt;
   1320 		blkno += nwrt;
   1321 		va += sectorsize * nwrt;
   1322 	}
   1323 	sddoingadump = 0;
   1324 	return (0);
   1325 }
    1326 #else	/* __BDEVSW_DUMP_OLD_TYPE */
   1327 int
   1328 sddump(dev, blkno, va, size)
   1329 	dev_t dev;
   1330 	daddr_t blkno;
   1331 	caddr_t va;
   1332 	size_t size;
   1333 {
   1334 
   1335 	/* Not implemented. */
   1336 	return (ENXIO);
   1337 }
    1338 #endif	/* __BDEVSW_DUMP_OLD_TYPE */
   1339