/*	$NetBSD: scsipi_base.c,v 1.26.2.6 1999/11/01 22:54:19 thorpej Exp $	*/

/*-
 * Copyright (c) 1998, 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_scsi.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/kthread.h>

#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsipi_disk.h>
#include <dev/scsipi/scsipiconf.h>
#include <dev/scsipi/scsipi_base.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsi_message.h>

int	scsipi_complete __P((struct scsipi_xfer *));
int	scsipi_enqueue __P((struct scsipi_xfer *));
void	scsipi_run_queue __P((struct scsipi_channel *chan));

void	scsipi_completion_thread __P((void *));

void	scsipi_get_tag __P((struct scsipi_xfer *));
void	scsipi_put_tag __P((struct scsipi_xfer *));

void	scsipi_async_event_max_openings __P((struct scsipi_channel *,
	    struct scsipi_max_openings *));
void	scsipi_async_event_xfer_mode __P((struct scsipi_channel *,
	    struct scsipi_xfer_mode *));

struct pool scsipi_xfer_pool;

/*
 * scsipi_init:
 *
 *	Called when a scsibus or atapibus is attached to the system
 *	to initialize shared data structures.
 */
void
scsipi_init()
{
	static int scsipi_init_done;

	if (scsipi_init_done)
		return;
	scsipi_init_done = 1;

	/* Initialize the scsipi_xfer pool. */
	pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
	    0, 0, "scxspl", 0, NULL, NULL, M_DEVBUF);
}

/*
 * scsipi_channel_init:
 *
 *	Initialize a scsipi_channel when it is attached.
 */
void
scsipi_channel_init(chan)
	struct scsipi_channel *chan;
{
	size_t nbytes;
	int i;

	/* Initialize shared data. */
	scsipi_init();

	/* Initialize the queues. */
	TAILQ_INIT(&chan->chan_queue);
	TAILQ_INIT(&chan->chan_complete);

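	/*
	 * chan_periphs is a two-level table indexed by target and
	 * LUN, i.e. chan_periphs[target][lun]; see
	 * scsipi_lookup_periph() below.
	 */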
	nbytes = chan->chan_ntargets * sizeof(struct scsipi_periph **);
	chan->chan_periphs = malloc(nbytes, M_DEVBUF, M_WAITOK);

	nbytes = chan->chan_nluns * sizeof(struct scsipi_periph *);
	for (i = 0; i < chan->chan_ntargets; i++) {
		chan->chan_periphs[i] = malloc(nbytes, M_DEVBUF, M_WAITOK);
		memset(chan->chan_periphs[i], 0, nbytes);
	}

	/*
	 * Create the asynchronous completion thread.
	 */
	kthread_create(scsipi_create_completion_thread, chan);
}

/*
 * scsipi_lookup_periph:
 *
 *	Lookup a periph on the specified channel.
 */
struct scsipi_periph *
scsipi_lookup_periph(chan, target, lun)
	struct scsipi_channel *chan;
	int target, lun;
{
	struct scsipi_periph *periph;
	int s;

	if (target >= chan->chan_ntargets ||
	    lun >= chan->chan_nluns)
		return (NULL);

	s = splbio();
	periph = chan->chan_periphs[target][lun];
	splx(s);

	return (periph);
}

/*
 * scsipi_get_resource:
 *
 *	Allocate a single xfer `resource' from the channel.
 *
 *	NOTE: Must be called at splbio().
 */
int
scsipi_get_resource(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
		if (chan->chan_openings > 0) {
			chan->chan_openings--;
			return (1);
		}
		return (0);
	}

	if (adapt->adapt_openings > 0) {
		adapt->adapt_openings--;
		return (1);
	}
	return (0);
}

/*
 * scsipi_grow_resources:
 *
 *	Attempt to grow resources for a channel.  If this succeeds,
 *	we allocate one for our caller.
 *
 *	NOTE: Must be called at splbio().
 */
int
scsipi_grow_resources(chan)
	struct scsipi_channel *chan;
{

	if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
		scsipi_adapter_request(chan, ADAPTER_REQ_GROW_RESOURCES, NULL);
		return (scsipi_get_resource(chan));
	}

	return (0);
}

/*
 * scsipi_put_resource:
 *
 *	Free a single xfer `resource' to the channel.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_resource(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
		chan->chan_openings++;
	else
		adapt->adapt_openings++;
}

/*
 * scsipi_get_tag:
 *
 *	Get a tag ID for the specified xfer.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_get_tag(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int word, bit, tag;

	for (word = 0; word < PERIPH_NTAGWORDS; word++) {
		bit = ffs(periph->periph_freetags[word]);
		if (bit != 0)
			break;
	}
#ifdef DIAGNOSTIC
	if (word == PERIPH_NTAGWORDS) {
		scsipi_printaddr(periph);
		printf("no free tags\n");
		panic("scsipi_get_tag");
	}
#endif

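	/*
	 * Tag IDs are encoded as (word * 32) + bit within the periph's
	 * free-tag bitmap; e.g. word 1, bit 5 yields tag 37.
	 * scsipi_put_tag() below inverts this encoding.
	 */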
	bit -= 1;
	periph->periph_freetags[word] &= ~(1 << bit);
	tag = (word << 5) | bit;

	/* XXX Should eventually disallow this completely. */
	if (tag >= periph->periph_openings) {
		scsipi_printaddr(periph);
		printf("WARNING: tag %d greater than available openings %d\n",
		    tag, periph->periph_openings);
	}

	xs->xs_tag_id = tag;
}

/*
 * scsipi_put_tag:
 *
 *	Put the tag ID for the specified xfer back into the pool.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_tag(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int word, bit;

	word = xs->xs_tag_id >> 5;
	bit = xs->xs_tag_id & 0x1f;

	periph->periph_freetags[word] |= (1 << bit);
}

/*
 * scsipi_get_xs:
 *
 *	Allocate an xfer descriptor and associate it with the
 *	specified peripheral.  If the peripheral has no more
 *	available command openings, we either block waiting for
 *	one to become available, or fail.
 */
struct scsipi_xfer *
scsipi_get_xs(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_xfer *xs;
	int s;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));

	/*
	 * If we're cold, make sure we poll.
	 */
	if (cold)
		flags |= XS_CTL_NOSLEEP | XS_CTL_POLL;

#ifdef DIAGNOSTIC
	/*
	 * URGENT commands can never be ASYNC.
	 */
	if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
	    (XS_CTL_URGENT|XS_CTL_ASYNC)) {
		scsipi_printaddr(periph);
		printf("URGENT and ASYNC\n");
		panic("scsipi_get_xs");
	}
#endif

	s = splbio();
	/*
	 * Wait for a command opening to become available.  Rules:
	 *
	 *	- All xfers must wait for an available opening.
	 *	  Exception: URGENT xfers can proceed when
	 *	  active == openings, because we use the opening
	 *	  of the command we're recovering for.
	 *
	 *	- If the periph is recovering, only URGENT xfers may
	 *	  proceed.
	 *
	 *	- If the periph is currently executing a recovery
	 *	  command, URGENT commands must block, because only
	 *	  one recovery command can execute at a time.
	 */
	for (;;) {
		if (flags & XS_CTL_URGENT) {
			if (periph->periph_active > periph->periph_openings ||
			    (periph->periph_flags &
			     PERIPH_RECOVERY_ACTIVE) != 0)
				goto wait_for_opening;
			periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
			break;
		}
		if (periph->periph_active >= periph->periph_openings ||
		    (periph->periph_flags & PERIPH_RECOVERING) != 0)
			goto wait_for_opening;
		periph->periph_active++;
		break;

 wait_for_opening:
		if (flags & XS_CTL_NOSLEEP) {
			splx(s);
			return (NULL);
		}
		SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
		periph->periph_flags |= PERIPH_WAITING;
		(void) tsleep(periph, PRIBIO, "getxs", 0);
	}
	SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
	xs = pool_get(&scsipi_xfer_pool,
	    ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
	if (xs == NULL) {
		if (flags & XS_CTL_URGENT)
			periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
		else
			periph->periph_active--;
		scsipi_printaddr(periph);
		printf("unable to allocate %sscsipi_xfer\n",
		    (flags & XS_CTL_URGENT) ? "URGENT " : "");
	}
	splx(s);

	SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));

	if (xs != NULL) {
		memset(xs, 0, sizeof(*xs));
		xs->xs_periph = periph;
		xs->xs_control = flags;
		s = splbio();
		TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
		splx(s);
	}
	return (xs);
}

/*
 * scsipi_put_xs:
 *
 *	Release an xfer descriptor, decreasing the outstanding command
 *	count for the peripheral.  If there is a thread waiting for
 *	an opening, wake it up.  If not, kick any queued I/O the
 *	peripheral may have.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_xs(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags = xs->xs_control;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));

	TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
	pool_put(&scsipi_xfer_pool, xs);

#ifdef DIAGNOSTIC
	if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
	    periph->periph_active == 0) {
		scsipi_printaddr(periph);
		printf("recovery without a command to recover for\n");
		panic("scsipi_put_xs");
	}
#endif

	if (flags & XS_CTL_URGENT)
		periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
	else
		periph->periph_active--;
	if (periph->periph_active == 0 &&
	    (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
		periph->periph_flags &= ~PERIPH_WAITDRAIN;
		wakeup(&periph->periph_active);
	}

	if (periph->periph_flags & PERIPH_WAITING) {
		periph->periph_flags &= ~PERIPH_WAITING;
		wakeup(periph);
	} else {
		if (periph->periph_switch->psw_start != NULL) {
			SC_DEBUG(periph, SCSIPI_DB2,
			    ("calling private start()\n"));
			(*periph->periph_switch->psw_start)(periph);
		}
	}
}

/*
 * scsipi_channel_freeze:
 *
 *	Freeze a channel's xfer queue.
 */
void
scsipi_channel_freeze(chan, count)
	struct scsipi_channel *chan;
	int count;
{
	int s;

	s = splbio();
	chan->chan_qfreeze += count;
	splx(s);
}

/*
 * scsipi_channel_thaw:
 *
 *	Thaw a channel's xfer queue.
 */
void
scsipi_channel_thaw(chan, count)
	struct scsipi_channel *chan;
	int count;
{
	int s;

	s = splbio();
	chan->chan_qfreeze -= count;
	splx(s);
}
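
/*
 * Freeze counts nest: each freeze of a channel (or of a periph,
 * below) must eventually be matched by a thaw.  For example, a
 * caller may pair a freeze with a timed thaw:
 *
 *	scsipi_channel_freeze(chan, 1);
 *	timeout(scsipi_channel_timed_thaw, chan, hz);
 */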

/*
 * scsipi_channel_timed_thaw:
 *
 *	Thaw a channel after some time has expired.
 */
void
scsipi_channel_timed_thaw(arg)
	void *arg;
{
	struct scsipi_channel *chan = arg;

	scsipi_channel_thaw(chan, 1);

	/*
	 * Kick the channel's queue here.  Note, we're running in
	 * interrupt context (softclock), so the adapter driver
	 * had better not sleep.
	 */
	scsipi_run_queue(chan);
}

/*
 * scsipi_periph_freeze:
 *
 *	Freeze a device's xfer queue.
 */
void
scsipi_periph_freeze(periph, count)
	struct scsipi_periph *periph;
	int count;
{
	int s;

	s = splbio();
	periph->periph_qfreeze += count;
	splx(s);
}

/*
 * scsipi_periph_thaw:
 *
 *	Thaw a device's xfer queue.
 */
void
scsipi_periph_thaw(periph, count)
	struct scsipi_periph *periph;
	int count;
{
	int s;

	s = splbio();
	periph->periph_qfreeze -= count;
	if (periph->periph_qfreeze == 0 &&
	    (periph->periph_flags & PERIPH_WAITING) != 0)
		wakeup(periph);
	splx(s);
}

/*
 * scsipi_periph_timed_thaw:
 *
 *	Thaw a device after some time has expired.
 */
void
scsipi_periph_timed_thaw(arg)
	void *arg;
{
	struct scsipi_periph *periph = arg;

	scsipi_periph_thaw(periph, 1);

	/*
	 * Kick the channel's queue here.  Note, we're running in
	 * interrupt context (softclock), so the adapter driver
	 * had better not sleep.
	 */
	scsipi_run_queue(periph->periph_channel);
}

/*
 * scsipi_wait_drain:
 *
 *	Wait for a periph's pending xfers to drain.
 */
void
scsipi_wait_drain(periph)
	struct scsipi_periph *periph;
{
	int s;

	s = splbio();
	while (periph->periph_active != 0) {
		periph->periph_flags |= PERIPH_WAITDRAIN;
		(void) tsleep(&periph->periph_active, PRIBIO, "sxdrn", 0);
	}
	splx(s);
}

/*
 * scsipi_kill_pending:
 *
 *	Kill off all pending xfers for a periph.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_kill_pending(periph)
	struct scsipi_periph *periph;
{

	(*periph->periph_channel->chan_bustype->bustype_kill_pending)(periph);
#ifdef DIAGNOSTIC
	if (TAILQ_FIRST(&periph->periph_xferq) != NULL)
		panic("scsipi_kill_pending");
#endif
}

/*
 * scsipi_interpret_sense:
 *
 *	Look at the returned sense and act on the error, determining
 *	the unix error number to pass back.  (0 = report no error)
 *
 *	NOTE: If we return ERESTART, we are expected to have
 *	thawed the device!
 *
 *	THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
 */
int
scsipi_interpret_sense(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_sense_data *sense;
	struct scsipi_periph *periph = xs->xs_periph;
	u_int8_t key;
	u_int32_t info;
	int error;
#ifndef	SCSIVERBOSE
	static char *error_mes[] = {
		"soft error (corrected)",
		"not ready", "medium error",
		"non-media hardware failure", "illegal request",
		"unit attention", "readonly device",
		"no data found", "vendor unique",
		"copy aborted", "command aborted",
		"search returned equal", "volume overflow",
		"verify miscompare", "unknown error key"
	};
#endif

	sense = &xs->sense.scsi_sense;
#ifdef SCSIPI_DEBUG
	if (periph->periph_dbflags & SCSIPI_DB1) {
		int count;
		scsipi_printaddr(periph);
		printf(" sense debug information:\n");
		printf("\tcode 0x%x valid 0x%x\n",
			sense->error_code & SSD_ERRCODE,
			sense->error_code & SSD_ERRCODE_VALID ? 1 : 0);
		printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
			sense->segment,
			sense->flags & SSD_KEY,
			sense->flags & SSD_ILI ? 1 : 0,
			sense->flags & SSD_EOM ? 1 : 0,
			sense->flags & SSD_FILEMARK ? 1 : 0);
		printf("\tinfo: 0x%x 0x%x 0x%x 0x%x followed by %d "
			"extra bytes\n",
			sense->info[0],
			sense->info[1],
			sense->info[2],
			sense->info[3],
			sense->extra_len);
		printf("\textra: ");
		for (count = 0; count < ADD_BYTES_LIM(sense); count++)
			printf("0x%x ", sense->cmd_spec_info[count]);
		printf("\n");
	}
#endif

	/*
	 * If the periph has its own error handler, call it first.
	 * If it returns a legit error value, return that, otherwise
	 * it wants us to continue with normal error processing.
	 */
	if (periph->periph_switch->psw_error != NULL) {
		SC_DEBUG(periph, SCSIPI_DB2,
		    ("calling private err_handler()\n"));
		error = (*periph->periph_switch->psw_error)(xs);
		if (error != EJUSTRETURN)
			return (error);
	}
	/* otherwise use the default */
	switch (sense->error_code & SSD_ERRCODE) {
		/*
		 * If it's code 70, use the extended stuff and
		 * interpret the key
		 */
	case 0x71:		/* delayed error */
		scsipi_printaddr(periph);
		key = sense->flags & SSD_KEY;
		printf(" DEFERRED ERROR, key = 0x%x\n", key);
		/* FALLTHROUGH */
	case 0x70:
		if ((sense->error_code & SSD_ERRCODE_VALID) != 0)
			info = _4btol(sense->info);
		else
			info = 0;
		key = sense->flags & SSD_KEY;

		switch (key) {
		case SKEY_NO_SENSE:
		case SKEY_RECOVERED_ERROR:
			if (xs->resid == xs->datalen && xs->datalen) {
				/*
				 * Why is this here?
				 */
				xs->resid = 0;	/* not short read */
			}
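			/* FALLTHROUGH */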
		case SKEY_EQUAL:
			error = 0;
			break;
		case SKEY_NOT_READY:
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
				return (0);
			if (sense->add_sense_code == 0x3A &&
			    sense->add_sense_code_qual == 0x00)
				error = ENODEV; /* Medium not present */
			else
				error = EIO;
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (error);
			break;
		case SKEY_ILLEGAL_REQUEST:
			if ((xs->xs_control &
			     XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
				return (0);
			/*
			 * Handle the case where a device reports
			 * Logical Unit Not Supported during discovery.
			 */
			if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
			    sense->add_sense_code == 0x25 &&
			    sense->add_sense_code_qual == 0x00)
				return (EINVAL);
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EINVAL;
			break;
		case SKEY_UNIT_ATTENTION:
			if (sense->add_sense_code == 0x29 &&
			    sense->add_sense_code_qual == 0x00) {
				/* device or bus reset */
				return (ERESTART);
			}
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control &
			     XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
				/* XXX Should reupload any transient state. */
				(periph->periph_flags &
				 PERIPH_REMOVABLE) == 0) {
				return (ERESTART);
			}
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EIO;
			break;
		case SKEY_WRITE_PROTECT:
			error = EROFS;
			break;
		case SKEY_BLANK_CHECK:
			error = 0;
			break;
		case SKEY_ABORTED_COMMAND:
			error = ERESTART;
			break;
		case SKEY_VOLUME_OVERFLOW:
			error = ENOSPC;
			break;
		default:
			error = EIO;
			break;
		}

#ifdef SCSIVERBOSE
		if ((xs->xs_control & XS_CTL_SILENT) == 0)
			scsipi_print_sense(xs, 0);
#else
		if (key) {
			scsipi_printaddr(periph);
			printf("%s", error_mes[key - 1]);
			if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
				switch (key) {
				case SKEY_NOT_READY:
				case SKEY_ILLEGAL_REQUEST:
				case SKEY_UNIT_ATTENTION:
				case SKEY_WRITE_PROTECT:
					break;
				case SKEY_BLANK_CHECK:
					printf(", requested size: %d (decimal)",
					    info);
					break;
				case SKEY_ABORTED_COMMAND:
					if (xs->xs_retries)
						printf(", retrying");
					printf(", cmd 0x%x, info 0x%x",
					    xs->cmd->opcode, info);
					break;
				default:
					printf(", info = %d (decimal)", info);
				}
			}
			if (sense->extra_len != 0) {
				int n;
				printf(", data =");
				for (n = 0; n < sense->extra_len; n++)
					printf(" %02x",
					    sense->cmd_spec_info[n]);
			}
			printf("\n");
		}
#endif
		return (error);

	/*
	 * Not code 70, just report it
	 */
	default:
		scsipi_printaddr(periph);
		printf("Sense Error Code 0x%x",
			sense->error_code & SSD_ERRCODE);
		if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
			struct scsipi_sense_data_unextended *usense =
			    (struct scsipi_sense_data_unextended *)sense;
			printf(" at block no. %d (decimal)",
			    _3btol(usense->block));
		}
		printf("\n");
		return (EIO);
	}
}

/*
 * scsipi_size:
 *
 *	Find out from the device what its capacity is.
 */
u_long
scsipi_size(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_read_cap_data rdcap;
	struct scsipi_read_capacity scsipi_cmd;

	bzero(&scsipi_cmd, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = READ_CAPACITY;

	/*
	 * If the command works, the result is the 4-byte address of
	 * the last block; add one to get the number of blocks.
	 */
	if (scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (u_char *)&rdcap, sizeof(rdcap),
	    2, 20000, NULL, flags | XS_CTL_DATA_IN) != 0) {
		scsipi_printaddr(periph);
		printf("could not get size\n");
		return (0);
	}

	return (_4btol(rdcap.addr) + 1);
}

/*
 * scsipi_test_unit_ready:
 *
 *	Issue a `test unit ready' request.
 */
int
scsipi_test_unit_ready(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_test_unit_ready scsipi_cmd;

	/* some ATAPI drives don't support TEST_UNIT_READY. Sigh */
	if (periph->periph_quirks & PQUIRK_NOTUR)
		return (0);

	bzero(&scsipi_cmd, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = TEST_UNIT_READY;

	return (scsipi_command(periph,
	    (struct scsipi_generic *)&scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, 2, 10000, NULL, flags));
}

/*
 * scsipi_inquire:
 *
 *	Ask the device about itself.
 */
int
scsipi_inquire(periph, inqbuf, flags)
	struct scsipi_periph *periph;
	struct scsipi_inquiry_data *inqbuf;
	int flags;
{
	struct scsipi_inquiry scsipi_cmd;

	bzero(&scsipi_cmd, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = INQUIRY;
	scsipi_cmd.length = sizeof(struct scsipi_inquiry_data);

	return (scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    (u_char *) inqbuf, sizeof(struct scsipi_inquiry_data),
	    2, 10000, NULL, XS_CTL_DATA_IN | flags));
}

/*
 * scsipi_prevent:
 *
 *	Prevent or allow the user to remove the media
 */
int
scsipi_prevent(periph, type, flags)
	struct scsipi_periph *periph;
	int type, flags;
{
	struct scsipi_prevent scsipi_cmd;

	if (periph->periph_quirks & PQUIRK_NODOORLOCK)
		return (0);

	bzero(&scsipi_cmd, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = PREVENT_ALLOW;
	scsipi_cmd.how = type;

	return (scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, 2, 5000, NULL, flags));
}

/*
 * scsipi_start:
 *
 *	Send a START UNIT.
 */
int
scsipi_start(periph, type, flags)
	struct scsipi_periph *periph;
	int type, flags;
{
	struct scsipi_start_stop scsipi_cmd;

	if (periph->periph_quirks & PQUIRK_NOSTARTUNIT)
		return 0;

	bzero(&scsipi_cmd, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = START_STOP;
	scsipi_cmd.byte2 = 0x00;
	scsipi_cmd.how = type;

	return (scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, 2, (type & SSS_START) ? 60000 : 10000, NULL, flags));
}

/*
 * scsipi_done:
 *
 *	This routine is called by an adapter's interrupt handler when
 *	an xfer is completed.
 */
void
scsipi_done(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int s, freezecnt;

	SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
#ifdef SCSIPI_DEBUG
	if (periph->periph_dbflags & SCSIPI_DB1)
		show_scsipi_cmd(xs);
#endif

	s = splbio();
	/*
	 * The resource this command was using is now free.
	 */
	scsipi_put_resource(chan);

	/*
	 * If the command was tagged, free the tag.
	 */
	if (XS_CTL_TAGTYPE(xs) != 0)
		scsipi_put_tag(xs);

	/* Mark the command as `done'. */
	xs->xs_status |= XS_STS_DONE;

	/*
	 * If it's a user level request, bypass all usual completion
	 * processing, let the user work it out.  We take responsibility
	 * for freeing the xs (and restarting the device's queue) when
	 * the user returns.
	 */
	if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
		splx(s);
		SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n"));
		scsipi_user_done(xs);
		SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n"));
		goto out;
	}

#ifdef DIAGNOSTIC
	if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
	    (XS_CTL_ASYNC|XS_CTL_POLL))
		panic("scsipi_done: ASYNC and POLL");
#endif

	/*
	 * If the xfer had an error of any sort, freeze the
	 * periph's queue.  Freeze it again if we were requested
	 * to do so in the xfer.
	 */
	freezecnt = 0;
	if (xs->error != XS_NOERROR)
		freezecnt++;
	if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
		freezecnt++;
	if (freezecnt != 0)
		scsipi_periph_freeze(periph, freezecnt);

	/*
	 * If this was an xfer that was not to complete asynchronously,
	 * let the requesting thread perform error checking/handling
	 * in its context.
	 */
	if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
		splx(s);
		/*
		 * If it's a polling job, just return, to unwind the
		 * call graph.  We don't need to restart the queue,
		 * because polling jobs are treated specially, and
		 * are really only used during crash dumps anyway
		 * (XXX or during boot-time autoconfiguration of
		 * ATAPI devices).
		 */
		if (xs->xs_control & XS_CTL_POLL)
			return;
		wakeup(xs);
		goto out;
	}

	/*
	 * Catch the extremely common case of I/O completing
	 * without error; no use in taking a context switch
	 * if we can handle it in interrupt context.
	 */
	if (xs->error == XS_NOERROR) {
		splx(s);
		(void) scsipi_complete(xs);
		goto out;
	}

	/*
	 * There is an error on this xfer.  Put it on the channel's
	 * completion queue, and wake up the completion thread.
	 */
	TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
	splx(s);
	wakeup(&chan->chan_complete);

 out:
	/*
	 * If there are more xfers on the channel's queue, attempt to
	 * run them.
	 */
	scsipi_run_queue(chan);
}

/*
 * scsipi_complete:
 *
 *	Completion of a scsipi_xfer.  This is the guts of scsipi_done().
 *
 *	NOTE: This routine MUST be called with valid thread context
 *	except for the case where the following two conditions are
 *	true:
 *
 *		xs->error == XS_NOERROR
 *		XS_CTL_ASYNC is set in xs->xs_control
 *
 *	The semantics of this routine can be tricky, so here is an
 *	explanation:
 *
 *		0		Xfer completed successfully.
 *
 *		ERESTART	Xfer had an error, but was restarted.
 *
 *		anything else	Xfer had an error, return value is Unix
 *				errno.
 *
 *	If the return value is anything but ERESTART:
 *
 *		- If XS_CTL_ASYNC is set, `xs' has been freed back to
 *		  the pool.
 *		- If there is a buf associated with the xfer,
 *		  it has been biodone()'d.
 */
int
scsipi_complete(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	struct buf *bp;
	int error, s;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
		panic("scsipi_complete: XS_CTL_ASYNC but no buf");
#endif

	switch (xs->error) {
	case XS_NOERROR:
		error = 0;
		break;

	case XS_SENSE:
	case XS_SHORTSENSE:
		error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
		break;

	case XS_RESOURCE_SHORTAGE:
		/*
		 * XXX Should freeze channel's queue.
		 */
		scsipi_printaddr(periph);
		printf("adapter resource shortage\n");
		/* FALLTHROUGH */

	case XS_BUSY:
		if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
			struct scsipi_max_openings mo;

			/*
			 * We set the openings to active - 1, assuming that
			 * the command that got us here is the first one that
			 * can't fit into the device's queue.  If that's not
			 * the case, I guess we'll find out soon enough.
			 */
			mo.mo_target = periph->periph_target;
			mo.mo_lun = periph->periph_lun;
			mo.mo_openings = periph->periph_active - 1;
#ifdef DIAGNOSTIC
			if (mo.mo_openings < 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in < 0 openings\n");
				panic("scsipi_done");
			}
#endif
			if (mo.mo_openings == 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in 0 openings\n");
				mo.mo_openings = 1;
			}
			scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
			error = ERESTART;
		} else if (xs->xs_retries != 0) {
			xs->xs_retries--;
			/*
			 * Wait one second, and try again.
			 */
			if (xs->xs_control & XS_CTL_POLL)
				delay(1000000);
			else {
				scsipi_periph_freeze(periph, 1);
				timeout(scsipi_periph_timed_thaw, periph, hz);
			}
			error = ERESTART;
		} else
			error = EBUSY;
		break;

	case XS_TIMEOUT:
		if (xs->xs_retries != 0) {
			xs->xs_retries--;
			error = ERESTART;
		} else
			error = EIO;
		break;

	case XS_SELTIMEOUT:
		/* XXX Disable device? */
		error = EIO;
		break;

	case XS_RESET:
		if (xs->xs_retries != 0) {
			xs->xs_retries--;
			error = ERESTART;
		} else
			error = EIO;
		break;

	default:
		scsipi_printaddr(periph);
		printf("invalid return code from adapter: %d\n", xs->error);
		error = EIO;
		break;
	}

	s = splbio();
	if (error == ERESTART) {
		/*
		 * If we get here, the periph has been thawed and frozen
		 * again if we had to issue recovery commands.  Alternatively,
		 * it may have been frozen again and in a timed thaw.  In
		 * any case, we thaw the periph once we re-enqueue the
		 * command.  Once the periph is fully thawed, it will begin
		 * operation again.
		 */
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->xs_status &= ~XS_STS_DONE;
		xs->xs_requeuecnt++;
		error = scsipi_enqueue(xs);
		if (error == 0) {
			scsipi_periph_thaw(periph, 1);
			splx(s);
			return (ERESTART);
		}
	}

	/*
	 * scsipi_done() freezes the queue if not XS_NOERROR.
	 * Thaw it here.
	 */
	if (xs->error != XS_NOERROR)
		scsipi_periph_thaw(periph, 1);

	if ((bp = xs->bp) != NULL) {
		if (error) {
			bp->b_error = error;
			bp->b_flags |= B_ERROR;
			bp->b_resid = bp->b_bcount;
		} else {
			bp->b_error = 0;
			bp->b_resid = xs->resid;
		}
		biodone(bp);
	}

	if (xs->xs_control & XS_CTL_ASYNC)
		scsipi_put_xs(xs);
	splx(s);

	return (error);
}

/*
 * scsipi_enqueue:
 *
 *	Enqueue an xfer on a channel.
 */
int
scsipi_enqueue(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_channel *chan = xs->xs_periph->periph_channel;
	struct scsipi_xfer *qxs;
	int s;

	s = splbio();

	/*
	 * If the xfer is to be polled, and there are already jobs on
	 * the queue, we can't proceed.
	 */
	if ((xs->xs_control & XS_CTL_POLL) != 0 &&
	    TAILQ_FIRST(&chan->chan_queue) != NULL) {
		splx(s);
		xs->error = XS_DRIVER_STUFFUP;
		return (EAGAIN);
	}

	/*
	 * If we have an URGENT xfer, it's an error recovery command
	 * and it should just go on the head of the channel's queue.
	 */
	if (xs->xs_control & XS_CTL_URGENT) {
		TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
		goto out;
	}

	/*
	 * If this xfer has already been on the queue before, we
	 * need to reinsert it in the correct order.  That order is:
	 *
	 *	Immediately before the first xfer for this periph
	 *	with a requeuecnt less than xs->xs_requeuecnt.
	 *
	 * Failing that, at the end of the queue.  (We'll end up
	 * there naturally.)
	 */
	if (xs->xs_requeuecnt != 0) {
		for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
		     qxs = TAILQ_NEXT(qxs, channel_q)) {
			if (qxs->xs_periph == xs->xs_periph &&
			    qxs->xs_requeuecnt < xs->xs_requeuecnt)
				break;
		}
		if (qxs != NULL) {
			TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
			    channel_q);
			goto out;
		}
	}
	TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
 out:
	if (xs->xs_control & XS_CTL_THAW_PERIPH)
		scsipi_periph_thaw(xs->xs_periph, 1);
	splx(s);
	return (0);
}

/*
 * scsipi_run_queue:
 *
 *	Start as many xfers as possible running on the channel.
 */
void
scsipi_run_queue(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	int s;

	for (;;) {
		s = splbio();

		/*
		 * If the channel is frozen, we can't do any work right
		 * now.
		 */
		if (chan->chan_qfreeze != 0) {
			splx(s);
			return;
		}

		/*
		 * Look for work to do, and make sure we can do it.
		 */
		for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
		     xs = TAILQ_NEXT(xs, channel_q)) {
			periph = xs->xs_periph;

			if ((periph->periph_active > periph->periph_openings) ||
			    periph->periph_qfreeze != 0)
				continue;

			if ((periph->periph_flags & PERIPH_RECOVERING) != 0 &&
			    (xs->xs_control & XS_CTL_URGENT) == 0)
				continue;

			/*
			 * We can issue this xfer!
			 */
			goto got_one;
		}

		/*
		 * Can't find any work to do right now.
		 */
		splx(s);
		return;

 got_one:
		/*
		 * Have an xfer to run.  Allocate a resource from
		 * the adapter to run it.  If we can't allocate that
		 * resource, we don't dequeue the xfer.
		 */
		if (scsipi_get_resource(chan) == 0) {
			/*
			 * Adapter is out of resources.  If the adapter
			 * supports it, attempt to grow them.
			 */
			if (scsipi_grow_resources(chan) == 0) {
				/*
				 * Wasn't able to grow resources,
				 * nothing more we can do.
				 */
				if (xs->xs_control & XS_CTL_POLL) {
					scsipi_printaddr(xs->xs_periph);
					printf("polling command but no "
					    "adapter resources\n");
					/* We'll panic shortly... */
				}
				splx(s);
				return;
			}
			/*
			 * scsipi_grow_resources() allocated the resource
			 * for us.
			 */
		}

		/*
		 * We have a resource to run this xfer, do it!
		 */
		TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);

		/*
		 * If the command is to be tagged, allocate a tag ID
		 * for it.
		 */
		if (XS_CTL_TAGTYPE(xs) != 0)
			scsipi_get_tag(xs);
		splx(s);

		scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
	}
#ifdef DIAGNOSTIC
	panic("scsipi_run_queue: impossible");
#endif
}

/*
 * scsipi_execute_xs:
 *
 *	Begin execution of an xfer, waiting for it to complete, if necessary.
 */
int
scsipi_execute_xs(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int async, poll, retries, error, s;

	xs->xs_status &= ~XS_STS_DONE;
	xs->error = XS_NOERROR;
	xs->resid = xs->datalen;
	xs->status = SCSI_OK;

#ifdef SCSIPI_DEBUG
	if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) {
		printf("scsipi_execute_xs: ");
		show_scsipi_xs(xs);
		printf("\n");
	}
#endif

	/*
	 * Deal with command tagging:
	 *
	 *	- If the device's current operating mode doesn't
	 *	  include tagged queueing, clear the tag mask.
	 *
	 *	- If the device's current operating mode *does*
	 *	  include tagged queueing, set the tag_type in
	 *	  the xfer to the appropriate byte for the tag
	 *	  message.
	 */
	if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0) {
		xs->xs_control &= ~XS_CTL_TAGMASK;
		xs->xs_tag_type = 0;
	} else {
		/*
		 * If the request doesn't specify a tag, give Head
		 * tags to URGENT operations and Ordered tags to
		 * everything else.
		 */
		if (XS_CTL_TAGTYPE(xs) == 0) {
			if (xs->xs_control & XS_CTL_URGENT)
				xs->xs_control |= XS_CTL_HEAD_TAG;
			else
				xs->xs_control |= XS_CTL_ORDERED_TAG;
		}

		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_ORDERED_TAG:
			xs->xs_tag_type = MSG_ORDERED_Q_TAG;
			break;

		case XS_CTL_SIMPLE_TAG:
			xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
			break;

		case XS_CTL_HEAD_TAG:
			xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid tag mask 0x%08x\n",
			    XS_CTL_TAGTYPE(xs));
			panic("scsipi_execute_xs");
		}
	}

	/*
	 * If we don't yet have a completion thread, or we are to poll for
	 * completion, clear the ASYNC flag.
	 */
	if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
		xs->xs_control &= ~XS_CTL_ASYNC;

	async = (xs->xs_control & XS_CTL_ASYNC);
	poll = (xs->xs_control & XS_CTL_POLL);
	retries = xs->xs_retries;		/* for polling commands */

#ifdef DIAGNOSTIC
	if (async != 0 && xs->bp == NULL)
		panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
#endif

	/*
	 * Enqueue the transfer.  If we're not polling for completion, this
	 * should ALWAYS return `no error'.
	 */
 try_again:
	error = scsipi_enqueue(xs);
	if (error) {
		if (poll == 0) {
			scsipi_printaddr(periph);
			printf("not polling, but enqueue failed with %d\n",
			    error);
			panic("scsipi_execute_xs");
		}

		scsipi_printaddr(periph);
		printf("failed to enqueue polling command");
		if (retries != 0) {
			printf(", retrying...\n");
			delay(1000000);
			retries--;
			goto try_again;
		}
		printf("\n");
		goto free_xs;
	}

 restarted:
	scsipi_run_queue(chan);

	/*
	 * The xfer is enqueued, and possibly running.  If it's to be
	 * completed asynchronously, just return now.
	 */
	if (async)
		return (EJUSTRETURN);

	/*
	 * Not an asynchronous command; wait for it to complete.
	 */
	while ((xs->xs_status & XS_STS_DONE) == 0) {
		if (poll) {
			scsipi_printaddr(periph);
			printf("polling command not done\n");
			panic("scsipi_execute_xs");
		}
		(void) tsleep(xs, PRIBIO, "xscmd", 0);
	}

	/*
	 * Command is complete.  scsipi_done() has awakened us to perform
	 * the error handling.
	 */
	error = scsipi_complete(xs);
	if (error == ERESTART)
		goto restarted;

	/*
	 * Command completed successfully or fatal error occurred.  Fall
	 * into....
	 */
 free_xs:
	s = splbio();
	scsipi_put_xs(xs);
	splx(s);

	/*
	 * Kick the queue, keep it running in case it stopped for some
	 * reason.
	 */
	scsipi_run_queue(chan);

	return (error);
}
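
/*
 * Example (an illustrative sketch only, not part of this file): a
 * periph driver typically reaches scsipi_execute_xs() through a
 * wrapper that allocates the xfer, fills in the CDB, and runs it:
 *
 *	xs = scsipi_get_xs(periph, flags);
 *	if (xs == NULL)
 *		return (ENOMEM);
 *	memcpy(&xs->cmdstore, cmd, cmdlen);
 *	xs->cmd = &xs->cmdstore;
 *	xs->cmdlen = cmdlen;
 *	xs->data = data_addr;
 *	xs->datalen = datalen;
 *	xs->xs_retries = retries;
 *	xs->timeout = timeout;
 *	error = scsipi_execute_xs(xs);
 *
 * The xfer fields above are the ones used elsewhere in this file;
 * `cmdstore' as the wrapper's CDB storage is an assumption about
 * struct scsipi_xfer, not something defined here.
 */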

/*
 * scsipi_completion_thread:
 *
 *	This is the completion thread.  We wait for errors on
 *	asynchronous xfers, and perform the error handling
 *	function, restarting the command, if necessary.
 */
void
scsipi_completion_thread(arg)
	void *arg;
{
	struct scsipi_channel *chan = arg;
	struct scsipi_xfer *xs;
	int s;

	while ((chan->chan_flags & SCSIPI_CHAN_SHUTDOWN) == 0) {
		s = splbio();
		if ((xs = TAILQ_FIRST(&chan->chan_complete)) == NULL) {
			splx(s);
			(void) tsleep(&chan->chan_complete, PRIBIO,
			    "sccomp", 0);
			continue;
		}
		TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
		splx(s);

		/*
		 * Have an xfer with an error; process it.
		 */
		(void) scsipi_complete(xs);

		/*
		 * Kick the queue; keep it running if it was stopped
		 * for some reason.
		 */
		scsipi_run_queue(chan);
	}

	chan->chan_thread = NULL;

	/* In case parent is waiting for us to exit. */
	wakeup(&chan->chan_thread);

	kthread_exit(0);
}

/*
 * scsipi_create_completion_thread:
 *
 *	Callback to actually create the completion thread.
 */
void
scsipi_create_completion_thread(arg)
	void *arg;
{
	struct scsipi_channel *chan = arg;
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (kthread_create1(scsipi_completion_thread, chan,
	    &chan->chan_thread, "%s:%d", adapt->adapt_dev->dv_xname,
	    chan->chan_channel)) {
		printf("%s: unable to create completion thread for "
		    "channel %d\n", adapt->adapt_dev->dv_xname,
		    chan->chan_channel);
		panic("scsipi_create_completion_thread");
	}
}

/*
 * scsipi_async_event:
 *
 *	Handle an asynchronous event from an adapter.
 */
void
scsipi_async_event(chan, event, arg)
	struct scsipi_channel *chan;
	scsipi_async_event_t event;
	void *arg;
{
	int s;

	s = splbio();
	switch (event) {
	case ASYNC_EVENT_MAX_OPENINGS:
		scsipi_async_event_max_openings(chan,
		    (struct scsipi_max_openings *)arg);
		break;

	case ASYNC_EVENT_XFER_MODE:
		scsipi_async_event_xfer_mode(chan,
		    (struct scsipi_xfer_mode *)arg);
		break;
	}
	splx(s);
}

/*
 * scsipi_print_xfer_mode:
 *
 *	Print a periph's capabilities.
 */
void
scsipi_print_xfer_mode(periph)
	struct scsipi_periph *periph;
{
	int period, freq, speed, mbs;

	if ((periph->periph_flags & PERIPH_MODE_VALID) == 0)
		return;

	printf("%s: ", periph->periph_dev->dv_xname);
	if (periph->periph_mode & PERIPH_CAP_SYNC) {
		period = scsipi_sync_factor_to_period(periph->periph_period);
		printf("Sync (%d.%dns offset %d)",
		    period / 10, period % 10, periph->periph_offset);
	} else
		printf("Async");

	if (periph->periph_mode & PERIPH_CAP_WIDE32)
		printf(", 32-bit");
	else if (periph->periph_mode & PERIPH_CAP_WIDE16)
		printf(", 16-bit");
	else
		printf(", 8-bit");

	if (periph->periph_mode & PERIPH_CAP_SYNC) {
		freq = scsipi_sync_factor_to_freq(periph->periph_period);
		speed = freq;
		if (periph->periph_mode & PERIPH_CAP_WIDE32)
			speed *= 4;
		else if (periph->periph_mode & PERIPH_CAP_WIDE16)
			speed *= 2;
		mbs = speed / 1000;
		if (mbs > 0)
			printf(" (%d.%03dMB/s)", mbs, speed % 1000);
		else
			printf(" (%dKB/s)", speed % 1000);
	}

	printf(" transfers");

	if (periph->periph_mode & PERIPH_CAP_TQING)
		printf(", tagged queueing");

	printf("\n");
}

/*
 * scsipi_async_event_max_openings:
 *
 *	Update the maximum number of outstanding commands a
 *	device may have.
 */
void
scsipi_async_event_max_openings(chan, mo)
	struct scsipi_channel *chan;
	struct scsipi_max_openings *mo;
{
	struct scsipi_periph *periph;
	int minlun, maxlun;

	if (mo->mo_lun == -1) {
		/*
		 * Wildcarded; apply it to all LUNs.
		 */
		minlun = 0;
		maxlun = chan->chan_nluns - 1;
	} else
		minlun = maxlun = mo->mo_lun;

	for (; minlun <= maxlun; minlun++) {
		periph = scsipi_lookup_periph(chan, mo->mo_target, minlun);
		if (periph == NULL)
			continue;

		if (mo->mo_openings < periph->periph_openings)
			periph->periph_openings = mo->mo_openings;
		else if (mo->mo_openings > periph->periph_openings &&
		    (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
			periph->periph_openings = mo->mo_openings;
	}
}

/*
 * scsipi_async_event_xfer_mode:
 *
 *	Update the xfer mode for all periphs sharing the
 *	specified I_T Nexus.
 */
void
scsipi_async_event_xfer_mode(chan, xm)
	struct scsipi_channel *chan;
	struct scsipi_xfer_mode *xm;
{
	struct scsipi_periph *periph;
	int lun, announce, mode, period, offset;

	for (lun = 0; lun < chan->chan_nluns; lun++) {
		periph = scsipi_lookup_periph(chan, xm->xm_target, lun);
		if (periph == NULL)
			continue;
		announce = 0;

		/*
		 * Clamp the xfer mode down to this periph's capabilities.
		 */
		mode = xm->xm_mode & periph->periph_cap;
		if (mode & PERIPH_CAP_SYNC) {
			period = xm->xm_period;
			offset = xm->xm_offset;
		} else {
			period = 0;
			offset = 0;
		}

		/*
		 * If we do not have a valid xfer mode yet, or the parameters
		 * are different, announce them.
		 */
		if ((periph->periph_flags & PERIPH_MODE_VALID) == 0 ||
		    periph->periph_mode != mode ||
		    periph->periph_period != period ||
		    periph->periph_offset != offset)
			announce = 1;

		periph->periph_mode = mode;
		periph->periph_period = period;
		periph->periph_offset = offset;
		periph->periph_flags |= PERIPH_MODE_VALID;

		if (announce)
			scsipi_print_xfer_mode(periph);
	}
}

/*
 * scsipi_set_xfer_mode:
 *
 *	Set the xfer mode for the specified I_T Nexus.
 */
void
scsipi_set_xfer_mode(chan, target, immed)
	struct scsipi_channel *chan;
	int target, immed;
{
	struct scsipi_xfer_mode xm;
	struct scsipi_periph *itperiph;
	int lun, s;

	/*
	 * Go to the minimal xfer mode.
	 */
	xm.xm_target = target;
	xm.xm_mode = 0;
	xm.xm_period = 0;			/* ignored */
	xm.xm_offset = 0;			/* ignored */

	/*
	 * Find the first LUN we know about on this I_T Nexus.
	 */
	for (lun = 0; lun < chan->chan_nluns; lun++) {
		itperiph = scsipi_lookup_periph(chan, target, lun);
		if (itperiph != NULL)
			break;
	}
	if (itperiph != NULL)
		xm.xm_mode = itperiph->periph_cap;

	/*
	 * Now issue the request to the adapter.
	 */
	s = splbio();
	scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm);
	splx(s);

	/*
	 * If we want this to happen immediately, issue a dummy command,
	 * since most adapters can't really negotiate unless they're
	 * executing a job.
	 */
	if (immed != 0 && itperiph != NULL) {
		(void) scsipi_test_unit_ready(itperiph,
		    XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
		    XS_CTL_IGNORE_NOT_READY |
		    XS_CTL_IGNORE_MEDIA_CHANGE);
	}
}

/*
 * scsipi_adapter_addref:
 *
 *	Add a reference to the adapter pointed to by the provided
 *	link, enabling the adapter if necessary.
 */
int
scsipi_adapter_addref(adapt)
	struct scsipi_adapter *adapt;
{
	int s, error = 0;

	s = splbio();
	if (adapt->adapt_refcnt++ == 0 && adapt->adapt_enable != NULL) {
		error = (*adapt->adapt_enable)(adapt->adapt_dev, 1);
		if (error)
			adapt->adapt_refcnt--;
	}
	splx(s);
	return (error);
}

/*
 * scsipi_adapter_delref:
 *
 *	Delete a reference to the adapter pointed to by the provided
 *	link, disabling the adapter if possible.
 */
void
scsipi_adapter_delref(adapt)
	struct scsipi_adapter *adapt;
{
	int s;

	s = splbio();
	if (adapt->adapt_refcnt-- == 1 && adapt->adapt_enable != NULL)
		(void) (*adapt->adapt_enable)(adapt->adapt_dev, 0);
	splx(s);
}
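
/*
 * Example (illustrative only): a periph driver would typically
 * bracket use of the device with these calls, e.g. in its open
 * routine:
 *
 *	if ((error = scsipi_adapter_addref(adapt)) != 0)
 *		return (error);
 *
 * and drop the reference with scsipi_adapter_delref(adapt) in its
 * close routine.
 */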

struct scsipi_syncparam {
	int	ss_factor;
	int	ss_period;	/* ns * 10 */
} scsipi_syncparams[] = {
	{ 0x0a,		250 },
	{ 0x0b,		303 },
	{ 0x0c,		500 },
};
const int scsipi_nsyncparams =
    sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);
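
/*
 * Worked example: factor 0x0a maps to ss_period 250 (25.0ns), so
 * scsipi_sync_factor_to_freq(0x0a) returns 10000000 / 250 == 40000,
 * i.e. the frequency is reported in kHz (40MHz here).  Factors not
 * in the table fall back to the linear rule period = factor * 4ns.
 */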

int
scsipi_sync_period_to_factor(period)
	int period;		/* ns * 10 */
{
	int i;

	for (i = 0; i < scsipi_nsyncparams; i++) {
		if (period <= scsipi_syncparams[i].ss_period)
			return (scsipi_syncparams[i].ss_factor);
	}

	return ((period / 10) / 4);
}

int
scsipi_sync_factor_to_period(factor)
	int factor;
{
	int i;

	for (i = 0; i < scsipi_nsyncparams; i++) {
		if (factor == scsipi_syncparams[i].ss_factor)
			return (scsipi_syncparams[i].ss_period);
	}

	return ((factor * 4) * 10);
}

int
scsipi_sync_factor_to_freq(factor)
	int factor;
{
	int i;

	for (i = 0; i < scsipi_nsyncparams; i++) {
		if (factor == scsipi_syncparams[i].ss_factor)
			return (10000000 / scsipi_syncparams[i].ss_period);
	}

	return (10000000 / ((factor * 4) * 10));
}

#ifdef SCSIPI_DEBUG
/*
 * Given a scsipi_xfer, dump the request, in all its glory
 */
void
show_scsipi_xs(xs)
	struct scsipi_xfer *xs;
{

	printf("xs(%p): ", xs);
	printf("xs_control(0x%08x)", xs->xs_control);
	printf("xs_status(0x%08x)", xs->xs_status);
	printf("periph(%p)", xs->xs_periph);
	printf("retr(0x%x)", xs->xs_retries);
	printf("timo(0x%x)", xs->timeout);
	printf("cmd(%p)", xs->cmd);
	printf("len(0x%x)", xs->cmdlen);
	printf("data(%p)", xs->data);
	printf("len(0x%x)", xs->datalen);
	printf("res(0x%x)", xs->resid);
	printf("err(0x%x)", xs->error);
	printf("bp(%p)", xs->bp);
	show_scsipi_cmd(xs);
}

void
show_scsipi_cmd(xs)
	struct scsipi_xfer *xs;
{
	u_char *b = (u_char *) xs->cmd;
	int i = 0;

	scsipi_printaddr(xs->xs_periph);
	printf(" command: ");

	if ((xs->xs_control & XS_CTL_RESET) == 0) {
		while (i < xs->cmdlen) {
			if (i)
				printf(",");
			printf("0x%x", b[i++]);
		}
		printf("-[%d bytes]\n", xs->datalen);
		if (xs->datalen)
			show_mem(xs->data, min(64, xs->datalen));
	} else
		printf("-RESET-\n");
}

void
show_mem(address, num)
	u_char *address;
	int num;
{
	int x;

	printf("------------------------------");
	for (x = 0; x < num; x++) {
		if ((x % 16) == 0)
			printf("\n%03d: ", x);
		printf("%02x ", *address++);
	}
	printf("\n------------------------------\n");
}
#endif /* SCSIPI_DEBUG */