Home | History | Annotate | Line # | Download | only in scsipi
scsipi_base.c revision 1.55.2.1
      1 /*	$NetBSD: scsipi_base.c,v 1.55.2.1 2001/10/01 12:46:21 fvdl Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
      9  * Simulation Facility, NASA Ames Research Center.
     10  *
     11  * Redistribution and use in source and binary forms, with or without
     12  * modification, are permitted provided that the following conditions
     13  * are met:
     14  * 1. Redistributions of source code must retain the above copyright
     15  *    notice, this list of conditions and the following disclaimer.
     16  * 2. Redistributions in binary form must reproduce the above copyright
     17  *    notice, this list of conditions and the following disclaimer in the
     18  *    documentation and/or other materials provided with the distribution.
     19  * 3. All advertising materials mentioning features or use of this software
     20  *    must display the following acknowledgement:
     21  *        This product includes software developed by the NetBSD
     22  *        Foundation, Inc. and its contributors.
     23  * 4. Neither the name of The NetBSD Foundation nor the names of its
     24  *    contributors may be used to endorse or promote products derived
     25  *    from this software without specific prior written permission.
     26  *
     27  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     28  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     29  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     30  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     31  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     32  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     33  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     34  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     35  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     36  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     37  * POSSIBILITY OF SUCH DAMAGE.
     38  */
     39 
     40 #include "opt_scsi.h"
     41 
     42 #include <sys/types.h>
     43 #include <sys/param.h>
     44 #include <sys/systm.h>
     45 #include <sys/kernel.h>
     46 #include <sys/buf.h>
     47 #include <sys/uio.h>
     48 #include <sys/malloc.h>
     49 #include <sys/pool.h>
     50 #include <sys/errno.h>
     51 #include <sys/device.h>
     52 #include <sys/proc.h>
     53 #include <sys/kthread.h>
     54 
     55 #include <dev/scsipi/scsipi_all.h>
     56 #include <dev/scsipi/scsipi_disk.h>
     57 #include <dev/scsipi/scsipiconf.h>
     58 #include <dev/scsipi/scsipi_base.h>
     59 
     60 #include <dev/scsipi/scsi_all.h>
     61 #include <dev/scsipi/scsi_message.h>
     62 
     63 int	scsipi_complete __P((struct scsipi_xfer *));
     64 void	scsipi_request_sense __P((struct scsipi_xfer *));
     65 int	scsipi_enqueue __P((struct scsipi_xfer *));
     66 void	scsipi_run_queue __P((struct scsipi_channel *chan));
     67 
     68 void	scsipi_completion_thread __P((void *));
     69 
     70 void	scsipi_get_tag __P((struct scsipi_xfer *));
     71 void	scsipi_put_tag __P((struct scsipi_xfer *));
     72 
     73 int	scsipi_get_resource __P((struct scsipi_channel *));
     74 void	scsipi_put_resource __P((struct scsipi_channel *));
     75 __inline int scsipi_grow_resources __P((struct scsipi_channel *));
     76 
     77 void	scsipi_async_event_max_openings __P((struct scsipi_channel *,
     78 	    struct scsipi_max_openings *));
     79 void	scsipi_async_event_xfer_mode __P((struct scsipi_channel *,
     80 	    struct scsipi_xfer_mode *));
     81 void	scsipi_async_event_channel_reset __P((struct scsipi_channel *));
     82 
     83 struct pool scsipi_xfer_pool;
     84 
     85 /*
     86  * scsipi_init:
     87  *
     88  *	Called when a scsibus or atapibus is attached to the system
     89  *	to initialize shared data structures.
     90  */
     91 void
     92 scsipi_init()
     93 {
     94 	static int scsipi_init_done;
     95 
     96 	if (scsipi_init_done)
     97 		return;
     98 	scsipi_init_done = 1;
     99 
    100 	/* Initialize the scsipi_xfer pool. */
    101 	pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
    102 	    0, 0, "scxspl", 0, NULL, NULL, M_DEVBUF);
    103 }
    104 
    105 /*
    106  * scsipi_channel_init:
    107  *
    108  *	Initialize a scsipi_channel when it is attached.
    109  */
    110 int
    111 scsipi_channel_init(chan)
    112 	struct scsipi_channel *chan;
    113 {
    114 	size_t nbytes;
    115 	int i;
    116 
    117 	/* Initialize shared data. */
    118 	scsipi_init();
    119 
    120 	/* Initialize the queues. */
    121 	TAILQ_INIT(&chan->chan_queue);
    122 	TAILQ_INIT(&chan->chan_complete);
    123 
    124 	nbytes = chan->chan_ntargets * sizeof(struct scsipi_periph **);
    125 	chan->chan_periphs = malloc(nbytes, M_DEVBUF, M_NOWAIT);
    126 	if (chan->chan_periphs == NULL)
    127 		return (ENOMEM);
    128 
    129 
    130 	nbytes = chan->chan_nluns * sizeof(struct scsipi_periph *);
    131 	for (i = 0; i < chan->chan_ntargets; i++) {
    132 		chan->chan_periphs[i] = malloc(nbytes, M_DEVBUF, M_NOWAIT);
    133 		if (chan->chan_periphs[i] == NULL) {
    134 			while (--i >= 0) {
    135 				free(chan->chan_periphs[i], M_DEVBUF);
    136 			}
    137 			return (ENOMEM);
    138 		}
    139 		memset(chan->chan_periphs[i], 0, nbytes);
    140 	}
    141 
    142 	/*
    143 	 * Create the asynchronous completion thread.
    144 	 */
    145 	kthread_create(scsipi_create_completion_thread, chan);
    146 	return (0);
    147 }
    148 
    149 /*
    150  * scsipi_channel_shutdown:
    151  *
    152  *	Shutdown a scsipi_channel.
    153  */
    154 void
    155 scsipi_channel_shutdown(chan)
    156 	struct scsipi_channel *chan;
    157 {
    158 
    159 	/*
    160 	 * Shut down the completion thread.
    161 	 */
    162 	chan->chan_flags |= SCSIPI_CHAN_SHUTDOWN;
    163 	wakeup(&chan->chan_complete);
    164 
    165 	/*
    166 	 * Now wait for the thread to exit.
    167 	 */
    168 	while (chan->chan_thread != NULL)
    169 		(void) tsleep(&chan->chan_thread, PRIBIO, "scshut", 0);
    170 }
    171 
    172 /*
    173  * scsipi_insert_periph:
    174  *
    175  *	Insert a periph into the channel.
    176  */
    177 void
    178 scsipi_insert_periph(chan, periph)
    179 	struct scsipi_channel *chan;
    180 	struct scsipi_periph *periph;
    181 {
    182 	int s;
    183 
    184 	s = splbio();
    185 	chan->chan_periphs[periph->periph_target][periph->periph_lun] = periph;
    186 	splx(s);
    187 }
    188 
    189 /*
    190  * scsipi_remove_periph:
    191  *
    192  *	Remove a periph from the channel.
    193  */
    194 void
    195 scsipi_remove_periph(chan, periph)
    196 	struct scsipi_channel *chan;
    197 	struct scsipi_periph *periph;
    198 {
    199 	int s;
    200 
    201 	s = splbio();
    202 	chan->chan_periphs[periph->periph_target][periph->periph_lun] = NULL;
    203 	splx(s);
    204 }
    205 
    206 /*
    207  * scsipi_lookup_periph:
    208  *
    209  *	Lookup a periph on the specified channel.
    210  */
    211 struct scsipi_periph *
    212 scsipi_lookup_periph(chan, target, lun)
    213 	struct scsipi_channel *chan;
    214 	int target, lun;
    215 {
    216 	struct scsipi_periph *periph;
    217 	int s;
    218 
    219 	if (target >= chan->chan_ntargets ||
    220 	    lun >= chan->chan_nluns)
    221 		return (NULL);
    222 
    223 	s = splbio();
    224 	periph = chan->chan_periphs[target][lun];
    225 	splx(s);
    226 
    227 	return (periph);
    228 }
    229 
    230 /*
    231  * scsipi_get_resource:
    232  *
    233  *	Allocate a single xfer `resource' from the channel.
    234  *
    235  *	NOTE: Must be called at splbio().
    236  */
    237 int
    238 scsipi_get_resource(chan)
    239 	struct scsipi_channel *chan;
    240 {
    241 	struct scsipi_adapter *adapt = chan->chan_adapter;
    242 
    243 	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
    244 		if (chan->chan_openings > 0) {
    245 			chan->chan_openings--;
    246 			return (1);
    247 		}
    248 		return (0);
    249 	}
    250 
    251 	if (adapt->adapt_openings > 0) {
    252 		adapt->adapt_openings--;
    253 		return (1);
    254 	}
    255 	return (0);
    256 }
    257 
    258 /*
    259  * scsipi_grow_resources:
    260  *
    261  *	Attempt to grow resources for a channel.  If this succeeds,
    262  *	we allocate one for our caller.
    263  *
    264  *	NOTE: Must be called at splbio().
    265  */
    266 __inline int
    267 scsipi_grow_resources(chan)
    268 	struct scsipi_channel *chan;
    269 {
    270 
    271 	if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
    272 		scsipi_adapter_request(chan, ADAPTER_REQ_GROW_RESOURCES, NULL);
    273 		return (scsipi_get_resource(chan));
    274 	}
    275 
    276 	return (0);
    277 }
    278 
    279 /*
    280  * scsipi_put_resource:
    281  *
    282  *	Free a single xfer `resource' to the channel.
    283  *
    284  *	NOTE: Must be called at splbio().
    285  */
    286 void
    287 scsipi_put_resource(chan)
    288 	struct scsipi_channel *chan;
    289 {
    290 	struct scsipi_adapter *adapt = chan->chan_adapter;
    291 
    292 	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
    293 		chan->chan_openings++;
    294 	else
    295 		adapt->adapt_openings++;
    296 }
    297 
/*
 * scsipi_get_tag:
 *
 *	Get a tag ID for the specified xfer by claiming the first
 *	free bit in the periph's tag bitmap.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_get_tag(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int word, bit, tag;

	/* Scan the free-tag bitmap for the first word with a bit set. */
	for (word = 0; word < PERIPH_NTAGWORDS; word++) {
		bit = ffs(periph->periph_freetags[word]);
		if (bit != 0)
			break;
	}
#ifdef DIAGNOSTIC
	if (word == PERIPH_NTAGWORDS) {
		scsipi_printaddr(periph);
		printf("no free tags\n");
		panic("scsipi_get_tag");
	}
#endif
	/*
	 * NOTE(review): if the bitmap were exhausted and DIAGNOSTIC
	 * were not defined, `bit' would be 0 (so `bit - 1' is -1) and
	 * `word' out of range below -- callers must never request more
	 * tags than there are openings.
	 */

	/* ffs() is 1-based; convert to a 0-based bit index. */
	bit -= 1;
	periph->periph_freetags[word] &= ~(1 << bit);
	tag = (word << 5) | bit;	/* 32 tags per bitmap word */

	/* XXX Should eventually disallow this completely. */
	if (tag >= periph->periph_openings) {
		scsipi_printaddr(periph);
		printf("WARNING: tag %d greater than available openings %d\n",
		    tag, periph->periph_openings);
	}

	xs->xs_tag_id = tag;
}
    338 
    339 /*
    340  * scsipi_put_tag:
    341  *
    342  *	Put the tag ID for the specified xfer back into the pool.
    343  *
    344  *	NOTE: Must be called at splbio().
    345  */
    346 void
    347 scsipi_put_tag(xs)
    348 	struct scsipi_xfer *xs;
    349 {
    350 	struct scsipi_periph *periph = xs->xs_periph;
    351 	int word, bit;
    352 
    353 	word = xs->xs_tag_id >> 5;
    354 	bit = xs->xs_tag_id & 0x1f;
    355 
    356 	periph->periph_freetags[word] |= (1 << bit);
    357 }
    358 
    359 /*
    360  * scsipi_get_xs:
    361  *
    362  *	Allocate an xfer descriptor and associate it with the
    363  *	specified peripherial.  If the peripherial has no more
    364  *	available command openings, we either block waiting for
    365  *	one to become available, or fail.
    366  */
    367 struct scsipi_xfer *
    368 scsipi_get_xs(periph, flags)
    369 	struct scsipi_periph *periph;
    370 	int flags;
    371 {
    372 	struct scsipi_xfer *xs;
    373 	int s;
    374 
    375 	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));
    376 
    377 	/*
    378 	 * If we're cold, make sure we poll.
    379 	 */
    380 	if (cold)
    381 		flags |= XS_CTL_NOSLEEP | XS_CTL_POLL;
    382 
    383 #ifdef DIAGNOSTIC
    384 	/*
    385 	 * URGENT commands can never be ASYNC.
    386 	 */
    387 	if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
    388 	    (XS_CTL_URGENT|XS_CTL_ASYNC)) {
    389 		scsipi_printaddr(periph);
    390 		printf("URGENT and ASYNC\n");
    391 		panic("scsipi_get_xs");
    392 	}
    393 #endif
    394 
    395 	s = splbio();
    396 	/*
    397 	 * Wait for a command opening to become available.  Rules:
    398 	 *
    399 	 *	- All xfers must wait for an available opening.
    400 	 *	  Exception: URGENT xfers can proceed when
    401 	 *	  active == openings, because we use the opening
    402 	 *	  of the command we're recovering for.
    403 	 *	- if the periph has sense pending, only URGENT & REQSENSE
    404 	 *	  xfers may proceed.
    405 	 *
    406 	 *	- If the periph is recovering, only URGENT xfers may
    407 	 *	  proceed.
    408 	 *
    409 	 *	- If the periph is currently executing a recovery
    410 	 *	  command, URGENT commands must block, because only
    411 	 *	  one recovery command can execute at a time.
    412 	 */
    413 	for (;;) {
    414 		if (flags & XS_CTL_URGENT) {
    415 			if (periph->periph_active > periph->periph_openings)
    416 				goto wait_for_opening;
    417 			if (periph->periph_flags & PERIPH_SENSE) {
    418 				if ((flags & XS_CTL_REQSENSE) == 0)
    419 					goto wait_for_opening;
    420 			} else {
    421 				if ((periph->periph_flags &
    422 				    PERIPH_RECOVERY_ACTIVE) != 0)
    423 					goto wait_for_opening;
    424 				periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
    425 			}
    426 			break;
    427 		}
    428 		if (periph->periph_active >= periph->periph_openings ||
    429 		    (periph->periph_flags & PERIPH_RECOVERING) != 0)
    430 			goto wait_for_opening;
    431 		periph->periph_active++;
    432 		break;
    433 
    434  wait_for_opening:
    435 		if (flags & XS_CTL_NOSLEEP) {
    436 			splx(s);
    437 			return (NULL);
    438 		}
    439 		SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
    440 		periph->periph_flags |= PERIPH_WAITING;
    441 		(void) tsleep(periph, PRIBIO, "getxs", 0);
    442 	}
    443 	SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
    444 	xs = pool_get(&scsipi_xfer_pool,
    445 	    ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
    446 	if (xs == NULL) {
    447 		if (flags & XS_CTL_URGENT) {
    448 			if ((flags & XS_CTL_REQSENSE) == 0)
    449 				periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
    450 		} else
    451 			periph->periph_active--;
    452 		scsipi_printaddr(periph);
    453 		printf("unable to allocate %sscsipi_xfer\n",
    454 		    (flags & XS_CTL_URGENT) ? "URGENT " : "");
    455 	}
    456 	splx(s);
    457 
    458 	SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));
    459 
    460 	if (xs != NULL) {
    461 		callout_init(&xs->xs_callout);
    462 		memset(xs, 0, sizeof(*xs));
    463 		xs->xs_periph = periph;
    464 		xs->xs_control = flags;
    465 		xs->xs_status = 0;
    466 		s = splbio();
    467 		TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
    468 		splx(s);
    469 	}
    470 	return (xs);
    471 }
    472 
    473 /*
    474  * scsipi_put_xs:
    475  *
    476  *	Release an xfer descriptor, decreasing the outstanding command
    477  *	count for the peripherial.  If there is a thread waiting for
    478  *	an opening, wake it up.  If not, kick any queued I/O the
    479  *	peripherial may have.
    480  *
    481  *	NOTE: Must be called at splbio().
    482  */
    483 void
    484 scsipi_put_xs(xs)
    485 	struct scsipi_xfer *xs;
    486 {
    487 	struct scsipi_periph *periph = xs->xs_periph;
    488 	int flags = xs->xs_control;
    489 
    490 	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));
    491 
    492 	TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
    493 	pool_put(&scsipi_xfer_pool, xs);
    494 
    495 #ifdef DIAGNOSTIC
    496 	if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
    497 	    periph->periph_active == 0) {
    498 		scsipi_printaddr(periph);
    499 		printf("recovery without a command to recovery for\n");
    500 		panic("scsipi_put_xs");
    501 	}
    502 #endif
    503 
    504 	if (flags & XS_CTL_URGENT) {
    505 		if ((flags & XS_CTL_REQSENSE) == 0)
    506 			periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
    507 	} else
    508 		periph->periph_active--;
    509 	if (periph->periph_active == 0 &&
    510 	    (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
    511 		periph->periph_flags &= ~PERIPH_WAITDRAIN;
    512 		wakeup(&periph->periph_active);
    513 	}
    514 
    515 	if (periph->periph_flags & PERIPH_WAITING) {
    516 		periph->periph_flags &= ~PERIPH_WAITING;
    517 		wakeup(periph);
    518 	} else {
    519 		if (periph->periph_switch->psw_start != NULL) {
    520 			SC_DEBUG(periph, SCSIPI_DB2,
    521 			    ("calling private start()\n"));
    522 			(*periph->periph_switch->psw_start)(periph);
    523 		}
    524 	}
    525 }
    526 
    527 /*
    528  * scsipi_channel_freeze:
    529  *
    530  *	Freeze a channel's xfer queue.
    531  */
    532 void
    533 scsipi_channel_freeze(chan, count)
    534 	struct scsipi_channel *chan;
    535 	int count;
    536 {
    537 	int s;
    538 
    539 	s = splbio();
    540 	chan->chan_qfreeze += count;
    541 	splx(s);
    542 }
    543 
    544 /*
    545  * scsipi_channel_thaw:
    546  *
    547  *	Thaw a channel's xfer queue.
    548  */
    549 void
    550 scsipi_channel_thaw(chan, count)
    551 	struct scsipi_channel *chan;
    552 	int count;
    553 {
    554 	int s;
    555 
    556 	s = splbio();
    557 	chan->chan_qfreeze -= count;
    558 	/*
    559 	 * Don't let the freeze count go negative.
    560 	 *
    561 	 * Presumably the adapter driver could keep track of this,
    562 	 * but it might just be easier to do this here so as to allow
    563 	 * multiple callers, including those outside the adapter driver.
    564 	 */
    565 	if (chan->chan_qfreeze < 0) {
    566 		chan->chan_qfreeze = 0;
    567 	}
    568 	splx(s);
    569 	/*
    570 	 * Kick the channel's queue here.  Note, we may be running in
    571 	 * interrupt context (softclock or HBA's interrupt), so the adapter
    572 	 * driver had better not sleep.
    573 	 */
    574 	if (chan->chan_qfreeze == 0)
    575 		scsipi_run_queue(chan);
    576 }
    577 
/*
 * scsipi_channel_timed_thaw:
 *
 *	Callout handler: thaw a channel after some time has expired.
 *	This will also run the channel's queue if the freeze count
 *	has reached 0.
 */
void
scsipi_channel_timed_thaw(arg)
	void *arg;
{

	/* The callout argument is the channel itself. */
	scsipi_channel_thaw((struct scsipi_channel *)arg, 1);
}
    592 
    593 /*
    594  * scsipi_periph_freeze:
    595  *
    596  *	Freeze a device's xfer queue.
    597  */
    598 void
    599 scsipi_periph_freeze(periph, count)
    600 	struct scsipi_periph *periph;
    601 	int count;
    602 {
    603 	int s;
    604 
    605 	s = splbio();
    606 	periph->periph_qfreeze += count;
    607 	splx(s);
    608 }
    609 
/*
 * scsipi_periph_thaw:
 *
 *	Thaw a device's xfer queue.  If the count reaches zero and a
 *	thread is sleeping on the periph (PERIPH_WAITING), wake it.
 */
void
scsipi_periph_thaw(periph, count)
	struct scsipi_periph *periph;
	int count;
{
	int s;

	s = splbio();
	periph->periph_qfreeze -= count;
#ifdef DIAGNOSTIC
	/* A negative count means unbalanced freeze/thaw calls. */
	if (periph->periph_qfreeze < 0) {
		static const char pc[] = "periph freeze count < 0";
		scsipi_printaddr(periph);
		printf("%s\n", pc);
		panic(pc);
	}
#endif
	/* Fully thawed with a sleeper waiting?  Wake it up. */
	if (periph->periph_qfreeze == 0 &&
	    (periph->periph_flags & PERIPH_WAITING) != 0)
		wakeup(periph);
	splx(s);
}
    637 
/*
 * scsipi_periph_timed_thaw:
 *
 *	Callout handler: thaw a device after some time has expired,
 *	then arrange for the channel's queue to be run.
 */
void
scsipi_periph_timed_thaw(arg)
	void *arg;
{
	int s;
	struct scsipi_periph *periph = arg;

	/* Cancel any still-pending thaw callout before thawing. */
	callout_stop(&periph->periph_callout);

	s = splbio();
	scsipi_periph_thaw(periph, 1);
	if ((periph->periph_channel->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
		/*
		 * Kick the channel's queue here.  Note, we're running in
		 * interrupt context (softclock), so the adapter driver
		 * had better not sleep.
		 */
		scsipi_run_queue(periph->periph_channel);
	} else {
		/*
		 * Tell the completion thread to kick the channel's queue here.
		 */
		periph->periph_channel->chan_flags |= SCSIPI_CHAN_KICK;
		wakeup(&periph->periph_channel->chan_complete);
	}
	splx(s);
}
    670 
    671 /*
    672  * scsipi_wait_drain:
    673  *
    674  *	Wait for a periph's pending xfers to drain.
    675  */
    676 void
    677 scsipi_wait_drain(periph)
    678 	struct scsipi_periph *periph;
    679 {
    680 	int s;
    681 
    682 	s = splbio();
    683 	while (periph->periph_active != 0) {
    684 		periph->periph_flags |= PERIPH_WAITDRAIN;
    685 		(void) tsleep(&periph->periph_active, PRIBIO, "sxdrn", 0);
    686 	}
    687 	splx(s);
    688 }
    689 
    690 /*
    691  * scsipi_kill_pending:
    692  *
    693  *	Kill off all pending xfers for a periph.
    694  *
    695  *	NOTE: Must be called at splbio().
    696  */
    697 void
    698 scsipi_kill_pending(periph)
    699 	struct scsipi_periph *periph;
    700 {
    701 
    702 	(*periph->periph_channel->chan_bustype->bustype_kill_pending)(periph);
    703 #ifdef DIAGNOSTIC
    704 	if (TAILQ_FIRST(&periph->periph_xferq) != NULL)
    705 		panic("scsipi_kill_pending");
    706 #endif
    707 	scsipi_wait_drain(periph);
    708 }
    709 
    710 /*
    711  * scsipi_interpret_sense:
    712  *
    713  *	Look at the returned sense and act on the error, determining
    714  *	the unix error number to pass back.  (0 = report no error)
    715  *
    716  *	NOTE: If we return ERESTART, we are expected to haved
    717  *	thawed the device!
    718  *
    719  *	THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
    720  */
    721 int
    722 scsipi_interpret_sense(xs)
    723 	struct scsipi_xfer *xs;
    724 {
    725 	struct scsipi_sense_data *sense;
    726 	struct scsipi_periph *periph = xs->xs_periph;
    727 	u_int8_t key;
    728 	u_int32_t info;
    729 	int error;
    730 #ifndef	SCSIVERBOSE
    731 	static char *error_mes[] = {
    732 		"soft error (corrected)",
    733 		"not ready", "medium error",
    734 		"non-media hardware failure", "illegal request",
    735 		"unit attention", "readonly device",
    736 		"no data found", "vendor unique",
    737 		"copy aborted", "command aborted",
    738 		"search returned equal", "volume overflow",
    739 		"verify miscompare", "unknown error key"
    740 	};
    741 #endif
    742 
    743 	sense = &xs->sense.scsi_sense;
    744 #ifdef SCSIPI_DEBUG
    745 	if (periph->periph_flags & SCSIPI_DB1) {
    746 		int count;
    747 		scsipi_printaddr(periph);
    748 		printf(" sense debug information:\n");
    749 		printf("\tcode 0x%x valid 0x%x\n",
    750 			sense->error_code & SSD_ERRCODE,
    751 			sense->error_code & SSD_ERRCODE_VALID ? 1 : 0);
    752 		printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
    753 			sense->segment,
    754 			sense->flags & SSD_KEY,
    755 			sense->flags & SSD_ILI ? 1 : 0,
    756 			sense->flags & SSD_EOM ? 1 : 0,
    757 			sense->flags & SSD_FILEMARK ? 1 : 0);
    758 		printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d "
    759 			"extra bytes\n",
    760 			sense->info[0],
    761 			sense->info[1],
    762 			sense->info[2],
    763 			sense->info[3],
    764 			sense->extra_len);
    765 		printf("\textra: ");
    766 		for (count = 0; count < ADD_BYTES_LIM(sense); count++)
    767 			printf("0x%x ", sense->cmd_spec_info[count]);
    768 		printf("\n");
    769 	}
    770 #endif
    771 
    772 	/*
    773 	 * If the periph has it's own error handler, call it first.
    774 	 * If it returns a legit error value, return that, otherwise
    775 	 * it wants us to continue with normal error processing.
    776 	 */
    777 	if (periph->periph_switch->psw_error != NULL) {
    778 		SC_DEBUG(periph, SCSIPI_DB2,
    779 		    ("calling private err_handler()\n"));
    780 		error = (*periph->periph_switch->psw_error)(xs);
    781 		if (error != EJUSTRETURN)
    782 			return (error);
    783 	}
    784 	/* otherwise use the default */
    785 	switch (sense->error_code & SSD_ERRCODE) {
    786 		/*
    787 		 * If it's code 70, use the extended stuff and
    788 		 * interpret the key
    789 		 */
    790 	case 0x71:		/* delayed error */
    791 		scsipi_printaddr(periph);
    792 		key = sense->flags & SSD_KEY;
    793 		printf(" DEFERRED ERROR, key = 0x%x\n", key);
    794 		/* FALLTHROUGH */
    795 	case 0x70:
    796 		if ((sense->error_code & SSD_ERRCODE_VALID) != 0)
    797 			info = _4btol(sense->info);
    798 		else
    799 			info = 0;
    800 		key = sense->flags & SSD_KEY;
    801 
    802 		switch (key) {
    803 		case SKEY_NO_SENSE:
    804 		case SKEY_RECOVERED_ERROR:
    805 			if (xs->resid == xs->datalen && xs->datalen) {
    806 				/*
    807 				 * Why is this here?
    808 				 */
    809 				xs->resid = 0;	/* not short read */
    810 			}
    811 		case SKEY_EQUAL:
    812 			error = 0;
    813 			break;
    814 		case SKEY_NOT_READY:
    815 			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
    816 				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
    817 			if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
    818 				return (0);
    819 			if (sense->add_sense_code == 0x3A) {
    820 				error = ENODEV; /* Medium not present */
    821 				if (xs->xs_control & XS_CTL_SILENT_NODEV)
    822 					return (error);
    823 			} else
    824 				error = EIO;
    825 			if ((xs->xs_control & XS_CTL_SILENT) != 0)
    826 				return (error);
    827 			break;
    828 		case SKEY_ILLEGAL_REQUEST:
    829 			if ((xs->xs_control &
    830 			     XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
    831 				return (0);
    832 			/*
    833 			 * Handle the case where a device reports
    834 			 * Logical Unit Not Supported during discovery.
    835 			 */
    836 			if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
    837 			    sense->add_sense_code == 0x25 &&
    838 			    sense->add_sense_code_qual == 0x00)
    839 				return (EINVAL);
    840 			if ((xs->xs_control & XS_CTL_SILENT) != 0)
    841 				return (EIO);
    842 			error = EINVAL;
    843 			break;
    844 		case SKEY_UNIT_ATTENTION:
    845 			if (sense->add_sense_code == 0x29 &&
    846 			    sense->add_sense_code_qual == 0x00) {
    847 				/* device or bus reset */
    848 				return (ERESTART);
    849 			}
    850 			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
    851 				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
    852 			if ((xs->xs_control &
    853 			     XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
    854 				/* XXX Should reupload any transient state. */
    855 				(periph->periph_flags &
    856 				 PERIPH_REMOVABLE) == 0) {
    857 				return (ERESTART);
    858 			}
    859 			if ((xs->xs_control & XS_CTL_SILENT) != 0)
    860 				return (EIO);
    861 			error = EIO;
    862 			break;
    863 		case SKEY_WRITE_PROTECT:
    864 			error = EROFS;
    865 			break;
    866 		case SKEY_BLANK_CHECK:
    867 			error = 0;
    868 			break;
    869 		case SKEY_ABORTED_COMMAND:
    870 			error = ERESTART;
    871 			break;
    872 		case SKEY_VOLUME_OVERFLOW:
    873 			error = ENOSPC;
    874 			break;
    875 		default:
    876 			error = EIO;
    877 			break;
    878 		}
    879 
    880 #ifdef SCSIVERBOSE
    881 		if (key && (xs->xs_control & XS_CTL_SILENT) == 0)
    882 			scsipi_print_sense(xs, 0);
    883 #else
    884 		if (key) {
    885 			scsipi_printaddr(periph);
    886 			printf("%s", error_mes[key - 1]);
    887 			if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
    888 				switch (key) {
    889 				case SKEY_NOT_READY:
    890 				case SKEY_ILLEGAL_REQUEST:
    891 				case SKEY_UNIT_ATTENTION:
    892 				case SKEY_WRITE_PROTECT:
    893 					break;
    894 				case SKEY_BLANK_CHECK:
    895 					printf(", requested size: %d (decimal)",
    896 					    info);
    897 					break;
    898 				case SKEY_ABORTED_COMMAND:
    899 					if (xs->xs_retries)
    900 						printf(", retrying");
    901 					printf(", cmd 0x%x, info 0x%x",
    902 					    xs->cmd->opcode, info);
    903 					break;
    904 				default:
    905 					printf(", info = %d (decimal)", info);
    906 				}
    907 			}
    908 			if (sense->extra_len != 0) {
    909 				int n;
    910 				printf(", data =");
    911 				for (n = 0; n < sense->extra_len; n++)
    912 					printf(" %02x",
    913 					    sense->cmd_spec_info[n]);
    914 			}
    915 			printf("\n");
    916 		}
    917 #endif
    918 		return (error);
    919 
    920 	/*
    921 	 * Not code 70, just report it
    922 	 */
    923 	default:
    924 #if    defined(SCSIDEBUG) || defined(DEBUG)
    925 	{
    926 		static char *uc = "undecodable sense error";
    927 		int i;
    928 		u_int8_t *cptr = (u_int8_t *) sense;
    929 		scsipi_printaddr(periph);
    930 		if (xs->cmd == &xs->cmdstore) {
    931 			printf("%s for opcode 0x%x, data=",
    932 			    uc, xs->cmdstore.opcode);
    933 		} else {
    934 			printf("%s, data=", uc);
    935 		}
    936 		for (i = 0; i < sizeof (sense); i++)
    937 			printf(" 0x%02x", *(cptr++) & 0xff);
    938 		printf("\n");
    939 	}
    940 #else
    941 		scsipi_printaddr(periph);
    942 		printf("Sense Error Code 0x%x",
    943 			sense->error_code & SSD_ERRCODE);
    944 		if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
    945 			struct scsipi_sense_data_unextended *usense =
    946 			    (struct scsipi_sense_data_unextended *)sense;
    947 			printf(" at block no. %d (decimal)",
    948 			    _3btol(usense->block));
    949 		}
    950 		printf("\n");
    951 #endif
    952 		return (EIO);
    953 	}
    954 }
    955 
    956 /*
    957  * scsipi_size:
    958  *
    959  *	Find out from the device what its capacity is.
    960  */
    961 u_long
    962 scsipi_size(periph, flags)
    963 	struct scsipi_periph *periph;
    964 	int flags;
    965 {
    966 	struct scsipi_read_cap_data rdcap;
    967 	struct scsipi_read_capacity scsipi_cmd;
    968 
    969 	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
    970 	scsipi_cmd.opcode = READ_CAPACITY;
    971 
    972 	/*
    973 	 * If the command works, interpret the result as a 4 byte
    974 	 * number of blocks
    975 	 */
    976 	if (scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
    977 	    sizeof(scsipi_cmd), (u_char *)&rdcap, sizeof(rdcap),
    978 	    SCSIPIRETRIES, 20000, NULL,
    979 	    flags | XS_CTL_DATA_IN | XS_CTL_DATA_ONSTACK) != 0) {
    980 		scsipi_printaddr(periph);
    981 		printf("could not get size\n");
    982 		return (0);
    983 	}
    984 
    985 	return (_4btol(rdcap.addr) + 1);
    986 }
    987 
    988 /*
    989  * scsipi_test_unit_ready:
    990  *
    991  *	Issue a `test unit ready' request.
    992  */
    993 int
    994 scsipi_test_unit_ready(periph, flags)
    995 	struct scsipi_periph *periph;
    996 	int flags;
    997 {
    998 	struct scsipi_test_unit_ready scsipi_cmd;
    999 
   1000 	/* some ATAPI drives don't support TEST_UNIT_READY. Sigh */
   1001 	if (periph->periph_quirks & PQUIRK_NOTUR)
   1002 		return (0);
   1003 
   1004 	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
   1005 	scsipi_cmd.opcode = TEST_UNIT_READY;
   1006 
   1007 	return (scsipi_command(periph,
   1008 	    (struct scsipi_generic *)&scsipi_cmd, sizeof(scsipi_cmd),
   1009 	    0, 0, SCSIPIRETRIES, 10000, NULL, flags));
   1010 }
   1011 
   1012 /*
   1013  * scsipi_inquire:
   1014  *
   1015  *	Ask the device about itself.
   1016  */
   1017 int
   1018 scsipi_inquire(periph, inqbuf, flags)
   1019 	struct scsipi_periph *periph;
   1020 	struct scsipi_inquiry_data *inqbuf;
   1021 	int flags;
   1022 {
   1023 	struct scsipi_inquiry scsipi_cmd;
   1024 
   1025 	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
   1026 	scsipi_cmd.opcode = INQUIRY;
   1027 	scsipi_cmd.length = sizeof(struct scsipi_inquiry_data);
   1028 
   1029 	return (scsipi_command(periph,
   1030 	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
   1031 	    (u_char *) inqbuf, sizeof(struct scsipi_inquiry_data),
   1032 	    SCSIPIRETRIES, 10000, NULL, XS_CTL_DATA_IN | flags));
   1033 }
   1034 
   1035 /*
   1036  * scsipi_prevent:
   1037  *
   1038  *	Prevent or allow the user to remove the media
   1039  */
   1040 int
   1041 scsipi_prevent(periph, type, flags)
   1042 	struct scsipi_periph *periph;
   1043 	int type, flags;
   1044 {
   1045 	struct scsipi_prevent scsipi_cmd;
   1046 
   1047 	if (periph->periph_quirks & PQUIRK_NODOORLOCK)
   1048 		return (0);
   1049 
   1050 	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
   1051 	scsipi_cmd.opcode = PREVENT_ALLOW;
   1052 	scsipi_cmd.how = type;
   1053 
   1054 	return (scsipi_command(periph,
   1055 	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
   1056 	    0, 0, SCSIPIRETRIES, 5000, NULL, flags));
   1057 }
   1058 
   1059 /*
   1060  * scsipi_start:
   1061  *
   1062  *	Send a START UNIT.
   1063  */
   1064 int
   1065 scsipi_start(periph, type, flags)
   1066 	struct scsipi_periph *periph;
   1067 	int type, flags;
   1068 {
   1069 	struct scsipi_start_stop scsipi_cmd;
   1070 
   1071 	if (periph->periph_quirks & PQUIRK_NOSTARTUNIT)
   1072 		return 0;
   1073 
   1074 	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
   1075 	scsipi_cmd.opcode = START_STOP;
   1076 	scsipi_cmd.byte2 = 0x00;
   1077 	scsipi_cmd.how = type;
   1078 
   1079 	return (scsipi_command(periph,
   1080 	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
   1081 	    0, 0, SCSIPIRETRIES, (type & SSS_START) ? 60000 : 10000,
   1082 	    NULL, flags));
   1083 }
   1084 
   1085 /*
   1086  * scsipi_mode_sense, scsipi_mode_sense_big:
   1087  *	get a sense page from a device
   1088  */
   1089 
   1090 int
   1091 scsipi_mode_sense(periph, byte2, page, data, len, flags, retries, timeout)
   1092 	struct scsipi_periph *periph;
   1093 	int byte2, page, len, flags, retries, timeout;
   1094 	struct scsipi_mode_header *data;
   1095 {
   1096 	struct scsipi_mode_sense scsipi_cmd;
   1097 	int error;
   1098 
   1099 	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
   1100 	scsipi_cmd.opcode = MODE_SENSE;
   1101 	scsipi_cmd.byte2 = byte2;
   1102 	scsipi_cmd.page = page;
   1103 	if (scsipi_periph_bustype(periph) == SCSIPI_BUSTYPE_ATAPI)
   1104 		_lto2b(len, scsipi_cmd.u_len.atapi.length);
   1105 	else
   1106 		scsipi_cmd.u_len.scsi.length = len & 0xff;
   1107 	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
   1108 	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
   1109 	    flags | XS_CTL_DATA_IN);
   1110 	SC_DEBUG(periph, SCSIPI_DB2,
   1111 	    ("scsipi_mode_sense: error=%d\n", error));
   1112 	return (error);
   1113 }
   1114 
   1115 int
   1116 scsipi_mode_sense_big(periph, byte2, page, data, len, flags, retries, timeout)
   1117 	struct scsipi_periph *periph;
   1118 	int byte2, page, len, flags, retries, timeout;
   1119 	struct scsipi_mode_header_big *data;
   1120 {
   1121 	struct scsipi_mode_sense_big scsipi_cmd;
   1122 	int error;
   1123 
   1124 	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
   1125 	scsipi_cmd.opcode = MODE_SENSE_BIG;
   1126 	scsipi_cmd.byte2 = byte2;
   1127 	scsipi_cmd.page = page;
   1128 	_lto2b(len, scsipi_cmd.length);
   1129 	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
   1130 	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
   1131 	    flags | XS_CTL_DATA_IN);
   1132 	SC_DEBUG(periph, SCSIPI_DB2,
   1133 	    ("scsipi_mode_sense_big: error=%d\n", error));
   1134 	return (error);
   1135 }
   1136 
   1137 int
   1138 scsipi_mode_select(periph, byte2, data, len, flags, retries, timeout)
   1139 	struct scsipi_periph *periph;
   1140 	int byte2, len, flags, retries, timeout;
   1141 	struct scsipi_mode_header *data;
   1142 {
   1143 	struct scsipi_mode_select scsipi_cmd;
   1144 	int error;
   1145 
   1146 	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
   1147 	scsipi_cmd.opcode = MODE_SELECT;
   1148 	scsipi_cmd.byte2 = byte2;
   1149 	if (scsipi_periph_bustype(periph) == SCSIPI_BUSTYPE_ATAPI)
   1150 		_lto2b(len, scsipi_cmd.u_len.atapi.length);
   1151 	else
   1152 		scsipi_cmd.u_len.scsi.length = len & 0xff;
   1153 	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
   1154 	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
   1155 	    flags | XS_CTL_DATA_OUT);
   1156 	SC_DEBUG(periph, SCSIPI_DB2,
   1157 	    ("scsipi_mode_select: error=%d\n", error));
   1158 	return (error);
   1159 }
   1160 
   1161 int
   1162 scsipi_mode_select_big(periph, byte2, data, len, flags, retries, timeout)
   1163 	struct scsipi_periph *periph;
   1164 	int byte2, len, flags, retries, timeout;
   1165 	struct scsipi_mode_header_big *data;
   1166 {
   1167 	struct scsipi_mode_select_big scsipi_cmd;
   1168 	int error;
   1169 
   1170 	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
   1171 	scsipi_cmd.opcode = MODE_SELECT_BIG;
   1172 	scsipi_cmd.byte2 = byte2;
   1173 	_lto2b(len, scsipi_cmd.length);
   1174 	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
   1175 	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
   1176 	    flags | XS_CTL_DATA_OUT);
   1177 	SC_DEBUG(periph, SCSIPI_DB2,
   1178 	    ("scsipi_mode_select: error=%d\n", error));
   1179 	return (error);
   1180 }
   1181 
   1182 /*
   1183  * scsipi_done:
   1184  *
   1185  *	This routine is called by an adapter's interrupt handler when
   1186  *	an xfer is completed.
   1187  */
void
scsipi_done(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int s, freezecnt;

	SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
#ifdef SCSIPI_DEBUG
	if (periph->periph_dbflags & SCSIPI_DB1)
		show_scsipi_cmd(xs);
#endif

	s = splbio();
	/*
	 * The resource this command was using is now free.
	 */
	scsipi_put_resource(chan);
	xs->xs_periph->periph_sent--;

	/*
	 * If the command was tagged, free the tag.  Otherwise clear
	 * the untagged-command marker so another untagged command may
	 * be issued to this periph.
	 */
	if (XS_CTL_TAGTYPE(xs) != 0)
		scsipi_put_tag(xs);
	else
		periph->periph_flags &= ~PERIPH_UNTAG;

	/* Mark the command as `done'. */
	xs->xs_status |= XS_STS_DONE;

#ifdef DIAGNOSTIC
	/* ASYNC and POLL are mutually exclusive completion modes. */
	if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
	    (XS_CTL_ASYNC|XS_CTL_POLL))
		panic("scsipi_done: ASYNC and POLL");
#endif

	/*
	 * If the xfer had an error of any sort, freeze the
	 * periph's queue.  Freeze it again if we were requested
	 * to do so in the xfer.  (scsipi_complete() thaws by the
	 * matching count later.)
	 */
	freezecnt = 0;
	if (xs->error != XS_NOERROR)
		freezecnt++;
	if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
		freezecnt++;
	if (freezecnt != 0)
		scsipi_periph_freeze(periph, freezecnt);

	/*
	 * record the xfer with a pending sense, in case a SCSI reset is
	 * received before the thread is woken up.
	 */
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		periph->periph_flags |= PERIPH_SENSE;
		periph->periph_xscheck = xs;
	}

	/*
	 * If this was an xfer that was not to complete asynchronously,
	 * let the requesting thread perform error checking/handling
	 * in its context.
	 */
	if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
		splx(s);
		/*
		 * If it's a polling job, just return, to unwind the
		 * call graph.  We don't need to restart the queue,
		 * because polling jobs are treated specially, and
		 * are really only used during crash dumps anyway
		 * (XXX or during boot-time autoconfiguration of
		 * ATAPI devices).
		 */
		if (xs->xs_control & XS_CTL_POLL)
			return;
		/* Wake the thread sleeping in scsipi_execute_xs(). */
		wakeup(xs);
		goto out;
	}

	/*
	 * Catch the extremely common case of I/O completing
	 * without error; no use in taking a context switch
	 * if we can handle it in interrupt context.
	 */
	if (xs->error == XS_NOERROR) {
		splx(s);
		(void) scsipi_complete(xs);
		goto out;
	}

	/*
	 * There is an error on this xfer.  Put it on the channel's
	 * completion queue, and wake up the completion thread.
	 */
	TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
	splx(s);
	wakeup(&chan->chan_complete);

 out:
	/*
	 * If there are more xfers on the channel's queue, attempt to
	 * run them.
	 */
	scsipi_run_queue(chan);
}
   1295 
   1296 /*
   1297  * scsipi_complete:
   1298  *
   1299  *	Completion of a scsipi_xfer.  This is the guts of scsipi_done().
   1300  *
   1301  *	NOTE: This routine MUST be called with valid thread context
   1302  *	except for the case where the following two conditions are
   1303  *	true:
   1304  *
   1305  *		xs->error == XS_NOERROR
   1306  *		XS_CTL_ASYNC is set in xs->xs_control
   1307  *
   1308  *	The semantics of this routine can be tricky, so here is an
   1309  *	explanation:
   1310  *
   1311  *		0		Xfer completed successfully.
   1312  *
   1313  *		ERESTART	Xfer had an error, but was restarted.
   1314  *
   1315  *		anything else	Xfer had an error, return value is Unix
   1316  *				errno.
   1317  *
   1318  *	If the return value is anything but ERESTART:
   1319  *
   1320  *		- If XS_CTL_ASYNC is set, `xs' has been freed back to
   1321  *		  the pool.
   1322  *		- If there is a buf associated with the xfer,
   1323  *		  it has been biodone()'d.
   1324  */
   1325 int
   1326 scsipi_complete(xs)
   1327 	struct scsipi_xfer *xs;
   1328 {
   1329 	struct scsipi_periph *periph = xs->xs_periph;
   1330 	struct scsipi_channel *chan = periph->periph_channel;
   1331 	struct buf *bp;
   1332 	int error, s;
   1333 
   1334 #ifdef DIAGNOSTIC
   1335 	if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
   1336 		panic("scsipi_complete: XS_CTL_ASYNC but no buf");
   1337 #endif
   1338 	/*
   1339 	 * If command terminated with a CHECK CONDITION, we need to issue a
   1340 	 * REQUEST_SENSE command. Once the REQUEST_SENSE has been processed
   1341 	 * we'll have the real status.
   1342 	 * Must be processed at splbio() to avoid missing a SCSI bus reset
   1343 	 * for this command.
   1344 	 */
   1345 	s = splbio();
   1346 	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
   1347 		/* request sense for a request sense ? */
   1348 		if (xs->xs_control & XS_CTL_REQSENSE) {
   1349 			scsipi_printaddr(periph);
   1350 			printf("request sense for a request sense ?\n");
   1351 			/* XXX maybe we should reset the device ? */
   1352 			/* we've been frozen because xs->error != XS_NOERROR */
   1353 			scsipi_periph_thaw(periph, 1);
   1354 			splx(s);
   1355 			if (xs->resid < xs->datalen) {
   1356 				printf("we read %d bytes of sense anyway:\n",
   1357 				    xs->datalen - xs->resid);
   1358 #ifdef SCSIVERBOSE
   1359 				scsipi_print_sense_data((void *)xs->data, 0);
   1360 #endif
   1361 			}
   1362 			return EINVAL;
   1363 		}
   1364 		scsipi_request_sense(xs);
   1365 	}
   1366 	splx(s);
   1367 
   1368 	/*
   1369 	 * If it's a user level request, bypass all usual completion
   1370 	 * processing, let the user work it out..
   1371 	 */
   1372 	if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
   1373 		SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n"));
   1374 		if (xs->error != XS_NOERROR)
   1375 			scsipi_periph_thaw(periph, 1);
   1376 		scsipi_user_done(xs);
   1377 		SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n "));
   1378 		return 0;
   1379 	}
   1380 
   1381 	switch (xs->error) {
   1382 	case XS_NOERROR:
   1383 		error = 0;
   1384 		break;
   1385 
   1386 	case XS_SENSE:
   1387 	case XS_SHORTSENSE:
   1388 		error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
   1389 		break;
   1390 
   1391 	case XS_RESOURCE_SHORTAGE:
   1392 		/*
   1393 		 * XXX Should freeze channel's queue.
   1394 		 */
   1395 		scsipi_printaddr(periph);
   1396 		printf("adapter resource shortage\n");
   1397 		/* FALLTHROUGH */
   1398 
   1399 	case XS_BUSY:
   1400 		if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
   1401 			struct scsipi_max_openings mo;
   1402 
   1403 			/*
   1404 			 * We set the openings to active - 1, assuming that
   1405 			 * the command that got us here is the first one that
   1406 			 * can't fit into the device's queue.  If that's not
   1407 			 * the case, I guess we'll find out soon enough.
   1408 			 */
   1409 			mo.mo_target = periph->periph_target;
   1410 			mo.mo_lun = periph->periph_lun;
   1411 			if (periph->periph_active < periph->periph_openings)
   1412 				mo.mo_openings = periph->periph_active - 1;
   1413 			else
   1414 				mo.mo_openings = periph->periph_openings - 1;
   1415 #ifdef DIAGNOSTIC
   1416 			if (mo.mo_openings < 0) {
   1417 				scsipi_printaddr(periph);
   1418 				printf("QUEUE FULL resulted in < 0 openings\n");
   1419 				panic("scsipi_done");
   1420 			}
   1421 #endif
   1422 			if (mo.mo_openings == 0) {
   1423 				scsipi_printaddr(periph);
   1424 				printf("QUEUE FULL resulted in 0 openings\n");
   1425 				mo.mo_openings = 1;
   1426 			}
   1427 			scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
   1428 			error = ERESTART;
   1429 		} else if (xs->xs_retries != 0) {
   1430 			xs->xs_retries--;
   1431 			/*
   1432 			 * Wait one second, and try again.
   1433 			 */
   1434 			if ((xs->xs_control & XS_CTL_POLL) ||
   1435 			    (chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
   1436 				delay(1000000);
   1437 			} else {
   1438 				scsipi_periph_freeze(periph, 1);
   1439 				callout_reset(&periph->periph_callout,
   1440 				    hz, scsipi_periph_timed_thaw, periph);
   1441 			}
   1442 			error = ERESTART;
   1443 		} else
   1444 			error = EBUSY;
   1445 		break;
   1446 
   1447 	case XS_REQUEUE:
   1448 		error = ERESTART;
   1449 		break;
   1450 
   1451 	case XS_TIMEOUT:
   1452 		if (xs->xs_retries != 0) {
   1453 			xs->xs_retries--;
   1454 			error = ERESTART;
   1455 		} else
   1456 			error = EIO;
   1457 		break;
   1458 
   1459 	case XS_SELTIMEOUT:
   1460 		/* XXX Disable device? */
   1461 		error = EIO;
   1462 		break;
   1463 
   1464 	case XS_RESET:
   1465 		if (xs->xs_control & XS_CTL_REQSENSE) {
   1466 			/*
   1467 			 * request sense interrupted by reset: signal it
   1468 			 * with EINTR return code.
   1469 			 */
   1470 			error = EINTR;
   1471 		} else {
   1472 			if (xs->xs_retries != 0) {
   1473 				xs->xs_retries--;
   1474 				error = ERESTART;
   1475 			} else
   1476 				error = EIO;
   1477 		}
   1478 		break;
   1479 
   1480 	default:
   1481 		scsipi_printaddr(periph);
   1482 		printf("invalid return code from adapter: %d\n", xs->error);
   1483 		error = EIO;
   1484 		break;
   1485 	}
   1486 
   1487 	s = splbio();
   1488 	if (error == ERESTART) {
   1489 		/*
   1490 		 * If we get here, the periph has been thawed and frozen
   1491 		 * again if we had to issue recovery commands.  Alternatively,
   1492 		 * it may have been frozen again and in a timed thaw.  In
   1493 		 * any case, we thaw the periph once we re-enqueue the
   1494 		 * command.  Once the periph is fully thawed, it will begin
   1495 		 * operation again.
   1496 		 */
   1497 		xs->error = XS_NOERROR;
   1498 		xs->status = SCSI_OK;
   1499 		xs->xs_status &= ~XS_STS_DONE;
   1500 		xs->xs_requeuecnt++;
   1501 		error = scsipi_enqueue(xs);
   1502 		if (error == 0) {
   1503 			scsipi_periph_thaw(periph, 1);
   1504 			splx(s);
   1505 			return (ERESTART);
   1506 		}
   1507 	}
   1508 
   1509 	/*
   1510 	 * scsipi_done() freezes the queue if not XS_NOERROR.
   1511 	 * Thaw it here.
   1512 	 */
   1513 	if (xs->error != XS_NOERROR)
   1514 		scsipi_periph_thaw(periph, 1);
   1515 
   1516 
   1517 	if (periph->periph_switch->psw_done)
   1518 		periph->periph_switch->psw_done(xs);
   1519 	if ((bp = xs->bp) != NULL) {
   1520 		if (error) {
   1521 			bp->b_error = error;
   1522 			bp->b_flags |= B_ERROR;
   1523 			bp->b_resid = bp->b_bcount;
   1524 		} else {
   1525 			bp->b_error = 0;
   1526 			bp->b_resid = xs->resid;
   1527 																		}
   1528 		biodone(bp);
   1529 	}
   1530 
   1531 	if (xs->xs_control & XS_CTL_ASYNC)
   1532 		scsipi_put_xs(xs);
   1533 	splx(s);
   1534 
   1535 	return (error);
   1536 }
   1537 
   1538 /*
   1539  * Issue a request sense for the given scsipi_xfer. Called when the xfer
   1540  * returns with a CHECK_CONDITION status. Must be called in valid thread
   1541  * context and at splbio().
   1542  */
   1543 
void
scsipi_request_sense(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags, error;
	struct scsipi_sense cmd;

	/* Flag the periph so a bus reset can find the pending sense. */
	periph->periph_flags |= PERIPH_SENSE;

	/* if command was polling, request sense will too */
	flags = xs->xs_control & XS_CTL_POLL;
	/* Polling commands can't sleep */
	if (flags)
		flags |= XS_CTL_NOSLEEP;

	/*
	 * URGENT puts the sense command at the head of the channel queue;
	 * THAW/FREEZE keep the periph frozen across the recovery command.
	 */
	flags |= XS_CTL_REQSENSE | XS_CTL_URGENT | XS_CTL_DATA_IN |
	    XS_CTL_THAW_PERIPH | XS_CTL_FREEZE_PERIPH;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = REQUEST_SENSE;
	cmd.length = sizeof(struct scsipi_sense_data);

	/* No retries (0), 1 second timeout. */
	error = scsipi_command(periph,
	    (struct scsipi_generic *) &cmd, sizeof(cmd),
	    (u_char*)&xs->sense.scsi_sense, sizeof(struct scsipi_sense_data),
	    0, 1000, NULL, flags);
	periph->periph_flags &= ~PERIPH_SENSE;
	periph->periph_xscheck = NULL;
	/* Translate the outcome back into the original xfer's error code. */
	switch(error) {
	case 0:
		/* we have a valid sense */
		xs->error = XS_SENSE;
		return;
	case EINTR:
		/* REQUEST_SENSE interrupted by bus reset. */
		xs->error = XS_RESET;
		return;
	case EIO:
		 /* request sense couldn't be performed */
		/*
		 * XXX this isn't quite right but we don't have anything
		 * better for now
		 */
		xs->error = XS_DRIVER_STUFFUP;
		return;
	default:
		 /* Notify that request sense failed. */
		xs->error = XS_DRIVER_STUFFUP;
		scsipi_printaddr(periph);
		printf("request sense failed with error %d\n", error);
		return;
	}
}
   1598 
   1599 /*
   1600  * scsipi_enqueue:
   1601  *
   1602  *	Enqueue an xfer on a channel.
   1603  */
int
scsipi_enqueue(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_channel *chan = xs->xs_periph->periph_channel;
	struct scsipi_xfer *qxs;
	int s;

	s = splbio();

	/*
	 * If the xfer is to be polled, and there are already jobs on
	 * the queue, we can't proceed.
	 */
	if ((xs->xs_control & XS_CTL_POLL) != 0 &&
	    TAILQ_FIRST(&chan->chan_queue) != NULL) {
		splx(s);
		xs->error = XS_DRIVER_STUFFUP;
		return (EAGAIN);
	}

	/*
	 * If we have an URGENT xfer, it's an error recovery command
	 * and it should just go on the head of the channel's queue.
	 */
	if (xs->xs_control & XS_CTL_URGENT) {
		TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
		goto out;
	}

	/*
	 * If this xfer has already been on the queue before, we
	 * need to reinsert it in the correct order.  That order is:
	 *
	 *	Immediately before the first xfer for this periph
	 *	with a requeuecnt less than xs->xs_requeuecnt.
	 *
	 * Failing that, at the end of the queue.  (We'll end up
	 * there naturally.)
	 */
	if (xs->xs_requeuecnt != 0) {
		for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
		     qxs = TAILQ_NEXT(qxs, channel_q)) {
			if (qxs->xs_periph == xs->xs_periph &&
			    qxs->xs_requeuecnt < xs->xs_requeuecnt)
				break;
		}
		if (qxs != NULL) {
			TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
			    channel_q);
			goto out;
		}
	}
	TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
 out:
	/* Thaw now that the command is safely back on the queue. */
	if (xs->xs_control & XS_CTL_THAW_PERIPH)
		scsipi_periph_thaw(xs->xs_periph, 1);
	splx(s);
	return (0);
}
   1664 
   1665 /*
   1666  * scsipi_run_queue:
   1667  *
   1668  *	Start as many xfers as possible running on the channel.
   1669  */
   1670 void
   1671 scsipi_run_queue(chan)
   1672 	struct scsipi_channel *chan;
   1673 {
   1674 	struct scsipi_xfer *xs;
   1675 	struct scsipi_periph *periph;
   1676 	int s;
   1677 
   1678 	for (;;) {
   1679 		s = splbio();
   1680 
   1681 		/*
   1682 		 * If the channel is frozen, we can't do any work right
   1683 		 * now.
   1684 		 */
   1685 		if (chan->chan_qfreeze != 0) {
   1686 			splx(s);
   1687 			return;
   1688 		}
   1689 
   1690 		/*
   1691 		 * Look for work to do, and make sure we can do it.
   1692 		 */
   1693 		for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
   1694 		     xs = TAILQ_NEXT(xs, channel_q)) {
   1695 			periph = xs->xs_periph;
   1696 
   1697 			if ((periph->periph_sent >= periph->periph_openings) ||
   1698 			    periph->periph_qfreeze != 0 ||
   1699 			    (periph->periph_flags & PERIPH_UNTAG) != 0)
   1700 				continue;
   1701 
   1702 			if ((periph->periph_flags &
   1703 			    (PERIPH_RECOVERING | PERIPH_SENSE)) != 0 &&
   1704 			    (xs->xs_control & XS_CTL_URGENT) == 0)
   1705 				continue;
   1706 
   1707 			/*
   1708 			 * We can issue this xfer!
   1709 			 */
   1710 			goto got_one;
   1711 		}
   1712 
   1713 		/*
   1714 		 * Can't find any work to do right now.
   1715 		 */
   1716 		splx(s);
   1717 		return;
   1718 
   1719  got_one:
   1720 		/*
   1721 		 * Have an xfer to run.  Allocate a resource from
   1722 		 * the adapter to run it.  If we can't allocate that
   1723 		 * resource, we don't dequeue the xfer.
   1724 		 */
   1725 		if (scsipi_get_resource(chan) == 0) {
   1726 			/*
   1727 			 * Adapter is out of resources.  If the adapter
   1728 			 * supports it, attempt to grow them.
   1729 			 */
   1730 			if (scsipi_grow_resources(chan) == 0) {
   1731 				/*
   1732 				 * Wasn't able to grow resources,
   1733 				 * nothing more we can do.
   1734 				 */
   1735 				if (xs->xs_control & XS_CTL_POLL) {
   1736 					scsipi_printaddr(xs->xs_periph);
   1737 					printf("polling command but no "
   1738 					    "adapter resources");
   1739 					/* We'll panic shortly... */
   1740 				}
   1741 				splx(s);
   1742 
   1743 				/*
   1744 				 * XXX: We should be able to note that
   1745 				 * XXX: that resources are needed here!
   1746 				 */
   1747 				return;
   1748 			}
   1749 			/*
   1750 			 * scsipi_grow_resources() allocated the resource
   1751 			 * for us.
   1752 			 */
   1753 		}
   1754 
   1755 		/*
   1756 		 * We have a resource to run this xfer, do it!
   1757 		 */
   1758 		TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
   1759 
   1760 		/*
   1761 		 * If the command is to be tagged, allocate a tag ID
   1762 		 * for it.
   1763 		 */
   1764 		if (XS_CTL_TAGTYPE(xs) != 0)
   1765 			scsipi_get_tag(xs);
   1766 		else
   1767 			periph->periph_flags |= PERIPH_UNTAG;
   1768 		periph->periph_sent++;
   1769 		splx(s);
   1770 
   1771 		scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
   1772 	}
   1773 #ifdef DIAGNOSTIC
   1774 	panic("scsipi_run_queue: impossible");
   1775 #endif
   1776 }
   1777 
   1778 /*
   1779  * scsipi_execute_xs:
   1780  *
   1781  *	Begin execution of an xfer, waiting for it to complete, if necessary.
   1782  */
int
scsipi_execute_xs(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int async, poll, retries, error, s;

	/* Reset the xfer's completion state before (re)starting it. */
	xs->xs_status &= ~XS_STS_DONE;
	xs->error = XS_NOERROR;
	xs->resid = xs->datalen;
	xs->status = SCSI_OK;

#ifdef SCSIPI_DEBUG
	if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) {
		printf("scsipi_execute_xs: ");
		show_scsipi_xs(xs);
		printf("\n");
	}
#endif

	/*
	 * Deal with command tagging:
	 *
	 *	- If the device's current operating mode doesn't
	 *	  include tagged queueing, clear the tag mask.
	 *
	 *	- If the device's current operating mode *does*
	 *	  include tagged queueing, set the tag_type in
	 *	  the xfer to the appropriate byte for the tag
	 *	  message.
	 */
	if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0 ||
		(xs->xs_control & XS_CTL_REQSENSE)) {
		xs->xs_control &= ~XS_CTL_TAGMASK;
		xs->xs_tag_type = 0;
	} else {
		/*
		 * If the request doesn't specify a tag, give Head
		 * tags to URGENT operations and Ordered tags to
		 * everything else.
		 */
		if (XS_CTL_TAGTYPE(xs) == 0) {
			if (xs->xs_control & XS_CTL_URGENT)
				xs->xs_control |= XS_CTL_HEAD_TAG;
			else
				xs->xs_control |= XS_CTL_ORDERED_TAG;
		}

		/* Translate the control-flag tag type to the message byte. */
		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_ORDERED_TAG:
			xs->xs_tag_type = MSG_ORDERED_Q_TAG;
			break;

		case XS_CTL_SIMPLE_TAG:
			xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
			break;

		case XS_CTL_HEAD_TAG:
			xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid tag mask 0x%08x\n",
			    XS_CTL_TAGTYPE(xs));
			panic("scsipi_execute_xs");
		}
	}

	/* If the adaptor wants us to poll, poll. */
	if (chan->chan_adapter->adapt_flags & SCSIPI_ADAPT_POLL_ONLY)
		xs->xs_control |= XS_CTL_POLL;

	/*
	 * If we don't yet have a completion thread, or we are to poll for
	 * completion, clear the ASYNC flag.
	 */
	if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
		xs->xs_control &= ~XS_CTL_ASYNC;

	async = (xs->xs_control & XS_CTL_ASYNC);
	poll = (xs->xs_control & XS_CTL_POLL);
	retries = xs->xs_retries;		/* for polling commands */

#ifdef DIAGNOSTIC
	if (async != 0 && xs->bp == NULL)
		panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
#endif

	/*
	 * Enqueue the transfer.  If we're not polling for completion, this
	 * should ALWAYS return `no error'.
	 */
 try_again:
	error = scsipi_enqueue(xs);
	if (error) {
		if (poll == 0) {
			scsipi_printaddr(periph);
			printf("not polling, but enqueue failed with %d\n",
			    error);
			panic("scsipi_execute_xs");
		}

		/* Polling enqueue can fail if the queue is busy; retry. */
		scsipi_printaddr(periph);
		printf("failed to enqueue polling command");
		if (retries != 0) {
			printf(", retrying...\n");
			delay(1000000);
			retries--;
			goto try_again;
		}
		printf("\n");
		goto free_xs;
	}

 restarted:
	scsipi_run_queue(chan);

	/*
	 * The xfer is enqueued, and possibly running.  If it's to be
	 * completed asynchronously, just return now.
	 */
	if (async)
		return (EJUSTRETURN);

	/*
	 * Not an asynchronous command; wait for it to complete.
	 * scsipi_done() wakes us (polling jobs must already be done
	 * when we get here).
	 */
	s = splbio();
	while ((xs->xs_status & XS_STS_DONE) == 0) {
		if (poll) {
			scsipi_printaddr(periph);
			printf("polling command not done\n");
			panic("scsipi_execute_xs");
		}
		(void) tsleep(xs, PRIBIO, "xscmd", 0);
	}
	splx(s);

	/*
	 * Command is complete.  scsipi_done() has awakened us to perform
	 * the error handling.
	 */
	error = scsipi_complete(xs);
	if (error == ERESTART)
		goto restarted;

	/*
	 * Command completed successfully or fatal error occurred.  Fall
	 * into....
	 */
 free_xs:
	s = splbio();
	scsipi_put_xs(xs);
	splx(s);

	/*
	 * Kick the queue, keep it running in case it stopped for some
	 * reason.
	 */
	scsipi_run_queue(chan);

	return (error);
}
   1948 
   1949 /*
   1950  * scsipi_completion_thread:
   1951  *
   1952  *	This is the completion thread.  We wait for errors on
   1953  *	asynchronous xfers, and perform the error handling
   1954  *	function, restarting the command, if necessary.
   1955  */
void
scsipi_completion_thread(arg)
	void *arg;
{
	struct scsipi_channel *chan = arg;
	struct scsipi_xfer *xs;
	int s;

	/* Announce that asynchronous completion is now available. */
	s = splbio();
	chan->chan_flags |= SCSIPI_CHAN_TACTIVE;
	splx(s);
	for (;;) {
		s = splbio();
		xs = TAILQ_FIRST(&chan->chan_complete);
		/* Sleep until there is work or a control flag is raised. */
		if (xs == NULL &&
		    (chan->chan_flags &
		     (SCSIPI_CHAN_SHUTDOWN | SCSIPI_CHAN_CALLBACK |
		     SCSIPI_CHAN_KICK)) == 0) {
			(void) tsleep(&chan->chan_complete, PRIBIO,
			    "sccomp", 0);
			splx(s);
			continue;
		}
		if (chan->chan_flags & SCSIPI_CHAN_CALLBACK) {
			/* call chan_callback from thread context */
			chan->chan_flags &= ~SCSIPI_CHAN_CALLBACK;
			chan->chan_callback(chan, chan->chan_callback_arg);
			splx(s);
			continue;
		}
		if (chan->chan_flags & SCSIPI_CHAN_KICK) {
			/* explicitly run the queues for this channel */
			chan->chan_flags &= ~SCSIPI_CHAN_KICK;
			scsipi_run_queue(chan);
			splx(s);
			continue;
		}
		if (chan->chan_flags & SCSIPI_CHAN_SHUTDOWN) {
			splx(s);
			break;
		}
		if (xs) {
			TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
			splx(s);

			/*
			 * Have an xfer with an error; process it.
			 */
			(void) scsipi_complete(xs);

			/*
			 * Kick the queue; keep it running if it was stopped
			 * for some reason.
			 */
			scsipi_run_queue(chan);
		} else {
			splx(s);
		}
	}

	chan->chan_thread = NULL;

	/* In case parent is waiting for us to exit. */
	wakeup(&chan->chan_thread);

	kthread_exit(0);
}
   2023 
   2024 /*
   2025  * scsipi_create_completion_thread:
   2026  *
   2027  *	Callback to actually create the completion thread.
   2028  */
   2029 void
   2030 scsipi_create_completion_thread(arg)
   2031 	void *arg;
   2032 {
   2033 	struct scsipi_channel *chan = arg;
   2034 	struct scsipi_adapter *adapt = chan->chan_adapter;
   2035 
   2036 	if (kthread_create1(scsipi_completion_thread, chan,
   2037 	    &chan->chan_thread, "%s:%d", adapt->adapt_dev->dv_xname,
   2038 	    chan->chan_channel)) {
   2039 		printf("%s: unable to create completion thread for "
   2040 		    "channel %d\n", adapt->adapt_dev->dv_xname,
   2041 		    chan->chan_channel);
   2042 		panic("scsipi_create_completion_thread");
   2043 	}
   2044 }
   2045 
   2046 /*
   2047  * scsipi_thread_call_callback:
   2048  *
   2049  * 	request to call a callback from the completion thread
   2050  */
   2051 int
   2052 scsipi_thread_call_callback(chan, callback, arg)
   2053 	struct scsipi_channel *chan;
   2054 	void (*callback) __P((struct scsipi_channel *, void *));
   2055 	void *arg;
   2056 {
   2057 	int s;
   2058 
   2059 	s = splbio();
   2060 	if (chan->chan_flags & SCSIPI_CHAN_CALLBACK) {
   2061 		splx(s);
   2062 		return EBUSY;
   2063 	}
   2064 	scsipi_channel_freeze(chan, 1);
   2065 	chan->chan_callback = callback;
   2066 	chan->chan_callback_arg = arg;
   2067 	chan->chan_flags |= SCSIPI_CHAN_CALLBACK;
   2068 	wakeup(&chan->chan_complete);
   2069 	splx(s);
   2070 	return(0);
   2071 }
   2072 
   2073 /*
   2074  * scsipi_async_event:
   2075  *
   2076  *	Handle an asynchronous event from an adapter.
   2077  */
   2078 void
   2079 scsipi_async_event(chan, event, arg)
   2080 	struct scsipi_channel *chan;
   2081 	scsipi_async_event_t event;
   2082 	void *arg;
   2083 {
   2084 	int s;
   2085 
   2086 	s = splbio();
   2087 	switch (event) {
   2088 	case ASYNC_EVENT_MAX_OPENINGS:
   2089 		scsipi_async_event_max_openings(chan,
   2090 		    (struct scsipi_max_openings *)arg);
   2091 		break;
   2092 
   2093 	case ASYNC_EVENT_XFER_MODE:
   2094 		scsipi_async_event_xfer_mode(chan,
   2095 		    (struct scsipi_xfer_mode *)arg);
   2096 		break;
   2097 	case ASYNC_EVENT_RESET:
   2098 		scsipi_async_event_channel_reset(chan);
   2099 		break;
   2100 	}
   2101 	splx(s);
   2102 }
   2103 
   2104 /*
   2105  * scsipi_print_xfer_mode:
   2106  *
   2107  *	Print a periph's capabilities.
   2108  */
void
scsipi_print_xfer_mode(periph)
	struct scsipi_periph *periph;
{
	int period, freq, speed, mbs;

	/* Nothing to report until a transfer mode has been validated. */
	if ((periph->periph_flags & PERIPH_MODE_VALID) == 0)
		return;

	printf("%s: ", periph->periph_dev->dv_xname);
	if (periph->periph_mode & PERIPH_CAP_SYNC) {
		/* period is in units of ns * 10, hence the %d.%d split. */
		period = scsipi_sync_factor_to_period(periph->periph_period);
		printf("sync (%d.%dns offset %d)",
		    period / 10, period % 10, periph->periph_offset);
	} else
		printf("async");

	if (periph->periph_mode & PERIPH_CAP_WIDE32)
		printf(", 32-bit");
	else if (periph->periph_mode & PERIPH_CAP_WIDE16)
		printf(", 16-bit");
	else
		printf(", 8-bit");

	if (periph->periph_mode & PERIPH_CAP_SYNC) {
		/*
		 * freq is in kHz (see scsipi_sync_factor_to_freq); one
		 * transfer moves 1, 2, or 4 bytes depending on bus
		 * width, so speed ends up in KB/s.
		 */
		freq = scsipi_sync_factor_to_freq(periph->periph_period);
		speed = freq;
		if (periph->periph_mode & PERIPH_CAP_WIDE32)
			speed *= 4;
		else if (periph->periph_mode & PERIPH_CAP_WIDE16)
			speed *= 2;
		mbs = speed / 1000;
		if (mbs > 0)
			printf(" (%d.%03dMB/s)", mbs, speed % 1000);
		else
			printf(" (%dKB/s)", speed % 1000);
	}

	printf(" transfers");

	if (periph->periph_mode & PERIPH_CAP_TQING)
		printf(", tagged queueing");

	printf("\n");
}
   2154 
   2155 /*
   2156  * scsipi_async_event_max_openings:
   2157  *
   2158  *	Update the maximum number of outstanding commands a
   2159  *	device may have.
   2160  */
   2161 void
   2162 scsipi_async_event_max_openings(chan, mo)
   2163 	struct scsipi_channel *chan;
   2164 	struct scsipi_max_openings *mo;
   2165 {
   2166 	struct scsipi_periph *periph;
   2167 	int minlun, maxlun;
   2168 
   2169 	if (mo->mo_lun == -1) {
   2170 		/*
   2171 		 * Wildcarded; apply it to all LUNs.
   2172 		 */
   2173 		minlun = 0;
   2174 		maxlun = chan->chan_nluns - 1;
   2175 	} else
   2176 		minlun = maxlun = mo->mo_lun;
   2177 
   2178 	for (; minlun <= maxlun; minlun++) {
   2179 		periph = scsipi_lookup_periph(chan, mo->mo_target, minlun);
   2180 		if (periph == NULL)
   2181 			continue;
   2182 
   2183 		if (mo->mo_openings < periph->periph_openings)
   2184 			periph->periph_openings = mo->mo_openings;
   2185 		else if (mo->mo_openings > periph->periph_openings &&
   2186 		    (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
   2187 			periph->periph_openings = mo->mo_openings;
   2188 	}
   2189 }
   2190 
   2191 /*
   2192  * scsipi_async_event_xfer_mode:
   2193  *
   2194  *	Update the xfer mode for all periphs sharing the
   2195  *	specified I_T Nexus.
   2196  */
   2197 void
   2198 scsipi_async_event_xfer_mode(chan, xm)
   2199 	struct scsipi_channel *chan;
   2200 	struct scsipi_xfer_mode *xm;
   2201 {
   2202 	struct scsipi_periph *periph;
   2203 	int lun, announce, mode, period, offset;
   2204 
   2205 	for (lun = 0; lun < chan->chan_nluns; lun++) {
   2206 		periph = scsipi_lookup_periph(chan, xm->xm_target, lun);
   2207 		if (periph == NULL)
   2208 			continue;
   2209 		announce = 0;
   2210 
   2211 		/*
   2212 		 * Clamp the xfer mode down to this periph's capabilities.
   2213 		 */
   2214 		mode = xm->xm_mode & periph->periph_cap;
   2215 		if (mode & PERIPH_CAP_SYNC) {
   2216 			period = xm->xm_period;
   2217 			offset = xm->xm_offset;
   2218 		} else {
   2219 			period = 0;
   2220 			offset = 0;
   2221 		}
   2222 
   2223 		/*
   2224 		 * If we do not have a valid xfer mode yet, or the parameters
   2225 		 * are different, announce them.
   2226 		 */
   2227 		if ((periph->periph_flags & PERIPH_MODE_VALID) == 0 ||
   2228 		    periph->periph_mode != mode ||
   2229 		    periph->periph_period != period ||
   2230 		    periph->periph_offset != offset)
   2231 			announce = 1;
   2232 
   2233 		periph->periph_mode = mode;
   2234 		periph->periph_period = period;
   2235 		periph->periph_offset = offset;
   2236 		periph->periph_flags |= PERIPH_MODE_VALID;
   2237 
   2238 		if (announce)
   2239 			scsipi_print_xfer_mode(periph);
   2240 	}
   2241 }
   2242 
   2243 /*
   2244  * scsipi_set_xfer_mode:
   2245  *
   2246  *	Set the xfer mode for the specified I_T Nexus.
   2247  */
   2248 void
   2249 scsipi_set_xfer_mode(chan, target, immed)
   2250 	struct scsipi_channel *chan;
   2251 	int target, immed;
   2252 {
   2253 	struct scsipi_xfer_mode xm;
   2254 	struct scsipi_periph *itperiph;
   2255 	int lun, s;
   2256 
   2257 	/*
   2258 	 * Go to the minimal xfer mode.
   2259 	 */
   2260 	xm.xm_target = target;
   2261 	xm.xm_mode = 0;
   2262 	xm.xm_period = 0;			/* ignored */
   2263 	xm.xm_offset = 0;			/* ignored */
   2264 
   2265 	/*
   2266 	 * Find the first LUN we know about on this I_T Nexus.
   2267 	 */
   2268 	for (lun = 0; lun < chan->chan_nluns; lun++) {
   2269 		itperiph = scsipi_lookup_periph(chan, target, lun);
   2270 		if (itperiph != NULL)
   2271 			break;
   2272 	}
   2273 	if (itperiph != NULL) {
   2274 		xm.xm_mode = itperiph->periph_cap;
   2275 		/*
   2276 		 * Now issue the request to the adapter.
   2277 		 */
   2278 		s = splbio();
   2279 		scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm);
   2280 		splx(s);
   2281 		/*
   2282 		 * If we want this to happen immediately, issue a dummy
   2283 		 * command, since most adapters can't really negotiate unless
   2284 		 * they're executing a job.
   2285 		 */
   2286 		if (immed != 0) {
   2287 			(void) scsipi_test_unit_ready(itperiph,
   2288 			    XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
   2289 			    XS_CTL_IGNORE_NOT_READY |
   2290 			    XS_CTL_IGNORE_MEDIA_CHANGE);
   2291 		}
   2292 	}
   2293 }
   2294 
   2295 /*
   2296  * scsipi_channel_reset:
   2297  *
   2298  *	handle scsi bus reset
   2299  * called at splbio
   2300  */
void
scsipi_async_event_channel_reset(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_xfer *xs, *xs_next;
	struct scsipi_periph *periph;
	int target, lun;

	/*
	 * Channel has been reset. Also mark as reset pending REQUEST_SENSE
	 * commands; as the sense is not available any more.
	 * can't call scsipi_done() from here, as the command has not been
	 * sent to the adapter yet (this would corrupt accounting).
	 */

	/*
	 * Pass 1: fail every queued REQUEST_SENSE xfer with XS_RESET.
	 * Async xfers are handed to the completion queue; the wakeup()
	 * below notifies anyone sleeping on chan_complete.
	 */
	for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; xs = xs_next) {
		/* Fetch the successor first; xs may be unlinked below. */
		xs_next = TAILQ_NEXT(xs, channel_q);
		if (xs->xs_control & XS_CTL_REQSENSE) {
			TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
			xs->error = XS_RESET;
			if ((xs->xs_control & XS_CTL_ASYNC) != 0)
				TAILQ_INSERT_TAIL(&chan->chan_complete, xs,
				    channel_q);
		}
	}
	wakeup(&chan->chan_complete);
	/* Catch xs with pending sense which may not have a REQSENSE xs yet */
	for (target = 0; target < chan->chan_ntargets; target++) {
		/* Skip the adapter's own ID. */
		if (target == chan->chan_id)
			continue;
		for (lun = 0; lun <  chan->chan_nluns; lun++) {
			periph = chan->chan_periphs[target][lun];
			if (periph) {
				xs = periph->periph_xscheck;
				if (xs)
					xs->error = XS_RESET;
			}
		}
	}
}
   2341 
   2342 /*
   2343  * scsipi_target_detach:
   2344  *
   2345  *	detach all periph associated with a I_T
   2346  * 	must be called from valid thread context
   2347  */
   2348 int
   2349 scsipi_target_detach(chan, target, lun, flags)
   2350 	struct scsipi_channel *chan;
   2351 	int target, lun;
   2352 	int flags;
   2353 {
   2354 	struct scsipi_periph *periph;
   2355 	int ctarget, mintarget, maxtarget;
   2356 	int clun, minlun, maxlun;
   2357 	int error;
   2358 
   2359 	if (target == -1) {
   2360 		mintarget = 0;
   2361 		maxtarget = chan->chan_ntargets;
   2362 	} else {
   2363 		if (target == chan->chan_id)
   2364 			return EINVAL;
   2365 		if (target < 0 || target >= chan->chan_ntargets)
   2366 			return EINVAL;
   2367 		mintarget = target;
   2368 		maxtarget = target + 1;
   2369 	}
   2370 
   2371 	if (lun == -1) {
   2372 		minlun = 0;
   2373 		maxlun = chan->chan_nluns;
   2374 	} else {
   2375 		if (lun < 0 || lun >= chan->chan_nluns)
   2376 			return EINVAL;
   2377 		minlun = lun;
   2378 		maxlun = lun + 1;
   2379 	}
   2380 
   2381 	for (ctarget = mintarget; ctarget < maxtarget; ctarget++) {
   2382 		if (ctarget == chan->chan_id)
   2383 			continue;
   2384 
   2385 		for (clun = minlun; clun < maxlun; clun++) {
   2386 			periph = scsipi_lookup_periph(chan, ctarget, clun);
   2387 			if (periph == NULL)
   2388 				continue;
   2389 			error = config_detach(periph->periph_dev, flags);
   2390 			if (error)
   2391 				return (error);
   2392 			scsipi_remove_periph(chan, periph);
   2393 			free(periph, M_DEVBUF);
   2394 		}
   2395 	}
   2396 	return(0);
   2397 }
   2398 
   2399 /*
   2400  * scsipi_adapter_addref:
   2401  *
   2402  *	Add a reference to the adapter pointed to by the provided
   2403  *	link, enabling the adapter if necessary.
   2404  */
   2405 int
   2406 scsipi_adapter_addref(adapt)
   2407 	struct scsipi_adapter *adapt;
   2408 {
   2409 	int s, error = 0;
   2410 
   2411 	s = splbio();
   2412 	if (adapt->adapt_refcnt++ == 0 && adapt->adapt_enable != NULL) {
   2413 		error = (*adapt->adapt_enable)(adapt->adapt_dev, 1);
   2414 		if (error)
   2415 			adapt->adapt_refcnt--;
   2416 	}
   2417 	splx(s);
   2418 	return (error);
   2419 }
   2420 
   2421 /*
   2422  * scsipi_adapter_delref:
   2423  *
   2424  *	Delete a reference to the adapter pointed to by the provided
   2425  *	link, disabling the adapter if possible.
   2426  */
   2427 void
   2428 scsipi_adapter_delref(adapt)
   2429 	struct scsipi_adapter *adapt;
   2430 {
   2431 	int s;
   2432 
   2433 	s = splbio();
   2434 	if (adapt->adapt_refcnt-- == 1 && adapt->adapt_enable != NULL)
   2435 		(void) (*adapt->adapt_enable)(adapt->adapt_dev, 0);
   2436 	splx(s);
   2437 }
   2438 
/*
 * Table of the sync factors whose periods do not follow the linear
 * factor * 4ns encoding; anything not listed here uses that rule.
 */
struct scsipi_syncparam {
	int	ss_factor;
	int	ss_period;	/* ns * 10 */
} scsipi_syncparams[] = {
	{ 0x09,		125 },
	{ 0x0a,		250 },
	{ 0x0b,		303 },
	{ 0x0c,		500 },
};
const int scsipi_nsyncparams =
    sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);

/*
 * Map a transfer period (ns * 10) to a sync factor: the first table
 * entry whose period is at least as long, else the linear encoding.
 */
int
scsipi_sync_period_to_factor(period)
	int period;		/* ns * 10 */
{
	const struct scsipi_syncparam *sp;
	int n;

	for (sp = scsipi_syncparams, n = scsipi_nsyncparams;
	     n != 0; sp++, n--) {
		if (period <= sp->ss_period)
			return (sp->ss_factor);
	}

	return ((period / 10) / 4);
}

/*
 * Map a sync factor back to its transfer period (ns * 10).
 */
int
scsipi_sync_factor_to_period(factor)
	int factor;
{
	const struct scsipi_syncparam *sp;
	int n;

	for (sp = scsipi_syncparams, n = scsipi_nsyncparams;
	     n != 0; sp++, n--) {
		if (factor == sp->ss_factor)
			return (sp->ss_period);
	}

	return ((factor * 4) * 10);
}

/*
 * Map a sync factor to a transfer frequency in kHz
 * (10000000 / (ns * 10) == 1000000 / ns == kHz).
 */
int
scsipi_sync_factor_to_freq(factor)
	int factor;
{
	const struct scsipi_syncparam *sp;
	int n;

	for (sp = scsipi_syncparams, n = scsipi_nsyncparams;
	     n != 0; sp++, n--) {
		if (factor == sp->ss_factor)
			return (10000000 / sp->ss_period);
	}

	return (10000000 / ((factor * 4) * 10));
}
   2492 
   2493 #ifdef SCSIPI_DEBUG
   2494 /*
   2495  * Given a scsipi_xfer, dump the request, in all it's glory
   2496  */
/*
 * Dump every interesting field of an xfer to the console.
 */
void
show_scsipi_xs(xs)
	struct scsipi_xfer *xs;
{

	printf("xs(%p): ", xs);
	printf("xs_control(0x%08x)", xs->xs_control);
	printf("xs_status(0x%08x)", xs->xs_status);
	printf("periph(%p)", xs->xs_periph);
	printf("retr(0x%x)", xs->xs_retries);
	printf("timo(0x%x)", xs->timeout);
	printf("cmd(%p)", xs->cmd);
	printf("len(0x%x)", xs->cmdlen);
	printf("data(%p)", xs->data);
	printf("len(0x%x)", xs->datalen);
	printf("res(0x%x)", xs->resid);
	printf("err(0x%x)", xs->error);
	printf("bp(%p)", xs->bp);
	/* Finish with a decoded dump of the command itself. */
	show_scsipi_cmd(xs);
}
   2517 
/*
 * Print an xfer's command bytes (and up to 64 bytes of its data),
 * or "-RESET-" for a reset request.
 */
void
show_scsipi_cmd(xs)
	struct scsipi_xfer *xs;
{
	u_char *b = (u_char *) xs->cmd;
	int i = 0;

	scsipi_printaddr(xs->xs_periph);
	printf(" command: ");

	if ((xs->xs_control & XS_CTL_RESET) == 0) {
		/* Dump the command bytes, comma-separated. */
		while (i < xs->cmdlen) {
			if (i)
				printf(",");
			printf("0x%x", b[i++]);
		}
		printf("-[%d bytes]\n", xs->datalen);
		if (xs->datalen)
			show_mem(xs->data, min(64, xs->datalen));
	} else
		printf("-RESET-\n");
}
   2540 
   2541 void
   2542 show_mem(address, num)
   2543 	u_char *address;
   2544 	int num;
   2545 {
   2546 	int x;
   2547 
   2548 	printf("------------------------------");
   2549 	for (x = 0; x < num; x++) {
   2550 		if ((x % 16) == 0)
   2551 			printf("\n%03d: ", x);
   2552 		printf("%02x ", *address++);
   2553 	}
   2554 	printf("\n------------------------------\n");
   2555 }
   2556 #endif /* SCSIPI_DEBUG */
   2557