      1 /*	$NetBSD: scsipi_base.c,v 1.182 2019/03/28 10:44:29 kardel Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 1998, 1999, 2000, 2002, 2003, 2004 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
      9  * Simulation Facility, NASA Ames Research Center.
     10  *
     11  * Redistribution and use in source and binary forms, with or without
     12  * modification, are permitted provided that the following conditions
     13  * are met:
     14  * 1. Redistributions of source code must retain the above copyright
     15  *    notice, this list of conditions and the following disclaimer.
     16  * 2. Redistributions in binary form must reproduce the above copyright
     17  *    notice, this list of conditions and the following disclaimer in the
     18  *    documentation and/or other materials provided with the distribution.
     19  *
     20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     30  * POSSIBILITY OF SUCH DAMAGE.
     31  */
     32 
     33 #include <sys/cdefs.h>
     34 __KERNEL_RCSID(0, "$NetBSD: scsipi_base.c,v 1.182 2019/03/28 10:44:29 kardel Exp $");
     35 
     36 #ifdef _KERNEL_OPT
     37 #include "opt_scsi.h"
     38 #endif
     39 
     40 #include <sys/param.h>
     41 #include <sys/systm.h>
     42 #include <sys/kernel.h>
     43 #include <sys/buf.h>
     44 #include <sys/uio.h>
     45 #include <sys/malloc.h>
     46 #include <sys/pool.h>
     47 #include <sys/errno.h>
     48 #include <sys/device.h>
     49 #include <sys/proc.h>
     50 #include <sys/kthread.h>
     51 #include <sys/hash.h>
     52 #include <sys/atomic.h>
     53 
     54 #include <dev/scsipi/scsi_spc.h>
     55 #include <dev/scsipi/scsipi_all.h>
     56 #include <dev/scsipi/scsipi_disk.h>
     57 #include <dev/scsipi/scsipiconf.h>
     58 #include <dev/scsipi/scsipi_base.h>
     59 
     60 #include <dev/scsipi/scsi_all.h>
     61 #include <dev/scsipi/scsi_message.h>
     62 
     63 #include <machine/param.h>
     64 
     65 static int	scsipi_complete(struct scsipi_xfer *);
     66 static void	scsipi_request_sense(struct scsipi_xfer *);
     67 static int	scsipi_enqueue(struct scsipi_xfer *);
     68 static void	scsipi_run_queue(struct scsipi_channel *chan);
     69 
     70 static void	scsipi_completion_thread(void *);
     71 
     72 static void	scsipi_get_tag(struct scsipi_xfer *);
     73 static void	scsipi_put_tag(struct scsipi_xfer *);
     74 
     75 static int	scsipi_get_resource(struct scsipi_channel *);
     76 static void	scsipi_put_resource(struct scsipi_channel *);
     77 
     78 static void	scsipi_async_event_max_openings(struct scsipi_channel *,
     79 		    struct scsipi_max_openings *);
     80 static void	scsipi_async_event_channel_reset(struct scsipi_channel *);
     81 
     82 static void	scsipi_channel_freeze_locked(struct scsipi_channel *, int);
     83 
     84 static void	scsipi_adapter_lock(struct scsipi_adapter *adapt);
     85 static void	scsipi_adapter_unlock(struct scsipi_adapter *adapt);
     86 
     87 static void	scsipi_update_timeouts(struct scsipi_xfer *xs);
     88 
     89 static struct pool scsipi_xfer_pool;
     90 
     91 int scsipi_xs_count = 0;
     92 
     93 /*
     94  * scsipi_init:
     95  *
     96  *	Called when a scsibus or atapibus is attached to the system
     97  *	to initialize shared data structures.
     98  */
     99 void
    100 scsipi_init(void)
    101 {
    102 	static int scsipi_init_done;
    103 
    104 	if (scsipi_init_done)
    105 		return;
    106 	scsipi_init_done = 1;
    107 
    108 	/* Initialize the scsipi_xfer pool. */
    109 	pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
    110 	    0, 0, "scxspl", NULL, IPL_BIO);
    111 	if (pool_prime(&scsipi_xfer_pool,
    112 	    PAGE_SIZE / sizeof(struct scsipi_xfer)) == ENOMEM) {
    113 		printf("WARNING: not enough memory for scsipi_xfer_pool\n");
    114 	}
    115 
    116 	scsipi_ioctl_init();
    117 }
    118 
    119 /*
    120  * scsipi_channel_init:
    121  *
    122  *	Initialize a scsipi_channel when it is attached.
    123  */
    124 int
    125 scsipi_channel_init(struct scsipi_channel *chan)
    126 {
    127 	struct scsipi_adapter *adapt = chan->chan_adapter;
    128 	int i;
    129 
    130 	/* Initialize shared data. */
    131 	scsipi_init();
    132 
    133 	/* Initialize the queues. */
    134 	TAILQ_INIT(&chan->chan_queue);
    135 	TAILQ_INIT(&chan->chan_complete);
    136 
    137 	for (i = 0; i < SCSIPI_CHAN_PERIPH_BUCKETS; i++)
    138 		LIST_INIT(&chan->chan_periphtab[i]);
    139 
    140 	/*
    141 	 * Create the asynchronous completion thread.
    142 	 */
    143 	if (kthread_create(PRI_NONE, 0, NULL, scsipi_completion_thread, chan,
    144 	    &chan->chan_thread, "%s", chan->chan_name)) {
    145 		aprint_error_dev(adapt->adapt_dev, "unable to create completion thread for "
    146 		    "channel %d\n", chan->chan_channel);
    147 		panic("scsipi_channel_init");
    148 	}
    149 
    150 	return 0;
    151 }
    152 
    153 /*
    154  * scsipi_channel_shutdown:
    155  *
    156  *	Shutdown a scsipi_channel.
    157  */
    158 void
    159 scsipi_channel_shutdown(struct scsipi_channel *chan)
    160 {
    161 
    162 	mutex_enter(chan_mtx(chan));
    163 	/*
    164 	 * Shut down the completion thread.
    165 	 */
    166 	chan->chan_tflags |= SCSIPI_CHANT_SHUTDOWN;
    167 	cv_broadcast(chan_cv_complete(chan));
    168 
    169 	/*
    170 	 * Now wait for the thread to exit.
    171 	 */
    172 	while (chan->chan_thread != NULL)
    173 		cv_wait(chan_cv_thread(chan), chan_mtx(chan));
    174 	mutex_exit(chan_mtx(chan));
    175 }
    176 
    177 static uint32_t
    178 scsipi_chan_periph_hash(uint64_t t, uint64_t l)
    179 {
    180 	uint32_t hash;
    181 
    182 	hash = hash32_buf(&t, sizeof(t), HASH32_BUF_INIT);
    183 	hash = hash32_buf(&l, sizeof(l), hash);
    184 
    185 	return hash & SCSIPI_CHAN_PERIPH_HASHMASK;
    186 }
    187 
    188 /*
    189  * scsipi_insert_periph:
    190  *
    191  *	Insert a periph into the channel.
    192  */
    193 void
    194 scsipi_insert_periph(struct scsipi_channel *chan, struct scsipi_periph *periph)
    195 {
    196 	uint32_t hash;
    197 
    198 	hash = scsipi_chan_periph_hash(periph->periph_target,
    199 	    periph->periph_lun);
    200 
    201 	mutex_enter(chan_mtx(chan));
    202 	LIST_INSERT_HEAD(&chan->chan_periphtab[hash], periph, periph_hash);
    203 	mutex_exit(chan_mtx(chan));
    204 }
    205 
    206 /*
    207  * scsipi_remove_periph:
    208  *
    209  *	Remove a periph from the channel.
    210  */
    211 void
    212 scsipi_remove_periph(struct scsipi_channel *chan,
    213     struct scsipi_periph *periph)
    214 {
    215 
    216 	LIST_REMOVE(periph, periph_hash);
    217 }
    218 
    219 /*
    220  * scsipi_lookup_periph:
    221  *
    222  *	Lookup a periph on the specified channel.
    223  */
    224 static struct scsipi_periph *
    225 scsipi_lookup_periph_internal(struct scsipi_channel *chan, int target, int lun, bool lock)
    226 {
    227 	struct scsipi_periph *periph;
    228 	uint32_t hash;
    229 
    230 	if (target >= chan->chan_ntargets ||
    231 	    lun >= chan->chan_nluns)
    232 		return NULL;
    233 
    234 	hash = scsipi_chan_periph_hash(target, lun);
    235 
    236 	if (lock)
    237 		mutex_enter(chan_mtx(chan));
    238 	LIST_FOREACH(periph, &chan->chan_periphtab[hash], periph_hash) {
    239 		if (periph->periph_target == target &&
    240 		    periph->periph_lun == lun)
    241 			break;
    242 	}
    243 	if (lock)
    244 		mutex_exit(chan_mtx(chan));
    245 
    246 	return periph;
    247 }
    248 
    249 struct scsipi_periph *
    250 scsipi_lookup_periph_locked(struct scsipi_channel *chan, int target, int lun)
    251 {
    252 	return scsipi_lookup_periph_internal(chan, target, lun, false);
    253 }
    254 
    255 struct scsipi_periph *
    256 scsipi_lookup_periph(struct scsipi_channel *chan, int target, int lun)
    257 {
    258 	return scsipi_lookup_periph_internal(chan, target, lun, true);
    259 }
    260 
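        /*
         * Illustrative sketch: mapping a target/lun pair back to its
         * periph, e.g. when decoding an adapter event.  The plain variant
         * takes the channel mutex itself; the _locked variant is for
         * callers that already hold it:
         *
         *	periph = scsipi_lookup_periph(chan, target, lun);
         *	if (periph == NULL)
         *		return;		(no device configured at that address)
         */
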
    261 /*
    262  * scsipi_get_resource:
    263  *
    264  *	Allocate a single xfer `resource' from the channel.
    265  *
    266  *	NOTE: Must be called with channel lock held
    267  */
    268 static int
    269 scsipi_get_resource(struct scsipi_channel *chan)
    270 {
    271 	struct scsipi_adapter *adapt = chan->chan_adapter;
    272 
    273 	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
    274 		if (chan->chan_openings > 0) {
    275 			chan->chan_openings--;
    276 			return 1;
    277 		}
    278 		return 0;
    279 	}
    280 
    281 	if (adapt->adapt_openings > 0) {
    282 		adapt->adapt_openings--;
    283 		return 1;
    284 	}
    285 	return 0;
    286 }
    287 
    288 /*
    289  * scsipi_grow_resources:
    290  *
    291  *	Attempt to grow resources for a channel.  If this succeeds,
    292  *	we allocate one for our caller.
    293  *
    294  *	NOTE: Must be called with channel lock held
    295  */
    296 static inline int
    297 scsipi_grow_resources(struct scsipi_channel *chan)
    298 {
    299 
    300 	if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
    301 		if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
    302 			mutex_exit(chan_mtx(chan));
    303 			scsipi_adapter_request(chan,
    304 			    ADAPTER_REQ_GROW_RESOURCES, NULL);
    305 			mutex_enter(chan_mtx(chan));
    306 			return scsipi_get_resource(chan);
    307 		}
    308 		/*
    309 		 * ask the channel thread to do it. It'll have to thaw the
    310 		 * queue
    311 		 */
    312 		scsipi_channel_freeze_locked(chan, 1);
    313 		chan->chan_tflags |= SCSIPI_CHANT_GROWRES;
    314 		cv_broadcast(chan_cv_complete(chan));
    315 		return 0;
    316 	}
    317 
    318 	return 0;
    319 }
    320 
    321 /*
    322  * scsipi_put_resource:
    323  *
    324  *	Free a single xfer `resource' to the channel.
    325  *
    326  *	NOTE: Must be called with channel lock held
    327  */
    328 static void
    329 scsipi_put_resource(struct scsipi_channel *chan)
    330 {
    331 	struct scsipi_adapter *adapt = chan->chan_adapter;
    332 
    333 	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
    334 		chan->chan_openings++;
    335 	else
    336 		adapt->adapt_openings++;
    337 }
    338 
    339 /*
    340  * scsipi_get_tag:
    341  *
    342  *	Get a tag ID for the specified xfer.
    343  *
    344  *	NOTE: Must be called with channel lock held
    345  */
    346 static void
    347 scsipi_get_tag(struct scsipi_xfer *xs)
    348 {
    349 	struct scsipi_periph *periph = xs->xs_periph;
    350 	int bit, tag;
    351 	u_int word;
    352 
    353 	KASSERT(mutex_owned(chan_mtx(periph->periph_channel)));
    354 
    355 	bit = 0;	/* XXX gcc */
    356 	for (word = 0; word < PERIPH_NTAGWORDS; word++) {
    357 		bit = ffs(periph->periph_freetags[word]);
    358 		if (bit != 0)
    359 			break;
    360 	}
    361 #ifdef DIAGNOSTIC
    362 	if (word == PERIPH_NTAGWORDS) {
    363 		scsipi_printaddr(periph);
    364 		printf("no free tags\n");
    365 		panic("scsipi_get_tag");
    366 	}
    367 #endif
    368 
    369 	bit -= 1;
    370 	periph->periph_freetags[word] &= ~(1 << bit);
    371 	tag = (word << 5) | bit;
    372 
    373 	/* XXX Should eventually disallow this completely. */
    374 	if (tag >= periph->periph_openings) {
    375 		scsipi_printaddr(periph);
    376 		printf("WARNING: tag %d greater than available openings %d\n",
    377 		    tag, periph->periph_openings);
    378 	}
    379 
    380 	xs->xs_tag_id = tag;
    381 }
    382 
    383 /*
    384  * scsipi_put_tag:
    385  *
    386  *	Put the tag ID for the specified xfer back into the pool.
    387  *
    388  *	NOTE: Must be called with channel lock held
    389  */
    390 static void
    391 scsipi_put_tag(struct scsipi_xfer *xs)
    392 {
    393 	struct scsipi_periph *periph = xs->xs_periph;
    394 	int word, bit;
    395 
    396 	KASSERT(mutex_owned(chan_mtx(periph->periph_channel)));
    397 
    398 	word = xs->xs_tag_id >> 5;
    399 	bit = xs->xs_tag_id & 0x1f;
    400 
    401 	periph->periph_freetags[word] |= (1 << bit);
    402 }
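
        /*
         * Tag IDs are encoded as (word << 5) | bit, so each 32-bit entry
         * of periph_freetags tracks 32 tags.  For example, tag 37 lives in
         * word 1 (37 >> 5), bit 5 (37 & 0x1f).
         */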
    403 
    404 /*
    405  * scsipi_get_xs:
    406  *
    407  *	Allocate an xfer descriptor and associate it with the
    408  *	specified peripheral.  If the peripheral has no more
    409  *	available command openings, we either block waiting for
    410  *	one to become available, or fail.
    411  *
    412  *	When this routine is called with the channel lock held
    413  *	the flags must include XS_CTL_NOSLEEP.
    414  */
    415 struct scsipi_xfer *
    416 scsipi_get_xs(struct scsipi_periph *periph, int flags)
    417 {
    418 	struct scsipi_xfer *xs;
    419 	bool lock = (flags & XS_CTL_NOSLEEP) == 0;
    420 
    421 	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));
    422 
    423 	KASSERT(!cold);
    424 
    425 #ifdef DIAGNOSTIC
    426 	/*
    427 	 * URGENT commands can never be ASYNC.
    428 	 */
    429 	if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
    430 	    (XS_CTL_URGENT|XS_CTL_ASYNC)) {
    431 		scsipi_printaddr(periph);
    432 		printf("URGENT and ASYNC\n");
    433 		panic("scsipi_get_xs");
    434 	}
    435 #endif
    436 
    437 	/*
    438 	 * Wait for a command opening to become available.  Rules:
    439 	 *
    440 	 *	- All xfers must wait for an available opening.
    441 	 *	  Exception: URGENT xfers can proceed when
    442 	 *	  active == openings, because we use the opening
    443 	 *	  of the command we're recovering for.
    444 	 *	- If the periph has sense pending, only URGENT & REQSENSE
    445 	 *	  xfers may proceed.
    446 	 *
    447 	 *	- If the periph is recovering, only URGENT xfers may
    448 	 *	  proceed.
    449 	 *
    450 	 *	- If the periph is currently executing a recovery
    451 	 *	  command, URGENT commands must block, because only
    452 	 *	  one recovery command can execute at a time.
    453 	 */
    454 	if (lock)
    455 		mutex_enter(chan_mtx(periph->periph_channel));
    456 	for (;;) {
    457 		if (flags & XS_CTL_URGENT) {
    458 			if (periph->periph_active > periph->periph_openings)
    459 				goto wait_for_opening;
    460 			if (periph->periph_flags & PERIPH_SENSE) {
    461 				if ((flags & XS_CTL_REQSENSE) == 0)
    462 					goto wait_for_opening;
    463 			} else {
    464 				if ((periph->periph_flags &
    465 				    PERIPH_RECOVERY_ACTIVE) != 0)
    466 					goto wait_for_opening;
    467 				periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
    468 			}
    469 			break;
    470 		}
    471 		if (periph->periph_active >= periph->periph_openings ||
    472 		    (periph->periph_flags & PERIPH_RECOVERING) != 0)
    473 			goto wait_for_opening;
    474 		periph->periph_active++;
    475 		KASSERT(mutex_owned(chan_mtx(periph->periph_channel)));
    476 		break;
    477 
    478  wait_for_opening:
    479 		if (flags & XS_CTL_NOSLEEP) {
    480 			KASSERT(!lock);
    481 			return NULL;
    482 		}
    483 		KASSERT(lock);
    484 		SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
    485 		periph->periph_flags |= PERIPH_WAITING;
    486 		cv_wait(periph_cv_periph(periph),
    487 		    chan_mtx(periph->periph_channel));
    488 	}
    489 	if (lock)
    490 		mutex_exit(chan_mtx(periph->periph_channel));
    491 
    492 	SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
    493 	xs = pool_get(&scsipi_xfer_pool,
    494 	    ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
    495 	if (xs == NULL) {
    496 		if (lock)
    497 			mutex_enter(chan_mtx(periph->periph_channel));
    498 		if (flags & XS_CTL_URGENT) {
    499 			if ((flags & XS_CTL_REQSENSE) == 0)
    500 				periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
    501 		} else
    502 			periph->periph_active--;
    503 		if (lock)
    504 			mutex_exit(chan_mtx(periph->periph_channel));
    505 		scsipi_printaddr(periph);
    506 		printf("unable to allocate %sscsipi_xfer\n",
    507 		    (flags & XS_CTL_URGENT) ? "URGENT " : "");
    508 	}
    509 
    510 	SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));
    511 
    512 	if (xs != NULL) {
    513 		memset(xs, 0, sizeof(*xs));
    514 		callout_init(&xs->xs_callout, 0);
    515 		xs->xs_periph = periph;
    516 		xs->xs_control = flags;
    517 		xs->xs_status = 0;
    518 		if ((flags & XS_CTL_NOSLEEP) == 0)
    519 			mutex_enter(chan_mtx(periph->periph_channel));
    520 		TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
    521 		KASSERT(mutex_owned(chan_mtx(periph->periph_channel)));
    522 		if ((flags & XS_CTL_NOSLEEP) == 0)
    523 			mutex_exit(chan_mtx(periph->periph_channel));
    524 	}
    525 	return xs;
    526 }
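
        /*
         * Illustrative usage sketch: from interrupt context a caller must
         * pass XS_CTL_NOSLEEP (and, per the note above, already hold the
         * channel lock) and be prepared for failure:
         *
         *	xs = scsipi_get_xs(periph, XS_CTL_NOSLEEP | XS_CTL_ASYNC);
         *	if (xs == NULL)
         *		(defer the request and retry later)
         *
         * A thread-context caller may omit XS_CTL_NOSLEEP and block until
         * an opening and a descriptor become available.  Every successful
         * scsipi_get_xs() is eventually balanced by scsipi_put_xs(),
         * normally from scsipi_complete().
         */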
    527 
    528 /*
    529  * scsipi_put_xs:
    530  *
    531  *	Release an xfer descriptor, decreasing the outstanding command
    532  *	count for the peripheral.  If there is a thread waiting for
    533  *	an opening, wake it up.  If not, kick any queued I/O the
    534  *	peripheral may have.
    535  *
    536  *	NOTE: Must be called with channel lock held
    537  */
    538 void
    539 scsipi_put_xs(struct scsipi_xfer *xs)
    540 {
    541 	struct scsipi_periph *periph = xs->xs_periph;
    542 	int flags = xs->xs_control;
    543 
    544 	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));
    545 	KASSERT(mutex_owned(chan_mtx(periph->periph_channel)));
    546 
    547 	TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
    548 	callout_destroy(&xs->xs_callout);
    549 	pool_put(&scsipi_xfer_pool, xs);
    550 
    551 #ifdef DIAGNOSTIC
    552 	if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
    553 	    periph->periph_active == 0) {
    554 		scsipi_printaddr(periph);
    555 		printf("recovery without a command to recover for\n");
    556 		panic("scsipi_put_xs");
    557 	}
    558 #endif
    559 
    560 	if (flags & XS_CTL_URGENT) {
    561 		if ((flags & XS_CTL_REQSENSE) == 0)
    562 			periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
    563 	} else
    564 		periph->periph_active--;
    565 	if (periph->periph_active == 0 &&
    566 	    (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
    567 		periph->periph_flags &= ~PERIPH_WAITDRAIN;
    568 		cv_broadcast(periph_cv_active(periph));
    569 	}
    570 
    571 	if (periph->periph_flags & PERIPH_WAITING) {
    572 		periph->periph_flags &= ~PERIPH_WAITING;
    573 		cv_broadcast(periph_cv_periph(periph));
    574 	} else {
    575 		if (periph->periph_switch->psw_start != NULL &&
    576 		    device_is_active(periph->periph_dev)) {
    577 			SC_DEBUG(periph, SCSIPI_DB2,
    578 			    ("calling private start()\n"));
    579 			(*periph->periph_switch->psw_start)(periph);
    580 		}
    581 	}
    582 }
    583 
    584 /*
    585  * scsipi_channel_freeze:
    586  *
    587  *	Freeze a channel's xfer queue.
    588  */
    589 void
    590 scsipi_channel_freeze(struct scsipi_channel *chan, int count)
    591 {
    592 	bool lock = chan_running(chan) > 0;
    593 
    594 	if (lock)
    595 		mutex_enter(chan_mtx(chan));
    596 	chan->chan_qfreeze += count;
    597 	if (lock)
    598 		mutex_exit(chan_mtx(chan));
    599 }
    600 
    601 static void
    602 scsipi_channel_freeze_locked(struct scsipi_channel *chan, int count)
    603 {
    604 
    605 	chan->chan_qfreeze += count;
    606 }
    607 
    608 /*
    609  * scsipi_channel_thaw:
    610  *
    611  *	Thaw a channel's xfer queue.
    612  */
    613 void
    614 scsipi_channel_thaw(struct scsipi_channel *chan, int count)
    615 {
    616 	bool lock = chan_running(chan) > 0;
    617 
    618 	if (lock)
    619 		mutex_enter(chan_mtx(chan));
    620 	chan->chan_qfreeze -= count;
    621 	/*
    622 	 * Don't let the freeze count go negative.
    623 	 *
    624 	 * Presumably the adapter driver could keep track of this,
    625 	 * but it might just be easier to do this here so as to allow
    626 	 * multiple callers, including those outside the adapter driver.
    627 	 */
    628 	if (chan->chan_qfreeze < 0) {
    629 		chan->chan_qfreeze = 0;
    630 	}
    631 	if (lock)
    632 		mutex_exit(chan_mtx(chan));
    633 
    634 	/*
    635 	 * Don't kick the queue until the channel is running.
    636 	 */
    637 	if (!lock)
    638 		return;
    639 
    640 	/*
    641 	 * Kick the channel's queue here.  Note, we may be running in
    642 	 * interrupt context (softclock or HBA's interrupt), so the adapter
    643 	 * driver had better not sleep.
    644 	 */
    645 	if (chan->chan_qfreeze == 0)
    646 		scsipi_run_queue(chan);
    647 }
    648 
    649 /*
    650  * scsipi_channel_timed_thaw:
    651  *
    652  *	Thaw a channel after some time has expired. This will also
    653  * 	run the channel's queue if the freeze count has reached 0.
    654  */
    655 void
    656 scsipi_channel_timed_thaw(void *arg)
    657 {
    658 	struct scsipi_channel *chan = arg;
    659 
    660 	scsipi_channel_thaw(chan, 1);
    661 }
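
        /*
         * Illustrative sketch (sc_thaw_ch is a hypothetical callout in an
         * adapter softc): a driver that must back off after a transient
         * condition can freeze the channel and schedule a timed thaw:
         *
         *	scsipi_channel_freeze(chan, 1);
         *	callout_reset(&sc->sc_thaw_ch, hz,
         *	    scsipi_channel_timed_thaw, chan);
         */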
    662 
    663 /*
    664  * scsipi_periph_freeze, scsipi_periph_freeze_locked:
    665  *
    666  *	Freeze a device's xfer queue.
    667  */
    668 void
    669 scsipi_periph_freeze_locked(struct scsipi_periph *periph, int count)
    670 {
    671 
    672 	periph->periph_qfreeze += count;
    673 }
    674 
    675 /*
    676  * scsipi_periph_thaw, scsipi_periph_thaw_locked:
    677  *
    678  *	Thaw a device's xfer queue.
    679  */
    680 void
    681 scsipi_periph_thaw_locked(struct scsipi_periph *periph, int count)
    682 {
    683 
    684 	periph->periph_qfreeze -= count;
    685 #ifdef DIAGNOSTIC
    686 	if (periph->periph_qfreeze < 0) {
    687 		static const char pc[] = "periph freeze count < 0";
    688 		scsipi_printaddr(periph);
    689 		printf("%s\n", pc);
    690 		panic(pc);
    691 	}
    692 #endif
    693 	if (periph->periph_qfreeze == 0 &&
    694 	    (periph->periph_flags & PERIPH_WAITING) != 0)
    695 		cv_broadcast(periph_cv_periph(periph));
    696 }
    697 
    698 void
    699 scsipi_periph_freeze(struct scsipi_periph *periph, int count)
    700 {
    701 
    702 	mutex_enter(chan_mtx(periph->periph_channel));
    703 	scsipi_periph_freeze_locked(periph, count);
    704 	mutex_exit(chan_mtx(periph->periph_channel));
    705 }
    706 
    707 void
    708 scsipi_periph_thaw(struct scsipi_periph *periph, int count)
    709 {
    710 
    711 	mutex_enter(chan_mtx(periph->periph_channel));
    712 	scsipi_periph_thaw_locked(periph, count);
    713 	mutex_exit(chan_mtx(periph->periph_channel));
    714 }
    715 
    716 /*
    717  * scsipi_periph_timed_thaw:
    718  *
    719  *	Thaw a device after some time has expired.
    720  */
    721 void
    722 scsipi_periph_timed_thaw(void *arg)
    723 {
    724 	struct scsipi_periph *periph = arg;
    725 	struct scsipi_channel *chan = periph->periph_channel;
    726 
    727 	callout_stop(&periph->periph_callout);
    728 
    729 	mutex_enter(chan_mtx(chan));
    730 	scsipi_periph_thaw_locked(periph, 1);
    731 	if ((periph->periph_channel->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
    732 		/*
    733 		 * Kick the channel's queue here.  Note, we're running in
    734 		 * interrupt context (softclock), so the adapter driver
    735 		 * had better not sleep.
    736 		 */
    737 		mutex_exit(chan_mtx(chan));
    738 		scsipi_run_queue(periph->periph_channel);
    739 	} else {
    740 		/*
    741 		 * Tell the completion thread to kick the channel's queue here.
    742 		 */
    743 		periph->periph_channel->chan_tflags |= SCSIPI_CHANT_KICK;
    744 		cv_broadcast(chan_cv_complete(chan));
    745 		mutex_exit(chan_mtx(chan));
    746 	}
    747 }
    748 
    749 /*
    750  * scsipi_wait_drain:
    751  *
    752  *	Wait for a periph's pending xfers to drain.
    753  */
    754 void
    755 scsipi_wait_drain(struct scsipi_periph *periph)
    756 {
    757 	struct scsipi_channel *chan = periph->periph_channel;
    758 
    759 	mutex_enter(chan_mtx(chan));
    760 	while (periph->periph_active != 0) {
    761 		periph->periph_flags |= PERIPH_WAITDRAIN;
    762 		cv_wait(periph_cv_active(periph), chan_mtx(chan));
    763 	}
    764 	mutex_exit(chan_mtx(chan));
    765 }
    766 
    767 /*
    768  * scsipi_kill_pending:
    769  *
    770  *	Kill off all pending xfers for a periph.
    771  *
    772  *	NOTE: Must be called with channel lock held
    773  */
    774 void
    775 scsipi_kill_pending(struct scsipi_periph *periph)
    776 {
    777 	struct scsipi_channel *chan = periph->periph_channel;
    778 
    779 	(*chan->chan_bustype->bustype_kill_pending)(periph);
    780 	while (periph->periph_active != 0) {
    781 		periph->periph_flags |= PERIPH_WAITDRAIN;
    782 		cv_wait(periph_cv_active(periph), chan_mtx(chan));
    783 	}
    784 }
    785 
    786 /*
    787  * scsipi_print_cdb:
    788  * prints a command descriptor block (for debug purpose, error messages,
    789  * prints a command descriptor block (for debug purposes, error messages,
    790  */
    791 void
    792 scsipi_print_cdb(struct scsipi_generic *cmd)
    793 {
    794 	int i, j;
    795 
    796  	printf("0x%02x", cmd->opcode);
    797 
    798  	switch (CDB_GROUPID(cmd->opcode)) {
    799  	case CDB_GROUPID_0:
    800  		j = CDB_GROUP0;
    801  		break;
    802  	case CDB_GROUPID_1:
    803  		j = CDB_GROUP1;
    804  		break;
    805  	case CDB_GROUPID_2:
    806  		j = CDB_GROUP2;
    807  		break;
    808  	case CDB_GROUPID_3:
    809  		j = CDB_GROUP3;
    810  		break;
    811  	case CDB_GROUPID_4:
    812  		j = CDB_GROUP4;
    813  		break;
    814  	case CDB_GROUPID_5:
    815  		j = CDB_GROUP5;
    816  		break;
    817  	case CDB_GROUPID_6:
    818  		j = CDB_GROUP6;
    819  		break;
    820  	case CDB_GROUPID_7:
    821  		j = CDB_GROUP7;
    822  		break;
    823  	default:
    824  		j = 0;
    825  	}
    826  	if (j == 0)
    827  		j = sizeof (cmd->bytes);
    828  	for (i = 0; i < j-1; i++) /* already done the opcode */
    829  		printf(" %02x", cmd->bytes[i]);
    830 }
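
        /*
         * For example, a six-byte READ command (opcode 0x08, group 0)
         * prints as something like "0x08 00 03 00 01 00": the opcode
         * followed by the remaining five CDB bytes.
         */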
    831 
    832 /*
    833  * scsipi_interpret_sense:
    834  *
    835  *	Look at the returned sense and act on the error, determining
    836  *	the unix error number to pass back.  (0 = report no error)
    837  *
    838  *	NOTE: If we return ERESTART, we are expected to have
    839  *	thawed the device!
    840  *
    841  *	THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
    842  */
    843 int
    844 scsipi_interpret_sense(struct scsipi_xfer *xs)
    845 {
    846 	struct scsi_sense_data *sense;
    847 	struct scsipi_periph *periph = xs->xs_periph;
    848 	u_int8_t key;
    849 	int error;
    850 	u_int32_t info;
    851 	static const char *error_mes[] = {
    852 		"soft error (corrected)",
    853 		"not ready", "medium error",
    854 		"non-media hardware failure", "illegal request",
    855 		"unit attention", "readonly device",
    856 		"no data found", "vendor unique",
    857 		"copy aborted", "command aborted",
    858 		"search returned equal", "volume overflow",
    859 		"verify miscompare", "unknown error key"
    860 	};
    861 
    862 	sense = &xs->sense.scsi_sense;
    863 #ifdef SCSIPI_DEBUG
    864 	if (periph->periph_flags & SCSIPI_DB1) {
    865 	        int count, len;
    866 		scsipi_printaddr(periph);
    867 		printf(" sense debug information:\n");
    868 		printf("\tcode 0x%x valid %d\n",
    869 			SSD_RCODE(sense->response_code),
    870 			sense->response_code & SSD_RCODE_VALID ? 1 : 0);
    871 		printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
    872 			sense->segment,
    873 			SSD_SENSE_KEY(sense->flags),
    874 			sense->flags & SSD_ILI ? 1 : 0,
    875 			sense->flags & SSD_EOM ? 1 : 0,
    876 			sense->flags & SSD_FILEMARK ? 1 : 0);
    877 		printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d "
    878 			"extra bytes\n",
    879 			sense->info[0],
    880 			sense->info[1],
    881 			sense->info[2],
    882 			sense->info[3],
    883 			sense->extra_len);
    884 		len = SSD_ADD_BYTES_LIM(sense);
    885 		printf("\textra (up to %d bytes): ", len);
    886 		for (count = 0; count < len; count++)
    887 			printf("0x%x ", sense->csi[count]);
    888 		printf("\n");
    889 	}
    890 #endif
    891 
    892 	/*
    893 	 * If the periph has its own error handler, call it first.
    894 	 * If it returns a legit error value, return that, otherwise
    895 	 * it wants us to continue with normal error processing.
    896 	 */
    897 	if (periph->periph_switch->psw_error != NULL) {
    898 		SC_DEBUG(periph, SCSIPI_DB2,
    899 		    ("calling private err_handler()\n"));
    900 		error = (*periph->periph_switch->psw_error)(xs);
    901 		if (error != EJUSTRETURN)
    902 			return error;
    903 	}
    904 	/* otherwise use the default */
    905 	switch (SSD_RCODE(sense->response_code)) {
    906 
    907 		/*
    908 		 * Old SCSI-1 and SASI devices respond with
    909 		 * codes other than 70.
    910 		 */
    911 	case 0x00:		/* no error (command completed OK) */
    912 		return 0;
    913 	case 0x04:		/* drive not ready after it was selected */
    914 		if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
    915 			periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
    916 		if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
    917 			return 0;
    918 		/* XXX - display some sort of error here? */
    919 		return EIO;
    920 	case 0x20:		/* invalid command */
    921 		if ((xs->xs_control &
    922 		     XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
    923 			return 0;
    924 		return EINVAL;
    925 	case 0x25:		/* invalid LUN (Adaptec ACB-4000) */
    926 		return EACCES;
    927 
    928 		/*
    929 		 * If it's code 70, use the extended stuff and
    930 		 * interpret the key
    931 		 */
    932 	case 0x71:		/* delayed error */
    933 		scsipi_printaddr(periph);
    934 		key = SSD_SENSE_KEY(sense->flags);
    935 		printf(" DEFERRED ERROR, key = 0x%x\n", key);
    936 		/* FALLTHROUGH */
    937 	case 0x70:
    938 		if ((sense->response_code & SSD_RCODE_VALID) != 0)
    939 			info = _4btol(sense->info);
    940 		else
    941 			info = 0;
    942 		key = SSD_SENSE_KEY(sense->flags);
    943 
    944 		switch (key) {
    945 		case SKEY_NO_SENSE:
    946 		case SKEY_RECOVERED_ERROR:
    947 			if (xs->resid == xs->datalen && xs->datalen) {
    948 				/*
    949 				 * Why is this here?
    950 				 */
    951 				xs->resid = 0;	/* not short read */
    952 			}
    953 			error = 0;
    954 			break;
    955 		case SKEY_EQUAL:
    956 			error = 0;
    957 			break;
    958 		case SKEY_NOT_READY:
    959 			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
    960 				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
    961 			if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
    962 				return 0;
    963 			if (sense->asc == 0x3A) {
    964 				error = ENODEV; /* Medium not present */
    965 				if (xs->xs_control & XS_CTL_SILENT_NODEV)
    966 					return error;
    967 			} else
    968 				error = EIO;
    969 			if ((xs->xs_control & XS_CTL_SILENT) != 0)
    970 				return error;
    971 			break;
    972 		case SKEY_ILLEGAL_REQUEST:
    973 			if ((xs->xs_control &
    974 			     XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
    975 				return 0;
    976 			/*
    977 			 * Handle the case where a device reports
    978 			 * Logical Unit Not Supported during discovery.
    979 			 */
    980 			if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
    981 			    sense->asc == 0x25 &&
    982 			    sense->ascq == 0x00)
    983 				return EINVAL;
    984 			if ((xs->xs_control & XS_CTL_SILENT) != 0)
    985 				return EIO;
    986 			error = EINVAL;
    987 			break;
    988 		case SKEY_UNIT_ATTENTION:
    989 			if (sense->asc == 0x29 &&
    990 			    sense->ascq == 0x00) {
    991 				/* device or bus reset */
    992 				return ERESTART;
    993 			}
    994 			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
    995 				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
    996 			if ((xs->xs_control &
    997 			     XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
    998 				/* XXX Should reupload any transient state. */
    999 				(periph->periph_flags &
   1000 				 PERIPH_REMOVABLE) == 0) {
   1001 				return ERESTART;
   1002 			}
   1003 			if ((xs->xs_control & XS_CTL_SILENT) != 0)
   1004 				return EIO;
   1005 			error = EIO;
   1006 			break;
   1007 		case SKEY_DATA_PROTECT:
   1008 			error = EROFS;
   1009 			break;
   1010 		case SKEY_BLANK_CHECK:
   1011 			error = 0;
   1012 			break;
   1013 		case SKEY_ABORTED_COMMAND:
   1014 			if (xs->xs_retries != 0) {
   1015 				xs->xs_retries--;
   1016 				error = ERESTART;
   1017 			} else
   1018 				error = EIO;
   1019 			break;
   1020 		case SKEY_VOLUME_OVERFLOW:
   1021 			error = ENOSPC;
   1022 			break;
   1023 		default:
   1024 			error = EIO;
   1025 			break;
   1026 		}
   1027 
   1028 		/* Print verbose decode if appropriate and possible */
   1029 		if ((key == 0) ||
   1030 		    ((xs->xs_control & XS_CTL_SILENT) != 0) ||
   1031 		    (scsipi_print_sense(xs, 0) != 0))
   1032 			return error;
   1033 
   1034 		/* Print brief(er) sense information */
   1035 		scsipi_printaddr(periph);
   1036 		printf("%s", error_mes[key - 1]);
   1037 		if ((sense->response_code & SSD_RCODE_VALID) != 0) {
   1038 			switch (key) {
   1039 			case SKEY_NOT_READY:
   1040 			case SKEY_ILLEGAL_REQUEST:
   1041 			case SKEY_UNIT_ATTENTION:
   1042 			case SKEY_DATA_PROTECT:
   1043 				break;
   1044 			case SKEY_BLANK_CHECK:
   1045 				printf(", requested size: %d (decimal)",
   1046 				    info);
   1047 				break;
   1048 			case SKEY_ABORTED_COMMAND:
   1049 				if (xs->xs_retries)
   1050 					printf(", retrying");
   1051 				printf(", cmd 0x%x, info 0x%x",
   1052 				    xs->cmd->opcode, info);
   1053 				break;
   1054 			default:
   1055 				printf(", info = %d (decimal)", info);
   1056 			}
   1057 		}
   1058 		if (sense->extra_len != 0) {
   1059 			int n;
   1060 			printf(", data =");
   1061 			for (n = 0; n < sense->extra_len; n++)
   1062 				printf(" %02x",
   1063 				    sense->csi[n]);
   1064 		}
   1065 		printf("\n");
   1066 		return error;
   1067 
   1068 	/*
   1069 	 * Some other code, just report it
   1070 	 */
   1071 	default:
   1072 #if    defined(SCSIDEBUG) || defined(DEBUG)
   1073 	{
   1074 		static const char *uc = "undecodable sense error";
   1075 		int i;
   1076 		u_int8_t *cptr = (u_int8_t *) sense;
   1077 		scsipi_printaddr(periph);
   1078 		if (xs->cmd == &xs->cmdstore) {
   1079 			printf("%s for opcode 0x%x, data=",
   1080 			    uc, xs->cmdstore.opcode);
   1081 		} else {
   1082 			printf("%s, data=", uc);
   1083 		}
   1084 		for (i = 0; i < sizeof (*sense); i++)
   1085 			printf(" 0x%02x", *(cptr++) & 0xff);
   1086 		printf("\n");
   1087 	}
   1088 #else
   1089 		scsipi_printaddr(periph);
   1090 		printf("Sense Error Code 0x%x",
   1091 			SSD_RCODE(sense->response_code));
   1092 		if ((sense->response_code & SSD_RCODE_VALID) != 0) {
   1093 			struct scsi_sense_data_unextended *usense =
   1094 			    (struct scsi_sense_data_unextended *)sense;
   1095 			printf(" at block no. %d (decimal)",
   1096 			    _3btol(usense->block));
   1097 		}
   1098 		printf("\n");
   1099 #endif
   1100 		return EIO;
   1101 	}
   1102 }
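
        /*
         * Illustrative sketch (xx_error is a hypothetical driver hook, not
         * an existing one): a periph driver that only wants to special-case
         * a single sense condition can supply psw_error and defer everything
         * else to the default handler above by returning EJUSTRETURN.  Note
         * that a handler returning ERESTART must thaw the periph itself:
         *
         *	static int
         *	xx_error(struct scsipi_xfer *xs)
         *	{
         *		struct scsi_sense_data *sense = &xs->sense.scsi_sense;
         *
         *		if (SSD_SENSE_KEY(sense->flags) == SKEY_NOT_READY &&
         *		    sense->asc == 0x04)
         *			return EIO;	(don't bother with retries)
         *		return EJUSTRETURN;	(normal error processing)
         *	}
         */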
   1103 
   1104 /*
   1105  * scsipi_test_unit_ready:
   1106  *
   1107  *	Issue a `test unit ready' request.
   1108  */
   1109 int
   1110 scsipi_test_unit_ready(struct scsipi_periph *periph, int flags)
   1111 {
   1112 	struct scsi_test_unit_ready cmd;
   1113 	int retries;
   1114 
   1115 	/* some ATAPI drives don't support TEST UNIT READY. Sigh */
   1116 	if (periph->periph_quirks & PQUIRK_NOTUR)
   1117 		return 0;
   1118 
   1119 	if (flags & XS_CTL_DISCOVERY)
   1120 		retries = 0;
   1121 	else
   1122 		retries = SCSIPIRETRIES;
   1123 
   1124 	memset(&cmd, 0, sizeof(cmd));
   1125 	cmd.opcode = SCSI_TEST_UNIT_READY;
   1126 
   1127 	return scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
   1128 	    retries, 10000, NULL, flags);
   1129 }
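
        /*
         * Illustrative call: quietly probe whether a unit is ready, e.g.
         * before touching removable media:
         *
         *	if (scsipi_test_unit_ready(periph, XS_CTL_SILENT) == 0)
         *		(unit is ready)
         */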
   1130 
   1131 static const struct scsipi_inquiry3_pattern {
   1132 	const char vendor[8];
   1133 	const char product[16];
   1134 	const char revision[4];
   1135 } scsipi_inquiry3_quirk[] = {
   1136 	{ "ES-6600 ", "", "" },
   1137 };
   1138 
   1139 static int
   1140 scsipi_inquiry3_ok(const struct scsipi_inquiry_data *ib)
   1141 {
   1142 	for (size_t i = 0; i < __arraycount(scsipi_inquiry3_quirk); i++) {
   1143 		const struct scsipi_inquiry3_pattern *q =
   1144 		    &scsipi_inquiry3_quirk[i];
   1145 #define MATCH(field) \
   1146     (q->field[0] ? memcmp(ib->field, q->field, sizeof(ib->field)) == 0 : 1)
   1147 		if (MATCH(vendor) && MATCH(product) && MATCH(revision))
   1148 			return 0;
   1149 	}
   1150 	return 1;
   1151 }
   1152 
   1153 /*
   1154  * scsipi_inquire:
   1155  *
   1156  *	Ask the device about itself.
   1157  */
   1158 int
   1159 scsipi_inquire(struct scsipi_periph *periph, struct scsipi_inquiry_data *inqbuf,
   1160     int flags)
   1161 {
   1162 	struct scsipi_inquiry cmd;
   1163 	int error;
   1164 	int retries;
   1165 
   1166 	if (flags & XS_CTL_DISCOVERY)
   1167 		retries = 0;
   1168 	else
   1169 		retries = SCSIPIRETRIES;
   1170 
   1171 	/*
   1172 	 * If we request more data than the device can provide, it SHOULD just
   1173 	 * return a short response.  However, some devices error with an
   1174 	 * ILLEGAL REQUEST sense code, and yet others have even more special
   1175  * failure modes (such as the GL641USB flash adapter, which goes loony
   1176 	 * and sends corrupted CRCs).  To work around this, and to bring our
   1177 	 * behavior more in line with other OSes, we do a shorter inquiry,
   1178 	 * covering all the SCSI-2 information, first, and then request more
   1179 	 * data iff the "additional length" field indicates there is more.
   1180 	 * - mycroft, 2003/10/16
   1181 	 */
   1182 	memset(&cmd, 0, sizeof(cmd));
   1183 	cmd.opcode = INQUIRY;
   1184 	cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI2;
   1185 	error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
   1186 	    (void *)inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI2, retries,
   1187 	    10000, NULL, flags | XS_CTL_DATA_IN);
   1188 	if (!error &&
   1189 	    inqbuf->additional_length > SCSIPI_INQUIRY_LENGTH_SCSI2 - 4) {
   1190 	    if (scsipi_inquiry3_ok(inqbuf)) {
   1191 #if 0
   1192 printf("inquire: addlen=%d, retrying\n", inqbuf->additional_length);
   1193 #endif
   1194 		cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI3;
   1195 		error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
   1196 		    (void *)inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI3, retries,
   1197 		    10000, NULL, flags | XS_CTL_DATA_IN);
   1198 #if 0
   1199 printf("inquire: error=%d\n", error);
   1200 #endif
   1201 	    }
   1202 	}
   1203 
   1204 #ifdef SCSI_OLD_NOINQUIRY
   1205 	/*
   1206 	 * Kludge for the Adaptec ACB-4000 SCSI->MFM translator.
   1207 	 * This board doesn't support the INQUIRY command at all.
   1208 	 */
   1209 	if (error == EINVAL || error == EACCES) {
   1210 		/*
   1211 		 * Conjure up an INQUIRY response.
   1212 		 */
   1213 		inqbuf->device = (error == EINVAL ?
   1214 			 SID_QUAL_LU_PRESENT :
   1215 			 SID_QUAL_LU_NOTPRESENT) | T_DIRECT;
   1216 		inqbuf->dev_qual2 = 0;
   1217 		inqbuf->version = 0;
   1218 		inqbuf->response_format = SID_FORMAT_SCSI1;
   1219 		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
   1220 		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
   1221 		memcpy(inqbuf->vendor, "ADAPTEC ACB-4000            ", 28);
   1222 		error = 0;
   1223 	}
   1224 
   1225 	/*
   1226 	 * Kludge for the Emulex MT-02 SCSI->QIC translator.
   1227 	 * This board gives an empty response to an INQUIRY command.
   1228 	 */
   1229 	else if (error == 0 &&
   1230 	    inqbuf->device == (SID_QUAL_LU_PRESENT | T_DIRECT) &&
   1231 	    inqbuf->dev_qual2 == 0 &&
   1232 	    inqbuf->version == 0 &&
   1233 	    inqbuf->response_format == SID_FORMAT_SCSI1) {
   1234 		/*
   1235 		 * Fill out the INQUIRY response.
   1236 		 */
   1237 		inqbuf->device = (SID_QUAL_LU_PRESENT | T_SEQUENTIAL);
   1238 		inqbuf->dev_qual2 = SID_REMOVABLE;
   1239 		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
   1240 		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
   1241 		memcpy(inqbuf->vendor, "EMULEX  MT-02 QIC           ", 28);
   1242 	}
   1243 #endif /* SCSI_OLD_NOINQUIRY */
   1244 
   1245 	return error;
   1246 }
   1247 
   1248 /*
   1249  * scsipi_prevent:
   1250  *
   1251  *	Prevent or allow the user to remove the media
   1252  */
   1253 int
   1254 scsipi_prevent(struct scsipi_periph *periph, int type, int flags)
   1255 {
   1256 	struct scsi_prevent_allow_medium_removal cmd;
   1257 
   1258 	if (periph->periph_quirks & PQUIRK_NODOORLOCK)
   1259 		return 0;
   1260 
   1261 	memset(&cmd, 0, sizeof(cmd));
   1262 	cmd.opcode = SCSI_PREVENT_ALLOW_MEDIUM_REMOVAL;
   1263 	cmd.how = type;
   1264 
   1265 	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
   1266 	    SCSIPIRETRIES, 5000, NULL, flags));
   1267 }
   1268 
   1269 /*
   1270  * scsipi_start:
   1271  *
   1272  *	Send a START UNIT.
   1273  */
   1274 int
   1275 scsipi_start(struct scsipi_periph *periph, int type, int flags)
   1276 {
   1277 	struct scsipi_start_stop cmd;
   1278 
   1279 	memset(&cmd, 0, sizeof(cmd));
   1280 	cmd.opcode = START_STOP;
   1281 	cmd.byte2 = 0x00;
   1282 	cmd.how = type;
   1283 
   1284 	return scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
   1285 	    SCSIPIRETRIES, (type & SSS_START) ? 60000 : 10000, NULL, flags);
   1286 }
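
        /*
         * Illustrative call: spin a unit up, relying on the longer 60
         * second timeout selected above for SSS_START:
         *
         *	error = scsipi_start(periph, SSS_START, XS_CTL_SILENT);
         */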
   1287 
   1288 /*
   1289  * scsipi_mode_sense, scsipi_mode_sense_big:
   1290  *	get a mode page from a device
   1291  */
   1292 
   1293 int
   1294 scsipi_mode_sense(struct scsipi_periph *periph, int byte2, int page,
   1295     struct scsi_mode_parameter_header_6 *data, int len, int flags, int retries,
   1296     int timeout)
   1297 {
   1298 	struct scsi_mode_sense_6 cmd;
   1299 
   1300 	memset(&cmd, 0, sizeof(cmd));
   1301 	cmd.opcode = SCSI_MODE_SENSE_6;
   1302 	cmd.byte2 = byte2;
   1303 	cmd.page = page;
   1304 	cmd.length = len & 0xff;
   1305 
   1306 	return scsipi_command(periph, (void *)&cmd, sizeof(cmd),
   1307 	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_IN);
   1308 }
   1309 
   1310 int
   1311 scsipi_mode_sense_big(struct scsipi_periph *periph, int byte2, int page,
   1312     struct scsi_mode_parameter_header_10 *data, int len, int flags, int retries,
   1313     int timeout)
   1314 {
   1315 	struct scsi_mode_sense_10 cmd;
   1316 
   1317 	memset(&cmd, 0, sizeof(cmd));
   1318 	cmd.opcode = SCSI_MODE_SENSE_10;
   1319 	cmd.byte2 = byte2;
   1320 	cmd.page = page;
   1321 	_lto2b(len, cmd.length);
   1322 
   1323 	return scsipi_command(periph, (void *)&cmd, sizeof(cmd),
   1324 	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_IN);
   1325 }
   1326 
   1327 int
   1328 scsipi_mode_select(struct scsipi_periph *periph, int byte2,
   1329     struct scsi_mode_parameter_header_6 *data, int len, int flags, int retries,
   1330     int timeout)
   1331 {
   1332 	struct scsi_mode_select_6 cmd;
   1333 
   1334 	memset(&cmd, 0, sizeof(cmd));
   1335 	cmd.opcode = SCSI_MODE_SELECT_6;
   1336 	cmd.byte2 = byte2;
   1337 	cmd.length = len & 0xff;
   1338 
   1339 	return scsipi_command(periph, (void *)&cmd, sizeof(cmd),
   1340 	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_OUT);
   1341 }
   1342 
   1343 int
   1344 scsipi_mode_select_big(struct scsipi_periph *periph, int byte2,
   1345     struct scsi_mode_parameter_header_10 *data, int len, int flags, int retries,
   1346     int timeout)
   1347 {
   1348 	struct scsi_mode_select_10 cmd;
   1349 
   1350 	memset(&cmd, 0, sizeof(cmd));
   1351 	cmd.opcode = SCSI_MODE_SELECT_10;
   1352 	cmd.byte2 = byte2;
   1353 	_lto2b(len, cmd.length);
   1354 
   1355 	return scsipi_command(periph, (void *)&cmd, sizeof(cmd),
   1356 	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_OUT);
   1357 }
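
        /*
         * Illustrative sketch (buffer sizes are hypothetical): fetching the
         * caching mode page (0x08) with the 6-byte variant.  The reply
         * begins with the parameter header, so the caller's buffer embeds
         * one:
         *
         *	struct {
         *		struct scsi_mode_parameter_header_6 hdr;
         *		u_int8_t page[20];
         *	} buf;
         *	int error;
         *
         *	error = scsipi_mode_sense(periph, 0, 0x08, &buf.hdr,
         *	    sizeof(buf), 0, SCSIPIRETRIES, 10000);
         */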
   1358 
   1359 /*
   1360  * scsipi_get_opcodeinfo:
   1361  *
   1362  * query the device for supported commands and their timeouts,
   1363  * building a timeout lookup table if timeout information is available.
   1364  */
   1365 void
   1366 scsipi_get_opcodeinfo(struct scsipi_periph *periph)
   1367 {
   1368 	u_int8_t *data;
   1369 	int len = 16*1024;
   1370 	int rc;
   1371 	struct scsi_repsuppopcode cmd;
   1372 
   1373 	/* refrain from asking for supported opcodes */
   1374 	if (periph->periph_quirks & PQUIRK_NOREPSUPPOPC ||
   1375 	    periph->periph_type == T_PROCESSOR || /* spec. */
   1376 	    periph->periph_type == T_CDROM) /* spec. */
   1377 		return;
   1378 
   1379 	scsipi_free_opcodeinfo(periph);
   1380 
   1381 	/*
   1382 	 * query REPORT SUPPORTED OPERATION CODES
   1383 	 * if OK
   1384 	 *   enumerate all codes
   1385 	 *     if timeout exists insert maximum into opcode table
   1386 	 */
   1387 
   1388 	data = malloc(len, M_DEVBUF, M_NOWAIT|M_ZERO);
   1389 	if (data == NULL) {
   1390 		SC_DEBUG(periph, SCSIPI_DB3,
   1391 			 ("unable to allocate data buffer "
   1392 			  "for REPORT SUPPORTED OPERATION CODES\n"));
   1393 		return;
   1394 	}
   1395 
   1396 	memset(&cmd, 0, sizeof(cmd));
   1397 
   1398 	cmd.opcode = SCSI_MAINTENANCE_IN;
   1399 	cmd.svcaction = RSOC_REPORT_SUPPORTED_OPCODES;
   1400 	cmd.repoption = RSOC_RCTD|RSOC_ALL;
   1401 	_lto4b(len, cmd.alloclen);
   1402 
   1403 	rc = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
   1404 			    (void *)data, len, 0, 1000, NULL,
   1405 			    XS_CTL_DATA_IN|XS_CTL_SILENT);
   1406 
   1407 	if (rc == 0) {
   1408 		int count;
   1409 		int dlen = _4btol(data);
   1410 		u_int8_t *c = data + 4;
   1411 
   1412 		SC_DEBUG(periph, SCSIPI_DB3,
   1413 			 ("supported opcode timeout-values loaded\n"));
   1414 		SC_DEBUG(periph, SCSIPI_DB3,
   1415 			 ("CMD  LEN  SA    spec  nom. time  cmd timeout\n"));
   1416 
   1417 		struct scsipi_opcodes *tot =
   1418 		  (struct scsipi_opcodes *)malloc(sizeof(struct scsipi_opcodes),
   1419 						  M_DEVBUF, M_NOWAIT|M_ZERO);
   1420 
   1421 		count = 0;
   1422 		while (tot != NULL &&
   1423 		       dlen >= (int)sizeof(struct scsi_repsupopcode_all_commands_descriptor)) {
   1424 			struct scsi_repsupopcode_all_commands_descriptor *acd
   1425 				= (struct scsi_repsupopcode_all_commands_descriptor *)c;
   1426 #ifdef SCSIPI_DEBUG
   1427 			int cdblen = _2btol((const u_int8_t *)&acd->cdblen);
   1428 #endif
   1429 			dlen -= sizeof(struct scsi_repsupopcode_all_commands_descriptor);
   1430 			c += sizeof(struct scsi_repsupopcode_all_commands_descriptor);
   1431 			SC_DEBUG(periph, SCSIPI_DB3,
   1432 				 ("0x%02x(%2d) ", acd->opcode, cdblen));
   1433 
   1434 			tot->opcode_info[acd->opcode].ti_flags = SCSIPI_TI_VALID;
   1435 
   1436 			if (acd->flags & RSOC_ACD_SERVACTV) {
   1437 				SC_DEBUGN(periph, SCSIPI_DB3,
   1438 					 ("0x%02x%02x ",
   1439 					  acd->serviceaction[0],
   1440 					  acd->serviceaction[1]));
   1441 			} else {
   1442 				SC_DEBUGN(periph, SCSIPI_DB3, ("       "));
   1443 			}
   1444 
   1445 			if (acd->flags & RSOC_ACD_CTDP
   1446 			    && dlen >= (int)sizeof(struct scsi_repsupopcode_timeouts_descriptor)) {
   1447 				struct scsi_repsupopcode_timeouts_descriptor *td
   1448 					= (struct scsi_repsupopcode_timeouts_descriptor *)c;
   1449 				long nomto = _4btol(td->nom_process_timeout);
   1450 				long cmdto = _4btol(td->cmd_process_timeout);
   1451 				long t = (cmdto > nomto) ? cmdto : nomto;
   1452 
   1453 				dlen -= sizeof(struct scsi_repsupopcode_timeouts_descriptor);
   1454 				c += sizeof(struct scsi_repsupopcode_timeouts_descriptor);
   1455 
   1456 				SC_DEBUGN(periph, SCSIPI_DB3,
   1457 					  ("0x%02x %10ld %10ld",
   1458 					   td->cmd_specific,
   1459 					   nomto, cmdto));
   1460 
   1461 				if (t > tot->opcode_info[acd->opcode].ti_timeout) {
   1462 					tot->opcode_info[acd->opcode].ti_timeout = t;
   1463 					++count;
   1464 				}
   1465 			}
   1466 			SC_DEBUGN(periph, SCSIPI_DB3, ("\n"));
   1467 		}
   1468 
   1469 		if (count > 0) {
   1470 			periph->periph_opcs = tot;
   1471 		} else if (tot != NULL) {
   1472 			free(tot, M_DEVBUF);
   1473 			SC_DEBUG(periph, SCSIPI_DB3,
   1474 			 	("no usable timeout values available\n"));
   1475 		}
   1476 	} else {
   1477 		SC_DEBUG(periph, SCSIPI_DB3,
   1478 			 ("SCSI_MAINTENANCE_IN"
   1479 			  "[RSOC_REPORT_SUPPORTED_OPCODES] failed error=%d"
   1480 			  " - no device provided timeout "
   1481 			  "values available\n", rc));
   1482 	}
   1483 
   1484 	free(data, M_DEVBUF);
   1485 }
   1486 
   1487 /*
   1488  * scsipi_update_timeouts:
   1489  * 	Override the timeout value if device/config-provided
   1490  *      timeouts are available.
   1491  */
   1492 static void
   1493 scsipi_update_timeouts(struct scsipi_xfer *xs)
   1494 {
   1495 	struct scsipi_opcodes *opcs;
   1496 	u_int8_t cmd;
   1497 	int timeout;
   1498 	struct scsipi_opinfo *oi;
   1499 
   1500 	if (xs->timeout <= 0) {
   1501 		return;
   1502 	}
   1503 
   1504 	opcs = xs->xs_periph->periph_opcs;
   1505 
   1506 	if (opcs == NULL) {
   1507 		return;
   1508 	}
   1509 
   1510 	cmd = xs->cmd->opcode;
   1511 	oi = &opcs->opcode_info[cmd];
   1512 
   1513 	timeout = 1000 * (int)oi->ti_timeout;
   1514 
   1516 	if (timeout > xs->timeout && timeout < 86400000) {
   1517 		/*
   1518 		 * pick up device configured timeouts if they
   1519 		 * are longer than the requested ones but less
   1520 		 * than a day
   1521 		 */
   1522 #ifdef SCSIPI_DEBUG
   1523 		if ((oi->ti_flags & SCSIPI_TI_LOGGED) == 0) {
   1524 			SC_DEBUG(xs->xs_periph, SCSIPI_DB3,
   1525 				 ("Overriding command 0x%02x "
   1526 				  "timeout of %d with %d ms\n",
   1527 				  cmd, xs->timeout, timeout));
   1528 			oi->ti_flags |= SCSIPI_TI_LOGGED;
   1529 		}
   1530 #endif
   1531 		xs->timeout = timeout;
   1532 	}
   1533 }
   1534 
   1535 /*
   1536  * scsipi_free_opcodeinfo:
   1537  *
   1538  * free the opcode information table
   1539  */
   1540 void
   1541 scsipi_free_opcodeinfo(struct scsipi_periph *periph)
   1542 {
   1543 	if (periph->periph_opcs != NULL) {
   1544 		free(periph->periph_opcs, M_DEVBUF);
   1545 	}
   1546 
   1547 	periph->periph_opcs = NULL;
   1548 }
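
        /*
         * Illustrative lifecycle sketch: a periph driver would typically
         * pair these at attach/detach time; once the table exists,
         * scsipi_update_timeouts() consults it for every xfer:
         *
         *	scsipi_get_opcodeinfo(periph);		(at attach)
         *	...
         *	scsipi_free_opcodeinfo(periph);		(at detach)
         */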
   1549 
   1550 /*
   1551  * scsipi_done:
   1552  *
   1553  *	This routine is called by an adapter's interrupt handler when
   1554  *	an xfer is completed.
   1555  */
   1556 void
   1557 scsipi_done(struct scsipi_xfer *xs)
   1558 {
   1559 	struct scsipi_periph *periph = xs->xs_periph;
   1560 	struct scsipi_channel *chan = periph->periph_channel;
   1561 	int freezecnt;
   1562 
   1563 	SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
   1564 #ifdef SCSIPI_DEBUG
   1565 	if (periph->periph_dbflags & SCSIPI_DB1)
   1566 		show_scsipi_cmd(xs);
   1567 #endif
   1568 
   1569 	mutex_enter(chan_mtx(chan));
   1570 	/*
   1571 	 * The resource this command was using is now free.
   1572 	 */
   1573 	if (xs->xs_status & XS_STS_DONE) {
   1574 		/* XXX in certain circumstances, such as a device
   1575 		 * being detached, a xs that has already been
   1576 		 * scsipi_done()'d by the main thread will be done'd
   1577 		 * again by scsibusdetach(). Putting the xs on the
   1578 		 * chan_complete queue causes list corruption and
   1579 		 * everyone dies. This prevents that, but perhaps
   1580 		 * there should be better coordination somewhere such
   1581 		 * that this won't ever happen (and can be turned into
   1582 		 * a KASSERT()).
   1583 		 */
   1584 		mutex_exit(chan_mtx(chan));
   1585 		goto out;
   1586 	}
   1587 	scsipi_put_resource(chan);
   1588 	xs->xs_periph->periph_sent--;
   1589 
   1590 	/*
   1591 	 * If the command was tagged, free the tag.
   1592 	 */
   1593 	if (XS_CTL_TAGTYPE(xs) != 0)
   1594 		scsipi_put_tag(xs);
   1595 	else
   1596 		periph->periph_flags &= ~PERIPH_UNTAG;
   1597 
   1598 	/* Mark the command as `done'. */
   1599 	xs->xs_status |= XS_STS_DONE;
   1600 
   1601 #ifdef DIAGNOSTIC
   1602 	if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
   1603 	    (XS_CTL_ASYNC|XS_CTL_POLL))
   1604 		panic("scsipi_done: ASYNC and POLL");
   1605 #endif
   1606 
   1607 	/*
   1608 	 * If the xfer had an error of any sort, freeze the
   1609 	 * periph's queue.  Freeze it again if we were requested
   1610 	 * to do so in the xfer.
   1611 	 */
   1612 	freezecnt = 0;
   1613 	if (xs->error != XS_NOERROR)
   1614 		freezecnt++;
   1615 	if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
   1616 		freezecnt++;
   1617 	if (freezecnt != 0)
   1618 		scsipi_periph_freeze_locked(periph, freezecnt);
   1619 
   1620 	/*
   1621 	 * Record the xfer with a pending sense, in case a SCSI reset is
   1622 	 * received before the thread is woken up.
   1623 	 */
   1624 	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
   1625 		periph->periph_flags |= PERIPH_SENSE;
   1626 		periph->periph_xscheck = xs;
   1627 	}
   1628 
   1629 	/*
   1630 	 * If this was an xfer that was not to complete asynchronously,
   1631 	 * let the requesting thread perform error checking/handling
   1632 	 * in its context.
   1633 	 */
   1634 	if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
   1635 		/*
   1636 		 * If it's a polling job, just return, to unwind the
   1637 		 * call graph.  We don't need to restart the queue,
   1638 		 * because polling jobs are treated specially, and
   1639 		 * are really only used during crash dumps anyway
   1640 		 * (XXX or during boot-time autoconfiguration of
   1641 		 * ATAPI devices).
   1642 		 */
   1643 		if (xs->xs_control & XS_CTL_POLL) {
   1644 			mutex_exit(chan_mtx(chan));
   1645 			return;
   1646 		}
   1647 		cv_broadcast(xs_cv(xs));
   1648 		mutex_exit(chan_mtx(chan));
   1649 		goto out;
   1650 	}
   1651 
   1652 	/*
   1653 	 * Catch the extremely common case of I/O completing
   1654 	 * without error; no use in taking a context switch
   1655 	 * if we can handle it in interrupt context.
   1656 	 */
   1657 	if (xs->error == XS_NOERROR) {
   1658 		mutex_exit(chan_mtx(chan));
   1659 		(void) scsipi_complete(xs);
   1660 		goto out;
   1661 	}
   1662 
   1663 	/*
   1664 	 * There is an error on this xfer.  Put it on the channel's
   1665 	 * completion queue, and wake up the completion thread.
   1666 	 */
   1667 	TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
   1668 	cv_broadcast(chan_cv_complete(chan));
   1669 	mutex_exit(chan_mtx(chan));
   1670 
   1671  out:
   1672 	/*
   1673 	 * If there are more xfers on the channel's queue, attempt to
   1674 	 * run them.
   1675 	 */
   1676 	scsipi_run_queue(chan);
   1677 }
   1678 
   1679 /*
   1680  * scsipi_complete:
   1681  *
   1682  *	Completion of a scsipi_xfer.  This is the guts of scsipi_done().
   1683  *
   1684  *	NOTE: This routine MUST be called with valid thread context
   1685  *	except for the case where the following two conditions are
   1686  *	true:
   1687  *
   1688  *		xs->error == XS_NOERROR
   1689  *		XS_CTL_ASYNC is set in xs->xs_control
   1690  *
   1691  *	The semantics of this routine can be tricky, so here is an
   1692  *	explanation:
   1693  *
   1694  *		0		Xfer completed successfully.
   1695  *
   1696  *		ERESTART	Xfer had an error, but was restarted.
   1697  *
   1698  *		anything else	Xfer had an error, return value is Unix
   1699  *				errno.
   1700  *
   1701  *	If the return value is anything but ERESTART:
   1702  *
   1703  *		- If XS_CTL_ASYNC is set, `xs' has been freed back to
   1704  *		  the pool.
   1705  *		- If there is a buf associated with the xfer,
   1706  *		  it has been biodone()'d.
   1707  */
   1708 static int
   1709 scsipi_complete(struct scsipi_xfer *xs)
   1710 {
   1711 	struct scsipi_periph *periph = xs->xs_periph;
   1712 	struct scsipi_channel *chan = periph->periph_channel;
   1713 	int error;
   1714 
   1715 #ifdef DIAGNOSTIC
   1716 	if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
   1717 		panic("scsipi_complete: XS_CTL_ASYNC but no buf");
   1718 #endif
   1719 	/*
   1720 	 * If command terminated with a CHECK CONDITION, we need to issue a
   1721 	 * REQUEST_SENSE command. Once the REQUEST_SENSE has been processed
   1722 	 * we'll have the real status.
   1723 	 * Must be processed with channel lock held to avoid missing
   1724 	 * a SCSI bus reset for this command.
   1725 	 */
   1726 	mutex_enter(chan_mtx(chan));
   1727 	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
    1728 		/* request sense for a request sense? */
    1729 		if (xs->xs_control & XS_CTL_REQSENSE) {
    1730 			scsipi_printaddr(periph);
    1731 			printf("request sense for a request sense?\n");
    1732 			/* XXX maybe we should reset the device? */
   1733 			/* we've been frozen because xs->error != XS_NOERROR */
   1734 			scsipi_periph_thaw_locked(periph, 1);
   1735 			mutex_exit(chan_mtx(chan));
   1736 			if (xs->resid < xs->datalen) {
   1737 				printf("we read %d bytes of sense anyway:\n",
   1738 				    xs->datalen - xs->resid);
   1739 				scsipi_print_sense_data((void *)xs->data, 0);
   1740 			}
   1741 			return EINVAL;
   1742 		}
    1743 		mutex_exit(chan_mtx(chan)); /* XXX allows other commands to queue or run */
   1744 		scsipi_request_sense(xs);
   1745 	} else
   1746 		mutex_exit(chan_mtx(chan));
   1747 
   1748 	/*
    1749 	 * If it's a user-level request, bypass all the usual completion
    1750 	 * processing and let the user work it out.
   1751 	 */
   1752 	if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
   1753 		SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n"));
   1754 		mutex_enter(chan_mtx(chan));
   1755 		if (xs->error != XS_NOERROR)
   1756 			scsipi_periph_thaw_locked(periph, 1);
   1757 		mutex_exit(chan_mtx(chan));
   1758 		scsipi_user_done(xs);
    1759 		SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n"));
   1760 		return 0;
   1761 	}
   1762 
   1763 	switch (xs->error) {
   1764 	case XS_NOERROR:
   1765 		error = 0;
   1766 		break;
   1767 
   1768 	case XS_SENSE:
   1769 	case XS_SHORTSENSE:
   1770 		error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
   1771 		break;
   1772 
   1773 	case XS_RESOURCE_SHORTAGE:
   1774 		/*
   1775 		 * XXX Should freeze channel's queue.
   1776 		 */
   1777 		scsipi_printaddr(periph);
   1778 		printf("adapter resource shortage\n");
   1779 		/* FALLTHROUGH */
   1780 
   1781 	case XS_BUSY:
   1782 		if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
   1783 			struct scsipi_max_openings mo;
   1784 
   1785 			/*
   1786 			 * We set the openings to active - 1, assuming that
   1787 			 * the command that got us here is the first one that
   1788 			 * can't fit into the device's queue.  If that's not
   1789 			 * the case, I guess we'll find out soon enough.
   1790 			 */
   1791 			mo.mo_target = periph->periph_target;
   1792 			mo.mo_lun = periph->periph_lun;
   1793 			if (periph->periph_active < periph->periph_openings)
   1794 				mo.mo_openings = periph->periph_active - 1;
   1795 			else
   1796 				mo.mo_openings = periph->periph_openings - 1;
   1797 #ifdef DIAGNOSTIC
   1798 			if (mo.mo_openings < 0) {
   1799 				scsipi_printaddr(periph);
   1800 				printf("QUEUE FULL resulted in < 0 openings\n");
   1801 				panic("scsipi_done");
   1802 			}
   1803 #endif
   1804 			if (mo.mo_openings == 0) {
   1805 				scsipi_printaddr(periph);
   1806 				printf("QUEUE FULL resulted in 0 openings\n");
   1807 				mo.mo_openings = 1;
   1808 			}
   1809 			scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
   1810 			error = ERESTART;
   1811 		} else if (xs->xs_retries != 0) {
   1812 			xs->xs_retries--;
   1813 			/*
   1814 			 * Wait one second, and try again.
   1815 			 */
   1816 			mutex_enter(chan_mtx(chan));
   1817 			if ((xs->xs_control & XS_CTL_POLL) ||
   1818 			    (chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
   1819 				/* XXX: quite extreme */
   1820 				kpause("xsbusy", false, hz, chan_mtx(chan));
   1821 			} else if (!callout_pending(&periph->periph_callout)) {
   1822 				scsipi_periph_freeze_locked(periph, 1);
   1823 				callout_reset(&periph->periph_callout,
   1824 				    hz, scsipi_periph_timed_thaw, periph);
   1825 			}
   1826 			mutex_exit(chan_mtx(chan));
   1827 			error = ERESTART;
   1828 		} else
   1829 			error = EBUSY;
   1830 		break;
   1831 
   1832 	case XS_REQUEUE:
   1833 		error = ERESTART;
   1834 		break;
   1835 
   1836 	case XS_SELTIMEOUT:
   1837 	case XS_TIMEOUT:
   1838 		/*
   1839 		 * If the device hasn't gone away, honor retry counts.
   1840 		 *
   1841 		 * Note that if we're in the middle of probing it,
    1842 		 * it won't be found because it isn't here yet, so
   1843 		 * we won't honor the retry count in that case.
   1844 		 */
   1845 		if (scsipi_lookup_periph(chan, periph->periph_target,
   1846 		    periph->periph_lun) && xs->xs_retries != 0) {
   1847 			xs->xs_retries--;
   1848 			error = ERESTART;
   1849 		} else
   1850 			error = EIO;
   1851 		break;
   1852 
   1853 	case XS_RESET:
   1854 		if (xs->xs_control & XS_CTL_REQSENSE) {
   1855 			/*
   1856 			 * request sense interrupted by reset: signal it
   1857 			 * with EINTR return code.
   1858 			 */
   1859 			error = EINTR;
   1860 		} else {
   1861 			if (xs->xs_retries != 0) {
   1862 				xs->xs_retries--;
   1863 				error = ERESTART;
   1864 			} else
   1865 				error = EIO;
   1866 		}
   1867 		break;
   1868 
   1869 	case XS_DRIVER_STUFFUP:
   1870 		scsipi_printaddr(periph);
   1871 		printf("generic HBA error\n");
   1872 		error = EIO;
   1873 		break;
   1874 	default:
   1875 		scsipi_printaddr(periph);
   1876 		printf("invalid return code from adapter: %d\n", xs->error);
   1877 		error = EIO;
   1878 		break;
   1879 	}
   1880 
   1881 	mutex_enter(chan_mtx(chan));
   1882 	if (error == ERESTART) {
   1883 		/*
   1884 		 * If we get here, the periph has been thawed and frozen
   1885 		 * again if we had to issue recovery commands.  Alternatively,
   1886 		 * it may have been frozen again and in a timed thaw.  In
   1887 		 * any case, we thaw the periph once we re-enqueue the
   1888 		 * command.  Once the periph is fully thawed, it will begin
   1889 		 * operation again.
   1890 		 */
   1891 		xs->error = XS_NOERROR;
   1892 		xs->status = SCSI_OK;
   1893 		xs->xs_status &= ~XS_STS_DONE;
   1894 		xs->xs_requeuecnt++;
   1895 		error = scsipi_enqueue(xs);
   1896 		if (error == 0) {
   1897 			scsipi_periph_thaw_locked(periph, 1);
   1898 			mutex_exit(chan_mtx(chan));
   1899 			return ERESTART;
   1900 		}
   1901 	}
   1902 
   1903 	/*
   1904 	 * scsipi_done() freezes the queue if not XS_NOERROR.
   1905 	 * Thaw it here.
   1906 	 */
   1907 	if (xs->error != XS_NOERROR)
   1908 		scsipi_periph_thaw_locked(periph, 1);
   1909 	mutex_exit(chan_mtx(chan));
   1910 
   1911 	if (periph->periph_switch->psw_done)
   1912 		periph->periph_switch->psw_done(xs, error);
   1913 
   1914 	mutex_enter(chan_mtx(chan));
   1915 	if (xs->xs_control & XS_CTL_ASYNC)
   1916 		scsipi_put_xs(xs);
   1917 	mutex_exit(chan_mtx(chan));
   1918 
   1919 	return error;
   1920 }
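
/*
 * Caller-side sketch of the contract above (cf. scsipi_execute_xs()
 * below): ERESTART means the xfer was re-enqueued and the caller must
 * run the queue and wait again; any other return means the xfer is
 * finished (and already freed if XS_CTL_ASYNC was set).
 *
 *	error = scsipi_complete(xs);
 *	if (error == ERESTART)
 *		goto restarted;		-- re-run the queue, wait again
 */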
   1921 
   1922 /*
   1923  * Issue a request sense for the given scsipi_xfer. Called when the xfer
   1924  * returns with a CHECK_CONDITION status. Must be called in valid thread
   1925  * context.
   1926  */
   1927 
   1928 static void
   1929 scsipi_request_sense(struct scsipi_xfer *xs)
   1930 {
   1931 	struct scsipi_periph *periph = xs->xs_periph;
   1932 	int flags, error;
   1933 	struct scsi_request_sense cmd;
   1934 
   1935 	periph->periph_flags |= PERIPH_SENSE;
   1936 
    1937 	/* If the command was polling, the request sense will poll too. */
   1938 	flags = xs->xs_control & XS_CTL_POLL;
   1939 	/* Polling commands can't sleep */
   1940 	if (flags)
   1941 		flags |= XS_CTL_NOSLEEP;
   1942 
   1943 	flags |= XS_CTL_REQSENSE | XS_CTL_URGENT | XS_CTL_DATA_IN |
   1944 	    XS_CTL_THAW_PERIPH | XS_CTL_FREEZE_PERIPH;
   1945 
   1946 	memset(&cmd, 0, sizeof(cmd));
   1947 	cmd.opcode = SCSI_REQUEST_SENSE;
   1948 	cmd.length = sizeof(struct scsi_sense_data);
   1949 
   1950 	error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
   1951 	    (void *)&xs->sense.scsi_sense, sizeof(struct scsi_sense_data),
   1952 	    0, 1000, NULL, flags);
   1953 	periph->periph_flags &= ~PERIPH_SENSE;
   1954 	periph->periph_xscheck = NULL;
   1955 	switch (error) {
   1956 	case 0:
   1957 		/* we have a valid sense */
   1958 		xs->error = XS_SENSE;
   1959 		return;
   1960 	case EINTR:
   1961 		/* REQUEST_SENSE interrupted by bus reset. */
   1962 		xs->error = XS_RESET;
   1963 		return;
   1964 	case EIO:
    1965 		/* request sense couldn't be performed */
   1966 		/*
   1967 		 * XXX this isn't quite right but we don't have anything
   1968 		 * better for now
   1969 		 */
   1970 		xs->error = XS_DRIVER_STUFFUP;
   1971 		return;
   1972 	default:
    1973 		/* Notify that request sense failed. */
   1974 		xs->error = XS_DRIVER_STUFFUP;
   1975 		scsipi_printaddr(periph);
   1976 		printf("request sense failed with error %d\n", error);
   1977 		return;
   1978 	}
   1979 }
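
/*
 * Sketch of the CHECK CONDITION recovery flow implemented by
 * scsipi_done(), scsipi_complete() and scsipi_request_sense() above:
 *
 *	adapter completes xs with xs->error == XS_BUSY, status SCSI_CHECK
 *	  -> scsipi_done() records it as periph_xscheck and defers
 *	     error handling
 *	  -> scsipi_complete() notices the pending sense and calls
 *	     scsipi_request_sense()
 *	  -> on success the sense data lands in xs->sense.scsi_sense
 *	     and xs->error becomes XS_SENSE
 *	  -> scsipi_complete() then dispatches to the bus type's
 *	     bustype_interpret_sense hook
 */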
   1980 
   1981 /*
   1982  * scsipi_enqueue:
   1983  *
   1984  *	Enqueue an xfer on a channel.
   1985  */
   1986 static int
   1987 scsipi_enqueue(struct scsipi_xfer *xs)
   1988 {
   1989 	struct scsipi_channel *chan = xs->xs_periph->periph_channel;
   1990 	struct scsipi_xfer *qxs;
   1991 
   1992 	/*
   1993 	 * If the xfer is to be polled, and there are already jobs on
   1994 	 * the queue, we can't proceed.
   1995 	 */
   1996 	KASSERT(mutex_owned(chan_mtx(chan)));
   1997 	if ((xs->xs_control & XS_CTL_POLL) != 0 &&
   1998 	    TAILQ_FIRST(&chan->chan_queue) != NULL) {
   1999 		xs->error = XS_DRIVER_STUFFUP;
   2000 		return EAGAIN;
   2001 	}
   2002 
   2003 	/*
   2004 	 * If we have an URGENT xfer, it's an error recovery command
   2005 	 * and it should just go on the head of the channel's queue.
   2006 	 */
   2007 	if (xs->xs_control & XS_CTL_URGENT) {
   2008 		TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
   2009 		goto out;
   2010 	}
   2011 
   2012 	/*
   2013 	 * If this xfer has already been on the queue before, we
   2014 	 * need to reinsert it in the correct order.  That order is:
   2015 	 *
   2016 	 *	Immediately before the first xfer for this periph
   2017 	 *	with a requeuecnt less than xs->xs_requeuecnt.
   2018 	 *
   2019 	 * Failing that, at the end of the queue.  (We'll end up
   2020 	 * there naturally.)
   2021 	 */
   2022 	if (xs->xs_requeuecnt != 0) {
   2023 		for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
   2024 		     qxs = TAILQ_NEXT(qxs, channel_q)) {
   2025 			if (qxs->xs_periph == xs->xs_periph &&
   2026 			    qxs->xs_requeuecnt < xs->xs_requeuecnt)
   2027 				break;
   2028 		}
   2029 		if (qxs != NULL) {
   2030 			TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
   2031 			    channel_q);
   2032 			goto out;
   2033 		}
   2034 	}
   2035 	TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
   2036  out:
   2037 	if (xs->xs_control & XS_CTL_THAW_PERIPH)
   2038 		scsipi_periph_thaw_locked(xs->xs_periph, 1);
   2039 	return 0;
   2040 }
   2041 
   2042 /*
   2043  * scsipi_run_queue:
   2044  *
   2045  *	Start as many xfers as possible running on the channel.
   2046  */
   2047 static void
   2048 scsipi_run_queue(struct scsipi_channel *chan)
   2049 {
   2050 	struct scsipi_xfer *xs;
   2051 	struct scsipi_periph *periph;
   2052 
   2053 	for (;;) {
   2054 		mutex_enter(chan_mtx(chan));
   2055 
   2056 		/*
   2057 		 * If the channel is frozen, we can't do any work right
   2058 		 * now.
   2059 		 */
   2060 		if (chan->chan_qfreeze != 0) {
   2061 			mutex_exit(chan_mtx(chan));
   2062 			return;
   2063 		}
   2064 
   2065 		/*
   2066 		 * Look for work to do, and make sure we can do it.
   2067 		 */
   2068 		for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
   2069 		     xs = TAILQ_NEXT(xs, channel_q)) {
   2070 			periph = xs->xs_periph;
   2071 
   2072 			if ((periph->periph_sent >= periph->periph_openings) ||
   2073 			    periph->periph_qfreeze != 0 ||
   2074 			    (periph->periph_flags & PERIPH_UNTAG) != 0)
   2075 				continue;
   2076 
   2077 			if ((periph->periph_flags &
   2078 			    (PERIPH_RECOVERING | PERIPH_SENSE)) != 0 &&
   2079 			    (xs->xs_control & XS_CTL_URGENT) == 0)
   2080 				continue;
   2081 
   2082 			/*
   2083 			 * We can issue this xfer!
   2084 			 */
   2085 			goto got_one;
   2086 		}
   2087 
   2088 		/*
   2089 		 * Can't find any work to do right now.
   2090 		 */
   2091 		mutex_exit(chan_mtx(chan));
   2092 		return;
   2093 
   2094  got_one:
   2095 		/*
   2096 		 * Have an xfer to run.  Allocate a resource from
   2097 		 * the adapter to run it.  If we can't allocate that
   2098 		 * resource, we don't dequeue the xfer.
   2099 		 */
   2100 		if (scsipi_get_resource(chan) == 0) {
   2101 			/*
   2102 			 * Adapter is out of resources.  If the adapter
   2103 			 * supports it, attempt to grow them.
   2104 			 */
   2105 			if (scsipi_grow_resources(chan) == 0) {
   2106 				/*
   2107 				 * Wasn't able to grow resources,
   2108 				 * nothing more we can do.
   2109 				 */
   2110 				if (xs->xs_control & XS_CTL_POLL) {
   2111 					scsipi_printaddr(xs->xs_periph);
   2112 					printf("polling command but no "
    2113 					    "adapter resources\n");
   2114 					/* We'll panic shortly... */
   2115 				}
   2116 				mutex_exit(chan_mtx(chan));
   2117 
   2118 				/*
    2119 				 * XXX: We should be able to note here
    2120 				 * XXX: that resources are needed!
   2121 				 */
   2122 				return;
   2123 			}
   2124 			/*
   2125 			 * scsipi_grow_resources() allocated the resource
   2126 			 * for us.
   2127 			 */
   2128 		}
   2129 
   2130 		/*
   2131 		 * We have a resource to run this xfer, do it!
   2132 		 */
   2133 		TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
   2134 
   2135 		/*
   2136 		 * If the command is to be tagged, allocate a tag ID
   2137 		 * for it.
   2138 		 */
   2139 		if (XS_CTL_TAGTYPE(xs) != 0)
   2140 			scsipi_get_tag(xs);
   2141 		else
   2142 			periph->periph_flags |= PERIPH_UNTAG;
   2143 		periph->periph_sent++;
   2144 		mutex_exit(chan_mtx(chan));
   2145 
   2146 		scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
   2147 	}
   2148 #ifdef DIAGNOSTIC
   2149 	panic("scsipi_run_queue: impossible");
   2150 #endif
   2151 }
   2152 
   2153 /*
   2154  * scsipi_execute_xs:
   2155  *
   2156  *	Begin execution of an xfer, waiting for it to complete, if necessary.
   2157  */
   2158 int
   2159 scsipi_execute_xs(struct scsipi_xfer *xs)
   2160 {
   2161 	struct scsipi_periph *periph = xs->xs_periph;
   2162 	struct scsipi_channel *chan = periph->periph_channel;
   2163 	int oasync, async, poll, error;
   2164 
   2165 	KASSERT(!cold);
   2166 
   2167 	scsipi_update_timeouts(xs);
   2168 
   2169 	(chan->chan_bustype->bustype_cmd)(xs);
   2170 
   2171 	xs->xs_status &= ~XS_STS_DONE;
   2172 	xs->error = XS_NOERROR;
   2173 	xs->resid = xs->datalen;
   2174 	xs->status = SCSI_OK;
   2175 
   2176 #ifdef SCSIPI_DEBUG
   2177 	if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) {
   2178 		printf("scsipi_execute_xs: ");
   2179 		show_scsipi_xs(xs);
   2180 		printf("\n");
   2181 	}
   2182 #endif
   2183 
   2184 	/*
   2185 	 * Deal with command tagging:
   2186 	 *
   2187 	 *	- If the device's current operating mode doesn't
   2188 	 *	  include tagged queueing, clear the tag mask.
   2189 	 *
   2190 	 *	- If the device's current operating mode *does*
   2191 	 *	  include tagged queueing, set the tag_type in
   2192 	 *	  the xfer to the appropriate byte for the tag
   2193 	 *	  message.
   2194 	 */
   2195 	if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0 ||
    2196 	    (xs->xs_control & XS_CTL_REQSENSE)) {
   2197 		xs->xs_control &= ~XS_CTL_TAGMASK;
   2198 		xs->xs_tag_type = 0;
   2199 	} else {
   2200 		/*
   2201 		 * If the request doesn't specify a tag, give Head
   2202 		 * tags to URGENT operations and Simple tags to
   2203 		 * everything else.
   2204 		 */
   2205 		if (XS_CTL_TAGTYPE(xs) == 0) {
   2206 			if (xs->xs_control & XS_CTL_URGENT)
   2207 				xs->xs_control |= XS_CTL_HEAD_TAG;
   2208 			else
   2209 				xs->xs_control |= XS_CTL_SIMPLE_TAG;
   2210 		}
   2211 
   2212 		switch (XS_CTL_TAGTYPE(xs)) {
   2213 		case XS_CTL_ORDERED_TAG:
   2214 			xs->xs_tag_type = MSG_ORDERED_Q_TAG;
   2215 			break;
   2216 
   2217 		case XS_CTL_SIMPLE_TAG:
   2218 			xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
   2219 			break;
   2220 
   2221 		case XS_CTL_HEAD_TAG:
   2222 			xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
   2223 			break;
   2224 
   2225 		default:
   2226 			scsipi_printaddr(periph);
   2227 			printf("invalid tag mask 0x%08x\n",
   2228 			    XS_CTL_TAGTYPE(xs));
   2229 			panic("scsipi_execute_xs");
   2230 		}
   2231 	}
   2232 
    2233 	/* If the adapter wants us to poll, poll. */
   2234 	if (chan->chan_adapter->adapt_flags & SCSIPI_ADAPT_POLL_ONLY)
   2235 		xs->xs_control |= XS_CTL_POLL;
   2236 
   2237 	/*
   2238 	 * If we don't yet have a completion thread, or we are to poll for
   2239 	 * completion, clear the ASYNC flag.
   2240 	 */
    2241 	oasync = (xs->xs_control & XS_CTL_ASYNC);
   2242 	if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
   2243 		xs->xs_control &= ~XS_CTL_ASYNC;
   2244 
   2245 	async = (xs->xs_control & XS_CTL_ASYNC);
   2246 	poll = (xs->xs_control & XS_CTL_POLL);
   2247 
   2248 #ifdef DIAGNOSTIC
   2249 	if (oasync != 0 && xs->bp == NULL)
   2250 		panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
   2251 #endif
   2252 
   2253 	/*
   2254 	 * Enqueue the transfer.  If we're not polling for completion, this
   2255 	 * should ALWAYS return `no error'.
   2256 	 */
   2257 	error = scsipi_enqueue(xs);
   2258 	if (error) {
   2259 		if (poll == 0) {
   2260 			scsipi_printaddr(periph);
   2261 			printf("not polling, but enqueue failed with %d\n",
   2262 			    error);
   2263 			panic("scsipi_execute_xs");
   2264 		}
   2265 
   2266 		scsipi_printaddr(periph);
   2267 		printf("should have flushed queue?\n");
   2268 		goto free_xs;
   2269 	}
   2270 
   2271 	mutex_exit(chan_mtx(chan));
   2272  restarted:
   2273 	scsipi_run_queue(chan);
   2274 	mutex_enter(chan_mtx(chan));
   2275 
   2276 	/*
   2277 	 * The xfer is enqueued, and possibly running.  If it's to be
   2278 	 * completed asynchronously, just return now.
   2279 	 */
   2280 	if (async)
   2281 		return 0;
   2282 
   2283 	/*
   2284 	 * Not an asynchronous command; wait for it to complete.
   2285 	 */
   2286 	while ((xs->xs_status & XS_STS_DONE) == 0) {
   2287 		if (poll) {
   2288 			scsipi_printaddr(periph);
   2289 			printf("polling command not done\n");
   2290 			panic("scsipi_execute_xs");
   2291 		}
   2292 		cv_wait(xs_cv(xs), chan_mtx(chan));
   2293 	}
   2294 
   2295 	/*
   2296 	 * Command is complete.  scsipi_done() has awakened us to perform
   2297 	 * the error handling.
   2298 	 */
   2299 	mutex_exit(chan_mtx(chan));
   2300 	error = scsipi_complete(xs);
   2301 	if (error == ERESTART)
   2302 		goto restarted;
   2303 
   2304 	/*
    2305 	 * If it was meant to run async and we cleared async ourselves,
    2306 	 * don't return an error here; it has already been handled.
   2307 	 */
   2308 	if (oasync)
   2309 		error = 0;
   2310 	/*
    2311 	 * Command completed successfully or a fatal error occurred.  Fall
    2312 	 * into the cleanup below.
   2313 	 */
   2314 	mutex_enter(chan_mtx(chan));
   2315  free_xs:
   2316 	scsipi_put_xs(xs);
   2317 	mutex_exit(chan_mtx(chan));
   2318 
   2319 	/*
   2320 	 * Kick the queue, keep it running in case it stopped for some
   2321 	 * reason.
   2322 	 */
   2323 	scsipi_run_queue(chan);
   2324 
   2325 	mutex_enter(chan_mtx(chan));
   2326 	return error;
   2327 }
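
/*
 * Example of how an xfer normally reaches scsipi_execute_xs().  This
 * is an illustrative sketch only, kept under #if 0 and not part of
 * this file (example_request_sense() is hypothetical): periph drivers
 * call scsipi_command(), which allocates the xfer and ends up here.
 */
#if 0
static int
example_request_sense(struct scsipi_periph *periph)
{
	struct scsi_request_sense cmd;
	struct scsi_sense_data data;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_REQUEST_SENSE;
	cmd.length = sizeof(data);

	/*
	 * Synchronous request: XS_CTL_ASYNC is not set, so
	 * scsipi_execute_xs() waits for XS_STS_DONE before returning.
	 */
	return scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)&data, sizeof(data), 0 /* retries */,
	    1000 /* timeout, ms */, NULL /* bp */, XS_CTL_DATA_IN);
}
#endif	/* example */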
   2328 
   2329 /*
   2330  * scsipi_completion_thread:
   2331  *
   2332  *	This is the completion thread.  We wait for errors on
   2333  *	asynchronous xfers, and perform the error handling
   2334  *	function, restarting the command, if necessary.
   2335  */
   2336 static void
   2337 scsipi_completion_thread(void *arg)
   2338 {
   2339 	struct scsipi_channel *chan = arg;
   2340 	struct scsipi_xfer *xs;
   2341 
   2342 	if (chan->chan_init_cb)
   2343 		(*chan->chan_init_cb)(chan, chan->chan_init_cb_arg);
   2344 
   2345 	mutex_enter(chan_mtx(chan));
   2346 	chan->chan_flags |= SCSIPI_CHAN_TACTIVE;
   2347 	for (;;) {
   2348 		xs = TAILQ_FIRST(&chan->chan_complete);
   2349 		if (xs == NULL && chan->chan_tflags == 0) {
   2350 			/* nothing to do; wait */
   2351 			cv_wait(chan_cv_complete(chan), chan_mtx(chan));
   2352 			continue;
   2353 		}
   2354 		if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
   2355 			/* call chan_callback from thread context */
   2356 			chan->chan_tflags &= ~SCSIPI_CHANT_CALLBACK;
   2357 			chan->chan_callback(chan, chan->chan_callback_arg);
   2358 			continue;
   2359 		}
   2360 		if (chan->chan_tflags & SCSIPI_CHANT_GROWRES) {
   2361 			/* attempt to get more openings for this channel */
   2362 			chan->chan_tflags &= ~SCSIPI_CHANT_GROWRES;
   2363 			mutex_exit(chan_mtx(chan));
   2364 			scsipi_adapter_request(chan,
   2365 			    ADAPTER_REQ_GROW_RESOURCES, NULL);
   2366 			scsipi_channel_thaw(chan, 1);
   2367 			if (chan->chan_tflags & SCSIPI_CHANT_GROWRES)
    2368 				kpause("scsizzz", false, hz/10, NULL);
   2369 			mutex_enter(chan_mtx(chan));
   2370 			continue;
   2371 		}
   2372 		if (chan->chan_tflags & SCSIPI_CHANT_KICK) {
   2373 			/* explicitly run the queues for this channel */
   2374 			chan->chan_tflags &= ~SCSIPI_CHANT_KICK;
   2375 			mutex_exit(chan_mtx(chan));
   2376 			scsipi_run_queue(chan);
   2377 			mutex_enter(chan_mtx(chan));
   2378 			continue;
   2379 		}
   2380 		if (chan->chan_tflags & SCSIPI_CHANT_SHUTDOWN) {
   2381 			break;
   2382 		}
   2383 		if (xs) {
   2384 			TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
   2385 			mutex_exit(chan_mtx(chan));
   2386 
   2387 			/*
   2388 			 * Have an xfer with an error; process it.
   2389 			 */
   2390 			(void) scsipi_complete(xs);
   2391 
   2392 			/*
   2393 			 * Kick the queue; keep it running if it was stopped
   2394 			 * for some reason.
   2395 			 */
   2396 			scsipi_run_queue(chan);
   2397 			mutex_enter(chan_mtx(chan));
   2398 		}
   2399 	}
   2400 
   2401 	chan->chan_thread = NULL;
   2402 
   2403 	/* In case parent is waiting for us to exit. */
   2404 	cv_broadcast(chan_cv_thread(chan));
   2405 	mutex_exit(chan_mtx(chan));
   2406 
   2407 	kthread_exit(0);
   2408 }
   2409 /*
   2410  * scsipi_thread_call_callback:
   2411  *
    2412  *	Request to call a callback from the completion thread.
   2413  */
   2414 int
   2415 scsipi_thread_call_callback(struct scsipi_channel *chan,
   2416     void (*callback)(struct scsipi_channel *, void *), void *arg)
   2417 {
   2418 
   2419 	mutex_enter(chan_mtx(chan));
   2420 	if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
   2421 		/* kernel thread doesn't exist yet */
   2422 		mutex_exit(chan_mtx(chan));
   2423 		return ESRCH;
   2424 	}
   2425 	if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
   2426 		mutex_exit(chan_mtx(chan));
   2427 		return EBUSY;
   2428 	}
   2429 	scsipi_channel_freeze(chan, 1);
   2430 	chan->chan_callback = callback;
   2431 	chan->chan_callback_arg = arg;
   2432 	chan->chan_tflags |= SCSIPI_CHANT_CALLBACK;
   2433 	cv_broadcast(chan_cv_complete(chan));
   2434 	mutex_exit(chan_mtx(chan));
   2435 	return 0;
   2436 }
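
/*
 * Example use of scsipi_thread_call_callback().  Illustrative sketch
 * only, kept under #if 0 (example_cb() and example_defer() are
 * hypothetical).  The callback runs in the completion thread with the
 * channel mutex held; the channel was frozen on its behalf and is
 * expected to be thawed once the deferred work is done (not shown).
 */
#if 0
static void
example_cb(struct scsipi_channel *chan, void *arg)
{
	/* Deferred work happens here, in thread context. */
	printf("deferred work for channel %p, arg %p\n", chan, arg);
}

static int
example_defer(struct scsipi_channel *chan)
{
	/* Returns ESRCH if the thread isn't up, EBUSY if one is pending. */
	return scsipi_thread_call_callback(chan, example_cb, NULL);
}
#endif	/* example */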
   2437 
   2438 /*
   2439  * scsipi_async_event:
   2440  *
   2441  *	Handle an asynchronous event from an adapter.
   2442  */
   2443 void
   2444 scsipi_async_event(struct scsipi_channel *chan, scsipi_async_event_t event,
   2445     void *arg)
   2446 {
   2447 	bool lock = chan_running(chan) > 0;
   2448 
   2449 	if (lock)
   2450 		mutex_enter(chan_mtx(chan));
   2451 	switch (event) {
   2452 	case ASYNC_EVENT_MAX_OPENINGS:
   2453 		scsipi_async_event_max_openings(chan,
   2454 		    (struct scsipi_max_openings *)arg);
   2455 		break;
   2456 
   2457 	case ASYNC_EVENT_XFER_MODE:
   2458 		if (chan->chan_bustype->bustype_async_event_xfer_mode) {
   2459 			chan->chan_bustype->bustype_async_event_xfer_mode(
   2460 			    chan, arg);
   2461 		}
   2462 		break;
   2463 	case ASYNC_EVENT_RESET:
   2464 		scsipi_async_event_channel_reset(chan);
   2465 		break;
   2466 	}
   2467 	if (lock)
   2468 		mutex_exit(chan_mtx(chan));
   2469 }
   2470 
   2471 /*
   2472  * scsipi_async_event_max_openings:
   2473  *
   2474  *	Update the maximum number of outstanding commands a
   2475  *	device may have.
   2476  */
   2477 static void
   2478 scsipi_async_event_max_openings(struct scsipi_channel *chan,
   2479     struct scsipi_max_openings *mo)
   2480 {
   2481 	struct scsipi_periph *periph;
   2482 	int minlun, maxlun;
   2483 
   2484 	if (mo->mo_lun == -1) {
   2485 		/*
   2486 		 * Wildcarded; apply it to all LUNs.
   2487 		 */
   2488 		minlun = 0;
   2489 		maxlun = chan->chan_nluns - 1;
   2490 	} else
   2491 		minlun = maxlun = mo->mo_lun;
   2492 
   2493 	/* XXX This could really suck with a large LUN space. */
   2494 	for (; minlun <= maxlun; minlun++) {
   2495 		periph = scsipi_lookup_periph_locked(chan, mo->mo_target, minlun);
   2496 		if (periph == NULL)
   2497 			continue;
   2498 
   2499 		if (mo->mo_openings < periph->periph_openings)
   2500 			periph->periph_openings = mo->mo_openings;
   2501 		else if (mo->mo_openings > periph->periph_openings &&
   2502 		    (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
   2503 			periph->periph_openings = mo->mo_openings;
   2504 	}
   2505 }
   2506 
   2507 /*
   2508  * scsipi_set_xfer_mode:
   2509  *
   2510  *	Set the xfer mode for the specified I_T Nexus.
   2511  */
   2512 void
   2513 scsipi_set_xfer_mode(struct scsipi_channel *chan, int target, int immed)
   2514 {
   2515 	struct scsipi_xfer_mode xm;
   2516 	struct scsipi_periph *itperiph;
   2517 	int lun;
   2518 
   2519 	/*
   2520 	 * Go to the minimal xfer mode.
   2521 	 */
   2522 	xm.xm_target = target;
   2523 	xm.xm_mode = 0;
   2524 	xm.xm_period = 0;			/* ignored */
   2525 	xm.xm_offset = 0;			/* ignored */
   2526 
   2527 	/*
   2528 	 * Find the first LUN we know about on this I_T Nexus.
   2529 	 */
   2530 	for (itperiph = NULL, lun = 0; lun < chan->chan_nluns; lun++) {
   2531 		itperiph = scsipi_lookup_periph(chan, target, lun);
   2532 		if (itperiph != NULL)
   2533 			break;
   2534 	}
   2535 	if (itperiph != NULL) {
   2536 		xm.xm_mode = itperiph->periph_cap;
   2537 		/*
   2538 		 * Now issue the request to the adapter.
   2539 		 */
   2540 		scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm);
   2541 		/*
   2542 		 * If we want this to happen immediately, issue a dummy
   2543 		 * command, since most adapters can't really negotiate unless
   2544 		 * they're executing a job.
   2545 		 */
   2546 		if (immed != 0) {
   2547 			(void) scsipi_test_unit_ready(itperiph,
   2548 			    XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
   2549 			    XS_CTL_IGNORE_NOT_READY |
   2550 			    XS_CTL_IGNORE_MEDIA_CHANGE);
   2551 		}
   2552 	}
   2553 }
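
/*
 * Example call (sketch; the target number is made up): renegotiate
 * the xfer mode for target 3 on `chan', using the dummy TEST UNIT
 * READY above to make it take effect immediately:
 *
 *	scsipi_set_xfer_mode(chan, 3, 1);
 */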
   2554 
   2555 /*
    2556  * scsipi_async_event_channel_reset:
    2557  *
    2558  *	Handle a SCSI bus reset.  Called with the channel
    2559  *	lock held.
   2560  */
   2561 static void
   2562 scsipi_async_event_channel_reset(struct scsipi_channel *chan)
   2563 {
   2564 	struct scsipi_xfer *xs, *xs_next;
   2565 	struct scsipi_periph *periph;
   2566 	int target, lun;
   2567 
   2568 	/*
    2569 	 * The channel has been reset.  Also mark pending REQUEST_SENSE
    2570 	 * commands as reset, since their sense data is no longer available.
    2571 	 * We can't call scsipi_done() from here, as the commands have not
    2572 	 * been sent to the adapter yet (doing so would corrupt accounting).
   2573 	 */
   2574 
   2575 	for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; xs = xs_next) {
   2576 		xs_next = TAILQ_NEXT(xs, channel_q);
   2577 		if (xs->xs_control & XS_CTL_REQSENSE) {
   2578 			TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
   2579 			xs->error = XS_RESET;
   2580 			if ((xs->xs_control & XS_CTL_ASYNC) != 0)
   2581 				TAILQ_INSERT_TAIL(&chan->chan_complete, xs,
   2582 				    channel_q);
   2583 		}
   2584 	}
   2585 	cv_broadcast(chan_cv_complete(chan));
    2586 	/* Catch xfers with pending sense that may not have a REQSENSE xs yet. */
   2587 	for (target = 0; target < chan->chan_ntargets; target++) {
   2588 		if (target == chan->chan_id)
   2589 			continue;
    2590 		for (lun = 0; lun < chan->chan_nluns; lun++) {
   2591 			periph = scsipi_lookup_periph_locked(chan, target, lun);
   2592 			if (periph) {
   2593 				xs = periph->periph_xscheck;
   2594 				if (xs)
   2595 					xs->error = XS_RESET;
   2596 			}
   2597 		}
   2598 	}
   2599 }
   2600 
   2601 /*
   2602  * scsipi_target_detach:
   2603  *
    2604  *	Detach all periphs associated with an I_T nexus.
    2605  *	Must be called from valid thread context.
   2606  */
   2607 int
   2608 scsipi_target_detach(struct scsipi_channel *chan, int target, int lun,
   2609     int flags)
   2610 {
   2611 	struct scsipi_periph *periph;
   2612 	device_t tdev;
   2613 	int ctarget, mintarget, maxtarget;
   2614 	int clun, minlun, maxlun;
   2615 	int error = 0;
   2616 
   2617 	if (target == -1) {
   2618 		mintarget = 0;
   2619 		maxtarget = chan->chan_ntargets;
   2620 	} else {
   2621 		if (target == chan->chan_id)
   2622 			return EINVAL;
   2623 		if (target < 0 || target >= chan->chan_ntargets)
   2624 			return EINVAL;
   2625 		mintarget = target;
   2626 		maxtarget = target + 1;
   2627 	}
   2628 
   2629 	if (lun == -1) {
   2630 		minlun = 0;
   2631 		maxlun = chan->chan_nluns;
   2632 	} else {
   2633 		if (lun < 0 || lun >= chan->chan_nluns)
   2634 			return EINVAL;
   2635 		minlun = lun;
   2636 		maxlun = lun + 1;
   2637 	}
   2638 
   2639 	/* for config_detach */
   2640 	KERNEL_LOCK(1, curlwp);
   2641 
   2642 	mutex_enter(chan_mtx(chan));
   2643 	for (ctarget = mintarget; ctarget < maxtarget; ctarget++) {
   2644 		if (ctarget == chan->chan_id)
   2645 			continue;
   2646 
   2647 		for (clun = minlun; clun < maxlun; clun++) {
   2648 			periph = scsipi_lookup_periph_locked(chan, ctarget, clun);
   2649 			if (periph == NULL)
   2650 				continue;
   2651 			tdev = periph->periph_dev;
   2652 			mutex_exit(chan_mtx(chan));
   2653 			error = config_detach(tdev, flags);
   2654 			if (error)
   2655 				goto out;
   2656 			mutex_enter(chan_mtx(chan));
   2657 			KASSERT(scsipi_lookup_periph_locked(chan, ctarget, clun) == NULL);
   2658 		}
   2659 	}
   2660 	mutex_exit(chan_mtx(chan));
   2661 
   2662 out:
   2663 	KERNEL_UNLOCK_ONE(curlwp);
   2664 
   2665 	return error;
   2666 }
   2667 
   2668 /*
   2669  * scsipi_adapter_addref:
   2670  *
    2671  *	Add a reference to the specified adapter, enabling the
    2672  *	adapter if necessary.
   2673  */
   2674 int
   2675 scsipi_adapter_addref(struct scsipi_adapter *adapt)
   2676 {
   2677 	int error = 0;
   2678 
   2679 	if (atomic_inc_uint_nv(&adapt->adapt_refcnt) == 1
   2680 	    && adapt->adapt_enable != NULL) {
   2681 		scsipi_adapter_lock(adapt);
   2682 		error = scsipi_adapter_enable(adapt, 1);
   2683 		scsipi_adapter_unlock(adapt);
   2684 		if (error)
   2685 			atomic_dec_uint(&adapt->adapt_refcnt);
   2686 	}
   2687 	return error;
   2688 }
   2689 
   2690 /*
   2691  * scsipi_adapter_delref:
   2692  *
    2693  *	Delete a reference to the specified adapter, disabling the
    2694  *	adapter if possible.
   2695  */
   2696 void
   2697 scsipi_adapter_delref(struct scsipi_adapter *adapt)
   2698 {
   2699 
   2700 	if (atomic_dec_uint_nv(&adapt->adapt_refcnt) == 0
   2701 	    && adapt->adapt_enable != NULL) {
   2702 		scsipi_adapter_lock(adapt);
   2703 		(void) scsipi_adapter_enable(adapt, 0);
   2704 		scsipi_adapter_unlock(adapt);
   2705 	}
   2706 }
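
/*
 * Example of the reference pair above.  Illustrative sketch only,
 * kept under #if 0 (example_periph_open() is hypothetical): the first
 * reference enables the adapter, dropping the last one disables it.
 */
#if 0
static int
example_periph_open(struct scsipi_periph *periph)
{
	struct scsipi_adapter *adapt =
	    periph->periph_channel->chan_adapter;
	int error;

	if ((error = scsipi_adapter_addref(adapt)) != 0)
		return error;	/* adapter could not be enabled */

	/* ... issue commands while holding the reference ... */

	scsipi_adapter_delref(adapt);
	return 0;
}
#endif	/* example */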
   2707 
   2708 static struct scsipi_syncparam {
   2709 	int	ss_factor;
   2710 	int	ss_period;	/* ns * 100 */
   2711 } scsipi_syncparams[] = {
   2712 	{ 0x08,		 625 },	/* FAST-160 (Ultra320) */
   2713 	{ 0x09,		1250 },	/* FAST-80 (Ultra160) */
   2714 	{ 0x0a,		2500 },	/* FAST-40 40MHz (Ultra2) */
   2715 	{ 0x0b,		3030 },	/* FAST-40 33MHz (Ultra2) */
   2716 	{ 0x0c,		5000 },	/* FAST-20 (Ultra) */
   2717 };
   2718 static const int scsipi_nsyncparams =
   2719     sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);
   2720 
   2721 int
   2722 scsipi_sync_period_to_factor(int period /* ns * 100 */)
   2723 {
   2724 	int i;
   2725 
   2726 	for (i = 0; i < scsipi_nsyncparams; i++) {
   2727 		if (period <= scsipi_syncparams[i].ss_period)
   2728 			return scsipi_syncparams[i].ss_factor;
   2729 	}
   2730 
   2731 	return (period / 100) / 4;
   2732 }
   2733 
   2734 int
   2735 scsipi_sync_factor_to_period(int factor)
   2736 {
   2737 	int i;
   2738 
   2739 	for (i = 0; i < scsipi_nsyncparams; i++) {
   2740 		if (factor == scsipi_syncparams[i].ss_factor)
   2741 			return scsipi_syncparams[i].ss_period;
   2742 	}
   2743 
   2744 	return (factor * 4) * 100;
   2745 }
   2746 
   2747 int
   2748 scsipi_sync_factor_to_freq(int factor)
   2749 {
   2750 	int i;
   2751 
   2752 	for (i = 0; i < scsipi_nsyncparams; i++) {
   2753 		if (factor == scsipi_syncparams[i].ss_factor)
   2754 			return 100000000 / scsipi_syncparams[i].ss_period;
   2755 	}
   2756 
   2757 	return 10000000 / ((factor * 4) * 10);
   2758 }
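
/*
 * Worked example for the conversions above: factor 0x0c (FAST-20) is
 * in the table with ss_period 5000, i.e. 50.00 ns, so
 * scsipi_sync_factor_to_freq(0x0c) returns 100000000 / 5000 = 20000
 * (kHz, i.e. 20 MHz).  A factor outside the table, e.g. 25, falls
 * through to the generic formulas: period (25 * 4) * 100 = 10000
 * (100.00 ns) and frequency 10000000 / ((25 * 4) * 10) = 10000 kHz,
 * i.e. 10 MHz.
 */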
   2759 
   2760 static inline void
   2761 scsipi_adapter_lock(struct scsipi_adapter *adapt)
   2762 {
   2763 
   2764 	if ((adapt->adapt_flags & SCSIPI_ADAPT_MPSAFE) == 0)
   2765 		KERNEL_LOCK(1, NULL);
   2766 }
   2767 
   2768 static inline void
   2769 scsipi_adapter_unlock(struct scsipi_adapter *adapt)
   2770 {
   2771 
   2772 	if ((adapt->adapt_flags & SCSIPI_ADAPT_MPSAFE) == 0)
   2773 		KERNEL_UNLOCK_ONE(NULL);
   2774 }
   2775 
   2776 void
   2777 scsipi_adapter_minphys(struct scsipi_channel *chan, struct buf *bp)
   2778 {
   2779 	struct scsipi_adapter *adapt = chan->chan_adapter;
   2780 
   2781 	scsipi_adapter_lock(adapt);
   2782 	(adapt->adapt_minphys)(bp);
    2783 	scsipi_adapter_unlock(adapt);
   2784 }
   2785 
   2786 void
   2787 scsipi_adapter_request(struct scsipi_channel *chan,
   2788 	scsipi_adapter_req_t req, void *arg)
    2790 {
   2791 	struct scsipi_adapter *adapt = chan->chan_adapter;
   2792 
   2793 	scsipi_adapter_lock(adapt);
   2794 	(adapt->adapt_request)(chan, req, arg);
   2795 	scsipi_adapter_unlock(adapt);
   2796 }
   2797 
   2798 int
   2799 scsipi_adapter_ioctl(struct scsipi_channel *chan, u_long cmd,
   2800 	void *data, int flag, struct proc *p)
   2801 {
   2802 	struct scsipi_adapter *adapt = chan->chan_adapter;
   2803 	int error;
   2804 
   2805 	if (adapt->adapt_ioctl == NULL)
   2806 		return ENOTTY;
   2807 
   2808 	scsipi_adapter_lock(adapt);
   2809 	error = (adapt->adapt_ioctl)(chan, cmd, data, flag, p);
   2810 	scsipi_adapter_unlock(adapt);
   2811 	return error;
   2812 }
   2813 
   2814 int
   2815 scsipi_adapter_enable(struct scsipi_adapter *adapt, int enable)
   2816 {
   2817 	int error;
   2818 
   2819 	scsipi_adapter_lock(adapt);
   2820 	error = (adapt->adapt_enable)(adapt->adapt_dev, enable);
   2821 	scsipi_adapter_unlock(adapt);
   2822 	return error;
   2823 }
   2824 
   2825 #ifdef SCSIPI_DEBUG
   2826 /*
   2827  * Given a scsipi_xfer, dump the request, in all its glory
   2828  */
   2829 void
   2830 show_scsipi_xs(struct scsipi_xfer *xs)
   2831 {
   2832 
   2833 	printf("xs(%p): ", xs);
   2834 	printf("xs_control(0x%08x)", xs->xs_control);
   2835 	printf("xs_status(0x%08x)", xs->xs_status);
   2836 	printf("periph(%p)", xs->xs_periph);
   2837 	printf("retr(0x%x)", xs->xs_retries);
   2838 	printf("timo(0x%x)", xs->timeout);
   2839 	printf("cmd(%p)", xs->cmd);
   2840 	printf("len(0x%x)", xs->cmdlen);
   2841 	printf("data(%p)", xs->data);
   2842 	printf("len(0x%x)", xs->datalen);
   2843 	printf("res(0x%x)", xs->resid);
   2844 	printf("err(0x%x)", xs->error);
   2845 	printf("bp(%p)", xs->bp);
   2846 	show_scsipi_cmd(xs);
   2847 }
   2848 
   2849 void
   2850 show_scsipi_cmd(struct scsipi_xfer *xs)
   2851 {
   2852 	u_char *b = (u_char *) xs->cmd;
   2853 	int i = 0;
   2854 
   2855 	scsipi_printaddr(xs->xs_periph);
   2856 	printf(" command: ");
   2857 
   2858 	if ((xs->xs_control & XS_CTL_RESET) == 0) {
   2859 		while (i < xs->cmdlen) {
   2860 			if (i)
   2861 				printf(",");
   2862 			printf("0x%x", b[i++]);
   2863 		}
   2864 		printf("-[%d bytes]\n", xs->datalen);
   2865 		if (xs->datalen)
   2866 			show_mem(xs->data, uimin(64, xs->datalen));
   2867 	} else
   2868 		printf("-RESET-\n");
   2869 }
   2870 
   2871 void
   2872 show_mem(u_char *address, int num)
   2873 {
   2874 	int x;
   2875 
   2876 	printf("------------------------------");
   2877 	for (x = 0; x < num; x++) {
   2878 		if ((x % 16) == 0)
   2879 			printf("\n%03d: ", x);
   2880 		printf("%02x ", *address++);
   2881 	}
   2882 	printf("\n------------------------------\n");
   2883 }
   2884 #endif /* SCSIPI_DEBUG */
   2885