      1 /*	$NetBSD: scsipi_base.c,v 1.193 2024/10/29 15:50:07 nat Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 1998, 1999, 2000, 2002, 2003, 2004 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
      9  * Simulation Facility, NASA Ames Research Center.
     10  *
     11  * Redistribution and use in source and binary forms, with or without
     12  * modification, are permitted provided that the following conditions
     13  * are met:
     14  * 1. Redistributions of source code must retain the above copyright
     15  *    notice, this list of conditions and the following disclaimer.
     16  * 2. Redistributions in binary form must reproduce the above copyright
     17  *    notice, this list of conditions and the following disclaimer in the
     18  *    documentation and/or other materials provided with the distribution.
     19  *
     20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     30  * POSSIBILITY OF SUCH DAMAGE.
     31  */
     32 
     33 #include <sys/cdefs.h>
     34 __KERNEL_RCSID(0, "$NetBSD: scsipi_base.c,v 1.193 2024/10/29 15:50:07 nat Exp $");
     35 
     36 #ifdef _KERNEL_OPT
     37 #include "opt_scsi.h"
     38 #endif
     39 
     40 #include <sys/param.h>
     41 #include <sys/systm.h>
     42 #include <sys/kernel.h>
     43 #include <sys/buf.h>
     44 #include <sys/uio.h>
     45 #include <sys/malloc.h>
     46 #include <sys/pool.h>
     47 #include <sys/errno.h>
     48 #include <sys/device.h>
     49 #include <sys/proc.h>
     50 #include <sys/kthread.h>
     51 #include <sys/hash.h>
     52 #include <sys/atomic.h>
     53 
     54 #include <dev/scsipi/scsi_sdt.h>
     55 #include <dev/scsipi/scsi_spc.h>
     56 #include <dev/scsipi/scsipi_all.h>
     57 #include <dev/scsipi/scsipi_disk.h>
     58 #include <dev/scsipi/scsipiconf.h>
     59 #include <dev/scsipi/scsipi_base.h>
     60 
     61 #include <dev/scsipi/scsi_all.h>
     62 #include <dev/scsipi/scsi_message.h>
     63 
     64 #include <machine/param.h>
     65 
     66 SDT_PROVIDER_DEFINE(scsi);
     67 
     68 SDT_PROBE_DEFINE3(scsi, base, tag, get,
     69     "struct scsipi_xfer *"/*xs*/, "uint8_t"/*tag*/, "uint8_t"/*type*/);
     70 SDT_PROBE_DEFINE3(scsi, base, tag, put,
     71     "struct scsipi_xfer *"/*xs*/, "uint8_t"/*tag*/, "uint8_t"/*type*/);
     72 
     73 SDT_PROBE_DEFINE3(scsi, base, adapter, request__start,
     74     "struct scsipi_channel *"/*chan*/,
     75     "scsipi_adapter_req_t"/*req*/,
     76     "void *"/*arg*/);
     77 SDT_PROBE_DEFINE3(scsi, base, adapter, request__done,
     78     "struct scsipi_channel *"/*chan*/,
     79     "scsipi_adapter_req_t"/*req*/,
     80     "void *"/*arg*/);
     81 
     82 SDT_PROBE_DEFINE1(scsi, base, queue, batch__start,
     83     "struct scsipi_channel *"/*chan*/);
     84 SDT_PROBE_DEFINE2(scsi, base, queue, run,
     85     "struct scsipi_channel *"/*chan*/,
     86     "struct scsipi_xfer *"/*xs*/);
     87 SDT_PROBE_DEFINE1(scsi, base, queue, batch__done,
     88     "struct scsipi_channel *"/*chan*/);
     89 
     90 SDT_PROBE_DEFINE1(scsi, base, xfer, execute,  "struct scsipi_xfer *"/*xs*/);
     91 SDT_PROBE_DEFINE1(scsi, base, xfer, enqueue,  "struct scsipi_xfer *"/*xs*/);
     92 SDT_PROBE_DEFINE1(scsi, base, xfer, done,  "struct scsipi_xfer *"/*xs*/);
     93 SDT_PROBE_DEFINE1(scsi, base, xfer, redone,  "struct scsipi_xfer *"/*xs*/);
     94 SDT_PROBE_DEFINE1(scsi, base, xfer, complete,  "struct scsipi_xfer *"/*xs*/);
     95 SDT_PROBE_DEFINE1(scsi, base, xfer, restart,  "struct scsipi_xfer *"/*xs*/);
     96 SDT_PROBE_DEFINE1(scsi, base, xfer, free,  "struct scsipi_xfer *"/*xs*/);
     97 
     98 static int	scsipi_complete(struct scsipi_xfer *);
     99 static void	scsipi_request_sense(struct scsipi_xfer *);
    100 static int	scsipi_enqueue(struct scsipi_xfer *);
    101 static void	scsipi_run_queue(struct scsipi_channel *chan);
    102 
    103 static void	scsipi_completion_thread(void *);
    104 
    105 static void	scsipi_get_tag(struct scsipi_xfer *);
    106 static void	scsipi_put_tag(struct scsipi_xfer *);
    107 
    108 static int	scsipi_get_resource(struct scsipi_channel *);
    109 static void	scsipi_put_resource(struct scsipi_channel *);
    110 
    111 static void	scsipi_async_event_max_openings(struct scsipi_channel *,
    112 		    struct scsipi_max_openings *);
    113 static void	scsipi_async_event_channel_reset(struct scsipi_channel *);
    114 
    115 static void	scsipi_channel_freeze_locked(struct scsipi_channel *, int);
    116 
    117 static void	scsipi_adapter_lock(struct scsipi_adapter *adapt);
    118 static void	scsipi_adapter_unlock(struct scsipi_adapter *adapt);
    119 
    120 static void	scsipi_update_timeouts(struct scsipi_xfer *xs);
    121 
    122 static struct pool scsipi_xfer_pool;
    123 
    124 int scsipi_xs_count = 0;
    125 
    126 /*
    127  * scsipi_init:
    128  *
    129  *	Called when a scsibus or atapibus is attached to the system
    130  *	to initialize shared data structures.
    131  */
    132 void
    133 scsipi_init(void)
    134 {
    135 	static int scsipi_init_done;
    136 
    137 	if (scsipi_init_done)
    138 		return;
    139 	scsipi_init_done = 1;
    140 
    141 	/* Initialize the scsipi_xfer pool. */
    142 	pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
    143 	    0, 0, "scxspl", NULL, IPL_BIO);
    144 	pool_prime(&scsipi_xfer_pool, 1);
    145 
    146 	scsipi_ioctl_init();
    147 }
    148 
    149 /*
    150  * scsipi_channel_init:
    151  *
    152  *	Initialize a scsipi_channel when it is attached.
    153  */
    154 int
    155 scsipi_channel_init(struct scsipi_channel *chan)
    156 {
    157 	struct scsipi_adapter *adapt = chan->chan_adapter;
    158 	int i;
    159 
    160 	/* Initialize shared data. */
    161 	scsipi_init();
    162 
    163 	/* Initialize the queues. */
    164 	TAILQ_INIT(&chan->chan_queue);
    165 	TAILQ_INIT(&chan->chan_complete);
    166 
    167 	for (i = 0; i < SCSIPI_CHAN_PERIPH_BUCKETS; i++)
    168 		LIST_INIT(&chan->chan_periphtab[i]);
    169 
    170 	/*
    171 	 * Create the asynchronous completion thread.
    172 	 */
    173 	if (kthread_create(PRI_NONE, 0, NULL, scsipi_completion_thread, chan,
    174 	    &chan->chan_thread, "%s", chan->chan_name)) {
    175 		aprint_error_dev(adapt->adapt_dev, "unable to create completion thread for "
    176 		    "channel %d\n", chan->chan_channel);
    177 		panic("scsipi_channel_init");
    178 	}
    179 
    180 	return 0;
    181 }
    182 
    183 /*
    184  * scsipi_channel_shutdown:
    185  *
    186  *	Shutdown a scsipi_channel.
    187  */
    188 void
    189 scsipi_channel_shutdown(struct scsipi_channel *chan)
    190 {
    191 
    192 	mutex_enter(chan_mtx(chan));
    193 	/*
    194 	 * Shut down the completion thread.
    195 	 */
    196 	chan->chan_tflags |= SCSIPI_CHANT_SHUTDOWN;
    197 	cv_broadcast(chan_cv_complete(chan));
    198 
    199 	/*
    200 	 * Now wait for the thread to exit.
    201 	 */
    202 	while (chan->chan_thread != NULL)
    203 		cv_wait(chan_cv_thread(chan), chan_mtx(chan));
    204 	mutex_exit(chan_mtx(chan));
    205 }
    206 
    207 static uint32_t
    208 scsipi_chan_periph_hash(uint64_t t, uint64_t l)
    209 {
    210 	uint32_t hash;
    211 
    212 	hash = hash32_buf(&t, sizeof(t), HASH32_BUF_INIT);
    213 	hash = hash32_buf(&l, sizeof(l), hash);
    214 
    215 	return hash & SCSIPI_CHAN_PERIPH_HASHMASK;
    216 }
    217 
    218 /*
    219  * scsipi_insert_periph:
    220  *
    221  *	Insert a periph into the channel.
    222  */
    223 void
    224 scsipi_insert_periph(struct scsipi_channel *chan, struct scsipi_periph *periph)
    225 {
    226 	uint32_t hash;
    227 
    228 	hash = scsipi_chan_periph_hash(periph->periph_target,
    229 	    periph->periph_lun);
    230 
    231 	mutex_enter(chan_mtx(chan));
    232 	LIST_INSERT_HEAD(&chan->chan_periphtab[hash], periph, periph_hash);
    233 	mutex_exit(chan_mtx(chan));
    234 }
    235 
    236 /*
    237  * scsipi_remove_periph:
    238  *
    239  *	Remove a periph from the channel.
    240  */
    241 void
    242 scsipi_remove_periph(struct scsipi_channel *chan,
    243     struct scsipi_periph *periph)
    244 {
    245 
    246 	LIST_REMOVE(periph, periph_hash);
    247 }
    248 
    249 /*
    250  * scsipi_lookup_periph:
    251  *
    252  *	Lookup a periph on the specified channel.
    253  */
    254 static struct scsipi_periph *
    255 scsipi_lookup_periph_internal(struct scsipi_channel *chan, int target, int lun, bool lock)
    256 {
    257 	struct scsipi_periph *periph;
    258 	uint32_t hash;
    259 
    260 	if (target >= chan->chan_ntargets ||
    261 	    lun >= chan->chan_nluns)
    262 		return NULL;
    263 
    264 	hash = scsipi_chan_periph_hash(target, lun);
    265 
    266 	if (lock)
    267 		mutex_enter(chan_mtx(chan));
    268 	LIST_FOREACH(periph, &chan->chan_periphtab[hash], periph_hash) {
    269 		if (periph->periph_target == target &&
    270 		    periph->periph_lun == lun)
    271 			break;
    272 	}
    273 	if (lock)
    274 		mutex_exit(chan_mtx(chan));
    275 
    276 	return periph;
    277 }
    278 
    279 struct scsipi_periph *
    280 scsipi_lookup_periph_locked(struct scsipi_channel *chan, int target, int lun)
    281 {
    282 	return scsipi_lookup_periph_internal(chan, target, lun, false);
    283 }
    284 
    285 struct scsipi_periph *
    286 scsipi_lookup_periph(struct scsipi_channel *chan, int target, int lun)
    287 {
    288 	return scsipi_lookup_periph_internal(chan, target, lun, true);
    289 }
    290 
    291 /*
    292  * scsipi_get_resource:
    293  *
    294  *	Allocate a single xfer `resource' from the channel.
    295  *
    296  *	NOTE: Must be called with channel lock held
    297  */
    298 static int
    299 scsipi_get_resource(struct scsipi_channel *chan)
    300 {
    301 	struct scsipi_adapter *adapt = chan->chan_adapter;
    302 
    303 	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
    304 		if (chan->chan_openings > 0) {
    305 			chan->chan_openings--;
    306 			return 1;
    307 		}
    308 		return 0;
    309 	}
    310 
    311 	if (adapt->adapt_openings > 0) {
    312 		adapt->adapt_openings--;
    313 		return 1;
    314 	}
    315 	return 0;
    316 }
    317 
    318 /*
    319  * scsipi_grow_resources:
    320  *
    321  *	Attempt to grow resources for a channel.  If this succeeds,
    322  *	we allocate one for our caller.
    323  *
    324  *	NOTE: Must be called with channel lock held
    325  */
    326 static inline int
    327 scsipi_grow_resources(struct scsipi_channel *chan)
    328 {
    329 
    330 	if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
    331 		if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
    332 			mutex_exit(chan_mtx(chan));
    333 			scsipi_adapter_request(chan,
    334 			    ADAPTER_REQ_GROW_RESOURCES, NULL);
    335 			mutex_enter(chan_mtx(chan));
    336 			return scsipi_get_resource(chan);
    337 		}
    338 		/*
    339 		 * Ask the channel thread to do it; it'll have to thaw
    340 		 * the queue first.
    341 		 */
    342 		scsipi_channel_freeze_locked(chan, 1);
    343 		chan->chan_tflags |= SCSIPI_CHANT_GROWRES;
    344 		cv_broadcast(chan_cv_complete(chan));
    345 		return 0;
    346 	}
    347 
    348 	return 0;
    349 }
    350 
    351 /*
    352  * scsipi_put_resource:
    353  *
    354  *	Free a single xfer `resource' to the channel.
    355  *
    356  *	NOTE: Must be called with channel lock held
    357  */
    358 static void
    359 scsipi_put_resource(struct scsipi_channel *chan)
    360 {
    361 	struct scsipi_adapter *adapt = chan->chan_adapter;
    362 
    363 	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
    364 		chan->chan_openings++;
    365 	else
    366 		adapt->adapt_openings++;
    367 }
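
        /*
         * Resource accounting note: channels flagged SCSIPI_CHAN_OPENINGS
         * count openings per channel (chan_openings); all other channels
         * draw from a pool shared across the adapter (adapt_openings).
         * E.g. an adapter reporting adapt_openings == 2 can have at most
         * two commands in flight in total across its shared channels.
         */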
    368 
    369 /*
    370  * scsipi_get_tag:
    371  *
    372  *	Get a tag ID for the specified xfer.
    373  *
    374  *	NOTE: Must be called with channel lock held
    375  */
    376 static void
    377 scsipi_get_tag(struct scsipi_xfer *xs)
    378 {
    379 	struct scsipi_periph *periph = xs->xs_periph;
    380 	int bit, tag;
    381 	u_int word;
    382 
    383 	KASSERT(mutex_owned(chan_mtx(periph->periph_channel)));
    384 
    385 	bit = 0;	/* XXX gcc */
    386 	for (word = 0; word < PERIPH_NTAGWORDS; word++) {
    387 		bit = ffs(periph->periph_freetags[word]);
    388 		if (bit != 0)
    389 			break;
    390 	}
    391 #ifdef DIAGNOSTIC
    392 	if (word == PERIPH_NTAGWORDS) {
    393 		scsipi_printaddr(periph);
    394 		printf("no free tags\n");
    395 		panic("scsipi_get_tag");
    396 	}
    397 #endif
    398 
    399 	bit -= 1;
    400 	periph->periph_freetags[word] &= ~(1U << bit);
    401 	tag = (word << 5) | bit;
    402 
    403 	/* XXX Should eventually disallow this completely. */
    404 	if (tag >= periph->periph_openings) {
    405 		scsipi_printaddr(periph);
    406 		printf("WARNING: tag %d greater than available openings %d\n",
    407 		    tag, periph->periph_openings);
    408 	}
    409 
    410 	xs->xs_tag_id = tag;
    411 	SDT_PROBE3(scsi, base, tag, get,
    412 	    xs, xs->xs_tag_id, xs->xs_tag_type);
    413 }
    414 
    415 /*
    416  * scsipi_put_tag:
    417  *
    418  *	Put the tag ID for the specified xfer back into the pool.
    419  *
    420  *	NOTE: Must be called with channel lock held
    421  */
    422 static void
    423 scsipi_put_tag(struct scsipi_xfer *xs)
    424 {
    425 	struct scsipi_periph *periph = xs->xs_periph;
    426 	int word, bit;
    427 
    428 	KASSERT(mutex_owned(chan_mtx(periph->periph_channel)));
    429 
    430 	SDT_PROBE3(scsi, base, tag, put,
    431 	    xs, xs->xs_tag_id, xs->xs_tag_type);
    432 
    433 	word = xs->xs_tag_id >> 5;
    434 	bit = xs->xs_tag_id & 0x1f;
    435 
    436 	periph->periph_freetags[word] |= (1U << bit);
    437 }
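
        /*
         * Tag IDs are encoded as bit positions in the periph_freetags
         * bitmap: word = tag >> 5, bit = tag & 0x1f, so each of the
         * PERIPH_NTAGWORDS 32-bit words covers 32 tags.  Worked example
         * (illustrative arithmetic only): tag ID 37 lives in word 1,
         * bit 5, i.e. periph_freetags[1] & (1U << 5).
         */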
    438 
    439 /*
    440  * scsipi_get_xs:
    441  *
    442  *	Allocate an xfer descriptor and associate it with the
    443  *	specified peripheral.  If the peripheral has no more
    444  *	available command openings, we either block waiting for
    445  *	one to become available, or fail.
    446  *
    447  *	When this routine is called with the channel lock held
    448  *	the flags must include XS_CTL_NOSLEEP.
    449  */
    450 struct scsipi_xfer *
    451 scsipi_get_xs(struct scsipi_periph *periph, int flags)
    452 {
    453 	struct scsipi_xfer *xs;
    454 	bool lock = (flags & XS_CTL_NOSLEEP) == 0;
    455 
    456 	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));
    457 
    458 	KASSERT(!cold);
    459 
    460 #ifdef DIAGNOSTIC
    461 	/*
    462 	 * URGENT commands can never be ASYNC.
    463 	 */
    464 	if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
    465 	    (XS_CTL_URGENT|XS_CTL_ASYNC)) {
    466 		scsipi_printaddr(periph);
    467 		printf("URGENT and ASYNC\n");
    468 		panic("scsipi_get_xs");
    469 	}
    470 #endif
    471 
    472 	/*
    473 	 * Wait for a command opening to become available.  Rules:
    474 	 *
    475 	 *	- All xfers must wait for an available opening.
    476 	 *	  Exception: URGENT xfers can proceed when
    477 	 *	  active == openings, because we use the opening
    478 	 *	  of the command we're recovering for.
    479 	 *	- If the periph has sense pending, only URGENT & REQSENSE
    480 	 *	  xfers may proceed.
    481 	 *
    482 	 *	- If the periph is recovering, only URGENT xfers may
    483 	 *	  proceed.
    484 	 *
    485 	 *	- If the periph is currently executing a recovery
    486 	 *	  command, URGENT commands must block, because only
    487 	 *	  one recovery command can execute at a time.
    488 	 */
    489 	if (lock)
    490 		mutex_enter(chan_mtx(periph->periph_channel));
    491 	for (;;) {
    492 		if (flags & XS_CTL_URGENT) {
    493 			if (periph->periph_active > periph->periph_openings)
    494 				goto wait_for_opening;
    495 			if (periph->periph_flags & PERIPH_SENSE) {
    496 				if ((flags & XS_CTL_REQSENSE) == 0)
    497 					goto wait_for_opening;
    498 			} else {
    499 				if ((periph->periph_flags &
    500 				    PERIPH_RECOVERY_ACTIVE) != 0)
    501 					goto wait_for_opening;
    502 				periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
    503 			}
    504 			break;
    505 		}
    506 		if (periph->periph_active >= periph->periph_openings ||
    507 		    (periph->periph_flags & PERIPH_RECOVERING) != 0)
    508 			goto wait_for_opening;
    509 		periph->periph_active++;
    510 		KASSERT(mutex_owned(chan_mtx(periph->periph_channel)));
    511 		break;
    512 
    513  wait_for_opening:
    514 		if (flags & XS_CTL_NOSLEEP) {
    515 			KASSERT(!lock);
    516 			return NULL;
    517 		}
    518 		KASSERT(lock);
    519 		SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
    520 		periph->periph_flags |= PERIPH_WAITING;
    521 		cv_wait(periph_cv_periph(periph),
    522 		    chan_mtx(periph->periph_channel));
    523 	}
    524 	if (lock)
    525 		mutex_exit(chan_mtx(periph->periph_channel));
    526 
    527 	SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
    528 	xs = pool_get(&scsipi_xfer_pool,
    529 	    ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
    530 	if (xs == NULL) {
    531 		if (lock)
    532 			mutex_enter(chan_mtx(periph->periph_channel));
    533 		if (flags & XS_CTL_URGENT) {
    534 			if ((flags & XS_CTL_REQSENSE) == 0)
    535 				periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
    536 		} else
    537 			periph->periph_active--;
    538 		if (lock)
    539 			mutex_exit(chan_mtx(periph->periph_channel));
    540 		scsipi_printaddr(periph);
    541 		printf("unable to allocate %sscsipi_xfer\n",
    542 		    (flags & XS_CTL_URGENT) ? "URGENT " : "");
    543 	}
    544 
    545 	SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));
    546 
    547 	if (xs != NULL) {
    548 		memset(xs, 0, sizeof(*xs));
    549 		callout_init(&xs->xs_callout, 0);
    550 		xs->xs_periph = periph;
    551 		xs->xs_control = flags;
    552 		xs->xs_status = 0;
    553 		if ((flags & XS_CTL_NOSLEEP) == 0)
    554 			mutex_enter(chan_mtx(periph->periph_channel));
    555 		TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
    556 		KASSERT(mutex_owned(chan_mtx(periph->periph_channel)));
    557 		if ((flags & XS_CTL_NOSLEEP) == 0)
    558 			mutex_exit(chan_mtx(periph->periph_channel));
    559 	}
    560 	return xs;
    561 }
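
        /*
         * Illustrative caller sketch (hypothetical; command setup elided).
         * A driver that must not sleep, e.g. in its strategy path, could do:
         *
         *	xs = scsipi_get_xs(periph, XS_CTL_NOSLEEP | XS_CTL_ASYNC);
         *	if (xs == NULL)
         *		return;	// no opening or pool exhausted; retry later
         *	// fill in xs->cmd, xs->cmdlen, xs->data, xs->datalen, ...
         *
         * Per the rules above, a caller already holding the channel lock
         * must include XS_CTL_NOSLEEP in the flags.
         */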
    562 
    563 /*
    564  * scsipi_put_xs:
    565  *
    566  *	Release an xfer descriptor, decreasing the outstanding command
    567  *	count for the peripheral.  If there is a thread waiting for
    568  *	an opening, wake it up.  If not, kick any queued I/O the
    569  *	peripheral may have.
    570  *
    571  *	NOTE: Must be called with channel lock held
    572  */
    573 void
    574 scsipi_put_xs(struct scsipi_xfer *xs)
    575 {
    576 	struct scsipi_periph *periph = xs->xs_periph;
    577 	int flags = xs->xs_control;
    578 
    579 	SDT_PROBE1(scsi, base, xfer, free,  xs);
    580 	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));
    581 	KASSERT(mutex_owned(chan_mtx(periph->periph_channel)));
    582 
    583 	TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
    584 	callout_destroy(&xs->xs_callout);
    585 	pool_put(&scsipi_xfer_pool, xs);
    586 
    587 #ifdef DIAGNOSTIC
    588 	if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
    589 	    periph->periph_active == 0) {
    590 		scsipi_printaddr(periph);
    591 		printf("recovery without a command to recover for\n");
    592 		panic("scsipi_put_xs");
    593 	}
    594 #endif
    595 
    596 	if (flags & XS_CTL_URGENT) {
    597 		if ((flags & XS_CTL_REQSENSE) == 0)
    598 			periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
    599 	} else
    600 		periph->periph_active--;
    601 	if (periph->periph_active == 0 &&
    602 	    (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
    603 		periph->periph_flags &= ~PERIPH_WAITDRAIN;
    604 		cv_broadcast(periph_cv_active(periph));
    605 	}
    606 
    607 	if (periph->periph_flags & PERIPH_WAITING) {
    608 		periph->periph_flags &= ~PERIPH_WAITING;
    609 		cv_broadcast(periph_cv_periph(periph));
    610 	} else {
    611 		if (periph->periph_switch->psw_start != NULL &&
    612 		    device_is_active(periph->periph_dev)) {
    613 			SC_DEBUG(periph, SCSIPI_DB2,
    614 			    ("calling private start()\n"));
    615 			(*periph->periph_switch->psw_start)(periph);
    616 		}
    617 	}
    618 }
    619 
    620 /*
    621  * scsipi_channel_freeze:
    622  *
    623  *	Freeze a channel's xfer queue.
    624  */
    625 void
    626 scsipi_channel_freeze(struct scsipi_channel *chan, int count)
    627 {
    628 	bool lock = chan_running(chan) > 0;
    629 
    630 	if (lock)
    631 		mutex_enter(chan_mtx(chan));
    632 	chan->chan_qfreeze += count;
    633 	if (lock)
    634 		mutex_exit(chan_mtx(chan));
    635 }
    636 
    637 static void
    638 scsipi_channel_freeze_locked(struct scsipi_channel *chan, int count)
    639 {
    640 
    641 	chan->chan_qfreeze += count;
    642 }
    643 
    644 /*
    645  * scsipi_channel_thaw:
    646  *
    647  *	Thaw a channel's xfer queue.
    648  */
    649 void
    650 scsipi_channel_thaw(struct scsipi_channel *chan, int count)
    651 {
    652 	bool lock = chan_running(chan) > 0;
    653 
    654 	if (lock)
    655 		mutex_enter(chan_mtx(chan));
    656 	chan->chan_qfreeze -= count;
    657 	/*
    658 	 * Don't let the freeze count go negative.
    659 	 *
    660 	 * Presumably the adapter driver could keep track of this,
    661 	 * but it might just be easier to do this here so as to allow
    662 	 * multiple callers, including those outside the adapter driver.
    663 	 */
    664 	if (chan->chan_qfreeze < 0) {
    665 		chan->chan_qfreeze = 0;
    666 	}
    667 	if (lock)
    668 		mutex_exit(chan_mtx(chan));
    669 
    670 	/*
    671 	 * Don't kick the queue until the channel is running.
    672 	 */
    673 	if (!lock)
    674 		return;
    675 
    676 	/*
    677 	 * Kick the channel's queue here.  Note, we may be running in
    678 	 * interrupt context (softclock or HBA's interrupt), so the adapter
    679 	 * driver had better not sleep.
    680 	 */
    681 	if (chan->chan_qfreeze == 0)
    682 		scsipi_run_queue(chan);
    683 }
    684 
    685 /*
    686  * scsipi_channel_timed_thaw:
    687  *
    688  *	Thaw a channel after some time has expired. This will also
    689  * 	run the channel's queue if the freeze count has reached 0.
    690  */
    691 void
    692 scsipi_channel_timed_thaw(void *arg)
    693 {
    694 	struct scsipi_channel *chan = arg;
    695 
    696 	scsipi_channel_thaw(chan, 1);
    697 }
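
        /*
         * Illustrative sketch (hypothetical driver callout): an HBA that
         * must back off after a transient condition can freeze the channel
         * and schedule scsipi_channel_timed_thaw() to lift the freeze:
         *
         *	scsipi_channel_freeze(chan, 1);
         *	callout_reset(&sc->sc_thaw_ch, hz,
         *	    scsipi_channel_timed_thaw, chan);
         *
         * where sc_thaw_ch is a driver-owned, initialized callout.
         */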
    698 
    699 /*
    700  * scsipi_periph_freeze:
    701  *
    702  *	Freeze a device's xfer queue.
    703  */
    704 void
    705 scsipi_periph_freeze_locked(struct scsipi_periph *periph, int count)
    706 {
    707 
    708 	periph->periph_qfreeze += count;
    709 }
    710 
    711 /*
    712  * scsipi_periph_thaw:
    713  *
    714  *	Thaw a device's xfer queue.
    715  */
    716 void
    717 scsipi_periph_thaw_locked(struct scsipi_periph *periph, int count)
    718 {
    719 
    720 	periph->periph_qfreeze -= count;
    721 #ifdef DIAGNOSTIC
    722 	if (periph->periph_qfreeze < 0) {
    723 		static const char pc[] = "periph freeze count < 0";
    724 		scsipi_printaddr(periph);
    725 		printf("%s\n", pc);
    726 		panic(pc);
    727 	}
    728 #endif
    729 	if (periph->periph_qfreeze == 0 &&
    730 	    (periph->periph_flags & PERIPH_WAITING) != 0)
    731 		cv_broadcast(periph_cv_periph(periph));
    732 }
    733 
    734 void
    735 scsipi_periph_freeze(struct scsipi_periph *periph, int count)
    736 {
    737 
    738 	mutex_enter(chan_mtx(periph->periph_channel));
    739 	scsipi_periph_freeze_locked(periph, count);
    740 	mutex_exit(chan_mtx(periph->periph_channel));
    741 }
    742 
    743 void
    744 scsipi_periph_thaw(struct scsipi_periph *periph, int count)
    745 {
    746 
    747 	mutex_enter(chan_mtx(periph->periph_channel));
    748 	scsipi_periph_thaw_locked(periph, count);
    749 	mutex_exit(chan_mtx(periph->periph_channel));
    750 }
    751 
    752 /*
    753  * scsipi_periph_timed_thaw:
    754  *
    755  *	Thaw a device after some time has expired.
    756  */
    757 void
    758 scsipi_periph_timed_thaw(void *arg)
    759 {
    760 	struct scsipi_periph *periph = arg;
    761 	struct scsipi_channel *chan = periph->periph_channel;
    762 
    763 	callout_stop(&periph->periph_callout);
    764 
    765 	mutex_enter(chan_mtx(chan));
    766 	scsipi_periph_thaw_locked(periph, 1);
    767 	if ((periph->periph_channel->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
    768 		/*
    769 		 * Kick the channel's queue here.  Note, we're running in
    770 		 * interrupt context (softclock), so the adapter driver
    771 		 * had better not sleep.
    772 		 */
    773 		mutex_exit(chan_mtx(chan));
    774 		scsipi_run_queue(periph->periph_channel);
    775 	} else {
    776 		/*
    777 		 * Tell the completion thread to kick the channel's queue here.
    778 		 */
    779 		periph->periph_channel->chan_tflags |= SCSIPI_CHANT_KICK;
    780 		cv_broadcast(chan_cv_complete(chan));
    781 		mutex_exit(chan_mtx(chan));
    782 	}
    783 }
    784 
    785 /*
    786  * scsipi_wait_drain:
    787  *
    788  *	Wait for a periph's pending xfers to drain.
    789  */
    790 void
    791 scsipi_wait_drain(struct scsipi_periph *periph)
    792 {
    793 	struct scsipi_channel *chan = periph->periph_channel;
    794 
    795 	mutex_enter(chan_mtx(chan));
    796 	while (periph->periph_active != 0) {
    797 		periph->periph_flags |= PERIPH_WAITDRAIN;
    798 		cv_wait(periph_cv_active(periph), chan_mtx(chan));
    799 	}
    800 	mutex_exit(chan_mtx(chan));
    801 }
    802 
    803 /*
    804  * scsipi_kill_pending:
    805  *
    806  *	Kill off all pending xfers for a periph.
    807  *
    808  *	NOTE: Must be called with channel lock held
    809  */
    810 void
    811 scsipi_kill_pending(struct scsipi_periph *periph)
    812 {
    813 	struct scsipi_channel *chan = periph->periph_channel;
    814 
    815 	(*chan->chan_bustype->bustype_kill_pending)(periph);
    816 	while (periph->periph_active != 0) {
    817 		periph->periph_flags |= PERIPH_WAITDRAIN;
    818 		cv_wait(periph_cv_active(periph), chan_mtx(chan));
    819 	}
    820 }
    821 
    822 /*
    823  * scsipi_print_cdb:
    824  * prints a command descriptor block (for debugging purposes: error
    825  * messages, SCSIVERBOSE, ...)
    826  */
    827 void
    828 scsipi_print_cdb(struct scsipi_generic *cmd)
    829 {
    830 	int i, j;
    831 
    832 	printf("0x%02x", cmd->opcode);
    833 
    834 	switch (CDB_GROUPID(cmd->opcode)) {
    835 	case CDB_GROUPID_0:
    836 		j = CDB_GROUP0;
    837 		break;
    838 	case CDB_GROUPID_1:
    839 		j = CDB_GROUP1;
    840 		break;
    841 	case CDB_GROUPID_2:
    842 		j = CDB_GROUP2;
    843 		break;
    844 	case CDB_GROUPID_3:
    845 		j = CDB_GROUP3;
    846 		break;
    847 	case CDB_GROUPID_4:
    848 		j = CDB_GROUP4;
    849 		break;
    850 	case CDB_GROUPID_5:
    851 		j = CDB_GROUP5;
    852 		break;
    853 	case CDB_GROUPID_6:
    854 		j = CDB_GROUP6;
    855 		break;
    856 	case CDB_GROUPID_7:
    857 		j = CDB_GROUP7;
    858 		break;
    859 	default:
    860 		j = 0;
    861 	}
    862 	if (j == 0)
    863 		j = sizeof(cmd->bytes);
    864 	for (i = 0; i < j-1; i++) /* already done the opcode */
    865 		printf(" %02x", cmd->bytes[i]);
    866 }
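
        /*
         * The number of bytes printed is derived from the opcode's group,
         * i.e. its top three bits.  For example, opcode 0x28 (READ(10))
         * is in group 1 (0x28 >> 5 == 1), so CDB_GROUP1 (10) bytes are
         * printed.
         */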
    867 
    868 /*
    869  * scsipi_interpret_sense:
    870  *
    871  *	Look at the returned sense and act on the error, determining
    872  *	the unix error number to pass back.  (0 = report no error)
    873  *
    874  *	NOTE: If we return ERESTART, we are expected to have
    875  *	thawed the device!
    876  *
    877  *	THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
    878  */
    879 int
    880 scsipi_interpret_sense(struct scsipi_xfer *xs)
    881 {
    882 	struct scsi_sense_data *sense;
    883 	struct scsipi_periph *periph = xs->xs_periph;
    884 	u_int8_t key;
    885 	int error;
    886 	u_int32_t info;
    887 	static const char *error_mes[] = {
    888 		"soft error (corrected)",
    889 		"not ready", "medium error",
    890 		"non-media hardware failure", "illegal request",
    891 		"unit attention", "readonly device",
    892 		"no data found", "vendor unique",
    893 		"copy aborted", "command aborted",
    894 		"search returned equal", "volume overflow",
    895 		"verify miscompare", "unknown error key"
    896 	};
    897 
    898 	sense = &xs->sense.scsi_sense;
    899 #ifdef SCSIPI_DEBUG
    900 	if (periph->periph_flags & SCSIPI_DB1) {
    901 		int count, len;
    902 		scsipi_printaddr(periph);
    903 		printf(" sense debug information:\n");
    904 		printf("\tcode 0x%x valid %d\n",
    905 			SSD_RCODE(sense->response_code),
    906 			sense->response_code & SSD_RCODE_VALID ? 1 : 0);
    907 		printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
    908 			sense->segment,
    909 			SSD_SENSE_KEY(sense->flags),
    910 			sense->flags & SSD_ILI ? 1 : 0,
    911 			sense->flags & SSD_EOM ? 1 : 0,
    912 			sense->flags & SSD_FILEMARK ? 1 : 0);
    913 		printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d "
    914 			"extra bytes\n",
    915 			sense->info[0],
    916 			sense->info[1],
    917 			sense->info[2],
    918 			sense->info[3],
    919 			sense->extra_len);
    920 		len = SSD_ADD_BYTES_LIM(sense);
    921 		printf("\textra (up to %d bytes): ", len);
    922 		for (count = 0; count < len; count++)
    923 			printf("0x%x ", sense->csi[count]);
    924 		printf("\n");
    925 	}
    926 #endif
    927 
    928 	/*
    929 	 * If the periph has its own error handler, call it first.
    930 	 * If it returns a legit error value, return that, otherwise
    931 	 * it wants us to continue with normal error processing.
    932 	 */
    933 	if (periph->periph_switch->psw_error != NULL) {
    934 		SC_DEBUG(periph, SCSIPI_DB2,
    935 		    ("calling private err_handler()\n"));
    936 		error = (*periph->periph_switch->psw_error)(xs);
    937 		if (error != EJUSTRETURN)
    938 			return error;
    939 	}
    940 	/* otherwise use the default */
    941 	switch (SSD_RCODE(sense->response_code)) {
    942 
    943 		/*
    944 		 * Old SCSI-1 and SASI devices respond with
    945 		 * codes other than 70.
    946 		 */
    947 	case 0x00:		/* no error (command completed OK) */
    948 		return 0;
    949 	case 0x04:		/* drive not ready after it was selected */
    950 		if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
    951 			periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
    952 		if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
    953 			return 0;
    954 		/* XXX - display some sort of error here? */
    955 		return EIO;
    956 	case 0x20:		/* invalid command */
    957 		if ((xs->xs_control &
    958 		     XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
    959 			return 0;
    960 		return EINVAL;
    961 	case 0x25:		/* invalid LUN (Adaptec ACB-4000) */
    962 		return EACCES;
    963 
    964 		/*
    965 		 * If it's response code 0x70, use the extended sense data
    966 		 * and interpret the sense key
    967 		 */
    968 	case 0x71:		/* delayed error */
    969 		scsipi_printaddr(periph);
    970 		key = SSD_SENSE_KEY(sense->flags);
    971 		printf(" DEFERRED ERROR, key = 0x%x\n", key);
    972 		/* FALLTHROUGH */
    973 	case 0x70:
    974 		if ((sense->response_code & SSD_RCODE_VALID) != 0)
    975 			info = _4btol(sense->info);
    976 		else
    977 			info = 0;
    978 		key = SSD_SENSE_KEY(sense->flags);
    979 
    980 		switch (key) {
    981 		case SKEY_NO_SENSE:
    982 		case SKEY_RECOVERED_ERROR:
    983 			if (xs->resid == xs->datalen && xs->datalen) {
    984 				/*
    985 				 * Why is this here?
    986 				 */
    987 				xs->resid = 0;	/* not short read */
    988 			}
    989 			error = 0;
    990 			break;
    991 		case SKEY_EQUAL:
    992 			error = 0;
    993 			break;
    994 		case SKEY_NOT_READY:
    995 			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
    996 				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
    997 			if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
    998 				return 0;
    999 			if (sense->asc == 0x3A) {
   1000 				error = ENODEV; /* Medium not present */
   1001 				if (xs->xs_control & XS_CTL_SILENT_NODEV)
   1002 					return error;
   1003 			} else
   1004 				error = EIO;
   1005 			if ((xs->xs_control & XS_CTL_SILENT) != 0)
   1006 				return error;
   1007 			break;
   1008 		case SKEY_ILLEGAL_REQUEST:
   1009 			if ((xs->xs_control &
   1010 			     XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
   1011 				return 0;
   1012 			/*
   1013 			 * Handle the case where a device reports
   1014 			 * Logical Unit Not Supported during discovery.
   1015 			 */
   1016 			if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
   1017 			    sense->asc == 0x25 &&
   1018 			    sense->ascq == 0x00)
   1019 				return EINVAL;
   1020 			if ((xs->xs_control & XS_CTL_SILENT) != 0)
   1021 				return EIO;
   1022 			error = EINVAL;
   1023 			break;
   1024 		case SKEY_UNIT_ATTENTION:
   1025 			if (sense->asc == 0x29 &&
   1026 			    sense->ascq == 0x00) {
   1027 				/* device or bus reset */
   1028 				return ERESTART;
   1029 			}
   1030 			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
   1031 				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
   1032 			if ((xs->xs_control &
   1033 			     XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
   1034 				/* XXX Should reupload any transient state. */
   1035 				(periph->periph_flags &
   1036 				 PERIPH_REMOVABLE) == 0) {
   1037 				return ERESTART;
   1038 			}
   1039 			if ((xs->xs_control & XS_CTL_SILENT) != 0)
   1040 				return EIO;
   1041 			error = EIO;
   1042 			break;
   1043 		case SKEY_DATA_PROTECT:
   1044 			error = EROFS;
   1045 			break;
   1046 		case SKEY_BLANK_CHECK:
   1047 			error = 0;
   1048 			break;
   1049 		case SKEY_ABORTED_COMMAND:
   1050 			if (xs->xs_retries != 0) {
   1051 				xs->xs_retries--;
   1052 				error = ERESTART;
   1053 			} else
   1054 				error = EIO;
   1055 			break;
   1056 		case SKEY_VOLUME_OVERFLOW:
   1057 			error = ENOSPC;
   1058 			break;
   1059 		case SKEY_MEDIUM_ERROR:
   1060 			if (xs->xs_retries != 0) {
   1061 				xs->xs_retries--;
   1062 				error = ERESTART;
   1063 			} else
   1064 				error = EIO;
   1065 			break;
   1066 		default:
   1067 			error = EIO;
   1068 			break;
   1069 		}
   1070 
   1071 		/* Print verbose decode if appropriate and possible */
   1072 		if ((key == 0) ||
   1073 		    ((xs->xs_control & XS_CTL_SILENT) != 0) ||
   1074 		    (scsipi_print_sense(xs, 0) != 0))
   1075 			return error;
   1076 
   1077 		/* Print brief(er) sense information */
   1078 		scsipi_printaddr(periph);
   1079 		printf("%s", error_mes[key - 1]);
   1080 		if ((sense->response_code & SSD_RCODE_VALID) != 0) {
   1081 			switch (key) {
   1082 			case SKEY_NOT_READY:
   1083 			case SKEY_ILLEGAL_REQUEST:
   1084 			case SKEY_UNIT_ATTENTION:
   1085 			case SKEY_DATA_PROTECT:
   1086 				break;
   1087 			case SKEY_BLANK_CHECK:
   1088 				printf(", requested size: %d (decimal)",
   1089 				    info);
   1090 				break;
   1091 			case SKEY_ABORTED_COMMAND:
   1092 				if (xs->xs_retries)
   1093 					printf(", retrying");
   1094 				printf(", cmd 0x%x, info 0x%x",
   1095 				    xs->cmd->opcode, info);
   1096 				break;
   1097 			default:
   1098 				printf(", info = %d (decimal)", info);
   1099 			}
   1100 		}
   1101 		if (sense->extra_len != 0) {
   1102 			int n;
   1103 			printf(", data =");
   1104 			for (n = 0; n < sense->extra_len; n++)
   1105 				printf(" %02x",
   1106 				    sense->csi[n]);
   1107 		}
   1108 		printf("\n");
   1109 		return error;
   1110 
   1111 	/*
   1112 	 * Some other code, just report it
   1113 	 */
   1114 	default:
   1115 #if defined(SCSIDEBUG) || defined(DEBUG)
   1116 	{
   1117 		static const char *uc = "undecodable sense error";
   1118 		int i;
   1119 		u_int8_t *cptr = (u_int8_t *) sense;
   1120 		scsipi_printaddr(periph);
   1121 		if (xs->cmd == &xs->cmdstore) {
   1122 			printf("%s for opcode 0x%x, data=",
   1123 			    uc, xs->cmdstore.opcode);
   1124 		} else {
   1125 			printf("%s, data=", uc);
   1126 		}
   1127 		for (i = 0; i < sizeof(*sense); i++)
   1128 			printf(" 0x%02x", *(cptr++) & 0xff);
   1129 		printf("\n");
   1130 	}
   1131 #else
   1132 		scsipi_printaddr(periph);
   1133 		printf("Sense Error Code 0x%x",
   1134 			SSD_RCODE(sense->response_code));
   1135 		if ((sense->response_code & SSD_RCODE_VALID) != 0) {
   1136 			struct scsi_sense_data_unextended *usense =
   1137 			    (struct scsi_sense_data_unextended *)sense;
   1138 			printf(" at block no. %d (decimal)",
   1139 			    _3btol(usense->block));
   1140 		}
   1141 		printf("\n");
   1142 #endif
   1143 		return EIO;
   1144 	}
   1145 }
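
        /*
         * Illustrative periph error hook (hypothetical driver code): a
         * periph switch can install psw_error to intercept sense
         * interpretation before the default handling above, returning
         * EJUSTRETURN to fall through to it:
         *
         *	static int
         *	xx_error(struct scsipi_xfer *xs)
         *	{
         *		struct scsi_sense_data *sense = &xs->sense.scsi_sense;
         *
         *		if (SSD_SENSE_KEY(sense->flags) == SKEY_MEDIUM_ERROR)
         *			return EIO;	// suppress the default decode
         *		return EJUSTRETURN;	// use the default handler
         *	}
         */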
   1146 
   1147 /*
   1148  * scsipi_test_unit_ready:
   1149  *
   1150  *	Issue a `test unit ready' request.
   1151  */
   1152 int
   1153 scsipi_test_unit_ready(struct scsipi_periph *periph, int flags)
   1154 {
   1155 	struct scsi_test_unit_ready cmd;
   1156 	int retries;
   1157 
   1158 	/* some ATAPI drives don't support TEST UNIT READY. Sigh */
   1159 	if (periph->periph_quirks & PQUIRK_NOTUR)
   1160 		return 0;
   1161 
   1162 	if (flags & XS_CTL_DISCOVERY)
   1163 		retries = 0;
   1164 	else
   1165 		retries = SCSIPIRETRIES;
   1166 
   1167 	memset(&cmd, 0, sizeof(cmd));
   1168 	cmd.opcode = SCSI_TEST_UNIT_READY;
   1169 
   1170 	return scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
   1171 	    retries, 10000, NULL, flags);
   1172 }
   1173 
   1174 static const struct scsipi_inquiry3_pattern {
   1175 	const char vendor[8];
   1176 	const char product[16];
   1177 	const char revision[4];
   1178 } scsipi_inquiry3_quirk[] = {
   1179 	{ "ES-6600 ", "", "" },
   1180 };
   1181 
   1182 static int
   1183 scsipi_inquiry3_ok(const struct scsipi_inquiry_data *ib)
   1184 {
   1185 	for (size_t i = 0; i < __arraycount(scsipi_inquiry3_quirk); i++) {
   1186 		const struct scsipi_inquiry3_pattern *q =
   1187 		    &scsipi_inquiry3_quirk[i];
   1188 #define MATCH(field) \
   1189     (q->field[0] ? memcmp(ib->field, q->field, sizeof(ib->field)) == 0 : 1)
   1190 		if (MATCH(vendor) && MATCH(product) && MATCH(revision))
   1191 			return 0;
   1192 	}
   1193 	return 1;
   1194 }
   1195 
   1196 /*
   1197  * scsipi_inquire:
   1198  *
   1199  *	Ask the device about itself.
   1200  */
   1201 int
   1202 scsipi_inquire(struct scsipi_periph *periph, struct scsipi_inquiry_data *inqbuf,
   1203     int flags)
   1204 {
   1205 	struct scsipi_inquiry cmd;
   1206 	int error;
   1207 	int retries;
   1208 
   1209 	if (flags & XS_CTL_DISCOVERY)
   1210 		retries = 0;
   1211 	else
   1212 		retries = SCSIPIRETRIES;
   1213 
   1214 	/*
   1215 	 * If we request more data than the device can provide, it SHOULD just
   1216 	 * return a short response.  However, some devices error with an
   1217 	 * ILLEGAL REQUEST sense code, and yet others have even more special
   1218 	 * failure modes (such as the GL641USB flash adapter, which goes loony
   1219 	 * and sends corrupted CRCs).  To work around this, and to bring our
   1220 	 * behavior more in line with other OSes, we do a shorter inquiry,
   1221 	 * covering all the SCSI-2 information, first, and then request more
   1222 	 * data iff the "additional length" field indicates there is more.
   1223 	 * - mycroft, 2003/10/16
   1224 	 */
   1225 	memset(&cmd, 0, sizeof(cmd));
   1226 	cmd.opcode = INQUIRY;
   1227 	cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI2;
   1228 	error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
   1229 	    (void *)inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI2, retries,
   1230 	    10000, NULL, flags | XS_CTL_DATA_IN);
   1231 	if (!error &&
   1232 	    inqbuf->additional_length > SCSIPI_INQUIRY_LENGTH_SCSI2 - 4) {
   1233 	    if (scsipi_inquiry3_ok(inqbuf)) {
   1234 #if 0
   1235 printf("inquire: addlen=%d, retrying\n", inqbuf->additional_length);
   1236 #endif
   1237 		cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI3;
   1238 		error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
   1239 		    (void *)inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI3, retries,
   1240 		    10000, NULL, flags | XS_CTL_DATA_IN);
   1241 #if 0
   1242 printf("inquire: error=%d\n", error);
   1243 #endif
   1244 	    }
   1245 	}
   1246 
   1247 #ifdef SCSI_OLD_NOINQUIRY
   1248 	/*
   1249 	 * Kludge for the Adaptec ACB-4000 SCSI->MFM translator.
   1250 	 * This board doesn't support the INQUIRY command at all.
   1251 	 */
   1252 	if (error == EINVAL || error == EACCES) {
   1253 		/*
   1254 		 * Conjure up an INQUIRY response.
   1255 		 */
   1256 		inqbuf->device = (error == EINVAL ?
   1257 			 SID_QUAL_LU_PRESENT :
   1258 			 SID_QUAL_LU_NOTPRESENT) | T_DIRECT;
   1259 		inqbuf->dev_qual2 = 0;
   1260 		inqbuf->version = 0;
   1261 		inqbuf->response_format = SID_FORMAT_SCSI1;
   1262 		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
   1263 		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
   1264 		memcpy(inqbuf->vendor, "ADAPTEC ACB-4000            ", 28);
   1265 		error = 0;
   1266 	}
   1267 
   1268 	/*
   1269 	 * Kludge for the Emulex MT-02 SCSI->QIC translator.
   1270 	 * This board gives an empty response to an INQUIRY command.
   1271 	 */
   1272 	else if (error == 0 &&
   1273 	    inqbuf->device == (SID_QUAL_LU_PRESENT | T_DIRECT) &&
   1274 	    inqbuf->dev_qual2 == 0 &&
   1275 	    inqbuf->version == 0 &&
   1276 	    inqbuf->response_format == SID_FORMAT_SCSI1) {
   1277 		/*
   1278 		 * Fill out the INQUIRY response.
   1279 		 */
   1280 		inqbuf->device = (SID_QUAL_LU_PRESENT | T_SEQUENTIAL);
   1281 		inqbuf->dev_qual2 = SID_REMOVABLE;
   1282 		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
   1283 		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
   1284 		memcpy(inqbuf->vendor, "EMULEX  MT-02 QIC           ", 28);
   1285 	}
   1286 #endif /* SCSI_OLD_NOINQUIRY */
   1287 
   1288 	return error;
   1289 }
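
        /*
         * Illustrative call (hypothetical): discovery-time probing
         * typically inquires with quiet, unretried semantics:
         *
         *	struct scsipi_inquiry_data inqbuf;
         *	error = scsipi_inquire(periph, &inqbuf,
         *	    XS_CTL_DISCOVERY | XS_CTL_SILENT);
         */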
   1290 
   1291 /*
   1292  * scsipi_prevent:
   1293  *
   1294  *	Prevent or allow the user to remove the media
   1295  */
   1296 int
   1297 scsipi_prevent(struct scsipi_periph *periph, int type, int flags)
   1298 {
   1299 	struct scsi_prevent_allow_medium_removal cmd;
   1300 
   1301 	if (periph->periph_quirks & PQUIRK_NODOORLOCK)
   1302 		return 0;
   1303 
   1304 	memset(&cmd, 0, sizeof(cmd));
   1305 	cmd.opcode = SCSI_PREVENT_ALLOW_MEDIUM_REMOVAL;
   1306 	cmd.how = type;
   1307 
   1308 	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
   1309 	    SCSIPIRETRIES, 5000, NULL, flags));
   1310 }
   1311 
   1312 /*
   1313  * scsipi_start:
   1314  *
   1315  *	Send a START UNIT.
   1316  */
   1317 int
   1318 scsipi_start(struct scsipi_periph *periph, int type, int flags)
   1319 {
   1320 	struct scsipi_start_stop cmd;
   1321 
   1322 	memset(&cmd, 0, sizeof(cmd));
   1323 	cmd.opcode = START_STOP;
   1324 	cmd.byte2 = 0x00;
   1325 	cmd.how = type;
   1326 
   1327 	return scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
   1328 	    SCSIPIRETRIES, (type & SSS_START) ? 60000 : 10000, NULL, flags);
   1329 }
   1330 
   1331 /*
   1332  * scsipi_mode_sense, scsipi_mode_sense_big:
   1333  *	get a sense page from a device
   1334  */
   1335 
   1336 int
   1337 scsipi_mode_sense(struct scsipi_periph *periph, int byte2, int page,
   1338     struct scsi_mode_parameter_header_6 *data, int len, int flags, int retries,
   1339     int timeout)
   1340 {
   1341 	struct scsi_mode_sense_6 cmd;
   1342 
   1343 	memset(&cmd, 0, sizeof(cmd));
   1344 	cmd.opcode = SCSI_MODE_SENSE_6;
   1345 	cmd.byte2 = byte2;
   1346 	cmd.page = page;
   1347 	cmd.length = len & 0xff;
   1348 
   1349 	return scsipi_command(periph, (void *)&cmd, sizeof(cmd),
   1350 	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_IN);
   1351 }
   1352 
   1353 int
   1354 scsipi_mode_sense_big(struct scsipi_periph *periph, int byte2, int page,
   1355     struct scsi_mode_parameter_header_10 *data, int len, int flags, int retries,
   1356     int timeout)
   1357 {
   1358 	struct scsi_mode_sense_10 cmd;
   1359 
   1360 	memset(&cmd, 0, sizeof(cmd));
   1361 	cmd.opcode = SCSI_MODE_SENSE_10;
   1362 	cmd.byte2 = byte2;
   1363 	cmd.page = page;
   1364 	_lto2b(len, cmd.length);
   1365 
   1366 	return scsipi_command(periph, (void *)&cmd, sizeof(cmd),
   1367 	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_IN);
   1368 }
   1369 
   1370 int
   1371 scsipi_mode_select(struct scsipi_periph *periph, int byte2,
   1372     struct scsi_mode_parameter_header_6 *data, int len, int flags, int retries,
   1373     int timeout)
   1374 {
   1375 	struct scsi_mode_select_6 cmd;
   1376 
   1377 	memset(&cmd, 0, sizeof(cmd));
   1378 	cmd.opcode = SCSI_MODE_SELECT_6;
   1379 	cmd.byte2 = byte2;
   1380 	cmd.length = len & 0xff;
   1381 
   1382 	return scsipi_command(periph, (void *)&cmd, sizeof(cmd),
   1383 	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_OUT);
   1384 }
   1385 
   1386 int
   1387 scsipi_mode_select_big(struct scsipi_periph *periph, int byte2,
   1388     struct scsi_mode_parameter_header_10 *data, int len, int flags, int retries,
   1389     int timeout)
   1390 {
   1391 	struct scsi_mode_select_10 cmd;
   1392 
   1393 	memset(&cmd, 0, sizeof(cmd));
   1394 	cmd.opcode = SCSI_MODE_SELECT_10;
   1395 	cmd.byte2 = byte2;
   1396 	_lto2b(len, cmd.length);
   1397 
   1398 	return scsipi_command(periph, (void *)&cmd, sizeof(cmd),
   1399 	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_OUT);
   1400 }
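
        /*
         * Illustrative sketch (hypothetical buffer and page): fetching the
         * caching mode page (0x08) with the 6-byte MODE SENSE could look
         * like:
         *
         *	struct {
         *		struct scsi_mode_parameter_header_6 header;
         *		u_int8_t page[128];
         *	} buf;
         *
         *	error = scsipi_mode_sense(periph, 0, 0x08, &buf.header,
         *	    sizeof(buf), 0, SCSIPIRETRIES, 10000);
         *
         * The page data follows the header and any block descriptors the
         * header's blk_desc_len field declares.
         */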
   1401 
   1402 /*
   1403  * scsipi_get_opcodeinfo:
   1404  *
   1405  * Query the device for supported commands and their timeouts,
   1406  * building a timeout lookup table if timeout information is available.
   1407  */
   1408 void
   1409 scsipi_get_opcodeinfo(struct scsipi_periph *periph)
   1410 {
   1411 	u_int8_t *data;
   1412 	int len = 16*1024;
   1413 	int rc;
   1414 	int retries;
   1415 	struct scsi_repsuppopcode cmd;
   1416 
   1417 	/* refrain from asking for supported opcodes */
   1418 	if (periph->periph_quirks & PQUIRK_NOREPSUPPOPC ||
   1419 	    periph->periph_type == T_PROCESSOR || /* spec. */
   1420 	    periph->periph_type == T_CDROM) /* spec. */
   1421 		return;
   1422 
   1423 	scsipi_free_opcodeinfo(periph);
   1424 
   1425 	/*
   1426 	 * query REPORT SUPPORTED OPERATION CODES
   1427 	 * if OK
   1428 	 *   enumerate all codes
   1429 	 *     if timeout exists insert maximum into opcode table
   1430 	 */
   1431 	data = malloc(len, M_DEVBUF, M_WAITOK|M_ZERO);
   1432 
   1433 	memset(&cmd, 0, sizeof(cmd));
   1434 	cmd.opcode = SCSI_MAINTENANCE_IN;
   1435 	cmd.svcaction = RSOC_REPORT_SUPPORTED_OPCODES;
   1436 	cmd.repoption = RSOC_RCTD|RSOC_ALL;
   1437 	_lto4b(len, cmd.alloclen);
   1438 
   1439 	/* loop to skip any UNIT ATTENTIONS at this point */
   1440 	retries = 3;
   1441 	do {
   1442 		rc = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
   1443 				    (void *)data, len, 0, 60000, NULL,
   1444 				    XS_CTL_DATA_IN|XS_CTL_SILENT);
   1445 #ifdef SCSIPI_DEBUG
   1446 		if (rc != 0) {
   1447 			SC_DEBUG(periph, SCSIPI_DB3,
   1448 				("SCSI_MAINTENANCE_IN"
   1449 			 	"[RSOC_REPORT_SUPPORTED_OPCODES] command"
   1450 				" failed: rc=%d, retries=%d\n",
   1451 				rc, retries));
   1452 		}
   1453 #endif
   1454 	} while (rc == EIO && retries-- > 0);
   1455 
   1456 	if (rc == 0) {
   1457 		int count;
   1458 		int dlen = _4btol(data);
   1459 		u_int8_t *c = data + 4;
   1460 
   1461 		SC_DEBUG(periph, SCSIPI_DB3,
   1462 			 ("supported opcode timeout-values loaded\n"));
   1463 		SC_DEBUG(periph, SCSIPI_DB3,
   1464 			 ("CMD  LEN  SA    spec  nom. time  cmd timeout\n"));
   1465 
   1466 		struct scsipi_opcodes *tot = malloc(sizeof(struct scsipi_opcodes),
   1467 		    M_DEVBUF, M_WAITOK|M_ZERO);
   1468 
   1469 		count = 0;
   1470 		while (tot != NULL &&
   1471 		    dlen >= (int)sizeof(struct scsi_repsupopcode_all_commands_descriptor)) {
   1472 			struct scsi_repsupopcode_all_commands_descriptor *acd =
   1473 			    (struct scsi_repsupopcode_all_commands_descriptor *)c;
   1474 #ifdef SCSIPI_DEBUG
   1475 			int cdblen = _2btol((const u_int8_t *)&acd->cdblen);
   1476 #endif
   1477 			dlen -= sizeof(struct scsi_repsupopcode_all_commands_descriptor);
   1478 			c += sizeof(struct scsi_repsupopcode_all_commands_descriptor);
   1479 			SC_DEBUG(periph, SCSIPI_DB3,
   1480 			    ("0x%02x(%2d) ", acd->opcode, cdblen));
   1481 
   1482 			tot->opcode_info[acd->opcode].ti_flags = SCSIPI_TI_VALID;
   1483 
   1484 			if (acd->flags & RSOC_ACD_SERVACTV) {
   1485 				SC_DEBUGN(periph, SCSIPI_DB3,
   1486 				    ("0x%02x%02x ",
   1487 				    acd->serviceaction[0],
   1488 				    acd->serviceaction[1]));
   1489 			} else {
   1490 				SC_DEBUGN(periph, SCSIPI_DB3, ("       "));
   1491 			}
   1492 
   1493 			if (acd->flags & RSOC_ACD_CTDP &&
   1494 			    dlen >= (int)sizeof(struct scsi_repsupopcode_timeouts_descriptor)) {
   1495 				struct scsi_repsupopcode_timeouts_descriptor *td =
   1496 				    (struct scsi_repsupopcode_timeouts_descriptor *)c;
   1497 				long nomto = _4btol(td->nom_process_timeout);
   1498 				long cmdto = _4btol(td->cmd_process_timeout);
   1499 				long t = (cmdto > nomto) ? cmdto : nomto;
   1500 
   1501 				dlen -= sizeof(struct scsi_repsupopcode_timeouts_descriptor);
   1502 				c += sizeof(struct scsi_repsupopcode_timeouts_descriptor);
   1503 
   1504 				SC_DEBUGN(periph, SCSIPI_DB3,
   1505 				    ("0x%02x %10ld %10ld",
   1506 				    td->cmd_specific,
   1507 				    nomto, cmdto));
   1508 
   1509 				if (t > tot->opcode_info[acd->opcode].ti_timeout) {
   1510 					tot->opcode_info[acd->opcode].ti_timeout = t;
   1511 					++count;
   1512 				}
   1513 			}
   1514 			SC_DEBUGN(periph, SCSIPI_DB3, ("\n"));
   1515 		}
   1516 
   1517 		if (count > 0) {
   1518 			periph->periph_opcs = tot;
   1519 		} else {
   1520 			free(tot, M_DEVBUF);
   1521 			SC_DEBUG(periph, SCSIPI_DB3,
   1522 			 	("no usable timeout values available\n"));
   1523 		}
   1524 	} else {
   1525 		SC_DEBUG(periph, SCSIPI_DB3,
   1526 			 ("SCSI_MAINTENANCE_IN"
   1527 			  "[RSOC_REPORT_SUPPORTED_OPCODES] failed error=%d"
   1528 			  " - no device provided timeout "
   1529 			  "values available\n", rc));
   1530 	}
   1531 
   1532 	free(data, M_DEVBUF);
   1533 }
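
        /*
         * For reference, a sketch of the RSOC_RCTD|RSOC_ALL response
         * parsed above (field sizes per the structures used there):
         *
         *	[4 bytes: total command data length]
         *	[all-commands descriptor: opcode, service action, flags,
         *	 CDB length]
         *	[timeouts descriptor (present iff RSOC_ACD_CTDP is set):
         *	 nominal and recommended command timeouts, in seconds]
         *	... descriptors repeat until the data length is consumed ...
         */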
   1534 
   1535 /*
   1536  * scsipi_update_timeouts:
   1537  *	Override the timeout value if device/config-provided
   1538  *	timeouts are available.
   1539  */
   1540 static void
   1541 scsipi_update_timeouts(struct scsipi_xfer *xs)
   1542 {
   1543 	struct scsipi_opcodes *opcs;
   1544 	u_int8_t cmd;
   1545 	int timeout;
   1546 	struct scsipi_opinfo *oi;
   1547 
   1548 	if (xs->timeout <= 0) {
   1549 		return;
   1550 	}
   1551 
   1552 	opcs = xs->xs_periph->periph_opcs;
   1553 
   1554 	if (opcs == NULL) {
   1555 		return;
   1556 	}
   1557 
   1558 	cmd = xs->cmd->opcode;
   1559 	oi = &opcs->opcode_info[cmd];
   1560 
   1561 	timeout = 1000 * (int)oi->ti_timeout;
   1562 
   1563 
   1565 		/*
   1566 		 * pick up device configured timeouts if they
   1567 		 * are longer than the requested ones but less
   1568 		 * than a day
   1569 		 */
   1570 #ifdef SCSIPI_DEBUG
   1571 		if ((oi->ti_flags & SCSIPI_TI_LOGGED) == 0) {
   1572 			SC_DEBUG(xs->xs_periph, SCSIPI_DB3,
   1573 				 ("Overriding command 0x%02x "
   1574 				  "timeout of %d with %d ms\n",
   1575 				  cmd, xs->timeout, timeout));
   1576 			oi->ti_flags |= SCSIPI_TI_LOGGED;
   1577 		}
   1578 #endif
   1579 		xs->timeout = timeout;
   1580 	}
   1581 }
   1582 
   1583 /*
   1584  * scsipi_free_opcodeinfo:
   1585  *
   1586  * free the opcode information table
   1587  */
   1588 void
   1589 scsipi_free_opcodeinfo(struct scsipi_periph *periph)
   1590 {
   1591 	if (periph->periph_opcs != NULL) {
   1592 		free(periph->periph_opcs, M_DEVBUF);
   1593 	}
   1594 
   1595 	periph->periph_opcs = NULL;
   1596 }
   1597 
   1598 /*
   1599  * scsipi_done:
   1600  *
   1601  *	This routine is called by an adapter's interrupt handler when
   1602  *	an xfer is completed.
   1603  */
   1604 void
   1605 scsipi_done(struct scsipi_xfer *xs)
   1606 {
   1607 	struct scsipi_periph *periph = xs->xs_periph;
   1608 	struct scsipi_channel *chan = periph->periph_channel;
   1609 	int freezecnt;
   1610 
   1611 	SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
   1612 #ifdef SCSIPI_DEBUG
   1613 	if (periph->periph_dbflags & SCSIPI_DB1)
   1614 		show_scsipi_cmd(xs);
   1615 #endif
   1616 
   1617 	mutex_enter(chan_mtx(chan));
   1618 	SDT_PROBE1(scsi, base, xfer, done,  xs);
   1619 	/*
   1620 	 * The resource this command was using is now free.
   1621 	 */
   1622 	if (xs->xs_status & XS_STS_DONE) {
   1623 		/* XXX in certain circumstances, such as a device
   1624 		 * being detached, a xs that has already been
   1625 		 * scsipi_done()'d by the main thread will be done'd
   1626 		 * again by scsibusdetach(). Putting the xs on the
   1627 		 * chan_complete queue causes list corruption and
   1628 		 * everyone dies. This prevents that, but perhaps
   1629 		 * there should be better coordination somewhere such
   1630 		 * that this won't ever happen (and it can be turned into
   1631 		 * a KASSERT()).
   1632 		 */
   1633 		SDT_PROBE1(scsi, base, xfer, redone,  xs);
   1634 		mutex_exit(chan_mtx(chan));
   1635 		goto out;
   1636 	}
   1637 	scsipi_put_resource(chan);
   1638 	xs->xs_periph->periph_sent--;
   1639 
   1640 	/*
   1641 	 * If the command was tagged, free the tag.
   1642 	 */
   1643 	if (XS_CTL_TAGTYPE(xs) != 0)
   1644 		scsipi_put_tag(xs);
   1645 	else
   1646 		periph->periph_flags &= ~PERIPH_UNTAG;
   1647 
   1648 	/* Mark the command as `done'. */
   1649 	xs->xs_status |= XS_STS_DONE;
   1650 
   1651 #ifdef DIAGNOSTIC
   1652 	if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
   1653 	    (XS_CTL_ASYNC|XS_CTL_POLL))
   1654 		panic("scsipi_done: ASYNC and POLL");
   1655 #endif
   1656 
   1657 	/*
   1658 	 * If the xfer had an error of any sort, freeze the
   1659 	 * periph's queue.  Freeze it again if we were requested
   1660 	 * to do so in the xfer.
   1661 	 */
   1662 	freezecnt = 0;
   1663 	if (xs->error != XS_NOERROR)
   1664 		freezecnt++;
   1665 	if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
   1666 		freezecnt++;
   1667 	if (freezecnt != 0)
   1668 		scsipi_periph_freeze_locked(periph, freezecnt);
   1669 
   1670 	/*
   1671 	 * Record the xfer with a pending sense, in case a SCSI reset is
   1672 	 * received before the thread is woken up.
   1673 	 */
   1674 	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
   1675 		periph->periph_flags |= PERIPH_SENSE;
   1676 		periph->periph_xscheck = xs;
   1677 	}
   1678 
   1679 	/*
   1680 	 * If this was an xfer that was not to complete asynchronously,
   1681 	 * let the requesting thread perform error checking/handling
   1682 	 * in its context.
   1683 	 */
   1684 	if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
   1685 		/*
   1686 		 * If it's a polling job, just return, to unwind the
   1687 		 * call graph.  We don't need to restart the queue,
   1688 		 * because polling jobs are treated specially, and
   1689 		 * are really only used during crash dumps anyway
   1690 		 * (XXX or during boot-time autoconfiguration of
   1691 		 * ATAPI devices).
   1692 		 */
   1693 		if (xs->xs_control & XS_CTL_POLL) {
   1694 			mutex_exit(chan_mtx(chan));
   1695 			return;
   1696 		}
   1697 		cv_broadcast(xs_cv(xs));
   1698 		mutex_exit(chan_mtx(chan));
   1699 		goto out;
   1700 	}
   1701 
   1702 	/*
   1703 	 * Catch the extremely common case of I/O completing
   1704 	 * without error; no use in taking a context switch
   1705 	 * if we can handle it in interrupt context.
   1706 	 */
   1707 	if (xs->error == XS_NOERROR) {
   1708 		mutex_exit(chan_mtx(chan));
   1709 		(void) scsipi_complete(xs);
   1710 		goto out;
   1711 	}
   1712 
   1713 	/*
   1714 	 * There is an error on this xfer.  Put it on the channel's
   1715 	 * completion queue, and wake up the completion thread.
   1716 	 */
   1717 	TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
   1718 	cv_broadcast(chan_cv_complete(chan));
   1719 	mutex_exit(chan_mtx(chan));
   1720 
   1721  out:
   1722 	/*
   1723 	 * If there are more xfers on the channel's queue, attempt to
   1724 	 * run them.
   1725 	 */
   1726 	scsipi_run_queue(chan);
   1727 }
   1728 
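/*
 * Editorial sketch (not part of this driver): an adapter driver's
 * interrupt handler typically finishes an xfer roughly as below; the
 * softc layout and the XX_STATUS() register accessor are hypothetical:
 *
 *	void
 *	xxintr(void *arg)
 *	{
 *		struct xx_softc *sc = arg;
 *		struct scsipi_xfer *xs = sc->sc_cur_xs;
 *
 *		xs->status = XX_STATUS(sc);
 *		xs->error = (xs->status == SCSI_OK) ? XS_NOERROR : XS_BUSY;
 *		xs->resid = 0;
 *		scsipi_done(xs);
 *	}
 */
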
   1729 /*
   1730  * scsipi_complete:
   1731  *
   1732  *	Completion of a scsipi_xfer.  This is the guts of scsipi_done().
   1733  *
   1734  *	NOTE: This routine MUST be called with valid thread context
   1735  *	except for the case where the following two conditions are
   1736  *	true:
   1737  *
   1738  *		xs->error == XS_NOERROR
   1739  *		XS_CTL_ASYNC is set in xs->xs_control
   1740  *
   1741  *	The semantics of this routine can be tricky, so here is an
   1742  *	explanation:
   1743  *
   1744  *		0		Xfer completed successfully.
   1745  *
   1746  *		ERESTART	Xfer had an error, but was restarted.
   1747  *
    1748  *		anything else	Xfer had an error; the return value is
    1749  *				a Unix errno.
   1750  *
   1751  *	If the return value is anything but ERESTART:
   1752  *
   1753  *		- If XS_CTL_ASYNC is set, `xs' has been freed back to
   1754  *		  the pool.
   1755  *		- If there is a buf associated with the xfer,
   1756  *		  it has been biodone()'d.
   1757  */
   1758 static int
   1759 scsipi_complete(struct scsipi_xfer *xs)
   1760 {
   1761 	struct scsipi_periph *periph = xs->xs_periph;
   1762 	struct scsipi_channel *chan = periph->periph_channel;
   1763 	int error;
   1764 
   1765 	SDT_PROBE1(scsi, base, xfer, complete,  xs);
   1766 
   1767 #ifdef DIAGNOSTIC
   1768 	if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
   1769 		panic("scsipi_complete: XS_CTL_ASYNC but no buf");
   1770 #endif
   1771 	/*
   1772 	 * If command terminated with a CHECK CONDITION, we need to issue a
   1773 	 * REQUEST_SENSE command. Once the REQUEST_SENSE has been processed
   1774 	 * we'll have the real status.
   1775 	 * Must be processed with channel lock held to avoid missing
   1776 	 * a SCSI bus reset for this command.
   1777 	 */
   1778 	mutex_enter(chan_mtx(chan));
   1779 	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
    1780 		/* request sense for a request sense? */
   1781 		if (xs->xs_control & XS_CTL_REQSENSE) {
   1782 			scsipi_printaddr(periph);
    1783 			printf("request sense for a request sense?\n");
    1784 			/* XXX maybe we should reset the device? */
   1785 			/* we've been frozen because xs->error != XS_NOERROR */
   1786 			scsipi_periph_thaw_locked(periph, 1);
   1787 			mutex_exit(chan_mtx(chan));
   1788 			if (xs->resid < xs->datalen) {
   1789 				printf("we read %d bytes of sense anyway:\n",
   1790 				    xs->datalen - xs->resid);
   1791 				scsipi_print_sense_data((void *)xs->data, 0);
   1792 			}
   1793 			return EINVAL;
   1794 		}
    1795 		mutex_exit(chan_mtx(chan)); /* XXX allows other commands to queue or run */
   1796 		scsipi_request_sense(xs);
   1797 	} else
   1798 		mutex_exit(chan_mtx(chan));
   1799 
   1800 	/*
    1801 	 * If it's a user-level request, bypass all usual completion
    1802 	 * processing and let the user work it out.
   1803 	 */
   1804 	if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
   1805 		SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n"));
   1806 		mutex_enter(chan_mtx(chan));
   1807 		if (xs->error != XS_NOERROR)
   1808 			scsipi_periph_thaw_locked(periph, 1);
   1809 		mutex_exit(chan_mtx(chan));
   1810 		scsipi_user_done(xs);
    1811 		SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n"));
   1812 		return 0;
   1813 	}
   1814 
   1815 	switch (xs->error) {
   1816 	case XS_NOERROR:
   1817 		error = 0;
   1818 		break;
   1819 
   1820 	case XS_SENSE:
   1821 	case XS_SHORTSENSE:
   1822 		error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
   1823 		break;
   1824 
   1825 	case XS_RESOURCE_SHORTAGE:
   1826 		/*
   1827 		 * XXX Should freeze channel's queue.
   1828 		 */
   1829 		scsipi_printaddr(periph);
   1830 		printf("adapter resource shortage\n");
   1831 		/* FALLTHROUGH */
   1832 
   1833 	case XS_BUSY:
   1834 		if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
   1835 			struct scsipi_max_openings mo;
   1836 
   1837 			/*
   1838 			 * We set the openings to active - 1, assuming that
   1839 			 * the command that got us here is the first one that
   1840 			 * can't fit into the device's queue.  If that's not
   1841 			 * the case, I guess we'll find out soon enough.
   1842 			 */
   1843 			mo.mo_target = periph->periph_target;
   1844 			mo.mo_lun = periph->periph_lun;
   1845 			if (periph->periph_active < periph->periph_openings)
   1846 				mo.mo_openings = periph->periph_active - 1;
   1847 			else
   1848 				mo.mo_openings = periph->periph_openings - 1;
   1849 #ifdef DIAGNOSTIC
   1850 			if (mo.mo_openings < 0) {
   1851 				scsipi_printaddr(periph);
   1852 				printf("QUEUE FULL resulted in < 0 openings\n");
   1853 				panic("scsipi_done");
   1854 			}
   1855 #endif
   1856 			if (mo.mo_openings == 0) {
   1857 				scsipi_printaddr(periph);
   1858 				printf("QUEUE FULL resulted in 0 openings\n");
   1859 				mo.mo_openings = 1;
   1860 			}
   1861 			scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
   1862 			error = ERESTART;
   1863 		} else if (xs->xs_retries != 0) {
   1864 			xs->xs_retries--;
   1865 			/*
   1866 			 * Wait one second, and try again.
   1867 			 */
   1868 			mutex_enter(chan_mtx(chan));
   1869 			if ((xs->xs_control & XS_CTL_POLL) ||
   1870 			    (chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
   1871 				/* XXX: quite extreme */
   1872 				kpause("xsbusy", false, hz, chan_mtx(chan));
   1873 			} else if (!callout_pending(&periph->periph_callout)) {
   1874 				scsipi_periph_freeze_locked(periph, 1);
   1875 				callout_reset(&periph->periph_callout,
   1876 				    hz, scsipi_periph_timed_thaw, periph);
   1877 			}
   1878 			mutex_exit(chan_mtx(chan));
   1879 			error = ERESTART;
   1880 		} else
   1881 			error = EBUSY;
   1882 		break;
   1883 
   1884 	case XS_REQUEUE:
   1885 		error = ERESTART;
   1886 		break;
   1887 
   1888 	case XS_SELTIMEOUT:
   1889 	case XS_TIMEOUT:
   1890 		/*
   1891 		 * If the device hasn't gone away, honor retry counts.
   1892 		 *
   1893 		 * Note that if we're in the middle of probing it,
    1894 		 * it won't be found because it isn't here yet, so
    1895 		 * we won't honor the retry count in that case.
   1896 		 */
   1897 		if (scsipi_lookup_periph(chan, periph->periph_target,
   1898 		    periph->periph_lun) && xs->xs_retries != 0) {
   1899 			xs->xs_retries--;
   1900 			error = ERESTART;
   1901 		} else
   1902 			error = EIO;
   1903 		break;
   1904 
   1905 	case XS_RESET:
   1906 		if (xs->xs_control & XS_CTL_REQSENSE) {
   1907 			/*
   1908 			 * request sense interrupted by reset: signal it
    1909 			 * REQUEST SENSE interrupted by a reset: signal it
    1910 			 * with an EINTR return code.
   1911 			error = EINTR;
   1912 		} else {
   1913 			if (xs->xs_retries != 0) {
   1914 				xs->xs_retries--;
   1915 				error = ERESTART;
   1916 			} else
   1917 				error = EIO;
   1918 		}
   1919 		break;
   1920 
   1921 	case XS_DRIVER_STUFFUP:
   1922 		scsipi_printaddr(periph);
   1923 		printf("generic HBA error\n");
   1924 		error = EIO;
   1925 		break;
   1926 	default:
   1927 		scsipi_printaddr(periph);
   1928 		printf("invalid return code from adapter: %d\n", xs->error);
   1929 		error = EIO;
   1930 		break;
   1931 	}
   1932 
   1933 	mutex_enter(chan_mtx(chan));
   1934 	if (error == ERESTART) {
   1935 		SDT_PROBE1(scsi, base, xfer, restart,  xs);
   1936 		/*
   1937 		 * If we get here, the periph has been thawed and frozen
   1938 		 * again if we had to issue recovery commands.  Alternatively,
   1939 		 * it may have been frozen again and in a timed thaw.  In
   1940 		 * any case, we thaw the periph once we re-enqueue the
   1941 		 * command.  Once the periph is fully thawed, it will begin
   1942 		 * operation again.
   1943 		 */
   1944 		xs->error = XS_NOERROR;
   1945 		xs->status = SCSI_OK;
   1946 		xs->xs_status &= ~XS_STS_DONE;
   1947 		xs->xs_requeuecnt++;
   1948 		error = scsipi_enqueue(xs);
   1949 		if (error == 0) {
   1950 			scsipi_periph_thaw_locked(periph, 1);
   1951 			mutex_exit(chan_mtx(chan));
   1952 			return ERESTART;
   1953 		}
   1954 	}
   1955 
   1956 	/*
   1957 	 * scsipi_done() freezes the queue if not XS_NOERROR.
   1958 	 * Thaw it here.
   1959 	 */
   1960 	if (xs->error != XS_NOERROR)
   1961 		scsipi_periph_thaw_locked(periph, 1);
   1962 	mutex_exit(chan_mtx(chan));
   1963 
   1964 	if (periph->periph_switch->psw_done)
   1965 		periph->periph_switch->psw_done(xs, error);
   1966 
   1967 	mutex_enter(chan_mtx(chan));
   1968 	if (xs->xs_control & XS_CTL_ASYNC)
   1969 		scsipi_put_xs(xs);
   1970 	mutex_exit(chan_mtx(chan));
   1971 
   1972 	return error;
   1973 }
   1974 
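/*
 * Editorial sketch of the contract above: a synchronous caller re-runs
 * the queue and waits again whenever this returns ERESTART, e.g.
 * (compressed from scsipi_execute_xs() below):
 *
 *	for (;;) {
 *		scsipi_run_queue(chan);
 *		...wait until XS_STS_DONE is set in xs->xs_status...
 *		error = scsipi_complete(xs);
 *		if (error != ERESTART)
 *			break;
 *	}
 */
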
   1975 /*
   1976  * Issue a request sense for the given scsipi_xfer. Called when the xfer
   1977  * returns with a CHECK_CONDITION status. Must be called in valid thread
   1978  * context.
   1979  */
   1980 
   1981 static void
   1982 scsipi_request_sense(struct scsipi_xfer *xs)
   1983 {
   1984 	struct scsipi_periph *periph = xs->xs_periph;
   1985 	int flags, error;
   1986 	struct scsi_request_sense cmd;
   1987 
   1988 	periph->periph_flags |= PERIPH_SENSE;
   1989 
   1990 	/* if command was polling, request sense will too */
   1991 	flags = xs->xs_control & XS_CTL_POLL;
   1992 	/* Polling commands can't sleep */
   1993 	if (flags)
   1994 		flags |= XS_CTL_NOSLEEP;
   1995 
   1996 	flags |= XS_CTL_REQSENSE | XS_CTL_URGENT | XS_CTL_DATA_IN |
   1997 	    XS_CTL_THAW_PERIPH | XS_CTL_FREEZE_PERIPH;
   1998 
   1999 	memset(&cmd, 0, sizeof(cmd));
   2000 	cmd.opcode = SCSI_REQUEST_SENSE;
   2001 	cmd.length = sizeof(struct scsi_sense_data);
   2002 
   2003 	error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
   2004 	    (void *)&xs->sense.scsi_sense, sizeof(struct scsi_sense_data),
   2005 	    0, 1000, NULL, flags);
   2006 	periph->periph_flags &= ~PERIPH_SENSE;
   2007 	periph->periph_xscheck = NULL;
   2008 	switch (error) {
   2009 	case 0:
   2010 		/* we have a valid sense */
   2011 		xs->error = XS_SENSE;
   2012 		return;
   2013 	case EINTR:
   2014 		/* REQUEST_SENSE interrupted by bus reset. */
   2015 		xs->error = XS_RESET;
   2016 		return;
   2017 	case EIO:
    2018 		/* request sense couldn't be performed */
   2019 		/*
   2020 		 * XXX this isn't quite right but we don't have anything
   2021 		 * better for now
   2022 		 */
   2023 		xs->error = XS_DRIVER_STUFFUP;
   2024 		return;
   2025 	default:
   2026 		 /* Notify that request sense failed. */
    2027 		/* Notify that request sense failed. */
   2028 		scsipi_printaddr(periph);
   2029 		printf("request sense failed with error %d\n", error);
   2030 		return;
   2031 	}
   2032 }
   2033 
   2034 /*
   2035  * scsipi_enqueue:
   2036  *
   2037  *	Enqueue an xfer on a channel.
   2038  */
   2039 static int
   2040 scsipi_enqueue(struct scsipi_xfer *xs)
   2041 {
   2042 	struct scsipi_channel *chan = xs->xs_periph->periph_channel;
   2043 	struct scsipi_xfer *qxs;
   2044 
   2045 	SDT_PROBE1(scsi, base, xfer, enqueue,  xs);
   2046 
   2047 	/*
   2048 	 * If the xfer is to be polled, and there are already jobs on
   2049 	 * the queue, we can't proceed.
   2050 	 */
   2051 	KASSERT(mutex_owned(chan_mtx(chan)));
   2052 	if ((xs->xs_control & XS_CTL_POLL) != 0 &&
   2053 	    TAILQ_FIRST(&chan->chan_queue) != NULL) {
   2054 		xs->error = XS_DRIVER_STUFFUP;
   2055 		return EAGAIN;
   2056 	}
   2057 
   2058 	/*
   2059 	 * If we have an URGENT xfer, it's an error recovery command
   2060 	 * and it should just go on the head of the channel's queue.
   2061 	 */
   2062 	if (xs->xs_control & XS_CTL_URGENT) {
   2063 		TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
   2064 		goto out;
   2065 	}
   2066 
   2067 	/*
   2068 	 * If this xfer has already been on the queue before, we
   2069 	 * need to reinsert it in the correct order.  That order is:
   2070 	 *
   2071 	 *	Immediately before the first xfer for this periph
   2072 	 *	with a requeuecnt less than xs->xs_requeuecnt.
   2073 	 *
   2074 	 * Failing that, at the end of the queue.  (We'll end up
   2075 	 * there naturally.)
   2076 	 */
   2077 	if (xs->xs_requeuecnt != 0) {
   2078 		for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
   2079 		     qxs = TAILQ_NEXT(qxs, channel_q)) {
   2080 			if (qxs->xs_periph == xs->xs_periph &&
   2081 			    qxs->xs_requeuecnt < xs->xs_requeuecnt)
   2082 				break;
   2083 		}
   2084 		if (qxs != NULL) {
   2085 			TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
   2086 			    channel_q);
   2087 			goto out;
   2088 		}
   2089 	}
   2090 	TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
   2091  out:
   2092 	if (xs->xs_control & XS_CTL_THAW_PERIPH)
   2093 		scsipi_periph_thaw_locked(xs->xs_periph, 1);
   2094 	return 0;
   2095 }
   2096 
   2097 /*
   2098  * scsipi_run_queue:
   2099  *
   2100  *	Start as many xfers as possible running on the channel.
   2101  */
   2102 static void
   2103 scsipi_run_queue(struct scsipi_channel *chan)
   2104 {
   2105 	struct scsipi_xfer *xs;
   2106 	struct scsipi_periph *periph;
   2107 
   2108 	SDT_PROBE1(scsi, base, queue, batch__start,  chan);
   2109 	for (;;) {
   2110 		mutex_enter(chan_mtx(chan));
   2111 
   2112 		/*
   2113 		 * If the channel is frozen, we can't do any work right
   2114 		 * now.
   2115 		 */
   2116 		if (chan->chan_qfreeze != 0) {
   2117 			mutex_exit(chan_mtx(chan));
   2118 			break;
   2119 		}
   2120 
   2121 		/*
   2122 		 * Look for work to do, and make sure we can do it.
   2123 		 */
   2124 		for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
   2125 		     xs = TAILQ_NEXT(xs, channel_q)) {
   2126 			periph = xs->xs_periph;
   2127 
   2128 			if ((periph->periph_sent >= periph->periph_openings) ||
   2129 			    periph->periph_qfreeze != 0 ||
   2130 			    (periph->periph_flags & PERIPH_UNTAG) != 0)
   2131 				continue;
   2132 
   2133 			if ((periph->periph_flags &
   2134 			    (PERIPH_RECOVERING | PERIPH_SENSE)) != 0 &&
   2135 			    (xs->xs_control & XS_CTL_URGENT) == 0)
   2136 				continue;
   2137 
   2138 			/*
   2139 			 * We can issue this xfer!
   2140 			 */
   2141 			goto got_one;
   2142 		}
   2143 
   2144 		/*
   2145 		 * Can't find any work to do right now.
   2146 		 */
   2147 		mutex_exit(chan_mtx(chan));
   2148 		break;
   2149 
   2150  got_one:
   2151 		/*
   2152 		 * Have an xfer to run.  Allocate a resource from
   2153 		 * the adapter to run it.  If we can't allocate that
   2154 		 * resource, we don't dequeue the xfer.
   2155 		 */
   2156 		if (scsipi_get_resource(chan) == 0) {
   2157 			/*
   2158 			 * Adapter is out of resources.  If the adapter
   2159 			 * supports it, attempt to grow them.
   2160 			 */
   2161 			if (scsipi_grow_resources(chan) == 0) {
   2162 				/*
   2163 				 * Wasn't able to grow resources,
   2164 				 * nothing more we can do.
   2165 				 */
   2166 				if (xs->xs_control & XS_CTL_POLL) {
   2167 					scsipi_printaddr(xs->xs_periph);
   2168 					printf("polling command but no "
    2169 					    "adapter resources\n");
   2170 					/* We'll panic shortly... */
   2171 				}
   2172 				mutex_exit(chan_mtx(chan));
   2173 
   2174 				/*
    2175 				 * XXX: We should be able to note here
    2176 				 * XXX: that resources are needed!
   2177 				 */
   2178 				break;
   2179 			}
   2180 			/*
   2181 			 * scsipi_grow_resources() allocated the resource
   2182 			 * for us.
   2183 			 */
   2184 		}
   2185 
   2186 		/*
   2187 		 * We have a resource to run this xfer, do it!
   2188 		 */
   2189 		TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
   2190 
   2191 		/*
   2192 		 * If the command is to be tagged, allocate a tag ID
   2193 		 * for it.
   2194 		 */
   2195 		if (XS_CTL_TAGTYPE(xs) != 0)
   2196 			scsipi_get_tag(xs);
   2197 		else
   2198 			periph->periph_flags |= PERIPH_UNTAG;
   2199 		periph->periph_sent++;
   2200 		mutex_exit(chan_mtx(chan));
   2201 
   2202 		SDT_PROBE2(scsi, base, queue, run,  chan, xs);
   2203 		scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
   2204 	}
   2205 	SDT_PROBE1(scsi, base, queue, batch__done,  chan);
   2206 }
   2207 
   2208 /*
   2209  * scsipi_execute_xs:
   2210  *
   2211  *	Begin execution of an xfer, waiting for it to complete, if necessary.
   2212  */
   2213 int
   2214 scsipi_execute_xs(struct scsipi_xfer *xs)
   2215 {
   2216 	struct scsipi_periph *periph = xs->xs_periph;
   2217 	struct scsipi_channel *chan = periph->periph_channel;
   2218 	int oasync, async, poll, error;
   2219 
   2220 	KASSERT(!cold);
   2221 
   2222 	scsipi_update_timeouts(xs);
   2223 
   2224 	(chan->chan_bustype->bustype_cmd)(xs);
   2225 
   2226 	xs->xs_status &= ~XS_STS_DONE;
   2227 	xs->error = XS_NOERROR;
   2228 	xs->resid = xs->datalen;
   2229 	xs->status = SCSI_OK;
   2230 	SDT_PROBE1(scsi, base, xfer, execute,  xs);
   2231 
   2232 #ifdef SCSIPI_DEBUG
   2233 	if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) {
   2234 		printf("scsipi_execute_xs: ");
   2235 		show_scsipi_xs(xs);
   2236 		printf("\n");
   2237 	}
   2238 #endif
   2239 
   2240 	/*
   2241 	 * Deal with command tagging:
   2242 	 *
   2243 	 *	- If the device's current operating mode doesn't
   2244 	 *	  include tagged queueing, clear the tag mask.
   2245 	 *
   2246 	 *	- If the device's current operating mode *does*
   2247 	 *	  include tagged queueing, set the tag_type in
   2248 	 *	  the xfer to the appropriate byte for the tag
   2249 	 *	  message.
   2250 	 */
   2251 	if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0 ||
   2252 		(xs->xs_control & XS_CTL_REQSENSE)) {
   2253 		xs->xs_control &= ~XS_CTL_TAGMASK;
   2254 		xs->xs_tag_type = 0;
   2255 	} else {
   2256 		/*
   2257 		 * If the request doesn't specify a tag, give Head
   2258 		 * tags to URGENT operations and Simple tags to
   2259 		 * everything else.
   2260 		 */
   2261 		if (XS_CTL_TAGTYPE(xs) == 0) {
   2262 			if (xs->xs_control & XS_CTL_URGENT)
   2263 				xs->xs_control |= XS_CTL_HEAD_TAG;
   2264 			else
   2265 				xs->xs_control |= XS_CTL_SIMPLE_TAG;
   2266 		}
   2267 
   2268 		switch (XS_CTL_TAGTYPE(xs)) {
   2269 		case XS_CTL_ORDERED_TAG:
   2270 			xs->xs_tag_type = MSG_ORDERED_Q_TAG;
   2271 			break;
   2272 
   2273 		case XS_CTL_SIMPLE_TAG:
   2274 			xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
   2275 			break;
   2276 
   2277 		case XS_CTL_HEAD_TAG:
   2278 			xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
   2279 			break;
   2280 
   2281 		default:
   2282 			scsipi_printaddr(periph);
   2283 			printf("invalid tag mask 0x%08x\n",
   2284 			    XS_CTL_TAGTYPE(xs));
   2285 			panic("scsipi_execute_xs");
   2286 		}
   2287 	}
   2288 
   2289 	/* If the adapter wants us to poll, poll. */
   2290 	if (chan->chan_adapter->adapt_flags & SCSIPI_ADAPT_POLL_ONLY)
   2291 		xs->xs_control |= XS_CTL_POLL;
   2292 
   2293 	/*
   2294 	 * If we don't yet have a completion thread, or we are to poll for
   2295 	 * completion, clear the ASYNC flag.
   2296 	 */
    2297 	oasync = (xs->xs_control & XS_CTL_ASYNC);
   2298 	if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
   2299 		xs->xs_control &= ~XS_CTL_ASYNC;
   2300 
   2301 	async = (xs->xs_control & XS_CTL_ASYNC);
   2302 	poll = (xs->xs_control & XS_CTL_POLL);
   2303 
   2304 #ifdef DIAGNOSTIC
   2305 	if (oasync != 0 && xs->bp == NULL)
   2306 		panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
   2307 #endif
   2308 
   2309 	/*
   2310 	 * Enqueue the transfer.  If we're not polling for completion, this
   2311 	 * should ALWAYS return `no error'.
   2312 	 */
   2313 	error = scsipi_enqueue(xs);
   2314 	if (error) {
   2315 		if (poll == 0) {
   2316 			scsipi_printaddr(periph);
   2317 			printf("not polling, but enqueue failed with %d\n",
   2318 			    error);
   2319 			panic("scsipi_execute_xs");
   2320 		}
   2321 
   2322 		scsipi_printaddr(periph);
   2323 		printf("should have flushed queue?\n");
   2324 		goto free_xs;
   2325 	}
   2326 
   2327 	mutex_exit(chan_mtx(chan));
   2328  restarted:
   2329 	scsipi_run_queue(chan);
   2330 	mutex_enter(chan_mtx(chan));
   2331 
   2332 	/*
   2333 	 * The xfer is enqueued, and possibly running.  If it's to be
   2334 	 * completed asynchronously, just return now.
   2335 	 */
   2336 	if (async)
   2337 		return 0;
   2338 
   2339 	/*
   2340 	 * Not an asynchronous command; wait for it to complete.
   2341 	 */
   2342 	while ((xs->xs_status & XS_STS_DONE) == 0) {
   2343 		if (poll) {
   2344 			scsipi_printaddr(periph);
   2345 			printf("polling command not done\n");
   2346 			panic("scsipi_execute_xs");
   2347 		}
   2348 		cv_wait(xs_cv(xs), chan_mtx(chan));
   2349 	}
   2350 
   2351 	/*
   2352 	 * Command is complete.  scsipi_done() has awakened us to perform
   2353 	 * the error handling.
   2354 	 */
   2355 	mutex_exit(chan_mtx(chan));
   2356 	error = scsipi_complete(xs);
   2357 	if (error == ERESTART)
   2358 		goto restarted;
   2359 
   2360 	/*
   2361 	 * If it was meant to run async and we cleared async ourselves,
    2362 	 * don't return an error here. It has already been handled.
   2363 	 */
   2364 	if (oasync)
   2365 		error = 0;
   2366 	/*
   2367 	 * Command completed successfully or fatal error occurred.  Fall
   2368 	 * into....
   2369 	 */
   2370 	mutex_enter(chan_mtx(chan));
   2371  free_xs:
   2372 	scsipi_put_xs(xs);
   2373 	mutex_exit(chan_mtx(chan));
   2374 
   2375 	/*
   2376 	 * Kick the queue, keep it running in case it stopped for some
   2377 	 * reason.
   2378 	 */
   2379 	scsipi_run_queue(chan);
   2380 
   2381 	mutex_enter(chan_mtx(chan));
   2382 	return error;
   2383 }
   2384 
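/*
 * Editorial usage sketch: periph drivers normally reach this function
 * through scsipi_command(), which allocates and fills in the xs.  For
 * example, the REQUEST SENSE issued by scsipi_request_sense() above
 * amounts to:
 *
 *	memset(&cmd, 0, sizeof(cmd));
 *	cmd.opcode = SCSI_REQUEST_SENSE;
 *	cmd.length = sizeof(struct scsi_sense_data);
 *	error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
 *	    (void *)&xs->sense.scsi_sense, sizeof(struct scsi_sense_data),
 *	    0, 1000, NULL, flags);
 */
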
   2385 /*
   2386  * scsipi_completion_thread:
   2387  *
   2388  *	This is the completion thread.  We wait for errors on
   2389  *	asynchronous xfers, and perform the error handling
   2390  *	function, restarting the command, if necessary.
   2391  */
   2392 static void
   2393 scsipi_completion_thread(void *arg)
   2394 {
   2395 	struct scsipi_channel *chan = arg;
   2396 	struct scsipi_xfer *xs;
   2397 
   2398 	if (chan->chan_init_cb)
   2399 		(*chan->chan_init_cb)(chan, chan->chan_init_cb_arg);
   2400 
   2401 	mutex_enter(chan_mtx(chan));
   2402 	chan->chan_flags |= SCSIPI_CHAN_TACTIVE;
   2403 	for (;;) {
   2404 		xs = TAILQ_FIRST(&chan->chan_complete);
   2405 		if (xs == NULL && chan->chan_tflags == 0) {
   2406 			/* nothing to do; wait */
   2407 			cv_wait(chan_cv_complete(chan), chan_mtx(chan));
   2408 			continue;
   2409 		}
   2410 		if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
   2411 			/* call chan_callback from thread context */
   2412 			chan->chan_tflags &= ~SCSIPI_CHANT_CALLBACK;
   2413 			chan->chan_callback(chan, chan->chan_callback_arg);
   2414 			continue;
   2415 		}
   2416 		if (chan->chan_tflags & SCSIPI_CHANT_GROWRES) {
   2417 			/* attempt to get more openings for this channel */
   2418 			chan->chan_tflags &= ~SCSIPI_CHANT_GROWRES;
   2419 			mutex_exit(chan_mtx(chan));
   2420 			scsipi_adapter_request(chan,
   2421 			    ADAPTER_REQ_GROW_RESOURCES, NULL);
   2422 			scsipi_channel_thaw(chan, 1);
   2423 			if (chan->chan_tflags & SCSIPI_CHANT_GROWRES)
    2424 				kpause("scsizzz", false, hz/10, NULL);
   2425 			mutex_enter(chan_mtx(chan));
   2426 			continue;
   2427 		}
   2428 		if (chan->chan_tflags & SCSIPI_CHANT_KICK) {
   2429 			/* explicitly run the queues for this channel */
   2430 			chan->chan_tflags &= ~SCSIPI_CHANT_KICK;
   2431 			mutex_exit(chan_mtx(chan));
   2432 			scsipi_run_queue(chan);
   2433 			mutex_enter(chan_mtx(chan));
   2434 			continue;
   2435 		}
   2436 		if (chan->chan_tflags & SCSIPI_CHANT_SHUTDOWN) {
   2437 			break;
   2438 		}
   2439 		if (xs) {
   2440 			TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
   2441 			mutex_exit(chan_mtx(chan));
   2442 
   2443 			/*
   2444 			 * Have an xfer with an error; process it.
   2445 			 */
   2446 			(void) scsipi_complete(xs);
   2447 
   2448 			/*
   2449 			 * Kick the queue; keep it running if it was stopped
   2450 			 * for some reason.
   2451 			 */
   2452 			scsipi_run_queue(chan);
   2453 			mutex_enter(chan_mtx(chan));
   2454 		}
   2455 	}
   2456 
   2457 	chan->chan_thread = NULL;
   2458 
   2459 	/* In case parent is waiting for us to exit. */
   2460 	cv_broadcast(chan_cv_thread(chan));
   2461 	mutex_exit(chan_mtx(chan));
   2462 
   2463 	kthread_exit(0);
   2464 }
   2465 /*
   2466  * scsipi_thread_call_callback:
   2467  *
    2468  *	Request to call a callback from the completion thread.
   2469  */
   2470 int
   2471 scsipi_thread_call_callback(struct scsipi_channel *chan,
   2472     void (*callback)(struct scsipi_channel *, void *), void *arg)
   2473 {
   2474 
   2475 	mutex_enter(chan_mtx(chan));
   2476 	if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
   2477 		/* kernel thread doesn't exist yet */
   2478 		mutex_exit(chan_mtx(chan));
   2479 		return ESRCH;
   2480 	}
   2481 	if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
   2482 		mutex_exit(chan_mtx(chan));
   2483 		return EBUSY;
   2484 	}
   2485 	scsipi_channel_freeze(chan, 1);
   2486 	chan->chan_callback = callback;
   2487 	chan->chan_callback_arg = arg;
   2488 	chan->chan_tflags |= SCSIPI_CHANT_CALLBACK;
   2489 	cv_broadcast(chan_cv_complete(chan));
   2490 	mutex_exit(chan_mtx(chan));
   2491 	return 0;
   2492 }
   2493 
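/*
 * Editorial usage sketch (the callback name is hypothetical):
 *
 *	static void
 *	xx_recover(struct scsipi_channel *chan, void *arg)
 *	{
 *		...runs in the completion thread; the channel stays
 *		frozen while the callback is pending...
 *	}
 *
 *	error = scsipi_thread_call_callback(chan, xx_recover, sc);
 *
 * ESRCH means the completion thread doesn't exist yet; EBUSY means
 * another callback is already pending.
 */
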
   2494 /*
   2495  * scsipi_async_event:
   2496  *
   2497  *	Handle an asynchronous event from an adapter.
   2498  */
   2499 void
   2500 scsipi_async_event(struct scsipi_channel *chan, scsipi_async_event_t event,
   2501     void *arg)
   2502 {
   2503 	bool lock = chan_running(chan) > 0;
   2504 
   2505 	if (lock)
   2506 		mutex_enter(chan_mtx(chan));
   2507 	switch (event) {
   2508 	case ASYNC_EVENT_MAX_OPENINGS:
   2509 		scsipi_async_event_max_openings(chan,
   2510 		    (struct scsipi_max_openings *)arg);
   2511 		break;
   2512 
   2513 	case ASYNC_EVENT_XFER_MODE:
   2514 		if (chan->chan_bustype->bustype_async_event_xfer_mode) {
   2515 			chan->chan_bustype->bustype_async_event_xfer_mode(
   2516 			    chan, arg);
   2517 		}
   2518 		break;
   2519 	case ASYNC_EVENT_RESET:
   2520 		scsipi_async_event_channel_reset(chan);
   2521 		break;
   2522 	}
   2523 	if (lock)
   2524 		mutex_exit(chan_mtx(chan));
   2525 }
   2526 
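/*
 * Editorial sketch: adapters deliver these events from their interrupt
 * or discovery paths, for instance
 *
 *	scsipi_async_event(chan, ASYNC_EVENT_RESET, NULL);
 *
 * after observing a bus reset, or
 *
 *	struct scsipi_xfer_mode xm;
 *	...fill in xm...
 *	scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, &xm);
 *
 * once transfer-mode negotiation completes.
 */
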
   2527 /*
   2528  * scsipi_async_event_max_openings:
   2529  *
   2530  *	Update the maximum number of outstanding commands a
   2531  *	device may have.
   2532  */
   2533 static void
   2534 scsipi_async_event_max_openings(struct scsipi_channel *chan,
   2535     struct scsipi_max_openings *mo)
   2536 {
   2537 	struct scsipi_periph *periph;
   2538 	int minlun, maxlun;
   2539 
   2540 	if (mo->mo_lun == -1) {
   2541 		/*
   2542 		 * Wildcarded; apply it to all LUNs.
   2543 		 */
   2544 		minlun = 0;
   2545 		maxlun = chan->chan_nluns - 1;
   2546 	} else
   2547 		minlun = maxlun = mo->mo_lun;
   2548 
   2549 	/* XXX This could really suck with a large LUN space. */
   2550 	for (; minlun <= maxlun; minlun++) {
   2551 		periph = scsipi_lookup_periph_locked(chan, mo->mo_target, minlun);
   2552 		if (periph == NULL)
   2553 			continue;
   2554 
   2555 		if (mo->mo_openings < periph->periph_openings)
   2556 			periph->periph_openings = mo->mo_openings;
   2557 		else if (mo->mo_openings > periph->periph_openings &&
   2558 		    (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
   2559 			periph->periph_openings = mo->mo_openings;
   2560 	}
   2561 }
   2562 
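/*
 * Editorial example: the QUEUE FULL handling in scsipi_complete() above
 * is one producer of this event; it reduces the openings for a single
 * I_T_L nexus:
 *
 *	mo.mo_target = periph->periph_target;
 *	mo.mo_lun = periph->periph_lun;
 *	mo.mo_openings = ...;	(one less than currently active)
 *	scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
 *
 * Passing mo_lun == -1 instead applies the change to every LUN on the
 * target, as handled below.
 */
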
   2563 /*
   2564  * scsipi_set_xfer_mode:
   2565  *
   2566  *	Set the xfer mode for the specified I_T Nexus.
   2567  */
   2568 void
   2569 scsipi_set_xfer_mode(struct scsipi_channel *chan, int target, int immed)
   2570 {
   2571 	struct scsipi_xfer_mode xm;
   2572 	struct scsipi_periph *itperiph;
   2573 	int lun;
   2574 
   2575 	/*
   2576 	 * Go to the minimal xfer mode.
   2577 	 */
   2578 	xm.xm_target = target;
   2579 	xm.xm_mode = 0;
   2580 	xm.xm_period = 0;			/* ignored */
   2581 	xm.xm_offset = 0;			/* ignored */
   2582 
   2583 	/*
   2584 	 * Find the first LUN we know about on this I_T Nexus.
   2585 	 */
   2586 	for (itperiph = NULL, lun = 0; lun < chan->chan_nluns; lun++) {
   2587 		itperiph = scsipi_lookup_periph(chan, target, lun);
   2588 		if (itperiph != NULL)
   2589 			break;
   2590 	}
   2591 	if (itperiph != NULL) {
   2592 		xm.xm_mode = itperiph->periph_cap;
   2593 		/*
   2594 		 * Now issue the request to the adapter.
   2595 		 */
   2596 		scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm);
   2597 		/*
   2598 		 * If we want this to happen immediately, issue a dummy
   2599 		 * command, since most adapters can't really negotiate unless
   2600 		 * they're executing a job.
   2601 		 */
   2602 		if (immed != 0) {
   2603 			(void) scsipi_test_unit_ready(itperiph,
   2604 			    XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
   2605 			    XS_CTL_IGNORE_NOT_READY |
   2606 			    XS_CTL_IGNORE_MEDIA_CHANGE);
   2607 		}
   2608 	}
   2609 }
   2610 
   2611 /*
    2612  * scsipi_async_event_channel_reset:
    2613  *
    2614  *	Handle a SCSI bus reset.
    2615  *	Called with the channel lock held.
   2616  */
   2617 static void
   2618 scsipi_async_event_channel_reset(struct scsipi_channel *chan)
   2619 {
   2620 	struct scsipi_xfer *xs, *xs_next;
   2621 	struct scsipi_periph *periph;
   2622 	int target, lun;
   2623 
   2624 	/*
    2625 	 * The channel has been reset.  Also mark pending REQUEST_SENSE
    2626 	 * commands as reset, since their sense data is no longer available.
    2627 	 * We can't call scsipi_done() from here, as the command has not been
    2628 	 * sent to the adapter yet (that would corrupt the accounting).
   2629 	 */
   2630 
   2631 	for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; xs = xs_next) {
   2632 		xs_next = TAILQ_NEXT(xs, channel_q);
   2633 		if (xs->xs_control & XS_CTL_REQSENSE) {
   2634 			TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
   2635 			xs->error = XS_RESET;
   2636 			if ((xs->xs_control & XS_CTL_ASYNC) != 0)
   2637 				TAILQ_INSERT_TAIL(&chan->chan_complete, xs,
   2638 				    channel_q);
   2639 		}
   2640 	}
   2641 	cv_broadcast(chan_cv_complete(chan));
   2642 	/* Catch xs with pending sense which may not have a REQSENSE xs yet */
   2643 	for (target = 0; target < chan->chan_ntargets; target++) {
   2644 		if (target == chan->chan_id)
   2645 			continue;
    2646 		for (lun = 0; lun < chan->chan_nluns; lun++) {
   2647 			periph = scsipi_lookup_periph_locked(chan, target, lun);
   2648 			if (periph) {
   2649 				xs = periph->periph_xscheck;
   2650 				if (xs)
   2651 					xs->error = XS_RESET;
   2652 			}
   2653 		}
   2654 	}
   2655 }
   2656 
   2657 /*
   2658  * scsipi_target_detach:
   2659  *
    2660  *	Detach all periphs associated with an I_T nexus.
    2661  *	Must be called from valid thread context.
   2662  */
   2663 int
   2664 scsipi_target_detach(struct scsipi_channel *chan, int target, int lun,
   2665     int flags)
   2666 {
   2667 	struct scsipi_periph *periph;
   2668 	device_t tdev;
   2669 	int ctarget, mintarget, maxtarget;
   2670 	int clun, minlun, maxlun;
   2671 	int error = 0;
   2672 
   2673 	if (target == -1) {
   2674 		mintarget = 0;
   2675 		maxtarget = chan->chan_ntargets;
   2676 	} else {
   2677 		if (target == chan->chan_id)
   2678 			return EINVAL;
   2679 		if (target < 0 || target >= chan->chan_ntargets)
   2680 			return EINVAL;
   2681 		mintarget = target;
   2682 		maxtarget = target + 1;
   2683 	}
   2684 
   2685 	if (lun == -1) {
   2686 		minlun = 0;
   2687 		maxlun = chan->chan_nluns;
   2688 	} else {
   2689 		if (lun < 0 || lun >= chan->chan_nluns)
   2690 			return EINVAL;
   2691 		minlun = lun;
   2692 		maxlun = lun + 1;
   2693 	}
   2694 
   2695 	/* for config_detach */
   2696 	KERNEL_LOCK(1, curlwp);
   2697 
   2698 	mutex_enter(chan_mtx(chan));
   2699 	for (ctarget = mintarget; ctarget < maxtarget; ctarget++) {
   2700 		if (ctarget == chan->chan_id)
   2701 			continue;
   2702 
   2703 		for (clun = minlun; clun < maxlun; clun++) {
   2704 			periph = scsipi_lookup_periph_locked(chan, ctarget, clun);
   2705 			if (periph == NULL)
   2706 				continue;
   2707 			tdev = periph->periph_dev;
   2708 			mutex_exit(chan_mtx(chan));
   2709 			error = config_detach(tdev, flags);
   2710 			if (error)
   2711 				goto out;
   2712 			mutex_enter(chan_mtx(chan));
   2713 			KASSERT(scsipi_lookup_periph_locked(chan, ctarget, clun) == NULL);
   2714 		}
   2715 	}
   2716 	mutex_exit(chan_mtx(chan));
   2717 
   2718 out:
   2719 	KERNEL_UNLOCK_ONE(curlwp);
   2720 
   2721 	return error;
   2722 }
   2723 
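/*
 * Editorial usage sketch: -1 wildcards a dimension, so
 *
 *	error = scsipi_target_detach(chan, -1, -1, DETACH_FORCE);
 *
 * attempts to detach every periph on the channel, while
 *
 *	error = scsipi_target_detach(chan, 3, 0, 0);
 *
 * detaches only target 3, LUN 0.
 */
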
   2724 /*
   2725  * scsipi_adapter_addref:
   2726  *
   2727  *	Add a reference to the adapter pointed to by the provided
   2728  *	link, enabling the adapter if necessary.
   2729  */
   2730 int
   2731 scsipi_adapter_addref(struct scsipi_adapter *adapt)
   2732 {
   2733 	int error = 0;
   2734 
   2735 	if (atomic_inc_uint_nv(&adapt->adapt_refcnt) == 1
   2736 	    && adapt->adapt_enable != NULL) {
   2737 		scsipi_adapter_lock(adapt);
   2738 		error = scsipi_adapter_enable(adapt, 1);
   2739 		scsipi_adapter_unlock(adapt);
   2740 		if (error)
   2741 			atomic_dec_uint(&adapt->adapt_refcnt);
   2742 	}
   2743 	return error;
   2744 }
   2745 
   2746 /*
   2747  * scsipi_adapter_delref:
   2748  *
   2749  *	Delete a reference to the adapter pointed to by the provided
   2750  *	link, disabling the adapter if possible.
   2751  */
   2752 void
   2753 scsipi_adapter_delref(struct scsipi_adapter *adapt)
   2754 {
   2755 
   2756 	membar_release();
   2757 	if (atomic_dec_uint_nv(&adapt->adapt_refcnt) == 0
   2758 	    && adapt->adapt_enable != NULL) {
   2759 		membar_acquire();
   2760 		scsipi_adapter_lock(adapt);
   2761 		(void) scsipi_adapter_enable(adapt, 0);
   2762 		scsipi_adapter_unlock(adapt);
   2763 	}
   2764 }
   2765 
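/*
 * Editorial usage sketch: periph drivers bracket the time they need the
 * adapter enabled, typically in their open/close paths:
 *
 *	if ((error = scsipi_adapter_addref(adapt)) != 0)
 *		return error;
 *	...use the adapter...
 *	scsipi_adapter_delref(adapt);
 *
 * The first reference enables the adapter (if it has an adapt_enable
 * hook); dropping the last one disables it again.
 */
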
   2766 static struct scsipi_syncparam {
   2767 	int	ss_factor;
   2768 	int	ss_period;	/* ns * 100 */
   2769 } scsipi_syncparams[] = {
   2770 	{ 0x08,		 625 },	/* FAST-160 (Ultra320) */
   2771 	{ 0x09,		1250 },	/* FAST-80 (Ultra160) */
   2772 	{ 0x0a,		2500 },	/* FAST-40 40MHz (Ultra2) */
   2773 	{ 0x0b,		3030 },	/* FAST-40 33MHz (Ultra2) */
   2774 	{ 0x0c,		5000 },	/* FAST-20 (Ultra) */
   2775 };
   2776 static const int scsipi_nsyncparams =
   2777     sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);
   2778 
   2779 int
   2780 scsipi_sync_period_to_factor(int period /* ns * 100 */)
   2781 {
   2782 	int i;
   2783 
   2784 	for (i = 0; i < scsipi_nsyncparams; i++) {
   2785 		if (period <= scsipi_syncparams[i].ss_period)
   2786 			return scsipi_syncparams[i].ss_factor;
   2787 	}
   2788 
   2789 	return (period / 100) / 4;
   2790 }
   2791 
   2792 int
   2793 scsipi_sync_factor_to_period(int factor)
   2794 {
   2795 	int i;
   2796 
   2797 	for (i = 0; i < scsipi_nsyncparams; i++) {
   2798 		if (factor == scsipi_syncparams[i].ss_factor)
   2799 			return scsipi_syncparams[i].ss_period;
   2800 	}
   2801 
   2802 	return (factor * 4) * 100;
   2803 }
   2804 
   2805 int
   2806 scsipi_sync_factor_to_freq(int factor)
   2807 {
   2808 	int i;
   2809 
   2810 	for (i = 0; i < scsipi_nsyncparams; i++) {
   2811 		if (factor == scsipi_syncparams[i].ss_factor)
   2812 			return 100000000 / scsipi_syncparams[i].ss_period;
   2813 	}
   2814 
   2815 	return 10000000 / ((factor * 4) * 10);
   2816 }
   2817 
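/*
 * Worked example (editorial): factor 0x0a is in the table above with
 * ss_period 2500 (i.e. 25 ns), so scsipi_sync_factor_to_period(0x0a)
 * returns 2500 and scsipi_sync_factor_to_freq(0x0a) returns
 * 100000000 / 2500 == 40000 kHz (40 MHz, FAST-40).  A factor outside
 * the table, e.g. 0x32 (50), takes the generic path:
 * period = (50 * 4) * 100 == 20000 (200 ns) and
 * freq = 10000000 / ((50 * 4) * 10) == 5000 kHz (5 MHz).
 */
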
   2818 static inline void
   2819 scsipi_adapter_lock(struct scsipi_adapter *adapt)
   2820 {
   2821 
   2822 	if ((adapt->adapt_flags & SCSIPI_ADAPT_MPSAFE) == 0)
   2823 		KERNEL_LOCK(1, NULL);
   2824 }
   2825 
   2826 static inline void
   2827 scsipi_adapter_unlock(struct scsipi_adapter *adapt)
   2828 {
   2829 
   2830 	if ((adapt->adapt_flags & SCSIPI_ADAPT_MPSAFE) == 0)
   2831 		KERNEL_UNLOCK_ONE(NULL);
   2832 }
   2833 
   2834 void
   2835 scsipi_adapter_minphys(struct scsipi_channel *chan, struct buf *bp)
   2836 {
   2837 	struct scsipi_adapter *adapt = chan->chan_adapter;
   2838 
   2839 	scsipi_adapter_lock(adapt);
   2840 	(adapt->adapt_minphys)(bp);
    2841 	scsipi_adapter_unlock(adapt);
   2842 }
   2843 
   2844 void
   2845 scsipi_adapter_request(struct scsipi_channel *chan,
   2846 	scsipi_adapter_req_t req, void *arg)
    2848 {
   2849 	struct scsipi_adapter *adapt = chan->chan_adapter;
   2850 
   2851 	scsipi_adapter_lock(adapt);
   2852 	SDT_PROBE3(scsi, base, adapter, request__start,  chan, req, arg);
   2853 	(adapt->adapt_request)(chan, req, arg);
   2854 	SDT_PROBE3(scsi, base, adapter, request__done,  chan, req, arg);
   2855 	scsipi_adapter_unlock(adapt);
   2856 }
   2857 
   2858 int
   2859 scsipi_adapter_ioctl(struct scsipi_channel *chan, u_long cmd,
   2860 	void *data, int flag, struct proc *p)
   2861 {
   2862 	struct scsipi_adapter *adapt = chan->chan_adapter;
   2863 	int error;
   2864 
   2865 	if (adapt->adapt_ioctl == NULL)
   2866 		return ENOTTY;
   2867 
   2868 	scsipi_adapter_lock(adapt);
   2869 	error = (adapt->adapt_ioctl)(chan, cmd, data, flag, p);
   2870 	scsipi_adapter_unlock(adapt);
   2871 	return error;
   2872 }
   2873 
   2874 int
   2875 scsipi_adapter_enable(struct scsipi_adapter *adapt, int enable)
   2876 {
   2877 	int error;
   2878 
   2879 	scsipi_adapter_lock(adapt);
   2880 	error = (adapt->adapt_enable)(adapt->adapt_dev, enable);
   2881 	scsipi_adapter_unlock(adapt);
   2882 	return error;
   2883 }
   2884 
   2885 #ifdef SCSIPI_DEBUG
   2886 /*
   2887  * Given a scsipi_xfer, dump the request, in all its glory
   2888  */
   2889 void
   2890 show_scsipi_xs(struct scsipi_xfer *xs)
   2891 {
   2892 
   2893 	printf("xs(%p): ", xs);
   2894 	printf("xs_control(0x%08x)", xs->xs_control);
   2895 	printf("xs_status(0x%08x)", xs->xs_status);
   2896 	printf("periph(%p)", xs->xs_periph);
   2897 	printf("retr(0x%x)", xs->xs_retries);
   2898 	printf("timo(0x%x)", xs->timeout);
   2899 	printf("cmd(%p)", xs->cmd);
   2900 	printf("len(0x%x)", xs->cmdlen);
   2901 	printf("data(%p)", xs->data);
   2902 	printf("len(0x%x)", xs->datalen);
   2903 	printf("res(0x%x)", xs->resid);
   2904 	printf("err(0x%x)", xs->error);
   2905 	printf("bp(%p)", xs->bp);
   2906 	show_scsipi_cmd(xs);
   2907 }
   2908 
   2909 void
   2910 show_scsipi_cmd(struct scsipi_xfer *xs)
   2911 {
   2912 	u_char *b = (u_char *) xs->cmd;
   2913 	int i = 0;
   2914 
   2915 	scsipi_printaddr(xs->xs_periph);
   2916 	printf(" command: ");
   2917 
   2918 	if ((xs->xs_control & XS_CTL_RESET) == 0) {
   2919 		while (i < xs->cmdlen) {
   2920 			if (i)
   2921 				printf(",");
   2922 			printf("0x%x", b[i++]);
   2923 		}
   2924 		printf("-[%d bytes]\n", xs->datalen);
   2925 		if (xs->datalen)
   2926 			show_mem(xs->data, uimin(64, xs->datalen));
   2927 	} else
   2928 		printf("-RESET-\n");
   2929 }
   2930 
   2931 void
   2932 show_mem(u_char *address, int num)
   2933 {
   2934 	int x;
   2935 
   2936 	printf("------------------------------");
   2937 	for (x = 0; x < num; x++) {
   2938 		if ((x % 16) == 0)
   2939 			printf("\n%03d: ", x);
   2940 		printf("%02x ", *address++);
   2941 	}
   2942 	printf("\n------------------------------\n");
   2943 }
   2944 #endif /* SCSIPI_DEBUG */
   2945