      1 /*	$NetBSD: scsipi_base.c,v 1.178.6.2 2020/04/08 14:08:12 martin Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 1998, 1999, 2000, 2002, 2003, 2004 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
      9  * Simulation Facility, NASA Ames Research Center.
     10  *
     11  * Redistribution and use in source and binary forms, with or without
     12  * modification, are permitted provided that the following conditions
     13  * are met:
     14  * 1. Redistributions of source code must retain the above copyright
     15  *    notice, this list of conditions and the following disclaimer.
     16  * 2. Redistributions in binary form must reproduce the above copyright
     17  *    notice, this list of conditions and the following disclaimer in the
     18  *    documentation and/or other materials provided with the distribution.
     19  *
     20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     30  * POSSIBILITY OF SUCH DAMAGE.
     31  */
     32 
     33 #include <sys/cdefs.h>
     34 __KERNEL_RCSID(0, "$NetBSD: scsipi_base.c,v 1.178.6.2 2020/04/08 14:08:12 martin Exp $");
     35 
     36 #ifdef _KERNEL_OPT
     37 #include "opt_scsi.h"
     38 #endif
     39 
     40 #include <sys/param.h>
     41 #include <sys/systm.h>
     42 #include <sys/kernel.h>
     43 #include <sys/buf.h>
     44 #include <sys/uio.h>
     45 #include <sys/malloc.h>
     46 #include <sys/pool.h>
     47 #include <sys/errno.h>
     48 #include <sys/device.h>
     49 #include <sys/proc.h>
     50 #include <sys/kthread.h>
     51 #include <sys/hash.h>
     52 #include <sys/atomic.h>
     53 
     54 #include <dev/scsipi/scsi_sdt.h>
     55 #include <dev/scsipi/scsi_spc.h>
     56 #include <dev/scsipi/scsipi_all.h>
     57 #include <dev/scsipi/scsipi_disk.h>
     58 #include <dev/scsipi/scsipiconf.h>
     59 #include <dev/scsipi/scsipi_base.h>
     60 
     61 #include <dev/scsipi/scsi_all.h>
     62 #include <dev/scsipi/scsi_message.h>
     63 
     64 #include <machine/param.h>
     65 
     66 SDT_PROVIDER_DEFINE(scsi);
     67 
     68 SDT_PROBE_DEFINE3(scsi, base, tag, get,
     69     "struct scsipi_xfer *"/*xs*/, "uint8_t"/*tag*/, "uint8_t"/*type*/);
     70 SDT_PROBE_DEFINE3(scsi, base, tag, put,
     71     "struct scsipi_xfer *"/*xs*/, "uint8_t"/*tag*/, "uint8_t"/*type*/);
     72 
     73 SDT_PROBE_DEFINE3(scsi, base, adapter, request__start,
     74     "struct scsipi_channel *"/*chan*/,
     75     "scsipi_adapter_req_t"/*req*/,
     76     "void *"/*arg*/);
     77 SDT_PROBE_DEFINE3(scsi, base, adapter, request__done,
     78     "struct scsipi_channel *"/*chan*/,
     79     "scsipi_adapter_req_t"/*req*/,
     80     "void *"/*arg*/);
     81 
     82 SDT_PROBE_DEFINE1(scsi, base, queue, batch__start,
     83     "struct scsipi_channel *"/*chan*/);
     84 SDT_PROBE_DEFINE2(scsi, base, queue, run,
     85     "struct scsipi_channel *"/*chan*/,
     86     "struct scsipi_xfer *"/*xs*/);
     87 SDT_PROBE_DEFINE1(scsi, base, queue, batch__done,
     88     "struct scsipi_channel *"/*chan*/);
     89 
     90 SDT_PROBE_DEFINE1(scsi, base, xfer, execute,  "struct scsipi_xfer *"/*xs*/);
     91 SDT_PROBE_DEFINE1(scsi, base, xfer, enqueue,  "struct scsipi_xfer *"/*xs*/);
     92 SDT_PROBE_DEFINE1(scsi, base, xfer, done,  "struct scsipi_xfer *"/*xs*/);
     93 SDT_PROBE_DEFINE1(scsi, base, xfer, redone,  "struct scsipi_xfer *"/*xs*/);
     94 SDT_PROBE_DEFINE1(scsi, base, xfer, complete,  "struct scsipi_xfer *"/*xs*/);
     95 SDT_PROBE_DEFINE1(scsi, base, xfer, restart,  "struct scsipi_xfer *"/*xs*/);
     96 SDT_PROBE_DEFINE1(scsi, base, xfer, free,  "struct scsipi_xfer *"/*xs*/);
     97 
     98 static int	scsipi_complete(struct scsipi_xfer *);
     99 static void	scsipi_request_sense(struct scsipi_xfer *);
    100 static int	scsipi_enqueue(struct scsipi_xfer *);
    101 static void	scsipi_run_queue(struct scsipi_channel *chan);
    102 
    103 static void	scsipi_completion_thread(void *);
    104 
    105 static void	scsipi_get_tag(struct scsipi_xfer *);
    106 static void	scsipi_put_tag(struct scsipi_xfer *);
    107 
    108 static int	scsipi_get_resource(struct scsipi_channel *);
    109 static void	scsipi_put_resource(struct scsipi_channel *);
    110 
    111 static void	scsipi_async_event_max_openings(struct scsipi_channel *,
    112 		    struct scsipi_max_openings *);
    113 static void	scsipi_async_event_channel_reset(struct scsipi_channel *);
    114 
    115 static void	scsipi_channel_freeze_locked(struct scsipi_channel *, int);
    116 
    117 static void	scsipi_adapter_lock(struct scsipi_adapter *adapt);
    118 static void	scsipi_adapter_unlock(struct scsipi_adapter *adapt);
    119 
    120 static void	scsipi_update_timeouts(struct scsipi_xfer *xs);
    121 
    122 static struct pool scsipi_xfer_pool;
    123 
    124 int scsipi_xs_count = 0;
    125 
    126 /*
    127  * scsipi_init:
    128  *
    129  *	Called when a scsibus or atapibus is attached to the system
    130  *	to initialize shared data structures.
    131  */
    132 void
    133 scsipi_init(void)
    134 {
    135 	static int scsipi_init_done;
    136 
    137 	if (scsipi_init_done)
    138 		return;
    139 	scsipi_init_done = 1;
    140 
    141 	/* Initialize the scsipi_xfer pool. */
    142 	pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
    143 	    0, 0, "scxspl", NULL, IPL_BIO);
    144 	if (pool_prime(&scsipi_xfer_pool,
    145 	    PAGE_SIZE / sizeof(struct scsipi_xfer)) == ENOMEM) {
    146 		printf("WARNING: not enough memory for scsipi_xfer_pool\n");
    147 	}
    148 
    149 	scsipi_ioctl_init();
    150 }
    151 
    152 /*
    153  * scsipi_channel_init:
    154  *
    155  *	Initialize a scsipi_channel when it is attached.
    156  */
    157 int
    158 scsipi_channel_init(struct scsipi_channel *chan)
    159 {
    160 	struct scsipi_adapter *adapt = chan->chan_adapter;
    161 	int i;
    162 
    163 	/* Initialize shared data. */
    164 	scsipi_init();
    165 
    166 	/* Initialize the queues. */
    167 	TAILQ_INIT(&chan->chan_queue);
    168 	TAILQ_INIT(&chan->chan_complete);
    169 
    170 	for (i = 0; i < SCSIPI_CHAN_PERIPH_BUCKETS; i++)
    171 		LIST_INIT(&chan->chan_periphtab[i]);
    172 
    173 	/*
    174 	 * Create the asynchronous completion thread.
    175 	 */
    176 	if (kthread_create(PRI_NONE, 0, NULL, scsipi_completion_thread, chan,
    177 	    &chan->chan_thread, "%s", chan->chan_name)) {
    178 		aprint_error_dev(adapt->adapt_dev, "unable to create completion thread for "
    179 		    "channel %d\n", chan->chan_channel);
    180 		panic("scsipi_channel_init");
    181 	}
    182 
    183 	return 0;
    184 }
    185 
    186 /*
    187  * scsipi_channel_shutdown:
    188  *
    189  *	Shutdown a scsipi_channel.
    190  */
    191 void
    192 scsipi_channel_shutdown(struct scsipi_channel *chan)
    193 {
    194 
    195 	mutex_enter(chan_mtx(chan));
    196 	/*
    197 	 * Shut down the completion thread.
    198 	 */
    199 	chan->chan_tflags |= SCSIPI_CHANT_SHUTDOWN;
    200 	cv_broadcast(chan_cv_complete(chan));
    201 
    202 	/*
    203 	 * Now wait for the thread to exit.
    204 	 */
    205 	while (chan->chan_thread != NULL)
    206 		cv_wait(chan_cv_thread(chan), chan_mtx(chan));
    207 	mutex_exit(chan_mtx(chan));
    208 }
    209 
    210 static uint32_t
    211 scsipi_chan_periph_hash(uint64_t t, uint64_t l)
    212 {
    213 	uint32_t hash;
    214 
    215 	hash = hash32_buf(&t, sizeof(t), HASH32_BUF_INIT);
    216 	hash = hash32_buf(&l, sizeof(l), hash);
    217 
    218 	return hash & SCSIPI_CHAN_PERIPH_HASHMASK;
    219 }
    220 
    221 /*
    222  * scsipi_insert_periph:
    223  *
    224  *	Insert a periph into the channel.
    225  */
    226 void
    227 scsipi_insert_periph(struct scsipi_channel *chan, struct scsipi_periph *periph)
    228 {
    229 	uint32_t hash;
    230 
    231 	hash = scsipi_chan_periph_hash(periph->periph_target,
    232 	    periph->periph_lun);
    233 
    234 	mutex_enter(chan_mtx(chan));
    235 	LIST_INSERT_HEAD(&chan->chan_periphtab[hash], periph, periph_hash);
    236 	mutex_exit(chan_mtx(chan));
    237 }
    238 
    239 /*
    240  * scsipi_remove_periph:
    241  *
    242  *	Remove a periph from the channel.
    243  */
    244 void
    245 scsipi_remove_periph(struct scsipi_channel *chan,
    246     struct scsipi_periph *periph)
    247 {
    248 
    249 	LIST_REMOVE(periph, periph_hash);
    250 }
    251 
    252 /*
    253  * scsipi_lookup_periph:
    254  *
    255  *	Lookup a periph on the specified channel.
    256  */
    257 static struct scsipi_periph *
    258 scsipi_lookup_periph_internal(struct scsipi_channel *chan, int target, int lun, bool lock)
    259 {
    260 	struct scsipi_periph *periph;
    261 	uint32_t hash;
    262 
    263 	if (target >= chan->chan_ntargets ||
    264 	    lun >= chan->chan_nluns)
    265 		return NULL;
    266 
    267 	hash = scsipi_chan_periph_hash(target, lun);
    268 
    269 	if (lock)
    270 		mutex_enter(chan_mtx(chan));
    271 	LIST_FOREACH(periph, &chan->chan_periphtab[hash], periph_hash) {
    272 		if (periph->periph_target == target &&
    273 		    periph->periph_lun == lun)
    274 			break;
    275 	}
    276 	if (lock)
    277 		mutex_exit(chan_mtx(chan));
    278 
    279 	return periph;
    280 }
    281 
    282 struct scsipi_periph *
    283 scsipi_lookup_periph_locked(struct scsipi_channel *chan, int target, int lun)
    284 {
    285 	return scsipi_lookup_periph_internal(chan, target, lun, false);
    286 }
    287 
    288 struct scsipi_periph *
    289 scsipi_lookup_periph(struct scsipi_channel *chan, int target, int lun)
    290 {
    291 	return scsipi_lookup_periph_internal(chan, target, lun, true);
    292 }
    293 
    294 /*
    295  * scsipi_get_resource:
    296  *
    297  *	Allocate a single xfer `resource' from the channel.
    298  *
    299  *	NOTE: Must be called with channel lock held
    300  */
    301 static int
    302 scsipi_get_resource(struct scsipi_channel *chan)
    303 {
    304 	struct scsipi_adapter *adapt = chan->chan_adapter;
    305 
    306 	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
    307 		if (chan->chan_openings > 0) {
    308 			chan->chan_openings--;
    309 			return 1;
    310 		}
    311 		return 0;
    312 	}
    313 
    314 	if (adapt->adapt_openings > 0) {
    315 		adapt->adapt_openings--;
    316 		return 1;
    317 	}
    318 	return 0;
    319 }
    320 
    321 /*
    322  * scsipi_grow_resources:
    323  *
    324  *	Attempt to grow resources for a channel.  If this succeeds,
    325  *	we allocate one for our caller.
    326  *
    327  *	NOTE: Must be called with channel lock held
    328  */
    329 static inline int
    330 scsipi_grow_resources(struct scsipi_channel *chan)
    331 {
    332 
    333 	if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
    334 		if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
    335 			mutex_exit(chan_mtx(chan));
    336 			scsipi_adapter_request(chan,
    337 			    ADAPTER_REQ_GROW_RESOURCES, NULL);
    338 			mutex_enter(chan_mtx(chan));
    339 			return scsipi_get_resource(chan);
    340 		}
    341 		/*
     342 		 * Ask the channel thread to do it.  It'll have to thaw the
     343 		 * queue.
    344 		 */
    345 		scsipi_channel_freeze_locked(chan, 1);
    346 		chan->chan_tflags |= SCSIPI_CHANT_GROWRES;
    347 		cv_broadcast(chan_cv_complete(chan));
    348 		return 0;
    349 	}
    350 
    351 	return 0;
    352 }
    353 
    354 /*
    355  * scsipi_put_resource:
    356  *
    357  *	Free a single xfer `resource' to the channel.
    358  *
    359  *	NOTE: Must be called with channel lock held
    360  */
    361 static void
    362 scsipi_put_resource(struct scsipi_channel *chan)
    363 {
    364 	struct scsipi_adapter *adapt = chan->chan_adapter;
    365 
    366 	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
    367 		chan->chan_openings++;
    368 	else
    369 		adapt->adapt_openings++;
    370 }
    371 
    372 /*
    373  * scsipi_get_tag:
    374  *
    375  *	Get a tag ID for the specified xfer.
    376  *
    377  *	NOTE: Must be called with channel lock held
    378  */
    379 static void
    380 scsipi_get_tag(struct scsipi_xfer *xs)
    381 {
    382 	struct scsipi_periph *periph = xs->xs_periph;
    383 	int bit, tag;
    384 	u_int word;
    385 
    386 	KASSERT(mutex_owned(chan_mtx(periph->periph_channel)));
    387 
    388 	bit = 0;	/* XXX gcc */
    389 	for (word = 0; word < PERIPH_NTAGWORDS; word++) {
    390 		bit = ffs(periph->periph_freetags[word]);
    391 		if (bit != 0)
    392 			break;
    393 	}
    394 #ifdef DIAGNOSTIC
    395 	if (word == PERIPH_NTAGWORDS) {
    396 		scsipi_printaddr(periph);
    397 		printf("no free tags\n");
    398 		panic("scsipi_get_tag");
    399 	}
    400 #endif
    401 
    402 	bit -= 1;
    403 	periph->periph_freetags[word] &= ~(1 << bit);
    404 	tag = (word << 5) | bit;
    405 
    406 	/* XXX Should eventually disallow this completely. */
    407 	if (tag >= periph->periph_openings) {
    408 		scsipi_printaddr(periph);
    409 		printf("WARNING: tag %d greater than available openings %d\n",
    410 		    tag, periph->periph_openings);
    411 	}
    412 
    413 	xs->xs_tag_id = tag;
    414 	SDT_PROBE3(scsi, base, tag, get,
    415 	    xs, xs->xs_tag_id, xs->xs_tag_type);
    416 }
    417 
    418 /*
    419  * scsipi_put_tag:
    420  *
    421  *	Put the tag ID for the specified xfer back into the pool.
    422  *
    423  *	NOTE: Must be called with channel lock held
    424  */
    425 static void
    426 scsipi_put_tag(struct scsipi_xfer *xs)
    427 {
    428 	struct scsipi_periph *periph = xs->xs_periph;
    429 	int word, bit;
    430 
    431 	KASSERT(mutex_owned(chan_mtx(periph->periph_channel)));
    432 
    433 	SDT_PROBE3(scsi, base, tag, put,
    434 	    xs, xs->xs_tag_id, xs->xs_tag_type);
    435 
    436 	word = xs->xs_tag_id >> 5;
    437 	bit = xs->xs_tag_id & 0x1f;
    438 
    439 	periph->periph_freetags[word] |= (1 << bit);
    440 }
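
/*
 * Editorial note (not in the original source): tag IDs are packed as
 * tag = (word << 5) | bit, so each periph_freetags[] word covers 32 tags.
 * For example, tag 37 decodes to word 1 (37 >> 5) and bit 5 (37 & 0x1f);
 * scsipi_get_tag() clears that bit to mark the tag busy and
 * scsipi_put_tag() sets it again to return the tag to the pool.
 */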
    441 
    442 /*
    443  * scsipi_get_xs:
    444  *
    445  *	Allocate an xfer descriptor and associate it with the
    446  *	specified peripheral.  If the peripheral has no more
    447  *	available command openings, we either block waiting for
    448  *	one to become available, or fail.
    449  *
    450  *	When this routine is called with the channel lock held
    451  *	the flags must include XS_CTL_NOSLEEP.
    452  */
    453 struct scsipi_xfer *
    454 scsipi_get_xs(struct scsipi_periph *periph, int flags)
    455 {
    456 	struct scsipi_xfer *xs;
    457 	bool lock = (flags & XS_CTL_NOSLEEP) == 0;
    458 
    459 	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));
    460 
    461 	KASSERT(!cold);
    462 
    463 #ifdef DIAGNOSTIC
    464 	/*
    465 	 * URGENT commands can never be ASYNC.
    466 	 */
    467 	if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
    468 	    (XS_CTL_URGENT|XS_CTL_ASYNC)) {
    469 		scsipi_printaddr(periph);
    470 		printf("URGENT and ASYNC\n");
    471 		panic("scsipi_get_xs");
    472 	}
    473 #endif
    474 
    475 	/*
    476 	 * Wait for a command opening to become available.  Rules:
    477 	 *
    478 	 *	- All xfers must wait for an available opening.
    479 	 *	  Exception: URGENT xfers can proceed when
    480 	 *	  active == openings, because we use the opening
    481 	 *	  of the command we're recovering for.
     482 	 *	- If the periph has sense pending, only URGENT & REQSENSE
    483 	 *	  xfers may proceed.
    484 	 *
    485 	 *	- If the periph is recovering, only URGENT xfers may
    486 	 *	  proceed.
    487 	 *
    488 	 *	- If the periph is currently executing a recovery
    489 	 *	  command, URGENT commands must block, because only
    490 	 *	  one recovery command can execute at a time.
    491 	 */
    492 	if (lock)
    493 		mutex_enter(chan_mtx(periph->periph_channel));
    494 	for (;;) {
    495 		if (flags & XS_CTL_URGENT) {
    496 			if (periph->periph_active > periph->periph_openings)
    497 				goto wait_for_opening;
    498 			if (periph->periph_flags & PERIPH_SENSE) {
    499 				if ((flags & XS_CTL_REQSENSE) == 0)
    500 					goto wait_for_opening;
    501 			} else {
    502 				if ((periph->periph_flags &
    503 				    PERIPH_RECOVERY_ACTIVE) != 0)
    504 					goto wait_for_opening;
    505 				periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
    506 			}
    507 			break;
    508 		}
    509 		if (periph->periph_active >= periph->periph_openings ||
    510 		    (periph->periph_flags & PERIPH_RECOVERING) != 0)
    511 			goto wait_for_opening;
    512 		periph->periph_active++;
    513 		KASSERT(mutex_owned(chan_mtx(periph->periph_channel)));
    514 		break;
    515 
    516  wait_for_opening:
    517 		if (flags & XS_CTL_NOSLEEP) {
    518 			KASSERT(!lock);
    519 			return NULL;
    520 		}
    521 		KASSERT(lock);
    522 		SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
    523 		periph->periph_flags |= PERIPH_WAITING;
    524 		cv_wait(periph_cv_periph(periph),
    525 		    chan_mtx(periph->periph_channel));
    526 	}
    527 	if (lock)
    528 		mutex_exit(chan_mtx(periph->periph_channel));
    529 
    530 	SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
    531 	xs = pool_get(&scsipi_xfer_pool,
    532 	    ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
    533 	if (xs == NULL) {
    534 		if (lock)
    535 			mutex_enter(chan_mtx(periph->periph_channel));
    536 		if (flags & XS_CTL_URGENT) {
    537 			if ((flags & XS_CTL_REQSENSE) == 0)
    538 				periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
    539 		} else
    540 			periph->periph_active--;
    541 		if (lock)
    542 			mutex_exit(chan_mtx(periph->periph_channel));
    543 		scsipi_printaddr(periph);
    544 		printf("unable to allocate %sscsipi_xfer\n",
    545 		    (flags & XS_CTL_URGENT) ? "URGENT " : "");
    546 	}
    547 
    548 	SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));
    549 
    550 	if (xs != NULL) {
    551 		memset(xs, 0, sizeof(*xs));
    552 		callout_init(&xs->xs_callout, 0);
    553 		xs->xs_periph = periph;
    554 		xs->xs_control = flags;
    555 		xs->xs_status = 0;
    556 		if ((flags & XS_CTL_NOSLEEP) == 0)
    557 			mutex_enter(chan_mtx(periph->periph_channel));
    558 		TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
    559 		KASSERT(mutex_owned(chan_mtx(periph->periph_channel)));
    560 		if ((flags & XS_CTL_NOSLEEP) == 0)
    561 			mutex_exit(chan_mtx(periph->periph_channel));
    562 	}
    563 	return xs;
    564 }
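
/*
 * Example (editorial sketch, not part of the original file): a caller
 * holding the channel lock must pass XS_CTL_NOSLEEP and handle failure;
 * example_start() below is hypothetical.
 */
#if 0
static void
example_start(struct scsipi_periph *periph)
{
	struct scsipi_xfer *xs;

	/* Channel lock held, so we may not sleep; NULL is a normal result. */
	xs = scsipi_get_xs(periph, XS_CTL_NOSLEEP);
	if (xs == NULL)
		return;		/* retry when an opening frees up */
	/* ... fill in xs->cmd, xs->data, xs->timeout, then enqueue ... */
}
#endif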
    565 
    566 /*
    567  * scsipi_put_xs:
    568  *
    569  *	Release an xfer descriptor, decreasing the outstanding command
    570  *	count for the peripheral.  If there is a thread waiting for
    571  *	an opening, wake it up.  If not, kick any queued I/O the
    572  *	peripheral may have.
    573  *
    574  *	NOTE: Must be called with channel lock held
    575  */
    576 void
    577 scsipi_put_xs(struct scsipi_xfer *xs)
    578 {
    579 	struct scsipi_periph *periph = xs->xs_periph;
    580 	int flags = xs->xs_control;
    581 
    582 	SDT_PROBE1(scsi, base, xfer, free,  xs);
    583 	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));
    584 	KASSERT(mutex_owned(chan_mtx(periph->periph_channel)));
    585 
    586 	TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
    587 	callout_destroy(&xs->xs_callout);
    588 	pool_put(&scsipi_xfer_pool, xs);
    589 
    590 #ifdef DIAGNOSTIC
    591 	if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
    592 	    periph->periph_active == 0) {
    593 		scsipi_printaddr(periph);
     594 		printf("recovery without a command to recover for\n");
    595 		panic("scsipi_put_xs");
    596 	}
    597 #endif
    598 
    599 	if (flags & XS_CTL_URGENT) {
    600 		if ((flags & XS_CTL_REQSENSE) == 0)
    601 			periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
    602 	} else
    603 		periph->periph_active--;
    604 	if (periph->periph_active == 0 &&
    605 	    (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
    606 		periph->periph_flags &= ~PERIPH_WAITDRAIN;
    607 		cv_broadcast(periph_cv_active(periph));
    608 	}
    609 
    610 	if (periph->periph_flags & PERIPH_WAITING) {
    611 		periph->periph_flags &= ~PERIPH_WAITING;
    612 		cv_broadcast(periph_cv_periph(periph));
    613 	} else {
    614 		if (periph->periph_switch->psw_start != NULL &&
    615 		    device_is_active(periph->periph_dev)) {
    616 			SC_DEBUG(periph, SCSIPI_DB2,
    617 			    ("calling private start()\n"));
    618 			(*periph->periph_switch->psw_start)(periph);
    619 		}
    620 	}
    621 }
    622 
    623 /*
    624  * scsipi_channel_freeze:
    625  *
    626  *	Freeze a channel's xfer queue.
    627  */
    628 void
    629 scsipi_channel_freeze(struct scsipi_channel *chan, int count)
    630 {
    631 	bool lock = chan_running(chan) > 0;
    632 
    633 	if (lock)
    634 		mutex_enter(chan_mtx(chan));
    635 	chan->chan_qfreeze += count;
    636 	if (lock)
    637 		mutex_exit(chan_mtx(chan));
    638 }
    639 
    640 static void
    641 scsipi_channel_freeze_locked(struct scsipi_channel *chan, int count)
    642 {
    643 
    644 	chan->chan_qfreeze += count;
    645 }
    646 
    647 /*
    648  * scsipi_channel_thaw:
    649  *
    650  *	Thaw a channel's xfer queue.
    651  */
    652 void
    653 scsipi_channel_thaw(struct scsipi_channel *chan, int count)
    654 {
    655 	bool lock = chan_running(chan) > 0;
    656 
    657 	if (lock)
    658 		mutex_enter(chan_mtx(chan));
    659 	chan->chan_qfreeze -= count;
    660 	/*
    661 	 * Don't let the freeze count go negative.
    662 	 *
    663 	 * Presumably the adapter driver could keep track of this,
    664 	 * but it might just be easier to do this here so as to allow
    665 	 * multiple callers, including those outside the adapter driver.
    666 	 */
    667 	if (chan->chan_qfreeze < 0) {
    668 		chan->chan_qfreeze = 0;
    669 	}
    670 	if (lock)
    671 		mutex_exit(chan_mtx(chan));
    672 
     673 	/*
     674 	 * Don't kick the queue until the channel is running.
     675 	 */
    676 	if (!lock)
    677 		return;
    678 
    679 	/*
    680 	 * Kick the channel's queue here.  Note, we may be running in
    681 	 * interrupt context (softclock or HBA's interrupt), so the adapter
    682 	 * driver had better not sleep.
    683 	 */
    684 	if (chan->chan_qfreeze == 0)
    685 		scsipi_run_queue(chan);
    686 }
    687 
    688 /*
    689  * scsipi_channel_timed_thaw:
    690  *
    691  *	Thaw a channel after some time has expired. This will also
    692  * 	run the channel's queue if the freeze count has reached 0.
    693  */
    694 void
    695 scsipi_channel_timed_thaw(void *arg)
    696 {
    697 	struct scsipi_channel *chan = arg;
    698 
    699 	scsipi_channel_thaw(chan, 1);
    700 }
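
/*
 * Example (editorial sketch): the usual pattern for a temporary stall is
 * to freeze the channel once and let a callout thaw it later; sc and
 * sc_callout below are hypothetical softc members.
 */
#if 0
	scsipi_channel_freeze(chan, 1);
	callout_reset(&sc->sc_callout, hz / 2,
	    scsipi_channel_timed_thaw, chan);
#endif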
    701 
    702 /*
    703  * scsipi_periph_freeze:
    704  *
    705  *	Freeze a device's xfer queue.
    706  */
    707 void
    708 scsipi_periph_freeze_locked(struct scsipi_periph *periph, int count)
    709 {
    710 
    711 	periph->periph_qfreeze += count;
    712 }
    713 
    714 /*
    715  * scsipi_periph_thaw:
    716  *
    717  *	Thaw a device's xfer queue.
    718  */
    719 void
    720 scsipi_periph_thaw_locked(struct scsipi_periph *periph, int count)
    721 {
    722 
    723 	periph->periph_qfreeze -= count;
    724 #ifdef DIAGNOSTIC
    725 	if (periph->periph_qfreeze < 0) {
    726 		static const char pc[] = "periph freeze count < 0";
    727 		scsipi_printaddr(periph);
    728 		printf("%s\n", pc);
    729 		panic(pc);
    730 	}
    731 #endif
    732 	if (periph->periph_qfreeze == 0 &&
    733 	    (periph->periph_flags & PERIPH_WAITING) != 0)
    734 		cv_broadcast(periph_cv_periph(periph));
    735 }
    736 
    737 void
    738 scsipi_periph_freeze(struct scsipi_periph *periph, int count)
    739 {
    740 
    741 	mutex_enter(chan_mtx(periph->periph_channel));
    742 	scsipi_periph_freeze_locked(periph, count);
    743 	mutex_exit(chan_mtx(periph->periph_channel));
    744 }
    745 
    746 void
    747 scsipi_periph_thaw(struct scsipi_periph *periph, int count)
    748 {
    749 
    750 	mutex_enter(chan_mtx(periph->periph_channel));
    751 	scsipi_periph_thaw_locked(periph, count);
    752 	mutex_exit(chan_mtx(periph->periph_channel));
    753 }
    754 
    755 /*
    756  * scsipi_periph_timed_thaw:
    757  *
    758  *	Thaw a device after some time has expired.
    759  */
    760 void
    761 scsipi_periph_timed_thaw(void *arg)
    762 {
    763 	struct scsipi_periph *periph = arg;
    764 	struct scsipi_channel *chan = periph->periph_channel;
    765 
    766 	callout_stop(&periph->periph_callout);
    767 
    768 	mutex_enter(chan_mtx(chan));
    769 	scsipi_periph_thaw_locked(periph, 1);
    770 	if ((periph->periph_channel->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
    771 		/*
    772 		 * Kick the channel's queue here.  Note, we're running in
    773 		 * interrupt context (softclock), so the adapter driver
    774 		 * had better not sleep.
    775 		 */
    776 		mutex_exit(chan_mtx(chan));
    777 		scsipi_run_queue(periph->periph_channel);
    778 	} else {
    779 		/*
    780 		 * Tell the completion thread to kick the channel's queue here.
    781 		 */
    782 		periph->periph_channel->chan_tflags |= SCSIPI_CHANT_KICK;
    783 		cv_broadcast(chan_cv_complete(chan));
    784 		mutex_exit(chan_mtx(chan));
    785 	}
    786 }
    787 
    788 /*
    789  * scsipi_wait_drain:
    790  *
    791  *	Wait for a periph's pending xfers to drain.
    792  */
    793 void
    794 scsipi_wait_drain(struct scsipi_periph *periph)
    795 {
    796 	struct scsipi_channel *chan = periph->periph_channel;
    797 
    798 	mutex_enter(chan_mtx(chan));
    799 	while (periph->periph_active != 0) {
    800 		periph->periph_flags |= PERIPH_WAITDRAIN;
    801 		cv_wait(periph_cv_active(periph), chan_mtx(chan));
    802 	}
    803 	mutex_exit(chan_mtx(chan));
    804 }
    805 
    806 /*
    807  * scsipi_kill_pending:
    808  *
    809  *	Kill off all pending xfers for a periph.
    810  *
    811  *	NOTE: Must be called with channel lock held
    812  */
    813 void
    814 scsipi_kill_pending(struct scsipi_periph *periph)
    815 {
    816 	struct scsipi_channel *chan = periph->periph_channel;
    817 
    818 	(*chan->chan_bustype->bustype_kill_pending)(periph);
    819 	while (periph->periph_active != 0) {
    820 		periph->periph_flags |= PERIPH_WAITDRAIN;
    821 		cv_wait(periph_cv_active(periph), chan_mtx(chan));
    822 	}
    823 }
    824 
    825 /*
    826  * scsipi_print_cdb:
    827  * prints a command descriptor block (for debug purpose, error messages,
     828  * prints a command descriptor block (for debugging purposes, error messages,
    829  */
    830 void
    831 scsipi_print_cdb(struct scsipi_generic *cmd)
    832 {
    833 	int i, j;
    834 
    835  	printf("0x%02x", cmd->opcode);
    836 
    837  	switch (CDB_GROUPID(cmd->opcode)) {
    838  	case CDB_GROUPID_0:
    839  		j = CDB_GROUP0;
    840  		break;
    841  	case CDB_GROUPID_1:
    842  		j = CDB_GROUP1;
    843  		break;
    844  	case CDB_GROUPID_2:
    845  		j = CDB_GROUP2;
    846  		break;
    847  	case CDB_GROUPID_3:
    848  		j = CDB_GROUP3;
    849  		break;
    850  	case CDB_GROUPID_4:
    851  		j = CDB_GROUP4;
    852  		break;
    853  	case CDB_GROUPID_5:
    854  		j = CDB_GROUP5;
    855  		break;
    856  	case CDB_GROUPID_6:
    857  		j = CDB_GROUP6;
    858  		break;
    859  	case CDB_GROUPID_7:
    860  		j = CDB_GROUP7;
    861  		break;
    862  	default:
    863  		j = 0;
    864  	}
    865  	if (j == 0)
    866  		j = sizeof (cmd->bytes);
    867  	for (i = 0; i < j-1; i++) /* already done the opcode */
    868  		printf(" %02x", cmd->bytes[i]);
    869 }
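
/*
 * Editorial note: the top three bits of the opcode select the CDB group
 * and therefore its length.  For example, READ(6) (opcode 0x08) is in
 * group 0 and uses a 6-byte CDB (CDB_GROUP0), while READ(10) (opcode
 * 0x28) is in group 1 and uses a 10-byte CDB (CDB_GROUP1), so the loop
 * above prints 5 or 9 bytes after the opcode respectively.
 */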
    870 
    871 /*
    872  * scsipi_interpret_sense:
    873  *
    874  *	Look at the returned sense and act on the error, determining
    875  *	the unix error number to pass back.  (0 = report no error)
    876  *
     877 	 *	NOTE: If we return ERESTART, we are expected to have
    878  *	thawed the device!
    879  *
    880  *	THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
    881  */
    882 int
    883 scsipi_interpret_sense(struct scsipi_xfer *xs)
    884 {
    885 	struct scsi_sense_data *sense;
    886 	struct scsipi_periph *periph = xs->xs_periph;
    887 	u_int8_t key;
    888 	int error;
    889 	u_int32_t info;
    890 	static const char *error_mes[] = {
    891 		"soft error (corrected)",
    892 		"not ready", "medium error",
    893 		"non-media hardware failure", "illegal request",
    894 		"unit attention", "readonly device",
    895 		"no data found", "vendor unique",
    896 		"copy aborted", "command aborted",
    897 		"search returned equal", "volume overflow",
    898 		"verify miscompare", "unknown error key"
    899 	};
    900 
    901 	sense = &xs->sense.scsi_sense;
    902 #ifdef SCSIPI_DEBUG
    903 	if (periph->periph_flags & SCSIPI_DB1) {
    904 	        int count, len;
    905 		scsipi_printaddr(periph);
    906 		printf(" sense debug information:\n");
    907 		printf("\tcode 0x%x valid %d\n",
    908 			SSD_RCODE(sense->response_code),
    909 			sense->response_code & SSD_RCODE_VALID ? 1 : 0);
    910 		printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
    911 			sense->segment,
    912 			SSD_SENSE_KEY(sense->flags),
    913 			sense->flags & SSD_ILI ? 1 : 0,
    914 			sense->flags & SSD_EOM ? 1 : 0,
    915 			sense->flags & SSD_FILEMARK ? 1 : 0);
    916 		printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d "
    917 			"extra bytes\n",
    918 			sense->info[0],
    919 			sense->info[1],
    920 			sense->info[2],
    921 			sense->info[3],
    922 			sense->extra_len);
    923 		len = SSD_ADD_BYTES_LIM(sense);
    924 		printf("\textra (up to %d bytes): ", len);
    925 		for (count = 0; count < len; count++)
    926 			printf("0x%x ", sense->csi[count]);
    927 		printf("\n");
    928 	}
    929 #endif
    930 
    931 	/*
    932 	 * If the periph has its own error handler, call it first.
    933 	 * If it returns a legit error value, return that, otherwise
    934 	 * it wants us to continue with normal error processing.
    935 	 */
    936 	if (periph->periph_switch->psw_error != NULL) {
    937 		SC_DEBUG(periph, SCSIPI_DB2,
    938 		    ("calling private err_handler()\n"));
    939 		error = (*periph->periph_switch->psw_error)(xs);
    940 		if (error != EJUSTRETURN)
    941 			return error;
    942 	}
    943 	/* otherwise use the default */
    944 	switch (SSD_RCODE(sense->response_code)) {
    945 
    946 		/*
    947 		 * Old SCSI-1 and SASI devices respond with
    948 		 * codes other than 70.
    949 		 */
    950 	case 0x00:		/* no error (command completed OK) */
    951 		return 0;
    952 	case 0x04:		/* drive not ready after it was selected */
    953 		if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
    954 			periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
    955 		if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
    956 			return 0;
    957 		/* XXX - display some sort of error here? */
    958 		return EIO;
    959 	case 0x20:		/* invalid command */
    960 		if ((xs->xs_control &
    961 		     XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
    962 			return 0;
    963 		return EINVAL;
    964 	case 0x25:		/* invalid LUN (Adaptec ACB-4000) */
    965 		return EACCES;
    966 
    967 		/*
    968 		 * If it's code 70, use the extended stuff and
    969 		 * interpret the key
    970 		 */
    971 	case 0x71:		/* delayed error */
    972 		scsipi_printaddr(periph);
    973 		key = SSD_SENSE_KEY(sense->flags);
    974 		printf(" DEFERRED ERROR, key = 0x%x\n", key);
    975 		/* FALLTHROUGH */
    976 	case 0x70:
    977 		if ((sense->response_code & SSD_RCODE_VALID) != 0)
    978 			info = _4btol(sense->info);
    979 		else
    980 			info = 0;
    981 		key = SSD_SENSE_KEY(sense->flags);
    982 
    983 		switch (key) {
    984 		case SKEY_NO_SENSE:
    985 		case SKEY_RECOVERED_ERROR:
    986 			if (xs->resid == xs->datalen && xs->datalen) {
    987 				/*
    988 				 * Why is this here?
    989 				 */
    990 				xs->resid = 0;	/* not short read */
    991 			}
    992 			error = 0;
    993 			break;
    994 		case SKEY_EQUAL:
    995 			error = 0;
    996 			break;
    997 		case SKEY_NOT_READY:
    998 			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
    999 				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
   1000 			if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
   1001 				return 0;
   1002 			if (sense->asc == 0x3A) {
   1003 				error = ENODEV; /* Medium not present */
   1004 				if (xs->xs_control & XS_CTL_SILENT_NODEV)
   1005 					return error;
   1006 			} else
   1007 				error = EIO;
   1008 			if ((xs->xs_control & XS_CTL_SILENT) != 0)
   1009 				return error;
   1010 			break;
   1011 		case SKEY_ILLEGAL_REQUEST:
   1012 			if ((xs->xs_control &
   1013 			     XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
   1014 				return 0;
   1015 			/*
   1016 			 * Handle the case where a device reports
   1017 			 * Logical Unit Not Supported during discovery.
   1018 			 */
   1019 			if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
   1020 			    sense->asc == 0x25 &&
   1021 			    sense->ascq == 0x00)
   1022 				return EINVAL;
   1023 			if ((xs->xs_control & XS_CTL_SILENT) != 0)
   1024 				return EIO;
   1025 			error = EINVAL;
   1026 			break;
   1027 		case SKEY_UNIT_ATTENTION:
   1028 			if (sense->asc == 0x29 &&
   1029 			    sense->ascq == 0x00) {
   1030 				/* device or bus reset */
   1031 				return ERESTART;
   1032 			}
   1033 			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
   1034 				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
   1035 			if ((xs->xs_control &
   1036 			     XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
   1037 				/* XXX Should reupload any transient state. */
   1038 				(periph->periph_flags &
   1039 				 PERIPH_REMOVABLE) == 0) {
   1040 				return ERESTART;
   1041 			}
   1042 			if ((xs->xs_control & XS_CTL_SILENT) != 0)
   1043 				return EIO;
   1044 			error = EIO;
   1045 			break;
   1046 		case SKEY_DATA_PROTECT:
   1047 			error = EROFS;
   1048 			break;
   1049 		case SKEY_BLANK_CHECK:
   1050 			error = 0;
   1051 			break;
   1052 		case SKEY_ABORTED_COMMAND:
   1053 			if (xs->xs_retries != 0) {
   1054 				xs->xs_retries--;
   1055 				error = ERESTART;
   1056 			} else
   1057 				error = EIO;
   1058 			break;
   1059 		case SKEY_VOLUME_OVERFLOW:
   1060 			error = ENOSPC;
   1061 			break;
   1062 		default:
   1063 			error = EIO;
   1064 			break;
   1065 		}
   1066 
   1067 		/* Print verbose decode if appropriate and possible */
   1068 		if ((key == 0) ||
   1069 		    ((xs->xs_control & XS_CTL_SILENT) != 0) ||
   1070 		    (scsipi_print_sense(xs, 0) != 0))
   1071 			return error;
   1072 
   1073 		/* Print brief(er) sense information */
   1074 		scsipi_printaddr(periph);
   1075 		printf("%s", error_mes[key - 1]);
   1076 		if ((sense->response_code & SSD_RCODE_VALID) != 0) {
   1077 			switch (key) {
   1078 			case SKEY_NOT_READY:
   1079 			case SKEY_ILLEGAL_REQUEST:
   1080 			case SKEY_UNIT_ATTENTION:
   1081 			case SKEY_DATA_PROTECT:
   1082 				break;
   1083 			case SKEY_BLANK_CHECK:
   1084 				printf(", requested size: %d (decimal)",
   1085 				    info);
   1086 				break;
   1087 			case SKEY_ABORTED_COMMAND:
   1088 				if (xs->xs_retries)
   1089 					printf(", retrying");
   1090 				printf(", cmd 0x%x, info 0x%x",
   1091 				    xs->cmd->opcode, info);
   1092 				break;
   1093 			default:
   1094 				printf(", info = %d (decimal)", info);
   1095 			}
   1096 		}
   1097 		if (sense->extra_len != 0) {
   1098 			int n;
   1099 			printf(", data =");
   1100 			for (n = 0; n < sense->extra_len; n++)
   1101 				printf(" %02x",
   1102 				    sense->csi[n]);
   1103 		}
   1104 		printf("\n");
   1105 		return error;
   1106 
   1107 	/*
   1108 	 * Some other code, just report it
   1109 	 */
   1110 	default:
    1111 #if defined(SCSIDEBUG) || defined(DEBUG)
   1112 	{
   1113 		static const char *uc = "undecodable sense error";
   1114 		int i;
   1115 		u_int8_t *cptr = (u_int8_t *) sense;
   1116 		scsipi_printaddr(periph);
   1117 		if (xs->cmd == &xs->cmdstore) {
   1118 			printf("%s for opcode 0x%x, data=",
   1119 			    uc, xs->cmdstore.opcode);
   1120 		} else {
   1121 			printf("%s, data=", uc);
   1122 		}
    1123 		for (i = 0; i < sizeof(*sense); i++)
   1124 			printf(" 0x%02x", *(cptr++) & 0xff);
   1125 		printf("\n");
   1126 	}
   1127 #else
   1128 		scsipi_printaddr(periph);
   1129 		printf("Sense Error Code 0x%x",
   1130 			SSD_RCODE(sense->response_code));
   1131 		if ((sense->response_code & SSD_RCODE_VALID) != 0) {
   1132 			struct scsi_sense_data_unextended *usense =
   1133 			    (struct scsi_sense_data_unextended *)sense;
   1134 			printf(" at block no. %d (decimal)",
   1135 			    _3btol(usense->block));
   1136 		}
   1137 		printf("\n");
   1138 #endif
   1139 		return EIO;
   1140 	}
   1141 }
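
/*
 * Example (editorial sketch): a periph driver may interpose its own
 * handler via psw_error in its scsipi_periphsw; returning EJUSTRETURN
 * falls through to the default processing above.  example_err() and its
 * policy are hypothetical.
 */
#if 0
static int
example_err(struct scsipi_xfer *xs)
{
	struct scsi_sense_data *sense = &xs->sense.scsi_sense;

	/* Map "medium not present" to ENODEV ourselves... */
	if (SSD_RCODE(sense->response_code) == 0x70 &&
	    SSD_SENSE_KEY(sense->flags) == SKEY_NOT_READY &&
	    sense->asc == 0x3A)
		return ENODEV;
	/* ...and let the generic handling above decide everything else. */
	return EJUSTRETURN;
}
#endif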
   1142 
   1143 /*
   1144  * scsipi_test_unit_ready:
   1145  *
   1146  *	Issue a `test unit ready' request.
   1147  */
   1148 int
   1149 scsipi_test_unit_ready(struct scsipi_periph *periph, int flags)
   1150 {
   1151 	struct scsi_test_unit_ready cmd;
   1152 	int retries;
   1153 
   1154 	/* some ATAPI drives don't support TEST UNIT READY. Sigh */
   1155 	if (periph->periph_quirks & PQUIRK_NOTUR)
   1156 		return 0;
   1157 
   1158 	if (flags & XS_CTL_DISCOVERY)
   1159 		retries = 0;
   1160 	else
   1161 		retries = SCSIPIRETRIES;
   1162 
   1163 	memset(&cmd, 0, sizeof(cmd));
   1164 	cmd.opcode = SCSI_TEST_UNIT_READY;
   1165 
   1166 	return scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
   1167 	    retries, 10000, NULL, flags);
   1168 }
   1169 
   1170 static const struct scsipi_inquiry3_pattern {
   1171 	const char vendor[8];
   1172 	const char product[16];
   1173 	const char revision[4];
   1174 } scsipi_inquiry3_quirk[] = {
   1175 	{ "ES-6600 ", "", "" },
   1176 };
   1177 
   1178 static int
   1179 scsipi_inquiry3_ok(const struct scsipi_inquiry_data *ib)
   1180 {
   1181 	for (size_t i = 0; i < __arraycount(scsipi_inquiry3_quirk); i++) {
   1182 		const struct scsipi_inquiry3_pattern *q =
   1183 		    &scsipi_inquiry3_quirk[i];
   1184 #define MATCH(field) \
   1185     (q->field[0] ? memcmp(ib->field, q->field, sizeof(ib->field)) == 0 : 1)
   1186 		if (MATCH(vendor) && MATCH(product) && MATCH(revision))
   1187 			return 0;
   1188 	}
   1189 	return 1;
   1190 }
   1191 
   1192 /*
   1193  * scsipi_inquire:
   1194  *
   1195  *	Ask the device about itself.
   1196  */
   1197 int
   1198 scsipi_inquire(struct scsipi_periph *periph, struct scsipi_inquiry_data *inqbuf,
   1199     int flags)
   1200 {
   1201 	struct scsipi_inquiry cmd;
   1202 	int error;
   1203 	int retries;
   1204 
   1205 	if (flags & XS_CTL_DISCOVERY)
   1206 		retries = 0;
   1207 	else
   1208 		retries = SCSIPIRETRIES;
   1209 
   1210 	/*
   1211 	 * If we request more data than the device can provide, it SHOULD just
   1212 	 * return a short response.  However, some devices error with an
   1213 	 * ILLEGAL REQUEST sense code, and yet others have even more special
    1214 	 * failure modes (such as the GL641USB flash adapter, which goes loony
   1215 	 * and sends corrupted CRCs).  To work around this, and to bring our
   1216 	 * behavior more in line with other OSes, we do a shorter inquiry,
   1217 	 * covering all the SCSI-2 information, first, and then request more
   1218 	 * data iff the "additional length" field indicates there is more.
   1219 	 * - mycroft, 2003/10/16
   1220 	 */
   1221 	memset(&cmd, 0, sizeof(cmd));
   1222 	cmd.opcode = INQUIRY;
   1223 	cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI2;
   1224 	error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
   1225 	    (void *)inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI2, retries,
   1226 	    10000, NULL, flags | XS_CTL_DATA_IN);
   1227 	if (!error &&
   1228 	    inqbuf->additional_length > SCSIPI_INQUIRY_LENGTH_SCSI2 - 4) {
   1229 	    if (scsipi_inquiry3_ok(inqbuf)) {
   1230 #if 0
   1231 printf("inquire: addlen=%d, retrying\n", inqbuf->additional_length);
   1232 #endif
   1233 		cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI3;
   1234 		error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
   1235 		    (void *)inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI3, retries,
   1236 		    10000, NULL, flags | XS_CTL_DATA_IN);
   1237 #if 0
   1238 printf("inquire: error=%d\n", error);
   1239 #endif
   1240 	    }
   1241 	}
   1242 
   1243 #ifdef SCSI_OLD_NOINQUIRY
   1244 	/*
   1245 	 * Kludge for the Adaptec ACB-4000 SCSI->MFM translator.
   1246 	 * This board doesn't support the INQUIRY command at all.
   1247 	 */
   1248 	if (error == EINVAL || error == EACCES) {
   1249 		/*
   1250 		 * Conjure up an INQUIRY response.
   1251 		 */
   1252 		inqbuf->device = (error == EINVAL ?
   1253 			 SID_QUAL_LU_PRESENT :
   1254 			 SID_QUAL_LU_NOTPRESENT) | T_DIRECT;
   1255 		inqbuf->dev_qual2 = 0;
   1256 		inqbuf->version = 0;
   1257 		inqbuf->response_format = SID_FORMAT_SCSI1;
   1258 		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
   1259 		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
   1260 		memcpy(inqbuf->vendor, "ADAPTEC ACB-4000            ", 28);
   1261 		error = 0;
   1262 	}
   1263 
   1264 	/*
   1265 	 * Kludge for the Emulex MT-02 SCSI->QIC translator.
   1266 	 * This board gives an empty response to an INQUIRY command.
   1267 	 */
   1268 	else if (error == 0 &&
   1269 	    inqbuf->device == (SID_QUAL_LU_PRESENT | T_DIRECT) &&
   1270 	    inqbuf->dev_qual2 == 0 &&
   1271 	    inqbuf->version == 0 &&
   1272 	    inqbuf->response_format == SID_FORMAT_SCSI1) {
   1273 		/*
   1274 		 * Fill out the INQUIRY response.
   1275 		 */
   1276 		inqbuf->device = (SID_QUAL_LU_PRESENT | T_SEQUENTIAL);
   1277 		inqbuf->dev_qual2 = SID_REMOVABLE;
   1278 		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
   1279 		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
   1280 		memcpy(inqbuf->vendor, "EMULEX  MT-02 QIC           ", 28);
   1281 	}
   1282 #endif /* SCSI_OLD_NOINQUIRY */
   1283 
   1284 	return error;
   1285 }
   1286 
   1287 /*
   1288  * scsipi_prevent:
   1289  *
   1290  *	Prevent or allow the user to remove the media
   1291  */
   1292 int
   1293 scsipi_prevent(struct scsipi_periph *periph, int type, int flags)
   1294 {
   1295 	struct scsi_prevent_allow_medium_removal cmd;
   1296 
   1297 	if (periph->periph_quirks & PQUIRK_NODOORLOCK)
   1298 		return 0;
   1299 
   1300 	memset(&cmd, 0, sizeof(cmd));
   1301 	cmd.opcode = SCSI_PREVENT_ALLOW_MEDIUM_REMOVAL;
   1302 	cmd.how = type;
   1303 
   1304 	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
   1305 	    SCSIPIRETRIES, 5000, NULL, flags));
   1306 }
   1307 
   1308 /*
   1309  * scsipi_start:
   1310  *
   1311  *	Send a START UNIT.
   1312  */
   1313 int
   1314 scsipi_start(struct scsipi_periph *periph, int type, int flags)
   1315 {
   1316 	struct scsipi_start_stop cmd;
   1317 
   1318 	memset(&cmd, 0, sizeof(cmd));
   1319 	cmd.opcode = START_STOP;
   1320 	cmd.byte2 = 0x00;
   1321 	cmd.how = type;
   1322 
   1323 	return scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
   1324 	    SCSIPIRETRIES, (type & SSS_START) ? 60000 : 10000, NULL, flags);
   1325 }
   1326 
   1327 /*
   1328  * scsipi_mode_sense, scsipi_mode_sense_big:
   1329  *	get a sense page from a device
   1330  */
   1331 
   1332 int
   1333 scsipi_mode_sense(struct scsipi_periph *periph, int byte2, int page,
   1334     struct scsi_mode_parameter_header_6 *data, int len, int flags, int retries,
   1335     int timeout)
   1336 {
   1337 	struct scsi_mode_sense_6 cmd;
   1338 
   1339 	memset(&cmd, 0, sizeof(cmd));
   1340 	cmd.opcode = SCSI_MODE_SENSE_6;
   1341 	cmd.byte2 = byte2;
   1342 	cmd.page = page;
   1343 	cmd.length = len & 0xff;
   1344 
   1345 	return scsipi_command(periph, (void *)&cmd, sizeof(cmd),
   1346 	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_IN);
   1347 }
   1348 
   1349 int
   1350 scsipi_mode_sense_big(struct scsipi_periph *periph, int byte2, int page,
   1351     struct scsi_mode_parameter_header_10 *data, int len, int flags, int retries,
   1352     int timeout)
   1353 {
   1354 	struct scsi_mode_sense_10 cmd;
   1355 
   1356 	memset(&cmd, 0, sizeof(cmd));
   1357 	cmd.opcode = SCSI_MODE_SENSE_10;
   1358 	cmd.byte2 = byte2;
   1359 	cmd.page = page;
   1360 	_lto2b(len, cmd.length);
   1361 
   1362 	return scsipi_command(periph, (void *)&cmd, sizeof(cmd),
   1363 	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_IN);
   1364 }
   1365 
   1366 int
   1367 scsipi_mode_select(struct scsipi_periph *periph, int byte2,
   1368     struct scsi_mode_parameter_header_6 *data, int len, int flags, int retries,
   1369     int timeout)
   1370 {
   1371 	struct scsi_mode_select_6 cmd;
   1372 
   1373 	memset(&cmd, 0, sizeof(cmd));
   1374 	cmd.opcode = SCSI_MODE_SELECT_6;
   1375 	cmd.byte2 = byte2;
   1376 	cmd.length = len & 0xff;
   1377 
   1378 	return scsipi_command(periph, (void *)&cmd, sizeof(cmd),
   1379 	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_OUT);
   1380 }
   1381 
   1382 int
   1383 scsipi_mode_select_big(struct scsipi_periph *periph, int byte2,
   1384     struct scsi_mode_parameter_header_10 *data, int len, int flags, int retries,
   1385     int timeout)
   1386 {
   1387 	struct scsi_mode_select_10 cmd;
   1388 
   1389 	memset(&cmd, 0, sizeof(cmd));
   1390 	cmd.opcode = SCSI_MODE_SELECT_10;
   1391 	cmd.byte2 = byte2;
   1392 	_lto2b(len, cmd.length);
   1393 
   1394 	return scsipi_command(periph, (void *)&cmd, sizeof(cmd),
   1395 	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_OUT);
   1396 }
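
/*
 * Example (editorial sketch): fetching a mode page with the 6-byte
 * variant; the page code and buffer size here are illustrative only.
 */
#if 0
	struct {
		struct scsi_mode_parameter_header_6 hdr;
		u_int8_t page[0x20];
	} buf;
	int error;

	memset(&buf, 0, sizeof(buf));
	error = scsipi_mode_sense(periph, 0, 0x08 /* caching page */,
	    &buf.hdr, sizeof(buf), 0, SCSIPIRETRIES, 10000);
#endif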
   1397 
   1398 /*
   1399  * scsipi_get_opcodeinfo:
   1400  *
    1401  * Query the device for supported commands and their timeouts,
    1402  * building a timeout lookup table if timeout information is available.
   1403  */
   1404 void
   1405 scsipi_get_opcodeinfo(struct scsipi_periph *periph)
   1406 {
   1407 	u_int8_t *data;
   1408 	int len = 16*1024;
   1409 	int rc;
   1410 	struct scsi_repsuppopcode cmd;
   1411 
   1412 	/* refrain from asking for supported opcodes */
   1413 	if (periph->periph_quirks & PQUIRK_NOREPSUPPOPC ||
   1414 	    periph->periph_type == T_PROCESSOR || /* spec. */
   1415 	    periph->periph_type == T_CDROM) /* spec. */
   1416 		return;
   1417 
   1418 	scsipi_free_opcodeinfo(periph);
   1419 
   1420 	/*
   1421 	 * query REPORT SUPPORTED OPERATION CODES
   1422 	 * if OK
   1423 	 *   enumerate all codes
   1424 	 *     if timeout exists insert maximum into opcode table
   1425 	 */
   1426 
   1427 	data = malloc(len, M_DEVBUF, M_NOWAIT|M_ZERO);
   1428 	if (data == NULL) {
   1429 		SC_DEBUG(periph, SCSIPI_DB3,
   1430 			 ("unable to allocate data buffer "
   1431 			  "for REPORT SUPPORTED OPERATION CODES\n"));
   1432 		return;
   1433 	}
   1434 
   1435 	memset(&cmd, 0, sizeof(cmd));
   1436 
   1437 	cmd.opcode = SCSI_MAINTENANCE_IN;
   1438 	cmd.svcaction = RSOC_REPORT_SUPPORTED_OPCODES;
   1439 	cmd.repoption = RSOC_RCTD|RSOC_ALL;
   1440 	_lto4b(len, cmd.alloclen);
   1441 
   1442 	rc = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
   1443 			    (void *)data, len, 0, 1000, NULL,
   1444 			    XS_CTL_DATA_IN|XS_CTL_SILENT);
   1445 
   1446 	if (rc == 0) {
   1447 		int count;
   1448                 int dlen = _4btol(data);
   1449                 u_int8_t *c = data + 4;
   1450 
   1451 		SC_DEBUG(periph, SCSIPI_DB3,
   1452 			 ("supported opcode timeout-values loaded\n"));
   1453 		SC_DEBUG(periph, SCSIPI_DB3,
   1454 			 ("CMD  LEN  SA    spec  nom. time  cmd timeout\n"));
   1455 
   1456 		struct scsipi_opcodes *tot =
   1457 		  (struct scsipi_opcodes *)malloc(sizeof(struct scsipi_opcodes),
   1458 						  M_DEVBUF, M_NOWAIT|M_ZERO);
   1459 
   1460 		count = 0;
   1461                 while (tot != NULL &&
   1462 		       dlen >= (int)sizeof(struct scsi_repsupopcode_all_commands_descriptor)) {
   1463                         struct scsi_repsupopcode_all_commands_descriptor *acd
   1464 				= (struct scsi_repsupopcode_all_commands_descriptor *)c;
   1465 #ifdef SCSIPI_DEBUG
   1466                         int cdblen = _2btol((const u_int8_t *)&acd->cdblen);
   1467 #endif
   1468                         dlen -= sizeof(struct scsi_repsupopcode_all_commands_descriptor);
   1469                         c += sizeof(struct scsi_repsupopcode_all_commands_descriptor);
   1470                         SC_DEBUG(periph, SCSIPI_DB3,
   1471 				 ("0x%02x(%2d) ", acd->opcode, cdblen));
   1472 
   1473 			tot->opcode_info[acd->opcode].ti_flags = SCSIPI_TI_VALID;
   1474 
   1475                         if (acd->flags & RSOC_ACD_SERVACTV) {
   1476                                 SC_DEBUGN(periph, SCSIPI_DB3,
   1477 					 ("0x%02x%02x ",
   1478 					  acd->serviceaction[0],
   1479 					  acd->serviceaction[1]));
   1480                         } else {
   1481 				SC_DEBUGN(periph, SCSIPI_DB3, ("       "));
   1482                         }
   1483 
   1484                         if (acd->flags & RSOC_ACD_CTDP
   1485 			    && dlen >= (int)sizeof(struct scsi_repsupopcode_timeouts_descriptor)) {
   1486                                 struct scsi_repsupopcode_timeouts_descriptor *td
   1487 					= (struct scsi_repsupopcode_timeouts_descriptor *)c;
   1488                                 long nomto = _4btol(td->nom_process_timeout);
   1489                                 long cmdto = _4btol(td->cmd_process_timeout);
   1490 				long t = (cmdto > nomto) ? cmdto : nomto;
   1491 
   1492                                 dlen -= sizeof(struct scsi_repsupopcode_timeouts_descriptor);
   1493                                 c += sizeof(struct scsi_repsupopcode_timeouts_descriptor);
   1494 
   1495                                 SC_DEBUGN(periph, SCSIPI_DB3,
   1496 					  ("0x%02x %10ld %10ld",
   1497 					   td->cmd_specific,
   1498 					   nomto, cmdto));
   1499 
   1500 				if (t > tot->opcode_info[acd->opcode].ti_timeout) {
   1501 					tot->opcode_info[acd->opcode].ti_timeout = t;
   1502 					++count;
   1503 				}
   1504                         }
   1505                         SC_DEBUGN(periph, SCSIPI_DB3,("\n"));
   1506                 }
   1507 
   1508 		if (count > 0) {
   1509 			periph->periph_opcs = tot;
   1510 		} else {
    1511 			if (tot != NULL) free(tot, M_DEVBUF);
   1512 			SC_DEBUG(periph, SCSIPI_DB3,
   1513 			 	("no usable timeout values available\n"));
   1514 		}
   1515 	} else {
   1516 		SC_DEBUG(periph, SCSIPI_DB3,
   1517 			 ("SCSI_MAINTENANCE_IN"
   1518 			  "[RSOC_REPORT_SUPPORTED_OPCODES] failed error=%d"
   1519 			  " - no device provided timeout "
   1520 			  "values available\n", rc));
   1521 	}
   1522 
   1523 	free(data, M_DEVBUF);
   1524 }
   1525 
   1526 /*
   1527  * scsipi_update_timeouts:
    1528  * 	Override the timeout value if device/config-provided
   1529  *      timeouts are available.
   1530  */
   1531 static void
   1532 scsipi_update_timeouts(struct scsipi_xfer *xs)
   1533 {
   1534 	struct scsipi_opcodes *opcs;
   1535 	u_int8_t cmd;
   1536 	int timeout;
   1537 	struct scsipi_opinfo *oi;
   1538 
   1539 	if (xs->timeout <= 0) {
   1540 		return;
   1541 	}
   1542 
   1543 	opcs = xs->xs_periph->periph_opcs;
   1544 
   1545 	if (opcs == NULL) {
   1546 		return;
   1547 	}
   1548 
   1549 	cmd = xs->cmd->opcode;
   1550 	oi = &opcs->opcode_info[cmd];
   1551 
   1552 	timeout = 1000 * (int)oi->ti_timeout;
   1553 
   1554 
   1555 	if (timeout > xs->timeout && timeout < 86400000) {
   1556 		/*
   1557 		 * pick up device configured timeouts if they
   1558 		 * are longer than the requested ones but less
   1559 		 * than a day
   1560 		 */
   1561 #ifdef SCSIPI_DEBUG
   1562 		if ((oi->ti_flags & SCSIPI_TI_LOGGED) == 0) {
   1563 			SC_DEBUG(xs->xs_periph, SCSIPI_DB3,
   1564 				 ("Overriding command 0x%02x "
   1565 				  "timeout of %d with %d ms\n",
   1566 				  cmd, xs->timeout, timeout));
   1567 			oi->ti_flags |= SCSIPI_TI_LOGGED;
   1568 		}
   1569 #endif
   1570 		xs->timeout = timeout;
   1571 	}
   1572 }
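
/*
 * Editorial note: ti_timeout holds the value reported by the device
 * (nominally seconds per SPC), so a reported 30 s limit becomes 30000 ms
 * above and would replace e.g. a 10000 ms caller-supplied timeout, while
 * values of a day or more (>= 86400000 ms) are treated as implausible
 * and ignored.
 */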
   1573 
   1574 /*
   1575  * scsipi_free_opcodeinfo:
   1576  *
   1577  * free the opcode information table
   1578  */
   1579 void
   1580 scsipi_free_opcodeinfo(struct scsipi_periph *periph)
   1581 {
   1582 	if (periph->periph_opcs != NULL) {
   1583 		free(periph->periph_opcs, M_DEVBUF);
   1584 	}
   1585 
   1586 	periph->periph_opcs = NULL;
   1587 }
   1588 
   1589 /*
   1590  * scsipi_done:
   1591  *
   1592  *	This routine is called by an adapter's interrupt handler when
   1593  *	an xfer is completed.
   1594  */
   1595 void
   1596 scsipi_done(struct scsipi_xfer *xs)
   1597 {
   1598 	struct scsipi_periph *periph = xs->xs_periph;
   1599 	struct scsipi_channel *chan = periph->periph_channel;
   1600 	int freezecnt;
   1601 
   1602 	SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
   1603 #ifdef SCSIPI_DEBUG
   1604 	if (periph->periph_dbflags & SCSIPI_DB1)
   1605 		show_scsipi_cmd(xs);
   1606 #endif
   1607 
   1608 	mutex_enter(chan_mtx(chan));
   1609 	SDT_PROBE1(scsi, base, xfer, done,  xs);
   1610 	/*
   1611 	 * The resource this command was using is now free.
   1612 	 */
   1613 	if (xs->xs_status & XS_STS_DONE) {
   1614 		/* XXX in certain circumstances, such as a device
    1615 		 * being detached, an xs that has already been
   1616 		 * scsipi_done()'d by the main thread will be done'd
   1617 		 * again by scsibusdetach(). Putting the xs on the
   1618 		 * chan_complete queue causes list corruption and
   1619 		 * everyone dies. This prevents that, but perhaps
   1620 		 * there should be better coordination somewhere such
   1621 		 * that this won't ever happen (and can be turned into
    1622 		 * a KASSERT()).
   1623 		 */
   1624 		SDT_PROBE1(scsi, base, xfer, redone,  xs);
   1625 		mutex_exit(chan_mtx(chan));
   1626 		goto out;
   1627 	}
   1628 	scsipi_put_resource(chan);
   1629 	xs->xs_periph->periph_sent--;
   1630 
   1631 	/*
   1632 	 * If the command was tagged, free the tag.
   1633 	 */
   1634 	if (XS_CTL_TAGTYPE(xs) != 0)
   1635 		scsipi_put_tag(xs);
   1636 	else
   1637 		periph->periph_flags &= ~PERIPH_UNTAG;
   1638 
   1639 	/* Mark the command as `done'. */
   1640 	xs->xs_status |= XS_STS_DONE;
   1641 
   1642 #ifdef DIAGNOSTIC
   1643 	if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
   1644 	    (XS_CTL_ASYNC|XS_CTL_POLL))
   1645 		panic("scsipi_done: ASYNC and POLL");
   1646 #endif
   1647 
   1648 	/*
   1649 	 * If the xfer had an error of any sort, freeze the
   1650 	 * periph's queue.  Freeze it again if we were requested
   1651 	 * to do so in the xfer.
   1652 	 */
   1653 	freezecnt = 0;
   1654 	if (xs->error != XS_NOERROR)
   1655 		freezecnt++;
   1656 	if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
   1657 		freezecnt++;
   1658 	if (freezecnt != 0)
   1659 		scsipi_periph_freeze_locked(periph, freezecnt);
   1660 
   1661 	/*
    1662 	 * Record the xfer with a pending sense, in case a SCSI reset is
    1663 	 * received before the thread is woken up.
   1664 	 */
   1665 	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
   1666 		periph->periph_flags |= PERIPH_SENSE;
   1667 		periph->periph_xscheck = xs;
   1668 	}
   1669 
   1670 	/*
   1671 	 * If this was an xfer that was not to complete asynchronously,
   1672 	 * let the requesting thread perform error checking/handling
   1673 	 * in its context.
   1674 	 */
   1675 	if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
   1676 		/*
   1677 		 * If it's a polling job, just return, to unwind the
   1678 		 * call graph.  We don't need to restart the queue,
    1679 		 * because polling jobs are treated specially, and
   1680 		 * are really only used during crash dumps anyway
    1681 		 * (XXX or during boot-time autoconfiguration of
   1682 		 * ATAPI devices).
   1683 		 */
   1684 		if (xs->xs_control & XS_CTL_POLL) {
   1685 			mutex_exit(chan_mtx(chan));
   1686 			return;
   1687 		}
   1688 		cv_broadcast(xs_cv(xs));
   1689 		mutex_exit(chan_mtx(chan));
   1690 		goto out;
   1691 	}
   1692 
   1693 	/*
   1694 	 * Catch the extremely common case of I/O completing
   1695 	 * without error; no use in taking a context switch
   1696 	 * if we can handle it in interrupt context.
   1697 	 */
   1698 	if (xs->error == XS_NOERROR) {
   1699 		mutex_exit(chan_mtx(chan));
   1700 		(void) scsipi_complete(xs);
   1701 		goto out;
   1702 	}
   1703 
   1704 	/*
   1705 	 * There is an error on this xfer.  Put it on the channel's
   1706 	 * completion queue, and wake up the completion thread.
   1707 	 */
   1708 	TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
   1709 	cv_broadcast(chan_cv_complete(chan));
   1710 	mutex_exit(chan_mtx(chan));
   1711 
   1712  out:
   1713 	/*
   1714 	 * If there are more xfers on the channel's queue, attempt to
   1715 	 * run them.
   1716 	 */
   1717 	scsipi_run_queue(chan);
   1718 }
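
         /*
          * Illustrative sketch (example only, not part of this file): a
          * hypothetical adapter's interrupt handler records how the command
          * finished and then hands the xfer to scsipi_done().  "mydev_softc"
          * and its fields are assumptions made for the sketch.
          */
         #if 0
         static void
         mydev_intr(void *arg)
         {
         	struct mydev_softc *sc = arg;
         	struct scsipi_xfer *xs = sc->sc_active_xs;	/* hypothetical */

         	/* Record how the command finished before completing it. */
         	xs->status = SCSI_OK;
         	xs->resid = 0;
         	xs->error = XS_NOERROR;
         	scsipi_done(xs);
         }
         #endif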
   1719 
   1720 /*
   1721  * scsipi_complete:
   1722  *
   1723  *	Completion of a scsipi_xfer.  This is the guts of scsipi_done().
   1724  *
   1725  *	NOTE: This routine MUST be called with valid thread context
   1726  *	except for the case where the following two conditions are
   1727  *	true:
   1728  *
   1729  *		xs->error == XS_NOERROR
   1730  *		XS_CTL_ASYNC is set in xs->xs_control
   1731  *
   1732  *	The semantics of this routine can be tricky, so here is an
   1733  *	explanation:
   1734  *
   1735  *		0		Xfer completed successfully.
   1736  *
   1737  *		ERESTART	Xfer had an error, but was restarted.
   1738  *
   1739  *		anything else	Xfer had an error, return value is Unix
   1740  *				errno.
   1741  *
   1742  *	If the return value is anything but ERESTART:
   1743  *
   1744  *		- If XS_CTL_ASYNC is set, `xs' has been freed back to
   1745  *		  the pool.
   1746  *		- If there is a buf associated with the xfer,
   1747  *		  it has been biodone()'d.
   1748  */
   1749 static int
   1750 scsipi_complete(struct scsipi_xfer *xs)
   1751 {
   1752 	struct scsipi_periph *periph = xs->xs_periph;
   1753 	struct scsipi_channel *chan = periph->periph_channel;
   1754 	int error;
   1755 
   1756 	SDT_PROBE1(scsi, base, xfer, complete,  xs);
   1757 
   1758 #ifdef DIAGNOSTIC
   1759 	if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
   1760 		panic("scsipi_complete: XS_CTL_ASYNC but no buf");
   1761 #endif
   1762 	/*
   1763 	 * If command terminated with a CHECK CONDITION, we need to issue a
   1764 	 * REQUEST_SENSE command. Once the REQUEST_SENSE has been processed
   1765 	 * we'll have the real status.
   1766 	 * Must be processed with channel lock held to avoid missing
   1767 	 * a SCSI bus reset for this command.
   1768 	 */
   1769 	mutex_enter(chan_mtx(chan));
   1770 	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
    1771 		/* request sense for a request sense? */
   1772 		if (xs->xs_control & XS_CTL_REQSENSE) {
   1773 			scsipi_printaddr(periph);
    1774 			printf("request sense for a request sense?\n");
    1775 			/* XXX maybe we should reset the device? */
   1776 			/* we've been frozen because xs->error != XS_NOERROR */
   1777 			scsipi_periph_thaw_locked(periph, 1);
   1778 			mutex_exit(chan_mtx(chan));
   1779 			if (xs->resid < xs->datalen) {
   1780 				printf("we read %d bytes of sense anyway:\n",
   1781 				    xs->datalen - xs->resid);
   1782 				scsipi_print_sense_data((void *)xs->data, 0);
   1783 			}
   1784 			return EINVAL;
   1785 		}
    1786 		mutex_exit(chan_mtx(chan));	/* XXX allows other commands to queue or run */
   1787 		scsipi_request_sense(xs);
   1788 	} else
   1789 		mutex_exit(chan_mtx(chan));
   1790 
   1791 	/*
   1792 	 * If it's a user level request, bypass all usual completion
    1793 	 * processing and let the user work it out.
   1794 	 */
   1795 	if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
   1796 		SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n"));
   1797 		mutex_enter(chan_mtx(chan));
   1798 		if (xs->error != XS_NOERROR)
   1799 			scsipi_periph_thaw_locked(periph, 1);
   1800 		mutex_exit(chan_mtx(chan));
   1801 		scsipi_user_done(xs);
    1802 		SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n"));
   1803 		return 0;
   1804 	}
   1805 
   1806 	switch (xs->error) {
   1807 	case XS_NOERROR:
   1808 		error = 0;
   1809 		break;
   1810 
   1811 	case XS_SENSE:
   1812 	case XS_SHORTSENSE:
   1813 		error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
   1814 		break;
   1815 
   1816 	case XS_RESOURCE_SHORTAGE:
   1817 		/*
   1818 		 * XXX Should freeze channel's queue.
   1819 		 */
   1820 		scsipi_printaddr(periph);
   1821 		printf("adapter resource shortage\n");
   1822 		/* FALLTHROUGH */
   1823 
   1824 	case XS_BUSY:
   1825 		if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
   1826 			struct scsipi_max_openings mo;
   1827 
   1828 			/*
   1829 			 * We set the openings to active - 1, assuming that
   1830 			 * the command that got us here is the first one that
   1831 			 * can't fit into the device's queue.  If that's not
   1832 			 * the case, I guess we'll find out soon enough.
   1833 			 */
   1834 			mo.mo_target = periph->periph_target;
   1835 			mo.mo_lun = periph->periph_lun;
   1836 			if (periph->periph_active < periph->periph_openings)
   1837 				mo.mo_openings = periph->periph_active - 1;
   1838 			else
   1839 				mo.mo_openings = periph->periph_openings - 1;
   1840 #ifdef DIAGNOSTIC
   1841 			if (mo.mo_openings < 0) {
   1842 				scsipi_printaddr(periph);
   1843 				printf("QUEUE FULL resulted in < 0 openings\n");
   1844 				panic("scsipi_done");
   1845 			}
   1846 #endif
   1847 			if (mo.mo_openings == 0) {
   1848 				scsipi_printaddr(periph);
   1849 				printf("QUEUE FULL resulted in 0 openings\n");
   1850 				mo.mo_openings = 1;
   1851 			}
   1852 			scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
   1853 			error = ERESTART;
   1854 		} else if (xs->xs_retries != 0) {
   1855 			xs->xs_retries--;
   1856 			/*
   1857 			 * Wait one second, and try again.
   1858 			 */
   1859 			mutex_enter(chan_mtx(chan));
   1860 			if ((xs->xs_control & XS_CTL_POLL) ||
   1861 			    (chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
   1862 				/* XXX: quite extreme */
   1863 				kpause("xsbusy", false, hz, chan_mtx(chan));
   1864 			} else if (!callout_pending(&periph->periph_callout)) {
   1865 				scsipi_periph_freeze_locked(periph, 1);
   1866 				callout_reset(&periph->periph_callout,
   1867 				    hz, scsipi_periph_timed_thaw, periph);
   1868 			}
   1869 			mutex_exit(chan_mtx(chan));
   1870 			error = ERESTART;
   1871 		} else
   1872 			error = EBUSY;
   1873 		break;
   1874 
   1875 	case XS_REQUEUE:
   1876 		error = ERESTART;
   1877 		break;
   1878 
   1879 	case XS_SELTIMEOUT:
   1880 	case XS_TIMEOUT:
   1881 		/*
   1882 		 * If the device hasn't gone away, honor retry counts.
   1883 		 *
   1884 		 * Note that if we're in the middle of probing it,
    1885 		 * it won't be found because it isn't here yet, so
   1886 		 * we won't honor the retry count in that case.
   1887 		 */
   1888 		if (scsipi_lookup_periph(chan, periph->periph_target,
   1889 		    periph->periph_lun) && xs->xs_retries != 0) {
   1890 			xs->xs_retries--;
   1891 			error = ERESTART;
   1892 		} else
   1893 			error = EIO;
   1894 		break;
   1895 
   1896 	case XS_RESET:
   1897 		if (xs->xs_control & XS_CTL_REQSENSE) {
   1898 			/*
   1899 			 * request sense interrupted by reset: signal it
   1900 			 * with EINTR return code.
   1901 			 */
   1902 			error = EINTR;
   1903 		} else {
   1904 			if (xs->xs_retries != 0) {
   1905 				xs->xs_retries--;
   1906 				error = ERESTART;
   1907 			} else
   1908 				error = EIO;
   1909 		}
   1910 		break;
   1911 
   1912 	case XS_DRIVER_STUFFUP:
   1913 		scsipi_printaddr(periph);
   1914 		printf("generic HBA error\n");
   1915 		error = EIO;
   1916 		break;
   1917 	default:
   1918 		scsipi_printaddr(periph);
   1919 		printf("invalid return code from adapter: %d\n", xs->error);
   1920 		error = EIO;
   1921 		break;
   1922 	}
   1923 
   1924 	mutex_enter(chan_mtx(chan));
   1925 	if (error == ERESTART) {
   1926 		SDT_PROBE1(scsi, base, xfer, restart,  xs);
   1927 		/*
   1928 		 * If we get here, the periph has been thawed and frozen
   1929 		 * again if we had to issue recovery commands.  Alternatively,
   1930 		 * it may have been frozen again and in a timed thaw.  In
   1931 		 * any case, we thaw the periph once we re-enqueue the
   1932 		 * command.  Once the periph is fully thawed, it will begin
   1933 		 * operation again.
   1934 		 */
   1935 		xs->error = XS_NOERROR;
   1936 		xs->status = SCSI_OK;
   1937 		xs->xs_status &= ~XS_STS_DONE;
   1938 		xs->xs_requeuecnt++;
   1939 		error = scsipi_enqueue(xs);
   1940 		if (error == 0) {
   1941 			scsipi_periph_thaw_locked(periph, 1);
   1942 			mutex_exit(chan_mtx(chan));
   1943 			return ERESTART;
   1944 		}
   1945 	}
   1946 
   1947 	/*
   1948 	 * scsipi_done() freezes the queue if not XS_NOERROR.
   1949 	 * Thaw it here.
   1950 	 */
   1951 	if (xs->error != XS_NOERROR)
   1952 		scsipi_periph_thaw_locked(periph, 1);
   1953 	mutex_exit(chan_mtx(chan));
   1954 
   1955 	if (periph->periph_switch->psw_done)
   1956 		periph->periph_switch->psw_done(xs, error);
   1957 
   1958 	mutex_enter(chan_mtx(chan));
   1959 	if (xs->xs_control & XS_CTL_ASYNC)
   1960 		scsipi_put_xs(xs);
   1961 	mutex_exit(chan_mtx(chan));
   1962 
   1963 	return error;
   1964 }
   1965 
   1966 /*
   1967  * Issue a request sense for the given scsipi_xfer. Called when the xfer
   1968  * returns with a CHECK_CONDITION status. Must be called in valid thread
   1969  * context.
   1970  */
   1971 
   1972 static void
   1973 scsipi_request_sense(struct scsipi_xfer *xs)
   1974 {
   1975 	struct scsipi_periph *periph = xs->xs_periph;
   1976 	int flags, error;
   1977 	struct scsi_request_sense cmd;
   1978 
   1979 	periph->periph_flags |= PERIPH_SENSE;
   1980 
   1981 	/* if command was polling, request sense will too */
   1982 	flags = xs->xs_control & XS_CTL_POLL;
   1983 	/* Polling commands can't sleep */
   1984 	if (flags)
   1985 		flags |= XS_CTL_NOSLEEP;
   1986 
   1987 	flags |= XS_CTL_REQSENSE | XS_CTL_URGENT | XS_CTL_DATA_IN |
   1988 	    XS_CTL_THAW_PERIPH | XS_CTL_FREEZE_PERIPH;
   1989 
   1990 	memset(&cmd, 0, sizeof(cmd));
   1991 	cmd.opcode = SCSI_REQUEST_SENSE;
   1992 	cmd.length = sizeof(struct scsi_sense_data);
   1993 
   1994 	error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
   1995 	    (void *)&xs->sense.scsi_sense, sizeof(struct scsi_sense_data),
   1996 	    0, 1000, NULL, flags);
   1997 	periph->periph_flags &= ~PERIPH_SENSE;
   1998 	periph->periph_xscheck = NULL;
   1999 	switch (error) {
   2000 	case 0:
   2001 		/* we have a valid sense */
   2002 		xs->error = XS_SENSE;
   2003 		return;
   2004 	case EINTR:
   2005 		/* REQUEST_SENSE interrupted by bus reset. */
   2006 		xs->error = XS_RESET;
   2007 		return;
   2008 	case EIO:
    2009 		 /* request sense couldn't be performed */
   2010 		/*
   2011 		 * XXX this isn't quite right but we don't have anything
   2012 		 * better for now
   2013 		 */
   2014 		xs->error = XS_DRIVER_STUFFUP;
   2015 		return;
   2016 	default:
   2017 		 /* Notify that request sense failed. */
   2018 		xs->error = XS_DRIVER_STUFFUP;
   2019 		scsipi_printaddr(periph);
   2020 		printf("request sense failed with error %d\n", error);
   2021 		return;
   2022 	}
   2023 }
   2024 
   2025 /*
   2026  * scsipi_enqueue:
   2027  *
   2028  *	Enqueue an xfer on a channel.
   2029  */
   2030 static int
   2031 scsipi_enqueue(struct scsipi_xfer *xs)
   2032 {
   2033 	struct scsipi_channel *chan = xs->xs_periph->periph_channel;
   2034 	struct scsipi_xfer *qxs;
   2035 
   2036 	SDT_PROBE1(scsi, base, xfer, enqueue,  xs);
   2037 
   2038 	/*
   2039 	 * If the xfer is to be polled, and there are already jobs on
   2040 	 * the queue, we can't proceed.
   2041 	 */
   2042 	KASSERT(mutex_owned(chan_mtx(chan)));
   2043 	if ((xs->xs_control & XS_CTL_POLL) != 0 &&
   2044 	    TAILQ_FIRST(&chan->chan_queue) != NULL) {
   2045 		xs->error = XS_DRIVER_STUFFUP;
   2046 		return EAGAIN;
   2047 	}
   2048 
   2049 	/*
   2050 	 * If we have an URGENT xfer, it's an error recovery command
   2051 	 * and it should just go on the head of the channel's queue.
   2052 	 */
   2053 	if (xs->xs_control & XS_CTL_URGENT) {
   2054 		TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
   2055 		goto out;
   2056 	}
   2057 
   2058 	/*
   2059 	 * If this xfer has already been on the queue before, we
   2060 	 * need to reinsert it in the correct order.  That order is:
   2061 	 *
   2062 	 *	Immediately before the first xfer for this periph
   2063 	 *	with a requeuecnt less than xs->xs_requeuecnt.
   2064 	 *
   2065 	 * Failing that, at the end of the queue.  (We'll end up
   2066 	 * there naturally.)
   2067 	 */
   2068 	if (xs->xs_requeuecnt != 0) {
   2069 		for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
   2070 		     qxs = TAILQ_NEXT(qxs, channel_q)) {
   2071 			if (qxs->xs_periph == xs->xs_periph &&
   2072 			    qxs->xs_requeuecnt < xs->xs_requeuecnt)
   2073 				break;
   2074 		}
   2075 		if (qxs != NULL) {
   2076 			TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
   2077 			    channel_q);
   2078 			goto out;
   2079 		}
   2080 	}
   2081 	TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
   2082  out:
   2083 	if (xs->xs_control & XS_CTL_THAW_PERIPH)
   2084 		scsipi_periph_thaw_locked(xs->xs_periph, 1);
   2085 	return 0;
   2086 }
   2087 
   2088 /*
   2089  * scsipi_run_queue:
   2090  *
   2091  *	Start as many xfers as possible running on the channel.
   2092  */
   2093 static void
   2094 scsipi_run_queue(struct scsipi_channel *chan)
   2095 {
   2096 	struct scsipi_xfer *xs;
   2097 	struct scsipi_periph *periph;
   2098 
   2099 	SDT_PROBE1(scsi, base, queue, batch__start,  chan);
   2100 	for (;;) {
   2101 		mutex_enter(chan_mtx(chan));
   2102 
   2103 		/*
   2104 		 * If the channel is frozen, we can't do any work right
   2105 		 * now.
   2106 		 */
   2107 		if (chan->chan_qfreeze != 0) {
   2108 			mutex_exit(chan_mtx(chan));
   2109 			break;
   2110 		}
   2111 
   2112 		/*
   2113 		 * Look for work to do, and make sure we can do it.
   2114 		 */
   2115 		for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
   2116 		     xs = TAILQ_NEXT(xs, channel_q)) {
   2117 			periph = xs->xs_periph;
   2118 
   2119 			if ((periph->periph_sent >= periph->periph_openings) ||
   2120 			    periph->periph_qfreeze != 0 ||
   2121 			    (periph->periph_flags & PERIPH_UNTAG) != 0)
   2122 				continue;
   2123 
   2124 			if ((periph->periph_flags &
   2125 			    (PERIPH_RECOVERING | PERIPH_SENSE)) != 0 &&
   2126 			    (xs->xs_control & XS_CTL_URGENT) == 0)
   2127 				continue;
   2128 
   2129 			/*
   2130 			 * We can issue this xfer!
   2131 			 */
   2132 			goto got_one;
   2133 		}
   2134 
   2135 		/*
   2136 		 * Can't find any work to do right now.
   2137 		 */
   2138 		mutex_exit(chan_mtx(chan));
   2139 		break;
   2140 
   2141  got_one:
   2142 		/*
   2143 		 * Have an xfer to run.  Allocate a resource from
   2144 		 * the adapter to run it.  If we can't allocate that
   2145 		 * resource, we don't dequeue the xfer.
   2146 		 */
   2147 		if (scsipi_get_resource(chan) == 0) {
   2148 			/*
   2149 			 * Adapter is out of resources.  If the adapter
   2150 			 * supports it, attempt to grow them.
   2151 			 */
   2152 			if (scsipi_grow_resources(chan) == 0) {
   2153 				/*
   2154 				 * Wasn't able to grow resources,
   2155 				 * nothing more we can do.
   2156 				 */
   2157 				if (xs->xs_control & XS_CTL_POLL) {
   2158 					scsipi_printaddr(xs->xs_periph);
   2159 					printf("polling command but no "
    2160 					    "adapter resources\n");
   2161 					/* We'll panic shortly... */
   2162 				}
   2163 				mutex_exit(chan_mtx(chan));
   2164 
   2165 				/*
   2166 				 * XXX: We should be able to note that
    2167 				 * XXX: resources are needed here!
   2168 				 */
   2169 				break;
   2170 			}
   2171 			/*
   2172 			 * scsipi_grow_resources() allocated the resource
   2173 			 * for us.
   2174 			 */
   2175 		}
   2176 
   2177 		/*
   2178 		 * We have a resource to run this xfer, do it!
   2179 		 */
   2180 		TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
   2181 
   2182 		/*
   2183 		 * If the command is to be tagged, allocate a tag ID
   2184 		 * for it.
   2185 		 */
   2186 		if (XS_CTL_TAGTYPE(xs) != 0)
   2187 			scsipi_get_tag(xs);
   2188 		else
   2189 			periph->periph_flags |= PERIPH_UNTAG;
   2190 		periph->periph_sent++;
   2191 		mutex_exit(chan_mtx(chan));
   2192 
   2193 		SDT_PROBE2(scsi, base, queue, run,  chan, xs);
   2194 		scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
   2195 	}
   2196 	SDT_PROBE1(scsi, base, queue, batch__done,  chan);
   2197 }
   2198 
   2199 /*
   2200  * scsipi_execute_xs:
   2201  *
   2202  *	Begin execution of an xfer, waiting for it to complete, if necessary.
   2203  */
   2204 int
   2205 scsipi_execute_xs(struct scsipi_xfer *xs)
   2206 {
   2207 	struct scsipi_periph *periph = xs->xs_periph;
   2208 	struct scsipi_channel *chan = periph->periph_channel;
   2209 	int oasync, async, poll, error;
   2210 
   2211 	KASSERT(!cold);
   2212 
   2213 	scsipi_update_timeouts(xs);
   2214 
   2215 	(chan->chan_bustype->bustype_cmd)(xs);
   2216 
   2217 	xs->xs_status &= ~XS_STS_DONE;
   2218 	xs->error = XS_NOERROR;
   2219 	xs->resid = xs->datalen;
   2220 	xs->status = SCSI_OK;
   2221 	SDT_PROBE1(scsi, base, xfer, execute,  xs);
   2222 
   2223 #ifdef SCSIPI_DEBUG
   2224 	if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) {
   2225 		printf("scsipi_execute_xs: ");
   2226 		show_scsipi_xs(xs);
   2227 		printf("\n");
   2228 	}
   2229 #endif
   2230 
   2231 	/*
   2232 	 * Deal with command tagging:
   2233 	 *
   2234 	 *	- If the device's current operating mode doesn't
   2235 	 *	  include tagged queueing, clear the tag mask.
   2236 	 *
   2237 	 *	- If the device's current operating mode *does*
   2238 	 *	  include tagged queueing, set the tag_type in
   2239 	 *	  the xfer to the appropriate byte for the tag
   2240 	 *	  message.
   2241 	 */
   2242 	if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0 ||
   2243 		(xs->xs_control & XS_CTL_REQSENSE)) {
   2244 		xs->xs_control &= ~XS_CTL_TAGMASK;
   2245 		xs->xs_tag_type = 0;
   2246 	} else {
   2247 		/*
   2248 		 * If the request doesn't specify a tag, give Head
   2249 		 * tags to URGENT operations and Simple tags to
   2250 		 * everything else.
   2251 		 */
   2252 		if (XS_CTL_TAGTYPE(xs) == 0) {
   2253 			if (xs->xs_control & XS_CTL_URGENT)
   2254 				xs->xs_control |= XS_CTL_HEAD_TAG;
   2255 			else
   2256 				xs->xs_control |= XS_CTL_SIMPLE_TAG;
   2257 		}
   2258 
   2259 		switch (XS_CTL_TAGTYPE(xs)) {
   2260 		case XS_CTL_ORDERED_TAG:
   2261 			xs->xs_tag_type = MSG_ORDERED_Q_TAG;
   2262 			break;
   2263 
   2264 		case XS_CTL_SIMPLE_TAG:
   2265 			xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
   2266 			break;
   2267 
   2268 		case XS_CTL_HEAD_TAG:
   2269 			xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
   2270 			break;
   2271 
   2272 		default:
   2273 			scsipi_printaddr(periph);
   2274 			printf("invalid tag mask 0x%08x\n",
   2275 			    XS_CTL_TAGTYPE(xs));
   2276 			panic("scsipi_execute_xs");
   2277 		}
   2278 	}
   2279 
    2280 	/* If the adapter wants us to poll, poll. */
   2281 	if (chan->chan_adapter->adapt_flags & SCSIPI_ADAPT_POLL_ONLY)
   2282 		xs->xs_control |= XS_CTL_POLL;
   2283 
   2284 	/*
   2285 	 * If we don't yet have a completion thread, or we are to poll for
   2286 	 * completion, clear the ASYNC flag.
   2287 	 */
    2288 	oasync = (xs->xs_control & XS_CTL_ASYNC);
   2289 	if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
   2290 		xs->xs_control &= ~XS_CTL_ASYNC;
   2291 
   2292 	async = (xs->xs_control & XS_CTL_ASYNC);
   2293 	poll = (xs->xs_control & XS_CTL_POLL);
   2294 
   2295 #ifdef DIAGNOSTIC
   2296 	if (oasync != 0 && xs->bp == NULL)
   2297 		panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
   2298 #endif
   2299 
   2300 	/*
   2301 	 * Enqueue the transfer.  If we're not polling for completion, this
   2302 	 * should ALWAYS return `no error'.
   2303 	 */
   2304 	error = scsipi_enqueue(xs);
   2305 	if (error) {
   2306 		if (poll == 0) {
   2307 			scsipi_printaddr(periph);
   2308 			printf("not polling, but enqueue failed with %d\n",
   2309 			    error);
   2310 			panic("scsipi_execute_xs");
   2311 		}
   2312 
   2313 		scsipi_printaddr(periph);
   2314 		printf("should have flushed queue?\n");
   2315 		goto free_xs;
   2316 	}
   2317 
   2318 	mutex_exit(chan_mtx(chan));
   2319  restarted:
   2320 	scsipi_run_queue(chan);
   2321 	mutex_enter(chan_mtx(chan));
   2322 
   2323 	/*
   2324 	 * The xfer is enqueued, and possibly running.  If it's to be
   2325 	 * completed asynchronously, just return now.
   2326 	 */
   2327 	if (async)
   2328 		return 0;
   2329 
   2330 	/*
   2331 	 * Not an asynchronous command; wait for it to complete.
   2332 	 */
   2333 	while ((xs->xs_status & XS_STS_DONE) == 0) {
   2334 		if (poll) {
   2335 			scsipi_printaddr(periph);
   2336 			printf("polling command not done\n");
   2337 			panic("scsipi_execute_xs");
   2338 		}
   2339 		cv_wait(xs_cv(xs), chan_mtx(chan));
   2340 	}
   2341 
   2342 	/*
   2343 	 * Command is complete.  scsipi_done() has awakened us to perform
   2344 	 * the error handling.
   2345 	 */
   2346 	mutex_exit(chan_mtx(chan));
   2347 	error = scsipi_complete(xs);
   2348 	if (error == ERESTART)
   2349 		goto restarted;
   2350 
   2351 	/*
    2352 	 * If it was meant to run async and we cleared async ourselves,
    2353 	 * don't return an error here; it has already been handled.
   2354 	 */
   2355 	if (oasync)
   2356 		error = 0;
   2357 	/*
   2358 	 * Command completed successfully or fatal error occurred.  Fall
   2359 	 * into....
   2360 	 */
   2361 	mutex_enter(chan_mtx(chan));
   2362  free_xs:
   2363 	scsipi_put_xs(xs);
   2364 	mutex_exit(chan_mtx(chan));
   2365 
   2366 	/*
   2367 	 * Kick the queue, keep it running in case it stopped for some
   2368 	 * reason.
   2369 	 */
   2370 	scsipi_run_queue(chan);
   2371 
   2372 	mutex_enter(chan_mtx(chan));
   2373 	return error;
   2374 }
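
         /*
          * Illustrative sketch (example only): periph drivers normally reach
          * scsipi_execute_xs() through scsipi_command(), much like the
          * REQUEST SENSE issued above: build a CDB, then pass it along with
          * a data buffer, retry count, timeout (ms) and XS_CTL flags.
          */
         #if 0
         static int
         example_request_sense(struct scsipi_periph *periph)
         {
         	struct scsi_request_sense cmd;
         	struct scsi_sense_data sense;

         	memset(&cmd, 0, sizeof(cmd));
         	cmd.opcode = SCSI_REQUEST_SENSE;
         	cmd.length = sizeof(sense);

         	/* 4 retries, 10 second timeout, data read into "sense". */
         	return scsipi_command(periph, (void *)&cmd, sizeof(cmd),
         	    (void *)&sense, sizeof(sense), 4, 10000, NULL,
         	    XS_CTL_DATA_IN);
         }
         #endif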
   2375 
   2376 /*
   2377  * scsipi_completion_thread:
   2378  *
   2379  *	This is the completion thread.  We wait for errors on
   2380  *	asynchronous xfers, and perform the error handling
   2381  *	function, restarting the command, if necessary.
   2382  */
   2383 static void
   2384 scsipi_completion_thread(void *arg)
   2385 {
   2386 	struct scsipi_channel *chan = arg;
   2387 	struct scsipi_xfer *xs;
   2388 
   2389 	if (chan->chan_init_cb)
   2390 		(*chan->chan_init_cb)(chan, chan->chan_init_cb_arg);
   2391 
   2392 	mutex_enter(chan_mtx(chan));
   2393 	chan->chan_flags |= SCSIPI_CHAN_TACTIVE;
   2394 	for (;;) {
   2395 		xs = TAILQ_FIRST(&chan->chan_complete);
   2396 		if (xs == NULL && chan->chan_tflags == 0) {
   2397 			/* nothing to do; wait */
   2398 			cv_wait(chan_cv_complete(chan), chan_mtx(chan));
   2399 			continue;
   2400 		}
   2401 		if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
   2402 			/* call chan_callback from thread context */
   2403 			chan->chan_tflags &= ~SCSIPI_CHANT_CALLBACK;
   2404 			chan->chan_callback(chan, chan->chan_callback_arg);
   2405 			continue;
   2406 		}
   2407 		if (chan->chan_tflags & SCSIPI_CHANT_GROWRES) {
   2408 			/* attempt to get more openings for this channel */
   2409 			chan->chan_tflags &= ~SCSIPI_CHANT_GROWRES;
   2410 			mutex_exit(chan_mtx(chan));
   2411 			scsipi_adapter_request(chan,
   2412 			    ADAPTER_REQ_GROW_RESOURCES, NULL);
   2413 			scsipi_channel_thaw(chan, 1);
   2414 			if (chan->chan_tflags & SCSIPI_CHANT_GROWRES)
    2415 				kpause("scsizzz", false, hz/10, NULL);
   2416 			mutex_enter(chan_mtx(chan));
   2417 			continue;
   2418 		}
   2419 		if (chan->chan_tflags & SCSIPI_CHANT_KICK) {
   2420 			/* explicitly run the queues for this channel */
   2421 			chan->chan_tflags &= ~SCSIPI_CHANT_KICK;
   2422 			mutex_exit(chan_mtx(chan));
   2423 			scsipi_run_queue(chan);
   2424 			mutex_enter(chan_mtx(chan));
   2425 			continue;
   2426 		}
   2427 		if (chan->chan_tflags & SCSIPI_CHANT_SHUTDOWN) {
   2428 			break;
   2429 		}
   2430 		if (xs) {
   2431 			TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
   2432 			mutex_exit(chan_mtx(chan));
   2433 
   2434 			/*
   2435 			 * Have an xfer with an error; process it.
   2436 			 */
   2437 			(void) scsipi_complete(xs);
   2438 
   2439 			/*
   2440 			 * Kick the queue; keep it running if it was stopped
   2441 			 * for some reason.
   2442 			 */
   2443 			scsipi_run_queue(chan);
   2444 			mutex_enter(chan_mtx(chan));
   2445 		}
   2446 	}
   2447 
   2448 	chan->chan_thread = NULL;
   2449 
   2450 	/* In case parent is waiting for us to exit. */
   2451 	cv_broadcast(chan_cv_thread(chan));
   2452 	mutex_exit(chan_mtx(chan));
   2453 
   2454 	kthread_exit(0);
   2455 }

    2456 /*
   2457  * scsipi_thread_call_callback:
   2458  *
    2459  *	Request to call a callback from the completion thread.
   2460  */
   2461 int
   2462 scsipi_thread_call_callback(struct scsipi_channel *chan,
   2463     void (*callback)(struct scsipi_channel *, void *), void *arg)
   2464 {
   2465 
   2466 	mutex_enter(chan_mtx(chan));
   2467 	if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
   2468 		/* kernel thread doesn't exist yet */
   2469 		mutex_exit(chan_mtx(chan));
   2470 		return ESRCH;
   2471 	}
   2472 	if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
   2473 		mutex_exit(chan_mtx(chan));
   2474 		return EBUSY;
   2475 	}
   2476 	scsipi_channel_freeze(chan, 1);
   2477 	chan->chan_callback = callback;
   2478 	chan->chan_callback_arg = arg;
   2479 	chan->chan_tflags |= SCSIPI_CHANT_CALLBACK;
   2480 	cv_broadcast(chan_cv_complete(chan));
   2481 	mutex_exit(chan_mtx(chan));
   2482 	return 0;
   2483 }
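
         /*
          * Illustrative sketch (example only): an adapter driver defers work
          * into thread context with scsipi_thread_call_callback().  The
          * channel is frozen on the caller's behalf, so the callback is
          * expected to thaw it when it is done.  The "mydev" names are
          * assumptions made for the sketch.
          */
         #if 0
         static void
         mydev_reset_callback(struct scsipi_channel *chan, void *arg)
         {
         	struct mydev_softc *sc = arg;

         	mydev_reset_hardware(sc);	/* hypothetical helper */
         	scsipi_channel_thaw(chan, 1);
         }

         static void
         mydev_request_reset(struct mydev_softc *sc)
         {
         	if (scsipi_thread_call_callback(&sc->sc_channel,
         	    mydev_reset_callback, sc) != 0)
         		printf("mydev: completion thread not available\n");
         }
         #endif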
   2484 
   2485 /*
   2486  * scsipi_async_event:
   2487  *
   2488  *	Handle an asynchronous event from an adapter.
   2489  */
   2490 void
   2491 scsipi_async_event(struct scsipi_channel *chan, scsipi_async_event_t event,
   2492     void *arg)
   2493 {
   2494 	bool lock = chan_running(chan) > 0;
   2495 
   2496 	if (lock)
   2497 		mutex_enter(chan_mtx(chan));
   2498 	switch (event) {
   2499 	case ASYNC_EVENT_MAX_OPENINGS:
   2500 		scsipi_async_event_max_openings(chan,
   2501 		    (struct scsipi_max_openings *)arg);
   2502 		break;
   2503 
   2504 	case ASYNC_EVENT_XFER_MODE:
   2505 		if (chan->chan_bustype->bustype_async_event_xfer_mode) {
   2506 			chan->chan_bustype->bustype_async_event_xfer_mode(
   2507 			    chan, arg);
   2508 		}
   2509 		break;
   2510 	case ASYNC_EVENT_RESET:
   2511 		scsipi_async_event_channel_reset(chan);
   2512 		break;
   2513 	}
   2514 	if (lock)
   2515 		mutex_exit(chan_mtx(chan));
   2516 }
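
         /*
          * Illustrative sketch (example only): adapters report these events
          * with scsipi_async_event(), just as scsipi_complete() above posts
          * ASYNC_EVENT_MAX_OPENINGS on QUEUE FULL.  A bus reset observed in
          * a hypothetical adapter interrupt handler would be reported as:
          *
          *	scsipi_async_event(&sc->sc_channel, ASYNC_EVENT_RESET, NULL);
          */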
   2517 
   2518 /*
   2519  * scsipi_async_event_max_openings:
   2520  *
   2521  *	Update the maximum number of outstanding commands a
   2522  *	device may have.
   2523  */
   2524 static void
   2525 scsipi_async_event_max_openings(struct scsipi_channel *chan,
   2526     struct scsipi_max_openings *mo)
   2527 {
   2528 	struct scsipi_periph *periph;
   2529 	int minlun, maxlun;
   2530 
   2531 	if (mo->mo_lun == -1) {
   2532 		/*
   2533 		 * Wildcarded; apply it to all LUNs.
   2534 		 */
   2535 		minlun = 0;
   2536 		maxlun = chan->chan_nluns - 1;
   2537 	} else
   2538 		minlun = maxlun = mo->mo_lun;
   2539 
   2540 	/* XXX This could really suck with a large LUN space. */
   2541 	for (; minlun <= maxlun; minlun++) {
   2542 		periph = scsipi_lookup_periph_locked(chan, mo->mo_target, minlun);
   2543 		if (periph == NULL)
   2544 			continue;
   2545 
   2546 		if (mo->mo_openings < periph->periph_openings)
   2547 			periph->periph_openings = mo->mo_openings;
   2548 		else if (mo->mo_openings > periph->periph_openings &&
   2549 		    (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
   2550 			periph->periph_openings = mo->mo_openings;
   2551 	}
   2552 }
   2553 
   2554 /*
   2555  * scsipi_set_xfer_mode:
   2556  *
   2557  *	Set the xfer mode for the specified I_T Nexus.
   2558  */
   2559 void
   2560 scsipi_set_xfer_mode(struct scsipi_channel *chan, int target, int immed)
   2561 {
   2562 	struct scsipi_xfer_mode xm;
   2563 	struct scsipi_periph *itperiph;
   2564 	int lun;
   2565 
   2566 	/*
   2567 	 * Go to the minimal xfer mode.
   2568 	 */
   2569 	xm.xm_target = target;
   2570 	xm.xm_mode = 0;
   2571 	xm.xm_period = 0;			/* ignored */
   2572 	xm.xm_offset = 0;			/* ignored */
   2573 
   2574 	/*
   2575 	 * Find the first LUN we know about on this I_T Nexus.
   2576 	 */
   2577 	for (itperiph = NULL, lun = 0; lun < chan->chan_nluns; lun++) {
   2578 		itperiph = scsipi_lookup_periph(chan, target, lun);
   2579 		if (itperiph != NULL)
   2580 			break;
   2581 	}
   2582 	if (itperiph != NULL) {
   2583 		xm.xm_mode = itperiph->periph_cap;
   2584 		/*
   2585 		 * Now issue the request to the adapter.
   2586 		 */
   2587 		scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm);
   2588 		/*
   2589 		 * If we want this to happen immediately, issue a dummy
   2590 		 * command, since most adapters can't really negotiate unless
   2591 		 * they're executing a job.
   2592 		 */
   2593 		if (immed != 0) {
   2594 			(void) scsipi_test_unit_ready(itperiph,
   2595 			    XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
   2596 			    XS_CTL_IGNORE_NOT_READY |
   2597 			    XS_CTL_IGNORE_MEDIA_CHANGE);
   2598 		}
   2599 	}
   2600 }
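
         /*
          * Illustrative sketch (example only): after a bus reset, a bus
          * driver could drop target 3 back to the minimal xfer mode and
          * force immediate renegotiation with:
          *
          *	scsipi_set_xfer_mode(chan, 3, 1);
          */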
   2601 
   2602 /*
    2603  * scsipi_async_event_channel_reset:
    2604  *
    2605  *	Handle a SCSI bus reset.
    2606  *	Called with the channel lock held.
   2607  */
   2608 static void
   2609 scsipi_async_event_channel_reset(struct scsipi_channel *chan)
   2610 {
   2611 	struct scsipi_xfer *xs, *xs_next;
   2612 	struct scsipi_periph *periph;
   2613 	int target, lun;
   2614 
   2615 	/*
    2616 	 * The channel has been reset.  Also mark pending REQUEST_SENSE
    2617 	 * commands as reset, since their sense data is no longer available.
    2618 	 * We can't call scsipi_done() from here, as the commands have not
    2619 	 * been sent to the adapter yet (that would corrupt the accounting).
   2620 	 */
   2621 
   2622 	for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; xs = xs_next) {
   2623 		xs_next = TAILQ_NEXT(xs, channel_q);
   2624 		if (xs->xs_control & XS_CTL_REQSENSE) {
   2625 			TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
   2626 			xs->error = XS_RESET;
   2627 			if ((xs->xs_control & XS_CTL_ASYNC) != 0)
   2628 				TAILQ_INSERT_TAIL(&chan->chan_complete, xs,
   2629 				    channel_q);
   2630 		}
   2631 	}
   2632 	cv_broadcast(chan_cv_complete(chan));
   2633 	/* Catch xs with pending sense which may not have a REQSENSE xs yet */
   2634 	for (target = 0; target < chan->chan_ntargets; target++) {
   2635 		if (target == chan->chan_id)
   2636 			continue;
    2637 		for (lun = 0; lun < chan->chan_nluns; lun++) {
   2638 			periph = scsipi_lookup_periph_locked(chan, target, lun);
   2639 			if (periph) {
   2640 				xs = periph->periph_xscheck;
   2641 				if (xs)
   2642 					xs->error = XS_RESET;
   2643 			}
   2644 		}
   2645 	}
   2646 }
   2647 
   2648 /*
   2649  * scsipi_target_detach:
   2650  *
    2651  *	Detach all periphs associated with an I_T nexus.
    2652  *	Must be called from valid thread context.
   2653  */
   2654 int
   2655 scsipi_target_detach(struct scsipi_channel *chan, int target, int lun,
   2656     int flags)
   2657 {
   2658 	struct scsipi_periph *periph;
   2659 	device_t tdev;
   2660 	int ctarget, mintarget, maxtarget;
   2661 	int clun, minlun, maxlun;
   2662 	int error = 0;
   2663 
   2664 	if (target == -1) {
   2665 		mintarget = 0;
   2666 		maxtarget = chan->chan_ntargets;
   2667 	} else {
   2668 		if (target == chan->chan_id)
   2669 			return EINVAL;
   2670 		if (target < 0 || target >= chan->chan_ntargets)
   2671 			return EINVAL;
   2672 		mintarget = target;
   2673 		maxtarget = target + 1;
   2674 	}
   2675 
   2676 	if (lun == -1) {
   2677 		minlun = 0;
   2678 		maxlun = chan->chan_nluns;
   2679 	} else {
   2680 		if (lun < 0 || lun >= chan->chan_nluns)
   2681 			return EINVAL;
   2682 		minlun = lun;
   2683 		maxlun = lun + 1;
   2684 	}
   2685 
   2686 	/* for config_detach */
   2687 	KERNEL_LOCK(1, curlwp);
   2688 
   2689 	mutex_enter(chan_mtx(chan));
   2690 	for (ctarget = mintarget; ctarget < maxtarget; ctarget++) {
   2691 		if (ctarget == chan->chan_id)
   2692 			continue;
   2693 
   2694 		for (clun = minlun; clun < maxlun; clun++) {
   2695 			periph = scsipi_lookup_periph_locked(chan, ctarget, clun);
   2696 			if (periph == NULL)
   2697 				continue;
   2698 			tdev = periph->periph_dev;
   2699 			mutex_exit(chan_mtx(chan));
   2700 			error = config_detach(tdev, flags);
   2701 			if (error)
   2702 				goto out;
   2703 			mutex_enter(chan_mtx(chan));
   2704 			KASSERT(scsipi_lookup_periph_locked(chan, ctarget, clun) == NULL);
   2705 		}
   2706 	}
   2707 	mutex_exit(chan_mtx(chan));
   2708 
   2709 out:
   2710 	KERNEL_UNLOCK_ONE(curlwp);
   2711 
   2712 	return error;
   2713 }
   2714 
   2715 /*
   2716  * scsipi_adapter_addref:
   2717  *
    2718  *	Add a reference to the provided adapter, enabling the
    2719  *	adapter if necessary.
   2720  */
   2721 int
   2722 scsipi_adapter_addref(struct scsipi_adapter *adapt)
   2723 {
   2724 	int error = 0;
   2725 
   2726 	if (atomic_inc_uint_nv(&adapt->adapt_refcnt) == 1
   2727 	    && adapt->adapt_enable != NULL) {
   2728 		scsipi_adapter_lock(adapt);
   2729 		error = scsipi_adapter_enable(adapt, 1);
   2730 		scsipi_adapter_unlock(adapt);
   2731 		if (error)
   2732 			atomic_dec_uint(&adapt->adapt_refcnt);
   2733 	}
   2734 	return error;
   2735 }
   2736 
   2737 /*
   2738  * scsipi_adapter_delref:
   2739  *
    2740  *	Delete a reference to the provided adapter, disabling the
    2741  *	adapter if possible.
   2742  */
   2743 void
   2744 scsipi_adapter_delref(struct scsipi_adapter *adapt)
   2745 {
   2746 
   2747 	if (atomic_dec_uint_nv(&adapt->adapt_refcnt) == 0
   2748 	    && adapt->adapt_enable != NULL) {
   2749 		scsipi_adapter_lock(adapt);
   2750 		(void) scsipi_adapter_enable(adapt, 0);
   2751 		scsipi_adapter_unlock(adapt);
   2752 	}
   2753 }
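
         /*
          * Illustrative sketch (example only): periph drivers pair these two
          * calls around device use, so that the first reference enables the
          * adapter and dropping the last reference lets it be disabled.  The
          * fragment below assumes "chan" and "error" are in scope.
          */
         #if 0
         	/* In the periph's open path: */
         	if ((error = scsipi_adapter_addref(chan->chan_adapter)) != 0)
         		return error;
         	/* ... and in the matching close path: */
         	scsipi_adapter_delref(chan->chan_adapter);
         #endif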
   2754 
   2755 static struct scsipi_syncparam {
   2756 	int	ss_factor;
   2757 	int	ss_period;	/* ns * 100 */
   2758 } scsipi_syncparams[] = {
   2759 	{ 0x08,		 625 },	/* FAST-160 (Ultra320) */
   2760 	{ 0x09,		1250 },	/* FAST-80 (Ultra160) */
   2761 	{ 0x0a,		2500 },	/* FAST-40 40MHz (Ultra2) */
   2762 	{ 0x0b,		3030 },	/* FAST-40 33MHz (Ultra2) */
   2763 	{ 0x0c,		5000 },	/* FAST-20 (Ultra) */
   2764 };
   2765 static const int scsipi_nsyncparams =
   2766     sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);
   2767 
   2768 int
   2769 scsipi_sync_period_to_factor(int period /* ns * 100 */)
   2770 {
   2771 	int i;
   2772 
   2773 	for (i = 0; i < scsipi_nsyncparams; i++) {
   2774 		if (period <= scsipi_syncparams[i].ss_period)
   2775 			return scsipi_syncparams[i].ss_factor;
   2776 	}
   2777 
   2778 	return (period / 100) / 4;
   2779 }
   2780 
   2781 int
   2782 scsipi_sync_factor_to_period(int factor)
   2783 {
   2784 	int i;
   2785 
   2786 	for (i = 0; i < scsipi_nsyncparams; i++) {
   2787 		if (factor == scsipi_syncparams[i].ss_factor)
   2788 			return scsipi_syncparams[i].ss_period;
   2789 	}
   2790 
   2791 	return (factor * 4) * 100;
   2792 }
   2793 
   2794 int
   2795 scsipi_sync_factor_to_freq(int factor)
   2796 {
   2797 	int i;
   2798 
   2799 	for (i = 0; i < scsipi_nsyncparams; i++) {
   2800 		if (factor == scsipi_syncparams[i].ss_factor)
   2801 			return 100000000 / scsipi_syncparams[i].ss_period;
   2802 	}
   2803 
   2804 	return 10000000 / ((factor * 4) * 10);
   2805 }
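
         /*
          * Worked example: FAST-20 has a 50ns period, stored as 5000
          * (ns * 100).  scsipi_sync_period_to_factor(5000) matches the 0x0c
          * table entry; scsipi_sync_factor_to_period(0x0c) returns 5000; and
          * scsipi_sync_factor_to_freq(0x0c) returns 100000000 / 5000 = 20000,
          * i.e. 20MHz expressed in kHz.  A factor outside the table, e.g. 25,
          * falls through to the generic formulas: period = (25 * 4) * 100 =
          * 10000 (100ns), and freq = 10000000 / ((25 * 4) * 10) = 10000
          * (10MHz).
          */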
   2806 
   2807 static inline void
   2808 scsipi_adapter_lock(struct scsipi_adapter *adapt)
   2809 {
   2810 
   2811 	if ((adapt->adapt_flags & SCSIPI_ADAPT_MPSAFE) == 0)
   2812 		KERNEL_LOCK(1, NULL);
   2813 }
   2814 
   2815 static inline void
   2816 scsipi_adapter_unlock(struct scsipi_adapter *adapt)
   2817 {
   2818 
   2819 	if ((adapt->adapt_flags & SCSIPI_ADAPT_MPSAFE) == 0)
   2820 		KERNEL_UNLOCK_ONE(NULL);
   2821 }
   2822 
   2823 void
   2824 scsipi_adapter_minphys(struct scsipi_channel *chan, struct buf *bp)
   2825 {
   2826 	struct scsipi_adapter *adapt = chan->chan_adapter;
   2827 
   2828 	scsipi_adapter_lock(adapt);
   2829 	(adapt->adapt_minphys)(bp);
    2830 	scsipi_adapter_unlock(adapt);
   2831 }
   2832 
   2833 void
   2834 scsipi_adapter_request(struct scsipi_channel *chan,
   2835 	scsipi_adapter_req_t req, void *arg)
   2837 {
   2838 	struct scsipi_adapter *adapt = chan->chan_adapter;
   2839 
   2840 	scsipi_adapter_lock(adapt);
   2841 	SDT_PROBE3(scsi, base, adapter, request__start,  chan, req, arg);
   2842 	(adapt->adapt_request)(chan, req, arg);
   2843 	SDT_PROBE3(scsi, base, adapter, request__done,  chan, req, arg);
   2844 	scsipi_adapter_unlock(adapt);
   2845 }
   2846 
   2847 int
   2848 scsipi_adapter_ioctl(struct scsipi_channel *chan, u_long cmd,
   2849 	void *data, int flag, struct proc *p)
   2850 {
   2851 	struct scsipi_adapter *adapt = chan->chan_adapter;
   2852 	int error;
   2853 
   2854 	if (adapt->adapt_ioctl == NULL)
   2855 		return ENOTTY;
   2856 
   2857 	scsipi_adapter_lock(adapt);
   2858 	error = (adapt->adapt_ioctl)(chan, cmd, data, flag, p);
   2859 	scsipi_adapter_unlock(adapt);
   2860 	return error;
   2861 }
   2862 
   2863 int
   2864 scsipi_adapter_enable(struct scsipi_adapter *adapt, int enable)
   2865 {
   2866 	int error;
   2867 
   2868 	scsipi_adapter_lock(adapt);
   2869 	error = (adapt->adapt_enable)(adapt->adapt_dev, enable);
   2870 	scsipi_adapter_unlock(adapt);
   2871 	return error;
   2872 }
   2873 
   2874 #ifdef SCSIPI_DEBUG
   2875 /*
   2876  * Given a scsipi_xfer, dump the request, in all its glory
   2877  */
   2878 void
   2879 show_scsipi_xs(struct scsipi_xfer *xs)
   2880 {
   2881 
   2882 	printf("xs(%p): ", xs);
   2883 	printf("xs_control(0x%08x)", xs->xs_control);
   2884 	printf("xs_status(0x%08x)", xs->xs_status);
   2885 	printf("periph(%p)", xs->xs_periph);
   2886 	printf("retr(0x%x)", xs->xs_retries);
   2887 	printf("timo(0x%x)", xs->timeout);
   2888 	printf("cmd(%p)", xs->cmd);
   2889 	printf("len(0x%x)", xs->cmdlen);
   2890 	printf("data(%p)", xs->data);
   2891 	printf("len(0x%x)", xs->datalen);
   2892 	printf("res(0x%x)", xs->resid);
   2893 	printf("err(0x%x)", xs->error);
   2894 	printf("bp(%p)", xs->bp);
   2895 	show_scsipi_cmd(xs);
   2896 }
   2897 
   2898 void
   2899 show_scsipi_cmd(struct scsipi_xfer *xs)
   2900 {
   2901 	u_char *b = (u_char *) xs->cmd;
   2902 	int i = 0;
   2903 
   2904 	scsipi_printaddr(xs->xs_periph);
   2905 	printf(" command: ");
   2906 
   2907 	if ((xs->xs_control & XS_CTL_RESET) == 0) {
   2908 		while (i < xs->cmdlen) {
   2909 			if (i)
   2910 				printf(",");
   2911 			printf("0x%x", b[i++]);
   2912 		}
   2913 		printf("-[%d bytes]\n", xs->datalen);
   2914 		if (xs->datalen)
   2915 			show_mem(xs->data, uimin(64, xs->datalen));
   2916 	} else
   2917 		printf("-RESET-\n");
   2918 }
   2919 
   2920 void
   2921 show_mem(u_char *address, int num)
   2922 {
   2923 	int x;
   2924 
   2925 	printf("------------------------------");
   2926 	for (x = 0; x < num; x++) {
   2927 		if ((x % 16) == 0)
   2928 			printf("\n%03d: ", x);
   2929 		printf("%02x ", *address++);
   2930 	}
   2931 	printf("\n------------------------------\n");
   2932 }
   2933 #endif /* SCSIPI_DEBUG */
   2934