Home | History | Annotate | Line # | Download | only in scsipi
scsipi_base.c revision 1.192
      1 /*	$NetBSD: scsipi_base.c,v 1.192 2024/10/28 14:42:06 nat Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 1998, 1999, 2000, 2002, 2003, 2004 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
      9  * Simulation Facility, NASA Ames Research Center.
     10  *
     11  * Redistribution and use in source and binary forms, with or without
     12  * modification, are permitted provided that the following conditions
     13  * are met:
     14  * 1. Redistributions of source code must retain the above copyright
     15  *    notice, this list of conditions and the following disclaimer.
     16  * 2. Redistributions in binary form must reproduce the above copyright
     17  *    notice, this list of conditions and the following disclaimer in the
     18  *    documentation and/or other materials provided with the distribution.
     19  *
     20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     30  * POSSIBILITY OF SUCH DAMAGE.
     31  */
     32 
     33 #include <sys/cdefs.h>
     34 __KERNEL_RCSID(0, "$NetBSD: scsipi_base.c,v 1.192 2024/10/28 14:42:06 nat Exp $");
     35 
     36 #ifdef _KERNEL_OPT
     37 #include "opt_scsi.h"
     38 #endif
     39 
     40 #include <sys/param.h>
     41 #include <sys/systm.h>
     42 #include <sys/kernel.h>
     43 #include <sys/buf.h>
     44 #include <sys/uio.h>
     45 #include <sys/malloc.h>
     46 #include <sys/pool.h>
     47 #include <sys/errno.h>
     48 #include <sys/device.h>
     49 #include <sys/proc.h>
     50 #include <sys/kthread.h>
     51 #include <sys/hash.h>
     52 #include <sys/atomic.h>
     53 
     54 #include <dev/scsipi/scsi_sdt.h>
     55 #include <dev/scsipi/scsi_spc.h>
     56 #include <dev/scsipi/scsipi_all.h>
     57 #include <dev/scsipi/scsipi_disk.h>
     58 #include <dev/scsipi/scsipiconf.h>
     59 #include <dev/scsipi/scsipi_base.h>
     60 
     61 #include <dev/scsipi/scsi_all.h>
     62 #include <dev/scsipi/scsi_message.h>
     63 
     64 #include <machine/param.h>
     65 
     66 SDT_PROVIDER_DEFINE(scsi);
     67 
     68 SDT_PROBE_DEFINE3(scsi, base, tag, get,
     69     "struct scsipi_xfer *"/*xs*/, "uint8_t"/*tag*/, "uint8_t"/*type*/);
     70 SDT_PROBE_DEFINE3(scsi, base, tag, put,
     71     "struct scsipi_xfer *"/*xs*/, "uint8_t"/*tag*/, "uint8_t"/*type*/);
     72 
     73 SDT_PROBE_DEFINE3(scsi, base, adapter, request__start,
     74     "struct scsipi_channel *"/*chan*/,
     75     "scsipi_adapter_req_t"/*req*/,
     76     "void *"/*arg*/);
     77 SDT_PROBE_DEFINE3(scsi, base, adapter, request__done,
     78     "struct scsipi_channel *"/*chan*/,
     79     "scsipi_adapter_req_t"/*req*/,
     80     "void *"/*arg*/);
     81 
     82 SDT_PROBE_DEFINE1(scsi, base, queue, batch__start,
     83     "struct scsipi_channel *"/*chan*/);
     84 SDT_PROBE_DEFINE2(scsi, base, queue, run,
     85     "struct scsipi_channel *"/*chan*/,
     86     "struct scsipi_xfer *"/*xs*/);
     87 SDT_PROBE_DEFINE1(scsi, base, queue, batch__done,
     88     "struct scsipi_channel *"/*chan*/);
     89 
     90 SDT_PROBE_DEFINE1(scsi, base, xfer, execute,  "struct scsipi_xfer *"/*xs*/);
     91 SDT_PROBE_DEFINE1(scsi, base, xfer, enqueue,  "struct scsipi_xfer *"/*xs*/);
     92 SDT_PROBE_DEFINE1(scsi, base, xfer, done,  "struct scsipi_xfer *"/*xs*/);
     93 SDT_PROBE_DEFINE1(scsi, base, xfer, redone,  "struct scsipi_xfer *"/*xs*/);
     94 SDT_PROBE_DEFINE1(scsi, base, xfer, complete,  "struct scsipi_xfer *"/*xs*/);
     95 SDT_PROBE_DEFINE1(scsi, base, xfer, restart,  "struct scsipi_xfer *"/*xs*/);
     96 SDT_PROBE_DEFINE1(scsi, base, xfer, free,  "struct scsipi_xfer *"/*xs*/);
     97 
     98 static int	scsipi_complete(struct scsipi_xfer *);
     99 static struct scsipi_channel*
    100 		scsipi_done_internal(struct scsipi_xfer *, bool);
    101 static void	scsipi_request_sense(struct scsipi_xfer *);
    102 static int	scsipi_enqueue(struct scsipi_xfer *);
    103 static void	scsipi_run_queue(struct scsipi_channel *chan);
    104 
    105 static void	scsipi_completion_thread(void *);
    106 
    107 static void	scsipi_get_tag(struct scsipi_xfer *);
    108 static void	scsipi_put_tag(struct scsipi_xfer *);
    109 
    110 static int	scsipi_get_resource(struct scsipi_channel *);
    111 static void	scsipi_put_resource(struct scsipi_channel *);
    112 
    113 static void	scsipi_async_event_max_openings(struct scsipi_channel *,
    114 		    struct scsipi_max_openings *);
    115 static void	scsipi_async_event_channel_reset(struct scsipi_channel *);
    116 
    117 static void	scsipi_channel_freeze_locked(struct scsipi_channel *, int);
    118 
    119 static void	scsipi_adapter_lock(struct scsipi_adapter *adapt);
    120 static void	scsipi_adapter_unlock(struct scsipi_adapter *adapt);
    121 
    122 static void	scsipi_update_timeouts(struct scsipi_xfer *xs);
    123 
    124 static struct pool scsipi_xfer_pool;
    125 
    126 int scsipi_xs_count = 0;
    127 
    128 /*
    129  * scsipi_init:
    130  *
    131  *	Called when a scsibus or atapibus is attached to the system
    132  *	to initialize shared data structures.
    133  */
    134 void
    135 scsipi_init(void)
    136 {
    137 	static int scsipi_init_done;
    138 
    139 	if (scsipi_init_done)
    140 		return;
    141 	scsipi_init_done = 1;
    142 
    143 	/* Initialize the scsipi_xfer pool. */
    144 	pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
    145 	    0, 0, "scxspl", NULL, IPL_BIO);
    146 	pool_prime(&scsipi_xfer_pool, 1);
    147 
    148 	scsipi_ioctl_init();
    149 }
    150 
    151 /*
    152  * scsipi_channel_init:
    153  *
    154  *	Initialize a scsipi_channel when it is attached.
    155  */
    156 int
    157 scsipi_channel_init(struct scsipi_channel *chan)
    158 {
    159 	struct scsipi_adapter *adapt = chan->chan_adapter;
    160 	int i;
    161 
    162 	/* Initialize shared data. */
    163 	scsipi_init();
    164 
    165 	/* Initialize the queues. */
    166 	TAILQ_INIT(&chan->chan_queue);
    167 	TAILQ_INIT(&chan->chan_complete);
    168 
    169 	for (i = 0; i < SCSIPI_CHAN_PERIPH_BUCKETS; i++)
    170 		LIST_INIT(&chan->chan_periphtab[i]);
    171 
    172 	/*
    173 	 * Create the asynchronous completion thread.
    174 	 */
    175 	if (kthread_create(PRI_NONE, 0, NULL, scsipi_completion_thread, chan,
    176 	    &chan->chan_thread, "%s", chan->chan_name)) {
    177 		aprint_error_dev(adapt->adapt_dev, "unable to create completion thread for "
    178 		    "channel %d\n", chan->chan_channel);
    179 		panic("scsipi_channel_init");
    180 	}
    181 
    182 	return 0;
    183 }
    184 
    185 /*
    186  * scsipi_channel_shutdown:
    187  *
    188  *	Shutdown a scsipi_channel.
    189  */
    190 void
    191 scsipi_channel_shutdown(struct scsipi_channel *chan)
    192 {
    193 
    194 	mutex_enter(chan_mtx(chan));
    195 	/*
    196 	 * Shut down the completion thread.
    197 	 */
    198 	chan->chan_tflags |= SCSIPI_CHANT_SHUTDOWN;
    199 	cv_broadcast(chan_cv_complete(chan));
    200 
    201 	/*
    202 	 * Now wait for the thread to exit.
    203 	 */
    204 	while (chan->chan_thread != NULL)
    205 		cv_wait(chan_cv_thread(chan), chan_mtx(chan));
    206 	mutex_exit(chan_mtx(chan));
    207 }
    208 
    209 static uint32_t
    210 scsipi_chan_periph_hash(uint64_t t, uint64_t l)
    211 {
    212 	uint32_t hash;
    213 
    214 	hash = hash32_buf(&t, sizeof(t), HASH32_BUF_INIT);
    215 	hash = hash32_buf(&l, sizeof(l), hash);
    216 
    217 	return hash & SCSIPI_CHAN_PERIPH_HASHMASK;
    218 }
    219 
    220 /*
    221  * scsipi_insert_periph:
    222  *
    223  *	Insert a periph into the channel.
    224  */
    225 void
    226 scsipi_insert_periph(struct scsipi_channel *chan, struct scsipi_periph *periph)
    227 {
    228 	uint32_t hash;
    229 
    230 	hash = scsipi_chan_periph_hash(periph->periph_target,
    231 	    periph->periph_lun);
    232 
    233 	mutex_enter(chan_mtx(chan));
    234 	LIST_INSERT_HEAD(&chan->chan_periphtab[hash], periph, periph_hash);
    235 	mutex_exit(chan_mtx(chan));
    236 }
    237 
    238 /*
    239  * scsipi_remove_periph:
    240  *
    241  *	Remove a periph from the channel.
    242  */
    243 void
    244 scsipi_remove_periph(struct scsipi_channel *chan,
    245     struct scsipi_periph *periph)
    246 {
    247 
    248 	LIST_REMOVE(periph, periph_hash);
    249 }
    250 
    251 /*
    252  * scsipi_lookup_periph:
    253  *
    254  *	Lookup a periph on the specified channel.
    255  */
    256 static struct scsipi_periph *
    257 scsipi_lookup_periph_internal(struct scsipi_channel *chan, int target, int lun, bool lock)
    258 {
    259 	struct scsipi_periph *periph;
    260 	uint32_t hash;
    261 
    262 	if (target >= chan->chan_ntargets ||
    263 	    lun >= chan->chan_nluns)
    264 		return NULL;
    265 
    266 	hash = scsipi_chan_periph_hash(target, lun);
    267 
    268 	if (lock)
    269 		mutex_enter(chan_mtx(chan));
    270 	LIST_FOREACH(periph, &chan->chan_periphtab[hash], periph_hash) {
    271 		if (periph->periph_target == target &&
    272 		    periph->periph_lun == lun)
    273 			break;
    274 	}
    275 	if (lock)
    276 		mutex_exit(chan_mtx(chan));
    277 
    278 	return periph;
    279 }
    280 
    281 struct scsipi_periph *
    282 scsipi_lookup_periph_locked(struct scsipi_channel *chan, int target, int lun)
    283 {
    284 	return scsipi_lookup_periph_internal(chan, target, lun, false);
    285 }
    286 
    287 struct scsipi_periph *
    288 scsipi_lookup_periph(struct scsipi_channel *chan, int target, int lun)
    289 {
    290 	return scsipi_lookup_periph_internal(chan, target, lun, true);
    291 }
    292 
    293 /*
    294  * scsipi_get_resource:
    295  *
    296  *	Allocate a single xfer `resource' from the channel.
    297  *
    298  *	NOTE: Must be called with channel lock held
    299  */
    300 static int
    301 scsipi_get_resource(struct scsipi_channel *chan)
    302 {
    303 	struct scsipi_adapter *adapt = chan->chan_adapter;
    304 
    305 	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
    306 		if (chan->chan_openings > 0) {
    307 			chan->chan_openings--;
    308 			return 1;
    309 		}
    310 		return 0;
    311 	}
    312 
    313 	if (adapt->adapt_openings > 0) {
    314 		adapt->adapt_openings--;
    315 		return 1;
    316 	}
    317 	return 0;
    318 }
    319 
    320 /*
    321  * scsipi_grow_resources:
    322  *
    323  *	Attempt to grow resources for a channel.  If this succeeds,
    324  *	we allocate one for our caller.
    325  *
    326  *	NOTE: Must be called with channel lock held
    327  */
    328 static inline int
    329 scsipi_grow_resources(struct scsipi_channel *chan)
    330 {
    331 
    332 	if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
    333 		if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
    334 			mutex_exit(chan_mtx(chan));
    335 			scsipi_adapter_request(chan,
    336 			    ADAPTER_REQ_GROW_RESOURCES, NULL);
    337 			mutex_enter(chan_mtx(chan));
    338 			return scsipi_get_resource(chan);
    339 		}
    340 		/*
    341 		 * ask the channel thread to do it. It'll have to thaw the
    342 		 * queue
    343 		 */
    344 		scsipi_channel_freeze_locked(chan, 1);
    345 		chan->chan_tflags |= SCSIPI_CHANT_GROWRES;
    346 		cv_broadcast(chan_cv_complete(chan));
    347 		return 0;
    348 	}
    349 
    350 	return 0;
    351 }
    352 
    353 /*
    354  * scsipi_put_resource:
    355  *
    356  *	Free a single xfer `resource' to the channel.
    357  *
    358  *	NOTE: Must be called with channel lock held
    359  */
    360 static void
    361 scsipi_put_resource(struct scsipi_channel *chan)
    362 {
    363 	struct scsipi_adapter *adapt = chan->chan_adapter;
    364 
    365 	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
    366 		chan->chan_openings++;
    367 	else
    368 		adapt->adapt_openings++;
    369 }
    370 
    371 /*
    372  * scsipi_get_tag:
    373  *
    374  *	Get a tag ID for the specified xfer.
    375  *
    376  *	NOTE: Must be called with channel lock held
    377  */
    378 static void
    379 scsipi_get_tag(struct scsipi_xfer *xs)
    380 {
    381 	struct scsipi_periph *periph = xs->xs_periph;
    382 	int bit, tag;
    383 	u_int word;
    384 
    385 	KASSERT(mutex_owned(chan_mtx(periph->periph_channel)));
    386 
    387 	bit = 0;	/* XXX gcc */
    388 	for (word = 0; word < PERIPH_NTAGWORDS; word++) {
    389 		bit = ffs(periph->periph_freetags[word]);
    390 		if (bit != 0)
    391 			break;
    392 	}
    393 #ifdef DIAGNOSTIC
    394 	if (word == PERIPH_NTAGWORDS) {
    395 		scsipi_printaddr(periph);
    396 		printf("no free tags\n");
    397 		panic("scsipi_get_tag");
    398 	}
    399 #endif
    400 
    401 	bit -= 1;
    402 	periph->periph_freetags[word] &= ~(1U << bit);
    403 	tag = (word << 5) | bit;
    404 
    405 	/* XXX Should eventually disallow this completely. */
    406 	if (tag >= periph->periph_openings) {
    407 		scsipi_printaddr(periph);
    408 		printf("WARNING: tag %d greater than available openings %d\n",
    409 		    tag, periph->periph_openings);
    410 	}
    411 
    412 	xs->xs_tag_id = tag;
    413 	SDT_PROBE3(scsi, base, tag, get,
    414 	    xs, xs->xs_tag_id, xs->xs_tag_type);
    415 }
    416 
    417 /*
    418  * scsipi_put_tag:
    419  *
    420  *	Put the tag ID for the specified xfer back into the pool.
    421  *
    422  *	NOTE: Must be called with channel lock held
    423  */
    424 static void
    425 scsipi_put_tag(struct scsipi_xfer *xs)
    426 {
    427 	struct scsipi_periph *periph = xs->xs_periph;
    428 	int word, bit;
    429 
    430 	KASSERT(mutex_owned(chan_mtx(periph->periph_channel)));
    431 
    432 	SDT_PROBE3(scsi, base, tag, put,
    433 	    xs, xs->xs_tag_id, xs->xs_tag_type);
    434 
    435 	word = xs->xs_tag_id >> 5;
    436 	bit = xs->xs_tag_id & 0x1f;
    437 
    438 	periph->periph_freetags[word] |= (1U << bit);
    439 }
    440 
    441 /*
    442  * scsipi_get_xs:
    443  *
    444  *	Allocate an xfer descriptor and associate it with the
    445  *	specified peripheral.  If the peripheral has no more
    446  *	available command openings, we either block waiting for
    447  *	one to become available, or fail.
    448  *
    449  *	When this routine is called with the channel lock held
    450  *	the flags must include XS_CTL_NOSLEEP.
    451  */
    452 struct scsipi_xfer *
    453 scsipi_get_xs(struct scsipi_periph *periph, int flags)
    454 {
    455 	struct scsipi_xfer *xs;
    456 	bool lock = (flags & XS_CTL_NOSLEEP) == 0;
    457 
    458 	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));
    459 
    460 	KASSERT(!cold);
    461 
    462 #ifdef DIAGNOSTIC
    463 	/*
    464 	 * URGENT commands can never be ASYNC.
    465 	 */
    466 	if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
    467 	    (XS_CTL_URGENT|XS_CTL_ASYNC)) {
    468 		scsipi_printaddr(periph);
    469 		printf("URGENT and ASYNC\n");
    470 		panic("scsipi_get_xs");
    471 	}
    472 #endif
    473 
    474 	/*
    475 	 * Wait for a command opening to become available.  Rules:
    476 	 *
    477 	 *	- All xfers must wait for an available opening.
    478 	 *	  Exception: URGENT xfers can proceed when
    479 	 *	  active == openings, because we use the opening
    480 	 *	  of the command we're recovering for.
    481 	 *	- if the periph has sense pending, only URGENT & REQSENSE
    482 	 *	  xfers may proceed.
    483 	 *
    484 	 *	- If the periph is recovering, only URGENT xfers may
    485 	 *	  proceed.
    486 	 *
    487 	 *	- If the periph is currently executing a recovery
    488 	 *	  command, URGENT commands must block, because only
    489 	 *	  one recovery command can execute at a time.
    490 	 */
    491 	if (lock)
    492 		mutex_enter(chan_mtx(periph->periph_channel));
    493 	for (;;) {
    494 		if (flags & XS_CTL_URGENT) {
    495 			if (periph->periph_active > periph->periph_openings)
    496 				goto wait_for_opening;
    497 			if (periph->periph_flags & PERIPH_SENSE) {
    498 				if ((flags & XS_CTL_REQSENSE) == 0)
    499 					goto wait_for_opening;
    500 			} else {
    501 				if ((periph->periph_flags &
    502 				    PERIPH_RECOVERY_ACTIVE) != 0)
    503 					goto wait_for_opening;
    504 				periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
    505 			}
    506 			break;
    507 		}
    508 		if (periph->periph_active >= periph->periph_openings ||
    509 		    (periph->periph_flags & PERIPH_RECOVERING) != 0)
    510 			goto wait_for_opening;
    511 		periph->periph_active++;
    512 		KASSERT(mutex_owned(chan_mtx(periph->periph_channel)));
    513 		break;
    514 
    515  wait_for_opening:
    516 		if (flags & XS_CTL_NOSLEEP) {
    517 			KASSERT(!lock);
    518 			return NULL;
    519 		}
    520 		KASSERT(lock);
    521 		SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
    522 		periph->periph_flags |= PERIPH_WAITING;
    523 		cv_wait(periph_cv_periph(periph),
    524 		    chan_mtx(periph->periph_channel));
    525 	}
    526 	if (lock)
    527 		mutex_exit(chan_mtx(periph->periph_channel));
    528 
    529 	SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
    530 	xs = pool_get(&scsipi_xfer_pool,
    531 	    ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
    532 	if (xs == NULL) {
    533 		if (lock)
    534 			mutex_enter(chan_mtx(periph->periph_channel));
    535 		if (flags & XS_CTL_URGENT) {
    536 			if ((flags & XS_CTL_REQSENSE) == 0)
    537 				periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
    538 		} else
    539 			periph->periph_active--;
    540 		if (lock)
    541 			mutex_exit(chan_mtx(periph->periph_channel));
    542 		scsipi_printaddr(periph);
    543 		printf("unable to allocate %sscsipi_xfer\n",
    544 		    (flags & XS_CTL_URGENT) ? "URGENT " : "");
    545 	}
    546 
    547 	SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));
    548 
    549 	if (xs != NULL) {
    550 		memset(xs, 0, sizeof(*xs));
    551 		callout_init(&xs->xs_callout, 0);
    552 		xs->xs_periph = periph;
    553 		xs->xs_control = flags;
    554 		xs->xs_status = 0;
    555 		if ((flags & XS_CTL_NOSLEEP) == 0)
    556 			mutex_enter(chan_mtx(periph->periph_channel));
    557 		TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
    558 		KASSERT(mutex_owned(chan_mtx(periph->periph_channel)));
    559 		if ((flags & XS_CTL_NOSLEEP) == 0)
    560 			mutex_exit(chan_mtx(periph->periph_channel));
    561 	}
    562 	return xs;
    563 }
    564 
    565 /*
    566  * scsipi_put_xs:
    567  *
    568  *	Release an xfer descriptor, decreasing the outstanding command
    569  *	count for the peripheral.  If there is a thread waiting for
    570  *	an opening, wake it up.  If not, kick any queued I/O the
    571  *	peripheral may have.
    572  *
    573  *	NOTE: Must be called with channel lock held
    574  */
    575 void
    576 scsipi_put_xs(struct scsipi_xfer *xs)
    577 {
    578 	struct scsipi_periph *periph = xs->xs_periph;
    579 	int flags = xs->xs_control;
    580 
    581 	SDT_PROBE1(scsi, base, xfer, free,  xs);
    582 	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));
    583 	KASSERT(mutex_owned(chan_mtx(periph->periph_channel)));
    584 
    585 	TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
    586 	callout_destroy(&xs->xs_callout);
    587 	pool_put(&scsipi_xfer_pool, xs);
    588 
    589 #ifdef DIAGNOSTIC
    590 	if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
    591 	    periph->periph_active == 0) {
    592 		scsipi_printaddr(periph);
    593 		printf("recovery without a command to recovery for\n");
    594 		panic("scsipi_put_xs");
    595 	}
    596 #endif
    597 
    598 	if (flags & XS_CTL_URGENT) {
    599 		if ((flags & XS_CTL_REQSENSE) == 0)
    600 			periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
    601 	} else
    602 		periph->periph_active--;
    603 	if (periph->periph_active == 0 &&
    604 	    (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
    605 		periph->periph_flags &= ~PERIPH_WAITDRAIN;
    606 		cv_broadcast(periph_cv_active(periph));
    607 	}
    608 
    609 	if (periph->periph_flags & PERIPH_WAITING) {
    610 		periph->periph_flags &= ~PERIPH_WAITING;
    611 		cv_broadcast(periph_cv_periph(periph));
    612 	} else {
    613 		if (periph->periph_switch->psw_start != NULL &&
    614 		    device_is_active(periph->periph_dev)) {
    615 			SC_DEBUG(periph, SCSIPI_DB2,
    616 			    ("calling private start()\n"));
    617 			(*periph->periph_switch->psw_start)(periph);
    618 		}
    619 	}
    620 }
    621 
    622 /*
    623  * scsipi_channel_freeze:
    624  *
    625  *	Freeze a channel's xfer queue.
    626  */
    627 void
    628 scsipi_channel_freeze(struct scsipi_channel *chan, int count)
    629 {
    630 	bool lock = chan_running(chan) > 0;
    631 
    632 	if (lock)
    633 		mutex_enter(chan_mtx(chan));
    634 	chan->chan_qfreeze += count;
    635 	if (lock)
    636 		mutex_exit(chan_mtx(chan));
    637 }
    638 
    639 static void
    640 scsipi_channel_freeze_locked(struct scsipi_channel *chan, int count)
    641 {
    642 
    643 	chan->chan_qfreeze += count;
    644 }
    645 
    646 /*
    647  * scsipi_channel_thaw:
    648  *
    649  *	Thaw a channel's xfer queue.
    650  */
    651 void
    652 scsipi_channel_thaw(struct scsipi_channel *chan, int count)
    653 {
    654 	bool lock = chan_running(chan) > 0;
    655 
    656 	if (lock)
    657 		mutex_enter(chan_mtx(chan));
    658 	chan->chan_qfreeze -= count;
    659 	/*
    660 	 * Don't let the freeze count go negative.
    661 	 *
    662 	 * Presumably the adapter driver could keep track of this,
    663 	 * but it might just be easier to do this here so as to allow
    664 	 * multiple callers, including those outside the adapter driver.
    665 	 */
    666 	if (chan->chan_qfreeze < 0) {
    667 		chan->chan_qfreeze = 0;
    668 	}
    669 	if (lock)
    670 		mutex_exit(chan_mtx(chan));
    671 
    672 	/*
    673 	 * until the channel is running
    674 	 */
    675 	if (!lock)
    676 		return;
    677 
    678 	/*
    679 	 * Kick the channel's queue here.  Note, we may be running in
    680 	 * interrupt context (softclock or HBA's interrupt), so the adapter
    681 	 * driver had better not sleep.
    682 	 */
    683 	if (chan->chan_qfreeze == 0)
    684 		scsipi_run_queue(chan);
    685 }
    686 
    687 /*
    688  * scsipi_channel_timed_thaw:
    689  *
    690  *	Thaw a channel after some time has expired. This will also
    691  * 	run the channel's queue if the freeze count has reached 0.
    692  */
    693 void
    694 scsipi_channel_timed_thaw(void *arg)
    695 {
    696 	struct scsipi_channel *chan = arg;
    697 
    698 	scsipi_channel_thaw(chan, 1);
    699 }
    700 
    701 /*
    702  * scsipi_periph_freeze:
    703  *
    704  *	Freeze a device's xfer queue.
    705  */
    706 void
    707 scsipi_periph_freeze_locked(struct scsipi_periph *periph, int count)
    708 {
    709 
    710 	periph->periph_qfreeze += count;
    711 }
    712 
    713 /*
    714  * scsipi_periph_thaw:
    715  *
    716  *	Thaw a device's xfer queue.
    717  */
    718 void
    719 scsipi_periph_thaw_locked(struct scsipi_periph *periph, int count)
    720 {
    721 
    722 	periph->periph_qfreeze -= count;
    723 #ifdef DIAGNOSTIC
    724 	if (periph->periph_qfreeze < 0) {
    725 		static const char pc[] = "periph freeze count < 0";
    726 		scsipi_printaddr(periph);
    727 		printf("%s\n", pc);
    728 		panic(pc);
    729 	}
    730 #endif
    731 	if (periph->periph_qfreeze == 0 &&
    732 	    (periph->periph_flags & PERIPH_WAITING) != 0)
    733 		cv_broadcast(periph_cv_periph(periph));
    734 }
    735 
    736 void
    737 scsipi_periph_freeze(struct scsipi_periph *periph, int count)
    738 {
    739 
    740 	mutex_enter(chan_mtx(periph->periph_channel));
    741 	scsipi_periph_freeze_locked(periph, count);
    742 	mutex_exit(chan_mtx(periph->periph_channel));
    743 }
    744 
    745 void
    746 scsipi_periph_thaw(struct scsipi_periph *periph, int count)
    747 {
    748 
    749 	mutex_enter(chan_mtx(periph->periph_channel));
    750 	scsipi_periph_thaw_locked(periph, count);
    751 	mutex_exit(chan_mtx(periph->periph_channel));
    752 }
    753 
    754 /*
    755  * scsipi_periph_timed_thaw:
    756  *
    757  *	Thaw a device after some time has expired.
    758  */
    759 void
    760 scsipi_periph_timed_thaw(void *arg)
    761 {
    762 	struct scsipi_periph *periph = arg;
    763 	struct scsipi_channel *chan = periph->periph_channel;
    764 
    765 	callout_stop(&periph->periph_callout);
    766 
    767 	mutex_enter(chan_mtx(chan));
    768 	scsipi_periph_thaw_locked(periph, 1);
    769 	if ((periph->periph_channel->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
    770 		/*
    771 		 * Kick the channel's queue here.  Note, we're running in
    772 		 * interrupt context (softclock), so the adapter driver
    773 		 * had better not sleep.
    774 		 */
    775 		mutex_exit(chan_mtx(chan));
    776 		scsipi_run_queue(periph->periph_channel);
    777 	} else {
    778 		/*
    779 		 * Tell the completion thread to kick the channel's queue here.
    780 		 */
    781 		periph->periph_channel->chan_tflags |= SCSIPI_CHANT_KICK;
    782 		cv_broadcast(chan_cv_complete(chan));
    783 		mutex_exit(chan_mtx(chan));
    784 	}
    785 }
    786 
    787 /*
    788  * scsipi_wait_drain:
    789  *
    790  *	Wait for a periph's pending xfers to drain.
    791  */
    792 void
    793 scsipi_wait_drain(struct scsipi_periph *periph)
    794 {
    795 	struct scsipi_channel *chan = periph->periph_channel;
    796 
    797 	mutex_enter(chan_mtx(chan));
    798 	while (periph->periph_active != 0) {
    799 		periph->periph_flags |= PERIPH_WAITDRAIN;
    800 		cv_wait(periph_cv_active(periph), chan_mtx(chan));
    801 	}
    802 	mutex_exit(chan_mtx(chan));
    803 }
    804 
    805 /*
    806  * scsipi_kill_pending:
    807  *
    808  *	Kill off all pending xfers for a periph.
    809  *
    810  *	NOTE: Must be called with channel lock held
    811  */
    812 void
    813 scsipi_kill_pending(struct scsipi_periph *periph)
    814 {
    815 	struct scsipi_channel *chan = periph->periph_channel;
    816 
    817 	(*chan->chan_bustype->bustype_kill_pending)(periph);
    818 	while (periph->periph_active != 0) {
    819 		periph->periph_flags |= PERIPH_WAITDRAIN;
    820 		cv_wait(periph_cv_active(periph), chan_mtx(chan));
    821 	}
    822 }
    823 
    824 /*
    825  * scsipi_print_cdb:
    826  * prints a command descriptor block (for debug purpose, error messages,
    827  * SCSIVERBOSE, ...)
    828  */
    829 void
    830 scsipi_print_cdb(struct scsipi_generic *cmd)
    831 {
    832 	int i, j;
    833 
    834  	printf("0x%02x", cmd->opcode);
    835 
    836  	switch (CDB_GROUPID(cmd->opcode)) {
    837  	case CDB_GROUPID_0:
    838  		j = CDB_GROUP0;
    839  		break;
    840  	case CDB_GROUPID_1:
    841  		j = CDB_GROUP1;
    842  		break;
    843  	case CDB_GROUPID_2:
    844  		j = CDB_GROUP2;
    845  		break;
    846  	case CDB_GROUPID_3:
    847  		j = CDB_GROUP3;
    848  		break;
    849  	case CDB_GROUPID_4:
    850  		j = CDB_GROUP4;
    851  		break;
    852  	case CDB_GROUPID_5:
    853  		j = CDB_GROUP5;
    854  		break;
    855  	case CDB_GROUPID_6:
    856  		j = CDB_GROUP6;
    857  		break;
    858  	case CDB_GROUPID_7:
    859  		j = CDB_GROUP7;
    860  		break;
    861  	default:
    862  		j = 0;
    863  	}
    864  	if (j == 0)
    865  		j = sizeof (cmd->bytes);
    866  	for (i = 0; i < j-1; i++) /* already done the opcode */
    867  		printf(" %02x", cmd->bytes[i]);
    868 }
    869 
    870 /*
    871  * scsipi_interpret_sense:
    872  *
    873  *	Look at the returned sense and act on the error, determining
    874  *	the unix error number to pass back.  (0 = report no error)
    875  *
    876  *	NOTE: If we return ERESTART, we are expected to have
    877  *	thawed the device!
    878  *
    879  *	THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
    880  */
    881 int
    882 scsipi_interpret_sense(struct scsipi_xfer *xs)
    883 {
    884 	struct scsi_sense_data *sense;
    885 	struct scsipi_periph *periph = xs->xs_periph;
    886 	u_int8_t key;
    887 	int error;
    888 	u_int32_t info;
    889 	static const char *error_mes[] = {
    890 		"soft error (corrected)",
    891 		"not ready", "medium error",
    892 		"non-media hardware failure", "illegal request",
    893 		"unit attention", "readonly device",
    894 		"no data found", "vendor unique",
    895 		"copy aborted", "command aborted",
    896 		"search returned equal", "volume overflow",
    897 		"verify miscompare", "unknown error key"
    898 	};
    899 
    900 	sense = &xs->sense.scsi_sense;
    901 #ifdef SCSIPI_DEBUG
    902 	if (periph->periph_flags & SCSIPI_DB1) {
    903 	        int count, len;
    904 		scsipi_printaddr(periph);
    905 		printf(" sense debug information:\n");
    906 		printf("\tcode 0x%x valid %d\n",
    907 			SSD_RCODE(sense->response_code),
    908 			sense->response_code & SSD_RCODE_VALID ? 1 : 0);
    909 		printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
    910 			sense->segment,
    911 			SSD_SENSE_KEY(sense->flags),
    912 			sense->flags & SSD_ILI ? 1 : 0,
    913 			sense->flags & SSD_EOM ? 1 : 0,
    914 			sense->flags & SSD_FILEMARK ? 1 : 0);
    915 		printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d "
    916 			"extra bytes\n",
    917 			sense->info[0],
    918 			sense->info[1],
    919 			sense->info[2],
    920 			sense->info[3],
    921 			sense->extra_len);
    922 		len = SSD_ADD_BYTES_LIM(sense);
    923 		printf("\textra (up to %d bytes): ", len);
    924 		for (count = 0; count < len; count++)
    925 			printf("0x%x ", sense->csi[count]);
    926 		printf("\n");
    927 	}
    928 #endif
    929 
    930 	/*
    931 	 * If the periph has its own error handler, call it first.
    932 	 * If it returns a legit error value, return that, otherwise
    933 	 * it wants us to continue with normal error processing.
    934 	 */
    935 	if (periph->periph_switch->psw_error != NULL) {
    936 		SC_DEBUG(periph, SCSIPI_DB2,
    937 		    ("calling private err_handler()\n"));
    938 		error = (*periph->periph_switch->psw_error)(xs);
    939 		if (error != EJUSTRETURN)
    940 			return error;
    941 	}
    942 	/* otherwise use the default */
    943 	switch (SSD_RCODE(sense->response_code)) {
    944 
    945 		/*
    946 		 * Old SCSI-1 and SASI devices respond with
    947 		 * codes other than 70.
    948 		 */
    949 	case 0x00:		/* no error (command completed OK) */
    950 		return 0;
    951 	case 0x04:		/* drive not ready after it was selected */
    952 		if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
    953 			periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
    954 		if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
    955 			return 0;
    956 		/* XXX - display some sort of error here? */
    957 		return EIO;
    958 	case 0x20:		/* invalid command */
    959 		if ((xs->xs_control &
    960 		     XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
    961 			return 0;
    962 		return EINVAL;
    963 	case 0x25:		/* invalid LUN (Adaptec ACB-4000) */
    964 		return EACCES;
    965 
    966 		/*
    967 		 * If it's code 70, use the extended stuff and
    968 		 * interpret the key
    969 		 */
    970 	case 0x71:		/* delayed error */
    971 		scsipi_printaddr(periph);
    972 		key = SSD_SENSE_KEY(sense->flags);
    973 		printf(" DEFERRED ERROR, key = 0x%x\n", key);
    974 		/* FALLTHROUGH */
    975 	case 0x70:
    976 		if ((sense->response_code & SSD_RCODE_VALID) != 0)
    977 			info = _4btol(sense->info);
    978 		else
    979 			info = 0;
    980 		key = SSD_SENSE_KEY(sense->flags);
    981 
    982 		switch (key) {
    983 		case SKEY_NO_SENSE:
    984 		case SKEY_RECOVERED_ERROR:
    985 			if (xs->resid == xs->datalen && xs->datalen) {
    986 				/*
    987 				 * Why is this here?
    988 				 */
    989 				xs->resid = 0;	/* not short read */
    990 			}
    991 			error = 0;
    992 			break;
    993 		case SKEY_EQUAL:
    994 			error = 0;
    995 			break;
    996 		case SKEY_NOT_READY:
    997 			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
    998 				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
    999 			if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
   1000 				return 0;
   1001 			if (sense->asc == 0x3A) {
   1002 				error = ENODEV; /* Medium not present */
   1003 				if (xs->xs_control & XS_CTL_SILENT_NODEV)
   1004 					return error;
   1005 			} else
   1006 				error = EIO;
   1007 			if ((xs->xs_control & XS_CTL_SILENT) != 0)
   1008 				return error;
   1009 			break;
   1010 		case SKEY_ILLEGAL_REQUEST:
   1011 			if ((xs->xs_control &
   1012 			     XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
   1013 				return 0;
   1014 			/*
   1015 			 * Handle the case where a device reports
   1016 			 * Logical Unit Not Supported during discovery.
   1017 			 */
   1018 			if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
   1019 			    sense->asc == 0x25 &&
   1020 			    sense->ascq == 0x00)
   1021 				return EINVAL;
   1022 			if ((xs->xs_control & XS_CTL_SILENT) != 0)
   1023 				return EIO;
   1024 			error = EINVAL;
   1025 			break;
   1026 		case SKEY_UNIT_ATTENTION:
   1027 			if (sense->asc == 0x29 &&
   1028 			    sense->ascq == 0x00) {
   1029 				/* device or bus reset */
   1030 				return ERESTART;
   1031 			}
   1032 			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
   1033 				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
   1034 			if ((xs->xs_control &
   1035 			     XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
   1036 				/* XXX Should reupload any transient state. */
   1037 				(periph->periph_flags &
   1038 				 PERIPH_REMOVABLE) == 0) {
   1039 				return ERESTART;
   1040 			}
   1041 			if ((xs->xs_control & XS_CTL_SILENT) != 0)
   1042 				return EIO;
   1043 			error = EIO;
   1044 			break;
   1045 		case SKEY_DATA_PROTECT:
   1046 			error = EROFS;
   1047 			break;
   1048 		case SKEY_BLANK_CHECK:
   1049 			error = 0;
   1050 			break;
   1051 		case SKEY_ABORTED_COMMAND:
   1052 			if (xs->xs_retries != 0) {
   1053 				xs->xs_retries--;
   1054 				error = ERESTART;
   1055 			} else
   1056 				error = EIO;
   1057 			break;
   1058 		case SKEY_VOLUME_OVERFLOW:
   1059 			error = ENOSPC;
   1060 			break;
   1061 		case SKEY_MEDIUM_ERROR:
   1062 			if (xs->xs_retries != 0) {
   1063 				xs->xs_retries--;
   1064 				error = ERESTART;
   1065 			} else
   1066 				error = EIO;
   1067 			break;
   1068 		default:
   1069 			error = EIO;
   1070 			break;
   1071 		}
   1072 
   1073 		/* Print verbose decode if appropriate and possible */
   1074 		if ((key == 0) ||
   1075 		    ((xs->xs_control & XS_CTL_SILENT) != 0) ||
   1076 		    (scsipi_print_sense(xs, 0) != 0))
   1077 			return error;
   1078 
   1079 		/* Print brief(er) sense information */
   1080 		scsipi_printaddr(periph);
   1081 		printf("%s", error_mes[key - 1]);
   1082 		if ((sense->response_code & SSD_RCODE_VALID) != 0) {
   1083 			switch (key) {
   1084 			case SKEY_NOT_READY:
   1085 			case SKEY_ILLEGAL_REQUEST:
   1086 			case SKEY_UNIT_ATTENTION:
   1087 			case SKEY_DATA_PROTECT:
   1088 				break;
   1089 			case SKEY_BLANK_CHECK:
   1090 				printf(", requested size: %d (decimal)",
   1091 				    info);
   1092 				break;
   1093 			case SKEY_ABORTED_COMMAND:
   1094 				if (xs->xs_retries)
   1095 					printf(", retrying");
   1096 				printf(", cmd 0x%x, info 0x%x",
   1097 				    xs->cmd->opcode, info);
   1098 				break;
   1099 			default:
   1100 				printf(", info = %d (decimal)", info);
   1101 			}
   1102 		}
   1103 		if (sense->extra_len != 0) {
   1104 			int n;
   1105 			printf(", data =");
   1106 			for (n = 0; n < sense->extra_len; n++)
   1107 				printf(" %02x",
   1108 				    sense->csi[n]);
   1109 		}
   1110 		printf("\n");
   1111 		return error;
   1112 
   1113 	/*
   1114 	 * Some other code, just report it
   1115 	 */
   1116 	default:
   1117 #if    defined(SCSIDEBUG) || defined(DEBUG)
   1118 	{
   1119 		static const char *uc = "undecodable sense error";
   1120 		int i;
   1121 		u_int8_t *cptr = (u_int8_t *) sense;
   1122 		scsipi_printaddr(periph);
   1123 		if (xs->cmd == &xs->cmdstore) {
   1124 			printf("%s for opcode 0x%x, data=",
   1125 			    uc, xs->cmdstore.opcode);
   1126 		} else {
   1127 			printf("%s, data=", uc);
   1128 		}
   1129 		for (i = 0; i < sizeof (sense); i++)
   1130 			printf(" 0x%02x", *(cptr++) & 0xff);
   1131 		printf("\n");
   1132 	}
   1133 #else
   1134 		scsipi_printaddr(periph);
   1135 		printf("Sense Error Code 0x%x",
   1136 			SSD_RCODE(sense->response_code));
   1137 		if ((sense->response_code & SSD_RCODE_VALID) != 0) {
   1138 			struct scsi_sense_data_unextended *usense =
   1139 			    (struct scsi_sense_data_unextended *)sense;
   1140 			printf(" at block no. %d (decimal)",
   1141 			    _3btol(usense->block));
   1142 		}
   1143 		printf("\n");
   1144 #endif
   1145 		return EIO;
   1146 	}
   1147 }
   1148 
   1149 /*
   1150  * scsipi_test_unit_ready:
   1151  *
   1152  *	Issue a `test unit ready' request.
   1153  */
   1154 int
   1155 scsipi_test_unit_ready(struct scsipi_periph *periph, int flags)
   1156 {
   1157 	struct scsi_test_unit_ready cmd;
   1158 	int retries;
   1159 
   1160 	/* some ATAPI drives don't support TEST UNIT READY. Sigh */
   1161 	if (periph->periph_quirks & PQUIRK_NOTUR)
   1162 		return 0;
   1163 
   1164 	if (flags & XS_CTL_DISCOVERY)
   1165 		retries = 0;
   1166 	else
   1167 		retries = SCSIPIRETRIES;
   1168 
   1169 	memset(&cmd, 0, sizeof(cmd));
   1170 	cmd.opcode = SCSI_TEST_UNIT_READY;
   1171 
   1172 	return scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
   1173 	    retries, 10000, NULL, flags);
   1174 }
   1175 
   1176 static const struct scsipi_inquiry3_pattern {
   1177 	const char vendor[8];
   1178 	const char product[16];
   1179 	const char revision[4];
   1180 } scsipi_inquiry3_quirk[] = {
   1181 	{ "ES-6600 ", "", "" },
   1182 };
   1183 
   1184 static int
   1185 scsipi_inquiry3_ok(const struct scsipi_inquiry_data *ib)
   1186 {
   1187 	for (size_t i = 0; i < __arraycount(scsipi_inquiry3_quirk); i++) {
   1188 		const struct scsipi_inquiry3_pattern *q =
   1189 		    &scsipi_inquiry3_quirk[i];
   1190 #define MATCH(field) \
   1191     (q->field[0] ? memcmp(ib->field, q->field, sizeof(ib->field)) == 0 : 1)
   1192 		if (MATCH(vendor) && MATCH(product) && MATCH(revision))
   1193 			return 0;
   1194 	}
   1195 	return 1;
   1196 }
   1197 
   1198 /*
   1199  * scsipi_inquire:
   1200  *
   1201  *	Ask the device about itself.
   1202  */
   1203 int
   1204 scsipi_inquire(struct scsipi_periph *periph, struct scsipi_inquiry_data *inqbuf,
   1205     int flags)
   1206 {
   1207 	struct scsipi_inquiry cmd;
   1208 	int error;
   1209 	int retries;
   1210 
   1211 	if (flags & XS_CTL_DISCOVERY)
   1212 		retries = 0;
   1213 	else
   1214 		retries = SCSIPIRETRIES;
   1215 
   1216 	/*
   1217 	 * If we request more data than the device can provide, it SHOULD just
   1218 	 * return a short response.  However, some devices error with an
   1219 	 * ILLEGAL REQUEST sense code, and yet others have even more special
   1220 	 * failure modes (such as the GL641USB flash adapter, which goes loony
   1221 	 * and sends corrupted CRCs).  To work around this, and to bring our
   1222 	 * behavior more in line with other OSes, we do a shorter inquiry,
   1223 	 * covering all the SCSI-2 information, first, and then request more
   1224 	 * data iff the "additional length" field indicates there is more.
   1225 	 * - mycroft, 2003/10/16
   1226 	 */
   1227 	memset(&cmd, 0, sizeof(cmd));
   1228 	cmd.opcode = INQUIRY;
   1229 	cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI2;
   1230 	error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
   1231 	    (void *)inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI2, retries,
   1232 	    10000, NULL, flags | XS_CTL_DATA_IN);
   1233 	if (!error &&
   1234 	    inqbuf->additional_length > SCSIPI_INQUIRY_LENGTH_SCSI2 - 4) {
   1235 	    if (scsipi_inquiry3_ok(inqbuf)) {
   1236 #if 0
   1237 printf("inquire: addlen=%d, retrying\n", inqbuf->additional_length);
   1238 #endif
   1239 		cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI3;
   1240 		error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
   1241 		    (void *)inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI3, retries,
   1242 		    10000, NULL, flags | XS_CTL_DATA_IN);
   1243 #if 0
   1244 printf("inquire: error=%d\n", error);
   1245 #endif
   1246 	    }
   1247 	}
   1248 
   1249 #ifdef SCSI_OLD_NOINQUIRY
   1250 	/*
   1251 	 * Kludge for the Adaptec ACB-4000 SCSI->MFM translator.
   1252 	 * This board doesn't support the INQUIRY command at all.
   1253 	 */
   1254 	if (error == EINVAL || error == EACCES) {
   1255 		/*
   1256 		 * Conjure up an INQUIRY response.
   1257 		 */
   1258 		inqbuf->device = (error == EINVAL ?
   1259 			 SID_QUAL_LU_PRESENT :
   1260 			 SID_QUAL_LU_NOTPRESENT) | T_DIRECT;
   1261 		inqbuf->dev_qual2 = 0;
   1262 		inqbuf->version = 0;
   1263 		inqbuf->response_format = SID_FORMAT_SCSI1;
   1264 		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
   1265 		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
   1266 		memcpy(inqbuf->vendor, "ADAPTEC ACB-4000            ", 28);
   1267 		error = 0;
   1268 	}
   1269 
   1270 	/*
   1271 	 * Kludge for the Emulex MT-02 SCSI->QIC translator.
   1272 	 * This board gives an empty response to an INQUIRY command.
   1273 	 */
   1274 	else if (error == 0 &&
   1275 	    inqbuf->device == (SID_QUAL_LU_PRESENT | T_DIRECT) &&
   1276 	    inqbuf->dev_qual2 == 0 &&
   1277 	    inqbuf->version == 0 &&
   1278 	    inqbuf->response_format == SID_FORMAT_SCSI1) {
   1279 		/*
   1280 		 * Fill out the INQUIRY response.
   1281 		 */
   1282 		inqbuf->device = (SID_QUAL_LU_PRESENT | T_SEQUENTIAL);
   1283 		inqbuf->dev_qual2 = SID_REMOVABLE;
   1284 		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
   1285 		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
   1286 		memcpy(inqbuf->vendor, "EMULEX  MT-02 QIC           ", 28);
   1287 	}
   1288 #endif /* SCSI_OLD_NOINQUIRY */
   1289 
   1290 	return error;
   1291 }
   1292 
   1293 /*
   1294  * scsipi_prevent:
   1295  *
   1296  *	Prevent or allow the user to remove the media
   1297  */
   1298 int
   1299 scsipi_prevent(struct scsipi_periph *periph, int type, int flags)
   1300 {
   1301 	struct scsi_prevent_allow_medium_removal cmd;
   1302 
   1303 	if (periph->periph_quirks & PQUIRK_NODOORLOCK)
   1304 		return 0;
   1305 
   1306 	memset(&cmd, 0, sizeof(cmd));
   1307 	cmd.opcode = SCSI_PREVENT_ALLOW_MEDIUM_REMOVAL;
   1308 	cmd.how = type;
   1309 
   1310 	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
   1311 	    SCSIPIRETRIES, 5000, NULL, flags));
   1312 }
   1313 
   1314 /*
   1315  * scsipi_start:
   1316  *
   1317  *	Send a START UNIT.
   1318  */
   1319 int
   1320 scsipi_start(struct scsipi_periph *periph, int type, int flags)
   1321 {
   1322 	struct scsipi_start_stop cmd;
   1323 
   1324 	memset(&cmd, 0, sizeof(cmd));
   1325 	cmd.opcode = START_STOP;
   1326 	cmd.byte2 = 0x00;
   1327 	cmd.how = type;
   1328 
   1329 	return scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
   1330 	    SCSIPIRETRIES, (type & SSS_START) ? 60000 : 10000, NULL, flags);
   1331 }
   1332 
   1333 /*
   1334  * scsipi_mode_sense, scsipi_mode_sense_big:
   1335  *	get a sense page from a device
   1336  */
   1337 
   1338 int
   1339 scsipi_mode_sense(struct scsipi_periph *periph, int byte2, int page,
   1340     struct scsi_mode_parameter_header_6 *data, int len, int flags, int retries,
   1341     int timeout)
   1342 {
   1343 	struct scsi_mode_sense_6 cmd;
   1344 
   1345 	memset(&cmd, 0, sizeof(cmd));
   1346 	cmd.opcode = SCSI_MODE_SENSE_6;
   1347 	cmd.byte2 = byte2;
   1348 	cmd.page = page;
   1349 	cmd.length = len & 0xff;
   1350 
   1351 	return scsipi_command(periph, (void *)&cmd, sizeof(cmd),
   1352 	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_IN);
   1353 }
   1354 
   1355 int
   1356 scsipi_mode_sense_big(struct scsipi_periph *periph, int byte2, int page,
   1357     struct scsi_mode_parameter_header_10 *data, int len, int flags, int retries,
   1358     int timeout)
   1359 {
   1360 	struct scsi_mode_sense_10 cmd;
   1361 
   1362 	memset(&cmd, 0, sizeof(cmd));
   1363 	cmd.opcode = SCSI_MODE_SENSE_10;
   1364 	cmd.byte2 = byte2;
   1365 	cmd.page = page;
   1366 	_lto2b(len, cmd.length);
   1367 
   1368 	return scsipi_command(periph, (void *)&cmd, sizeof(cmd),
   1369 	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_IN);
   1370 }
   1371 
   1372 int
   1373 scsipi_mode_select(struct scsipi_periph *periph, int byte2,
   1374     struct scsi_mode_parameter_header_6 *data, int len, int flags, int retries,
   1375     int timeout)
   1376 {
   1377 	struct scsi_mode_select_6 cmd;
   1378 
   1379 	memset(&cmd, 0, sizeof(cmd));
   1380 	cmd.opcode = SCSI_MODE_SELECT_6;
   1381 	cmd.byte2 = byte2;
   1382 	cmd.length = len & 0xff;
   1383 
   1384 	return scsipi_command(periph, (void *)&cmd, sizeof(cmd),
   1385 	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_OUT);
   1386 }
   1387 
   1388 int
   1389 scsipi_mode_select_big(struct scsipi_periph *periph, int byte2,
   1390     struct scsi_mode_parameter_header_10 *data, int len, int flags, int retries,
   1391     int timeout)
   1392 {
   1393 	struct scsi_mode_select_10 cmd;
   1394 
   1395 	memset(&cmd, 0, sizeof(cmd));
   1396 	cmd.opcode = SCSI_MODE_SELECT_10;
   1397 	cmd.byte2 = byte2;
   1398 	_lto2b(len, cmd.length);
   1399 
   1400 	return scsipi_command(periph, (void *)&cmd, sizeof(cmd),
   1401 	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_OUT);
   1402 }
   1403 
   1404 /*
   1405  * scsipi_get_opcodeinfo:
   1406  *
   1407  * query the device for supported commands and their timeout
   1408  * building a timeout lookup table if timeout information is available.
   1409  */
   1410 void
   1411 scsipi_get_opcodeinfo(struct scsipi_periph *periph)
   1412 {
   1413 	u_int8_t *data;
   1414 	int len = 16*1024;
   1415 	int rc;
   1416 	int retries;
   1417 	struct scsi_repsuppopcode cmd;
   1418 
   1419 	/* refrain from asking for supported opcodes */
   1420 	if (periph->periph_quirks & PQUIRK_NOREPSUPPOPC ||
   1421 	    periph->periph_type == T_PROCESSOR || /* spec. */
   1422 	    periph->periph_type == T_CDROM) /* spec. */
   1423 		return;
   1424 
   1425 	scsipi_free_opcodeinfo(periph);
   1426 
   1427 	/*
   1428 	 * query REPORT SUPPORTED OPERATION CODES
   1429 	 * if OK
   1430 	 *   enumerate all codes
   1431 	 *     if timeout exists insert maximum into opcode table
   1432 	 */
   1433 	data = malloc(len, M_DEVBUF, M_WAITOK|M_ZERO);
   1434 
   1435 	memset(&cmd, 0, sizeof(cmd));
   1436 	cmd.opcode = SCSI_MAINTENANCE_IN;
   1437 	cmd.svcaction = RSOC_REPORT_SUPPORTED_OPCODES;
   1438 	cmd.repoption = RSOC_RCTD|RSOC_ALL;
   1439 	_lto4b(len, cmd.alloclen);
   1440 
   1441 	/* loop to skip any UNIT ATTENTIONS at this point */
   1442 	retries = 3;
   1443 	do {
   1444 		rc = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
   1445 				    (void *)data, len, 0, 60000, NULL,
   1446 				    XS_CTL_DATA_IN|XS_CTL_SILENT);
   1447 #ifdef SCSIPI_DEBUG
   1448 		if (rc != 0) {
   1449 			SC_DEBUG(periph, SCSIPI_DB3,
   1450 				("SCSI_MAINTENANCE_IN"
   1451 			 	"[RSOC_REPORT_SUPPORTED_OPCODES] command"
   1452 				" failed: rc=%d, retries=%d\n",
   1453 				rc, retries));
   1454 		}
   1455 #endif
   1456         } while (rc == EIO && retries-- > 0);
   1457 
   1458 	if (rc == 0) {
   1459 		int count;
   1460                 int dlen = _4btol(data);
   1461                 u_int8_t *c = data + 4;
   1462 
   1463 		SC_DEBUG(periph, SCSIPI_DB3,
   1464 			 ("supported opcode timeout-values loaded\n"));
   1465 		SC_DEBUG(periph, SCSIPI_DB3,
   1466 			 ("CMD  LEN  SA    spec  nom. time  cmd timeout\n"));
   1467 
   1468 		struct scsipi_opcodes *tot = malloc(sizeof(struct scsipi_opcodes),
   1469 		    M_DEVBUF, M_WAITOK|M_ZERO);
   1470 
   1471 		count = 0;
   1472                 while (tot != NULL &&
   1473 		       dlen >= (int)sizeof(struct scsi_repsupopcode_all_commands_descriptor)) {
   1474                         struct scsi_repsupopcode_all_commands_descriptor *acd
   1475 				= (struct scsi_repsupopcode_all_commands_descriptor *)c;
   1476 #ifdef SCSIPI_DEBUG
   1477                         int cdblen = _2btol((const u_int8_t *)&acd->cdblen);
   1478 #endif
   1479                         dlen -= sizeof(struct scsi_repsupopcode_all_commands_descriptor);
   1480                         c += sizeof(struct scsi_repsupopcode_all_commands_descriptor);
   1481                         SC_DEBUG(periph, SCSIPI_DB3,
   1482 				 ("0x%02x(%2d) ", acd->opcode, cdblen));
   1483 
   1484 			tot->opcode_info[acd->opcode].ti_flags = SCSIPI_TI_VALID;
   1485 
   1486                         if (acd->flags & RSOC_ACD_SERVACTV) {
   1487                                 SC_DEBUGN(periph, SCSIPI_DB3,
   1488 					 ("0x%02x%02x ",
   1489 					  acd->serviceaction[0],
   1490 					  acd->serviceaction[1]));
   1491                         } else {
   1492 				SC_DEBUGN(periph, SCSIPI_DB3, ("       "));
   1493                         }
   1494 
   1495                         if (acd->flags & RSOC_ACD_CTDP
   1496 			    && dlen >= (int)sizeof(struct scsi_repsupopcode_timeouts_descriptor)) {
   1497                                 struct scsi_repsupopcode_timeouts_descriptor *td
   1498 					= (struct scsi_repsupopcode_timeouts_descriptor *)c;
   1499                                 long nomto = _4btol(td->nom_process_timeout);
   1500                                 long cmdto = _4btol(td->cmd_process_timeout);
   1501 				long t = (cmdto > nomto) ? cmdto : nomto;
   1502 
   1503                                 dlen -= sizeof(struct scsi_repsupopcode_timeouts_descriptor);
   1504                                 c += sizeof(struct scsi_repsupopcode_timeouts_descriptor);
   1505 
   1506                                 SC_DEBUGN(periph, SCSIPI_DB3,
   1507 					  ("0x%02x %10ld %10ld",
   1508 					   td->cmd_specific,
   1509 					   nomto, cmdto));
   1510 
   1511 				if (t > tot->opcode_info[acd->opcode].ti_timeout) {
   1512 					tot->opcode_info[acd->opcode].ti_timeout = t;
   1513 					++count;
   1514 				}
   1515                         }
   1516                         SC_DEBUGN(periph, SCSIPI_DB3,("\n"));
   1517                 }
   1518 
   1519 		if (count > 0) {
   1520 			periph->periph_opcs = tot;
   1521 		} else {
   1522 			free(tot, M_DEVBUF);
   1523 			SC_DEBUG(periph, SCSIPI_DB3,
   1524 			 	("no usable timeout values available\n"));
   1525 		}
   1526 	} else {
   1527 		SC_DEBUG(periph, SCSIPI_DB3,
   1528 			 ("SCSI_MAINTENANCE_IN"
   1529 			  "[RSOC_REPORT_SUPPORTED_OPCODES] failed error=%d"
   1530 			  " - no device provided timeout "
   1531 			  "values available\n", rc));
   1532 	}
   1533 
   1534 	free(data, M_DEVBUF);
   1535 }
   1536 
   1537 /*
   1538  * scsipi_update_timeouts:
   1539  * 	Override timeout value if device/config provided
   1540  *      timeouts are available.
   1541  */
   1542 static void
   1543 scsipi_update_timeouts(struct scsipi_xfer *xs)
   1544 {
   1545 	struct scsipi_opcodes *opcs;
   1546 	u_int8_t cmd;
   1547 	int timeout;
   1548 	struct scsipi_opinfo *oi;
   1549 
   1550 	if (xs->timeout <= 0) {
   1551 		return;
   1552 	}
   1553 
   1554 	opcs = xs->xs_periph->periph_opcs;
   1555 
   1556 	if (opcs == NULL) {
   1557 		return;
   1558 	}
   1559 
   1560 	cmd = xs->cmd->opcode;
   1561 	oi = &opcs->opcode_info[cmd];
   1562 
   1563 	timeout = 1000 * (int)oi->ti_timeout;
   1564 
   1565 
   1566 	if (timeout > xs->timeout && timeout < 86400000) {
   1567 		/*
   1568 		 * pick up device configured timeouts if they
   1569 		 * are longer than the requested ones but less
   1570 		 * than a day
   1571 		 */
   1572 #ifdef SCSIPI_DEBUG
   1573 		if ((oi->ti_flags & SCSIPI_TI_LOGGED) == 0) {
   1574 			SC_DEBUG(xs->xs_periph, SCSIPI_DB3,
   1575 				 ("Overriding command 0x%02x "
   1576 				  "timeout of %d with %d ms\n",
   1577 				  cmd, xs->timeout, timeout));
   1578 			oi->ti_flags |= SCSIPI_TI_LOGGED;
   1579 		}
   1580 #endif
   1581 		xs->timeout = timeout;
   1582 	}
   1583 }
   1584 
   1585 /*
   1586  * scsipi_free_opcodeinfo:
   1587  *
   1588  * free the opcode information table
   1589  */
   1590 void
   1591 scsipi_free_opcodeinfo(struct scsipi_periph *periph)
   1592 {
   1593 	if (periph->periph_opcs != NULL) {
   1594 		free(periph->periph_opcs, M_DEVBUF);
   1595 	}
   1596 
   1597 	periph->periph_opcs = NULL;
   1598 }
   1599 
   1600 /*
   1601  * scsipi_done:
   1602  *
   1603  *	This routine is called by an adapter's interrupt handler when
   1604  *	an xfer is completed.
   1605  */
   1606 void
   1607 scsipi_done(struct scsipi_xfer *xs)
   1608 {
   1609 	struct scsipi_channel *chan;
   1610 	/*
   1611 	 * If there are more xfers on the channel's queue, attempt to
   1612 	 * run them.
   1613 	 */
   1614 	if ((chan = scsipi_done_internal(xs, true)) != NULL)
   1615 		scsipi_run_queue(chan);
   1616 }
   1617 
   1618 /*
   1619  * Just like scsipi_done(), but no recursion.  Useful if aborting the current
   1620  * transfer.
   1621  */
   1622 void
   1623 scsipi_done_once(struct scsipi_xfer *xs)
   1624 {
   1625 	(void)scsipi_done_internal(xs, false);
   1626 }
   1627 
   1628 static struct scsipi_channel*
   1629 scsipi_done_internal(struct scsipi_xfer *xs, bool more)
   1630 {
   1631 	struct scsipi_periph *periph = xs->xs_periph;
   1632 	struct scsipi_channel *chan = periph->periph_channel;
   1633 	int freezecnt;
   1634 
   1635 	SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
   1636 #ifdef SCSIPI_DEBUG
   1637 	if (periph->periph_dbflags & SCSIPI_DB1)
   1638 		show_scsipi_cmd(xs);
   1639 #endif
   1640 
   1641 	mutex_enter(chan_mtx(chan));
   1642 	SDT_PROBE1(scsi, base, xfer, done,  xs);
   1643 	/*
   1644 	 * The resource this command was using is now free.
   1645 	 */
   1646 	if (xs->xs_status & XS_STS_DONE) {
   1647 		/* XXX in certain circumstances, such as a device
   1648 		 * being detached, a xs that has already been
   1649 		 * scsipi_done()'d by the main thread will be done'd
   1650 		 * again by scsibusdetach(). Putting the xs on the
   1651 		 * chan_complete queue causes list corruption and
   1652 		 * everyone dies. This prevents that, but perhaps
   1653 		 * there should be better coordination somewhere such
   1654 		 * that this won't ever happen (and can be turned into
   1655 		 * a KASSERT().
   1656 		 */
   1657 		SDT_PROBE1(scsi, base, xfer, redone,  xs);
   1658 		mutex_exit(chan_mtx(chan));
   1659 		goto out;
   1660 	}
   1661 	scsipi_put_resource(chan);
   1662 	xs->xs_periph->periph_sent--;
   1663 
   1664 	/*
   1665 	 * If the command was tagged, free the tag.
   1666 	 */
   1667 	if (XS_CTL_TAGTYPE(xs) != 0)
   1668 		scsipi_put_tag(xs);
   1669 	else
   1670 		periph->periph_flags &= ~PERIPH_UNTAG;
   1671 
   1672 	/* Mark the command as `done'. */
   1673 	xs->xs_status |= XS_STS_DONE;
   1674 
   1675 #ifdef DIAGNOSTIC
   1676 	if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
   1677 	    (XS_CTL_ASYNC|XS_CTL_POLL))
   1678 		panic("scsipi_done: ASYNC and POLL");
   1679 #endif
   1680 
   1681 	/*
   1682 	 * If the xfer had an error of any sort, freeze the
   1683 	 * periph's queue.  Freeze it again if we were requested
   1684 	 * to do so in the xfer.
   1685 	 */
   1686 	freezecnt = 0;
   1687 	if (xs->error != XS_NOERROR)
   1688 		freezecnt++;
   1689 	if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
   1690 		freezecnt++;
   1691 	if (freezecnt != 0)
   1692 		scsipi_periph_freeze_locked(periph, freezecnt);
   1693 
   1694 	/*
   1695 	 * record the xfer with a pending sense, in case a SCSI reset is
   1696 	 * received before the thread is waked up.
   1697 	 */
   1698 	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
   1699 		periph->periph_flags |= PERIPH_SENSE;
   1700 		periph->periph_xscheck = xs;
   1701 	}
   1702 
   1703 	/*
   1704 	 * If this was an xfer that was not to complete asynchronously,
   1705 	 * let the requesting thread perform error checking/handling
   1706 	 * in its context.
   1707 	 */
   1708 	if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
   1709 		/*
   1710 		 * If it's a polling job, just return, to unwind the
   1711 		 * call graph.  We don't need to restart the queue,
   1712 		 * because polling jobs are treated specially, and
   1713 		 * are really only used during crash dumps anyway
   1714 		 * (XXX or during boot-time autoconfiguration of
   1715 		 * ATAPI devices).
   1716 		 */
   1717 		if (xs->xs_control & XS_CTL_POLL) {
   1718 			mutex_exit(chan_mtx(chan));
   1719 			return NULL;
   1720 		}
   1721 		cv_broadcast(xs_cv(xs));
   1722 		mutex_exit(chan_mtx(chan));
   1723 		goto out;
   1724 	}
   1725 
   1726 	/*
   1727 	 * Catch the extremely common case of I/O completing
   1728 	 * without error; no use in taking a context switch
   1729 	 * if we can handle it in interrupt context.
   1730 	 */
   1731 	if (xs->error == XS_NOERROR && more == true) {
   1732 		mutex_exit(chan_mtx(chan));
   1733 		(void) scsipi_complete(xs);
   1734 		goto out;
   1735 	}
   1736 
   1737 	/*
   1738 	 * There is an error on this xfer.  Put it on the channel's
   1739 	 * completion queue, and wake up the completion thread.
   1740 	 */
   1741 	TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
   1742 	cv_broadcast(chan_cv_complete(chan));
   1743 	mutex_exit(chan_mtx(chan));
   1744 
   1745  out:
   1746 	return chan;
   1747 }
   1748 
   1749 /*
   1750  * scsipi_complete:
   1751  *
   1752  *	Completion of a scsipi_xfer.  This is the guts of scsipi_done().
   1753  *
   1754  *	NOTE: This routine MUST be called with valid thread context
   1755  *	except for the case where the following two conditions are
   1756  *	true:
   1757  *
   1758  *		xs->error == XS_NOERROR
   1759  *		XS_CTL_ASYNC is set in xs->xs_control
   1760  *
   1761  *	The semantics of this routine can be tricky, so here is an
   1762  *	explanation:
   1763  *
   1764  *		0		Xfer completed successfully.
   1765  *
   1766  *		ERESTART	Xfer had an error, but was restarted.
   1767  *
   1768  *		anything else	Xfer had an error, return value is Unix
   1769  *				errno.
   1770  *
   1771  *	If the return value is anything but ERESTART:
   1772  *
   1773  *		- If XS_CTL_ASYNC is set, `xs' has been freed back to
   1774  *		  the pool.
   1775  *		- If there is a buf associated with the xfer,
   1776  *		  it has been biodone()'d.
   1777  */
   1778 static int
   1779 scsipi_complete(struct scsipi_xfer *xs)
   1780 {
   1781 	struct scsipi_periph *periph = xs->xs_periph;
   1782 	struct scsipi_channel *chan = periph->periph_channel;
   1783 	int error;
   1784 
   1785 	SDT_PROBE1(scsi, base, xfer, complete,  xs);
   1786 
   1787 #ifdef DIAGNOSTIC
   1788 	if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
   1789 		panic("scsipi_complete: XS_CTL_ASYNC but no buf");
   1790 #endif
   1791 	/*
   1792 	 * If command terminated with a CHECK CONDITION, we need to issue a
   1793 	 * REQUEST_SENSE command. Once the REQUEST_SENSE has been processed
   1794 	 * we'll have the real status.
   1795 	 * Must be processed with channel lock held to avoid missing
   1796 	 * a SCSI bus reset for this command.
   1797 	 */
   1798 	mutex_enter(chan_mtx(chan));
   1799 	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
   1800 		/* request sense for a request sense ? */
   1801 		if (xs->xs_control & XS_CTL_REQSENSE) {
   1802 			scsipi_printaddr(periph);
   1803 			printf("request sense for a request sense ?\n");
   1804 			/* XXX maybe we should reset the device ? */
   1805 			/* we've been frozen because xs->error != XS_NOERROR */
   1806 			scsipi_periph_thaw_locked(periph, 1);
   1807 			mutex_exit(chan_mtx(chan));
   1808 			if (xs->resid < xs->datalen) {
   1809 				printf("we read %d bytes of sense anyway:\n",
   1810 				    xs->datalen - xs->resid);
   1811 				scsipi_print_sense_data((void *)xs->data, 0);
   1812 			}
   1813 			return EINVAL;
   1814 		}
   1815 		mutex_exit(chan_mtx(chan)); // XXX allows other commands to queue or run
   1816 		scsipi_request_sense(xs);
   1817 	} else
   1818 		mutex_exit(chan_mtx(chan));
   1819 
   1820 	/*
   1821 	 * If it's a user level request, bypass all usual completion
   1822 	 * processing, let the user work it out..
   1823 	 */
   1824 	if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
   1825 		SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n"));
   1826 		mutex_enter(chan_mtx(chan));
   1827 		if (xs->error != XS_NOERROR)
   1828 			scsipi_periph_thaw_locked(periph, 1);
   1829 		mutex_exit(chan_mtx(chan));
   1830 		scsipi_user_done(xs);
   1831 		SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n "));
   1832 		return 0;
   1833 	}
   1834 
   1835 	switch (xs->error) {
   1836 	case XS_NOERROR:
   1837 		error = 0;
   1838 		break;
   1839 
   1840 	case XS_SENSE:
   1841 	case XS_SHORTSENSE:
   1842 		error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
   1843 		break;
   1844 
   1845 	case XS_RESOURCE_SHORTAGE:
   1846 		/*
   1847 		 * XXX Should freeze channel's queue.
   1848 		 */
   1849 		scsipi_printaddr(periph);
   1850 		printf("adapter resource shortage\n");
   1851 		/* FALLTHROUGH */
   1852 
   1853 	case XS_BUSY:
   1854 		if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
   1855 			struct scsipi_max_openings mo;
   1856 
   1857 			/*
   1858 			 * We set the openings to active - 1, assuming that
   1859 			 * the command that got us here is the first one that
   1860 			 * can't fit into the device's queue.  If that's not
   1861 			 * the case, I guess we'll find out soon enough.
   1862 			 */
   1863 			mo.mo_target = periph->periph_target;
   1864 			mo.mo_lun = periph->periph_lun;
   1865 			if (periph->periph_active < periph->periph_openings)
   1866 				mo.mo_openings = periph->periph_active - 1;
   1867 			else
   1868 				mo.mo_openings = periph->periph_openings - 1;
   1869 #ifdef DIAGNOSTIC
   1870 			if (mo.mo_openings < 0) {
   1871 				scsipi_printaddr(periph);
   1872 				printf("QUEUE FULL resulted in < 0 openings\n");
   1873 				panic("scsipi_done");
   1874 			}
   1875 #endif
   1876 			if (mo.mo_openings == 0) {
   1877 				scsipi_printaddr(periph);
   1878 				printf("QUEUE FULL resulted in 0 openings\n");
   1879 				mo.mo_openings = 1;
   1880 			}
   1881 			scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
   1882 			error = ERESTART;
   1883 		} else if (xs->xs_retries != 0) {
   1884 			xs->xs_retries--;
   1885 			/*
   1886 			 * Wait one second, and try again.
   1887 			 */
   1888 			mutex_enter(chan_mtx(chan));
   1889 			if ((xs->xs_control & XS_CTL_POLL) ||
   1890 			    (chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
   1891 				/* XXX: quite extreme */
   1892 				kpause("xsbusy", false, hz, chan_mtx(chan));
   1893 			} else if (!callout_pending(&periph->periph_callout)) {
   1894 				scsipi_periph_freeze_locked(periph, 1);
   1895 				callout_reset(&periph->periph_callout,
   1896 				    hz, scsipi_periph_timed_thaw, periph);
   1897 			}
   1898 			mutex_exit(chan_mtx(chan));
   1899 			error = ERESTART;
   1900 		} else
   1901 			error = EBUSY;
   1902 		break;
   1903 
   1904 	case XS_REQUEUE:
   1905 		error = ERESTART;
   1906 		break;
   1907 
   1908 	case XS_SELTIMEOUT:
   1909 	case XS_TIMEOUT:
   1910 		/*
   1911 		 * If the device hasn't gone away, honor retry counts.
   1912 		 *
   1913 		 * Note that if we're in the middle of probing it,
   1914 		 * it won't be found because it isn't here yet so
   1915 		 * we won't honor the retry count in that case.
   1916 		 */
   1917 		if (scsipi_lookup_periph(chan, periph->periph_target,
   1918 		    periph->periph_lun) && xs->xs_retries != 0) {
   1919 			xs->xs_retries--;
   1920 			error = ERESTART;
   1921 		} else
   1922 			error = EIO;
   1923 		break;
   1924 
   1925 	case XS_RESET:
   1926 		if (xs->xs_control & XS_CTL_REQSENSE) {
   1927 			/*
   1928 			 * request sense interrupted by reset: signal it
   1929 			 * with EINTR return code.
   1930 			 */
   1931 			error = EINTR;
   1932 		} else {
   1933 			if (xs->xs_retries != 0) {
   1934 				xs->xs_retries--;
   1935 				error = ERESTART;
   1936 			} else
   1937 				error = EIO;
   1938 		}
   1939 		break;
   1940 
   1941 	case XS_DRIVER_STUFFUP:
   1942 		scsipi_printaddr(periph);
   1943 		printf("generic HBA error\n");
   1944 		error = EIO;
   1945 		break;
   1946 	default:
   1947 		scsipi_printaddr(periph);
   1948 		printf("invalid return code from adapter: %d\n", xs->error);
   1949 		error = EIO;
   1950 		break;
   1951 	}
   1952 
   1953 	mutex_enter(chan_mtx(chan));
   1954 	if (error == ERESTART) {
   1955 		SDT_PROBE1(scsi, base, xfer, restart,  xs);
   1956 		/*
   1957 		 * If we get here, the periph has been thawed and frozen
   1958 		 * again if we had to issue recovery commands.  Alternatively,
   1959 		 * it may have been frozen again and in a timed thaw.  In
   1960 		 * any case, we thaw the periph once we re-enqueue the
   1961 		 * command.  Once the periph is fully thawed, it will begin
   1962 		 * operation again.
   1963 		 */
   1964 		xs->error = XS_NOERROR;
   1965 		xs->status = SCSI_OK;
   1966 		xs->xs_status &= ~XS_STS_DONE;
   1967 		xs->xs_requeuecnt++;
   1968 		error = scsipi_enqueue(xs);
   1969 		if (error == 0) {
   1970 			scsipi_periph_thaw_locked(periph, 1);
   1971 			mutex_exit(chan_mtx(chan));
   1972 			return ERESTART;
   1973 		}
   1974 	}
   1975 
   1976 	/*
   1977 	 * scsipi_done() freezes the queue if not XS_NOERROR.
   1978 	 * Thaw it here.
   1979 	 */
   1980 	if (xs->error != XS_NOERROR)
   1981 		scsipi_periph_thaw_locked(periph, 1);
   1982 	mutex_exit(chan_mtx(chan));
   1983 
   1984 	if (periph->periph_switch->psw_done)
   1985 		periph->periph_switch->psw_done(xs, error);
   1986 
   1987 	mutex_enter(chan_mtx(chan));
   1988 	if (xs->xs_control & XS_CTL_ASYNC)
   1989 		scsipi_put_xs(xs);
   1990 	mutex_exit(chan_mtx(chan));
   1991 
   1992 	return error;
   1993 }
   1994 
   1995 /*
   1996  * Issue a request sense for the given scsipi_xfer. Called when the xfer
   1997  * returns with a CHECK_CONDITION status. Must be called in valid thread
   1998  * context.
   1999  */
   2000 
   2001 static void
   2002 scsipi_request_sense(struct scsipi_xfer *xs)
   2003 {
   2004 	struct scsipi_periph *periph = xs->xs_periph;
   2005 	int flags, error;
   2006 	struct scsi_request_sense cmd;
   2007 
   2008 	periph->periph_flags |= PERIPH_SENSE;
   2009 
   2010 	/* if command was polling, request sense will too */
   2011 	flags = xs->xs_control & XS_CTL_POLL;
   2012 	/* Polling commands can't sleep */
   2013 	if (flags)
   2014 		flags |= XS_CTL_NOSLEEP;
   2015 
   2016 	flags |= XS_CTL_REQSENSE | XS_CTL_URGENT | XS_CTL_DATA_IN |
   2017 	    XS_CTL_THAW_PERIPH | XS_CTL_FREEZE_PERIPH;
   2018 
   2019 	memset(&cmd, 0, sizeof(cmd));
   2020 	cmd.opcode = SCSI_REQUEST_SENSE;
   2021 	cmd.length = sizeof(struct scsi_sense_data);
   2022 
   2023 	error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
   2024 	    (void *)&xs->sense.scsi_sense, sizeof(struct scsi_sense_data),
   2025 	    0, 1000, NULL, flags);
   2026 	periph->periph_flags &= ~PERIPH_SENSE;
   2027 	periph->periph_xscheck = NULL;
   2028 	switch (error) {
   2029 	case 0:
   2030 		/* we have a valid sense */
   2031 		xs->error = XS_SENSE;
   2032 		return;
   2033 	case EINTR:
   2034 		/* REQUEST_SENSE interrupted by bus reset. */
   2035 		xs->error = XS_RESET;
   2036 		return;
   2037 	case EIO:
   2038 		 /* request sense couldn't be performed */
   2039 		/*
   2040 		 * XXX this isn't quite right but we don't have anything
   2041 		 * better for now
   2042 		 */
   2043 		xs->error = XS_DRIVER_STUFFUP;
   2044 		return;
   2045 	default:
   2046 		 /* Notify that request sense failed. */
   2047 		xs->error = XS_DRIVER_STUFFUP;
   2048 		scsipi_printaddr(periph);
   2049 		printf("request sense failed with error %d\n", error);
   2050 		return;
   2051 	}
   2052 }
   2053 
   2054 /*
   2055  * scsipi_enqueue:
   2056  *
   2057  *	Enqueue an xfer on a channel.
   2058  */
   2059 static int
   2060 scsipi_enqueue(struct scsipi_xfer *xs)
   2061 {
   2062 	struct scsipi_channel *chan = xs->xs_periph->periph_channel;
   2063 	struct scsipi_xfer *qxs;
   2064 
   2065 	SDT_PROBE1(scsi, base, xfer, enqueue,  xs);
   2066 
   2067 	/*
   2068 	 * If the xfer is to be polled, and there are already jobs on
   2069 	 * the queue, we can't proceed.
   2070 	 */
   2071 	KASSERT(mutex_owned(chan_mtx(chan)));
   2072 	if ((xs->xs_control & XS_CTL_POLL) != 0 &&
   2073 	    TAILQ_FIRST(&chan->chan_queue) != NULL) {
   2074 		xs->error = XS_DRIVER_STUFFUP;
   2075 		return EAGAIN;
   2076 	}
   2077 
   2078 	/*
   2079 	 * If we have an URGENT xfer, it's an error recovery command
   2080 	 * and it should just go on the head of the channel's queue.
   2081 	 */
   2082 	if (xs->xs_control & XS_CTL_URGENT) {
   2083 		TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
   2084 		goto out;
   2085 	}
   2086 
   2087 	/*
   2088 	 * If this xfer has already been on the queue before, we
   2089 	 * need to reinsert it in the correct order.  That order is:
   2090 	 *
   2091 	 *	Immediately before the first xfer for this periph
   2092 	 *	with a requeuecnt less than xs->xs_requeuecnt.
   2093 	 *
   2094 	 * Failing that, at the end of the queue.  (We'll end up
   2095 	 * there naturally.)
   2096 	 */
   2097 	if (xs->xs_requeuecnt != 0) {
   2098 		for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
   2099 		     qxs = TAILQ_NEXT(qxs, channel_q)) {
   2100 			if (qxs->xs_periph == xs->xs_periph &&
   2101 			    qxs->xs_requeuecnt < xs->xs_requeuecnt)
   2102 				break;
   2103 		}
   2104 		if (qxs != NULL) {
   2105 			TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
   2106 			    channel_q);
   2107 			goto out;
   2108 		}
   2109 	}
   2110 	TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
   2111  out:
   2112 	if (xs->xs_control & XS_CTL_THAW_PERIPH)
   2113 		scsipi_periph_thaw_locked(xs->xs_periph, 1);
   2114 	return 0;
   2115 }
   2116 
   2117 /*
   2118  * scsipi_run_queue:
   2119  *
   2120  *	Start as many xfers as possible running on the channel.
   2121  */
   2122 static void
   2123 scsipi_run_queue(struct scsipi_channel *chan)
   2124 {
   2125 	struct scsipi_xfer *xs;
   2126 	struct scsipi_periph *periph;
   2127 
   2128 	SDT_PROBE1(scsi, base, queue, batch__start,  chan);
   2129 	for (;;) {
   2130 		mutex_enter(chan_mtx(chan));
   2131 
   2132 		/*
   2133 		 * If the channel is frozen, we can't do any work right
   2134 		 * now.
   2135 		 */
   2136 		if (chan->chan_qfreeze != 0) {
   2137 			mutex_exit(chan_mtx(chan));
   2138 			break;
   2139 		}
   2140 
   2141 		/*
   2142 		 * Look for work to do, and make sure we can do it.
   2143 		 */
   2144 		for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
   2145 		     xs = TAILQ_NEXT(xs, channel_q)) {
   2146 			periph = xs->xs_periph;
   2147 
   2148 			if ((periph->periph_sent >= periph->periph_openings) ||
   2149 			    periph->periph_qfreeze != 0 ||
   2150 			    (periph->periph_flags & PERIPH_UNTAG) != 0)
   2151 				continue;
   2152 
   2153 			if ((periph->periph_flags &
   2154 			    (PERIPH_RECOVERING | PERIPH_SENSE)) != 0 &&
   2155 			    (xs->xs_control & XS_CTL_URGENT) == 0)
   2156 				continue;
   2157 
   2158 			/*
   2159 			 * We can issue this xfer!
   2160 			 */
   2161 			goto got_one;
   2162 		}
   2163 
   2164 		/*
   2165 		 * Can't find any work to do right now.
   2166 		 */
   2167 		mutex_exit(chan_mtx(chan));
   2168 		break;
   2169 
   2170  got_one:
   2171 		/*
   2172 		 * Have an xfer to run.  Allocate a resource from
   2173 		 * the adapter to run it.  If we can't allocate that
   2174 		 * resource, we don't dequeue the xfer.
   2175 		 */
   2176 		if (scsipi_get_resource(chan) == 0) {
   2177 			/*
   2178 			 * Adapter is out of resources.  If the adapter
   2179 			 * supports it, attempt to grow them.
   2180 			 */
   2181 			if (scsipi_grow_resources(chan) == 0) {
   2182 				/*
   2183 				 * Wasn't able to grow resources,
   2184 				 * nothing more we can do.
   2185 				 */
   2186 				if (xs->xs_control & XS_CTL_POLL) {
   2187 					scsipi_printaddr(xs->xs_periph);
   2188 					printf("polling command but no "
   2189 					    "adapter resources");
   2190 					/* We'll panic shortly... */
   2191 				}
   2192 				mutex_exit(chan_mtx(chan));
   2193 
   2194 				/*
   2195 				 * XXX: We should be able to note that
   2196 				 * XXX: that resources are needed here!
   2197 				 */
   2198 				break;
   2199 			}
   2200 			/*
   2201 			 * scsipi_grow_resources() allocated the resource
   2202 			 * for us.
   2203 			 */
   2204 		}
   2205 
   2206 		/*
   2207 		 * We have a resource to run this xfer, do it!
   2208 		 */
   2209 		TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
   2210 
   2211 		/*
   2212 		 * If the command is to be tagged, allocate a tag ID
   2213 		 * for it.
   2214 		 */
   2215 		if (XS_CTL_TAGTYPE(xs) != 0)
   2216 			scsipi_get_tag(xs);
   2217 		else
   2218 			periph->periph_flags |= PERIPH_UNTAG;
   2219 		periph->periph_sent++;
   2220 		mutex_exit(chan_mtx(chan));
   2221 
   2222 		SDT_PROBE2(scsi, base, queue, run,  chan, xs);
   2223 		scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
   2224 	}
   2225 	SDT_PROBE1(scsi, base, queue, batch__done,  chan);
   2226 }
   2227 
   2228 /*
   2229  * scsipi_execute_xs:
   2230  *
   2231  *	Begin execution of an xfer, waiting for it to complete, if necessary.
   2232  */
   2233 int
   2234 scsipi_execute_xs(struct scsipi_xfer *xs)
   2235 {
   2236 	struct scsipi_periph *periph = xs->xs_periph;
   2237 	struct scsipi_channel *chan = periph->periph_channel;
   2238 	int oasync, async, poll, error;
   2239 
   2240 	KASSERT(!cold);
   2241 
   2242 	scsipi_update_timeouts(xs);
   2243 
   2244 	(chan->chan_bustype->bustype_cmd)(xs);
   2245 
   2246 	xs->xs_status &= ~XS_STS_DONE;
   2247 	xs->error = XS_NOERROR;
   2248 	xs->resid = xs->datalen;
   2249 	xs->status = SCSI_OK;
   2250 	SDT_PROBE1(scsi, base, xfer, execute,  xs);
   2251 
   2252 #ifdef SCSIPI_DEBUG
   2253 	if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) {
   2254 		printf("scsipi_execute_xs: ");
   2255 		show_scsipi_xs(xs);
   2256 		printf("\n");
   2257 	}
   2258 #endif
   2259 
   2260 	/*
   2261 	 * Deal with command tagging:
   2262 	 *
   2263 	 *	- If the device's current operating mode doesn't
   2264 	 *	  include tagged queueing, clear the tag mask.
   2265 	 *
   2266 	 *	- If the device's current operating mode *does*
   2267 	 *	  include tagged queueing, set the tag_type in
   2268 	 *	  the xfer to the appropriate byte for the tag
   2269 	 *	  message.
   2270 	 */
   2271 	if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0 ||
   2272 		(xs->xs_control & XS_CTL_REQSENSE)) {
   2273 		xs->xs_control &= ~XS_CTL_TAGMASK;
   2274 		xs->xs_tag_type = 0;
   2275 	} else {
   2276 		/*
   2277 		 * If the request doesn't specify a tag, give Head
   2278 		 * tags to URGENT operations and Simple tags to
   2279 		 * everything else.
   2280 		 */
   2281 		if (XS_CTL_TAGTYPE(xs) == 0) {
   2282 			if (xs->xs_control & XS_CTL_URGENT)
   2283 				xs->xs_control |= XS_CTL_HEAD_TAG;
   2284 			else
   2285 				xs->xs_control |= XS_CTL_SIMPLE_TAG;
   2286 		}
   2287 
   2288 		switch (XS_CTL_TAGTYPE(xs)) {
   2289 		case XS_CTL_ORDERED_TAG:
   2290 			xs->xs_tag_type = MSG_ORDERED_Q_TAG;
   2291 			break;
   2292 
   2293 		case XS_CTL_SIMPLE_TAG:
   2294 			xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
   2295 			break;
   2296 
   2297 		case XS_CTL_HEAD_TAG:
   2298 			xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
   2299 			break;
   2300 
   2301 		default:
   2302 			scsipi_printaddr(periph);
   2303 			printf("invalid tag mask 0x%08x\n",
   2304 			    XS_CTL_TAGTYPE(xs));
   2305 			panic("scsipi_execute_xs");
   2306 		}
   2307 	}
   2308 
   2309 	/* If the adapter wants us to poll, poll. */
   2310 	if (chan->chan_adapter->adapt_flags & SCSIPI_ADAPT_POLL_ONLY)
   2311 		xs->xs_control |= XS_CTL_POLL;
   2312 
   2313 	/*
   2314 	 * If we don't yet have a completion thread, or we are to poll for
   2315 	 * completion, clear the ASYNC flag.
   2316 	 */
   2317 	oasync =  (xs->xs_control & XS_CTL_ASYNC);
   2318 	if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
   2319 		xs->xs_control &= ~XS_CTL_ASYNC;
   2320 
   2321 	async = (xs->xs_control & XS_CTL_ASYNC);
   2322 	poll = (xs->xs_control & XS_CTL_POLL);
   2323 
   2324 #ifdef DIAGNOSTIC
   2325 	if (oasync != 0 && xs->bp == NULL)
   2326 		panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
   2327 #endif
   2328 
   2329 	/*
   2330 	 * Enqueue the transfer.  If we're not polling for completion, this
   2331 	 * should ALWAYS return `no error'.
   2332 	 */
   2333 	error = scsipi_enqueue(xs);
   2334 	if (error) {
   2335 		if (poll == 0) {
   2336 			scsipi_printaddr(periph);
   2337 			printf("not polling, but enqueue failed with %d\n",
   2338 			    error);
   2339 			panic("scsipi_execute_xs");
   2340 		}
   2341 
   2342 		scsipi_printaddr(periph);
   2343 		printf("should have flushed queue?\n");
   2344 		goto free_xs;
   2345 	}
   2346 
   2347 	mutex_exit(chan_mtx(chan));
   2348  restarted:
   2349 	scsipi_run_queue(chan);
   2350 	mutex_enter(chan_mtx(chan));
   2351 
   2352 	/*
   2353 	 * The xfer is enqueued, and possibly running.  If it's to be
   2354 	 * completed asynchronously, just return now.
   2355 	 */
   2356 	if (async)
   2357 		return 0;
   2358 
   2359 	/*
   2360 	 * Not an asynchronous command; wait for it to complete.
   2361 	 */
   2362 	while ((xs->xs_status & XS_STS_DONE) == 0) {
   2363 		if (poll) {
   2364 			scsipi_printaddr(periph);
   2365 			printf("polling command not done\n");
   2366 			panic("scsipi_execute_xs");
   2367 		}
   2368 		cv_wait(xs_cv(xs), chan_mtx(chan));
   2369 	}
   2370 
   2371 	/*
   2372 	 * Command is complete.  scsipi_done() has awakened us to perform
   2373 	 * the error handling.
   2374 	 */
   2375 	mutex_exit(chan_mtx(chan));
   2376 	error = scsipi_complete(xs);
   2377 	if (error == ERESTART)
   2378 		goto restarted;
   2379 
   2380 	/*
   2381 	 * If it was meant to run async and we cleared async ourselves,
   2382 	 * don't return an error here. It has already been handled
   2383 	 */
   2384 	if (oasync)
   2385 		error = 0;
   2386 	/*
   2387 	 * Command completed successfully or fatal error occurred.  Fall
   2388 	 * into....
   2389 	 */
   2390 	mutex_enter(chan_mtx(chan));
   2391  free_xs:
   2392 	scsipi_put_xs(xs);
   2393 	mutex_exit(chan_mtx(chan));
   2394 
   2395 	/*
   2396 	 * Kick the queue, keep it running in case it stopped for some
   2397 	 * reason.
   2398 	 */
   2399 	scsipi_run_queue(chan);
   2400 
   2401 	mutex_enter(chan_mtx(chan));
   2402 	return error;
   2403 }
   2404 
   2405 /*
   2406  * scsipi_completion_thread:
   2407  *
   2408  *	This is the completion thread.  We wait for errors on
   2409  *	asynchronous xfers, and perform the error handling
   2410  *	function, restarting the command, if necessary.
   2411  */
   2412 static void
   2413 scsipi_completion_thread(void *arg)
   2414 {
   2415 	struct scsipi_channel *chan = arg;
   2416 	struct scsipi_xfer *xs;
   2417 
   2418 	if (chan->chan_init_cb)
   2419 		(*chan->chan_init_cb)(chan, chan->chan_init_cb_arg);
   2420 
   2421 	mutex_enter(chan_mtx(chan));
   2422 	chan->chan_flags |= SCSIPI_CHAN_TACTIVE;
   2423 	for (;;) {
   2424 		xs = TAILQ_FIRST(&chan->chan_complete);
   2425 		if (xs == NULL && chan->chan_tflags == 0) {
   2426 			/* nothing to do; wait */
   2427 			cv_wait(chan_cv_complete(chan), chan_mtx(chan));
   2428 			continue;
   2429 		}
   2430 		if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
   2431 			/* call chan_callback from thread context */
   2432 			chan->chan_tflags &= ~SCSIPI_CHANT_CALLBACK;
   2433 			chan->chan_callback(chan, chan->chan_callback_arg);
   2434 			continue;
   2435 		}
   2436 		if (chan->chan_tflags & SCSIPI_CHANT_GROWRES) {
   2437 			/* attempt to get more openings for this channel */
   2438 			chan->chan_tflags &= ~SCSIPI_CHANT_GROWRES;
   2439 			mutex_exit(chan_mtx(chan));
   2440 			scsipi_adapter_request(chan,
   2441 			    ADAPTER_REQ_GROW_RESOURCES, NULL);
   2442 			scsipi_channel_thaw(chan, 1);
   2443 			if (chan->chan_tflags & SCSIPI_CHANT_GROWRES)
   2444 				kpause("scsizzz", FALSE, hz/10, NULL);
   2445 			mutex_enter(chan_mtx(chan));
   2446 			continue;
   2447 		}
   2448 		if (chan->chan_tflags & SCSIPI_CHANT_KICK) {
   2449 			/* explicitly run the queues for this channel */
   2450 			chan->chan_tflags &= ~SCSIPI_CHANT_KICK;
   2451 			mutex_exit(chan_mtx(chan));
   2452 			scsipi_run_queue(chan);
   2453 			mutex_enter(chan_mtx(chan));
   2454 			continue;
   2455 		}
   2456 		if (chan->chan_tflags & SCSIPI_CHANT_SHUTDOWN) {
   2457 			break;
   2458 		}
   2459 		if (xs) {
   2460 			TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
   2461 			mutex_exit(chan_mtx(chan));
   2462 
   2463 			/*
   2464 			 * Have an xfer with an error; process it.
   2465 			 */
   2466 			(void) scsipi_complete(xs);
   2467 
   2468 			/*
   2469 			 * Kick the queue; keep it running if it was stopped
   2470 			 * for some reason.
   2471 			 */
   2472 			scsipi_run_queue(chan);
   2473 			mutex_enter(chan_mtx(chan));
   2474 		}
   2475 	}
   2476 
   2477 	chan->chan_thread = NULL;
   2478 
   2479 	/* In case parent is waiting for us to exit. */
   2480 	cv_broadcast(chan_cv_thread(chan));
   2481 	mutex_exit(chan_mtx(chan));
   2482 
   2483 	kthread_exit(0);
   2484 }
   2485 /*
   2486  * scsipi_thread_call_callback:
   2487  *
   2488  * 	request to call a callback from the completion thread
   2489  */
   2490 int
   2491 scsipi_thread_call_callback(struct scsipi_channel *chan,
   2492     void (*callback)(struct scsipi_channel *, void *), void *arg)
   2493 {
   2494 
   2495 	mutex_enter(chan_mtx(chan));
   2496 	if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
   2497 		/* kernel thread doesn't exist yet */
   2498 		mutex_exit(chan_mtx(chan));
   2499 		return ESRCH;
   2500 	}
   2501 	if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
   2502 		mutex_exit(chan_mtx(chan));
   2503 		return EBUSY;
   2504 	}
   2505 	scsipi_channel_freeze(chan, 1);
   2506 	chan->chan_callback = callback;
   2507 	chan->chan_callback_arg = arg;
   2508 	chan->chan_tflags |= SCSIPI_CHANT_CALLBACK;
   2509 	cv_broadcast(chan_cv_complete(chan));
   2510 	mutex_exit(chan_mtx(chan));
   2511 	return 0;
   2512 }
   2513 
   2514 /*
   2515  * scsipi_async_event:
   2516  *
   2517  *	Handle an asynchronous event from an adapter.
   2518  */
   2519 void
   2520 scsipi_async_event(struct scsipi_channel *chan, scsipi_async_event_t event,
   2521     void *arg)
   2522 {
   2523 	bool lock = chan_running(chan) > 0;
   2524 
   2525 	if (lock)
   2526 		mutex_enter(chan_mtx(chan));
   2527 	switch (event) {
   2528 	case ASYNC_EVENT_MAX_OPENINGS:
   2529 		scsipi_async_event_max_openings(chan,
   2530 		    (struct scsipi_max_openings *)arg);
   2531 		break;
   2532 
   2533 	case ASYNC_EVENT_XFER_MODE:
   2534 		if (chan->chan_bustype->bustype_async_event_xfer_mode) {
   2535 			chan->chan_bustype->bustype_async_event_xfer_mode(
   2536 			    chan, arg);
   2537 		}
   2538 		break;
   2539 	case ASYNC_EVENT_RESET:
   2540 		scsipi_async_event_channel_reset(chan);
   2541 		break;
   2542 	}
   2543 	if (lock)
   2544 		mutex_exit(chan_mtx(chan));
   2545 }
   2546 
   2547 /*
   2548  * scsipi_async_event_max_openings:
   2549  *
   2550  *	Update the maximum number of outstanding commands a
   2551  *	device may have.
   2552  */
   2553 static void
   2554 scsipi_async_event_max_openings(struct scsipi_channel *chan,
   2555     struct scsipi_max_openings *mo)
   2556 {
   2557 	struct scsipi_periph *periph;
   2558 	int minlun, maxlun;
   2559 
   2560 	if (mo->mo_lun == -1) {
   2561 		/*
   2562 		 * Wildcarded; apply it to all LUNs.
   2563 		 */
   2564 		minlun = 0;
   2565 		maxlun = chan->chan_nluns - 1;
   2566 	} else
   2567 		minlun = maxlun = mo->mo_lun;
   2568 
   2569 	/* XXX This could really suck with a large LUN space. */
   2570 	for (; minlun <= maxlun; minlun++) {
   2571 		periph = scsipi_lookup_periph_locked(chan, mo->mo_target, minlun);
   2572 		if (periph == NULL)
   2573 			continue;
   2574 
   2575 		if (mo->mo_openings < periph->periph_openings)
   2576 			periph->periph_openings = mo->mo_openings;
   2577 		else if (mo->mo_openings > periph->periph_openings &&
   2578 		    (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
   2579 			periph->periph_openings = mo->mo_openings;
   2580 	}
   2581 }
   2582 
   2583 /*
   2584  * scsipi_set_xfer_mode:
   2585  *
   2586  *	Set the xfer mode for the specified I_T Nexus.
   2587  */
   2588 void
   2589 scsipi_set_xfer_mode(struct scsipi_channel *chan, int target, int immed)
   2590 {
   2591 	struct scsipi_xfer_mode xm;
   2592 	struct scsipi_periph *itperiph;
   2593 	int lun;
   2594 
   2595 	/*
   2596 	 * Go to the minimal xfer mode.
   2597 	 */
   2598 	xm.xm_target = target;
   2599 	xm.xm_mode = 0;
   2600 	xm.xm_period = 0;			/* ignored */
   2601 	xm.xm_offset = 0;			/* ignored */
   2602 
   2603 	/*
   2604 	 * Find the first LUN we know about on this I_T Nexus.
   2605 	 */
   2606 	for (itperiph = NULL, lun = 0; lun < chan->chan_nluns; lun++) {
   2607 		itperiph = scsipi_lookup_periph(chan, target, lun);
   2608 		if (itperiph != NULL)
   2609 			break;
   2610 	}
   2611 	if (itperiph != NULL) {
   2612 		xm.xm_mode = itperiph->periph_cap;
   2613 		/*
   2614 		 * Now issue the request to the adapter.
   2615 		 */
   2616 		scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm);
   2617 		/*
   2618 		 * If we want this to happen immediately, issue a dummy
   2619 		 * command, since most adapters can't really negotiate unless
   2620 		 * they're executing a job.
   2621 		 */
   2622 		if (immed != 0) {
   2623 			(void) scsipi_test_unit_ready(itperiph,
   2624 			    XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
   2625 			    XS_CTL_IGNORE_NOT_READY |
   2626 			    XS_CTL_IGNORE_MEDIA_CHANGE);
   2627 		}
   2628 	}
   2629 }
   2630 
   2631 /*
   2632  * scsipi_channel_reset:
   2633  *
   2634  *	handle scsi bus reset
   2635  * called with channel lock held
   2636  */
   2637 static void
   2638 scsipi_async_event_channel_reset(struct scsipi_channel *chan)
   2639 {
   2640 	struct scsipi_xfer *xs, *xs_next;
   2641 	struct scsipi_periph *periph;
   2642 	int target, lun;
   2643 
   2644 	/*
   2645 	 * Channel has been reset. Also mark as reset pending REQUEST_SENSE
   2646 	 * commands; as the sense is not available any more.
   2647 	 * can't call scsipi_done() from here, as the command has not been
   2648 	 * sent to the adapter yet (this would corrupt accounting).
   2649 	 */
   2650 
   2651 	for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; xs = xs_next) {
   2652 		xs_next = TAILQ_NEXT(xs, channel_q);
   2653 		if (xs->xs_control & XS_CTL_REQSENSE) {
   2654 			TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
   2655 			xs->error = XS_RESET;
   2656 			if ((xs->xs_control & XS_CTL_ASYNC) != 0)
   2657 				TAILQ_INSERT_TAIL(&chan->chan_complete, xs,
   2658 				    channel_q);
   2659 		}
   2660 	}
   2661 	cv_broadcast(chan_cv_complete(chan));
   2662 	/* Catch xs with pending sense which may not have a REQSENSE xs yet */
   2663 	for (target = 0; target < chan->chan_ntargets; target++) {
   2664 		if (target == chan->chan_id)
   2665 			continue;
   2666 		for (lun = 0; lun <  chan->chan_nluns; lun++) {
   2667 			periph = scsipi_lookup_periph_locked(chan, target, lun);
   2668 			if (periph) {
   2669 				xs = periph->periph_xscheck;
   2670 				if (xs)
   2671 					xs->error = XS_RESET;
   2672 			}
   2673 		}
   2674 	}
   2675 }
   2676 
   2677 /*
   2678  * scsipi_target_detach:
   2679  *
   2680  *	detach all periph associated with a I_T
   2681  * 	must be called from valid thread context
   2682  */
   2683 int
   2684 scsipi_target_detach(struct scsipi_channel *chan, int target, int lun,
   2685     int flags)
   2686 {
   2687 	struct scsipi_periph *periph;
   2688 	device_t tdev;
   2689 	int ctarget, mintarget, maxtarget;
   2690 	int clun, minlun, maxlun;
   2691 	int error = 0;
   2692 
   2693 	if (target == -1) {
   2694 		mintarget = 0;
   2695 		maxtarget = chan->chan_ntargets;
   2696 	} else {
   2697 		if (target == chan->chan_id)
   2698 			return EINVAL;
   2699 		if (target < 0 || target >= chan->chan_ntargets)
   2700 			return EINVAL;
   2701 		mintarget = target;
   2702 		maxtarget = target + 1;
   2703 	}
   2704 
   2705 	if (lun == -1) {
   2706 		minlun = 0;
   2707 		maxlun = chan->chan_nluns;
   2708 	} else {
   2709 		if (lun < 0 || lun >= chan->chan_nluns)
   2710 			return EINVAL;
   2711 		minlun = lun;
   2712 		maxlun = lun + 1;
   2713 	}
   2714 
   2715 	/* for config_detach */
   2716 	KERNEL_LOCK(1, curlwp);
   2717 
   2718 	mutex_enter(chan_mtx(chan));
   2719 	for (ctarget = mintarget; ctarget < maxtarget; ctarget++) {
   2720 		if (ctarget == chan->chan_id)
   2721 			continue;
   2722 
   2723 		for (clun = minlun; clun < maxlun; clun++) {
   2724 			periph = scsipi_lookup_periph_locked(chan, ctarget, clun);
   2725 			if (periph == NULL)
   2726 				continue;
   2727 			tdev = periph->periph_dev;
   2728 			mutex_exit(chan_mtx(chan));
   2729 			error = config_detach(tdev, flags);
   2730 			if (error)
   2731 				goto out;
   2732 			mutex_enter(chan_mtx(chan));
   2733 			KASSERT(scsipi_lookup_periph_locked(chan, ctarget, clun) == NULL);
   2734 		}
   2735 	}
   2736 	mutex_exit(chan_mtx(chan));
   2737 
   2738 out:
   2739 	KERNEL_UNLOCK_ONE(curlwp);
   2740 
   2741 	return error;
   2742 }
   2743 
   2744 /*
   2745  * scsipi_adapter_addref:
   2746  *
   2747  *	Add a reference to the adapter pointed to by the provided
   2748  *	link, enabling the adapter if necessary.
   2749  */
   2750 int
   2751 scsipi_adapter_addref(struct scsipi_adapter *adapt)
   2752 {
   2753 	int error = 0;
   2754 
   2755 	if (atomic_inc_uint_nv(&adapt->adapt_refcnt) == 1
   2756 	    && adapt->adapt_enable != NULL) {
   2757 		scsipi_adapter_lock(adapt);
   2758 		error = scsipi_adapter_enable(adapt, 1);
   2759 		scsipi_adapter_unlock(adapt);
   2760 		if (error)
   2761 			atomic_dec_uint(&adapt->adapt_refcnt);
   2762 	}
   2763 	return error;
   2764 }
   2765 
   2766 /*
   2767  * scsipi_adapter_delref:
   2768  *
   2769  *	Delete a reference to the adapter pointed to by the provided
   2770  *	link, disabling the adapter if possible.
   2771  */
   2772 void
   2773 scsipi_adapter_delref(struct scsipi_adapter *adapt)
   2774 {
   2775 
   2776 	membar_release();
   2777 	if (atomic_dec_uint_nv(&adapt->adapt_refcnt) == 0
   2778 	    && adapt->adapt_enable != NULL) {
   2779 		membar_acquire();
   2780 		scsipi_adapter_lock(adapt);
   2781 		(void) scsipi_adapter_enable(adapt, 0);
   2782 		scsipi_adapter_unlock(adapt);
   2783 	}
   2784 }
   2785 
   2786 static struct scsipi_syncparam {
   2787 	int	ss_factor;
   2788 	int	ss_period;	/* ns * 100 */
   2789 } scsipi_syncparams[] = {
   2790 	{ 0x08,		 625 },	/* FAST-160 (Ultra320) */
   2791 	{ 0x09,		1250 },	/* FAST-80 (Ultra160) */
   2792 	{ 0x0a,		2500 },	/* FAST-40 40MHz (Ultra2) */
   2793 	{ 0x0b,		3030 },	/* FAST-40 33MHz (Ultra2) */
   2794 	{ 0x0c,		5000 },	/* FAST-20 (Ultra) */
   2795 };
   2796 static const int scsipi_nsyncparams =
   2797     sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);
   2798 
   2799 int
   2800 scsipi_sync_period_to_factor(int period /* ns * 100 */)
   2801 {
   2802 	int i;
   2803 
   2804 	for (i = 0; i < scsipi_nsyncparams; i++) {
   2805 		if (period <= scsipi_syncparams[i].ss_period)
   2806 			return scsipi_syncparams[i].ss_factor;
   2807 	}
   2808 
   2809 	return (period / 100) / 4;
   2810 }
   2811 
   2812 int
   2813 scsipi_sync_factor_to_period(int factor)
   2814 {
   2815 	int i;
   2816 
   2817 	for (i = 0; i < scsipi_nsyncparams; i++) {
   2818 		if (factor == scsipi_syncparams[i].ss_factor)
   2819 			return scsipi_syncparams[i].ss_period;
   2820 	}
   2821 
   2822 	return (factor * 4) * 100;
   2823 }
   2824 
   2825 int
   2826 scsipi_sync_factor_to_freq(int factor)
   2827 {
   2828 	int i;
   2829 
   2830 	for (i = 0; i < scsipi_nsyncparams; i++) {
   2831 		if (factor == scsipi_syncparams[i].ss_factor)
   2832 			return 100000000 / scsipi_syncparams[i].ss_period;
   2833 	}
   2834 
   2835 	return 10000000 / ((factor * 4) * 10);
   2836 }
   2837 
   2838 static inline void
   2839 scsipi_adapter_lock(struct scsipi_adapter *adapt)
   2840 {
   2841 
   2842 	if ((adapt->adapt_flags & SCSIPI_ADAPT_MPSAFE) == 0)
   2843 		KERNEL_LOCK(1, NULL);
   2844 }
   2845 
   2846 static inline void
   2847 scsipi_adapter_unlock(struct scsipi_adapter *adapt)
   2848 {
   2849 
   2850 	if ((adapt->adapt_flags & SCSIPI_ADAPT_MPSAFE) == 0)
   2851 		KERNEL_UNLOCK_ONE(NULL);
   2852 }
   2853 
   2854 void
   2855 scsipi_adapter_minphys(struct scsipi_channel *chan, struct buf *bp)
   2856 {
   2857 	struct scsipi_adapter *adapt = chan->chan_adapter;
   2858 
   2859 	scsipi_adapter_lock(adapt);
   2860 	(adapt->adapt_minphys)(bp);
   2861 	scsipi_adapter_unlock(chan->chan_adapter);
   2862 }
   2863 
   2864 void
   2865 scsipi_adapter_request(struct scsipi_channel *chan,
   2866 	scsipi_adapter_req_t req, void *arg)
   2867 
   2868 {
   2869 	struct scsipi_adapter *adapt = chan->chan_adapter;
   2870 
   2871 	scsipi_adapter_lock(adapt);
   2872 	SDT_PROBE3(scsi, base, adapter, request__start,  chan, req, arg);
   2873 	(adapt->adapt_request)(chan, req, arg);
   2874 	SDT_PROBE3(scsi, base, adapter, request__done,  chan, req, arg);
   2875 	scsipi_adapter_unlock(adapt);
   2876 }
   2877 
   2878 int
   2879 scsipi_adapter_ioctl(struct scsipi_channel *chan, u_long cmd,
   2880 	void *data, int flag, struct proc *p)
   2881 {
   2882 	struct scsipi_adapter *adapt = chan->chan_adapter;
   2883 	int error;
   2884 
   2885 	if (adapt->adapt_ioctl == NULL)
   2886 		return ENOTTY;
   2887 
   2888 	scsipi_adapter_lock(adapt);
   2889 	error = (adapt->adapt_ioctl)(chan, cmd, data, flag, p);
   2890 	scsipi_adapter_unlock(adapt);
   2891 	return error;
   2892 }
   2893 
   2894 int
   2895 scsipi_adapter_enable(struct scsipi_adapter *adapt, int enable)
   2896 {
   2897 	int error;
   2898 
   2899 	scsipi_adapter_lock(adapt);
   2900 	error = (adapt->adapt_enable)(adapt->adapt_dev, enable);
   2901 	scsipi_adapter_unlock(adapt);
   2902 	return error;
   2903 }
   2904 
   2905 #ifdef SCSIPI_DEBUG
   2906 /*
   2907  * Given a scsipi_xfer, dump the request, in all its glory
   2908  */
   2909 void
   2910 show_scsipi_xs(struct scsipi_xfer *xs)
   2911 {
   2912 
   2913 	printf("xs(%p): ", xs);
   2914 	printf("xs_control(0x%08x)", xs->xs_control);
   2915 	printf("xs_status(0x%08x)", xs->xs_status);
   2916 	printf("periph(%p)", xs->xs_periph);
   2917 	printf("retr(0x%x)", xs->xs_retries);
   2918 	printf("timo(0x%x)", xs->timeout);
   2919 	printf("cmd(%p)", xs->cmd);
   2920 	printf("len(0x%x)", xs->cmdlen);
   2921 	printf("data(%p)", xs->data);
   2922 	printf("len(0x%x)", xs->datalen);
   2923 	printf("res(0x%x)", xs->resid);
   2924 	printf("err(0x%x)", xs->error);
   2925 	printf("bp(%p)", xs->bp);
   2926 	show_scsipi_cmd(xs);
   2927 }
   2928 
   2929 void
   2930 show_scsipi_cmd(struct scsipi_xfer *xs)
   2931 {
   2932 	u_char *b = (u_char *) xs->cmd;
   2933 	int i = 0;
   2934 
   2935 	scsipi_printaddr(xs->xs_periph);
   2936 	printf(" command: ");
   2937 
   2938 	if ((xs->xs_control & XS_CTL_RESET) == 0) {
   2939 		while (i < xs->cmdlen) {
   2940 			if (i)
   2941 				printf(",");
   2942 			printf("0x%x", b[i++]);
   2943 		}
   2944 		printf("-[%d bytes]\n", xs->datalen);
   2945 		if (xs->datalen)
   2946 			show_mem(xs->data, uimin(64, xs->datalen));
   2947 	} else
   2948 		printf("-RESET-\n");
   2949 }
   2950 
   2951 void
   2952 show_mem(u_char *address, int num)
   2953 {
   2954 	int x;
   2955 
   2956 	printf("------------------------------");
   2957 	for (x = 0; x < num; x++) {
   2958 		if ((x % 16) == 0)
   2959 			printf("\n%03d: ", x);
   2960 		printf("%02x ", *address++);
   2961 	}
   2962 	printf("\n------------------------------\n");
   2963 }
   2964 #endif /* SCSIPI_DEBUG */
   2965