      1 /*	$NetBSD: scsipi_base.c,v 1.168 2016/11/21 21:03:22 mlelstv Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 1998, 1999, 2000, 2002, 2003, 2004 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
      9  * Simulation Facility, NASA Ames Research Center.
     10  *
     11  * Redistribution and use in source and binary forms, with or without
     12  * modification, are permitted provided that the following conditions
     13  * are met:
     14  * 1. Redistributions of source code must retain the above copyright
     15  *    notice, this list of conditions and the following disclaimer.
     16  * 2. Redistributions in binary form must reproduce the above copyright
     17  *    notice, this list of conditions and the following disclaimer in the
     18  *    documentation and/or other materials provided with the distribution.
     19  *
     20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     30  * POSSIBILITY OF SUCH DAMAGE.
     31  */
     32 
     33 #include <sys/cdefs.h>
     34 __KERNEL_RCSID(0, "$NetBSD: scsipi_base.c,v 1.168 2016/11/21 21:03:22 mlelstv Exp $");
     35 
     36 #ifdef _KERNEL_OPT
     37 #include "opt_scsi.h"
     38 #endif
     39 
     40 #include <sys/param.h>
     41 #include <sys/systm.h>
     42 #include <sys/kernel.h>
     43 #include <sys/buf.h>
     44 #include <sys/uio.h>
     45 #include <sys/malloc.h>
     46 #include <sys/pool.h>
     47 #include <sys/errno.h>
     48 #include <sys/device.h>
     49 #include <sys/proc.h>
     50 #include <sys/kthread.h>
     51 #include <sys/hash.h>
     52 #include <sys/atomic.h>
     53 
     54 #include <dev/scsipi/scsi_spc.h>
     55 #include <dev/scsipi/scsipi_all.h>
     56 #include <dev/scsipi/scsipi_disk.h>
     57 #include <dev/scsipi/scsipiconf.h>
     58 #include <dev/scsipi/scsipi_base.h>
     59 
     60 #include <dev/scsipi/scsi_all.h>
     61 #include <dev/scsipi/scsi_message.h>
     62 
     63 #include <machine/param.h>
     64 
     65 static int	scsipi_complete(struct scsipi_xfer *);
     66 static void	scsipi_request_sense(struct scsipi_xfer *);
     67 static int	scsipi_enqueue(struct scsipi_xfer *);
     68 static void	scsipi_run_queue(struct scsipi_channel *chan);
     69 
     70 static void	scsipi_completion_thread(void *);
     71 
     72 static void	scsipi_get_tag(struct scsipi_xfer *);
     73 static void	scsipi_put_tag(struct scsipi_xfer *);
     74 
     75 static int	scsipi_get_resource(struct scsipi_channel *);
     76 static void	scsipi_put_resource(struct scsipi_channel *);
     77 
     78 static void	scsipi_async_event_max_openings(struct scsipi_channel *,
     79 		    struct scsipi_max_openings *);
     80 static void	scsipi_async_event_channel_reset(struct scsipi_channel *);
     81 
     82 static void	scsipi_channel_freeze_locked(struct scsipi_channel *, int);
     83 
     84 static void	scsipi_adapter_lock(struct scsipi_adapter *adapt);
     85 static void	scsipi_adapter_unlock(struct scsipi_adapter *adapt);
     86 
     87 static struct pool scsipi_xfer_pool;
     88 
     89 int scsipi_xs_count = 0;
     90 
     91 /*
     92  * scsipi_init:
     93  *
     94  *	Called when a scsibus or atapibus is attached to the system
     95  *	to initialize shared data structures.
     96  */
     97 void
     98 scsipi_init(void)
     99 {
    100 	static int scsipi_init_done;
    101 
    102 	if (scsipi_init_done)
    103 		return;
    104 	scsipi_init_done = 1;
    105 
    106 	/* Initialize the scsipi_xfer pool. */
    107 	pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
    108 	    0, 0, "scxspl", NULL, IPL_BIO);
    109 	if (pool_prime(&scsipi_xfer_pool,
    110 	    PAGE_SIZE / sizeof(struct scsipi_xfer)) == ENOMEM) {
    111 		printf("WARNING: not enough memory for scsipi_xfer_pool\n");
    112 	}
    113 
    114 	scsipi_ioctl_init();
    115 }
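
/*
 * Illustrative note (not from the original source): pool_prime() above
 * preallocates one page worth of xfers.  Assuming, say, a 4096-byte
 * PAGE_SIZE and a struct scsipi_xfer of roughly 200 bytes, that is on
 * the order of 20 xfers reserved before the first command is ever
 * issued; the exact count is platform-dependent.
 */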
    116 
    117 /*
    118  * scsipi_channel_init:
    119  *
    120  *	Initialize a scsipi_channel when it is attached.
    121  */
    122 int
    123 scsipi_channel_init(struct scsipi_channel *chan)
    124 {
    125 	struct scsipi_adapter *adapt = chan->chan_adapter;
    126 	int i;
    127 
    128 	/* Initialize shared data. */
    129 	scsipi_init();
    130 
    131 	/* Initialize the queues. */
    132 	TAILQ_INIT(&chan->chan_queue);
    133 	TAILQ_INIT(&chan->chan_complete);
    134 
    135 	for (i = 0; i < SCSIPI_CHAN_PERIPH_BUCKETS; i++)
    136 		LIST_INIT(&chan->chan_periphtab[i]);
    137 
    138 	/*
    139 	 * Create the asynchronous completion thread.
    140 	 */
    141 	if (kthread_create(PRI_NONE, 0, NULL, scsipi_completion_thread, chan,
    142 	    &chan->chan_thread, "%s", chan->chan_name)) {
    143 		aprint_error_dev(adapt->adapt_dev, "unable to create completion thread for "
    144 		    "channel %d\n", chan->chan_channel);
    145 		panic("scsipi_channel_init");
    146 	}
    147 
    148 	return (0);
    149 }
    150 
    151 /*
    152  * scsipi_channel_shutdown:
    153  *
    154  *	Shutdown a scsipi_channel.
    155  */
    156 void
    157 scsipi_channel_shutdown(struct scsipi_channel *chan)
    158 {
    159 
    160 	mutex_enter(chan_mtx(chan));
    161 	/*
    162 	 * Shut down the completion thread.
    163 	 */
    164 	chan->chan_tflags |= SCSIPI_CHANT_SHUTDOWN;
    165 	cv_broadcast(chan_cv_complete(chan));
    166 
    167 	/*
    168 	 * Now wait for the thread to exit.
    169 	 */
    170 	while (chan->chan_thread != NULL)
    171 		cv_wait(chan_cv_thread(chan), chan_mtx(chan));
    172 	mutex_exit(chan_mtx(chan));
    173 }
    174 
    175 static uint32_t
    176 scsipi_chan_periph_hash(uint64_t t, uint64_t l)
    177 {
    178 	uint32_t hash;
    179 
    180 	hash = hash32_buf(&t, sizeof(t), HASH32_BUF_INIT);
    181 	hash = hash32_buf(&l, sizeof(l), hash);
    182 
    183 	return (hash & SCSIPI_CHAN_PERIPH_HASHMASK);
    184 }
    185 
    186 /*
    187  * scsipi_insert_periph:
    188  *
    189  *	Insert a periph into the channel.
    190  */
    191 void
    192 scsipi_insert_periph(struct scsipi_channel *chan, struct scsipi_periph *periph)
    193 {
    194 	uint32_t hash;
    195 
    196 	hash = scsipi_chan_periph_hash(periph->periph_target,
    197 	    periph->periph_lun);
    198 
    199 	mutex_enter(chan_mtx(chan));
    200 	LIST_INSERT_HEAD(&chan->chan_periphtab[hash], periph, periph_hash);
    201 	mutex_exit(chan_mtx(chan));
    202 }
    203 
    204 /*
    205  * scsipi_remove_periph:
    206  *
    207  *	Remove a periph from the channel.
    208  */
    209 void
    210 scsipi_remove_periph(struct scsipi_channel *chan,
    211     struct scsipi_periph *periph)
    212 {
    213 
    214 	LIST_REMOVE(periph, periph_hash);
    215 }
    216 
    217 /*
    218  * scsipi_lookup_periph:
    219  *
     220  *	Look up a periph on the specified channel.
    221  */
    222 static struct scsipi_periph *
    223 scsipi_lookup_periph_internal(struct scsipi_channel *chan, int target, int lun, bool lock)
    224 {
    225 	struct scsipi_periph *periph;
    226 	uint32_t hash;
    227 
    228 	if (target >= chan->chan_ntargets ||
    229 	    lun >= chan->chan_nluns)
    230 		return (NULL);
    231 
    232 	hash = scsipi_chan_periph_hash(target, lun);
    233 
    234 	if (lock)
    235 		mutex_enter(chan_mtx(chan));
    236 	LIST_FOREACH(periph, &chan->chan_periphtab[hash], periph_hash) {
    237 		if (periph->periph_target == target &&
    238 		    periph->periph_lun == lun)
    239 			break;
    240 	}
    241 	if (lock)
    242 		mutex_exit(chan_mtx(chan));
    243 
    244 	return (periph);
    245 }
    246 
    247 struct scsipi_periph *
    248 scsipi_lookup_periph_locked(struct scsipi_channel *chan, int target, int lun)
    249 {
    250 	return scsipi_lookup_periph_internal(chan, target, lun, false);
    251 }
    252 
    253 struct scsipi_periph *
    254 scsipi_lookup_periph(struct scsipi_channel *chan, int target, int lun)
    255 {
    256 	return scsipi_lookup_periph_internal(chan, target, lun, true);
    257 }
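
/*
 * Usage sketch (illustrative only): code that already holds the channel
 * mutex must use the _locked variant so the mutex is not taken twice:
 *
 *	mutex_enter(chan_mtx(chan));
 *	periph = scsipi_lookup_periph_locked(chan, target, lun);
 *	...
 *	mutex_exit(chan_mtx(chan));
 *
 * Unlocked callers use scsipi_lookup_periph(), which takes and drops
 * the mutex around the table walk itself.
 */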
    258 
    259 /*
    260  * scsipi_get_resource:
    261  *
    262  *	Allocate a single xfer `resource' from the channel.
    263  *
    264  *	NOTE: Must be called with channel lock held
    265  */
    266 static int
    267 scsipi_get_resource(struct scsipi_channel *chan)
    268 {
    269 	struct scsipi_adapter *adapt = chan->chan_adapter;
    270 
    271 	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
    272 		if (chan->chan_openings > 0) {
    273 			chan->chan_openings--;
    274 			return (1);
    275 		}
    276 		return (0);
    277 	}
    278 
    279 	if (adapt->adapt_openings > 0) {
    280 		adapt->adapt_openings--;
    281 		return (1);
    282 	}
    283 	return (0);
    284 }
    285 
    286 /*
    287  * scsipi_grow_resources:
    288  *
    289  *	Attempt to grow resources for a channel.  If this succeeds,
    290  *	we allocate one for our caller.
    291  *
    292  *	NOTE: Must be called with channel lock held
    293  */
    294 static inline int
    295 scsipi_grow_resources(struct scsipi_channel *chan)
    296 {
    297 
    298 	if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
    299 		if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
    300 			mutex_exit(chan_mtx(chan));
    301 			scsipi_adapter_request(chan,
    302 			    ADAPTER_REQ_GROW_RESOURCES, NULL);
    303 			mutex_enter(chan_mtx(chan));
    304 			return (scsipi_get_resource(chan));
    305 		}
    306 		/*
     307 		 * Ask the channel thread to do it.  It'll have to thaw
     308 		 * the queue.
    309 		 */
    310 		scsipi_channel_freeze_locked(chan, 1);
    311 		chan->chan_tflags |= SCSIPI_CHANT_GROWRES;
    312 		cv_broadcast(chan_cv_complete(chan));
    313 		return (0);
    314 	}
    315 
    316 	return (0);
    317 }
    318 
    319 /*
    320  * scsipi_put_resource:
    321  *
    322  *	Free a single xfer `resource' to the channel.
    323  *
    324  *	NOTE: Must be called with channel lock held
    325  */
    326 static void
    327 scsipi_put_resource(struct scsipi_channel *chan)
    328 {
    329 	struct scsipi_adapter *adapt = chan->chan_adapter;
    330 
    331 	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
    332 		chan->chan_openings++;
    333 	else
    334 		adapt->adapt_openings++;
    335 }
    336 
    337 /*
    338  * scsipi_get_tag:
    339  *
    340  *	Get a tag ID for the specified xfer.
    341  *
    342  *	NOTE: Must be called with channel lock held
    343  */
    344 static void
    345 scsipi_get_tag(struct scsipi_xfer *xs)
    346 {
    347 	struct scsipi_periph *periph = xs->xs_periph;
    348 	int bit, tag;
    349 	u_int word;
    350 
    351 	bit = 0;	/* XXX gcc */
    352 	for (word = 0; word < PERIPH_NTAGWORDS; word++) {
    353 		bit = ffs(periph->periph_freetags[word]);
    354 		if (bit != 0)
    355 			break;
    356 	}
    357 #ifdef DIAGNOSTIC
    358 	if (word == PERIPH_NTAGWORDS) {
    359 		scsipi_printaddr(periph);
    360 		printf("no free tags\n");
    361 		panic("scsipi_get_tag");
    362 	}
    363 #endif
    364 
    365 	bit -= 1;
    366 	periph->periph_freetags[word] &= ~(1 << bit);
    367 	tag = (word << 5) | bit;
    368 
    369 	/* XXX Should eventually disallow this completely. */
    370 	if (tag >= periph->periph_openings) {
    371 		scsipi_printaddr(periph);
    372 		printf("WARNING: tag %d greater than available openings %d\n",
    373 		    tag, periph->periph_openings);
    374 	}
    375 
    376 	xs->xs_tag_id = tag;
    377 }
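
/*
 * Worked example (illustrative): the free-tag bitmap packs 32 tags per
 * word, so tag 37 lives in word 37 >> 5 == 1, bit 37 & 0x1f == 5.
 * scsipi_get_tag() finds a free tag with ffs(), which returns the
 * 1-based position of the lowest set bit (hence the `bit -= 1' above),
 * clears that bit, and encodes the tag as (word << 5) | bit;
 * scsipi_put_tag() reverses the arithmetic and sets the bit again.
 */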
    378 
    379 /*
    380  * scsipi_put_tag:
    381  *
    382  *	Put the tag ID for the specified xfer back into the pool.
    383  *
    384  *	NOTE: Must be called with channel lock held
    385  */
    386 static void
    387 scsipi_put_tag(struct scsipi_xfer *xs)
    388 {
    389 	struct scsipi_periph *periph = xs->xs_periph;
    390 	int word, bit;
    391 
    392 	word = xs->xs_tag_id >> 5;
    393 	bit = xs->xs_tag_id & 0x1f;
    394 
    395 	periph->periph_freetags[word] |= (1 << bit);
    396 }
    397 
    398 /*
    399  * scsipi_get_xs:
    400  *
    401  *	Allocate an xfer descriptor and associate it with the
    402  *	specified peripheral.  If the peripheral has no more
    403  *	available command openings, we either block waiting for
    404  *	one to become available, or fail.
    405  *
    406  *	When this routine is called with the channel lock held
    407  *	the flags must include XS_CTL_NOSLEEP.
    408  */
    409 struct scsipi_xfer *
    410 scsipi_get_xs(struct scsipi_periph *periph, int flags)
    411 {
    412 	struct scsipi_xfer *xs;
    413 
    414 	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));
    415 
    416 	KASSERT(!cold);
    417 
    418 #ifdef DIAGNOSTIC
    419 	/*
    420 	 * URGENT commands can never be ASYNC.
    421 	 */
    422 	if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
    423 	    (XS_CTL_URGENT|XS_CTL_ASYNC)) {
    424 		scsipi_printaddr(periph);
    425 		printf("URGENT and ASYNC\n");
    426 		panic("scsipi_get_xs");
    427 	}
    428 #endif
    429 
    430 	/*
    431 	 * Wait for a command opening to become available.  Rules:
    432 	 *
    433 	 *	- All xfers must wait for an available opening.
    434 	 *	  Exception: URGENT xfers can proceed when
    435 	 *	  active == openings, because we use the opening
    436 	 *	  of the command we're recovering for.
    437 	 *	- if the periph has sense pending, only URGENT & REQSENSE
    438 	 *	  xfers may proceed.
    439 	 *
    440 	 *	- If the periph is recovering, only URGENT xfers may
    441 	 *	  proceed.
    442 	 *
    443 	 *	- If the periph is currently executing a recovery
    444 	 *	  command, URGENT commands must block, because only
    445 	 *	  one recovery command can execute at a time.
    446 	 */
    447 	for (;;) {
    448 		if (flags & XS_CTL_URGENT) {
    449 			if (periph->periph_active > periph->periph_openings)
    450 				goto wait_for_opening;
    451 			if (periph->periph_flags & PERIPH_SENSE) {
    452 				if ((flags & XS_CTL_REQSENSE) == 0)
    453 					goto wait_for_opening;
    454 			} else {
    455 				if ((periph->periph_flags &
    456 				    PERIPH_RECOVERY_ACTIVE) != 0)
    457 					goto wait_for_opening;
    458 				periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
    459 			}
    460 			break;
    461 		}
    462 		if (periph->periph_active >= periph->periph_openings ||
    463 		    (periph->periph_flags & PERIPH_RECOVERING) != 0)
    464 			goto wait_for_opening;
    465 		periph->periph_active++;
    466 		break;
    467 
    468  wait_for_opening:
    469 		if (flags & XS_CTL_NOSLEEP) {
    470 			return (NULL);
    471 		}
    472 		SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
    473 		mutex_enter(chan_mtx(periph->periph_channel));
    474 		periph->periph_flags |= PERIPH_WAITING;
    475 		cv_wait(periph_cv_periph(periph),
    476 		    chan_mtx(periph->periph_channel));
    477 		mutex_exit(chan_mtx(periph->periph_channel));
    478 	}
    479 
    480 	SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
    481 	xs = pool_get(&scsipi_xfer_pool,
    482 	    ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
    483 	if (xs == NULL) {
    484 		if (flags & XS_CTL_URGENT) {
    485 			if ((flags & XS_CTL_REQSENSE) == 0)
    486 				periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
    487 		} else
    488 			periph->periph_active--;
    489 		scsipi_printaddr(periph);
    490 		printf("unable to allocate %sscsipi_xfer\n",
    491 		    (flags & XS_CTL_URGENT) ? "URGENT " : "");
    492 	}
    493 
    494 	SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));
    495 
    496 	if (xs != NULL) {
    497 		memset(xs, 0, sizeof(*xs));
    498 		callout_init(&xs->xs_callout, 0);
    499 		xs->xs_periph = periph;
    500 		xs->xs_control = flags;
    501 		xs->xs_status = 0;
    502 		if ((flags & XS_CTL_NOSLEEP) == 0)
    503 			mutex_enter(chan_mtx(periph->periph_channel));
    504 		TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
    505 		if ((flags & XS_CTL_NOSLEEP) == 0)
    506 			mutex_exit(chan_mtx(periph->periph_channel));
    507 	}
    508 	return (xs);
    509 }
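
/*
 * Usage sketch (illustrative only): a driver allocating an xfer from a
 * context that may not sleep (e.g. with the channel lock held) would do
 *
 *	xs = scsipi_get_xs(periph, XS_CTL_NOSLEEP | XS_CTL_ASYNC);
 *	if (xs == NULL)
 *		return;
 *
 * and retry later when an opening frees up; a thread-context caller may
 * omit XS_CTL_NOSLEEP and instead block in cv_wait()/pool_get() until
 * an opening and a pool item appear.
 */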
    510 
    511 /*
    512  * scsipi_put_xs:
    513  *
    514  *	Release an xfer descriptor, decreasing the outstanding command
    515  *	count for the peripheral.  If there is a thread waiting for
    516  *	an opening, wake it up.  If not, kick any queued I/O the
    517  *	peripheral may have.
    518  *
    519  *	NOTE: Must be called with channel lock held
    520  */
    521 void
    522 scsipi_put_xs(struct scsipi_xfer *xs)
    523 {
    524 	struct scsipi_periph *periph = xs->xs_periph;
    525 	int flags = xs->xs_control;
    526 
    527 	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));
    528 
    529 	TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
    530 	callout_destroy(&xs->xs_callout);
    531 	pool_put(&scsipi_xfer_pool, xs);
    532 
    533 #ifdef DIAGNOSTIC
    534 	if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
    535 	    periph->periph_active == 0) {
    536 		scsipi_printaddr(periph);
     537 		printf("recovery without a command to recover for\n");
    538 		panic("scsipi_put_xs");
    539 	}
    540 #endif
    541 
    542 	if (flags & XS_CTL_URGENT) {
    543 		if ((flags & XS_CTL_REQSENSE) == 0)
    544 			periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
    545 	} else
    546 		periph->periph_active--;
    547 	if (periph->periph_active == 0 &&
    548 	    (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
    549 		periph->periph_flags &= ~PERIPH_WAITDRAIN;
    550 		cv_broadcast(periph_cv_active(periph));
    551 	}
    552 
    553 	if (periph->periph_flags & PERIPH_WAITING) {
    554 		periph->periph_flags &= ~PERIPH_WAITING;
    555 		cv_broadcast(periph_cv_periph(periph));
    556 	} else {
    557 		if (periph->periph_switch->psw_start != NULL &&
    558 		    device_is_active(periph->periph_dev)) {
    559 			SC_DEBUG(periph, SCSIPI_DB2,
    560 			    ("calling private start()\n"));
    561 			(*periph->periph_switch->psw_start)(periph);
    562 		}
    563 	}
    564 }
    565 
    566 /*
    567  * scsipi_channel_freeze:
    568  *
    569  *	Freeze a channel's xfer queue.
    570  */
    571 void
    572 scsipi_channel_freeze(struct scsipi_channel *chan, int count)
    573 {
    574 	bool lock = chan_running(chan);
    575 
    576 	if (lock)
    577 		mutex_enter(chan_mtx(chan));
    578 	chan->chan_qfreeze += count;
    579 	if (lock)
    580 		mutex_exit(chan_mtx(chan));
    581 }
    582 
    583 static void
    584 scsipi_channel_freeze_locked(struct scsipi_channel *chan, int count)
    585 {
    586 
    587 	chan->chan_qfreeze += count;
    588 }
    589 
    590 /*
    591  * scsipi_channel_thaw:
    592  *
    593  *	Thaw a channel's xfer queue.
    594  */
    595 void
    596 scsipi_channel_thaw(struct scsipi_channel *chan, int count)
    597 {
    598 	bool lock = chan_running(chan);
    599 
    600 	if (lock)
    601 		mutex_enter(chan_mtx(chan));
    602 	chan->chan_qfreeze -= count;
    603 	/*
    604 	 * Don't let the freeze count go negative.
    605 	 *
    606 	 * Presumably the adapter driver could keep track of this,
    607 	 * but it might just be easier to do this here so as to allow
    608 	 * multiple callers, including those outside the adapter driver.
    609 	 */
    610 	if (chan->chan_qfreeze < 0) {
    611 		chan->chan_qfreeze = 0;
    612 	}
    613 	if (lock)
    614 		mutex_exit(chan_mtx(chan));
    615 
    616 	/*
     617 	 * Nothing more to do until the channel is actually running.
    618 	 */
    619 	if (!lock)
    620 		return;
    621 
    622 	/*
    623 	 * Kick the channel's queue here.  Note, we may be running in
    624 	 * interrupt context (softclock or HBA's interrupt), so the adapter
    625 	 * driver had better not sleep.
    626 	 */
    627 	if (chan->chan_qfreeze == 0)
    628 		scsipi_run_queue(chan);
    629 }
    630 
    631 /*
    632  * scsipi_channel_timed_thaw:
    633  *
    634  *	Thaw a channel after some time has expired. This will also
    635  * 	run the channel's queue if the freeze count has reached 0.
    636  */
    637 void
    638 scsipi_channel_timed_thaw(void *arg)
    639 {
    640 	struct scsipi_channel *chan = arg;
    641 
    642 	scsipi_channel_thaw(chan, 1);
    643 }
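
/*
 * Usage sketch (illustrative only): an adapter driver stalling the
 * queue for one second would pair a freeze with a timed thaw driven by
 * its own callout (`sc_thaw_ch' is the driver's own field, not
 * something defined here):
 *
 *	scsipi_channel_freeze(chan, 1);
 *	callout_reset(&sc->sc_thaw_ch, hz, scsipi_channel_timed_thaw,
 *	    chan);
 */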
    644 
    645 /*
     646  * scsipi_periph_freeze, scsipi_periph_freeze_locked:
    647  *
    648  *	Freeze a device's xfer queue.
    649  */
    650 void
    651 scsipi_periph_freeze_locked(struct scsipi_periph *periph, int count)
    652 {
    653 
    654 	periph->periph_qfreeze += count;
    655 }
    656 
    657 /*
     658  * scsipi_periph_thaw, scsipi_periph_thaw_locked:
    659  *
    660  *	Thaw a device's xfer queue.
    661  */
    662 void
    663 scsipi_periph_thaw_locked(struct scsipi_periph *periph, int count)
    664 {
    665 
    666 	periph->periph_qfreeze -= count;
    667 #ifdef DIAGNOSTIC
    668 	if (periph->periph_qfreeze < 0) {
    669 		static const char pc[] = "periph freeze count < 0";
    670 		scsipi_printaddr(periph);
    671 		printf("%s\n", pc);
    672 		panic(pc);
    673 	}
    674 #endif
    675 	if (periph->periph_qfreeze == 0 &&
    676 	    (periph->periph_flags & PERIPH_WAITING) != 0)
    677 		cv_broadcast(periph_cv_periph(periph));
    678 }
    679 
    680 void
    681 scsipi_periph_freeze(struct scsipi_periph *periph, int count)
    682 {
    683 
    684 	mutex_enter(chan_mtx(periph->periph_channel));
    685 	scsipi_periph_freeze_locked(periph, count);
    686 	mutex_exit(chan_mtx(periph->periph_channel));
    687 }
    688 
    689 void
    690 scsipi_periph_thaw(struct scsipi_periph *periph, int count)
    691 {
    692 
    693 	mutex_enter(chan_mtx(periph->periph_channel));
    694 	scsipi_periph_thaw_locked(periph, count);
    695 	mutex_exit(chan_mtx(periph->periph_channel));
    696 }
    697 
    698 /*
    699  * scsipi_periph_timed_thaw:
    700  *
    701  *	Thaw a device after some time has expired.
    702  */
    703 void
    704 scsipi_periph_timed_thaw(void *arg)
    705 {
    706 	struct scsipi_periph *periph = arg;
    707 	struct scsipi_channel *chan = periph->periph_channel;
    708 
    709 	callout_stop(&periph->periph_callout);
    710 
    711 	mutex_enter(chan_mtx(chan));
    712 	scsipi_periph_thaw_locked(periph, 1);
    713 	if ((periph->periph_channel->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
    714 		/*
    715 		 * Kick the channel's queue here.  Note, we're running in
    716 		 * interrupt context (softclock), so the adapter driver
    717 		 * had better not sleep.
    718 		 */
    719 		mutex_exit(chan_mtx(chan));
    720 		scsipi_run_queue(periph->periph_channel);
    721 	} else {
    722 		/*
    723 		 * Tell the completion thread to kick the channel's queue here.
    724 		 */
    725 		periph->periph_channel->chan_tflags |= SCSIPI_CHANT_KICK;
    726 		cv_broadcast(chan_cv_complete(chan));
    727 		mutex_exit(chan_mtx(chan));
    728 	}
    729 }
    730 
    731 /*
    732  * scsipi_wait_drain:
    733  *
    734  *	Wait for a periph's pending xfers to drain.
    735  */
    736 void
    737 scsipi_wait_drain(struct scsipi_periph *periph)
    738 {
    739 	struct scsipi_channel *chan = periph->periph_channel;
    740 
    741 	mutex_enter(chan_mtx(chan));
    742 	while (periph->periph_active != 0) {
    743 		periph->periph_flags |= PERIPH_WAITDRAIN;
    744 		cv_wait(periph_cv_active(periph), chan_mtx(chan));
    745 	}
    746 	mutex_exit(chan_mtx(chan));
    747 }
    748 
    749 /*
    750  * scsipi_kill_pending:
    751  *
    752  *	Kill off all pending xfers for a periph.
    753  *
    754  *	NOTE: Must be called with channel lock held
    755  */
    756 void
    757 scsipi_kill_pending(struct scsipi_periph *periph)
    758 {
    759 	struct scsipi_channel *chan = periph->periph_channel;
    760 
    761 	(*chan->chan_bustype->bustype_kill_pending)(periph);
    762 	while (periph->periph_active != 0) {
    763 		periph->periph_flags |= PERIPH_WAITDRAIN;
    764 		cv_wait(periph_cv_active(periph), chan_mtx(chan));
    765 	}
    766 }
    767 
    768 /*
    769  * scsipi_print_cdb:
     770  * prints a command descriptor block (for debugging purposes, error messages,
    771  * SCSIVERBOSE, ...)
    772  */
    773 void
    774 scsipi_print_cdb(struct scsipi_generic *cmd)
    775 {
    776 	int i, j;
    777 
    778  	printf("0x%02x", cmd->opcode);
    779 
    780  	switch (CDB_GROUPID(cmd->opcode)) {
    781  	case CDB_GROUPID_0:
    782  		j = CDB_GROUP0;
    783  		break;
    784  	case CDB_GROUPID_1:
    785  		j = CDB_GROUP1;
    786  		break;
    787  	case CDB_GROUPID_2:
    788  		j = CDB_GROUP2;
    789  		break;
    790  	case CDB_GROUPID_3:
    791  		j = CDB_GROUP3;
    792  		break;
    793  	case CDB_GROUPID_4:
    794  		j = CDB_GROUP4;
    795  		break;
    796  	case CDB_GROUPID_5:
    797  		j = CDB_GROUP5;
    798  		break;
    799  	case CDB_GROUPID_6:
    800  		j = CDB_GROUP6;
    801  		break;
    802  	case CDB_GROUPID_7:
    803  		j = CDB_GROUP7;
    804  		break;
    805  	default:
    806  		j = 0;
    807  	}
    808  	if (j == 0)
    809  		j = sizeof (cmd->bytes);
    810  	for (i = 0; i < j-1; i++) /* already done the opcode */
    811  		printf(" %02x", cmd->bytes[i]);
    812 }
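
/*
 * Example output (illustrative): a Group 0 READ(6) for LBA 0x12345 and
 * a transfer length of 8 blocks is a CDB_GROUP0 (6-byte) CDB and would
 * print as
 *
 *	0x08 01 23 45 08 00
 *
 * i.e. the opcode in 0x%02x form followed by the remaining five bytes.
 */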
    813 
    814 /*
    815  * scsipi_interpret_sense:
    816  *
    817  *	Look at the returned sense and act on the error, determining
    818  *	the unix error number to pass back.  (0 = report no error)
    819  *
     820  *	NOTE: If we return ERESTART, we are expected to have
    821  *	thawed the device!
    822  *
    823  *	THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
    824  */
    825 int
    826 scsipi_interpret_sense(struct scsipi_xfer *xs)
    827 {
    828 	struct scsi_sense_data *sense;
    829 	struct scsipi_periph *periph = xs->xs_periph;
    830 	u_int8_t key;
    831 	int error;
    832 	u_int32_t info;
    833 	static const char *error_mes[] = {
    834 		"soft error (corrected)",
    835 		"not ready", "medium error",
    836 		"non-media hardware failure", "illegal request",
    837 		"unit attention", "readonly device",
    838 		"no data found", "vendor unique",
    839 		"copy aborted", "command aborted",
    840 		"search returned equal", "volume overflow",
    841 		"verify miscompare", "unknown error key"
    842 	};
    843 
    844 	sense = &xs->sense.scsi_sense;
    845 #ifdef SCSIPI_DEBUG
    846 	if (periph->periph_flags & SCSIPI_DB1) {
    847 		int count;
    848 		scsipi_printaddr(periph);
    849 		printf(" sense debug information:\n");
    850 		printf("\tcode 0x%x valid %d\n",
    851 			SSD_RCODE(sense->response_code),
    852 			sense->response_code & SSD_RCODE_VALID ? 1 : 0);
    853 		printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
    854 			sense->segment,
    855 			SSD_SENSE_KEY(sense->flags),
    856 			sense->flags & SSD_ILI ? 1 : 0,
    857 			sense->flags & SSD_EOM ? 1 : 0,
    858 			sense->flags & SSD_FILEMARK ? 1 : 0);
    859 		printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d "
    860 			"extra bytes\n",
    861 			sense->info[0],
    862 			sense->info[1],
    863 			sense->info[2],
    864 			sense->info[3],
    865 			sense->extra_len);
    866 		printf("\textra: ");
    867 		for (count = 0; count < SSD_ADD_BYTES_LIM(sense); count++)
    868 			printf("0x%x ", sense->csi[count]);
    869 		printf("\n");
    870 	}
    871 #endif
    872 
    873 	/*
    874 	 * If the periph has its own error handler, call it first.
    875 	 * If it returns a legit error value, return that, otherwise
    876 	 * it wants us to continue with normal error processing.
    877 	 */
    878 	if (periph->periph_switch->psw_error != NULL) {
    879 		SC_DEBUG(periph, SCSIPI_DB2,
    880 		    ("calling private err_handler()\n"));
    881 		error = (*periph->periph_switch->psw_error)(xs);
    882 		if (error != EJUSTRETURN)
    883 			return (error);
    884 	}
    885 	/* otherwise use the default */
    886 	switch (SSD_RCODE(sense->response_code)) {
    887 
    888 		/*
    889 		 * Old SCSI-1 and SASI devices respond with
    890 		 * codes other than 70.
    891 		 */
    892 	case 0x00:		/* no error (command completed OK) */
    893 		return (0);
    894 	case 0x04:		/* drive not ready after it was selected */
    895 		if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
    896 			periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
    897 		if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
    898 			return (0);
    899 		/* XXX - display some sort of error here? */
    900 		return (EIO);
    901 	case 0x20:		/* invalid command */
    902 		if ((xs->xs_control &
    903 		     XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
    904 			return (0);
    905 		return (EINVAL);
    906 	case 0x25:		/* invalid LUN (Adaptec ACB-4000) */
    907 		return (EACCES);
    908 
    909 		/*
    910 		 * If it's code 70, use the extended stuff and
    911 		 * interpret the key
    912 		 */
    913 	case 0x71:		/* delayed error */
    914 		scsipi_printaddr(periph);
    915 		key = SSD_SENSE_KEY(sense->flags);
    916 		printf(" DEFERRED ERROR, key = 0x%x\n", key);
    917 		/* FALLTHROUGH */
    918 	case 0x70:
    919 		if ((sense->response_code & SSD_RCODE_VALID) != 0)
    920 			info = _4btol(sense->info);
    921 		else
    922 			info = 0;
    923 		key = SSD_SENSE_KEY(sense->flags);
    924 
    925 		switch (key) {
    926 		case SKEY_NO_SENSE:
    927 		case SKEY_RECOVERED_ERROR:
    928 			if (xs->resid == xs->datalen && xs->datalen) {
    929 				/*
    930 				 * Why is this here?
    931 				 */
    932 				xs->resid = 0;	/* not short read */
    933 			}
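			/* FALLTHROUGH */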
    934 		case SKEY_EQUAL:
    935 			error = 0;
    936 			break;
    937 		case SKEY_NOT_READY:
    938 			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
    939 				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
    940 			if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
    941 				return (0);
    942 			if (sense->asc == 0x3A) {
    943 				error = ENODEV; /* Medium not present */
    944 				if (xs->xs_control & XS_CTL_SILENT_NODEV)
    945 					return (error);
    946 			} else
    947 				error = EIO;
    948 			if ((xs->xs_control & XS_CTL_SILENT) != 0)
    949 				return (error);
    950 			break;
    951 		case SKEY_ILLEGAL_REQUEST:
    952 			if ((xs->xs_control &
    953 			     XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
    954 				return (0);
    955 			/*
    956 			 * Handle the case where a device reports
    957 			 * Logical Unit Not Supported during discovery.
    958 			 */
    959 			if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
    960 			    sense->asc == 0x25 &&
    961 			    sense->ascq == 0x00)
    962 				return (EINVAL);
    963 			if ((xs->xs_control & XS_CTL_SILENT) != 0)
    964 				return (EIO);
    965 			error = EINVAL;
    966 			break;
    967 		case SKEY_UNIT_ATTENTION:
    968 			if (sense->asc == 0x29 &&
    969 			    sense->ascq == 0x00) {
    970 				/* device or bus reset */
    971 				return (ERESTART);
    972 			}
    973 			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
    974 				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
    975 			if ((xs->xs_control &
    976 			     XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
    977 				/* XXX Should reupload any transient state. */
    978 				(periph->periph_flags &
    979 				 PERIPH_REMOVABLE) == 0) {
    980 				return (ERESTART);
    981 			}
    982 			if ((xs->xs_control & XS_CTL_SILENT) != 0)
    983 				return (EIO);
    984 			error = EIO;
    985 			break;
    986 		case SKEY_DATA_PROTECT:
    987 			error = EROFS;
    988 			break;
    989 		case SKEY_BLANK_CHECK:
    990 			error = 0;
    991 			break;
    992 		case SKEY_ABORTED_COMMAND:
    993 			if (xs->xs_retries != 0) {
    994 				xs->xs_retries--;
    995 				error = ERESTART;
    996 			} else
    997 				error = EIO;
    998 			break;
    999 		case SKEY_VOLUME_OVERFLOW:
   1000 			error = ENOSPC;
   1001 			break;
   1002 		default:
   1003 			error = EIO;
   1004 			break;
   1005 		}
   1006 
   1007 		/* Print verbose decode if appropriate and possible */
   1008 		if ((key == 0) ||
   1009 		    ((xs->xs_control & XS_CTL_SILENT) != 0) ||
   1010 		    (scsipi_print_sense(xs, 0) != 0))
   1011 			return (error);
   1012 
   1013 		/* Print brief(er) sense information */
   1014 		scsipi_printaddr(periph);
   1015 		printf("%s", error_mes[key - 1]);
   1016 		if ((sense->response_code & SSD_RCODE_VALID) != 0) {
   1017 			switch (key) {
   1018 			case SKEY_NOT_READY:
   1019 			case SKEY_ILLEGAL_REQUEST:
   1020 			case SKEY_UNIT_ATTENTION:
   1021 			case SKEY_DATA_PROTECT:
   1022 				break;
   1023 			case SKEY_BLANK_CHECK:
   1024 				printf(", requested size: %d (decimal)",
   1025 				    info);
   1026 				break;
   1027 			case SKEY_ABORTED_COMMAND:
   1028 				if (xs->xs_retries)
   1029 					printf(", retrying");
   1030 				printf(", cmd 0x%x, info 0x%x",
   1031 				    xs->cmd->opcode, info);
   1032 				break;
   1033 			default:
   1034 				printf(", info = %d (decimal)", info);
   1035 			}
   1036 		}
   1037 		if (sense->extra_len != 0) {
   1038 			int n;
   1039 			printf(", data =");
   1040 			for (n = 0; n < sense->extra_len; n++)
   1041 				printf(" %02x",
   1042 				    sense->csi[n]);
   1043 		}
   1044 		printf("\n");
   1045 		return (error);
   1046 
   1047 	/*
   1048 	 * Some other code, just report it
   1049 	 */
   1050 	default:
   1051 #if    defined(SCSIDEBUG) || defined(DEBUG)
   1052 	{
   1053 		static const char *uc = "undecodable sense error";
   1054 		int i;
   1055 		u_int8_t *cptr = (u_int8_t *) sense;
   1056 		scsipi_printaddr(periph);
   1057 		if (xs->cmd == &xs->cmdstore) {
   1058 			printf("%s for opcode 0x%x, data=",
   1059 			    uc, xs->cmdstore.opcode);
   1060 		} else {
   1061 			printf("%s, data=", uc);
   1062 		}
    1063 		for (i = 0; i < sizeof (*sense); i++)	/* not sizeof (sense), which is just the pointer */
   1064 			printf(" 0x%02x", *(cptr++) & 0xff);
   1065 		printf("\n");
   1066 	}
   1067 #else
   1068 		scsipi_printaddr(periph);
   1069 		printf("Sense Error Code 0x%x",
   1070 			SSD_RCODE(sense->response_code));
   1071 		if ((sense->response_code & SSD_RCODE_VALID) != 0) {
   1072 			struct scsi_sense_data_unextended *usense =
   1073 			    (struct scsi_sense_data_unextended *)sense;
   1074 			printf(" at block no. %d (decimal)",
   1075 			    _3btol(usense->block));
   1076 		}
   1077 		printf("\n");
   1078 #endif
   1079 		return (EIO);
   1080 	}
   1081 }
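
/*
 * Worked example (illustrative): a removable drive with no medium
 * typically returns response code 0x70 with key SKEY_NOT_READY and
 * asc/ascq 0x3A/0x00 ("medium not present").  The code above then
 * clears PERIPH_MEDIA_LOADED and maps the condition to ENODEV,
 * returning quietly if XS_CTL_SILENT_NODEV is set; other NOT READY
 * conditions map to EIO instead.
 */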
   1082 
   1083 /*
   1084  * scsipi_test_unit_ready:
   1085  *
   1086  *	Issue a `test unit ready' request.
   1087  */
   1088 int
   1089 scsipi_test_unit_ready(struct scsipi_periph *periph, int flags)
   1090 {
   1091 	struct scsi_test_unit_ready cmd;
   1092 	int retries;
   1093 
   1094 	/* some ATAPI drives don't support TEST UNIT READY. Sigh */
   1095 	if (periph->periph_quirks & PQUIRK_NOTUR)
   1096 		return (0);
   1097 
   1098 	if (flags & XS_CTL_DISCOVERY)
   1099 		retries = 0;
   1100 	else
   1101 		retries = SCSIPIRETRIES;
   1102 
   1103 	memset(&cmd, 0, sizeof(cmd));
   1104 	cmd.opcode = SCSI_TEST_UNIT_READY;
   1105 
   1106 	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
   1107 	    retries, 10000, NULL, flags));
   1108 }
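
/*
 * Usage sketch (illustrative only): a periph driver polling for media
 * at open time might issue
 *
 *	error = scsipi_test_unit_ready(periph,
 *	    XS_CTL_IGNORE_ILLEGAL_REQUEST | XS_CTL_IGNORE_MEDIA_CHANGE |
 *	    XS_CTL_IGNORE_NOT_READY);
 *
 * where the XS_CTL_IGNORE_* flags make scsipi_interpret_sense()
 * suppress the corresponding sense conditions instead of reporting
 * them as errors.
 */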
   1109 
   1110 static const struct scsipi_inquiry3_pattern {
   1111 	const char vendor[8];
   1112 	const char product[16];
   1113 	const char revision[4];
   1114 } scsipi_inquiry3_quirk[] = {
   1115 	{ "ES-6600 ", "", "" },
   1116 };
   1117 
   1118 static int
   1119 scsipi_inquiry3_ok(const struct scsipi_inquiry_data *ib)
   1120 {
   1121 	for (size_t i = 0; i < __arraycount(scsipi_inquiry3_quirk); i++) {
   1122 		const struct scsipi_inquiry3_pattern *q =
   1123 		    &scsipi_inquiry3_quirk[i];
   1124 #define MATCH(field) \
   1125     (q->field[0] ? memcmp(ib->field, q->field, sizeof(ib->field)) == 0 : 1)
   1126 		if (MATCH(vendor) && MATCH(product) && MATCH(revision))
   1127 			return 0;
   1128 	}
   1129 	return 1;
   1130 }
   1131 
   1132 /*
   1133  * scsipi_inquire:
   1134  *
   1135  *	Ask the device about itself.
   1136  */
   1137 int
   1138 scsipi_inquire(struct scsipi_periph *periph, struct scsipi_inquiry_data *inqbuf,
   1139     int flags)
   1140 {
   1141 	struct scsipi_inquiry cmd;
   1142 	int error;
   1143 	int retries;
   1144 
   1145 	if (flags & XS_CTL_DISCOVERY)
   1146 		retries = 0;
   1147 	else
   1148 		retries = SCSIPIRETRIES;
   1149 
   1150 	/*
   1151 	 * If we request more data than the device can provide, it SHOULD just
   1152 	 * return a short response.  However, some devices error with an
   1153 	 * ILLEGAL REQUEST sense code, and yet others have even more special
    1154 	 * failure modes (such as the GL641USB flash adapter, which goes loony
   1155 	 * and sends corrupted CRCs).  To work around this, and to bring our
   1156 	 * behavior more in line with other OSes, we do a shorter inquiry,
   1157 	 * covering all the SCSI-2 information, first, and then request more
   1158 	 * data iff the "additional length" field indicates there is more.
   1159 	 * - mycroft, 2003/10/16
   1160 	 */
   1161 	memset(&cmd, 0, sizeof(cmd));
   1162 	cmd.opcode = INQUIRY;
   1163 	cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI2;
   1164 	error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
   1165 	    (void *)inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI2, retries,
   1166 	    10000, NULL, flags | XS_CTL_DATA_IN);
   1167 	if (!error &&
   1168 	    inqbuf->additional_length > SCSIPI_INQUIRY_LENGTH_SCSI2 - 4) {
   1169 	    if (scsipi_inquiry3_ok(inqbuf)) {
   1170 #if 0
   1171 printf("inquire: addlen=%d, retrying\n", inqbuf->additional_length);
   1172 #endif
   1173 		cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI3;
   1174 		error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
   1175 		    (void *)inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI3, retries,
   1176 		    10000, NULL, flags | XS_CTL_DATA_IN);
   1177 #if 0
   1178 printf("inquire: error=%d\n", error);
   1179 #endif
   1180 	    }
   1181 	}
   1182 
   1183 #ifdef SCSI_OLD_NOINQUIRY
   1184 	/*
   1185 	 * Kludge for the Adaptec ACB-4000 SCSI->MFM translator.
   1186 	 * This board doesn't support the INQUIRY command at all.
   1187 	 */
   1188 	if (error == EINVAL || error == EACCES) {
   1189 		/*
   1190 		 * Conjure up an INQUIRY response.
   1191 		 */
   1192 		inqbuf->device = (error == EINVAL ?
   1193 			 SID_QUAL_LU_PRESENT :
   1194 			 SID_QUAL_LU_NOTPRESENT) | T_DIRECT;
   1195 		inqbuf->dev_qual2 = 0;
   1196 		inqbuf->version = 0;
   1197 		inqbuf->response_format = SID_FORMAT_SCSI1;
   1198 		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
   1199 		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
   1200 		memcpy(inqbuf->vendor, "ADAPTEC ACB-4000            ", 28);
   1201 		error = 0;
   1202 	}
   1203 
   1204 	/*
   1205 	 * Kludge for the Emulex MT-02 SCSI->QIC translator.
   1206 	 * This board gives an empty response to an INQUIRY command.
   1207 	 */
   1208 	else if (error == 0 &&
   1209 	    inqbuf->device == (SID_QUAL_LU_PRESENT | T_DIRECT) &&
   1210 	    inqbuf->dev_qual2 == 0 &&
   1211 	    inqbuf->version == 0 &&
   1212 	    inqbuf->response_format == SID_FORMAT_SCSI1) {
   1213 		/*
   1214 		 * Fill out the INQUIRY response.
   1215 		 */
   1216 		inqbuf->device = (SID_QUAL_LU_PRESENT | T_SEQUENTIAL);
   1217 		inqbuf->dev_qual2 = SID_REMOVABLE;
   1218 		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
   1219 		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
   1220 		memcpy(inqbuf->vendor, "EMULEX  MT-02 QIC           ", 28);
   1221 	}
   1222 #endif /* SCSI_OLD_NOINQUIRY */
   1223 
   1224 	return error;
   1225 }
   1226 
   1227 /*
   1228  * scsipi_prevent:
   1229  *
   1230  *	Prevent or allow the user to remove the media
   1231  */
   1232 int
   1233 scsipi_prevent(struct scsipi_periph *periph, int type, int flags)
   1234 {
   1235 	struct scsi_prevent_allow_medium_removal cmd;
   1236 
   1237 	if (periph->periph_quirks & PQUIRK_NODOORLOCK)
   1238 		return 0;
   1239 
   1240 	memset(&cmd, 0, sizeof(cmd));
   1241 	cmd.opcode = SCSI_PREVENT_ALLOW_MEDIUM_REMOVAL;
   1242 	cmd.how = type;
   1243 
   1244 	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
   1245 	    SCSIPIRETRIES, 5000, NULL, flags));
   1246 }
   1247 
   1248 /*
   1249  * scsipi_start:
   1250  *
   1251  *	Send a START UNIT.
   1252  */
   1253 int
   1254 scsipi_start(struct scsipi_periph *periph, int type, int flags)
   1255 {
   1256 	struct scsipi_start_stop cmd;
   1257 
   1258 	memset(&cmd, 0, sizeof(cmd));
   1259 	cmd.opcode = START_STOP;
   1260 	cmd.byte2 = 0x00;
   1261 	cmd.how = type;
   1262 
   1263 	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
   1264 	    SCSIPIRETRIES, (type & SSS_START) ? 60000 : 10000, NULL, flags));
   1265 }
   1266 
   1267 /*
   1268  * scsipi_mode_sense, scsipi_mode_sense_big:
   1269  *	get a sense page from a device
   1270  */
   1271 
   1272 int
   1273 scsipi_mode_sense(struct scsipi_periph *periph, int byte2, int page,
   1274     struct scsi_mode_parameter_header_6 *data, int len, int flags, int retries,
   1275     int timeout)
   1276 {
   1277 	struct scsi_mode_sense_6 cmd;
   1278 
   1279 	memset(&cmd, 0, sizeof(cmd));
   1280 	cmd.opcode = SCSI_MODE_SENSE_6;
   1281 	cmd.byte2 = byte2;
   1282 	cmd.page = page;
   1283 	cmd.length = len & 0xff;
   1284 
   1285 	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
   1286 	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_IN));
   1287 }
   1288 
   1289 int
   1290 scsipi_mode_sense_big(struct scsipi_periph *periph, int byte2, int page,
   1291     struct scsi_mode_parameter_header_10 *data, int len, int flags, int retries,
   1292     int timeout)
   1293 {
   1294 	struct scsi_mode_sense_10 cmd;
   1295 
   1296 	memset(&cmd, 0, sizeof(cmd));
   1297 	cmd.opcode = SCSI_MODE_SENSE_10;
   1298 	cmd.byte2 = byte2;
   1299 	cmd.page = page;
   1300 	_lto2b(len, cmd.length);
   1301 
   1302 	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
   1303 	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_IN));
   1304 }
   1305 
   1306 int
   1307 scsipi_mode_select(struct scsipi_periph *periph, int byte2,
   1308     struct scsi_mode_parameter_header_6 *data, int len, int flags, int retries,
   1309     int timeout)
   1310 {
   1311 	struct scsi_mode_select_6 cmd;
   1312 
   1313 	memset(&cmd, 0, sizeof(cmd));
   1314 	cmd.opcode = SCSI_MODE_SELECT_6;
   1315 	cmd.byte2 = byte2;
   1316 	cmd.length = len & 0xff;
   1317 
   1318 	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
   1319 	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_OUT));
   1320 }
   1321 
   1322 int
   1323 scsipi_mode_select_big(struct scsipi_periph *periph, int byte2,
   1324     struct scsi_mode_parameter_header_10 *data, int len, int flags, int retries,
   1325     int timeout)
   1326 {
   1327 	struct scsi_mode_select_10 cmd;
   1328 
   1329 	memset(&cmd, 0, sizeof(cmd));
   1330 	cmd.opcode = SCSI_MODE_SELECT_10;
   1331 	cmd.byte2 = byte2;
   1332 	_lto2b(len, cmd.length);
   1333 
   1334 	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
   1335 	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_OUT));
   1336 }
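
/*
 * Usage sketch (illustrative only, modeled on how disk drivers use
 * these helpers; the local struct is the caller's own): fetch a mode
 * page into a buffer laid out as header, block descriptor, page data:
 *
 *	struct {
 *		struct scsi_mode_parameter_header_6 header;
 *		struct scsi_general_block_descriptor blk_desc;
 *		u_int8_t page_data[0xff];
 *	} buf;
 *
 *	error = scsipi_mode_sense(periph, 0, page, &buf.header,
 *	    sizeof(buf), 0, SCSIPIRETRIES, 10000);
 */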
   1337 
   1338 /*
   1339  * scsipi_done:
   1340  *
   1341  *	This routine is called by an adapter's interrupt handler when
   1342  *	an xfer is completed.
   1343  */
   1344 void
   1345 scsipi_done(struct scsipi_xfer *xs)
   1346 {
   1347 	struct scsipi_periph *periph = xs->xs_periph;
   1348 	struct scsipi_channel *chan = periph->periph_channel;
   1349 	int freezecnt;
   1350 
   1351 	SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
   1352 #ifdef SCSIPI_DEBUG
   1353 	if (periph->periph_dbflags & SCSIPI_DB1)
   1354 		show_scsipi_cmd(xs);
   1355 #endif
   1356 
   1357 	mutex_enter(chan_mtx(chan));
   1358 	/*
   1359 	 * The resource this command was using is now free.
   1360 	 */
   1361 	if (xs->xs_status & XS_STS_DONE) {
   1362 		/* XXX in certain circumstances, such as a device
    1363 		 * being detached, an xs that has already been
   1364 		 * scsipi_done()'d by the main thread will be done'd
   1365 		 * again by scsibusdetach(). Putting the xs on the
   1366 		 * chan_complete queue causes list corruption and
   1367 		 * everyone dies. This prevents that, but perhaps
   1368 		 * there should be better coordination somewhere such
    1369 		 * that this won't ever happen (and this check can be
    1370 		 * turned into a KASSERT()).
   1371 		 */
   1372 		mutex_exit(chan_mtx(chan));
   1373 		goto out;
   1374 	}
   1375 	scsipi_put_resource(chan);
   1376 	xs->xs_periph->periph_sent--;
   1377 
   1378 	/*
   1379 	 * If the command was tagged, free the tag.
   1380 	 */
   1381 	if (XS_CTL_TAGTYPE(xs) != 0)
   1382 		scsipi_put_tag(xs);
   1383 	else
   1384 		periph->periph_flags &= ~PERIPH_UNTAG;
   1385 
   1386 	/* Mark the command as `done'. */
   1387 	xs->xs_status |= XS_STS_DONE;
   1388 
   1389 #ifdef DIAGNOSTIC
   1390 	if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
   1391 	    (XS_CTL_ASYNC|XS_CTL_POLL))
   1392 		panic("scsipi_done: ASYNC and POLL");
   1393 #endif
   1394 
   1395 	/*
   1396 	 * If the xfer had an error of any sort, freeze the
   1397 	 * periph's queue.  Freeze it again if we were requested
   1398 	 * to do so in the xfer.
   1399 	 */
   1400 	freezecnt = 0;
   1401 	if (xs->error != XS_NOERROR)
   1402 		freezecnt++;
   1403 	if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
   1404 		freezecnt++;
   1405 	if (freezecnt != 0)
   1406 		scsipi_periph_freeze_locked(periph, freezecnt);
   1407 
   1408 	/*
   1409 	 * record the xfer with a pending sense, in case a SCSI reset is
    1410 	 * received before the thread is woken up.
   1411 	 */
   1412 	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
   1413 		periph->periph_flags |= PERIPH_SENSE;
   1414 		periph->periph_xscheck = xs;
   1415 	}
   1416 
   1417 	/*
   1418 	 * If this was an xfer that was not to complete asynchronously,
   1419 	 * let the requesting thread perform error checking/handling
   1420 	 * in its context.
   1421 	 */
   1422 	if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
   1423 		/*
   1424 		 * If it's a polling job, just return, to unwind the
   1425 		 * call graph.  We don't need to restart the queue,
   1426 		 * because pollings jobs are treated specially, and
   1427 		 * are really only used during crash dumps anyway
   1428 		 * (XXX or during boot-time autconfiguration of
   1429 		 * ATAPI devices).
   1430 		 */
   1431 		if (xs->xs_control & XS_CTL_POLL) {
   1432 			mutex_exit(chan_mtx(chan));
   1433 			return;
   1434 		}
   1435 		cv_broadcast(xs_cv(xs));
   1436 		mutex_exit(chan_mtx(chan));
   1437 		goto out;
   1438 	}
   1439 
   1440 	/*
   1441 	 * Catch the extremely common case of I/O completing
   1442 	 * without error; no use in taking a context switch
   1443 	 * if we can handle it in interrupt context.
   1444 	 */
   1445 	if (xs->error == XS_NOERROR) {
   1446 		mutex_exit(chan_mtx(chan));
   1447 		(void) scsipi_complete(xs);
   1448 		goto out;
   1449 	}
   1450 
   1451 	/*
   1452 	 * There is an error on this xfer.  Put it on the channel's
   1453 	 * completion queue, and wake up the completion thread.
   1454 	 */
   1455 	TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
   1456 	mutex_exit(chan_mtx(chan));
   1457 	cv_broadcast(chan_cv_complete(chan));
   1458 
   1459  out:
   1460 	/*
   1461 	 * If there are more xfers on the channel's queue, attempt to
   1462 	 * run them.
   1463 	 */
   1464 	scsipi_run_queue(chan);
   1465 }
   1466 
   1467 /*
   1468  * scsipi_complete:
   1469  *
   1470  *	Completion of a scsipi_xfer.  This is the guts of scsipi_done().
   1471  *
   1472  *	NOTE: This routine MUST be called with valid thread context
   1473  *	except for the case where the following two conditions are
   1474  *	true:
   1475  *
   1476  *		xs->error == XS_NOERROR
   1477  *		XS_CTL_ASYNC is set in xs->xs_control
   1478  *
   1479  *	The semantics of this routine can be tricky, so here is an
   1480  *	explanation:
   1481  *
   1482  *		0		Xfer completed successfully.
   1483  *
   1484  *		ERESTART	Xfer had an error, but was restarted.
   1485  *
   1486  *		anything else	Xfer had an error, return value is Unix
   1487  *				errno.
   1488  *
   1489  *	If the return value is anything but ERESTART:
   1490  *
   1491  *		- If XS_CTL_ASYNC is set, `xs' has been freed back to
   1492  *		  the pool.
   1493  *		- If there is a buf associated with the xfer,
   1494  *		  it has been biodone()'d.
   1495  */
   1496 static int
   1497 scsipi_complete(struct scsipi_xfer *xs)
   1498 {
   1499 	struct scsipi_periph *periph = xs->xs_periph;
   1500 	struct scsipi_channel *chan = periph->periph_channel;
   1501 	int error;
   1502 
   1503 #ifdef DIAGNOSTIC
   1504 	if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
   1505 		panic("scsipi_complete: XS_CTL_ASYNC but no buf");
   1506 #endif
   1507 	/*
   1508 	 * If command terminated with a CHECK CONDITION, we need to issue a
   1509 	 * REQUEST_SENSE command. Once the REQUEST_SENSE has been processed
   1510 	 * we'll have the real status.
   1511 	 * Must be processed with channel lock held to avoid missing
   1512 	 * a SCSI bus reset for this command.
   1513 	 */
   1514 	mutex_enter(chan_mtx(chan));
   1515 	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
    1516 		/* request sense for a request sense? */
    1517 		if (xs->xs_control & XS_CTL_REQSENSE) {
    1518 			scsipi_printaddr(periph);
    1519 			printf("request sense for a request sense?\n");
   1520 			/* XXX maybe we should reset the device ? */
   1521 			/* we've been frozen because xs->error != XS_NOERROR */
   1522 			scsipi_periph_thaw_locked(periph, 1);
   1523 			mutex_exit(chan_mtx(chan));
   1524 			if (xs->resid < xs->datalen) {
   1525 				printf("we read %d bytes of sense anyway:\n",
   1526 				    xs->datalen - xs->resid);
   1527 				scsipi_print_sense_data((void *)xs->data, 0);
   1528 			}
   1529 			return EINVAL;
   1530 		}
   1531 		mutex_exit(chan_mtx(chan)); // XXX allows other commands to queue or run
   1532 		scsipi_request_sense(xs);
   1533 	} else
   1534 		mutex_exit(chan_mtx(chan));
   1535 
   1536 	/*
   1537 	 * If it's a user level request, bypass all usual completion
   1538 	 * processing, let the user work it out..
   1539 	 */
   1540 	if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
   1541 		SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n"));
   1542 		mutex_enter(chan_mtx(chan));
   1543 		if (xs->error != XS_NOERROR)
   1544 			scsipi_periph_thaw_locked(periph, 1);
   1545 		mutex_exit(chan_mtx(chan));
   1546 		scsipi_user_done(xs);
    1547 		SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n"));
   1548 		return 0;
   1549 	}
   1550 
   1551 	switch (xs->error) {
   1552 	case XS_NOERROR:
   1553 		error = 0;
   1554 		break;
   1555 
   1556 	case XS_SENSE:
   1557 	case XS_SHORTSENSE:
   1558 		error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
   1559 		break;
   1560 
   1561 	case XS_RESOURCE_SHORTAGE:
   1562 		/*
   1563 		 * XXX Should freeze channel's queue.
   1564 		 */
   1565 		scsipi_printaddr(periph);
   1566 		printf("adapter resource shortage\n");
   1567 		/* FALLTHROUGH */
   1568 
   1569 	case XS_BUSY:
   1570 		if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
   1571 			struct scsipi_max_openings mo;
   1572 
   1573 			/*
   1574 			 * We set the openings to active - 1, assuming that
   1575 			 * the command that got us here is the first one that
   1576 			 * can't fit into the device's queue.  If that's not
   1577 			 * the case, I guess we'll find out soon enough.
   1578 			 */
   1579 			mo.mo_target = periph->periph_target;
   1580 			mo.mo_lun = periph->periph_lun;
   1581 			if (periph->periph_active < periph->periph_openings)
   1582 				mo.mo_openings = periph->periph_active - 1;
   1583 			else
   1584 				mo.mo_openings = periph->periph_openings - 1;
   1585 #ifdef DIAGNOSTIC
   1586 			if (mo.mo_openings < 0) {
   1587 				scsipi_printaddr(periph);
   1588 				printf("QUEUE FULL resulted in < 0 openings\n");
   1589 				panic("scsipi_done");
   1590 			}
   1591 #endif
   1592 			if (mo.mo_openings == 0) {
   1593 				scsipi_printaddr(periph);
   1594 				printf("QUEUE FULL resulted in 0 openings\n");
   1595 				mo.mo_openings = 1;
   1596 			}
   1597 			scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
   1598 			error = ERESTART;
   1599 		} else if (xs->xs_retries != 0) {
   1600 			xs->xs_retries--;
   1601 			/*
   1602 			 * Wait one second, and try again.
   1603 			 */
   1604 			mutex_enter(chan_mtx(chan));
   1605 			if ((xs->xs_control & XS_CTL_POLL) ||
   1606 			    (chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
   1607 				/* XXX: quite extreme */
   1608 				kpause("xsbusy", false, hz, chan_mtx(chan));
   1609 			} else if (!callout_pending(&periph->periph_callout)) {
   1610 				scsipi_periph_freeze_locked(periph, 1);
   1611 				callout_reset(&periph->periph_callout,
   1612 				    hz, scsipi_periph_timed_thaw, periph);
   1613 			}
   1614 			mutex_exit(chan_mtx(chan));
   1615 			error = ERESTART;
   1616 		} else
   1617 			error = EBUSY;
   1618 		break;
   1619 
   1620 	case XS_REQUEUE:
   1621 		error = ERESTART;
   1622 		break;
   1623 
   1624 	case XS_SELTIMEOUT:
   1625 	case XS_TIMEOUT:
   1626 		/*
   1627 		 * If the device hasn't gone away, honor retry counts.
   1628 		 *
   1629 		 * Note that if we're in the middle of probing it,
   1630 		 * it won't be found because it isn't here yet so
   1631 		 * we won't honor the retry count in that case.
   1632 		 */
   1633 		if (scsipi_lookup_periph(chan, periph->periph_target,
   1634 		    periph->periph_lun) && xs->xs_retries != 0) {
   1635 			xs->xs_retries--;
   1636 			error = ERESTART;
   1637 		} else
   1638 			error = EIO;
   1639 		break;
   1640 
   1641 	case XS_RESET:
   1642 		if (xs->xs_control & XS_CTL_REQSENSE) {
   1643 			/*
   1644 			 * request sense interrupted by reset: signal it
   1645 			 * with EINTR return code.
   1646 			 */
   1647 			error = EINTR;
   1648 		} else {
   1649 			if (xs->xs_retries != 0) {
   1650 				xs->xs_retries--;
   1651 				error = ERESTART;
   1652 			} else
   1653 				error = EIO;
   1654 		}
   1655 		break;
   1656 
   1657 	case XS_DRIVER_STUFFUP:
   1658 		scsipi_printaddr(periph);
   1659 		printf("generic HBA error\n");
   1660 		error = EIO;
   1661 		break;
   1662 	default:
   1663 		scsipi_printaddr(periph);
   1664 		printf("invalid return code from adapter: %d\n", xs->error);
   1665 		error = EIO;
   1666 		break;
   1667 	}
   1668 
   1669 	mutex_enter(chan_mtx(chan));
   1670 	if (error == ERESTART) {
   1671 		/*
   1672 		 * If we get here, the periph has been thawed and frozen
   1673 		 * again if we had to issue recovery commands.  Alternatively,
   1674 		 * it may have been frozen again and in a timed thaw.  In
   1675 		 * any case, we thaw the periph once we re-enqueue the
   1676 		 * command.  Once the periph is fully thawed, it will begin
   1677 		 * operation again.
   1678 		 */
   1679 		xs->error = XS_NOERROR;
   1680 		xs->status = SCSI_OK;
   1681 		xs->xs_status &= ~XS_STS_DONE;
   1682 		xs->xs_requeuecnt++;
   1683 		error = scsipi_enqueue(xs);
   1684 		if (error == 0) {
   1685 			scsipi_periph_thaw_locked(periph, 1);
   1686 			mutex_exit(chan_mtx(chan));
   1687 			return (ERESTART);
   1688 		}
   1689 	}
   1690 
   1691 	/*
   1692 	 * scsipi_done() freezes the queue if not XS_NOERROR.
   1693 	 * Thaw it here.
   1694 	 */
   1695 	if (xs->error != XS_NOERROR)
   1696 		scsipi_periph_thaw_locked(periph, 1);
   1697 	mutex_exit(chan_mtx(chan));
   1698 
   1699 	if (periph->periph_switch->psw_done)
   1700 		periph->periph_switch->psw_done(xs, error);
   1701 
   1702 	mutex_enter(chan_mtx(chan));
   1703 	if (xs->xs_control & XS_CTL_ASYNC)
   1704 		scsipi_put_xs(xs);
   1705 	mutex_exit(chan_mtx(chan));
   1706 
   1707 	return (error);
   1708 }
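
         /*
          * Note on the tail of scsipi_complete() above: ERESTART is only
          * returned once the xfer has been successfully re-enqueued (and
          * the periph thawed); every other path hands the final error
          * (EBUSY, EIO, EINTR, ...) to psw_done and then to the caller.
          */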
   1709 
   1710 /*
   1711  * Issue a request sense for the given scsipi_xfer. Called when the xfer
    1712  * returns with a CHECK_CONDITION status.  Must be called in a valid
    1713  * thread context and with the channel lock held.
   1714  */
   1715 
   1716 static void
   1717 scsipi_request_sense(struct scsipi_xfer *xs)
   1718 {
   1719 	struct scsipi_periph *periph = xs->xs_periph;
   1720 	int flags, error;
   1721 	struct scsi_request_sense cmd;
   1722 
   1723 	periph->periph_flags |= PERIPH_SENSE;
   1724 
    1725 	/* if the command was polling, the request sense will poll too */
   1726 	flags = xs->xs_control & XS_CTL_POLL;
   1727 	/* Polling commands can't sleep */
   1728 	if (flags)
   1729 		flags |= XS_CTL_NOSLEEP;
   1730 
   1731 	flags |= XS_CTL_REQSENSE | XS_CTL_URGENT | XS_CTL_DATA_IN |
   1732 	    XS_CTL_THAW_PERIPH | XS_CTL_FREEZE_PERIPH;
   1733 
   1734 	memset(&cmd, 0, sizeof(cmd));
   1735 	cmd.opcode = SCSI_REQUEST_SENSE;
   1736 	cmd.length = sizeof(struct scsi_sense_data);
   1737 
   1738 	error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
   1739 	    (void *)&xs->sense.scsi_sense, sizeof(struct scsi_sense_data),
   1740 	    0, 1000, NULL, flags);
   1741 	periph->periph_flags &= ~PERIPH_SENSE;
   1742 	periph->periph_xscheck = NULL;
   1743 	switch (error) {
   1744 	case 0:
   1745 		/* we have a valid sense */
   1746 		xs->error = XS_SENSE;
   1747 		return;
   1748 	case EINTR:
   1749 		/* REQUEST_SENSE interrupted by bus reset. */
   1750 		xs->error = XS_RESET;
   1751 		return;
   1752 	case EIO:
    1753 		/* request sense couldn't be performed */
   1754 		/*
   1755 		 * XXX this isn't quite right but we don't have anything
   1756 		 * better for now
   1757 		 */
   1758 		xs->error = XS_DRIVER_STUFFUP;
   1759 		return;
   1760 	default:
    1761 		/* Notify that request sense failed. */
   1762 		xs->error = XS_DRIVER_STUFFUP;
   1763 		scsipi_printaddr(periph);
   1764 		printf("request sense failed with error %d\n", error);
   1765 		return;
   1766 	}
   1767 }
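
         /*
          * For reference, the 6-byte CDB built above (layout per struct
          * scsi_request_sense) is:
          *
          *	byte 0:    opcode (SCSI_REQUEST_SENSE, 0x03)
          *	bytes 1-3: zeroed by the memset
          *	byte 4:    allocation length (sizeof(struct scsi_sense_data))
          *	byte 5:    control, zero
          */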
   1768 
   1769 /*
   1770  * scsipi_enqueue:
   1771  *
   1772  *	Enqueue an xfer on a channel.
   1773  */
   1774 static int
   1775 scsipi_enqueue(struct scsipi_xfer *xs)
   1776 {
   1777 	struct scsipi_channel *chan = xs->xs_periph->periph_channel;
   1778 	struct scsipi_xfer *qxs;
   1779 
   1780 	/*
   1781 	 * If the xfer is to be polled, and there are already jobs on
   1782 	 * the queue, we can't proceed.
   1783 	 */
   1784 	if ((xs->xs_control & XS_CTL_POLL) != 0 &&
   1785 	    TAILQ_FIRST(&chan->chan_queue) != NULL) {
   1786 		xs->error = XS_DRIVER_STUFFUP;
   1787 		return (EAGAIN);
   1788 	}
   1789 
   1790 	/*
   1791 	 * If we have an URGENT xfer, it's an error recovery command
   1792 	 * and it should just go on the head of the channel's queue.
   1793 	 */
   1794 	if (xs->xs_control & XS_CTL_URGENT) {
   1795 		TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
   1796 		goto out;
   1797 	}
   1798 
   1799 	/*
   1800 	 * If this xfer has already been on the queue before, we
   1801 	 * need to reinsert it in the correct order.  That order is:
   1802 	 *
   1803 	 *	Immediately before the first xfer for this periph
   1804 	 *	with a requeuecnt less than xs->xs_requeuecnt.
   1805 	 *
   1806 	 * Failing that, at the end of the queue.  (We'll end up
   1807 	 * there naturally.)
   1808 	 */
   1809 	if (xs->xs_requeuecnt != 0) {
   1810 		for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
   1811 		     qxs = TAILQ_NEXT(qxs, channel_q)) {
   1812 			if (qxs->xs_periph == xs->xs_periph &&
   1813 			    qxs->xs_requeuecnt < xs->xs_requeuecnt)
   1814 				break;
   1815 		}
   1816 		if (qxs != NULL) {
   1817 			TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
   1818 			    channel_q);
   1819 			goto out;
   1820 		}
   1821 	}
   1822 	TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
   1823  out:
   1824 	if (xs->xs_control & XS_CTL_THAW_PERIPH)
   1825 		scsipi_periph_thaw_locked(xs->xs_periph, 1);
   1826 	return (0);
   1827 }
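
         /*
          * For example, the REQUEST_SENSE xfer built by
          * scsipi_request_sense() above carries XS_CTL_URGENT, so it
          * takes the TAILQ_INSERT_HEAD path and runs ahead of any normal
          * I/O queued on the channel.
          */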
   1828 
   1829 /*
   1830  * scsipi_run_queue:
   1831  *
   1832  *	Start as many xfers as possible running on the channel.
   1833  */
   1834 static void
   1835 scsipi_run_queue(struct scsipi_channel *chan)
   1836 {
   1837 	struct scsipi_xfer *xs;
   1838 	struct scsipi_periph *periph;
   1839 
   1840 	for (;;) {
   1841 		mutex_enter(chan_mtx(chan));
   1842 
   1843 		/*
   1844 		 * If the channel is frozen, we can't do any work right
   1845 		 * now.
   1846 		 */
   1847 		if (chan->chan_qfreeze != 0) {
   1848 			mutex_exit(chan_mtx(chan));
   1849 			return;
   1850 		}
   1851 
   1852 		/*
   1853 		 * Look for work to do, and make sure we can do it.
   1854 		 */
   1855 		for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
   1856 		     xs = TAILQ_NEXT(xs, channel_q)) {
   1857 			periph = xs->xs_periph;
   1858 
   1859 			if ((periph->periph_sent >= periph->periph_openings) ||
   1860 			    periph->periph_qfreeze != 0 ||
   1861 			    (periph->periph_flags & PERIPH_UNTAG) != 0)
   1862 				continue;
   1863 
   1864 			if ((periph->periph_flags &
   1865 			    (PERIPH_RECOVERING | PERIPH_SENSE)) != 0 &&
   1866 			    (xs->xs_control & XS_CTL_URGENT) == 0)
   1867 				continue;
   1868 
   1869 			/*
   1870 			 * We can issue this xfer!
   1871 			 */
   1872 			goto got_one;
   1873 		}
   1874 
   1875 		/*
   1876 		 * Can't find any work to do right now.
   1877 		 */
   1878 		mutex_exit(chan_mtx(chan));
   1879 		return;
   1880 
   1881  got_one:
   1882 		/*
   1883 		 * Have an xfer to run.  Allocate a resource from
   1884 		 * the adapter to run it.  If we can't allocate that
   1885 		 * resource, we don't dequeue the xfer.
   1886 		 */
   1887 		if (scsipi_get_resource(chan) == 0) {
   1888 			/*
   1889 			 * Adapter is out of resources.  If the adapter
   1890 			 * supports it, attempt to grow them.
   1891 			 */
   1892 			if (scsipi_grow_resources(chan) == 0) {
   1893 				/*
   1894 				 * Wasn't able to grow resources,
   1895 				 * nothing more we can do.
   1896 				 */
   1897 				if (xs->xs_control & XS_CTL_POLL) {
   1898 					scsipi_printaddr(xs->xs_periph);
    1899 					printf("polling command but no "
    1900 					    "adapter resources\n");
   1901 					/* We'll panic shortly... */
   1902 				}
   1903 				mutex_exit(chan_mtx(chan));
   1904 
   1905 				/*
   1906 				 * XXX: We should be able to note that
    1907 				 * XXX: resources are needed here!
   1908 				 */
   1909 				return;
   1910 			}
   1911 			/*
   1912 			 * scsipi_grow_resources() allocated the resource
   1913 			 * for us.
   1914 			 */
   1915 		}
   1916 
   1917 		/*
   1918 		 * We have a resource to run this xfer, do it!
   1919 		 */
   1920 		TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
   1921 
   1922 		/*
   1923 		 * If the command is to be tagged, allocate a tag ID
   1924 		 * for it.
   1925 		 */
   1926 		if (XS_CTL_TAGTYPE(xs) != 0)
   1927 			scsipi_get_tag(xs);
   1928 		else
   1929 			periph->periph_flags |= PERIPH_UNTAG;
   1930 		periph->periph_sent++;
   1931 		mutex_exit(chan_mtx(chan));
   1932 
   1933 		scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
   1934 	}
   1935 #ifdef DIAGNOSTIC
   1936 	panic("scsipi_run_queue: impossible");
   1937 #endif
   1938 }
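
         /*
          * Note that scsipi_run_queue() is kicked again from several
          * places (the completion thread and scsipi_execute_xs() below),
          * so work skipped here (frozen channel, busy periph, exhausted
          * adapter resources) is retried once the state changes.
          */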
   1939 
   1940 /*
   1941  * scsipi_execute_xs:
   1942  *
   1943  *	Begin execution of an xfer, waiting for it to complete, if necessary.
   1944  */
   1945 int
   1946 scsipi_execute_xs(struct scsipi_xfer *xs)
   1947 {
   1948 	struct scsipi_periph *periph = xs->xs_periph;
   1949 	struct scsipi_channel *chan = periph->periph_channel;
   1950 	int oasync, async, poll, error;
   1951 
   1952 	KASSERT(!cold);
   1953 
   1954 	(chan->chan_bustype->bustype_cmd)(xs);
   1955 
   1956 	xs->xs_status &= ~XS_STS_DONE;
   1957 	xs->error = XS_NOERROR;
   1958 	xs->resid = xs->datalen;
   1959 	xs->status = SCSI_OK;
   1960 
   1961 #ifdef SCSIPI_DEBUG
   1962 	if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) {
   1963 		printf("scsipi_execute_xs: ");
   1964 		show_scsipi_xs(xs);
   1965 		printf("\n");
   1966 	}
   1967 #endif
   1968 
   1969 	/*
   1970 	 * Deal with command tagging:
   1971 	 *
   1972 	 *	- If the device's current operating mode doesn't
   1973 	 *	  include tagged queueing, clear the tag mask.
   1974 	 *
   1975 	 *	- If the device's current operating mode *does*
   1976 	 *	  include tagged queueing, set the tag_type in
   1977 	 *	  the xfer to the appropriate byte for the tag
   1978 	 *	  message.
   1979 	 */
   1980 	if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0 ||
    1981 	    (xs->xs_control & XS_CTL_REQSENSE)) {
   1982 		xs->xs_control &= ~XS_CTL_TAGMASK;
   1983 		xs->xs_tag_type = 0;
   1984 	} else {
   1985 		/*
   1986 		 * If the request doesn't specify a tag, give Head
   1987 		 * tags to URGENT operations and Simple tags to
   1988 		 * everything else.
   1989 		 */
   1990 		if (XS_CTL_TAGTYPE(xs) == 0) {
   1991 			if (xs->xs_control & XS_CTL_URGENT)
   1992 				xs->xs_control |= XS_CTL_HEAD_TAG;
   1993 			else
   1994 				xs->xs_control |= XS_CTL_SIMPLE_TAG;
   1995 		}
   1996 
   1997 		switch (XS_CTL_TAGTYPE(xs)) {
   1998 		case XS_CTL_ORDERED_TAG:
   1999 			xs->xs_tag_type = MSG_ORDERED_Q_TAG;
   2000 			break;
   2001 
   2002 		case XS_CTL_SIMPLE_TAG:
   2003 			xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
   2004 			break;
   2005 
   2006 		case XS_CTL_HEAD_TAG:
   2007 			xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
   2008 			break;
   2009 
   2010 		default:
   2011 			scsipi_printaddr(periph);
   2012 			printf("invalid tag mask 0x%08x\n",
   2013 			    XS_CTL_TAGTYPE(xs));
   2014 			panic("scsipi_execute_xs");
   2015 		}
   2016 	}
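
         	/*
         	 * (For reference: the MSG_* values above are the literal SPI
         	 * queue-tag message codes from scsi_message.h (0x20 simple,
         	 * 0x21 head-of-queue, 0x22 ordered); xs_tag_type is the byte
         	 * the adapter sends in the tag message.)
         	 */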
   2017 
    2018 	/* If the adapter wants us to poll, poll. */
   2019 	if (chan->chan_adapter->adapt_flags & SCSIPI_ADAPT_POLL_ONLY)
   2020 		xs->xs_control |= XS_CTL_POLL;
   2021 
   2022 	/*
   2023 	 * If we don't yet have a completion thread, or we are to poll for
   2024 	 * completion, clear the ASYNC flag.
   2025 	 */
    2026 	oasync = (xs->xs_control & XS_CTL_ASYNC);
   2027 	if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
   2028 		xs->xs_control &= ~XS_CTL_ASYNC;
   2029 
   2030 	async = (xs->xs_control & XS_CTL_ASYNC);
   2031 	poll = (xs->xs_control & XS_CTL_POLL);
   2032 
   2033 #ifdef DIAGNOSTIC
   2034 	if (oasync != 0 && xs->bp == NULL)
   2035 		panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
   2036 #endif
   2037 
   2038 	/*
   2039 	 * Enqueue the transfer.  If we're not polling for completion, this
   2040 	 * should ALWAYS return `no error'.
   2041 	 */
   2042 	error = scsipi_enqueue(xs);
   2043 	if (error) {
   2044 		if (poll == 0) {
   2045 			scsipi_printaddr(periph);
   2046 			printf("not polling, but enqueue failed with %d\n",
   2047 			    error);
   2048 			panic("scsipi_execute_xs");
   2049 		}
   2050 
   2051 		scsipi_printaddr(periph);
   2052 		printf("should have flushed queue?\n");
   2053 		goto free_xs;
   2054 	}
   2055 
   2056 	mutex_exit(chan_mtx(chan));
   2057  restarted:
   2058 	scsipi_run_queue(chan);
   2059 	mutex_enter(chan_mtx(chan));
   2060 
   2061 	/*
   2062 	 * The xfer is enqueued, and possibly running.  If it's to be
   2063 	 * completed asynchronously, just return now.
   2064 	 */
   2065 	if (async)
   2066 		return (0);
   2067 
   2068 	/*
   2069 	 * Not an asynchronous command; wait for it to complete.
   2070 	 */
   2071 	while ((xs->xs_status & XS_STS_DONE) == 0) {
   2072 		if (poll) {
   2073 			scsipi_printaddr(periph);
   2074 			printf("polling command not done\n");
   2075 			panic("scsipi_execute_xs");
   2076 		}
   2077 		cv_wait(xs_cv(xs), chan_mtx(chan));
   2078 	}
   2079 
   2080 	/*
   2081 	 * Command is complete.  scsipi_done() has awakened us to perform
   2082 	 * the error handling.
   2083 	 */
   2084 	mutex_exit(chan_mtx(chan));
   2085 	error = scsipi_complete(xs);
   2086 	if (error == ERESTART)
   2087 		goto restarted;
   2088 
   2089 	/*
    2090 	 * If it was meant to run async and we cleared async ourselves,
    2091 	 * don't return an error here; it has already been handled.
    2092 	 */
   2093 	if (oasync)
   2094 		error = 0;
   2095 	/*
   2096 	 * Command completed successfully or fatal error occurred.  Fall
   2097 	 * into....
   2098 	 */
   2099 	mutex_enter(chan_mtx(chan));
   2100  free_xs:
   2101 	scsipi_put_xs(xs);
   2102 	mutex_exit(chan_mtx(chan));
   2103 
   2104 	/*
   2105 	 * Kick the queue, keep it running in case it stopped for some
   2106 	 * reason.
   2107 	 */
   2108 	scsipi_run_queue(chan);
   2109 
   2110 	mutex_enter(chan_mtx(chan));
   2111 	return (error);
   2112 }
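
         /*
          * Callers normally reach scsipi_execute_xs() via scsipi_command(),
          * which builds the xfer and ends up here.  A minimal sketch,
          * mirroring the REQUEST SENSE issued earlier in this file
          * ("sense" being a hypothetical struct scsi_sense_data buffer):
          *
          *	struct scsi_request_sense cmd;
          *
          *	memset(&cmd, 0, sizeof(cmd));
          *	cmd.opcode = SCSI_REQUEST_SENSE;
          *	cmd.length = sizeof(struct scsi_sense_data);
          *	error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
          *	    (void *)&sense, sizeof(sense), 0, 1000, NULL, XS_CTL_POLL);
          */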
   2113 
   2114 /*
   2115  * scsipi_completion_thread:
   2116  *
   2117  *	This is the completion thread.  We wait for errors on
   2118  *	asynchronous xfers, and perform the error handling
   2119  *	function, restarting the command, if necessary.
   2120  */
   2121 static void
   2122 scsipi_completion_thread(void *arg)
   2123 {
   2124 	struct scsipi_channel *chan = arg;
   2125 	struct scsipi_xfer *xs;
   2126 
   2127 	if (chan->chan_init_cb)
   2128 		(*chan->chan_init_cb)(chan, chan->chan_init_cb_arg);
   2129 
   2130 	mutex_enter(chan_mtx(chan));
   2131 	chan->chan_flags |= SCSIPI_CHAN_TACTIVE;
   2132 	mutex_exit(chan_mtx(chan));
   2133 	for (;;) {
   2134 		mutex_enter(chan_mtx(chan));
   2135 		xs = TAILQ_FIRST(&chan->chan_complete);
    2136 		if (xs == NULL && chan->chan_tflags == 0) {
   2137 			/* nothing to do; wait */
   2138 			cv_wait(chan_cv_complete(chan), chan_mtx(chan));
   2139 			mutex_exit(chan_mtx(chan));
   2140 			continue;
   2141 		}
   2142 		if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
   2143 			/* call chan_callback from thread context */
   2144 			chan->chan_tflags &= ~SCSIPI_CHANT_CALLBACK;
   2145 			chan->chan_callback(chan, chan->chan_callback_arg);
   2146 			mutex_exit(chan_mtx(chan));
   2147 			continue;
   2148 		}
   2149 		if (chan->chan_tflags & SCSIPI_CHANT_GROWRES) {
   2150 			/* attempt to get more openings for this channel */
   2151 			chan->chan_tflags &= ~SCSIPI_CHANT_GROWRES;
   2152 			mutex_exit(chan_mtx(chan));
   2153 			scsipi_adapter_request(chan,
   2154 			    ADAPTER_REQ_GROW_RESOURCES, NULL);
   2155 			scsipi_channel_thaw(chan, 1);
   2156 			if (chan->chan_tflags & SCSIPI_CHANT_GROWRES)
    2157 				kpause("scsizzz", false, hz/10, NULL);
   2158 			continue;
   2159 		}
   2160 		if (chan->chan_tflags & SCSIPI_CHANT_KICK) {
   2161 			/* explicitly run the queues for this channel */
   2162 			chan->chan_tflags &= ~SCSIPI_CHANT_KICK;
   2163 			mutex_exit(chan_mtx(chan));
   2164 			scsipi_run_queue(chan);
   2165 			continue;
   2166 		}
   2167 		if (chan->chan_tflags & SCSIPI_CHANT_SHUTDOWN) {
   2168 			mutex_exit(chan_mtx(chan));
   2169 			break;
   2170 		}
   2171 		if (xs) {
   2172 			TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
   2173 			mutex_exit(chan_mtx(chan));
   2174 
   2175 			/*
   2176 			 * Have an xfer with an error; process it.
   2177 			 */
   2178 			(void) scsipi_complete(xs);
   2179 
   2180 			/*
   2181 			 * Kick the queue; keep it running if it was stopped
   2182 			 * for some reason.
   2183 			 */
   2184 			scsipi_run_queue(chan);
   2185 		} else {
   2186 			mutex_exit(chan_mtx(chan));
   2187 		}
   2188 	}
   2189 
   2190 	chan->chan_thread = NULL;
   2191 
   2192 	/* In case parent is waiting for us to exit. */
   2193 	cv_broadcast(chan_cv_thread(chan));
   2194 
   2195 	kthread_exit(0);
   2196 }

    2197 /*
    2198  * scsipi_thread_call_callback:
    2199  *
    2200  *	Request that a callback be run in the completion thread.
   2201  */
   2202 int
   2203 scsipi_thread_call_callback(struct scsipi_channel *chan,
   2204     void (*callback)(struct scsipi_channel *, void *), void *arg)
   2205 {
   2206 
   2207 	mutex_enter(chan_mtx(chan));
   2208 	if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
   2209 		/* kernel thread doesn't exist yet */
   2210 		mutex_exit(chan_mtx(chan));
   2211 		return ESRCH;
   2212 	}
   2213 	if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
   2214 		mutex_exit(chan_mtx(chan));
   2215 		return EBUSY;
   2216 	}
   2217 	scsipi_channel_freeze(chan, 1);
   2218 	chan->chan_callback = callback;
   2219 	chan->chan_callback_arg = arg;
   2220 	chan->chan_tflags |= SCSIPI_CHANT_CALLBACK;
   2221 	cv_broadcast(chan_cv_complete(chan));
   2222 	mutex_exit(chan_mtx(chan));
    2223 	return (0);
   2224 }
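
         /*
          * Usage sketch (hypothetical caller; "sc" is the driver's softc):
          * an HBA driver that must run recovery in thread context could
          * register:
          *
          *	static void
          *	mydrv_recover(struct scsipi_channel *chan, void *arg)
          *	{
          *		...
          *		scsipi_channel_thaw(chan, 1);
          *	}
          *
          *	error = scsipi_thread_call_callback(chan, mydrv_recover, sc);
          *
          * Note the freeze taken above is not undone by the completion
          * thread itself; the callback is expected to thaw the channel.
          */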
   2225 
   2226 /*
   2227  * scsipi_async_event:
   2228  *
   2229  *	Handle an asynchronous event from an adapter.
   2230  */
   2231 void
   2232 scsipi_async_event(struct scsipi_channel *chan, scsipi_async_event_t event,
   2233     void *arg)
   2234 {
   2235 
   2236 	mutex_enter(chan_mtx(chan));
   2237 	switch (event) {
   2238 	case ASYNC_EVENT_MAX_OPENINGS:
   2239 		scsipi_async_event_max_openings(chan,
   2240 		    (struct scsipi_max_openings *)arg);
   2241 		break;
   2242 
   2243 	case ASYNC_EVENT_XFER_MODE:
   2244 		if (chan->chan_bustype->bustype_async_event_xfer_mode) {
   2245 			chan->chan_bustype->bustype_async_event_xfer_mode(
   2246 			    chan, arg);
   2247 		}
   2248 		break;
   2249 	case ASYNC_EVENT_RESET:
   2250 		scsipi_async_event_channel_reset(chan);
   2251 		break;
   2252 	}
   2253 	mutex_exit(chan_mtx(chan));
   2254 }
   2255 
   2256 /*
   2257  * scsipi_async_event_max_openings:
   2258  *
   2259  *	Update the maximum number of outstanding commands a
   2260  *	device may have.
   2261  */
   2262 static void
   2263 scsipi_async_event_max_openings(struct scsipi_channel *chan,
   2264     struct scsipi_max_openings *mo)
   2265 {
   2266 	struct scsipi_periph *periph;
   2267 	int minlun, maxlun;
   2268 
   2269 	if (mo->mo_lun == -1) {
   2270 		/*
   2271 		 * Wildcarded; apply it to all LUNs.
   2272 		 */
   2273 		minlun = 0;
   2274 		maxlun = chan->chan_nluns - 1;
   2275 	} else
   2276 		minlun = maxlun = mo->mo_lun;
   2277 
   2278 	/* XXX This could really suck with a large LUN space. */
   2279 	for (; minlun <= maxlun; minlun++) {
   2280 		periph = scsipi_lookup_periph_locked(chan, mo->mo_target, minlun);
   2281 		if (periph == NULL)
   2282 			continue;
   2283 
   2284 		if (mo->mo_openings < periph->periph_openings)
   2285 			periph->periph_openings = mo->mo_openings;
   2286 		else if (mo->mo_openings > periph->periph_openings &&
   2287 		    (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
   2288 			periph->periph_openings = mo->mo_openings;
   2289 	}
   2290 }
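
         /*
          * Sketch of a producer (the QUEUE FULL handling in
          * scsipi_complete() above does essentially this; new_openings is
          * a hypothetical value chosen by the caller):
          *
          *	struct scsipi_max_openings mo;
          *
          *	mo.mo_target = periph->periph_target;
          *	mo.mo_lun = periph->periph_lun;
          *	mo.mo_openings = new_openings;
          *	scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
          */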
   2291 
   2292 /*
   2293  * scsipi_set_xfer_mode:
   2294  *
   2295  *	Set the xfer mode for the specified I_T Nexus.
   2296  */
   2297 void
   2298 scsipi_set_xfer_mode(struct scsipi_channel *chan, int target, int immed)
   2299 {
   2300 	struct scsipi_xfer_mode xm;
   2301 	struct scsipi_periph *itperiph;
   2302 	int lun;
   2303 
   2304 	/*
   2305 	 * Go to the minimal xfer mode.
   2306 	 */
   2307 	xm.xm_target = target;
   2308 	xm.xm_mode = 0;
   2309 	xm.xm_period = 0;			/* ignored */
   2310 	xm.xm_offset = 0;			/* ignored */
   2311 
   2312 	/*
   2313 	 * Find the first LUN we know about on this I_T Nexus.
   2314 	 */
   2315 	for (itperiph = NULL, lun = 0; lun < chan->chan_nluns; lun++) {
   2316 		itperiph = scsipi_lookup_periph(chan, target, lun);
   2317 		if (itperiph != NULL)
   2318 			break;
   2319 	}
   2320 	if (itperiph != NULL) {
   2321 		xm.xm_mode = itperiph->periph_cap;
   2322 		/*
   2323 		 * Now issue the request to the adapter.
   2324 		 */
   2325 		scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm);
   2326 		/*
   2327 		 * If we want this to happen immediately, issue a dummy
   2328 		 * command, since most adapters can't really negotiate unless
   2329 		 * they're executing a job.
   2330 		 */
   2331 		if (immed != 0) {
   2332 			(void) scsipi_test_unit_ready(itperiph,
   2333 			    XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
   2334 			    XS_CTL_IGNORE_NOT_READY |
   2335 			    XS_CTL_IGNORE_MEDIA_CHANGE);
   2336 		}
   2337 	}
   2338 }
   2339 
   2340 /*
    2341  * scsipi_async_event_channel_reset:
    2342  *
    2343  *	Handle a SCSI bus reset.
    2344  *	Called with the channel lock held.
   2345  */
   2346 static void
   2347 scsipi_async_event_channel_reset(struct scsipi_channel *chan)
   2348 {
   2349 	struct scsipi_xfer *xs, *xs_next;
   2350 	struct scsipi_periph *periph;
   2351 	int target, lun;
   2352 
   2353 	/*
    2354 	 * The channel has been reset.  Also mark pending REQUEST_SENSE
    2355 	 * commands as reset, since their sense data is no longer available.
    2356 	 * We can't call scsipi_done() from here, as the commands have not
    2357 	 * been sent to the adapter yet (this would corrupt the accounting).
   2358 	 */
   2359 
   2360 	for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; xs = xs_next) {
   2361 		xs_next = TAILQ_NEXT(xs, channel_q);
   2362 		if (xs->xs_control & XS_CTL_REQSENSE) {
   2363 			TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
   2364 			xs->error = XS_RESET;
   2365 			if ((xs->xs_control & XS_CTL_ASYNC) != 0)
   2366 				TAILQ_INSERT_TAIL(&chan->chan_complete, xs,
   2367 				    channel_q);
   2368 		}
   2369 	}
   2370 	cv_broadcast(chan_cv_complete(chan));
    2371 	/* Catch xfers with pending sense that may not have a REQSENSE xs yet */
   2372 	for (target = 0; target < chan->chan_ntargets; target++) {
   2373 		if (target == chan->chan_id)
   2374 			continue;
    2375 		for (lun = 0; lun < chan->chan_nluns; lun++) {
   2376 			periph = scsipi_lookup_periph_locked(chan, target, lun);
   2377 			if (periph) {
   2378 				xs = periph->periph_xscheck;
   2379 				if (xs)
   2380 					xs->error = XS_RESET;
   2381 			}
   2382 		}
   2383 	}
   2384 }
   2385 
   2386 /*
   2387  * scsipi_target_detach:
   2388  *
    2389  *	Detach all periphs associated with an I_T Nexus.
    2390  *	Must be called from a valid thread context.
   2391  */
   2392 int
   2393 scsipi_target_detach(struct scsipi_channel *chan, int target, int lun,
   2394     int flags)
   2395 {
   2396 	struct scsipi_periph *periph;
   2397 	int ctarget, mintarget, maxtarget;
   2398 	int clun, minlun, maxlun;
   2399 	int error;
   2400 
   2401 	if (target == -1) {
   2402 		mintarget = 0;
   2403 		maxtarget = chan->chan_ntargets;
   2404 	} else {
   2405 		if (target == chan->chan_id)
   2406 			return EINVAL;
   2407 		if (target < 0 || target >= chan->chan_ntargets)
   2408 			return EINVAL;
   2409 		mintarget = target;
   2410 		maxtarget = target + 1;
   2411 	}
   2412 
   2413 	if (lun == -1) {
   2414 		minlun = 0;
   2415 		maxlun = chan->chan_nluns;
   2416 	} else {
   2417 		if (lun < 0 || lun >= chan->chan_nluns)
   2418 			return EINVAL;
   2419 		minlun = lun;
   2420 		maxlun = lun + 1;
   2421 	}
   2422 
   2423 	for (ctarget = mintarget; ctarget < maxtarget; ctarget++) {
   2424 		if (ctarget == chan->chan_id)
   2425 			continue;
   2426 
   2427 		for (clun = minlun; clun < maxlun; clun++) {
   2428 			periph = scsipi_lookup_periph(chan, ctarget, clun);
   2429 			if (periph == NULL)
   2430 				continue;
   2431 			error = config_detach(periph->periph_dev, flags);
   2432 			if (error)
   2433 				return (error);
   2434 		}
   2435 	}
    2436 	return (0);
   2437 }
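
         /*
          * Usage sketch: a channel being unconfigured can detach every
          * periph it carries using the wildcards, with flags as for
          * config_detach():
          *
          *	error = scsipi_target_detach(chan, -1, -1, DETACH_FORCE);
          */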
   2438 
   2439 /*
   2440  * scsipi_adapter_addref:
   2441  *
    2442  *	Add a reference to the specified adapter, enabling the
    2443  *	adapter if necessary.
   2444  */
   2445 int
   2446 scsipi_adapter_addref(struct scsipi_adapter *adapt)
   2447 {
   2448 	int error = 0;
   2449 
   2450 	if (atomic_inc_uint_nv(&adapt->adapt_refcnt) == 1
   2451 	    && adapt->adapt_enable != NULL) {
   2452 		scsipi_adapter_lock(adapt);
   2453 		error = scsipi_adapter_enable(adapt, 1);
   2454 		scsipi_adapter_unlock(adapt);
   2455 		if (error)
   2456 			atomic_dec_uint(&adapt->adapt_refcnt);
   2457 	}
   2458 	return (error);
   2459 }
   2460 
   2461 /*
   2462  * scsipi_adapter_delref:
   2463  *
    2464  *	Release a reference to the specified adapter, disabling the
    2465  *	adapter if possible.
   2466  */
   2467 void
   2468 scsipi_adapter_delref(struct scsipi_adapter *adapt)
   2469 {
   2470 
   2471 	if (atomic_dec_uint_nv(&adapt->adapt_refcnt) == 0
   2472 	    && adapt->adapt_enable != NULL) {
   2473 		scsipi_adapter_lock(adapt);
   2474 		(void) scsipi_adapter_enable(adapt, 0);
   2475 		scsipi_adapter_unlock(adapt);
   2476 	}
   2477 }
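
         /*
          * The usual pairing, sketched (callers bracket a period of
          * adapter use):
          *
          *	if ((error = scsipi_adapter_addref(adapt)) != 0)
          *		return error;
          *	...
          *	scsipi_adapter_delref(adapt);
          */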
   2478 
   2479 static struct scsipi_syncparam {
   2480 	int	ss_factor;
   2481 	int	ss_period;	/* ns * 100 */
   2482 } scsipi_syncparams[] = {
   2483 	{ 0x08,		 625 },	/* FAST-160 (Ultra320) */
   2484 	{ 0x09,		1250 },	/* FAST-80 (Ultra160) */
   2485 	{ 0x0a,		2500 },	/* FAST-40 40MHz (Ultra2) */
   2486 	{ 0x0b,		3030 },	/* FAST-40 33MHz (Ultra2) */
   2487 	{ 0x0c,		5000 },	/* FAST-20 (Ultra) */
   2488 };
   2489 static const int scsipi_nsyncparams =
   2490     sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);
   2491 
   2492 int
   2493 scsipi_sync_period_to_factor(int period /* ns * 100 */)
   2494 {
   2495 	int i;
   2496 
   2497 	for (i = 0; i < scsipi_nsyncparams; i++) {
   2498 		if (period <= scsipi_syncparams[i].ss_period)
   2499 			return (scsipi_syncparams[i].ss_factor);
   2500 	}
   2501 
   2502 	return ((period / 100) / 4);
   2503 }
   2504 
   2505 int
   2506 scsipi_sync_factor_to_period(int factor)
   2507 {
   2508 	int i;
   2509 
   2510 	for (i = 0; i < scsipi_nsyncparams; i++) {
   2511 		if (factor == scsipi_syncparams[i].ss_factor)
   2512 			return (scsipi_syncparams[i].ss_period);
   2513 	}
   2514 
   2515 	return ((factor * 4) * 100);
   2516 }
   2517 
   2518 int
   2519 scsipi_sync_factor_to_freq(int factor)
   2520 {
   2521 	int i;
   2522 
   2523 	for (i = 0; i < scsipi_nsyncparams; i++) {
   2524 		if (factor == scsipi_syncparams[i].ss_factor)
   2525 			return (100000000 / scsipi_syncparams[i].ss_period);
   2526 	}
   2527 
   2528 	return (10000000 / ((factor * 4) * 10));
   2529 }
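
         /*
          * Worked example: factor 0x0c (FAST-20) maps to ss_period 5000,
          * i.e. 50.00 ns, so scsipi_sync_factor_to_freq(0x0c) returns
          * 100000000 / 5000 = 20000; the frequencies are in kHz.  Off the
          * table, the generic rule period_ns = factor * 4 applies: factor
          * 0x19 (25) gives 100 ns and 10000 kHz (FAST-10).
          */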
   2530 
   2531 static inline void
   2532 scsipi_adapter_lock(struct scsipi_adapter *adapt)
   2533 {
   2534 
   2535 	if ((adapt->adapt_flags & SCSIPI_ADAPT_MPSAFE) == 0)
   2536 		KERNEL_LOCK(1, NULL);
   2537 }
   2538 
   2539 static inline void
   2540 scsipi_adapter_unlock(struct scsipi_adapter *adapt)
   2541 {
   2542 
   2543 	if ((adapt->adapt_flags & SCSIPI_ADAPT_MPSAFE) == 0)
   2544 		KERNEL_UNLOCK_ONE(NULL);
   2545 }
   2546 
   2547 void
   2548 scsipi_adapter_minphys(struct scsipi_channel *chan, struct buf *bp)
   2549 {
   2550 	struct scsipi_adapter *adapt = chan->chan_adapter;
   2551 
   2552 	scsipi_adapter_lock(adapt);
   2553 	(adapt->adapt_minphys)(bp);
    2554 	scsipi_adapter_unlock(adapt);
   2555 }
   2556 
   2557 void
   2558 scsipi_adapter_request(struct scsipi_channel *chan,
   2559 	scsipi_adapter_req_t req, void *arg)
   2561 {
   2562 	struct scsipi_adapter *adapt = chan->chan_adapter;
   2563 
   2564 	scsipi_adapter_lock(adapt);
   2565 	(adapt->adapt_request)(chan, req, arg);
   2566 	scsipi_adapter_unlock(adapt);
   2567 }
   2568 
   2569 int
   2570 scsipi_adapter_ioctl(struct scsipi_channel *chan, u_long cmd,
   2571 	void *data, int flag, struct proc *p)
   2572 {
   2573 	struct scsipi_adapter *adapt = chan->chan_adapter;
   2574 	int error;
   2575 
   2576 	if (adapt->adapt_ioctl == NULL)
   2577 		return ENOTTY;
   2578 
   2579 	scsipi_adapter_lock(adapt);
   2580 	error = (adapt->adapt_ioctl)(chan, cmd, data, flag, p);
   2581 	scsipi_adapter_unlock(adapt);
   2582 	return error;
   2583 }
   2584 
   2585 int
   2586 scsipi_adapter_enable(struct scsipi_adapter *adapt, int enable)
   2587 {
   2588 	int error;
   2589 
   2590 	scsipi_adapter_lock(adapt);
   2591 	error = (adapt->adapt_enable)(adapt->adapt_dev, enable);
   2592 	scsipi_adapter_unlock(adapt);
   2593 	return error;
   2594 }
   2595 
   2596 #ifdef SCSIPI_DEBUG
   2597 /*
   2598  * Given a scsipi_xfer, dump the request, in all its glory
   2599  */
   2600 void
   2601 show_scsipi_xs(struct scsipi_xfer *xs)
   2602 {
   2603 
   2604 	printf("xs(%p): ", xs);
   2605 	printf("xs_control(0x%08x)", xs->xs_control);
   2606 	printf("xs_status(0x%08x)", xs->xs_status);
   2607 	printf("periph(%p)", xs->xs_periph);
   2608 	printf("retr(0x%x)", xs->xs_retries);
   2609 	printf("timo(0x%x)", xs->timeout);
   2610 	printf("cmd(%p)", xs->cmd);
   2611 	printf("len(0x%x)", xs->cmdlen);
   2612 	printf("data(%p)", xs->data);
   2613 	printf("len(0x%x)", xs->datalen);
   2614 	printf("res(0x%x)", xs->resid);
   2615 	printf("err(0x%x)", xs->error);
   2616 	printf("bp(%p)", xs->bp);
   2617 	show_scsipi_cmd(xs);
   2618 }
   2619 
   2620 void
   2621 show_scsipi_cmd(struct scsipi_xfer *xs)
   2622 {
   2623 	u_char *b = (u_char *) xs->cmd;
   2624 	int i = 0;
   2625 
   2626 	scsipi_printaddr(xs->xs_periph);
   2627 	printf(" command: ");
   2628 
   2629 	if ((xs->xs_control & XS_CTL_RESET) == 0) {
   2630 		while (i < xs->cmdlen) {
   2631 			if (i)
   2632 				printf(",");
   2633 			printf("0x%x", b[i++]);
   2634 		}
   2635 		printf("-[%d bytes]\n", xs->datalen);
   2636 		if (xs->datalen)
   2637 			show_mem(xs->data, min(64, xs->datalen));
   2638 	} else
   2639 		printf("-RESET-\n");
   2640 }
   2641 
   2642 void
   2643 show_mem(u_char *address, int num)
   2644 {
   2645 	int x;
   2646 
   2647 	printf("------------------------------");
   2648 	for (x = 0; x < num; x++) {
   2649 		if ((x % 16) == 0)
   2650 			printf("\n%03d: ", x);
   2651 		printf("%02x ", *address++);
   2652 	}
   2653 	printf("\n------------------------------\n");
   2654 }
   2655 #endif /* SCSIPI_DEBUG */
   2656