Home | History | Annotate | Line # | Download | only in scsipi
scsipi_base.c revision 1.116
      1 /*	$NetBSD: scsipi_base.c,v 1.116 2004/09/18 00:08:16 mycroft Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 1998, 1999, 2000, 2002, 2003, 2004 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
      9  * Simulation Facility, NASA Ames Research Center.
     10  *
     11  * Redistribution and use in source and binary forms, with or without
     12  * modification, are permitted provided that the following conditions
     13  * are met:
     14  * 1. Redistributions of source code must retain the above copyright
     15  *    notice, this list of conditions and the following disclaimer.
     16  * 2. Redistributions in binary form must reproduce the above copyright
     17  *    notice, this list of conditions and the following disclaimer in the
     18  *    documentation and/or other materials provided with the distribution.
     19  * 3. All advertising materials mentioning features or use of this software
     20  *    must display the following acknowledgement:
     21  *        This product includes software developed by the NetBSD
     22  *        Foundation, Inc. and its contributors.
     23  * 4. Neither the name of The NetBSD Foundation nor the names of its
     24  *    contributors may be used to endorse or promote products derived
     25  *    from this software without specific prior written permission.
     26  *
     27  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     28  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     29  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     30  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     31  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     32  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     33  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     34  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     35  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     36  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     37  * POSSIBILITY OF SUCH DAMAGE.
     38  */
     39 
     40 #include <sys/cdefs.h>
     41 __KERNEL_RCSID(0, "$NetBSD: scsipi_base.c,v 1.116 2004/09/18 00:08:16 mycroft Exp $");
     42 
     43 #include "opt_scsi.h"
     44 
     45 #include <sys/param.h>
     46 #include <sys/systm.h>
     47 #include <sys/kernel.h>
     48 #include <sys/buf.h>
     49 #include <sys/uio.h>
     50 #include <sys/malloc.h>
     51 #include <sys/pool.h>
     52 #include <sys/errno.h>
     53 #include <sys/device.h>
     54 #include <sys/proc.h>
     55 #include <sys/kthread.h>
     56 #include <sys/hash.h>
     57 
     58 #include <uvm/uvm_extern.h>
     59 
     60 #include <dev/scsipi/scsipi_all.h>
     61 #include <dev/scsipi/scsipi_disk.h>
     62 #include <dev/scsipi/scsipiconf.h>
     63 #include <dev/scsipi/scsipi_base.h>
     64 
     65 #include <dev/scsipi/scsi_all.h>
     66 #include <dev/scsipi/scsi_message.h>
     67 
     68 static int	scsipi_complete(struct scsipi_xfer *);
     69 static void	scsipi_request_sense(struct scsipi_xfer *);
     70 static int	scsipi_enqueue(struct scsipi_xfer *);
     71 static void	scsipi_run_queue(struct scsipi_channel *chan);
     72 
     73 static void	scsipi_completion_thread(void *);
     74 
     75 static void	scsipi_get_tag(struct scsipi_xfer *);
     76 static void	scsipi_put_tag(struct scsipi_xfer *);
     77 
     78 static int	scsipi_get_resource(struct scsipi_channel *);
     79 static void	scsipi_put_resource(struct scsipi_channel *);
     80 
     81 static void	scsipi_async_event_max_openings(struct scsipi_channel *,
     82 		    struct scsipi_max_openings *);
     83 static void	scsipi_async_event_xfer_mode(struct scsipi_channel *,
     84 		    struct scsipi_xfer_mode *);
     85 static void	scsipi_async_event_channel_reset(struct scsipi_channel *);
     86 
     87 static struct pool scsipi_xfer_pool;
     88 
     89 /*
     90  * scsipi_init:
     91  *
     92  *	Called when a scsibus or atapibus is attached to the system
     93  *	to initialize shared data structures.
     94  */
     95 void
     96 scsipi_init(void)
     97 {
     98 	static int scsipi_init_done;
     99 
    100 	if (scsipi_init_done)
    101 		return;
    102 	scsipi_init_done = 1;
    103 
    104 	/* Initialize the scsipi_xfer pool. */
    105 	pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
    106 	    0, 0, "scxspl", NULL);
    107 	if (pool_prime(&scsipi_xfer_pool,
    108 	    PAGE_SIZE / sizeof(struct scsipi_xfer)) == ENOMEM) {
    109 		printf("WARNING: not enough memory for scsipi_xfer_pool\n");
    110 	}
    111 }
    112 
    113 /*
    114  * scsipi_channel_init:
    115  *
    116  *	Initialize a scsipi_channel when it is attached.
    117  */
    118 int
    119 scsipi_channel_init(struct scsipi_channel *chan)
    120 {
    121 	int i;
    122 
    123 	/* Initialize shared data. */
    124 	scsipi_init();
    125 
    126 	/* Initialize the queues. */
    127 	TAILQ_INIT(&chan->chan_queue);
    128 	TAILQ_INIT(&chan->chan_complete);
    129 
    130 	for (i = 0; i < SCSIPI_CHAN_PERIPH_BUCKETS; i++)
    131 		LIST_INIT(&chan->chan_periphtab[i]);
    132 
    133 	/*
    134 	 * Create the asynchronous completion thread.
    135 	 */
    136 	kthread_create(scsipi_create_completion_thread, chan);
    137 	return (0);
    138 }
    139 
    140 /*
    141  * scsipi_channel_shutdown:
    142  *
    143  *	Shutdown a scsipi_channel.
    144  */
    145 void
    146 scsipi_channel_shutdown(struct scsipi_channel *chan)
    147 {
    148 
    149 	/*
    150 	 * Shut down the completion thread.
    151 	 */
    152 	chan->chan_tflags |= SCSIPI_CHANT_SHUTDOWN;
    153 	wakeup(&chan->chan_complete);
    154 
    155 	/*
    156 	 * Now wait for the thread to exit.
    157 	 */
    158 	while (chan->chan_thread != NULL)
    159 		(void) tsleep(&chan->chan_thread, PRIBIO, "scshut", 0);
    160 }
    161 
    162 static uint32_t
    163 scsipi_chan_periph_hash(uint64_t t, uint64_t l)
    164 {
    165 	uint32_t hash;
    166 
    167 	hash = hash32_buf(&t, sizeof(t), HASH32_BUF_INIT);
    168 	hash = hash32_buf(&l, sizeof(l), hash);
    169 
    170 	return (hash & SCSIPI_CHAN_PERIPH_HASHMASK);
    171 }
    172 
    173 /*
    174  * scsipi_insert_periph:
    175  *
    176  *	Insert a periph into the channel.
    177  */
    178 void
    179 scsipi_insert_periph(struct scsipi_channel *chan, struct scsipi_periph *periph)
    180 {
    181 	uint32_t hash;
    182 	int s;
    183 
    184 	hash = scsipi_chan_periph_hash(periph->periph_target,
    185 	    periph->periph_lun);
    186 
    187 	s = splbio();
    188 	LIST_INSERT_HEAD(&chan->chan_periphtab[hash], periph, periph_hash);
    189 	splx(s);
    190 }
    191 
    192 /*
    193  * scsipi_remove_periph:
    194  *
    195  *	Remove a periph from the channel.
    196  */
    197 void
    198 scsipi_remove_periph(struct scsipi_channel *chan, struct scsipi_periph *periph)
    199 {
    200 	int s;
    201 
    202 	s = splbio();
    203 	LIST_REMOVE(periph, periph_hash);
    204 	splx(s);
    205 }
    206 
    207 /*
    208  * scsipi_lookup_periph:
    209  *
    210  *	Lookup a periph on the specified channel.
    211  */
    212 struct scsipi_periph *
    213 scsipi_lookup_periph(struct scsipi_channel *chan, int target, int lun)
    214 {
    215 	struct scsipi_periph *periph;
    216 	uint32_t hash;
    217 	int s;
    218 
    219 	if (target >= chan->chan_ntargets ||
    220 	    lun >= chan->chan_nluns)
    221 		return (NULL);
    222 
    223 	hash = scsipi_chan_periph_hash(target, lun);
    224 
    225 	s = splbio();
    226 	LIST_FOREACH(periph, &chan->chan_periphtab[hash], periph_hash) {
    227 		if (periph->periph_target == target &&
    228 		    periph->periph_lun == lun)
    229 			break;
    230 	}
    231 	splx(s);
    232 
    233 	return (periph);
    234 }
    235 
    236 /*
    237  * scsipi_get_resource:
    238  *
    239  *	Allocate a single xfer `resource' from the channel.
    240  *
    241  *	NOTE: Must be called at splbio().
    242  */
    243 static int
    244 scsipi_get_resource(struct scsipi_channel *chan)
    245 {
    246 	struct scsipi_adapter *adapt = chan->chan_adapter;
    247 
    248 	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
    249 		if (chan->chan_openings > 0) {
    250 			chan->chan_openings--;
    251 			return (1);
    252 		}
    253 		return (0);
    254 	}
    255 
    256 	if (adapt->adapt_openings > 0) {
    257 		adapt->adapt_openings--;
    258 		return (1);
    259 	}
    260 	return (0);
    261 }
    262 
    263 /*
    264  * scsipi_grow_resources:
    265  *
    266  *	Attempt to grow resources for a channel.  If this succeeds,
    267  *	we allocate one for our caller.
    268  *
    269  *	NOTE: Must be called at splbio().
    270  */
    271 static __inline int
    272 scsipi_grow_resources(struct scsipi_channel *chan)
    273 {
    274 
    275 	if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
    276 		if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
    277 			scsipi_adapter_request(chan,
    278 			    ADAPTER_REQ_GROW_RESOURCES, NULL);
    279 			return (scsipi_get_resource(chan));
    280 		}
    281 		/*
    282 		 * ask the channel thread to do it. It'll have to thaw the
    283 		 * queue
    284 		 */
    285 		scsipi_channel_freeze(chan, 1);
    286 		chan->chan_tflags |= SCSIPI_CHANT_GROWRES;
    287 		wakeup(&chan->chan_complete);
    288 		return (0);
    289 	}
    290 
    291 	return (0);
    292 }
    293 
    294 /*
    295  * scsipi_put_resource:
    296  *
    297  *	Free a single xfer `resource' to the channel.
    298  *
    299  *	NOTE: Must be called at splbio().
    300  */
    301 static void
    302 scsipi_put_resource(struct scsipi_channel *chan)
    303 {
    304 	struct scsipi_adapter *adapt = chan->chan_adapter;
    305 
    306 	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
    307 		chan->chan_openings++;
    308 	else
    309 		adapt->adapt_openings++;
    310 }
    311 
    312 /*
    313  * scsipi_get_tag:
    314  *
    315  *	Get a tag ID for the specified xfer.
    316  *
    317  *	NOTE: Must be called at splbio().
    318  */
    319 static void
    320 scsipi_get_tag(struct scsipi_xfer *xs)
    321 {
    322 	struct scsipi_periph *periph = xs->xs_periph;
    323 	int bit, tag;
    324 	u_int word;
    325 
    326 	bit = 0;	/* XXX gcc */
    327 	for (word = 0; word < PERIPH_NTAGWORDS; word++) {
    328 		bit = ffs(periph->periph_freetags[word]);
    329 		if (bit != 0)
    330 			break;
    331 	}
    332 #ifdef DIAGNOSTIC
    333 	if (word == PERIPH_NTAGWORDS) {
    334 		scsipi_printaddr(periph);
    335 		printf("no free tags\n");
    336 		panic("scsipi_get_tag");
    337 	}
    338 #endif
    339 
    340 	bit -= 1;
    341 	periph->periph_freetags[word] &= ~(1 << bit);
    342 	tag = (word << 5) | bit;
    343 
    344 	/* XXX Should eventually disallow this completely. */
    345 	if (tag >= periph->periph_openings) {
    346 		scsipi_printaddr(periph);
    347 		printf("WARNING: tag %d greater than available openings %d\n",
    348 		    tag, periph->periph_openings);
    349 	}
    350 
    351 	xs->xs_tag_id = tag;
    352 }
    353 
    354 /*
    355  * scsipi_put_tag:
    356  *
    357  *	Put the tag ID for the specified xfer back into the pool.
    358  *
    359  *	NOTE: Must be called at splbio().
    360  */
    361 static void
    362 scsipi_put_tag(struct scsipi_xfer *xs)
    363 {
    364 	struct scsipi_periph *periph = xs->xs_periph;
    365 	int word, bit;
    366 
    367 	word = xs->xs_tag_id >> 5;
    368 	bit = xs->xs_tag_id & 0x1f;
    369 
    370 	periph->periph_freetags[word] |= (1 << bit);
    371 }
    372 
    373 /*
    374  * scsipi_get_xs:
    375  *
    376  *	Allocate an xfer descriptor and associate it with the
    377  *	specified peripherial.  If the peripherial has no more
    378  *	available command openings, we either block waiting for
    379  *	one to become available, or fail.
    380  */
    381 struct scsipi_xfer *
    382 scsipi_get_xs(struct scsipi_periph *periph, int flags)
    383 {
    384 	struct scsipi_xfer *xs;
    385 	int s;
    386 
    387 	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));
    388 
    389 	/*
    390 	 * If we're cold, make sure we poll.
    391 	 */
    392 	if (cold)
    393 		flags |= XS_CTL_NOSLEEP | XS_CTL_POLL;
    394 
    395 #ifdef DIAGNOSTIC
    396 	/*
    397 	 * URGENT commands can never be ASYNC.
    398 	 */
    399 	if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
    400 	    (XS_CTL_URGENT|XS_CTL_ASYNC)) {
    401 		scsipi_printaddr(periph);
    402 		printf("URGENT and ASYNC\n");
    403 		panic("scsipi_get_xs");
    404 	}
    405 #endif
    406 
    407 	s = splbio();
    408 	/*
    409 	 * Wait for a command opening to become available.  Rules:
    410 	 *
    411 	 *	- All xfers must wait for an available opening.
    412 	 *	  Exception: URGENT xfers can proceed when
    413 	 *	  active == openings, because we use the opening
    414 	 *	  of the command we're recovering for.
    415 	 *	- if the periph has sense pending, only URGENT & REQSENSE
    416 	 *	  xfers may proceed.
    417 	 *
    418 	 *	- If the periph is recovering, only URGENT xfers may
    419 	 *	  proceed.
    420 	 *
    421 	 *	- If the periph is currently executing a recovery
    422 	 *	  command, URGENT commands must block, because only
    423 	 *	  one recovery command can execute at a time.
    424 	 */
    425 	for (;;) {
    426 		if (flags & XS_CTL_URGENT) {
    427 			if (periph->periph_active > periph->periph_openings)
    428 				goto wait_for_opening;
    429 			if (periph->periph_flags & PERIPH_SENSE) {
    430 				if ((flags & XS_CTL_REQSENSE) == 0)
    431 					goto wait_for_opening;
    432 			} else {
    433 				if ((periph->periph_flags &
    434 				    PERIPH_RECOVERY_ACTIVE) != 0)
    435 					goto wait_for_opening;
    436 				periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
    437 			}
    438 			break;
    439 		}
    440 		if (periph->periph_active >= periph->periph_openings ||
    441 		    (periph->periph_flags & PERIPH_RECOVERING) != 0)
    442 			goto wait_for_opening;
    443 		periph->periph_active++;
    444 		break;
    445 
    446  wait_for_opening:
    447 		if (flags & XS_CTL_NOSLEEP) {
    448 			splx(s);
    449 			return (NULL);
    450 		}
    451 		SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
    452 		periph->periph_flags |= PERIPH_WAITING;
    453 		(void) tsleep(periph, PRIBIO, "getxs", 0);
    454 	}
    455 	SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
    456 	xs = pool_get(&scsipi_xfer_pool,
    457 	    ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
    458 	if (xs == NULL) {
    459 		if (flags & XS_CTL_URGENT) {
    460 			if ((flags & XS_CTL_REQSENSE) == 0)
    461 				periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
    462 		} else
    463 			periph->periph_active--;
    464 		scsipi_printaddr(periph);
    465 		printf("unable to allocate %sscsipi_xfer\n",
    466 		    (flags & XS_CTL_URGENT) ? "URGENT " : "");
    467 	}
    468 	splx(s);
    469 
    470 	SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));
    471 
    472 	if (xs != NULL) {
    473 		memset(xs, 0, sizeof(*xs));
    474 		callout_init(&xs->xs_callout);
    475 		xs->xs_periph = periph;
    476 		xs->xs_control = flags;
    477 		xs->xs_status = 0;
    478 		s = splbio();
    479 		TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
    480 		splx(s);
    481 	}
    482 	return (xs);
    483 }
    484 
    485 /*
    486  * scsipi_put_xs:
    487  *
    488  *	Release an xfer descriptor, decreasing the outstanding command
    489  *	count for the peripherial.  If there is a thread waiting for
    490  *	an opening, wake it up.  If not, kick any queued I/O the
    491  *	peripherial may have.
    492  *
    493  *	NOTE: Must be called at splbio().
    494  */
    495 void
    496 scsipi_put_xs(struct scsipi_xfer *xs)
    497 {
    498 	struct scsipi_periph *periph = xs->xs_periph;
    499 	int flags = xs->xs_control;
    500 
    501 	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));
    502 
    503 	TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
    504 	pool_put(&scsipi_xfer_pool, xs);
    505 
    506 #ifdef DIAGNOSTIC
    507 	if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
    508 	    periph->periph_active == 0) {
    509 		scsipi_printaddr(periph);
    510 		printf("recovery without a command to recovery for\n");
    511 		panic("scsipi_put_xs");
    512 	}
    513 #endif
    514 
    515 	if (flags & XS_CTL_URGENT) {
    516 		if ((flags & XS_CTL_REQSENSE) == 0)
    517 			periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
    518 	} else
    519 		periph->periph_active--;
    520 	if (periph->periph_active == 0 &&
    521 	    (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
    522 		periph->periph_flags &= ~PERIPH_WAITDRAIN;
    523 		wakeup(&periph->periph_active);
    524 	}
    525 
    526 	if (periph->periph_flags & PERIPH_WAITING) {
    527 		periph->periph_flags &= ~PERIPH_WAITING;
    528 		wakeup(periph);
    529 	} else {
    530 		if (periph->periph_switch->psw_start != NULL &&
    531 		    (periph->periph_dev->dv_flags & DVF_ACTIVE)) {
    532 			SC_DEBUG(periph, SCSIPI_DB2,
    533 			    ("calling private start()\n"));
    534 			(*periph->periph_switch->psw_start)(periph);
    535 		}
    536 	}
    537 }
    538 
    539 /*
    540  * scsipi_channel_freeze:
    541  *
    542  *	Freeze a channel's xfer queue.
    543  */
    544 void
    545 scsipi_channel_freeze(struct scsipi_channel *chan, int count)
    546 {
    547 	int s;
    548 
    549 	s = splbio();
    550 	chan->chan_qfreeze += count;
    551 	splx(s);
    552 }
    553 
    554 /*
    555  * scsipi_channel_thaw:
    556  *
    557  *	Thaw a channel's xfer queue.
    558  */
    559 void
    560 scsipi_channel_thaw(struct scsipi_channel *chan, int count)
    561 {
    562 	int s;
    563 
    564 	s = splbio();
    565 	chan->chan_qfreeze -= count;
    566 	/*
    567 	 * Don't let the freeze count go negative.
    568 	 *
    569 	 * Presumably the adapter driver could keep track of this,
    570 	 * but it might just be easier to do this here so as to allow
    571 	 * multiple callers, including those outside the adapter driver.
    572 	 */
    573 	if (chan->chan_qfreeze < 0) {
    574 		chan->chan_qfreeze = 0;
    575 	}
    576 	splx(s);
    577 	/*
    578 	 * Kick the channel's queue here.  Note, we may be running in
    579 	 * interrupt context (softclock or HBA's interrupt), so the adapter
    580 	 * driver had better not sleep.
    581 	 */
    582 	if (chan->chan_qfreeze == 0)
    583 		scsipi_run_queue(chan);
    584 }
    585 
    586 /*
    587  * scsipi_channel_timed_thaw:
    588  *
    589  *	Thaw a channel after some time has expired. This will also
    590  * 	run the channel's queue if the freeze count has reached 0.
    591  */
    592 void
    593 scsipi_channel_timed_thaw(void *arg)
    594 {
    595 	struct scsipi_channel *chan = arg;
    596 
    597 	scsipi_channel_thaw(chan, 1);
    598 }
    599 
    600 /*
    601  * scsipi_periph_freeze:
    602  *
    603  *	Freeze a device's xfer queue.
    604  */
    605 void
    606 scsipi_periph_freeze(struct scsipi_periph *periph, int count)
    607 {
    608 	int s;
    609 
    610 	s = splbio();
    611 	periph->periph_qfreeze += count;
    612 	splx(s);
    613 }
    614 
    615 /*
    616  * scsipi_periph_thaw:
    617  *
    618  *	Thaw a device's xfer queue.
    619  */
    620 void
    621 scsipi_periph_thaw(struct scsipi_periph *periph, int count)
    622 {
    623 	int s;
    624 
    625 	s = splbio();
    626 	periph->periph_qfreeze -= count;
    627 #ifdef DIAGNOSTIC
    628 	if (periph->periph_qfreeze < 0) {
    629 		static const char pc[] = "periph freeze count < 0";
    630 		scsipi_printaddr(periph);
    631 		printf("%s\n", pc);
    632 		panic(pc);
    633 	}
    634 #endif
    635 	if (periph->periph_qfreeze == 0 &&
    636 	    (periph->periph_flags & PERIPH_WAITING) != 0)
    637 		wakeup(periph);
    638 	splx(s);
    639 }
    640 
    641 /*
    642  * scsipi_periph_timed_thaw:
    643  *
    644  *	Thaw a device after some time has expired.
    645  */
    646 void
    647 scsipi_periph_timed_thaw(void *arg)
    648 {
    649 	int s;
    650 	struct scsipi_periph *periph = arg;
    651 
    652 	callout_stop(&periph->periph_callout);
    653 
    654 	s = splbio();
    655 	scsipi_periph_thaw(periph, 1);
    656 	if ((periph->periph_channel->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
    657 		/*
    658 		 * Kick the channel's queue here.  Note, we're running in
    659 		 * interrupt context (softclock), so the adapter driver
    660 		 * had better not sleep.
    661 		 */
    662 		scsipi_run_queue(periph->periph_channel);
    663 	} else {
    664 		/*
    665 		 * Tell the completion thread to kick the channel's queue here.
    666 		 */
    667 		periph->periph_channel->chan_tflags |= SCSIPI_CHANT_KICK;
    668 		wakeup(&periph->periph_channel->chan_complete);
    669 	}
    670 	splx(s);
    671 }
    672 
    673 /*
    674  * scsipi_wait_drain:
    675  *
    676  *	Wait for a periph's pending xfers to drain.
    677  */
    678 void
    679 scsipi_wait_drain(struct scsipi_periph *periph)
    680 {
    681 	int s;
    682 
    683 	s = splbio();
    684 	while (periph->periph_active != 0) {
    685 		periph->periph_flags |= PERIPH_WAITDRAIN;
    686 		(void) tsleep(&periph->periph_active, PRIBIO, "sxdrn", 0);
    687 	}
    688 	splx(s);
    689 }
    690 
    691 /*
    692  * scsipi_kill_pending:
    693  *
    694  *	Kill off all pending xfers for a periph.
    695  *
    696  *	NOTE: Must be called at splbio().
    697  */
    698 void
    699 scsipi_kill_pending(struct scsipi_periph *periph)
    700 {
    701 
    702 	(*periph->periph_channel->chan_bustype->bustype_kill_pending)(periph);
    703 	scsipi_wait_drain(periph);
    704 }
    705 
    706 /*
    707  * scsipi_print_cdb:
    708  * prints a command descriptor block (for debug purpose, error messages,
    709  * SCSIPI_VERBOSE, ...)
    710  */
    711 void
    712 scsipi_print_cdb(struct scsipi_generic *cmd)
    713 {
    714 	int i, j;
    715 
    716  	printf("0x%02x", cmd->opcode);
    717 
    718  	switch (CDB_GROUPID(cmd->opcode)) {
    719  	case CDB_GROUPID_0:
    720  		j = CDB_GROUP0;
    721  		break;
    722  	case CDB_GROUPID_1:
    723  		j = CDB_GROUP1;
    724  		break;
    725  	case CDB_GROUPID_2:
    726  		j = CDB_GROUP2;
    727  		break;
    728  	case CDB_GROUPID_3:
    729  		j = CDB_GROUP3;
    730  		break;
    731  	case CDB_GROUPID_4:
    732  		j = CDB_GROUP4;
    733  		break;
    734  	case CDB_GROUPID_5:
    735  		j = CDB_GROUP5;
    736  		break;
    737  	case CDB_GROUPID_6:
    738  		j = CDB_GROUP6;
    739  		break;
    740  	case CDB_GROUPID_7:
    741  		j = CDB_GROUP7;
    742  		break;
    743  	default:
    744  		j = 0;
    745  	}
    746  	if (j == 0)
    747  		j = sizeof (cmd->bytes);
    748  	for (i = 0; i < j-1; i++) /* already done the opcode */
    749  		printf(" %02x", cmd->bytes[i]);
    750 }
    751 
    752 /*
    753  * scsipi_interpret_sense:
    754  *
    755  *	Look at the returned sense and act on the error, determining
    756  *	the unix error number to pass back.  (0 = report no error)
    757  *
    758  *	NOTE: If we return ERESTART, we are expected to haved
    759  *	thawed the device!
    760  *
    761  *	THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
    762  */
    763 int
    764 scsipi_interpret_sense(struct scsipi_xfer *xs)
    765 {
    766 	struct scsipi_sense_data *sense;
    767 	struct scsipi_periph *periph = xs->xs_periph;
    768 	u_int8_t key;
    769 	int error;
    770 #ifndef	SCSIVERBOSE
    771 	u_int32_t info;
    772 	static char *error_mes[] = {
    773 		"soft error (corrected)",
    774 		"not ready", "medium error",
    775 		"non-media hardware failure", "illegal request",
    776 		"unit attention", "readonly device",
    777 		"no data found", "vendor unique",
    778 		"copy aborted", "command aborted",
    779 		"search returned equal", "volume overflow",
    780 		"verify miscompare", "unknown error key"
    781 	};
    782 #endif
    783 
    784 	sense = &xs->sense.scsi_sense;
    785 #ifdef SCSIPI_DEBUG
    786 	if (periph->periph_flags & SCSIPI_DB1) {
    787 		int count;
    788 		scsipi_printaddr(periph);
    789 		printf(" sense debug information:\n");
    790 		printf("\tcode 0x%x valid 0x%x\n",
    791 			sense->error_code & SSD_ERRCODE,
    792 			sense->error_code & SSD_ERRCODE_VALID ? 1 : 0);
    793 		printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
    794 			sense->segment,
    795 			sense->flags & SSD_KEY,
    796 			sense->flags & SSD_ILI ? 1 : 0,
    797 			sense->flags & SSD_EOM ? 1 : 0,
    798 			sense->flags & SSD_FILEMARK ? 1 : 0);
    799 		printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d "
    800 			"extra bytes\n",
    801 			sense->info[0],
    802 			sense->info[1],
    803 			sense->info[2],
    804 			sense->info[3],
    805 			sense->extra_len);
    806 		printf("\textra: ");
    807 		for (count = 0; count < ADD_BYTES_LIM(sense); count++)
    808 			printf("0x%x ", sense->cmd_spec_info[count]);
    809 		printf("\n");
    810 	}
    811 #endif
    812 
    813 	/*
    814 	 * If the periph has it's own error handler, call it first.
    815 	 * If it returns a legit error value, return that, otherwise
    816 	 * it wants us to continue with normal error processing.
    817 	 */
    818 	if (periph->periph_switch->psw_error != NULL) {
    819 		SC_DEBUG(periph, SCSIPI_DB2,
    820 		    ("calling private err_handler()\n"));
    821 		error = (*periph->periph_switch->psw_error)(xs);
    822 		if (error != EJUSTRETURN)
    823 			return (error);
    824 	}
    825 	/* otherwise use the default */
    826 	switch (sense->error_code & SSD_ERRCODE) {
    827 
    828 		/*
    829 		 * Old SCSI-1 and SASI devices respond with
    830 		 * codes other than 70.
    831 		 */
    832 	case 0x00:		/* no error (command completed OK) */
    833 		return (0);
    834 	case 0x04:		/* drive not ready after it was selected */
    835 		if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
    836 			periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
    837 		if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
    838 			return (0);
    839 		/* XXX - display some sort of error here? */
    840 		return (EIO);
    841 	case 0x20:		/* invalid command */
    842 		if ((xs->xs_control &
    843 		     XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
    844 			return (0);
    845 		return (EINVAL);
    846 	case 0x25:		/* invalid LUN (Adaptec ACB-4000) */
    847 		return (EACCES);
    848 
    849 		/*
    850 		 * If it's code 70, use the extended stuff and
    851 		 * interpret the key
    852 		 */
    853 	case 0x71:		/* delayed error */
    854 		scsipi_printaddr(periph);
    855 		key = sense->flags & SSD_KEY;
    856 		printf(" DEFERRED ERROR, key = 0x%x\n", key);
    857 		/* FALLTHROUGH */
    858 	case 0x70:
    859 #ifndef	SCSIVERBOSE
    860 		if ((sense->error_code & SSD_ERRCODE_VALID) != 0)
    861 			info = _4btol(sense->info);
    862 		else
    863 			info = 0;
    864 #endif
    865 		key = sense->flags & SSD_KEY;
    866 
    867 		switch (key) {
    868 		case SKEY_NO_SENSE:
    869 		case SKEY_RECOVERED_ERROR:
    870 			if (xs->resid == xs->datalen && xs->datalen) {
    871 				/*
    872 				 * Why is this here?
    873 				 */
    874 				xs->resid = 0;	/* not short read */
    875 			}
    876 		case SKEY_EQUAL:
    877 			error = 0;
    878 			break;
    879 		case SKEY_NOT_READY:
    880 			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
    881 				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
    882 			if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
    883 				return (0);
    884 			if (sense->add_sense_code == 0x3A) {
    885 				error = ENODEV; /* Medium not present */
    886 				if (xs->xs_control & XS_CTL_SILENT_NODEV)
    887 					return (error);
    888 			} else
    889 				error = EIO;
    890 			if ((xs->xs_control & XS_CTL_SILENT) != 0)
    891 				return (error);
    892 			break;
    893 		case SKEY_ILLEGAL_REQUEST:
    894 			if ((xs->xs_control &
    895 			     XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
    896 				return (0);
    897 			/*
    898 			 * Handle the case where a device reports
    899 			 * Logical Unit Not Supported during discovery.
    900 			 */
    901 			if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
    902 			    sense->add_sense_code == 0x25 &&
    903 			    sense->add_sense_code_qual == 0x00)
    904 				return (EINVAL);
    905 			if ((xs->xs_control & XS_CTL_SILENT) != 0)
    906 				return (EIO);
    907 			error = EINVAL;
    908 			break;
    909 		case SKEY_UNIT_ATTENTION:
    910 			if (sense->add_sense_code == 0x29 &&
    911 			    sense->add_sense_code_qual == 0x00) {
    912 				/* device or bus reset */
    913 				return (ERESTART);
    914 			}
    915 			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
    916 				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
    917 			if ((xs->xs_control &
    918 			     XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
    919 				/* XXX Should reupload any transient state. */
    920 				(periph->periph_flags &
    921 				 PERIPH_REMOVABLE) == 0) {
    922 				return (ERESTART);
    923 			}
    924 			if ((xs->xs_control & XS_CTL_SILENT) != 0)
    925 				return (EIO);
    926 			error = EIO;
    927 			break;
    928 		case SKEY_WRITE_PROTECT:
    929 			error = EROFS;
    930 			break;
    931 		case SKEY_BLANK_CHECK:
    932 			error = 0;
    933 			break;
    934 		case SKEY_ABORTED_COMMAND:
    935 			if (xs->xs_retries != 0) {
    936 				xs->xs_retries--;
    937 				error = ERESTART;
    938 			} else
    939 				error = EIO;
    940 			break;
    941 		case SKEY_VOLUME_OVERFLOW:
    942 			error = ENOSPC;
    943 			break;
    944 		default:
    945 			error = EIO;
    946 			break;
    947 		}
    948 
    949 #ifdef SCSIVERBOSE
    950 		if (key && (xs->xs_control & XS_CTL_SILENT) == 0)
    951 			scsipi_print_sense(xs, 0);
    952 #else
    953 		if (key) {
    954 			scsipi_printaddr(periph);
    955 			printf("%s", error_mes[key - 1]);
    956 			if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
    957 				switch (key) {
    958 				case SKEY_NOT_READY:
    959 				case SKEY_ILLEGAL_REQUEST:
    960 				case SKEY_UNIT_ATTENTION:
    961 				case SKEY_WRITE_PROTECT:
    962 					break;
    963 				case SKEY_BLANK_CHECK:
    964 					printf(", requested size: %d (decimal)",
    965 					    info);
    966 					break;
    967 				case SKEY_ABORTED_COMMAND:
    968 					if (xs->xs_retries)
    969 						printf(", retrying");
    970 					printf(", cmd 0x%x, info 0x%x",
    971 					    xs->cmd->opcode, info);
    972 					break;
    973 				default:
    974 					printf(", info = %d (decimal)", info);
    975 				}
    976 			}
    977 			if (sense->extra_len != 0) {
    978 				int n;
    979 				printf(", data =");
    980 				for (n = 0; n < sense->extra_len; n++)
    981 					printf(" %02x",
    982 					    sense->cmd_spec_info[n]);
    983 			}
    984 			printf("\n");
    985 		}
    986 #endif
    987 		return (error);
    988 
    989 	/*
    990 	 * Some other code, just report it
    991 	 */
    992 	default:
    993 #if    defined(SCSIDEBUG) || defined(DEBUG)
    994 	{
    995 		static char *uc = "undecodable sense error";
    996 		int i;
    997 		u_int8_t *cptr = (u_int8_t *) sense;
    998 		scsipi_printaddr(periph);
    999 		if (xs->cmd == &xs->cmdstore) {
   1000 			printf("%s for opcode 0x%x, data=",
   1001 			    uc, xs->cmdstore.opcode);
   1002 		} else {
   1003 			printf("%s, data=", uc);
   1004 		}
   1005 		for (i = 0; i < sizeof (sense); i++)
   1006 			printf(" 0x%02x", *(cptr++) & 0xff);
   1007 		printf("\n");
   1008 	}
   1009 #else
   1010 		scsipi_printaddr(periph);
   1011 		printf("Sense Error Code 0x%x",
   1012 			sense->error_code & SSD_ERRCODE);
   1013 		if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
   1014 			struct scsipi_sense_data_unextended *usense =
   1015 			    (struct scsipi_sense_data_unextended *)sense;
   1016 			printf(" at block no. %d (decimal)",
   1017 			    _3btol(usense->block));
   1018 		}
   1019 		printf("\n");
   1020 #endif
   1021 		return (EIO);
   1022 	}
   1023 }
   1024 
   1025 /*
   1026  * scsipi_size:
   1027  *
   1028  *	Find out from the device what its capacity is.
   1029  */
   1030 u_int64_t
   1031 scsipi_size(struct scsipi_periph *periph, int flags)
   1032 {
   1033 	struct scsipi_read_capacity cmd;
   1034 	struct scsipi_read_cap_data data;
   1035 
   1036 	memset(&cmd, 0, sizeof(cmd));
   1037 	cmd.opcode = READ_CAPACITY;
   1038 
   1039 	/*
   1040 	 * If the command works, interpret the result as a 4 byte
   1041 	 * number of blocks
   1042 	 */
   1043 	if (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
   1044 	    (void *)&data, sizeof(data), SCSIPIRETRIES, 20000, NULL,
   1045 	    flags | XS_CTL_DATA_IN | XS_CTL_DATA_ONSTACK | XS_CTL_SILENT) != 0)
   1046 		return (0);
   1047 
   1048 	return (_4btol(data.addr) + 1);
   1049 }
   1050 
   1051 /*
   1052  * scsipi_test_unit_ready:
   1053  *
   1054  *	Issue a `test unit ready' request.
   1055  */
   1056 int
   1057 scsipi_test_unit_ready(struct scsipi_periph *periph, int flags)
   1058 {
   1059 	int retries;
   1060 	struct scsipi_test_unit_ready cmd;
   1061 
   1062 	/* some ATAPI drives don't support TEST_UNIT_READY. Sigh */
   1063 	if (periph->periph_quirks & PQUIRK_NOTUR)
   1064 		return (0);
   1065 
   1066 	memset(&cmd, 0, sizeof(cmd));
   1067 	cmd.opcode = TEST_UNIT_READY;
   1068 
   1069 	if (flags & XS_CTL_DISCOVERY)
   1070 		retries = 0;
   1071 	else
   1072 		retries = SCSIPIRETRIES;
   1073 
   1074 	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
   1075 	    retries, 10000, NULL, flags));
   1076 }
   1077 
   1078 /*
   1079  * scsipi_inquire:
   1080  *
   1081  *	Ask the device about itself.
   1082  */
int
scsipi_inquire(struct scsipi_periph *periph, struct scsipi_inquiry_data *inqbuf,
    int flags)
{
	int retries;
	struct scsipi_inquiry cmd;
	int error;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = INQUIRY;

	/* Don't retry while probing for devices. */
	if (flags & XS_CTL_DISCOVERY)
		retries = 0;
	else
		retries = SCSIPIRETRIES;

	/*
	 * If we request more data than the device can provide, it SHOULD just
	 * return a short response.  However, some devices error with an
	 * ILLEGAL REQUEST sense code, and yet others have even more special
	 * failure modes (such as the GL641USB flash adapter, which goes loony
	 * and sends corrupted CRCs).  To work around this, and to bring our
	 * behavior more in line with other OSes, we do a shorter inquiry,
	 * covering all the SCSI-2 information, first, and then request more
	 * data iff the "additional length" field indicates there is more.
	 * - mycroft, 2003/10/16
	 */
	cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI2;
	error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI2, retries,
	    10000, NULL, XS_CTL_DATA_IN | flags);
	/*
	 * additional_length counts bytes past the first 4; a value larger
	 * than (SCSI-2 length - 4) means the device has SCSI-3 data for us.
	 */
	if (!error && inqbuf->additional_length > SCSIPI_INQUIRY_LENGTH_SCSI2 - 4) {
		cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI3;
		error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
		    (void *)inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI3, retries,
		    10000, NULL, XS_CTL_DATA_IN | flags);
	}

#ifdef SCSI_OLD_NOINQUIRY
	/*
	 * Kludge for the Adaptec ACB-4000 SCSI->MFM translator.
	 * This board doesn't support the INQUIRY command at all.
	 */
	if (error == EINVAL || error == EACCES) {
		/*
		 * Conjure up an INQUIRY response.
		 */
		inqbuf->device = (error == EINVAL ?
			 SID_QUAL_LU_PRESENT :
			 SID_QUAL_LU_NOTPRESENT) | T_DIRECT;
		inqbuf->dev_qual2 = 0;
		inqbuf->version = 0;
		inqbuf->response_format = SID_FORMAT_SCSI1;
		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
		memcpy(inqbuf->vendor, "ADAPTEC ACB-4000            ", 28);
		error = 0;
	}

	/*
	 * Kludge for the Emulex MT-02 SCSI->QIC translator.
	 * This board gives an empty response to an INQUIRY command.
	 */
	else if (error == 0 &&
		 inqbuf->device == (SID_QUAL_LU_PRESENT | T_DIRECT) &&
		 inqbuf->dev_qual2 == 0 &&
		 inqbuf->version == 0 &&
		 inqbuf->response_format == SID_FORMAT_SCSI1) {
		/*
		 * Fill out the INQUIRY response.
		 */
		inqbuf->device = (SID_QUAL_LU_PRESENT | T_SEQUENTIAL);
		inqbuf->dev_qual2 = SID_REMOVABLE;
		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
		memcpy(inqbuf->vendor, "EMULEX  MT-02 QIC           ", 28);
	}
#endif /* SCSI_OLD_NOINQUIRY */

	return error;
}
   1164 
   1165 /*
   1166  * scsipi_prevent:
   1167  *
   1168  *	Prevent or allow the user to remove the media
   1169  */
   1170 int
   1171 scsipi_prevent(struct scsipi_periph *periph, int type, int flags)
   1172 {
   1173 	struct scsipi_prevent cmd;
   1174 
   1175 	memset(&cmd, 0, sizeof(cmd));
   1176 	cmd.opcode = PREVENT_ALLOW;
   1177 	cmd.how = type;
   1178 
   1179 	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
   1180 	    SCSIPIRETRIES, 5000, NULL, flags));
   1181 }
   1182 
   1183 /*
   1184  * scsipi_start:
   1185  *
   1186  *	Send a START UNIT.
   1187  */
   1188 int
   1189 scsipi_start(struct scsipi_periph *periph, int type, int flags)
   1190 {
   1191 	struct scsipi_start_stop cmd;
   1192 
   1193 	memset(&cmd, 0, sizeof(cmd));
   1194 	cmd.opcode = START_STOP;
   1195 	cmd.byte2 = 0x00;
   1196 	cmd.how = type;
   1197 
   1198 	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
   1199 	    SCSIPIRETRIES, (type & SSS_START) ? 60000 : 10000, NULL, flags));
   1200 }
   1201 
   1202 /*
   1203  * scsipi_mode_sense, scsipi_mode_sense_big:
   1204  *	get a sense page from a device
   1205  */
   1206 
   1207 int
   1208 scsipi_mode_sense(struct scsipi_periph *periph, int byte2, int page,
   1209     struct scsipi_mode_header *data, int len, int flags, int retries,
   1210     int timeout)
   1211 {
   1212 	struct scsipi_mode_sense cmd;
   1213 	int error;
   1214 
   1215 	memset(&cmd, 0, sizeof(cmd));
   1216 	cmd.opcode = MODE_SENSE;
   1217 	cmd.byte2 = byte2;
   1218 	cmd.page = page;
   1219 	cmd.length = len & 0xff;
   1220 
   1221 	error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
   1222 	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_IN);
   1223 	SC_DEBUG(periph, SCSIPI_DB2,
   1224 	    ("scsipi_mode_sense: error=%d\n", error));
   1225 	return (error);
   1226 }
   1227 
   1228 int
   1229 scsipi_mode_sense_big(struct scsipi_periph *periph, int byte2, int page,
   1230     struct scsipi_mode_header_big *data, int len, int flags, int retries,
   1231     int timeout)
   1232 {
   1233 	struct scsipi_mode_sense_big cmd;
   1234 	int error;
   1235 
   1236 	memset(&cmd, 0, sizeof(cmd));
   1237 	cmd.opcode = MODE_SENSE_BIG;
   1238 	cmd.byte2 = byte2;
   1239 	cmd.page = page;
   1240 	_lto2b(len, cmd.length);
   1241 
   1242 	error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
   1243 	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_IN);
   1244 	SC_DEBUG(periph, SCSIPI_DB2,
   1245 	    ("scsipi_mode_sense_big: error=%d\n", error));
   1246 	return (error);
   1247 }
   1248 
   1249 int
   1250 scsipi_mode_select(struct scsipi_periph *periph, int byte2,
   1251     struct scsipi_mode_header *data, int len, int flags, int retries,
   1252     int timeout)
   1253 {
   1254 	struct scsipi_mode_select cmd;
   1255 	int error;
   1256 
   1257 	memset(&cmd, 0, sizeof(cmd));
   1258 	cmd.opcode = MODE_SELECT;
   1259 	cmd.byte2 = byte2;
   1260 	cmd.length = len & 0xff;
   1261 
   1262 	error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
   1263 	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_OUT);
   1264 	SC_DEBUG(periph, SCSIPI_DB2,
   1265 	    ("scsipi_mode_select: error=%d\n", error));
   1266 	return (error);
   1267 }
   1268 
   1269 int
   1270 scsipi_mode_select_big(struct scsipi_periph *periph, int byte2,
   1271     struct scsipi_mode_header_big *data, int len, int flags, int retries,
   1272     int timeout)
   1273 {
   1274 	struct scsipi_mode_select_big cmd;
   1275 	int error;
   1276 
   1277 	memset(&cmd, 0, sizeof(cmd));
   1278 	cmd.opcode = MODE_SELECT_BIG;
   1279 	cmd.byte2 = byte2;
   1280 	_lto2b(len, cmd.length);
   1281 
   1282 	error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
   1283 	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_OUT);
   1284 	SC_DEBUG(periph, SCSIPI_DB2,
   1285 	    ("scsipi_mode_select_big: error=%d\n", error));
   1286 	return (error);
   1287 }
   1288 
   1289 /*
   1290  * scsipi_done:
   1291  *
   1292  *	This routine is called by an adapter's interrupt handler when
   1293  *	an xfer is completed.
   1294  */
void
scsipi_done(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int s, freezecnt;

	SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
#ifdef SCSIPI_DEBUG
	if (periph->periph_dbflags & SCSIPI_DB1)
		show_scsipi_cmd(xs);
#endif

	s = splbio();
	/*
	 * The resource this command was using is now free.
	 */
	scsipi_put_resource(chan);
	xs->xs_periph->periph_sent--;

	/*
	 * If the command was tagged, free the tag.
	 */
	if (XS_CTL_TAGTYPE(xs) != 0)
		scsipi_put_tag(xs);
	else
		periph->periph_flags &= ~PERIPH_UNTAG;

	/* Mark the command as `done'. */
	xs->xs_status |= XS_STS_DONE;

#ifdef DIAGNOSTIC
	/* ASYNC and POLL are mutually exclusive by contract. */
	if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
	    (XS_CTL_ASYNC|XS_CTL_POLL))
		panic("scsipi_done: ASYNC and POLL");
#endif

	/*
	 * If the xfer had an error of any sort, freeze the
	 * periph's queue.  Freeze it again if we were requested
	 * to do so in the xfer.
	 */
	freezecnt = 0;
	if (xs->error != XS_NOERROR)
		freezecnt++;
	if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
		freezecnt++;
	if (freezecnt != 0)
		scsipi_periph_freeze(periph, freezecnt);

	/*
	 * record the xfer with a pending sense, in case a SCSI reset is
	 * received before the thread is woken up.
	 */
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		periph->periph_flags |= PERIPH_SENSE;
		periph->periph_xscheck = xs;
	}

	/*
	 * If this was an xfer that was not to complete asynchronously,
	 * let the requesting thread perform error checking/handling
	 * in its context.
	 */
	if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
		splx(s);
		/*
		 * If it's a polling job, just return, to unwind the
		 * call graph.  We don't need to restart the queue,
		 * because polling jobs are treated specially, and
		 * are really only used during crash dumps anyway
		 * (XXX or during boot-time autoconfiguration of
		 * ATAPI devices).
		 */
		if (xs->xs_control & XS_CTL_POLL)
			return;
		wakeup(xs);
		goto out;
	}

	/*
	 * Catch the extremely common case of I/O completing
	 * without error; no use in taking a context switch
	 * if we can handle it in interrupt context.
	 */
	if (xs->error == XS_NOERROR) {
		splx(s);
		(void) scsipi_complete(xs);
		goto out;
	}

	/*
	 * There is an error on this xfer.  Put it on the channel's
	 * completion queue, and wake up the completion thread.
	 */
	TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
	splx(s);
	wakeup(&chan->chan_complete);

 out:
	/*
	 * If there are more xfers on the channel's queue, attempt to
	 * run them.
	 */
	scsipi_run_queue(chan);
}
   1401 
   1402 /*
   1403  * scsipi_complete:
   1404  *
   1405  *	Completion of a scsipi_xfer.  This is the guts of scsipi_done().
   1406  *
   1407  *	NOTE: This routine MUST be called with valid thread context
   1408  *	except for the case where the following two conditions are
   1409  *	true:
   1410  *
   1411  *		xs->error == XS_NOERROR
   1412  *		XS_CTL_ASYNC is set in xs->xs_control
   1413  *
   1414  *	The semantics of this routine can be tricky, so here is an
   1415  *	explanation:
   1416  *
   1417  *		0		Xfer completed successfully.
   1418  *
   1419  *		ERESTART	Xfer had an error, but was restarted.
   1420  *
   1421  *		anything else	Xfer had an error, return value is Unix
   1422  *				errno.
   1423  *
   1424  *	If the return value is anything but ERESTART:
   1425  *
   1426  *		- If XS_CTL_ASYNC is set, `xs' has been freed back to
   1427  *		  the pool.
   1428  *		- If there is a buf associated with the xfer,
   1429  *		  it has been biodone()'d.
   1430  */
static int
scsipi_complete(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int error, s;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
		panic("scsipi_complete: XS_CTL_ASYNC but no buf");
#endif
	/*
	 * If command terminated with a CHECK CONDITION, we need to issue a
	 * REQUEST_SENSE command. Once the REQUEST_SENSE has been processed
	 * we'll have the real status.
	 * Must be processed at splbio() to avoid missing a SCSI bus reset
	 * for this command.
	 */
	s = splbio();
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		/* request sense for a request sense ? */
		if (xs->xs_control & XS_CTL_REQSENSE) {
			scsipi_printaddr(periph);
			printf("request sense for a request sense ?\n");
			/* XXX maybe we should reset the device ? */
			/* we've been frozen because xs->error != XS_NOERROR */
			scsipi_periph_thaw(periph, 1);
			splx(s);
			if (xs->resid < xs->datalen) {
				printf("we read %d bytes of sense anyway:\n",
				    xs->datalen - xs->resid);
#ifdef SCSIVERBOSE
				scsipi_print_sense_data((void *)xs->data, 0);
#endif
			}
			return EINVAL;
		}
		scsipi_request_sense(xs);
	}
	splx(s);

	/*
	 * If it's a user level request, bypass all usual completion
	 * processing, let the user work it out..
	 */
	if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
		SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n"));
		/* undo the freeze done in scsipi_done() before handing off */
		if (xs->error != XS_NOERROR)
			scsipi_periph_thaw(periph, 1);
		scsipi_user_done(xs);
		SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n "));
		return 0;
	}

	/* Map the transfer's completion status to a Unix errno. */
	switch (xs->error) {
	case XS_NOERROR:
		error = 0;
		break;

	case XS_SENSE:
	case XS_SHORTSENSE:
		/* bus-type-specific (SCSI vs. ATAPI) sense interpretation */
		error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
		break;

	case XS_RESOURCE_SHORTAGE:
		/*
		 * XXX Should freeze channel's queue.
		 */
		scsipi_printaddr(periph);
		printf("adapter resource shortage\n");
		/* FALLTHROUGH */

	case XS_BUSY:
		if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
			struct scsipi_max_openings mo;

			/*
			 * We set the openings to active - 1, assuming that
			 * the command that got us here is the first one that
			 * can't fit into the device's queue.  If that's not
			 * the case, I guess we'll find out soon enough.
			 */
			mo.mo_target = periph->periph_target;
			mo.mo_lun = periph->periph_lun;
			if (periph->periph_active < periph->periph_openings)
				mo.mo_openings = periph->periph_active - 1;
			else
				mo.mo_openings = periph->periph_openings - 1;
#ifdef DIAGNOSTIC
			if (mo.mo_openings < 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in < 0 openings\n");
				panic("scsipi_done");
			}
#endif
			if (mo.mo_openings == 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in 0 openings\n");
				mo.mo_openings = 1;
			}
			scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
			error = ERESTART;
		} else if (xs->xs_retries != 0) {
			xs->xs_retries--;
			/*
			 * Wait one second, and try again.
			 */
			if ((xs->xs_control & XS_CTL_POLL) ||
			    (chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
				/* can't sleep here: busy-wait instead */
				delay(1000000);
			} else if (!callout_pending(&periph->periph_callout)) {
				scsipi_periph_freeze(periph, 1);
				callout_reset(&periph->periph_callout,
				    hz, scsipi_periph_timed_thaw, periph);
			}
			error = ERESTART;
		} else
			error = EBUSY;
		break;

	case XS_REQUEUE:
		error = ERESTART;
		break;

	case XS_SELTIMEOUT:
	case XS_TIMEOUT:
		/*
		 * If the device hasn't gone away, honor retry counts.
		 *
		 * Note that if we're in the middle of probing it,
		 * it won't be found because it isn't here yet so
		 * we won't honor the retry count in that case.
		 */
		if (scsipi_lookup_periph(chan, periph->periph_target,
		    periph->periph_lun) && xs->xs_retries != 0) {
			xs->xs_retries--;
			error = ERESTART;
		} else
			error = EIO;
		break;

	case XS_RESET:
		if (xs->xs_control & XS_CTL_REQSENSE) {
			/*
			 * request sense interrupted by reset: signal it
			 * with EINTR return code.
			 */
			error = EINTR;
		} else {
			if (xs->xs_retries != 0) {
				xs->xs_retries--;
				error = ERESTART;
			} else
				error = EIO;
		}
		break;

	case XS_DRIVER_STUFFUP:
		scsipi_printaddr(periph);
		printf("generic HBA error\n");
		error = EIO;
		break;
	default:
		scsipi_printaddr(periph);
		printf("invalid return code from adapter: %d\n", xs->error);
		error = EIO;
		break;
	}

	s = splbio();
	if (error == ERESTART) {
		/*
		 * If we get here, the periph has been thawed and frozen
		 * again if we had to issue recovery commands.  Alternatively,
		 * it may have been frozen again and in a timed thaw.  In
		 * any case, we thaw the periph once we re-enqueue the
		 * command.  Once the periph is fully thawed, it will begin
		 * operation again.
		 */
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->xs_status &= ~XS_STS_DONE;
		xs->xs_requeuecnt++;
		error = scsipi_enqueue(xs);
		if (error == 0) {
			scsipi_periph_thaw(periph, 1);
			splx(s);
			return (ERESTART);
		}
	}

	/*
	 * scsipi_done() freezes the queue if not XS_NOERROR.
	 * Thaw it here.
	 */
	if (xs->error != XS_NOERROR)
		scsipi_periph_thaw(periph, 1);

	/* Let the periph driver do its own completion processing. */
	if (periph->periph_switch->psw_done)
		periph->periph_switch->psw_done(xs, error);

	/* Async xfers are freed here; sync callers free their own xs. */
	if (xs->xs_control & XS_CTL_ASYNC)
		scsipi_put_xs(xs);
	splx(s);

	return (error);
}
   1638 
   1639 /*
   1640  * Issue a request sense for the given scsipi_xfer. Called when the xfer
   1641  * returns with a CHECK_CONDITION status. Must be called in valid thread
   1642  * context and at splbio().
   1643  */
   1644 
static void
scsipi_request_sense(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags, error;
	struct scsipi_sense cmd;

	periph->periph_flags |= PERIPH_SENSE;

	/* if command was polling, request sense will too */
	flags = xs->xs_control & XS_CTL_POLL;
	/* Polling commands can't sleep */
	if (flags)
		flags |= XS_CTL_NOSLEEP;

	/* urgent so it jumps the queue; freeze/thaw bookkeeping as for
	 * any recovery command */
	flags |= XS_CTL_REQSENSE | XS_CTL_URGENT | XS_CTL_DATA_IN |
	    XS_CTL_THAW_PERIPH | XS_CTL_FREEZE_PERIPH;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = REQUEST_SENSE;
	cmd.length = sizeof(struct scsipi_sense_data);

	/* note: zero retries, 1 second timeout */
	error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)&xs->sense.scsi_sense, sizeof(struct scsipi_sense_data),
	    0, 1000, NULL, flags);
	periph->periph_flags &= ~PERIPH_SENSE;
	periph->periph_xscheck = NULL;
	switch(error) {
	case 0:
		/* we have a valid sense */
		xs->error = XS_SENSE;
		return;
	case EINTR:
		/* REQUEST_SENSE interrupted by bus reset. */
		xs->error = XS_RESET;
		return;
	case EIO:
		 /* request sense couldn't be performed */
		/*
		 * XXX this isn't quite right but we don't have anything
		 * better for now
		 */
		xs->error = XS_DRIVER_STUFFUP;
		return;
	default:
		 /* Notify that request sense failed. */
		xs->error = XS_DRIVER_STUFFUP;
		scsipi_printaddr(periph);
		printf("request sense failed with error %d\n", error);
		return;
	}
}
   1697 
   1698 /*
   1699  * scsipi_enqueue:
   1700  *
   1701  *	Enqueue an xfer on a channel.
   1702  */
static int
scsipi_enqueue(struct scsipi_xfer *xs)
{
	struct scsipi_channel *chan = xs->xs_periph->periph_channel;
	struct scsipi_xfer *qxs;
	int s;

	s = splbio();

	/*
	 * If the xfer is to be polled, and there are already jobs on
	 * the queue, we can't proceed.  (Polled xfers must run alone;
	 * the caller may retry.)
	 */
	if ((xs->xs_control & XS_CTL_POLL) != 0 &&
	    TAILQ_FIRST(&chan->chan_queue) != NULL) {
		splx(s);
		xs->error = XS_DRIVER_STUFFUP;
		return (EAGAIN);
	}

	/*
	 * If we have an URGENT xfer, it's an error recovery command
	 * and it should just go on the head of the channel's queue.
	 */
	if (xs->xs_control & XS_CTL_URGENT) {
		TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
		goto out;
	}

	/*
	 * If this xfer has already been on the queue before, we
	 * need to reinsert it in the correct order.  That order is:
	 *
	 *	Immediately before the first xfer for this periph
	 *	with a requeuecnt less than xs->xs_requeuecnt.
	 *
	 * Failing that, at the end of the queue.  (We'll end up
	 * there naturally.)
	 */
	if (xs->xs_requeuecnt != 0) {
		for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
		     qxs = TAILQ_NEXT(qxs, channel_q)) {
			if (qxs->xs_periph == xs->xs_periph &&
			    qxs->xs_requeuecnt < xs->xs_requeuecnt)
				break;
		}
		if (qxs != NULL) {
			TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
			    channel_q);
			goto out;
		}
	}
	TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
 out:
	/* Requeue path may have asked us to undo one freeze level. */
	if (xs->xs_control & XS_CTL_THAW_PERIPH)
		scsipi_periph_thaw(xs->xs_periph, 1);
	splx(s);
	return (0);
}
   1762 
   1763 /*
   1764  * scsipi_run_queue:
   1765  *
   1766  *	Start as many xfers as possible running on the channel.
   1767  */
static void
scsipi_run_queue(struct scsipi_channel *chan)
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	int s;

	/* Loop until there is no runnable work or no adapter resources. */
	for (;;) {
		s = splbio();

		/*
		 * If the channel is frozen, we can't do any work right
		 * now.
		 */
		if (chan->chan_qfreeze != 0) {
			splx(s);
			return;
		}

		/*
		 * Look for work to do, and make sure we can do it.
		 */
		for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
		     xs = TAILQ_NEXT(xs, channel_q)) {
			periph = xs->xs_periph;

			/*
			 * Skip periphs that are saturated, frozen, or
			 * currently running an untagged command.
			 */
			if ((periph->periph_sent >= periph->periph_openings) ||
			    periph->periph_qfreeze != 0 ||
			    (periph->periph_flags & PERIPH_UNTAG) != 0)
				continue;

			/*
			 * While recovering or sensing, only URGENT
			 * (recovery) commands may run.
			 */
			if ((periph->periph_flags &
			    (PERIPH_RECOVERING | PERIPH_SENSE)) != 0 &&
			    (xs->xs_control & XS_CTL_URGENT) == 0)
				continue;

			/*
			 * We can issue this xfer!
			 */
			goto got_one;
		}

		/*
		 * Can't find any work to do right now.
		 */
		splx(s);
		return;

 got_one:
		/*
		 * Have an xfer to run.  Allocate a resource from
		 * the adapter to run it.  If we can't allocate that
		 * resource, we don't dequeue the xfer.
		 */
		if (scsipi_get_resource(chan) == 0) {
			/*
			 * Adapter is out of resources.  If the adapter
			 * supports it, attempt to grow them.
			 */
			if (scsipi_grow_resources(chan) == 0) {
				/*
				 * Wasn't able to grow resources,
				 * nothing more we can do.
				 */
				if (xs->xs_control & XS_CTL_POLL) {
					scsipi_printaddr(xs->xs_periph);
					printf("polling command but no "
					    "adapter resources");
					/* We'll panic shortly... */
				}
				splx(s);

				/*
				 * XXX: We should be able to note that
				 * XXX: that resources are needed here!
				 */
				return;
			}
			/*
			 * scsipi_grow_resources() allocated the resource
			 * for us.
			 */
		}

		/*
		 * We have a resource to run this xfer, do it!
		 */
		TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);

		/*
		 * If the command is to be tagged, allocate a tag ID
		 * for it.
		 */
		if (XS_CTL_TAGTYPE(xs) != 0)
			scsipi_get_tag(xs);
		else
			periph->periph_flags |= PERIPH_UNTAG;
		periph->periph_sent++;
		splx(s);

		/* Hand the xfer to the adapter; loop for more work. */
		scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
	}
#ifdef DIAGNOSTIC
	/* NOTREACHED: the for(;;) above only exits via return. */
	panic("scsipi_run_queue: impossible");
#endif
}
   1874 
   1875 /*
   1876  * scsipi_execute_xs:
   1877  *
   1878  *	Begin execution of an xfer, waiting for it to complete, if necessary.
   1879  */
   1880 int
   1881 scsipi_execute_xs(struct scsipi_xfer *xs)
   1882 {
   1883 	struct scsipi_periph *periph = xs->xs_periph;
   1884 	struct scsipi_channel *chan = periph->periph_channel;
   1885 	int oasync, async, poll, retries, error, s;
   1886 
   1887 	(chan->chan_bustype->bustype_cmd)(xs);
   1888 
   1889 	if (xs->xs_control & XS_CTL_DATA_ONSTACK) {
   1890 #if 1
   1891 		if (xs->xs_control & XS_CTL_ASYNC)
   1892 			panic("scsipi_execute_xs: on stack and async");
   1893 #endif
   1894 		/*
   1895 		 * If the I/O buffer is allocated on stack, the
   1896 		 * process must NOT be swapped out, as the device will
   1897 		 * be accessing the stack.
   1898 		 */
   1899 		PHOLD(curlwp);
   1900 	}
   1901 
   1902 	xs->xs_status &= ~XS_STS_DONE;
   1903 	xs->error = XS_NOERROR;
   1904 	xs->resid = xs->datalen;
   1905 	xs->status = SCSI_OK;
   1906 
   1907 #ifdef SCSIPI_DEBUG
   1908 	if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) {
   1909 		printf("scsipi_execute_xs: ");
   1910 		show_scsipi_xs(xs);
   1911 		printf("\n");
   1912 	}
   1913 #endif
   1914 
   1915 	/*
   1916 	 * Deal with command tagging:
   1917 	 *
   1918 	 *	- If the device's current operating mode doesn't
   1919 	 *	  include tagged queueing, clear the tag mask.
   1920 	 *
   1921 	 *	- If the device's current operating mode *does*
   1922 	 *	  include tagged queueing, set the tag_type in
   1923 	 *	  the xfer to the appropriate byte for the tag
   1924 	 *	  message.
   1925 	 */
   1926 	if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0 ||
   1927 		(xs->xs_control & XS_CTL_REQSENSE)) {
   1928 		xs->xs_control &= ~XS_CTL_TAGMASK;
   1929 		xs->xs_tag_type = 0;
   1930 	} else {
   1931 		/*
   1932 		 * If the request doesn't specify a tag, give Head
   1933 		 * tags to URGENT operations and Ordered tags to
   1934 		 * everything else.
   1935 		 */
   1936 		if (XS_CTL_TAGTYPE(xs) == 0) {
   1937 			if (xs->xs_control & XS_CTL_URGENT)
   1938 				xs->xs_control |= XS_CTL_HEAD_TAG;
   1939 			else
   1940 				xs->xs_control |= XS_CTL_ORDERED_TAG;
   1941 		}
   1942 
   1943 		switch (XS_CTL_TAGTYPE(xs)) {
   1944 		case XS_CTL_ORDERED_TAG:
   1945 			xs->xs_tag_type = MSG_ORDERED_Q_TAG;
   1946 			break;
   1947 
   1948 		case XS_CTL_SIMPLE_TAG:
   1949 			xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
   1950 			break;
   1951 
   1952 		case XS_CTL_HEAD_TAG:
   1953 			xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
   1954 			break;
   1955 
   1956 		default:
   1957 			scsipi_printaddr(periph);
   1958 			printf("invalid tag mask 0x%08x\n",
   1959 			    XS_CTL_TAGTYPE(xs));
   1960 			panic("scsipi_execute_xs");
   1961 		}
   1962 	}
   1963 
   1964 	/* If the adaptor wants us to poll, poll. */
   1965 	if (chan->chan_adapter->adapt_flags & SCSIPI_ADAPT_POLL_ONLY)
   1966 		xs->xs_control |= XS_CTL_POLL;
   1967 
   1968 	/*
   1969 	 * If we don't yet have a completion thread, or we are to poll for
   1970 	 * completion, clear the ASYNC flag.
   1971 	 */
   1972 	oasync =  (xs->xs_control & XS_CTL_ASYNC);
   1973 	if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
   1974 		xs->xs_control &= ~XS_CTL_ASYNC;
   1975 
   1976 	async = (xs->xs_control & XS_CTL_ASYNC);
   1977 	poll = (xs->xs_control & XS_CTL_POLL);
   1978 	retries = xs->xs_retries;		/* for polling commands */
   1979 
   1980 #ifdef DIAGNOSTIC
   1981 	if (oasync != 0 && xs->bp == NULL)
   1982 		panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
   1983 #endif
   1984 
   1985 	/*
   1986 	 * Enqueue the transfer.  If we're not polling for completion, this
   1987 	 * should ALWAYS return `no error'.
   1988 	 */
   1989  try_again:
   1990 	error = scsipi_enqueue(xs);
   1991 	if (error) {
   1992 		if (poll == 0) {
   1993 			scsipi_printaddr(periph);
   1994 			printf("not polling, but enqueue failed with %d\n",
   1995 			    error);
   1996 			panic("scsipi_execute_xs");
   1997 		}
   1998 
   1999 		scsipi_printaddr(periph);
   2000 		printf("failed to enqueue polling command");
   2001 		if (retries != 0) {
   2002 			printf(", retrying...\n");
   2003 			delay(1000000);
   2004 			retries--;
   2005 			goto try_again;
   2006 		}
   2007 		printf("\n");
   2008 		goto free_xs;
   2009 	}
   2010 
   2011  restarted:
   2012 	scsipi_run_queue(chan);
   2013 
   2014 	/*
   2015 	 * The xfer is enqueued, and possibly running.  If it's to be
   2016 	 * completed asynchronously, just return now.
   2017 	 */
   2018 	if (async)
   2019 		return (EJUSTRETURN);
   2020 
   2021 	/*
   2022 	 * Not an asynchronous command; wait for it to complete.
   2023 	 */
   2024 	s = splbio();
   2025 	while ((xs->xs_status & XS_STS_DONE) == 0) {
   2026 		if (poll) {
   2027 			scsipi_printaddr(periph);
   2028 			printf("polling command not done\n");
   2029 			panic("scsipi_execute_xs");
   2030 		}
   2031 		(void) tsleep(xs, PRIBIO, "xscmd", 0);
   2032 	}
   2033 	splx(s);
   2034 
   2035 	/*
   2036 	 * Command is complete.  scsipi_done() has awakened us to perform
   2037 	 * the error handling.
   2038 	 */
   2039 	error = scsipi_complete(xs);
   2040 	if (error == ERESTART)
   2041 		goto restarted;
   2042 
   2043 	/*
   2044 	 * If it was meant to run async and we cleared aync ourselve,
   2045 	 * don't return an error here. It has already been handled
   2046 	 */
   2047 	if (oasync)
   2048 		error = EJUSTRETURN;
   2049 	/*
   2050 	 * Command completed successfully or fatal error occurred.  Fall
   2051 	 * into....
   2052 	 */
   2053  free_xs:
   2054 	s = splbio();
   2055 	scsipi_put_xs(xs);
   2056 	splx(s);
   2057 
   2058 	/*
   2059 	 * Kick the queue, keep it running in case it stopped for some
   2060 	 * reason.
   2061 	 */
   2062 	scsipi_run_queue(chan);
   2063 
   2064 	if (xs->xs_control & XS_CTL_DATA_ONSTACK)
   2065 		PRELE(curlwp);
   2066 	return (error);
   2067 }
   2068 
   2069 /*
   2070  * scsipi_completion_thread:
   2071  *
   2072  *	This is the completion thread.  We wait for errors on
   2073  *	asynchronous xfers, and perform the error handling
   2074  *	function, restarting the command, if necessary.
   2075  */
static void
scsipi_completion_thread(void *arg)
{
	struct scsipi_channel *chan = arg;
	struct scsipi_xfer *xs;
	int s;

	/* Let the adapter run any one-time setup from thread context. */
	if (chan->chan_init_cb)
		(*chan->chan_init_cb)(chan, chan->chan_init_cb_arg);

	/* Mark the thread as up so scsipi_thread_call_callback() works. */
	s = splbio();
	chan->chan_flags |= SCSIPI_CHAN_TACTIVE;
	splx(s);
	for (;;) {
		/*
		 * Each iteration re-checks the work sources at splbio,
		 * in priority order: callback, grow-resources, kick,
		 * shutdown, then completed xfers.
		 */
		s = splbio();
		xs = TAILQ_FIRST(&chan->chan_complete);
		if (xs == NULL && chan->chan_tflags  == 0) {
			/* nothing to do; wait */
			(void) tsleep(&chan->chan_complete, PRIBIO,
			    "sccomp", 0);
			splx(s);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
			/* call chan_callback from thread context */
			chan->chan_tflags &= ~SCSIPI_CHANT_CALLBACK;
			chan->chan_callback(chan, chan->chan_callback_arg);
			splx(s);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_GROWRES) {
			/* attempt to get more openings for this channel */
			chan->chan_tflags &= ~SCSIPI_CHANT_GROWRES;
			scsipi_adapter_request(chan,
			    ADAPTER_REQ_GROW_RESOURCES, NULL);
			/* undo the freeze taken when the request was queued */
			scsipi_channel_thaw(chan, 1);
			splx(s);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_KICK) {
			/* explicitly run the queues for this channel */
			chan->chan_tflags &= ~SCSIPI_CHANT_KICK;
			scsipi_run_queue(chan);
			splx(s);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_SHUTDOWN) {
			/* requested to exit; fall out of the loop */
			splx(s);
			break;
		}
		if (xs) {
			/* Dequeue before dropping splbio; xs is now ours. */
			TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
			splx(s);

			/*
			 * Have an xfer with an error; process it.
			 */
			(void) scsipi_complete(xs);

			/*
			 * Kick the queue; keep it running if it was stopped
			 * for some reason.
			 */
			scsipi_run_queue(chan);
		} else {
			splx(s);
		}
	}

	chan->chan_thread = NULL;

	/* In case parent is waiting for us to exit. */
	wakeup(&chan->chan_thread);

	kthread_exit(0);
}
   2152 
   2153 /*
   2154  * scsipi_create_completion_thread:
   2155  *
   2156  *	Callback to actually create the completion thread.
   2157  */
   2158 void
   2159 scsipi_create_completion_thread(void *arg)
   2160 {
   2161 	struct scsipi_channel *chan = arg;
   2162 	struct scsipi_adapter *adapt = chan->chan_adapter;
   2163 
   2164 	if (kthread_create1(scsipi_completion_thread, chan,
   2165 	    &chan->chan_thread, "%s", chan->chan_name)) {
   2166 		printf("%s: unable to create completion thread for "
   2167 		    "channel %d\n", adapt->adapt_dev->dv_xname,
   2168 		    chan->chan_channel);
   2169 		panic("scsipi_create_completion_thread");
   2170 	}
   2171 }
   2172 
   2173 /*
   2174  * scsipi_thread_call_callback:
   2175  *
   2176  * 	request to call a callback from the completion thread
   2177  */
   2178 int
   2179 scsipi_thread_call_callback(struct scsipi_channel *chan,
   2180     void (*callback)(struct scsipi_channel *, void *), void *arg)
   2181 {
   2182 	int s;
   2183 
   2184 	s = splbio();
   2185 	if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
   2186 		/* kernel thread doesn't exist yet */
   2187 		splx(s);
   2188 		return ESRCH;
   2189 	}
   2190 	if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
   2191 		splx(s);
   2192 		return EBUSY;
   2193 	}
   2194 	scsipi_channel_freeze(chan, 1);
   2195 	chan->chan_callback = callback;
   2196 	chan->chan_callback_arg = arg;
   2197 	chan->chan_tflags |= SCSIPI_CHANT_CALLBACK;
   2198 	wakeup(&chan->chan_complete);
   2199 	splx(s);
   2200 	return(0);
   2201 }
   2202 
   2203 /*
   2204  * scsipi_async_event:
   2205  *
   2206  *	Handle an asynchronous event from an adapter.
   2207  */
   2208 void
   2209 scsipi_async_event(struct scsipi_channel *chan, scsipi_async_event_t event,
   2210     void *arg)
   2211 {
   2212 	int s;
   2213 
   2214 	s = splbio();
   2215 	switch (event) {
   2216 	case ASYNC_EVENT_MAX_OPENINGS:
   2217 		scsipi_async_event_max_openings(chan,
   2218 		    (struct scsipi_max_openings *)arg);
   2219 		break;
   2220 
   2221 	case ASYNC_EVENT_XFER_MODE:
   2222 		scsipi_async_event_xfer_mode(chan,
   2223 		    (struct scsipi_xfer_mode *)arg);
   2224 		break;
   2225 	case ASYNC_EVENT_RESET:
   2226 		scsipi_async_event_channel_reset(chan);
   2227 		break;
   2228 	}
   2229 	splx(s);
   2230 }
   2231 
   2232 /*
   2233  * scsipi_print_xfer_mode:
   2234  *
   2235  *	Print a periph's capabilities.
   2236  */
   2237 void
   2238 scsipi_print_xfer_mode(struct scsipi_periph *periph)
   2239 {
   2240 	int period, freq, speed, mbs;
   2241 
   2242 	if ((periph->periph_flags & PERIPH_MODE_VALID) == 0)
   2243 		return;
   2244 
   2245 	aprint_normal("%s: ", periph->periph_dev->dv_xname);
   2246 	if (periph->periph_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT)) {
   2247 		period = scsipi_sync_factor_to_period(periph->periph_period);
   2248 		aprint_normal("sync (%d.%02dns offset %d)",
   2249 		    period / 100, period % 100, periph->periph_offset);
   2250 	} else
   2251 		aprint_normal("async");
   2252 
   2253 	if (periph->periph_mode & PERIPH_CAP_WIDE32)
   2254 		aprint_normal(", 32-bit");
   2255 	else if (periph->periph_mode & (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
   2256 		aprint_normal(", 16-bit");
   2257 	else
   2258 		aprint_normal(", 8-bit");
   2259 
   2260 	if (periph->periph_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT)) {
   2261 		freq = scsipi_sync_factor_to_freq(periph->periph_period);
   2262 		speed = freq;
   2263 		if (periph->periph_mode & PERIPH_CAP_WIDE32)
   2264 			speed *= 4;
   2265 		else if (periph->periph_mode &
   2266 		    (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
   2267 			speed *= 2;
   2268 		mbs = speed / 1000;
   2269 		if (mbs > 0)
   2270 			aprint_normal(" (%d.%03dMB/s)", mbs, speed % 1000);
   2271 		else
   2272 			aprint_normal(" (%dKB/s)", speed % 1000);
   2273 	}
   2274 
   2275 	aprint_normal(" transfers");
   2276 
   2277 	if (periph->periph_mode & PERIPH_CAP_TQING)
   2278 		aprint_normal(", tagged queueing");
   2279 
   2280 	aprint_normal("\n");
   2281 }
   2282 
   2283 /*
   2284  * scsipi_async_event_max_openings:
   2285  *
   2286  *	Update the maximum number of outstanding commands a
   2287  *	device may have.
   2288  */
   2289 static void
   2290 scsipi_async_event_max_openings(struct scsipi_channel *chan,
   2291     struct scsipi_max_openings *mo)
   2292 {
   2293 	struct scsipi_periph *periph;
   2294 	int minlun, maxlun;
   2295 
   2296 	if (mo->mo_lun == -1) {
   2297 		/*
   2298 		 * Wildcarded; apply it to all LUNs.
   2299 		 */
   2300 		minlun = 0;
   2301 		maxlun = chan->chan_nluns - 1;
   2302 	} else
   2303 		minlun = maxlun = mo->mo_lun;
   2304 
   2305 	/* XXX This could really suck with a large LUN space. */
   2306 	for (; minlun <= maxlun; minlun++) {
   2307 		periph = scsipi_lookup_periph(chan, mo->mo_target, minlun);
   2308 		if (periph == NULL)
   2309 			continue;
   2310 
   2311 		if (mo->mo_openings < periph->periph_openings)
   2312 			periph->periph_openings = mo->mo_openings;
   2313 		else if (mo->mo_openings > periph->periph_openings &&
   2314 		    (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
   2315 			periph->periph_openings = mo->mo_openings;
   2316 	}
   2317 }
   2318 
   2319 /*
   2320  * scsipi_async_event_xfer_mode:
   2321  *
   2322  *	Update the xfer mode for all periphs sharing the
   2323  *	specified I_T Nexus.
   2324  */
   2325 static void
   2326 scsipi_async_event_xfer_mode(struct scsipi_channel *chan,
   2327     struct scsipi_xfer_mode *xm)
   2328 {
   2329 	struct scsipi_periph *periph;
   2330 	int lun, announce, mode, period, offset;
   2331 
   2332 	for (lun = 0; lun < chan->chan_nluns; lun++) {
   2333 		periph = scsipi_lookup_periph(chan, xm->xm_target, lun);
   2334 		if (periph == NULL)
   2335 			continue;
   2336 		announce = 0;
   2337 
   2338 		/*
   2339 		 * Clamp the xfer mode down to this periph's capabilities.
   2340 		 */
   2341 		mode = xm->xm_mode & periph->periph_cap;
   2342 		if (mode & PERIPH_CAP_SYNC) {
   2343 			period = xm->xm_period;
   2344 			offset = xm->xm_offset;
   2345 		} else {
   2346 			period = 0;
   2347 			offset = 0;
   2348 		}
   2349 
   2350 		/*
   2351 		 * If we do not have a valid xfer mode yet, or the parameters
   2352 		 * are different, announce them.
   2353 		 */
   2354 		if ((periph->periph_flags & PERIPH_MODE_VALID) == 0 ||
   2355 		    periph->periph_mode != mode ||
   2356 		    periph->periph_period != period ||
   2357 		    periph->periph_offset != offset)
   2358 			announce = 1;
   2359 
   2360 		periph->periph_mode = mode;
   2361 		periph->periph_period = period;
   2362 		periph->periph_offset = offset;
   2363 		periph->periph_flags |= PERIPH_MODE_VALID;
   2364 
   2365 		if (announce)
   2366 			scsipi_print_xfer_mode(periph);
   2367 	}
   2368 }
   2369 
   2370 /*
   2371  * scsipi_set_xfer_mode:
   2372  *
   2373  *	Set the xfer mode for the specified I_T Nexus.
   2374  */
void
scsipi_set_xfer_mode(struct scsipi_channel *chan, int target, int immed)
{
	struct scsipi_xfer_mode xm;
	struct scsipi_periph *itperiph;
	int lun, s;

	/*
	 * Go to the minimal xfer mode.
	 */
	xm.xm_target = target;
	xm.xm_mode = 0;
	xm.xm_period = 0;			/* ignored */
	xm.xm_offset = 0;			/* ignored */

	/*
	 * Find the first LUN we know about on this I_T Nexus.
	 */
	for (itperiph = NULL, lun = 0; lun < chan->chan_nluns; lun++) {
		itperiph = scsipi_lookup_periph(chan, target, lun);
		if (itperiph != NULL)
			break;
	}
	/* If no periph exists on this nexus, there is nothing to do. */
	if (itperiph != NULL) {
		/* Request everything the periph is capable of. */
		xm.xm_mode = itperiph->periph_cap;
		/*
		 * Now issue the request to the adapter.
		 */
		s = splbio();
		scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm);
		splx(s);
		/*
		 * If we want this to happen immediately, issue a dummy
		 * command, since most adapters can't really negotiate unless
		 * they're executing a job.
		 */
		if (immed != 0) {
			(void) scsipi_test_unit_ready(itperiph,
			    XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
			    XS_CTL_IGNORE_NOT_READY |
			    XS_CTL_IGNORE_MEDIA_CHANGE);
		}
	}
}
   2419 
   2420 /*
   2421  * scsipi_channel_reset:
   2422  *
   2423  *	handle scsi bus reset
   2424  * called at splbio
   2425  */
static void
scsipi_async_event_channel_reset(struct scsipi_channel *chan)
{
	struct scsipi_xfer *xs, *xs_next;
	struct scsipi_periph *periph;
	int target, lun;

	/*
	 * Channel has been reset. Also mark as reset pending REQUEST_SENSE
	 * commands; as the sense is not available any more.
	 * can't call scsipi_done() from here, as the command has not been
	 * sent to the adapter yet (this would corrupt accounting).
	 */

	/* Save the next pointer first: TAILQ_REMOVE invalidates xs's links. */
	for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; xs = xs_next) {
		xs_next = TAILQ_NEXT(xs, channel_q);
		if (xs->xs_control & XS_CTL_REQSENSE) {
			TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
			xs->error = XS_RESET;
			/* Async xfers go to the completion thread for handling. */
			if ((xs->xs_control & XS_CTL_ASYNC) != 0)
				TAILQ_INSERT_TAIL(&chan->chan_complete, xs,
				    channel_q);
		}
	}
	/* Wake the completion thread to process the moved xfers. */
	wakeup(&chan->chan_complete);
	/* Catch xs with pending sense which may not have a REQSENSE xs yet */
	for (target = 0; target < chan->chan_ntargets; target++) {
		if (target == chan->chan_id)
			continue;
		for (lun = 0; lun <  chan->chan_nluns; lun++) {
			periph = scsipi_lookup_periph(chan, target, lun);
			if (periph) {
				xs = periph->periph_xscheck;
				if (xs)
					xs->error = XS_RESET;
			}
		}
	}
}
   2465 
   2466 /*
   2467  * scsipi_target_detach:
   2468  *
   2469  *	detach all periph associated with a I_T
   2470  * 	must be called from valid thread context
   2471  */
   2472 int
   2473 scsipi_target_detach(struct scsipi_channel *chan, int target, int lun,
   2474     int flags)
   2475 {
   2476 	struct scsipi_periph *periph;
   2477 	int ctarget, mintarget, maxtarget;
   2478 	int clun, minlun, maxlun;
   2479 	int error;
   2480 
   2481 	if (target == -1) {
   2482 		mintarget = 0;
   2483 		maxtarget = chan->chan_ntargets;
   2484 	} else {
   2485 		if (target == chan->chan_id)
   2486 			return EINVAL;
   2487 		if (target < 0 || target >= chan->chan_ntargets)
   2488 			return EINVAL;
   2489 		mintarget = target;
   2490 		maxtarget = target + 1;
   2491 	}
   2492 
   2493 	if (lun == -1) {
   2494 		minlun = 0;
   2495 		maxlun = chan->chan_nluns;
   2496 	} else {
   2497 		if (lun < 0 || lun >= chan->chan_nluns)
   2498 			return EINVAL;
   2499 		minlun = lun;
   2500 		maxlun = lun + 1;
   2501 	}
   2502 
   2503 	for (ctarget = mintarget; ctarget < maxtarget; ctarget++) {
   2504 		if (ctarget == chan->chan_id)
   2505 			continue;
   2506 
   2507 		for (clun = minlun; clun < maxlun; clun++) {
   2508 			periph = scsipi_lookup_periph(chan, ctarget, clun);
   2509 			if (periph == NULL)
   2510 				continue;
   2511 			error = config_detach(periph->periph_dev, flags);
   2512 			if (error)
   2513 				return (error);
   2514 		}
   2515 	}
   2516 	return(0);
   2517 }
   2518 
   2519 /*
   2520  * scsipi_adapter_addref:
   2521  *
   2522  *	Add a reference to the adapter pointed to by the provided
   2523  *	link, enabling the adapter if necessary.
   2524  */
   2525 int
   2526 scsipi_adapter_addref(struct scsipi_adapter *adapt)
   2527 {
   2528 	int s, error = 0;
   2529 
   2530 	s = splbio();
   2531 	if (adapt->adapt_refcnt++ == 0 && adapt->adapt_enable != NULL) {
   2532 		error = (*adapt->adapt_enable)(adapt->adapt_dev, 1);
   2533 		if (error)
   2534 			adapt->adapt_refcnt--;
   2535 	}
   2536 	splx(s);
   2537 	return (error);
   2538 }
   2539 
   2540 /*
   2541  * scsipi_adapter_delref:
   2542  *
   2543  *	Delete a reference to the adapter pointed to by the provided
   2544  *	link, disabling the adapter if possible.
   2545  */
   2546 void
   2547 scsipi_adapter_delref(struct scsipi_adapter *adapt)
   2548 {
   2549 	int s;
   2550 
   2551 	s = splbio();
   2552 	if (adapt->adapt_refcnt-- == 1 && adapt->adapt_enable != NULL)
   2553 		(void) (*adapt->adapt_enable)(adapt->adapt_dev, 0);
   2554 	splx(s);
   2555 }
   2556 
/*
 * Table mapping the special sync transfer factor codes (0x08-0x0c)
 * to their transfer periods in hundredths of a nanosecond.  Factors
 * above this range use the classic period = factor * 4ns encoding.
 */
static struct scsipi_syncparam {
	int	ss_factor;
	int	ss_period;	/* ns * 100 */
} scsipi_syncparams[] = {
	{ 0x08,		 625 },	/* FAST-160 (Ultra320) */
	{ 0x09,		1250 },	/* FAST-80 (Ultra160) */
	{ 0x0a,		2500 },	/* FAST-40 40MHz (Ultra2) */
	{ 0x0b,		3030 },	/* FAST-40 33MHz (Ultra2) */
	{ 0x0c,		5000 },	/* FAST-20 (Ultra) */
};
static const int scsipi_nsyncparams =
    sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);
   2569 
   2570 int
   2571 scsipi_sync_period_to_factor(int period /* ns * 100 */)
   2572 {
   2573 	int i;
   2574 
   2575 	for (i = 0; i < scsipi_nsyncparams; i++) {
   2576 		if (period <= scsipi_syncparams[i].ss_period)
   2577 			return (scsipi_syncparams[i].ss_factor);
   2578 	}
   2579 
   2580 	return ((period / 100) / 4);
   2581 }
   2582 
   2583 int
   2584 scsipi_sync_factor_to_period(int factor)
   2585 {
   2586 	int i;
   2587 
   2588 	for (i = 0; i < scsipi_nsyncparams; i++) {
   2589 		if (factor == scsipi_syncparams[i].ss_factor)
   2590 			return (scsipi_syncparams[i].ss_period);
   2591 	}
   2592 
   2593 	return ((factor * 4) * 100);
   2594 }
   2595 
   2596 int
   2597 scsipi_sync_factor_to_freq(int factor)
   2598 {
   2599 	int i;
   2600 
   2601 	for (i = 0; i < scsipi_nsyncparams; i++) {
   2602 		if (factor == scsipi_syncparams[i].ss_factor)
   2603 			return (100000000 / scsipi_syncparams[i].ss_period);
   2604 	}
   2605 
   2606 	return (10000000 / ((factor * 4) * 10));
   2607 }
   2608 
   2609 #ifdef SCSIPI_DEBUG
   2610 /*
   2611  * Given a scsipi_xfer, dump the request, in all it's glory
   2612  */
void
show_scsipi_xs(struct scsipi_xfer *xs)
{

	/* Dump every interesting field of the xfer on one line. */
	printf("xs(%p): ", xs);
	printf("xs_control(0x%08x)", xs->xs_control);
	printf("xs_status(0x%08x)", xs->xs_status);
	printf("periph(%p)", xs->xs_periph);
	printf("retr(0x%x)", xs->xs_retries);
	printf("timo(0x%x)", xs->timeout);
	printf("cmd(%p)", xs->cmd);
	printf("len(0x%x)", xs->cmdlen);
	printf("data(%p)", xs->data);
	printf("len(0x%x)", xs->datalen);
	printf("res(0x%x)", xs->resid);
	printf("err(0x%x)", xs->error);
	printf("bp(%p)", xs->bp);
	/* Follow up with the CDB bytes and a sample of the data. */
	show_scsipi_cmd(xs);
}
   2632 
void
show_scsipi_cmd(struct scsipi_xfer *xs)
{
	u_char *b = (u_char *) xs->cmd;
	int i = 0;

	scsipi_printaddr(xs->xs_periph);
	printf(" command: ");

	if ((xs->xs_control & XS_CTL_RESET) == 0) {
		/* Print the CDB as comma-separated hex bytes. */
		while (i < xs->cmdlen) {
			if (i)
				printf(",");
			printf("0x%x", b[i++]);
		}
		printf("-[%d bytes]\n", xs->datalen);
		/* Show at most the first 64 bytes of the data buffer. */
		if (xs->datalen)
			show_mem(xs->data, min(64, xs->datalen));
	} else
		printf("-RESET-\n");
}
   2654 
   2655 void
   2656 show_mem(u_char *address, int num)
   2657 {
   2658 	int x;
   2659 
   2660 	printf("------------------------------");
   2661 	for (x = 0; x < num; x++) {
   2662 		if ((x % 16) == 0)
   2663 			printf("\n%03d: ", x);
   2664 		printf("%02x ", *address++);
   2665 	}
   2666 	printf("\n------------------------------\n");
   2667 }
   2668 #endif /* SCSIPI_DEBUG */
   2669