      1 /* $NetBSD: isp_netbsd.c,v 1.32 2000/12/04 11:05:32 fvdl Exp $ */
      2 /*
      3  * This driver, which is contained in NetBSD in the files:
      4  *
       5  *	sys/dev/ic/isp.c
       7  *	sys/dev/ic/isp_inline.h
       8  *	sys/dev/ic/isp_netbsd.c
       9  *	sys/dev/ic/isp_netbsd.h
      10  *	sys/dev/ic/isp_target.c
      11  *	sys/dev/ic/isp_target.h
      12  *	sys/dev/ic/isp_tpublic.h
      13  *	sys/dev/ic/ispmbox.h
      14  *	sys/dev/ic/ispreg.h
      15  *	sys/dev/ic/ispvar.h
     16  *	sys/microcode/isp/asm_sbus.h
     17  *	sys/microcode/isp/asm_1040.h
     18  *	sys/microcode/isp/asm_1080.h
     19  *	sys/microcode/isp/asm_12160.h
     20  *	sys/microcode/isp/asm_2100.h
     21  *	sys/microcode/isp/asm_2200.h
     22  *	sys/pci/isp_pci.c
     23  *	sys/sbus/isp_sbus.c
     24  *
      25  * is being actively maintained by Matthew Jacob (mjacob@netbsd.org).
      26  * This driver is also shared source with FreeBSD, OpenBSD, Linux, and
      27  * Solaris versions. This tends to be an interesting maintenance problem.
     28  *
     29  * Please coordinate with Matthew Jacob on changes you wish to make here.
     30  */
     31 /*
     32  * Platform (NetBSD) dependent common attachment code for Qlogic adapters.
      33  * Matthew Jacob <mjacob@nas.nasa.gov>
     34  */
     35 /*
     36  * Copyright (C) 1997, 1998, 1999 National Aeronautics & Space Administration
     37  * All rights reserved.
     38  *
     39  * Redistribution and use in source and binary forms, with or without
     40  * modification, are permitted provided that the following conditions
     41  * are met:
     42  * 1. Redistributions of source code must retain the above copyright
     43  *    notice, this list of conditions and the following disclaimer.
     44  * 2. Redistributions in binary form must reproduce the above copyright
     45  *    notice, this list of conditions and the following disclaimer in the
     46  *    documentation and/or other materials provided with the distribution.
     47  * 3. The name of the author may not be used to endorse or promote products
     48  *    derived from this software without specific prior written permission
     49  *
     50  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     51  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     52  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     53  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     54  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
     55  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     56  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     57  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     58  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
     59  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     60  */
     61 
     62 #include <dev/ic/isp_netbsd.h>
     63 #include <sys/scsiio.h>
     64 
     65 
     66 /*
     67  * Set a timeout for the watchdogging of a command.
     68  *
     69  * The dimensional analysis is
     70  *
     71  *	milliseconds * (seconds/millisecond) * (ticks/second) = ticks
     72  *
     73  *			=
     74  *
     75  *	(milliseconds / 1000) * hz = ticks
     76  *
     77  *
     78  * For timeouts less than 1 second, we'll get zero. Because of this, and
     79  * because we want to establish *our* timeout to be longer than what the
     80  * firmware might do, we just add 3 seconds at the back end.
     81  */
     82 #define	_XT(xs)	((((xs)->timeout/1000) * hz) + (3 * hz))
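
         /*
          * Worked example (assuming hz = 100, a typical NetBSD tick rate):
          * a 10 second (10000 millisecond) command timeout becomes
          * (10000 / 1000) * 100 + 3 * 100 = 1300 ticks, i.e. our watchdog
          * fires about 13 seconds after the command was started.
          */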
     83 
     84 static void ispminphys __P((struct buf *));
     85 static int32_t ispcmd __P((XS_T *));
     86 static int
     87 ispioctl __P((struct scsipi_link *, u_long, caddr_t, int, struct proc *));
     88 
     89 static struct scsipi_device isp_dev = { NULL, NULL, NULL, NULL };
     90 static int isp_polled_cmd __P((struct ispsoftc *, XS_T *));
     91 static void isp_dog __P((void *));
     92 static void isp_command_requeue __P((void *));
     93 static void isp_internal_restart __P((void *));
     94 
     95 /*
      96  * Complete attachment of hardware, including subdevices.
     97  */
     98 void
     99 isp_attach(isp)
    100 	struct ispsoftc *isp;
    101 {
    102 	isp->isp_osinfo._adapter.scsipi_minphys = ispminphys;
    103 	isp->isp_osinfo._adapter.scsipi_ioctl = ispioctl;
    104 	isp->isp_osinfo._adapter.scsipi_cmd = ispcmd;
    105 
    106 	isp->isp_state = ISP_RUNSTATE;
    107 	isp->isp_osinfo._link.scsipi_scsi.channel =
    108 	    (IS_DUALBUS(isp))? 0 : SCSI_CHANNEL_ONLY_ONE;
    109 	isp->isp_osinfo._link.adapter_softc = isp;
    110 	isp->isp_osinfo._link.device = &isp_dev;
    111 	isp->isp_osinfo._link.adapter = &isp->isp_osinfo._adapter;
    112 	isp->isp_osinfo._link.openings = isp->isp_maxcmds;
    113 	/*
    114 	 * Until the midlayer is fixed to use REPORT LUNS, limit to 8 luns.
    115 	 */
    116 	isp->isp_osinfo._link.scsipi_scsi.max_lun =
     117 	   (isp->isp_maxluns < 8)? isp->isp_maxluns - 1 : 7;
    118 	TAILQ_INIT(&isp->isp_osinfo.waitq);	/* The 2nd bus will share.. */
    119 
    120 	if (IS_FC(isp)) {
    121 		isp->isp_osinfo._link.scsipi_scsi.max_target = MAX_FC_TARG-1;
    122 	} else {
    123 		sdparam *sdp = isp->isp_param;
    124 		isp->isp_osinfo._link.scsipi_scsi.max_target = MAX_TARGETS-1;
    125 		isp->isp_osinfo._link.scsipi_scsi.adapter_target =
    126 		    sdp->isp_initiator_id;
    127 		isp->isp_osinfo.discovered[0] = 1 << sdp->isp_initiator_id;
    128 		if (IS_DUALBUS(isp)) {
    129 			isp->isp_osinfo._link_b = isp->isp_osinfo._link;
    130 			sdp++;
    131 			isp->isp_osinfo.discovered[1] =
    132 			    1 << sdp->isp_initiator_id;
    133 			isp->isp_osinfo._link_b.scsipi_scsi.adapter_target =
    134 			    sdp->isp_initiator_id;
    135 			isp->isp_osinfo._link_b.scsipi_scsi.channel = 1;
    136 			isp->isp_osinfo._link_b.scsipi_scsi.max_lun =
    137 			    isp->isp_osinfo._link.scsipi_scsi.max_lun;
    138 		}
    139 	}
    140 	isp->isp_osinfo._link.type = BUS_SCSI;
    141 
    142 	/*
    143 	 * Send a SCSI Bus Reset.
    144 	 */
    145 	if (IS_SCSI(isp)) {
    146 		int bus = 0;
    147 		ISP_LOCK(isp);
    148 		(void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
    149 		if (IS_DUALBUS(isp)) {
    150 			bus++;
    151 			(void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
    152 		}
    153 		ISP_UNLOCK(isp);
    154 	} else {
    155 		int defid;
    156 		fcparam *fcp = isp->isp_param;
    157 		delay(2 * 1000000);
    158 		defid = MAX_FC_TARG;
    159 		ISP_LOCK(isp);
    160 		/*
     161 		 * We probably won't have clock interrupts running yet,
     162 		 * so this will be a really short check (a smoke test,
     163 		 * really) at this time.
    164 		 */
    165 		if (isp_control(isp, ISPCTL_FCLINK_TEST, NULL)) {
    166 			(void) isp_control(isp, ISPCTL_PDB_SYNC, NULL);
    167 			if (fcp->isp_fwstate == FW_READY &&
    168 			    fcp->isp_loopstate >= LOOP_PDB_RCVD) {
    169 				defid = fcp->isp_loopid;
    170 			}
    171 		}
    172 		ISP_UNLOCK(isp);
    173 		isp->isp_osinfo._link.scsipi_scsi.adapter_target = defid;
    174 	}
    175 
    176 	/*
     177 	 * After this point, we'll be using the new configuration
     178 	 * schema, which allows interrupts, so we can use tsleep/wakeup
     179 	 * for mailbox commands from here on.
    180 	 */
    181 	isp->isp_osinfo.no_mbox_ints = 0;
    182 
    183 	/*
    184 	 * And attach children (if any).
    185 	 */
    186 	config_found((void *)isp, &isp->isp_osinfo._link, scsiprint);
    187 	if (IS_DUALBUS(isp)) {
    188 		config_found((void *)isp, &isp->isp_osinfo._link_b, scsiprint);
    189 	}
    190 }
    191 
    192 /*
    193  * minphys our xfers
    194  *
     195  * Unfortunately, the buffer pointer describes the target device, not the
    196  * adapter device, so we can't use the pointer to find out what kind of
    197  * adapter we are and adjust accordingly.
    198  */
    199 
    200 static void
    201 ispminphys(bp)
    202 	struct buf *bp;
    203 {
    204 	/*
     205 	 * XXX: Only the 1020 has a 24 bit limit.
    206 	 */
    207 	if (bp->b_bcount >= (1 << 24)) {
    208 		bp->b_bcount = (1 << 24);
    209 	}
    210 	minphys(bp);
    211 }
    212 
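         /*
          * ioctl entry point. SCBUSACCEL folds the requested sync/wide/tagged
          * capabilities into the per-target parameters and flags them for an
          * update on the next command; SCBUSIORESET resets the selected bus.
          */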
    213 static int
    214 ispioctl(sc_link, cmd, addr, flag, p)
    215 	struct scsipi_link *sc_link;
    216 	u_long cmd;
    217 	caddr_t addr;
    218 	int flag;
    219 	struct proc *p;
    220 {
    221 	struct ispsoftc *isp = sc_link->adapter_softc;
    222 	int s, chan, retval = ENOTTY;
    223 
    224 	chan = (sc_link->scsipi_scsi.channel == SCSI_CHANNEL_ONLY_ONE)? 0 :
    225 	    sc_link->scsipi_scsi.channel;
    226 
    227 	switch (cmd) {
    228 	case SCBUSACCEL:
    229 	{
    230 		struct scbusaccel_args *sp = (struct scbusaccel_args *)addr;
    231 		if (IS_SCSI(isp) && sp->sa_lun == 0) {
    232 			int dflags = 0;
    233 			sdparam *sdp = SDPARAM(isp);
    234 
    235 			sdp += chan;
    236 			if (sp->sa_flags & SC_ACCEL_TAGS)
    237 				dflags |= DPARM_TQING;
    238 			if (sp->sa_flags & SC_ACCEL_WIDE)
    239 				dflags |= DPARM_WIDE;
    240 			if (sp->sa_flags & SC_ACCEL_SYNC)
    241 				dflags |= DPARM_SYNC;
    242 			s = splbio();
    243 			sdp->isp_devparam[sp->sa_target].dev_flags |= dflags;
    244 			dflags = sdp->isp_devparam[sp->sa_target].dev_flags;
    245 			sdp->isp_devparam[sp->sa_target].dev_update = 1;
    246 			isp->isp_update |= (1 << chan);
    247 			splx(s);
    248 			isp_prt(isp, ISP_LOGDEBUG1,
    249 			    "ispioctl: device flags 0x%x for %d.%d.X",
    250 			    dflags, chan, sp->sa_target);
    251 		}
    252 		retval = 0;
    253 		break;
    254 	}
    255 	case SCBUSIORESET:
    256 		s = splbio();
    257 		if (isp_control(isp, ISPCTL_RESET_BUS, &chan))
    258 			retval = EIO;
    259 		else
    260 			retval = 0;
    261 		(void) splx(s);
    262 		break;
    263 	default:
    264 		break;
    265 	}
    266 	return (retval);
    267 }
    268 
    269 
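         /*
          * Midlayer command entry point. Bring the HBA to RUNSTATE if needed,
          * defer commands while the adapter is blocked (e.g. FC loop down),
          * run polled commands to completion here, and otherwise hand the
          * request to isp_start() with a watchdog timeout armed.
          */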
    270 static int32_t
    271 ispcmd(xs)
    272 	XS_T *xs;
    273 {
    274 	struct ispsoftc *isp;
    275 	int result, s;
    276 
    277 	isp = XS_ISP(xs);
    278 	s = splbio();
    279 	if (isp->isp_state < ISP_RUNSTATE) {
    280 		DISABLE_INTS(isp);
    281 		isp_init(isp);
     282 		if (isp->isp_state != ISP_INITSTATE) {
     283 			ENABLE_INTS(isp);
     284 			(void) splx(s);
     285 			XS_SETERR(xs, HBA_BOTCH);
     286 			return (COMPLETE);
     287 		}
     288 		isp->isp_state = ISP_RUNSTATE;
     289 		ENABLE_INTS(isp);
     290 	}
    291 
    292 	/*
    293 	 * Check for queue blockage...
    294 	 */
    295 	if (isp->isp_osinfo.blocked) {
    296 		if (xs->xs_control & XS_CTL_POLL) {
    297 			xs->error = XS_DRIVER_STUFFUP;
    298 			splx(s);
    299 			return (TRY_AGAIN_LATER);
    300 		}
    301 		TAILQ_INSERT_TAIL(&isp->isp_osinfo.waitq, xs, adapter_q);
    302 		splx(s);
    303 		return (SUCCESSFULLY_QUEUED);
    304 	}
    305 
    306 	if (xs->xs_control & XS_CTL_POLL) {
    307 		volatile u_int8_t ombi = isp->isp_osinfo.no_mbox_ints;
    308 		isp->isp_osinfo.no_mbox_ints = 1;
    309 		result = isp_polled_cmd(isp, xs);
    310 		isp->isp_osinfo.no_mbox_ints = ombi;
    311 		(void) splx(s);
    312 		return (result);
    313 	}
    314 
    315 	result = isp_start(xs);
    316 #if	0
    317 {
    318 	static int na[16] = { 0 };
    319 	if (na[isp->isp_unit] < isp->isp_nactive) {
    320 		isp_prt(isp, ISP_LOGALL, "active hiwater %d", isp->isp_nactive);
    321 		na[isp->isp_unit] = isp->isp_nactive;
    322 	}
    323 }
    324 #endif
    325 	switch (result) {
    326 	case CMD_QUEUED:
    327 		result = SUCCESSFULLY_QUEUED;
    328 		if (xs->timeout) {
    329 			callout_reset(&xs->xs_callout, _XT(xs), isp_dog, xs);
    330 		}
    331 		break;
    332 	case CMD_EAGAIN:
    333 		result = TRY_AGAIN_LATER;
    334 		break;
    335 	case CMD_RQLATER:
    336 		result = SUCCESSFULLY_QUEUED;
    337 		callout_reset(&xs->xs_callout, hz, isp_command_requeue, xs);
    338 		break;
    339 	case CMD_COMPLETE:
    340 		result = COMPLETE;
    341 		break;
    342 	}
    343 	(void) splx(s);
    344 	return (result);
    345 }
    346 
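         /*
          * Run a command without interrupts: start it, then poll isp_intr()
          * roughly once per millisecond until it is marked done or the SCSI
          * timeout expires (a zero timeout polls forever). A command that
          * never completes is aborted and returned as HBA_BOTCH.
          */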
    347 static int
    348 isp_polled_cmd(isp, xs)
    349 	struct ispsoftc *isp;
    350 	XS_T *xs;
    351 {
    352 	int result;
    353 	int infinite = 0, mswait;
    354 
    355 	result = isp_start(xs);
    356 
    357 	switch (result) {
    358 	case CMD_QUEUED:
    359 		result = SUCCESSFULLY_QUEUED;
    360 		break;
    361 	case CMD_RQLATER:
    362 	case CMD_EAGAIN:
    363 		if (XS_NOERR(xs)) {
    364 			xs->error = XS_DRIVER_STUFFUP;
    365 		}
    366 		result = TRY_AGAIN_LATER;
    367 		break;
    368 	case CMD_COMPLETE:
    369 		result = COMPLETE;
    370 		break;
    371 
    372 	}
    373 
    374 	if (result != SUCCESSFULLY_QUEUED) {
    375 		return (result);
    376 	}
    377 
    378 	/*
    379 	 * If we can't use interrupts, poll on completion.
    380 	 */
    381 	if ((mswait = XS_TIME(xs)) == 0)
    382 		infinite = 1;
    383 
    384 	while (mswait || infinite) {
    385 		if (isp_intr((void *)isp)) {
    386 			if (XS_CMD_DONE_P(xs)) {
    387 				break;
    388 			}
    389 		}
    390 		USEC_DELAY(1000);
    391 		mswait -= 1;
    392 	}
    393 
    394 	/*
    395 	 * If no other error occurred but we didn't finish,
    396 	 * something bad happened.
    397 	 */
    398 	if (XS_CMD_DONE_P(xs) == 0) {
    399 		if (isp_control(isp, ISPCTL_ABORT_CMD, xs)) {
    400 			isp_reinit(isp);
    401 		}
    402 		if (XS_NOERR(xs)) {
    403 			XS_SETERR(xs, HBA_BOTCH);
    404 		}
    405 	}
    406 	result = COMPLETE;
    407 	return (result);
    408 }
    409 
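         /*
          * Completion path: mark the command done and, unless the watchdog
          * currently owns it, cancel the timeout and return it to the midlayer.
          */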
    410 void
    411 isp_done(xs)
    412 	XS_T *xs;
    413 {
    414 	XS_CMD_S_DONE(xs);
    415 	if (XS_CMD_WDOG_P(xs) == 0) {
    416 		struct ispsoftc *isp = XS_ISP(xs);
    417 		callout_stop(&xs->xs_callout);
    418 		if (XS_CMD_GRACE_P(xs)) {
    419 			isp_prt(isp, ISP_LOGDEBUG1,
    420 			    "finished command on borrowed time");
    421 		}
    422 		XS_CMD_S_CLEAR(xs);
    423 		scsipi_done(xs);
    424 	}
    425 }
    426 
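         /*
          * Per-command watchdog. On first expiry the command is placed on
          * "grace" time and a SYNC_ALL marker request is queued to the
          * adapter; if the timer fires again with the command still
          * outstanding, it is aborted, its DMA resources are released and
          * it is completed with XS_TIMEOUT.
          */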
    427 static void
    428 isp_dog(arg)
    429 	void *arg;
    430 {
    431 	XS_T *xs = arg;
    432 	struct ispsoftc *isp = XS_ISP(xs);
    433 	u_int32_t handle;
    434 
    435 	ISP_ILOCK(isp);
    436 	/*
    437 	 * We've decided this command is dead. Make sure we're not trying
     438 	 * to kill a command that's already dead by getting its handle
     439 	 * and seeing whether it's still alive.
    440 	 */
    441 	handle = isp_find_handle(isp, xs);
    442 	if (handle) {
    443 		u_int16_t r, r1, i;
    444 
    445 		if (XS_CMD_DONE_P(xs)) {
    446 			isp_prt(isp, ISP_LOGDEBUG1,
    447 			    "watchdog found done cmd (handle 0x%x)", handle);
    448 			ISP_IUNLOCK(isp);
    449 			return;
    450 		}
    451 
    452 		if (XS_CMD_WDOG_P(xs)) {
    453 			isp_prt(isp, ISP_LOGDEBUG1,
    454 			    "recursive watchdog (handle 0x%x)", handle);
    455 			ISP_IUNLOCK(isp);
    456 			return;
    457 		}
    458 
    459 		XS_CMD_S_WDOG(xs);
    460 
    461 		i = 0;
    462 		do {
    463 			r = ISP_READ(isp, BIU_ISR);
    464 			USEC_DELAY(1);
    465 			r1 = ISP_READ(isp, BIU_ISR);
    466 		} while (r != r1 && ++i < 1000);
    467 
    468 		if (INT_PENDING(isp, r) && isp_intr(isp) && XS_CMD_DONE_P(xs)) {
    469 			isp_prt(isp, ISP_LOGDEBUG1, "watchdog cleanup (%x, %x)",
    470 			    handle, r);
    471 			XS_CMD_C_WDOG(xs);
    472 			isp_done(xs);
    473 		} else if (XS_CMD_GRACE_P(xs)) {
    474 			isp_prt(isp, ISP_LOGDEBUG1, "watchdog timeout (%x, %x)",
    475 			    handle, r);
    476 			/*
    477 			 * Make sure the command is *really* dead before we
    478 			 * release the handle (and DMA resources) for reuse.
    479 			 */
    480 			(void) isp_control(isp, ISPCTL_ABORT_CMD, arg);
    481 
    482 			/*
     483 			 * After this point, the command is really dead.
    484 			 */
    485 			if (XS_XFRLEN(xs)) {
    486 				ISP_DMAFREE(isp, xs, handle);
    487 			}
    488 			isp_destroy_handle(isp, handle);
    489 			XS_SETERR(xs, XS_TIMEOUT);
    490 			XS_CMD_S_CLEAR(xs);
    491 			isp_done(xs);
    492 		} else {
    493 			u_int16_t iptr, optr;
    494 			ispreq_t *mp;
    495 			isp_prt(isp, ISP_LOGDEBUG2,
    496 			    "possible command timeout (%x, %x)", handle, r);
    497 			XS_CMD_C_WDOG(xs);
    498 			callout_reset(&xs->xs_callout, hz, isp_dog, xs);
    499 			if (isp_getrqentry(isp, &iptr, &optr, (void **) &mp)) {
    500 				ISP_IUNLOCK(isp);
    501 				return;
    502 			}
    503 			XS_CMD_S_GRACE(xs);
    504 			MEMZERO((void *) mp, sizeof (*mp));
    505 			mp->req_header.rqs_entry_count = 1;
    506 			mp->req_header.rqs_entry_type = RQSTYPE_MARKER;
    507 			mp->req_modifier = SYNC_ALL;
    508 			mp->req_target = XS_CHANNEL(xs) << 7;
    509 			ISP_SWIZZLE_REQUEST(isp, mp);
    510 			ISP_ADD_REQUEST(isp, iptr);
    511 		}
    512 	} else {
    513 		isp_prt(isp, ISP_LOGDEBUG0, "watchdog with no command");
    514 	}
    515 	ISP_IUNLOCK(isp);
    516 }
    517 
    518 /*
    519  * Free any associated resources prior to decommissioning and
    520  * set the card to a known state (so it doesn't wake up and kick
    521  * us when we aren't expecting it to).
    522  *
    523  * Locks are held before coming here.
    524  */
    525 void
    526 isp_uninit(isp)
    527 	struct ispsoftc *isp;
    528 {
    529 	isp_lock(isp);
    530 	/*
    531 	 * Leave with interrupts disabled.
    532 	 */
    533 	DISABLE_INTS(isp);
    534 	isp_unlock(isp);
    535 }
    536 
    537 /*
    538  * Restart function for a command to be requeued later.
    539  */
    540 static void
    541 isp_command_requeue(arg)
    542 	void *arg;
    543 {
    544 	struct scsipi_xfer *xs = arg;
    545 	struct ispsoftc *isp = XS_ISP(xs);
    546 	ISP_ILOCK(isp);
    547 	switch (ispcmd(xs)) {
    548 	case SUCCESSFULLY_QUEUED:
    549 		isp_prt(isp, ISP_LOGINFO,
    550 		    "requeued commands for %d.%d", XS_TGT(xs), XS_LUN(xs));
    551 		if (xs->timeout) {
    552 			callout_reset(&xs->xs_callout, _XT(xs), isp_dog, xs);
    553 		}
    554 		break;
    555 	case TRY_AGAIN_LATER:
    556 		isp_prt(isp, ISP_LOGINFO,
    557 		    "EAGAIN on requeue for %d.%d", XS_TGT(xs), XS_LUN(xs));
    558 		callout_reset(&xs->xs_callout, hz, isp_command_requeue, xs);
    559 		break;
    560 	case COMPLETE:
    561 		/* can only be an error */
    562 		XS_CMD_S_DONE(xs);
    563 		callout_stop(&xs->xs_callout);
    564 		if (XS_NOERR(xs)) {
    565 			XS_SETERR(xs, HBA_BOTCH);
    566 		}
    567 		scsipi_done(xs);
    568 		break;
    569 	}
    570 	ISP_IUNLOCK(isp);
    571 }
    572 
    573 /*
     574  * Restart function after an event such as a LOOP UP,
     575  * run as a timeout to provide some hysteresis.
    576  */
    577 static void
    578 isp_internal_restart(arg)
    579 	void *arg;
    580 {
    581 	struct ispsoftc *isp = arg;
    582 	int result, nrestarted = 0;
    583 
    584 	ISP_ILOCK(isp);
    585 	if (isp->isp_osinfo.blocked == 0) {
    586 		struct scsipi_xfer *xs;
    587 		while ((xs = TAILQ_FIRST(&isp->isp_osinfo.waitq)) != NULL) {
    588 			TAILQ_REMOVE(&isp->isp_osinfo.waitq, xs, adapter_q);
    589 			result = isp_start(xs);
    590 			if (result != CMD_QUEUED) {
    591 				isp_prt(isp, ISP_LOGERR,
    592 				    "botched command restart (err=%d)", result);
    593 				XS_CMD_S_DONE(xs);
    594 				if (xs->error == XS_NOERROR)
    595 					xs->error = XS_DRIVER_STUFFUP;
    596 				callout_stop(&xs->xs_callout);
    597 				scsipi_done(xs);
    598 			} else if (xs->timeout) {
    599 				callout_reset(&xs->xs_callout,
    600 				    _XT(xs), isp_dog, xs);
    601 			}
    602 			nrestarted++;
    603 		}
    604 		isp_prt(isp, ISP_LOGINFO,
    605 		    "isp_restart requeued %d commands", nrestarted);
    606 	}
    607 	ISP_IUNLOCK(isp);
    608 }
    609 
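         /*
          * Asynchronous event handler for the core driver: report negotiated
          * transfer parameters and bus resets, block the wait queue on LOOP
          * DOWN and schedule isp_internal_restart() on LOOP UP, log port
          * database changes and, when ISP2100_FABRIC is defined, record
          * fabric devices in the local port database.
          */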
    610 int
    611 isp_async(isp, cmd, arg)
    612 	struct ispsoftc *isp;
    613 	ispasync_t cmd;
    614 	void *arg;
    615 {
    616 	int bus, tgt;
    617 	int s = splbio();
    618 	switch (cmd) {
    619 	case ISPASYNC_NEW_TGT_PARAMS:
    620 	if (IS_SCSI(isp) && isp->isp_dblev) {
    621 		sdparam *sdp = isp->isp_param;
    622 		char *wt;
    623 		int mhz, flags, period;
    624 
    625 		tgt = *((int *) arg);
    626 		bus = (tgt >> 16) & 0xffff;
    627 		tgt &= 0xffff;
    628 		sdp += bus;
    629 		flags = sdp->isp_devparam[tgt].cur_dflags;
    630 		period = sdp->isp_devparam[tgt].cur_period;
    631 
    632 		if ((flags & DPARM_SYNC) && period &&
    633 		    (sdp->isp_devparam[tgt].cur_offset) != 0) {
    634 			/*
    635 			 * There's some ambiguity about our negotiated speed
    636 			 * if we haven't detected LVD mode correctly (which
    637 			 * seems to happen, unfortunately). If we're in LVD
    638 			 * mode, then different rules apply about speed.
    639 			 */
    640 			if (sdp->isp_lvdmode || period < 0xc) {
    641 				switch (period) {
    642 				case 0x9:
    643 					mhz = 80;
    644 					break;
    645 				case 0xa:
    646 					mhz = 40;
    647 					break;
    648 				case 0xb:
    649 					mhz = 33;
    650 					break;
    651 				case 0xc:
    652 					mhz = 25;
    653 					break;
    654 				default:
    655 					mhz = 1000 / (period * 4);
    656 					break;
    657 				}
    658 			} else {
    659 				mhz = 1000 / (period * 4);
    660 			}
    661 		} else {
    662 			mhz = 0;
    663 		}
    664 		switch (flags & (DPARM_WIDE|DPARM_TQING)) {
    665 		case DPARM_WIDE:
    666 			wt = ", 16 bit wide";
    667 			break;
    668 		case DPARM_TQING:
    669 			wt = ", Tagged Queueing Enabled";
    670 			break;
    671 		case DPARM_WIDE|DPARM_TQING:
    672 			wt = ", 16 bit wide, Tagged Queueing Enabled";
    673 			break;
    674 		default:
    675 			wt = " ";
    676 			break;
    677 		}
    678 		if (mhz) {
    679 			isp_prt(isp, ISP_LOGINFO,
    680 			    "Bus %d Target %d at %dMHz Max Offset %d%s",
    681 			    bus, tgt, mhz, sdp->isp_devparam[tgt].cur_offset,
    682 			    wt);
    683 		} else {
    684 			isp_prt(isp, ISP_LOGINFO,
    685 			    "Bus %d Target %d Async Mode%s", bus, tgt, wt);
    686 		}
     687 	}
     688 	break;
    689 	case ISPASYNC_BUS_RESET:
    690 		if (arg)
    691 			bus = *((int *) arg);
    692 		else
    693 			bus = 0;
    694 		isp_prt(isp, ISP_LOGINFO, "SCSI bus %d reset detected", bus);
    695 		break;
    696 	case ISPASYNC_LOOP_DOWN:
    697 		/*
    698 		 * Hopefully we get here in time to minimize the number
    699 		 * of commands we are firing off that are sure to die.
    700 		 */
    701 		isp->isp_osinfo.blocked = 1;
    702 		isp_prt(isp, ISP_LOGINFO, "Loop DOWN");
    703 		break;
     704 	case ISPASYNC_LOOP_UP:
    705 		isp->isp_osinfo.blocked = 0;
    706 		callout_reset(&isp->isp_osinfo._restart, 1,
    707 		    isp_internal_restart, isp);
    708 		isp_prt(isp, ISP_LOGINFO, "Loop UP");
    709 		break;
    710 	case ISPASYNC_PDB_CHANGED:
    711 	if (IS_FC(isp) && isp->isp_dblev) {
    712 		const char *fmt = "Target %d (Loop 0x%x) Port ID 0x%x "
    713 		    "role %s %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x";
    714 		const static char *roles[4] = {
    715 		    "No", "Target", "Initiator", "Target/Initiator"
    716 		};
    717 		char *ptr;
    718 		fcparam *fcp = isp->isp_param;
    719 		int tgt = *((int *) arg);
    720 		struct lportdb *lp = &fcp->portdb[tgt];
    721 
    722 		if (lp->valid) {
    723 			ptr = "arrived";
    724 		} else {
    725 			ptr = "disappeared";
    726 		}
    727 		isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid, lp->portid,
    728 		    roles[lp->roles & 0x3], ptr,
    729 		    (u_int32_t) (lp->port_wwn >> 32),
    730 		    (u_int32_t) (lp->port_wwn & 0xffffffffLL),
    731 		    (u_int32_t) (lp->node_wwn >> 32),
    732 		    (u_int32_t) (lp->node_wwn & 0xffffffffLL));
     733 	}
     734 	break;
    735 #ifdef	ISP2100_FABRIC
    736 	case ISPASYNC_CHANGE_NOTIFY:
    737 		isp_prt(isp, ISP_LOGINFO, "Name Server Database Changed");
    738 		break;
    739 	case ISPASYNC_FABRIC_DEV:
    740 	{
    741 		int target;
    742 		struct lportdb *lp;
    743 		sns_scrsp_t *resp = (sns_scrsp_t *) arg;
    744 		u_int32_t portid;
    745 		u_int64_t wwn;
    746 		fcparam *fcp = isp->isp_param;
    747 
    748 		portid =
    749 		    (((u_int32_t) resp->snscb_port_id[0]) << 16) |
    750 		    (((u_int32_t) resp->snscb_port_id[1]) << 8) |
    751 		    (((u_int32_t) resp->snscb_port_id[2]));
    752 		wwn =
    753 		    (((u_int64_t)resp->snscb_portname[0]) << 56) |
    754 		    (((u_int64_t)resp->snscb_portname[1]) << 48) |
    755 		    (((u_int64_t)resp->snscb_portname[2]) << 40) |
    756 		    (((u_int64_t)resp->snscb_portname[3]) << 32) |
    757 		    (((u_int64_t)resp->snscb_portname[4]) << 24) |
    758 		    (((u_int64_t)resp->snscb_portname[5]) << 16) |
    759 		    (((u_int64_t)resp->snscb_portname[6]) <<  8) |
    760 		    (((u_int64_t)resp->snscb_portname[7]));
    761 
    762 		isp_prt(isp, ISP_LOGINFO,
    763 		    "Fabric Device (Type 0x%x)@PortID 0x%x WWN 0x%08x%08x",
    764 		    resp->snscb_port_type, portid, ((u_int32_t)(wwn >> 32)),
    765 		    ((u_int32_t)(wwn & 0xffffffff)));
    766 
    767 		for (target = FC_SNS_ID+1; target < MAX_FC_TARG; target++) {
    768 			lp = &fcp->portdb[target];
    769 			if (lp->port_wwn == wwn)
    770 				break;
    771 		}
    772 		if (target < MAX_FC_TARG) {
    773 			break;
    774 		}
    775 		for (target = FC_SNS_ID+1; target < MAX_FC_TARG; target++) {
    776 			lp = &fcp->portdb[target];
    777 			if (lp->port_wwn == 0)
    778 				break;
    779 		}
    780 		if (target == MAX_FC_TARG) {
    781 			isp_prt(isp, ISP_LOGWARN,
    782 			    "no more space for fabric devices");
         			(void) splx(s);
     783 			return (-1);
    784 		}
    785 		lp->port_wwn = lp->node_wwn = wwn;
    786 		lp->portid = portid;
    787 		break;
    788 	}
    789 #endif
    790 	default:
    791 		break;
    792 	}
    793 	(void) splx(s);
    794 	return (0);
    795 }
    796 
    797 #include <machine/stdarg.h>
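         /*
          * Console logging helper: a message is printed only if its level is
          * ISP_LOGALL or is enabled in isp_dblev; it is prefixed with the
          * adapter name and terminated with a newline.
          */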
    798 void
    799 #ifdef	__STDC__
    800 isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
    801 #else
     802 isp_prt(isp, level, fmt, va_alist)
     803 	struct ispsoftc *isp;
         	int level;
     804 	char *fmt;
     805 	va_dcl
    806 #endif
    807 {
    808 	va_list ap;
    809 	if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
    810 		return;
    811 	}
    812 	printf("%s: ", isp->isp_name);
    813 	va_start(ap, fmt);
    814 	vprintf(fmt, ap);
    815 	va_end(ap);
    816 	printf("\n");
    817 }
    818