/* $NetBSD: isp_netbsd.c,v 1.33 2000/12/09 08:06:32 mjacob Exp $ */
/*
 * This driver, which is contained in NetBSD in the files:
 *
 *	sys/dev/ic/isp.c
 *	sys/dev/ic/isp_inline.h
 *	sys/dev/ic/isp_netbsd.c
 *	sys/dev/ic/isp_netbsd.h
 *	sys/dev/ic/isp_target.c
 *	sys/dev/ic/isp_target.h
 *	sys/dev/ic/isp_tpublic.h
 *	sys/dev/ic/ispmbox.h
 *	sys/dev/ic/ispreg.h
 *	sys/dev/ic/ispvar.h
 *	sys/microcode/isp/asm_sbus.h
 *	sys/microcode/isp/asm_1040.h
 *	sys/microcode/isp/asm_1080.h
 *	sys/microcode/isp/asm_12160.h
 *	sys/microcode/isp/asm_2100.h
 *	sys/microcode/isp/asm_2200.h
 *	sys/pci/isp_pci.c
 *	sys/sbus/isp_sbus.c
 *
 * Is being actively maintained by Matthew Jacob (mjacob@netbsd.org).
 * This driver also is shared source with the FreeBSD, OpenBSD, Linux and
 * Solaris versions. This tends to be an interesting maintenance problem.
 *
 * Please coordinate with Matthew Jacob on changes you wish to make here.
 */
/*
 * Platform (NetBSD) dependent common attachment code for Qlogic adapters.
 * Matthew Jacob <mjacob@nas.nasa.gov>
 */
/*
 * Copyright (C) 1997, 1998, 1999 National Aeronautics & Space Administration
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <dev/ic/isp_netbsd.h>
#include <sys/scsiio.h>


/*
 * Set a timeout for the watchdogging of a command.
 *
 * The dimensional analysis is
 *
 *	milliseconds * (seconds/millisecond) * (ticks/second) = ticks
 *
 *			=
 *
 *	(milliseconds / 1000) * hz = ticks
 *
 *
 * For timeouts less than 1 second, we'll get zero. Because of this, and
 * because we want to establish *our* timeout to be longer than what the
 * firmware might do, we just add 3 seconds at the back end.
 */
#define	_XT(xs)	((((xs)->timeout/1000) * hz) + (3 * hz))
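/*
 * Illustrative example only: with hz at 100 and a 10000 millisecond command
 * timeout, _XT yields (10000 / 1000) * 100 + 3 * 100 = 1300 ticks, i.e. the
 * watchdog fires 13 seconds of real time after the command is queued.
 */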

static void ispminphys __P((struct buf *));
static int32_t ispcmd __P((XS_T *));
static int
ispioctl __P((struct scsipi_link *, u_long, caddr_t, int, struct proc *));

static struct scsipi_device isp_dev = { NULL, NULL, NULL, NULL };
static int isp_polled_cmd __P((struct ispsoftc *, XS_T *));
static void isp_dog __P((void *));
static void isp_command_requeue __P((void *));
static void isp_internal_restart __P((void *));

/*
 * Complete attachment of hardware, including subdevices.
 */
void
isp_attach(isp)
	struct ispsoftc *isp;
{
	isp->isp_osinfo._adapter.scsipi_minphys = ispminphys;
	isp->isp_osinfo._adapter.scsipi_ioctl = ispioctl;
	isp->isp_osinfo._adapter.scsipi_cmd = ispcmd;

	isp->isp_state = ISP_RUNSTATE;
	isp->isp_osinfo._link.scsipi_scsi.channel =
	    (IS_DUALBUS(isp))? 0 : SCSI_CHANNEL_ONLY_ONE;
	isp->isp_osinfo._link.adapter_softc = isp;
	isp->isp_osinfo._link.device = &isp_dev;
	isp->isp_osinfo._link.adapter = &isp->isp_osinfo._adapter;
	isp->isp_osinfo._link.openings = isp->isp_maxcmds;
	/*
	 * Until the midlayer is fixed to use REPORT LUNS, limit to 8 luns.
	 */
	isp->isp_osinfo._link.scsipi_scsi.max_lun =
	   (isp->isp_maxluns < 7)? isp->isp_maxluns - 1 : 7;
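	/*
	 * For instance, an adapter reporting 8 or more luns ends up with
	 * max_lun 7 (luns 0-7); one reporting fewer gets isp_maxluns - 1.
	 */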
	TAILQ_INIT(&isp->isp_osinfo.waitq);	/* The 2nd bus will share.. */

	if (IS_FC(isp)) {
		isp->isp_osinfo._link.scsipi_scsi.max_target = MAX_FC_TARG-1;
	} else {
		sdparam *sdp = isp->isp_param;
		isp->isp_osinfo._link.scsipi_scsi.max_target = MAX_TARGETS-1;
		isp->isp_osinfo._link.scsipi_scsi.adapter_target =
		    sdp->isp_initiator_id;
		isp->isp_osinfo.discovered[0] = 1 << sdp->isp_initiator_id;
		if (IS_DUALBUS(isp)) {
			isp->isp_osinfo._link_b = isp->isp_osinfo._link;
			sdp++;
			isp->isp_osinfo.discovered[1] =
			    1 << sdp->isp_initiator_id;
			isp->isp_osinfo._link_b.scsipi_scsi.adapter_target =
			    sdp->isp_initiator_id;
			isp->isp_osinfo._link_b.scsipi_scsi.channel = 1;
			isp->isp_osinfo._link_b.scsipi_scsi.max_lun =
			    isp->isp_osinfo._link.scsipi_scsi.max_lun;
		}
	}
	isp->isp_osinfo._link.type = BUS_SCSI;

	/*
	 * Send a SCSI Bus Reset.
	 */
	if (IS_SCSI(isp)) {
		int bus = 0;
		ISP_LOCK(isp);
		(void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
		if (IS_DUALBUS(isp)) {
			bus++;
			(void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
		}
		ISP_UNLOCK(isp);
	} else {
		int defid;
		fcparam *fcp = isp->isp_param;
		delay(2 * 1000000);
		defid = MAX_FC_TARG;
		ISP_LOCK(isp);
		/*
		 * We probably won't have clock interrupts running,
		 * so we'll be really short (smoke test, really)
		 * at this time.
		 */
		if (isp_control(isp, ISPCTL_FCLINK_TEST, NULL)) {
			(void) isp_control(isp, ISPCTL_PDB_SYNC, NULL);
			if (fcp->isp_fwstate == FW_READY &&
			    fcp->isp_loopstate >= LOOP_PDB_RCVD) {
				defid = fcp->isp_loopid;
			}
		}
		ISP_UNLOCK(isp);
		isp->isp_osinfo._link.scsipi_scsi.adapter_target = defid;
	}

	/*
	 * After this point, we'll be doing the new configuration
	 * schema which allows interrupts, so we can do tsleep/wakeup
	 * for mailbox stuff at that point.
	 */
	isp->isp_osinfo.no_mbox_ints = 0;

	/*
	 * And attach children (if any).
	 */
	config_found((void *)isp, &isp->isp_osinfo._link, scsiprint);
	if (IS_DUALBUS(isp)) {
		config_found((void *)isp, &isp->isp_osinfo._link_b, scsiprint);
	}
}

/*
 * minphys our xfers
 *
 * Unfortunately, the buffer pointer describes the target device- not the
 * adapter device, so we can't use the pointer to find out what kind of
 * adapter we are and adjust accordingly.
 */

static void
ispminphys(bp)
	struct buf *bp;
{
	/*
	 * XXX: Only the 1020 has a 24 bit limit.
	 */
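	/* (1 << 24) bytes is a 16 MB ceiling on any single transfer. */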
	if (bp->b_bcount >= (1 << 24)) {
		bp->b_bcount = (1 << 24);
	}
	minphys(bp);
}

static int
ispioctl(sc_link, cmd, addr, flag, p)
	struct scsipi_link *sc_link;
	u_long cmd;
	caddr_t addr;
	int flag;
	struct proc *p;
{
	struct ispsoftc *isp = sc_link->adapter_softc;
	int s, chan, retval = ENOTTY;

	chan = (sc_link->scsipi_scsi.channel == SCSI_CHANNEL_ONLY_ONE)? 0 :
	    sc_link->scsipi_scsi.channel;

	switch (cmd) {
	case SCBUSACCEL:
	{
		struct scbusaccel_args *sp = (struct scbusaccel_args *)addr;
		if (IS_SCSI(isp) && sp->sa_lun == 0) {
			int dflags = 0;
			sdparam *sdp = SDPARAM(isp);

			sdp += chan;
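			/*
			 * Fold the requested accelerations into this
			 * target's goal flags and mark the bus for a
			 * parameter update; the core code presumably pushes
			 * the new settings to the chip at its next chance.
			 */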
			if (sp->sa_flags & SC_ACCEL_TAGS)
				dflags |= DPARM_TQING;
			if (sp->sa_flags & SC_ACCEL_WIDE)
				dflags |= DPARM_WIDE;
			if (sp->sa_flags & SC_ACCEL_SYNC)
				dflags |= DPARM_SYNC;
			s = splbio();
			sdp->isp_devparam[sp->sa_target].dev_flags |= dflags;
			dflags = sdp->isp_devparam[sp->sa_target].dev_flags;
			sdp->isp_devparam[sp->sa_target].dev_update = 1;
			isp->isp_update |= (1 << chan);
			splx(s);
			isp_prt(isp, ISP_LOGDEBUG1,
			    "ispioctl: device flags 0x%x for %d.%d.X",
			    dflags, chan, sp->sa_target);
		}
		retval = 0;
		break;
	}
	case SCBUSIORESET:
		s = splbio();
		if (isp_control(isp, ISPCTL_RESET_BUS, &chan))
			retval = EIO;
		else
			retval = 0;
		(void) splx(s);
		break;
	default:
		break;
	}
	return (retval);
}


static int32_t
ispcmd(xs)
	XS_T *xs;
{
	struct ispsoftc *isp;
	int result, s;

	isp = XS_ISP(xs);
	s = splbio();
	if (isp->isp_state < ISP_RUNSTATE) {
		DISABLE_INTS(isp);
		isp_init(isp);
		if (isp->isp_state != ISP_INITSTATE) {
			ENABLE_INTS(isp);
			(void) splx(s);
			XS_SETERR(xs, HBA_BOTCH);
			return (COMPLETE);
		}
		isp->isp_state = ISP_RUNSTATE;
		ENABLE_INTS(isp);
	}

	/*
	 * Check for queue blockage...
	 */
	if (isp->isp_osinfo.blocked) {
		if (xs->xs_control & XS_CTL_POLL) {
			xs->error = XS_DRIVER_STUFFUP;
			splx(s);
			return (TRY_AGAIN_LATER);
		}
		TAILQ_INSERT_TAIL(&isp->isp_osinfo.waitq, xs, adapter_q);
		splx(s);
		return (SUCCESSFULLY_QUEUED);
	}
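	/*
	 * Anything left on the wait queue above is restarted later by
	 * isp_internal_restart() once a LOOP UP event clears the blockage.
	 */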

	if (xs->xs_control & XS_CTL_POLL) {
		volatile u_int8_t ombi = isp->isp_osinfo.no_mbox_ints;
		isp->isp_osinfo.no_mbox_ints = 1;
		result = isp_polled_cmd(isp, xs);
		isp->isp_osinfo.no_mbox_ints = ombi;
		(void) splx(s);
		return (result);
	}

	result = isp_start(xs);
#if	0
{
	static int na[16] = { 0 };
	if (na[isp->isp_unit] < isp->isp_nactive) {
		isp_prt(isp, ISP_LOGALL, "active hiwater %d", isp->isp_nactive);
		na[isp->isp_unit] = isp->isp_nactive;
	}
}
#endif
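	/*
	 * Translate the core layer's CMD_* result into the scsipi midlayer
	 * return code, arming the watchdog for queued commands that carry a
	 * timeout and deferring CMD_RQLATER commands for a one second retry.
	 */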
	switch (result) {
	case CMD_QUEUED:
		result = SUCCESSFULLY_QUEUED;
		if (xs->timeout) {
			callout_reset(&xs->xs_callout, _XT(xs), isp_dog, xs);
		}
		break;
	case CMD_EAGAIN:
		result = TRY_AGAIN_LATER;
		break;
	case CMD_RQLATER:
		result = SUCCESSFULLY_QUEUED;
		callout_reset(&xs->xs_callout, hz, isp_command_requeue, xs);
		break;
	case CMD_COMPLETE:
		result = COMPLETE;
		break;
	}
	(void) splx(s);
	return (result);
}

static int
isp_polled_cmd(isp, xs)
	struct ispsoftc *isp;
	XS_T *xs;
{
	int result;
	int infinite = 0, mswait;

	result = isp_start(xs);

	switch (result) {
	case CMD_QUEUED:
		result = SUCCESSFULLY_QUEUED;
		break;
	case CMD_RQLATER:
	case CMD_EAGAIN:
		if (XS_NOERR(xs)) {
			xs->error = XS_DRIVER_STUFFUP;
		}
		result = TRY_AGAIN_LATER;
		break;
	case CMD_COMPLETE:
		result = COMPLETE;
		break;
	}

	if (result != SUCCESSFULLY_QUEUED) {
		return (result);
	}

	/*
	 * If we can't use interrupts, poll on completion.
	 */
	if ((mswait = XS_TIME(xs)) == 0)
		infinite = 1;

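	/*
	 * Busy-wait in roughly one millisecond steps, calling the interrupt
	 * handler by hand, until the command completes or the wait time
	 * (XS_TIME, in milliseconds) runs out; a zero wait means forever.
	 */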
	while (mswait || infinite) {
		if (isp_intr((void *)isp)) {
			if (XS_CMD_DONE_P(xs)) {
				break;
			}
		}
		USEC_DELAY(1000);
		mswait -= 1;
	}

	/*
	 * If no other error occurred but we didn't finish,
	 * something bad happened.
	 */
	if (XS_CMD_DONE_P(xs) == 0) {
		if (isp_control(isp, ISPCTL_ABORT_CMD, xs)) {
			isp_reinit(isp);
		}
		if (XS_NOERR(xs)) {
			XS_SETERR(xs, HBA_BOTCH);
		}
	}
	result = COMPLETE;
	return (result);
}

void
isp_done(xs)
	XS_T *xs;
{
	XS_CMD_S_DONE(xs);
	if (XS_CMD_WDOG_P(xs) == 0) {
		struct ispsoftc *isp = XS_ISP(xs);
		callout_stop(&xs->xs_callout);
		if (XS_CMD_GRACE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1,
			    "finished command on borrowed time");
		}
		XS_CMD_S_CLEAR(xs);
		scsipi_done(xs);
	}
}

static void
isp_dog(arg)
	void *arg;
{
	XS_T *xs = arg;
	struct ispsoftc *isp = XS_ISP(xs);
	u_int32_t handle;

	ISP_ILOCK(isp);
	/*
	 * We've decided this command is dead. Make sure we're not trying
	 * to kill a command that's already dead by getting its handle and
	 * seeing whether it's still alive.
	 */
	handle = isp_find_handle(isp, xs);
	if (handle) {
		u_int16_t r, r1, i;

		if (XS_CMD_DONE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1,
			    "watchdog found done cmd (handle 0x%x)", handle);
			ISP_IUNLOCK(isp);
			return;
		}

		if (XS_CMD_WDOG_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1,
			    "recursive watchdog (handle 0x%x)", handle);
			ISP_IUNLOCK(isp);
			return;
		}

		XS_CMD_S_WDOG(xs);

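		/*
		 * Read the interrupt status register until two consecutive
		 * reads agree (giving up after 1000 tries) so that we don't
		 * act on a value caught mid-update.
		 */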
		i = 0;
		do {
			r = ISP_READ(isp, BIU_ISR);
			USEC_DELAY(1);
			r1 = ISP_READ(isp, BIU_ISR);
		} while (r != r1 && ++i < 1000);

		if (INT_PENDING(isp, r) && isp_intr(isp) && XS_CMD_DONE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1, "watchdog cleanup (%x, %x)",
			    handle, r);
			XS_CMD_C_WDOG(xs);
			isp_done(xs);
		} else if (XS_CMD_GRACE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1, "watchdog timeout (%x, %x)",
			    handle, r);
			/*
			 * Make sure the command is *really* dead before we
			 * release the handle (and DMA resources) for reuse.
			 */
			(void) isp_control(isp, ISPCTL_ABORT_CMD, arg);

			/*
			 * After this point, the command is really dead.
			 */
			if (XS_XFRLEN(xs)) {
				ISP_DMAFREE(isp, xs, handle);
			}
			isp_destroy_handle(isp, handle);
			XS_SETERR(xs, XS_TIMEOUT);
			XS_CMD_S_CLEAR(xs);
			isp_done(xs);
		} else {
			u_int16_t iptr, optr;
			ispreq_t *mp;
			isp_prt(isp, ISP_LOGDEBUG2,
			    "possible command timeout (%x, %x)", handle, r);
			XS_CMD_C_WDOG(xs);
			callout_reset(&xs->xs_callout, hz, isp_dog, xs);
			if (isp_getrqentry(isp, &iptr, &optr, (void **) &mp)) {
				ISP_IUNLOCK(isp);
				return;
			}
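			/*
			 * Build and queue a SYNC_ALL marker entry for this
			 * channel; queuing a benign request appears to be
			 * enough to shake loose a completion the firmware
			 * may be sitting on.
			 */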
			XS_CMD_S_GRACE(xs);
			MEMZERO((void *) mp, sizeof (*mp));
			mp->req_header.rqs_entry_count = 1;
			mp->req_header.rqs_entry_type = RQSTYPE_MARKER;
			mp->req_modifier = SYNC_ALL;
			mp->req_target = XS_CHANNEL(xs) << 7;
			ISP_SWIZZLE_REQUEST(isp, mp);
			ISP_ADD_REQUEST(isp, iptr);
		}
	} else {
		isp_prt(isp, ISP_LOGDEBUG0, "watchdog with no command");
	}
	ISP_IUNLOCK(isp);
}

/*
 * Free any associated resources prior to decommissioning and
 * set the card to a known state (so it doesn't wake up and kick
 * us when we aren't expecting it to).
 *
 * Locks are held before coming here.
 */
void
isp_uninit(isp)
	struct ispsoftc *isp;
{
	ISP_LOCK(isp);
	/*
	 * Leave with interrupts disabled.
	 */
	DISABLE_INTS(isp);
	ISP_UNLOCK(isp);
}

/*
 * Restart function for a command to be requeued later.
 */
static void
isp_command_requeue(arg)
	void *arg;
{
	struct scsipi_xfer *xs = arg;
	struct ispsoftc *isp = XS_ISP(xs);
	ISP_LOCK(isp);
	switch (ispcmd(xs)) {
	case SUCCESSFULLY_QUEUED:
		isp_prt(isp, ISP_LOGINFO,
		    "requeued commands for %d.%d", XS_TGT(xs), XS_LUN(xs));
		if (xs->timeout) {
			callout_reset(&xs->xs_callout, _XT(xs), isp_dog, xs);
		}
		break;
	case TRY_AGAIN_LATER:
		isp_prt(isp, ISP_LOGINFO,
		    "EAGAIN on requeue for %d.%d", XS_TGT(xs), XS_LUN(xs));
		callout_reset(&xs->xs_callout, hz, isp_command_requeue, xs);
		break;
	case COMPLETE:
		/* can only be an error */
		XS_CMD_S_DONE(xs);
		callout_stop(&xs->xs_callout);
		if (XS_NOERR(xs)) {
			XS_SETERR(xs, HBA_BOTCH);
		}
		scsipi_done(xs);
		break;
	}
	ISP_UNLOCK(isp);
}

/*
 * Restart function, run after an event such as a LOOP UP,
 * scheduled as a timeout to provide some hysteresis.
 */
static void
isp_internal_restart(arg)
	void *arg;
{
	struct ispsoftc *isp = arg;
	int result, nrestarted = 0;

	ISP_LOCK(isp);
	if (isp->isp_osinfo.blocked == 0) {
		struct scsipi_xfer *xs;
		while ((xs = TAILQ_FIRST(&isp->isp_osinfo.waitq)) != NULL) {
			TAILQ_REMOVE(&isp->isp_osinfo.waitq, xs, adapter_q);
			result = isp_start(xs);
			if (result != CMD_QUEUED) {
				isp_prt(isp, ISP_LOGERR,
				    "botched command restart (err=%d)", result);
				XS_CMD_S_DONE(xs);
				if (xs->error == XS_NOERROR)
					xs->error = XS_DRIVER_STUFFUP;
				callout_stop(&xs->xs_callout);
				scsipi_done(xs);
			} else if (xs->timeout) {
				callout_reset(&xs->xs_callout,
				    _XT(xs), isp_dog, xs);
			}
			nrestarted++;
		}
		isp_prt(isp, ISP_LOGINFO,
		    "isp_restart requeued %d commands", nrestarted);
	}
	ISP_UNLOCK(isp);
}

int
isp_async(isp, cmd, arg)
	struct ispsoftc *isp;
	ispasync_t cmd;
	void *arg;
{
	int bus, tgt;
	int s = splbio();
	switch (cmd) {
	case ISPASYNC_NEW_TGT_PARAMS:
	if (IS_SCSI(isp) && isp->isp_dblev) {
		sdparam *sdp = isp->isp_param;
		char *wt;
		int mhz, flags, period;

		tgt = *((int *) arg);
		bus = (tgt >> 16) & 0xffff;
		tgt &= 0xffff;
		sdp += bus;
		flags = sdp->isp_devparam[tgt].cur_dflags;
		period = sdp->isp_devparam[tgt].cur_period;

		if ((flags & DPARM_SYNC) && period &&
		    (sdp->isp_devparam[tgt].cur_offset) != 0) {
			/*
			 * There's some ambiguity about our negotiated speed
			 * if we haven't detected LVD mode correctly (which
			 * seems to happen, unfortunately). If we're in LVD
			 * mode, then different rules apply about speed.
			 */
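			/*
			 * cur_period is nominally in units of 4ns, so the
			 * generic conversion is mhz = 1000 / (period * 4);
			 * the low period codes 0x9-0xc are treated specially
			 * and mapped directly to 80, 40, 33 and 25MHz.
			 */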
			if (sdp->isp_lvdmode || period < 0xc) {
				switch (period) {
				case 0x9:
					mhz = 80;
					break;
				case 0xa:
					mhz = 40;
					break;
				case 0xb:
					mhz = 33;
					break;
				case 0xc:
					mhz = 25;
					break;
				default:
					mhz = 1000 / (period * 4);
					break;
				}
			} else {
				mhz = 1000 / (period * 4);
			}
		} else {
			mhz = 0;
		}
		switch (flags & (DPARM_WIDE|DPARM_TQING)) {
		case DPARM_WIDE:
			wt = ", 16 bit wide";
			break;
		case DPARM_TQING:
			wt = ", Tagged Queueing Enabled";
			break;
		case DPARM_WIDE|DPARM_TQING:
			wt = ", 16 bit wide, Tagged Queueing Enabled";
			break;
		default:
			wt = " ";
			break;
		}
		if (mhz) {
			isp_prt(isp, ISP_LOGINFO,
			    "Bus %d Target %d at %dMHz Max Offset %d%s",
			    bus, tgt, mhz, sdp->isp_devparam[tgt].cur_offset,
			    wt);
		} else {
			isp_prt(isp, ISP_LOGINFO,
			    "Bus %d Target %d Async Mode%s", bus, tgt, wt);
		}
		break;
	}
	case ISPASYNC_BUS_RESET:
		if (arg)
			bus = *((int *) arg);
		else
			bus = 0;
		isp_prt(isp, ISP_LOGINFO, "SCSI bus %d reset detected", bus);
		break;
	case ISPASYNC_LOOP_DOWN:
		/*
		 * Hopefully we get here in time to minimize the number
		 * of commands we are firing off that are sure to die.
		 */
		isp->isp_osinfo.blocked = 1;
		isp_prt(isp, ISP_LOGINFO, "Loop DOWN");
		break;
	case ISPASYNC_LOOP_UP:
		isp->isp_osinfo.blocked = 0;
		callout_reset(&isp->isp_osinfo._restart, 1,
		    isp_internal_restart, isp);
		isp_prt(isp, ISP_LOGINFO, "Loop UP");
		break;
	case ISPASYNC_PDB_CHANGED:
	if (IS_FC(isp) && isp->isp_dblev) {
		const char *fmt = "Target %d (Loop 0x%x) Port ID 0x%x "
		    "role %s %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x";
		const static char *roles[4] = {
		    "No", "Target", "Initiator", "Target/Initiator"
		};
		char *ptr;
		fcparam *fcp = isp->isp_param;
		int tgt = *((int *) arg);
		struct lportdb *lp = &fcp->portdb[tgt];

		if (lp->valid) {
			ptr = "arrived";
		} else {
			ptr = "disappeared";
		}
		isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid, lp->portid,
		    roles[lp->roles & 0x3], ptr,
		    (u_int32_t) (lp->port_wwn >> 32),
		    (u_int32_t) (lp->port_wwn & 0xffffffffLL),
		    (u_int32_t) (lp->node_wwn >> 32),
		    (u_int32_t) (lp->node_wwn & 0xffffffffLL));
		break;
	}
#ifdef	ISP2100_FABRIC
	case ISPASYNC_CHANGE_NOTIFY:
		isp_prt(isp, ISP_LOGINFO, "Name Server Database Changed");
		break;
	case ISPASYNC_FABRIC_DEV:
	{
		int target;
		struct lportdb *lp;
		sns_scrsp_t *resp = (sns_scrsp_t *) arg;
		u_int32_t portid;
		u_int64_t wwn;
		fcparam *fcp = isp->isp_param;

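		/*
		 * Assemble the 24-bit port ID and the 64-bit port WWN from
		 * the byte arrays in the SNS response (most significant
		 * byte first).
		 */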
		portid =
		    (((u_int32_t) resp->snscb_port_id[0]) << 16) |
		    (((u_int32_t) resp->snscb_port_id[1]) << 8) |
		    (((u_int32_t) resp->snscb_port_id[2]));
		wwn =
		    (((u_int64_t)resp->snscb_portname[0]) << 56) |
		    (((u_int64_t)resp->snscb_portname[1]) << 48) |
		    (((u_int64_t)resp->snscb_portname[2]) << 40) |
		    (((u_int64_t)resp->snscb_portname[3]) << 32) |
		    (((u_int64_t)resp->snscb_portname[4]) << 24) |
		    (((u_int64_t)resp->snscb_portname[5]) << 16) |
		    (((u_int64_t)resp->snscb_portname[6]) <<  8) |
		    (((u_int64_t)resp->snscb_portname[7]));

		isp_prt(isp, ISP_LOGINFO,
		    "Fabric Device (Type 0x%x)@PortID 0x%x WWN 0x%08x%08x",
		    resp->snscb_port_type, portid, ((u_int32_t)(wwn >> 32)),
		    ((u_int32_t)(wwn & 0xffffffff)));

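		/*
		 * If this WWN is already in the local port database there is
		 * nothing more to do; otherwise claim the first free slot
		 * (port_wwn == 0) for it.
		 */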
		for (target = FC_SNS_ID+1; target < MAX_FC_TARG; target++) {
			lp = &fcp->portdb[target];
			if (lp->port_wwn == wwn)
				break;
		}
		if (target < MAX_FC_TARG) {
			break;
		}
		for (target = FC_SNS_ID+1; target < MAX_FC_TARG; target++) {
			lp = &fcp->portdb[target];
			if (lp->port_wwn == 0)
				break;
		}
		if (target == MAX_FC_TARG) {
			isp_prt(isp, ISP_LOGWARN,
			    "no more space for fabric devices");
			(void) splx(s);
			return (-1);
		}
		lp->port_wwn = lp->node_wwn = wwn;
		lp->portid = portid;
		break;
	}
#endif
	default:
		break;
	}
	(void) splx(s);
	return (0);
}

#include <machine/stdarg.h>
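/*
 * Driver printf: unless the ISP_LOGALL level is used, messages are gated
 * on the bits set in the per-instance debug level (isp_dblev).
 */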
void
#ifdef	__STDC__
isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
#else
isp_prt(isp, level, fmt, va_alist)
	struct ispsoftc *isp;
	int level;
	char *fmt;
	va_dcl;
#endif
{
	va_list ap;
	if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
		return;
	}
	printf("%s: ", isp->isp_name);
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
	printf("\n");
}