isp_netbsd.c revision 1.29
      1 /* $NetBSD: isp_netbsd.c,v 1.29 2000/08/08 22:58:31 mjacob Exp $ */
      2 /*
      3  * Platform (NetBSD) dependent common attachment code for Qlogic adapters.
       4  * Matthew Jacob <mjacob@nas.nasa.gov>
      5  */
      6 /*
      7  * Copyright (C) 1997, 1998, 1999 National Aeronautics & Space Administration
      8  * All rights reserved.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  * 3. The name of the author may not be used to endorse or promote products
     19  *    derived from this software without specific prior written permission
     20  *
     21  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     22  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     23  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     24  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     25  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
     26  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     27  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     28  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     29  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
     30  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     31  */
     32 
     33 #include <dev/ic/isp_netbsd.h>
     34 #include <sys/scsiio.h>
     35 
     36 
     37 /*
     38  * Set a timeout for the watchdogging of a command.
     39  *
     40  * The dimensional analysis is
     41  *
     42  *	milliseconds * (seconds/millisecond) * (ticks/second) = ticks
     43  *
     44  *			=
     45  *
     46  *	(milliseconds / 1000) * hz = ticks
     47  *
     48  *
     49  * For timeouts less than 1 second, we'll get zero. Because of this, and
     50  * because we want to establish *our* timeout to be longer than what the
     51  * firmware might do, we just add 3 seconds at the back end.
     52  */
     53 #define	_XT(xs)	((((xs)->timeout/1000) * hz) + (3 * hz))
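        /*
         * Worked example, assuming hz = 100:
         *
         *	xs->timeout = 10000 ms: (10000 / 1000) * 100 + (3 * 100) = 1300 ticks
         *	xs->timeout = 250 ms:   (250 / 1000) * 100 + (3 * 100) = 300 ticks
         *
         * i.e. the 3 second pad is what keeps sub-second timeouts from
         * rounding down to nothing.
         */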
     54 
     55 static void ispminphys __P((struct buf *));
     56 static int32_t ispcmd __P((XS_T *));
     57 static int
     58 ispioctl __P((struct scsipi_link *, u_long, caddr_t, int, struct proc *));
     59 
     60 static struct scsipi_device isp_dev = { NULL, NULL, NULL, NULL };
     61 static int isp_polled_cmd __P((struct ispsoftc *, XS_T *));
     62 static void isp_dog __P((void *));
     63 static void isp_command_requeue __P((void *));
     64 static void isp_internal_restart __P((void *));
     65 
     66 /*
     67  * Complete attachment of hardware, including subdevices.
     68  */
     69 void
     70 isp_attach(isp)
     71 	struct ispsoftc *isp;
     72 {
     73 	int maxluns;
     74 	isp->isp_osinfo._adapter.scsipi_minphys = ispminphys;
     75 	isp->isp_osinfo._adapter.scsipi_ioctl = ispioctl;
     76 	isp->isp_osinfo._adapter.scsipi_cmd = ispcmd;
     77 
     78 	isp->isp_state = ISP_RUNSTATE;
     79 	isp->isp_osinfo._link.scsipi_scsi.channel =
     80 	    (IS_DUALBUS(isp))? 0 : SCSI_CHANNEL_ONLY_ONE;
     81 	isp->isp_osinfo._link.adapter_softc = isp;
     82 	isp->isp_osinfo._link.device = &isp_dev;
     83 	isp->isp_osinfo._link.adapter = &isp->isp_osinfo._adapter;
     84 	isp->isp_osinfo._link.openings = isp->isp_maxcmds;
     85 	/*
     86 	 * Until the midlayer is fixed to use REPORT LUNS, limit to 8 luns.
     87 	 */
     88 	maxluns = (isp->isp_maxluns < 7)?
     89 	    isp->isp_maxluns - 1 : 7;
     90 	isp->isp_osinfo._link.scsipi_scsi.max_lun = maxluns;
     91 	TAILQ_INIT(&isp->isp_osinfo.waitq);	/* The 2nd bus will share.. */
     92 
     93 	if (IS_FC(isp)) {
     94 		isp->isp_osinfo._link.scsipi_scsi.max_target = MAX_FC_TARG-1;
     95 	} else {
     96 		sdparam *sdp = isp->isp_param;
     97 		isp->isp_osinfo._link.scsipi_scsi.max_target = MAX_TARGETS-1;
     98 		isp->isp_osinfo._link.scsipi_scsi.adapter_target =
     99 		    sdp->isp_initiator_id;
    100 		isp->isp_osinfo.discovered[0] = 1 << sdp->isp_initiator_id;
    101 		if (IS_DUALBUS(isp)) {
    102 			isp->isp_osinfo._link_b = isp->isp_osinfo._link;
    103 			sdp++;
    104 			isp->isp_osinfo.discovered[1] =
    105 			    1 << sdp->isp_initiator_id;
    106 			isp->isp_osinfo._link_b.scsipi_scsi.adapter_target =
    107 			    sdp->isp_initiator_id;
    108 			isp->isp_osinfo._link_b.scsipi_scsi.channel = 1;
    109 			isp->isp_osinfo._link_b.scsipi_scsi.max_lun =
    110 			    isp->isp_osinfo._link.scsipi_scsi.max_lun;
    111 		}
    112 	}
    113 	isp->isp_osinfo._link.type = BUS_SCSI;
    114 
    115 	/*
    116 	 * Send a SCSI Bus Reset.
    117 	 */
    118 	if (IS_SCSI(isp)) {
    119 		int bus = 0;
    120 		ISP_LOCK(isp);
    121 		(void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
    122 		if (IS_DUALBUS(isp)) {
    123 			bus++;
    124 			(void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
    125 		}
    126 		ISP_UNLOCK(isp);
    127 	} else {
    128 		int defid;
    129 		fcparam *fcp = isp->isp_param;
    130 		delay(2 * 1000000);
    131 		defid = MAX_FC_TARG;
    132 		ISP_LOCK(isp);
    133 		/*
    134 		 * We probably won't have clock interrupts running,
    135 		 * so this will be a very brief check (a smoke test,
    136 		 * really) at this time.
    137 		 */
    138 		if (isp_control(isp, ISPCTL_FCLINK_TEST, NULL) == 0) {
    139 			(void) isp_control(isp, ISPCTL_PDB_SYNC, NULL);
    140 			if (fcp->isp_fwstate == FW_READY &&
    141 			    fcp->isp_loopstate >= LOOP_PDB_RCVD) {
    142 				defid = fcp->isp_loopid;
    143 			}
    144 		}
    145 		ISP_UNLOCK(isp);
    146 		isp->isp_osinfo._link.scsipi_scsi.adapter_target = defid;
    147 	}
    148 
    149 	/*
    150 	 * After this point, we'll be doing the new configuration
    151 	 * schema which allows interrupts, so we can do tsleep/wakeup
    152 	 * for mailbox stuff at that point.
    153 	 */
    154 	isp->isp_osinfo.no_mbox_ints = 0;
    155 
    156 	/*
    157 	 * And attach children (if any).
    158 	 */
    159 	config_found((void *)isp, &isp->isp_osinfo._link, scsiprint);
    160 	if (IS_DUALBUS(isp)) {
    161 		config_found((void *)isp, &isp->isp_osinfo._link_b, scsiprint);
    162 	}
    163 }
    164 
    165 /*
    166  * minphys our xfers
    167  *
    168  * Unfortunately, the buffer pointer describes the target device, not the
    169  * adapter device, so we can't use the pointer to find out what kind of
    170  * adapter we are and adjust accordingly.
    171  */
    172 
    173 static void
    174 ispminphys(bp)
    175 	struct buf *bp;
    176 {
    177 	/*
    178 	 * XXX: Only the 1020 has a 24 bit limit.
    179 	 */
    180 	if (bp->b_bcount >= (1 << 24)) {
    181 		bp->b_bcount = (1 << 24);
    182 	}
    183 	minphys(bp);
    184 }
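        /*
         * For example, ispminphys() clamps a 32 MB request
         * (b_bcount == 0x2000000) to 16 MB (1 << 24) before the system
         * minphys() gets to apply its own limit.
         */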
    185 
    186 static int
    187 ispioctl(sc_link, cmd, addr, flag, p)
    188 	struct scsipi_link *sc_link;
    189 	u_long cmd;
    190 	caddr_t addr;
    191 	int flag;
    192 	struct proc *p;
    193 {
    194 	struct ispsoftc *isp = sc_link->adapter_softc;
    195 	int s, chan, retval = ENOTTY;
    196 
    197 	chan = (sc_link->scsipi_scsi.channel == SCSI_CHANNEL_ONLY_ONE)? 0 :
    198 	    sc_link->scsipi_scsi.channel;
    199 
    200 	switch (cmd) {
    201 	case SCBUSACCEL:
    202 	{
    203 		struct scbusaccel_args *sp = (struct scbusaccel_args *)addr;
    204 		if (IS_SCSI(isp) && sp->sa_lun == 0) {
    205 			int dflags = 0;
    206 			sdparam *sdp = SDPARAM(isp);
    207 
    208 			sdp += chan;
    209 			if (sp->sa_flags & SC_ACCEL_TAGS)
    210 				dflags |= DPARM_TQING;
    211 			if (sp->sa_flags & SC_ACCEL_WIDE)
    212 				dflags |= DPARM_WIDE;
    213 			if (sp->sa_flags & SC_ACCEL_SYNC)
    214 				dflags |= DPARM_SYNC;
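        			/*
        			 * Fold the requested capabilities into the stored
        			 * device parameters and flag the target (and bus)
        			 * for update; the core driver renegotiates on a
        			 * later pass rather than right away.
        			 */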
    215 			s = splbio();
    216 			sdp->isp_devparam[sp->sa_target].dev_flags |= dflags;
    217 			dflags = sdp->isp_devparam[sp->sa_target].dev_flags;
    218 			sdp->isp_devparam[sp->sa_target].dev_update = 1;
    219 			isp->isp_update |= (1 << chan);
    220 			splx(s);
    221 			isp_prt(isp, ISP_LOGDEBUG1,
    222 			    "ispioctl: device flags 0x%x for %d.%d.X",
    223 			    dflags, chan, sp->sa_target);
    224 		}
    225 		retval = 0;
    226 		break;
    227 	}
    228 	case SCBUSIORESET:
    229 		s = splbio();
    230 		if (isp_control(isp, ISPCTL_RESET_BUS, &chan))
    231 			retval = EIO;
    232 		else
    233 			retval = 0;
    234 		(void) splx(s);
    235 		break;
    236 	default:
    237 		break;
    238 	}
    239 	return (retval);
    240 }
    241 
    242 
    243 static int32_t
    244 ispcmd(xs)
    245 	XS_T *xs;
    246 {
    247 	struct ispsoftc *isp;
    248 	int result, s;
    249 
    250 	isp = XS_ISP(xs);
    251 	s = splbio();
    252 	if (isp->isp_state < ISP_RUNSTATE) {
    253 		DISABLE_INTS(isp);
    254 		isp_init(isp);
    255 		if (isp->isp_state != ISP_INITSTATE) {
    256 			ENABLE_INTS(isp);
    257 			(void) splx(s);
    258 			XS_SETERR(xs, HBA_BOTCH);
    259 			return (COMPLETE);
    260 		}
    261 		isp->isp_state = ISP_RUNSTATE;
    262 		ENABLE_INTS(isp);
    263 	}
    264 
    265 	/*
    266 	 * Check for queue blockage...
    267 	 */
    268 	if (isp->isp_osinfo.blocked) {
    269 		if (xs->xs_control & XS_CTL_POLL) {
    270 			xs->error = XS_DRIVER_STUFFUP;
    271 			splx(s);
    272 			return (TRY_AGAIN_LATER);
    273 		}
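        		/*
        		 * Non-polled commands are parked on the wait queue;
        		 * isp_internal_restart() drains it once the loop comes
        		 * back up and we're unblocked.
        		 */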
    274 		TAILQ_INSERT_TAIL(&isp->isp_osinfo.waitq, xs, adapter_q);
    275 		splx(s);
    276 		return (SUCCESSFULLY_QUEUED);
    277 	}
    278 
    279 	if (xs->xs_control & XS_CTL_POLL) {
    280 		volatile u_int8_t ombi = isp->isp_osinfo.no_mbox_ints;
    281 		isp->isp_osinfo.no_mbox_ints = 1;
    282 		result = isp_polled_cmd(isp, xs);
    283 		isp->isp_osinfo.no_mbox_ints = ombi;
    284 		(void) splx(s);
    285 		return (result);
    286 	}
    287 
    288 	result = isp_start(xs);
    289 	switch (result) {
    290 	case CMD_QUEUED:
    291 		result = SUCCESSFULLY_QUEUED;
    292 		if (xs->timeout) {
    293 			callout_reset(&xs->xs_callout, _XT(xs), isp_dog, xs);
    294 		}
    295 		break;
    296 	case CMD_EAGAIN:
    297 		result = TRY_AGAIN_LATER;
    298 		break;
    299 	case CMD_RQLATER:
    300 		result = SUCCESSFULLY_QUEUED;
    301 		callout_reset(&xs->xs_callout, hz, isp_command_requeue, xs);
    302 		break;
    303 	case CMD_COMPLETE:
    304 		result = COMPLETE;
    305 		break;
    306 	}
    307 	(void) splx(s);
    308 	return (result);
    309 }
    310 
    311 static int
    312 isp_polled_cmd(isp, xs)
    313 	struct ispsoftc *isp;
    314 	XS_T *xs;
    315 {
    316 	int result;
    317 	int infinite = 0, mswait;
    318 
    319 	result = isp_start(xs);
    320 
    321 	switch (result) {
    322 	case CMD_QUEUED:
    323 		result = SUCCESSFULLY_QUEUED;
    324 		break;
    325 	case CMD_RQLATER:
    326 	case CMD_EAGAIN:
    327 		if (XS_NOERR(xs)) {
    328 			xs->error = XS_DRIVER_STUFFUP;
    329 		}
    330 		result = TRY_AGAIN_LATER;
    331 		break;
    332 	case CMD_COMPLETE:
    333 		result = COMPLETE;
    334 		break;
    335 
    336 	}
    337 
    338 	if (result != SUCCESSFULLY_QUEUED) {
    339 		return (result);
    340 	}
    341 
    342 	/*
    343 	 * If we can't use interrupts, poll on completion.
    344 	 */
    345 	if ((mswait = XS_TIME(xs)) == 0)
    346 		infinite = 1;
    347 
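        	/*
        	 * Poll the interrupt handler once a millisecond until the
        	 * command is marked done or the time budget runs out; a zero
        	 * timeout means poll forever.
        	 */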
    348 	while (mswait || infinite) {
    349 		if (isp_intr((void *)isp)) {
    350 			if (XS_CMD_DONE_P(xs)) {
    351 				break;
    352 			}
    353 		}
    354 		USEC_DELAY(1000);
    355 		mswait -= 1;
    356 	}
    357 
    358 	/*
    359 	 * If no other error occurred but we didn't finish,
    360 	 * something bad happened.
    361 	 */
    362 	if (XS_CMD_DONE_P(xs) == 0) {
    363 		if (isp_control(isp, ISPCTL_ABORT_CMD, xs)) {
    364 			isp_reinit(isp);
    365 		}
    366 		if (XS_NOERR(xs)) {
    367 			XS_SETERR(xs, HBA_BOTCH);
    368 		}
    369 	}
    370 	result = COMPLETE;
    371 	return (result);
    372 }
    373 
    374 void
    375 isp_done(xs)
    376 	XS_T *xs;
    377 {
    378 	XS_CMD_S_DONE(xs);
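        	/*
        	 * If the watchdog currently owns this command, leave the final
        	 * completion to it; otherwise cancel the timeout and hand the
        	 * xfer back to the midlayer.
        	 */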
    379 	if (XS_CMD_WDOG_P(xs) == 0) {
    380 		struct ispsoftc *isp = XS_ISP(xs);
    381 		callout_stop(&xs->xs_callout);
    382 		if (XS_CMD_GRACE_P(xs)) {
    383 			isp_prt(isp, ISP_LOGDEBUG1,
    384 			    "finished command on borrowed time");
    385 		}
    386 		XS_CMD_S_CLEAR(xs);
    387 		scsipi_done(xs);
    388 	}
    389 }
    390 
    391 static void
    392 isp_dog(arg)
    393 	void *arg;
    394 {
    395 	XS_T *xs = arg;
    396 	struct ispsoftc *isp = XS_ISP(xs);
    397 	u_int32_t handle;
    398 	int s = splbio();
    399 
    400 	/*
    401 	 * We've decided this command is dead. Make sure we're not trying
    402 	 * to kill a command that's already dead by getting its handle and
    403 	 * seeing whether it's still alive.
    404 	 */
    405 	handle = isp_find_handle(isp, xs);
    406 	if (handle) {
    407 		u_int16_t r, r1, i;
    408 
    409 		if (XS_CMD_DONE_P(xs)) {
    410 			isp_prt(isp, ISP_LOGDEBUG1,
    411 			    "watchdog found done cmd (handle 0x%x)", handle);
    412 			(void) splx(s);
    413 			return;
    414 		}
    415 
    416 		if (XS_CMD_WDOG_P(xs)) {
    417 			isp_prt(isp, ISP_LOGDEBUG1,
    418 			    "recursive watchdog (handle 0x%x)", handle);
    419 			(void) splx(s);
    420 			return;
    421 		}
    422 
    423 		XS_CMD_S_WDOG(xs);
    424 
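        		/*
        		 * Read the interrupt status register twice, a microsecond
        		 * apart, until two consecutive reads agree (or we give up
        		 * after 1000 tries), so we act on a stable view of any
        		 * pending interrupt.
        		 */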
    425 		i = 0;
    426 		do {
    427 			r = ISP_READ(isp, BIU_ISR);
    428 			USEC_DELAY(1);
    429 			r1 = ISP_READ(isp, BIU_ISR);
    430 		} while (r != r1 && ++i < 1000);
    431 
    432 		if (INT_PENDING(isp, r) && isp_intr(isp) && XS_CMD_DONE_P(xs)) {
    433 			isp_prt(isp, ISP_LOGDEBUG1, "watchdog cleanup (%x, %x)",
    434 			    handle, r);
    435 			XS_CMD_C_WDOG(xs);
    436 			isp_done(xs);
    437 		} else if (XS_CMD_GRACE_P(xs)) {
    438 			isp_prt(isp, ISP_LOGDEBUG1, "watchdog timeout (%x, %x)",
    439 			    handle, r);
    440 			/*
    441 			 * Make sure the command is *really* dead before we
    442 			 * release the handle (and DMA resources) for reuse.
    443 			 */
    444 			(void) isp_control(isp, ISPCTL_ABORT_CMD, arg);
    445 
    446 			/*
    447 			 * After this point, the command is really dead.
    448 			 */
    449 			if (XS_XFRLEN(xs)) {
    450 				ISP_DMAFREE(isp, xs, handle);
    451 			}
    452 			isp_destroy_handle(isp, handle);
    453 			XS_SETERR(xs, XS_TIMEOUT);
    454 			XS_CMD_S_CLEAR(xs);
    455 			isp_done(xs);
    456 		} else {
    457 			u_int16_t iptr, optr;
    458 			ispreq_t *mp;
    459 			isp_prt(isp, ISP_LOGDEBUG2,
    460 			    "possible command timeout (%x, %x)", handle, r);
    461 			XS_CMD_C_WDOG(xs);
    462 			callout_reset(&xs->xs_callout, hz, isp_dog, xs);
    463 			if (isp_getrqentry(isp, &iptr, &optr, (void **) &mp)) {
    464 				(void) splx(s);
    465 				return;
    466 			}
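        			/*
        			 * Put the command on borrowed time and queue a
        			 * SYNC_ALL marker for its channel, which should
        			 * nudge the firmware into posting any completion
        			 * it may be sitting on before the next watchdog
        			 * pass.
        			 */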
    467 			XS_CMD_S_GRACE(xs);
    468 			MEMZERO((void *) mp, sizeof (*mp));
    469 			mp->req_header.rqs_entry_count = 1;
    470 			mp->req_header.rqs_entry_type = RQSTYPE_MARKER;
    471 			mp->req_modifier = SYNC_ALL;
    472 			mp->req_target = XS_CHANNEL(xs) << 7;
    473 			ISP_SWIZZLE_REQUEST(isp, mp);
    474 			ISP_ADD_REQUEST(isp, iptr);
    475 		}
    476 	} else if (isp->isp_dblev) {
    477 		isp_prt(isp, ISP_LOGDEBUG2, "watchdog with no command");
    478 	}
    479 	(void) splx(s);
    480 }
    481 
    482 /*
    483  * Free any associated resources prior to decommissioning and
    484  * set the card to a known state (so it doesn't wake up and kick
    485  * us when we aren't expecting it to).
    486  *
    487  * Locks are held before coming here.
    488  */
    489 void
    490 isp_uninit(isp)
    491 	struct ispsoftc *isp;
    492 {
    493 	ISP_LOCK(isp);
    494 	/*
    495 	 * Leave with interrupts disabled.
    496 	 */
    497 	DISABLE_INTS(isp);
    498 	ISP_UNLOCK(isp);
    499 }
    500 
    501 /*
    502  * Restart function for a command to be requeued later.
    503  */
    504 static void
    505 isp_command_requeue(arg)
    506 	void *arg;
    507 {
    508 	struct scsipi_xfer *xs = arg;
    509 	struct ispsoftc *isp = XS_ISP(xs);
    510 	int s = splbio();
    511 	switch (ispcmd(xs)) {
    512 	case SUCCESSFULLY_QUEUED:
    513 		isp_prt(isp, ISP_LOGINFO,
    514 		    "requeued commands for %d.%d", XS_TGT(xs), XS_LUN(xs));
    515 		if (xs->timeout) {
    516 			callout_reset(&xs->xs_callout, _XT(xs), isp_dog, xs);
    517 		}
    518 		break;
    519 	case TRY_AGAIN_LATER:
    520 		isp_prt(isp, ISP_LOGINFO,
    521 		    "EAGAIN on requeue for %d.%d", XS_TGT(xs), XS_LUN(xs));
    522 		callout_reset(&xs->xs_callout, hz, isp_command_requeue, xs);
    523 		break;
    524 	case COMPLETE:
    525 		/* can only be an error */
    526 		XS_CMD_S_DONE(xs);
    527 		callout_stop(&xs->xs_callout);
    528 		if (XS_NOERR(xs)) {
    529 			XS_SETERR(xs, HBA_BOTCH);
    530 		}
    531 		scsipi_done(xs);
    532 		break;
    533 	}
    534 	(void) splx(s);
    535 }
    536 
    537 /*
    538  * Restart function after an event such as LOOP UP,
    539  * run from a timeout to provide some hysteresis.
    540  */
    541 static void
    542 isp_internal_restart(arg)
    543 	void *arg;
    544 {
    545 	struct ispsoftc *isp = arg;
    546 	int result, nrestarted = 0, s;
    547 
    548 	s = splbio();
    549 	if (isp->isp_osinfo.blocked == 0) {
    550 		struct scsipi_xfer *xs;
    551 		while ((xs = TAILQ_FIRST(&isp->isp_osinfo.waitq)) != NULL) {
    552 			TAILQ_REMOVE(&isp->isp_osinfo.waitq, xs, adapter_q);
    553 			result = isp_start(xs);
    554 			if (result != CMD_QUEUED) {
    555 				isp_prt(isp, ISP_LOGERR,
    556 				    "botched command restart (err=%d)", result);
    557 				XS_CMD_S_DONE(xs);
    558 				if (xs->error == XS_NOERROR)
    559 					xs->error = XS_DRIVER_STUFFUP;
    560 				callout_stop(&xs->xs_callout);
    561 				scsipi_done(xs);
    562 			} else if (xs->timeout) {
    563 				callout_reset(&xs->xs_callout,
    564 				    _XT(xs), isp_dog, xs);
    565 			}
    566 			nrestarted++;
    567 		}
    568 		isp_prt(isp, ISP_LOGINFO,
    569 		    "isp_restart requeued %d commands", nrestarted);
    570 	}
    571 	(void) splx(s);
    572 }
    573 
    574 int
    575 isp_async(isp, cmd, arg)
    576 	struct ispsoftc *isp;
    577 	ispasync_t cmd;
    578 	void *arg;
    579 {
    580 	int bus, tgt;
    581 	int s = splbio();
    582 	switch (cmd) {
    583 	case ISPASYNC_NEW_TGT_PARAMS:
    584 	if (IS_SCSI(isp) && isp->isp_dblev) {
    585 		sdparam *sdp = isp->isp_param;
    586 		char *wt;
    587 		int mhz, flags, period;
    588 
    589 		tgt = *((int *) arg);
    590 		bus = (tgt >> 16) & 0xffff;
    591 		tgt &= 0xffff;
    592 		sdp += bus;
    593 		flags = sdp->isp_devparam[tgt].cur_dflags;
    594 		period = sdp->isp_devparam[tgt].cur_period;
    595 
    596 		if ((flags & DPARM_SYNC) && period &&
    597 		    (sdp->isp_devparam[tgt].cur_offset) != 0) {
    598 			/*
    599 			 * There's some ambiguity about our negotiated speed
    600 			 * if we haven't detected LVD mode correctly (which
    601 			 * seems to happen, unfortunately). If we're in LVD
    602 			 * mode, then different rules apply about speed.
    603 			 */
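        			/*
        			 * For example, a period code of 0xa reports 40MHz
        			 * below; outside LVD mode a period of 25 (0x19)
        			 * works out to 1000 / (25 * 4) = 10MHz.
        			 */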
    604 			if (sdp->isp_lvdmode || period < 0xc) {
    605 				switch (period) {
    606 				case 0x9:
    607 					mhz = 80;
    608 					break;
    609 				case 0xa:
    610 					mhz = 40;
    611 					break;
    612 				case 0xb:
    613 					mhz = 33;
    614 					break;
    615 				case 0xc:
    616 					mhz = 25;
    617 					break;
    618 				default:
    619 					mhz = 1000 / (period * 4);
    620 					break;
    621 				}
    622 			} else {
    623 				mhz = 1000 / (period * 4);
    624 			}
    625 		} else {
    626 			mhz = 0;
    627 		}
    628 		switch (flags & (DPARM_WIDE|DPARM_TQING)) {
    629 		case DPARM_WIDE:
    630 			wt = ", 16 bit wide";
    631 			break;
    632 		case DPARM_TQING:
    633 			wt = ", Tagged Queueing Enabled";
    634 			break;
    635 		case DPARM_WIDE|DPARM_TQING:
    636 			wt = ", 16 bit wide, Tagged Queueing Enabled";
    637 			break;
    638 		default:
    639 			wt = " ";
    640 			break;
    641 		}
    642 		if (mhz) {
    643 			isp_prt(isp, ISP_LOGINFO,
    644 			    "Bus %d Target %d at %dMHz Max Offset %d%s",
    645 			    bus, tgt, mhz, sdp->isp_devparam[tgt].cur_offset,
    646 			    wt);
    647 		} else {
    648 			isp_prt(isp, ISP_LOGINFO,
    649 			    "Bus %d Target %d Async Mode%s", bus, tgt, wt);
    650 		}
    651 		break;
    652 	}
    653 	case ISPASYNC_BUS_RESET:
    654 		if (arg)
    655 			bus = *((int *) arg);
    656 		else
    657 			bus = 0;
    658 		isp_prt(isp, ISP_LOGINFO, "SCSI bus %d reset detected", bus);
    659 		break;
    660 	case ISPASYNC_LOOP_DOWN:
    661 		/*
    662 		 * Hopefully we get here in time to minimize the number
    663 		 * of commands we are firing off that are sure to die.
    664 		 */
    665 		isp->isp_osinfo.blocked = 1;
    666 		isp_prt(isp, ISP_LOGINFO, "Loop DOWN");
    667 		break;
    668 	case ISPASYNC_LOOP_UP:
    669 		isp->isp_osinfo.blocked = 0;
    670 		callout_reset(&isp->isp_osinfo._restart, 1,
    671 		    isp_internal_restart, isp);
    672 		isp_prt(isp, ISP_LOGINFO, "Loop UP");
    673 		break;
    674 	case ISPASYNC_PDB_CHANGED:
    675 	if (IS_FC(isp) && isp->isp_dblev) {
    676 		const char *fmt = "Target %d (Loop 0x%x) Port ID 0x%x "
    677 		    "role %s %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x";
    678 		static const char *roles[4] = {
    679 		    "No", "Target", "Initiator", "Target/Initiator"
    680 		};
    681 		char *ptr;
    682 		fcparam *fcp = isp->isp_param;
    683 		int tgt = *((int *) arg);
    684 		struct lportdb *lp = &fcp->portdb[tgt];
    685 
    686 		if (lp->valid) {
    687 			ptr = "arrived";
    688 		} else {
    689 			ptr = "disappeared";
    690 		}
    691 		isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid, lp->portid,
    692 		    roles[lp->roles & 0x3], ptr,
    693 		    (u_int32_t) (lp->port_wwn >> 32),
    694 		    (u_int32_t) (lp->port_wwn & 0xffffffffLL),
    695 		    (u_int32_t) (lp->node_wwn >> 32),
    696 		    (u_int32_t) (lp->node_wwn & 0xffffffffLL));
    697 		break;
    698 	}
    699 #ifdef	ISP2100_FABRIC
    700 	case ISPASYNC_CHANGE_NOTIFY:
    701 		isp_prt(isp, ISP_LOGINFO, "Name Server Database Changed");
    702 		break;
    703 	case ISPASYNC_FABRIC_DEV:
    704 	{
    705 		int target;
    706 		struct lportdb *lp;
    707 		sns_scrsp_t *resp = (sns_scrsp_t *) arg;
    708 		u_int32_t portid;
    709 		u_int64_t wwn;
    710 		fcparam *fcp = isp->isp_param;
    711 
    712 		portid =
    713 		    (((u_int32_t) resp->snscb_port_id[0]) << 16) |
    714 		    (((u_int32_t) resp->snscb_port_id[1]) << 8) |
    715 		    (((u_int32_t) resp->snscb_port_id[2]));
    716 		wwn =
    717 		    (((u_int64_t)resp->snscb_portname[0]) << 56) |
    718 		    (((u_int64_t)resp->snscb_portname[1]) << 48) |
    719 		    (((u_int64_t)resp->snscb_portname[2]) << 40) |
    720 		    (((u_int64_t)resp->snscb_portname[3]) << 32) |
    721 		    (((u_int64_t)resp->snscb_portname[4]) << 24) |
    722 		    (((u_int64_t)resp->snscb_portname[5]) << 16) |
    723 		    (((u_int64_t)resp->snscb_portname[6]) <<  8) |
    724 		    (((u_int64_t)resp->snscb_portname[7]));
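        		/*
        		 * For example, port name bytes 21:00:00:e0:8b:01:02:03
        		 * (illustrative values) assemble into the 64-bit WWN
        		 * 0x210000e08b010203, printed below as two 32-bit halves.
        		 */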
    725 		isp_prt(isp, ISP_LOGINFO,
    726 		    "Fabric Device (Type 0x%x)@PortID 0x%x WWN 0x%08x%08x",
    727 		    resp->snscb_port_type, portid, ((u_int32_t)(wwn >> 32)),
    728 		    ((u_int32_t)(wwn & 0xffffffff)));
    729 		if (resp->snscb_port_type != 2)
    730 			break;
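        		/*
        		 * If this WWN is already in the local port database,
        		 * there's nothing more to do; otherwise find a free
        		 * slot and record the new device there.
        		 */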
    731 		for (target = FC_SNS_ID+1; target < MAX_FC_TARG; target++) {
    732 			lp = &fcp->portdb[target];
    733 			if (lp->port_wwn == wwn)
    734 				break;
    735 		}
    736 		if (target < MAX_FC_TARG) {
    737 			break;
    738 		}
    739 		for (target = FC_SNS_ID+1; target < MAX_FC_TARG; target++) {
    740 			lp = &fcp->portdb[target];
    741 			if (lp->port_wwn == 0)
    742 				break;
    743 		}
    744 		if (target == MAX_FC_TARG) {
    745 			isp_prt(isp, ISP_LOGWARN,
    746 			    "no more space for fabric devices");
    747 			return (-1);
    748 		}
    749 		lp->port_wwn = lp->node_wwn = wwn;
    750 		lp->portid = portid;
    751 		break;
    752 	}
    753 #endif
    754 	default:
    755 		break;
    756 	}
    757 	(void) splx(s);
    758 	return (0);
    759 }
    760 
    761 #include <machine/stdarg.h>
    762 void
    763 #ifdef	__STDC__
    764 isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
    765 #else
    766 isp_prt(isp, level, fmt, va_alist)
    767 	struct ispsoftc *isp;
    768 	int level; char *fmt;
    769 	va_dcl
    770 #endif
    771 {
    772 	va_list ap;
    773 	if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
    774 		return;
    775 	}
    776 	printf("%s: ", isp->isp_name);
    777 	va_start(ap, fmt);
    778 	vprintf(fmt, ap);
    779 	va_end(ap);
    780 	printf("\n");
    781 }
    782