isp_netbsd.c revision 1.46
      1 /* $NetBSD: isp_netbsd.c,v 1.46 2001/07/07 01:44:21 mjacob Exp $ */
      2 /*
      3  * This driver, which is contained in NetBSD in the files:
      4  *
      5  *	sys/dev/ic/isp.c
      6  *	sys/dev/ic/isp_inline.h
      7  *	sys/dev/ic/isp_netbsd.c
      8  *	sys/dev/ic/isp_netbsd.h
      9  *	sys/dev/ic/isp_target.c
     10  *	sys/dev/ic/isp_target.h
     11  *	sys/dev/ic/isp_tpublic.h
     12  *	sys/dev/ic/ispmbox.h
     13  *	sys/dev/ic/ispreg.h
     14  *	sys/dev/ic/ispvar.h
     15  *	sys/microcode/isp/asm_sbus.h
     16  *	sys/microcode/isp/asm_1040.h
     17  *	sys/microcode/isp/asm_1080.h
     18  *	sys/microcode/isp/asm_12160.h
     19  *	sys/microcode/isp/asm_2100.h
     20  *	sys/microcode/isp/asm_2200.h
     21  *	sys/pci/isp_pci.c
     22  *	sys/sbus/isp_sbus.c
     23  *
     24  * is being actively maintained by Matthew Jacob (mjacob (at) netbsd.org).
     25  * This driver is also shared source with the FreeBSD, OpenBSD, Linux, and
     26  * Solaris versions. This tends to be an interesting maintenance problem.
     27  *
     28  * Please coordinate with Matthew Jacob on changes you wish to make here.
     29  */
     30 /*
     31  * Platform (NetBSD) dependent common attachment code for Qlogic adapters.
     32  * Matthew Jacob <mjacob (at) nas.nasa.gov>
     33  */
     34 /*
     35  * Copyright (C) 1997, 1998, 1999 National Aeronautics & Space Administration
     36  * All rights reserved.
     37  *
     38  * Redistribution and use in source and binary forms, with or without
     39  * modification, are permitted provided that the following conditions
     40  * are met:
     41  * 1. Redistributions of source code must retain the above copyright
     42  *    notice, this list of conditions and the following disclaimer.
     43  * 2. Redistributions in binary form must reproduce the above copyright
     44  *    notice, this list of conditions and the following disclaimer in the
     45  *    documentation and/or other materials provided with the distribution.
     46  * 3. The name of the author may not be used to endorse or promote products
     47  *    derived from this software without specific prior written permission
     48  *
     49  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     50  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     51  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     52  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     53  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
     54  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     55  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     56  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     57  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
     58  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     59  */
     60 
     61 #include <dev/ic/isp_netbsd.h>
     62 #include <sys/scsiio.h>
     63 
     64 
     65 /*
     66  * Set a timeout for the watchdogging of a command.
     67  *
     68  * The dimensional analysis is
     69  *
     70  *	milliseconds * (seconds/millisecond) * (ticks/second) = ticks
     71  *
     72  *			=
     73  *
     74  *	(milliseconds / 1000) * hz = ticks
     75  *
     76  *
     77  * For timeouts less than 1 second, we'll get zero. Because of this, and
     78  * because we want to establish *our* timeout to be longer than what the
     79  * firmware might do, we just add 3 seconds at the back end.
     80  */
     81 #define	_XT(xs)	((((xs)->timeout/1000) * hz) + (3 * hz))
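/*
 * Illustrative annotation (assuming hz == 100): a 250ms command timeout
 * gives (250 / 1000) * 100 == 0 ticks from the scaling step; the 3 * hz
 * pad is what keeps the watchdog from firing immediately, so it actually
 * goes off after 300 ticks (3 seconds).
 */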
     82 
     83 static void isp_config_interrupts(struct device *);
     84 static void ispminphys_1020(struct buf *);
     85 static void ispminphys(struct buf *);
     86 static INLINE void ispcmd(struct ispsoftc *, XS_T *);
     87 static void isprequest(struct scsipi_channel *, scsipi_adapter_req_t, void *);
     88 static int
     89 ispioctl(struct scsipi_channel *, u_long, caddr_t, int, struct proc *);
     90 
     91 static void isp_polled_cmd(struct ispsoftc *, XS_T *);
     92 static void isp_dog(void *);
     93 static void isp_create_fc_worker(void *);
     94 static void isp_fc_worker(void *);
     95 
     96 /*
     97  * Complete attachment of hardware, include subdevices.
     98  */
     99 void
    100 isp_attach(struct ispsoftc *isp)
    101 {
    102 	isp->isp_state = ISP_RUNSTATE;
    103 
    104 	isp->isp_osinfo._adapter.adapt_dev = &isp->isp_osinfo._dev;
    105 	isp->isp_osinfo._adapter.adapt_nchannels = IS_DUALBUS(isp) ? 2 : 1;
    106 	isp->isp_osinfo._adapter.adapt_openings = isp->isp_maxcmds;
    107 	/*
    108 	 * It's not stated whether max_periph is limited by SPI
    109 	 * tag usage, but let's assume that it is.
    110 	 */
    111 	isp->isp_osinfo._adapter.adapt_max_periph = min(isp->isp_maxcmds, 255);
    112 	isp->isp_osinfo._adapter.adapt_ioctl = ispioctl;
    113 	isp->isp_osinfo._adapter.adapt_request = isprequest;
    114 	if (isp->isp_type <= ISP_HA_SCSI_1020A) {
    115 		isp->isp_osinfo._adapter.adapt_minphys = ispminphys_1020;
    116 	} else {
    117 		isp->isp_osinfo._adapter.adapt_minphys = ispminphys;
    118 	}
    119 
    120 	isp->isp_osinfo._chan.chan_adapter = &isp->isp_osinfo._adapter;
    121 	isp->isp_osinfo._chan.chan_bustype = &scsi_bustype;
    122 	isp->isp_osinfo._chan.chan_channel = 0;
    123 
    124 	/*
    125 	 * Until the midlayer is fixed to use REPORT LUNS, limit to 8 luns.
    126 	 */
    127 	isp->isp_osinfo._chan.chan_nluns = min(isp->isp_maxluns, 8);
    128 
    129 	if (IS_FC(isp)) {
    130 		isp->isp_osinfo._chan.chan_ntargets = MAX_FC_TARG;
    131 		isp->isp_osinfo._chan.chan_id = MAX_FC_TARG;
    132 		isp->isp_osinfo.threadwork = 1;
    133 		/*
    134 		 * Note that isp_create_fc_worker won't get called
    135 		 * until much much later (after proc0 is created).
    136 		 */
    137 		kthread_create(isp_create_fc_worker, isp);
    138 	} else {
    139 		int bus = 0;
    140 		sdparam *sdp = isp->isp_param;
    141 
    142 		isp->isp_osinfo._chan.chan_ntargets = MAX_TARGETS;
    143 		isp->isp_osinfo._chan.chan_id = sdp->isp_initiator_id;
    144 		isp->isp_osinfo.discovered[0] = 1 << sdp->isp_initiator_id;
    145 		if (IS_DUALBUS(isp)) {
    146 			isp->isp_osinfo._chan_b = isp->isp_osinfo._chan;
    147 			sdp++;
    148 			isp->isp_osinfo.discovered[1] =
    149 			    1 << sdp->isp_initiator_id;
    150 			isp->isp_osinfo._chan_b.chan_id = sdp->isp_initiator_id;
    151 			isp->isp_osinfo._chan_b.chan_channel = 1;
    152 		}
    153 		ISP_LOCK(isp);
    154 		(void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
    155 		if (IS_DUALBUS(isp)) {
    156 			bus++;
    157 			(void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
    158 		}
    159 		ISP_UNLOCK(isp);
    160 	}
    161 
    162 
    163 	/*
    164 	 * Defer enabling mailbox interrupts until later.
    165 	 */
    166 	config_interrupts((struct device *) isp, isp_config_interrupts);
    167 
    168 	/*
    169 	 * And attach children (if any).
    170 	 */
    171 	config_found((void *)isp, &isp->isp_chanA, scsiprint);
    172 	if (IS_DUALBUS(isp)) {
    173 		config_found((void *)isp, &isp->isp_chanB, scsiprint);
    174 	}
    175 }
    176 
    177 
    178 static void
    179 isp_config_interrupts(struct device *self)
    180 {
    181 	struct ispsoftc *isp = (struct ispsoftc *) self;
    182 
    183 	/*
    184 	 * After this point, we'll be doing the new configuration
    185 	 * schema which allows interrupts, so we can do tsleep/wakeup
    186 	 * for mailbox stuff at that point.
    187 	 */
    188 	isp->isp_osinfo.no_mbox_ints = 0;
    189 }
    190 
    191 
    192 /*
    193  * minphys our xfers
    194  */
    195 
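/*
 * XXX annotation: the 1 << 24 clamp below presumably reflects a 16MB
 * per-transfer limit on the older ISP1020/1020A parts; everything newer
 * just gets a generous 1GB sanity cap.
 */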
    196 static void
    197 ispminphys_1020(struct buf *bp)
    198 {
    199 	if (bp->b_bcount >= (1 << 24)) {
    200 		bp->b_bcount = (1 << 24);
    201 	}
    202 	minphys(bp);
    203 }
    204 
    205 static void
    206 ispminphys(struct buf *bp)
    207 {
    208 	if (bp->b_bcount >= (1 << 30)) {
    209 		bp->b_bcount = (1 << 30);
    210 	}
    211 	minphys(bp);
    212 }
    213 
    214 static int
    215 ispioctl(struct scsipi_channel *chan, u_long cmd, caddr_t addr, int flag,
    216 	struct proc *p)
    217 {
    218 	struct ispsoftc *isp = (void *)chan->chan_adapter->adapt_dev;
    219 	int retval = ENOTTY;
    220 
    221 	switch (cmd) {
    222 	case SCBUSIORESET:
    223 		ISP_LOCK(isp);
    224 		if (isp_control(isp, ISPCTL_RESET_BUS, &chan->chan_channel))
    225 			retval = EIO;
    226 		else
    227 			retval = 0;
    228 		ISP_UNLOCK(isp);
    229 		break;
    230 	case ISP_SDBLEV:
    231 	{
    232 		int olddblev = isp->isp_dblev;
    233 		isp->isp_dblev = *(int *)addr;
    234 		*(int *)addr = olddblev;
    235 		retval = 0;
    236 		break;
    237 	}
    238 	case ISP_RESETHBA:
    239 		ISP_LOCK(isp);
    240 		isp_reinit(isp);
    241 		ISP_UNLOCK(isp);
    242 		retval = 0;
    243 		break;
    244 	case ISP_FC_RESCAN:
    245 		if (IS_FC(isp)) {
    246 			ISP_LOCK(isp);
    247 			if (isp_fc_runstate(isp, 5 * 1000000)) {
    248 				retval = EIO;
    249 			} else {
    250 				retval = 0;
    251 			}
    252 			ISP_UNLOCK(isp);
    253 		}
    254 		break;
    255 	case ISP_FC_LIP:
    256 		if (IS_FC(isp)) {
    257 			ISP_LOCK(isp);
    258 			if (isp_control(isp, ISPCTL_SEND_LIP, 0)) {
    259 				retval = EIO;
    260 			} else {
    261 				retval = 0;
    262 			}
    263 			ISP_UNLOCK(isp);
    264 		}
    265 		break;
    266 	case ISP_FC_GETDINFO:
    267 	{
    268 		struct isp_fc_device *ifc = (struct isp_fc_device *) addr;
    269 		struct lportdb *lp;
    270 
    271 		if (ifc->loopid < 0 || ifc->loopid >= MAX_FC_TARG) {
    272 			retval = EINVAL;
    273 			break;
    274 		}
    275 		ISP_LOCK(isp);
    276 		lp = &FCPARAM(isp)->portdb[ifc->loopid];
    277 		if (lp->valid) {
    278 			ifc->loopid = lp->loopid;
    279 			ifc->portid = lp->portid;
    280 			ifc->node_wwn = lp->node_wwn;
    281 			ifc->port_wwn = lp->port_wwn;
    282 			retval = 0;
    283 		} else {
    284 			retval = ENODEV;
    285 		}
    286 		ISP_UNLOCK(isp);
    287 		break;
    288 	}
    289 	default:
    290 		break;
    291 	}
    292 	return (retval);
    293 }
    294 
    295 static INLINE void
    296 ispcmd(struct ispsoftc *isp, XS_T *xs)
    297 {
    298 	ISP_LOCK(isp);
    299 	if (isp->isp_state < ISP_RUNSTATE) {
    300 		DISABLE_INTS(isp);
    301 		isp_init(isp);
    302 		if (isp->isp_state != ISP_INITSTATE) {
    303 			ENABLE_INTS(isp);
    304 			ISP_UNLOCK(isp);
    305 			XS_SETERR(xs, HBA_BOTCH);
    306 			scsipi_done(xs);
    307 			return;
    308 		}
    309 		isp->isp_state = ISP_RUNSTATE;
    310 		ENABLE_INTS(isp);
    311 	}
    312 	/*
    313 	 * Handle the case of a FC card where the FC thread hasn't
    314 	 * fired up yet and we have loop state to clean up. If we
    315 	 * can't clear things up and we've never seen loop up, bounce
    316 	 * the command.
    317 	 */
    318 	if (IS_FC(isp) && isp->isp_osinfo.threadwork &&
    319 	    isp->isp_osinfo.thread == 0) {
    320 		volatile u_int8_t ombi = isp->isp_osinfo.no_mbox_ints;
    321 		int delay_time;
    322 
    323 		if (xs->xs_control & XS_CTL_POLL) {
    324 			isp->isp_osinfo.no_mbox_ints = 1;
    325 		}
    326 
    327 		if (isp->isp_osinfo.loop_checked == 0) {
    328 			delay_time = 10 * 1000000;
    329 			isp->isp_osinfo.loop_checked = 1;
    330 		} else {
    331 			delay_time = 250000;
    332 		}
    333 
    334 		if (isp_fc_runstate(isp, delay_time) != 0) {
    335 			if (xs->xs_control & XS_CTL_POLL) {
    336 				isp->isp_osinfo.no_mbox_ints = ombi;
    337 			}
    338 			if (FCPARAM(isp)->loop_seen_once == 0) {
    339 				XS_SETERR(xs, HBA_SELTIMEOUT);
    340 				scsipi_done(xs);
    341 				ISP_UNLOCK(isp);
    342 				return;
    343 			}
    344 			/*
    345 			 * Otherwise, fall thru to be queued up for later.
    346 			 */
    347 		} else {
    348 			int wasblocked =
    349 			    (isp->isp_osinfo.blocked || isp->isp_osinfo.paused);
    350 			isp->isp_osinfo.threadwork = 0;
    351 			isp->isp_osinfo.blocked =
    352 			    isp->isp_osinfo.paused = 0;
    353 			if (wasblocked) {
    354 				scsipi_channel_thaw(&isp->isp_chanA, 1);
    355 			}
    356 		}
    357 		if (xs->xs_control & XS_CTL_POLL) {
    358 			isp->isp_osinfo.no_mbox_ints = ombi;
    359 		}
    360 	}
    361 
    362 	if (isp->isp_osinfo.paused) {
    363 		isp_prt(isp, ISP_LOGWARN, "I/O while paused");
    364 		xs->error = XS_RESOURCE_SHORTAGE;
    365 		scsipi_done(xs);
    366 		ISP_UNLOCK(isp);
    367 		return;
    368 	}
    369 	if (isp->isp_osinfo.blocked) {
    370 		isp_prt(isp, ISP_LOGWARN, "I/O while blocked");
    371 		xs->error = XS_REQUEUE;
    372 		scsipi_done(xs);
    373 		ISP_UNLOCK(isp);
    374 		return;
    375 	}
    376 
    377 	if (xs->xs_control & XS_CTL_POLL) {
    378 		volatile u_int8_t ombi = isp->isp_osinfo.no_mbox_ints;
    379 		isp->isp_osinfo.no_mbox_ints = 1;
    380 		isp_polled_cmd(isp, xs);
    381 		isp->isp_osinfo.no_mbox_ints = ombi;
    382 		ISP_UNLOCK(isp);
    383 		return;
    384 	}
    385 
    386 	switch (isp_start(xs)) {
    387 	case CMD_QUEUED:
    388 		if (xs->timeout) {
    389 			callout_reset(&xs->xs_callout, _XT(xs), isp_dog, xs);
    390 		}
    391 		break;
    392 	case CMD_EAGAIN:
    393 		isp->isp_osinfo.paused = 1;
    394 		xs->error = XS_RESOURCE_SHORTAGE;
    395 		scsipi_channel_freeze(&isp->isp_chanA, 1);
    396 		if (IS_DUALBUS(isp)) {
    397 			scsipi_channel_freeze(&isp->isp_chanB, 1);
    398 		}
    399 		scsipi_done(xs);
    400 		break;
    401 	case CMD_RQLATER:
    402 		/*
    403 		 * We can only get RQLATER from FC devices (1 channel only)
    404 		 *
    405 		 * Also, if we've never seen loop up, bounce the command
    406 		 * (somebody has booted with no FC cable connected)
    407 		 */
    408 		if (FCPARAM(isp)->loop_seen_once == 0) {
    409 			XS_SETERR(xs, HBA_SELTIMEOUT);
    410 			scsipi_done(xs);
    411 			break;
    412 		}
    413 		if (isp->isp_osinfo.blocked == 0) {
    414 			isp->isp_osinfo.blocked = 1;
    415 			scsipi_channel_freeze(&isp->isp_chanA, 1);
    416 		}
    417 		xs->error = XS_REQUEUE;
    418 		scsipi_done(xs);
    419 		break;
    420 	case CMD_COMPLETE:
    421 		scsipi_done(xs);
    422 		break;
    423 	}
    424 	ISP_UNLOCK(isp);
    425 }
    426 
    427 static void
    428 isprequest(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
    429 {
    430 	struct ispsoftc *isp = (void *)chan->chan_adapter->adapt_dev;
    431 
    432 	switch (req) {
    433 	case ADAPTER_REQ_RUN_XFER:
    434 		ispcmd(isp, (XS_T *) arg);
    435 		break;
    436 
    437 	case ADAPTER_REQ_GROW_RESOURCES:
    438 		/* Not supported. */
    439 		break;
    440 
    441 	case ADAPTER_REQ_SET_XFER_MODE:
    442 	if (IS_SCSI(isp)) {
    443 		struct scsipi_xfer_mode *xm = arg;
    444 		int dflags = 0;
    445 		sdparam *sdp = SDPARAM(isp);
    446 
    447 		sdp += chan->chan_channel;
    448 		if (xm->xm_mode & PERIPH_CAP_TQING)
    449 			dflags |= DPARM_TQING;
    450 		if (xm->xm_mode & PERIPH_CAP_WIDE16)
    451 			dflags |= DPARM_WIDE;
    452 		if (xm->xm_mode & PERIPH_CAP_SYNC)
    453 			dflags |= DPARM_SYNC;
    454 		ISP_LOCK(isp);
    455 		sdp->isp_devparam[xm->xm_target].dev_flags |= dflags;
    456 		dflags = sdp->isp_devparam[xm->xm_target].dev_flags;
    457 		sdp->isp_devparam[xm->xm_target].dev_update = 1;
    458 		isp->isp_update |= (1 << chan->chan_channel);
    459 		ISP_UNLOCK(isp);
    460 		isp_prt(isp, ISP_LOGDEBUG1,
    461 		    "isprequest: device flags 0x%x for %d.%d.X",
    462 		    dflags, chan->chan_channel, xm->xm_target);
    463 		break;
    464 	}
    465 	default:
    466 		break;
    467 	}
    468 }
    469 
    470 static void
    471 isp_polled_cmd(struct ispsoftc *isp, XS_T *xs)
    472 {
    473 	int result;
    474 	int infinite = 0, mswait;
    475 
    476 	result = isp_start(xs);
    477 
    478 	switch (result) {
    479 	case CMD_QUEUED:
    480 		break;
    481 	case CMD_RQLATER:
    482 		if (XS_NOERR(xs)) {
    483 			xs->error = XS_REQUEUE;
    484 		}
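		/* FALLTHROUGH */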
    485 	case CMD_EAGAIN:
    486 		if (XS_NOERR(xs)) {
    487 			xs->error = XS_RESOURCE_SHORTAGE;
    488 		}
    489 		/* FALLTHROUGH */
    490 	case CMD_COMPLETE:
    491 		scsipi_done(xs);
    492 		return;
    493 
    494 	}
    495 
    496 	/*
    497 	 * If we can't use interrupts, poll on completion.
    498 	 */
    499 	if ((mswait = XS_TIME(xs)) == 0)
    500 		infinite = 1;
    501 
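	/*
	 * Poll the chip roughly once a millisecond until the command
	 * completes or the timeout expires.
	 */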
    502 	while (mswait || infinite) {
    503 		if (isp_intr((void *)isp)) {
    504 			if (XS_CMD_DONE_P(xs)) {
    505 				break;
    506 			}
    507 		}
    508 		USEC_DELAY(1000);
    509 		mswait -= 1;
    510 	}
    511 
    512 	/*
    513 	 * If no other error occurred but we didn't finish,
    514 	 * something bad happened.
    515 	 */
    516 	if (XS_CMD_DONE_P(xs) == 0) {
    517 		if (isp_control(isp, ISPCTL_ABORT_CMD, xs)) {
    518 			isp_reinit(isp);
    519 		}
    520 		if (XS_NOERR(xs)) {
    521 			XS_SETERR(xs, HBA_BOTCH);
    522 		}
    523 	}
    524 	scsipi_done(xs);
    525 }
    526 
    527 void
    528 isp_done(XS_T *xs)
    529 {
    530 	XS_CMD_S_DONE(xs);
    531 	if (XS_CMD_WDOG_P(xs) == 0) {
    532 		struct ispsoftc *isp = XS_ISP(xs);
    533 		callout_stop(&xs->xs_callout);
    534 		if (XS_CMD_GRACE_P(xs)) {
    535 			isp_prt(isp, ISP_LOGDEBUG1,
    536 			    "finished command on borrowed time");
    537 		}
    538 		XS_CMD_S_CLEAR(xs);
    539 		/*
    540 		 * Fixup- if we get a QFULL, we need
    541 		 * to set XS_BUSY as the error.
    542 		 */
    543 		if (xs->status == SCSI_QUEUE_FULL) {
    544 			xs->error = XS_BUSY;
    545 		}
    546 		if (isp->isp_osinfo.paused) {
    547 			isp->isp_osinfo.paused = 0;
    548 			scsipi_channel_timed_thaw(&isp->isp_chanA);
    549 			if (IS_DUALBUS(isp)) {
    550 				scsipi_channel_timed_thaw(&isp->isp_chanB);
    551 			}
    552 		}
    553 		scsipi_done(xs);
    554 	}
    555 }
    556 
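/*
 * Per-command watchdog: fires when a command has been outstanding longer
 * than the timeout established via _XT() above.
 */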
    557 static void
    558 isp_dog(void *arg)
    559 {
    560 	XS_T *xs = arg;
    561 	struct ispsoftc *isp = XS_ISP(xs);
    562 	u_int16_t handle;
    563 
    564 	ISP_ILOCK(isp);
    565 	/*
    566 	 * We've decided this command is dead. Make sure we're not trying
    567 	 * to kill a command that's already dead by getting its handle
    568 	 * and seeing whether it's still alive.
    569 	 */
    570 	handle = isp_find_handle(isp, xs);
    571 	if (handle) {
    572 		u_int16_t r, r1, i;
    573 
    574 		if (XS_CMD_DONE_P(xs)) {
    575 			isp_prt(isp, ISP_LOGDEBUG1,
    576 			    "watchdog found done cmd (handle 0x%x)", handle);
    577 			ISP_IUNLOCK(isp);
    578 			return;
    579 		}
    580 
    581 		if (XS_CMD_WDOG_P(xs)) {
    582 			isp_prt(isp, ISP_LOGDEBUG1,
    583 			    "recursive watchdog (handle 0x%x)", handle);
    584 			ISP_IUNLOCK(isp);
    585 			return;
    586 		}
    587 
    588 		XS_CMD_S_WDOG(xs);
    589 
    590 		i = 0;
    591 		do {
    592 			r = ISP_READ(isp, BIU_ISR);
    593 			USEC_DELAY(1);
    594 			r1 = ISP_READ(isp, BIU_ISR);
    595 		} while (r != r1 && ++i < 1000);
    596 
    597 		if (INT_PENDING(isp, r) && isp_intr(isp) && XS_CMD_DONE_P(xs)) {
    598 			isp_prt(isp, ISP_LOGDEBUG1, "watchdog cleanup (%x, %x)",
    599 			    handle, r);
    600 			XS_CMD_C_WDOG(xs);
    601 			isp_done(xs);
    602 		} else if (XS_CMD_GRACE_P(xs)) {
    603 			isp_prt(isp, ISP_LOGDEBUG1, "watchdog timeout (%x, %x)",
    604 			    handle, r);
    605 			/*
    606 			 * Make sure the command is *really* dead before we
    607 			 * release the handle (and DMA resources) for reuse.
    608 			 */
    609 			(void) isp_control(isp, ISPCTL_ABORT_CMD, arg);
    610 
    611 			/*
    612 			 * After this point, the command is really dead.
    613 			 */
    614 			if (XS_XFRLEN(xs)) {
    615 				ISP_DMAFREE(isp, xs, handle);
    616 			}
    617 			isp_destroy_handle(isp, handle);
    618 			XS_SETERR(xs, XS_TIMEOUT);
    619 			XS_CMD_S_CLEAR(xs);
    620 			isp_done(xs);
    621 		} else {
    622 			u_int16_t iptr, optr;
    623 			ispreq_t *mp;
    624 			isp_prt(isp, ISP_LOGDEBUG2,
    625 			    "possible command timeout (%x, %x)", handle, r);
    626 			XS_CMD_C_WDOG(xs);
    627 			callout_reset(&xs->xs_callout, hz, isp_dog, xs);
    628 			if (isp_getrqentry(isp, &iptr, &optr, (void **) &mp)) {
    629 				ISP_IUNLOCK(isp);
    630 				return;
    631 			}
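			/*
			 * XXX annotation: mark the command as being on
			 * borrowed time and push a SYNC_ALL marker through
			 * the request queue, apparently to prod the firmware
			 * into posting any completion it may be sitting on.
			 */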
    632 			XS_CMD_S_GRACE(xs);
    633 			MEMZERO((void *) mp, sizeof (*mp));
    634 			mp->req_header.rqs_entry_count = 1;
    635 			mp->req_header.rqs_entry_type = RQSTYPE_MARKER;
    636 			mp->req_modifier = SYNC_ALL;
    637 			mp->req_target = XS_CHANNEL(xs) << 7;
    638 			ISP_SWIZZLE_REQUEST(isp, mp);
    639 			ISP_ADD_REQUEST(isp, iptr);
    640 		}
    641 	} else {
    642 		isp_prt(isp, ISP_LOGDEBUG0, "watchdog with no command");
    643 	}
    644 	ISP_IUNLOCK(isp);
    645 }
    646 
    647 /*
    648  * Fibre Channel state cleanup thread
    649  */
    650 static void
    651 isp_create_fc_worker(void *arg)
    652 {
    653 	struct ispsoftc *isp = arg;
    654 
    655 	if (kthread_create1(isp_fc_worker, isp, &isp->isp_osinfo.thread,
    656 	    "%s:fc_thrd", isp->isp_name)) {
    657 		isp_prt(isp, ISP_LOGERR, "unable to create FC worker thread");
    658 		panic("isp_create_fc_worker");
    659 	}
    660 
    661 }
    662 
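/*
 * FC worker thread body: wait for work, drive loop/fabric state with
 * isp_fc_runstate(), and thaw the midlayer queue once the loop is
 * usable again.
 */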
    663 static void
    664 isp_fc_worker(void *arg)
    665 {
    666 	void scsipi_run_queue(struct scsipi_channel *);
    667 	struct ispsoftc *isp = arg;
    668 
    669 	for (;;) {
    670 		int s;
    671 
    672 		/*
    673 		 * Note we do *not* use the ISP_LOCK/ISP_UNLOCK macros here.
    674 		 */
    675 		s = splbio();
    676 		while (isp->isp_osinfo.threadwork) {
    677 			isp->isp_osinfo.threadwork = 0;
    678 			if (isp_fc_runstate(isp, 10 * 1000000) == 0) {
    679 				break;
    680 			}
    681 			isp->isp_osinfo.threadwork = 1;
    682 			splx(s);
    683 			delay(500 * 1000);
    684 			s = splbio();
    685 		}
    686 		if (FCPARAM(isp)->isp_fwstate != FW_READY ||
    687 		    FCPARAM(isp)->isp_loopstate != LOOP_READY) {
    688 			isp_prt(isp, ISP_LOGINFO, "isp_fc_runstate in vain");
    689 			isp->isp_osinfo.threadwork = 1;
    690 			splx(s);
    691 			continue;
    692 		}
    693 
    694 		if (isp->isp_osinfo.blocked) {
    695 			isp->isp_osinfo.blocked = 0;
    696 			isp_prt(isp, /* ISP_LOGDEBUG0 */ ISP_LOGALL,
			    "restarting queues (freeze count %d)",
			    isp->isp_chanA.chan_qfreeze);
    697 
    698 			scsipi_channel_thaw(&isp->isp_chanA, 1);
    699 		}
    700 
    701 		if (isp->isp_osinfo.thread == NULL)
    702 			break;
    703 
    704 		(void) tsleep(&isp->isp_osinfo.thread, PRIBIO, "fcclnup", 0);
    705 
    706 		splx(s);
    707 	}
    708 
    709 	/* In case parent is waiting for us to exit. */
    710 	wakeup(&isp->isp_osinfo.thread);
    711 
    712 	kthread_exit(0);
    713 }
    714 
    715 /*
    716  * Free any associated resources prior to decommissioning and
    717  * set the card to a known state (so it doesn't wake up and kick
    718  * us when we aren't expecting it to).
    719  *
    720  * Locks are held before coming here.
    721  */
    722 void
    723 isp_uninit(struct ispsoftc *isp)
    724 {
    725 	isp_lock(isp);
    726 	/*
    727 	 * Leave with interrupts disabled.
    728 	 */
    729 	DISABLE_INTS(isp);
    730 	isp_unlock(isp);
    731 }
    732 
    733 int
    734 isp_async(struct ispsoftc *isp, ispasync_t cmd, void *arg)
    735 {
    736 	int bus, tgt;
    737 
    738 	switch (cmd) {
    739 	case ISPASYNC_NEW_TGT_PARAMS:
    740 	if (IS_SCSI(isp) && isp->isp_dblev) {
    741 		sdparam *sdp = isp->isp_param;
    742 		int flags;
    743 		struct scsipi_xfer_mode xm;
    744 
    745 		tgt = *((int *) arg);
    746 		bus = (tgt >> 16) & 0xffff;
    747 		tgt &= 0xffff;
    748 		sdp += bus;
    749 		flags = sdp->isp_devparam[tgt].cur_dflags;
    750 
    751 		xm.xm_mode = 0;
    752 		xm.xm_period = sdp->isp_devparam[tgt].cur_period;
    753 		xm.xm_offset = sdp->isp_devparam[tgt].cur_offset;
    754 		xm.xm_target = tgt;
    755 
    756 		if ((flags & DPARM_SYNC) && xm.xm_period && xm.xm_offset)
    757 			xm.xm_mode |= PERIPH_CAP_SYNC;
    758 		if (flags & DPARM_WIDE)
    759 			xm.xm_mode |= PERIPH_CAP_WIDE16;
    760 		if (flags & DPARM_TQING)
    761 			xm.xm_mode |= PERIPH_CAP_TQING;
    762 		scsipi_async_event(bus? &isp->isp_chanB : &isp->isp_chanA,
    763 		    ASYNC_EVENT_XFER_MODE, &xm);
    764 		break;
    765 	}
    766 	case ISPASYNC_BUS_RESET:
    767 		bus = *((int *) arg);
    768 		scsipi_async_event(bus? &isp->isp_chanB : &isp->isp_chanA,
    769 		    ASYNC_EVENT_RESET, NULL);
    770 		isp_prt(isp, ISP_LOGINFO, "SCSI bus %d reset detected", bus);
    771 		break;
    772 	case ISPASYNC_LIP:
    773 		/*
    774 		 * Don't do queue freezes or blockage until we have the
    775 		 * thread running that can unfreeze/unblock us.
    776 		 */
    777 		if (isp->isp_osinfo.blocked == 0)  {
    778 			if (isp->isp_osinfo.thread) {
    779 				isp->isp_osinfo.blocked = 1;
    780 				scsipi_channel_freeze(&isp->isp_chanA, 1);
    781 			}
    782 		}
    783 		isp_prt(isp, ISP_LOGINFO, "LIP Received");
    784 		break;
    785 	case ISPASYNC_LOOP_RESET:
    786 		/*
    787 		 * Don't do queue freezes or blockage until we have the
    788 		 * thread running that can unfreeze/unblock us.
    789 		 */
    790 		if (isp->isp_osinfo.blocked == 0) {
    791 			if (isp->isp_osinfo.thread) {
    792 				isp->isp_osinfo.blocked = 1;
    793 				scsipi_channel_freeze(&isp->isp_chanA, 1);
    794 			}
    795 		}
    796 		isp_prt(isp, ISP_LOGINFO, "Loop Reset Received");
    797 		break;
    798 	case ISPASYNC_LOOP_DOWN:
    799 		/*
    800 		 * Don't do queue freezes or blockage until we have the
    801 		 * thread running that can unfreeze/unblock us.
    802 		 */
    803 		if (isp->isp_osinfo.blocked == 0) {
    804 			if (isp->isp_osinfo.thread) {
    805 				isp->isp_osinfo.blocked = 1;
    806 				scsipi_channel_freeze(&isp->isp_chanA, 1);
    807 			}
    808 		}
    809 		isp_prt(isp, ISP_LOGINFO, "Loop DOWN");
    810 		break;
    811 	case ISPASYNC_LOOP_UP:
    812 		/*
    813 		 * Let the subsequent ISPASYNC_CHANGE_NOTIFY invoke
    814 		 * the FC worker thread. When the FC worker thread
    815 		 * is done, let *it* call scsipi_channel_thaw...
    816 		 */
    817 		isp_prt(isp, ISP_LOGINFO, "Loop UP");
    818 		break;
    819 	case ISPASYNC_PROMENADE:
    820 	if (IS_FC(isp) && isp->isp_dblev) {
    821 		const char fmt[] = "Target %d (Loop 0x%x) Port ID 0x%x "
    822 		    "(role %s) %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x";
    823 		const static char *roles[4] = {
    824 		    "None", "Target", "Initiator", "Target/Initiator"
    825 		};
    826 		fcparam *fcp = isp->isp_param;
    827 		int tgt = *((int *) arg);
    828 		struct lportdb *lp = &fcp->portdb[tgt];
    829 
    830 		isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid, lp->portid,
    831 		    roles[lp->roles & 0x3],
    832 		    (lp->valid)? "Arrived" : "Departed",
    833 		    (u_int32_t) (lp->port_wwn >> 32),
    834 		    (u_int32_t) (lp->port_wwn & 0xffffffffLL),
    835 		    (u_int32_t) (lp->node_wwn >> 32),
    836 		    (u_int32_t) (lp->node_wwn & 0xffffffffLL));
    837 		break;
    838 	}
    839 	case ISPASYNC_CHANGE_NOTIFY:
    840 		if (arg == ISPASYNC_CHANGE_PDB) {
    841 			isp_prt(isp, ISP_LOGINFO, "Port Database Changed");
    842 		} else if (arg == ISPASYNC_CHANGE_SNS) {
    843 			isp_prt(isp, ISP_LOGINFO,
    844 			    "Name Server Database Changed");
    845 		}
    846 
    847 		/*
    848 		 * We can set blocked here because we know it's now okay
    849 		 * to try and run isp_fc_runstate (in order to build loop
    850 		 * state). But we don't try and freeze the midlayer's queue
    851 		 * if we have no thread that we can wake to later unfreeze
    852 		 * it.
    853 		 */
    854 		if (isp->isp_osinfo.blocked == 0) {
    855 			isp->isp_osinfo.blocked = 1;
    856 			if (isp->isp_osinfo.thread) {
    857 				scsipi_channel_freeze(&isp->isp_chanA, 1);
    858 			}
    859 		}
    860 		/*
    861 		 * Note that we have work for the thread to do, and
    862 		 * if the thread is here already, wake it up.
    863 		 */
    864 		isp->isp_osinfo.threadwork++;
    865 		if (isp->isp_osinfo.thread) {
    866 			wakeup(&isp->isp_osinfo.thread);
    867 		} else {
    868 			isp_prt(isp, ISP_LOGDEBUG1, "no FC thread yet");
    869 		}
    870 		break;
    871 	case ISPASYNC_FABRIC_DEV:
    872 	{
    873 		int target, lrange;
    874 		struct lportdb *lp = NULL;
    875 		char *pt;
    876 		sns_ganrsp_t *resp = (sns_ganrsp_t *) arg;
    877 		u_int32_t portid;
    878 		u_int64_t wwpn, wwnn;
    879 		fcparam *fcp = isp->isp_param;
    880 
    881 		portid =
    882 		    (((u_int32_t) resp->snscb_port_id[0]) << 16) |
    883 		    (((u_int32_t) resp->snscb_port_id[1]) << 8) |
    884 		    (((u_int32_t) resp->snscb_port_id[2]));
    885 
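		/* Assemble the 64-bit WWPN/WWNN from the big-endian byte arrays. */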
    886 		wwpn =
    887 		    (((u_int64_t)resp->snscb_portname[0]) << 56) |
    888 		    (((u_int64_t)resp->snscb_portname[1]) << 48) |
    889 		    (((u_int64_t)resp->snscb_portname[2]) << 40) |
    890 		    (((u_int64_t)resp->snscb_portname[3]) << 32) |
    891 		    (((u_int64_t)resp->snscb_portname[4]) << 24) |
    892 		    (((u_int64_t)resp->snscb_portname[5]) << 16) |
    893 		    (((u_int64_t)resp->snscb_portname[6]) <<  8) |
    894 		    (((u_int64_t)resp->snscb_portname[7]));
    895 
    896 		wwnn =
    897 		    (((u_int64_t)resp->snscb_nodename[0]) << 56) |
    898 		    (((u_int64_t)resp->snscb_nodename[1]) << 48) |
    899 		    (((u_int64_t)resp->snscb_nodename[2]) << 40) |
    900 		    (((u_int64_t)resp->snscb_nodename[3]) << 32) |
    901 		    (((u_int64_t)resp->snscb_nodename[4]) << 24) |
    902 		    (((u_int64_t)resp->snscb_nodename[5]) << 16) |
    903 		    (((u_int64_t)resp->snscb_nodename[6]) <<  8) |
    904 		    (((u_int64_t)resp->snscb_nodename[7]));
    905 		if (portid == 0 || wwpn == 0) {
    906 			break;
    907 		}
    908 
    909 		switch (resp->snscb_port_type) {
    910 		case 1:
    911 			pt = "   N_Port";
    912 			break;
    913 		case 2:
    914 			pt = "  NL_Port";
    915 			break;
    916 		case 3:
    917 			pt = "F/NL_Port";
    918 			break;
    919 		case 0x7f:
    920 			pt = "  Nx_Port";
    921 			break;
    922 		case 0x81:
    923 			pt = "  F_port";
    924 			break;
    925 		case 0x82:
    926 			pt = "  FL_Port";
    927 			break;
    928 		case 0x84:
    929 			pt = "   E_port";
    930 			break;
    931 		default:
    932 			pt = "?";
    933 			break;
    934 		}
    935 		isp_prt(isp, ISP_LOGINFO,
    936 		    "%s @ 0x%x, Node 0x%08x%08x Port 0x%08x%08x",
    937 		    pt, portid, ((u_int32_t) (wwnn >> 32)), ((u_int32_t) wwnn),
    938 		    ((u_int32_t) (wwpn >> 32)), ((u_int32_t) wwpn));
    939 		/*
    940 		 * We're only interested in SCSI_FCP types (for now)
    941 		 */
    942 		if ((resp->snscb_fc4_types[2] & 1) == 0) {
    943 			break;
    944 		}
    945 		if (fcp->isp_topo != TOPO_F_PORT)
    946 			lrange = FC_SNS_ID+1;
    947 		else
    948 			lrange = 0;
    949 		/*
    950 		 * Is it already in our list?
    951 		 */
    952 		for (target = lrange; target < MAX_FC_TARG; target++) {
    953 			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
    954 				continue;
    955 			}
    956 			lp = &fcp->portdb[target];
    957 			if (lp->port_wwn == wwpn && lp->node_wwn == wwnn) {
    958 				lp->fabric_dev = 1;
    959 				break;
    960 			}
    961 		}
    962 		if (target < MAX_FC_TARG) {
    963 			break;
    964 		}
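		/*
		 * Not already known; claim the first free slot outside the
		 * reserved loop/SNS ID range.
		 */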
    965 		for (target = lrange; target < MAX_FC_TARG; target++) {
    966 			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
    967 				continue;
    968 			}
    969 			lp = &fcp->portdb[target];
    970 			if (lp->port_wwn == 0) {
    971 				break;
    972 			}
    973 		}
    974 		if (target == MAX_FC_TARG) {
    975 			isp_prt(isp, ISP_LOGWARN,
    976 			    "no more space for fabric devices");
    977 			break;
    978 		}
    979 		lp->node_wwn = wwnn;
    980 		lp->port_wwn = wwpn;
    981 		lp->portid = portid;
    982 		lp->fabric_dev = 1;
    983 		break;
    984 	}
    985 	case ISPASYNC_FW_CRASH:
    986 	{
    987 		u_int16_t mbox1, mbox6;
    988 		mbox1 = ISP_READ(isp, OUTMAILBOX1);
    989 		if (IS_DUALBUS(isp)) {
    990 			mbox6 = ISP_READ(isp, OUTMAILBOX6);
    991 		} else {
    992 			mbox6 = 0;
    993 		}
    994 		isp_prt(isp, ISP_LOGERR,
    995 		    "Internal Firmware Error on bus %d @ RISC Address 0x%x",
    996 		    mbox6, mbox1);
    997 		isp_reinit(isp);
    998 		break;
    999 	}
   1000 	default:
   1001 		break;
   1002 	}
   1003 	return (0);
   1004 }
   1005 
   1006 #include <machine/stdarg.h>
   1007 void
   1008 isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
   1009 {
   1010 	va_list ap;
   1011 	if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
   1012 		return;
   1013 	}
   1014 	printf("%s: ", isp->isp_name);
   1015 	va_start(ap, fmt);
   1016 	vprintf(fmt, ap);
   1017 	va_end(ap);
   1018 	printf("\n");
   1019 }
   1020