Home | History | Annotate | Line # | Download | only in ic
isp_netbsd.c revision 1.51
      1 /* $NetBSD: isp_netbsd.c,v 1.51 2001/12/14 00:13:45 mjacob Exp $ */
      2 /*
      3  * This driver, which is contained in NetBSD in the files:
      4  *
      5  *	sys/dev/ic/isp.c
      6  *	sys/dev/ic/isp_inline.h
      7  *	sys/dev/ic/isp_netbsd.c
      8  *	sys/dev/ic/isp_netbsd.h
      9  *	sys/dev/ic/isp_target.c
     10  *	sys/dev/ic/isp_target.h
     11  *	sys/dev/ic/isp_tpublic.h
     12  *	sys/dev/ic/ispmbox.h
     13  *	sys/dev/ic/ispreg.h
     14  *	sys/dev/ic/ispvar.h
     15  *	sys/microcode/isp/asm_sbus.h
     16  *	sys/microcode/isp/asm_1040.h
     17  *	sys/microcode/isp/asm_1080.h
     18  *	sys/microcode/isp/asm_12160.h
     19  *	sys/microcode/isp/asm_2100.h
     20  *	sys/microcode/isp/asm_2200.h
     21  *	sys/pci/isp_pci.c
     22  *	sys/sbus/isp_sbus.c
     23  *
     24  * Is being actively maintained by Matthew Jacob (mjacob (at) netbsd.org).
     25  * This driver also is shared source with FreeBSD, OpenBSD, Linux, Solaris,
     26  * Linux versions. This tends to be an interesting maintenance problem.
     27  *
     28  * Please coordinate with Matthew Jacob on changes you wish to make here.
     29  */
     30 /*
     31  * Platform (NetBSD) dependent common attachment code for Qlogic adapters.
     32  * Matthew Jacob <mjacob (at) nas.nasa.gov>
     33  */
     34 /*
     35  * Copyright (C) 1997, 1998, 1999 National Aeronautics & Space Administration
     36  * All rights reserved.
     37  *
     38  * Redistribution and use in source and binary forms, with or without
     39  * modification, are permitted provided that the following conditions
     40  * are met:
     41  * 1. Redistributions of source code must retain the above copyright
     42  *    notice, this list of conditions and the following disclaimer.
     43  * 2. Redistributions in binary form must reproduce the above copyright
     44  *    notice, this list of conditions and the following disclaimer in the
     45  *    documentation and/or other materials provided with the distribution.
     46  * 3. The name of the author may not be used to endorse or promote products
     47  *    derived from this software without specific prior written permission
     48  *
     49  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     50  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     51  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     52  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     53  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
     54  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     55  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     56  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     57  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
     58  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     59  */
     60 
     61 #include <sys/cdefs.h>
     62 __KERNEL_RCSID(0, "$NetBSD: isp_netbsd.c,v 1.51 2001/12/14 00:13:45 mjacob Exp $");
     63 
     64 #include <dev/ic/isp_netbsd.h>
     65 #include <sys/scsiio.h>
     66 
     67 
     68 /*
     69  * Set a timeout for the watchdogging of a command.
     70  *
     71  * The dimensional analysis is
     72  *
     73  *	milliseconds * (seconds/millisecond) * (ticks/second) = ticks
     74  *
     75  *			=
     76  *
     77  *	(milliseconds / 1000) * hz = ticks
     78  *
     79  *
     80  * For timeouts less than 1 second, we'll get zero. Because of this, and
     81  * because we want to establish *our* timeout to be longer than what the
     82  * firmware might do, we just add 3 seconds at the back end.
     83  */
     84 #define	_XT(xs)	((((xs)->timeout/1000) * hz) + (3 * hz))
     85 
     86 static void isp_config_interrupts(struct device *);
     87 static void ispminphys_1020(struct buf *);
     88 static void ispminphys(struct buf *);
     89 static INLINE void ispcmd(struct ispsoftc *, XS_T *);
     90 static void isprequest(struct scsipi_channel *, scsipi_adapter_req_t, void *);
     91 static int
     92 ispioctl(struct scsipi_channel *, u_long, caddr_t, int, struct proc *);
     93 
     94 static void isp_polled_cmd(struct ispsoftc *, XS_T *);
     95 static void isp_dog(void *);
     96 static void isp_create_fc_worker(void *);
     97 static void isp_fc_worker(void *);
     98 
     99 /*
    100  * Complete attachment of hardware, include subdevices.
    101  */
    102 void
    103 isp_attach(struct ispsoftc *isp)
    104 {
    105 	isp->isp_state = ISP_RUNSTATE;
    106 
    107 	isp->isp_osinfo._adapter.adapt_dev = &isp->isp_osinfo._dev;
    108 	isp->isp_osinfo._adapter.adapt_nchannels = IS_DUALBUS(isp) ? 2 : 1;
    109 	isp->isp_osinfo._adapter.adapt_openings = isp->isp_maxcmds;
    110 	/*
    111 	 * It's not stated whether max_periph is limited by SPI
    112 	 * tag uage, but let's assume that it is.
    113 	 */
    114 	isp->isp_osinfo._adapter.adapt_max_periph = min(isp->isp_maxcmds, 255);
    115 	isp->isp_osinfo._adapter.adapt_ioctl = ispioctl;
    116 	isp->isp_osinfo._adapter.adapt_request = isprequest;
    117 	if (isp->isp_type <= ISP_HA_SCSI_1020A) {
    118 		isp->isp_osinfo._adapter.adapt_minphys = ispminphys_1020;
    119 	} else {
    120 		isp->isp_osinfo._adapter.adapt_minphys = ispminphys;
    121 	}
    122 
    123 	isp->isp_osinfo._chan.chan_adapter = &isp->isp_osinfo._adapter;
    124 	isp->isp_osinfo._chan.chan_bustype = &scsi_bustype;
    125 	isp->isp_osinfo._chan.chan_channel = 0;
    126 
    127 	/*
    128 	 * Until the midlayer is fixed to use REPORT LUNS, limit to 8 luns.
    129 	 */
    130 	isp->isp_osinfo._chan.chan_nluns = min(isp->isp_maxluns, 8);
    131 
    132 	if (IS_FC(isp)) {
    133 		isp->isp_osinfo._chan.chan_ntargets = MAX_FC_TARG;
    134 		isp->isp_osinfo._chan.chan_id = MAX_FC_TARG;
    135 		isp->isp_osinfo.threadwork = 1;
    136 		/*
    137 		 * Note that isp_create_fc_worker won't get called
    138 		 * until much much later (after proc0 is created).
    139 		 */
    140 		kthread_create(isp_create_fc_worker, isp);
    141 	} else {
    142 		int bus = 0;
    143 		sdparam *sdp = isp->isp_param;
    144 
    145 		isp->isp_osinfo._chan.chan_ntargets = MAX_TARGETS;
    146 		isp->isp_osinfo._chan.chan_id = sdp->isp_initiator_id;
    147 		isp->isp_osinfo.discovered[0] = 1 << sdp->isp_initiator_id;
    148 		if (IS_DUALBUS(isp)) {
    149 			isp->isp_osinfo._chan_b = isp->isp_osinfo._chan;
    150 			sdp++;
    151 			isp->isp_osinfo.discovered[1] =
    152 			    1 << sdp->isp_initiator_id;
    153 			isp->isp_osinfo._chan_b.chan_id = sdp->isp_initiator_id;
    154 			isp->isp_osinfo._chan_b.chan_channel = 1;
    155 		}
    156 		ISP_LOCK(isp);
    157 		(void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
    158 		if (IS_DUALBUS(isp)) {
    159 			bus++;
    160 			(void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
    161 		}
    162 		ISP_UNLOCK(isp);
    163 	}
    164 
    165 
    166 	/*
    167          * Defer enabling mailbox interrupts until later.
    168          */
    169         config_interrupts((struct device *) isp, isp_config_interrupts);
    170 
    171 	/*
    172 	 * And attach children (if any).
    173 	 */
    174 	config_found((void *)isp, &isp->isp_chanA, scsiprint);
    175 	if (IS_DUALBUS(isp)) {
    176 		config_found((void *)isp, &isp->isp_chanB, scsiprint);
    177 	}
    178 }
    179 
    180 
    181 static void
    182 isp_config_interrupts(struct device *self)
    183 {
    184         struct ispsoftc *isp = (struct ispsoftc *) self;
    185 
    186 	/*
    187 	 * After this point, we'll be doing the new configuration
    188 	 * schema which allows interrups, so we can do tsleep/wakeup
    189 	 * for mailbox stuff at that point.
    190 	 */
    191 	isp->isp_osinfo.no_mbox_ints = 0;
    192 }
    193 
    194 
    195 /*
    196  * minphys our xfers
    197  */
    198 
    199 static void
    200 ispminphys_1020(struct buf *bp)
    201 {
    202 	if (bp->b_bcount >= (1 << 24)) {
    203 		bp->b_bcount = (1 << 24);
    204 	}
    205 	minphys(bp);
    206 }
    207 
    208 static void
    209 ispminphys(struct buf *bp)
    210 {
    211 	if (bp->b_bcount >= (1 << 30)) {
    212 		bp->b_bcount = (1 << 30);
    213 	}
    214 	minphys(bp);
    215 }
    216 
    217 static int
    218 ispioctl(struct scsipi_channel *chan, u_long cmd, caddr_t addr, int flag,
    219 	struct proc *p)
    220 {
    221 	struct ispsoftc *isp = (void *)chan->chan_adapter->adapt_dev;
    222 	int retval = ENOTTY;
    223 
    224 	switch (cmd) {
    225 	case SCBUSIORESET:
    226 		ISP_LOCK(isp);
    227 		if (isp_control(isp, ISPCTL_RESET_BUS, &chan->chan_channel))
    228 			retval = EIO;
    229 		else
    230 			retval = 0;
    231 		ISP_UNLOCK(isp);
    232 		break;
    233 	case ISP_SDBLEV:
    234 	{
    235 		int olddblev = isp->isp_dblev;
    236 		isp->isp_dblev = *(int *)addr;
    237 		*(int *)addr = olddblev;
    238 		retval = 0;
    239 		break;
    240 	}
    241 	case ISP_RESETHBA:
    242 		ISP_LOCK(isp);
    243 		isp_reinit(isp);
    244 		ISP_UNLOCK(isp);
    245 		retval = 0;
    246 		break;
    247 	case ISP_FC_RESCAN:
    248 		if (IS_FC(isp)) {
    249 			ISP_LOCK(isp);
    250 			if (isp_fc_runstate(isp, 5 * 1000000)) {
    251 				retval = EIO;
    252 			} else {
    253 				retval = 0;
    254 			}
    255 			ISP_UNLOCK(isp);
    256 		}
    257 		break;
    258 	case ISP_FC_LIP:
    259 		if (IS_FC(isp)) {
    260 			ISP_LOCK(isp);
    261 			if (isp_control(isp, ISPCTL_SEND_LIP, 0)) {
    262 				retval = EIO;
    263 			} else {
    264 				retval = 0;
    265 			}
    266 			ISP_UNLOCK(isp);
    267 		}
    268 		break;
    269 	case ISP_FC_GETDINFO:
    270 	{
    271 		struct isp_fc_device *ifc = (struct isp_fc_device *) addr;
    272 		struct lportdb *lp;
    273 
    274 		if (ifc->loopid < 0 || ifc->loopid >= MAX_FC_TARG) {
    275 			retval = EINVAL;
    276 			break;
    277 		}
    278 		ISP_LOCK(isp);
    279 		lp = &FCPARAM(isp)->portdb[ifc->loopid];
    280 		if (lp->valid) {
    281 			ifc->loopid = lp->loopid;
    282 			ifc->portid = lp->portid;
    283 			ifc->node_wwn = lp->node_wwn;
    284 			ifc->port_wwn = lp->port_wwn;
    285 			retval = 0;
    286 		} else {
    287 			retval = ENODEV;
    288 		}
    289 		ISP_UNLOCK(isp);
    290 		break;
    291 	}
    292 	default:
    293 		break;
    294 	}
    295 	return (retval);
    296 }
    297 
    298 static INLINE void
    299 ispcmd(struct ispsoftc *isp, XS_T *xs)
    300 {
    301 	ISP_LOCK(isp);
    302 	if (isp->isp_state < ISP_RUNSTATE) {
    303 		DISABLE_INTS(isp);
    304 		isp_init(isp);
    305 		if (isp->isp_state != ISP_INITSTATE) {
    306 			ENABLE_INTS(isp);
    307 			ISP_UNLOCK(isp);
    308 			XS_SETERR(xs, HBA_BOTCH);
    309 			scsipi_done(xs);
    310 			return;
    311 		}
    312 		isp->isp_state = ISP_RUNSTATE;
    313 		ENABLE_INTS(isp);
    314 	}
    315 	/*
    316 	 * Handle the case of a FC card where the FC thread hasn't
    317 	 * fired up yet and we have loop state to clean up. If we
    318 	 * can't clear things up and we've never seen loop up, bounce
    319 	 * the command.
    320 	 */
    321 	if (IS_FC(isp) && isp->isp_osinfo.threadwork &&
    322 	    isp->isp_osinfo.thread == 0) {
    323 		volatile u_int8_t ombi = isp->isp_osinfo.no_mbox_ints;
    324 		int delay_time;
    325 
    326 		if (xs->xs_control & XS_CTL_POLL) {
    327 			isp->isp_osinfo.no_mbox_ints = 1;
    328 		}
    329 
    330 		if (isp->isp_osinfo.loop_checked == 0) {
    331 			delay_time = 10 * 1000000;
    332 			isp->isp_osinfo.loop_checked = 1;
    333 		} else {
    334 			delay_time = 250000;
    335 		}
    336 
    337 		if (isp_fc_runstate(isp, delay_time) != 0) {
    338 			if (xs->xs_control & XS_CTL_POLL) {
    339 				isp->isp_osinfo.no_mbox_ints = ombi;
    340 			}
    341 			if (FCPARAM(isp)->loop_seen_once == 0) {
    342 				XS_SETERR(xs, HBA_SELTIMEOUT);
    343 				scsipi_done(xs);
    344 				ISP_UNLOCK(isp);
    345 				return;
    346 			}
    347 			/*
    348 			 * Otherwise, fall thru to be queued up for later.
    349 			 */
    350 		} else {
    351 			int wasblocked =
    352 			    (isp->isp_osinfo.blocked || isp->isp_osinfo.paused);
    353 			isp->isp_osinfo.threadwork = 0;
    354 			isp->isp_osinfo.blocked =
    355 			    isp->isp_osinfo.paused = 0;
    356 			if (wasblocked) {
    357 				scsipi_channel_thaw(&isp->isp_chanA, 1);
    358 			}
    359 		}
    360 		if (xs->xs_control & XS_CTL_POLL) {
    361 			isp->isp_osinfo.no_mbox_ints = ombi;
    362 		}
    363 	}
    364 
    365 	if (isp->isp_osinfo.paused) {
    366 		isp_prt(isp, ISP_LOGWARN, "I/O while paused");
    367 		xs->error = XS_RESOURCE_SHORTAGE;
    368 		scsipi_done(xs);
    369 		ISP_UNLOCK(isp);
    370 		return;
    371 	}
    372 	if (isp->isp_osinfo.blocked) {
    373 		isp_prt(isp, ISP_LOGWARN, "I/O while blocked");
    374 		xs->error = XS_REQUEUE;
    375 		scsipi_done(xs);
    376 		ISP_UNLOCK(isp);
    377 		return;
    378 	}
    379 
    380 	if (xs->xs_control & XS_CTL_POLL) {
    381 		volatile u_int8_t ombi = isp->isp_osinfo.no_mbox_ints;
    382 		isp->isp_osinfo.no_mbox_ints = 1;
    383 		isp_polled_cmd(isp, xs);
    384 		isp->isp_osinfo.no_mbox_ints = ombi;
    385 		ISP_UNLOCK(isp);
    386 		return;
    387 	}
    388 
    389 	switch (isp_start(xs)) {
    390 	case CMD_QUEUED:
    391 		if (xs->timeout) {
    392 			callout_reset(&xs->xs_callout, _XT(xs), isp_dog, xs);
    393 		}
    394 		break;
    395 	case CMD_EAGAIN:
    396 		isp->isp_osinfo.paused = 1;
    397 		xs->error = XS_RESOURCE_SHORTAGE;
    398 		scsipi_channel_freeze(&isp->isp_chanA, 1);
    399 		if (IS_DUALBUS(isp)) {
    400 			scsipi_channel_freeze(&isp->isp_chanB, 1);
    401 		}
    402 		scsipi_done(xs);
    403 		break;
    404 	case CMD_RQLATER:
    405 		/*
    406 		 * We can only get RQLATER from FC devices (1 channel only)
    407 		 *
    408 		 * Also, if we've never seen loop up, bounce the command
    409 		 * (somebody has booted with no FC cable connected)
    410 		 */
    411 		if (FCPARAM(isp)->loop_seen_once == 0) {
    412 			XS_SETERR(xs, HBA_SELTIMEOUT);
    413 			scsipi_done(xs);
    414 			break;
    415 		}
    416 		if (isp->isp_osinfo.blocked == 0) {
    417 			isp->isp_osinfo.blocked = 1;
    418 			scsipi_channel_freeze(&isp->isp_chanA, 1);
    419 		}
    420 		xs->error = XS_REQUEUE;
    421 		scsipi_done(xs);
    422 		break;
    423 	case CMD_COMPLETE:
    424 		scsipi_done(xs);
    425 		break;
    426 	}
    427 	ISP_UNLOCK(isp);
    428 }
    429 
    430 static void
    431 isprequest(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
    432 {
    433 	struct ispsoftc *isp = (void *)chan->chan_adapter->adapt_dev;
    434 
    435 	switch (req) {
    436 	case ADAPTER_REQ_RUN_XFER:
    437 		ispcmd(isp, (XS_T *) arg);
    438 		break;
    439 
    440 	case ADAPTER_REQ_GROW_RESOURCES:
    441 		/* Not supported. */
    442 		break;
    443 
    444 	case ADAPTER_REQ_SET_XFER_MODE:
    445 	if (IS_SCSI(isp)) {
    446 		struct scsipi_xfer_mode *xm = arg;
    447 		int dflags = 0;
    448 		sdparam *sdp = SDPARAM(isp);
    449 
    450 		sdp += chan->chan_channel;
    451 		if (xm->xm_mode & PERIPH_CAP_TQING)
    452 			dflags |= DPARM_TQING;
    453 		if (xm->xm_mode & PERIPH_CAP_WIDE16)
    454 			dflags |= DPARM_WIDE;
    455 		if (xm->xm_mode & PERIPH_CAP_SYNC)
    456 			dflags |= DPARM_SYNC;
    457 		ISP_LOCK(isp);
    458 		sdp->isp_devparam[xm->xm_target].goal_flags |= dflags;
    459 		dflags = sdp->isp_devparam[xm->xm_target].goal_flags;
    460 		sdp->isp_devparam[xm->xm_target].dev_update = 1;
    461 		isp->isp_update |= (1 << chan->chan_channel);
    462 		ISP_UNLOCK(isp);
    463 		isp_prt(isp, ISP_LOGDEBUG1,
    464 		    "ispioctl: device flags 0x%x for %d.%d.X",
    465 		    dflags, chan->chan_channel, xm->xm_target);
    466 		break;
    467 	}
    468 	default:
    469 		break;
    470 	}
    471 }
    472 
    473 static void
    474 isp_polled_cmd(struct ispsoftc *isp, XS_T *xs)
    475 {
    476 	int result;
    477 	int infinite = 0, mswait;
    478 
    479 	result = isp_start(xs);
    480 
    481 	switch (result) {
    482 	case CMD_QUEUED:
    483 		break;
    484 	case CMD_RQLATER:
    485 		if (XS_NOERR(xs)) {
    486 			xs->error = XS_REQUEUE;
    487 		}
    488 	case CMD_EAGAIN:
    489 		if (XS_NOERR(xs)) {
    490 			xs->error = XS_RESOURCE_SHORTAGE;
    491 		}
    492 		/* FALLTHROUGH */
    493 	case CMD_COMPLETE:
    494 		scsipi_done(xs);
    495 		return;
    496 
    497 	}
    498 
    499 	/*
    500 	 * If we can't use interrupts, poll on completion.
    501 	 */
    502 	if ((mswait = XS_TIME(xs)) == 0)
    503 		infinite = 1;
    504 
    505 	while (mswait || infinite) {
    506 		u_int16_t isr, sema, mbox;
    507 		if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
    508 			isp_intr(isp, isr, sema, mbox);
    509 			if (XS_CMD_DONE_P(xs)) {
    510 				break;
    511 			}
    512 		}
    513 		USEC_DELAY(1000);
    514 		mswait -= 1;
    515 	}
    516 
    517 	/*
    518 	 * If no other error occurred but we didn't finish,
    519 	 * something bad happened.
    520 	 */
    521 	if (XS_CMD_DONE_P(xs) == 0) {
    522 		if (isp_control(isp, ISPCTL_ABORT_CMD, xs)) {
    523 			isp_reinit(isp);
    524 		}
    525 		if (XS_NOERR(xs)) {
    526 			XS_SETERR(xs, HBA_BOTCH);
    527 		}
    528 	}
    529 	scsipi_done(xs);
    530 }
    531 
    532 void
    533 isp_done(XS_T *xs)
    534 {
    535 	XS_CMD_S_DONE(xs);
    536 	if (XS_CMD_WDOG_P(xs) == 0) {
    537 		struct ispsoftc *isp = XS_ISP(xs);
    538 		callout_stop(&xs->xs_callout);
    539 		if (XS_CMD_GRACE_P(xs)) {
    540 			isp_prt(isp, ISP_LOGDEBUG1,
    541 			    "finished command on borrowed time");
    542 		}
    543 		XS_CMD_S_CLEAR(xs);
    544 		/*
    545 		 * Fixup- if we get a QFULL, we need
    546 		 * to set XS_BUSY as the error.
    547 		 */
    548 		if (xs->status == SCSI_QUEUE_FULL) {
    549 			xs->error = XS_BUSY;
    550 		}
    551 		if (isp->isp_osinfo.paused) {
    552 			isp->isp_osinfo.paused = 0;
    553 			scsipi_channel_timed_thaw(&isp->isp_chanA);
    554 			if (IS_DUALBUS(isp)) {
    555 				scsipi_channel_timed_thaw(&isp->isp_chanB);
    556 			}
    557 		}
    558 		scsipi_done(xs);
    559 	}
    560 }
    561 
    562 static void
    563 isp_dog(void *arg)
    564 {
    565 	XS_T *xs = arg;
    566 	struct ispsoftc *isp = XS_ISP(xs);
    567 	u_int16_t handle;
    568 
    569 	ISP_ILOCK(isp);
    570 	/*
    571 	 * We've decided this command is dead. Make sure we're not trying
    572 	 * to kill a command that's already dead by getting it's handle and
    573 	 * and seeing whether it's still alive.
    574 	 */
    575 	handle = isp_find_handle(isp, xs);
    576 	if (handle) {
    577 		u_int16_t isr, mbox, sema;
    578 
    579 		if (XS_CMD_DONE_P(xs)) {
    580 			isp_prt(isp, ISP_LOGDEBUG1,
    581 			    "watchdog found done cmd (handle 0x%x)", handle);
    582 			ISP_IUNLOCK(isp);
    583 			return;
    584 		}
    585 
    586 		if (XS_CMD_WDOG_P(xs)) {
    587 			isp_prt(isp, ISP_LOGDEBUG1,
    588 			    "recursive watchdog (handle 0x%x)", handle);
    589 			ISP_IUNLOCK(isp);
    590 			return;
    591 		}
    592 
    593 		XS_CMD_S_WDOG(xs);
    594 
    595 		if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
    596 			isp_intr(isp, isr, sema, mbox);
    597 
    598 		}
    599 		if (XS_CMD_DONE_P(xs)) {
    600 			isp_prt(isp, ISP_LOGDEBUG1,
    601 			    "watchdog cleanup for handle 0x%x", handle);
    602 			XS_CMD_C_WDOG(xs);
    603 			isp_done(xs);
    604 		} else if (XS_CMD_GRACE_P(xs)) {
    605 			isp_prt(isp, ISP_LOGDEBUG1,
    606 			    "watchdog timeout for handle 0x%x", handle);
    607 			/*
    608 			 * Make sure the command is *really* dead before we
    609 			 * release the handle (and DMA resources) for reuse.
    610 			 */
    611 			(void) isp_control(isp, ISPCTL_ABORT_CMD, arg);
    612 
    613 			/*
    614 			 * After this point, the comamnd is really dead.
    615 			 */
    616 			if (XS_XFRLEN(xs)) {
    617 				ISP_DMAFREE(isp, xs, handle);
    618 			}
    619 			isp_destroy_handle(isp, handle);
    620 			XS_SETERR(xs, XS_TIMEOUT);
    621 			XS_CMD_S_CLEAR(xs);
    622 			isp_done(xs);
    623 		} else {
    624 			u_int16_t nxti, optr;
    625 			ispreq_t local, *mp = &local, *qe;
    626 			isp_prt(isp, ISP_LOGDEBUG2,
    627 			    "possible command timeout on handle %x", handle);
    628 			XS_CMD_C_WDOG(xs);
    629 			callout_reset(&xs->xs_callout, hz, isp_dog, xs);
    630 			if (isp_getrqentry(isp, &nxti, &optr, (void **) &qe)) {
    631 				ISP_UNLOCK(isp);
    632 				return;
    633 			}
    634 			XS_CMD_S_GRACE(xs);
    635 			MEMZERO((void *) mp, sizeof (*mp));
    636 			mp->req_header.rqs_entry_count = 1;
    637 			mp->req_header.rqs_entry_type = RQSTYPE_MARKER;
    638 			mp->req_modifier = SYNC_ALL;
    639 			mp->req_target = XS_CHANNEL(xs) << 7;
    640 			isp_put_request(isp, mp, qe);
    641 			ISP_ADD_REQUEST(isp, nxti);
    642 		}
    643 	} else {
    644 		isp_prt(isp, ISP_LOGDEBUG0, "watchdog with no command");
    645 	}
    646 	ISP_IUNLOCK(isp);
    647 }
    648 
    649 /*
    650  * Fibre Channel state cleanup thread
    651  */
    652 static void
    653 isp_create_fc_worker(void *arg)
    654 {
    655 	struct ispsoftc *isp = arg;
    656 
    657 	if (kthread_create1(isp_fc_worker, isp, &isp->isp_osinfo.thread,
    658 	    "%s:fc_thrd", isp->isp_name)) {
    659 		isp_prt(isp, ISP_LOGERR, "unable to create FC worker thread");
    660 		panic("isp_create_fc_worker");
    661 	}
    662 
    663 }
    664 
    665 static void
    666 isp_fc_worker(void *arg)
    667 {
    668 	void scsipi_run_queue(struct scsipi_channel *);
    669 	struct ispsoftc *isp = arg;
    670 
    671 	for (;;) {
    672 		int s;
    673 
    674 		/*
    675 		 * Note we do *not* use the ISP_LOCK/ISP_UNLOCK macros here.
    676 		 */
    677 		s = splbio();
    678 		while (isp->isp_osinfo.threadwork) {
    679 			isp->isp_osinfo.threadwork = 0;
    680 			if (isp_fc_runstate(isp, 10 * 1000000) == 0) {
    681 				break;
    682 			}
    683 			if  (isp->isp_osinfo.loop_checked &&
    684 			     FCPARAM(isp)->loop_seen_once == 0) {
    685 				splx(s);
    686 				goto skip;
    687 			}
    688 			isp->isp_osinfo.threadwork = 1;
    689 			splx(s);
    690 			delay(500 * 1000);
    691 			s = splbio();
    692 		}
    693 		if (FCPARAM(isp)->isp_fwstate != FW_READY ||
    694 		    FCPARAM(isp)->isp_loopstate != LOOP_READY) {
    695 			isp_prt(isp, ISP_LOGINFO, "isp_fc_runstate in vain");
    696 			isp->isp_osinfo.threadwork = 1;
    697 			splx(s);
    698 			continue;
    699 		}
    700 
    701 		if (isp->isp_osinfo.blocked) {
    702 			isp->isp_osinfo.blocked = 0;
    703 			isp_prt(isp, ISP_LOGDEBUG0,
    704 			    "restarting queues (freeze count %d)",
    705 			    isp->isp_chanA.chan_qfreeze);
    706 			scsipi_channel_thaw(&isp->isp_chanA, 1);
    707 		}
    708 
    709 		if (isp->isp_osinfo.thread == NULL)
    710 			break;
    711 
    712 skip:
    713 		(void) tsleep(&isp->isp_osinfo.thread, PRIBIO, "fcclnup", 0);
    714 
    715 		splx(s);
    716 	}
    717 
    718 	/* In case parent is waiting for us to exit. */
    719 	wakeup(&isp->isp_osinfo.thread);
    720 
    721 	kthread_exit(0);
    722 }
    723 
    724 /*
    725  * Free any associated resources prior to decommissioning and
    726  * set the card to a known state (so it doesn't wake up and kick
    727  * us when we aren't expecting it to).
    728  *
    729  * Locks are held before coming here.
    730  */
    731 void
    732 isp_uninit(struct ispsoftc *isp)
    733 {
    734 	isp_lock(isp);
    735 	/*
    736 	 * Leave with interrupts disabled.
    737 	 */
    738 	DISABLE_INTS(isp);
    739 	isp_unlock(isp);
    740 }
    741 
    742 int
    743 isp_async(struct ispsoftc *isp, ispasync_t cmd, void *arg)
    744 {
    745 	int bus, tgt;
    746 
    747 	switch (cmd) {
    748 	case ISPASYNC_NEW_TGT_PARAMS:
    749 	if (IS_SCSI(isp) && isp->isp_dblev) {
    750 		sdparam *sdp = isp->isp_param;
    751 		int flags;
    752 		struct scsipi_xfer_mode xm;
    753 
    754 		tgt = *((int *) arg);
    755 		bus = (tgt >> 16) & 0xffff;
    756 		tgt &= 0xffff;
    757 		sdp += bus;
    758 		flags = sdp->isp_devparam[tgt].actv_flags;
    759 
    760 		xm.xm_mode = 0;
    761 		xm.xm_period = sdp->isp_devparam[tgt].actv_period;
    762 		xm.xm_offset = sdp->isp_devparam[tgt].actv_offset;
    763 		xm.xm_target = tgt;
    764 
    765 		if ((flags & DPARM_SYNC) && xm.xm_period && xm.xm_offset)
    766 			xm.xm_mode |= PERIPH_CAP_SYNC;
    767 		if (flags & DPARM_WIDE)
    768 			xm.xm_mode |= PERIPH_CAP_WIDE16;
    769 		if (flags & DPARM_TQING)
    770 			xm.xm_mode |= PERIPH_CAP_TQING;
    771 		scsipi_async_event(bus? &isp->isp_chanB : &isp->isp_chanA,
    772 		    ASYNC_EVENT_XFER_MODE, &xm);
    773 		break;
    774 	}
    775 	case ISPASYNC_BUS_RESET:
    776 		bus = *((int *) arg);
    777 		scsipi_async_event(bus? &isp->isp_chanB : &isp->isp_chanA,
    778 		    ASYNC_EVENT_RESET, NULL);
    779 		isp_prt(isp, ISP_LOGINFO, "SCSI bus %d reset detected", bus);
    780 		break;
    781 	case ISPASYNC_LIP:
    782 		/*
    783 		 * Don't do queue freezes or blockage until we have the
    784 		 * thread running that can unfreeze/unblock us.
    785 		 */
    786 		if (isp->isp_osinfo.blocked == 0)  {
    787 			if (isp->isp_osinfo.thread) {
    788 				isp->isp_osinfo.blocked = 1;
    789 				scsipi_channel_freeze(&isp->isp_chanA, 1);
    790 			}
    791 		}
    792 		isp_prt(isp, ISP_LOGINFO, "LIP Received");
    793 		break;
    794 	case ISPASYNC_LOOP_RESET:
    795 		/*
    796 		 * Don't do queue freezes or blockage until we have the
    797 		 * thread running that can unfreeze/unblock us.
    798 		 */
    799 		if (isp->isp_osinfo.blocked == 0) {
    800 			if (isp->isp_osinfo.thread) {
    801 				isp->isp_osinfo.blocked = 1;
    802 				scsipi_channel_freeze(&isp->isp_chanA, 1);
    803 			}
    804 		}
    805 		isp_prt(isp, ISP_LOGINFO, "Loop Reset Received");
    806 		break;
    807 	case ISPASYNC_LOOP_DOWN:
    808 		/*
    809 		 * Don't do queue freezes or blockage until we have the
    810 		 * thread running that can unfreeze/unblock us.
    811 		 */
    812 		if (isp->isp_osinfo.blocked == 0) {
    813 			if (isp->isp_osinfo.thread) {
    814 				isp->isp_osinfo.blocked = 1;
    815 				scsipi_channel_freeze(&isp->isp_chanA, 1);
    816 			}
    817 		}
    818 		isp_prt(isp, ISP_LOGINFO, "Loop DOWN");
    819 		break;
    820         case ISPASYNC_LOOP_UP:
    821 		/*
    822 		 * Let the subsequent ISPASYNC_CHANGE_NOTIFY invoke
    823 		 * the FC worker thread. When the FC worker thread
    824 		 * is done, let *it* call scsipi_channel_thaw...
    825 		 */
    826 		isp_prt(isp, ISP_LOGINFO, "Loop UP");
    827 		break;
    828 	case ISPASYNC_PROMENADE:
    829 	if (IS_FC(isp) && isp->isp_dblev) {
    830 		const char fmt[] = "Target %d (Loop 0x%x) Port ID 0x%x "
    831 		    "(role %s) %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x";
    832 		const static char *roles[4] = {
    833 		    "None", "Target", "Initiator", "Target/Initiator"
    834 		};
    835 		fcparam *fcp = isp->isp_param;
    836 		int tgt = *((int *) arg);
    837 		struct lportdb *lp = &fcp->portdb[tgt];
    838 
    839 		isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid, lp->portid,
    840 		    roles[lp->roles & 0x3],
    841 		    (lp->valid)? "Arrived" : "Departed",
    842 		    (u_int32_t) (lp->port_wwn >> 32),
    843 		    (u_int32_t) (lp->port_wwn & 0xffffffffLL),
    844 		    (u_int32_t) (lp->node_wwn >> 32),
    845 		    (u_int32_t) (lp->node_wwn & 0xffffffffLL));
    846 		break;
    847 	}
    848 	case ISPASYNC_CHANGE_NOTIFY:
    849 		if (arg == ISPASYNC_CHANGE_PDB) {
    850 			isp_prt(isp, ISP_LOGINFO, "Port Database Changed");
    851 		} else if (arg == ISPASYNC_CHANGE_SNS) {
    852 			isp_prt(isp, ISP_LOGINFO,
    853 			    "Name Server Database Changed");
    854 		}
    855 
    856 		/*
    857 		 * We can set blocked here because we know it's now okay
    858 		 * to try and run isp_fc_runstate (in order to build loop
    859 		 * state). But we don't try and freeze the midlayer's queue
    860 		 * if we have no thread that we can wake to later unfreeze
    861 		 * it.
    862 		 */
    863 		if (isp->isp_osinfo.blocked == 0) {
    864 			isp->isp_osinfo.blocked = 1;
    865 			if (isp->isp_osinfo.thread) {
    866 				scsipi_channel_freeze(&isp->isp_chanA, 1);
    867 			}
    868 		}
    869 		/*
    870 		 * Note that we have work for the thread to do, and
    871 		 * if the thread is here already, wake it up.
    872 		 */
    873 		isp->isp_osinfo.threadwork++;
    874 		if (isp->isp_osinfo.thread) {
    875 			wakeup(&isp->isp_osinfo.thread);
    876 		} else {
    877 			isp_prt(isp, ISP_LOGDEBUG1, "no FC thread yet");
    878 		}
    879 		break;
    880 	case ISPASYNC_FABRIC_DEV:
    881 	{
    882 		int target, lrange;
    883 		struct lportdb *lp = NULL;
    884 		char *pt;
    885 		sns_ganrsp_t *resp = (sns_ganrsp_t *) arg;
    886 		u_int32_t portid;
    887 		u_int64_t wwpn, wwnn;
    888 		fcparam *fcp = isp->isp_param;
    889 
    890 		portid =
    891 		    (((u_int32_t) resp->snscb_port_id[0]) << 16) |
    892 		    (((u_int32_t) resp->snscb_port_id[1]) << 8) |
    893 		    (((u_int32_t) resp->snscb_port_id[2]));
    894 
    895 		wwpn =
    896 		    (((u_int64_t)resp->snscb_portname[0]) << 56) |
    897 		    (((u_int64_t)resp->snscb_portname[1]) << 48) |
    898 		    (((u_int64_t)resp->snscb_portname[2]) << 40) |
    899 		    (((u_int64_t)resp->snscb_portname[3]) << 32) |
    900 		    (((u_int64_t)resp->snscb_portname[4]) << 24) |
    901 		    (((u_int64_t)resp->snscb_portname[5]) << 16) |
    902 		    (((u_int64_t)resp->snscb_portname[6]) <<  8) |
    903 		    (((u_int64_t)resp->snscb_portname[7]));
    904 
    905 		wwnn =
    906 		    (((u_int64_t)resp->snscb_nodename[0]) << 56) |
    907 		    (((u_int64_t)resp->snscb_nodename[1]) << 48) |
    908 		    (((u_int64_t)resp->snscb_nodename[2]) << 40) |
    909 		    (((u_int64_t)resp->snscb_nodename[3]) << 32) |
    910 		    (((u_int64_t)resp->snscb_nodename[4]) << 24) |
    911 		    (((u_int64_t)resp->snscb_nodename[5]) << 16) |
    912 		    (((u_int64_t)resp->snscb_nodename[6]) <<  8) |
    913 		    (((u_int64_t)resp->snscb_nodename[7]));
    914 		if (portid == 0 || wwpn == 0) {
    915 			break;
    916 		}
    917 
    918 		switch (resp->snscb_port_type) {
    919 		case 1:
    920 			pt = "   N_Port";
    921 			break;
    922 		case 2:
    923 			pt = "  NL_Port";
    924 			break;
    925 		case 3:
    926 			pt = "F/NL_Port";
    927 			break;
    928 		case 0x7f:
    929 			pt = "  Nx_Port";
    930 			break;
    931 		case 0x81:
    932 			pt = "  F_port";
    933 			break;
    934 		case 0x82:
    935 			pt = "  FL_Port";
    936 			break;
    937 		case 0x84:
    938 			pt = "   E_port";
    939 			break;
    940 		default:
    941 			pt = "?";
    942 			break;
    943 		}
    944 		isp_prt(isp, ISP_LOGINFO,
    945 		    "%s @ 0x%x, Node 0x%08x%08x Port %08x%08x",
    946 		    pt, portid, ((u_int32_t) (wwnn >> 32)), ((u_int32_t) wwnn),
    947 		    ((u_int32_t) (wwpn >> 32)), ((u_int32_t) wwpn));
    948 		/*
    949 		 * We're only interested in SCSI_FCP types (for now)
    950 		 */
    951 		if ((resp->snscb_fc4_types[2] & 1) == 0) {
    952 			break;
    953 		}
    954 		if (fcp->isp_topo != TOPO_F_PORT)
    955 			lrange = FC_SNS_ID+1;
    956 		else
    957 			lrange = 0;
    958 		/*
    959 		 * Is it already in our list?
    960 		 */
    961 		for (target = lrange; target < MAX_FC_TARG; target++) {
    962 			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
    963 				continue;
    964 			}
    965 			lp = &fcp->portdb[target];
    966 			if (lp->port_wwn == wwpn && lp->node_wwn == wwnn) {
    967 				lp->fabric_dev = 1;
    968 				break;
    969 			}
    970 		}
    971 		if (target < MAX_FC_TARG) {
    972 			break;
    973 		}
    974 		for (target = lrange; target < MAX_FC_TARG; target++) {
    975 			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
    976 				continue;
    977 			}
    978 			lp = &fcp->portdb[target];
    979 			if (lp->port_wwn == 0) {
    980 				break;
    981 			}
    982 		}
    983 		if (target == MAX_FC_TARG) {
    984 			isp_prt(isp, ISP_LOGWARN,
    985 			    "no more space for fabric devices");
    986 			break;
    987 		}
    988 		lp->node_wwn = wwnn;
    989 		lp->port_wwn = wwpn;
    990 		lp->portid = portid;
    991 		lp->fabric_dev = 1;
    992 		break;
    993 	}
    994 	case ISPASYNC_FW_CRASH:
    995 	{
    996 		u_int16_t mbox1, mbox6;
    997 		mbox1 = ISP_READ(isp, OUTMAILBOX1);
    998 		if (IS_DUALBUS(isp)) {
    999 			mbox6 = ISP_READ(isp, OUTMAILBOX6);
   1000 		} else {
   1001 			mbox6 = 0;
   1002 		}
   1003                 isp_prt(isp, ISP_LOGERR,
   1004                     "Internal Firmware Error on bus %d @ RISC Address 0x%x",
   1005                     mbox6, mbox1);
   1006 		isp_reinit(isp);
   1007 		break;
   1008 	}
   1009 	default:
   1010 		break;
   1011 	}
   1012 	return (0);
   1013 }
   1014 
   1015 #include <machine/stdarg.h>
   1016 void
   1017 isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
   1018 {
   1019 	va_list ap;
   1020 	if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
   1021 		return;
   1022 	}
   1023 	printf("%s: ", isp->isp_name);
   1024 	va_start(ap, fmt);
   1025 	vprintf(fmt, ap);
   1026 	va_end(ap);
   1027 	printf("\n");
   1028 }
   1029