/* isp_netbsd.c, revision 1.48 (NetBSD cross-reference extract) */
      1 /* $NetBSD: isp_netbsd.c,v 1.48 2001/09/05 23:08:23 mjacob Exp $ */
      2 /*
      3  * This driver, which is contained in NetBSD in the files:
      4  *
      5  *	sys/dev/ic/isp.c
      6  *	sys/dev/ic/isp_inline.h
      7  *	sys/dev/ic/isp_netbsd.c
      8  *	sys/dev/ic/isp_netbsd.h
      9  *	sys/dev/ic/isp_target.c
     10  *	sys/dev/ic/isp_target.h
     11  *	sys/dev/ic/isp_tpublic.h
     12  *	sys/dev/ic/ispmbox.h
     13  *	sys/dev/ic/ispreg.h
     14  *	sys/dev/ic/ispvar.h
     15  *	sys/microcode/isp/asm_sbus.h
     16  *	sys/microcode/isp/asm_1040.h
     17  *	sys/microcode/isp/asm_1080.h
     18  *	sys/microcode/isp/asm_12160.h
     19  *	sys/microcode/isp/asm_2100.h
     20  *	sys/microcode/isp/asm_2200.h
     21  *	sys/pci/isp_pci.c
     22  *	sys/sbus/isp_sbus.c
     23  *
     24  * Is being actively maintained by Matthew Jacob (mjacob (at) netbsd.org).
     25  * This driver also is shared source with FreeBSD, OpenBSD, Linux, Solaris,
     26  * Linux versions. This tends to be an interesting maintenance problem.
     27  *
     28  * Please coordinate with Matthew Jacob on changes you wish to make here.
     29  */
     30 /*
     31  * Platform (NetBSD) dependent common attachment code for Qlogic adapters.
     32  * Matthew Jacob <mjacob (at) nas.nasa.gov>
     33  */
     34 /*
     35  * Copyright (C) 1997, 1998, 1999 National Aeronautics & Space Administration
     36  * All rights reserved.
     37  *
     38  * Redistribution and use in source and binary forms, with or without
     39  * modification, are permitted provided that the following conditions
     40  * are met:
     41  * 1. Redistributions of source code must retain the above copyright
     42  *    notice, this list of conditions and the following disclaimer.
     43  * 2. Redistributions in binary form must reproduce the above copyright
     44  *    notice, this list of conditions and the following disclaimer in the
     45  *    documentation and/or other materials provided with the distribution.
     46  * 3. The name of the author may not be used to endorse or promote products
     47  *    derived from this software without specific prior written permission
     48  *
     49  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     50  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     51  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     52  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     53  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
     54  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     55  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     56  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     57  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
     58  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     59  */
     60 
     61 #include <dev/ic/isp_netbsd.h>
     62 #include <sys/scsiio.h>
     63 
     64 
/*
 * Set a timeout for the watchdogging of a command.
 *
 * The dimensional analysis is
 *
 *	milliseconds * (seconds/millisecond) * (ticks/second) = ticks
 *
 *			=
 *
 *	(milliseconds / 1000) * hz = ticks
 *
 * (hz is the kernel's clock-tick rate, in ticks per second.)
 *
 * For timeouts less than 1 second, we'll get zero. Because of this, and
 * because we want to establish *our* timeout to be longer than what the
 * firmware might do, we just add 3 seconds at the back end.
 */
#define	_XT(xs)	((((xs)->timeout/1000) * hz) + (3 * hz))
     82 
     83 static void isp_config_interrupts(struct device *);
     84 static void ispminphys_1020(struct buf *);
     85 static void ispminphys(struct buf *);
     86 static INLINE void ispcmd(struct ispsoftc *, XS_T *);
     87 static void isprequest(struct scsipi_channel *, scsipi_adapter_req_t, void *);
     88 static int
     89 ispioctl(struct scsipi_channel *, u_long, caddr_t, int, struct proc *);
     90 
     91 static void isp_polled_cmd(struct ispsoftc *, XS_T *);
     92 static void isp_dog(void *);
     93 static void isp_create_fc_worker(void *);
     94 static void isp_fc_worker(void *);
     95 
     96 /*
     97  * Complete attachment of hardware, include subdevices.
     98  */
/*
 * Complete attachment of hardware, include subdevices.
 *
 * Fills in the scsipi adapter/channel structures embedded in the
 * softc, starts the FC worker thread (FC cards) or resets the
 * bus(es) (parallel SCSI cards), defers interrupt-driven mailbox
 * configuration, and attaches child devices.
 */
void
isp_attach(struct ispsoftc *isp)
{
	isp->isp_state = ISP_RUNSTATE;

	/* Describe ourselves to the scsipi midlayer. */
	isp->isp_osinfo._adapter.adapt_dev = &isp->isp_osinfo._dev;
	isp->isp_osinfo._adapter.adapt_nchannels = IS_DUALBUS(isp) ? 2 : 1;
	isp->isp_osinfo._adapter.adapt_openings = isp->isp_maxcmds;
	/*
	 * It's not stated whether max_periph is limited by SPI
	 * tag usage, but let's assume that it is.
	 */
	isp->isp_osinfo._adapter.adapt_max_periph = min(isp->isp_maxcmds, 255);
	isp->isp_osinfo._adapter.adapt_ioctl = ispioctl;
	isp->isp_osinfo._adapter.adapt_request = isprequest;
	/* 1020-class chips only address 24 bits of transfer length. */
	if (isp->isp_type <= ISP_HA_SCSI_1020A) {
		isp->isp_osinfo._adapter.adapt_minphys = ispminphys_1020;
	} else {
		isp->isp_osinfo._adapter.adapt_minphys = ispminphys;
	}

	isp->isp_osinfo._chan.chan_adapter = &isp->isp_osinfo._adapter;
	isp->isp_osinfo._chan.chan_bustype = &scsi_bustype;
	isp->isp_osinfo._chan.chan_channel = 0;

	/*
	 * Until the midlayer is fixed to use REPORT LUNS, limit to 8 luns.
	 */
	isp->isp_osinfo._chan.chan_nluns = min(isp->isp_maxluns, 8);

	if (IS_FC(isp)) {
		/* Fibre Channel: loop state is built by the worker thread. */
		isp->isp_osinfo._chan.chan_ntargets = MAX_FC_TARG;
		isp->isp_osinfo._chan.chan_id = MAX_FC_TARG;
		isp->isp_osinfo.threadwork = 1;
		/*
		 * Note that isp_create_fc_worker won't get called
		 * until much much later (after proc0 is created).
		 */
		kthread_create(isp_create_fc_worker, isp);
	} else {
		/* Parallel SCSI: set IDs and reset the bus(es) now. */
		int bus = 0;
		sdparam *sdp = isp->isp_param;

		isp->isp_osinfo._chan.chan_ntargets = MAX_TARGETS;
		isp->isp_osinfo._chan.chan_id = sdp->isp_initiator_id;
		isp->isp_osinfo.discovered[0] = 1 << sdp->isp_initiator_id;
		if (IS_DUALBUS(isp)) {
			/* Clone channel A's setup, then fix up bus B. */
			isp->isp_osinfo._chan_b = isp->isp_osinfo._chan;
			sdp++;
			isp->isp_osinfo.discovered[1] =
			    1 << sdp->isp_initiator_id;
			isp->isp_osinfo._chan_b.chan_id = sdp->isp_initiator_id;
			isp->isp_osinfo._chan_b.chan_channel = 1;
		}
		ISP_LOCK(isp);
		(void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
		if (IS_DUALBUS(isp)) {
			bus++;
			(void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
		}
		ISP_UNLOCK(isp);
	}


	/*
         * Defer enabling mailbox interrupts until later.
         */
        config_interrupts((struct device *) isp, isp_config_interrupts);

	/*
	 * And attach children (if any).
	 */
	config_found((void *)isp, &isp->isp_chanA, scsiprint);
	if (IS_DUALBUS(isp)) {
		config_found((void *)isp, &isp->isp_chanB, scsiprint);
	}
}
    176 
    177 
/*
 * Deferred-configuration hook, scheduled via config_interrupts() from
 * isp_attach(), running once the system can take interrupts.  The body
 * is currently compiled out -- see the comment inside for why mailbox
 * interrupts cannot yet be enabled unconditionally.
 */
static void
isp_config_interrupts(struct device *self)
{
#if	0
        struct ispsoftc *isp = (struct ispsoftc *) self;

	/*
	 * After this point, we'll be doing the new configuration
	 * schema which allows interrupts, so we can do tsleep/wakeup
	 * for mailbox stuff at that point.
	 */

	/*
	 * Argh. We cannot use this until we know whether isprequest
	 * was *not* called via a hardclock (timed thaw). So- we'll
	 * only allow a window of the FC kernel thread doing this
	 * when calling isp_fc_runstate.
	 */
	isp->isp_osinfo.no_mbox_ints = 0;
#endif
}
    199 
    200 
    201 /*
    202  * minphys our xfers
    203  */
    204 
    205 static void
    206 ispminphys_1020(struct buf *bp)
    207 {
    208 	if (bp->b_bcount >= (1 << 24)) {
    209 		bp->b_bcount = (1 << 24);
    210 	}
    211 	minphys(bp);
    212 }
    213 
    214 static void
    215 ispminphys(struct buf *bp)
    216 {
    217 	if (bp->b_bcount >= (1 << 30)) {
    218 		bp->b_bcount = (1 << 30);
    219 	}
    220 	minphys(bp);
    221 }
    222 
/*
 * Adapter ioctl entry point (adapt_ioctl).
 *
 * Handles the generic SCBUSIORESET plus driver-private ioctls for
 * debug level, HBA reset, FC loop rescan/LIP, and FC port database
 * queries.  Returns 0 on success, ENOTTY for unrecognized commands
 * (and for FC-only commands issued on a SCSI card), or an errno.
 */
static int
ispioctl(struct scsipi_channel *chan, u_long cmd, caddr_t addr, int flag,
	struct proc *p)
{
	struct ispsoftc *isp = (void *)chan->chan_adapter->adapt_dev;
	int retval = ENOTTY;

	switch (cmd) {
	case SCBUSIORESET:
		/* Reset the SCSI bus this channel corresponds to. */
		ISP_LOCK(isp);
		if (isp_control(isp, ISPCTL_RESET_BUS, &chan->chan_channel))
			retval = EIO;
		else
			retval = 0;
		ISP_UNLOCK(isp);
		break;
	case ISP_SDBLEV:
	{
		/* Set a new debug level; return the old one in *addr. */
		int olddblev = isp->isp_dblev;
		isp->isp_dblev = *(int *)addr;
		*(int *)addr = olddblev;
		retval = 0;
		break;
	}
	case ISP_RESETHBA:
		/* Full adapter reinitialization. */
		ISP_LOCK(isp);
		isp_reinit(isp);
		ISP_UNLOCK(isp);
		retval = 0;
		break;
	case ISP_FC_RESCAN:
		/* Re-evaluate FC loop/fabric state (5 second budget). */
		if (IS_FC(isp)) {
			ISP_LOCK(isp);
			if (isp_fc_runstate(isp, 5 * 1000000)) {
				retval = EIO;
			} else {
				retval = 0;
			}
			ISP_UNLOCK(isp);
		}
		break;
	case ISP_FC_LIP:
		/* Send a Loop Initialization Primitive. */
		if (IS_FC(isp)) {
			ISP_LOCK(isp);
			if (isp_control(isp, ISPCTL_SEND_LIP, 0)) {
				retval = EIO;
			} else {
				retval = 0;
			}
			ISP_UNLOCK(isp);
		}
		break;
	case ISP_FC_GETDINFO:
	{
		/* Look up a port database entry by loop id. */
		struct isp_fc_device *ifc = (struct isp_fc_device *) addr;
		struct lportdb *lp;

		if (ifc->loopid < 0 || ifc->loopid >= MAX_FC_TARG) {
			retval = EINVAL;
			break;
		}
		ISP_LOCK(isp);
		lp = &FCPARAM(isp)->portdb[ifc->loopid];
		if (lp->valid) {
			ifc->loopid = lp->loopid;
			ifc->portid = lp->portid;
			ifc->node_wwn = lp->node_wwn;
			ifc->port_wwn = lp->port_wwn;
			retval = 0;
		} else {
			retval = ENODEV;
		}
		ISP_UNLOCK(isp);
		break;
	}
	default:
		break;
	}
	return (retval);
}
    303 
/*
 * Start a single scsipi transfer (called from isprequest).
 *
 * Brings the chip to run state if needed, deals with an FC card whose
 * worker thread has not started yet, honors pause/block flow control,
 * and either polls the command to completion (XS_CTL_POLL) or queues
 * it with a watchdog callout.  Completion (success or failure) is
 * always reported via scsipi_done().
 */
static INLINE void
ispcmd(struct ispsoftc *isp, XS_T *xs)
{
	ISP_LOCK(isp);
	if (isp->isp_state < ISP_RUNSTATE) {
		/* First command: (re)initialize the chip with ints off. */
		DISABLE_INTS(isp);
		isp_init(isp);
		if (isp->isp_state != ISP_INITSTATE) {
			ENABLE_INTS(isp);
			ISP_UNLOCK(isp);
			XS_SETERR(xs, HBA_BOTCH);
			scsipi_done(xs);
			return;
		}
		isp->isp_state = ISP_RUNSTATE;
		ENABLE_INTS(isp);
	}
	/*
	 * Handle the case of a FC card where the FC thread hasn't
	 * fired up yet and we have loop state to clean up. If we
	 * can't clear things up and we've never seen loop up, bounce
	 * the command.
	 */
	if (IS_FC(isp) && isp->isp_osinfo.threadwork &&
	    isp->isp_osinfo.thread == 0) {
		/* Save mailbox-interrupt mode so we can restore it. */
		volatile u_int8_t ombi = isp->isp_osinfo.no_mbox_ints;
		int delay_time;

		if (xs->xs_control & XS_CTL_POLL) {
			isp->isp_osinfo.no_mbox_ints = 1;
		}

		/* First loop evaluation gets a long (10s) budget. */
		if (isp->isp_osinfo.loop_checked == 0) {
			delay_time = 10 * 1000000;
			isp->isp_osinfo.loop_checked = 1;
		} else {
			delay_time = 250000;
		}

		if (isp_fc_runstate(isp, delay_time) != 0) {
			if (xs->xs_control & XS_CTL_POLL) {
				isp->isp_osinfo.no_mbox_ints = ombi;
			}
			/* Loop never came up: fail as selection timeout. */
			if (FCPARAM(isp)->loop_seen_once == 0) {
				XS_SETERR(xs, HBA_SELTIMEOUT);
				scsipi_done(xs);
				ISP_UNLOCK(isp);
				return;
			}
			/*
			 * Otherwise, fall thru to be queued up for later.
			 */
		} else {
			/* Loop is good: clear flow control, thaw queue. */
			int wasblocked =
			    (isp->isp_osinfo.blocked || isp->isp_osinfo.paused);
			isp->isp_osinfo.threadwork = 0;
			isp->isp_osinfo.blocked =
			    isp->isp_osinfo.paused = 0;
			if (wasblocked) {
				scsipi_channel_thaw(&isp->isp_chanA, 1);
			}
		}
		if (xs->xs_control & XS_CTL_POLL) {
			isp->isp_osinfo.no_mbox_ints = ombi;
		}
	}

	/* Flow control: refuse new work while paused or blocked. */
	if (isp->isp_osinfo.paused) {
		isp_prt(isp, ISP_LOGWARN, "I/O while paused");
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		ISP_UNLOCK(isp);
		return;
	}
	if (isp->isp_osinfo.blocked) {
		isp_prt(isp, ISP_LOGWARN, "I/O while blocked");
		xs->error = XS_REQUEUE;
		scsipi_done(xs);
		ISP_UNLOCK(isp);
		return;
	}

	/* Polled commands never use mailbox interrupts. */
	if (xs->xs_control & XS_CTL_POLL) {
		volatile u_int8_t ombi = isp->isp_osinfo.no_mbox_ints;
		isp->isp_osinfo.no_mbox_ints = 1;
		isp_polled_cmd(isp, xs);
		isp->isp_osinfo.no_mbox_ints = ombi;
		ISP_UNLOCK(isp);
		return;
	}

	switch (isp_start(xs)) {
	case CMD_QUEUED:
		/* Arm the per-command watchdog. */
		if (xs->timeout) {
			callout_reset(&xs->xs_callout, _XT(xs), isp_dog, xs);
		}
		break;
	case CMD_EAGAIN:
		/* Chip out of resources: pause and freeze the queue(s). */
		isp->isp_osinfo.paused = 1;
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_channel_freeze(&isp->isp_chanA, 1);
		if (IS_DUALBUS(isp)) {
			scsipi_channel_freeze(&isp->isp_chanB, 1);
		}
		scsipi_done(xs);
		break;
	case CMD_RQLATER:
		/*
		 * We can only get RQLATER from FC devices (1 channel only)
		 *
		 * Also, if we've never seen loop up, bounce the command
		 * (somebody has booted with no FC cable connected)
		 */
		if (FCPARAM(isp)->loop_seen_once == 0) {
			XS_SETERR(xs, HBA_SELTIMEOUT);
			scsipi_done(xs);
			break;
		}
		if (isp->isp_osinfo.blocked == 0) {
			isp->isp_osinfo.blocked = 1;
			scsipi_channel_freeze(&isp->isp_chanA, 1);
		}
		xs->error = XS_REQUEUE;
		scsipi_done(xs);
		break;
	case CMD_COMPLETE:
		/* isp_start already set the error/status. */
		scsipi_done(xs);
		break;
	}
	ISP_UNLOCK(isp);
}
    435 
    436 static void
    437 isprequest(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
    438 {
    439 	struct ispsoftc *isp = (void *)chan->chan_adapter->adapt_dev;
    440 
    441 	switch (req) {
    442 	case ADAPTER_REQ_RUN_XFER:
    443 		ispcmd(isp, (XS_T *) arg);
    444 		break;
    445 
    446 	case ADAPTER_REQ_GROW_RESOURCES:
    447 		/* Not supported. */
    448 		break;
    449 
    450 	case ADAPTER_REQ_SET_XFER_MODE:
    451 	if (IS_SCSI(isp)) {
    452 		struct scsipi_xfer_mode *xm = arg;
    453 		int dflags = 0;
    454 		sdparam *sdp = SDPARAM(isp);
    455 
    456 		sdp += chan->chan_channel;
    457 		if (xm->xm_mode & PERIPH_CAP_TQING)
    458 			dflags |= DPARM_TQING;
    459 		if (xm->xm_mode & PERIPH_CAP_WIDE16)
    460 			dflags |= DPARM_WIDE;
    461 		if (xm->xm_mode & PERIPH_CAP_SYNC)
    462 			dflags |= DPARM_SYNC;
    463 		ISP_LOCK(isp);
    464 		sdp->isp_devparam[xm->xm_target].goal_flags |= dflags;
    465 		dflags = sdp->isp_devparam[xm->xm_target].goal_flags;
    466 		sdp->isp_devparam[xm->xm_target].dev_update = 1;
    467 		isp->isp_update |= (1 << chan->chan_channel);
    468 		ISP_UNLOCK(isp);
    469 		isp_prt(isp, ISP_LOGDEBUG1,
    470 		    "ispioctl: device flags 0x%x for %d.%d.X",
    471 		    dflags, chan->chan_channel, xm->xm_target);
    472 		break;
    473 	}
    474 	default:
    475 		break;
    476 	}
    477 }
    478 
/*
 * Run one command to completion by polling (no interrupts).
 *
 * Starts the command, then spins in 1ms steps (up to XS_TIME(xs)
 * milliseconds, or forever if the timeout is 0) servicing the chip's
 * interrupt status by hand.  If the command never completes, it is
 * aborted (with a full reinit on abort failure) and errored out.
 */
static void
isp_polled_cmd(struct ispsoftc *isp, XS_T *xs)
{
	int result;
	int infinite = 0, mswait;

	result = isp_start(xs);

	switch (result) {
	case CMD_QUEUED:
		break;
	case CMD_RQLATER:
		if (XS_NOERR(xs)) {
			xs->error = XS_REQUEUE;
		}
		/* FALLTHROUGH */
	case CMD_EAGAIN:
		if (XS_NOERR(xs)) {
			xs->error = XS_RESOURCE_SHORTAGE;
		}
		/* FALLTHROUGH */
	case CMD_COMPLETE:
		scsipi_done(xs);
		return;

	}

	/*
	 * If we can't use interrupts, poll on completion.
	 */
	if ((mswait = XS_TIME(xs)) == 0)
		infinite = 1;

	while (mswait || infinite) {
		u_int16_t isr, sema, mbox;
		/* Service any pending chip interrupt by hand. */
		if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
			isp_intr(isp, isr, sema, mbox);
			if (XS_CMD_DONE_P(xs)) {
				break;
			}
		}
		USEC_DELAY(1000);
		mswait -= 1;
	}

	/*
	 * If no other error occurred but we didn't finish,
	 * something bad happened.
	 */
	if (XS_CMD_DONE_P(xs) == 0) {
		/* Abort the command; reinit the chip if even that fails. */
		if (isp_control(isp, ISPCTL_ABORT_CMD, xs)) {
			isp_reinit(isp);
		}
		if (XS_NOERR(xs)) {
			XS_SETERR(xs, HBA_BOTCH);
		}
	}
	scsipi_done(xs);
}
    537 
/*
 * Platform completion hook, called by the core driver when a command
 * finishes.  Stops the watchdog, translates QUEUE FULL status to
 * XS_BUSY, releases a pause (with a timed thaw), and hands the
 * transfer back to the midlayer.  If the watchdog currently owns the
 * command (XS_CMD_WDOG_P), completion is deferred to isp_dog().
 */
void
isp_done(XS_T *xs)
{
	XS_CMD_S_DONE(xs);
	if (XS_CMD_WDOG_P(xs) == 0) {
		struct ispsoftc *isp = XS_ISP(xs);
		callout_stop(&xs->xs_callout);
		if (XS_CMD_GRACE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1,
			    "finished command on borrowed time");
		}
		XS_CMD_S_CLEAR(xs);
		/*
		 * Fixup- if we get a QFULL, we need
		 * to set XS_BUSY as the error.
		 */
		if (xs->status == SCSI_QUEUE_FULL) {
			xs->error = XS_BUSY;
		}
		/* A completion means resources freed up: unpause. */
		if (isp->isp_osinfo.paused) {
			isp->isp_osinfo.paused = 0;
			scsipi_channel_timed_thaw(&isp->isp_chanA);
			if (IS_DUALBUS(isp)) {
				scsipi_channel_timed_thaw(&isp->isp_chanB);
			}
		}
		scsipi_done(xs);
	}
}
    567 
    568 static void
    569 isp_dog(void *arg)
    570 {
    571 	XS_T *xs = arg;
    572 	struct ispsoftc *isp = XS_ISP(xs);
    573 	u_int16_t handle;
    574 
    575 	ISP_ILOCK(isp);
    576 	/*
    577 	 * We've decided this command is dead. Make sure we're not trying
    578 	 * to kill a command that's already dead by getting it's handle and
    579 	 * and seeing whether it's still alive.
    580 	 */
    581 	handle = isp_find_handle(isp, xs);
    582 	if (handle) {
    583 		u_int16_t isr, mbox, sema;
    584 
    585 		if (XS_CMD_DONE_P(xs)) {
    586 			isp_prt(isp, ISP_LOGDEBUG1,
    587 			    "watchdog found done cmd (handle 0x%x)", handle);
    588 			ISP_IUNLOCK(isp);
    589 			return;
    590 		}
    591 
    592 		if (XS_CMD_WDOG_P(xs)) {
    593 			isp_prt(isp, ISP_LOGDEBUG1,
    594 			    "recursive watchdog (handle 0x%x)", handle);
    595 			ISP_IUNLOCK(isp);
    596 			return;
    597 		}
    598 
    599 		XS_CMD_S_WDOG(xs);
    600 
    601 		if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
    602 			isp_intr(isp, isr, sema, mbox);
    603 
    604 		}
    605 		if (XS_CMD_DONE_P(xs)) {
    606 			isp_prt(isp, ISP_LOGDEBUG1,
    607 			    "watchdog cleanup for handle 0x%x", handle);
    608 			XS_CMD_C_WDOG(xs);
    609 			isp_done(xs);
    610 		} else if (XS_CMD_GRACE_P(xs)) {
    611 			isp_prt(isp, ISP_LOGDEBUG1,
    612 			    "watchdog timeout for handle 0x%x", handle);
    613 			/*
    614 			 * Make sure the command is *really* dead before we
    615 			 * release the handle (and DMA resources) for reuse.
    616 			 */
    617 			(void) isp_control(isp, ISPCTL_ABORT_CMD, arg);
    618 
    619 			/*
    620 			 * After this point, the comamnd is really dead.
    621 			 */
    622 			if (XS_XFRLEN(xs)) {
    623 				ISP_DMAFREE(isp, xs, handle);
    624 			}
    625 			isp_destroy_handle(isp, handle);
    626 			XS_SETERR(xs, XS_TIMEOUT);
    627 			XS_CMD_S_CLEAR(xs);
    628 			isp_done(xs);
    629 		} else {
    630 			u_int16_t iptr, optr;
    631 			ispreq_t *mp;
    632 			isp_prt(isp, ISP_LOGDEBUG2,
    633 			    "possible command timeout on handle %x", handle);
    634 			XS_CMD_C_WDOG(xs);
    635 			callout_reset(&xs->xs_callout, hz, isp_dog, xs);
    636 			if (isp_getrqentry(isp, &iptr, &optr, (void **) &mp)) {
    637 				ISP_UNLOCK(isp);
    638 				return;
    639 			}
    640 			XS_CMD_S_GRACE(xs);
    641 			MEMZERO((void *) mp, sizeof (*mp));
    642 			mp->req_header.rqs_entry_count = 1;
    643 			mp->req_header.rqs_entry_type = RQSTYPE_MARKER;
    644 			mp->req_modifier = SYNC_ALL;
    645 			mp->req_target = XS_CHANNEL(xs) << 7;
    646 			ISP_SWIZZLE_REQUEST(isp, mp);
    647 			ISP_ADD_REQUEST(isp, iptr);
    648 		}
    649 	} else {
    650 		isp_prt(isp, ISP_LOGDEBUG0, "watchdog with no command");
    651 	}
    652 	ISP_IUNLOCK(isp);
    653 }
    654 
    655 /*
    656  * Fibre Channel state cleanup thread
    657  */
    658 static void
    659 isp_create_fc_worker(void *arg)
    660 {
    661 	struct ispsoftc *isp = arg;
    662 
    663 	if (kthread_create1(isp_fc_worker, isp, &isp->isp_osinfo.thread,
    664 	    "%s:fc_thrd", isp->isp_name)) {
    665 		isp_prt(isp, ISP_LOGERR, "unable to create FC worker thread");
    666 		panic("isp_create_fc_worker");
    667 	}
    668 
    669 }
    670 
/*
 * Fibre Channel state cleanup thread.
 *
 * Sleeps until woken (ISPASYNC_CHANGE_NOTIFY sets threadwork and does
 * a wakeup), then repeatedly drives isp_fc_runstate() with mailbox
 * interrupts enabled until loop/firmware state is ready, thawing any
 * frozen midlayer queue once the loop is good.  Exits when
 * isp->isp_osinfo.thread is cleared.
 */
static void
isp_fc_worker(void *arg)
{
	/*
	 * NOTE(review): forward declaration of a midlayer-private
	 * function; it is not actually called in this function body --
	 * possibly left over. Confirm before removing.
	 */
	void scsipi_run_queue(struct scsipi_channel *);
	struct ispsoftc *isp = arg;

	for (;;) {
		int s;

		/*
		 * Note we do *not* use the ISP_LOCK/ISP_UNLOCK macros here.
		 */
		s = splbio();
		while (isp->isp_osinfo.threadwork) {
			int omb, r;
			isp->isp_osinfo.threadwork = 0;
			/* Allow mailbox interrupts while we run loop state. */
			omb = isp->isp_osinfo.no_mbox_ints;
			isp->isp_osinfo.no_mbox_ints = 0;
			r = isp_fc_runstate(isp, 10 * 1000000);
			isp->isp_osinfo.no_mbox_ints = omb;
			if (r) {
				break;
			}
			/*
			 * If the loop has been checked and never seen up,
			 * go back to sleep rather than spinning.
			 *
			 * NOTE(review): the splx() here plus the splx()
			 * after the "skip" tsleep means this path lowers
			 * the IPL twice; splx(s) restores an absolute
			 * level so this looks harmless, but confirm.
			 */
			if  (isp->isp_osinfo.loop_checked &&
			     FCPARAM(isp)->loop_seen_once == 0) {
				splx(s);
				goto skip;
			}
			isp->isp_osinfo.threadwork = 1;
			splx(s);
			/* Back off half a second before retrying. */
			delay(500 * 1000);
			s = splbio();
		}
		if (FCPARAM(isp)->isp_fwstate != FW_READY ||
		    FCPARAM(isp)->isp_loopstate != LOOP_READY) {
			isp_prt(isp, ISP_LOGINFO, "isp_fc_runstate in vain");
			isp->isp_osinfo.threadwork = 1;
			splx(s);
			continue;
		}

		/* Loop is ready: release any queue freeze we imposed. */
		if (isp->isp_osinfo.blocked) {
			isp->isp_osinfo.blocked = 0;
			isp_prt(isp, ISP_LOGDEBUG0,
			    "restarting queues (freeze count %d)",
			    isp->isp_chanA.chan_qfreeze);
			scsipi_channel_thaw(&isp->isp_chanA, 1);
		}

		/* Cleared thread pointer is the request to exit. */
		if (isp->isp_osinfo.thread == NULL)
			break;

skip:
		(void) tsleep(&isp->isp_osinfo.thread, PRIBIO, "fcclnup", 0);

		splx(s);
	}

	/* In case parent is waiting for us to exit. */
	wakeup(&isp->isp_osinfo.thread);

	kthread_exit(0);
}
    734 
    735 /*
    736  * Free any associated resources prior to decommissioning and
    737  * set the card to a known state (so it doesn't wake up and kick
    738  * us when we aren't expecting it to).
    739  *
    740  * Locks are held before coming here.
    741  */
void
isp_uninit(struct ispsoftc *isp)
{
	/*
	 * NOTE(review): the block comment above says locks are held
	 * before coming here, yet we lock again via the lowercase
	 * isp_lock()/isp_unlock() functions rather than the ISP_LOCK
	 * macros used elsewhere -- confirm these are the intended
	 * low-level variants.
	 */
	isp_lock(isp);
	/*
	 * Leave with interrupts disabled.
	 */
	DISABLE_INTS(isp);
	isp_unlock(isp);
}
    752 
/*
 * Platform asynchronous event handler, called by the core driver.
 *
 * Reports transfer-mode changes and bus resets to the midlayer,
 * manages queue freezing across FC loop transitions (LIP, loop
 * reset/down/up, change notifications), logs FC port arrivals and
 * departures, records fabric devices in the port database, and
 * reinitializes the chip on a firmware crash.  Always returns 0.
 */
int
isp_async(struct ispsoftc *isp, ispasync_t cmd, void *arg)
{
	int bus, tgt;

	switch (cmd) {
	case ISPASYNC_NEW_TGT_PARAMS:
	/*
	 * NOTE(review): if this IS_SCSI/isp_dblev guard fails, control
	 * falls through into ISPASYNC_BUS_RESET below -- presumably
	 * NEW_TGT_PARAMS only arrives for SCSI cards; confirm.
	 */
	if (IS_SCSI(isp) && isp->isp_dblev) {
		sdparam *sdp = isp->isp_param;
		int flags;
		struct scsipi_xfer_mode xm;

		/* arg packs bus in the high 16 bits, target in the low. */
		tgt = *((int *) arg);
		bus = (tgt >> 16) & 0xffff;
		tgt &= 0xffff;
		sdp += bus;
		flags = sdp->isp_devparam[tgt].actv_flags;

		xm.xm_mode = 0;
		xm.xm_period = sdp->isp_devparam[tgt].actv_period;
		xm.xm_offset = sdp->isp_devparam[tgt].actv_offset;
		xm.xm_target = tgt;

		if ((flags & DPARM_SYNC) && xm.xm_period && xm.xm_offset)
			xm.xm_mode |= PERIPH_CAP_SYNC;
		if (flags & DPARM_WIDE)
			xm.xm_mode |= PERIPH_CAP_WIDE16;
		if (flags & DPARM_TQING)
			xm.xm_mode |= PERIPH_CAP_TQING;
		scsipi_async_event(bus? &isp->isp_chanB : &isp->isp_chanA,
		    ASYNC_EVENT_XFER_MODE, &xm);
		break;
	}
	case ISPASYNC_BUS_RESET:
		/* Tell the midlayer the named bus was reset. */
		bus = *((int *) arg);
		scsipi_async_event(bus? &isp->isp_chanB : &isp->isp_chanA,
		    ASYNC_EVENT_RESET, NULL);
		isp_prt(isp, ISP_LOGINFO, "SCSI bus %d reset detected", bus);
		break;
	case ISPASYNC_LIP:
		/*
		 * Don't do queue freezes or blockage until we have the
		 * thread running that can unfreeze/unblock us.
		 */
		if (isp->isp_osinfo.blocked == 0)  {
			if (isp->isp_osinfo.thread) {
				isp->isp_osinfo.blocked = 1;
				scsipi_channel_freeze(&isp->isp_chanA, 1);
			}
		}
		isp_prt(isp, ISP_LOGINFO, "LIP Received");
		break;
	case ISPASYNC_LOOP_RESET:
		/*
		 * Don't do queue freezes or blockage until we have the
		 * thread running that can unfreeze/unblock us.
		 */
		if (isp->isp_osinfo.blocked == 0) {
			if (isp->isp_osinfo.thread) {
				isp->isp_osinfo.blocked = 1;
				scsipi_channel_freeze(&isp->isp_chanA, 1);
			}
		}
		isp_prt(isp, ISP_LOGINFO, "Loop Reset Received");
		break;
	case ISPASYNC_LOOP_DOWN:
		/*
		 * Don't do queue freezes or blockage until we have the
		 * thread running that can unfreeze/unblock us.
		 */
		if (isp->isp_osinfo.blocked == 0) {
			if (isp->isp_osinfo.thread) {
				isp->isp_osinfo.blocked = 1;
				scsipi_channel_freeze(&isp->isp_chanA, 1);
			}
		}
		isp_prt(isp, ISP_LOGINFO, "Loop DOWN");
		break;
        case ISPASYNC_LOOP_UP:
		/*
		 * Let the subsequent ISPASYNC_CHANGE_NOTIFY invoke
		 * the FC worker thread. When the FC worker thread
		 * is done, let *it* call scsipi_channel_thaw...
		 */
		isp_prt(isp, ISP_LOGINFO, "Loop UP");
		break;
	case ISPASYNC_PROMENADE:
	/*
	 * A device arrived on or departed from the loop.
	 *
	 * NOTE(review): if this IS_FC/isp_dblev guard fails, control
	 * falls through into ISPASYNC_CHANGE_NOTIFY below -- confirm
	 * that is intentional.
	 */
	if (IS_FC(isp) && isp->isp_dblev) {
		const char fmt[] = "Target %d (Loop 0x%x) Port ID 0x%x "
		    "(role %s) %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x";
		const static char *roles[4] = {
		    "None", "Target", "Initiator", "Target/Initiator"
		};
		fcparam *fcp = isp->isp_param;
		int tgt = *((int *) arg);
		struct lportdb *lp = &fcp->portdb[tgt];

		isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid, lp->portid,
		    roles[lp->roles & 0x3],
		    (lp->valid)? "Arrived" : "Departed",
		    (u_int32_t) (lp->port_wwn >> 32),
		    (u_int32_t) (lp->port_wwn & 0xffffffffLL),
		    (u_int32_t) (lp->node_wwn >> 32),
		    (u_int32_t) (lp->node_wwn & 0xffffffffLL));
		break;
	}
	case ISPASYNC_CHANGE_NOTIFY:
		if (arg == ISPASYNC_CHANGE_PDB) {
			isp_prt(isp, ISP_LOGINFO, "Port Database Changed");
		} else if (arg == ISPASYNC_CHANGE_SNS) {
			isp_prt(isp, ISP_LOGINFO,
			    "Name Server Database Changed");
		}

		/*
		 * We can set blocked here because we know it's now okay
		 * to try and run isp_fc_runstate (in order to build loop
		 * state). But we don't try and freeze the midlayer's queue
		 * if we have no thread that we can wake to later unfreeze
		 * it.
		 */
		if (isp->isp_osinfo.blocked == 0) {
			isp->isp_osinfo.blocked = 1;
			if (isp->isp_osinfo.thread) {
				scsipi_channel_freeze(&isp->isp_chanA, 1);
			}
		}
		/*
		 * Note that we have work for the thread to do, and
		 * if the thread is here already, wake it up.
		 */
		isp->isp_osinfo.threadwork++;
		if (isp->isp_osinfo.thread) {
			wakeup(&isp->isp_osinfo.thread);
		} else {
			isp_prt(isp, ISP_LOGDEBUG1, "no FC thread yet");
		}
		break;
	case ISPASYNC_FABRIC_DEV:
	{
		/* A fabric (name server) device was reported. */
		int target, lrange;
		struct lportdb *lp = NULL;
		char *pt;
		sns_ganrsp_t *resp = (sns_ganrsp_t *) arg;
		u_int32_t portid;
		u_int64_t wwpn, wwnn;
		fcparam *fcp = isp->isp_param;

		/* Assemble 24-bit port id and 64-bit WWNs from the SNS
		 * response bytes (big-endian on the wire). */
		portid =
		    (((u_int32_t) resp->snscb_port_id[0]) << 16) |
		    (((u_int32_t) resp->snscb_port_id[1]) << 8) |
		    (((u_int32_t) resp->snscb_port_id[2]));

		wwpn =
		    (((u_int64_t)resp->snscb_portname[0]) << 56) |
		    (((u_int64_t)resp->snscb_portname[1]) << 48) |
		    (((u_int64_t)resp->snscb_portname[2]) << 40) |
		    (((u_int64_t)resp->snscb_portname[3]) << 32) |
		    (((u_int64_t)resp->snscb_portname[4]) << 24) |
		    (((u_int64_t)resp->snscb_portname[5]) << 16) |
		    (((u_int64_t)resp->snscb_portname[6]) <<  8) |
		    (((u_int64_t)resp->snscb_portname[7]));

		wwnn =
		    (((u_int64_t)resp->snscb_nodename[0]) << 56) |
		    (((u_int64_t)resp->snscb_nodename[1]) << 48) |
		    (((u_int64_t)resp->snscb_nodename[2]) << 40) |
		    (((u_int64_t)resp->snscb_nodename[3]) << 32) |
		    (((u_int64_t)resp->snscb_nodename[4]) << 24) |
		    (((u_int64_t)resp->snscb_nodename[5]) << 16) |
		    (((u_int64_t)resp->snscb_nodename[6]) <<  8) |
		    (((u_int64_t)resp->snscb_nodename[7]));
		if (portid == 0 || wwpn == 0) {
			break;
		}

		switch (resp->snscb_port_type) {
		case 1:
			pt = "   N_Port";
			break;
		case 2:
			pt = "  NL_Port";
			break;
		case 3:
			pt = "F/NL_Port";
			break;
		case 0x7f:
			pt = "  Nx_Port";
			break;
		case 0x81:
			pt = "  F_port";
			break;
		case 0x82:
			pt = "  FL_Port";
			break;
		case 0x84:
			pt = "   E_port";
			break;
		default:
			pt = "?";
			break;
		}
		isp_prt(isp, ISP_LOGINFO,
		    "%s @ 0x%x, Node 0x%08x%08x Port %08x%08x",
		    pt, portid, ((u_int32_t) (wwnn >> 32)), ((u_int32_t) wwnn),
		    ((u_int32_t) (wwpn >> 32)), ((u_int32_t) wwpn));
		/*
		 * We're only interested in SCSI_FCP types (for now)
		 */
		if ((resp->snscb_fc4_types[2] & 1) == 0) {
			break;
		}
		/* In loop/public-loop topologies, skip the reserved ids. */
		if (fcp->isp_topo != TOPO_F_PORT)
			lrange = FC_SNS_ID+1;
		else
			lrange = 0;
		/*
		 * Is it already in our list?
		 */
		for (target = lrange; target < MAX_FC_TARG; target++) {
			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
				continue;
			}
			lp = &fcp->portdb[target];
			if (lp->port_wwn == wwpn && lp->node_wwn == wwnn) {
				lp->fabric_dev = 1;
				break;
			}
		}
		if (target < MAX_FC_TARG) {
			break;
		}
		/* Not known yet: find a free slot for it. */
		for (target = lrange; target < MAX_FC_TARG; target++) {
			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
				continue;
			}
			lp = &fcp->portdb[target];
			if (lp->port_wwn == 0) {
				break;
			}
		}
		if (target == MAX_FC_TARG) {
			isp_prt(isp, ISP_LOGWARN,
			    "no more space for fabric devices");
			break;
		}
		lp->node_wwn = wwnn;
		lp->port_wwn = wwpn;
		lp->portid = portid;
		lp->fabric_dev = 1;
		break;
	}
	case ISPASYNC_FW_CRASH:
	{
		/* Log the crash address (and bus on dual-bus cards), reinit. */
		u_int16_t mbox1, mbox6;
		mbox1 = ISP_READ(isp, OUTMAILBOX1);
		if (IS_DUALBUS(isp)) {
			mbox6 = ISP_READ(isp, OUTMAILBOX6);
		} else {
			mbox6 = 0;
		}
                isp_prt(isp, ISP_LOGERR,
                    "Internal Firmware Error on bus %d @ RISC Address 0x%x",
                    mbox6, mbox1);
		isp_reinit(isp);
		break;
	}
	default:
		break;
	}
	return (0);
}
   1025 
   1026 #include <machine/stdarg.h>
   1027 void
   1028 isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
   1029 {
   1030 	va_list ap;
   1031 	if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
   1032 		return;
   1033 	}
   1034 	printf("%s: ", isp->isp_name);
   1035 	va_start(ap, fmt);
   1036 	vprintf(fmt, ap);
   1037 	va_end(ap);
   1038 	printf("\n");
   1039 }
   1040