Home | History | Annotate | Line # | Download | only in ic
isp_netbsd.c revision 1.49
      1 /* $NetBSD: isp_netbsd.c,v 1.49 2001/09/28 16:23:19 mjacob Exp $ */
      2 /*
      3  * This driver, which is contained in NetBSD in the files:
      4  *
      5  *	sys/dev/ic/isp.c
      6  *	sys/dev/ic/isp_inline.h
      7  *	sys/dev/ic/isp_netbsd.c
      8  *	sys/dev/ic/isp_netbsd.h
      9  *	sys/dev/ic/isp_target.c
     10  *	sys/dev/ic/isp_target.h
     11  *	sys/dev/ic/isp_tpublic.h
     12  *	sys/dev/ic/ispmbox.h
     13  *	sys/dev/ic/ispreg.h
     14  *	sys/dev/ic/ispvar.h
     15  *	sys/microcode/isp/asm_sbus.h
     16  *	sys/microcode/isp/asm_1040.h
     17  *	sys/microcode/isp/asm_1080.h
     18  *	sys/microcode/isp/asm_12160.h
     19  *	sys/microcode/isp/asm_2100.h
     20  *	sys/microcode/isp/asm_2200.h
     21  *	sys/pci/isp_pci.c
     22  *	sys/sbus/isp_sbus.c
     23  *
     24  * Is being actively maintained by Matthew Jacob (mjacob (at) netbsd.org).
     25  * This driver also is shared source with FreeBSD, OpenBSD, Linux, Solaris,
     26  * Linux versions. This tends to be an interesting maintenance problem.
     27  *
     28  * Please coordinate with Matthew Jacob on changes you wish to make here.
     29  */
     30 /*
     31  * Platform (NetBSD) dependent common attachment code for Qlogic adapters.
     32  * Matthew Jacob <mjacob (at) nas.nasa.gov>
     33  */
     34 /*
     35  * Copyright (C) 1997, 1998, 1999 National Aeronautics & Space Administration
     36  * All rights reserved.
     37  *
     38  * Redistribution and use in source and binary forms, with or without
     39  * modification, are permitted provided that the following conditions
     40  * are met:
     41  * 1. Redistributions of source code must retain the above copyright
     42  *    notice, this list of conditions and the following disclaimer.
     43  * 2. Redistributions in binary form must reproduce the above copyright
     44  *    notice, this list of conditions and the following disclaimer in the
     45  *    documentation and/or other materials provided with the distribution.
     46  * 3. The name of the author may not be used to endorse or promote products
     47  *    derived from this software without specific prior written permission
     48  *
     49  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     50  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     51  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     52  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     53  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
     54  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     55  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     56  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     57  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
     58  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     59  */
     60 
     61 #include <dev/ic/isp_netbsd.h>
     62 #include <sys/scsiio.h>
     63 
     64 
     65 /*
     66  * Set a timeout for the watchdogging of a command.
     67  *
     68  * The dimensional analysis is
     69  *
     70  *	milliseconds * (seconds/millisecond) * (ticks/second) = ticks
     71  *
     72  *			=
     73  *
     74  *	(milliseconds / 1000) * hz = ticks
     75  *
     76  *
     77  * For timeouts less than 1 second, we'll get zero. Because of this, and
     78  * because we want to establish *our* timeout to be longer than what the
     79  * firmware might do, we just add 3 seconds at the back end.
     80  */
     81 #define	_XT(xs)	((((xs)->timeout/1000) * hz) + (3 * hz))
     82 
     83 static void isp_config_interrupts(struct device *);
     84 static void ispminphys_1020(struct buf *);
     85 static void ispminphys(struct buf *);
     86 static INLINE void ispcmd(struct ispsoftc *, XS_T *);
     87 static void isprequest(struct scsipi_channel *, scsipi_adapter_req_t, void *);
     88 static int
     89 ispioctl(struct scsipi_channel *, u_long, caddr_t, int, struct proc *);
     90 
     91 static void isp_polled_cmd(struct ispsoftc *, XS_T *);
     92 static void isp_dog(void *);
     93 static void isp_create_fc_worker(void *);
     94 static void isp_fc_worker(void *);
     95 
     96 /*
     97  * Complete attachment of hardware, include subdevices.
     98  */
     99 void
    100 isp_attach(struct ispsoftc *isp)
    101 {
    102 	isp->isp_state = ISP_RUNSTATE;
    103 
    104 	isp->isp_osinfo._adapter.adapt_dev = &isp->isp_osinfo._dev;
    105 	isp->isp_osinfo._adapter.adapt_nchannels = IS_DUALBUS(isp) ? 2 : 1;
    106 	isp->isp_osinfo._adapter.adapt_openings = isp->isp_maxcmds;
    107 	/*
    108 	 * It's not stated whether max_periph is limited by SPI
    109 	 * tag uage, but let's assume that it is.
    110 	 */
    111 	isp->isp_osinfo._adapter.adapt_max_periph = min(isp->isp_maxcmds, 255);
    112 	isp->isp_osinfo._adapter.adapt_ioctl = ispioctl;
    113 	isp->isp_osinfo._adapter.adapt_request = isprequest;
    114 	if (isp->isp_type <= ISP_HA_SCSI_1020A) {
    115 		isp->isp_osinfo._adapter.adapt_minphys = ispminphys_1020;
    116 	} else {
    117 		isp->isp_osinfo._adapter.adapt_minphys = ispminphys;
    118 	}
    119 
    120 	isp->isp_osinfo._chan.chan_adapter = &isp->isp_osinfo._adapter;
    121 	isp->isp_osinfo._chan.chan_bustype = &scsi_bustype;
    122 	isp->isp_osinfo._chan.chan_channel = 0;
    123 
    124 	/*
    125 	 * Until the midlayer is fixed to use REPORT LUNS, limit to 8 luns.
    126 	 */
    127 	isp->isp_osinfo._chan.chan_nluns = min(isp->isp_maxluns, 8);
    128 
    129 	if (IS_FC(isp)) {
    130 		isp->isp_osinfo._chan.chan_ntargets = MAX_FC_TARG;
    131 		isp->isp_osinfo._chan.chan_id = MAX_FC_TARG;
    132 		isp->isp_osinfo.threadwork = 1;
    133 		/*
    134 		 * Note that isp_create_fc_worker won't get called
    135 		 * until much much later (after proc0 is created).
    136 		 */
    137 		kthread_create(isp_create_fc_worker, isp);
    138 	} else {
    139 		int bus = 0;
    140 		sdparam *sdp = isp->isp_param;
    141 
    142 		isp->isp_osinfo._chan.chan_ntargets = MAX_TARGETS;
    143 		isp->isp_osinfo._chan.chan_id = sdp->isp_initiator_id;
    144 		isp->isp_osinfo.discovered[0] = 1 << sdp->isp_initiator_id;
    145 		if (IS_DUALBUS(isp)) {
    146 			isp->isp_osinfo._chan_b = isp->isp_osinfo._chan;
    147 			sdp++;
    148 			isp->isp_osinfo.discovered[1] =
    149 			    1 << sdp->isp_initiator_id;
    150 			isp->isp_osinfo._chan_b.chan_id = sdp->isp_initiator_id;
    151 			isp->isp_osinfo._chan_b.chan_channel = 1;
    152 		}
    153 		ISP_LOCK(isp);
    154 		(void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
    155 		if (IS_DUALBUS(isp)) {
    156 			bus++;
    157 			(void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
    158 		}
    159 		ISP_UNLOCK(isp);
    160 	}
    161 
    162 
    163 	/*
    164          * Defer enabling mailbox interrupts until later.
    165          */
    166         config_interrupts((struct device *) isp, isp_config_interrupts);
    167 
    168 	/*
    169 	 * And attach children (if any).
    170 	 */
    171 	config_found((void *)isp, &isp->isp_chanA, scsiprint);
    172 	if (IS_DUALBUS(isp)) {
    173 		config_found((void *)isp, &isp->isp_chanB, scsiprint);
    174 	}
    175 }
    176 
    177 
    178 static void
    179 isp_config_interrupts(struct device *self)
    180 {
    181         struct ispsoftc *isp = (struct ispsoftc *) self;
    182 
    183 	/*
    184 	 * After this point, we'll be doing the new configuration
    185 	 * schema which allows interrups, so we can do tsleep/wakeup
    186 	 * for mailbox stuff at that point.
    187 	 */
    188 	isp->isp_osinfo.no_mbox_ints = 0;
    189 }
    190 
    191 
    192 /*
    193  * minphys our xfers
    194  */
    195 
    196 static void
    197 ispminphys_1020(struct buf *bp)
    198 {
    199 	if (bp->b_bcount >= (1 << 24)) {
    200 		bp->b_bcount = (1 << 24);
    201 	}
    202 	minphys(bp);
    203 }
    204 
    205 static void
    206 ispminphys(struct buf *bp)
    207 {
    208 	if (bp->b_bcount >= (1 << 30)) {
    209 		bp->b_bcount = (1 << 30);
    210 	}
    211 	minphys(bp);
    212 }
    213 
    214 static int
    215 ispioctl(struct scsipi_channel *chan, u_long cmd, caddr_t addr, int flag,
    216 	struct proc *p)
    217 {
    218 	struct ispsoftc *isp = (void *)chan->chan_adapter->adapt_dev;
    219 	int retval = ENOTTY;
    220 
    221 	switch (cmd) {
    222 	case SCBUSIORESET:
    223 		ISP_LOCK(isp);
    224 		if (isp_control(isp, ISPCTL_RESET_BUS, &chan->chan_channel))
    225 			retval = EIO;
    226 		else
    227 			retval = 0;
    228 		ISP_UNLOCK(isp);
    229 		break;
    230 	case ISP_SDBLEV:
    231 	{
    232 		int olddblev = isp->isp_dblev;
    233 		isp->isp_dblev = *(int *)addr;
    234 		*(int *)addr = olddblev;
    235 		retval = 0;
    236 		break;
    237 	}
    238 	case ISP_RESETHBA:
    239 		ISP_LOCK(isp);
    240 		isp_reinit(isp);
    241 		ISP_UNLOCK(isp);
    242 		retval = 0;
    243 		break;
    244 	case ISP_FC_RESCAN:
    245 		if (IS_FC(isp)) {
    246 			ISP_LOCK(isp);
    247 			if (isp_fc_runstate(isp, 5 * 1000000)) {
    248 				retval = EIO;
    249 			} else {
    250 				retval = 0;
    251 			}
    252 			ISP_UNLOCK(isp);
    253 		}
    254 		break;
    255 	case ISP_FC_LIP:
    256 		if (IS_FC(isp)) {
    257 			ISP_LOCK(isp);
    258 			if (isp_control(isp, ISPCTL_SEND_LIP, 0)) {
    259 				retval = EIO;
    260 			} else {
    261 				retval = 0;
    262 			}
    263 			ISP_UNLOCK(isp);
    264 		}
    265 		break;
    266 	case ISP_FC_GETDINFO:
    267 	{
    268 		struct isp_fc_device *ifc = (struct isp_fc_device *) addr;
    269 		struct lportdb *lp;
    270 
    271 		if (ifc->loopid < 0 || ifc->loopid >= MAX_FC_TARG) {
    272 			retval = EINVAL;
    273 			break;
    274 		}
    275 		ISP_LOCK(isp);
    276 		lp = &FCPARAM(isp)->portdb[ifc->loopid];
    277 		if (lp->valid) {
    278 			ifc->loopid = lp->loopid;
    279 			ifc->portid = lp->portid;
    280 			ifc->node_wwn = lp->node_wwn;
    281 			ifc->port_wwn = lp->port_wwn;
    282 			retval = 0;
    283 		} else {
    284 			retval = ENODEV;
    285 		}
    286 		ISP_UNLOCK(isp);
    287 		break;
    288 	}
    289 	default:
    290 		break;
    291 	}
    292 	return (retval);
    293 }
    294 
    295 static INLINE void
    296 ispcmd(struct ispsoftc *isp, XS_T *xs)
    297 {
    298 	ISP_LOCK(isp);
    299 	if (isp->isp_state < ISP_RUNSTATE) {
    300 		DISABLE_INTS(isp);
    301 		isp_init(isp);
    302 		if (isp->isp_state != ISP_INITSTATE) {
    303 			ENABLE_INTS(isp);
    304 			ISP_UNLOCK(isp);
    305 			XS_SETERR(xs, HBA_BOTCH);
    306 			scsipi_done(xs);
    307 			return;
    308 		}
    309 		isp->isp_state = ISP_RUNSTATE;
    310 		ENABLE_INTS(isp);
    311 	}
    312 	/*
    313 	 * Handle the case of a FC card where the FC thread hasn't
    314 	 * fired up yet and we have loop state to clean up. If we
    315 	 * can't clear things up and we've never seen loop up, bounce
    316 	 * the command.
    317 	 */
    318 	if (IS_FC(isp) && isp->isp_osinfo.threadwork &&
    319 	    isp->isp_osinfo.thread == 0) {
    320 		volatile u_int8_t ombi = isp->isp_osinfo.no_mbox_ints;
    321 		int delay_time;
    322 
    323 		if (xs->xs_control & XS_CTL_POLL) {
    324 			isp->isp_osinfo.no_mbox_ints = 1;
    325 		}
    326 
    327 		if (isp->isp_osinfo.loop_checked == 0) {
    328 			delay_time = 10 * 1000000;
    329 			isp->isp_osinfo.loop_checked = 1;
    330 		} else {
    331 			delay_time = 250000;
    332 		}
    333 
    334 		if (isp_fc_runstate(isp, delay_time) != 0) {
    335 			if (xs->xs_control & XS_CTL_POLL) {
    336 				isp->isp_osinfo.no_mbox_ints = ombi;
    337 			}
    338 			if (FCPARAM(isp)->loop_seen_once == 0) {
    339 				XS_SETERR(xs, HBA_SELTIMEOUT);
    340 				scsipi_done(xs);
    341 				ISP_UNLOCK(isp);
    342 				return;
    343 			}
    344 			/*
    345 			 * Otherwise, fall thru to be queued up for later.
    346 			 */
    347 		} else {
    348 			int wasblocked =
    349 			    (isp->isp_osinfo.blocked || isp->isp_osinfo.paused);
    350 			isp->isp_osinfo.threadwork = 0;
    351 			isp->isp_osinfo.blocked =
    352 			    isp->isp_osinfo.paused = 0;
    353 			if (wasblocked) {
    354 				scsipi_channel_thaw(&isp->isp_chanA, 1);
    355 			}
    356 		}
    357 		if (xs->xs_control & XS_CTL_POLL) {
    358 			isp->isp_osinfo.no_mbox_ints = ombi;
    359 		}
    360 	}
    361 
    362 	if (isp->isp_osinfo.paused) {
    363 		isp_prt(isp, ISP_LOGWARN, "I/O while paused");
    364 		xs->error = XS_RESOURCE_SHORTAGE;
    365 		scsipi_done(xs);
    366 		ISP_UNLOCK(isp);
    367 		return;
    368 	}
    369 	if (isp->isp_osinfo.blocked) {
    370 		isp_prt(isp, ISP_LOGWARN, "I/O while blocked");
    371 		xs->error = XS_REQUEUE;
    372 		scsipi_done(xs);
    373 		ISP_UNLOCK(isp);
    374 		return;
    375 	}
    376 
    377 	if (xs->xs_control & XS_CTL_POLL) {
    378 		volatile u_int8_t ombi = isp->isp_osinfo.no_mbox_ints;
    379 		isp->isp_osinfo.no_mbox_ints = 1;
    380 		isp_polled_cmd(isp, xs);
    381 		isp->isp_osinfo.no_mbox_ints = ombi;
    382 		ISP_UNLOCK(isp);
    383 		return;
    384 	}
    385 
    386 	switch (isp_start(xs)) {
    387 	case CMD_QUEUED:
    388 		if (xs->timeout) {
    389 			callout_reset(&xs->xs_callout, _XT(xs), isp_dog, xs);
    390 		}
    391 		break;
    392 	case CMD_EAGAIN:
    393 		isp->isp_osinfo.paused = 1;
    394 		xs->error = XS_RESOURCE_SHORTAGE;
    395 		scsipi_channel_freeze(&isp->isp_chanA, 1);
    396 		if (IS_DUALBUS(isp)) {
    397 			scsipi_channel_freeze(&isp->isp_chanB, 1);
    398 		}
    399 		scsipi_done(xs);
    400 		break;
    401 	case CMD_RQLATER:
    402 		/*
    403 		 * We can only get RQLATER from FC devices (1 channel only)
    404 		 *
    405 		 * Also, if we've never seen loop up, bounce the command
    406 		 * (somebody has booted with no FC cable connected)
    407 		 */
    408 		if (FCPARAM(isp)->loop_seen_once == 0) {
    409 			XS_SETERR(xs, HBA_SELTIMEOUT);
    410 			scsipi_done(xs);
    411 			break;
    412 		}
    413 		if (isp->isp_osinfo.blocked == 0) {
    414 			isp->isp_osinfo.blocked = 1;
    415 			scsipi_channel_freeze(&isp->isp_chanA, 1);
    416 		}
    417 		xs->error = XS_REQUEUE;
    418 		scsipi_done(xs);
    419 		break;
    420 	case CMD_COMPLETE:
    421 		scsipi_done(xs);
    422 		break;
    423 	}
    424 	ISP_UNLOCK(isp);
    425 }
    426 
    427 static void
    428 isprequest(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
    429 {
    430 	struct ispsoftc *isp = (void *)chan->chan_adapter->adapt_dev;
    431 
    432 	switch (req) {
    433 	case ADAPTER_REQ_RUN_XFER:
    434 		ispcmd(isp, (XS_T *) arg);
    435 		break;
    436 
    437 	case ADAPTER_REQ_GROW_RESOURCES:
    438 		/* Not supported. */
    439 		break;
    440 
    441 	case ADAPTER_REQ_SET_XFER_MODE:
    442 	if (IS_SCSI(isp)) {
    443 		struct scsipi_xfer_mode *xm = arg;
    444 		int dflags = 0;
    445 		sdparam *sdp = SDPARAM(isp);
    446 
    447 		sdp += chan->chan_channel;
    448 		if (xm->xm_mode & PERIPH_CAP_TQING)
    449 			dflags |= DPARM_TQING;
    450 		if (xm->xm_mode & PERIPH_CAP_WIDE16)
    451 			dflags |= DPARM_WIDE;
    452 		if (xm->xm_mode & PERIPH_CAP_SYNC)
    453 			dflags |= DPARM_SYNC;
    454 		ISP_LOCK(isp);
    455 		sdp->isp_devparam[xm->xm_target].goal_flags |= dflags;
    456 		dflags = sdp->isp_devparam[xm->xm_target].goal_flags;
    457 		sdp->isp_devparam[xm->xm_target].dev_update = 1;
    458 		isp->isp_update |= (1 << chan->chan_channel);
    459 		ISP_UNLOCK(isp);
    460 		isp_prt(isp, ISP_LOGDEBUG1,
    461 		    "ispioctl: device flags 0x%x for %d.%d.X",
    462 		    dflags, chan->chan_channel, xm->xm_target);
    463 		break;
    464 	}
    465 	default:
    466 		break;
    467 	}
    468 }
    469 
    470 static void
    471 isp_polled_cmd(struct ispsoftc *isp, XS_T *xs)
    472 {
    473 	int result;
    474 	int infinite = 0, mswait;
    475 
    476 	result = isp_start(xs);
    477 
    478 	switch (result) {
    479 	case CMD_QUEUED:
    480 		break;
    481 	case CMD_RQLATER:
    482 		if (XS_NOERR(xs)) {
    483 			xs->error = XS_REQUEUE;
    484 		}
    485 	case CMD_EAGAIN:
    486 		if (XS_NOERR(xs)) {
    487 			xs->error = XS_RESOURCE_SHORTAGE;
    488 		}
    489 		/* FALLTHROUGH */
    490 	case CMD_COMPLETE:
    491 		scsipi_done(xs);
    492 		return;
    493 
    494 	}
    495 
    496 	/*
    497 	 * If we can't use interrupts, poll on completion.
    498 	 */
    499 	if ((mswait = XS_TIME(xs)) == 0)
    500 		infinite = 1;
    501 
    502 	while (mswait || infinite) {
    503 		u_int16_t isr, sema, mbox;
    504 		if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
    505 			isp_intr(isp, isr, sema, mbox);
    506 			if (XS_CMD_DONE_P(xs)) {
    507 				break;
    508 			}
    509 		}
    510 		USEC_DELAY(1000);
    511 		mswait -= 1;
    512 	}
    513 
    514 	/*
    515 	 * If no other error occurred but we didn't finish,
    516 	 * something bad happened.
    517 	 */
    518 	if (XS_CMD_DONE_P(xs) == 0) {
    519 		if (isp_control(isp, ISPCTL_ABORT_CMD, xs)) {
    520 			isp_reinit(isp);
    521 		}
    522 		if (XS_NOERR(xs)) {
    523 			XS_SETERR(xs, HBA_BOTCH);
    524 		}
    525 	}
    526 	scsipi_done(xs);
    527 }
    528 
    529 void
    530 isp_done(XS_T *xs)
    531 {
    532 	XS_CMD_S_DONE(xs);
    533 	if (XS_CMD_WDOG_P(xs) == 0) {
    534 		struct ispsoftc *isp = XS_ISP(xs);
    535 		callout_stop(&xs->xs_callout);
    536 		if (XS_CMD_GRACE_P(xs)) {
    537 			isp_prt(isp, ISP_LOGDEBUG1,
    538 			    "finished command on borrowed time");
    539 		}
    540 		XS_CMD_S_CLEAR(xs);
    541 		/*
    542 		 * Fixup- if we get a QFULL, we need
    543 		 * to set XS_BUSY as the error.
    544 		 */
    545 		if (xs->status == SCSI_QUEUE_FULL) {
    546 			xs->error = XS_BUSY;
    547 		}
    548 		if (isp->isp_osinfo.paused) {
    549 			isp->isp_osinfo.paused = 0;
    550 			scsipi_channel_timed_thaw(&isp->isp_chanA);
    551 			if (IS_DUALBUS(isp)) {
    552 				scsipi_channel_timed_thaw(&isp->isp_chanB);
    553 			}
    554 		}
    555 		scsipi_done(xs);
    556 	}
    557 }
    558 
    559 static void
    560 isp_dog(void *arg)
    561 {
    562 	XS_T *xs = arg;
    563 	struct ispsoftc *isp = XS_ISP(xs);
    564 	u_int16_t handle;
    565 
    566 	ISP_ILOCK(isp);
    567 	/*
    568 	 * We've decided this command is dead. Make sure we're not trying
    569 	 * to kill a command that's already dead by getting it's handle and
    570 	 * and seeing whether it's still alive.
    571 	 */
    572 	handle = isp_find_handle(isp, xs);
    573 	if (handle) {
    574 		u_int16_t isr, mbox, sema;
    575 
    576 		if (XS_CMD_DONE_P(xs)) {
    577 			isp_prt(isp, ISP_LOGDEBUG1,
    578 			    "watchdog found done cmd (handle 0x%x)", handle);
    579 			ISP_IUNLOCK(isp);
    580 			return;
    581 		}
    582 
    583 		if (XS_CMD_WDOG_P(xs)) {
    584 			isp_prt(isp, ISP_LOGDEBUG1,
    585 			    "recursive watchdog (handle 0x%x)", handle);
    586 			ISP_IUNLOCK(isp);
    587 			return;
    588 		}
    589 
    590 		XS_CMD_S_WDOG(xs);
    591 
    592 		if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
    593 			isp_intr(isp, isr, sema, mbox);
    594 
    595 		}
    596 		if (XS_CMD_DONE_P(xs)) {
    597 			isp_prt(isp, ISP_LOGDEBUG1,
    598 			    "watchdog cleanup for handle 0x%x", handle);
    599 			XS_CMD_C_WDOG(xs);
    600 			isp_done(xs);
    601 		} else if (XS_CMD_GRACE_P(xs)) {
    602 			isp_prt(isp, ISP_LOGDEBUG1,
    603 			    "watchdog timeout for handle 0x%x", handle);
    604 			/*
    605 			 * Make sure the command is *really* dead before we
    606 			 * release the handle (and DMA resources) for reuse.
    607 			 */
    608 			(void) isp_control(isp, ISPCTL_ABORT_CMD, arg);
    609 
    610 			/*
    611 			 * After this point, the comamnd is really dead.
    612 			 */
    613 			if (XS_XFRLEN(xs)) {
    614 				ISP_DMAFREE(isp, xs, handle);
    615 			}
    616 			isp_destroy_handle(isp, handle);
    617 			XS_SETERR(xs, XS_TIMEOUT);
    618 			XS_CMD_S_CLEAR(xs);
    619 			isp_done(xs);
    620 		} else {
    621 			u_int16_t iptr, optr;
    622 			ispreq_t *mp;
    623 			isp_prt(isp, ISP_LOGDEBUG2,
    624 			    "possible command timeout on handle %x", handle);
    625 			XS_CMD_C_WDOG(xs);
    626 			callout_reset(&xs->xs_callout, hz, isp_dog, xs);
    627 			if (isp_getrqentry(isp, &iptr, &optr, (void **) &mp)) {
    628 				ISP_UNLOCK(isp);
    629 				return;
    630 			}
    631 			XS_CMD_S_GRACE(xs);
    632 			MEMZERO((void *) mp, sizeof (*mp));
    633 			mp->req_header.rqs_entry_count = 1;
    634 			mp->req_header.rqs_entry_type = RQSTYPE_MARKER;
    635 			mp->req_modifier = SYNC_ALL;
    636 			mp->req_target = XS_CHANNEL(xs) << 7;
    637 			ISP_SWIZZLE_REQUEST(isp, mp);
    638 			ISP_ADD_REQUEST(isp, iptr);
    639 		}
    640 	} else {
    641 		isp_prt(isp, ISP_LOGDEBUG0, "watchdog with no command");
    642 	}
    643 	ISP_IUNLOCK(isp);
    644 }
    645 
    646 /*
    647  * Fibre Channel state cleanup thread
    648  */
    649 static void
    650 isp_create_fc_worker(void *arg)
    651 {
    652 	struct ispsoftc *isp = arg;
    653 
    654 	if (kthread_create1(isp_fc_worker, isp, &isp->isp_osinfo.thread,
    655 	    "%s:fc_thrd", isp->isp_name)) {
    656 		isp_prt(isp, ISP_LOGERR, "unable to create FC worker thread");
    657 		panic("isp_create_fc_worker");
    658 	}
    659 
    660 }
    661 
    662 static void
    663 isp_fc_worker(void *arg)
    664 {
    665 	void scsipi_run_queue(struct scsipi_channel *);
    666 	struct ispsoftc *isp = arg;
    667 
    668 	for (;;) {
    669 		int s;
    670 
    671 		/*
    672 		 * Note we do *not* use the ISP_LOCK/ISP_UNLOCK macros here.
    673 		 */
    674 		s = splbio();
    675 		while (isp->isp_osinfo.threadwork) {
    676 			isp->isp_osinfo.threadwork = 0;
    677 			if (isp_fc_runstate(isp, 10 * 1000000) == 0) {
    678 				break;
    679 			}
    680 			if  (isp->isp_osinfo.loop_checked &&
    681 			     FCPARAM(isp)->loop_seen_once == 0) {
    682 				splx(s);
    683 				goto skip;
    684 			}
    685 			isp->isp_osinfo.threadwork = 1;
    686 			splx(s);
    687 			delay(500 * 1000);
    688 			s = splbio();
    689 		}
    690 		if (FCPARAM(isp)->isp_fwstate != FW_READY ||
    691 		    FCPARAM(isp)->isp_loopstate != LOOP_READY) {
    692 			isp_prt(isp, ISP_LOGINFO, "isp_fc_runstate in vain");
    693 			isp->isp_osinfo.threadwork = 1;
    694 			splx(s);
    695 			continue;
    696 		}
    697 
    698 		if (isp->isp_osinfo.blocked) {
    699 			isp->isp_osinfo.blocked = 0;
    700 			isp_prt(isp, ISP_LOGDEBUG0,
    701 			    "restarting queues (freeze count %d)",
    702 			    isp->isp_chanA.chan_qfreeze);
    703 			scsipi_channel_thaw(&isp->isp_chanA, 1);
    704 		}
    705 
    706 		if (isp->isp_osinfo.thread == NULL)
    707 			break;
    708 
    709 skip:
    710 		(void) tsleep(&isp->isp_osinfo.thread, PRIBIO, "fcclnup", 0);
    711 
    712 		splx(s);
    713 	}
    714 
    715 	/* In case parent is waiting for us to exit. */
    716 	wakeup(&isp->isp_osinfo.thread);
    717 
    718 	kthread_exit(0);
    719 }
    720 
    721 /*
    722  * Free any associated resources prior to decommissioning and
    723  * set the card to a known state (so it doesn't wake up and kick
    724  * us when we aren't expecting it to).
    725  *
    726  * Locks are held before coming here.
    727  */
    728 void
    729 isp_uninit(struct ispsoftc *isp)
    730 {
    731 	isp_lock(isp);
    732 	/*
    733 	 * Leave with interrupts disabled.
    734 	 */
    735 	DISABLE_INTS(isp);
    736 	isp_unlock(isp);
    737 }
    738 
    739 int
    740 isp_async(struct ispsoftc *isp, ispasync_t cmd, void *arg)
    741 {
    742 	int bus, tgt;
    743 
    744 	switch (cmd) {
    745 	case ISPASYNC_NEW_TGT_PARAMS:
    746 	if (IS_SCSI(isp) && isp->isp_dblev) {
    747 		sdparam *sdp = isp->isp_param;
    748 		int flags;
    749 		struct scsipi_xfer_mode xm;
    750 
    751 		tgt = *((int *) arg);
    752 		bus = (tgt >> 16) & 0xffff;
    753 		tgt &= 0xffff;
    754 		sdp += bus;
    755 		flags = sdp->isp_devparam[tgt].actv_flags;
    756 
    757 		xm.xm_mode = 0;
    758 		xm.xm_period = sdp->isp_devparam[tgt].actv_period;
    759 		xm.xm_offset = sdp->isp_devparam[tgt].actv_offset;
    760 		xm.xm_target = tgt;
    761 
    762 		if ((flags & DPARM_SYNC) && xm.xm_period && xm.xm_offset)
    763 			xm.xm_mode |= PERIPH_CAP_SYNC;
    764 		if (flags & DPARM_WIDE)
    765 			xm.xm_mode |= PERIPH_CAP_WIDE16;
    766 		if (flags & DPARM_TQING)
    767 			xm.xm_mode |= PERIPH_CAP_TQING;
    768 		scsipi_async_event(bus? &isp->isp_chanB : &isp->isp_chanA,
    769 		    ASYNC_EVENT_XFER_MODE, &xm);
    770 		break;
    771 	}
    772 	case ISPASYNC_BUS_RESET:
    773 		bus = *((int *) arg);
    774 		scsipi_async_event(bus? &isp->isp_chanB : &isp->isp_chanA,
    775 		    ASYNC_EVENT_RESET, NULL);
    776 		isp_prt(isp, ISP_LOGINFO, "SCSI bus %d reset detected", bus);
    777 		break;
    778 	case ISPASYNC_LIP:
    779 		/*
    780 		 * Don't do queue freezes or blockage until we have the
    781 		 * thread running that can unfreeze/unblock us.
    782 		 */
    783 		if (isp->isp_osinfo.blocked == 0)  {
    784 			if (isp->isp_osinfo.thread) {
    785 				isp->isp_osinfo.blocked = 1;
    786 				scsipi_channel_freeze(&isp->isp_chanA, 1);
    787 			}
    788 		}
    789 		isp_prt(isp, ISP_LOGINFO, "LIP Received");
    790 		break;
    791 	case ISPASYNC_LOOP_RESET:
    792 		/*
    793 		 * Don't do queue freezes or blockage until we have the
    794 		 * thread running that can unfreeze/unblock us.
    795 		 */
    796 		if (isp->isp_osinfo.blocked == 0) {
    797 			if (isp->isp_osinfo.thread) {
    798 				isp->isp_osinfo.blocked = 1;
    799 				scsipi_channel_freeze(&isp->isp_chanA, 1);
    800 			}
    801 		}
    802 		isp_prt(isp, ISP_LOGINFO, "Loop Reset Received");
    803 		break;
    804 	case ISPASYNC_LOOP_DOWN:
    805 		/*
    806 		 * Don't do queue freezes or blockage until we have the
    807 		 * thread running that can unfreeze/unblock us.
    808 		 */
    809 		if (isp->isp_osinfo.blocked == 0) {
    810 			if (isp->isp_osinfo.thread) {
    811 				isp->isp_osinfo.blocked = 1;
    812 				scsipi_channel_freeze(&isp->isp_chanA, 1);
    813 			}
    814 		}
    815 		isp_prt(isp, ISP_LOGINFO, "Loop DOWN");
    816 		break;
    817         case ISPASYNC_LOOP_UP:
    818 		/*
    819 		 * Let the subsequent ISPASYNC_CHANGE_NOTIFY invoke
    820 		 * the FC worker thread. When the FC worker thread
    821 		 * is done, let *it* call scsipi_channel_thaw...
    822 		 */
    823 		isp_prt(isp, ISP_LOGINFO, "Loop UP");
    824 		break;
    825 	case ISPASYNC_PROMENADE:
    826 	if (IS_FC(isp) && isp->isp_dblev) {
    827 		const char fmt[] = "Target %d (Loop 0x%x) Port ID 0x%x "
    828 		    "(role %s) %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x";
    829 		const static char *roles[4] = {
    830 		    "None", "Target", "Initiator", "Target/Initiator"
    831 		};
    832 		fcparam *fcp = isp->isp_param;
    833 		int tgt = *((int *) arg);
    834 		struct lportdb *lp = &fcp->portdb[tgt];
    835 
    836 		isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid, lp->portid,
    837 		    roles[lp->roles & 0x3],
    838 		    (lp->valid)? "Arrived" : "Departed",
    839 		    (u_int32_t) (lp->port_wwn >> 32),
    840 		    (u_int32_t) (lp->port_wwn & 0xffffffffLL),
    841 		    (u_int32_t) (lp->node_wwn >> 32),
    842 		    (u_int32_t) (lp->node_wwn & 0xffffffffLL));
    843 		break;
    844 	}
    845 	case ISPASYNC_CHANGE_NOTIFY:
    846 		if (arg == ISPASYNC_CHANGE_PDB) {
    847 			isp_prt(isp, ISP_LOGINFO, "Port Database Changed");
    848 		} else if (arg == ISPASYNC_CHANGE_SNS) {
    849 			isp_prt(isp, ISP_LOGINFO,
    850 			    "Name Server Database Changed");
    851 		}
    852 
    853 		/*
    854 		 * We can set blocked here because we know it's now okay
    855 		 * to try and run isp_fc_runstate (in order to build loop
    856 		 * state). But we don't try and freeze the midlayer's queue
    857 		 * if we have no thread that we can wake to later unfreeze
    858 		 * it.
    859 		 */
    860 		if (isp->isp_osinfo.blocked == 0) {
    861 			isp->isp_osinfo.blocked = 1;
    862 			if (isp->isp_osinfo.thread) {
    863 				scsipi_channel_freeze(&isp->isp_chanA, 1);
    864 			}
    865 		}
    866 		/*
    867 		 * Note that we have work for the thread to do, and
    868 		 * if the thread is here already, wake it up.
    869 		 */
    870 		isp->isp_osinfo.threadwork++;
    871 		if (isp->isp_osinfo.thread) {
    872 			wakeup(&isp->isp_osinfo.thread);
    873 		} else {
    874 			isp_prt(isp, ISP_LOGDEBUG1, "no FC thread yet");
    875 		}
    876 		break;
    877 	case ISPASYNC_FABRIC_DEV:
    878 	{
    879 		int target, lrange;
    880 		struct lportdb *lp = NULL;
    881 		char *pt;
    882 		sns_ganrsp_t *resp = (sns_ganrsp_t *) arg;
    883 		u_int32_t portid;
    884 		u_int64_t wwpn, wwnn;
    885 		fcparam *fcp = isp->isp_param;
    886 
    887 		portid =
    888 		    (((u_int32_t) resp->snscb_port_id[0]) << 16) |
    889 		    (((u_int32_t) resp->snscb_port_id[1]) << 8) |
    890 		    (((u_int32_t) resp->snscb_port_id[2]));
    891 
    892 		wwpn =
    893 		    (((u_int64_t)resp->snscb_portname[0]) << 56) |
    894 		    (((u_int64_t)resp->snscb_portname[1]) << 48) |
    895 		    (((u_int64_t)resp->snscb_portname[2]) << 40) |
    896 		    (((u_int64_t)resp->snscb_portname[3]) << 32) |
    897 		    (((u_int64_t)resp->snscb_portname[4]) << 24) |
    898 		    (((u_int64_t)resp->snscb_portname[5]) << 16) |
    899 		    (((u_int64_t)resp->snscb_portname[6]) <<  8) |
    900 		    (((u_int64_t)resp->snscb_portname[7]));
    901 
    902 		wwnn =
    903 		    (((u_int64_t)resp->snscb_nodename[0]) << 56) |
    904 		    (((u_int64_t)resp->snscb_nodename[1]) << 48) |
    905 		    (((u_int64_t)resp->snscb_nodename[2]) << 40) |
    906 		    (((u_int64_t)resp->snscb_nodename[3]) << 32) |
    907 		    (((u_int64_t)resp->snscb_nodename[4]) << 24) |
    908 		    (((u_int64_t)resp->snscb_nodename[5]) << 16) |
    909 		    (((u_int64_t)resp->snscb_nodename[6]) <<  8) |
    910 		    (((u_int64_t)resp->snscb_nodename[7]));
    911 		if (portid == 0 || wwpn == 0) {
    912 			break;
    913 		}
    914 
    915 		switch (resp->snscb_port_type) {
    916 		case 1:
    917 			pt = "   N_Port";
    918 			break;
    919 		case 2:
    920 			pt = "  NL_Port";
    921 			break;
    922 		case 3:
    923 			pt = "F/NL_Port";
    924 			break;
    925 		case 0x7f:
    926 			pt = "  Nx_Port";
    927 			break;
    928 		case 0x81:
    929 			pt = "  F_port";
    930 			break;
    931 		case 0x82:
    932 			pt = "  FL_Port";
    933 			break;
    934 		case 0x84:
    935 			pt = "   E_port";
    936 			break;
    937 		default:
    938 			pt = "?";
    939 			break;
    940 		}
    941 		isp_prt(isp, ISP_LOGINFO,
    942 		    "%s @ 0x%x, Node 0x%08x%08x Port %08x%08x",
    943 		    pt, portid, ((u_int32_t) (wwnn >> 32)), ((u_int32_t) wwnn),
    944 		    ((u_int32_t) (wwpn >> 32)), ((u_int32_t) wwpn));
    945 		/*
    946 		 * We're only interested in SCSI_FCP types (for now)
    947 		 */
    948 		if ((resp->snscb_fc4_types[2] & 1) == 0) {
    949 			break;
    950 		}
    951 		if (fcp->isp_topo != TOPO_F_PORT)
    952 			lrange = FC_SNS_ID+1;
    953 		else
    954 			lrange = 0;
    955 		/*
    956 		 * Is it already in our list?
    957 		 */
    958 		for (target = lrange; target < MAX_FC_TARG; target++) {
    959 			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
    960 				continue;
    961 			}
    962 			lp = &fcp->portdb[target];
    963 			if (lp->port_wwn == wwpn && lp->node_wwn == wwnn) {
    964 				lp->fabric_dev = 1;
    965 				break;
    966 			}
    967 		}
    968 		if (target < MAX_FC_TARG) {
    969 			break;
    970 		}
    971 		for (target = lrange; target < MAX_FC_TARG; target++) {
    972 			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
    973 				continue;
    974 			}
    975 			lp = &fcp->portdb[target];
    976 			if (lp->port_wwn == 0) {
    977 				break;
    978 			}
    979 		}
    980 		if (target == MAX_FC_TARG) {
    981 			isp_prt(isp, ISP_LOGWARN,
    982 			    "no more space for fabric devices");
    983 			break;
    984 		}
    985 		lp->node_wwn = wwnn;
    986 		lp->port_wwn = wwpn;
    987 		lp->portid = portid;
    988 		lp->fabric_dev = 1;
    989 		break;
    990 	}
    991 	case ISPASYNC_FW_CRASH:
    992 	{
    993 		u_int16_t mbox1, mbox6;
    994 		mbox1 = ISP_READ(isp, OUTMAILBOX1);
    995 		if (IS_DUALBUS(isp)) {
    996 			mbox6 = ISP_READ(isp, OUTMAILBOX6);
    997 		} else {
    998 			mbox6 = 0;
    999 		}
   1000                 isp_prt(isp, ISP_LOGERR,
   1001                     "Internal Firmware Error on bus %d @ RISC Address 0x%x",
   1002                     mbox6, mbox1);
   1003 		isp_reinit(isp);
   1004 		break;
   1005 	}
   1006 	default:
   1007 		break;
   1008 	}
   1009 	return (0);
   1010 }
   1011 
   1012 #include <machine/stdarg.h>
   1013 void
   1014 isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
   1015 {
   1016 	va_list ap;
   1017 	if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
   1018 		return;
   1019 	}
   1020 	printf("%s: ", isp->isp_name);
   1021 	va_start(ap, fmt);
   1022 	vprintf(fmt, ap);
   1023 	va_end(ap);
   1024 	printf("\n");
   1025 }
   1026