      1 /* $NetBSD: isp_netbsd.c,v 1.72 2007/01/13 19:41:12 cube Exp $ */
      2 /*
      3  * This driver, which is contained in NetBSD in the files:
      4  *
      5  *	sys/dev/ic/isp.c
      6  *	sys/dev/ic/isp_inline.h
      7  *	sys/dev/ic/isp_netbsd.c
      8  *	sys/dev/ic/isp_netbsd.h
      9  *	sys/dev/ic/isp_target.c
     10  *	sys/dev/ic/isp_target.h
     11  *	sys/dev/ic/isp_tpublic.h
     12  *	sys/dev/ic/ispmbox.h
     13  *	sys/dev/ic/ispreg.h
     14  *	sys/dev/ic/ispvar.h
     15  *	sys/microcode/isp/asm_sbus.h
     16  *	sys/microcode/isp/asm_1040.h
     17  *	sys/microcode/isp/asm_1080.h
     18  *	sys/microcode/isp/asm_12160.h
     19  *	sys/microcode/isp/asm_2100.h
     20  *	sys/microcode/isp/asm_2200.h
     21  *	sys/pci/isp_pci.c
     22  *	sys/sbus/isp_sbus.c
     23  *
      24  * is being actively maintained by Matthew Jacob (mjacob@NetBSD.org).
      25  * This driver also shares source with the FreeBSD, OpenBSD, Linux, and
      26  * Solaris versions. This tends to be an interesting maintenance problem.
     27  *
     28  * Please coordinate with Matthew Jacob on changes you wish to make here.
     29  */
     30 /*
     31  * Platform (NetBSD) dependent common attachment code for Qlogic adapters.
      32  * Matthew Jacob <mjacob@nas.nasa.gov>
     33  */
     34 /*
     35  * Copyright (C) 1997, 1998, 1999 National Aeronautics & Space Administration
     36  * All rights reserved.
     37  *
     38  * Redistribution and use in source and binary forms, with or without
     39  * modification, are permitted provided that the following conditions
     40  * are met:
     41  * 1. Redistributions of source code must retain the above copyright
     42  *    notice, this list of conditions and the following disclaimer.
     43  * 2. Redistributions in binary form must reproduce the above copyright
     44  *    notice, this list of conditions and the following disclaimer in the
     45  *    documentation and/or other materials provided with the distribution.
     46  * 3. The name of the author may not be used to endorse or promote products
     47  *    derived from this software without specific prior written permission
     48  *
     49  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     50  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     51  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     52  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     53  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
     54  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     55  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     56  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     57  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
     58  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     59  */
     60 
     61 #include <sys/cdefs.h>
     62 __KERNEL_RCSID(0, "$NetBSD: isp_netbsd.c,v 1.72 2007/01/13 19:41:12 cube Exp $");
     63 
     64 #include <dev/ic/isp_netbsd.h>
     65 #include <sys/scsiio.h>
     66 
     67 
     68 /*
     69  * Set a timeout for the watchdogging of a command.
     70  *
     71  * The dimensional analysis is
     72  *
     73  *	milliseconds * (seconds/millisecond) * (ticks/second) = ticks
     74  *
     75  *			=
     76  *
     77  *	(milliseconds / 1000) * hz = ticks
     78  *
     79  *
     80  * For timeouts less than 1 second, we'll get zero. Because of this, and
     81  * because we want to establish *our* timeout to be longer than what the
     82  * firmware might do, we just add 3 seconds at the back end.
     83  */
     84 #define	_XT(xs)	((((xs)->timeout/1000) * hz) + (3 * hz))
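
         /*
          * Worked example (illustrative values, not taken from this file):
          * with hz = 100 and a midlayer timeout of 30000 milliseconds, _XT()
          * yields (30000 / 1000) * 100 + (3 * 100) = 3300 ticks, i.e. a 33
          * second watchdog.  A 500 millisecond timeout truncates to zero
          * ticks before the pad, so the 3 * hz pad alone (300 ticks, i.e.
          * 3 seconds) still applies.
          */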
     85 
     86 static void isp_config_interrupts(struct device *);
     87 static void ispminphys_1020(struct buf *);
     88 static void ispminphys(struct buf *);
     89 static INLINE void ispcmd(struct ispsoftc *, XS_T *);
     90 static void isprequest(struct scsipi_channel *, scsipi_adapter_req_t, void *);
     91 static int
     92 ispioctl(struct scsipi_channel *, u_long, caddr_t, int, struct proc *);
     93 
     94 static void isp_polled_cmd(struct ispsoftc *, XS_T *);
     95 static void isp_dog(void *);
     96 static void isp_create_fc_worker(void *);
     97 static void isp_fc_worker(void *);
     98 
     99 /*
     100  * Complete attachment of hardware, including subdevices.
    101  */
    102 void
    103 isp_attach(struct ispsoftc *isp)
    104 {
    105 	isp->isp_state = ISP_RUNSTATE;
    106 
    107 	isp->isp_osinfo._adapter.adapt_dev = &isp->isp_osinfo._dev;
    108 	isp->isp_osinfo._adapter.adapt_nchannels = IS_DUALBUS(isp) ? 2 : 1;
    109 	isp->isp_osinfo._adapter.adapt_openings = isp->isp_maxcmds;
    110 	/*
    111 	 * It's not stated whether max_periph is limited by SPI
     112 	 * tag usage, but let's assume that it is.
    113 	 */
    114 	isp->isp_osinfo._adapter.adapt_max_periph = min(isp->isp_maxcmds, 255);
    115 	isp->isp_osinfo._adapter.adapt_ioctl = ispioctl;
    116 	isp->isp_osinfo._adapter.adapt_request = isprequest;
    117 	if (isp->isp_type <= ISP_HA_SCSI_1020A) {
    118 		isp->isp_osinfo._adapter.adapt_minphys = ispminphys_1020;
    119 	} else {
    120 		isp->isp_osinfo._adapter.adapt_minphys = ispminphys;
    121 	}
    122 
    123 	isp->isp_osinfo._chan.chan_adapter = &isp->isp_osinfo._adapter;
    124 	isp->isp_osinfo._chan.chan_bustype = &scsi_bustype;
    125 	isp->isp_osinfo._chan.chan_channel = 0;
    126 
    127 	/*
    128 	 * Until the midlayer is fixed to use REPORT LUNS, limit to 8 luns.
    129 	 */
    130 	isp->isp_osinfo._chan.chan_nluns = min(isp->isp_maxluns, 8);
    131 
    132 	if (IS_FC(isp)) {
    133 #if 0	/* XXX channel "settle" time seems to sidestep some nasty race */
     134 		isp->isp_osinfo._chan.chan_flags = SCSIPI_CHAN_NOSETTLE;
    135 #endif
    136 		isp->isp_osinfo._chan.chan_ntargets = MAX_FC_TARG;
    137 		isp->isp_osinfo._chan.chan_id = MAX_FC_TARG;
    138 		isp->isp_osinfo.threadwork = 1;
    139 		/*
    140 		 * Note that isp_create_fc_worker won't get called
     141 		 * until much later (after proc0 is created).
    142 		 */
    143 		kthread_create(isp_create_fc_worker, isp);
    144 #ifdef	ISP_FW_CRASH_DUMP
    145 		if (IS_2200(isp)) {
    146 			FCPARAM(isp)->isp_dump_data =
    147 			    malloc(QLA2200_RISC_IMAGE_DUMP_SIZE, M_DEVBUF,
    148 				M_NOWAIT);
    149 		} else if (IS_23XX(isp)) {
    150 			FCPARAM(isp)->isp_dump_data =
    151 			    malloc(QLA2300_RISC_IMAGE_DUMP_SIZE, M_DEVBUF,
    152 				M_NOWAIT);
    153 		}
    154 		if (FCPARAM(isp)->isp_dump_data)
    155 			FCPARAM(isp)->isp_dump_data[0] = 0;
    156 #endif
    157 	} else {
    158 		int bus = 0;
    159 		sdparam *sdp = isp->isp_param;
    160 
    161 		isp->isp_osinfo._chan.chan_ntargets = MAX_TARGETS;
    162 		isp->isp_osinfo._chan.chan_id = sdp->isp_initiator_id;
    163 		isp->isp_osinfo.discovered[0] = 1 << sdp->isp_initiator_id;
    164 		if (IS_DUALBUS(isp)) {
    165 			isp->isp_osinfo._chan_b = isp->isp_osinfo._chan;
    166 			sdp++;
    167 			isp->isp_osinfo.discovered[1] =
    168 			    1 << sdp->isp_initiator_id;
    169 			isp->isp_osinfo._chan_b.chan_id = sdp->isp_initiator_id;
    170 			isp->isp_osinfo._chan_b.chan_channel = 1;
    171 		}
    172 		ISP_LOCK(isp);
    173 		(void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
    174 		if (IS_DUALBUS(isp)) {
    175 			bus++;
    176 			(void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
    177 		}
    178 		ISP_UNLOCK(isp);
    179 	}
    180 
    181 
    182 	/*
     183 	 * Defer enabling mailbox interrupts until later.
     184 	 */
     185 	config_interrupts((struct device *) isp, isp_config_interrupts);
    186 
    187 	/*
    188 	 * And attach children (if any).
    189 	 */
    190 	config_found((void *)isp, &isp->isp_chanA, scsiprint);
    191 	if (IS_DUALBUS(isp)) {
    192 		config_found((void *)isp, &isp->isp_chanB, scsiprint);
    193 	}
    194 }
    195 
    196 
    197 static void
    198 isp_config_interrupts(struct device *self)
    199 {
     200 	struct ispsoftc *isp = (struct ispsoftc *) self;
    201 
    202 	/*
     203 	 * After this point we're in the configuration phase that allows
     204 	 * interrupts, so mailbox commands can use tsleep/wakeup instead of
     205 	 * polling, where that's allowed.
    206 	 */
    207 	if (IS_FC(isp)) {
    208 		isp->isp_osinfo.no_mbox_ints = 0;
    209 	}
    210 }
    211 
    212 
    213 /*
    214  * minphys our xfers
    215  */
    216 
    217 static void
    218 ispminphys_1020(struct buf *bp)
    219 {
    220 	if (bp->b_bcount >= (1 << 24)) {
    221 		bp->b_bcount = (1 << 24);
    222 	}
    223 	minphys(bp);
    224 }
    225 
    226 static void
    227 ispminphys(struct buf *bp)
    228 {
    229 	if (bp->b_bcount >= (1 << 30)) {
    230 		bp->b_bcount = (1 << 30);
    231 	}
    232 	minphys(bp);
    233 }
    234 
    235 static int
    236 ispioctl(struct scsipi_channel *chan, u_long cmd, caddr_t addr,
    237     int flag, struct proc *p)
    238 {
    239 	struct ispsoftc *isp = (void *)chan->chan_adapter->adapt_dev;
    240 	int retval = ENOTTY;
    241 
    242 	switch (cmd) {
    243 #ifdef	ISP_FW_CRASH_DUMP
    244 	case ISP_GET_FW_CRASH_DUMP:
    245 	{
    246 		u_int16_t *ptr = FCPARAM(isp)->isp_dump_data;
    247 		size_t sz;
    248 
    249 		retval = 0;
    250 		if (IS_2200(isp))
    251 			sz = QLA2200_RISC_IMAGE_DUMP_SIZE;
    252 		else
    253 			sz = QLA2300_RISC_IMAGE_DUMP_SIZE;
    254 		ISP_LOCK(isp);
    255 		if (ptr && *ptr) {
    256 			void *uaddr = *((void **) addr);
    257 			if (copyout(ptr, uaddr, sz)) {
    258 				retval = EFAULT;
    259 			} else {
    260 				*ptr = 0;
    261 			}
    262 		} else {
    263 			retval = ENXIO;
    264 		}
    265 		ISP_UNLOCK(isp);
    266 		break;
    267 	}
    268 
    269 	case ISP_FORCE_CRASH_DUMP:
    270 		ISP_LOCK(isp);
    271 		if (isp->isp_osinfo.blocked == 0) {
     272 			isp->isp_osinfo.blocked = 1;
     273 			scsipi_channel_freeze(&isp->isp_chanA, 1);
     274 		}
    275 		isp_fw_dump(isp);
    276 		isp_reinit(isp);
    277 		ISP_UNLOCK(isp);
    278 		retval = 0;
    279 		break;
    280 #endif
    281 	case ISP_SDBLEV:
    282 	{
    283 		int olddblev = isp->isp_dblev;
    284 		isp->isp_dblev = *(int *)addr;
    285 		*(int *)addr = olddblev;
    286 		retval = 0;
    287 		break;
    288 	}
    289 	case ISP_RESETHBA:
    290 		ISP_LOCK(isp);
    291 		isp_reinit(isp);
    292 		ISP_UNLOCK(isp);
    293 		retval = 0;
    294 		break;
    295 	case ISP_RESCAN:
    296 		if (IS_FC(isp)) {
    297 			ISP_LOCK(isp);
    298 			if (isp_fc_runstate(isp, 5 * 1000000)) {
    299 				retval = EIO;
    300 			} else {
    301 				retval = 0;
    302 			}
    303 			ISP_UNLOCK(isp);
    304 		}
    305 		break;
    306 	case ISP_FC_LIP:
    307 		if (IS_FC(isp)) {
    308 			ISP_LOCK(isp);
    309 			if (isp_control(isp, ISPCTL_SEND_LIP, 0)) {
    310 				retval = EIO;
    311 			} else {
    312 				retval = 0;
    313 			}
    314 			ISP_UNLOCK(isp);
    315 		}
    316 		break;
    317 	case ISP_FC_GETDINFO:
    318 	{
    319 		struct isp_fc_device *ifc = (struct isp_fc_device *) addr;
    320 		struct lportdb *lp;
    321 
    322 		if (/* ifc->loopid < 0 || */ ifc->loopid >= MAX_FC_TARG) {
    323 			retval = EINVAL;
    324 			break;
    325 		}
    326 		ISP_LOCK(isp);
    327 		lp = &FCPARAM(isp)->portdb[ifc->loopid];
    328 		if (lp->valid) {
    329 			ifc->loopid = lp->loopid;
    330 			ifc->portid = lp->portid;
    331 			ifc->node_wwn = lp->node_wwn;
    332 			ifc->port_wwn = lp->port_wwn;
    333 			retval = 0;
    334 		} else {
    335 			retval = ENODEV;
    336 		}
    337 		ISP_UNLOCK(isp);
    338 		break;
    339 	}
    340 	case ISP_GET_STATS:
    341 	{
    342 		isp_stats_t *sp = (isp_stats_t *) addr;
    343 
    344 		MEMZERO(sp, sizeof (*sp));
    345 		sp->isp_stat_version = ISP_STATS_VERSION;
    346 		sp->isp_type = isp->isp_type;
    347 		sp->isp_revision = isp->isp_revision;
    348 		ISP_LOCK(isp);
    349 		sp->isp_stats[ISP_INTCNT] = isp->isp_intcnt;
    350 		sp->isp_stats[ISP_INTBOGUS] = isp->isp_intbogus;
    351 		sp->isp_stats[ISP_INTMBOXC] = isp->isp_intmboxc;
    352 		sp->isp_stats[ISP_INGOASYNC] = isp->isp_intoasync;
    353 		sp->isp_stats[ISP_RSLTCCMPLT] = isp->isp_rsltccmplt;
    354 		sp->isp_stats[ISP_FPHCCMCPLT] = isp->isp_fphccmplt;
    355 		sp->isp_stats[ISP_RSCCHIWAT] = isp->isp_rscchiwater;
    356 		sp->isp_stats[ISP_FPCCHIWAT] = isp->isp_fpcchiwater;
    357 		ISP_UNLOCK(isp);
    358 		retval = 0;
    359 		break;
    360 	}
    361 	case ISP_CLR_STATS:
    362 		ISP_LOCK(isp);
    363 		isp->isp_intcnt = 0;
    364 		isp->isp_intbogus = 0;
    365 		isp->isp_intmboxc = 0;
    366 		isp->isp_intoasync = 0;
    367 		isp->isp_rsltccmplt = 0;
    368 		isp->isp_fphccmplt = 0;
    369 		isp->isp_rscchiwater = 0;
    370 		isp->isp_fpcchiwater = 0;
    371 		ISP_UNLOCK(isp);
    372 		retval = 0;
    373 		break;
    374 	case ISP_FC_GETHINFO:
    375 	{
    376 		struct isp_hba_device *hba = (struct isp_hba_device *) addr;
    377 		MEMZERO(hba, sizeof (*hba));
    378 		ISP_LOCK(isp);
    379 		hba->fc_speed = FCPARAM(isp)->isp_gbspeed;
    380 		hba->fc_scsi_supported = 1;
    381 		hba->fc_topology = FCPARAM(isp)->isp_topo + 1;
    382 		hba->fc_loopid = FCPARAM(isp)->isp_loopid;
    383 		hba->nvram_node_wwn = FCPARAM(isp)->isp_nodewwn;
    384 		hba->nvram_port_wwn = FCPARAM(isp)->isp_portwwn;
    385 		hba->active_node_wwn = ISP_NODEWWN(isp);
    386 		hba->active_port_wwn = ISP_PORTWWN(isp);
     387 		ISP_UNLOCK(isp);
         		retval = 0;
     388 		break;
    389 	}
    390 	case SCBUSIORESET:
    391 		ISP_LOCK(isp);
    392 		if (isp_control(isp, ISPCTL_RESET_BUS, &chan->chan_channel))
    393 			retval = EIO;
    394 		else
    395 			retval = 0;
    396 		ISP_UNLOCK(isp);
    397 		break;
    398 	default:
    399 		break;
    400 	}
    401 	return (retval);
    402 }
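
         /*
          * Illustrative only: a minimal userland sketch of exercising one of
          * the ioctls handled above (ISP_SDBLEV installs a new isp_dblev and
          * hands the previous value back in the same int).  It assumes the
          * ioctl definitions are visible to userland as <dev/ic/isp_ioctl.h>
          * and that the adapter passes these ioctls through its scsibus(4)
          * node (here /dev/scsibus0); both the header path and the device
          * node are assumptions to adjust for the system at hand.
          *
          *	#include <sys/ioctl.h>
          *	#include <dev/ic/isp_ioctl.h>
          *	#include <err.h>
          *	#include <fcntl.h>
          *	#include <stdio.h>
          *	#include <unistd.h>
          *
          *	int
          *	main(void)
          *	{
          *		int fd, level;
          *
          *		fd = open("/dev/scsibus0", O_RDWR);
          *		if (fd < 0)
          *			err(1, "open");
          *		level = 1;
          *		if (ioctl(fd, ISP_SDBLEV, &level) == -1)
          *			err(1, "ISP_SDBLEV");
          *		printf("previous debug level was 0x%x\n", level);
          *		close(fd);
          *		return (0);
          *	}
          */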
    403 
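         /*
          * Run a single command from the midlayer: bring the HBA to run state
          * if necessary, cope with FC loop state that hasn't settled yet, and
          * either poll the command or queue it with a watchdog timeout.
          */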
    404 static INLINE void
    405 ispcmd(struct ispsoftc *isp, XS_T *xs)
    406 {
    407 	ISP_LOCK(isp);
    408 	if (isp->isp_state < ISP_RUNSTATE) {
    409 		DISABLE_INTS(isp);
    410 		isp_init(isp);
    411 		if (isp->isp_state != ISP_INITSTATE) {
    412 			ENABLE_INTS(isp);
    413 			ISP_UNLOCK(isp);
    414 			isp_prt(isp, ISP_LOGERR, "isp not at init state");
    415 			XS_SETERR(xs, HBA_BOTCH);
    416 			scsipi_done(xs);
    417 			return;
    418 		}
    419 		isp->isp_state = ISP_RUNSTATE;
    420 		ENABLE_INTS(isp);
    421 	}
    422 	/*
    423 	 * Handle the case of a FC card where the FC thread hasn't
    424 	 * fired up yet and we have loop state to clean up. If we
    425 	 * can't clear things up and we've never seen loop up, bounce
    426 	 * the command.
    427 	 */
    428 	if (IS_FC(isp) && isp->isp_osinfo.threadwork &&
    429 	    isp->isp_osinfo.thread == 0) {
    430 		volatile u_int8_t ombi = isp->isp_osinfo.no_mbox_ints;
    431 		int delay_time;
    432 
    433 		if (xs->xs_control & XS_CTL_POLL) {
    434 			isp->isp_osinfo.no_mbox_ints = 1;
    435 		}
    436 
    437 		if (isp->isp_osinfo.loop_checked == 0) {
    438 			delay_time = 10 * 1000000;
    439 			isp->isp_osinfo.loop_checked = 1;
    440 		} else {
    441 			delay_time = 250000;
    442 		}
    443 
    444 		if (isp_fc_runstate(isp, delay_time) != 0) {
    445 			if (xs->xs_control & XS_CTL_POLL) {
    446 				isp->isp_osinfo.no_mbox_ints = ombi;
    447 			}
    448 			if (FCPARAM(isp)->loop_seen_once == 0) {
    449 				XS_SETERR(xs, HBA_SELTIMEOUT);
    450 				scsipi_done(xs);
    451 				ISP_UNLOCK(isp);
    452 				return;
    453 			}
    454 			/*
    455 			 * Otherwise, fall thru to be queued up for later.
    456 			 */
    457 		} else {
    458 			int wasblocked =
    459 			    (isp->isp_osinfo.blocked || isp->isp_osinfo.paused);
    460 			isp->isp_osinfo.threadwork = 0;
    461 			isp->isp_osinfo.blocked =
    462 			    isp->isp_osinfo.paused = 0;
    463 			if (wasblocked) {
    464 				scsipi_channel_thaw(&isp->isp_chanA, 1);
    465 			}
    466 		}
    467 		if (xs->xs_control & XS_CTL_POLL) {
    468 			isp->isp_osinfo.no_mbox_ints = ombi;
    469 		}
    470 	}
    471 
    472 	if (isp->isp_osinfo.paused) {
    473 		isp_prt(isp, ISP_LOGWARN, "I/O while paused");
    474 		xs->error = XS_RESOURCE_SHORTAGE;
    475 		scsipi_done(xs);
    476 		ISP_UNLOCK(isp);
    477 		return;
    478 	}
    479 	if (isp->isp_osinfo.blocked) {
    480 		isp_prt(isp, ISP_LOGWARN, "I/O while blocked");
    481 		xs->error = XS_REQUEUE;
    482 		scsipi_done(xs);
    483 		ISP_UNLOCK(isp);
    484 		return;
    485 	}
    486 
    487 	if (xs->xs_control & XS_CTL_POLL) {
    488 		volatile u_int8_t ombi = isp->isp_osinfo.no_mbox_ints;
    489 		isp->isp_osinfo.no_mbox_ints = 1;
    490 		isp_polled_cmd(isp, xs);
    491 		isp->isp_osinfo.no_mbox_ints = ombi;
    492 		ISP_UNLOCK(isp);
    493 		return;
    494 	}
    495 
    496 	switch (isp_start(xs)) {
    497 	case CMD_QUEUED:
    498 		if (xs->timeout) {
    499 			callout_reset(&xs->xs_callout, _XT(xs), isp_dog, xs);
    500 		}
    501 		break;
    502 	case CMD_EAGAIN:
    503 		isp->isp_osinfo.paused = 1;
    504 		xs->error = XS_RESOURCE_SHORTAGE;
    505 		scsipi_channel_freeze(&isp->isp_chanA, 1);
    506 		if (IS_DUALBUS(isp)) {
    507 			scsipi_channel_freeze(&isp->isp_chanB, 1);
    508 		}
    509 		scsipi_done(xs);
    510 		break;
    511 	case CMD_RQLATER:
    512 		/*
    513 		 * We can only get RQLATER from FC devices (1 channel only)
    514 		 *
    515 		 * Also, if we've never seen loop up, bounce the command
    516 		 * (somebody has booted with no FC cable connected)
    517 		 */
    518 		if (FCPARAM(isp)->loop_seen_once == 0) {
    519 			XS_SETERR(xs, HBA_SELTIMEOUT);
    520 			scsipi_done(xs);
    521 			break;
    522 		}
    523 		if (isp->isp_osinfo.blocked == 0) {
    524 			isp->isp_osinfo.blocked = 1;
    525 			scsipi_channel_freeze(&isp->isp_chanA, 1);
    526 		}
    527 		xs->error = XS_REQUEUE;
    528 		scsipi_done(xs);
    529 		break;
    530 	case CMD_COMPLETE:
    531 		scsipi_done(xs);
    532 		break;
    533 	}
    534 	ISP_UNLOCK(isp);
    535 }
    536 
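         /*
          * scsipi adapter request entry point: dispatch transfers to ispcmd()
          * and translate transfer-mode requests into per-target goal
          * parameters for the core driver to apply.
          */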
    537 static void
    538 isprequest(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
    539 {
    540 	struct ispsoftc *isp = (void *)chan->chan_adapter->adapt_dev;
    541 
    542 	switch (req) {
    543 	case ADAPTER_REQ_RUN_XFER:
    544 		ispcmd(isp, (XS_T *) arg);
    545 		break;
    546 
    547 	case ADAPTER_REQ_GROW_RESOURCES:
    548 		/* Not supported. */
    549 		break;
    550 
    551 	case ADAPTER_REQ_SET_XFER_MODE:
    552 	if (IS_SCSI(isp)) {
    553 		struct scsipi_xfer_mode *xm = arg;
    554 		int dflags = 0;
    555 		sdparam *sdp = SDPARAM(isp);
    556 
    557 		sdp += chan->chan_channel;
    558 		if (xm->xm_mode & PERIPH_CAP_TQING)
    559 			dflags |= DPARM_TQING;
    560 		if (xm->xm_mode & PERIPH_CAP_WIDE16)
    561 			dflags |= DPARM_WIDE;
    562 		if (xm->xm_mode & PERIPH_CAP_SYNC)
    563 			dflags |= DPARM_SYNC;
    564 		ISP_LOCK(isp);
    565 		sdp->isp_devparam[xm->xm_target].goal_flags |= dflags;
    566 		dflags = sdp->isp_devparam[xm->xm_target].goal_flags;
    567 		sdp->isp_devparam[xm->xm_target].dev_update = 1;
    568 		isp->isp_update |= (1 << chan->chan_channel);
    569 		ISP_UNLOCK(isp);
    570 		isp_prt(isp, ISP_LOGDEBUG1,
     571 		    "isprequest: device flags 0x%x for %d.%d.X",
    572 		    dflags, chan->chan_channel, xm->xm_target);
    573 		break;
    574 	}
    575 	default:
    576 		break;
    577 	}
    578 }
    579 
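         /*
          * Issue a command with XS_CTL_POLL set: start it, then spin on the
          * interrupt status registers until it completes or its timeout
          * expires, aborting it if it does not complete.
          */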
    580 static void
    581 isp_polled_cmd(struct ispsoftc *isp, XS_T *xs)
    582 {
    583 	int result;
    584 	int infinite = 0, mswait;
    585 
    586 	result = isp_start(xs);
    587 
    588 	switch (result) {
    589 	case CMD_QUEUED:
    590 		break;
    591 	case CMD_RQLATER:
    592 		if (XS_NOERR(xs)) {
    593 			xs->error = XS_REQUEUE;
    594 		}
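         		/* FALLTHROUGH */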
    595 	case CMD_EAGAIN:
    596 		if (XS_NOERR(xs)) {
    597 			xs->error = XS_RESOURCE_SHORTAGE;
    598 		}
    599 		/* FALLTHROUGH */
    600 	case CMD_COMPLETE:
    601 		scsipi_done(xs);
    602 		return;
    603 
    604 	}
    605 
    606 	/*
     607 	 * If we can't use interrupts, poll for completion.
    608 	 */
    609 	if ((mswait = XS_TIME(xs)) == 0)
    610 		infinite = 1;
    611 
    612 	while (mswait || infinite) {
    613 		u_int16_t isr, sema, mbox;
    614 		if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
    615 			isp_intr(isp, isr, sema, mbox);
    616 			if (XS_CMD_DONE_P(xs)) {
    617 				break;
    618 			}
    619 		}
    620 		USEC_DELAY(1000);
    621 		mswait -= 1;
    622 	}
    623 
    624 	/*
    625 	 * If no other error occurred but we didn't finish,
    626 	 * something bad happened.
    627 	 */
    628 	if (XS_CMD_DONE_P(xs) == 0) {
    629 		if (isp_control(isp, ISPCTL_ABORT_CMD, xs)) {
    630 			isp_reinit(isp);
    631 		}
    632 		if (XS_NOERR(xs)) {
    633 			isp_prt(isp, ISP_LOGERR, "polled command timed out");
    634 			XS_SETERR(xs, HBA_BOTCH);
    635 		}
    636 	}
    637 	scsipi_done(xs);
    638 }
    639 
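         /*
          * Command completion from the core driver: stop the watchdog, map
          * QUEUE FULL status to XS_BUSY, thaw any paused channels and hand
          * the command back to the midlayer.
          */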
    640 void
    641 isp_done(XS_T *xs)
    642 {
    643 	if (XS_CMD_WDOG_P(xs) == 0) {
    644 		struct ispsoftc *isp = XS_ISP(xs);
    645 		callout_stop(&xs->xs_callout);
    646 		if (XS_CMD_GRACE_P(xs)) {
    647 			isp_prt(isp, ISP_LOGDEBUG1,
    648 			    "finished command on borrowed time");
    649 		}
    650 		XS_CMD_S_CLEAR(xs);
    651 		/*
     652 		 * Fixup: if we got a QUEUE FULL status, we need
     653 		 * to set XS_BUSY as the error.
    654 		 */
    655 		if (xs->status == SCSI_QUEUE_FULL) {
    656 			xs->error = XS_BUSY;
    657 		}
    658 		if (isp->isp_osinfo.paused) {
    659 			isp->isp_osinfo.paused = 0;
    660 			scsipi_channel_timed_thaw(&isp->isp_chanA);
    661 			if (IS_DUALBUS(isp)) {
    662 				scsipi_channel_timed_thaw(&isp->isp_chanB);
    663 			}
    664 		}
     665 		if (xs->error == XS_DRIVER_STUFFUP) {
     666 			isp_prt(isp, ISP_LOGERR, "BOTCHED cmd for %d.%d.%d cmd 0x%x datalen %ld",
     667 			    XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs), XS_CDBP(xs)[0], (long) XS_XFRLEN(xs));
     668 		}
    669 		scsipi_done(xs);
    670 	}
    671 }
    672 
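         /*
          * Per-command watchdog, armed via callout when a command is queued.
          * The first firing grants a one second grace period and queues a
          * SYNC_ALL marker to flush the response queue; if the command is
          * still outstanding on the next firing it is aborted and completed
          * with XS_TIMEOUT.
          */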
    673 static void
    674 isp_dog(void *arg)
    675 {
    676 	XS_T *xs = arg;
    677 	struct ispsoftc *isp = XS_ISP(xs);
    678 	u_int16_t handle;
    679 
    680 	ISP_ILOCK(isp);
    681 	/*
    682 	 * We've decided this command is dead. Make sure we're not trying
    683 	 * to kill a command that's already dead by getting it's handle and
     684 	 * to kill a command that's already dead by getting its handle
     685 	 * and seeing whether it's still alive.
    686 	handle = isp_find_handle(isp, xs);
    687 	if (handle) {
    688 		u_int16_t isr, mbox, sema;
    689 
    690 		if (XS_CMD_DONE_P(xs)) {
    691 			isp_prt(isp, ISP_LOGDEBUG1,
    692 			    "watchdog found done cmd (handle 0x%x)", handle);
    693 			ISP_IUNLOCK(isp);
    694 			return;
    695 		}
    696 
    697 		if (XS_CMD_WDOG_P(xs)) {
    698 			isp_prt(isp, ISP_LOGDEBUG1,
    699 			    "recursive watchdog (handle 0x%x)", handle);
    700 			ISP_IUNLOCK(isp);
    701 			return;
    702 		}
    703 
    704 		XS_CMD_S_WDOG(xs);
    705 
    706 		if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
    707 			isp_intr(isp, isr, sema, mbox);
    708 
    709 		}
    710 		if (XS_CMD_DONE_P(xs)) {
    711 			isp_prt(isp, ISP_LOGDEBUG1,
    712 			    "watchdog cleanup for handle 0x%x", handle);
    713 			XS_CMD_C_WDOG(xs);
    714 			isp_done(xs);
    715 		} else if (XS_CMD_GRACE_P(xs)) {
    716 			isp_prt(isp, ISP_LOGDEBUG1,
    717 			    "watchdog timeout for handle 0x%x", handle);
    718 			/*
    719 			 * Make sure the command is *really* dead before we
    720 			 * release the handle (and DMA resources) for reuse.
    721 			 */
    722 			(void) isp_control(isp, ISPCTL_ABORT_CMD, arg);
    723 
    724 			/*
    725 			 * After this point, the command is really dead.
    726 			 */
    727 			if (XS_XFRLEN(xs)) {
    728 				ISP_DMAFREE(isp, xs, handle);
    729 			}
    730 			isp_destroy_handle(isp, handle);
    731 			XS_SETERR(xs, XS_TIMEOUT);
    732 			XS_CMD_S_CLEAR(xs);
    733 			isp_done(xs);
    734 		} else {
    735 			u_int16_t nxti, optr;
    736 			ispreq_t local, *mp = &local, *qe;
    737 			isp_prt(isp, ISP_LOGDEBUG2,
    738 			    "possible command timeout on handle %x", handle);
    739 			XS_CMD_C_WDOG(xs);
    740 			callout_reset(&xs->xs_callout, hz, isp_dog, xs);
    741 			if (isp_getrqentry(isp, &nxti, &optr, (void *) &qe)) {
    742 				ISP_UNLOCK(isp);
    743 				return;
    744 			}
    745 			XS_CMD_S_GRACE(xs);
    746 			MEMZERO((void *) mp, sizeof (*mp));
    747 			mp->req_header.rqs_entry_count = 1;
    748 			mp->req_header.rqs_entry_type = RQSTYPE_MARKER;
    749 			mp->req_modifier = SYNC_ALL;
    750 			mp->req_target = XS_CHANNEL(xs) << 7;
    751 			isp_put_request(isp, mp, qe);
    752 			ISP_ADD_REQUEST(isp, nxti);
    753 		}
    754 	} else {
    755 		isp_prt(isp, ISP_LOGDEBUG0, "watchdog with no command");
    756 	}
    757 	ISP_IUNLOCK(isp);
    758 }
    759 
    760 /*
    761  * Fibre Channel state cleanup thread
    762  */
    763 static void
    764 isp_create_fc_worker(void *arg)
    765 {
    766 	struct ispsoftc *isp = arg;
    767 
    768 	if (kthread_create1(isp_fc_worker, isp, &isp->isp_osinfo.thread,
    769 	    "%s:fc_thrd", isp->isp_name)) {
    770 		isp_prt(isp, ISP_LOGERR, "unable to create FC worker thread");
    771 		panic("isp_create_fc_worker");
    772 	}
    773 
    774 }
    775 
    776 static void
    777 isp_fc_worker(void *arg)
    778 {
    779 	void scsipi_run_queue(struct scsipi_channel *);
    780 	struct ispsoftc *isp = arg;
    781 
    782 	for (;;) {
    783 		int s;
    784 
    785 		/*
    786 		 * Note we do *not* use the ISP_LOCK/ISP_UNLOCK macros here.
    787 		 */
    788 		s = splbio();
    789 		while (isp->isp_osinfo.threadwork) {
    790 			isp->isp_osinfo.threadwork = 0;
    791 			if (isp_fc_runstate(isp, 250000) == 0) {
    792 				break;
    793 			}
    794 			if  (isp->isp_osinfo.loop_checked &&
    795 			     FCPARAM(isp)->loop_seen_once == 0) {
    796 				splx(s);
    797 				goto skip;
    798 			}
    799 			isp->isp_osinfo.loop_checked = 1;
    800 			isp->isp_osinfo.threadwork = 1;
    801 			splx(s);
    802 			delay(500 * 1000);
    803 			s = splbio();
    804 		}
    805 		if (FCPARAM(isp)->isp_fwstate != FW_READY ||
    806 		    FCPARAM(isp)->isp_loopstate != LOOP_READY) {
    807 			isp_prt(isp, ISP_LOGINFO, "isp_fc_runstate in vain");
    808 			isp->isp_osinfo.threadwork = 1;
    809 			splx(s);
    810 			continue;
    811 		}
    812 
    813 		if (isp->isp_osinfo.blocked) {
    814 			isp->isp_osinfo.blocked = 0;
    815 			isp_prt(isp, ISP_LOGDEBUG0,
    816 			    "restarting queues (freeze count %d)",
    817 			    isp->isp_chanA.chan_qfreeze);
    818 			scsipi_channel_thaw(&isp->isp_chanA, 1);
    819 		}
    820 
    821 		if (isp->isp_osinfo.thread == NULL)
    822 			break;
    823 
    824 skip:
    825 		(void) tsleep(&isp->isp_osinfo.thread, PRIBIO, "fcclnup", 0);
    826 
    827 		splx(s);
    828 	}
    829 
    830 	/* In case parent is waiting for us to exit. */
    831 	wakeup(&isp->isp_osinfo.thread);
    832 
    833 	kthread_exit(0);
    834 }
    835 
    836 /*
    837  * Free any associated resources prior to decommissioning and
    838  * set the card to a known state (so it doesn't wake up and kick
    839  * us when we aren't expecting it to).
    840  *
    841  * Locks are held before coming here.
    842  */
    843 void
    844 isp_uninit(struct ispsoftc *isp)
    845 {
    846 	isp_lock(isp);
    847 	/*
    848 	 * Leave with interrupts disabled.
    849 	 */
    850 	DISABLE_INTS(isp);
    851 	isp_unlock(isp);
    852 }
    853 
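         /*
          * Core-driver async event handler: propagate transfer mode changes
          * and bus resets to the midlayer, freeze/thaw queues around FC loop
          * transitions, record fabric device arrivals, and report firmware
          * crashes.
          */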
    854 int
    855 isp_async(struct ispsoftc *isp, ispasync_t cmd, void *arg)
    856 {
    857 	int bus, tgt;
    858 
    859 	switch (cmd) {
    860 	case ISPASYNC_NEW_TGT_PARAMS:
    861 	if (IS_SCSI(isp) && isp->isp_dblev) {
    862 		sdparam *sdp = isp->isp_param;
    863 		int flags;
    864 		struct scsipi_xfer_mode xm;
    865 
    866 		tgt = *((int *) arg);
    867 		bus = (tgt >> 16) & 0xffff;
    868 		tgt &= 0xffff;
    869 		sdp += bus;
    870 		flags = sdp->isp_devparam[tgt].actv_flags;
    871 
    872 		xm.xm_mode = 0;
    873 		xm.xm_period = sdp->isp_devparam[tgt].actv_period;
    874 		xm.xm_offset = sdp->isp_devparam[tgt].actv_offset;
    875 		xm.xm_target = tgt;
    876 
    877 		if ((flags & DPARM_SYNC) && xm.xm_period && xm.xm_offset)
    878 			xm.xm_mode |= PERIPH_CAP_SYNC;
    879 		if (flags & DPARM_WIDE)
    880 			xm.xm_mode |= PERIPH_CAP_WIDE16;
    881 		if (flags & DPARM_TQING)
    882 			xm.xm_mode |= PERIPH_CAP_TQING;
    883 		scsipi_async_event(bus? &isp->isp_chanB : &isp->isp_chanA,
    884 		    ASYNC_EVENT_XFER_MODE, &xm);
    885 		break;
    886 	}
    887 	case ISPASYNC_BUS_RESET:
    888 		bus = *((int *) arg);
    889 		scsipi_async_event(bus? &isp->isp_chanB : &isp->isp_chanA,
    890 		    ASYNC_EVENT_RESET, NULL);
    891 		isp_prt(isp, ISP_LOGINFO, "SCSI bus %d reset detected", bus);
    892 		break;
    893 	case ISPASYNC_LIP:
    894 		/*
    895 		 * Don't do queue freezes or blockage until we have the
    896 		 * thread running that can unfreeze/unblock us.
    897 		 */
    898 		if (isp->isp_osinfo.blocked == 0)  {
    899 			if (isp->isp_osinfo.thread) {
    900 				isp->isp_osinfo.blocked = 1;
    901 				scsipi_channel_freeze(&isp->isp_chanA, 1);
    902 			}
    903 		}
    904 		isp_prt(isp, ISP_LOGINFO, "LIP Received");
    905 		break;
    906 	case ISPASYNC_LOOP_RESET:
    907 		/*
    908 		 * Don't do queue freezes or blockage until we have the
    909 		 * thread running that can unfreeze/unblock us.
    910 		 */
    911 		if (isp->isp_osinfo.blocked == 0) {
    912 			if (isp->isp_osinfo.thread) {
    913 				isp->isp_osinfo.blocked = 1;
    914 				scsipi_channel_freeze(&isp->isp_chanA, 1);
    915 			}
    916 		}
    917 		isp_prt(isp, ISP_LOGINFO, "Loop Reset Received");
    918 		break;
    919 	case ISPASYNC_LOOP_DOWN:
    920 		/*
    921 		 * Don't do queue freezes or blockage until we have the
    922 		 * thread running that can unfreeze/unblock us.
    923 		 */
    924 		if (isp->isp_osinfo.blocked == 0) {
    925 			if (isp->isp_osinfo.thread) {
    926 				isp->isp_osinfo.blocked = 1;
    927 				scsipi_channel_freeze(&isp->isp_chanA, 1);
    928 			}
    929 		}
    930 		isp_prt(isp, ISP_LOGINFO, "Loop DOWN");
    931 		break;
     932 	case ISPASYNC_LOOP_UP:
    933 		/*
    934 		 * Let the subsequent ISPASYNC_CHANGE_NOTIFY invoke
    935 		 * the FC worker thread. When the FC worker thread
    936 		 * is done, let *it* call scsipi_channel_thaw...
    937 		 */
    938 		isp_prt(isp, ISP_LOGINFO, "Loop UP");
    939 		break;
    940 	case ISPASYNC_PROMENADE:
    941 	if (IS_FC(isp) && isp->isp_dblev) {
    942 		static const char fmt[] = "Target %d (Loop 0x%x) Port ID 0x%x "
    943 		    "(role %s) %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x";
    944 		static const char *const roles[4] = {
    945 		    "None", "Target", "Initiator", "Target/Initiator"
    946 		};
    947 		fcparam *fcp = isp->isp_param;
    948 		int tgt1 = *((int *) arg);
    949 		struct lportdb *lp = &fcp->portdb[tgt1];
    950 
    951 		isp_prt(isp, ISP_LOGINFO, fmt, tgt1, lp->loopid, lp->portid,
    952 		    roles[lp->roles & 0x3],
    953 		    (lp->valid)? "Arrived" : "Departed",
    954 		    (u_int32_t) (lp->port_wwn >> 32),
    955 		    (u_int32_t) (lp->port_wwn & 0xffffffffLL),
    956 		    (u_int32_t) (lp->node_wwn >> 32),
    957 		    (u_int32_t) (lp->node_wwn & 0xffffffffLL));
    958 		break;
    959 	}
    960 	case ISPASYNC_CHANGE_NOTIFY:
    961 		if (arg == ISPASYNC_CHANGE_PDB) {
    962 			isp_prt(isp, ISP_LOGINFO, "Port Database Changed");
    963 		} else if (arg == ISPASYNC_CHANGE_SNS) {
    964 			isp_prt(isp, ISP_LOGINFO,
    965 			    "Name Server Database Changed");
    966 		}
    967 
    968 		/*
    969 		 * We can set blocked here because we know it's now okay
    970 		 * to try and run isp_fc_runstate (in order to build loop
    971 		 * state). But we don't try and freeze the midlayer's queue
    972 		 * if we have no thread that we can wake to later unfreeze
    973 		 * it.
    974 		 */
    975 		if (isp->isp_osinfo.blocked == 0) {
    976 			isp->isp_osinfo.blocked = 1;
    977 			if (isp->isp_osinfo.thread) {
    978 				scsipi_channel_freeze(&isp->isp_chanA, 1);
    979 			}
    980 		}
    981 		/*
    982 		 * Note that we have work for the thread to do, and
    983 		 * if the thread is here already, wake it up.
    984 		 */
    985 		isp->isp_osinfo.threadwork++;
    986 		if (isp->isp_osinfo.thread) {
    987 			wakeup(&isp->isp_osinfo.thread);
    988 		} else {
    989 			isp_prt(isp, ISP_LOGDEBUG1, "no FC thread yet");
    990 		}
    991 		break;
    992 	case ISPASYNC_FABRIC_DEV:
    993 	{
    994 		int target, base, lim;
    995 		fcparam *fcp = isp->isp_param;
    996 		struct lportdb *lp = NULL;
    997 		struct lportdb *clp = (struct lportdb *) arg;
    998 		const char *pt;
    999 
   1000 		switch (clp->port_type) {
   1001 		case 1:
   1002 			pt = "   N_Port";
   1003 			break;
   1004 		case 2:
   1005 			pt = "  NL_Port";
   1006 			break;
   1007 		case 3:
   1008 			pt = "F/NL_Port";
   1009 			break;
   1010 		case 0x7f:
   1011 			pt = "  Nx_Port";
   1012 			break;
   1013 		case 0x81:
   1014 			pt = "  F_port";
   1015 			break;
   1016 		case 0x82:
   1017 			pt = "  FL_Port";
   1018 			break;
   1019 		case 0x84:
   1020 			pt = "   E_port";
   1021 			break;
   1022 		default:
   1023 			pt = " ";
   1024 			break;
   1025 		}
   1026 
   1027 		isp_prt(isp, ISP_LOGINFO,
   1028 		    "%s Fabric Device @ PortID 0x%x", pt, clp->portid);
   1029 
   1030 		/*
   1031 		 * If we don't have an initiator role we bail.
   1032 		 *
   1033 		 * We just use ISPASYNC_FABRIC_DEV for announcement purposes.
   1034 		 */
   1035 
   1036 		if ((isp->isp_role & ISP_ROLE_INITIATOR) == 0) {
   1037 			break;
   1038 		}
   1039 
   1040 		/*
   1041 		 * Is this entry for us? If so, we bail.
   1042 		 */
   1043 
   1044 		if (fcp->isp_portid == clp->portid) {
   1045 			break;
   1046 		}
   1047 
   1048 		/*
   1049 		 * Else, the default policy is to find room for it in
   1050 		 * our local port database. Later, when we execute
    1051 		 * the call to isp_pdb_sync, either this newly arrived
    1052 		 * or already logged-in device will be (re)announced.
   1053 		 */
   1054 
   1055 		if (fcp->isp_topo == TOPO_FL_PORT)
   1056 			base = FC_SNS_ID+1;
   1057 		else
   1058 			base = 0;
   1059 
   1060 		if (fcp->isp_topo == TOPO_N_PORT)
   1061 			lim = 1;
   1062 		else
   1063 			lim = MAX_FC_TARG;
   1064 
   1065 		/*
   1066 		 * Is it already in our list?
   1067 		 */
   1068 		for (target = base; target < lim; target++) {
   1069 			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
   1070 				continue;
   1071 			}
   1072 			lp = &fcp->portdb[target];
   1073 			if (lp->port_wwn == clp->port_wwn &&
   1074 			    lp->node_wwn == clp->node_wwn) {
   1075 				lp->fabric_dev = 1;
   1076 				break;
   1077 			}
   1078 		}
   1079 		if (target < lim) {
   1080 			break;
   1081 		}
   1082 		for (target = base; target < lim; target++) {
   1083 			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
   1084 				continue;
   1085 			}
   1086 			lp = &fcp->portdb[target];
   1087 			if (lp->port_wwn == 0) {
   1088 				break;
   1089 			}
   1090 		}
   1091 		if (target == lim) {
   1092 			isp_prt(isp, ISP_LOGWARN,
   1093 			    "out of space for fabric devices");
   1094 			break;
   1095 		}
   1096 		lp->port_type = clp->port_type;
   1097 		lp->fc4_type = clp->fc4_type;
   1098 		lp->node_wwn = clp->node_wwn;
   1099 		lp->port_wwn = clp->port_wwn;
   1100 		lp->portid = clp->portid;
   1101 		lp->fabric_dev = 1;
   1102 		break;
   1103 	}
   1104 	case ISPASYNC_FW_CRASH:
   1105 	{
   1106 		u_int16_t mbox1, mbox6;
   1107 		mbox1 = ISP_READ(isp, OUTMAILBOX1);
   1108 		if (IS_DUALBUS(isp)) {
   1109 			mbox6 = ISP_READ(isp, OUTMAILBOX6);
   1110 		} else {
   1111 			mbox6 = 0;
   1112 		}
    1113 		isp_prt(isp, ISP_LOGERR,
    1114 		    "Internal Firmware Error on bus %d @ RISC Address 0x%x",
    1115 		    mbox6, mbox1);
   1116 #ifdef	ISP_FW_CRASH_DUMP
   1117 		if (IS_FC(isp)) {
   1118 			if (isp->isp_osinfo.blocked == 0) {
   1119 				isp->isp_osinfo.blocked = 1;
   1120 				scsipi_channel_freeze(&isp->isp_chanA, 1);
   1121 			}
   1122 			isp_fw_dump(isp);
   1123 		}
   1124 		isp_reinit(isp);
   1125 		isp_async(isp, ISPASYNC_FW_RESTARTED, NULL);
   1126 #endif
   1127 		break;
   1128 	}
   1129 	default:
   1130 		break;
   1131 	}
   1132 	return (0);
   1133 }
   1134 
   1135 #include <machine/stdarg.h>
   1136 void
   1137 isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
   1138 {
   1139 	va_list ap;
   1140 	if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
   1141 		return;
   1142 	}
   1143 	printf("%s: ", isp->isp_name);
   1144 	va_start(ap, fmt);
   1145 	vprintf(fmt, ap);
   1146 	va_end(ap);
   1147 	printf("\n");
   1148 }
   1149