/* $NetBSD: isp_netbsd.c,v 1.73.2.5 2007/06/09 23:57:50 ad Exp $ */
2 /*
3 * Platform (NetBSD) dependent common attachment code for Qlogic adapters.
4 */
5 /*
6 * Copyright (C) 1997, 1998, 1999 National Aeronautics & Space Administration
7 * All rights reserved.
8 *
9 * Additional Copyright (C) 2000-2007 by Matthew Jacob
10 * All rights reserved.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. The name of the author may not be used to endorse or promote products
21 * derived from this software without specific prior written permission
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: isp_netbsd.c,v 1.73.2.5 2007/06/09 23:57:50 ad Exp $");
37
38 #include <dev/ic/isp_netbsd.h>
39 #include <dev/ic/isp_ioctl.h>
40 #include <sys/scsiio.h>
41
42
43 /*
44 * Set a timeout for the watchdogging of a command.
45 *
46 * The dimensional analysis is
47 *
48 * milliseconds * (seconds/millisecond) * (ticks/second) = ticks
49 *
50 * =
51 *
52 * (milliseconds / 1000) * hz = ticks
53 *
54 *
55 * For timeouts less than 1 second, we'll get zero. Because of this, and
56 * because we want to establish *our* timeout to be longer than what the
57 * firmware might do, we just add 3 seconds at the back end.
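 *
 * For example, a 10000 millisecond timeout becomes
 * (10 * hz) + (3 * hz) = 13 * hz ticks.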
58 */
59 #define _XT(xs) ((((xs)->timeout/1000) * hz) + (3 * hz))
60
61 static void isp_config_interrupts(struct device *);
62 static void ispminphys_1020(struct buf *);
63 static void ispminphys(struct buf *);
64 static void ispcmd(struct ispsoftc *, XS_T *);
65 static void isprequest(struct scsipi_channel *, scsipi_adapter_req_t, void *);
66 static int
67 ispioctl(struct scsipi_channel *, u_long, void *, int, struct proc *);
68
69 static void isp_polled_cmd_wait(struct ispsoftc *, XS_T *);
70 static void isp_dog(void *);
71 static void isp_gdt(void *);
72 static void isp_ldt(void *);
73 static void isp_make_here(ispsoftc_t *, int);
74 static void isp_make_gone(ispsoftc_t *, int);
75 static void isp_fc_worker(void *);
76
77 static const char *roles[4] = {
78 "(none)", "Target", "Initiator", "Target/Initiator"
79 };
80 static const char prom3[] =
81 "PortID 0x%06x Departed from Target %u because of %s";
82 int isp_change_is_bad = 0; /* "changed" devices are bad */
83 int isp_quickboot_time = 15; /* don't wait more than N secs for loop up */
84 static int isp_fabric_hysteresis = 5;
85 #define isp_change_is_bad 0
86
87 /*
 * Complete attachment of hardware, including subdevices.
89 */
90
91 void
92 isp_attach(struct ispsoftc *isp)
93 {
94 isp->isp_state = ISP_RUNSTATE;
95
96 isp->isp_osinfo._adapter.adapt_dev = &isp->isp_osinfo._dev;
97 isp->isp_osinfo._adapter.adapt_nchannels = IS_DUALBUS(isp) ? 2 : 1;
98 isp->isp_osinfo._adapter.adapt_openings = isp->isp_maxcmds;
99 /*
100 * It's not stated whether max_periph is limited by SPI
 * tag usage, but let's assume that it is.
102 */
103 isp->isp_osinfo._adapter.adapt_max_periph = min(isp->isp_maxcmds, 255);
104 isp->isp_osinfo._adapter.adapt_ioctl = ispioctl;
105 isp->isp_osinfo._adapter.adapt_request = isprequest;
106 if (isp->isp_type <= ISP_HA_SCSI_1020A) {
107 isp->isp_osinfo._adapter.adapt_minphys = ispminphys_1020;
108 } else {
109 isp->isp_osinfo._adapter.adapt_minphys = ispminphys;
110 }
111
112 isp->isp_osinfo._chan.chan_adapter = &isp->isp_osinfo._adapter;
113 isp->isp_osinfo._chan.chan_bustype = &scsi_bustype;
114 isp->isp_osinfo._chan.chan_channel = 0;
115
116 /*
117 * Until the midlayer is fixed to use REPORT LUNS, limit to 8 luns.
118 */
119 isp->isp_osinfo._chan.chan_nluns = min(isp->isp_maxluns, 8);
120
121 callout_init(&isp->isp_osinfo.gdt);
122 callout_setfunc(&isp->isp_osinfo.gdt, isp_gdt, isp);
123
124 callout_init(&isp->isp_osinfo.ldt);
125 callout_setfunc(&isp->isp_osinfo.ldt, isp_ldt, isp);
126
127 if (IS_FC(isp)) {
128 isp->isp_osinfo._chan.chan_ntargets = MAX_FC_TARG;
129 isp->isp_osinfo._chan.chan_id = MAX_FC_TARG;
130 #ifdef ISP_FW_CRASH_DUMP
131 if (IS_2200(isp)) {
132 FCPARAM(isp)->isp_dump_data =
133 malloc(QLA2200_RISC_IMAGE_DUMP_SIZE, M_DEVBUF,
134 M_NOWAIT);
135 } else if (IS_23XX(isp)) {
136 FCPARAM(isp)->isp_dump_data =
137 malloc(QLA2300_RISC_IMAGE_DUMP_SIZE, M_DEVBUF,
138 M_NOWAIT);
139 }
140 if (FCPARAM(isp)->isp_dump_data)
141 FCPARAM(isp)->isp_dump_data[0] = 0;
142 #endif
143 } else {
144 int bus = 0;
145 sdparam *sdp = isp->isp_param;
146
147 isp->isp_osinfo._chan.chan_ntargets = MAX_TARGETS;
148 isp->isp_osinfo._chan.chan_id = sdp->isp_initiator_id;
149 isp->isp_osinfo.discovered[0] = 1 << sdp->isp_initiator_id;
150 if (IS_DUALBUS(isp)) {
151 isp->isp_osinfo._chan_b = isp->isp_osinfo._chan;
152 sdp++;
153 isp->isp_osinfo.discovered[1] =
154 1 << sdp->isp_initiator_id;
155 isp->isp_osinfo._chan_b.chan_id = sdp->isp_initiator_id;
156 isp->isp_osinfo._chan_b.chan_channel = 1;
157 }
158 ISP_LOCK(isp);
159 (void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
160 if (IS_DUALBUS(isp)) {
161 bus++;
162 (void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
163 }
164 ISP_UNLOCK(isp);
165 }
166
167
168 /*
169 * Defer enabling mailbox interrupts until later.
170 */
171 config_interrupts((struct device *) isp, isp_config_interrupts);
172
173 /*
174 * And attach children (if any).
175 */
176 config_found((void *)isp, &isp->isp_chanA, scsiprint);
177 if (IS_DUALBUS(isp)) {
178 config_found((void *)isp, &isp->isp_chanB, scsiprint);
179 }
180 }
181
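/*
 * Finish attachment once interrupts can be taken: allow mailbox
 * commands to sleep and create the worker thread that monitors
 * Fibre Channel loop state.
 */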
182 static void
183 isp_config_interrupts(struct device *self)
184 {
185 struct ispsoftc *isp = (struct ispsoftc *) self;
186
187 isp->isp_osinfo.mbox_sleep_ok = 1;
188
189 if (kthread_create(PRI_NONE, 0, NULL, isp_fc_worker, isp,
190 &isp->isp_osinfo.thread, "%s:fc_thrd", isp->isp_name)) {
191 isp_prt(isp, ISP_LOGERR,
192 "unable to create FC worker thread");
193 panic("isp_config_interrupts");
194 }
195 }
196
197
198 /*
199 * minphys our xfers
200 */
201 static void
202 ispminphys_1020(struct buf *bp)
203 {
204 if (bp->b_bcount >= (1 << 24)) {
205 bp->b_bcount = (1 << 24);
206 }
207 minphys(bp);
208 }
209
210 static void
211 ispminphys(struct buf *bp)
212 {
213 if (bp->b_bcount >= (1 << 30)) {
214 bp->b_bcount = (1 << 30);
215 }
216 minphys(bp);
217 }
218
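/*
 * Driver-private and generic scsibus ioctls: debug level, rescan,
 * HBA reset, LIP, statistics, and FC device/HBA information.
 */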
219 static int
220 ispioctl(struct scsipi_channel *chan, u_long cmd, void *addr, int flag,
221 struct proc *p)
222 {
223 struct ispsoftc *isp = (void *)chan->chan_adapter->adapt_dev;
224 int retval = ENOTTY;
225
226 switch (cmd) {
227 #ifdef ISP_FW_CRASH_DUMP
228 case ISP_GET_FW_CRASH_DUMP:
229 {
230 uint16_t *ptr = FCPARAM(isp)->isp_dump_data;
231 size_t sz;
232
233 retval = 0;
234 if (IS_2200(isp))
235 sz = QLA2200_RISC_IMAGE_DUMP_SIZE;
236 else
237 sz = QLA2300_RISC_IMAGE_DUMP_SIZE;
238 ISP_LOCK(isp);
239 if (ptr && *ptr) {
240 void *uaddr = *((void **) addr);
241 if (copyout(ptr, uaddr, sz)) {
242 retval = EFAULT;
243 } else {
244 *ptr = 0;
245 }
246 } else {
247 retval = ENXIO;
248 }
249 ISP_UNLOCK(isp);
250 break;
251 }
252
253 case ISP_FORCE_CRASH_DUMP:
254 ISP_LOCK(isp);
255 if (isp->isp_osinfo.blocked == 0) {
256 isp->isp_osinfo.blocked = 1;
257 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
258 "FREEZE QUEUES @ LINE %d", __LINE__);
259 scsipi_channel_freeze(&isp->isp_chanA, 1);
260 }
261 isp_fw_dump(isp);
262 isp_reinit(isp);
263 ISP_UNLOCK(isp);
264 retval = 0;
265 break;
266 #endif
267 case ISP_SDBLEV:
268 {
269 int olddblev = isp->isp_dblev;
270 isp->isp_dblev = *(int *)addr;
271 *(int *)addr = olddblev;
272 retval = 0;
273 break;
274 }
275 case ISP_RESETHBA:
276 ISP_LOCK(isp);
277 isp_reinit(isp);
278 ISP_UNLOCK(isp);
279 retval = 0;
280 break;
281 case ISP_RESCAN:
282 if (IS_FC(isp)) {
283 ISP_LOCK(isp);
284 if (isp_fc_runstate(isp, 5 * 1000000)) {
285 retval = EIO;
286 } else {
287 retval = 0;
288 }
289 ISP_UNLOCK(isp);
290 }
291 break;
292 case ISP_FC_LIP:
293 if (IS_FC(isp)) {
294 ISP_LOCK(isp);
295 if (isp_control(isp, ISPCTL_SEND_LIP, 0)) {
296 retval = EIO;
297 } else {
298 retval = 0;
299 }
300 ISP_UNLOCK(isp);
301 }
302 break;
303 case ISP_FC_GETDINFO:
304 {
305 struct isp_fc_device *ifc = (struct isp_fc_device *) addr;
306 fcportdb_t *lp;
307
308 if (ifc->loopid >= MAX_FC_TARG) {
309 retval = EINVAL;
310 break;
311 }
312 ISP_LOCK(isp);
313 lp = &FCPARAM(isp)->portdb[ifc->loopid];
314 if (lp->state == FC_PORTDB_STATE_VALID) {
315 ifc->role = lp->roles;
316 ifc->loopid = lp->handle;
317 ifc->portid = lp->portid;
318 ifc->node_wwn = lp->node_wwn;
319 ifc->port_wwn = lp->port_wwn;
320 retval = 0;
321 } else {
322 retval = ENODEV;
323 }
324 ISP_UNLOCK(isp);
325 break;
326 }
327 case ISP_GET_STATS:
328 {
329 isp_stats_t *sp = (isp_stats_t *) addr;
330
331 MEMZERO(sp, sizeof (*sp));
332 sp->isp_stat_version = ISP_STATS_VERSION;
333 sp->isp_type = isp->isp_type;
334 sp->isp_revision = isp->isp_revision;
335 ISP_LOCK(isp);
336 sp->isp_stats[ISP_INTCNT] = isp->isp_intcnt;
337 sp->isp_stats[ISP_INTBOGUS] = isp->isp_intbogus;
338 sp->isp_stats[ISP_INTMBOXC] = isp->isp_intmboxc;
339 sp->isp_stats[ISP_INGOASYNC] = isp->isp_intoasync;
340 sp->isp_stats[ISP_RSLTCCMPLT] = isp->isp_rsltccmplt;
341 sp->isp_stats[ISP_FPHCCMCPLT] = isp->isp_fphccmplt;
342 sp->isp_stats[ISP_RSCCHIWAT] = isp->isp_rscchiwater;
343 sp->isp_stats[ISP_FPCCHIWAT] = isp->isp_fpcchiwater;
344 ISP_UNLOCK(isp);
345 retval = 0;
346 break;
347 }
348 case ISP_CLR_STATS:
349 ISP_LOCK(isp);
350 isp->isp_intcnt = 0;
351 isp->isp_intbogus = 0;
352 isp->isp_intmboxc = 0;
353 isp->isp_intoasync = 0;
354 isp->isp_rsltccmplt = 0;
355 isp->isp_fphccmplt = 0;
356 isp->isp_rscchiwater = 0;
357 isp->isp_fpcchiwater = 0;
358 ISP_UNLOCK(isp);
359 retval = 0;
360 break;
361 case ISP_FC_GETHINFO:
362 {
363 struct isp_hba_device *hba = (struct isp_hba_device *) addr;
364 MEMZERO(hba, sizeof (*hba));
365 ISP_LOCK(isp);
366 hba->fc_speed = FCPARAM(isp)->isp_gbspeed;
367 hba->fc_scsi_supported = 1;
368 hba->fc_topology = FCPARAM(isp)->isp_topo + 1;
369 hba->fc_loopid = FCPARAM(isp)->isp_loopid;
370 hba->nvram_node_wwn = FCPARAM(isp)->isp_wwnn_nvram;
371 hba->nvram_port_wwn = FCPARAM(isp)->isp_wwpn_nvram;
372 hba->active_node_wwn = ISP_NODEWWN(isp);
373 hba->active_port_wwn = ISP_PORTWWN(isp);
374 ISP_UNLOCK(isp);
375 retval = 0;
376 break;
377 }
378 case SCBUSIORESET:
379 ISP_LOCK(isp);
380 if (isp_control(isp, ISPCTL_RESET_BUS, &chan->chan_channel)) {
381 retval = EIO;
382 } else {
383 retval = 0;
384 }
385 ISP_UNLOCK(isp);
386 break;
387 default:
388 break;
389 }
390 return (retval);
391 }
392
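/*
 * Run a single transfer: bring the chip to run state if needed, wait
 * for usable loop state on FC cards when the worker thread isn't up
 * yet, and hand the command to isp_start, freezing the channel queues
 * when the chip is out of resources.
 */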
393 static void
394 ispcmd(struct ispsoftc *isp, XS_T *xs)
395 {
396 volatile uint8_t ombi;
397 int lim;
398
399 ISP_LOCK(isp);
400 if (isp->isp_state < ISP_RUNSTATE) {
401 ISP_DISABLE_INTS(isp);
402 isp_init(isp);
403 if (isp->isp_state != ISP_INITSTATE) {
404 ISP_ENABLE_INTS(isp);
405 ISP_UNLOCK(isp);
406 isp_prt(isp, ISP_LOGERR, "isp not at init state");
407 XS_SETERR(xs, HBA_BOTCH);
408 scsipi_done(xs);
409 return;
410 }
411 isp->isp_state = ISP_RUNSTATE;
412 ISP_ENABLE_INTS(isp);
413 }
414
415 /*
 * Handle the case of an FC card where the FC thread hasn't
417 * fired up yet and we don't yet have a known loop state.
418 */
419 if (IS_FC(isp) && (FCPARAM(isp)->isp_fwstate != FW_READY ||
420 FCPARAM(isp)->isp_loopstate != LOOP_READY) &&
421 isp->isp_osinfo.thread == NULL) {
int delay_time;
ombi = isp->isp_osinfo.mbox_sleep_ok != 0;
424
425 if (xs->xs_control & XS_CTL_POLL) {
426 isp->isp_osinfo.mbox_sleep_ok = 0;
427 }
428
429 if (isp->isp_osinfo.loop_checked == 0) {
430 delay_time = 10 * 1000000;
431 isp->isp_osinfo.loop_checked = 1;
432 } else {
433 delay_time = 250000;
434 }
435
436 if (isp_fc_runstate(isp, delay_time) != 0) {
437 if (xs->xs_control & XS_CTL_POLL) {
438 isp->isp_osinfo.mbox_sleep_ok = ombi;
439 }
440 if (FCPARAM(isp)->loop_seen_once == 0) {
441 XS_SETERR(xs, HBA_SELTIMEOUT);
442 scsipi_done(xs);
443 ISP_UNLOCK(isp);
444 return;
445 }
446 /*
447 * Otherwise, fall thru to be queued up for later.
448 */
449 } else {
450 int wasblocked =
451 (isp->isp_osinfo.blocked || isp->isp_osinfo.paused);
452 isp->isp_osinfo.blocked =
453 isp->isp_osinfo.paused = 0;
454 if (wasblocked) {
455 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
456 "THAW QUEUES @ LINE %d", __LINE__);
457 scsipi_channel_thaw(&isp->isp_chanA, 1);
458 }
459 }
460 if (xs->xs_control & XS_CTL_POLL) {
461 isp->isp_osinfo.mbox_sleep_ok = ombi;
462 }
463 }
464
465 if (isp->isp_osinfo.paused) {
466 isp_prt(isp, ISP_LOGWARN, "I/O while paused");
467 xs->error = XS_RESOURCE_SHORTAGE;
468 scsipi_done(xs);
469 ISP_UNLOCK(isp);
470 return;
471 }
472 if (isp->isp_osinfo.blocked) {
473 isp_prt(isp, ISP_LOGWARN, "I/O while blocked");
474 xs->error = XS_REQUEUE;
475 scsipi_done(xs);
476 ISP_UNLOCK(isp);
477 return;
478 }
479
480 if (xs->xs_control & XS_CTL_POLL) {
481 ombi = isp->isp_osinfo.mbox_sleep_ok;
482 isp->isp_osinfo.mbox_sleep_ok = 0;
483 }
484
485 switch (isp_start(xs)) {
486 case CMD_QUEUED:
487 if (xs->xs_control & XS_CTL_POLL) {
488 isp_polled_cmd_wait(isp, xs);
489 isp->isp_osinfo.mbox_sleep_ok = ombi;
490 } else if (xs->timeout) {
491 callout_reset(&xs->xs_callout, _XT(xs), isp_dog, xs);
492 }
493 break;
494 case CMD_EAGAIN:
495 isp->isp_osinfo.paused = 1;
496 xs->error = XS_RESOURCE_SHORTAGE;
497 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
498 "FREEZE QUEUES @ LINE %d", __LINE__);
499 scsipi_channel_freeze(&isp->isp_chanA, 1);
500 if (IS_DUALBUS(isp)) {
501 scsipi_channel_freeze(&isp->isp_chanB, 1);
502 }
503 scsipi_done(xs);
504 break;
505 case CMD_RQLATER:
506 /*
507 * We can only get RQLATER from FC devices (1 channel only)
508 *
 * If we've never seen loop up, check whether we've been down for the
 * quickboot time; otherwise use the loop down limit time. If we've
 * been down that long, we start giving up on commands.
512 */
513 if (FCPARAM(isp)->loop_seen_once == 0) {
514 lim = isp_quickboot_time;
515 } else {
516 lim = isp->isp_osinfo.loop_down_limit;
517 }
518 if (isp->isp_osinfo.loop_down_time >= lim) {
519 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
520 "RQLATER -> SELTIMEOUT");
521 XS_SETERR(xs, HBA_SELTIMEOUT);
522 scsipi_done(xs);
523 break;
524 }
525 if (isp->isp_osinfo.blocked == 0) {
526 isp->isp_osinfo.blocked = 1;
527 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
528 "FREEZE QUEUES @ LINE %d", __LINE__);
529 scsipi_channel_freeze(&isp->isp_chanA, 1);
530 } else {
531 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
532 "RQLATER WITH FROZEN QUEUES @ LINE %d", __LINE__);
533 }
534 xs->error = XS_REQUEUE;
535 scsipi_done(xs);
536 break;
537 case CMD_COMPLETE:
538 scsipi_done(xs);
539 break;
540 }
541 ISP_UNLOCK(isp);
542 }
543
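/*
 * scsipi adapter request entry point: run transfers via ispcmd and
 * turn transfer mode settings into device parameter updates.
 */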
544 static void
545 isprequest(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
546 {
547 struct ispsoftc *isp = (void *)chan->chan_adapter->adapt_dev;
548
549 switch (req) {
550 case ADAPTER_REQ_RUN_XFER:
551 ispcmd(isp, (XS_T *) arg);
552 break;
553
554 case ADAPTER_REQ_GROW_RESOURCES:
555 /* Not supported. */
556 break;
557
558 case ADAPTER_REQ_SET_XFER_MODE:
559 if (IS_SCSI(isp)) {
560 struct scsipi_xfer_mode *xm = arg;
561 int dflags = 0;
562 sdparam *sdp = SDPARAM(isp);
563
564 sdp += chan->chan_channel;
565 if (xm->xm_mode & PERIPH_CAP_TQING)
566 dflags |= DPARM_TQING;
567 if (xm->xm_mode & PERIPH_CAP_WIDE16)
568 dflags |= DPARM_WIDE;
569 if (xm->xm_mode & PERIPH_CAP_SYNC)
570 dflags |= DPARM_SYNC;
571 ISP_LOCK(isp);
572 sdp->isp_devparam[xm->xm_target].goal_flags |= dflags;
573 dflags = sdp->isp_devparam[xm->xm_target].goal_flags;
574 sdp->isp_devparam[xm->xm_target].dev_update = 1;
575 isp->isp_update |= (1 << chan->chan_channel);
576 ISP_UNLOCK(isp);
577 isp_prt(isp, ISP_LOGDEBUG1,
578 "isprequest: device flags 0x%x for %d.%d.X",
579 dflags, chan->chan_channel, xm->xm_target);
580 break;
581 }
582 default:
583 break;
584 }
585 }
586
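/*
 * Wait for a polled command to complete by servicing interrupts by
 * hand; if it never completes, abort it (or reinitialize the chip).
 */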
587 static void
588 isp_polled_cmd_wait(struct ispsoftc *isp, XS_T *xs)
589 {
590 int infinite = 0, mswait;
591
592 /*
593 * If we can't use interrupts, poll on completion.
594 */
595 if ((mswait = XS_TIME(xs)) == 0) {
596 infinite = 1;
597 }
598
599 while (mswait || infinite) {
600 uint32_t isr;
601 uint16_t sema, mbox;
602 if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
603 isp_intr(isp, isr, sema, mbox);
604 if (XS_CMD_DONE_P(xs)) {
605 break;
606 }
607 }
608 USEC_DELAY(1000);
609 mswait -= 1;
610 }
611
612 /*
 * If no other error occurred but we didn't finish,
 * something bad happened, so abort the command.
615 */
616 if (XS_CMD_DONE_P(xs) == 0) {
617 if (isp_control(isp, ISPCTL_ABORT_CMD, xs)) {
618 isp_reinit(isp);
619 }
620 if (XS_NOERR(xs)) {
621 isp_prt(isp, ISP_LOGERR, "polled command timed out");
622 XS_SETERR(xs, HBA_BOTCH);
623 }
624 }
625 scsipi_done(xs);
626 }
627
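/*
 * Command completion from the core code: stop the watchdog, map a
 * QUEUE FULL status to XS_BUSY, thaw paused queues, and pass the
 * transfer back to the midlayer.
 */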
628 void
629 isp_done(XS_T *xs)
630 {
631 if (XS_CMD_WDOG_P(xs) == 0) {
632 struct ispsoftc *isp = XS_ISP(xs);
633 callout_stop(&xs->xs_callout);
634 if (XS_CMD_GRACE_P(xs)) {
635 isp_prt(isp, ISP_LOGDEBUG1,
636 "finished command on borrowed time");
637 }
638 XS_CMD_S_CLEAR(xs);
639 /*
640 * Fixup- if we get a QFULL, we need
641 * to set XS_BUSY as the error.
642 */
643 if (xs->status == SCSI_QUEUE_FULL) {
644 xs->error = XS_BUSY;
645 }
646 if (isp->isp_osinfo.paused) {
647 isp->isp_osinfo.paused = 0;
648 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
649 "THAW QUEUES @ LINE %d", __LINE__);
650 scsipi_channel_timed_thaw(&isp->isp_chanA);
651 if (IS_DUALBUS(isp)) {
652 scsipi_channel_timed_thaw(&isp->isp_chanB);
653 }
654 }
655 if (xs->error == XS_DRIVER_STUFFUP) {
656 isp_prt(isp, ISP_LOGERR,
657 "BOTCHED cmd for %d.%d.%d cmd 0x%x datalen %ld",
658 XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs),
659 XS_CDBP(xs)[0], (long) XS_XFRLEN(xs));
660 }
661 scsipi_done(xs);
662 }
663 }
664
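/*
 * Per-command watchdog. On the first expiration for a still-active
 * command we push a SYNC_ALL marker and grant one more grace period;
 * if the command is still not done when that expires, we abort it and
 * complete it with XS_TIMEOUT.
 */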
665 static void
666 isp_dog(void *arg)
667 {
668 XS_T *xs = arg;
669 struct ispsoftc *isp = XS_ISP(xs);
670 uint32_t handle;
671
672
673 ISP_ILOCK(isp);
674 /*
675 * We've decided this command is dead. Make sure we're not trying
 * to kill a command that's already dead by getting its handle
 * and seeing whether it's still alive.
678 */
679 handle = isp_find_handle(isp, xs);
680 if (handle) {
681 uint32_t isr;
682 uint16_t mbox, sema;
683
684 if (XS_CMD_DONE_P(xs)) {
685 isp_prt(isp, ISP_LOGDEBUG1,
686 "watchdog found done cmd (handle 0x%x)", handle);
687 ISP_IUNLOCK(isp);
688 return;
689 }
690
691 if (XS_CMD_WDOG_P(xs)) {
692 isp_prt(isp, ISP_LOGDEBUG1,
693 "recursive watchdog (handle 0x%x)", handle);
694 ISP_IUNLOCK(isp);
695 return;
696 }
697
698 XS_CMD_S_WDOG(xs);
699
700 if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
701 isp_intr(isp, isr, sema, mbox);
702
703 }
704 if (XS_CMD_DONE_P(xs)) {
705 isp_prt(isp, ISP_LOGDEBUG1,
706 "watchdog cleanup for handle 0x%x", handle);
707 XS_CMD_C_WDOG(xs);
708 isp_done(xs);
709 } else if (XS_CMD_GRACE_P(xs)) {
710 isp_prt(isp, ISP_LOGDEBUG1,
711 "watchdog timeout for handle 0x%x", handle);
712 /*
713 * Make sure the command is *really* dead before we
714 * release the handle (and DMA resources) for reuse.
715 */
716 (void) isp_control(isp, ISPCTL_ABORT_CMD, arg);
717
718 /*
719 * After this point, the command is really dead.
720 */
721 if (XS_XFRLEN(xs)) {
722 ISP_DMAFREE(isp, xs, handle);
723 }
724 isp_destroy_handle(isp, handle);
725 XS_SETERR(xs, XS_TIMEOUT);
726 XS_CMD_S_CLEAR(xs);
727 isp_done(xs);
728 } else {
729 uint32_t nxti, optr;
730 void *qe;
731 isp_marker_t local, *mp = &local;
732 isp_prt(isp, ISP_LOGDEBUG2,
733 "possible command timeout on handle %x", handle);
734 XS_CMD_C_WDOG(xs);
735 callout_reset(&xs->xs_callout, hz, isp_dog, xs);
736 if (isp_getrqentry(isp, &nxti, &optr, &qe)) {
737 ISP_UNLOCK(isp);
738 return;
739 }
740 XS_CMD_S_GRACE(xs);
741 MEMZERO((void *) mp, sizeof (*mp));
742 mp->mrk_header.rqs_entry_count = 1;
743 mp->mrk_header.rqs_entry_type = RQSTYPE_MARKER;
744 mp->mrk_modifier = SYNC_ALL;
745 mp->mrk_target = XS_CHANNEL(xs) << 7;
746 isp_put_marker(isp, mp, qe);
747 ISP_ADD_REQUEST(isp, nxti);
748 }
749 } else {
750 isp_prt(isp, ISP_LOGDEBUG0, "watchdog with no command");
751 }
752 ISP_IUNLOCK(isp);
753 }
754
755
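/*
 * Fibre Channel worker thread: re-evaluate loop state via
 * isp_fc_runstate, track how long the loop has been down, and thaw
 * the channel queues once the loop is usable or we give up waiting.
 */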
756 static void
757 isp_fc_worker(void *arg)
758 {
759 void scsipi_run_queue(struct scsipi_channel *);
760 ispsoftc_t *isp = arg;
761 int slp = 0;
762 int s = splbio();
763 /*
 * The first loop is for the case where we have yet to get
 * good fibre channel state.
766 */
767 while (isp->isp_osinfo.thread != NULL) {
768 int sok, lb, lim;
769
770 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "checking FC state");
771 sok = isp->isp_osinfo.mbox_sleep_ok;
772 isp->isp_osinfo.mbox_sleep_ok = 1;
773 lb = isp_fc_runstate(isp, 250000);
774 isp->isp_osinfo.mbox_sleep_ok = sok;
775 if (lb) {
776 /*
777 * Increment loop down time by the last sleep interval
778 */
779 isp->isp_osinfo.loop_down_time += slp;
780
781 if (lb < 0) {
782 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
783 "FC loop not up (down count %d)",
784 isp->isp_osinfo.loop_down_time);
785 } else {
786 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
787 "FC got to %d (down count %d)",
788 lb, isp->isp_osinfo.loop_down_time);
789 }
790
791
792 /*
793 * If we've never seen loop up and we've waited longer
794 * than quickboot time, or we've seen loop up but we've
795 * waited longer than loop_down_limit, give up and go
796 * to sleep until loop comes up.
797 */
798 if (FCPARAM(isp)->loop_seen_once == 0) {
799 lim = isp_quickboot_time;
800 } else {
801 lim = isp->isp_osinfo.loop_down_limit;
802 }
803 if (isp->isp_osinfo.loop_down_time >= lim) {
804 /*
805 * If we're now past our limit, release
806 * the queues and let them come in and
 * either get HBA_SELTIMEOUT or cause
808 * another freeze.
809 */
810 isp->isp_osinfo.blocked = 1;
811 slp = 0;
812 } else if (isp->isp_osinfo.loop_down_time < 10) {
813 slp = 1;
814 } else if (isp->isp_osinfo.loop_down_time < 30) {
815 slp = 5;
816 } else if (isp->isp_osinfo.loop_down_time < 60) {
817 slp = 10;
818 } else if (isp->isp_osinfo.loop_down_time < 120) {
819 slp = 20;
820 } else {
821 slp = 30;
822 }
823
824 } else {
825 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
826 "FC state OK");
827 isp->isp_osinfo.loop_down_time = 0;
828 slp = 0;
829 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
830 "THAW QUEUES @ LINE %d", __LINE__);
831 scsipi_channel_thaw(&isp->isp_chanA, 1);
832 }
833
834 /*
835 * If we'd frozen the queues, unfreeze them now so that
836 * we can start getting commands. If the FC state isn't
837 * okay yet, they'll hit that in isp_start which will
838 * freeze the queues again.
839 */
840 if (isp->isp_osinfo.blocked) {
841 isp->isp_osinfo.blocked = 0;
842 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
843 "THAW QUEUES @ LINE %d", __LINE__);
844 scsipi_channel_thaw(&isp->isp_chanA, 1);
845 }
846 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "sleep time %d", slp);
847 tsleep(&isp->isp_osinfo.thread, PRIBIO, "ispf", slp * hz);
848
849 /*
850 * If slp is zero, we're waking up for the first time after
851 * things have been okay. In this case, we set a deferral state
852 * for all commands and delay hysteresis seconds before starting
853 * the FC state evaluation. This gives the loop/fabric a chance
854 * to settle.
855 */
856 if (slp == 0 && isp_fabric_hysteresis) {
857 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
858 "sleep hysteresis tick time %d",
859 isp_fabric_hysteresis * hz);
860 (void) tsleep(&isp_fabric_hysteresis, PRIBIO, "ispT",
861 (isp_fabric_hysteresis * hz));
862 }
863 }
864 splx(s);
865
866 /* In case parent is waiting for us to exit. */
867 wakeup(&isp->isp_osinfo.thread);
868 kthread_exit(0);
869 }
870
871 /*
872 * Free any associated resources prior to decommissioning and
873 * set the card to a known state (so it doesn't wake up and kick
874 * us when we aren't expecting it to).
875 *
876 * Locks are held before coming here.
877 */
878 void
879 isp_uninit(struct ispsoftc *isp)
880 {
881 isp_lock(isp);
882 /*
883 * Leave with interrupts disabled.
884 */
885 ISP_DISABLE_INTS(isp);
886 isp_unlock(isp);
887 }
888
889 /*
890 * Gone Device Timer Function- when we have decided that a device has gone
891 * away, we wait a specific period of time prior to telling the OS it has
892 * gone away.
893 *
894 * This timer function fires once a second and then scans the port database
895 * for devices that are marked dead but still have a virtual target assigned.
896 * We decrement a counter for that port database entry, and when it hits zero,
897 * we tell the OS the device has gone away.
898 */
899 static void
900 isp_gdt(void *arg)
901 {
902 ispsoftc_t *isp = arg;
903 fcportdb_t *lp;
904 int dbidx, tgt, more_to_do = 0;
905
906 isp_prt(isp, ISP_LOGDEBUG0, "GDT timer expired");
907 ISP_LOCK(isp);
908 for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) {
909 lp = &FCPARAM(isp)->portdb[dbidx];
910
911 if (lp->state != FC_PORTDB_STATE_ZOMBIE) {
912 continue;
913 }
914 if (lp->ini_map_idx == 0) {
915 continue;
916 }
917 if (lp->new_reserved == 0) {
918 continue;
919 }
920 lp->new_reserved -= 1;
921 if (lp->new_reserved != 0) {
922 more_to_do++;
923 continue;
924 }
925 tgt = lp->ini_map_idx - 1;
926 FCPARAM(isp)->isp_ini_map[tgt] = 0;
927 lp->ini_map_idx = 0;
928 lp->state = FC_PORTDB_STATE_NIL;
929 isp_prt(isp, ISP_LOGCONFIG, prom3, lp->portid, tgt,
930 "Gone Device Timeout");
931 isp_make_gone(isp, tgt);
932 }
933 if (more_to_do) {
934 callout_schedule(&isp->isp_osinfo.gdt, hz);
935 } else {
936 isp->isp_osinfo.gdt_running = 0;
937 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
938 "stopping Gone Device Timer");
939 }
940 ISP_UNLOCK(isp);
941 }
942
943 /*
 * Loop Down Timer Function- when the loop goes down, a timer is started,
 * and after it expires we come here and take all probational devices that
 * the OS knows about and tell the OS that they've gone away.
947 *
 * We don't clear the devices out of our port database because, when the
 * loop comes back up, we have to do some actual cleanup with the chip at that
950 * point (implicit PLOGO, e.g., to get the chip's port database state right).
951 */
952 static void
953 isp_ldt(void *arg)
954 {
955 ispsoftc_t *isp = arg;
956 fcportdb_t *lp;
957 int dbidx, tgt;
958
959 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "Loop Down Timer expired");
960 ISP_LOCK(isp);
961
962 /*
 * Notify the OS of all targets that we now consider to have departed.
964 */
965 for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) {
966 lp = &FCPARAM(isp)->portdb[dbidx];
967
968 if (lp->state != FC_PORTDB_STATE_PROBATIONAL) {
969 continue;
970 }
971 if (lp->ini_map_idx == 0) {
972 continue;
973 }
974
975 /*
976 * XXX: CLEAN UP AND COMPLETE ANY PENDING COMMANDS FIRST!
977 */
978
979 /*
980 * Mark that we've announced that this device is gone....
981 */
982 lp->reserved = 1;
983
984 /*
985 * but *don't* change the state of the entry. Just clear
 * any target id stuff and announce to the OS that the
987 * device is gone. This way any necessary PLOGO stuff
988 * will happen when loop comes back up.
989 */
990
991 tgt = lp->ini_map_idx - 1;
992 FCPARAM(isp)->isp_ini_map[tgt] = 0;
993 lp->ini_map_idx = 0;
994 isp_prt(isp, ISP_LOGCONFIG, prom3, lp->portid, tgt,
995 "Loop Down Timeout");
996 isp_make_gone(isp, tgt);
997 }
998
999 /*
1000 * The loop down timer has expired. Wake up the kthread
 * to notice that fact (or to find that it is no longer true).
1002 */
1003 isp->isp_osinfo.loop_down_time = isp->isp_osinfo.loop_down_limit+1;
1004 wakeup(&isp->isp_osinfo.thread);
1005 ISP_UNLOCK(isp);
1006 }
1007
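/*
 * Arrival/departure notifications to the OS; for now we just log them.
 */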
1008 static void
1009 isp_make_here(ispsoftc_t *isp, int tgt)
1010 {
1011 isp_prt(isp, ISP_LOGINFO, "target %d has arrived", tgt);
1012 }
1013
1014 static void
1015 isp_make_gone(ispsoftc_t *isp, int tgt)
1016 {
1017 isp_prt(isp, ISP_LOGINFO, "target %d has departed", tgt);
1018 }
1019
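/*
 * Platform handler for asynchronous events from the common code:
 * transfer mode negotiation, bus resets, loop transitions, port
 * database arrivals/changes/departures, change notifications and
 * firmware crashes.
 */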
1020 int
1021 isp_async(struct ispsoftc *isp, ispasync_t cmd, void *arg)
1022 {
1023 int bus, tgt;
1024 const char *msg = NULL;
1025 static const char prom[] =
1026 "PortID 0x%06x handle 0x%x role %s %s\n"
1027 " WWNN 0x%08x%08x WWPN 0x%08x%08x";
1028 static const char prom2[] =
1029 "PortID 0x%06x handle 0x%x role %s %s tgt %u\n"
1030 " WWNN 0x%08x%08x WWPN 0x%08x%08x";
1031 fcportdb_t *lp;
1032
1033 switch (cmd) {
1034 case ISPASYNC_NEW_TGT_PARAMS:
1035 if (IS_SCSI(isp) && isp->isp_dblev) {
1036 sdparam *sdp = isp->isp_param;
1037 int flags;
1038 struct scsipi_xfer_mode xm;
1039
1040 tgt = *((int *) arg);
1041 bus = (tgt >> 16) & 0xffff;
1042 tgt &= 0xffff;
1043 sdp += bus;
1044 flags = sdp->isp_devparam[tgt].actv_flags;
1045
1046 xm.xm_mode = 0;
1047 xm.xm_period = sdp->isp_devparam[tgt].actv_period;
1048 xm.xm_offset = sdp->isp_devparam[tgt].actv_offset;
1049 xm.xm_target = tgt;
1050
1051 if ((flags & DPARM_SYNC) && xm.xm_period && xm.xm_offset)
1052 xm.xm_mode |= PERIPH_CAP_SYNC;
1053 if (flags & DPARM_WIDE)
1054 xm.xm_mode |= PERIPH_CAP_WIDE16;
1055 if (flags & DPARM_TQING)
1056 xm.xm_mode |= PERIPH_CAP_TQING;
1057 scsipi_async_event(bus? &isp->isp_chanB : &isp->isp_chanA,
1058 ASYNC_EVENT_XFER_MODE, &xm);
1059 break;
1060 }
1061 case ISPASYNC_BUS_RESET:
1062 bus = *((int *) arg);
1063 scsipi_async_event(bus? &isp->isp_chanB : &isp->isp_chanA,
1064 ASYNC_EVENT_RESET, NULL);
1065 isp_prt(isp, ISP_LOGINFO, "SCSI bus %d reset detected", bus);
1066 break;
1067 case ISPASYNC_LIP:
1068 if (msg == NULL) {
1069 msg = "LIP Received";
1070 }
1071 /* FALLTHROUGH */
1072 case ISPASYNC_LOOP_RESET:
1073 if (msg == NULL) {
1074 msg = "LOOP Reset Received";
1075 }
1076 /* FALLTHROUGH */
1077 case ISPASYNC_LOOP_DOWN:
1078 if (msg == NULL) {
1079 msg = "Loop DOWN";
1080 }
1081 /*
1082 * Don't do queue freezes or blockage until we have the
1083 * thread running that can unfreeze/unblock us.
1084 */
1085 if (isp->isp_osinfo.blocked == 0) {
1086 if (isp->isp_osinfo.thread) {
1087 isp->isp_osinfo.blocked = 1;
1088 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
1089 "FREEZE QUEUES @ LINE %d", __LINE__);
1090 scsipi_channel_freeze(&isp->isp_chanA, 1);
1091 }
1092 }
1093 isp_prt(isp, ISP_LOGINFO, msg);
1094 break;
1095 case ISPASYNC_LOOP_UP:
1096 /*
1097 * Let the subsequent ISPASYNC_CHANGE_NOTIFY invoke
1098 * the FC worker thread. When the FC worker thread
1099 * is done, let *it* call scsipi_channel_thaw...
1100 */
1101 isp_prt(isp, ISP_LOGINFO, "Loop UP");
1102 break;
1103 case ISPASYNC_DEV_ARRIVED:
1104 lp = arg;
1105 lp->reserved = 0;
1106 if ((isp->isp_role & ISP_ROLE_INITIATOR) &&
1107 (lp->roles & (SVC3_TGT_ROLE >> SVC3_ROLE_SHIFT))) {
1108 int dbidx = lp - FCPARAM(isp)->portdb;
1109 int i;
1110
1111 for (i = 0; i < MAX_FC_TARG; i++) {
1112 if (i >= FL_ID && i <= SNS_ID) {
1113 continue;
1114 }
1115 if (FCPARAM(isp)->isp_ini_map[i] == 0) {
1116 break;
1117 }
1118 }
1119 if (i < MAX_FC_TARG) {
1120 FCPARAM(isp)->isp_ini_map[i] = dbidx + 1;
1121 lp->ini_map_idx = i + 1;
1122 } else {
1123 isp_prt(isp, ISP_LOGWARN, "out of target ids");
1124 isp_dump_portdb(isp);
1125 }
1126 }
1127 if (lp->ini_map_idx) {
1128 tgt = lp->ini_map_idx - 1;
1129 isp_prt(isp, ISP_LOGCONFIG, prom2,
1130 lp->portid, lp->handle,
1131 roles[lp->roles], "arrived at", tgt,
1132 (uint32_t) (lp->node_wwn >> 32),
1133 (uint32_t) lp->node_wwn,
1134 (uint32_t) (lp->port_wwn >> 32),
1135 (uint32_t) lp->port_wwn);
1136 isp_make_here(isp, tgt);
1137 } else {
1138 isp_prt(isp, ISP_LOGCONFIG, prom,
1139 lp->portid, lp->handle,
1140 roles[lp->roles], "arrived",
1141 (uint32_t) (lp->node_wwn >> 32),
1142 (uint32_t) lp->node_wwn,
1143 (uint32_t) (lp->port_wwn >> 32),
1144 (uint32_t) lp->port_wwn);
1145 }
1146 break;
1147 case ISPASYNC_DEV_CHANGED:
1148 lp = arg;
1149 if (isp_change_is_bad) {
1150 lp->state = FC_PORTDB_STATE_NIL;
1151 if (lp->ini_map_idx) {
1152 tgt = lp->ini_map_idx - 1;
1153 FCPARAM(isp)->isp_ini_map[tgt] = 0;
1154 lp->ini_map_idx = 0;
1155 isp_prt(isp, ISP_LOGCONFIG, prom3,
1156 lp->portid, tgt, "change is bad");
1157 isp_make_gone(isp, tgt);
1158 } else {
1159 isp_prt(isp, ISP_LOGCONFIG, prom,
1160 lp->portid, lp->handle,
1161 roles[lp->roles],
1162 "changed and departed",
1163 (uint32_t) (lp->node_wwn >> 32),
1164 (uint32_t) lp->node_wwn,
1165 (uint32_t) (lp->port_wwn >> 32),
1166 (uint32_t) lp->port_wwn);
1167 }
1168 } else {
1169 lp->portid = lp->new_portid;
1170 lp->roles = lp->new_roles;
1171 if (lp->ini_map_idx) {
1172 int t = lp->ini_map_idx - 1;
1173 FCPARAM(isp)->isp_ini_map[t] =
1174 (lp - FCPARAM(isp)->portdb) + 1;
1175 tgt = lp->ini_map_idx - 1;
1176 isp_prt(isp, ISP_LOGCONFIG, prom2,
1177 lp->portid, lp->handle,
1178 roles[lp->roles], "changed at", tgt,
1179 (uint32_t) (lp->node_wwn >> 32),
1180 (uint32_t) lp->node_wwn,
1181 (uint32_t) (lp->port_wwn >> 32),
1182 (uint32_t) lp->port_wwn);
1183 } else {
1184 isp_prt(isp, ISP_LOGCONFIG, prom,
1185 lp->portid, lp->handle,
1186 roles[lp->roles], "changed",
1187 (uint32_t) (lp->node_wwn >> 32),
1188 (uint32_t) lp->node_wwn,
1189 (uint32_t) (lp->port_wwn >> 32),
1190 (uint32_t) lp->port_wwn);
1191 }
1192 }
1193 break;
1194 case ISPASYNC_DEV_STAYED:
1195 lp = arg;
1196 if (lp->ini_map_idx) {
1197 tgt = lp->ini_map_idx - 1;
1198 isp_prt(isp, ISP_LOGCONFIG, prom2,
1199 lp->portid, lp->handle,
1200 roles[lp->roles], "stayed at", tgt,
1201 (uint32_t) (lp->node_wwn >> 32),
1202 (uint32_t) lp->node_wwn,
1203 (uint32_t) (lp->port_wwn >> 32),
1204 (uint32_t) lp->port_wwn);
1205 } else {
1206 isp_prt(isp, ISP_LOGCONFIG, prom,
1207 lp->portid, lp->handle,
1208 roles[lp->roles], "stayed",
1209 (uint32_t) (lp->node_wwn >> 32),
1210 (uint32_t) lp->node_wwn,
1211 (uint32_t) (lp->port_wwn >> 32),
1212 (uint32_t) lp->port_wwn);
1213 }
1214 break;
1215 case ISPASYNC_DEV_GONE:
1216 lp = arg;
1217 /*
 * If this device has a virtual target and we haven't already
 * marked it for isp_gdt to tell the OS it's gone, start the
 * isp_gdt timer running on it.
1221 *
1222 * If it isn't marked that isp_gdt is going to get rid of it,
1223 * announce that it's gone.
1224 */
1225 if (lp->ini_map_idx && lp->reserved == 0) {
1226 lp->reserved = 1;
1227 lp->new_reserved = isp->isp_osinfo.gone_device_time;
1228 lp->state = FC_PORTDB_STATE_ZOMBIE;
1229 if (isp->isp_osinfo.gdt_running == 0) {
1230 isp->isp_osinfo.gdt_running = 1;
1231 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
1232 "starting Gone Device Timer");
1233 callout_schedule(&isp->isp_osinfo.gdt, hz);
1234 }
1235 tgt = lp->ini_map_idx - 1;
1236 isp_prt(isp, ISP_LOGCONFIG, prom2,
1237 lp->portid, lp->handle,
1238 roles[lp->roles], "gone zombie at", tgt,
1239 (uint32_t) (lp->node_wwn >> 32),
1240 (uint32_t) lp->node_wwn,
1241 (uint32_t) (lp->port_wwn >> 32),
1242 (uint32_t) lp->port_wwn);
1243 } else if (lp->reserved == 0) {
1244 isp_prt(isp, ISP_LOGCONFIG, prom,
1245 lp->portid, lp->handle,
1246 roles[lp->roles], "departed",
1247 (uint32_t) (lp->node_wwn >> 32),
1248 (uint32_t) lp->node_wwn,
1249 (uint32_t) (lp->port_wwn >> 32),
1250 (uint32_t) lp->port_wwn);
1251 }
1252 break;
1253 case ISPASYNC_CHANGE_NOTIFY:
1254 {
1255 if (arg == ISPASYNC_CHANGE_PDB) {
1256 msg = "Port Database Changed";
1257 } else if (arg == ISPASYNC_CHANGE_SNS) {
1258 msg = "Name Server Database Changed";
1259 } else {
1260 msg = "Other Change Notify";
1261 }
1262 /*
1263 * If the loop down timer is running, cancel it.
1264 */
1265 if (callout_active(&isp->isp_osinfo.ldt)) {
1266 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
1267 "Stopping Loop Down Timer");
1268 callout_stop(&isp->isp_osinfo.ldt);
1269 }
1270 isp_prt(isp, ISP_LOGINFO, msg);
1271 /*
1272 * We can set blocked here because we know it's now okay
1273 * to try and run isp_fc_runstate (in order to build loop
1274 * state). But we don't try and freeze the midlayer's queue
1275 * if we have no thread that we can wake to later unfreeze
1276 * it.
1277 */
1278 if (isp->isp_osinfo.blocked == 0) {
1279 isp->isp_osinfo.blocked = 1;
1280 if (isp->isp_osinfo.thread) {
1281 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
1282 "FREEZE QUEUES @ LINE %d", __LINE__);
1283 scsipi_channel_freeze(&isp->isp_chanA, 1);
1284 }
1285 }
1286 /*
1287 * Note that we have work for the thread to do, and
1288 * if the thread is here already, wake it up.
1289 */
1290 if (isp->isp_osinfo.thread) {
1291 wakeup(&isp->isp_osinfo.thread);
1292 } else {
1293 isp_prt(isp, ISP_LOGDEBUG1, "no FC thread yet");
1294 }
1295 break;
1296 }
1297 case ISPASYNC_FW_CRASH:
1298 {
1299 uint16_t mbox1, mbox6;
1300 mbox1 = ISP_READ(isp, OUTMAILBOX1);
1301 if (IS_DUALBUS(isp)) {
1302 mbox6 = ISP_READ(isp, OUTMAILBOX6);
1303 } else {
1304 mbox6 = 0;
1305 }
1306 isp_prt(isp, ISP_LOGERR,
1307 "Internal Firmware Error on bus %d @ RISC Address 0x%x",
1308 mbox6, mbox1);
1309 if (IS_FC(isp)) {
1310 if (isp->isp_osinfo.blocked == 0) {
1311 isp->isp_osinfo.blocked = 1;
1312 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
1313 "FREEZE QUEUES @ LINE %d", __LINE__);
1314 scsipi_channel_freeze(&isp->isp_chanA, 1);
1315 }
1316 #ifdef ISP_FW_CRASH_DUMP
1317 isp_fw_dump(isp);
1318 #endif
1319 }
1320 mbox1 = isp->isp_osinfo.mbox_sleep_ok;
1321 isp->isp_osinfo.mbox_sleep_ok = 0;
1322 isp_reinit(isp);
1323 isp->isp_osinfo.mbox_sleep_ok = mbox1;
1324 isp_async(isp, ISPASYNC_FW_RESTARTED, NULL);
1325 break;
1326 }
1327 default:
1328 break;
1329 }
1330 return (0);
1331 }
1332
1333 #include <machine/stdarg.h>
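/*
 * Print a driver message, gated by the current debug level mask
 * (ISP_LOGALL always prints).
 */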
1334 void
1335 isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
1336 {
1337 va_list ap;
1338 if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
1339 return;
1340 }
1341 printf("%s: ", isp->isp_name);
1342 va_start(ap, fmt);
1343 vprintf(fmt, ap);
1344 va_end(ap);
1345 printf("\n");
1346 }
1347
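/*
 * Recursive spl-based locking: the outermost isp_lock raises to splbio
 * and saves the previous level, which the matching isp_unlock restores.
 */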
1348 void
1349 isp_lock(struct ispsoftc *isp)
1350 {
1351 int s = splbio();
1352 if (isp->isp_osinfo.islocked++ == 0) {
1353 isp->isp_osinfo.splsaved = s;
1354 } else {
1355 splx(s);
1356 }
1357 }
1358
1359 void
1360 isp_unlock(struct ispsoftc *isp)
1361 {
1362 if (isp->isp_osinfo.islocked-- <= 1) {
1363 isp->isp_osinfo.islocked = 0;
1364 splx(isp->isp_osinfo.splsaved);
1365 }
1366 }
1367
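/*
 * Return the elapsed time from a to b (via GET_NANOSEC), forced to be
 * at least 1.
 */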
1368 uint64_t
1369 isp_microtime_sub(struct timeval *b, struct timeval *a)
1370 {
1371 struct timeval x;
1372 uint64_t elapsed;
1373 timersub(b, a, &x);
1374 elapsed = GET_NANOSEC(&x);
1375 if (elapsed == 0)
1376 elapsed++;
1377 return (elapsed);
1378 }
1379
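/*
 * Try to claim the mailbox for a command; returns nonzero if it is
 * already busy.
 */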
1380 int
1381 isp_mbox_acquire(ispsoftc_t *isp)
1382 {
1383 if (isp->isp_osinfo.mboxbsy) {
1384 return (1);
1385 } else {
1386 isp->isp_osinfo.mboxcmd_done = 0;
1387 isp->isp_osinfo.mboxbsy = 1;
1388 return (0);
1389 }
1390 }
1391
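/*
 * Wait for the current mailbox command to complete, either by sleeping
 * until isp_mbox_notify_done wakes us or by polling the interrupt
 * status registers, flagging MBOX_TIMEOUT if it never does.
 */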
1392 void
1393 isp_mbox_wait_complete(struct ispsoftc *isp, mbreg_t *mbp)
1394 {
1395 unsigned int usecs = mbp->timeout;
1396 unsigned int maxc, olim, ilim;
1397 struct timeval start;
1398
1399 if (usecs == 0) {
1400 usecs = MBCMD_DEFAULT_TIMEOUT;
1401 }
1402 maxc = isp->isp_mbxwrk0 + 1;
1403
1404 microtime(&start);
1405 if (isp->isp_osinfo.mbox_sleep_ok) {
1406 int to;
1407 struct timeval tv;
1408
1409 tv.tv_sec = 0;
1410 tv.tv_usec = 0;
1411 for (olim = 0; olim < maxc; olim++) {
1412 tv.tv_sec += (usecs / 1000000);
1413 tv.tv_usec += (usecs % 1000000);
if (tv.tv_usec >= 1000000) {
1415 tv.tv_sec++;
1416 tv.tv_usec -= 1000000;
1417 }
1418 }
1419 timeradd(&tv, &start, &tv);
1420 to = hzto(&tv);
1421 if (to == 0)
1422 to = 1;
1423
1424 isp->isp_osinfo.mbox_sleep_ok = 0;
1425 isp->isp_osinfo.mbox_sleeping = 1;
1426 tsleep(&isp->isp_mbxworkp, PRIBIO, "ispmbx_sleep", to);
1427 isp->isp_osinfo.mbox_sleeping = 0;
1428 isp->isp_osinfo.mbox_sleep_ok = 1;
1429 } else {
1430 for (olim = 0; olim < maxc; olim++) {
1431 for (ilim = 0; ilim < usecs; ilim += 100) {
1432 uint32_t isr;
1433 uint16_t sema, mbox;
1434 if (isp->isp_osinfo.mboxcmd_done) {
1435 break;
1436 }
1437 if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
1438 isp_intr(isp, isr, sema, mbox);
1439 if (isp->isp_osinfo.mboxcmd_done) {
1440 break;
1441 }
1442 }
1443 USEC_DELAY(100);
1444 }
1445 if (isp->isp_osinfo.mboxcmd_done) {
1446 break;
1447 }
1448 }
1449 }
1450 if (isp->isp_osinfo.mboxcmd_done == 0) {
1451 struct timeval finish, elapsed;
1452
1453 microtime(&finish);
1454 timersub(&finish, &start, &elapsed);
1455 isp_prt(isp, ISP_LOGWARN,
1456 "%s Mailbox Command (0x%x) Timeout (%uus actual)",
1457 isp->isp_osinfo.mbox_sleep_ok? "Interrupting" : "Polled",
1458 isp->isp_lastmbxcmd, (elapsed.tv_sec * 1000000) +
1459 elapsed.tv_usec);
1460 mbp->param[0] = MBOX_TIMEOUT;
1461 isp->isp_osinfo.mboxcmd_done = 1;
1462 }
1463 }
1464
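/*
 * Mark the current mailbox command done and wake any sleeper.
 */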
1465 void
1466 isp_mbox_notify_done(ispsoftc_t *isp)
1467 {
1468 if (isp->isp_osinfo.mbox_sleeping) {
1469 wakeup(&isp->isp_mbxworkp);
1470 }
1471 isp->isp_osinfo.mboxcmd_done = 1;
1472 }
1473
1474 void
1475 isp_mbox_release(ispsoftc_t *isp)
1476 {
1477 isp->isp_osinfo.mboxbsy = 0;
1478 }
1479