/* $NetBSD: isp_netbsd.c,v 1.73.2.4 2007/05/27 14:30:03 ad Exp $ */
2 /*
3 * Platform (NetBSD) dependent common attachment code for Qlogic adapters.
4 */
5 /*
6 * Copyright (C) 1997, 1998, 1999 National Aeronautics & Space Administration
7 * All rights reserved.
8 *
9 * Additional Copyright (C) 2000-2007 by Matthew Jacob
10 * All rights reserved.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. The name of the author may not be used to endorse or promote products
21 * derived from this software without specific prior written permission
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: isp_netbsd.c,v 1.73.2.4 2007/05/27 14:30:03 ad Exp $");
37
38 #include <dev/ic/isp_netbsd.h>
39 #include <dev/ic/isp_ioctl.h>
40 #include <sys/scsiio.h>
41
42
43 /*
44 * Set a timeout for the watchdogging of a command.
45 *
46 * The dimensional analysis is
47 *
48 * milliseconds * (seconds/millisecond) * (ticks/second) = ticks
49 *
50 * =
51 *
52 * (milliseconds / 1000) * hz = ticks
53 *
54 *
55 * For timeouts less than 1 second, we'll get zero. Because of this, and
56 * because we want to establish *our* timeout to be longer than what the
57 * firmware might do, we just add 3 seconds at the back end.
58 */
59 #define _XT(xs) ((((xs)->timeout/1000) * hz) + (3 * hz))
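/*
 * Worked example (assuming hz = 100): a 10000ms command timeout gives
 * (10000 / 1000) * 100 + 3 * 100 = 1300 ticks, i.e. 13 seconds.
 */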
60
61 static void isp_config_interrupts(struct device *);
62 static void ispminphys_1020(struct buf *);
63 static void ispminphys(struct buf *);
64 static void ispcmd(struct ispsoftc *, XS_T *);
65 static void isprequest(struct scsipi_channel *, scsipi_adapter_req_t, void *);
66 static int
67 ispioctl(struct scsipi_channel *, u_long, void *, int, struct proc *);
68
69 static void isp_polled_cmd_wait(struct ispsoftc *, XS_T *);
70 static void isp_dog(void *);
71 static void isp_gdt(void *);
72 static void isp_ldt(void *);
73 static void isp_make_here(void *, int);
74 static void isp_make_gone(void *, int);
75 static void isp_fc_worker(void *);
76
77 static const char *roles[4] = {
78 "(none)", "Target", "Initiator", "Target/Initiator"
79 };
80 static const char prom3[] =
81 "PortID 0x%06x Departed from Target %u because of %s";
82 int isp_change_is_bad = 0; /* "changed" devices are bad */
83 int isp_quickboot_time = 15; /* don't wait more than N secs for loop up */
84 static int isp_fabric_hysteresis = 5;
85 #define isp_change_is_bad 0
86
87 /*
 * Complete attachment of hardware, including subdevices.
89 */
90
91 void
92 isp_attach(struct ispsoftc *isp)
93 {
94 isp->isp_state = ISP_RUNSTATE;
95
96 isp->isp_osinfo._adapter.adapt_dev = &isp->isp_osinfo._dev;
97 isp->isp_osinfo._adapter.adapt_nchannels = IS_DUALBUS(isp) ? 2 : 1;
98 isp->isp_osinfo._adapter.adapt_openings = isp->isp_maxcmds;
99 /*
	 * It's not stated whether max_periph is limited by SPI
	 * tag usage, but let's assume that it is.
102 */
103 isp->isp_osinfo._adapter.adapt_max_periph = min(isp->isp_maxcmds, 255);
104 isp->isp_osinfo._adapter.adapt_ioctl = ispioctl;
105 isp->isp_osinfo._adapter.adapt_request = isprequest;
106 if (isp->isp_type <= ISP_HA_SCSI_1020A) {
107 isp->isp_osinfo._adapter.adapt_minphys = ispminphys_1020;
108 } else {
109 isp->isp_osinfo._adapter.adapt_minphys = ispminphys;
110 }
111
112 isp->isp_osinfo._chan.chan_adapter = &isp->isp_osinfo._adapter;
113 isp->isp_osinfo._chan.chan_bustype = &scsi_bustype;
114 isp->isp_osinfo._chan.chan_channel = 0;
115
116 /*
117 * Until the midlayer is fixed to use REPORT LUNS, limit to 8 luns.
118 */
119 isp->isp_osinfo._chan.chan_nluns = min(isp->isp_maxluns, 8);
120
121 callout_init(&isp->isp_osinfo.gdt);
122 callout_setfunc(&isp->isp_osinfo.gdt, isp_gdt, isp);
123
124 callout_init(&isp->isp_osinfo.ldt);
125 callout_setfunc(&isp->isp_osinfo.ldt, isp_ldt, isp);
126
127 if (IS_FC(isp)) {
128 isp->isp_osinfo._chan.chan_ntargets = MAX_FC_TARG;
129 isp->isp_osinfo._chan.chan_id = MAX_FC_TARG;
130 isp->isp_osinfo.threadwork = 1;
131 #ifdef ISP_FW_CRASH_DUMP
132 if (IS_2200(isp)) {
133 FCPARAM(isp)->isp_dump_data =
134 malloc(QLA2200_RISC_IMAGE_DUMP_SIZE, M_DEVBUF,
135 M_NOWAIT);
136 } else if (IS_23XX(isp)) {
137 FCPARAM(isp)->isp_dump_data =
138 malloc(QLA2300_RISC_IMAGE_DUMP_SIZE, M_DEVBUF,
139 M_NOWAIT);
140 }
141 if (FCPARAM(isp)->isp_dump_data)
142 FCPARAM(isp)->isp_dump_data[0] = 0;
143 #endif
144 } else {
145 int bus = 0;
146 sdparam *sdp = isp->isp_param;
147
148 isp->isp_osinfo._chan.chan_ntargets = MAX_TARGETS;
149 isp->isp_osinfo._chan.chan_id = sdp->isp_initiator_id;
150 isp->isp_osinfo.discovered[0] = 1 << sdp->isp_initiator_id;
151 if (IS_DUALBUS(isp)) {
152 isp->isp_osinfo._chan_b = isp->isp_osinfo._chan;
153 sdp++;
154 isp->isp_osinfo.discovered[1] =
155 1 << sdp->isp_initiator_id;
156 isp->isp_osinfo._chan_b.chan_id = sdp->isp_initiator_id;
157 isp->isp_osinfo._chan_b.chan_channel = 1;
158 }
159 ISP_LOCK(isp);
160 (void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
161 if (IS_DUALBUS(isp)) {
162 bus++;
163 (void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
164 }
165 ISP_UNLOCK(isp);
166 }
167
168
169 /*
170 * Defer enabling mailbox interrupts until later.
171 */
172 config_interrupts((struct device *) isp, isp_config_interrupts);
173
174 /*
175 * And attach children (if any).
176 */
177 config_found((void *)isp, &isp->isp_chanA, scsiprint);
178 if (IS_DUALBUS(isp)) {
179 config_found((void *)isp, &isp->isp_chanB, scsiprint);
180 }
181 }
182
183 static void
184 isp_config_interrupts(struct device *self)
185 {
186 struct ispsoftc *isp = (struct ispsoftc *) self;
187
188 isp->isp_osinfo.mbox_sleep_ok = 1;
189
	/*
	 * Only fibre channel cards need the FC worker kthread.
	 */
	if (IS_FC(isp) && kthread_create(PRI_NONE, 0, NULL, isp_fc_worker,
	    isp, &isp->isp_osinfo.thread, "%s:fc_thrd", isp->isp_name)) {
		isp_prt(isp, ISP_LOGERR,
		    "unable to create FC worker thread");
		panic("isp_config_interrupts");
	}
196 }
197
198
199 /*
200 * minphys our xfers
201 */
202 static void
203 ispminphys_1020(struct buf *bp)
204 {
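	/*
	 * Clamp transfers to 16MB, presumably because the 1020/1040 class
	 * parts can only express a 24-bit transfer count (hence the
	 * (1 << 24) limit below).
	 */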
205 if (bp->b_bcount >= (1 << 24)) {
206 bp->b_bcount = (1 << 24);
207 }
208 minphys(bp);
209 }
210
211 static void
212 ispminphys(struct buf *bp)
213 {
214 if (bp->b_bcount >= (1 << 30)) {
215 bp->b_bcount = (1 << 30);
216 }
217 minphys(bp);
218 }
219
220 static int
221 ispioctl(struct scsipi_channel *chan, u_long cmd, void *addr, int flag,
222 struct proc *p)
223 {
224 struct ispsoftc *isp = (void *)chan->chan_adapter->adapt_dev;
225 int retval = ENOTTY;
226
227 switch (cmd) {
228 #ifdef ISP_FW_CRASH_DUMP
229 case ISP_GET_FW_CRASH_DUMP:
230 {
231 uint16_t *ptr = FCPARAM(isp)->isp_dump_data;
232 size_t sz;
233
234 retval = 0;
235 if (IS_2200(isp))
236 sz = QLA2200_RISC_IMAGE_DUMP_SIZE;
237 else
238 sz = QLA2300_RISC_IMAGE_DUMP_SIZE;
239 ISP_LOCK(isp);
240 if (ptr && *ptr) {
241 void *uaddr = *((void **) addr);
242 if (copyout(ptr, uaddr, sz)) {
243 retval = EFAULT;
244 } else {
245 *ptr = 0;
246 }
247 } else {
248 retval = ENXIO;
249 }
250 ISP_UNLOCK(isp);
251 break;
252 }
253
254 case ISP_FORCE_CRASH_DUMP:
255 ISP_LOCK(isp);
256 if (isp->isp_osinfo.blocked == 0) {
257 isp->isp_osinfo.blocked = 1;
258 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
259 "FREEZE QUEUES @ LINE %d", __LINE__);
260 scsipi_channel_freeze(&isp->isp_chanA, 1);
261 }
262 isp_fw_dump(isp);
263 isp_reinit(isp);
264 ISP_UNLOCK(isp);
265 retval = 0;
266 break;
267 #endif
268 case ISP_SDBLEV:
269 {
270 int olddblev = isp->isp_dblev;
271 isp->isp_dblev = *(int *)addr;
272 *(int *)addr = olddblev;
273 retval = 0;
274 break;
275 }
276 case ISP_RESETHBA:
277 ISP_LOCK(isp);
278 isp_reinit(isp);
279 ISP_UNLOCK(isp);
280 retval = 0;
281 break;
282 case ISP_RESCAN:
283 if (IS_FC(isp)) {
284 ISP_LOCK(isp);
285 if (isp_fc_runstate(isp, 5 * 1000000)) {
286 retval = EIO;
287 } else {
288 retval = 0;
289 }
290 ISP_UNLOCK(isp);
291 }
292 break;
293 case ISP_FC_LIP:
294 if (IS_FC(isp)) {
295 ISP_LOCK(isp);
296 if (isp_control(isp, ISPCTL_SEND_LIP, 0)) {
297 retval = EIO;
298 } else {
299 retval = 0;
300 }
301 ISP_UNLOCK(isp);
302 }
303 break;
304 case ISP_FC_GETDINFO:
305 {
306 struct isp_fc_device *ifc = (struct isp_fc_device *) addr;
307 fcportdb_t *lp;
308
309 if (ifc->loopid >= MAX_FC_TARG) {
310 retval = EINVAL;
311 break;
312 }
313 ISP_LOCK(isp);
314 lp = &FCPARAM(isp)->portdb[ifc->loopid];
315 if (lp->state == FC_PORTDB_STATE_VALID) {
316 ifc->role = lp->roles;
317 ifc->loopid = lp->handle;
318 ifc->portid = lp->portid;
319 ifc->node_wwn = lp->node_wwn;
320 ifc->port_wwn = lp->port_wwn;
321 retval = 0;
322 } else {
323 retval = ENODEV;
324 }
325 ISP_UNLOCK(isp);
326 break;
327 }
328 case ISP_GET_STATS:
329 {
330 isp_stats_t *sp = (isp_stats_t *) addr;
331
332 MEMZERO(sp, sizeof (*sp));
333 sp->isp_stat_version = ISP_STATS_VERSION;
334 sp->isp_type = isp->isp_type;
335 sp->isp_revision = isp->isp_revision;
336 ISP_LOCK(isp);
337 sp->isp_stats[ISP_INTCNT] = isp->isp_intcnt;
338 sp->isp_stats[ISP_INTBOGUS] = isp->isp_intbogus;
339 sp->isp_stats[ISP_INTMBOXC] = isp->isp_intmboxc;
340 sp->isp_stats[ISP_INGOASYNC] = isp->isp_intoasync;
341 sp->isp_stats[ISP_RSLTCCMPLT] = isp->isp_rsltccmplt;
342 sp->isp_stats[ISP_FPHCCMCPLT] = isp->isp_fphccmplt;
343 sp->isp_stats[ISP_RSCCHIWAT] = isp->isp_rscchiwater;
344 sp->isp_stats[ISP_FPCCHIWAT] = isp->isp_fpcchiwater;
345 ISP_UNLOCK(isp);
346 retval = 0;
347 break;
348 }
349 case ISP_CLR_STATS:
350 ISP_LOCK(isp);
351 isp->isp_intcnt = 0;
352 isp->isp_intbogus = 0;
353 isp->isp_intmboxc = 0;
354 isp->isp_intoasync = 0;
355 isp->isp_rsltccmplt = 0;
356 isp->isp_fphccmplt = 0;
357 isp->isp_rscchiwater = 0;
358 isp->isp_fpcchiwater = 0;
359 ISP_UNLOCK(isp);
360 retval = 0;
361 break;
362 case ISP_FC_GETHINFO:
363 {
364 struct isp_hba_device *hba = (struct isp_hba_device *) addr;
365 MEMZERO(hba, sizeof (*hba));
366 ISP_LOCK(isp);
367 hba->fc_speed = FCPARAM(isp)->isp_gbspeed;
368 hba->fc_scsi_supported = 1;
369 hba->fc_topology = FCPARAM(isp)->isp_topo + 1;
370 hba->fc_loopid = FCPARAM(isp)->isp_loopid;
371 hba->nvram_node_wwn = FCPARAM(isp)->isp_wwnn_nvram;
372 hba->nvram_port_wwn = FCPARAM(isp)->isp_wwpn_nvram;
373 hba->active_node_wwn = ISP_NODEWWN(isp);
374 hba->active_port_wwn = ISP_PORTWWN(isp);
375 ISP_UNLOCK(isp);
376 retval = 0;
377 break;
378 }
379 case SCBUSIORESET:
380 ISP_LOCK(isp);
381 if (isp_control(isp, ISPCTL_RESET_BUS, &chan->chan_channel)) {
382 retval = EIO;
383 } else {
384 retval = 0;
385 }
386 ISP_UNLOCK(isp);
387 break;
388 default:
389 break;
390 }
391 return (retval);
392 }
393
394 static void
395 ispcmd(struct ispsoftc *isp, XS_T *xs)
396 {
397 volatile uint8_t ombi;
398 int lim;
399
400 ISP_LOCK(isp);
401 if (isp->isp_state < ISP_RUNSTATE) {
402 ISP_DISABLE_INTS(isp);
403 isp_init(isp);
404 if (isp->isp_state != ISP_INITSTATE) {
405 ISP_ENABLE_INTS(isp);
406 ISP_UNLOCK(isp);
407 isp_prt(isp, ISP_LOGERR, "isp not at init state");
408 XS_SETERR(xs, HBA_BOTCH);
409 scsipi_done(xs);
410 return;
411 }
412 isp->isp_state = ISP_RUNSTATE;
413 ISP_ENABLE_INTS(isp);
414 }
415
416 /*
417 * Handle the case of a FC card where the FC thread hasn't
418 * fired up yet and we don't yet have a known loop state.
419 */
420 if (IS_FC(isp) && (FCPARAM(isp)->isp_fwstate != FW_READY ||
421 FCPARAM(isp)->isp_loopstate != LOOP_READY) &&
422 isp->isp_osinfo.thread == NULL) {
		int delay_time;

		/* remember whether mailbox sleeping was allowed on entry */
		ombi = isp->isp_osinfo.mbox_sleep_ok != 0;
425
426 if (xs->xs_control & XS_CTL_POLL) {
427 isp->isp_osinfo.mbox_sleep_ok = 0;
428 }
429
430 if (isp->isp_osinfo.loop_checked == 0) {
431 delay_time = 10 * 1000000;
432 isp->isp_osinfo.loop_checked = 1;
433 } else {
434 delay_time = 250000;
435 }
436
437 if (isp_fc_runstate(isp, delay_time) != 0) {
438 if (xs->xs_control & XS_CTL_POLL) {
439 isp->isp_osinfo.mbox_sleep_ok = ombi;
440 }
441 if (FCPARAM(isp)->loop_seen_once == 0) {
442 XS_SETERR(xs, HBA_SELTIMEOUT);
443 scsipi_done(xs);
444 ISP_UNLOCK(isp);
445 return;
446 }
447 /*
448 * Otherwise, fall thru to be queued up for later.
449 */
450 } else {
451 int wasblocked =
452 (isp->isp_osinfo.blocked || isp->isp_osinfo.paused);
453 isp->isp_osinfo.blocked =
454 isp->isp_osinfo.paused = 0;
455 if (wasblocked) {
456 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
457 "THAW QUEUES @ LINE %d", __LINE__);
458 scsipi_channel_thaw(&isp->isp_chanA, 1);
459 }
460 }
461 if (xs->xs_control & XS_CTL_POLL) {
462 isp->isp_osinfo.mbox_sleep_ok = ombi;
463 }
464 }
465
466 if (isp->isp_osinfo.paused) {
467 isp_prt(isp, ISP_LOGWARN, "I/O while paused");
468 xs->error = XS_RESOURCE_SHORTAGE;
469 scsipi_done(xs);
470 ISP_UNLOCK(isp);
471 return;
472 }
473 if (isp->isp_osinfo.blocked) {
474 isp_prt(isp, ISP_LOGWARN, "I/O while blocked");
475 xs->error = XS_REQUEUE;
476 scsipi_done(xs);
477 ISP_UNLOCK(isp);
478 return;
479 }
480
481 if (xs->xs_control & XS_CTL_POLL) {
482 ombi = isp->isp_osinfo.mbox_sleep_ok;
483 isp->isp_osinfo.mbox_sleep_ok = 0;
484 }
485
486 switch (isp_start(xs)) {
487 case CMD_QUEUED:
488 if (xs->xs_control & XS_CTL_POLL) {
489 isp_polled_cmd_wait(isp, xs);
490 isp->isp_osinfo.mbox_sleep_ok = ombi;
491 } else if (xs->timeout) {
492 callout_reset(&xs->xs_callout, _XT(xs), isp_dog, xs);
493 }
494 break;
495 case CMD_EAGAIN:
496 isp->isp_osinfo.paused = 1;
497 xs->error = XS_RESOURCE_SHORTAGE;
498 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
499 "FREEZE QUEUES @ LINE %d", __LINE__);
500 scsipi_channel_freeze(&isp->isp_chanA, 1);
501 if (IS_DUALBUS(isp)) {
502 scsipi_channel_freeze(&isp->isp_chanB, 1);
503 }
504 scsipi_done(xs);
505 break;
506 case CMD_RQLATER:
507 /*
		 * We can only get RQLATER from FC devices (1 channel only).
		 *
		 * If we've never seen loop up, check whether we've been down
		 * longer than quickboot time; otherwise use the loop down
		 * limit time. If we've exceeded that limit, start giving up
		 * on commands.
513 */
514 if (FCPARAM(isp)->loop_seen_once == 0) {
515 lim = isp_quickboot_time;
516 } else {
517 lim = isp->isp_osinfo.loop_down_limit;
518 }
519 if (isp->isp_osinfo.loop_down_time >= lim) {
520 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
521 "RQLATER -> SELTIMEOUT");
522 XS_SETERR(xs, HBA_SELTIMEOUT);
523 scsipi_done(xs);
524 break;
525 }
526 if (isp->isp_osinfo.blocked == 0) {
527 isp->isp_osinfo.blocked = 1;
528 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
529 "FREEZE QUEUES @ LINE %d", __LINE__);
530 scsipi_channel_freeze(&isp->isp_chanA, 1);
531 } else {
532 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
533 "RQLATER WITH FROZEN QUEUES @ LINE %d", __LINE__);
534 }
535 xs->error = XS_REQUEUE;
536 scsipi_done(xs);
537 break;
538 case CMD_COMPLETE:
539 scsipi_done(xs);
540 break;
541 }
542 ISP_UNLOCK(isp);
543 }
544
545 static void
546 isprequest(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
547 {
548 struct ispsoftc *isp = (void *)chan->chan_adapter->adapt_dev;
549
550 switch (req) {
551 case ADAPTER_REQ_RUN_XFER:
552 ispcmd(isp, (XS_T *) arg);
553 break;
554
555 case ADAPTER_REQ_GROW_RESOURCES:
556 /* Not supported. */
557 break;
558
559 case ADAPTER_REQ_SET_XFER_MODE:
560 if (IS_SCSI(isp)) {
561 struct scsipi_xfer_mode *xm = arg;
562 int dflags = 0;
563 sdparam *sdp = SDPARAM(isp);
564
565 sdp += chan->chan_channel;
566 if (xm->xm_mode & PERIPH_CAP_TQING)
567 dflags |= DPARM_TQING;
568 if (xm->xm_mode & PERIPH_CAP_WIDE16)
569 dflags |= DPARM_WIDE;
570 if (xm->xm_mode & PERIPH_CAP_SYNC)
571 dflags |= DPARM_SYNC;
572 ISP_LOCK(isp);
573 sdp->isp_devparam[xm->xm_target].goal_flags |= dflags;
574 dflags = sdp->isp_devparam[xm->xm_target].goal_flags;
575 sdp->isp_devparam[xm->xm_target].dev_update = 1;
576 isp->isp_update |= (1 << chan->chan_channel);
577 ISP_UNLOCK(isp);
578 isp_prt(isp, ISP_LOGDEBUG1,
579 "isprequest: device flags 0x%x for %d.%d.X",
580 dflags, chan->chan_channel, xm->xm_target);
581 break;
582 }
583 default:
584 break;
585 }
586 }
587
588 static void
589 isp_polled_cmd_wait(struct ispsoftc *isp, XS_T *xs)
590 {
591 int infinite = 0, mswait;
592
593 /*
594 * If we can't use interrupts, poll on completion.
595 */
596 if ((mswait = XS_TIME(xs)) == 0) {
597 infinite = 1;
598 }
599
600 while (mswait || infinite) {
601 uint32_t isr;
602 uint16_t sema, mbox;
603 if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
604 isp_intr(isp, isr, sema, mbox);
605 if (XS_CMD_DONE_P(xs)) {
606 break;
607 }
608 }
609 USEC_DELAY(1000);
610 mswait -= 1;
611 }
612
613 /*
	 * If no other error occurred but we didn't finish,
	 * something bad happened, so abort the command.
616 */
617 if (XS_CMD_DONE_P(xs) == 0) {
618 if (isp_control(isp, ISPCTL_ABORT_CMD, xs)) {
619 isp_reinit(isp);
620 }
621 if (XS_NOERR(xs)) {
622 isp_prt(isp, ISP_LOGERR, "polled command timed out");
623 XS_SETERR(xs, HBA_BOTCH);
624 }
625 }
626 scsipi_done(xs);
627 }
628
629 void
630 isp_done(XS_T *xs)
631 {
632 if (XS_CMD_WDOG_P(xs) == 0) {
633 struct ispsoftc *isp = XS_ISP(xs);
634 callout_stop(&xs->xs_callout);
635 if (XS_CMD_GRACE_P(xs)) {
636 isp_prt(isp, ISP_LOGDEBUG1,
637 "finished command on borrowed time");
638 }
639 XS_CMD_S_CLEAR(xs);
640 /*
641 * Fixup- if we get a QFULL, we need
642 * to set XS_BUSY as the error.
643 */
644 if (xs->status == SCSI_QUEUE_FULL) {
645 xs->error = XS_BUSY;
646 }
647 if (isp->isp_osinfo.paused) {
648 isp->isp_osinfo.paused = 0;
649 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
650 "THAW QUEUES @ LINE %d", __LINE__);
651 scsipi_channel_timed_thaw(&isp->isp_chanA);
652 if (IS_DUALBUS(isp)) {
653 scsipi_channel_timed_thaw(&isp->isp_chanB);
654 }
655 }
656 if (xs->error == XS_DRIVER_STUFFUP) {
657 isp_prt(isp, ISP_LOGERR,
658 "BOTCHED cmd for %d.%d.%d cmd 0x%x datalen %ld",
659 XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs),
660 XS_CDBP(xs)[0], (long) XS_XFRLEN(xs));
661 }
662 scsipi_done(xs);
663 }
664 }
665
666 static void
667 isp_dog(void *arg)
668 {
669 XS_T *xs = arg;
670 struct ispsoftc *isp = XS_ISP(xs);
671 uint32_t handle;
672
673
674 ISP_ILOCK(isp);
675 /*
676 * We've decided this command is dead. Make sure we're not trying
	 * to kill a command that's already dead by getting its handle
	 * and seeing whether it's still alive.
679 */
680 handle = isp_find_handle(isp, xs);
681 if (handle) {
682 uint32_t isr;
683 uint16_t mbox, sema;
684
685 if (XS_CMD_DONE_P(xs)) {
686 isp_prt(isp, ISP_LOGDEBUG1,
687 "watchdog found done cmd (handle 0x%x)", handle);
688 ISP_IUNLOCK(isp);
689 return;
690 }
691
692 if (XS_CMD_WDOG_P(xs)) {
693 isp_prt(isp, ISP_LOGDEBUG1,
694 "recursive watchdog (handle 0x%x)", handle);
695 ISP_IUNLOCK(isp);
696 return;
697 }
698
699 XS_CMD_S_WDOG(xs);
700
701 if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
702 isp_intr(isp, isr, sema, mbox);
703
704 }
705 if (XS_CMD_DONE_P(xs)) {
706 isp_prt(isp, ISP_LOGDEBUG1,
707 "watchdog cleanup for handle 0x%x", handle);
708 XS_CMD_C_WDOG(xs);
709 isp_done(xs);
710 } else if (XS_CMD_GRACE_P(xs)) {
711 isp_prt(isp, ISP_LOGDEBUG1,
712 "watchdog timeout for handle 0x%x", handle);
713 /*
714 * Make sure the command is *really* dead before we
715 * release the handle (and DMA resources) for reuse.
716 */
717 (void) isp_control(isp, ISPCTL_ABORT_CMD, arg);
718
719 /*
720 * After this point, the command is really dead.
721 */
722 if (XS_XFRLEN(xs)) {
723 ISP_DMAFREE(isp, xs, handle);
724 }
725 isp_destroy_handle(isp, handle);
726 XS_SETERR(xs, XS_TIMEOUT);
727 XS_CMD_S_CLEAR(xs);
728 isp_done(xs);
729 } else {
730 uint32_t nxti, optr;
731 void *qe;
732 isp_marker_t local, *mp = &local;
733 isp_prt(isp, ISP_LOGDEBUG2,
734 "possible command timeout on handle %x", handle);
735 XS_CMD_C_WDOG(xs);
736 callout_reset(&xs->xs_callout, hz, isp_dog, xs);
737 if (isp_getrqentry(isp, &nxti, &optr, &qe)) {
				ISP_IUNLOCK(isp);
739 return;
740 }
741 XS_CMD_S_GRACE(xs);
742 MEMZERO((void *) mp, sizeof (*mp));
743 mp->mrk_header.rqs_entry_count = 1;
744 mp->mrk_header.rqs_entry_type = RQSTYPE_MARKER;
745 mp->mrk_modifier = SYNC_ALL;
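			/*
			 * Bit 7 of the marker's target field carries the bus
			 * (channel) number, hence the shift below.
			 */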
746 mp->mrk_target = XS_CHANNEL(xs) << 7;
747 isp_put_marker(isp, mp, qe);
748 ISP_ADD_REQUEST(isp, nxti);
749 }
750 } else {
751 isp_prt(isp, ISP_LOGDEBUG0, "watchdog with no command");
752 }
753 ISP_IUNLOCK(isp);
754 }
755
756
757 static void
758 isp_fc_worker(void *arg)
759 {
760 void scsipi_run_queue(struct scsipi_channel *);
761 ispsoftc_t *isp = arg;
762 int slp = 0;
763 int s = splbio();
764 /*
	 * This loop handles the case where we have yet to get
	 * good fibre channel state.
767 */
768 while (isp->isp_osinfo.thread != NULL) {
769 int sok, lb, lim;
770
771 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "checking FC state");
772 sok = isp->isp_osinfo.mbox_sleep_ok;
773 isp->isp_osinfo.mbox_sleep_ok = 1;
774 lb = isp_fc_runstate(isp, 250000);
775 isp->isp_osinfo.mbox_sleep_ok = sok;
776 if (lb) {
777 /*
778 * Increment loop down time by the last sleep interval
779 */
780 isp->isp_osinfo.loop_down_time += slp;
781
782 if (lb < 0) {
783 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
784 "FC loop not up (down count %d)",
785 isp->isp_osinfo.loop_down_time);
786 } else {
787 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
788 "FC got to %d (down count %d)",
789 lb, isp->isp_osinfo.loop_down_time);
790 }
791
792
793 /*
794 * If we've never seen loop up and we've waited longer
795 * than quickboot time, or we've seen loop up but we've
796 * waited longer than loop_down_limit, give up and go
797 * to sleep until loop comes up.
798 */
799 if (FCPARAM(isp)->loop_seen_once == 0) {
800 lim = isp_quickboot_time;
801 } else {
802 lim = isp->isp_osinfo.loop_down_limit;
803 }
804 if (isp->isp_osinfo.loop_down_time >= lim) {
805 /*
806 * If we're now past our limit, release
				 * the queues and let commands come in and
				 * either get HBA_SELTIMEOUT or cause
809 * another freeze.
810 */
811 isp->isp_osinfo.blocked = 1;
812 slp = 0;
813 } else if (isp->isp_osinfo.loop_down_time < 10) {
814 slp = 1;
815 } else if (isp->isp_osinfo.loop_down_time < 30) {
816 slp = 5;
817 } else if (isp->isp_osinfo.loop_down_time < 60) {
818 slp = 10;
819 } else if (isp->isp_osinfo.loop_down_time < 120) {
820 slp = 20;
821 } else {
822 slp = 30;
823 }
824
825 } else {
826 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
827 "FC state OK");
828 isp->isp_osinfo.loop_down_time = 0;
829 slp = 0;
830 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
831 "THAW QUEUES @ LINE %d", __LINE__);
832 scsipi_channel_thaw(&isp->isp_chanA, 1);
833 }
834
835 /*
836 * If we'd frozen the queues, unfreeze them now so that
837 * we can start getting commands. If the FC state isn't
838 * okay yet, they'll hit that in isp_start which will
839 * freeze the queues again.
840 */
841 if (isp->isp_osinfo.blocked) {
842 isp->isp_osinfo.blocked = 0;
843 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
844 "THAW QUEUES @ LINE %d", __LINE__);
845 scsipi_channel_thaw(&isp->isp_chanA, 1);
846 }
847 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "sleep time %d", slp);
848 tsleep(&isp->isp_osinfo.thread, PRIBIO, "ispf", slp * hz);
849
850 /*
851 * If slp is zero, we're waking up for the first time after
852 * things have been okay. In this case, we set a deferral state
853 * for all commands and delay hysteresis seconds before starting
854 * the FC state evaluation. This gives the loop/fabric a chance
855 * to settle.
856 */
857 if (slp == 0 && isp_fabric_hysteresis) {
858 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
859 "sleep hysteresis tick time %d",
860 isp_fabric_hysteresis * hz);
861 (void) tsleep(&isp_fabric_hysteresis, PRIBIO, "ispT",
862 (isp_fabric_hysteresis * hz));
863 }
864 }
865 splx(s);
866
867 /* In case parent is waiting for us to exit. */
868 wakeup(&isp->isp_osinfo.thread);
869 kthread_exit(0);
870 }
871
872 /*
873 * Free any associated resources prior to decommissioning and
874 * set the card to a known state (so it doesn't wake up and kick
875 * us when we aren't expecting it to).
876 *
877 * Locks are held before coming here.
878 */
879 void
880 isp_uninit(struct ispsoftc *isp)
881 {
882 isp_lock(isp);
883 /*
884 * Leave with interrupts disabled.
885 */
886 ISP_DISABLE_INTS(isp);
887 isp_unlock(isp);
888 }
889
890 /*
891 * Gone Device Timer Function- when we have decided that a device has gone
892 * away, we wait a specific period of time prior to telling the OS it has
893 * gone away.
894 *
895 * This timer function fires once a second and then scans the port database
896 * for devices that are marked dead but still have a virtual target assigned.
897 * We decrement a counter for that port database entry, and when it hits zero,
898 * we tell the OS the device has gone away.
899 */
900 static void
901 isp_gdt(void *arg)
902 {
903 ispsoftc_t *isp = arg;
904 fcportdb_t *lp;
905 int dbidx, tgt, more_to_do = 0;
906
907 isp_prt(isp, ISP_LOGDEBUG0, "GDT timer expired");
908 ISP_LOCK(isp);
909 for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) {
910 lp = &FCPARAM(isp)->portdb[dbidx];
911
912 if (lp->state != FC_PORTDB_STATE_ZOMBIE) {
913 continue;
914 }
915 if (lp->ini_map_idx == 0) {
916 continue;
917 }
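		/*
		 * new_reserved counts down, once per timer tick (about a
		 * second), to the point where we declare the device gone.
		 */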
918 if (lp->new_reserved == 0) {
919 continue;
920 }
921 lp->new_reserved -= 1;
922 if (lp->new_reserved != 0) {
923 more_to_do++;
924 continue;
925 }
926 tgt = lp->ini_map_idx - 1;
927 FCPARAM(isp)->isp_ini_map[tgt] = 0;
928 lp->ini_map_idx = 0;
929 lp->state = FC_PORTDB_STATE_NIL;
930 isp_prt(isp, ISP_LOGCONFIG, prom3, lp->portid, tgt,
931 "Gone Device Timeout");
932 isp_make_gone(isp, tgt);
933 }
934 if (more_to_do) {
935 callout_schedule(&isp->isp_osinfo.gdt, hz);
936 } else {
937 isp->isp_osinfo.gdt_running = 0;
938 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
939 "stopping Gone Device Timer");
940 }
941 ISP_UNLOCK(isp);
942 }
943
944 /*
 * Loop Down Timer Function- when the loop goes down, a timer is started,
 * and after it expires we come here and take all probational devices that
 * the OS knows about and tell the OS that they've gone away.
 *
 * We don't clear the devices out of our port database because, when the loop
 * comes back up, we have to do some actual cleanup with the chip at that
951 * point (implicit PLOGO, e.g., to get the chip's port database state right).
952 */
953 static void
954 isp_ldt(void *arg)
955 {
956 ispsoftc_t *isp = arg;
957 fcportdb_t *lp;
958 int dbidx, tgt;
959
960 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "Loop Down Timer expired");
961 ISP_LOCK(isp);
962
963 /*
	 * Notify the OS of all targets that we now consider to have departed.
965 */
966 for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) {
967 lp = &FCPARAM(isp)->portdb[dbidx];
968
969 if (lp->state != FC_PORTDB_STATE_PROBATIONAL) {
970 continue;
971 }
972 if (lp->ini_map_idx == 0) {
973 continue;
974 }
975
976 /*
977 * XXX: CLEAN UP AND COMPLETE ANY PENDING COMMANDS FIRST!
978 */
979
980 /*
981 * Mark that we've announced that this device is gone....
982 */
983 lp->reserved = 1;
984
985 /*
		 * but *don't* change the state of the entry. Just clear
		 * any target id stuff and announce to the midlayer that the
		 * device is gone. This way any necessary PLOGO stuff
		 * will happen when the loop comes back up.
990 */
991
992 tgt = lp->ini_map_idx - 1;
993 FCPARAM(isp)->isp_ini_map[tgt] = 0;
994 lp->ini_map_idx = 0;
995 isp_prt(isp, ISP_LOGCONFIG, prom3, lp->portid, tgt,
996 "Loop Down Timeout");
997 isp_make_gone(isp, tgt);
998 }
999
1000 /*
	 * The loop down timer has expired. Wake up the kthread
	 * so it can notice that fact (or find that the loop is back up).
1003 */
1004 isp->isp_osinfo.loop_down_time = isp->isp_osinfo.loop_down_limit+1;
1005 wakeup(&isp->isp_osinfo.thread);
1006 ISP_UNLOCK(isp);
1007 }
1008
1009 static void
1010 isp_make_here(ispsoftc_t *isp, int tgt)
1011 {
1012 isp_prt(isp, ISP_LOGINFO, "target %d has arrived", tgt);
1013 }
1014
1015 static void
1016 isp_make_gone(ispsoftc_t *isp, int tgt)
1017 {
1018 isp_prt(isp, ISP_LOGINFO, "target %d has departed", tgt);
1019 }
1020
1021 int
1022 isp_async(struct ispsoftc *isp, ispasync_t cmd, void *arg)
1023 {
1024 int bus, tgt;
1025 const char *msg = NULL;
1026 static const char prom[] =
1027 "PortID 0x%06x handle 0x%x role %s %s\n"
1028 " WWNN 0x%08x%08x WWPN 0x%08x%08x";
1029 static const char prom2[] =
1030 "PortID 0x%06x handle 0x%x role %s %s tgt %u\n"
1031 " WWNN 0x%08x%08x WWPN 0x%08x%08x";
1032 fcportdb_t *lp;
1033
1034 switch (cmd) {
1035 case ISPASYNC_NEW_TGT_PARAMS:
1036 if (IS_SCSI(isp) && isp->isp_dblev) {
1037 sdparam *sdp = isp->isp_param;
1038 int flags;
1039 struct scsipi_xfer_mode xm;
1040
1041 tgt = *((int *) arg);
1042 bus = (tgt >> 16) & 0xffff;
1043 tgt &= 0xffff;
1044 sdp += bus;
1045 flags = sdp->isp_devparam[tgt].actv_flags;
1046
1047 xm.xm_mode = 0;
1048 xm.xm_period = sdp->isp_devparam[tgt].actv_period;
1049 xm.xm_offset = sdp->isp_devparam[tgt].actv_offset;
1050 xm.xm_target = tgt;
1051
1052 if ((flags & DPARM_SYNC) && xm.xm_period && xm.xm_offset)
1053 xm.xm_mode |= PERIPH_CAP_SYNC;
1054 if (flags & DPARM_WIDE)
1055 xm.xm_mode |= PERIPH_CAP_WIDE16;
1056 if (flags & DPARM_TQING)
1057 xm.xm_mode |= PERIPH_CAP_TQING;
1058 scsipi_async_event(bus? &isp->isp_chanB : &isp->isp_chanA,
1059 ASYNC_EVENT_XFER_MODE, &xm);
1060 break;
1061 }
1062 case ISPASYNC_BUS_RESET:
1063 bus = *((int *) arg);
1064 scsipi_async_event(bus? &isp->isp_chanB : &isp->isp_chanA,
1065 ASYNC_EVENT_RESET, NULL);
1066 isp_prt(isp, ISP_LOGINFO, "SCSI bus %d reset detected", bus);
1067 break;
1068 case ISPASYNC_LIP:
1069 if (msg == NULL) {
1070 msg = "LIP Received";
1071 }
1072 /* FALLTHROUGH */
1073 case ISPASYNC_LOOP_RESET:
1074 if (msg == NULL) {
1075 msg = "LOOP Reset Received";
1076 }
1077 /* FALLTHROUGH */
1078 case ISPASYNC_LOOP_DOWN:
1079 if (msg == NULL) {
1080 msg = "Loop DOWN";
1081 }
1082 /*
1083 * Don't do queue freezes or blockage until we have the
1084 * thread running that can unfreeze/unblock us.
1085 */
1086 if (isp->isp_osinfo.blocked == 0) {
1087 if (isp->isp_osinfo.thread) {
1088 isp->isp_osinfo.blocked = 1;
1089 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
1090 "FREEZE QUEUES @ LINE %d", __LINE__);
1091 scsipi_channel_freeze(&isp->isp_chanA, 1);
1092 }
1093 }
1094 isp_prt(isp, ISP_LOGINFO, msg);
1095 break;
1096 case ISPASYNC_LOOP_UP:
1097 /*
1098 * Let the subsequent ISPASYNC_CHANGE_NOTIFY invoke
1099 * the FC worker thread. When the FC worker thread
1100 * is done, let *it* call scsipi_channel_thaw...
1101 */
1102 isp_prt(isp, ISP_LOGINFO, "Loop UP");
1103 break;
1104 case ISPASYNC_DEV_ARRIVED:
1105 lp = arg;
1106 lp->reserved = 0;
1107 if ((isp->isp_role & ISP_ROLE_INITIATOR) &&
1108 (lp->roles & (SVC3_TGT_ROLE >> SVC3_ROLE_SHIFT))) {
1109 int dbidx = lp - FCPARAM(isp)->portdb;
1110 int i;
1111
1112 for (i = 0; i < MAX_FC_TARG; i++) {
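				/* skip loop ids reserved for the fabric and name server */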
1113 if (i >= FL_ID && i <= SNS_ID) {
1114 continue;
1115 }
1116 if (FCPARAM(isp)->isp_ini_map[i] == 0) {
1117 break;
1118 }
1119 }
1120 if (i < MAX_FC_TARG) {
1121 FCPARAM(isp)->isp_ini_map[i] = dbidx + 1;
1122 lp->ini_map_idx = i + 1;
1123 } else {
1124 isp_prt(isp, ISP_LOGWARN, "out of target ids");
1125 isp_dump_portdb(isp);
1126 }
1127 }
1128 if (lp->ini_map_idx) {
1129 tgt = lp->ini_map_idx - 1;
1130 isp_prt(isp, ISP_LOGCONFIG, prom2,
1131 lp->portid, lp->handle,
1132 roles[lp->roles], "arrived at", tgt,
1133 (uint32_t) (lp->node_wwn >> 32),
1134 (uint32_t) lp->node_wwn,
1135 (uint32_t) (lp->port_wwn >> 32),
1136 (uint32_t) lp->port_wwn);
1137 isp_make_here(isp, tgt);
1138 } else {
1139 isp_prt(isp, ISP_LOGCONFIG, prom,
1140 lp->portid, lp->handle,
1141 roles[lp->roles], "arrived",
1142 (uint32_t) (lp->node_wwn >> 32),
1143 (uint32_t) lp->node_wwn,
1144 (uint32_t) (lp->port_wwn >> 32),
1145 (uint32_t) lp->port_wwn);
1146 }
1147 break;
1148 case ISPASYNC_DEV_CHANGED:
1149 lp = arg;
1150 if (isp_change_is_bad) {
1151 lp->state = FC_PORTDB_STATE_NIL;
1152 if (lp->ini_map_idx) {
1153 tgt = lp->ini_map_idx - 1;
1154 FCPARAM(isp)->isp_ini_map[tgt] = 0;
1155 lp->ini_map_idx = 0;
1156 isp_prt(isp, ISP_LOGCONFIG, prom3,
1157 lp->portid, tgt, "change is bad");
1158 isp_make_gone(isp, tgt);
1159 } else {
1160 isp_prt(isp, ISP_LOGCONFIG, prom,
1161 lp->portid, lp->handle,
1162 roles[lp->roles],
1163 "changed and departed",
1164 (uint32_t) (lp->node_wwn >> 32),
1165 (uint32_t) lp->node_wwn,
1166 (uint32_t) (lp->port_wwn >> 32),
1167 (uint32_t) lp->port_wwn);
1168 }
1169 } else {
1170 lp->portid = lp->new_portid;
1171 lp->roles = lp->new_roles;
1172 if (lp->ini_map_idx) {
1173 int t = lp->ini_map_idx - 1;
1174 FCPARAM(isp)->isp_ini_map[t] =
1175 (lp - FCPARAM(isp)->portdb) + 1;
1176 tgt = lp->ini_map_idx - 1;
1177 isp_prt(isp, ISP_LOGCONFIG, prom2,
1178 lp->portid, lp->handle,
1179 roles[lp->roles], "changed at", tgt,
1180 (uint32_t) (lp->node_wwn >> 32),
1181 (uint32_t) lp->node_wwn,
1182 (uint32_t) (lp->port_wwn >> 32),
1183 (uint32_t) lp->port_wwn);
1184 } else {
1185 isp_prt(isp, ISP_LOGCONFIG, prom,
1186 lp->portid, lp->handle,
1187 roles[lp->roles], "changed",
1188 (uint32_t) (lp->node_wwn >> 32),
1189 (uint32_t) lp->node_wwn,
1190 (uint32_t) (lp->port_wwn >> 32),
1191 (uint32_t) lp->port_wwn);
1192 }
1193 }
1194 break;
1195 case ISPASYNC_DEV_STAYED:
1196 lp = arg;
1197 if (lp->ini_map_idx) {
1198 tgt = lp->ini_map_idx - 1;
1199 isp_prt(isp, ISP_LOGCONFIG, prom2,
1200 lp->portid, lp->handle,
1201 roles[lp->roles], "stayed at", tgt,
1202 (uint32_t) (lp->node_wwn >> 32),
1203 (uint32_t) lp->node_wwn,
1204 (uint32_t) (lp->port_wwn >> 32),
1205 (uint32_t) lp->port_wwn);
1206 } else {
1207 isp_prt(isp, ISP_LOGCONFIG, prom,
1208 lp->portid, lp->handle,
1209 roles[lp->roles], "stayed",
1210 (uint32_t) (lp->node_wwn >> 32),
1211 (uint32_t) lp->node_wwn,
1212 (uint32_t) (lp->port_wwn >> 32),
1213 (uint32_t) lp->port_wwn);
1214 }
1215 break;
1216 case ISPASYNC_DEV_GONE:
1217 lp = arg;
1218 /*
1219 * If this has a virtual target and we haven't marked it
1220 * that we're going to have isp_gdt tell the OS it's gone,
1221 * set the isp_gdt timer running on it.
1222 *
1223 * If it isn't marked that isp_gdt is going to get rid of it,
1224 * announce that it's gone.
1225 */
1226 if (lp->ini_map_idx && lp->reserved == 0) {
1227 lp->reserved = 1;
1228 lp->new_reserved = isp->isp_osinfo.gone_device_time;
1229 lp->state = FC_PORTDB_STATE_ZOMBIE;
1230 if (isp->isp_osinfo.gdt_running == 0) {
1231 isp->isp_osinfo.gdt_running = 1;
1232 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
1233 "starting Gone Device Timer");
1234 callout_schedule(&isp->isp_osinfo.gdt, hz);
1235 }
1236 tgt = lp->ini_map_idx - 1;
1237 isp_prt(isp, ISP_LOGCONFIG, prom2,
1238 lp->portid, lp->handle,
1239 roles[lp->roles], "gone zombie at", tgt,
1240 (uint32_t) (lp->node_wwn >> 32),
1241 (uint32_t) lp->node_wwn,
1242 (uint32_t) (lp->port_wwn >> 32),
1243 (uint32_t) lp->port_wwn);
1244 } else if (lp->reserved == 0) {
1245 isp_prt(isp, ISP_LOGCONFIG, prom,
1246 lp->portid, lp->handle,
1247 roles[lp->roles], "departed",
1248 (uint32_t) (lp->node_wwn >> 32),
1249 (uint32_t) lp->node_wwn,
1250 (uint32_t) (lp->port_wwn >> 32),
1251 (uint32_t) lp->port_wwn);
1252 }
1253 break;
1254 case ISPASYNC_CHANGE_NOTIFY:
1255 {
1256 if (arg == ISPASYNC_CHANGE_PDB) {
1257 msg = "Port Database Changed";
1258 } else if (arg == ISPASYNC_CHANGE_SNS) {
1259 msg = "Name Server Database Changed";
1260 } else {
1261 msg = "Other Change Notify";
1262 }
1263 /*
1264 * If the loop down timer is running, cancel it.
1265 */
1266 if (callout_active(&isp->isp_osinfo.ldt)) {
1267 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
1268 "Stopping Loop Down Timer");
1269 callout_stop(&isp->isp_osinfo.ldt);
1270 }
1271 isp_prt(isp, ISP_LOGINFO, msg);
1272 /*
1273 * We can set blocked here because we know it's now okay
1274 * to try and run isp_fc_runstate (in order to build loop
1275 * state). But we don't try and freeze the midlayer's queue
1276 * if we have no thread that we can wake to later unfreeze
1277 * it.
1278 */
1279 if (isp->isp_osinfo.blocked == 0) {
1280 isp->isp_osinfo.blocked = 1;
1281 if (isp->isp_osinfo.thread) {
1282 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
1283 "FREEZE QUEUES @ LINE %d", __LINE__);
1284 scsipi_channel_freeze(&isp->isp_chanA, 1);
1285 }
1286 }
1287 /*
1288 * Note that we have work for the thread to do, and
1289 * if the thread is here already, wake it up.
1290 */
1291 if (isp->isp_osinfo.thread) {
1292 wakeup(&isp->isp_osinfo.thread);
1293 } else {
1294 isp_prt(isp, ISP_LOGDEBUG1, "no FC thread yet");
1295 }
1296 break;
1297 }
1298 case ISPASYNC_FW_CRASH:
1299 {
1300 uint16_t mbox1, mbox6;
1301 mbox1 = ISP_READ(isp, OUTMAILBOX1);
1302 if (IS_DUALBUS(isp)) {
1303 mbox6 = ISP_READ(isp, OUTMAILBOX6);
1304 } else {
1305 mbox6 = 0;
1306 }
1307 isp_prt(isp, ISP_LOGERR,
1308 "Internal Firmware Error on bus %d @ RISC Address 0x%x",
1309 mbox6, mbox1);
1310 if (IS_FC(isp)) {
1311 if (isp->isp_osinfo.blocked == 0) {
1312 isp->isp_osinfo.blocked = 1;
1313 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
1314 "FREEZE QUEUES @ LINE %d", __LINE__);
1315 scsipi_channel_freeze(&isp->isp_chanA, 1);
1316 }
1317 #ifdef ISP_FW_CRASH_DUMP
1318 isp_fw_dump(isp);
1319 #endif
1320 }
1321 mbox1 = isp->isp_osinfo.mbox_sleep_ok;
1322 isp->isp_osinfo.mbox_sleep_ok = 0;
1323 isp_reinit(isp);
1324 isp->isp_osinfo.mbox_sleep_ok = mbox1;
1325 isp_async(isp, ISPASYNC_FW_RESTARTED, NULL);
1326 break;
1327 }
1328 default:
1329 break;
1330 }
1331 return (0);
1332 }
1333
1334 #include <machine/stdarg.h>
1335 void
1336 isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
1337 {
1338 va_list ap;
1339 if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
1340 return;
1341 }
1342 printf("%s: ", isp->isp_name);
1343 va_start(ap, fmt);
1344 vprintf(fmt, ap);
1345 va_end(ap);
1346 printf("\n");
1347 }
1348
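/*
 * The driver lock is a counted splbio() lock: the first isp_lock() call
 * raises the IPL and records the previous level, nested calls only bump
 * the count, and the matching outermost isp_unlock() restores the saved
 * level.
 */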
1349 void
1350 isp_lock(struct ispsoftc *isp)
1351 {
1352 int s = splbio();
1353 if (isp->isp_osinfo.islocked++ == 0) {
1354 isp->isp_osinfo.splsaved = s;
1355 } else {
1356 splx(s);
1357 }
1358 }
1359
1360 void
1361 isp_unlock(struct ispsoftc *isp)
1362 {
1363 if (isp->isp_osinfo.islocked-- <= 1) {
1364 isp->isp_osinfo.islocked = 0;
1365 splx(isp->isp_osinfo.splsaved);
1366 }
1367 }
1368
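/*
 * Return the elapsed time between two timevals. Despite the name, the
 * value is expressed in nanoseconds (via GET_NANOSEC) and is never zero.
 */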
1369 uint64_t
1370 isp_microtime_sub(struct timeval *b, struct timeval *a)
1371 {
1372 struct timeval x;
1373 uint64_t elapsed;
1374 timersub(b, a, &x);
1375 elapsed = GET_NANOSEC(&x);
1376 if (elapsed == 0)
1377 elapsed++;
1378 return (elapsed);
1379 }
1380
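/*
 * Non-blocking mailbox acquisition: returns nonzero if the mailbox is
 * already busy, otherwise marks it busy, clears the completion flag and
 * returns zero.
 */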
1381 int
1382 isp_mbox_acquire(ispsoftc_t *isp)
1383 {
1384 if (isp->isp_osinfo.mboxbsy) {
1385 return (1);
1386 } else {
1387 isp->isp_osinfo.mboxcmd_done = 0;
1388 isp->isp_osinfo.mboxbsy = 1;
1389 return (0);
1390 }
1391 }
1392
1393 void
1394 isp_mbox_wait_complete(struct ispsoftc *isp, mbreg_t *mbp)
1395 {
1396 unsigned int usecs = mbp->timeout;
1397 unsigned int maxc, olim, ilim;
1398 struct timeval start;
1399
1400 if (usecs == 0) {
1401 usecs = MBCMD_DEFAULT_TIMEOUT;
1402 }
1403 maxc = isp->isp_mbxwrk0 + 1;
1404
1405 microtime(&start);
1406 if (isp->isp_osinfo.mbox_sleep_ok) {
1407 int to;
1408 struct timeval tv;
1409
1410 tv.tv_sec = 0;
1411 tv.tv_usec = 0;
		/* accumulate the total timeout across retries, normalizing tv_usec */
		for (olim = 0; olim < maxc; olim++) {
			tv.tv_sec += (usecs / 1000000);
			tv.tv_usec += (usecs % 1000000);
			if (tv.tv_usec >= 1000000) {
				tv.tv_sec++;
				tv.tv_usec -= 1000000;
			}
		}
1419 }
1420 timeradd(&tv, &start, &tv);
1421 to = hzto(&tv);
1422 if (to == 0)
1423 to = 1;
1424
1425 isp->isp_osinfo.mbox_sleep_ok = 0;
1426 isp->isp_osinfo.mbox_sleeping = 1;
1427 tsleep(&isp->isp_mbxworkp, PRIBIO, "ispmbx_sleep", to);
1428 isp->isp_osinfo.mbox_sleeping = 0;
1429 isp->isp_osinfo.mbox_sleep_ok = 1;
1430 } else {
1431 for (olim = 0; olim < maxc; olim++) {
1432 for (ilim = 0; ilim < usecs; ilim += 100) {
1433 uint32_t isr;
1434 uint16_t sema, mbox;
1435 if (isp->isp_osinfo.mboxcmd_done) {
1436 break;
1437 }
1438 if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
1439 isp_intr(isp, isr, sema, mbox);
1440 if (isp->isp_osinfo.mboxcmd_done) {
1441 break;
1442 }
1443 }
1444 USEC_DELAY(100);
1445 }
1446 if (isp->isp_osinfo.mboxcmd_done) {
1447 break;
1448 }
1449 }
1450 }
1451 if (isp->isp_osinfo.mboxcmd_done == 0) {
1452 struct timeval finish, elapsed;
1453
1454 microtime(&finish);
1455 timersub(&finish, &start, &elapsed);
1456 isp_prt(isp, ISP_LOGWARN,
1457 "%s Mailbox Command (0x%x) Timeout (%uus actual)",
1458 isp->isp_osinfo.mbox_sleep_ok? "Interrupting" : "Polled",
1459 isp->isp_lastmbxcmd, (elapsed.tv_sec * 1000000) +
1460 elapsed.tv_usec);
1461 mbp->param[0] = MBOX_TIMEOUT;
1462 isp->isp_osinfo.mboxcmd_done = 1;
1463 }
1464 }
1465
1466 void
1467 isp_mbox_notify_done(ispsoftc_t *isp)
1468 {
1469 if (isp->isp_osinfo.mbox_sleeping) {
1470 wakeup(&isp->isp_mbxworkp);
1471 }
1472 isp->isp_osinfo.mboxcmd_done = 1;
1473 }
1474
1475 void
1476 isp_mbox_release(ispsoftc_t *isp)
1477 {
1478 isp->isp_osinfo.mboxbsy = 0;
1479 }
1480