/*	$NetBSD: isp_netbsd.c,v 1.74 2007/05/24 21:30:43 mjacob Exp $	*/
/*
 * Platform (NetBSD) dependent common attachment code for Qlogic adapters.
 */
/*
 * Copyright (C) 1997, 1998, 1999 National Aeronautics & Space Administration
 * All rights reserved.
 *
 * Additional Copyright (C) 2000-2007 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: isp_netbsd.c,v 1.74 2007/05/24 21:30:43 mjacob Exp $");

#include <dev/ic/isp_netbsd.h>
#include <dev/ic/isp_ioctl.h>
#include <sys/scsiio.h>

/*
 * Set a timeout for the watchdogging of a command.
 *
 * The dimensional analysis is
 *
 *	milliseconds * (seconds/millisecond) * (ticks/second) = ticks
 *
 *			=
 *
 *	(milliseconds / 1000) * hz = ticks
 *
 *
 * For timeouts less than 1 second, we'll get zero. Because of this, and
 * because we want to establish *our* timeout to be longer than what the
 * firmware might do, we just add 3 seconds at the back end.
 */
#define	_XT(xs)	((((xs)->timeout/1000) * hz) + (3 * hz))
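/*
 * For example, with hz = 100 and a 250ms command timeout the integer
 * division yields (250 / 1000) * 100 = 0 ticks, so _XT() returns just the
 * 3 second pad (300 ticks); a 10000ms timeout yields 10 * hz plus the pad,
 * i.e. 13 seconds' worth of ticks.
 */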

static void isp_config_interrupts(struct device *);
static void ispminphys_1020(struct buf *);
static void ispminphys(struct buf *);
static void ispcmd(struct ispsoftc *, XS_T *);
static void isprequest(struct scsipi_channel *, scsipi_adapter_req_t, void *);
static int
ispioctl(struct scsipi_channel *, u_long, void *, int, struct proc *);

static void isp_polled_cmd_wait(struct ispsoftc *, XS_T *);
static void isp_dog(void *);
static void isp_gdt(void *);
static void isp_ldt(void *);
static void isp_make_here(ispsoftc_t *, int);
static void isp_make_gone(ispsoftc_t *, int);
static void isp_create_fc_worker(void *);
static void isp_fc_worker(void *);

static const char *roles[4] = {
    "(none)", "Target", "Initiator", "Target/Initiator"
};
static const char prom3[] =
    "PortID 0x%06x Departed from Target %u because of %s";
int isp_change_is_bad = 0;	/* "changed" devices are bad */
int isp_quickboot_time = 15;	/* don't wait more than N secs for loop up */
static int isp_fabric_hysteresis = 5;
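/*
 * Note that the #define below makes every later use of isp_change_is_bad
 * in this file expand to the constant 0, so the tunable above is
 * effectively forced off here.
 */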
#define	isp_change_is_bad	0


/*
 * Complete attachment of hardware, including subdevices.
 */

void
isp_attach(struct ispsoftc *isp)
{
	isp->isp_state = ISP_RUNSTATE;

	isp->isp_osinfo._adapter.adapt_dev = &isp->isp_osinfo._dev;
	isp->isp_osinfo._adapter.adapt_nchannels = IS_DUALBUS(isp) ? 2 : 1;
	isp->isp_osinfo._adapter.adapt_openings = isp->isp_maxcmds;
	/*
	 * It's not stated whether max_periph is limited by SPI
	 * tag usage, but let's assume that it is.
	 */
	isp->isp_osinfo._adapter.adapt_max_periph = min(isp->isp_maxcmds, 255);
	isp->isp_osinfo._adapter.adapt_ioctl = ispioctl;
	isp->isp_osinfo._adapter.adapt_request = isprequest;
	if (isp->isp_type <= ISP_HA_SCSI_1020A) {
		isp->isp_osinfo._adapter.adapt_minphys = ispminphys_1020;
	} else {
		isp->isp_osinfo._adapter.adapt_minphys = ispminphys;
	}

	isp->isp_osinfo._chan.chan_adapter = &isp->isp_osinfo._adapter;
	isp->isp_osinfo._chan.chan_bustype = &scsi_bustype;
	isp->isp_osinfo._chan.chan_channel = 0;

	/*
	 * Until the midlayer is fixed to use REPORT LUNS, limit to 8 luns.
	 */
	isp->isp_osinfo._chan.chan_nluns = min(isp->isp_maxluns, 8);

	callout_init(&isp->isp_osinfo.gdt);
	callout_setfunc(&isp->isp_osinfo.gdt, isp_gdt, isp);

	callout_init(&isp->isp_osinfo.ldt);
	callout_setfunc(&isp->isp_osinfo.ldt, isp_ldt, isp);

	if (IS_FC(isp)) {
		isp->isp_osinfo._chan.chan_ntargets = MAX_FC_TARG;
		isp->isp_osinfo._chan.chan_id = MAX_FC_TARG;
		kthread_create(isp_create_fc_worker, isp);
#ifdef ISP_FW_CRASH_DUMP
		if (IS_2200(isp)) {
			FCPARAM(isp)->isp_dump_data =
			    malloc(QLA2200_RISC_IMAGE_DUMP_SIZE, M_DEVBUF,
				M_NOWAIT);
		} else if (IS_23XX(isp)) {
			FCPARAM(isp)->isp_dump_data =
			    malloc(QLA2300_RISC_IMAGE_DUMP_SIZE, M_DEVBUF,
				M_NOWAIT);
		}
		if (FCPARAM(isp)->isp_dump_data)
			FCPARAM(isp)->isp_dump_data[0] = 0;
#endif
	} else {
		int bus = 0;
		sdparam *sdp = isp->isp_param;

		isp->isp_osinfo._chan.chan_ntargets = MAX_TARGETS;
		isp->isp_osinfo._chan.chan_id = sdp->isp_initiator_id;
		isp->isp_osinfo.discovered[0] = 1 << sdp->isp_initiator_id;
		if (IS_DUALBUS(isp)) {
			isp->isp_osinfo._chan_b = isp->isp_osinfo._chan;
			sdp++;
			isp->isp_osinfo.discovered[1] =
			    1 << sdp->isp_initiator_id;
			isp->isp_osinfo._chan_b.chan_id = sdp->isp_initiator_id;
			isp->isp_osinfo._chan_b.chan_channel = 1;
		}
		ISP_LOCK(isp);
		(void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
		if (IS_DUALBUS(isp)) {
			bus++;
			(void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
		}
		ISP_UNLOCK(isp);
	}


	/*
	 * Defer enabling mailbox interrupts until later.
	 */
	config_interrupts((struct device *) isp, isp_config_interrupts);

	/*
	 * And attach children (if any).
	 */
	config_found((void *)isp, &isp->isp_chanA, scsiprint);
	if (IS_DUALBUS(isp)) {
		config_found((void *)isp, &isp->isp_chanB, scsiprint);
	}
}

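/*
 * Called via config_interrupts(9) once interrupt delivery is available,
 * so from this point on mailbox commands may sleep instead of polling.
 */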
static void
isp_config_interrupts(struct device *self)
{
	struct ispsoftc *isp = (struct ispsoftc *) self;
	isp->isp_osinfo.mbox_sleep_ok = 1;
}


/*
 * minphys our xfers
 */
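/*
 * Transfers are clamped to 16MB (1 << 24) on 1020-class adapters and to
 * 1GB (1 << 30) on everything else, before the system default minphys is
 * applied.
 */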
static void
ispminphys_1020(struct buf *bp)
{
	if (bp->b_bcount >= (1 << 24)) {
		bp->b_bcount = (1 << 24);
	}
	minphys(bp);
}

static void
ispminphys(struct buf *bp)
{
	if (bp->b_bcount >= (1 << 30)) {
		bp->b_bcount = (1 << 30);
	}
	minphys(bp);
}

static int
ispioctl(struct scsipi_channel *chan, u_long cmd, void *addr, int flag,
    struct proc *p)
{
	struct ispsoftc *isp = (void *)chan->chan_adapter->adapt_dev;
	int retval = ENOTTY;

	switch (cmd) {
#ifdef ISP_FW_CRASH_DUMP
	case ISP_GET_FW_CRASH_DUMP:
	{
		uint16_t *ptr = FCPARAM(isp)->isp_dump_data;
		size_t sz;

		retval = 0;
		if (IS_2200(isp))
			sz = QLA2200_RISC_IMAGE_DUMP_SIZE;
		else
			sz = QLA2300_RISC_IMAGE_DUMP_SIZE;
		ISP_LOCK(isp);
		if (ptr && *ptr) {
			void *uaddr = *((void **) addr);
			if (copyout(ptr, uaddr, sz)) {
				retval = EFAULT;
			} else {
				*ptr = 0;
			}
		} else {
			retval = ENXIO;
		}
		ISP_UNLOCK(isp);
		break;
	}

	case ISP_FORCE_CRASH_DUMP:
		ISP_LOCK(isp);
		if (isp->isp_osinfo.blocked == 0) {
			isp->isp_osinfo.blocked = 1;
			isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
			    "FREEZE QUEUES @ LINE %d", __LINE__);
			scsipi_channel_freeze(&isp->isp_chanA, 1);
		}
		isp_fw_dump(isp);
		isp_reinit(isp);
		ISP_UNLOCK(isp);
		retval = 0;
		break;
#endif
	case ISP_SDBLEV:
	{
		int olddblev = isp->isp_dblev;
		isp->isp_dblev = *(int *)addr;
		*(int *)addr = olddblev;
		retval = 0;
		break;
	}
	case ISP_RESETHBA:
		ISP_LOCK(isp);
		isp_reinit(isp);
		ISP_UNLOCK(isp);
		retval = 0;
		break;
	case ISP_RESCAN:
		if (IS_FC(isp)) {
			ISP_LOCK(isp);
			if (isp_fc_runstate(isp, 5 * 1000000)) {
				retval = EIO;
			} else {
				retval = 0;
			}
			ISP_UNLOCK(isp);
		}
		break;
	case ISP_FC_LIP:
		if (IS_FC(isp)) {
			ISP_LOCK(isp);
			if (isp_control(isp, ISPCTL_SEND_LIP, 0)) {
				retval = EIO;
			} else {
				retval = 0;
			}
			ISP_UNLOCK(isp);
		}
		break;
	case ISP_FC_GETDINFO:
	{
		struct isp_fc_device *ifc = (struct isp_fc_device *) addr;
		fcportdb_t *lp;

		if (ifc->loopid >= MAX_FC_TARG) {
			retval = EINVAL;
			break;
		}
		ISP_LOCK(isp);
		lp = &FCPARAM(isp)->portdb[ifc->loopid];
		if (lp->state == FC_PORTDB_STATE_VALID) {
			ifc->role = lp->roles;
			ifc->loopid = lp->handle;
			ifc->portid = lp->portid;
			ifc->node_wwn = lp->node_wwn;
			ifc->port_wwn = lp->port_wwn;
			retval = 0;
		} else {
			retval = ENODEV;
		}
		ISP_UNLOCK(isp);
		break;
	}
	case ISP_GET_STATS:
	{
		isp_stats_t *sp = (isp_stats_t *) addr;

		MEMZERO(sp, sizeof (*sp));
		sp->isp_stat_version = ISP_STATS_VERSION;
		sp->isp_type = isp->isp_type;
		sp->isp_revision = isp->isp_revision;
		ISP_LOCK(isp);
		sp->isp_stats[ISP_INTCNT] = isp->isp_intcnt;
		sp->isp_stats[ISP_INTBOGUS] = isp->isp_intbogus;
		sp->isp_stats[ISP_INTMBOXC] = isp->isp_intmboxc;
		sp->isp_stats[ISP_INGOASYNC] = isp->isp_intoasync;
		sp->isp_stats[ISP_RSLTCCMPLT] = isp->isp_rsltccmplt;
		sp->isp_stats[ISP_FPHCCMCPLT] = isp->isp_fphccmplt;
		sp->isp_stats[ISP_RSCCHIWAT] = isp->isp_rscchiwater;
		sp->isp_stats[ISP_FPCCHIWAT] = isp->isp_fpcchiwater;
		ISP_UNLOCK(isp);
		retval = 0;
		break;
	}
	case ISP_CLR_STATS:
		ISP_LOCK(isp);
		isp->isp_intcnt = 0;
		isp->isp_intbogus = 0;
		isp->isp_intmboxc = 0;
		isp->isp_intoasync = 0;
		isp->isp_rsltccmplt = 0;
		isp->isp_fphccmplt = 0;
		isp->isp_rscchiwater = 0;
		isp->isp_fpcchiwater = 0;
		ISP_UNLOCK(isp);
		retval = 0;
		break;
	case ISP_FC_GETHINFO:
	{
		struct isp_hba_device *hba = (struct isp_hba_device *) addr;
		MEMZERO(hba, sizeof (*hba));
		ISP_LOCK(isp);
		hba->fc_speed = FCPARAM(isp)->isp_gbspeed;
		hba->fc_scsi_supported = 1;
		hba->fc_topology = FCPARAM(isp)->isp_topo + 1;
		hba->fc_loopid = FCPARAM(isp)->isp_loopid;
		hba->nvram_node_wwn = FCPARAM(isp)->isp_wwnn_nvram;
		hba->nvram_port_wwn = FCPARAM(isp)->isp_wwpn_nvram;
		hba->active_node_wwn = ISP_NODEWWN(isp);
		hba->active_port_wwn = ISP_PORTWWN(isp);
		ISP_UNLOCK(isp);
		retval = 0;
		break;
	}
	case SCBUSIORESET:
		ISP_LOCK(isp);
		if (isp_control(isp, ISPCTL_RESET_BUS, &chan->chan_channel)) {
			retval = EIO;
		} else {
			retval = 0;
		}
		ISP_UNLOCK(isp);
		break;
	default:
		break;
	}
	return (retval);
}

static void
ispcmd(struct ispsoftc *isp, XS_T *xs)
{
	volatile uint8_t ombi;
	int lim;

	ISP_LOCK(isp);
	if (isp->isp_state < ISP_RUNSTATE) {
		ISP_DISABLE_INTS(isp);
		isp_init(isp);
		if (isp->isp_state != ISP_INITSTATE) {
			ISP_ENABLE_INTS(isp);
			ISP_UNLOCK(isp);
			isp_prt(isp, ISP_LOGERR, "isp not at init state");
			XS_SETERR(xs, HBA_BOTCH);
			scsipi_done(xs);
			return;
		}
		isp->isp_state = ISP_RUNSTATE;
		ISP_ENABLE_INTS(isp);
	}

	/*
	 * Handle the case of a FC card where the FC thread hasn't
	 * fired up yet and we don't yet have a known loop state.
	 */
	if (IS_FC(isp) && (FCPARAM(isp)->isp_fwstate != FW_READY ||
	    FCPARAM(isp)->isp_loopstate != LOOP_READY) &&
	    isp->isp_osinfo.thread == NULL) {
		int delay_time;

		ombi = isp->isp_osinfo.mbox_sleep_ok != 0;

		if (xs->xs_control & XS_CTL_POLL) {
			isp->isp_osinfo.mbox_sleep_ok = 0;
		}

		if (isp->isp_osinfo.loop_checked == 0) {
			delay_time = 10 * 1000000;
			isp->isp_osinfo.loop_checked = 1;
		} else {
			delay_time = 250000;
		}

		if (isp_fc_runstate(isp, delay_time) != 0) {
			if (xs->xs_control & XS_CTL_POLL) {
				isp->isp_osinfo.mbox_sleep_ok = ombi;
			}
			if (FCPARAM(isp)->loop_seen_once == 0) {
				XS_SETERR(xs, HBA_SELTIMEOUT);
				scsipi_done(xs);
				ISP_UNLOCK(isp);
				return;
			}
			/*
			 * Otherwise, fall thru to be queued up for later.
			 */
		} else {
			int wasblocked =
			    (isp->isp_osinfo.blocked || isp->isp_osinfo.paused);
			isp->isp_osinfo.blocked =
			    isp->isp_osinfo.paused = 0;
			if (wasblocked) {
				isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
				    "THAW QUEUES @ LINE %d", __LINE__);
				scsipi_channel_thaw(&isp->isp_chanA, 1);
			}
		}
		if (xs->xs_control & XS_CTL_POLL) {
			isp->isp_osinfo.mbox_sleep_ok = ombi;
		}
	}

	if (isp->isp_osinfo.paused) {
		isp_prt(isp, ISP_LOGWARN, "I/O while paused");
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		ISP_UNLOCK(isp);
		return;
	}
	if (isp->isp_osinfo.blocked) {
		isp_prt(isp, ISP_LOGWARN, "I/O while blocked");
		xs->error = XS_REQUEUE;
		scsipi_done(xs);
		ISP_UNLOCK(isp);
		return;
	}

	if (xs->xs_control & XS_CTL_POLL) {
		ombi = isp->isp_osinfo.mbox_sleep_ok;
		isp->isp_osinfo.mbox_sleep_ok = 0;
	}

	switch (isp_start(xs)) {
	case CMD_QUEUED:
		if (xs->xs_control & XS_CTL_POLL) {
			isp_polled_cmd_wait(isp, xs);
			isp->isp_osinfo.mbox_sleep_ok = ombi;
		} else if (xs->timeout) {
			callout_reset(&xs->xs_callout, _XT(xs), isp_dog, xs);
		}
		break;
	case CMD_EAGAIN:
		isp->isp_osinfo.paused = 1;
		xs->error = XS_RESOURCE_SHORTAGE;
		isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
		    "FREEZE QUEUES @ LINE %d", __LINE__);
		scsipi_channel_freeze(&isp->isp_chanA, 1);
		if (IS_DUALBUS(isp)) {
			scsipi_channel_freeze(&isp->isp_chanB, 1);
		}
		scsipi_done(xs);
		break;
	case CMD_RQLATER:
		/*
		 * We can only get RQLATER from FC devices (1 channel only).
		 *
		 * If we've never seen loop up, check whether we've been
		 * down for longer than quickboot time; otherwise use the
		 * loop down limit time. If we're past that, start giving
		 * up on commands.
		 */
		if (FCPARAM(isp)->loop_seen_once == 0) {
			lim = isp_quickboot_time;
		} else {
			lim = isp->isp_osinfo.loop_down_limit;
		}
		if (isp->isp_osinfo.loop_down_time >= lim) {
			isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
			    "RQLATER -> SELTIMEOUT");
			XS_SETERR(xs, HBA_SELTIMEOUT);
			scsipi_done(xs);
			break;
		}
		if (isp->isp_osinfo.blocked == 0) {
			isp->isp_osinfo.blocked = 1;
			isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
			    "FREEZE QUEUES @ LINE %d", __LINE__);
			scsipi_channel_freeze(&isp->isp_chanA, 1);
		} else {
			isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
			    "RQLATER WITH FROZEN QUEUES @ LINE %d", __LINE__);
		}
		xs->error = XS_REQUEUE;
		scsipi_done(xs);
		break;
	case CMD_COMPLETE:
		scsipi_done(xs);
		break;
	}
	ISP_UNLOCK(isp);
}

static void
isprequest(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
{
	struct ispsoftc *isp = (void *)chan->chan_adapter->adapt_dev;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		ispcmd(isp, (XS_T *) arg);
		break;

	case ADAPTER_REQ_GROW_RESOURCES:
		/* Not supported. */
		break;

	case ADAPTER_REQ_SET_XFER_MODE:
		if (IS_SCSI(isp)) {
			struct scsipi_xfer_mode *xm = arg;
			int dflags = 0;
			sdparam *sdp = SDPARAM(isp);

			sdp += chan->chan_channel;
			if (xm->xm_mode & PERIPH_CAP_TQING)
				dflags |= DPARM_TQING;
			if (xm->xm_mode & PERIPH_CAP_WIDE16)
				dflags |= DPARM_WIDE;
			if (xm->xm_mode & PERIPH_CAP_SYNC)
				dflags |= DPARM_SYNC;
			ISP_LOCK(isp);
			sdp->isp_devparam[xm->xm_target].goal_flags |= dflags;
			dflags = sdp->isp_devparam[xm->xm_target].goal_flags;
			sdp->isp_devparam[xm->xm_target].dev_update = 1;
			isp->isp_update |= (1 << chan->chan_channel);
			ISP_UNLOCK(isp);
			isp_prt(isp, ISP_LOGDEBUG1,
			    "isprequest: device flags 0x%x for %d.%d.X",
			    dflags, chan->chan_channel, xm->xm_target);
			break;
		}
	default:
		break;
	}
}

static void
isp_polled_cmd_wait(struct ispsoftc *isp, XS_T *xs)
{
	int infinite = 0, mswait;

	/*
	 * If we can't use interrupts, poll on completion.
	 */
	if ((mswait = XS_TIME(xs)) == 0) {
		infinite = 1;
	}

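	/*
	 * mswait is the command timeout in milliseconds; each pass below
	 * services the chip's interrupt status once and then delays 1ms.
	 */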
	while (mswait || infinite) {
		uint32_t isr;
		uint16_t sema, mbox;
		if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
			isp_intr(isp, isr, sema, mbox);
			if (XS_CMD_DONE_P(xs)) {
				break;
			}
		}
		USEC_DELAY(1000);
		mswait -= 1;
	}

	/*
	 * If no other error occurred but we didn't finish,
	 * something bad happened, so abort the command.
	 */
	if (XS_CMD_DONE_P(xs) == 0) {
		if (isp_control(isp, ISPCTL_ABORT_CMD, xs)) {
			isp_reinit(isp);
		}
		if (XS_NOERR(xs)) {
			isp_prt(isp, ISP_LOGERR, "polled command timed out");
			XS_SETERR(xs, HBA_BOTCH);
		}
	}
	scsipi_done(xs);
}

void
isp_done(XS_T *xs)
{
	if (XS_CMD_WDOG_P(xs) == 0) {
		struct ispsoftc *isp = XS_ISP(xs);
		callout_stop(&xs->xs_callout);
		if (XS_CMD_GRACE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1,
			    "finished command on borrowed time");
		}
		XS_CMD_S_CLEAR(xs);
		/*
		 * Fixup- if we get a QFULL, we need
		 * to set XS_BUSY as the error.
		 */
		if (xs->status == SCSI_QUEUE_FULL) {
			xs->error = XS_BUSY;
		}
		if (isp->isp_osinfo.paused) {
			isp->isp_osinfo.paused = 0;
			isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
			    "THAW QUEUES @ LINE %d", __LINE__);
			scsipi_channel_timed_thaw(&isp->isp_chanA);
			if (IS_DUALBUS(isp)) {
				scsipi_channel_timed_thaw(&isp->isp_chanB);
			}
		}
		if (xs->error == XS_DRIVER_STUFFUP) {
			isp_prt(isp, ISP_LOGERR,
			    "BOTCHED cmd for %d.%d.%d cmd 0x%x datalen %ld",
			    XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs),
			    XS_CDBP(xs)[0], (long) XS_XFRLEN(xs));
		}
		scsipi_done(xs);
	}
}

static void
isp_dog(void *arg)
{
	XS_T *xs = arg;
	struct ispsoftc *isp = XS_ISP(xs);
	uint32_t handle;


	ISP_ILOCK(isp);
	/*
	 * We've decided this command is dead. Make sure we're not trying
	 * to kill a command that's already dead by getting its handle
	 * and seeing whether it's still alive.
	 */
	handle = isp_find_handle(isp, xs);
	if (handle) {
		uint32_t isr;
		uint16_t mbox, sema;

		if (XS_CMD_DONE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1,
			    "watchdog found done cmd (handle 0x%x)", handle);
			ISP_IUNLOCK(isp);
			return;
		}

		if (XS_CMD_WDOG_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1,
			    "recursive watchdog (handle 0x%x)", handle);
			ISP_IUNLOCK(isp);
			return;
		}

		XS_CMD_S_WDOG(xs);

		if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
			isp_intr(isp, isr, sema, mbox);

		}
		if (XS_CMD_DONE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1,
			    "watchdog cleanup for handle 0x%x", handle);
			XS_CMD_C_WDOG(xs);
			isp_done(xs);
		} else if (XS_CMD_GRACE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1,
			    "watchdog timeout for handle 0x%x", handle);
			/*
			 * Make sure the command is *really* dead before we
			 * release the handle (and DMA resources) for reuse.
			 */
			(void) isp_control(isp, ISPCTL_ABORT_CMD, arg);

			/*
			 * After this point, the command is really dead.
			 */
			if (XS_XFRLEN(xs)) {
				ISP_DMAFREE(isp, xs, handle);
			}
			isp_destroy_handle(isp, handle);
			XS_SETERR(xs, XS_TIMEOUT);
			XS_CMD_S_CLEAR(xs);
			isp_done(xs);
		} else {
			uint32_t nxti, optr;
			void *qe;
			isp_marker_t local, *mp = &local;
			isp_prt(isp, ISP_LOGDEBUG2,
			    "possible command timeout on handle %x", handle);
			XS_CMD_C_WDOG(xs);
			callout_reset(&xs->xs_callout, hz, isp_dog, xs);
			if (isp_getrqentry(isp, &nxti, &optr, &qe)) {
				ISP_UNLOCK(isp);
				return;
			}
			XS_CMD_S_GRACE(xs);
			MEMZERO((void *) mp, sizeof (*mp));
			mp->mrk_header.rqs_entry_count = 1;
			mp->mrk_header.rqs_entry_type = RQSTYPE_MARKER;
			mp->mrk_modifier = SYNC_ALL;
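			/*
			 * The bus (channel) number is carried in bit 7 of
			 * the marker's target field.
			 */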
			mp->mrk_target = XS_CHANNEL(xs) << 7;
			isp_put_marker(isp, mp, qe);
			ISP_ADD_REQUEST(isp, nxti);
		}
	} else {
		isp_prt(isp, ISP_LOGDEBUG0, "watchdog with no command");
	}
	ISP_IUNLOCK(isp);
}

/*
 * Gone Device Timer Function- when we have decided that a device has gone
 * away, we wait a specific period of time prior to telling the OS it has
 * gone away.
 *
 * This timer function fires once a second and then scans the port database
 * for devices that are marked dead but still have a virtual target assigned.
 * We decrement a counter for that port database entry, and when it hits zero,
 * we tell the OS the device has gone away.
 */
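/*
 * For example, if gone_device_time is 30, a zombie entry is announced as
 * departed roughly 30 seconds after isp_gdt first sees it, since its
 * new_reserved count is decremented on each one-second tick.
 */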
static void
isp_gdt(void *arg)
{
	ispsoftc_t *isp = arg;
	fcportdb_t *lp;
	int dbidx, tgt, more_to_do = 0;

	isp_prt(isp, ISP_LOGDEBUG0, "GDT timer expired");
	ISP_LOCK(isp);
	for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) {
		lp = &FCPARAM(isp)->portdb[dbidx];

		if (lp->state != FC_PORTDB_STATE_ZOMBIE) {
			continue;
		}
		if (lp->ini_map_idx == 0) {
			continue;
		}
		if (lp->new_reserved == 0) {
			continue;
		}
		lp->new_reserved -= 1;
		if (lp->new_reserved != 0) {
			more_to_do++;
			continue;
		}
		tgt = lp->ini_map_idx - 1;
		FCPARAM(isp)->isp_ini_map[tgt] = 0;
		lp->ini_map_idx = 0;
		lp->state = FC_PORTDB_STATE_NIL;
		isp_prt(isp, ISP_LOGCONFIG, prom3, lp->portid, tgt,
		    "Gone Device Timeout");
		isp_make_gone(isp, tgt);
	}
	if (more_to_do) {
		callout_schedule(&isp->isp_osinfo.gdt, hz);
	} else {
		isp->isp_osinfo.gdt_running = 0;
		isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
		    "stopping Gone Device Timer");
	}
	ISP_UNLOCK(isp);
}

/*
 * Loop Down Timer Function- when the loop goes down, a timer is started,
 * and after it expires we come here and take all probational devices that
 * the OS knows about and tell the OS that they've gone away.
 *
 * We don't clear the devices out of our port database because, when the
 * loop comes back up, we have to do some actual cleanup with the chip at
 * that point (implicit PLOGO, e.g., to get the chip's port database state
 * right).
 */
static void
isp_ldt(void *arg)
{
	ispsoftc_t *isp = arg;
	fcportdb_t *lp;
	int dbidx, tgt;

	isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "Loop Down Timer expired");
	ISP_LOCK(isp);

	/*
	 * Notify the OS of all targets that we now consider to have departed.
	 */
	for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) {
		lp = &FCPARAM(isp)->portdb[dbidx];

		if (lp->state != FC_PORTDB_STATE_PROBATIONAL) {
			continue;
		}
		if (lp->ini_map_idx == 0) {
			continue;
		}

		/*
		 * XXX: CLEAN UP AND COMPLETE ANY PENDING COMMANDS FIRST!
		 */

		/*
		 * Mark that we've announced that this device is gone....
		 */
		lp->reserved = 1;

		/*
		 * but *don't* change the state of the entry. Just clear
		 * any target id stuff and announce to the midlayer that
		 * the device is gone. This way any necessary PLOGO stuff
		 * will happen when the loop comes back up.
		 */

		tgt = lp->ini_map_idx - 1;
		FCPARAM(isp)->isp_ini_map[tgt] = 0;
		lp->ini_map_idx = 0;
		isp_prt(isp, ISP_LOGCONFIG, prom3, lp->portid, tgt,
		    "Loop Down Timeout");
		isp_make_gone(isp, tgt);
	}

	/*
	 * The loop down timer has expired. Wake up the kthread
	 * to notice that fact (or make it false).
	 */
	isp->isp_osinfo.loop_down_time = isp->isp_osinfo.loop_down_limit+1;
	wakeup(&isp->isp_osinfo.thread);
	ISP_UNLOCK(isp);
}

static void
isp_make_here(ispsoftc_t *isp, int tgt)
{
	isp_prt(isp, ISP_LOGINFO, "target %d has arrived", tgt);
}

static void
isp_make_gone(ispsoftc_t *isp, int tgt)
{
	isp_prt(isp, ISP_LOGINFO, "target %d has departed", tgt);
}

/*
 * Fibre Channel state cleanup thread
 */
static void
isp_create_fc_worker(void *arg)
{
	struct ispsoftc *isp = arg;
	if (kthread_create1(isp_fc_worker, isp, &isp->isp_osinfo.thread,
	    "%s:fc_thrd", isp->isp_name)) {
		isp_prt(isp, ISP_LOGERR, "unable to create FC worker thread");
		panic("isp_create_fc_worker");
	}

}


static void
isp_fc_worker(void *arg)
{
	void scsipi_run_queue(struct scsipi_channel *);
	ispsoftc_t *isp = arg;
	int slp = 0;
	int s = splbio();
	/*
	 * This loop is for the case where we have yet to get
	 * good Fibre Channel state.
	 */
	while (isp->isp_osinfo.thread != NULL) {
		int sok, lb, lim;

		isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "checking FC state");
		sok = isp->isp_osinfo.mbox_sleep_ok;
		isp->isp_osinfo.mbox_sleep_ok = 1;
		lb = isp_fc_runstate(isp, 250000);
		isp->isp_osinfo.mbox_sleep_ok = sok;
		if (lb) {
			/*
			 * Increment loop down time by the last sleep interval
			 */
			isp->isp_osinfo.loop_down_time += slp;

			if (lb < 0) {
				isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
				    "FC loop not up (down count %d)",
				    isp->isp_osinfo.loop_down_time);
			} else {
				isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
				    "FC got to %d (down count %d)",
				    lb, isp->isp_osinfo.loop_down_time);
			}


			/*
			 * If we've never seen loop up and we've waited longer
			 * than quickboot time, or we've seen loop up but we've
			 * waited longer than loop_down_limit, give up and go
			 * to sleep until loop comes up.
			 */
			if (FCPARAM(isp)->loop_seen_once == 0) {
				lim = isp_quickboot_time;
			} else {
				lim = isp->isp_osinfo.loop_down_limit;
			}
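			/*
			 * Decide how long to sleep before re-checking: past
			 * the limit we sleep until woken; otherwise we back
			 * off from 1 second to 30 second polls as the loop
			 * stays down longer.
			 */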
			if (isp->isp_osinfo.loop_down_time >= lim) {
				/*
				 * If we're now past our limit, release
				 * the queues and let them come in and
				 * either get HBA_SELTIMEOUT or cause
				 * another freeze.
				 */
				isp->isp_osinfo.blocked = 1;
				slp = 0;
			} else if (isp->isp_osinfo.loop_down_time < 10) {
				slp = 1;
			} else if (isp->isp_osinfo.loop_down_time < 30) {
				slp = 5;
			} else if (isp->isp_osinfo.loop_down_time < 60) {
				slp = 10;
			} else if (isp->isp_osinfo.loop_down_time < 120) {
				slp = 20;
			} else {
				slp = 30;
			}

		} else {
			isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
			    "FC state OK");
			isp->isp_osinfo.loop_down_time = 0;
			slp = 0;
			isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
			    "THAW QUEUES @ LINE %d", __LINE__);
			scsipi_channel_thaw(&isp->isp_chanA, 1);
		}

		/*
		 * If we'd frozen the queues, unfreeze them now so that
		 * we can start getting commands. If the FC state isn't
		 * okay yet, they'll hit that in isp_start which will
		 * freeze the queues again.
		 */
		if (isp->isp_osinfo.blocked) {
			isp->isp_osinfo.blocked = 0;
			isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
			    "THAW QUEUES @ LINE %d", __LINE__);
			scsipi_channel_thaw(&isp->isp_chanA, 1);
		}
		isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "sleep time %d", slp);
		tsleep(&isp->isp_osinfo.thread, PRIBIO, "ispf", slp * hz);

		/*
		 * If slp is zero, we're waking up for the first time after
		 * things have been okay. In this case, we set a deferral state
		 * for all commands and delay hysteresis seconds before starting
		 * the FC state evaluation. This gives the loop/fabric a chance
		 * to settle.
		 */
		if (slp == 0 && isp_fabric_hysteresis) {
			isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
			    "sleep hysteresis tick time %d",
			    isp_fabric_hysteresis * hz);
			(void) tsleep(&isp_fabric_hysteresis, PRIBIO, "ispT",
			    (isp_fabric_hysteresis * hz));
		}
	}
	splx(s);

	/* In case parent is waiting for us to exit. */
	wakeup(&isp->isp_osinfo.thread);
	kthread_exit(0);
}

/*
 * Free any associated resources prior to decommissioning and
 * set the card to a known state (so it doesn't wake up and kick
 * us when we aren't expecting it to).
 *
 * Locks are held before coming here.
 */
void
isp_uninit(struct ispsoftc *isp)
{
	isp_lock(isp);
	/*
	 * Leave with interrupts disabled.
	 */
	ISP_DISABLE_INTS(isp);
	isp_unlock(isp);
}

int
isp_async(struct ispsoftc *isp, ispasync_t cmd, void *arg)
{
	int bus, tgt;
	const char *msg = NULL;
	static const char prom[] =
	    "PortID 0x%06x handle 0x%x role %s %s\n"
	    " WWNN 0x%08x%08x WWPN 0x%08x%08x";
	static const char prom2[] =
	    "PortID 0x%06x handle 0x%x role %s %s tgt %u\n"
	    " WWNN 0x%08x%08x WWPN 0x%08x%08x";
	fcportdb_t *lp;

	switch (cmd) {
	case ISPASYNC_NEW_TGT_PARAMS:
		if (IS_SCSI(isp) && isp->isp_dblev) {
			sdparam *sdp = isp->isp_param;
			int flags;
			struct scsipi_xfer_mode xm;

			tgt = *((int *) arg);
			bus = (tgt >> 16) & 0xffff;
			tgt &= 0xffff;
			sdp += bus;
			flags = sdp->isp_devparam[tgt].actv_flags;

			xm.xm_mode = 0;
			xm.xm_period = sdp->isp_devparam[tgt].actv_period;
			xm.xm_offset = sdp->isp_devparam[tgt].actv_offset;
			xm.xm_target = tgt;

			if ((flags & DPARM_SYNC) && xm.xm_period && xm.xm_offset)
				xm.xm_mode |= PERIPH_CAP_SYNC;
			if (flags & DPARM_WIDE)
				xm.xm_mode |= PERIPH_CAP_WIDE16;
			if (flags & DPARM_TQING)
				xm.xm_mode |= PERIPH_CAP_TQING;
			scsipi_async_event(bus? &isp->isp_chanB : &isp->isp_chanA,
			    ASYNC_EVENT_XFER_MODE, &xm);
			break;
		}
	case ISPASYNC_BUS_RESET:
		bus = *((int *) arg);
		scsipi_async_event(bus? &isp->isp_chanB : &isp->isp_chanA,
		    ASYNC_EVENT_RESET, NULL);
		isp_prt(isp, ISP_LOGINFO, "SCSI bus %d reset detected", bus);
		break;
	case ISPASYNC_LIP:
		if (msg == NULL) {
			msg = "LIP Received";
		}
		/* FALLTHROUGH */
	case ISPASYNC_LOOP_RESET:
		if (msg == NULL) {
			msg = "LOOP Reset Received";
		}
		/* FALLTHROUGH */
	case ISPASYNC_LOOP_DOWN:
		if (msg == NULL) {
			msg = "Loop DOWN";
		}
		/*
		 * Don't do queue freezes or blockage until we have the
		 * thread running that can unfreeze/unblock us.
		 */
		if (isp->isp_osinfo.blocked == 0) {
			if (isp->isp_osinfo.thread) {
				isp->isp_osinfo.blocked = 1;
				isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
				    "FREEZE QUEUES @ LINE %d", __LINE__);
				scsipi_channel_freeze(&isp->isp_chanA, 1);
			}
		}
		isp_prt(isp, ISP_LOGINFO, msg);
		break;
	case ISPASYNC_LOOP_UP:
		/*
		 * Let the subsequent ISPASYNC_CHANGE_NOTIFY invoke
		 * the FC worker thread. When the FC worker thread
		 * is done, let *it* call scsipi_channel_thaw...
		 */
		isp_prt(isp, ISP_LOGINFO, "Loop UP");
		break;
	case ISPASYNC_DEV_ARRIVED:
		lp = arg;
		lp->reserved = 0;
		if ((isp->isp_role & ISP_ROLE_INITIATOR) &&
		    (lp->roles & (SVC3_TGT_ROLE >> SVC3_ROLE_SHIFT))) {
			int dbidx = lp - FCPARAM(isp)->portdb;
			int i;

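			/*
			 * Find the first free slot in the initiator map,
			 * skipping the reserved FL_ID..SNS_ID range.
			 */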
			for (i = 0; i < MAX_FC_TARG; i++) {
				if (i >= FL_ID && i <= SNS_ID) {
					continue;
				}
				if (FCPARAM(isp)->isp_ini_map[i] == 0) {
					break;
				}
			}
			if (i < MAX_FC_TARG) {
				FCPARAM(isp)->isp_ini_map[i] = dbidx + 1;
				lp->ini_map_idx = i + 1;
			} else {
				isp_prt(isp, ISP_LOGWARN, "out of target ids");
				isp_dump_portdb(isp);
			}
		}
		if (lp->ini_map_idx) {
			tgt = lp->ini_map_idx - 1;
			isp_prt(isp, ISP_LOGCONFIG, prom2,
			    lp->portid, lp->handle,
			    roles[lp->roles], "arrived at", tgt,
			    (uint32_t) (lp->node_wwn >> 32),
			    (uint32_t) lp->node_wwn,
			    (uint32_t) (lp->port_wwn >> 32),
			    (uint32_t) lp->port_wwn);
			isp_make_here(isp, tgt);
		} else {
			isp_prt(isp, ISP_LOGCONFIG, prom,
			    lp->portid, lp->handle,
			    roles[lp->roles], "arrived",
			    (uint32_t) (lp->node_wwn >> 32),
			    (uint32_t) lp->node_wwn,
			    (uint32_t) (lp->port_wwn >> 32),
			    (uint32_t) lp->port_wwn);
		}
		break;
	case ISPASYNC_DEV_CHANGED:
		lp = arg;
		if (isp_change_is_bad) {
			lp->state = FC_PORTDB_STATE_NIL;
			if (lp->ini_map_idx) {
				tgt = lp->ini_map_idx - 1;
				FCPARAM(isp)->isp_ini_map[tgt] = 0;
				lp->ini_map_idx = 0;
				isp_prt(isp, ISP_LOGCONFIG, prom3,
				    lp->portid, tgt, "change is bad");
				isp_make_gone(isp, tgt);
			} else {
				isp_prt(isp, ISP_LOGCONFIG, prom,
				    lp->portid, lp->handle,
				    roles[lp->roles],
				    "changed and departed",
				    (uint32_t) (lp->node_wwn >> 32),
				    (uint32_t) lp->node_wwn,
				    (uint32_t) (lp->port_wwn >> 32),
				    (uint32_t) lp->port_wwn);
			}
		} else {
			lp->portid = lp->new_portid;
			lp->roles = lp->new_roles;
			if (lp->ini_map_idx) {
				int t = lp->ini_map_idx - 1;
				FCPARAM(isp)->isp_ini_map[t] =
				    (lp - FCPARAM(isp)->portdb) + 1;
				tgt = lp->ini_map_idx - 1;
				isp_prt(isp, ISP_LOGCONFIG, prom2,
				    lp->portid, lp->handle,
				    roles[lp->roles], "changed at", tgt,
				    (uint32_t) (lp->node_wwn >> 32),
				    (uint32_t) lp->node_wwn,
				    (uint32_t) (lp->port_wwn >> 32),
				    (uint32_t) lp->port_wwn);
			} else {
				isp_prt(isp, ISP_LOGCONFIG, prom,
				    lp->portid, lp->handle,
				    roles[lp->roles], "changed",
				    (uint32_t) (lp->node_wwn >> 32),
				    (uint32_t) lp->node_wwn,
				    (uint32_t) (lp->port_wwn >> 32),
				    (uint32_t) lp->port_wwn);
			}
		}
		break;
	case ISPASYNC_DEV_STAYED:
		lp = arg;
		if (lp->ini_map_idx) {
			tgt = lp->ini_map_idx - 1;
			isp_prt(isp, ISP_LOGCONFIG, prom2,
			    lp->portid, lp->handle,
			    roles[lp->roles], "stayed at", tgt,
			    (uint32_t) (lp->node_wwn >> 32),
			    (uint32_t) lp->node_wwn,
			    (uint32_t) (lp->port_wwn >> 32),
			    (uint32_t) lp->port_wwn);
		} else {
			isp_prt(isp, ISP_LOGCONFIG, prom,
			    lp->portid, lp->handle,
			    roles[lp->roles], "stayed",
			    (uint32_t) (lp->node_wwn >> 32),
			    (uint32_t) lp->node_wwn,
			    (uint32_t) (lp->port_wwn >> 32),
			    (uint32_t) lp->port_wwn);
		}
		break;
	case ISPASYNC_DEV_GONE:
		lp = arg;
		/*
		 * If this has a virtual target and we haven't marked it
		 * that we're going to have isp_gdt tell the OS it's gone,
		 * set the isp_gdt timer running on it.
		 *
		 * If it isn't marked that isp_gdt is going to get rid of it,
		 * announce that it's gone.
		 */
		if (lp->ini_map_idx && lp->reserved == 0) {
			lp->reserved = 1;
			lp->new_reserved = isp->isp_osinfo.gone_device_time;
			lp->state = FC_PORTDB_STATE_ZOMBIE;
			if (isp->isp_osinfo.gdt_running == 0) {
				isp->isp_osinfo.gdt_running = 1;
				isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
				    "starting Gone Device Timer");
				callout_schedule(&isp->isp_osinfo.gdt, hz);
			}
			tgt = lp->ini_map_idx - 1;
			isp_prt(isp, ISP_LOGCONFIG, prom2,
			    lp->portid, lp->handle,
			    roles[lp->roles], "gone zombie at", tgt,
			    (uint32_t) (lp->node_wwn >> 32),
			    (uint32_t) lp->node_wwn,
			    (uint32_t) (lp->port_wwn >> 32),
			    (uint32_t) lp->port_wwn);
		} else if (lp->reserved == 0) {
			isp_prt(isp, ISP_LOGCONFIG, prom,
			    lp->portid, lp->handle,
			    roles[lp->roles], "departed",
			    (uint32_t) (lp->node_wwn >> 32),
			    (uint32_t) lp->node_wwn,
			    (uint32_t) (lp->port_wwn >> 32),
			    (uint32_t) lp->port_wwn);
		}
		break;
	case ISPASYNC_CHANGE_NOTIFY:
	{
		if (arg == ISPASYNC_CHANGE_PDB) {
			msg = "Port Database Changed";
		} else if (arg == ISPASYNC_CHANGE_SNS) {
			msg = "Name Server Database Changed";
		} else {
			msg = "Other Change Notify";
		}
		/*
		 * If the loop down timer is running, cancel it.
		 */
		if (callout_active(&isp->isp_osinfo.ldt)) {
			isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
			    "Stopping Loop Down Timer");
			callout_stop(&isp->isp_osinfo.ldt);
		}
		isp_prt(isp, ISP_LOGINFO, msg);
		/*
		 * We can set blocked here because we know it's now okay
		 * to try and run isp_fc_runstate (in order to build loop
		 * state). But we don't try and freeze the midlayer's queue
		 * if we have no thread that we can wake to later unfreeze
		 * it.
		 */
		if (isp->isp_osinfo.blocked == 0) {
			isp->isp_osinfo.blocked = 1;
			if (isp->isp_osinfo.thread) {
				isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
				    "FREEZE QUEUES @ LINE %d", __LINE__);
				scsipi_channel_freeze(&isp->isp_chanA, 1);
			}
		}
		/*
		 * Note that we have work for the thread to do, and
		 * if the thread is here already, wake it up.
		 */
		if (isp->isp_osinfo.thread) {
			wakeup(&isp->isp_osinfo.thread);
		} else {
			isp_prt(isp, ISP_LOGDEBUG1, "no FC thread yet");
		}
		break;
	}
	case ISPASYNC_FW_CRASH:
	{
		uint16_t mbox1, mbox6;
		mbox1 = ISP_READ(isp, OUTMAILBOX1);
		if (IS_DUALBUS(isp)) {
			mbox6 = ISP_READ(isp, OUTMAILBOX6);
		} else {
			mbox6 = 0;
		}
		isp_prt(isp, ISP_LOGERR,
		    "Internal Firmware Error on bus %d @ RISC Address 0x%x",
		    mbox6, mbox1);
		if (IS_FC(isp)) {
			if (isp->isp_osinfo.blocked == 0) {
				isp->isp_osinfo.blocked = 1;
				isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
				    "FREEZE QUEUES @ LINE %d", __LINE__);
				scsipi_channel_freeze(&isp->isp_chanA, 1);
			}
#ifdef ISP_FW_CRASH_DUMP
			isp_fw_dump(isp);
#endif
		}
		mbox1 = isp->isp_osinfo.mbox_sleep_ok;
		isp->isp_osinfo.mbox_sleep_ok = 0;
		isp_reinit(isp);
		isp->isp_osinfo.mbox_sleep_ok = mbox1;
		isp_async(isp, ISPASYNC_FW_RESTARTED, NULL);
		break;
	}
	default:
		break;
	}
	return (0);
}

#include <machine/stdarg.h>
void
isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
{
	va_list ap;
	if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
		return;
	}
	printf("%s: ", isp->isp_name);
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
	printf("\n");
}

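/*
 * isp_lock/isp_unlock implement a counted splbio-based lock: the first
 * isp_lock raises to splbio and records the previous spl, nested calls
 * just drop their own splbio immediately, and the final isp_unlock
 * restores the saved spl.
 */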
void
isp_lock(struct ispsoftc *isp)
{
	int s = splbio();
	if (isp->isp_osinfo.islocked++ == 0) {
		isp->isp_osinfo.splsaved = s;
	} else {
		splx(s);
	}
}

void
isp_unlock(struct ispsoftc *isp)
{
	if (isp->isp_osinfo.islocked-- <= 1) {
		isp->isp_osinfo.islocked = 0;
		splx(isp->isp_osinfo.splsaved);
	}
}

uint64_t
isp_microtime_sub(struct timeval *b, struct timeval *a)
{
	struct timeval x;
	uint64_t elapsed;
	timersub(b, a, &x);
	elapsed = GET_NANOSEC(&x);
	if (elapsed == 0)
		elapsed++;
	return (elapsed);
}

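/*
 * Mailbox command handshaking: isp_mbox_acquire is a non-blocking
 * try-lock (it returns nonzero if a mailbox command is already in
 * flight), isp_mbox_notify_done marks the command complete and wakes
 * any sleeper, and isp_mbox_release drops the busy flag.
 */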
int
isp_mbox_acquire(ispsoftc_t *isp)
{
	if (isp->isp_osinfo.mboxbsy) {
		return (1);
	} else {
		isp->isp_osinfo.mboxcmd_done = 0;
		isp->isp_osinfo.mboxbsy = 1;
		return (0);
	}
}

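/*
 * Wait for a mailbox command to complete. If we're allowed to sleep,
 * compute a timeout from the per-command value (or the default), scaled
 * by the number of continuation iterations (isp_mbxwrk0 + 1), and tsleep
 * until isp_mbox_notify_done wakes us or the timeout expires. Otherwise
 * poll, servicing the interrupt status every 100us until the command is
 * done or the timeout runs out.
 */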
void
isp_mbox_wait_complete(struct ispsoftc *isp, mbreg_t *mbp)
{
	unsigned int usecs = mbp->timeout;
	unsigned int maxc, olim, ilim;
	struct timeval start;

	if (usecs == 0) {
		usecs = MBCMD_DEFAULT_TIMEOUT;
	}
	maxc = isp->isp_mbxwrk0 + 1;

	microtime(&start);
	if (isp->isp_osinfo.mbox_sleep_ok) {
		int to;
		struct timeval tv;

		tv.tv_sec = 0;
		tv.tv_usec = 0;
		for (olim = 0; olim < maxc; olim++) {
			tv.tv_sec += (usecs / 1000000);
			tv.tv_usec += (usecs % 1000000);
			if (tv.tv_usec >= 1000000) {
				tv.tv_sec++;
				tv.tv_usec -= 1000000;
			}
		}
		timeradd(&tv, &start, &tv);
		to = hzto(&tv);
		if (to == 0)
			to = 1;

		isp->isp_osinfo.mbox_sleep_ok = 0;
		isp->isp_osinfo.mbox_sleeping = 1;
		tsleep(&isp->isp_mbxworkp, PRIBIO, "ispmbx_sleep", to);
		isp->isp_osinfo.mbox_sleeping = 0;
		isp->isp_osinfo.mbox_sleep_ok = 1;
	} else {
		for (olim = 0; olim < maxc; olim++) {
			for (ilim = 0; ilim < usecs; ilim += 100) {
				uint32_t isr;
				uint16_t sema, mbox;
				if (isp->isp_osinfo.mboxcmd_done) {
					break;
				}
				if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
					isp_intr(isp, isr, sema, mbox);
					if (isp->isp_osinfo.mboxcmd_done) {
						break;
					}
				}
				USEC_DELAY(100);
			}
			if (isp->isp_osinfo.mboxcmd_done) {
				break;
			}
		}
	}
	if (isp->isp_osinfo.mboxcmd_done == 0) {
		struct timeval finish, elapsed;

		microtime(&finish);
		timersub(&finish, &start, &elapsed);
		isp_prt(isp, ISP_LOGWARN,
		    "%s Mailbox Command (0x%x) Timeout (%uus actual)",
		    isp->isp_osinfo.mbox_sleep_ok? "Interrupting" : "Polled",
		    isp->isp_lastmbxcmd, (elapsed.tv_sec * 1000000) +
		    elapsed.tv_usec);
		mbp->param[0] = MBOX_TIMEOUT;
		isp->isp_osinfo.mboxcmd_done = 1;
	}
}

void
isp_mbox_notify_done(ispsoftc_t *isp)
{
	if (isp->isp_osinfo.mbox_sleeping) {
		wakeup(&isp->isp_mbxworkp);
	}
	isp->isp_osinfo.mboxcmd_done = 1;
}

void
isp_mbox_release(ispsoftc_t *isp)
{
	isp->isp_osinfo.mboxbsy = 0;
}