isp_netbsd.c revision 1.52 1 /* $NetBSD: isp_netbsd.c,v 1.52 2002/02/21 22:32:41 mjacob Exp $ */
2 /*
3 * This driver, which is contained in NetBSD in the files:
4 *
5 * sys/dev/ic/isp.c
6 * sys/dev/ic/isp_inline.h
7 * sys/dev/ic/isp_netbsd.c
8 * sys/dev/ic/isp_netbsd.h
9 * sys/dev/ic/isp_target.c
10 * sys/dev/ic/isp_target.h
11 * sys/dev/ic/isp_tpublic.h
12 * sys/dev/ic/ispmbox.h
13 * sys/dev/ic/ispreg.h
14 * sys/dev/ic/ispvar.h
15 * sys/microcode/isp/asm_sbus.h
16 * sys/microcode/isp/asm_1040.h
17 * sys/microcode/isp/asm_1080.h
18 * sys/microcode/isp/asm_12160.h
19 * sys/microcode/isp/asm_2100.h
20 * sys/microcode/isp/asm_2200.h
21 * sys/pci/isp_pci.c
22 * sys/sbus/isp_sbus.c
23 *
24 * Is being actively maintained by Matthew Jacob (mjacob (at) netbsd.org).
 25  * This driver also is shared source with FreeBSD, OpenBSD, Linux, and
 26  * Solaris versions. This tends to be an interesting maintenance problem.
27 *
28 * Please coordinate with Matthew Jacob on changes you wish to make here.
29 */
30 /*
31 * Platform (NetBSD) dependent common attachment code for Qlogic adapters.
32 * Matthew Jacob <mjacob (at) nas.nasa.gov>
33 */
34 /*
35 * Copyright (C) 1997, 1998, 1999 National Aeronautics & Space Administration
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. The name of the author may not be used to endorse or promote products
47 * derived from this software without specific prior written permission
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
50 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
51 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
52 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
53 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
54 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
55 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
56 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
57 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
58 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
59 */
60
61 #include <sys/cdefs.h>
62 __KERNEL_RCSID(0, "$NetBSD: isp_netbsd.c,v 1.52 2002/02/21 22:32:41 mjacob Exp $");
63
64 #include <dev/ic/isp_netbsd.h>
65 #include <sys/scsiio.h>
66
67
/*
 * Set a timeout for the watchdogging of a command.
 *
 * The dimensional analysis is
 *
 *	milliseconds * (seconds/millisecond) * (ticks/second) = ticks
 *
 *	=
 *
 *	(milliseconds / 1000) * hz = ticks
 *
 *
 * For timeouts less than 1 second, we'll get zero. Because of this, and
 * because we want to establish *our* timeout to be longer than what the
 * firmware might do, we just add 3 seconds at the back end.
 */
#define _XT(xs)	((((xs)->timeout/1000) * hz) + (3 * hz))

/* Forward declarations for the NetBSD-specific glue in this file. */
static void isp_config_interrupts(struct device *);
static void ispminphys_1020(struct buf *);
static void ispminphys(struct buf *);
static INLINE void ispcmd(struct ispsoftc *, XS_T *);
static void isprequest(struct scsipi_channel *, scsipi_adapter_req_t, void *);
static int
ispioctl(struct scsipi_channel *, u_long, caddr_t, int, struct proc *);

static void isp_polled_cmd(struct ispsoftc *, XS_T *);
static void isp_dog(void *);
static void isp_create_fc_worker(void *);
static void isp_fc_worker(void *);
98
/*
 * Complete attachment of hardware, include subdevices.
 *
 * Fills in the scsipi adapter/channel structures, kicks off the FC worker
 * thread (fibre channel only) or resets the bus(es) (parallel SCSI only),
 * then attaches child devices.
 */
void
isp_attach(struct ispsoftc *isp)
{
	isp->isp_state = ISP_RUNSTATE;

	/* Describe this adapter to the scsipi midlayer. */
	isp->isp_osinfo._adapter.adapt_dev = &isp->isp_osinfo._dev;
	isp->isp_osinfo._adapter.adapt_nchannels = IS_DUALBUS(isp) ? 2 : 1;
	isp->isp_osinfo._adapter.adapt_openings = isp->isp_maxcmds;
	/*
	 * It's not stated whether max_periph is limited by SPI
	 * tag usage, but let's assume that it is.
	 */
	isp->isp_osinfo._adapter.adapt_max_periph = min(isp->isp_maxcmds, 255);
	isp->isp_osinfo._adapter.adapt_ioctl = ispioctl;
	isp->isp_osinfo._adapter.adapt_request = isprequest;
	/* 1020-class chips have a smaller maximum transfer size. */
	if (isp->isp_type <= ISP_HA_SCSI_1020A) {
		isp->isp_osinfo._adapter.adapt_minphys = ispminphys_1020;
	} else {
		isp->isp_osinfo._adapter.adapt_minphys = ispminphys;
	}

	isp->isp_osinfo._chan.chan_adapter = &isp->isp_osinfo._adapter;
	isp->isp_osinfo._chan.chan_bustype = &scsi_bustype;
	isp->isp_osinfo._chan.chan_channel = 0;

	/*
	 * Until the midlayer is fixed to use REPORT LUNS, limit to 8 luns.
	 */
	isp->isp_osinfo._chan.chan_nluns = min(isp->isp_maxluns, 8);

	if (IS_FC(isp)) {
		isp->isp_osinfo._chan.chan_ntargets = MAX_FC_TARG;
		isp->isp_osinfo._chan.chan_id = MAX_FC_TARG;
		/* Give the (not yet created) worker thread initial work. */
		isp->isp_osinfo.threadwork = 1;
		/*
		 * Note that isp_create_fc_worker won't get called
		 * until much much later (after proc0 is created).
		 */
		kthread_create(isp_create_fc_worker, isp);
#ifdef ISP_FW_CRASH_DUMP
		/* Preallocate a buffer large enough for a RISC core dump. */
		if (IS_2200(isp)) {
			FCPARAM(isp)->isp_dump_data =
			    malloc(QLA2200_RISC_IMAGE_DUMP_SIZE, M_DEVBUF,
				M_NOWAIT);
		} else if (IS_23XX(isp)) {
			FCPARAM(isp)->isp_dump_data =
			    malloc(QLA2300_RISC_IMAGE_DUMP_SIZE, M_DEVBUF,
				M_NOWAIT);
		}
		/* A zero first word marks the dump buffer as empty. */
		if (FCPARAM(isp)->isp_dump_data)
			FCPARAM(isp)->isp_dump_data[0] = 0;
#endif
	} else {
		int bus = 0;
		sdparam *sdp = isp->isp_param;

		isp->isp_osinfo._chan.chan_ntargets = MAX_TARGETS;
		isp->isp_osinfo._chan.chan_id = sdp->isp_initiator_id;
		isp->isp_osinfo.discovered[0] = 1 << sdp->isp_initiator_id;
		if (IS_DUALBUS(isp)) {
			/* Clone channel A's setup, then fix up bus B ids. */
			isp->isp_osinfo._chan_b = isp->isp_osinfo._chan;
			sdp++;
			isp->isp_osinfo.discovered[1] =
			    1 << sdp->isp_initiator_id;
			isp->isp_osinfo._chan_b.chan_id = sdp->isp_initiator_id;
			isp->isp_osinfo._chan_b.chan_channel = 1;
		}
		/* Reset the parallel SCSI bus(es) to a known state. */
		ISP_LOCK(isp);
		(void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
		if (IS_DUALBUS(isp)) {
			bus++;
			(void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
		}
		ISP_UNLOCK(isp);
	}


	/*
	 * Defer enabling mailbox interrupts until later.
	 */
	config_interrupts((struct device *) isp, isp_config_interrupts);

	/*
	 * And attach children (if any).
	 */
	config_found((void *)isp, &isp->isp_chanA, scsiprint);
	if (IS_DUALBUS(isp)) {
		config_found((void *)isp, &isp->isp_chanB, scsiprint);
	}
}
192
193
194 static void
195 isp_config_interrupts(struct device *self)
196 {
197 struct ispsoftc *isp = (struct ispsoftc *) self;
198
199 /*
200 * After this point, we'll be doing the new configuration
201 * schema which allows interrups, so we can do tsleep/wakeup
202 * for mailbox stuff at that point.
203 */
204 isp->isp_osinfo.no_mbox_ints = 0;
205 }
206
207
208 /*
209 * minphys our xfers
210 */
211
212 static void
213 ispminphys_1020(struct buf *bp)
214 {
215 if (bp->b_bcount >= (1 << 24)) {
216 bp->b_bcount = (1 << 24);
217 }
218 minphys(bp);
219 }
220
221 static void
222 ispminphys(struct buf *bp)
223 {
224 if (bp->b_bcount >= (1 << 30)) {
225 bp->b_bcount = (1 << 30);
226 }
227 minphys(bp);
228 }
229
/*
 * Adapter ioctl entry point.
 *
 * Handles the ISP-private ioctls (statistics, debug level, rescans, LIP,
 * crash dumps) plus the generic SCBUSIORESET. Returns ENOTTY for anything
 * unrecognized.
 */
static int
ispioctl(struct scsipi_channel *chan, u_long cmd, caddr_t addr, int flag,
    struct proc *p)
{
	struct ispsoftc *isp = (void *)chan->chan_adapter->adapt_dev;
	int retval = ENOTTY;

	switch (cmd) {
#ifdef ISP_FW_CRASH_DUMP
	case ISP_GET_FW_CRASH_DUMP:
	{
		u_int16_t *ptr = FCPARAM(isp)->isp_dump_data;
		size_t sz;

		retval = 0;
		if (IS_2200(isp))
			sz = QLA2200_RISC_IMAGE_DUMP_SIZE;
		else
			sz = QLA2300_RISC_IMAGE_DUMP_SIZE;
		ISP_LOCK(isp);
		/* A nonzero first word means a dump has been captured. */
		if (ptr && *ptr) {
			void *uaddr = *((void **) addr);
			if (copyout(ptr, uaddr, sz)) {
				retval = EFAULT;
			} else {
				/* Mark the dump buffer consumed. */
				*ptr = 0;
			}
		} else {
			retval = ENXIO;
		}
		ISP_UNLOCK(isp);
		break;
	}

	case ISP_FORCE_CRASH_DUMP:
		ISP_LOCK(isp);
		/* Freeze the queue while we crash and reinit the firmware. */
		if (isp->isp_osinfo.blocked == 0) {
			isp->isp_osinfo.blocked = 1;
			scsipi_channel_freeze(&isp->isp_chanA, 1);
		}
		isp_fw_dump(isp);
		isp_reinit(isp);
		ISP_UNLOCK(isp);
		retval = 0;
		break;
#endif
	case ISP_GET_STATS:
	{
		isp_stats_t *sp = (isp_stats_t *) addr;

		MEMZERO(sp, sizeof (*sp));
		sp->isp_stat_version = ISP_STATS_VERSION;
		sp->isp_type = isp->isp_type;
		sp->isp_revision = isp->isp_revision;
		/* Snapshot the counters under the lock. */
		ISP_LOCK(isp);
		sp->isp_stats[ISP_INTCNT] = isp->isp_intcnt;
		sp->isp_stats[ISP_INTBOGUS] = isp->isp_intbogus;
		sp->isp_stats[ISP_INTMBOXC] = isp->isp_intmboxc;
		sp->isp_stats[ISP_INGOASYNC] = isp->isp_intoasync;
		sp->isp_stats[ISP_RSLTCCMPLT] = isp->isp_rsltccmplt;
		sp->isp_stats[ISP_FPHCCMCPLT] = isp->isp_fphccmplt;
		sp->isp_stats[ISP_RSCCHIWAT] = isp->isp_rscchiwater;
		sp->isp_stats[ISP_FPCCHIWAT] = isp->isp_fpcchiwater;
		ISP_UNLOCK(isp);
		retval = 0;
		break;
	}
	case ISP_CLR_STATS:
		ISP_LOCK(isp);
		isp->isp_intcnt = 0;
		isp->isp_intbogus = 0;
		isp->isp_intmboxc = 0;
		isp->isp_intoasync = 0;
		isp->isp_rsltccmplt = 0;
		isp->isp_fphccmplt = 0;
		isp->isp_rscchiwater = 0;
		isp->isp_fpcchiwater = 0;
		ISP_UNLOCK(isp);
		retval = 0;
		break;
	case ISP_SDBLEV:
	{
		/* Swap in a new debug level; return the old one. */
		int olddblev = isp->isp_dblev;
		isp->isp_dblev = *(int *)addr;
		*(int *)addr = olddblev;
		retval = 0;
		break;
	}
	case ISP_RESETHBA:
		ISP_LOCK(isp);
		isp_reinit(isp);
		ISP_UNLOCK(isp);
		retval = 0;
		break;
	case ISP_RESCAN:
		/* FC only: re-evaluate loop state (up to 5 seconds). */
		if (IS_FC(isp)) {
			ISP_LOCK(isp);
			if (isp_fc_runstate(isp, 5 * 1000000)) {
				retval = EIO;
			} else {
				retval = 0;
			}
			ISP_UNLOCK(isp);
		}
		break;
	case ISP_FC_LIP:
		/* FC only: force a Loop Initialization Primitive. */
		if (IS_FC(isp)) {
			ISP_LOCK(isp);
			if (isp_control(isp, ISPCTL_SEND_LIP, 0)) {
				retval = EIO;
			} else {
				retval = 0;
			}
			ISP_UNLOCK(isp);
		}
		break;
	case ISP_FC_GETDINFO:
	{
		/* Return port database info for one loop id. */
		struct isp_fc_device *ifc = (struct isp_fc_device *) addr;
		struct lportdb *lp;

		if (ifc->loopid < 0 || ifc->loopid >= MAX_FC_TARG) {
			retval = EINVAL;
			break;
		}
		ISP_LOCK(isp);
		lp = &FCPARAM(isp)->portdb[ifc->loopid];
		if (lp->valid) {
			ifc->loopid = lp->loopid;
			ifc->portid = lp->portid;
			ifc->node_wwn = lp->node_wwn;
			ifc->port_wwn = lp->port_wwn;
			retval = 0;
		} else {
			retval = ENODEV;
		}
		ISP_UNLOCK(isp);
		break;
	}
	case SCBUSIORESET:
		ISP_LOCK(isp);
		if (isp_control(isp, ISPCTL_RESET_BUS, &chan->chan_channel))
			retval = EIO;
		else
			retval = 0;
		ISP_UNLOCK(isp);
		break;
	default:
		break;
	}
	return (retval);
}
382
/*
 * Start one scsipi transfer.
 *
 * Brings the chip to run state if needed, handles the window where the FC
 * worker thread has not started yet (doing loop bring-up inline), honors
 * pause/block states, runs polled commands synchronously, and otherwise
 * queues the command with a watchdog timeout.
 */
static INLINE void
ispcmd(struct ispsoftc *isp, XS_T *xs)
{
	ISP_LOCK(isp);
	if (isp->isp_state < ISP_RUNSTATE) {
		/* (Re)initialize the chip with interrupts masked. */
		DISABLE_INTS(isp);
		isp_init(isp);
		if (isp->isp_state != ISP_INITSTATE) {
			/* Init failed; bounce the command. */
			ENABLE_INTS(isp);
			ISP_UNLOCK(isp);
			XS_SETERR(xs, HBA_BOTCH);
			scsipi_done(xs);
			return;
		}
		isp->isp_state = ISP_RUNSTATE;
		ENABLE_INTS(isp);
	}
	/*
	 * Handle the case of a FC card where the FC thread hasn't
	 * fired up yet and we have loop state to clean up. If we
	 * can't clear things up and we've never seen loop up, bounce
	 * the command.
	 */
	if (IS_FC(isp) && isp->isp_osinfo.threadwork &&
	    isp->isp_osinfo.thread == 0) {
		/* Remember mailbox-interrupt mode so we can restore it. */
		volatile u_int8_t ombi = isp->isp_osinfo.no_mbox_ints;
		int delay_time;

		if (xs->xs_control & XS_CTL_POLL) {
			isp->isp_osinfo.no_mbox_ints = 1;
		}

		/* First attempt gets a long (10s) wait, retries 250ms. */
		if (isp->isp_osinfo.loop_checked == 0) {
			delay_time = 10 * 1000000;
			isp->isp_osinfo.loop_checked = 1;
		} else {
			delay_time = 250000;
		}

		if (isp_fc_runstate(isp, delay_time) != 0) {
			if (xs->xs_control & XS_CTL_POLL) {
				isp->isp_osinfo.no_mbox_ints = ombi;
			}
			if (FCPARAM(isp)->loop_seen_once == 0) {
				/* No cable, most likely: fail as selection timeout. */
				XS_SETERR(xs, HBA_SELTIMEOUT);
				scsipi_done(xs);
				ISP_UNLOCK(isp);
				return;
			}
			/*
			 * Otherwise, fall thru to be queued up for later.
			 */
		} else {
			/* Loop is up; clear block/pause and thaw the queue. */
			int wasblocked =
			    (isp->isp_osinfo.blocked || isp->isp_osinfo.paused);
			isp->isp_osinfo.threadwork = 0;
			isp->isp_osinfo.blocked =
			    isp->isp_osinfo.paused = 0;
			if (wasblocked) {
				scsipi_channel_thaw(&isp->isp_chanA, 1);
			}
		}
		if (xs->xs_control & XS_CTL_POLL) {
			isp->isp_osinfo.no_mbox_ints = ombi;
		}
	}

	if (isp->isp_osinfo.paused) {
		isp_prt(isp, ISP_LOGWARN, "I/O while paused");
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		ISP_UNLOCK(isp);
		return;
	}
	if (isp->isp_osinfo.blocked) {
		isp_prt(isp, ISP_LOGWARN, "I/O while blocked");
		xs->error = XS_REQUEUE;
		scsipi_done(xs);
		ISP_UNLOCK(isp);
		return;
	}

	if (xs->xs_control & XS_CTL_POLL) {
		/* Run without mailbox interrupts, restore mode afterwards. */
		volatile u_int8_t ombi = isp->isp_osinfo.no_mbox_ints;
		isp->isp_osinfo.no_mbox_ints = 1;
		isp_polled_cmd(isp, xs);
		isp->isp_osinfo.no_mbox_ints = ombi;
		ISP_UNLOCK(isp);
		return;
	}

	switch (isp_start(xs)) {
	case CMD_QUEUED:
		/* Arm the watchdog (see _XT for the timeout calculation). */
		if (xs->timeout) {
			callout_reset(&xs->xs_callout, _XT(xs), isp_dog, xs);
		}
		break;
	case CMD_EAGAIN:
		/* Out of chip resources; pause and freeze the queue(s). */
		isp->isp_osinfo.paused = 1;
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_channel_freeze(&isp->isp_chanA, 1);
		if (IS_DUALBUS(isp)) {
			scsipi_channel_freeze(&isp->isp_chanB, 1);
		}
		scsipi_done(xs);
		break;
	case CMD_RQLATER:
		/*
		 * We can only get RQLATER from FC devices (1 channel only)
		 *
		 * Also, if we've never seen loop up, bounce the command
		 * (somebody has booted with no FC cable connected)
		 */
		if (FCPARAM(isp)->loop_seen_once == 0) {
			XS_SETERR(xs, HBA_SELTIMEOUT);
			scsipi_done(xs);
			break;
		}
		if (isp->isp_osinfo.blocked == 0) {
			isp->isp_osinfo.blocked = 1;
			scsipi_channel_freeze(&isp->isp_chanA, 1);
		}
		xs->error = XS_REQUEUE;
		scsipi_done(xs);
		break;
	case CMD_COMPLETE:
		/* isp_start already finished the command (with error set). */
		scsipi_done(xs);
		break;
	}
	ISP_UNLOCK(isp);
}
514
/*
 * scsipi adapter request dispatcher: run a transfer, grow resources
 * (unsupported), or push new transfer-mode goals down to the chip.
 */
static void
isprequest(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
{
	struct ispsoftc *isp = (void *)chan->chan_adapter->adapt_dev;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		ispcmd(isp, (XS_T *) arg);
		break;

	case ADAPTER_REQ_GROW_RESOURCES:
		/* Not supported. */
		break;

	case ADAPTER_REQ_SET_XFER_MODE:
		if (IS_SCSI(isp)) {
			struct scsipi_xfer_mode *xm = arg;
			int dflags = 0;
			sdparam *sdp = SDPARAM(isp);

			sdp += chan->chan_channel;
			/* Translate midlayer capability bits to DPARM goals. */
			if (xm->xm_mode & PERIPH_CAP_TQING)
				dflags |= DPARM_TQING;
			if (xm->xm_mode & PERIPH_CAP_WIDE16)
				dflags |= DPARM_WIDE;
			if (xm->xm_mode & PERIPH_CAP_SYNC)
				dflags |= DPARM_SYNC;
			ISP_LOCK(isp);
			sdp->isp_devparam[xm->xm_target].goal_flags |= dflags;
			dflags = sdp->isp_devparam[xm->xm_target].goal_flags;
			/* Ask the core to renegotiate for this target/bus. */
			sdp->isp_devparam[xm->xm_target].dev_update = 1;
			isp->isp_update |= (1 << chan->chan_channel);
			ISP_UNLOCK(isp);
			isp_prt(isp, ISP_LOGDEBUG1,
			    "ispioctl: device flags 0x%x for %d.%d.X",
			    dflags, chan->chan_channel, xm->xm_target);
			break;
		}
		/* non-SCSI (FC): nothing to do; FALLTHROUGH to default */
	default:
		break;
	}
}
557
/*
 * Run one command synchronously, polling the chip for completion
 * instead of taking interrupts. Used for XS_CTL_POLL transfers.
 */
static void
isp_polled_cmd(struct ispsoftc *isp, XS_T *xs)
{
	int result;
	int infinite = 0, mswait;

	result = isp_start(xs);

	switch (result) {
	case CMD_QUEUED:
		break;
	case CMD_RQLATER:
		if (XS_NOERR(xs)) {
			xs->error = XS_REQUEUE;
		}
		/* FALLTHROUGH (error already set, EAGAIN case won't override) */
	case CMD_EAGAIN:
		if (XS_NOERR(xs)) {
			xs->error = XS_RESOURCE_SHORTAGE;
		}
		/* FALLTHROUGH */
	case CMD_COMPLETE:
		scsipi_done(xs);
		return;

	}

	/*
	 * If we can't use interrupts, poll on completion.
	 */
	if ((mswait = XS_TIME(xs)) == 0)
		infinite = 1;

	/* Poll in ~1ms steps until done or the wait budget is spent. */
	while (mswait || infinite) {
		u_int16_t isr, sema, mbox;
		if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
			isp_intr(isp, isr, sema, mbox);
			if (XS_CMD_DONE_P(xs)) {
				break;
			}
		}
		USEC_DELAY(1000);
		mswait -= 1;
	}

	/*
	 * If no other error occurred but we didn't finish,
	 * something bad happened.
	 */
	if (XS_CMD_DONE_P(xs) == 0) {
		/* Abort it; if even that fails, reinitialize the chip. */
		if (isp_control(isp, ISPCTL_ABORT_CMD, xs)) {
			isp_reinit(isp);
		}
		if (XS_NOERR(xs)) {
			XS_SETERR(xs, HBA_BOTCH);
		}
	}
	scsipi_done(xs);
}
616
617 void
618 isp_done(XS_T *xs)
619 {
620 XS_CMD_S_DONE(xs);
621 if (XS_CMD_WDOG_P(xs) == 0) {
622 struct ispsoftc *isp = XS_ISP(xs);
623 callout_stop(&xs->xs_callout);
624 if (XS_CMD_GRACE_P(xs)) {
625 isp_prt(isp, ISP_LOGDEBUG1,
626 "finished command on borrowed time");
627 }
628 XS_CMD_S_CLEAR(xs);
629 /*
630 * Fixup- if we get a QFULL, we need
631 * to set XS_BUSY as the error.
632 */
633 if (xs->status == SCSI_QUEUE_FULL) {
634 xs->error = XS_BUSY;
635 }
636 if (isp->isp_osinfo.paused) {
637 isp->isp_osinfo.paused = 0;
638 scsipi_channel_timed_thaw(&isp->isp_chanA);
639 if (IS_DUALBUS(isp)) {
640 scsipi_channel_timed_thaw(&isp->isp_chanB);
641 }
642 }
643 scsipi_done(xs);
644 }
645 }
646
647 static void
648 isp_dog(void *arg)
649 {
650 XS_T *xs = arg;
651 struct ispsoftc *isp = XS_ISP(xs);
652 u_int16_t handle;
653
654 ISP_ILOCK(isp);
655 /*
656 * We've decided this command is dead. Make sure we're not trying
657 * to kill a command that's already dead by getting it's handle and
658 * and seeing whether it's still alive.
659 */
660 handle = isp_find_handle(isp, xs);
661 if (handle) {
662 u_int16_t isr, mbox, sema;
663
664 if (XS_CMD_DONE_P(xs)) {
665 isp_prt(isp, ISP_LOGDEBUG1,
666 "watchdog found done cmd (handle 0x%x)", handle);
667 ISP_IUNLOCK(isp);
668 return;
669 }
670
671 if (XS_CMD_WDOG_P(xs)) {
672 isp_prt(isp, ISP_LOGDEBUG1,
673 "recursive watchdog (handle 0x%x)", handle);
674 ISP_IUNLOCK(isp);
675 return;
676 }
677
678 XS_CMD_S_WDOG(xs);
679
680 if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
681 isp_intr(isp, isr, sema, mbox);
682
683 }
684 if (XS_CMD_DONE_P(xs)) {
685 isp_prt(isp, ISP_LOGDEBUG1,
686 "watchdog cleanup for handle 0x%x", handle);
687 XS_CMD_C_WDOG(xs);
688 isp_done(xs);
689 } else if (XS_CMD_GRACE_P(xs)) {
690 isp_prt(isp, ISP_LOGDEBUG1,
691 "watchdog timeout for handle 0x%x", handle);
692 /*
693 * Make sure the command is *really* dead before we
694 * release the handle (and DMA resources) for reuse.
695 */
696 (void) isp_control(isp, ISPCTL_ABORT_CMD, arg);
697
698 /*
699 * After this point, the comamnd is really dead.
700 */
701 if (XS_XFRLEN(xs)) {
702 ISP_DMAFREE(isp, xs, handle);
703 }
704 isp_destroy_handle(isp, handle);
705 XS_SETERR(xs, XS_TIMEOUT);
706 XS_CMD_S_CLEAR(xs);
707 isp_done(xs);
708 } else {
709 u_int16_t nxti, optr;
710 ispreq_t local, *mp = &local, *qe;
711 isp_prt(isp, ISP_LOGDEBUG2,
712 "possible command timeout on handle %x", handle);
713 XS_CMD_C_WDOG(xs);
714 callout_reset(&xs->xs_callout, hz, isp_dog, xs);
715 if (isp_getrqentry(isp, &nxti, &optr, (void **) &qe)) {
716 ISP_UNLOCK(isp);
717 return;
718 }
719 XS_CMD_S_GRACE(xs);
720 MEMZERO((void *) mp, sizeof (*mp));
721 mp->req_header.rqs_entry_count = 1;
722 mp->req_header.rqs_entry_type = RQSTYPE_MARKER;
723 mp->req_modifier = SYNC_ALL;
724 mp->req_target = XS_CHANNEL(xs) << 7;
725 isp_put_request(isp, mp, qe);
726 ISP_ADD_REQUEST(isp, nxti);
727 }
728 } else {
729 isp_prt(isp, ISP_LOGDEBUG0, "watchdog with no command");
730 }
731 ISP_IUNLOCK(isp);
732 }
733
734 /*
735 * Fibre Channel state cleanup thread
736 */
737 static void
738 isp_create_fc_worker(void *arg)
739 {
740 struct ispsoftc *isp = arg;
741
742 if (kthread_create1(isp_fc_worker, isp, &isp->isp_osinfo.thread,
743 "%s:fc_thrd", isp->isp_name)) {
744 isp_prt(isp, ISP_LOGERR, "unable to create FC worker thread");
745 panic("isp_create_fc_worker");
746 }
747
748 }
749
/*
 * FC worker thread body: waits for loop-state work (threadwork), runs
 * isp_fc_runstate to bring the loop up, and thaws the midlayer queue
 * once the loop is ready. Exits when isp_osinfo.thread is cleared.
 */
static void
isp_fc_worker(void *arg)
{
	void scsipi_run_queue(struct scsipi_channel *);
	struct ispsoftc *isp = arg;

	for (;;) {
		int s;

		/*
		 * Note we do *not* use the ISP_LOCK/ISP_UNLOCK macros here.
		 */
		s = splbio();
		while (isp->isp_osinfo.threadwork) {
			isp->isp_osinfo.threadwork = 0;
			if (isp_fc_runstate(isp, 10 * 1000000) == 0) {
				/* Loop came up. */
				break;
			}
			if (isp->isp_osinfo.loop_checked &&
			    FCPARAM(isp)->loop_seen_once == 0) {
				/*
				 * Never seen loop up: just go back to sleep.
				 * NOTE(review): this path calls splx(s) here
				 * and again after the tsleep at skip: —
				 * verify the double restore is intended.
				 */
				splx(s);
				goto skip;
			}
			/* Loop not ready yet; retry after 500ms. */
			isp->isp_osinfo.threadwork = 1;
			splx(s);
			delay(500 * 1000);
			s = splbio();
		}
		if (FCPARAM(isp)->isp_fwstate != FW_READY ||
		    FCPARAM(isp)->isp_loopstate != LOOP_READY) {
			isp_prt(isp, ISP_LOGINFO, "isp_fc_runstate in vain");
			isp->isp_osinfo.threadwork = 1;
			splx(s);
			continue;
		}

		/* Loop is ready; release any queue freeze we imposed. */
		if (isp->isp_osinfo.blocked) {
			isp->isp_osinfo.blocked = 0;
			isp_prt(isp, ISP_LOGDEBUG0,
			    "restarting queues (freeze count %d)",
			    isp->isp_chanA.chan_qfreeze);
			scsipi_channel_thaw(&isp->isp_chanA, 1);
		}

		/* A cleared thread pointer is our signal to exit. */
		if (isp->isp_osinfo.thread == NULL)
			break;

skip:
		/* Sleep until isp_async() posts more work and wakes us. */
		(void) tsleep(&isp->isp_osinfo.thread, PRIBIO, "fcclnup", 0);

		splx(s);
	}

	/* In case parent is waiting for us to exit. */
	wakeup(&isp->isp_osinfo.thread);

	kthread_exit(0);
}
808
/*
 * Free any associated resources prior to decommissioning and
 * set the card to a known state (so it doesn't wake up and kick
 * us when we aren't expecting it to).
 *
 * Locks are held before coming here.
 */
void
isp_uninit(struct ispsoftc *isp)
{
	isp_lock(isp);
	/*
	 * Leave with interrupts disabled.
	 */
	DISABLE_INTS(isp);
	isp_unlock(isp);
}
826
/*
 * Platform async-event hook, called by the core driver when something
 * notable happens: negotiation updates, bus resets, FC loop transitions,
 * port database changes, fabric device discovery, and firmware crashes.
 * Always returns 0.
 */
int
isp_async(struct ispsoftc *isp, ispasync_t cmd, void *arg)
{
	int bus, tgt;

	switch (cmd) {
	case ISPASYNC_NEW_TGT_PARAMS:
		/* Report newly negotiated transfer parameters upward. */
		if (IS_SCSI(isp) && isp->isp_dblev) {
			sdparam *sdp = isp->isp_param;
			int flags;
			struct scsipi_xfer_mode xm;

			/* arg encodes bus in the high 16 bits, target low. */
			tgt = *((int *) arg);
			bus = (tgt >> 16) & 0xffff;
			tgt &= 0xffff;
			sdp += bus;
			flags = sdp->isp_devparam[tgt].actv_flags;

			xm.xm_mode = 0;
			xm.xm_period = sdp->isp_devparam[tgt].actv_period;
			xm.xm_offset = sdp->isp_devparam[tgt].actv_offset;
			xm.xm_target = tgt;

			if ((flags & DPARM_SYNC) && xm.xm_period && xm.xm_offset)
				xm.xm_mode |= PERIPH_CAP_SYNC;
			if (flags & DPARM_WIDE)
				xm.xm_mode |= PERIPH_CAP_WIDE16;
			if (flags & DPARM_TQING)
				xm.xm_mode |= PERIPH_CAP_TQING;
			scsipi_async_event(bus? &isp->isp_chanB : &isp->isp_chanA,
			    ASYNC_EVENT_XFER_MODE, &xm);
			break;
		}
		/*
		 * NOTE(review): when the condition above is false this falls
		 * through into the BUS_RESET case — confirm intentional.
		 */
	case ISPASYNC_BUS_RESET:
		bus = *((int *) arg);
		scsipi_async_event(bus? &isp->isp_chanB : &isp->isp_chanA,
		    ASYNC_EVENT_RESET, NULL);
		isp_prt(isp, ISP_LOGINFO, "SCSI bus %d reset detected", bus);
		break;
	case ISPASYNC_LIP:
		/*
		 * Don't do queue freezes or blockage until we have the
		 * thread running that can unfreeze/unblock us.
		 */
		if (isp->isp_osinfo.blocked == 0) {
			if (isp->isp_osinfo.thread) {
				isp->isp_osinfo.blocked = 1;
				scsipi_channel_freeze(&isp->isp_chanA, 1);
			}
		}
		isp_prt(isp, ISP_LOGINFO, "LIP Received");
		break;
	case ISPASYNC_LOOP_RESET:
		/*
		 * Don't do queue freezes or blockage until we have the
		 * thread running that can unfreeze/unblock us.
		 */
		if (isp->isp_osinfo.blocked == 0) {
			if (isp->isp_osinfo.thread) {
				isp->isp_osinfo.blocked = 1;
				scsipi_channel_freeze(&isp->isp_chanA, 1);
			}
		}
		isp_prt(isp, ISP_LOGINFO, "Loop Reset Received");
		break;
	case ISPASYNC_LOOP_DOWN:
		/*
		 * Don't do queue freezes or blockage until we have the
		 * thread running that can unfreeze/unblock us.
		 */
		if (isp->isp_osinfo.blocked == 0) {
			if (isp->isp_osinfo.thread) {
				isp->isp_osinfo.blocked = 1;
				scsipi_channel_freeze(&isp->isp_chanA, 1);
			}
		}
		isp_prt(isp, ISP_LOGINFO, "Loop DOWN");
		break;
	case ISPASYNC_LOOP_UP:
		/*
		 * Let the subsequent ISPASYNC_CHANGE_NOTIFY invoke
		 * the FC worker thread. When the FC worker thread
		 * is done, let *it* call scsipi_channel_thaw...
		 */
		isp_prt(isp, ISP_LOGINFO, "Loop UP");
		break;
	case ISPASYNC_PROMENADE:
		/* A device arrived on or departed from the loop; just log. */
		if (IS_FC(isp) && isp->isp_dblev) {
			const char fmt[] = "Target %d (Loop 0x%x) Port ID 0x%x "
			    "(role %s) %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x";
			const static char *roles[4] = {
			    "None", "Target", "Initiator", "Target/Initiator"
			};
			fcparam *fcp = isp->isp_param;
			int tgt = *((int *) arg);
			struct lportdb *lp = &fcp->portdb[tgt];

			isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid, lp->portid,
			    roles[lp->roles & 0x3],
			    (lp->valid)? "Arrived" : "Departed",
			    (u_int32_t) (lp->port_wwn >> 32),
			    (u_int32_t) (lp->port_wwn & 0xffffffffLL),
			    (u_int32_t) (lp->node_wwn >> 32),
			    (u_int32_t) (lp->node_wwn & 0xffffffffLL));
			break;
		}
		/*
		 * NOTE(review): when the condition above is false this falls
		 * through into CHANGE_NOTIFY — confirm intentional.
		 */
	case ISPASYNC_CHANGE_NOTIFY:
		if (arg == ISPASYNC_CHANGE_PDB) {
			isp_prt(isp, ISP_LOGINFO, "Port Database Changed");
		} else if (arg == ISPASYNC_CHANGE_SNS) {
			isp_prt(isp, ISP_LOGINFO,
			    "Name Server Database Changed");
		}

		/*
		 * We can set blocked here because we know it's now okay
		 * to try and run isp_fc_runstate (in order to build loop
		 * state). But we don't try and freeze the midlayer's queue
		 * if we have no thread that we can wake to later unfreeze
		 * it.
		 */
		if (isp->isp_osinfo.blocked == 0) {
			isp->isp_osinfo.blocked = 1;
			if (isp->isp_osinfo.thread) {
				scsipi_channel_freeze(&isp->isp_chanA, 1);
			}
		}
		/*
		 * Note that we have work for the thread to do, and
		 * if the thread is here already, wake it up.
		 */
		isp->isp_osinfo.threadwork++;
		if (isp->isp_osinfo.thread) {
			wakeup(&isp->isp_osinfo.thread);
		} else {
			isp_prt(isp, ISP_LOGDEBUG1, "no FC thread yet");
		}
		break;
	case ISPASYNC_FABRIC_DEV:
	{
		/*
		 * Name-server response for a fabric device: decode its ids,
		 * log it, and enter it into the port database if new.
		 */
		int target, lrange;
		struct lportdb *lp = NULL;
		char *pt;
		sns_ganrsp_t *resp = (sns_ganrsp_t *) arg;
		u_int32_t portid;
		u_int64_t wwpn, wwnn;
		fcparam *fcp = isp->isp_param;

		/* Assemble 24-bit port id and 64-bit WWNs (big-endian). */
		portid =
		    (((u_int32_t) resp->snscb_port_id[0]) << 16) |
		    (((u_int32_t) resp->snscb_port_id[1]) << 8) |
		    (((u_int32_t) resp->snscb_port_id[2]));

		wwpn =
		    (((u_int64_t)resp->snscb_portname[0]) << 56) |
		    (((u_int64_t)resp->snscb_portname[1]) << 48) |
		    (((u_int64_t)resp->snscb_portname[2]) << 40) |
		    (((u_int64_t)resp->snscb_portname[3]) << 32) |
		    (((u_int64_t)resp->snscb_portname[4]) << 24) |
		    (((u_int64_t)resp->snscb_portname[5]) << 16) |
		    (((u_int64_t)resp->snscb_portname[6]) << 8) |
		    (((u_int64_t)resp->snscb_portname[7]));

		wwnn =
		    (((u_int64_t)resp->snscb_nodename[0]) << 56) |
		    (((u_int64_t)resp->snscb_nodename[1]) << 48) |
		    (((u_int64_t)resp->snscb_nodename[2]) << 40) |
		    (((u_int64_t)resp->snscb_nodename[3]) << 32) |
		    (((u_int64_t)resp->snscb_nodename[4]) << 24) |
		    (((u_int64_t)resp->snscb_nodename[5]) << 16) |
		    (((u_int64_t)resp->snscb_nodename[6]) << 8) |
		    (((u_int64_t)resp->snscb_nodename[7]));
		if (portid == 0 || wwpn == 0) {
			break;
		}

		/* Port-type code to printable name. */
		switch (resp->snscb_port_type) {
		case 1:
			pt = "   N_Port";
			break;
		case 2:
			pt = "  NL_Port";
			break;
		case 3:
			pt = "F/NL_Port";
			break;
		case 0x7f:
			pt = "  Nx_Port";
			break;
		case 0x81:
			pt = "  F_port";
			break;
		case 0x82:
			pt = "  FL_Port";
			break;
		case 0x84:
			pt = "   E_port";
			break;
		default:
			pt = "?";
			break;
		}
		isp_prt(isp, ISP_LOGINFO,
		    "%s @ 0x%x, Node 0x%08x%08x Port %08x%08x",
		    pt, portid, ((u_int32_t) (wwnn >> 32)), ((u_int32_t) wwnn),
		    ((u_int32_t) (wwpn >> 32)), ((u_int32_t) wwpn));
		/*
		 * We're only interested in SCSI_FCP types (for now)
		 */
		if ((resp->snscb_fc4_types[2] & 1) == 0) {
			break;
		}
		/* On non-F_PORT topologies, skip the reserved low loop ids. */
		if (fcp->isp_topo != TOPO_F_PORT)
			lrange = FC_SNS_ID+1;
		else
			lrange = 0;
		/*
		 * Is it already in our list?
		 */
		for (target = lrange; target < MAX_FC_TARG; target++) {
			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
				continue;
			}
			lp = &fcp->portdb[target];
			if (lp->port_wwn == wwpn && lp->node_wwn == wwnn) {
				lp->fabric_dev = 1;
				break;
			}
		}
		if (target < MAX_FC_TARG) {
			break;
		}
		/* Not known: find a free portdb slot for it. */
		for (target = lrange; target < MAX_FC_TARG; target++) {
			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
				continue;
			}
			lp = &fcp->portdb[target];
			if (lp->port_wwn == 0) {
				break;
			}
		}
		if (target == MAX_FC_TARG) {
			isp_prt(isp, ISP_LOGWARN,
			    "no more space for fabric devices");
			break;
		}
		lp->node_wwn = wwnn;
		lp->port_wwn = wwpn;
		lp->portid = portid;
		lp->fabric_dev = 1;
		break;
	}
	case ISPASYNC_FW_CRASH:
	{
		/* Log the crash location and reinitialize the chip. */
		u_int16_t mbox1, mbox6;
		mbox1 = ISP_READ(isp, OUTMAILBOX1);
		if (IS_DUALBUS(isp)) {
			mbox6 = ISP_READ(isp, OUTMAILBOX6);
		} else {
			mbox6 = 0;
		}
		isp_prt(isp, ISP_LOGERR,
		    "Internal Firmware Error on bus %d @ RISC Address 0x%x",
		    mbox6, mbox1);
		isp_reinit(isp);
		break;
	}
	default:
		break;
	}
	return (0);
}
1099
1100 #include <machine/stdarg.h>
1101 void
1102 isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
1103 {
1104 va_list ap;
1105 if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
1106 return;
1107 }
1108 printf("%s: ", isp->isp_name);
1109 va_start(ap, fmt);
1110 vprintf(fmt, ap);
1111 va_end(ap);
1112 printf("\n");
1113 }
1114