isp_netbsd.c revision 1.53.4.1 1 /* $NetBSD: isp_netbsd.c,v 1.53.4.1 2002/09/01 23:39:46 lukem Exp $ */
2 /*
3 * This driver, which is contained in NetBSD in the files:
4 *
5 * sys/dev/ic/isp.c
6 * sys/dev/ic/isp_inline.h
7 * sys/dev/ic/isp_netbsd.c
8 * sys/dev/ic/isp_netbsd.h
9 * sys/dev/ic/isp_target.c
10 * sys/dev/ic/isp_target.h
11 * sys/dev/ic/isp_tpublic.h
12 * sys/dev/ic/ispmbox.h
13 * sys/dev/ic/ispreg.h
14 * sys/dev/ic/ispvar.h
15 * sys/microcode/isp/asm_sbus.h
16 * sys/microcode/isp/asm_1040.h
17 * sys/microcode/isp/asm_1080.h
18 * sys/microcode/isp/asm_12160.h
19 * sys/microcode/isp/asm_2100.h
20 * sys/microcode/isp/asm_2200.h
21 * sys/pci/isp_pci.c
22 * sys/sbus/isp_sbus.c
23 *
 * Is being actively maintained by Matthew Jacob (mjacob@netbsd.org).
 * This driver also is shared source with FreeBSD, OpenBSD, Linux, and
 * Solaris versions. This tends to be an interesting maintenance problem.
27 *
28 * Please coordinate with Matthew Jacob on changes you wish to make here.
29 */
30 /*
31 * Platform (NetBSD) dependent common attachment code for Qlogic adapters.
32 * Matthew Jacob <mjacob (at) nas.nasa.gov>
33 */
34 /*
35 * Copyright (C) 1997, 1998, 1999 National Aeronautics & Space Administration
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. The name of the author may not be used to endorse or promote products
47 * derived from this software without specific prior written permission
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
50 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
51 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
52 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
53 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
54 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
55 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
56 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
57 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
58 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
59 */
60
61 #include <sys/cdefs.h>
62 __KERNEL_RCSID(0, "$NetBSD: isp_netbsd.c,v 1.53.4.1 2002/09/01 23:39:46 lukem Exp $");
63
64 #include <dev/ic/isp_netbsd.h>
65 #include <sys/scsiio.h>
66
67
/*
 * Set a timeout for the watchdogging of a command.
 *
 * The dimensional analysis is
 *
 *	milliseconds * (seconds/millisecond) * (ticks/second) = ticks
 *
 *			=
 *
 *	(milliseconds / 1000) * hz = ticks
 *
 *
 * For timeouts less than 1 second, we'll get zero. Because of this, and
 * because we want to establish *our* timeout to be longer than what the
 * firmware might do, we just add 3 seconds at the back end.
 */
#define	_XT(xs)	((((xs)->timeout/1000) * hz) + (3 * hz))

/* Local (file-scope) function prototypes. */
static void isp_config_interrupts(struct device *);
static void ispminphys_1020(struct buf *);
static void ispminphys(struct buf *);
static INLINE void ispcmd(struct ispsoftc *, XS_T *);
static void isprequest(struct scsipi_channel *, scsipi_adapter_req_t, void *);
static int
ispioctl(struct scsipi_channel *, u_long, caddr_t, int, struct proc *);

static void isp_polled_cmd(struct ispsoftc *, XS_T *);
static void isp_dog(void *);
static void isp_create_fc_worker(void *);
static void isp_fc_worker(void *);
98
/*
 * Complete attachment of hardware, including subdevices.
 *
 * Called by the bus front ends (isp_pci/isp_sbus) after chip reset and
 * firmware load.  Fills in the scsipi adapter/channel glue, resets the
 * SCSI bus(es) for parallel SCSI cards, schedules the FC worker thread
 * for Fibre Channel cards, and attaches child scsibus instances.
 */
void
isp_attach(struct ispsoftc *isp)
{
	isp->isp_state = ISP_RUNSTATE;

	/* scsipi adapter glue shared by all channels. */
	isp->isp_osinfo._adapter.adapt_dev = &isp->isp_osinfo._dev;
	isp->isp_osinfo._adapter.adapt_nchannels = IS_DUALBUS(isp) ? 2 : 1;
	isp->isp_osinfo._adapter.adapt_openings = isp->isp_maxcmds;
	/*
	 * It's not stated whether max_periph is limited by SPI
	 * tag usage, but let's assume that it is.
	 */
	isp->isp_osinfo._adapter.adapt_max_periph = min(isp->isp_maxcmds, 255);
	isp->isp_osinfo._adapter.adapt_ioctl = ispioctl;
	isp->isp_osinfo._adapter.adapt_request = isprequest;
	/* 1020/1040-class chips can only address 24-bit transfer counts. */
	if (isp->isp_type <= ISP_HA_SCSI_1020A) {
		isp->isp_osinfo._adapter.adapt_minphys = ispminphys_1020;
	} else {
		isp->isp_osinfo._adapter.adapt_minphys = ispminphys;
	}

	isp->isp_osinfo._chan.chan_adapter = &isp->isp_osinfo._adapter;
	isp->isp_osinfo._chan.chan_bustype = &scsi_bustype;
	isp->isp_osinfo._chan.chan_channel = 0;

	/*
	 * Until the midlayer is fixed to use REPORT LUNS, limit to 8 luns.
	 */
	isp->isp_osinfo._chan.chan_nluns = min(isp->isp_maxluns, 8);

	if (IS_FC(isp)) {
		isp->isp_osinfo._chan.chan_ntargets = MAX_FC_TARG;
		isp->isp_osinfo._chan.chan_id = MAX_FC_TARG;
		/* Mark that the FC worker has loop state to settle. */
		isp->isp_osinfo.threadwork = 1;
		/*
		 * Note that isp_create_fc_worker won't get called
		 * until much much later (after proc0 is created).
		 */
		kthread_create(isp_create_fc_worker, isp);
#ifdef	ISP_FW_CRASH_DUMP
		/* Preallocate a crash dump buffer sized for the chip type. */
		if (IS_2200(isp)) {
			FCPARAM(isp)->isp_dump_data =
			    malloc(QLA2200_RISC_IMAGE_DUMP_SIZE, M_DEVBUF,
				M_NOWAIT);
		} else if (IS_23XX(isp)) {
			FCPARAM(isp)->isp_dump_data =
			    malloc(QLA2300_RISC_IMAGE_DUMP_SIZE, M_DEVBUF,
				M_NOWAIT);
		}
		if (FCPARAM(isp)->isp_dump_data)
			FCPARAM(isp)->isp_dump_data[0] = 0;
#endif
	} else {
		int bus = 0;
		sdparam *sdp = isp->isp_param;

		isp->isp_osinfo._chan.chan_ntargets = MAX_TARGETS;
		isp->isp_osinfo._chan.chan_id = sdp->isp_initiator_id;
		isp->isp_osinfo.discovered[0] = 1 << sdp->isp_initiator_id;
		if (IS_DUALBUS(isp)) {
			/* Clone channel A's setup, then fix the B-specifics. */
			isp->isp_osinfo._chan_b = isp->isp_osinfo._chan;
			sdp++;
			isp->isp_osinfo.discovered[1] =
			    1 << sdp->isp_initiator_id;
			isp->isp_osinfo._chan_b.chan_id = sdp->isp_initiator_id;
			isp->isp_osinfo._chan_b.chan_channel = 1;
		}
		ISP_LOCK(isp);
		/* Start with clean SCSI bus state on each channel. */
		(void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
		if (IS_DUALBUS(isp)) {
			bus++;
			(void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
		}
		ISP_UNLOCK(isp);
	}


	/*
	 * Defer enabling mailbox interrupts until later.
	 */
	config_interrupts((struct device *) isp, isp_config_interrupts);

	/*
	 * And attach children (if any).
	 */
	config_found((void *)isp, &isp->isp_chanA, scsiprint);
	if (IS_DUALBUS(isp)) {
		config_found((void *)isp, &isp->isp_chanB, scsiprint);
	}
}
192
193
194 static void
195 isp_config_interrupts(struct device *self)
196 {
197 struct ispsoftc *isp = (struct ispsoftc *) self;
198
199 /*
200 * After this point, we'll be doing the new configuration
201 * schema which allows interrupts, so we can do tsleep/wakeup
202 * for mailbox stuff at that point, if that's allowed.
203 */
204 if (IS_FC(isp)) {
205 isp->isp_osinfo.no_mbox_ints = 0;
206 }
207 }
208
209
210 /*
211 * minphys our xfers
212 */
213
214 static void
215 ispminphys_1020(struct buf *bp)
216 {
217 if (bp->b_bcount >= (1 << 24)) {
218 bp->b_bcount = (1 << 24);
219 }
220 minphys(bp);
221 }
222
223 static void
224 ispminphys(struct buf *bp)
225 {
226 if (bp->b_bcount >= (1 << 30)) {
227 bp->b_bcount = (1 << 30);
228 }
229 minphys(bp);
230 }
231
/*
 * Adapter ioctl entry point.
 *
 * Handles the driver-private ioctls (statistics, debug level, HBA
 * reset, FC rescan/LIP/device queries, optional firmware crash dumps)
 * plus the generic SCBUSIORESET.  Returns ENOTTY for anything
 * unrecognized; hardware access is done under ISP_LOCK.
 */
static int
ispioctl(struct scsipi_channel *chan, u_long cmd, caddr_t addr, int flag,
	struct proc *p)
{
	struct ispsoftc *isp = (void *)chan->chan_adapter->adapt_dev;
	int retval = ENOTTY;

	switch (cmd) {
#ifdef	ISP_FW_CRASH_DUMP
	case ISP_GET_FW_CRASH_DUMP:
	{
		u_int16_t *ptr = FCPARAM(isp)->isp_dump_data;
		size_t sz;

		retval = 0;
		if (IS_2200(isp))
			sz = QLA2200_RISC_IMAGE_DUMP_SIZE;
		else
			sz = QLA2300_RISC_IMAGE_DUMP_SIZE;
		ISP_LOCK(isp);
		/* A nonzero first word marks a captured (unread) dump. */
		if (ptr && *ptr) {
			void *uaddr = *((void **) addr);
			if (copyout(ptr, uaddr, sz)) {
				retval = EFAULT;
			} else {
				/* Mark the dump as consumed. */
				*ptr = 0;
			}
		} else {
			retval = ENXIO;
		}
		ISP_UNLOCK(isp);
		break;
	}

	case ISP_FORCE_CRASH_DUMP:
		ISP_LOCK(isp);
		/* Freeze the midlayer queue while the chip is taken down. */
		if (isp->isp_osinfo.blocked == 0) {
			isp->isp_osinfo.blocked = 1;
			scsipi_channel_freeze(&isp->isp_chanA, 1);
		}
		isp_fw_dump(isp);
		isp_reinit(isp);
		ISP_UNLOCK(isp);
		retval = 0;
		break;
#endif
	case ISP_GET_STATS:
	{
		isp_stats_t *sp = (isp_stats_t *) addr;

		MEMZERO(sp, sizeof (*sp));
		sp->isp_stat_version = ISP_STATS_VERSION;
		sp->isp_type = isp->isp_type;
		sp->isp_revision = isp->isp_revision;
		ISP_LOCK(isp);
		/* Snapshot counters atomically with respect to the chip. */
		sp->isp_stats[ISP_INTCNT] = isp->isp_intcnt;
		sp->isp_stats[ISP_INTBOGUS] = isp->isp_intbogus;
		sp->isp_stats[ISP_INTMBOXC] = isp->isp_intmboxc;
		sp->isp_stats[ISP_INGOASYNC] = isp->isp_intoasync;
		sp->isp_stats[ISP_RSLTCCMPLT] = isp->isp_rsltccmplt;
		sp->isp_stats[ISP_FPHCCMCPLT] = isp->isp_fphccmplt;
		sp->isp_stats[ISP_RSCCHIWAT] = isp->isp_rscchiwater;
		sp->isp_stats[ISP_FPCCHIWAT] = isp->isp_fpcchiwater;
		ISP_UNLOCK(isp);
		retval = 0;
		break;
	}
	case ISP_CLR_STATS:
		ISP_LOCK(isp);
		isp->isp_intcnt = 0;
		isp->isp_intbogus = 0;
		isp->isp_intmboxc = 0;
		isp->isp_intoasync = 0;
		isp->isp_rsltccmplt = 0;
		isp->isp_fphccmplt = 0;
		isp->isp_rscchiwater = 0;
		isp->isp_fpcchiwater = 0;
		ISP_UNLOCK(isp);
		retval = 0;
		break;
	case ISP_SDBLEV:
	{
		/* Set new debug level; hand the old one back to the caller. */
		int olddblev = isp->isp_dblev;
		isp->isp_dblev = *(int *)addr;
		*(int *)addr = olddblev;
		retval = 0;
		break;
	}
	case ISP_RESETHBA:
		ISP_LOCK(isp);
		isp_reinit(isp);
		ISP_UNLOCK(isp);
		retval = 0;
		break;
	case ISP_RESCAN:
		/* FC only: re-evaluate loop/fabric state (5 second budget). */
		if (IS_FC(isp)) {
			ISP_LOCK(isp);
			if (isp_fc_runstate(isp, 5 * 1000000)) {
				retval = EIO;
			} else {
				retval = 0;
			}
			ISP_UNLOCK(isp);
		}
		break;
	case ISP_FC_LIP:
		/* FC only: force a Loop Initialization Primitive. */
		if (IS_FC(isp)) {
			ISP_LOCK(isp);
			if (isp_control(isp, ISPCTL_SEND_LIP, 0)) {
				retval = EIO;
			} else {
				retval = 0;
			}
			ISP_UNLOCK(isp);
		}
		break;
	case ISP_FC_GETDINFO:
	{
		/* Copy out port database info for one loop id. */
		struct isp_fc_device *ifc = (struct isp_fc_device *) addr;
		struct lportdb *lp;

		if (ifc->loopid < 0 || ifc->loopid >= MAX_FC_TARG) {
			retval = EINVAL;
			break;
		}
		ISP_LOCK(isp);
		lp = &FCPARAM(isp)->portdb[ifc->loopid];
		if (lp->valid) {
			ifc->loopid = lp->loopid;
			ifc->portid = lp->portid;
			ifc->node_wwn = lp->node_wwn;
			ifc->port_wwn = lp->port_wwn;
			retval = 0;
		} else {
			retval = ENODEV;
		}
		ISP_UNLOCK(isp);
		break;
	}
	case SCBUSIORESET:
		ISP_LOCK(isp);
		if (isp_control(isp, ISPCTL_RESET_BUS, &chan->chan_channel))
			retval = EIO;
		else
			retval = 0;
		ISP_UNLOCK(isp);
		break;
	default:
		break;
	}
	return (retval);
}
384
/*
 * Begin execution of a single scsipi transfer.
 *
 * Called from isprequest().  Handles late (re)initialization of the
 * chip, Fibre Channel loop settling before the FC worker thread
 * exists, polled commands, and maps isp_start() results onto midlayer
 * completions.  The softc lock is held for the whole operation.
 */
static INLINE void
ispcmd(struct ispsoftc *isp, XS_T *xs)
{
	ISP_LOCK(isp);
	if (isp->isp_state < ISP_RUNSTATE) {
		/* Bring the chip the rest of the way up, interrupts off. */
		DISABLE_INTS(isp);
		isp_init(isp);
		if (isp->isp_state != ISP_INITSTATE) {
			/* Initialization failed: fail the command. */
			ENABLE_INTS(isp);
			ISP_UNLOCK(isp);
			XS_SETERR(xs, HBA_BOTCH);
			scsipi_done(xs);
			return;
		}
		isp->isp_state = ISP_RUNSTATE;
		ENABLE_INTS(isp);
	}
	/*
	 * Handle the case of a FC card where the FC thread hasn't
	 * fired up yet and we have loop state to clean up. If we
	 * can't clear things up and we've never seen loop up, bounce
	 * the command.
	 */
	if (IS_FC(isp) && isp->isp_osinfo.threadwork &&
	    isp->isp_osinfo.thread == 0) {
		volatile u_int8_t ombi = isp->isp_osinfo.no_mbox_ints;
		int delay_time;

		/* Polled commands must not rely on mailbox interrupts. */
		if (xs->xs_control & XS_CTL_POLL) {
			isp->isp_osinfo.no_mbox_ints = 1;
		}

		/* First check gets a long settle time; retries a short one. */
		if (isp->isp_osinfo.loop_checked == 0) {
			delay_time = 10 * 1000000;
			isp->isp_osinfo.loop_checked = 1;
		} else {
			delay_time = 250000;
		}

		if (isp_fc_runstate(isp, delay_time) != 0) {
			if (xs->xs_control & XS_CTL_POLL) {
				isp->isp_osinfo.no_mbox_ints = ombi;
			}
			if (FCPARAM(isp)->loop_seen_once == 0) {
				XS_SETERR(xs, HBA_SELTIMEOUT);
				scsipi_done(xs);
				ISP_UNLOCK(isp);
				return;
			}
			/*
			 * Otherwise, fall thru to be queued up for later.
			 */
		} else {
			/* Loop is up: clear thread work and thaw if frozen. */
			int wasblocked =
			    (isp->isp_osinfo.blocked || isp->isp_osinfo.paused);
			isp->isp_osinfo.threadwork = 0;
			isp->isp_osinfo.blocked =
			    isp->isp_osinfo.paused = 0;
			if (wasblocked) {
				scsipi_channel_thaw(&isp->isp_chanA, 1);
			}
		}
		if (xs->xs_control & XS_CTL_POLL) {
			isp->isp_osinfo.no_mbox_ints = ombi;
		}
	}

	if (isp->isp_osinfo.paused) {
		isp_prt(isp, ISP_LOGWARN, "I/O while paused");
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		ISP_UNLOCK(isp);
		return;
	}
	if (isp->isp_osinfo.blocked) {
		isp_prt(isp, ISP_LOGWARN, "I/O while blocked");
		xs->error = XS_REQUEUE;
		scsipi_done(xs);
		ISP_UNLOCK(isp);
		return;
	}

	if (xs->xs_control & XS_CTL_POLL) {
		/* Run the command to completion without mailbox interrupts. */
		volatile u_int8_t ombi = isp->isp_osinfo.no_mbox_ints;
		isp->isp_osinfo.no_mbox_ints = 1;
		isp_polled_cmd(isp, xs);
		isp->isp_osinfo.no_mbox_ints = ombi;
		ISP_UNLOCK(isp);
		return;
	}

	switch (isp_start(xs)) {
	case CMD_QUEUED:
		/* Arm the per-command watchdog (timeout 0 = no watchdog). */
		if (xs->timeout) {
			callout_reset(&xs->xs_callout, _XT(xs), isp_dog, xs);
		}
		break;
	case CMD_EAGAIN:
		/* Out of chip resources: pause queues and ask for requeue. */
		isp->isp_osinfo.paused = 1;
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_channel_freeze(&isp->isp_chanA, 1);
		if (IS_DUALBUS(isp)) {
			scsipi_channel_freeze(&isp->isp_chanB, 1);
		}
		scsipi_done(xs);
		break;
	case CMD_RQLATER:
		/*
		 * We can only get RQLATER from FC devices (1 channel only)
		 *
		 * Also, if we've never seen loop up, bounce the command
		 * (somebody has booted with no FC cable connected)
		 */
		if (FCPARAM(isp)->loop_seen_once == 0) {
			XS_SETERR(xs, HBA_SELTIMEOUT);
			scsipi_done(xs);
			break;
		}
		if (isp->isp_osinfo.blocked == 0) {
			isp->isp_osinfo.blocked = 1;
			scsipi_channel_freeze(&isp->isp_chanA, 1);
		}
		xs->error = XS_REQUEUE;
		scsipi_done(xs);
		break;
	case CMD_COMPLETE:
		/* isp_start() already set the completion status. */
		scsipi_done(xs);
		break;
	}
	ISP_UNLOCK(isp);
}
516
517 static void
518 isprequest(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
519 {
520 struct ispsoftc *isp = (void *)chan->chan_adapter->adapt_dev;
521
522 switch (req) {
523 case ADAPTER_REQ_RUN_XFER:
524 ispcmd(isp, (XS_T *) arg);
525 break;
526
527 case ADAPTER_REQ_GROW_RESOURCES:
528 /* Not supported. */
529 break;
530
531 case ADAPTER_REQ_SET_XFER_MODE:
532 if (IS_SCSI(isp)) {
533 struct scsipi_xfer_mode *xm = arg;
534 int dflags = 0;
535 sdparam *sdp = SDPARAM(isp);
536
537 sdp += chan->chan_channel;
538 if (xm->xm_mode & PERIPH_CAP_TQING)
539 dflags |= DPARM_TQING;
540 if (xm->xm_mode & PERIPH_CAP_WIDE16)
541 dflags |= DPARM_WIDE;
542 if (xm->xm_mode & PERIPH_CAP_SYNC)
543 dflags |= DPARM_SYNC;
544 ISP_LOCK(isp);
545 sdp->isp_devparam[xm->xm_target].goal_flags |= dflags;
546 dflags = sdp->isp_devparam[xm->xm_target].goal_flags;
547 sdp->isp_devparam[xm->xm_target].dev_update = 1;
548 isp->isp_update |= (1 << chan->chan_channel);
549 ISP_UNLOCK(isp);
550 isp_prt(isp, ISP_LOGDEBUG1,
551 "ispioctl: device flags 0x%x for %d.%d.X",
552 dflags, chan->chan_channel, xm->xm_target);
553 break;
554 }
555 default:
556 break;
557 }
558 }
559
/*
 * Run a command to completion by polling the chip.
 *
 * Used for XS_CTL_POLL transfers (mailbox interrupts disabled by the
 * caller).  Spins in 1ms steps up to the command's timeout (a timeout
 * of 0 means wait forever); on expiry the command is aborted and, if
 * the abort fails, the chip is reinitialized.
 */
static void
isp_polled_cmd(struct ispsoftc *isp, XS_T *xs)
{
	int result;
	int infinite = 0, mswait;

	result = isp_start(xs);

	switch (result) {
	case CMD_QUEUED:
		break;
	case CMD_RQLATER:
		if (XS_NOERR(xs)) {
			xs->error = XS_REQUEUE;
		}
		/* FALLTHROUGH (error already set, EAGAIN won't overwrite) */
	case CMD_EAGAIN:
		if (XS_NOERR(xs)) {
			xs->error = XS_RESOURCE_SHORTAGE;
		}
		/* FALLTHROUGH */
	case CMD_COMPLETE:
		scsipi_done(xs);
		return;

	}

	/*
	 * If we can't use interrupts, poll on completion.
	 */
	if ((mswait = XS_TIME(xs)) == 0)
		infinite = 1;

	while (mswait || infinite) {
		u_int16_t isr, sema, mbox;
		if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
			isp_intr(isp, isr, sema, mbox);
			if (XS_CMD_DONE_P(xs)) {
				break;
			}
		}
		USEC_DELAY(1000);
		mswait -= 1;
	}

	/*
	 * If no other error occurred but we didn't finish,
	 * something bad happened.
	 */
	if (XS_CMD_DONE_P(xs) == 0) {
		if (isp_control(isp, ISPCTL_ABORT_CMD, xs)) {
			/* Abort failed; the chip is wedged — reinitialize. */
			isp_reinit(isp);
		}
		if (XS_NOERR(xs)) {
			XS_SETERR(xs, HBA_BOTCH);
		}
	}
	scsipi_done(xs);
}
618
/*
 * Platform completion hook, called by the core when a command finishes.
 *
 * Marks the command done, stops its watchdog callout, maps a SCSI
 * QUEUE FULL status onto XS_BUSY, thaws queues paused for resource
 * shortage, and hands the command back to the midlayer.  If the
 * watchdog currently owns the command (WDOG set), completion is left
 * for isp_dog() to finish.
 */
void
isp_done(XS_T *xs)
{
	XS_CMD_S_DONE(xs);
	if (XS_CMD_WDOG_P(xs) == 0) {
		struct ispsoftc *isp = XS_ISP(xs);
		callout_stop(&xs->xs_callout);
		if (XS_CMD_GRACE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1,
			    "finished command on borrowed time");
		}
		XS_CMD_S_CLEAR(xs);
		/*
		 * Fixup- if we get a QFULL, we need
		 * to set XS_BUSY as the error.
		 */
		if (xs->status == SCSI_QUEUE_FULL) {
			xs->error = XS_BUSY;
		}
		if (isp->isp_osinfo.paused) {
			/* A command finished, so chip resources are free again. */
			isp->isp_osinfo.paused = 0;
			scsipi_channel_timed_thaw(&isp->isp_chanA);
			if (IS_DUALBUS(isp)) {
				scsipi_channel_timed_thaw(&isp->isp_chanB);
			}
		}
		scsipi_done(xs);
	}
}
648
649 static void
650 isp_dog(void *arg)
651 {
652 XS_T *xs = arg;
653 struct ispsoftc *isp = XS_ISP(xs);
654 u_int16_t handle;
655
656 ISP_ILOCK(isp);
657 /*
658 * We've decided this command is dead. Make sure we're not trying
659 * to kill a command that's already dead by getting it's handle and
660 * and seeing whether it's still alive.
661 */
662 handle = isp_find_handle(isp, xs);
663 if (handle) {
664 u_int16_t isr, mbox, sema;
665
666 if (XS_CMD_DONE_P(xs)) {
667 isp_prt(isp, ISP_LOGDEBUG1,
668 "watchdog found done cmd (handle 0x%x)", handle);
669 ISP_IUNLOCK(isp);
670 return;
671 }
672
673 if (XS_CMD_WDOG_P(xs)) {
674 isp_prt(isp, ISP_LOGDEBUG1,
675 "recursive watchdog (handle 0x%x)", handle);
676 ISP_IUNLOCK(isp);
677 return;
678 }
679
680 XS_CMD_S_WDOG(xs);
681
682 if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
683 isp_intr(isp, isr, sema, mbox);
684
685 }
686 if (XS_CMD_DONE_P(xs)) {
687 isp_prt(isp, ISP_LOGDEBUG1,
688 "watchdog cleanup for handle 0x%x", handle);
689 XS_CMD_C_WDOG(xs);
690 isp_done(xs);
691 } else if (XS_CMD_GRACE_P(xs)) {
692 isp_prt(isp, ISP_LOGDEBUG1,
693 "watchdog timeout for handle 0x%x", handle);
694 /*
695 * Make sure the command is *really* dead before we
696 * release the handle (and DMA resources) for reuse.
697 */
698 (void) isp_control(isp, ISPCTL_ABORT_CMD, arg);
699
700 /*
701 * After this point, the comamnd is really dead.
702 */
703 if (XS_XFRLEN(xs)) {
704 ISP_DMAFREE(isp, xs, handle);
705 }
706 isp_destroy_handle(isp, handle);
707 XS_SETERR(xs, XS_TIMEOUT);
708 XS_CMD_S_CLEAR(xs);
709 isp_done(xs);
710 } else {
711 u_int16_t nxti, optr;
712 ispreq_t local, *mp = &local, *qe;
713 isp_prt(isp, ISP_LOGDEBUG2,
714 "possible command timeout on handle %x", handle);
715 XS_CMD_C_WDOG(xs);
716 callout_reset(&xs->xs_callout, hz, isp_dog, xs);
717 if (isp_getrqentry(isp, &nxti, &optr, (void **) &qe)) {
718 ISP_UNLOCK(isp);
719 return;
720 }
721 XS_CMD_S_GRACE(xs);
722 MEMZERO((void *) mp, sizeof (*mp));
723 mp->req_header.rqs_entry_count = 1;
724 mp->req_header.rqs_entry_type = RQSTYPE_MARKER;
725 mp->req_modifier = SYNC_ALL;
726 mp->req_target = XS_CHANNEL(xs) << 7;
727 isp_put_request(isp, mp, qe);
728 ISP_ADD_REQUEST(isp, nxti);
729 }
730 } else {
731 isp_prt(isp, ISP_LOGDEBUG0, "watchdog with no command");
732 }
733 ISP_IUNLOCK(isp);
734 }
735
736 /*
737 * Fibre Channel state cleanup thread
738 */
739 static void
740 isp_create_fc_worker(void *arg)
741 {
742 struct ispsoftc *isp = arg;
743
744 if (kthread_create1(isp_fc_worker, isp, &isp->isp_osinfo.thread,
745 "%s:fc_thrd", isp->isp_name)) {
746 isp_prt(isp, ISP_LOGERR, "unable to create FC worker thread");
747 panic("isp_create_fc_worker");
748 }
749
750 }
751
/*
 * Fibre Channel worker thread body.
 *
 * Sleeps until woken (typically from isp_async() change notifications),
 * then drives isp_fc_runstate() until loop/fabric state is settled and
 * thaws any queues frozen while the loop was down.  Exits when
 * isp->isp_osinfo.thread is cleared.
 */
static void
isp_fc_worker(void *arg)
{
	/*
	 * NOTE(review): local extern declaration, but scsipi_run_queue is
	 * never called in this function — presumably leftover; confirm
	 * before removing.
	 */
	void scsipi_run_queue(struct scsipi_channel *);
	struct ispsoftc *isp = arg;

	for (;;) {
		int s;

		/*
		 * Note we do *not* use the ISP_LOCK/ISP_UNLOCK macros here.
		 */
		s = splbio();
		while (isp->isp_osinfo.threadwork) {
			isp->isp_osinfo.threadwork = 0;
			/* Loop came ready: done retrying. */
			if (isp_fc_runstate(isp, 10 * 1000000) == 0) {
				break;
			}
			/* Never seen loop up at all: just go back to sleep. */
			if (isp->isp_osinfo.loop_checked &&
			    FCPARAM(isp)->loop_seen_once == 0) {
				splx(s);
				goto skip;
			}
			isp->isp_osinfo.threadwork = 1;
			/* Drop spl while we stall before retrying. */
			splx(s);
			delay(500 * 1000);
			s = splbio();
		}
		if (FCPARAM(isp)->isp_fwstate != FW_READY ||
		    FCPARAM(isp)->isp_loopstate != LOOP_READY) {
			isp_prt(isp, ISP_LOGINFO, "isp_fc_runstate in vain");
			isp->isp_osinfo.threadwork = 1;
			splx(s);
			continue;
		}

		if (isp->isp_osinfo.blocked) {
			isp->isp_osinfo.blocked = 0;
			isp_prt(isp, ISP_LOGDEBUG0,
			    "restarting queues (freeze count %d)",
			    isp->isp_chanA.chan_qfreeze);
			scsipi_channel_thaw(&isp->isp_chanA, 1);
		}

		/* A cleared thread pointer is the request to terminate. */
		if (isp->isp_osinfo.thread == NULL)
			break;

skip:
		(void) tsleep(&isp->isp_osinfo.thread, PRIBIO, "fcclnup", 0);

		/*
		 * NOTE(review): on the goto-skip path splx(s) has already
		 * been called once above; this second splx(s) restores the
		 * same saved level — looks harmless since splx restores an
		 * absolute level, but worth confirming.
		 */
		splx(s);
	}

	/* In case parent is waiting for us to exit. */
	wakeup(&isp->isp_osinfo.thread);

	kthread_exit(0);
}
810
/*
 * Free any associated resources prior to decommissioning and
 * set the card to a known state (so it doesn't wake up and kick
 * us when we aren't expecting it to).
 *
 * Locks are held before coming here.
 */
void
isp_uninit(struct ispsoftc *isp)
{
	/* Note: lowercase isp_lock/isp_unlock (functions), not ISP_LOCK. */
	isp_lock(isp);
	/*
	 * Leave with interrupts disabled.
	 */
	DISABLE_INTS(isp);
	isp_unlock(isp);
}
828
/*
 * Platform callback for asynchronous events posted by the common code.
 *
 * Handles transfer-mode updates, bus resets, FC loop/fabric state
 * changes (freezing queues and waking the FC worker thread), fabric
 * device arrival, and firmware crashes.  Always returns 0.
 */
int
isp_async(struct ispsoftc *isp, ispasync_t cmd, void *arg)
{
	int bus, tgt;

	switch (cmd) {
	case ISPASYNC_NEW_TGT_PARAMS:
		/*
		 * Report negotiated sync/wide/tag parameters to the
		 * midlayer.  arg encodes (bus << 16) | target.
		 *
		 * NOTE(review): if this guard is false the case falls
		 * through into ISPASYNC_BUS_RESET below and posts a reset
		 * event with a reinterpreted arg — confirm whether
		 * NEW_TGT_PARAMS can ever arrive with isp_dblev == 0.
		 */
		if (IS_SCSI(isp) && isp->isp_dblev) {
			sdparam *sdp = isp->isp_param;
			int flags;
			struct scsipi_xfer_mode xm;

			tgt = *((int *) arg);
			bus = (tgt >> 16) & 0xffff;
			tgt &= 0xffff;
			sdp += bus;
			flags = sdp->isp_devparam[tgt].actv_flags;

			xm.xm_mode = 0;
			xm.xm_period = sdp->isp_devparam[tgt].actv_period;
			xm.xm_offset = sdp->isp_devparam[tgt].actv_offset;
			xm.xm_target = tgt;

			if ((flags & DPARM_SYNC) && xm.xm_period && xm.xm_offset)
				xm.xm_mode |= PERIPH_CAP_SYNC;
			if (flags & DPARM_WIDE)
				xm.xm_mode |= PERIPH_CAP_WIDE16;
			if (flags & DPARM_TQING)
				xm.xm_mode |= PERIPH_CAP_TQING;
			scsipi_async_event(bus? &isp->isp_chanB : &isp->isp_chanA,
			    ASYNC_EVENT_XFER_MODE, &xm);
			break;
		}
	case ISPASYNC_BUS_RESET:
		bus = *((int *) arg);
		scsipi_async_event(bus? &isp->isp_chanB : &isp->isp_chanA,
		    ASYNC_EVENT_RESET, NULL);
		isp_prt(isp, ISP_LOGINFO, "SCSI bus %d reset detected", bus);
		break;
	case ISPASYNC_LIP:
		/*
		 * Don't do queue freezes or blockage until we have the
		 * thread running that can unfreeze/unblock us.
		 */
		if (isp->isp_osinfo.blocked == 0) {
			if (isp->isp_osinfo.thread) {
				isp->isp_osinfo.blocked = 1;
				scsipi_channel_freeze(&isp->isp_chanA, 1);
			}
		}
		isp_prt(isp, ISP_LOGINFO, "LIP Received");
		break;
	case ISPASYNC_LOOP_RESET:
		/*
		 * Don't do queue freezes or blockage until we have the
		 * thread running that can unfreeze/unblock us.
		 */
		if (isp->isp_osinfo.blocked == 0) {
			if (isp->isp_osinfo.thread) {
				isp->isp_osinfo.blocked = 1;
				scsipi_channel_freeze(&isp->isp_chanA, 1);
			}
		}
		isp_prt(isp, ISP_LOGINFO, "Loop Reset Received");
		break;
	case ISPASYNC_LOOP_DOWN:
		/*
		 * Don't do queue freezes or blockage until we have the
		 * thread running that can unfreeze/unblock us.
		 */
		if (isp->isp_osinfo.blocked == 0) {
			if (isp->isp_osinfo.thread) {
				isp->isp_osinfo.blocked = 1;
				scsipi_channel_freeze(&isp->isp_chanA, 1);
			}
		}
		isp_prt(isp, ISP_LOGINFO, "Loop DOWN");
		break;
	case ISPASYNC_LOOP_UP:
		/*
		 * Let the subsequent ISPASYNC_CHANGE_NOTIFY invoke
		 * the FC worker thread. When the FC worker thread
		 * is done, let *it* call scsipi_channel_thaw...
		 */
		isp_prt(isp, ISP_LOGINFO, "Loop UP");
		break;
	case ISPASYNC_PROMENADE:
		/*
		 * A device arrived on or departed from the loop.
		 *
		 * NOTE(review): like NEW_TGT_PARAMS above, a false guard
		 * here falls through into ISPASYNC_CHANGE_NOTIFY with arg
		 * reinterpreted — confirm this is intended.
		 */
		if (IS_FC(isp) && isp->isp_dblev) {
			const char fmt[] = "Target %d (Loop 0x%x) Port ID 0x%x "
			    "(role %s) %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x";
			const static char *roles[4] = {
			    "None", "Target", "Initiator", "Target/Initiator"
			};
			fcparam *fcp = isp->isp_param;
			int tgt = *((int *) arg);
			struct lportdb *lp = &fcp->portdb[tgt];

			isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid, lp->portid,
			    roles[lp->roles & 0x3],
			    (lp->valid)? "Arrived" : "Departed",
			    (u_int32_t) (lp->port_wwn >> 32),
			    (u_int32_t) (lp->port_wwn & 0xffffffffLL),
			    (u_int32_t) (lp->node_wwn >> 32),
			    (u_int32_t) (lp->node_wwn & 0xffffffffLL));
			break;
		}
	case ISPASYNC_CHANGE_NOTIFY:
		if (arg == ISPASYNC_CHANGE_PDB) {
			isp_prt(isp, ISP_LOGINFO, "Port Database Changed");
		} else if (arg == ISPASYNC_CHANGE_SNS) {
			isp_prt(isp, ISP_LOGINFO,
			    "Name Server Database Changed");
		}

		/*
		 * We can set blocked here because we know it's now okay
		 * to try and run isp_fc_runstate (in order to build loop
		 * state). But we don't try and freeze the midlayer's queue
		 * if we have no thread that we can wake to later unfreeze
		 * it.
		 */
		if (isp->isp_osinfo.blocked == 0) {
			isp->isp_osinfo.blocked = 1;
			if (isp->isp_osinfo.thread) {
				scsipi_channel_freeze(&isp->isp_chanA, 1);
			}
		}
		/*
		 * Note that we have work for the thread to do, and
		 * if the thread is here already, wake it up.
		 */
		isp->isp_osinfo.threadwork++;
		if (isp->isp_osinfo.thread) {
			wakeup(&isp->isp_osinfo.thread);
		} else {
			isp_prt(isp, ISP_LOGDEBUG1, "no FC thread yet");
		}
		break;
	case ISPASYNC_FABRIC_DEV:
	{
		int target, base, lim;
		fcparam *fcp = isp->isp_param;
		struct lportdb *lp = NULL;
		struct lportdb *clp = (struct lportdb *) arg;
		char *pt;

		/* Decode port type purely for the announcement below. */
		switch (clp->port_type) {
		case 1:
			pt = "   N_Port";
			break;
		case 2:
			pt = "  NL_Port";
			break;
		case 3:
			pt = "F/NL_Port";
			break;
		case 0x7f:
			pt = "  Nx_Port";
			break;
		case 0x81:
			pt = "  F_port";
			break;
		case 0x82:
			pt = "  FL_Port";
			break;
		case 0x84:
			pt = "   E_port";
			break;
		default:
			pt = " ";
			break;
		}

		isp_prt(isp, ISP_LOGINFO,
		    "%s Fabric Device @ PortID 0x%x", pt, clp->portid);

		/*
		 * If we don't have an initiator role we bail.
		 *
		 * We just use ISPASYNC_FABRIC_DEV for announcement purposes.
		 */

		if ((isp->isp_role & ISP_ROLE_INITIATOR) == 0) {
			break;
		}

		/*
		 * Is this entry for us? If so, we bail.
		 */

		if (fcp->isp_portid == clp->portid) {
			break;
		}

		/*
		 * Else, the default policy is to find room for it in
		 * our local port database. Later, when we execute
		 * the call to isp_pdb_sync either this newly arrived
		 * or already logged in device will be (re)announced.
		 */

		if (fcp->isp_topo == TOPO_FL_PORT)
			base = FC_SNS_ID+1;
		else
			base = 0;

		if (fcp->isp_topo == TOPO_N_PORT)
			lim = 1;
		else
			lim = MAX_FC_TARG;

		/*
		 * Is it already in our list?
		 */
		for (target = base; target < lim; target++) {
			/* Skip the reserved FL/SNS well-known address range. */
			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
				continue;
			}
			lp = &fcp->portdb[target];
			if (lp->port_wwn == clp->port_wwn &&
			    lp->node_wwn == clp->node_wwn) {
				lp->fabric_dev = 1;
				break;
			}
		}
		if (target < lim) {
			/* Already known; nothing more to do. */
			break;
		}
		/* Not known: find a free slot in the same range. */
		for (target = base; target < lim; target++) {
			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
				continue;
			}
			lp = &fcp->portdb[target];
			if (lp->port_wwn == 0) {
				break;
			}
		}
		if (target == lim) {
			isp_prt(isp, ISP_LOGWARN,
			    "out of space for fabric devices");
			break;
		}
		lp->port_type = clp->port_type;
		lp->fc4_type = clp->fc4_type;
		lp->node_wwn = clp->node_wwn;
		lp->port_wwn = clp->port_wwn;
		lp->portid = clp->portid;
		lp->fabric_dev = 1;
		break;
	}
	case ISPASYNC_FW_CRASH:
	{
		u_int16_t mbox1, mbox6;
		mbox1 = ISP_READ(isp, OUTMAILBOX1);
		if (IS_DUALBUS(isp)) {
			mbox6 = ISP_READ(isp, OUTMAILBOX6);
		} else {
			mbox6 = 0;
		}
		isp_prt(isp, ISP_LOGERR,
		    "Internal Firmware Error on bus %d @ RISC Address 0x%x",
		    mbox6, mbox1);
		isp_reinit(isp);
		break;
	}
	default:
		break;
	}
	return (0);
}
1099
1100 #include <machine/stdarg.h>
1101 void
1102 isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
1103 {
1104 va_list ap;
1105 if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
1106 return;
1107 }
1108 printf("%s: ", isp->isp_name);
1109 va_start(ap, fmt);
1110 vprintf(fmt, ap);
1111 va_end(ap);
1112 printf("\n");
1113 }
1114