isp_netbsd.c revision 1.56 1 /* $NetBSD: isp_netbsd.c,v 1.56 2002/08/12 21:33:40 mjacob Exp $ */
2 /*
3 * This driver, which is contained in NetBSD in the files:
4 *
5 * sys/dev/ic/isp.c
6 * sys/dev/ic/isp_inline.h
7 * sys/dev/ic/isp_netbsd.c
8 * sys/dev/ic/isp_netbsd.h
9 * sys/dev/ic/isp_target.c
10 * sys/dev/ic/isp_target.h
11 * sys/dev/ic/isp_tpublic.h
12 * sys/dev/ic/ispmbox.h
13 * sys/dev/ic/ispreg.h
14 * sys/dev/ic/ispvar.h
15 * sys/microcode/isp/asm_sbus.h
16 * sys/microcode/isp/asm_1040.h
17 * sys/microcode/isp/asm_1080.h
18 * sys/microcode/isp/asm_12160.h
19 * sys/microcode/isp/asm_2100.h
20 * sys/microcode/isp/asm_2200.h
21 * sys/pci/isp_pci.c
22 * sys/sbus/isp_sbus.c
23 *
 * Is being actively maintained by Matthew Jacob (mjacob (at) netbsd.org).
 * This driver also is shared source with the FreeBSD, OpenBSD, Linux and
 * Solaris versions. This tends to be an interesting maintenance problem.
27 *
28 * Please coordinate with Matthew Jacob on changes you wish to make here.
29 */
30 /*
31 * Platform (NetBSD) dependent common attachment code for Qlogic adapters.
32 * Matthew Jacob <mjacob (at) nas.nasa.gov>
33 */
34 /*
35 * Copyright (C) 1997, 1998, 1999 National Aeronautics & Space Administration
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. The name of the author may not be used to endorse or promote products
47 * derived from this software without specific prior written permission
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
50 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
51 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
52 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
53 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
54 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
55 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
56 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
57 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
58 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
59 */
60
61 #include <sys/cdefs.h>
62 __KERNEL_RCSID(0, "$NetBSD: isp_netbsd.c,v 1.56 2002/08/12 21:33:40 mjacob Exp $");
63
64 #include <dev/ic/isp_netbsd.h>
65 #include <sys/scsiio.h>
66
67
/*
 * Set a timeout for the watchdogging of a command.
 *
 * The dimensional analysis is
 *
 *	milliseconds * (seconds/millisecond) * (ticks/second) = ticks
 *
 *	=
 *
 *	(milliseconds / 1000) * hz = ticks
 *
 *
 * For timeouts less than 1 second, we'll get zero. Because of this, and
 * because we want to establish *our* timeout to be longer than what the
 * firmware might do, we just add 3 seconds at the back end.
 */
#define	_XT(xs)	((((xs)->timeout/1000) * hz) + (3 * hz))
85
/* Forward declarations for this file's static helpers. */
static void isp_config_interrupts(struct device *);
static void ispminphys_1020(struct buf *);
static void ispminphys(struct buf *);
static INLINE void ispcmd(struct ispsoftc *, XS_T *);
static void isprequest(struct scsipi_channel *, scsipi_adapter_req_t, void *);
static int
ispioctl(struct scsipi_channel *, u_long, caddr_t, int, struct proc *);

static void isp_polled_cmd(struct ispsoftc *, XS_T *);
static void isp_dog(void *);
static void isp_create_fc_worker(void *);
static void isp_fc_worker(void *);
98
/*
 * Complete attachment of hardware, include subdevices.
 *
 * Fills in the scsipi adapter/channel descriptions, starts the FC
 * worker thread (FC cards) or resets the bus(es) (SCSI cards), then
 * attaches children via config_found().
 */
void
isp_attach(struct ispsoftc *isp)
{
	isp->isp_state = ISP_RUNSTATE;

	/* Describe this adapter to the scsipi midlayer. */
	isp->isp_osinfo._adapter.adapt_dev = &isp->isp_osinfo._dev;
	isp->isp_osinfo._adapter.adapt_nchannels = IS_DUALBUS(isp) ? 2 : 1;
	isp->isp_osinfo._adapter.adapt_openings = isp->isp_maxcmds;
	/*
	 * It's not stated whether max_periph is limited by SPI
	 * tag usage, but let's assume that it is.
	 */
	isp->isp_osinfo._adapter.adapt_max_periph = min(isp->isp_maxcmds, 255);
	isp->isp_osinfo._adapter.adapt_ioctl = ispioctl;
	isp->isp_osinfo._adapter.adapt_request = isprequest;
	if (isp->isp_type <= ISP_HA_SCSI_1020A) {
		/* 1020-class chips need the smaller minphys clamp. */
		isp->isp_osinfo._adapter.adapt_minphys = ispminphys_1020;
	} else {
		isp->isp_osinfo._adapter.adapt_minphys = ispminphys;
	}

	isp->isp_osinfo._chan.chan_adapter = &isp->isp_osinfo._adapter;
	isp->isp_osinfo._chan.chan_bustype = &scsi_bustype;
	isp->isp_osinfo._chan.chan_channel = 0;

	/*
	 * Until the midlayer is fixed to use REPORT LUNS, limit to 8 luns.
	 */
	isp->isp_osinfo._chan.chan_nluns = min(isp->isp_maxluns, 8);

	if (IS_FC(isp)) {
		isp->isp_osinfo._chan.chan_ntargets = MAX_FC_TARG;
		isp->isp_osinfo._chan.chan_id = MAX_FC_TARG;
		isp->isp_osinfo.threadwork = 1;
		/*
		 * Note that isp_create_fc_worker won't get called
		 * until much much later (after proc0 is created).
		 */
		kthread_create(isp_create_fc_worker, isp);
#ifdef ISP_FW_CRASH_DUMP
		/* Preallocate a firmware crash dump buffer; failure here
		 * is non-fatal (dumps simply won't be available). */
		if (IS_2200(isp)) {
			FCPARAM(isp)->isp_dump_data =
			    malloc(QLA2200_RISC_IMAGE_DUMP_SIZE, M_DEVBUF,
				M_NOWAIT);
		} else if (IS_23XX(isp)) {
			FCPARAM(isp)->isp_dump_data =
			    malloc(QLA2300_RISC_IMAGE_DUMP_SIZE, M_DEVBUF,
				M_NOWAIT);
		}
		if (FCPARAM(isp)->isp_dump_data)
			FCPARAM(isp)->isp_dump_data[0] = 0;
#endif
	} else {
		int bus = 0;
		sdparam *sdp = isp->isp_param;

		isp->isp_osinfo._chan.chan_ntargets = MAX_TARGETS;
		isp->isp_osinfo._chan.chan_id = sdp->isp_initiator_id;
		isp->isp_osinfo.discovered[0] = 1 << sdp->isp_initiator_id;
		if (IS_DUALBUS(isp)) {
			/* Clone channel A's description for channel B. */
			isp->isp_osinfo._chan_b = isp->isp_osinfo._chan;
			sdp++;
			isp->isp_osinfo.discovered[1] =
			    1 << sdp->isp_initiator_id;
			isp->isp_osinfo._chan_b.chan_id = sdp->isp_initiator_id;
			isp->isp_osinfo._chan_b.chan_channel = 1;
		}
		/* Reset the parallel SCSI bus(es) before attaching children. */
		ISP_LOCK(isp);
		(void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
		if (IS_DUALBUS(isp)) {
			bus++;
			(void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
		}
		ISP_UNLOCK(isp);
	}


	/*
	 * Defer enabling mailbox interrupts until later.
	 */
	config_interrupts((struct device *) isp, isp_config_interrupts);

	/*
	 * And attach children (if any).
	 */
	config_found((void *)isp, &isp->isp_chanA, scsiprint);
	if (IS_DUALBUS(isp)) {
		config_found((void *)isp, &isp->isp_chanB, scsiprint);
	}
}
192
193
static void
isp_config_interrupts(struct device *self)
{
	struct ispsoftc *isp = (struct ispsoftc *) self;

	/*
	 * After this point, we'll be doing the new configuration
	 * schema which allows interrupts, so we can do tsleep/wakeup
	 * for mailbox stuff at that point.
	 */
	isp->isp_osinfo.no_mbox_ints = 0;
}
206
207
208 /*
209 * minphys our xfers
210 */
211
212 static void
213 ispminphys_1020(struct buf *bp)
214 {
215 if (bp->b_bcount >= (1 << 24)) {
216 bp->b_bcount = (1 << 24);
217 }
218 minphys(bp);
219 }
220
221 static void
222 ispminphys(struct buf *bp)
223 {
224 if (bp->b_bcount >= (1 << 30)) {
225 bp->b_bcount = (1 << 30);
226 }
227 minphys(bp);
228 }
229
230 static int
231 ispioctl(struct scsipi_channel *chan, u_long cmd, caddr_t addr, int flag,
232 struct proc *p)
233 {
234 struct ispsoftc *isp = (void *)chan->chan_adapter->adapt_dev;
235 int retval = ENOTTY;
236
237 switch (cmd) {
238 #ifdef ISP_FW_CRASH_DUMP
239 case ISP_GET_FW_CRASH_DUMP:
240 {
241 u_int16_t *ptr = FCPARAM(isp)->isp_dump_data;
242 size_t sz;
243
244 retval = 0;
245 if (IS_2200(isp))
246 sz = QLA2200_RISC_IMAGE_DUMP_SIZE;
247 else
248 sz = QLA2300_RISC_IMAGE_DUMP_SIZE;
249 ISP_LOCK(isp);
250 if (ptr && *ptr) {
251 void *uaddr = *((void **) addr);
252 if (copyout(ptr, uaddr, sz)) {
253 retval = EFAULT;
254 } else {
255 *ptr = 0;
256 }
257 } else {
258 retval = ENXIO;
259 }
260 ISP_UNLOCK(isp);
261 break;
262 }
263
264 case ISP_FORCE_CRASH_DUMP:
265 ISP_LOCK(isp);
266 if (isp->isp_osinfo.blocked == 0) {
267 isp->isp_osinfo.blocked = 1;
268 scsipi_channel_freeze(&isp->isp_chanA, 1);
269 }
270 isp_fw_dump(isp);
271 isp_reinit(isp);
272 ISP_UNLOCK(isp);
273 retval = 0;
274 break;
275 #endif
276 case ISP_SDBLEV:
277 {
278 int olddblev = isp->isp_dblev;
279 isp->isp_dblev = *(int *)addr;
280 *(int *)addr = olddblev;
281 retval = 0;
282 break;
283 }
284 case ISP_RESETHBA:
285 ISP_LOCK(isp);
286 isp_reinit(isp);
287 ISP_UNLOCK(isp);
288 retval = 0;
289 break;
290 case ISP_RESCAN:
291 if (IS_FC(isp)) {
292 ISP_LOCK(isp);
293 if (isp_fc_runstate(isp, 5 * 1000000)) {
294 retval = EIO;
295 } else {
296 retval = 0;
297 }
298 ISP_UNLOCK(isp);
299 }
300 break;
301 case ISP_FC_LIP:
302 if (IS_FC(isp)) {
303 ISP_LOCK(isp);
304 if (isp_control(isp, ISPCTL_SEND_LIP, 0)) {
305 retval = EIO;
306 } else {
307 retval = 0;
308 }
309 ISP_UNLOCK(isp);
310 }
311 break;
312 case ISP_FC_GETDINFO:
313 {
314 struct isp_fc_device *ifc = (struct isp_fc_device *) addr;
315 struct lportdb *lp;
316
317 if (ifc->loopid < 0 || ifc->loopid >= MAX_FC_TARG) {
318 retval = EINVAL;
319 break;
320 }
321 ISP_LOCK(isp);
322 lp = &FCPARAM(isp)->portdb[ifc->loopid];
323 if (lp->valid) {
324 ifc->loopid = lp->loopid;
325 ifc->portid = lp->portid;
326 ifc->node_wwn = lp->node_wwn;
327 ifc->port_wwn = lp->port_wwn;
328 retval = 0;
329 } else {
330 retval = ENODEV;
331 }
332 ISP_UNLOCK(isp);
333 break;
334 }
335 case ISP_GET_STATS:
336 {
337 isp_stats_t *sp = (isp_stats_t *) addr;
338
339 MEMZERO(sp, sizeof (*sp));
340 sp->isp_stat_version = ISP_STATS_VERSION;
341 sp->isp_type = isp->isp_type;
342 sp->isp_revision = isp->isp_revision;
343 ISP_LOCK(isp);
344 sp->isp_stats[ISP_INTCNT] = isp->isp_intcnt;
345 sp->isp_stats[ISP_INTBOGUS] = isp->isp_intbogus;
346 sp->isp_stats[ISP_INTMBOXC] = isp->isp_intmboxc;
347 sp->isp_stats[ISP_INGOASYNC] = isp->isp_intoasync;
348 sp->isp_stats[ISP_RSLTCCMPLT] = isp->isp_rsltccmplt;
349 sp->isp_stats[ISP_FPHCCMCPLT] = isp->isp_fphccmplt;
350 sp->isp_stats[ISP_RSCCHIWAT] = isp->isp_rscchiwater;
351 sp->isp_stats[ISP_FPCCHIWAT] = isp->isp_fpcchiwater;
352 ISP_UNLOCK(isp);
353 retval = 0;
354 break;
355 }
356 case ISP_CLR_STATS:
357 ISP_LOCK(isp);
358 isp->isp_intcnt = 0;
359 isp->isp_intbogus = 0;
360 isp->isp_intmboxc = 0;
361 isp->isp_intoasync = 0;
362 isp->isp_rsltccmplt = 0;
363 isp->isp_fphccmplt = 0;
364 isp->isp_rscchiwater = 0;
365 isp->isp_fpcchiwater = 0;
366 ISP_UNLOCK(isp);
367 retval = 0;
368 break;
369 case ISP_FC_GETHINFO:
370 {
371 struct isp_hba_device *hba = (struct isp_hba_device *) addr;
372 MEMZERO(hba, sizeof (*hba));
373 ISP_LOCK(isp);
374 hba->fc_speed = FCPARAM(isp)->isp_gbspeed;
375 hba->fc_scsi_supported = 1;
376 hba->fc_topology = FCPARAM(isp)->isp_topo + 1;
377 hba->fc_loopid = FCPARAM(isp)->isp_loopid;
378 hba->active_node_wwn = FCPARAM(isp)->isp_nodewwn;
379 hba->active_port_wwn = FCPARAM(isp)->isp_portwwn;
380 ISP_UNLOCK(isp);
381 break;
382 }
383 case SCBUSIORESET:
384 ISP_LOCK(isp);
385 if (isp_control(isp, ISPCTL_RESET_BUS, &chan->chan_channel))
386 retval = EIO;
387 else
388 retval = 0;
389 ISP_UNLOCK(isp);
390 break;
391 default:
392 break;
393 }
394 return (retval);
395 }
396
/*
 * Queue (or poll) one command on behalf of the midlayer.
 *
 * Called (via isprequest) with the xfer to run.  Completion is always
 * reported through scsipi_done(), either immediately on error/polled
 * completion or later from interrupt context.
 */
static INLINE void
ispcmd(struct ispsoftc *isp, XS_T *xs)
{
	ISP_LOCK(isp);
	if (isp->isp_state < ISP_RUNSTATE) {
		/* First command: bring the chip up to run state. */
		DISABLE_INTS(isp);
		isp_init(isp);
		if (isp->isp_state != ISP_INITSTATE) {
			ENABLE_INTS(isp);
			ISP_UNLOCK(isp);
			isp_prt(isp, ISP_LOGERR, "isp not at init state");
			XS_SETERR(xs, HBA_BOTCH);
			scsipi_done(xs);
			return;
		}
		isp->isp_state = ISP_RUNSTATE;
		ENABLE_INTS(isp);
	}
	/*
	 * Handle the case of a FC card where the FC thread hasn't
	 * fired up yet and we have loop state to clean up. If we
	 * can't clear things up and we've never seen loop up, bounce
	 * the command.
	 */
	if (IS_FC(isp) && isp->isp_osinfo.threadwork &&
	    isp->isp_osinfo.thread == 0) {
		volatile u_int8_t ombi = isp->isp_osinfo.no_mbox_ints;
		int delay_time;

		if (xs->xs_control & XS_CTL_POLL) {
			/* Polled mode cannot sleep on mailbox interrupts. */
			isp->isp_osinfo.no_mbox_ints = 1;
		}

		if (isp->isp_osinfo.loop_checked == 0) {
			/* First attempt: allow the loop up to 10 seconds. */
			delay_time = 10 * 1000000;
			isp->isp_osinfo.loop_checked = 1;
		} else {
			delay_time = 250000;
		}

		if (isp_fc_runstate(isp, delay_time) != 0) {
			if (xs->xs_control & XS_CTL_POLL) {
				isp->isp_osinfo.no_mbox_ints = ombi;
			}
			if (FCPARAM(isp)->loop_seen_once == 0) {
				/* Loop never up: probably no cable. */
				XS_SETERR(xs, HBA_SELTIMEOUT);
				scsipi_done(xs);
				ISP_UNLOCK(isp);
				return;
			}
			/*
			 * Otherwise, fall thru to be queued up for later.
			 */
		} else {
			int wasblocked =
			    (isp->isp_osinfo.blocked || isp->isp_osinfo.paused);
			isp->isp_osinfo.threadwork = 0;
			isp->isp_osinfo.blocked =
			    isp->isp_osinfo.paused = 0;
			if (wasblocked) {
				scsipi_channel_thaw(&isp->isp_chanA, 1);
			}
		}
		if (xs->xs_control & XS_CTL_POLL) {
			isp->isp_osinfo.no_mbox_ints = ombi;
		}
	}

	if (isp->isp_osinfo.paused) {
		/* Out of chip resources: bounce with a retryable error. */
		isp_prt(isp, ISP_LOGWARN, "I/O while paused");
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		ISP_UNLOCK(isp);
		return;
	}
	if (isp->isp_osinfo.blocked) {
		/* Loop in transition: ask the midlayer to requeue. */
		isp_prt(isp, ISP_LOGWARN, "I/O while blocked");
		xs->error = XS_REQUEUE;
		scsipi_done(xs);
		ISP_UNLOCK(isp);
		return;
	}

	if (xs->xs_control & XS_CTL_POLL) {
		volatile u_int8_t ombi = isp->isp_osinfo.no_mbox_ints;
		isp->isp_osinfo.no_mbox_ints = 1;
		isp_polled_cmd(isp, xs);
		isp->isp_osinfo.no_mbox_ints = ombi;
		ISP_UNLOCK(isp);
		return;
	}

	switch (isp_start(xs)) {
	case CMD_QUEUED:
		if (xs->timeout) {
			/* Arm our per-command watchdog. */
			callout_reset(&xs->xs_callout, _XT(xs), isp_dog, xs);
		}
		break;
	case CMD_EAGAIN:
		/* Chip queue full: freeze until isp_done() thaws us. */
		isp->isp_osinfo.paused = 1;
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_channel_freeze(&isp->isp_chanA, 1);
		if (IS_DUALBUS(isp)) {
			scsipi_channel_freeze(&isp->isp_chanB, 1);
		}
		scsipi_done(xs);
		break;
	case CMD_RQLATER:
		/*
		 * We can only get RQLATER from FC devices (1 channel only)
		 *
		 * Also, if we've never seen loop up, bounce the command
		 * (somebody has booted with no FC cable connected)
		 */
		if (FCPARAM(isp)->loop_seen_once == 0) {
			XS_SETERR(xs, HBA_SELTIMEOUT);
			scsipi_done(xs);
			break;
		}
		if (isp->isp_osinfo.blocked == 0) {
			isp->isp_osinfo.blocked = 1;
			scsipi_channel_freeze(&isp->isp_chanA, 1);
		}
		xs->error = XS_REQUEUE;
		scsipi_done(xs);
		break;
	case CMD_COMPLETE:
		scsipi_done(xs);
		break;
	}
	ISP_UNLOCK(isp);
}
529
530 static void
531 isprequest(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
532 {
533 struct ispsoftc *isp = (void *)chan->chan_adapter->adapt_dev;
534
535 switch (req) {
536 case ADAPTER_REQ_RUN_XFER:
537 ispcmd(isp, (XS_T *) arg);
538 break;
539
540 case ADAPTER_REQ_GROW_RESOURCES:
541 /* Not supported. */
542 break;
543
544 case ADAPTER_REQ_SET_XFER_MODE:
545 if (IS_SCSI(isp)) {
546 struct scsipi_xfer_mode *xm = arg;
547 int dflags = 0;
548 sdparam *sdp = SDPARAM(isp);
549
550 sdp += chan->chan_channel;
551 if (xm->xm_mode & PERIPH_CAP_TQING)
552 dflags |= DPARM_TQING;
553 if (xm->xm_mode & PERIPH_CAP_WIDE16)
554 dflags |= DPARM_WIDE;
555 if (xm->xm_mode & PERIPH_CAP_SYNC)
556 dflags |= DPARM_SYNC;
557 ISP_LOCK(isp);
558 sdp->isp_devparam[xm->xm_target].goal_flags |= dflags;
559 dflags = sdp->isp_devparam[xm->xm_target].goal_flags;
560 sdp->isp_devparam[xm->xm_target].dev_update = 1;
561 isp->isp_update |= (1 << chan->chan_channel);
562 ISP_UNLOCK(isp);
563 isp_prt(isp, ISP_LOGDEBUG1,
564 "ispioctl: device flags 0x%x for %d.%d.X",
565 dflags, chan->chan_channel, xm->xm_target);
566 break;
567 }
568 default:
569 break;
570 }
571 }
572
/*
 * Issue a command and busy-wait for its completion with interrupts
 * masked (XS_CTL_POLL transfers, e.g. during autoconfiguration).
 * A zero xs timeout means "wait forever".
 */
static void
isp_polled_cmd(struct ispsoftc *isp, XS_T *xs)
{
	int result;
	int infinite = 0, mswait;

	result = isp_start(xs);

	switch (result) {
	case CMD_QUEUED:
		break;
	case CMD_RQLATER:
		if (XS_NOERR(xs)) {
			xs->error = XS_REQUEUE;
		}
		/* FALLTHROUGH (EAGAIN's XS_NOERR test is now false) */
	case CMD_EAGAIN:
		if (XS_NOERR(xs)) {
			xs->error = XS_RESOURCE_SHORTAGE;
		}
		/* FALLTHROUGH */
	case CMD_COMPLETE:
		scsipi_done(xs);
		return;

	}

	/*
	 * If we can't use interrupts, poll on completion.
	 */
	if ((mswait = XS_TIME(xs)) == 0)
		infinite = 1;

	/* Poll roughly once per millisecond until done or timed out. */
	while (mswait || infinite) {
		u_int16_t isr, sema, mbox;
		if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
			isp_intr(isp, isr, sema, mbox);
			if (XS_CMD_DONE_P(xs)) {
				break;
			}
		}
		USEC_DELAY(1000);
		mswait -= 1;
	}

	/*
	 * If no other error occurred but we didn't finish,
	 * something bad happened.
	 */
	if (XS_CMD_DONE_P(xs) == 0) {
		if (isp_control(isp, ISPCTL_ABORT_CMD, xs)) {
			/* Couldn't even abort it: restart the chip. */
			isp_reinit(isp);
		}
		if (XS_NOERR(xs)) {
			isp_prt(isp, ISP_LOGERR, "polled command timed out");
			XS_SETERR(xs, HBA_BOTCH);
		}
	}
	scsipi_done(xs);
}
632
/*
 * Command completion, called from the machine-independent core.
 *
 * Stops the watchdog (unless the watchdog itself is running, in which
 * case it finishes the job), maps QUEUE FULL status to XS_BUSY, thaws
 * paused queues, and hands the xfer back to the midlayer.
 */
void
isp_done(XS_T *xs)
{
	XS_CMD_S_DONE(xs);
	if (XS_CMD_WDOG_P(xs) == 0) {
		struct ispsoftc *isp = XS_ISP(xs);
		callout_stop(&xs->xs_callout);
		if (XS_CMD_GRACE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1,
			    "finished command on borrowed time");
		}
		XS_CMD_S_CLEAR(xs);
		/*
		 * Fixup- if we get a QFULL, we need
		 * to set XS_BUSY as the error.
		 */
		if (xs->status == SCSI_QUEUE_FULL) {
			xs->error = XS_BUSY;
		}
		if (isp->isp_osinfo.paused) {
			/* Resources freed up: let the queues run again. */
			isp->isp_osinfo.paused = 0;
			scsipi_channel_timed_thaw(&isp->isp_chanA);
			if (IS_DUALBUS(isp)) {
				scsipi_channel_timed_thaw(&isp->isp_chanB);
			}
		}
		if (xs->error == XS_DRIVER_STUFFUP) {
			isp_prt(isp, ISP_LOGERR, "BOTCHED cmd for %d.%d.%d cmd 0x%x datalen %ld",
			    XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs), XS_CDBP(xs)[0], (long) XS_XFRLEN(xs));
		}
		scsipi_done(xs);
	}
}
666
667 static void
668 isp_dog(void *arg)
669 {
670 XS_T *xs = arg;
671 struct ispsoftc *isp = XS_ISP(xs);
672 u_int16_t handle;
673
674 ISP_ILOCK(isp);
675 /*
676 * We've decided this command is dead. Make sure we're not trying
677 * to kill a command that's already dead by getting it's handle and
678 * and seeing whether it's still alive.
679 */
680 handle = isp_find_handle(isp, xs);
681 if (handle) {
682 u_int16_t isr, mbox, sema;
683
684 if (XS_CMD_DONE_P(xs)) {
685 isp_prt(isp, ISP_LOGDEBUG1,
686 "watchdog found done cmd (handle 0x%x)", handle);
687 ISP_IUNLOCK(isp);
688 return;
689 }
690
691 if (XS_CMD_WDOG_P(xs)) {
692 isp_prt(isp, ISP_LOGDEBUG1,
693 "recursive watchdog (handle 0x%x)", handle);
694 ISP_IUNLOCK(isp);
695 return;
696 }
697
698 XS_CMD_S_WDOG(xs);
699
700 if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
701 isp_intr(isp, isr, sema, mbox);
702
703 }
704 if (XS_CMD_DONE_P(xs)) {
705 isp_prt(isp, ISP_LOGDEBUG1,
706 "watchdog cleanup for handle 0x%x", handle);
707 XS_CMD_C_WDOG(xs);
708 isp_done(xs);
709 } else if (XS_CMD_GRACE_P(xs)) {
710 isp_prt(isp, ISP_LOGDEBUG1,
711 "watchdog timeout for handle 0x%x", handle);
712 /*
713 * Make sure the command is *really* dead before we
714 * release the handle (and DMA resources) for reuse.
715 */
716 (void) isp_control(isp, ISPCTL_ABORT_CMD, arg);
717
718 /*
719 * After this point, the comamnd is really dead.
720 */
721 if (XS_XFRLEN(xs)) {
722 ISP_DMAFREE(isp, xs, handle);
723 }
724 isp_destroy_handle(isp, handle);
725 XS_SETERR(xs, XS_TIMEOUT);
726 XS_CMD_S_CLEAR(xs);
727 isp_done(xs);
728 } else {
729 u_int16_t nxti, optr;
730 ispreq_t local, *mp = &local, *qe;
731 isp_prt(isp, ISP_LOGDEBUG2,
732 "possible command timeout on handle %x", handle);
733 XS_CMD_C_WDOG(xs);
734 callout_reset(&xs->xs_callout, hz, isp_dog, xs);
735 if (isp_getrqentry(isp, &nxti, &optr, (void **) &qe)) {
736 ISP_UNLOCK(isp);
737 return;
738 }
739 XS_CMD_S_GRACE(xs);
740 MEMZERO((void *) mp, sizeof (*mp));
741 mp->req_header.rqs_entry_count = 1;
742 mp->req_header.rqs_entry_type = RQSTYPE_MARKER;
743 mp->req_modifier = SYNC_ALL;
744 mp->req_target = XS_CHANNEL(xs) << 7;
745 isp_put_request(isp, mp, qe);
746 ISP_ADD_REQUEST(isp, nxti);
747 }
748 } else {
749 isp_prt(isp, ISP_LOGDEBUG0, "watchdog with no command");
750 }
751 ISP_IUNLOCK(isp);
752 }
753
754 /*
755 * Fibre Channel state cleanup thread
756 */
757 static void
758 isp_create_fc_worker(void *arg)
759 {
760 struct ispsoftc *isp = arg;
761
762 if (kthread_create1(isp_fc_worker, isp, &isp->isp_osinfo.thread,
763 "%s:fc_thrd", isp->isp_name)) {
764 isp_prt(isp, ISP_LOGERR, "unable to create FC worker thread");
765 panic("isp_create_fc_worker");
766 }
767
768 }
769
/*
 * FC worker thread body: sleeps until isp_async() posts work
 * (threadwork) and wakes it, then drives isp_fc_runstate() until the
 * loop is ready and thaws any queues frozen while the loop was down.
 * Exits when isp_osinfo.thread is cleared.
 */
static void
isp_fc_worker(void *arg)
{
	/* NOTE(review): declared but never called in this function. */
	void scsipi_run_queue(struct scsipi_channel *);
	struct ispsoftc *isp = arg;

	for (;;) {
		int s;

		/*
		 * Note we do *not* use the ISP_LOCK/ISP_UNLOCK macros here.
		 */
		s = splbio();
		while (isp->isp_osinfo.threadwork) {
			isp->isp_osinfo.threadwork = 0;
			if (isp_fc_runstate(isp, 10 * 1000000) == 0) {
				break;
			}
			if (isp->isp_osinfo.loop_checked &&
			    FCPARAM(isp)->loop_seen_once == 0) {
				/*
				 * NOTE(review): this path calls splx(s) both
				 * here and again after the tsleep at "skip:";
				 * the second splx looks redundant — confirm.
				 */
				splx(s);
				goto skip;
			}
			/* Not ready yet: retry after a half-second pause. */
			isp->isp_osinfo.threadwork = 1;
			splx(s);
			delay(500 * 1000);
			s = splbio();
		}
		if (FCPARAM(isp)->isp_fwstate != FW_READY ||
		    FCPARAM(isp)->isp_loopstate != LOOP_READY) {
			isp_prt(isp, ISP_LOGINFO, "isp_fc_runstate in vain");
			isp->isp_osinfo.threadwork = 1;
			splx(s);
			continue;
		}

		if (isp->isp_osinfo.blocked) {
			/* Loop is ready: release the midlayer queues. */
			isp->isp_osinfo.blocked = 0;
			isp_prt(isp, ISP_LOGDEBUG0,
			    "restarting queues (freeze count %d)",
			    isp->isp_chanA.chan_qfreeze);
			scsipi_channel_thaw(&isp->isp_chanA, 1);
		}

		if (isp->isp_osinfo.thread == NULL)
			break;

skip:
		(void) tsleep(&isp->isp_osinfo.thread, PRIBIO, "fcclnup", 0);

		splx(s);
	}

	/* In case parent is waiting for us to exit. */
	wakeup(&isp->isp_osinfo.thread);

	kthread_exit(0);
}
828
/*
 * Free any associated resources prior to decommissioning and
 * set the card to a known state (so it doesn't wake up and kick
 * us when we aren't expecting it to).
 *
 * Locks are held before coming here.
 *
 * NOTE(review): no resources are actually freed here yet; all this
 * does is mask the chip's interrupts.
 */
void
isp_uninit(struct ispsoftc *isp)
{
	isp_lock(isp);
	/*
	 * Leave with interrupts disabled.
	 */
	DISABLE_INTS(isp);
	isp_unlock(isp);
}
846
847 int
848 isp_async(struct ispsoftc *isp, ispasync_t cmd, void *arg)
849 {
850 int bus, tgt;
851
852 switch (cmd) {
853 case ISPASYNC_NEW_TGT_PARAMS:
854 if (IS_SCSI(isp) && isp->isp_dblev) {
855 sdparam *sdp = isp->isp_param;
856 int flags;
857 struct scsipi_xfer_mode xm;
858
859 tgt = *((int *) arg);
860 bus = (tgt >> 16) & 0xffff;
861 tgt &= 0xffff;
862 sdp += bus;
863 flags = sdp->isp_devparam[tgt].actv_flags;
864
865 xm.xm_mode = 0;
866 xm.xm_period = sdp->isp_devparam[tgt].actv_period;
867 xm.xm_offset = sdp->isp_devparam[tgt].actv_offset;
868 xm.xm_target = tgt;
869
870 if ((flags & DPARM_SYNC) && xm.xm_period && xm.xm_offset)
871 xm.xm_mode |= PERIPH_CAP_SYNC;
872 if (flags & DPARM_WIDE)
873 xm.xm_mode |= PERIPH_CAP_WIDE16;
874 if (flags & DPARM_TQING)
875 xm.xm_mode |= PERIPH_CAP_TQING;
876 scsipi_async_event(bus? &isp->isp_chanB : &isp->isp_chanA,
877 ASYNC_EVENT_XFER_MODE, &xm);
878 break;
879 }
880 case ISPASYNC_BUS_RESET:
881 bus = *((int *) arg);
882 scsipi_async_event(bus? &isp->isp_chanB : &isp->isp_chanA,
883 ASYNC_EVENT_RESET, NULL);
884 isp_prt(isp, ISP_LOGINFO, "SCSI bus %d reset detected", bus);
885 break;
886 case ISPASYNC_LIP:
887 /*
888 * Don't do queue freezes or blockage until we have the
889 * thread running that can unfreeze/unblock us.
890 */
891 if (isp->isp_osinfo.blocked == 0) {
892 if (isp->isp_osinfo.thread) {
893 isp->isp_osinfo.blocked = 1;
894 scsipi_channel_freeze(&isp->isp_chanA, 1);
895 }
896 }
897 isp_prt(isp, ISP_LOGINFO, "LIP Received");
898 break;
899 case ISPASYNC_LOOP_RESET:
900 /*
901 * Don't do queue freezes or blockage until we have the
902 * thread running that can unfreeze/unblock us.
903 */
904 if (isp->isp_osinfo.blocked == 0) {
905 if (isp->isp_osinfo.thread) {
906 isp->isp_osinfo.blocked = 1;
907 scsipi_channel_freeze(&isp->isp_chanA, 1);
908 }
909 }
910 isp_prt(isp, ISP_LOGINFO, "Loop Reset Received");
911 break;
912 case ISPASYNC_LOOP_DOWN:
913 /*
914 * Don't do queue freezes or blockage until we have the
915 * thread running that can unfreeze/unblock us.
916 */
917 if (isp->isp_osinfo.blocked == 0) {
918 if (isp->isp_osinfo.thread) {
919 isp->isp_osinfo.blocked = 1;
920 scsipi_channel_freeze(&isp->isp_chanA, 1);
921 }
922 }
923 isp_prt(isp, ISP_LOGINFO, "Loop DOWN");
924 break;
925 case ISPASYNC_LOOP_UP:
926 /*
927 * Let the subsequent ISPASYNC_CHANGE_NOTIFY invoke
928 * the FC worker thread. When the FC worker thread
929 * is done, let *it* call scsipi_channel_thaw...
930 */
931 isp_prt(isp, ISP_LOGINFO, "Loop UP");
932 break;
933 case ISPASYNC_PROMENADE:
934 if (IS_FC(isp) && isp->isp_dblev) {
935 static const char fmt[] = "Target %d (Loop 0x%x) Port ID 0x%x "
936 "(role %s) %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x";
937 const static char *const roles[4] = {
938 "None", "Target", "Initiator", "Target/Initiator"
939 };
940 fcparam *fcp = isp->isp_param;
941 int tgt = *((int *) arg);
942 struct lportdb *lp = &fcp->portdb[tgt];
943
944 isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid, lp->portid,
945 roles[lp->roles & 0x3],
946 (lp->valid)? "Arrived" : "Departed",
947 (u_int32_t) (lp->port_wwn >> 32),
948 (u_int32_t) (lp->port_wwn & 0xffffffffLL),
949 (u_int32_t) (lp->node_wwn >> 32),
950 (u_int32_t) (lp->node_wwn & 0xffffffffLL));
951 break;
952 }
953 case ISPASYNC_CHANGE_NOTIFY:
954 if (arg == ISPASYNC_CHANGE_PDB) {
955 isp_prt(isp, ISP_LOGINFO, "Port Database Changed");
956 } else if (arg == ISPASYNC_CHANGE_SNS) {
957 isp_prt(isp, ISP_LOGINFO,
958 "Name Server Database Changed");
959 }
960
961 /*
962 * We can set blocked here because we know it's now okay
963 * to try and run isp_fc_runstate (in order to build loop
964 * state). But we don't try and freeze the midlayer's queue
965 * if we have no thread that we can wake to later unfreeze
966 * it.
967 */
968 if (isp->isp_osinfo.blocked == 0) {
969 isp->isp_osinfo.blocked = 1;
970 if (isp->isp_osinfo.thread) {
971 scsipi_channel_freeze(&isp->isp_chanA, 1);
972 }
973 }
974 /*
975 * Note that we have work for the thread to do, and
976 * if the thread is here already, wake it up.
977 */
978 isp->isp_osinfo.threadwork++;
979 if (isp->isp_osinfo.thread) {
980 wakeup(&isp->isp_osinfo.thread);
981 } else {
982 isp_prt(isp, ISP_LOGDEBUG1, "no FC thread yet");
983 }
984 break;
985 case ISPASYNC_FABRIC_DEV:
986 {
987 int target, base, lim;
988 fcparam *fcp = isp->isp_param;
989 struct lportdb *lp = NULL;
990 struct lportdb *clp = (struct lportdb *) arg;
991 char *pt;
992
993 switch (clp->port_type) {
994 case 1:
995 pt = " N_Port";
996 break;
997 case 2:
998 pt = " NL_Port";
999 break;
1000 case 3:
1001 pt = "F/NL_Port";
1002 break;
1003 case 0x7f:
1004 pt = " Nx_Port";
1005 break;
1006 case 0x81:
1007 pt = " F_port";
1008 break;
1009 case 0x82:
1010 pt = " FL_Port";
1011 break;
1012 case 0x84:
1013 pt = " E_port";
1014 break;
1015 default:
1016 pt = " ";
1017 break;
1018 }
1019
1020 isp_prt(isp, ISP_LOGINFO,
1021 "%s Fabric Device @ PortID 0x%x", pt, clp->portid);
1022
1023 /*
1024 * If we don't have an initiator role we bail.
1025 *
1026 * We just use ISPASYNC_FABRIC_DEV for announcement purposes.
1027 */
1028
1029 if ((isp->isp_role & ISP_ROLE_INITIATOR) == 0) {
1030 break;
1031 }
1032
1033 /*
1034 * Is this entry for us? If so, we bail.
1035 */
1036
1037 if (fcp->isp_portid == clp->portid) {
1038 break;
1039 }
1040
1041 /*
1042 * Else, the default policy is to find room for it in
1043 * our local port database. Later, when we execute
1044 * the call to isp_pdb_sync either this newly arrived
1045 * or already logged in device will be (re)announced.
1046 */
1047
1048 if (fcp->isp_topo == TOPO_FL_PORT)
1049 base = FC_SNS_ID+1;
1050 else
1051 base = 0;
1052
1053 if (fcp->isp_topo == TOPO_N_PORT)
1054 lim = 1;
1055 else
1056 lim = MAX_FC_TARG;
1057
1058 /*
1059 * Is it already in our list?
1060 */
1061 for (target = base; target < lim; target++) {
1062 if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
1063 continue;
1064 }
1065 lp = &fcp->portdb[target];
1066 if (lp->port_wwn == clp->port_wwn &&
1067 lp->node_wwn == clp->node_wwn) {
1068 lp->fabric_dev = 1;
1069 break;
1070 }
1071 }
1072 if (target < lim) {
1073 break;
1074 }
1075 for (target = base; target < lim; target++) {
1076 if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
1077 continue;
1078 }
1079 lp = &fcp->portdb[target];
1080 if (lp->port_wwn == 0) {
1081 break;
1082 }
1083 }
1084 if (target == lim) {
1085 isp_prt(isp, ISP_LOGWARN,
1086 "out of space for fabric devices");
1087 break;
1088 }
1089 lp->port_type = clp->port_type;
1090 lp->fc4_type = clp->fc4_type;
1091 lp->node_wwn = clp->node_wwn;
1092 lp->port_wwn = clp->port_wwn;
1093 lp->portid = clp->portid;
1094 lp->fabric_dev = 1;
1095 break;
1096 }
1097 case ISPASYNC_FW_CRASH:
1098 {
1099 u_int16_t mbox1, mbox6;
1100 mbox1 = ISP_READ(isp, OUTMAILBOX1);
1101 if (IS_DUALBUS(isp)) {
1102 mbox6 = ISP_READ(isp, OUTMAILBOX6);
1103 } else {
1104 mbox6 = 0;
1105 }
1106 isp_prt(isp, ISP_LOGERR,
1107 "Internal Firmware Error on bus %d @ RISC Address 0x%x",
1108 mbox6, mbox1);
1109 #ifdef ISP_FW_CRASH_DUMP
1110 if (IS_FC(isp)) {
1111 if (isp->isp_osinfo.blocked == 0) {
1112 isp->isp_osinfo.blocked = 1;
1113 scsipi_channel_freeze(&isp->isp_chanA, 1);
1114 }
1115 isp_fw_dump(isp);
1116 }
1117 isp_reinit(isp);
1118 isp_async(isp, ISPASYNC_FW_RESTART, NULL);
1119 #endif
1120 break;
1121 }
1122 default:
1123 break;
1124 }
1125 return (0);
1126 }
1127
#include <machine/stdarg.h>
/*
 * Console logging helper.  Messages are suppressed unless 'level' is
 * ISP_LOGALL or shares a bit with isp->isp_dblev; output is prefixed
 * with the unit name and terminated with a newline.
 */
void
isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
{
	va_list ap;
	if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
		return;
	}
	printf("%s: ", isp->isp_name);
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
	printf("\n");
}
1142