isp_netbsd.c revision 1.28 1 /* $NetBSD: isp_netbsd.c,v 1.28 2000/08/01 23:55:10 mjacob Exp $ */
2 /*
3 * Platform (NetBSD) dependent common attachment code for Qlogic adapters.
4 * Matthew Jacob <mjacob (at) nas.nasa.gov>
5 */
6 /*
7 * Copyright (C) 1997, 1998, 1999 National Aeronautics & Space Administration
8 * All rights reserved.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. The name of the author may not be used to endorse or promote products
19 * derived from this software without specific prior written permission
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
22 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
23 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
24 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
26 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
30 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 #include <dev/ic/isp_netbsd.h>
34 #include <sys/scsiio.h>
35
36
37 /*
38 * Set a timeout for the watchdogging of a command.
39 *
40 * The dimensional analysis is
41 *
42 * milliseconds * (seconds/millisecond) * (ticks/second) = ticks
43 *
44 * =
45 *
46 * (milliseconds / 1000) * hz = ticks
47 *
48 *
49 * For timeouts less than 1 second, we'll get zero. Because of this, and
50 * because we want to establish *our* timeout to be longer than what the
51 * firmware might do, we just add 3 seconds at the back end.
52 */
53 #define _XT(xs) ((((xs)->timeout/1000) * hz) + (3 * hz))
54
55 static void ispminphys __P((struct buf *));
56 static int32_t ispcmd_slow __P((XS_T *));
57 static int32_t ispcmd __P((XS_T *));
58 static int
59 ispioctl __P((struct scsipi_link *, u_long, caddr_t, int, struct proc *));
60
61 static struct scsipi_device isp_dev = { NULL, NULL, NULL, NULL };
62 static int isp_polled_cmd __P((struct ispsoftc *, XS_T *));
63 static void isp_dog __P((void *));
64 static void isp_command_requeue __P((void *));
65 static void isp_internal_restart __P((void *));
66
67 /*
68 * Complete attachment of hardware, include subdevices.
69 */
70 void
71 isp_attach(isp)
72 struct ispsoftc *isp;
73 {
74 int maxluns;
75 isp->isp_osinfo._adapter.scsipi_minphys = ispminphys;
76 isp->isp_osinfo._adapter.scsipi_ioctl = ispioctl;
77
78 isp->isp_state = ISP_RUNSTATE;
79 isp->isp_osinfo._link.scsipi_scsi.channel =
80 (IS_DUALBUS(isp))? 0 : SCSI_CHANNEL_ONLY_ONE;
81 isp->isp_osinfo._link.adapter_softc = isp;
82 isp->isp_osinfo._link.device = &isp_dev;
83 isp->isp_osinfo._link.adapter = &isp->isp_osinfo._adapter;
84 isp->isp_osinfo._link.openings = isp->isp_maxcmds;
85 isp->isp_osinfo._link.scsipi_scsi.max_lun = maxluns;
86 /*
87 * Until the midlayer is fixed to use REPORT LUNS, limit to 8 luns.
88 */
89 isp->isp_osinfo._link.scsipi_scsi.max_lun =
90 (isp->isp_maxluns < 7)? isp->isp_maxluns - 1 : 7;
91 TAILQ_INIT(&isp->isp_osinfo.waitq); /* The 2nd bus will share.. */
92
93 if (IS_FC(isp)) {
94 /*
95 * Give it another chance here to come alive...
96 */
97 isp->isp_osinfo._adapter.scsipi_cmd = ispcmd;
98 isp->isp_osinfo._link.scsipi_scsi.max_target = MAX_FC_TARG-1;
99 } else {
100 sdparam *sdp = isp->isp_param;
101 isp->isp_osinfo._adapter.scsipi_cmd = ispcmd_slow;
102 isp->isp_osinfo._link.scsipi_scsi.max_target = MAX_TARGETS-1;
103 isp->isp_osinfo._link.scsipi_scsi.adapter_target =
104 sdp->isp_initiator_id;
105 isp->isp_osinfo.discovered[0] = 1 << sdp->isp_initiator_id;
106 if (IS_DUALBUS(isp)) {
107 isp->isp_osinfo._link_b = isp->isp_osinfo._link;
108 sdp++;
109 isp->isp_osinfo.discovered[1] =
110 1 << sdp->isp_initiator_id;
111 isp->isp_osinfo._link_b.scsipi_scsi.adapter_target =
112 sdp->isp_initiator_id;
113 isp->isp_osinfo._link_b.scsipi_scsi.channel = 1;
114 isp->isp_osinfo._link_b.scsipi_scsi.max_lun =
115 isp->isp_osinfo._link.scsipi_scsi.max_lun;
116 }
117 }
118 isp->isp_osinfo._link.type = BUS_SCSI;
119
120 /*
121 * Send a SCSI Bus Reset.
122 */
123 if (IS_SCSI(isp)) {
124 int bus = 0;
125 ISP_LOCK(isp);
126 (void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
127 ISP_UNLOCK(isp);
128 if (IS_DUALBUS(isp)) {
129 bus++;
130 ISP_LOCK(isp);
131 (void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
132 ISP_UNLOCK(isp);
133 }
134 } else {
135 int defid;
136 fcparam *fcp = isp->isp_param;
137 delay(2 * 1000000);
138 defid = MAX_FC_TARG;
139 ISP_LOCK(isp);
140 /*
141 * We probably won't have clock interrupts running,
142 * so we'll be really short (smoke test, really)
143 * at this time.
144 */
145 if (isp_control(isp, ISPCTL_FCLINK_TEST, NULL)) {
146 (void) isp_control(isp, ISPCTL_PDB_SYNC, NULL);
147 if (fcp->isp_fwstate == FW_READY &&
148 fcp->isp_loopstate >= LOOP_PDB_RCVD) {
149 defid = fcp->isp_loopid;
150 }
151 }
152 ISP_UNLOCK(isp);
153 isp->isp_osinfo._link.scsipi_scsi.adapter_target = defid;
154 }
155
156 /*
157 * After this point, we'll be doing the new configuration
158 * schema which allows interrups, so we can do tsleep/wakeup
159 * for mailbox stuff at that point.
160 */
161 isp->isp_osinfo.no_mbox_ints = 0;
162
163 /*
164 * And attach children (if any).
165 */
166 config_found((void *)isp, &isp->isp_osinfo._link, scsiprint);
167 if (IS_DUALBUS(isp)) {
168 config_found((void *)isp, &isp->isp_osinfo._link_b, scsiprint);
169 }
170 }
171
172 /*
173 * minphys our xfers
174 *
175 * Unfortunately, the buffer pointer describes the target device- not the
176 * adapter device, so we can't use the pointer to find out what kind of
177 * adapter we are and adjust accordingly.
178 */
179
180 static void
181 ispminphys(bp)
182 struct buf *bp;
183 {
184 /*
185 * XX: Only the 1020 has a 24 bit limit.
186 */
187 if (bp->b_bcount >= (1 << 24)) {
188 bp->b_bcount = (1 << 24);
189 }
190 minphys(bp);
191 }
192
193 static int32_t
194 ispcmd_slow(xs)
195 XS_T *xs;
196 {
197 sdparam *sdp;
198 int tgt, chan, s;
199 u_int16_t flags;
200 struct ispsoftc *isp = XS_ISP(xs);
201
202 /*
203 * Have we completed discovery for this target on this adapter?
204 */
205 tgt = XS_TGT(xs);
206 chan = XS_CHANNEL(xs);
207 if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 ||
208 (isp->isp_osinfo.discovered[chan] & (1 << tgt)) != 0) {
209 return (ispcmd(xs));
210 }
211
212 flags = DPARM_DEFAULT;
213 if (xs->sc_link->quirks & SDEV_NOSYNC) {
214 flags ^= DPARM_SYNC;
215 } else {
216 isp_prt(isp, ISP_LOGDEBUG0,
217 "channel %d target %d SYNC enabled", chan, tgt);
218 }
219 if (xs->sc_link->quirks & SDEV_NOWIDE) {
220 flags ^= DPARM_WIDE;
221 } else {
222 isp_prt(isp, ISP_LOGDEBUG0,
223 "channel %d target %d WIDE enabled", chan, tgt);
224 }
225 if (xs->sc_link->quirks & SDEV_NOTAG) {
226 flags ^= DPARM_TQING;
227 } else {
228 isp_prt(isp, ISP_LOGDEBUG0,
229 "channel %d target %d TAG enabled", chan, tgt);
230 }
231 /*
232 * Okay, we know about this device now,
233 * so mark parameters to be updated for it.
234 */
235 s = splbio();
236 isp->isp_osinfo.discovered[chan] |= (1 << tgt);
237 sdp = isp->isp_param;
238 sdp += chan;
239 sdp->isp_devparam[tgt].dev_flags = flags;
240 sdp->isp_devparam[tgt].dev_update = 1;
241 isp->isp_update |= (1 << chan);
242 splx(s);
243 return (ispcmd(xs));
244 }
245
246 static int
247 ispioctl(sc_link, cmd, addr, flag, p)
248 struct scsipi_link *sc_link;
249 u_long cmd;
250 caddr_t addr;
251 int flag;
252 struct proc *p;
253 {
254 struct ispsoftc *isp = sc_link->adapter_softc;
255 int s, chan, retval = ENOTTY;
256
257 switch (cmd) {
258 case SCBUSIORESET:
259 chan = sc_link->scsipi_scsi.channel;
260 s = splbio();
261 if (isp_control(isp, ISPCTL_RESET_BUS, &chan))
262 retval = EIO;
263 else
264 retval = 0;
265 (void) splx(s);
266 break;
267 default:
268 break;
269 }
270 return (retval);
271 }
272
273
/*
 * Main scsipi command entry point.
 *
 * Brings the HBA to run state if needed, honors queue blockage
 * (e.g. FC loop down), handles polled commands, and otherwise starts
 * the command and arms its watchdog. Returns one of the scsipi
 * COMPLETE / TRY_AGAIN_LATER / SUCCESSFULLY_QUEUED codes.
 */
static int32_t
ispcmd(xs)
	XS_T *xs;
{
	struct ispsoftc *isp;
	int result, s;

	isp = XS_ISP(xs);
	s = splbio();
	/*
	 * (Re)initialize the HBA with interrupts masked if it hasn't
	 * reached run state yet; fail the command if init fails.
	 */
	if (isp->isp_state < ISP_RUNSTATE) {
		DISABLE_INTS(isp);
		isp_init(isp);
		if (isp->isp_state != ISP_INITSTATE) {
			ENABLE_INTS(isp);
			(void) splx(s);
			XS_SETERR(xs, HBA_BOTCH);
			return (COMPLETE);
		}
		isp->isp_state = ISP_RUNSTATE;
		ENABLE_INTS(isp);
	}

	/*
	 * Check for queue blockage...
	 */
	if (isp->isp_osinfo.blocked) {
		if (xs->xs_control & XS_CTL_POLL) {
			/* polled commands can't wait on the queue */
			xs->error = XS_DRIVER_STUFFUP;
			splx(s);
			return (TRY_AGAIN_LATER);
		}
		/* parked until isp_internal_restart() drains the waitq */
		TAILQ_INSERT_TAIL(&isp->isp_osinfo.waitq, xs, adapter_q);
		splx(s);
		return (SUCCESSFULLY_QUEUED);
	}

	if (xs->xs_control & XS_CTL_POLL) {
		/*
		 * Run polled with mailbox interrupts suppressed;
		 * restore the previous setting afterwards.
		 */
		volatile u_int8_t ombi = isp->isp_osinfo.no_mbox_ints;
		isp->isp_osinfo.no_mbox_ints = 1;
		result = isp_polled_cmd(isp, xs);
		isp->isp_osinfo.no_mbox_ints = ombi;
		(void) splx(s);
		return (result);
	}

	/* Map the core CMD_* result onto a scsipi return code. */
	result = isp_start(xs);
	switch (result) {
	case CMD_QUEUED:
		result = SUCCESSFULLY_QUEUED;
		if (xs->timeout) {
			/* arm the per-command watchdog */
			callout_reset(&xs->xs_callout, _XT(xs), isp_dog, xs);
		}
		break;
	case CMD_EAGAIN:
		result = TRY_AGAIN_LATER;
		break;
	case CMD_RQLATER:
		/* report queued to the midlayer; we retry it ourselves */
		result = SUCCESSFULLY_QUEUED;
		callout_reset(&xs->xs_callout, hz, isp_command_requeue, xs);
		break;
	case CMD_COMPLETE:
		result = COMPLETE;
		break;
	}
	(void) splx(s);
	return (result);
}
341
342 static int
343 isp_polled_cmd(isp, xs)
344 struct ispsoftc *isp;
345 XS_T *xs;
346 {
347 int result;
348 int infinite = 0, mswait;
349
350 result = isp_start(xs);
351
352 switch (result) {
353 case CMD_QUEUED:
354 result = SUCCESSFULLY_QUEUED;
355 break;
356 case CMD_RQLATER:
357 case CMD_EAGAIN:
358 if (XS_NOERR(xs)) {
359 xs->error = XS_DRIVER_STUFFUP;
360 }
361 result = TRY_AGAIN_LATER;
362 break;
363 case CMD_COMPLETE:
364 result = COMPLETE;
365 break;
366
367 }
368
369 if (result != SUCCESSFULLY_QUEUED) {
370 return (result);
371 }
372
373 /*
374 * If we can't use interrupts, poll on completion.
375 */
376 if ((mswait = XS_TIME(xs)) == 0)
377 infinite = 1;
378
379 while (mswait || infinite) {
380 if (isp_intr((void *)isp)) {
381 if (XS_CMD_DONE_P(xs)) {
382 break;
383 }
384 }
385 USEC_DELAY(1000);
386 mswait -= 1;
387 }
388
389 /*
390 * If no other error occurred but we didn't finish,
391 * something bad happened.
392 */
393 if (XS_CMD_DONE_P(xs) == 0) {
394 if (isp_control(isp, ISPCTL_ABORT_CMD, xs)) {
395 isp_reinit(isp);
396 }
397 if (XS_NOERR(xs)) {
398 XS_SETERR(xs, HBA_BOTCH);
399 }
400 }
401 result = COMPLETE;
402 return (result);
403 }
404
405 void
406 isp_done(xs)
407 XS_T *xs;
408 {
409 XS_CMD_S_DONE(xs);
410 if (XS_CMD_WDOG_P(xs) == 0) {
411 struct ispsoftc *isp = XS_ISP(xs);
412 callout_stop(&xs->xs_callout);
413 if (XS_CMD_GRACE_P(xs)) {
414 isp_prt(isp, ISP_LOGDEBUG1,
415 "finished command on borrowed time");
416 }
417 XS_CMD_S_CLEAR(xs);
418 scsipi_done(xs);
419 }
420 }
421
/*
 * Per-command watchdog, scheduled via callout when a command starts.
 *
 * First expiry: drain any pending interrupt; if the command is still
 * outstanding, grant a one-second grace period and push a SYNC_ALL
 * marker through the request queue to flush completions. Second
 * expiry (grace set): abort the command, release its handle and DMA
 * resources, and complete it with XS_TIMEOUT.
 */
static void
isp_dog(arg)
	void *arg;
{
	XS_T *xs = arg;
	struct ispsoftc *isp = XS_ISP(xs);
	u_int32_t handle;
	int s = splbio();

	/*
	 * We've decided this command is dead. Make sure we're not trying
	 * to kill a command that's already dead by getting its handle
	 * and seeing whether it's still alive.
	 */
	handle = isp_find_handle(isp, xs);
	if (handle) {
		u_int16_t r, r1, i;

		if (XS_CMD_DONE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1,
			    "watchdog found done cmd (handle 0x%x)", handle);
			(void) splx(s);
			return;
		}

		if (XS_CMD_WDOG_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1,
			    "recursive watchdog (handle 0x%x)", handle);
			(void) splx(s);
			return;
		}

		XS_CMD_S_WDOG(xs);

		/*
		 * Read the interrupt status register until it is stable
		 * (two consecutive equal reads), bounded at 1000 tries.
		 */
		i = 0;
		do {
			r = ISP_READ(isp, BIU_ISR);
			USEC_DELAY(1);
			r1 = ISP_READ(isp, BIU_ISR);
		} while (r != r1 && ++i < 1000);

		if (INT_PENDING(isp, r) && isp_intr(isp) && XS_CMD_DONE_P(xs)) {
			/* a pending interrupt actually completed the command */
			isp_prt(isp, ISP_LOGDEBUG1, "watchdog cleanup (%x, %x)",
			    handle, r);
			XS_CMD_C_WDOG(xs);
			isp_done(xs);
		} else if (XS_CMD_GRACE_P(xs)) {
			/* second expiry: grace period used up - give up */
			isp_prt(isp, ISP_LOGDEBUG1, "watchdog timeout (%x, %x)",
			    handle, r);
			/*
			 * Make sure the command is *really* dead before we
			 * release the handle (and DMA resources) for reuse.
			 */
			(void) isp_control(isp, ISPCTL_ABORT_CMD, arg);

			/*
			 * After this point, the command is really dead.
			 */
			if (XS_XFRLEN(xs)) {
				ISP_DMAFREE(isp, xs, handle);
			}
			isp_destroy_handle(isp, handle);
			XS_SETERR(xs, XS_TIMEOUT);
			XS_CMD_S_CLEAR(xs);
			isp_done(xs);
		} else {
			/*
			 * First expiry: re-arm for one second of grace and
			 * queue a SYNC_ALL marker to flush completions.
			 */
			u_int16_t iptr, optr;
			ispreq_t *mp;
			isp_prt(isp, ISP_LOGDEBUG2,
			    "possible command timeout (%x, %x)", handle, r);
			XS_CMD_C_WDOG(xs);
			callout_reset(&xs->xs_callout, hz, isp_dog, xs);
			if (isp_getrqentry(isp, &iptr, &optr, (void **) &mp)) {
				/* no request queue entry - try again later */
				(void) splx(s);
				return;
			}
			XS_CMD_S_GRACE(xs);
			MEMZERO((void *) mp, sizeof (*mp));
			mp->req_header.rqs_entry_count = 1;
			mp->req_header.rqs_entry_type = RQSTYPE_MARKER;
			mp->req_modifier = SYNC_ALL;
			mp->req_target = XS_CHANNEL(xs) << 7;	/* bus # in bit 7 */
			ISP_SWIZZLE_REQUEST(isp, mp);
			ISP_ADD_REQUEST(isp, iptr);
		}
	} else if (isp->isp_dblev) {
		isp_prt(isp, ISP_LOGDEBUG2, "watchdog with no command");
	}
	(void) splx(s);
}
512
/*
 * Free any associated resources prior to decommissioning and
 * set the card to a known state (so it doesn't wake up and kick
 * us when we aren't expecting it to).
 *
 * Locks are held before coming here.
 */
void
isp_uninit(isp)
	struct ispsoftc *isp;
{
	isp_lock(isp);
	/*
	 * Leave with interrupts disabled.
	 */
	DISABLE_INTS(isp);
	isp_unlock(isp);
}
531
532 /*
533 * Restart function for a command to be requeued later.
534 */
535 static void
536 isp_command_requeue(arg)
537 void *arg;
538 {
539 struct scsipi_xfer *xs = arg;
540 struct ispsoftc *isp = XS_ISP(xs);
541 int s = splbio();
542 switch (ispcmd_slow(xs)) {
543 case SUCCESSFULLY_QUEUED:
544 isp_prt(isp, ISP_LOGINFO,
545 "requeued commands for %d.%d", XS_TGT(xs), XS_LUN(xs));
546 if (xs->timeout) {
547 callout_reset(&xs->xs_callout, _XT(xs), isp_dog, xs);
548 }
549 break;
550 case TRY_AGAIN_LATER:
551 isp_prt(isp, ISP_LOGINFO,
552 "EAGAIN on requeue for %d.%d", XS_TGT(xs), XS_LUN(xs));
553 callout_reset(&xs->xs_callout, hz, isp_command_requeue, xs);
554 break;
555 case COMPLETE:
556 /* can only be an error */
557 XS_CMD_S_DONE(xs);
558 callout_stop(&xs->xs_callout);
559 if (XS_NOERR(xs)) {
560 XS_SETERR(xs, HBA_BOTCH);
561 }
562 scsipi_done(xs);
563 break;
564 }
565 (void) splx(s);
566 }
567
568 /*
569 * Restart function after a LOOP UP event (e.g.),
570 * done as a timeout for some hysteresis.
571 */
572 static void
573 isp_internal_restart(arg)
574 void *arg;
575 {
576 struct ispsoftc *isp = arg;
577 int result, nrestarted = 0, s;
578
579 s = splbio();
580 if (isp->isp_osinfo.blocked == 0) {
581 struct scsipi_xfer *xs;
582 while ((xs = TAILQ_FIRST(&isp->isp_osinfo.waitq)) != NULL) {
583 TAILQ_REMOVE(&isp->isp_osinfo.waitq, xs, adapter_q);
584 result = isp_start(xs);
585 if (result != CMD_QUEUED) {
586 isp_prt(isp, ISP_LOGERR,
587 "botched command restart (err=%d)", result);
588 XS_CMD_S_DONE(xs);
589 if (xs->error == XS_NOERROR)
590 xs->error = XS_DRIVER_STUFFUP;
591 callout_stop(&xs->xs_callout);
592 scsipi_done(xs);
593 } else if (xs->timeout) {
594 callout_reset(&xs->xs_callout,
595 _XT(xs), isp_dog, xs);
596 }
597 nrestarted++;
598 }
599 isp_prt(isp, ISP_LOGINFO,
600 "isp_restart requeued %d commands", nrestarted);
601 }
602 (void) splx(s);
603 }
604
605 int
606 isp_async(isp, cmd, arg)
607 struct ispsoftc *isp;
608 ispasync_t cmd;
609 void *arg;
610 {
611 int bus, tgt;
612 int s = splbio();
613 switch (cmd) {
614 case ISPASYNC_NEW_TGT_PARAMS:
615 if (IS_SCSI(isp) && isp->isp_dblev) {
616 sdparam *sdp = isp->isp_param;
617 char *wt;
618 int mhz, flags, period;
619
620 tgt = *((int *) arg);
621 bus = (tgt >> 16) & 0xffff;
622 tgt &= 0xffff;
623 sdp += bus;
624 flags = sdp->isp_devparam[tgt].cur_dflags;
625 period = sdp->isp_devparam[tgt].cur_period;
626
627 if ((flags & DPARM_SYNC) && period &&
628 (sdp->isp_devparam[tgt].cur_offset) != 0) {
629 /*
630 * There's some ambiguity about our negotiated speed
631 * if we haven't detected LVD mode correctly (which
632 * seems to happen, unfortunately). If we're in LVD
633 * mode, then different rules apply about speed.
634 */
635 if (sdp->isp_lvdmode || period < 0xc) {
636 switch (period) {
637 case 0x9:
638 mhz = 80;
639 break;
640 case 0xa:
641 mhz = 40;
642 break;
643 case 0xb:
644 mhz = 33;
645 break;
646 case 0xc:
647 mhz = 25;
648 break;
649 default:
650 mhz = 1000 / (period * 4);
651 break;
652 }
653 } else {
654 mhz = 1000 / (period * 4);
655 }
656 } else {
657 mhz = 0;
658 }
659 switch (flags & (DPARM_WIDE|DPARM_TQING)) {
660 case DPARM_WIDE:
661 wt = ", 16 bit wide";
662 break;
663 case DPARM_TQING:
664 wt = ", Tagged Queueing Enabled";
665 break;
666 case DPARM_WIDE|DPARM_TQING:
667 wt = ", 16 bit wide, Tagged Queueing Enabled";
668 break;
669 default:
670 wt = " ";
671 break;
672 }
673 if (mhz) {
674 isp_prt(isp, ISP_LOGINFO,
675 "Bus %d Target %d at %dMHz Max Offset %d%s",
676 bus, tgt, mhz, sdp->isp_devparam[tgt].cur_offset,
677 wt);
678 } else {
679 isp_prt(isp, ISP_LOGINFO,
680 "Bus %d Target %d Async Mode%s", bus, tgt, wt);
681 }
682 break;
683 }
684 case ISPASYNC_BUS_RESET:
685 if (arg)
686 bus = *((int *) arg);
687 else
688 bus = 0;
689 isp_prt(isp, ISP_LOGINFO, "SCSI bus %d reset detected", bus);
690 break;
691 case ISPASYNC_LOOP_DOWN:
692 /*
693 * Hopefully we get here in time to minimize the number
694 * of commands we are firing off that are sure to die.
695 */
696 isp->isp_osinfo.blocked = 1;
697 isp_prt(isp, ISP_LOGINFO, "Loop DOWN");
698 break;
699 case ISPASYNC_LOOP_UP:
700 isp->isp_osinfo.blocked = 0;
701 callout_reset(&isp->isp_osinfo._restart, 1,
702 isp_internal_restart, isp);
703 isp_prt(isp, ISP_LOGINFO, "Loop UP");
704 break;
705 case ISPASYNC_PDB_CHANGED:
706 if (IS_FC(isp) && isp->isp_dblev) {
707 const char *fmt = "Target %d (Loop 0x%x) Port ID 0x%x "
708 "role %s %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x";
709 const static char *roles[4] = {
710 "No", "Target", "Initiator", "Target/Initiator"
711 };
712 char *ptr;
713 fcparam *fcp = isp->isp_param;
714 int tgt = *((int *) arg);
715 struct lportdb *lp = &fcp->portdb[tgt];
716
717 if (lp->valid) {
718 ptr = "arrived";
719 } else {
720 ptr = "disappeared";
721 }
722 isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid, lp->portid,
723 roles[lp->roles & 0x3], ptr,
724 (u_int32_t) (lp->port_wwn >> 32),
725 (u_int32_t) (lp->port_wwn & 0xffffffffLL),
726 (u_int32_t) (lp->node_wwn >> 32),
727 (u_int32_t) (lp->node_wwn & 0xffffffffLL));
728 break;
729 }
730 #ifdef ISP2100_FABRIC
731 case ISPASYNC_CHANGE_NOTIFY:
732 isp_prt(isp, ISP_LOGINFO, "Name Server Database Changed");
733 break;
734 case ISPASYNC_FABRIC_DEV:
735 {
736 int target;
737 struct lportdb *lp;
738 sns_scrsp_t *resp = (sns_scrsp_t *) arg;
739 u_int32_t portid;
740 u_int64_t wwn;
741 fcparam *fcp = isp->isp_param;
742
743 portid =
744 (((u_int32_t) resp->snscb_port_id[0]) << 16) |
745 (((u_int32_t) resp->snscb_port_id[1]) << 8) |
746 (((u_int32_t) resp->snscb_port_id[2]));
747 wwn =
748 (((u_int64_t)resp->snscb_portname[0]) << 56) |
749 (((u_int64_t)resp->snscb_portname[1]) << 48) |
750 (((u_int64_t)resp->snscb_portname[2]) << 40) |
751 (((u_int64_t)resp->snscb_portname[3]) << 32) |
752 (((u_int64_t)resp->snscb_portname[4]) << 24) |
753 (((u_int64_t)resp->snscb_portname[5]) << 16) |
754 (((u_int64_t)resp->snscb_portname[6]) << 8) |
755 (((u_int64_t)resp->snscb_portname[7]));
756 isp_prt(isp, ISP_LOGINFO,
757 "Fabric Device (Type 0x%x)@PortID 0x%x WWN 0x%08x%08x",
758 resp->snscb_port_type, portid, ((u_int32_t)(wwn >> 32)),
759 ((u_int32_t)(wwn & 0xffffffff)));
760 if (resp->snscb_port_type != 2)
761 break;
762 for (target = FC_SNS_ID+1; target < MAX_FC_TARG; target++) {
763 lp = &fcp->portdb[target];
764 if (lp->port_wwn == wwn)
765 break;
766 }
767 if (target < MAX_FC_TARG) {
768 break;
769 }
770 for (target = FC_SNS_ID+1; target < MAX_FC_TARG; target++) {
771 lp = &fcp->portdb[target];
772 if (lp->port_wwn == 0)
773 break;
774 }
775 if (target == MAX_FC_TARG) {
776 isp_prt(isp, ISP_LOGWARN,
777 "no more space for fabric devices");
778 return (-1);
779 }
780 lp->port_wwn = lp->node_wwn = wwn;
781 lp->portid = portid;
782 break;
783 }
784 #endif
785 default:
786 break;
787 }
788 (void) splx(s);
789 return (0);
790 }
791
792 #include <machine/stdarg.h>
793 void
794 #ifdef __STDC__
795 isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
796 #else
797 isp_prt(isp, fmt, va_alist)
798 struct ispsoftc *isp;
799 char *fmt;
800 va_dcl;
801 #endif
802 {
803 va_list ap;
804 if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
805 return;
806 }
807 printf("%s: ", isp->isp_name);
808 va_start(ap, fmt);
809 vprintf(fmt, ap);
810 va_end(ap);
811 printf("\n");
812 }
813