/* $NetBSD: isp_netbsd.c,v 1.30 2000/08/14 07:08:12 mjacob Exp $ */
2 /*
3 * This driver, which is contained in NetBSD in the files:
4 *
5 * sys/dev/ic/isp.c
6 * sys/dev/ic/ic/isp.c
7 * sys/dev/ic/ic/isp_inline.h
8 * sys/dev/ic/ic/isp_netbsd.c
9 * sys/dev/ic/ic/isp_netbsd.h
10 * sys/dev/ic/ic/isp_target.c
11 * sys/dev/ic/ic/isp_target.h
12 * sys/dev/ic/ic/isp_tpublic.h
13 * sys/dev/ic/ic/ispmbox.h
14 * sys/dev/ic/ic/ispreg.h
15 * sys/dev/ic/ic/ispvar.h
16 * sys/microcode/isp/asm_sbus.h
17 * sys/microcode/isp/asm_1040.h
18 * sys/microcode/isp/asm_1080.h
19 * sys/microcode/isp/asm_12160.h
20 * sys/microcode/isp/asm_2100.h
21 * sys/microcode/isp/asm_2200.h
22 * sys/pci/isp_pci.c
23 * sys/sbus/isp_sbus.c
24 *
25 * Is being actively maintained by Matthew Jacob (mjacob (at) netbsd.org).
26 * This driver also is shared source with FreeBSD, OpenBSD, Linux, and
27 * Solaris versions. This tends to be an interesting maintenance problem.
28 *
29 * Please coordinate with Matthew Jacob on changes you wish to make here.
30 */
31 /*
32 * Platform (NetBSD) dependent common attachment code for Qlogic adapters.
33 * Matthew Jacob <mjacob (at) nas.nasa.gov>
34 */
35 /*
36 * Copyright (C) 1997, 1998, 1999 National Aeronautics & Space Administration
37 * All rights reserved.
38 *
39 * Redistribution and use in source and binary forms, with or without
40 * modification, are permitted provided that the following conditions
41 * are met:
42 * 1. Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * 2. Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in the
46 * documentation and/or other materials provided with the distribution.
47 * 3. The name of the author may not be used to endorse or promote products
48 * derived from this software without specific prior written permission
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
51 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
52 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
53 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
54 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
55 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
56 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
57 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
58 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
59 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
60 */
61
62 #include <dev/ic/isp_netbsd.h>
63 #include <sys/scsiio.h>
64
65
66 /*
67 * Set a timeout for the watchdogging of a command.
68 *
69 * The dimensional analysis is
70 *
71 * milliseconds * (seconds/millisecond) * (ticks/second) = ticks
72 *
73 * =
74 *
75 * (milliseconds / 1000) * hz = ticks
76 *
77 *
78 * For timeouts less than 1 second, we'll get zero. Because of this, and
79 * because we want to establish *our* timeout to be longer than what the
80 * firmware might do, we just add 3 seconds at the back end.
81 */
82 #define _XT(xs) ((((xs)->timeout/1000) * hz) + (3 * hz))
83
84 static void ispminphys __P((struct buf *));
85 static int32_t ispcmd __P((XS_T *));
86 static int
87 ispioctl __P((struct scsipi_link *, u_long, caddr_t, int, struct proc *));
88
89 static struct scsipi_device isp_dev = { NULL, NULL, NULL, NULL };
90 static int isp_polled_cmd __P((struct ispsoftc *, XS_T *));
91 static void isp_dog __P((void *));
92 static void isp_command_requeue __P((void *));
93 static void isp_internal_restart __P((void *));
94
95 /*
96 * Complete attachment of hardware, include subdevices.
97 */
98 void
99 isp_attach(isp)
100 struct ispsoftc *isp;
101 {
102 int maxluns;
103 isp->isp_osinfo._adapter.scsipi_minphys = ispminphys;
104 isp->isp_osinfo._adapter.scsipi_ioctl = ispioctl;
105 isp->isp_osinfo._adapter.scsipi_cmd = ispcmd;
106
107 isp->isp_state = ISP_RUNSTATE;
108 isp->isp_osinfo._link.scsipi_scsi.channel =
109 (IS_DUALBUS(isp))? 0 : SCSI_CHANNEL_ONLY_ONE;
110 isp->isp_osinfo._link.adapter_softc = isp;
111 isp->isp_osinfo._link.device = &isp_dev;
112 isp->isp_osinfo._link.adapter = &isp->isp_osinfo._adapter;
113 isp->isp_osinfo._link.openings = isp->isp_maxcmds;
114 isp->isp_osinfo._link.scsipi_scsi.max_lun = maxluns;
115 /*
116 * Until the midlayer is fixed to use REPORT LUNS, limit to 8 luns.
117 */
118 isp->isp_osinfo._link.scsipi_scsi.max_lun =
119 (isp->isp_maxluns < 7)? isp->isp_maxluns - 1 : 7;
120 TAILQ_INIT(&isp->isp_osinfo.waitq); /* The 2nd bus will share.. */
121
122 if (IS_FC(isp)) {
123 isp->isp_osinfo._link.scsipi_scsi.max_target = MAX_FC_TARG-1;
124 } else {
125 sdparam *sdp = isp->isp_param;
126 isp->isp_osinfo._link.scsipi_scsi.max_target = MAX_TARGETS-1;
127 isp->isp_osinfo._link.scsipi_scsi.adapter_target =
128 sdp->isp_initiator_id;
129 isp->isp_osinfo.discovered[0] = 1 << sdp->isp_initiator_id;
130 if (IS_DUALBUS(isp)) {
131 isp->isp_osinfo._link_b = isp->isp_osinfo._link;
132 sdp++;
133 isp->isp_osinfo.discovered[1] =
134 1 << sdp->isp_initiator_id;
135 isp->isp_osinfo._link_b.scsipi_scsi.adapter_target =
136 sdp->isp_initiator_id;
137 isp->isp_osinfo._link_b.scsipi_scsi.channel = 1;
138 isp->isp_osinfo._link_b.scsipi_scsi.max_lun =
139 isp->isp_osinfo._link.scsipi_scsi.max_lun;
140 }
141 }
142 isp->isp_osinfo._link.type = BUS_SCSI;
143
144 /*
145 * Send a SCSI Bus Reset.
146 */
147 if (IS_SCSI(isp)) {
148 int bus = 0;
149 ISP_LOCK(isp);
150 (void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
151 if (IS_DUALBUS(isp)) {
152 bus++;
153 (void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
154 }
155 ISP_UNLOCK(isp);
156 } else {
157 int defid;
158 fcparam *fcp = isp->isp_param;
159 delay(2 * 1000000);
160 defid = MAX_FC_TARG;
161 ISP_LOCK(isp);
162 /*
163 * We probably won't have clock interrupts running,
164 * so we'll be really short (smoke test, really)
165 * at this time.
166 */
167 if (isp_control(isp, ISPCTL_FCLINK_TEST, NULL)) {
168 (void) isp_control(isp, ISPCTL_PDB_SYNC, NULL);
169 if (fcp->isp_fwstate == FW_READY &&
170 fcp->isp_loopstate >= LOOP_PDB_RCVD) {
171 defid = fcp->isp_loopid;
172 }
173 }
174 ISP_UNLOCK(isp);
175 isp->isp_osinfo._link.scsipi_scsi.adapter_target = defid;
176 }
177
178 /*
179 * After this point, we'll be doing the new configuration
180 * schema which allows interrups, so we can do tsleep/wakeup
181 * for mailbox stuff at that point.
182 */
183 isp->isp_osinfo.no_mbox_ints = 0;
184
185 /*
186 * And attach children (if any).
187 */
188 config_found((void *)isp, &isp->isp_osinfo._link, scsiprint);
189 if (IS_DUALBUS(isp)) {
190 config_found((void *)isp, &isp->isp_osinfo._link_b, scsiprint);
191 }
192 }
193
194 /*
195 * minphys our xfers
196 *
197 * Unfortunately, the buffer pointer describes the target device- not the
198 * adapter device, so we can't use the pointer to find out what kind of
199 * adapter we are and adjust accordingly.
200 */
201
202 static void
203 ispminphys(bp)
204 struct buf *bp;
205 {
206 /*
207 * XX: Only the 1020 has a 24 bit limit.
208 */
209 if (bp->b_bcount >= (1 << 24)) {
210 bp->b_bcount = (1 << 24);
211 }
212 minphys(bp);
213 }
214
215 static int
216 ispioctl(sc_link, cmd, addr, flag, p)
217 struct scsipi_link *sc_link;
218 u_long cmd;
219 caddr_t addr;
220 int flag;
221 struct proc *p;
222 {
223 struct ispsoftc *isp = sc_link->adapter_softc;
224 int s, chan, retval = ENOTTY;
225
226 chan = (sc_link->scsipi_scsi.channel == SCSI_CHANNEL_ONLY_ONE)? 0 :
227 sc_link->scsipi_scsi.channel;
228
229 switch (cmd) {
230 case SCBUSACCEL:
231 {
232 struct scbusaccel_args *sp = (struct scbusaccel_args *)addr;
233 if (IS_SCSI(isp) && sp->sa_lun == 0) {
234 int dflags = 0;
235 sdparam *sdp = SDPARAM(isp);
236
237 sdp += chan;
238 if (sp->sa_flags & SC_ACCEL_TAGS)
239 dflags |= DPARM_TQING;
240 if (sp->sa_flags & SC_ACCEL_WIDE)
241 dflags |= DPARM_WIDE;
242 if (sp->sa_flags & SC_ACCEL_SYNC)
243 dflags |= DPARM_SYNC;
244 s = splbio();
245 sdp->isp_devparam[sp->sa_target].dev_flags |= dflags;
246 dflags = sdp->isp_devparam[sp->sa_target].dev_flags;
247 sdp->isp_devparam[sp->sa_target].dev_update = 1;
248 isp->isp_update |= (1 << chan);
249 splx(s);
250 isp_prt(isp, ISP_LOGDEBUG1,
251 "ispioctl: device flags 0x%x for %d.%d.X",
252 dflags, chan, sp->sa_target);
253 }
254 retval = 0;
255 break;
256 }
257 case SCBUSIORESET:
258 s = splbio();
259 if (isp_control(isp, ISPCTL_RESET_BUS, &chan))
260 retval = EIO;
261 else
262 retval = 0;
263 (void) splx(s);
264 break;
265 default:
266 break;
267 }
268 return (retval);
269 }
270
271
272 static int32_t
273 ispcmd(xs)
274 XS_T *xs;
275 {
276 struct ispsoftc *isp;
277 int result, s;
278
279 isp = XS_ISP(xs);
280 s = splbio();
281 if (isp->isp_state < ISP_RUNSTATE) {
282 DISABLE_INTS(isp);
283 isp_init(isp);
284 if (isp->isp_state != ISP_INITSTATE) {
285 ENABLE_INTS(isp);
286 (void) splx(s);
287 XS_SETERR(xs, HBA_BOTCH);
288 return (COMPLETE);
289 }
290 isp->isp_state = ISP_RUNSTATE;
291 ENABLE_INTS(isp);
292 }
293
294 /*
295 * Check for queue blockage...
296 */
297 if (isp->isp_osinfo.blocked) {
298 if (xs->xs_control & XS_CTL_POLL) {
299 xs->error = XS_DRIVER_STUFFUP;
300 splx(s);
301 return (TRY_AGAIN_LATER);
302 }
303 TAILQ_INSERT_TAIL(&isp->isp_osinfo.waitq, xs, adapter_q);
304 splx(s);
305 return (SUCCESSFULLY_QUEUED);
306 }
307
308 if (xs->xs_control & XS_CTL_POLL) {
309 volatile u_int8_t ombi = isp->isp_osinfo.no_mbox_ints;
310 isp->isp_osinfo.no_mbox_ints = 1;
311 result = isp_polled_cmd(isp, xs);
312 isp->isp_osinfo.no_mbox_ints = ombi;
313 (void) splx(s);
314 return (result);
315 }
316
317 result = isp_start(xs);
318 switch (result) {
319 case CMD_QUEUED:
320 result = SUCCESSFULLY_QUEUED;
321 if (xs->timeout) {
322 callout_reset(&xs->xs_callout, _XT(xs), isp_dog, xs);
323 }
324 break;
325 case CMD_EAGAIN:
326 result = TRY_AGAIN_LATER;
327 break;
328 case CMD_RQLATER:
329 result = SUCCESSFULLY_QUEUED;
330 callout_reset(&xs->xs_callout, hz, isp_command_requeue, xs);
331 break;
332 case CMD_COMPLETE:
333 result = COMPLETE;
334 break;
335 }
336 (void) splx(s);
337 return (result);
338 }
339
340 static int
341 isp_polled_cmd(isp, xs)
342 struct ispsoftc *isp;
343 XS_T *xs;
344 {
345 int result;
346 int infinite = 0, mswait;
347
348 result = isp_start(xs);
349
350 switch (result) {
351 case CMD_QUEUED:
352 result = SUCCESSFULLY_QUEUED;
353 break;
354 case CMD_RQLATER:
355 case CMD_EAGAIN:
356 if (XS_NOERR(xs)) {
357 xs->error = XS_DRIVER_STUFFUP;
358 }
359 result = TRY_AGAIN_LATER;
360 break;
361 case CMD_COMPLETE:
362 result = COMPLETE;
363 break;
364
365 }
366
367 if (result != SUCCESSFULLY_QUEUED) {
368 return (result);
369 }
370
371 /*
372 * If we can't use interrupts, poll on completion.
373 */
374 if ((mswait = XS_TIME(xs)) == 0)
375 infinite = 1;
376
377 while (mswait || infinite) {
378 if (isp_intr((void *)isp)) {
379 if (XS_CMD_DONE_P(xs)) {
380 break;
381 }
382 }
383 USEC_DELAY(1000);
384 mswait -= 1;
385 }
386
387 /*
388 * If no other error occurred but we didn't finish,
389 * something bad happened.
390 */
391 if (XS_CMD_DONE_P(xs) == 0) {
392 if (isp_control(isp, ISPCTL_ABORT_CMD, xs)) {
393 isp_reinit(isp);
394 }
395 if (XS_NOERR(xs)) {
396 XS_SETERR(xs, HBA_BOTCH);
397 }
398 }
399 result = COMPLETE;
400 return (result);
401 }
402
403 void
404 isp_done(xs)
405 XS_T *xs;
406 {
407 XS_CMD_S_DONE(xs);
408 if (XS_CMD_WDOG_P(xs) == 0) {
409 struct ispsoftc *isp = XS_ISP(xs);
410 callout_stop(&xs->xs_callout);
411 if (XS_CMD_GRACE_P(xs)) {
412 isp_prt(isp, ISP_LOGDEBUG1,
413 "finished command on borrowed time");
414 }
415 XS_CMD_S_CLEAR(xs);
416 scsipi_done(xs);
417 }
418 }
419
420 static void
421 isp_dog(arg)
422 void *arg;
423 {
424 XS_T *xs = arg;
425 struct ispsoftc *isp = XS_ISP(xs);
426 u_int32_t handle;
427
428 ISP_ILOCK(isp);
429 /*
430 * We've decided this command is dead. Make sure we're not trying
431 * to kill a command that's already dead by getting it's handle and
432 * and seeing whether it's still alive.
433 */
434 handle = isp_find_handle(isp, xs);
435 if (handle) {
436 u_int16_t r, r1, i;
437
438 if (XS_CMD_DONE_P(xs)) {
439 isp_prt(isp, ISP_LOGDEBUG1,
440 "watchdog found done cmd (handle 0x%x)", handle);
441 ISP_IUNLOCK(isp);
442 return;
443 }
444
445 if (XS_CMD_WDOG_P(xs)) {
446 isp_prt(isp, ISP_LOGDEBUG1,
447 "recursive watchdog (handle 0x%x)", handle);
448 ISP_IUNLOCK(isp);
449 return;
450 }
451
452 XS_CMD_S_WDOG(xs);
453
454 i = 0;
455 do {
456 r = ISP_READ(isp, BIU_ISR);
457 USEC_DELAY(1);
458 r1 = ISP_READ(isp, BIU_ISR);
459 } while (r != r1 && ++i < 1000);
460
461 if (INT_PENDING(isp, r) && isp_intr(isp) && XS_CMD_DONE_P(xs)) {
462 isp_prt(isp, ISP_LOGDEBUG1, "watchdog cleanup (%x, %x)",
463 handle, r);
464 XS_CMD_C_WDOG(xs);
465 isp_done(xs);
466 } else if (XS_CMD_GRACE_P(xs)) {
467 isp_prt(isp, ISP_LOGDEBUG1, "watchdog timeout (%x, %x)",
468 handle, r);
469 /*
470 * Make sure the command is *really* dead before we
471 * release the handle (and DMA resources) for reuse.
472 */
473 (void) isp_control(isp, ISPCTL_ABORT_CMD, arg);
474
475 /*
476 * After this point, the comamnd is really dead.
477 */
478 if (XS_XFRLEN(xs)) {
479 ISP_DMAFREE(isp, xs, handle);
480 }
481 isp_destroy_handle(isp, handle);
482 XS_SETERR(xs, XS_TIMEOUT);
483 XS_CMD_S_CLEAR(xs);
484 isp_done(xs);
485 } else {
486 u_int16_t iptr, optr;
487 ispreq_t *mp;
488 isp_prt(isp, ISP_LOGDEBUG2,
489 "possible command timeout (%x, %x)", handle, r);
490 XS_CMD_C_WDOG(xs);
491 callout_reset(&xs->xs_callout, hz, isp_dog, xs);
492 if (isp_getrqentry(isp, &iptr, &optr, (void **) &mp)) {
493 ISP_IUNLOCK(isp);
494 return;
495 }
496 XS_CMD_S_GRACE(xs);
497 MEMZERO((void *) mp, sizeof (*mp));
498 mp->req_header.rqs_entry_count = 1;
499 mp->req_header.rqs_entry_type = RQSTYPE_MARKER;
500 mp->req_modifier = SYNC_ALL;
501 mp->req_target = XS_CHANNEL(xs) << 7;
502 ISP_SWIZZLE_REQUEST(isp, mp);
503 ISP_ADD_REQUEST(isp, iptr);
504 }
505 } else {
506 isp_prt(isp, ISP_LOGDEBUG0, "watchdog with no command");
507 }
508 ISP_IUNLOCK(isp);
509 }
510
/*
 * Free any associated resources prior to decommissioning and
 * set the card to a known state (so it doesn't wake up and kick
 * us when we aren't expecting it to).
 *
 * Locks are held before coming here.
 */
void
isp_uninit(isp)
	struct ispsoftc *isp;
{
	isp_lock(isp);
	/*
	 * Leave with interrupts disabled.
	 */
	DISABLE_INTS(isp);
	isp_unlock(isp);
}
529
530 /*
531 * Restart function for a command to be requeued later.
532 */
533 static void
534 isp_command_requeue(arg)
535 void *arg;
536 {
537 struct scsipi_xfer *xs = arg;
538 struct ispsoftc *isp = XS_ISP(xs);
539 ISP_ILOCK(isp);
540 switch (ispcmd(xs)) {
541 case SUCCESSFULLY_QUEUED:
542 isp_prt(isp, ISP_LOGINFO,
543 "requeued commands for %d.%d", XS_TGT(xs), XS_LUN(xs));
544 if (xs->timeout) {
545 callout_reset(&xs->xs_callout, _XT(xs), isp_dog, xs);
546 }
547 break;
548 case TRY_AGAIN_LATER:
549 isp_prt(isp, ISP_LOGINFO,
550 "EAGAIN on requeue for %d.%d", XS_TGT(xs), XS_LUN(xs));
551 callout_reset(&xs->xs_callout, hz, isp_command_requeue, xs);
552 break;
553 case COMPLETE:
554 /* can only be an error */
555 XS_CMD_S_DONE(xs);
556 callout_stop(&xs->xs_callout);
557 if (XS_NOERR(xs)) {
558 XS_SETERR(xs, HBA_BOTCH);
559 }
560 scsipi_done(xs);
561 break;
562 }
563 ISP_IUNLOCK(isp);
564 }
565
566 /*
567 * Restart function after a LOOP UP event (e.g.),
568 * done as a timeout for some hysteresis.
569 */
570 static void
571 isp_internal_restart(arg)
572 void *arg;
573 {
574 struct ispsoftc *isp = arg;
575 int result, nrestarted = 0;
576
577 ISP_ILOCK(isp);
578 if (isp->isp_osinfo.blocked == 0) {
579 struct scsipi_xfer *xs;
580 while ((xs = TAILQ_FIRST(&isp->isp_osinfo.waitq)) != NULL) {
581 TAILQ_REMOVE(&isp->isp_osinfo.waitq, xs, adapter_q);
582 result = isp_start(xs);
583 if (result != CMD_QUEUED) {
584 isp_prt(isp, ISP_LOGERR,
585 "botched command restart (err=%d)", result);
586 XS_CMD_S_DONE(xs);
587 if (xs->error == XS_NOERROR)
588 xs->error = XS_DRIVER_STUFFUP;
589 callout_stop(&xs->xs_callout);
590 scsipi_done(xs);
591 } else if (xs->timeout) {
592 callout_reset(&xs->xs_callout,
593 _XT(xs), isp_dog, xs);
594 }
595 nrestarted++;
596 }
597 isp_prt(isp, ISP_LOGINFO,
598 "isp_restart requeued %d commands", nrestarted);
599 }
600 ISP_IUNLOCK(isp);
601 }
602
603 int
604 isp_async(isp, cmd, arg)
605 struct ispsoftc *isp;
606 ispasync_t cmd;
607 void *arg;
608 {
609 int bus, tgt;
610 int s = splbio();
611 switch (cmd) {
612 case ISPASYNC_NEW_TGT_PARAMS:
613 if (IS_SCSI(isp) && isp->isp_dblev) {
614 sdparam *sdp = isp->isp_param;
615 char *wt;
616 int mhz, flags, period;
617
618 tgt = *((int *) arg);
619 bus = (tgt >> 16) & 0xffff;
620 tgt &= 0xffff;
621 sdp += bus;
622 flags = sdp->isp_devparam[tgt].cur_dflags;
623 period = sdp->isp_devparam[tgt].cur_period;
624
625 if ((flags & DPARM_SYNC) && period &&
626 (sdp->isp_devparam[tgt].cur_offset) != 0) {
627 /*
628 * There's some ambiguity about our negotiated speed
629 * if we haven't detected LVD mode correctly (which
630 * seems to happen, unfortunately). If we're in LVD
631 * mode, then different rules apply about speed.
632 */
633 if (sdp->isp_lvdmode || period < 0xc) {
634 switch (period) {
635 case 0x9:
636 mhz = 80;
637 break;
638 case 0xa:
639 mhz = 40;
640 break;
641 case 0xb:
642 mhz = 33;
643 break;
644 case 0xc:
645 mhz = 25;
646 break;
647 default:
648 mhz = 1000 / (period * 4);
649 break;
650 }
651 } else {
652 mhz = 1000 / (period * 4);
653 }
654 } else {
655 mhz = 0;
656 }
657 switch (flags & (DPARM_WIDE|DPARM_TQING)) {
658 case DPARM_WIDE:
659 wt = ", 16 bit wide";
660 break;
661 case DPARM_TQING:
662 wt = ", Tagged Queueing Enabled";
663 break;
664 case DPARM_WIDE|DPARM_TQING:
665 wt = ", 16 bit wide, Tagged Queueing Enabled";
666 break;
667 default:
668 wt = " ";
669 break;
670 }
671 if (mhz) {
672 isp_prt(isp, ISP_LOGINFO,
673 "Bus %d Target %d at %dMHz Max Offset %d%s",
674 bus, tgt, mhz, sdp->isp_devparam[tgt].cur_offset,
675 wt);
676 } else {
677 isp_prt(isp, ISP_LOGINFO,
678 "Bus %d Target %d Async Mode%s", bus, tgt, wt);
679 }
680 break;
681 }
682 case ISPASYNC_BUS_RESET:
683 if (arg)
684 bus = *((int *) arg);
685 else
686 bus = 0;
687 isp_prt(isp, ISP_LOGINFO, "SCSI bus %d reset detected", bus);
688 break;
689 case ISPASYNC_LOOP_DOWN:
690 /*
691 * Hopefully we get here in time to minimize the number
692 * of commands we are firing off that are sure to die.
693 */
694 isp->isp_osinfo.blocked = 1;
695 isp_prt(isp, ISP_LOGINFO, "Loop DOWN");
696 break;
697 case ISPASYNC_LOOP_UP:
698 isp->isp_osinfo.blocked = 0;
699 callout_reset(&isp->isp_osinfo._restart, 1,
700 isp_internal_restart, isp);
701 isp_prt(isp, ISP_LOGINFO, "Loop UP");
702 break;
703 case ISPASYNC_PDB_CHANGED:
704 if (IS_FC(isp) && isp->isp_dblev) {
705 const char *fmt = "Target %d (Loop 0x%x) Port ID 0x%x "
706 "role %s %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x";
707 const static char *roles[4] = {
708 "No", "Target", "Initiator", "Target/Initiator"
709 };
710 char *ptr;
711 fcparam *fcp = isp->isp_param;
712 int tgt = *((int *) arg);
713 struct lportdb *lp = &fcp->portdb[tgt];
714
715 if (lp->valid) {
716 ptr = "arrived";
717 } else {
718 ptr = "disappeared";
719 }
720 isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid, lp->portid,
721 roles[lp->roles & 0x3], ptr,
722 (u_int32_t) (lp->port_wwn >> 32),
723 (u_int32_t) (lp->port_wwn & 0xffffffffLL),
724 (u_int32_t) (lp->node_wwn >> 32),
725 (u_int32_t) (lp->node_wwn & 0xffffffffLL));
726 break;
727 }
728 #ifdef ISP2100_FABRIC
729 case ISPASYNC_CHANGE_NOTIFY:
730 isp_prt(isp, ISP_LOGINFO, "Name Server Database Changed");
731 break;
732 case ISPASYNC_FABRIC_DEV:
733 {
734 int target;
735 struct lportdb *lp;
736 sns_scrsp_t *resp = (sns_scrsp_t *) arg;
737 u_int32_t portid;
738 u_int64_t wwn;
739 fcparam *fcp = isp->isp_param;
740
741 portid =
742 (((u_int32_t) resp->snscb_port_id[0]) << 16) |
743 (((u_int32_t) resp->snscb_port_id[1]) << 8) |
744 (((u_int32_t) resp->snscb_port_id[2]));
745 wwn =
746 (((u_int64_t)resp->snscb_portname[0]) << 56) |
747 (((u_int64_t)resp->snscb_portname[1]) << 48) |
748 (((u_int64_t)resp->snscb_portname[2]) << 40) |
749 (((u_int64_t)resp->snscb_portname[3]) << 32) |
750 (((u_int64_t)resp->snscb_portname[4]) << 24) |
751 (((u_int64_t)resp->snscb_portname[5]) << 16) |
752 (((u_int64_t)resp->snscb_portname[6]) << 8) |
753 (((u_int64_t)resp->snscb_portname[7]));
754 isp_prt(isp, ISP_LOGINFO,
755 "Fabric Device (Type 0x%x)@PortID 0x%x WWN 0x%08x%08x",
756 resp->snscb_port_type, portid, ((u_int32_t)(wwn >> 32)),
757 ((u_int32_t)(wwn & 0xffffffff)));
758 if (resp->snscb_port_type != 2)
759 break;
760 for (target = FC_SNS_ID+1; target < MAX_FC_TARG; target++) {
761 lp = &fcp->portdb[target];
762 if (lp->port_wwn == wwn)
763 break;
764 }
765 if (target < MAX_FC_TARG) {
766 break;
767 }
768 for (target = FC_SNS_ID+1; target < MAX_FC_TARG; target++) {
769 lp = &fcp->portdb[target];
770 if (lp->port_wwn == 0)
771 break;
772 }
773 if (target == MAX_FC_TARG) {
774 isp_prt(isp, ISP_LOGWARN,
775 "no more space for fabric devices");
776 return (-1);
777 }
778 lp->port_wwn = lp->node_wwn = wwn;
779 lp->portid = portid;
780 break;
781 }
782 #endif
783 default:
784 break;
785 }
786 (void) splx(s);
787 return (0);
788 }
789
790 #include <machine/stdarg.h>
791 void
792 #ifdef __STDC__
793 isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
794 #else
795 isp_prt(isp, fmt, va_alist)
796 struct ispsoftc *isp;
797 char *fmt;
798 va_dcl;
799 #endif
800 {
801 va_list ap;
802 if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
803 return;
804 }
805 printf("%s: ", isp->isp_name);
806 va_start(ap, fmt);
807 vprintf(fmt, ap);
808 va_end(ap);
809 printf("\n");
810 }
811