isp_netbsd.c revision 1.34 1 /* $NetBSD: isp_netbsd.c,v 1.34 2000/12/23 01:37:57 wiz Exp $ */
2 /*
3 * This driver, which is contained in NetBSD in the files:
4 *
5 * sys/dev/ic/isp.c
6 * sys/dev/ic/isp_inline.h
7 * sys/dev/ic/isp_netbsd.c
8 * sys/dev/ic/isp_netbsd.h
9 * sys/dev/ic/isp_target.c
10 * sys/dev/ic/isp_target.h
11 * sys/dev/ic/isp_tpublic.h
12 * sys/dev/ic/ispmbox.h
13 * sys/dev/ic/ispreg.h
14 * sys/dev/ic/ispvar.h
15 * sys/microcode/isp/asm_sbus.h
16 * sys/microcode/isp/asm_1040.h
17 * sys/microcode/isp/asm_1080.h
18 * sys/microcode/isp/asm_12160.h
19 * sys/microcode/isp/asm_2100.h
20 * sys/microcode/isp/asm_2200.h
21 * sys/pci/isp_pci.c
22 * sys/sbus/isp_sbus.c
23 *
24 * Is being actively maintained by Matthew Jacob (mjacob (at) netbsd.org).
 * This driver also is shared source with FreeBSD, OpenBSD, Linux, and
 * Solaris versions. This tends to be an interesting maintenance problem.
27 *
28 * Please coordinate with Matthew Jacob on changes you wish to make here.
29 */
30 /*
31 * Platform (NetBSD) dependent common attachment code for Qlogic adapters.
32 * Matthew Jacob <mjacob (at) nas.nasa.gov>
33 */
34 /*
35 * Copyright (C) 1997, 1998, 1999 National Aeronautics & Space Administration
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. The name of the author may not be used to endorse or promote products
47 * derived from this software without specific prior written permission
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
50 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
51 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
52 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
53 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
54 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
55 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
56 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
57 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
58 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
59 */
60
61 #include <dev/ic/isp_netbsd.h>
62 #include <sys/scsiio.h>
63
64
65 /*
66 * Set a timeout for the watchdogging of a command.
67 *
68 * The dimensional analysis is
69 *
70 * milliseconds * (seconds/millisecond) * (ticks/second) = ticks
71 *
72 * =
73 *
74 * (milliseconds / 1000) * hz = ticks
75 *
76 *
77 * For timeouts less than 1 second, we'll get zero. Because of this, and
78 * because we want to establish *our* timeout to be longer than what the
79 * firmware might do, we just add 3 seconds at the back end.
80 */
81 #define _XT(xs) ((((xs)->timeout/1000) * hz) + (3 * hz))
82
83 static void ispminphys __P((struct buf *));
84 static int32_t ispcmd __P((XS_T *));
85 static int
86 ispioctl __P((struct scsipi_link *, u_long, caddr_t, int, struct proc *));
87
88 static struct scsipi_device isp_dev = { NULL, NULL, NULL, NULL };
89 static int isp_polled_cmd __P((struct ispsoftc *, XS_T *));
90 static void isp_dog __P((void *));
91 static void isp_command_requeue __P((void *));
92 static void isp_internal_restart __P((void *));
93
94 /*
95 * Complete attachment of hardware, include subdevices.
96 */
97 void
98 isp_attach(isp)
99 struct ispsoftc *isp;
100 {
101 int maxluns;
102 isp->isp_osinfo._adapter.scsipi_minphys = ispminphys;
103 isp->isp_osinfo._adapter.scsipi_ioctl = ispioctl;
104 isp->isp_osinfo._adapter.scsipi_cmd = ispcmd;
105
106 isp->isp_state = ISP_RUNSTATE;
107 isp->isp_osinfo._link.scsipi_scsi.channel =
108 (IS_DUALBUS(isp))? 0 : SCSI_CHANNEL_ONLY_ONE;
109 isp->isp_osinfo._link.adapter_softc = isp;
110 isp->isp_osinfo._link.device = &isp_dev;
111 isp->isp_osinfo._link.adapter = &isp->isp_osinfo._adapter;
112 isp->isp_osinfo._link.openings = isp->isp_maxcmds;
113 isp->isp_osinfo._link.scsipi_scsi.max_lun = maxluns;
114 /*
115 * Until the midlayer is fixed to use REPORT LUNS, limit to 8 luns.
116 */
117 isp->isp_osinfo._link.scsipi_scsi.max_lun =
118 (isp->isp_maxluns < 7)? isp->isp_maxluns - 1 : 7;
119 TAILQ_INIT(&isp->isp_osinfo.waitq); /* The 2nd bus will share.. */
120
121 if (IS_FC(isp)) {
122 isp->isp_osinfo._link.scsipi_scsi.max_target = MAX_FC_TARG-1;
123 } else {
124 sdparam *sdp = isp->isp_param;
125 isp->isp_osinfo._link.scsipi_scsi.max_target = MAX_TARGETS-1;
126 isp->isp_osinfo._link.scsipi_scsi.adapter_target =
127 sdp->isp_initiator_id;
128 isp->isp_osinfo.discovered[0] = 1 << sdp->isp_initiator_id;
129 if (IS_DUALBUS(isp)) {
130 isp->isp_osinfo._link_b = isp->isp_osinfo._link;
131 sdp++;
132 isp->isp_osinfo.discovered[1] =
133 1 << sdp->isp_initiator_id;
134 isp->isp_osinfo._link_b.scsipi_scsi.adapter_target =
135 sdp->isp_initiator_id;
136 isp->isp_osinfo._link_b.scsipi_scsi.channel = 1;
137 isp->isp_osinfo._link_b.scsipi_scsi.max_lun =
138 isp->isp_osinfo._link.scsipi_scsi.max_lun;
139 }
140 }
141 isp->isp_osinfo._link.type = BUS_SCSI;
142
143 /*
144 * Send a SCSI Bus Reset.
145 */
146 if (IS_SCSI(isp)) {
147 int bus = 0;
148 ISP_LOCK(isp);
149 (void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
150 if (IS_DUALBUS(isp)) {
151 bus++;
152 (void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
153 }
154 ISP_UNLOCK(isp);
155 } else {
156 int defid;
157 fcparam *fcp = isp->isp_param;
158 delay(2 * 1000000);
159 defid = MAX_FC_TARG;
160 ISP_LOCK(isp);
161 /*
162 * We probably won't have clock interrupts running,
163 * so we'll be really short (smoke test, really)
164 * at this time.
165 */
166 if (isp_control(isp, ISPCTL_FCLINK_TEST, NULL)) {
167 (void) isp_control(isp, ISPCTL_PDB_SYNC, NULL);
168 if (fcp->isp_fwstate == FW_READY &&
169 fcp->isp_loopstate >= LOOP_PDB_RCVD) {
170 defid = fcp->isp_loopid;
171 }
172 }
173 ISP_UNLOCK(isp);
174 isp->isp_osinfo._link.scsipi_scsi.adapter_target = defid;
175 }
176
177 /*
178 * After this point, we'll be doing the new configuration
179 * schema which allows interrups, so we can do tsleep/wakeup
180 * for mailbox stuff at that point.
181 */
182 isp->isp_osinfo.no_mbox_ints = 0;
183
184 /*
185 * And attach children (if any).
186 */
187 config_found((void *)isp, &isp->isp_osinfo._link, scsiprint);
188 if (IS_DUALBUS(isp)) {
189 config_found((void *)isp, &isp->isp_osinfo._link_b, scsiprint);
190 }
191 }
192
193 /*
194 * minphys our xfers
195 *
196 * Unfortunately, the buffer pointer describes the target device- not the
197 * adapter device, so we can't use the pointer to find out what kind of
198 * adapter we are and adjust accordingly.
199 */
200
201 static void
202 ispminphys(bp)
203 struct buf *bp;
204 {
205 /*
206 * XX: Only the 1020 has a 24 bit limit.
207 */
208 if (bp->b_bcount >= (1 << 24)) {
209 bp->b_bcount = (1 << 24);
210 }
211 minphys(bp);
212 }
213
/*
 * Adapter-specific ioctl handler.
 *
 * SCBUSACCEL: fold the requested per-target negotiation features
 *	(tagged queueing, wide, sync) into the stored device parameters
 *	for the addressed channel and flag a firmware parameter update.
 *	Parallel-SCSI adapters with lun 0 only; always returns 0, even
 *	when the request is silently ignored (FC adapter or lun != 0).
 * SCBUSIORESET: reset the addressed SCSI bus; returns EIO on failure.
 * Anything else: ENOTTY.
 */
static int
ispioctl(sc_link, cmd, addr, flag, p)
	struct scsipi_link *sc_link;
	u_long cmd;
	caddr_t addr;
	int flag;
	struct proc *p;
{
	struct ispsoftc *isp = sc_link->adapter_softc;
	int s, chan, retval = ENOTTY;

	/* Map the midlayer's single-channel cookie to controller bus 0. */
	chan = (sc_link->scsipi_scsi.channel == SCSI_CHANNEL_ONLY_ONE)? 0 :
	    sc_link->scsipi_scsi.channel;

	switch (cmd) {
	case SCBUSACCEL:
	{
		struct scbusaccel_args *sp = (struct scbusaccel_args *)addr;
		if (IS_SCSI(isp) && sp->sa_lun == 0) {
			int dflags = 0;
			sdparam *sdp = SDPARAM(isp);

			sdp += chan;	/* per-bus parameter block */
			if (sp->sa_flags & SC_ACCEL_TAGS)
				dflags |= DPARM_TQING;
			if (sp->sa_flags & SC_ACCEL_WIDE)
				dflags |= DPARM_WIDE;
			if (sp->sa_flags & SC_ACCEL_SYNC)
				dflags |= DPARM_SYNC;
			/* Update device parameters at splbio. */
			s = splbio();
			sdp->isp_devparam[sp->sa_target].dev_flags |= dflags;
			dflags = sdp->isp_devparam[sp->sa_target].dev_flags;
			sdp->isp_devparam[sp->sa_target].dev_update = 1;
			isp->isp_update |= (1 << chan);
			splx(s);
			isp_prt(isp, ISP_LOGDEBUG1,
			    "ispioctl: device flags 0x%x for %d.%d.X",
			    dflags, chan, sp->sa_target);
		}
		retval = 0;
		break;
	}
	case SCBUSIORESET:
		s = splbio();
		if (isp_control(isp, ISPCTL_RESET_BUS, &chan))
			retval = EIO;
		else
			retval = 0;
		(void) splx(s);
		break;
	default:
		break;
	}
	return (retval);
}
269
270
/*
 * scsipi command entry point.
 *
 * Brings the HBA to run state if needed, queues or polls the command,
 * and arms the watchdog callout for queued commands with a timeout.
 * Returns SUCCESSFULLY_QUEUED, TRY_AGAIN_LATER, or COMPLETE.
 */
static int32_t
ispcmd(xs)
	XS_T *xs;
{
	struct ispsoftc *isp;
	int result, s;

	isp = XS_ISP(xs);
	s = splbio();
	if (isp->isp_state < ISP_RUNSTATE) {
		/* (Re)initialize with interrupts masked off. */
		DISABLE_INTS(isp);
		isp_init(isp);
		if (isp->isp_state != ISP_INITSTATE) {
			/* init failed; fail the command outright */
			ENABLE_INTS(isp);
			(void) splx(s);
			XS_SETERR(xs, HBA_BOTCH);
			return (COMPLETE);
		}
		isp->isp_state = ISP_RUNSTATE;
		ENABLE_INTS(isp);
	}

	/*
	 * Check for queue blockage (e.g. FC loop down). Polled commands
	 * cannot wait, so they fail; others park on the wait queue until
	 * isp_internal_restart() drains it.
	 */
	if (isp->isp_osinfo.blocked) {
		if (xs->xs_control & XS_CTL_POLL) {
			xs->error = XS_DRIVER_STUFFUP;
			splx(s);
			return (TRY_AGAIN_LATER);
		}
		TAILQ_INSERT_TAIL(&isp->isp_osinfo.waitq, xs, adapter_q);
		splx(s);
		return (SUCCESSFULLY_QUEUED);
	}

	if (xs->xs_control & XS_CTL_POLL) {
		/* Suppress mailbox interrupts while polling, then restore. */
		volatile u_int8_t ombi = isp->isp_osinfo.no_mbox_ints;
		isp->isp_osinfo.no_mbox_ints = 1;
		result = isp_polled_cmd(isp, xs);
		isp->isp_osinfo.no_mbox_ints = ombi;
		(void) splx(s);
		return (result);
	}

	result = isp_start(xs);
#if 0
	{
		static int na[16] = { 0 };
		if (na[isp->isp_unit] < isp->isp_nactive) {
			isp_prt(isp, ISP_LOGALL, "active hiwater %d", isp->isp_nactive);
			na[isp->isp_unit] = isp->isp_nactive;
		}
	}
#endif
	/* Translate core-layer result codes to scsipi result codes. */
	switch (result) {
	case CMD_QUEUED:
		result = SUCCESSFULLY_QUEUED;
		if (xs->timeout) {
			callout_reset(&xs->xs_callout, _XT(xs), isp_dog, xs);
		}
		break;
	case CMD_EAGAIN:
		result = TRY_AGAIN_LATER;
		break;
	case CMD_RQLATER:
		/* Accept the command now; retry it from a callout later. */
		result = SUCCESSFULLY_QUEUED;
		callout_reset(&xs->xs_callout, hz, isp_command_requeue, xs);
		break;
	case CMD_COMPLETE:
		result = COMPLETE;
		break;
	}
	(void) splx(s);
	return (result);
}
347
/*
 * Run a command to completion by polling the interrupt handler.
 *
 * Called with mailbox interrupts suppressed (see ispcmd). Busy-waits in
 * 1ms steps up to the command's timeout (forever if the timeout is 0).
 * If the command never completes it is aborted; if the abort itself
 * fails, the chip is reinitialized. Always returns COMPLETE once the
 * command was accepted by isp_start().
 */
static int
isp_polled_cmd(isp, xs)
	struct ispsoftc *isp;
	XS_T *xs;
{
	int result;
	int infinite = 0, mswait;

	result = isp_start(xs);

	switch (result) {
	case CMD_QUEUED:
		result = SUCCESSFULLY_QUEUED;
		break;
	case CMD_RQLATER:
	case CMD_EAGAIN:
		/* Cannot defer a polled command; report driver trouble. */
		if (XS_NOERR(xs)) {
			xs->error = XS_DRIVER_STUFFUP;
		}
		result = TRY_AGAIN_LATER;
		break;
	case CMD_COMPLETE:
		result = COMPLETE;
		break;

	}

	if (result != SUCCESSFULLY_QUEUED) {
		return (result);
	}

	/*
	 * If we can't use interrupts, poll on completion.
	 */
	if ((mswait = XS_TIME(xs)) == 0)
		infinite = 1;

	while (mswait || infinite) {
		if (isp_intr((void *)isp)) {
			if (XS_CMD_DONE_P(xs)) {
				break;
			}
		}
		USEC_DELAY(1000);	/* 1 millisecond per iteration */
		mswait -= 1;
	}

	/*
	 * If no other error occurred but we didn't finish,
	 * something bad happened.
	 */
	if (XS_CMD_DONE_P(xs) == 0) {
		if (isp_control(isp, ISPCTL_ABORT_CMD, xs)) {
			/* Abort failed; the chip is wedged. Start over. */
			isp_reinit(isp);
		}
		if (XS_NOERR(xs)) {
			XS_SETERR(xs, HBA_BOTCH);
		}
	}
	result = COMPLETE;
	return (result);
}
410
411 void
412 isp_done(xs)
413 XS_T *xs;
414 {
415 XS_CMD_S_DONE(xs);
416 if (XS_CMD_WDOG_P(xs) == 0) {
417 struct ispsoftc *isp = XS_ISP(xs);
418 callout_stop(&xs->xs_callout);
419 if (XS_CMD_GRACE_P(xs)) {
420 isp_prt(isp, ISP_LOGDEBUG1,
421 "finished command on borrowed time");
422 }
423 XS_CMD_S_CLEAR(xs);
424 scsipi_done(xs);
425 }
426 }
427
428 static void
429 isp_dog(arg)
430 void *arg;
431 {
432 XS_T *xs = arg;
433 struct ispsoftc *isp = XS_ISP(xs);
434 u_int32_t handle;
435
436 ISP_ILOCK(isp);
437 /*
438 * We've decided this command is dead. Make sure we're not trying
439 * to kill a command that's already dead by getting it's handle and
440 * and seeing whether it's still alive.
441 */
442 handle = isp_find_handle(isp, xs);
443 if (handle) {
444 u_int16_t r, r1, i;
445
446 if (XS_CMD_DONE_P(xs)) {
447 isp_prt(isp, ISP_LOGDEBUG1,
448 "watchdog found done cmd (handle 0x%x)", handle);
449 ISP_IUNLOCK(isp);
450 return;
451 }
452
453 if (XS_CMD_WDOG_P(xs)) {
454 isp_prt(isp, ISP_LOGDEBUG1,
455 "recursive watchdog (handle 0x%x)", handle);
456 ISP_IUNLOCK(isp);
457 return;
458 }
459
460 XS_CMD_S_WDOG(xs);
461
462 i = 0;
463 do {
464 r = ISP_READ(isp, BIU_ISR);
465 USEC_DELAY(1);
466 r1 = ISP_READ(isp, BIU_ISR);
467 } while (r != r1 && ++i < 1000);
468
469 if (INT_PENDING(isp, r) && isp_intr(isp) && XS_CMD_DONE_P(xs)) {
470 isp_prt(isp, ISP_LOGDEBUG1, "watchdog cleanup (%x, %x)",
471 handle, r);
472 XS_CMD_C_WDOG(xs);
473 isp_done(xs);
474 } else if (XS_CMD_GRACE_P(xs)) {
475 isp_prt(isp, ISP_LOGDEBUG1, "watchdog timeout (%x, %x)",
476 handle, r);
477 /*
478 * Make sure the command is *really* dead before we
479 * release the handle (and DMA resources) for reuse.
480 */
481 (void) isp_control(isp, ISPCTL_ABORT_CMD, arg);
482
483 /*
484 * After this point, the comamnd is really dead.
485 */
486 if (XS_XFRLEN(xs)) {
487 ISP_DMAFREE(isp, xs, handle);
488 }
489 isp_destroy_handle(isp, handle);
490 XS_SETERR(xs, XS_TIMEOUT);
491 XS_CMD_S_CLEAR(xs);
492 isp_done(xs);
493 } else {
494 u_int16_t iptr, optr;
495 ispreq_t *mp;
496 isp_prt(isp, ISP_LOGDEBUG2,
497 "possible command timeout (%x, %x)", handle, r);
498 XS_CMD_C_WDOG(xs);
499 callout_reset(&xs->xs_callout, hz, isp_dog, xs);
500 if (isp_getrqentry(isp, &iptr, &optr, (void **) &mp)) {
501 ISP_UNLOCK(isp);
502 return;
503 }
504 XS_CMD_S_GRACE(xs);
505 MEMZERO((void *) mp, sizeof (*mp));
506 mp->req_header.rqs_entry_count = 1;
507 mp->req_header.rqs_entry_type = RQSTYPE_MARKER;
508 mp->req_modifier = SYNC_ALL;
509 mp->req_target = XS_CHANNEL(xs) << 7;
510 ISP_SWIZZLE_REQUEST(isp, mp);
511 ISP_ADD_REQUEST(isp, iptr);
512 }
513 } else {
514 isp_prt(isp, ISP_LOGDEBUG0, "watchdog with no command");
515 }
516 ISP_IUNLOCK(isp);
517 }
518
/*
 * Free any associated resources prior to decommissioning and
 * set the card to a known state (so it doesn't wake up and kick
 * us when we aren't expecting it to).
 *
 * Locks are held before coming here.
 */
void
isp_uninit(isp)
	struct ispsoftc *isp;
{
	isp_lock(isp);
	/*
	 * Leave with interrupts disabled.
	 */
	DISABLE_INTS(isp);
	isp_unlock(isp);
}
537
/*
 * Restart function for a command to be requeued later.
 *
 * Callout handler armed by ispcmd() when the core returned CMD_RQLATER.
 * Re-submits the command via ispcmd(); on success, re-arms the watchdog;
 * on EAGAIN, retries again in one second; on COMPLETE (which here can
 * only mean failure), finishes the command with an error.
 */
static void
isp_command_requeue(arg)
	void *arg;
{
	struct scsipi_xfer *xs = arg;
	struct ispsoftc *isp = XS_ISP(xs);
	ISP_LOCK(isp);
	switch (ispcmd(xs)) {
	case SUCCESSFULLY_QUEUED:
		isp_prt(isp, ISP_LOGINFO,
		    "requeued commands for %d.%d", XS_TGT(xs), XS_LUN(xs));
		if (xs->timeout) {
			callout_reset(&xs->xs_callout, _XT(xs), isp_dog, xs);
		}
		break;
	case TRY_AGAIN_LATER:
		isp_prt(isp, ISP_LOGINFO,
		    "EAGAIN on requeue for %d.%d", XS_TGT(xs), XS_LUN(xs));
		callout_reset(&xs->xs_callout, hz, isp_command_requeue, xs);
		break;
	case COMPLETE:
		/* can only be an error */
		XS_CMD_S_DONE(xs);
		callout_stop(&xs->xs_callout);
		if (XS_NOERR(xs)) {
			XS_SETERR(xs, HBA_BOTCH);
		}
		scsipi_done(xs);
		break;
	}
	ISP_UNLOCK(isp);
}
573
574 /*
575 * Restart function after a LOOP UP event (e.g.),
576 * done as a timeout for some hysteresis.
577 */
578 static void
579 isp_internal_restart(arg)
580 void *arg;
581 {
582 struct ispsoftc *isp = arg;
583 int result, nrestarted = 0;
584
585 ISP_LOCK(isp);
586 if (isp->isp_osinfo.blocked == 0) {
587 struct scsipi_xfer *xs;
588 while ((xs = TAILQ_FIRST(&isp->isp_osinfo.waitq)) != NULL) {
589 TAILQ_REMOVE(&isp->isp_osinfo.waitq, xs, adapter_q);
590 result = isp_start(xs);
591 if (result != CMD_QUEUED) {
592 isp_prt(isp, ISP_LOGERR,
593 "botched command restart (err=%d)", result);
594 XS_CMD_S_DONE(xs);
595 if (xs->error == XS_NOERROR)
596 xs->error = XS_DRIVER_STUFFUP;
597 callout_stop(&xs->xs_callout);
598 scsipi_done(xs);
599 } else if (xs->timeout) {
600 callout_reset(&xs->xs_callout,
601 _XT(xs), isp_dog, xs);
602 }
603 nrestarted++;
604 }
605 isp_prt(isp, ISP_LOGINFO,
606 "isp_restart requeued %d commands", nrestarted);
607 }
608 ISP_UNLOCK(isp);
609 }
610
611 int
612 isp_async(isp, cmd, arg)
613 struct ispsoftc *isp;
614 ispasync_t cmd;
615 void *arg;
616 {
617 int bus, tgt;
618 int s = splbio();
619 switch (cmd) {
620 case ISPASYNC_NEW_TGT_PARAMS:
621 if (IS_SCSI(isp) && isp->isp_dblev) {
622 sdparam *sdp = isp->isp_param;
623 char *wt;
624 int mhz, flags, period;
625
626 tgt = *((int *) arg);
627 bus = (tgt >> 16) & 0xffff;
628 tgt &= 0xffff;
629 sdp += bus;
630 flags = sdp->isp_devparam[tgt].cur_dflags;
631 period = sdp->isp_devparam[tgt].cur_period;
632
633 if ((flags & DPARM_SYNC) && period &&
634 (sdp->isp_devparam[tgt].cur_offset) != 0) {
635 /*
636 * There's some ambiguity about our negotiated speed
637 * if we haven't detected LVD mode correctly (which
638 * seems to happen, unfortunately). If we're in LVD
639 * mode, then different rules apply about speed.
640 */
641 if (sdp->isp_lvdmode || period < 0xc) {
642 switch (period) {
643 case 0x9:
644 mhz = 80;
645 break;
646 case 0xa:
647 mhz = 40;
648 break;
649 case 0xb:
650 mhz = 33;
651 break;
652 case 0xc:
653 mhz = 25;
654 break;
655 default:
656 mhz = 1000 / (period * 4);
657 break;
658 }
659 } else {
660 mhz = 1000 / (period * 4);
661 }
662 } else {
663 mhz = 0;
664 }
665 switch (flags & (DPARM_WIDE|DPARM_TQING)) {
666 case DPARM_WIDE:
667 wt = ", 16 bit wide";
668 break;
669 case DPARM_TQING:
670 wt = ", Tagged Queueing Enabled";
671 break;
672 case DPARM_WIDE|DPARM_TQING:
673 wt = ", 16 bit wide, Tagged Queueing Enabled";
674 break;
675 default:
676 wt = " ";
677 break;
678 }
679 if (mhz) {
680 isp_prt(isp, ISP_LOGINFO,
681 "Bus %d Target %d at %dMHz Max Offset %d%s",
682 bus, tgt, mhz, sdp->isp_devparam[tgt].cur_offset,
683 wt);
684 } else {
685 isp_prt(isp, ISP_LOGINFO,
686 "Bus %d Target %d Async Mode%s", bus, tgt, wt);
687 }
688 break;
689 }
690 case ISPASYNC_BUS_RESET:
691 if (arg)
692 bus = *((int *) arg);
693 else
694 bus = 0;
695 isp_prt(isp, ISP_LOGINFO, "SCSI bus %d reset detected", bus);
696 break;
697 case ISPASYNC_LOOP_DOWN:
698 /*
699 * Hopefully we get here in time to minimize the number
700 * of commands we are firing off that are sure to die.
701 */
702 isp->isp_osinfo.blocked = 1;
703 isp_prt(isp, ISP_LOGINFO, "Loop DOWN");
704 break;
705 case ISPASYNC_LOOP_UP:
706 isp->isp_osinfo.blocked = 0;
707 callout_reset(&isp->isp_osinfo._restart, 1,
708 isp_internal_restart, isp);
709 isp_prt(isp, ISP_LOGINFO, "Loop UP");
710 break;
711 case ISPASYNC_PDB_CHANGED:
712 if (IS_FC(isp) && isp->isp_dblev) {
713 const char *fmt = "Target %d (Loop 0x%x) Port ID 0x%x "
714 "role %s %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x";
715 const static char *roles[4] = {
716 "No", "Target", "Initiator", "Target/Initiator"
717 };
718 char *ptr;
719 fcparam *fcp = isp->isp_param;
720 int tgt = *((int *) arg);
721 struct lportdb *lp = &fcp->portdb[tgt];
722
723 if (lp->valid) {
724 ptr = "arrived";
725 } else {
726 ptr = "disappeared";
727 }
728 isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid, lp->portid,
729 roles[lp->roles & 0x3], ptr,
730 (u_int32_t) (lp->port_wwn >> 32),
731 (u_int32_t) (lp->port_wwn & 0xffffffffLL),
732 (u_int32_t) (lp->node_wwn >> 32),
733 (u_int32_t) (lp->node_wwn & 0xffffffffLL));
734 break;
735 }
736 #ifdef ISP2100_FABRIC
737 case ISPASYNC_CHANGE_NOTIFY:
738 isp_prt(isp, ISP_LOGINFO, "Name Server Database Changed");
739 break;
740 case ISPASYNC_FABRIC_DEV:
741 {
742 int target;
743 struct lportdb *lp;
744 sns_scrsp_t *resp = (sns_scrsp_t *) arg;
745 u_int32_t portid;
746 u_int64_t wwn;
747 fcparam *fcp = isp->isp_param;
748
749 portid =
750 (((u_int32_t) resp->snscb_port_id[0]) << 16) |
751 (((u_int32_t) resp->snscb_port_id[1]) << 8) |
752 (((u_int32_t) resp->snscb_port_id[2]));
753 wwn =
754 (((u_int64_t)resp->snscb_portname[0]) << 56) |
755 (((u_int64_t)resp->snscb_portname[1]) << 48) |
756 (((u_int64_t)resp->snscb_portname[2]) << 40) |
757 (((u_int64_t)resp->snscb_portname[3]) << 32) |
758 (((u_int64_t)resp->snscb_portname[4]) << 24) |
759 (((u_int64_t)resp->snscb_portname[5]) << 16) |
760 (((u_int64_t)resp->snscb_portname[6]) << 8) |
761 (((u_int64_t)resp->snscb_portname[7]));
762
763 isp_prt(isp, ISP_LOGINFO,
764 "Fabric Device (Type 0x%x)@PortID 0x%x WWN 0x%08x%08x",
765 resp->snscb_port_type, portid, ((u_int32_t)(wwn >> 32)),
766 ((u_int32_t)(wwn & 0xffffffff)));
767
768 for (target = FC_SNS_ID+1; target < MAX_FC_TARG; target++) {
769 lp = &fcp->portdb[target];
770 if (lp->port_wwn == wwn)
771 break;
772 }
773 if (target < MAX_FC_TARG) {
774 break;
775 }
776 for (target = FC_SNS_ID+1; target < MAX_FC_TARG; target++) {
777 lp = &fcp->portdb[target];
778 if (lp->port_wwn == 0)
779 break;
780 }
781 if (target == MAX_FC_TARG) {
782 isp_prt(isp, ISP_LOGWARN,
783 "no more space for fabric devices");
784 return (-1);
785 }
786 lp->port_wwn = lp->node_wwn = wwn;
787 lp->portid = portid;
788 break;
789 }
790 #endif
791 default:
792 break;
793 }
794 (void) splx(s);
795 return (0);
796 }
797
798 #include <machine/stdarg.h>
799 void
800 #ifdef __STDC__
801 isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
802 #else
803 isp_prt(isp, fmt, va_alist)
804 struct ispsoftc *isp;
805 char *fmt;
806 va_dcl;
807 #endif
808 {
809 va_list ap;
810 if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
811 return;
812 }
813 printf("%s: ", isp->isp_name);
814 va_start(ap, fmt);
815 vprintf(fmt, ap);
816 va_end(ap);
817 printf("\n");
818 }
819