/* $NetBSD: isp_netbsd.c,v 1.40 2001/03/14 05:44:21 mjacob Exp $ */
/*
 * This driver, which is contained in NetBSD in the files:
 *
 *	sys/dev/ic/isp.c
 *	sys/dev/ic/isp_inline.h
 *	sys/dev/ic/isp_netbsd.c
 *	sys/dev/ic/isp_netbsd.h
 *	sys/dev/ic/isp_target.c
 *	sys/dev/ic/isp_target.h
 *	sys/dev/ic/isp_tpublic.h
 *	sys/dev/ic/ispmbox.h
 *	sys/dev/ic/ispreg.h
 *	sys/dev/ic/ispvar.h
 *	sys/microcode/isp/asm_sbus.h
 *	sys/microcode/isp/asm_1040.h
 *	sys/microcode/isp/asm_1080.h
 *	sys/microcode/isp/asm_12160.h
 *	sys/microcode/isp/asm_2100.h
 *	sys/microcode/isp/asm_2200.h
 *	sys/pci/isp_pci.c
 *	sys/sbus/isp_sbus.c
 *
 * Is being actively maintained by Matthew Jacob (mjacob@netbsd.org).
 * This driver also shares source with the FreeBSD, OpenBSD, Linux, and
 * Solaris versions, which tends to make for an interesting maintenance
 * problem.
 *
 * Please coordinate with Matthew Jacob on changes you wish to make here.
 */
/*
 * Platform (NetBSD) dependent common attachment code for Qlogic adapters.
 * Matthew Jacob <mjacob@nas.nasa.gov>
 */
/*
 * Copyright (C) 1997, 1998, 1999 National Aeronautics & Space Administration
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <dev/ic/isp_netbsd.h>
#include <sys/scsiio.h>


/*
 * Set a timeout for the watchdogging of a command.
 *
 * The dimensional analysis is
 *
 *	milliseconds * (seconds/millisecond) * (ticks/second) = ticks
 *
 *	=
 *
 *	(milliseconds / 1000) * hz = ticks
 *
 *
 * For timeouts less than 1 second, we'll get zero. Because of this, and
 * because we want to establish *our* timeout to be longer than what the
 * firmware might do, we just add 3 seconds at the back end.
 */
#define _XT(xs) ((((xs)->timeout/1000) * hz) + (3 * hz))
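/*
 * For example (assuming hz = 100), a 10000ms command timeout becomes
 * (10000 / 1000) * 100 + (3 * 100) = 1300 ticks, i.e. 13 seconds, while
 * a 500ms timeout truncates to 0 ticks and still gets the 3 second pad.
 */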

static void ispminphys(struct buf *);
static int32_t ispcmd(XS_T *);
static int ispioctl (struct scsipi_link *, u_long, caddr_t, int, struct proc *);

static struct scsipi_device isp_dev = { NULL, NULL, NULL, NULL };
static int isp_polled_cmd(struct ispsoftc *, XS_T *);
static void isp_dog(void *);
static void isp_command_requeue(void *);
static void isp_internal_restart(void *);

/*
 * Complete attachment of hardware, including subdevices.
 */
void
isp_attach(struct ispsoftc *isp)
{
	isp->isp_osinfo._adapter.scsipi_minphys = ispminphys;
	isp->isp_osinfo._adapter.scsipi_ioctl = ispioctl;
	isp->isp_osinfo._adapter.scsipi_cmd = ispcmd;

	isp->isp_state = ISP_RUNSTATE;
	isp->isp_osinfo._link.scsipi_scsi.channel =
	    (IS_DUALBUS(isp))? 0 : SCSI_CHANNEL_ONLY_ONE;
	isp->isp_osinfo._link.adapter_softc = isp;
	isp->isp_osinfo._link.device = &isp_dev;
	isp->isp_osinfo._link.adapter = &isp->isp_osinfo._adapter;
	isp->isp_osinfo._link.openings = isp->isp_maxcmds;
	/*
	 * Until the midlayer is fixed to use REPORT LUNS, limit to 8 luns.
	 */
	isp->isp_osinfo._link.scsipi_scsi.max_lun =
	    (isp->isp_maxluns < 7)? isp->isp_maxluns - 1 : 7;
	TAILQ_INIT(&isp->isp_osinfo.waitq);	/* The 2nd bus will share.. */

	if (IS_FC(isp)) {
		isp->isp_osinfo._link.scsipi_scsi.max_target = MAX_FC_TARG-1;
	} else {
		sdparam *sdp = isp->isp_param;
		isp->isp_osinfo._link.scsipi_scsi.max_target = MAX_TARGETS-1;
		isp->isp_osinfo._link.scsipi_scsi.adapter_target =
		    sdp->isp_initiator_id;
		isp->isp_osinfo.discovered[0] = 1 << sdp->isp_initiator_id;
		if (IS_DUALBUS(isp)) {
			isp->isp_osinfo._link_b = isp->isp_osinfo._link;
			sdp++;
			isp->isp_osinfo.discovered[1] =
			    1 << sdp->isp_initiator_id;
			isp->isp_osinfo._link_b.scsipi_scsi.adapter_target =
			    sdp->isp_initiator_id;
			isp->isp_osinfo._link_b.scsipi_scsi.channel = 1;
			isp->isp_osinfo._link_b.scsipi_scsi.max_lun =
			    isp->isp_osinfo._link.scsipi_scsi.max_lun;
		}
	}
	isp->isp_osinfo._link.type = BUS_SCSI;

	/*
	 * Send a SCSI Bus Reset.
	 */
	if (IS_SCSI(isp)) {
		int bus = 0;
		ISP_LOCK(isp);
		(void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
		if (IS_DUALBUS(isp)) {
			bus++;
			(void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
		}
		ISP_UNLOCK(isp);
	} else {
		int defid;
		fcparam *fcp = isp->isp_param;
		delay(2 * 1000000);
		defid = MAX_FC_TARG;
		ISP_LOCK(isp);
		/*
		 * We probably won't have clock interrupts running yet,
		 * so this amounts to a very short check (a smoke test,
		 * really) at this time.
		 */
		if (isp_control(isp, ISPCTL_FCLINK_TEST, NULL)) {
			(void) isp_control(isp, ISPCTL_PDB_SYNC, NULL);
			if (fcp->isp_fwstate == FW_READY &&
			    fcp->isp_loopstate >= LOOP_PDB_RCVD) {
				defid = fcp->isp_loopid;
			}
		}
		ISP_UNLOCK(isp);
		isp->isp_osinfo._link.scsipi_scsi.adapter_target = defid;
	}

	/*
	 * After this point, we'll be doing the new configuration
	 * schema, which allows interrupts, so we can use tsleep/wakeup
	 * for mailbox waits from here on.
	 */
	isp->isp_osinfo.no_mbox_ints = 0;

	/*
	 * And attach children (if any).
	 */
	config_found((void *)isp, &isp->isp_osinfo._link, scsiprint);
	if (IS_DUALBUS(isp)) {
		config_found((void *)isp, &isp->isp_osinfo._link_b, scsiprint);
	}
}

/*
 * minphys our xfers
 *
 * Unfortunately, the buffer pointer describes the target device, not the
 * adapter device, so we can't use the pointer to find out what kind of
 * adapter we are and adjust accordingly.
 */

static void
ispminphys(struct buf *bp)
{
	/*
	 * XXX: Only the 1020 has a 24 bit limit.
	 */
	if (bp->b_bcount >= (1 << 24)) {
		bp->b_bcount = (1 << 24);
	}
	minphys(bp);
}

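/*
 * Handle adapter-specific ioctls from the midlayer. SCBUSACCEL enables
 * per-target sync/wide/tagged-queueing negotiation goals (SPI adapters
 * only) and marks the bus for a parameter update; SCBUSIORESET resets
 * the selected bus.
 */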
static int
ispioctl(struct scsipi_link *sc_link, u_long cmd, caddr_t addr,
	int flag, struct proc *p)
{
	struct ispsoftc *isp = sc_link->adapter_softc;
	int s, chan, retval = ENOTTY;

	chan = (sc_link->scsipi_scsi.channel == SCSI_CHANNEL_ONLY_ONE)? 0 :
	    sc_link->scsipi_scsi.channel;

	switch (cmd) {
	case SCBUSACCEL:
	{
		struct scbusaccel_args *sp = (struct scbusaccel_args *)addr;
		if (IS_SCSI(isp) && sp->sa_lun == 0) {
			int dflags = 0;
			sdparam *sdp = SDPARAM(isp);

			sdp += chan;
			if (sp->sa_flags & SC_ACCEL_TAGS)
				dflags |= DPARM_TQING;
			if (sp->sa_flags & SC_ACCEL_WIDE)
				dflags |= DPARM_WIDE;
			if (sp->sa_flags & SC_ACCEL_SYNC)
				dflags |= DPARM_SYNC;
			s = splbio();
			sdp->isp_devparam[sp->sa_target].dev_flags |= dflags;
			dflags = sdp->isp_devparam[sp->sa_target].dev_flags;
			sdp->isp_devparam[sp->sa_target].dev_update = 1;
			isp->isp_update |= (1 << chan);
			splx(s);
			isp_prt(isp, ISP_LOGDEBUG1,
			    "ispioctl: device flags 0x%x for %d.%d.X",
			    dflags, chan, sp->sa_target);
		}
		retval = 0;
		break;
	}
	case SCBUSIORESET:
		s = splbio();
		if (isp_control(isp, ISPCTL_RESET_BUS, &chan))
			retval = EIO;
		else
			retval = 0;
		(void) splx(s);
		break;
	default:
		break;
	}
	return (retval);
}


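/*
 * Queue a command from the midlayer. If the chip isn't at run state yet,
 * (re)initialize it first. Commands arriving while the channel is blocked
 * (e.g. Fibre Channel loop down) are parked on the wait queue, polled
 * commands are run to completion via isp_polled_cmd, and everything else
 * is handed to isp_start with a watchdog timeout armed behind it.
 */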
static int32_t
ispcmd(XS_T *xs)
{
	struct ispsoftc *isp;
	int result, s;

	isp = XS_ISP(xs);
	s = splbio();
	if (isp->isp_state < ISP_RUNSTATE) {
		DISABLE_INTS(isp);
		isp_init(isp);
		if (isp->isp_state != ISP_INITSTATE) {
			ENABLE_INTS(isp);
			(void) splx(s);
			XS_SETERR(xs, HBA_BOTCH);
			return (COMPLETE);
		}
		isp->isp_state = ISP_RUNSTATE;
		ENABLE_INTS(isp);
	}

	/*
	 * Check for queue blockage...
	 */
	if (isp->isp_osinfo.blocked) {
		if (xs->xs_control & XS_CTL_POLL) {
			xs->error = XS_DRIVER_STUFFUP;
			splx(s);
			return (TRY_AGAIN_LATER);
		}
		TAILQ_INSERT_TAIL(&isp->isp_osinfo.waitq, xs, adapter_q);
		splx(s);
		return (SUCCESSFULLY_QUEUED);
	}

	if (xs->xs_control & XS_CTL_POLL) {
		volatile u_int8_t ombi = isp->isp_osinfo.no_mbox_ints;
		isp->isp_osinfo.no_mbox_ints = 1;
		result = isp_polled_cmd(isp, xs);
		isp->isp_osinfo.no_mbox_ints = ombi;
		(void) splx(s);
		return (result);
	}

	result = isp_start(xs);
#if 0
	{
		static int na[16] = { 0 };
		if (na[isp->isp_unit] < isp->isp_nactive) {
			isp_prt(isp, ISP_LOGALL, "active hiwater %d",
			    isp->isp_nactive);
			na[isp->isp_unit] = isp->isp_nactive;
		}
	}
#endif
	switch (result) {
	case CMD_QUEUED:
		result = SUCCESSFULLY_QUEUED;
		if (xs->timeout) {
			callout_reset(&xs->xs_callout, _XT(xs), isp_dog, xs);
		}
		break;
	case CMD_EAGAIN:
		result = TRY_AGAIN_LATER;
		break;
	case CMD_RQLATER:
		result = SUCCESSFULLY_QUEUED;
		callout_reset(&xs->xs_callout, hz, isp_command_requeue, xs);
		break;
	case CMD_COMPLETE:
		result = COMPLETE;
		break;
	}
	(void) splx(s);
	return (result);
}

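/*
 * Run a command with mailbox interrupts unavailable: start it, then spin
 * on isp_intr() until the command completes or its time limit expires, in
 * which case it is aborted (and the chip reinitialized if the abort fails).
 */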
static int
isp_polled_cmd(struct ispsoftc *isp, XS_T *xs)
{
	int result;
	int infinite = 0, mswait;

	result = isp_start(xs);

	switch (result) {
	case CMD_QUEUED:
		result = SUCCESSFULLY_QUEUED;
		break;
	case CMD_RQLATER:
	case CMD_EAGAIN:
		if (XS_NOERR(xs)) {
			xs->error = XS_DRIVER_STUFFUP;
		}
		result = TRY_AGAIN_LATER;
		break;
	case CMD_COMPLETE:
		result = COMPLETE;
		break;
	}

	if (result != SUCCESSFULLY_QUEUED) {
		return (result);
	}

	/*
	 * If we can't use interrupts, poll on completion.
	 */
	if ((mswait = XS_TIME(xs)) == 0)
		infinite = 1;

	while (mswait || infinite) {
		if (isp_intr((void *)isp)) {
			if (XS_CMD_DONE_P(xs)) {
				break;
			}
		}
		USEC_DELAY(1000);
		mswait -= 1;
	}

	/*
	 * If no other error occurred but we didn't finish,
	 * something bad happened.
	 */
	if (XS_CMD_DONE_P(xs) == 0) {
		if (isp_control(isp, ISPCTL_ABORT_CMD, xs)) {
			isp_reinit(isp);
		}
		if (XS_NOERR(xs)) {
			XS_SETERR(xs, HBA_BOTCH);
		}
	}
	result = COMPLETE;
	return (result);
}

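/*
 * Completion entry point called by the core driver. Unless the watchdog
 * currently owns the command, cancel the watchdog callout and hand the
 * transfer back to the midlayer.
 */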
void
isp_done(XS_T *xs)
{
	XS_CMD_S_DONE(xs);
	if (XS_CMD_WDOG_P(xs) == 0) {
		struct ispsoftc *isp = XS_ISP(xs);
		callout_stop(&xs->xs_callout);
		if (XS_CMD_GRACE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1,
			    "finished command on borrowed time");
		}
		XS_CMD_S_CLEAR(xs);
		scsipi_done(xs);
	}
}

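/*
 * Per-command watchdog. If the command still has a live handle, first see
 * whether a pending interrupt actually completes it; otherwise grant one
 * grace period (pushing a SYNC_ALL marker through the request queue) and,
 * if it is still outstanding when the watchdog fires again, abort it and
 * complete it with XS_TIMEOUT.
 */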
static void
isp_dog(void *arg)
{
	XS_T *xs = arg;
	struct ispsoftc *isp = XS_ISP(xs);
	u_int16_t handle;

	ISP_ILOCK(isp);
	/*
	 * We've decided this command is dead. Make sure we're not trying
	 * to kill a command that's already dead by getting its handle and
	 * seeing whether it's still alive.
	 */
	handle = isp_find_handle(isp, xs);
	if (handle) {
		u_int16_t r, r1, i;

		if (XS_CMD_DONE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1,
			    "watchdog found done cmd (handle 0x%x)", handle);
			ISP_IUNLOCK(isp);
			return;
		}

		if (XS_CMD_WDOG_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1,
			    "recursive watchdog (handle 0x%x)", handle);
			ISP_IUNLOCK(isp);
			return;
		}

		XS_CMD_S_WDOG(xs);

		i = 0;
		do {
			r = ISP_READ(isp, BIU_ISR);
			USEC_DELAY(1);
			r1 = ISP_READ(isp, BIU_ISR);
		} while (r != r1 && ++i < 1000);

		if (INT_PENDING(isp, r) && isp_intr(isp) && XS_CMD_DONE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1, "watchdog cleanup (%x, %x)",
			    handle, r);
			XS_CMD_C_WDOG(xs);
			isp_done(xs);
		} else if (XS_CMD_GRACE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1, "watchdog timeout (%x, %x)",
			    handle, r);
			/*
			 * Make sure the command is *really* dead before we
			 * release the handle (and DMA resources) for reuse.
			 */
			(void) isp_control(isp, ISPCTL_ABORT_CMD, arg);

			/*
			 * After this point, the command is really dead.
			 */
			if (XS_XFRLEN(xs)) {
				ISP_DMAFREE(isp, xs, handle);
			}
			isp_destroy_handle(isp, handle);
			XS_SETERR(xs, XS_TIMEOUT);
			XS_CMD_S_CLEAR(xs);
			isp_done(xs);
		} else {
			u_int16_t iptr, optr;
			ispreq_t *mp;
			isp_prt(isp, ISP_LOGDEBUG2,
			    "possible command timeout (%x, %x)", handle, r);
			XS_CMD_C_WDOG(xs);
			callout_reset(&xs->xs_callout, hz, isp_dog, xs);
			if (isp_getrqentry(isp, &iptr, &optr, (void **) &mp)) {
				ISP_IUNLOCK(isp);	/* match the ISP_ILOCK above */
				return;
			}
			XS_CMD_S_GRACE(xs);
			MEMZERO((void *) mp, sizeof (*mp));
			mp->req_header.rqs_entry_count = 1;
			mp->req_header.rqs_entry_type = RQSTYPE_MARKER;
			mp->req_modifier = SYNC_ALL;
			mp->req_target = XS_CHANNEL(xs) << 7;
			ISP_SWIZZLE_REQUEST(isp, mp);
			ISP_ADD_REQUEST(isp, iptr);
		}
	} else {
		isp_prt(isp, ISP_LOGDEBUG0, "watchdog with no command");
	}
	ISP_IUNLOCK(isp);
}

/*
 * Free any associated resources prior to decommissioning and
 * set the card to a known state (so it doesn't wake up and kick
 * us when we aren't expecting it to).
 *
 * Locks are held before coming here.
 */
void
isp_uninit(struct ispsoftc *isp)
{
	isp_lock(isp);
	/*
	 * Leave with interrupts disabled.
	 */
	DISABLE_INTS(isp);
	isp_unlock(isp);
}

/*
 * Restart function for a command to be requeued later.
 */
static void
isp_command_requeue(void *arg)
{
	struct scsipi_xfer *xs = arg;
	struct ispsoftc *isp = XS_ISP(xs);
	ISP_ILOCK(isp);
	switch (ispcmd(xs)) {
	case SUCCESSFULLY_QUEUED:
		isp_prt(isp, ISP_LOGINFO,
		    "requeued commands for %d.%d", XS_TGT(xs), XS_LUN(xs));
		if (xs->timeout) {
			callout_reset(&xs->xs_callout, _XT(xs), isp_dog, xs);
		}
		break;
	case TRY_AGAIN_LATER:
		isp_prt(isp, ISP_LOGINFO,
		    "EAGAIN on requeue for %d.%d", XS_TGT(xs), XS_LUN(xs));
		callout_reset(&xs->xs_callout, hz, isp_command_requeue, xs);
		break;
	case COMPLETE:
		/* can only be an error */
		XS_CMD_S_DONE(xs);
		callout_stop(&xs->xs_callout);
		if (XS_NOERR(xs)) {
			XS_SETERR(xs, HBA_BOTCH);
		}
		scsipi_done(xs);
		break;
	}
	ISP_IUNLOCK(isp);
}

/*
 * Restart function run after an event such as LOOP UP, deferred through
 * a timeout to provide some hysteresis.
 */
static void
isp_internal_restart(void *arg)
{
	struct ispsoftc *isp = arg;
	int result, nrestarted = 0;

	ISP_ILOCK(isp);
	if (isp->isp_osinfo.blocked == 0) {
		struct scsipi_xfer *xs;
		while ((xs = TAILQ_FIRST(&isp->isp_osinfo.waitq)) != NULL) {
			TAILQ_REMOVE(&isp->isp_osinfo.waitq, xs, adapter_q);
			result = isp_start(xs);
			if (result != CMD_QUEUED) {
				isp_prt(isp, ISP_LOGERR,
				    "botched command restart (err=%d)", result);
				XS_CMD_S_DONE(xs);
				if (xs->error == XS_NOERROR)
					xs->error = XS_DRIVER_STUFFUP;
				callout_stop(&xs->xs_callout);
				scsipi_done(xs);
			} else if (xs->timeout) {
				callout_reset(&xs->xs_callout,
				    _XT(xs), isp_dog, xs);
			}
			nrestarted++;
		}
		isp_prt(isp, ISP_LOGINFO,
		    "isp_restart requeued %d commands", nrestarted);
	}
	ISP_IUNLOCK(isp);
}

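/*
 * Asynchronous event handler called by the core driver: reports newly
 * negotiated SCSI parameters, bus resets, and FC loop transitions (which
 * block/unblock the queue), logs port arrivals and departures, and folds
 * fabric nameserver announcements into the local port database.
 */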
int
isp_async(struct ispsoftc *isp, ispasync_t cmd, void *arg)
{
	int bus, tgt;
	int s = splbio();
	switch (cmd) {
	case ISPASYNC_NEW_TGT_PARAMS:
	if (IS_SCSI(isp) && isp->isp_dblev) {
		sdparam *sdp = isp->isp_param;
		char *wt;
		int mhz, flags, period;

		tgt = *((int *) arg);
		bus = (tgt >> 16) & 0xffff;
		tgt &= 0xffff;
		sdp += bus;
		flags = sdp->isp_devparam[tgt].cur_dflags;
		period = sdp->isp_devparam[tgt].cur_period;

		if ((flags & DPARM_SYNC) && period &&
		    (sdp->isp_devparam[tgt].cur_offset) != 0) {
			/*
			 * There's some ambiguity about our negotiated speed
			 * if we haven't detected LVD mode correctly (which
			 * seems to happen, unfortunately). If we're in LVD
			 * mode, then different rules apply about speed.
			 */
			if (sdp->isp_lvdmode || period < 0xc) {
				switch (period) {
				case 0x9:
					mhz = 80;
					break;
				case 0xa:
					mhz = 40;
					break;
				case 0xb:
					mhz = 33;
					break;
				case 0xc:
					mhz = 25;
					break;
				default:
					mhz = 1000 / (period * 4);
					break;
				}
			} else {
				mhz = 1000 / (period * 4);
			}
		} else {
			mhz = 0;
		}
		switch (flags & (DPARM_WIDE|DPARM_TQING)) {
		case DPARM_WIDE:
			wt = ", 16 bit wide";
			break;
		case DPARM_TQING:
			wt = ", Tagged Queueing Enabled";
			break;
		case DPARM_WIDE|DPARM_TQING:
			wt = ", 16 bit wide, Tagged Queueing Enabled";
			break;
		default:
			wt = " ";
			break;
		}
		if (mhz) {
			isp_prt(isp, ISP_LOGINFO,
			    "Bus %d Target %d at %dMHz Max Offset %d%s",
			    bus, tgt, mhz, sdp->isp_devparam[tgt].cur_offset,
			    wt);
		} else {
			isp_prt(isp, ISP_LOGINFO,
			    "Bus %d Target %d Async Mode%s", bus, tgt, wt);
		}
		break;
	}
	case ISPASYNC_BUS_RESET:
		if (arg)
			bus = *((int *) arg);
		else
			bus = 0;
		isp_prt(isp, ISP_LOGINFO, "SCSI bus %d reset detected", bus);
		break;
	case ISPASYNC_LOOP_DOWN:
		/*
		 * Hopefully we get here in time to minimize the number
		 * of commands we are firing off that are sure to die.
		 */
		isp->isp_osinfo.blocked = 1;
		isp_prt(isp, ISP_LOGINFO, "Loop DOWN");
		break;
	case ISPASYNC_LOOP_UP:
		isp->isp_osinfo.blocked = 0;
		callout_reset(&isp->isp_osinfo._restart, 1,
		    isp_internal_restart, isp);
		isp_prt(isp, ISP_LOGINFO, "Loop UP");
		break;
	case ISPASYNC_PROMENADE:
	if (IS_FC(isp) && isp->isp_dblev) {
		const char fmt[] = "Target %d (Loop 0x%x) Port ID 0x%x "
		    "(role %s) %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x";
		static const char *roles[4] = {
			"No", "Target", "Initiator", "Target/Initiator"
		};
		fcparam *fcp = isp->isp_param;
		int tgt = *((int *) arg);
		struct lportdb *lp = &fcp->portdb[tgt];

		isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid, lp->portid,
		    roles[lp->roles & 0x3],
		    (lp->valid)? "Arrived" : "Departed",
		    (u_int32_t) (lp->port_wwn >> 32),
		    (u_int32_t) (lp->port_wwn & 0xffffffffLL),
		    (u_int32_t) (lp->node_wwn >> 32),
		    (u_int32_t) (lp->node_wwn & 0xffffffffLL));
		break;
	}
	case ISPASYNC_CHANGE_NOTIFY:
		if (arg == (void *) 1) {
			isp_prt(isp, ISP_LOGINFO,
			    "Name Server Database Changed");
		} else {
			isp_prt(isp, ISP_LOGINFO,
			    "Port Database Changed");
		}
		break;
	case ISPASYNC_FABRIC_DEV:
	{
		int target, lrange;
		struct lportdb *lp = NULL;
		char *pt;
		sns_ganrsp_t *resp = (sns_ganrsp_t *) arg;
		u_int32_t portid;
		u_int64_t wwpn, wwnn;
		fcparam *fcp = isp->isp_param;

		portid =
		    (((u_int32_t) resp->snscb_port_id[0]) << 16) |
		    (((u_int32_t) resp->snscb_port_id[1]) << 8) |
		    (((u_int32_t) resp->snscb_port_id[2]));

		wwpn =
		    (((u_int64_t)resp->snscb_portname[0]) << 56) |
		    (((u_int64_t)resp->snscb_portname[1]) << 48) |
		    (((u_int64_t)resp->snscb_portname[2]) << 40) |
		    (((u_int64_t)resp->snscb_portname[3]) << 32) |
		    (((u_int64_t)resp->snscb_portname[4]) << 24) |
		    (((u_int64_t)resp->snscb_portname[5]) << 16) |
		    (((u_int64_t)resp->snscb_portname[6]) << 8) |
		    (((u_int64_t)resp->snscb_portname[7]));

		wwnn =
		    (((u_int64_t)resp->snscb_nodename[0]) << 56) |
		    (((u_int64_t)resp->snscb_nodename[1]) << 48) |
		    (((u_int64_t)resp->snscb_nodename[2]) << 40) |
		    (((u_int64_t)resp->snscb_nodename[3]) << 32) |
		    (((u_int64_t)resp->snscb_nodename[4]) << 24) |
		    (((u_int64_t)resp->snscb_nodename[5]) << 16) |
		    (((u_int64_t)resp->snscb_nodename[6]) << 8) |
		    (((u_int64_t)resp->snscb_nodename[7]));
		if (portid == 0 || wwpn == 0) {
			break;
		}

		switch (resp->snscb_port_type) {
		case 1:
			pt = " N_Port";
			break;
		case 2:
			pt = " NL_Port";
			break;
		case 3:
			pt = "F/NL_Port";
			break;
		case 0x7f:
			pt = " Nx_Port";
			break;
		case 0x81:
			pt = " F_port";
			break;
		case 0x82:
			pt = " FL_Port";
			break;
		case 0x84:
			pt = " E_port";
			break;
		default:
			pt = "?";
			break;
		}
		isp_prt(isp, ISP_LOGINFO,
		    "%s @ 0x%x, Node 0x%08x%08x Port %08x%08x",
		    pt, portid, ((u_int32_t) (wwnn >> 32)), ((u_int32_t) wwnn),
		    ((u_int32_t) (wwpn >> 32)), ((u_int32_t) wwpn));
		/*
		 * We're only interested in SCSI_FCP types (for now)
		 */
		if ((resp->snscb_fc4_types[2] & 1) == 0) {
			break;
		}
		if (fcp->isp_topo != TOPO_F_PORT)
			lrange = FC_SNS_ID+1;
		else
			lrange = 0;
		/*
		 * Is it already in our list?
		 */
		for (target = lrange; target < MAX_FC_TARG; target++) {
			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
				continue;
			}
			lp = &fcp->portdb[target];
			if (lp->port_wwn == wwpn && lp->node_wwn == wwnn) {
				lp->fabric_dev = 1;
				break;
			}
		}
		if (target < MAX_FC_TARG) {
			break;
		}
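		/*
		 * Not already known; claim the first free port database
		 * slot (again skipping the reserved loop/SNS IDs).
		 */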
		for (target = lrange; target < MAX_FC_TARG; target++) {
			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
				continue;
			}
			lp = &fcp->portdb[target];
			if (lp->port_wwn == 0) {
				break;
			}
		}
		if (target == MAX_FC_TARG) {
			isp_prt(isp, ISP_LOGWARN,
			    "no more space for fabric devices");
			break;
		}
		lp->node_wwn = wwnn;
		lp->port_wwn = wwpn;
		lp->portid = portid;
		lp->fabric_dev = 1;
		break;
	}
	default:
		break;
	}
	(void) splx(s);
	return (0);
}

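/*
 * Driver printf: prefix the message with the unit name and print it if
 * the given level is ISP_LOGALL or is enabled in isp_dblev.
 */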
#include <machine/stdarg.h>
void
isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
{
	va_list ap;
	if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
		return;
	}
	printf("%s: ", isp->isp_name);
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
	printf("\n");
}