/* $NetBSD: isp_netbsd.c,v 1.18.2.13 2001/03/27 15:31:58 bouyer Exp $ */
/*
 * This driver, which is contained in NetBSD in the files:
 *
 *	sys/dev/ic/isp.c
 *	sys/dev/ic/isp_inline.h
 *	sys/dev/ic/isp_netbsd.c
 *	sys/dev/ic/isp_netbsd.h
 *	sys/dev/ic/isp_target.c
 *	sys/dev/ic/isp_target.h
 *	sys/dev/ic/isp_tpublic.h
 *	sys/dev/ic/ispmbox.h
 *	sys/dev/ic/ispreg.h
 *	sys/dev/ic/ispvar.h
 *	sys/microcode/isp/asm_sbus.h
 *	sys/microcode/isp/asm_1040.h
 *	sys/microcode/isp/asm_1080.h
 *	sys/microcode/isp/asm_12160.h
 *	sys/microcode/isp/asm_2100.h
 *	sys/microcode/isp/asm_2200.h
 *	sys/pci/isp_pci.c
 *	sys/sbus/isp_sbus.c
 *
 * is being actively maintained by Matthew Jacob (mjacob@netbsd.org).
 * This driver also is shared source with the FreeBSD, OpenBSD, Linux, and
 * Solaris versions. This tends to be an interesting maintenance problem.
 *
 * Please coordinate with Matthew Jacob on changes you wish to make here.
 */
/*
 * Platform (NetBSD) dependent common attachment code for Qlogic adapters.
 * Matthew Jacob <mjacob@nas.nasa.gov>
 */
/*
 * Copyright (C) 1997, 1998, 1999 National Aeronautics & Space Administration
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <dev/ic/isp_netbsd.h>
#include <sys/scsiio.h>


/*
 * Set a timeout for the watchdogging of a command.
 *
 * The dimensional analysis is
 *
 *	milliseconds * (seconds/millisecond) * (ticks/second) = ticks
 *
 *	=
 *
 *	(milliseconds / 1000) * hz = ticks
 *
 * For timeouts less than 1 second, we'll get zero. Because of this, and
 * because we want to establish *our* timeout to be longer than what the
 * firmware might do, we just add 3 seconds at the back end.
 */
#define _XT(xs) ((((xs)->timeout/1000) * hz) + (3 * hz))
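/*
 * Worked example with illustrative values (assuming hz = 100, which is not
 * taken from this source): for xs->timeout = 250 (milliseconds), the integer
 * division 250/1000 yields 0, so _XT(xs) = 0 * hz + 3 * hz = 300 ticks,
 * i.e. just the 3 second pad.  For xs->timeout = 10000, _XT(xs) =
 * (10000/1000) * 100 + 300 = 1300 ticks.
 */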

static void ispminphys(struct buf *);
static void isprequest(struct scsipi_channel *,
    scsipi_adapter_req_t, void *);
static int
ispioctl(struct scsipi_channel *, u_long, caddr_t, int, struct proc *);

static void isp_polled_cmd(struct ispsoftc *, XS_T *);
static void isp_dog(void *);

/*
 * Complete attachment of hardware, including subdevices.
 */
void
isp_attach(struct ispsoftc *isp)
{
	isp->isp_state = ISP_RUNSTATE;

	isp->isp_osinfo._adapter.adapt_dev = &isp->isp_osinfo._dev;
	isp->isp_osinfo._adapter.adapt_nchannels = IS_DUALBUS(isp) ? 2 : 1;
	isp->isp_osinfo._adapter.adapt_openings = isp->isp_maxcmds;	/* XXX per adapter or per channel? */
	isp->isp_osinfo._adapter.adapt_max_periph = isp->isp_maxcmds;
	isp->isp_osinfo._adapter.adapt_ioctl = ispioctl;
	isp->isp_osinfo._adapter.adapt_request = isprequest;
	isp->isp_osinfo._adapter.adapt_minphys = ispminphys;

	isp->isp_osinfo._chan.chan_adapter = &isp->isp_osinfo._adapter;
	isp->isp_osinfo._chan.chan_bustype = &scsi_bustype;
	isp->isp_osinfo._chan.chan_channel = 0;
	/*
	 * Until the midlayer is fixed to use REPORT LUNS, limit to 8 luns.
	 */
	isp->isp_osinfo._chan.chan_nluns =
	    (isp->isp_maxluns < 8) ? isp->isp_maxluns : 8;

	TAILQ_INIT(&isp->isp_osinfo.waitq);	/* The 2nd bus will share.. */

	if (IS_FC(isp)) {
		isp->isp_osinfo._chan.chan_ntargets = MAX_FC_TARG;
	} else {
		sdparam *sdp = isp->isp_param;
		isp->isp_osinfo._chan.chan_ntargets = MAX_TARGETS;
		isp->isp_osinfo._chan.chan_id = sdp->isp_initiator_id;
		isp->isp_osinfo.discovered[0] = 1 << sdp->isp_initiator_id;
		if (IS_DUALBUS(isp)) {
			isp->isp_osinfo._chan_b = isp->isp_osinfo._chan;
			sdp++;
			isp->isp_osinfo.discovered[1] =
			    1 << sdp->isp_initiator_id;
			isp->isp_osinfo._chan_b.chan_id = sdp->isp_initiator_id;
			isp->isp_osinfo._chan_b.chan_channel = 1;
		}
	}

	/*
	 * Send a SCSI Bus Reset.
	 */
	if (IS_SCSI(isp)) {
		int bus = 0;
		ISP_LOCK(isp);
		(void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
		if (IS_DUALBUS(isp)) {
			bus++;
			(void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
		}
		ISP_UNLOCK(isp);
	} else {
		int defid;
		fcparam *fcp = isp->isp_param;
		delay(2 * 1000000);
		defid = MAX_FC_TARG;
		ISP_LOCK(isp);
		/*
		 * We probably won't have clock interrupts running,
		 * so we'll be really short (smoke test, really)
		 * at this time.
		 */
		if (isp_control(isp, ISPCTL_FCLINK_TEST, NULL)) {
			(void) isp_control(isp, ISPCTL_PDB_SYNC, NULL);
			if (fcp->isp_fwstate == FW_READY &&
			    fcp->isp_loopstate >= LOOP_PDB_RCVD) {
				defid = fcp->isp_loopid;
			}
		}
		ISP_UNLOCK(isp);
		isp->isp_osinfo._chan.chan_id = defid;
	}

	/*
	 * After this point, we'll be doing the new configuration
	 * schema, which allows interrupts, so we can do tsleep/wakeup
	 * for mailbox stuff at that point.
	 */
	isp->isp_osinfo.no_mbox_ints = 0;

	/*
	 * And attach children (if any).
	 */
	config_found((void *)isp, &isp->isp_osinfo._chan, scsiprint);
	if (IS_DUALBUS(isp)) {
		config_found((void *)isp, &isp->isp_osinfo._chan_b, scsiprint);
	}
}

/*
 * minphys our xfers
 *
 * Unfortunately, the buffer pointer describes the target device- not the
 * adapter device, so we can't use the pointer to find out what kind of
 * adapter we are and adjust accordingly.
 */

static void
ispminphys(struct buf *bp)
{
	/*
	 * XX: Only the 1020 has a 24 bit limit.
	 */
	if (bp->b_bcount >= (1 << 24)) {
		bp->b_bcount = (1 << 24);
	}
	minphys(bp);
}

static int
ispioctl(struct scsipi_channel *chan, u_long cmd, caddr_t addr, int flag,
    struct proc *p)
{
	struct ispsoftc *isp = (void *)chan->chan_adapter->adapt_dev;
	int s, retval = ENOTTY;

	switch (cmd) {
	case SCBUSIORESET:
		s = splbio();
		if (isp_control(isp, ISPCTL_RESET_BUS, &chan->chan_channel))
			retval = EIO;
		else
			retval = 0;
		(void) splx(s);
		break;
	default:
		break;
	}
	return (retval);
}

static void
isprequest(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
{
	struct scsipi_periph *periph;
	struct ispsoftc *isp = (void *)chan->chan_adapter->adapt_dev;
	XS_T *xs;
	int s, result;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		xs = arg;
		periph = xs->xs_periph;
		s = splbio();
		if (isp->isp_state < ISP_RUNSTATE) {
			DISABLE_INTS(isp);
			isp_init(isp);
			if (isp->isp_state != ISP_INITSTATE) {
				ENABLE_INTS(isp);
				(void) splx(s);
				XS_SETERR(xs, HBA_BOTCH);
				scsipi_done(xs);
				return;
			}
			isp->isp_state = ISP_RUNSTATE;
			ENABLE_INTS(isp);
		}

		if (xs->xs_control & XS_CTL_POLL) {
			volatile u_int8_t ombi = isp->isp_osinfo.no_mbox_ints;
			isp->isp_osinfo.no_mbox_ints = 1;
			isp_polled_cmd(isp, xs);
			isp->isp_osinfo.no_mbox_ints = ombi;
			(void) splx(s);
			return;
		}

		result = isp_start(xs);
#if 0
		{
			static int na[16] = { 0 };
			if (na[isp->isp_unit] < isp->isp_nactive) {
				isp_prt(isp, ISP_LOGALL,
				    "active hiwater %d", isp->isp_nactive);
				na[isp->isp_unit] = isp->isp_nactive;
			}
		}
#endif
		switch (result) {
		case CMD_QUEUED:
			if (xs->timeout) {
				callout_reset(&xs->xs_callout, _XT(xs),
				    isp_dog, xs);
			}
			break;
		case CMD_EAGAIN:
			xs->error = XS_REQUEUE;
			scsipi_done(xs);
			break;
		case CMD_RQLATER:
			xs->error = XS_RESOURCE_SHORTAGE;
			scsipi_done(xs);
			break;
		case CMD_COMPLETE:
			scsipi_done(xs);
			break;
		}
		(void) splx(s);
		return;

	case ADAPTER_REQ_GROW_RESOURCES:
		/* XXX Not supported. */
		return;

	case ADAPTER_REQ_SET_XFER_MODE:
	{
		struct scsipi_xfer_mode *xm = arg;
		if (IS_SCSI(isp)) {
			int dflags = 0;
			sdparam *sdp = SDPARAM(isp);

			sdp += chan->chan_channel;
			if (xm->xm_mode & PERIPH_CAP_TQING)
				dflags |= DPARM_TQING;
			if (xm->xm_mode & PERIPH_CAP_WIDE16)
				dflags |= DPARM_WIDE;
			if (xm->xm_mode & PERIPH_CAP_SYNC)
				dflags |= DPARM_SYNC;
			s = splbio();
			sdp->isp_devparam[xm->xm_target].dev_flags |= dflags;
			dflags = sdp->isp_devparam[xm->xm_target].dev_flags;
			sdp->isp_devparam[xm->xm_target].dev_update = 1;
			isp->isp_update |= (1 << chan->chan_channel);
			splx(s);
			isp_prt(isp, ISP_LOGDEBUG1,
			    "isprequest: device flags 0x%x for %d.%d.X",
			    dflags, chan->chan_channel, xm->xm_target);
		}
	}
	}
}

static void
isp_polled_cmd(struct ispsoftc *isp, XS_T *xs)
{
	int result;
	int infinite = 0, mswait;

	result = isp_start(xs);

	switch (result) {
	case CMD_QUEUED:
		break;
	case CMD_RQLATER:
	case CMD_EAGAIN:
		if (XS_NOERR(xs)) {
			xs->error = XS_REQUEUE;
		}
		/* FALLTHROUGH */
	case CMD_COMPLETE:
		scsipi_done(xs);
		return;

	}

	/*
	 * If we can't use interrupts, poll on completion.
	 */
	if ((mswait = XS_TIME(xs)) == 0)
		infinite = 1;

	while (mswait || infinite) {
		if (isp_intr((void *)isp)) {
			if (XS_CMD_DONE_P(xs)) {
				break;
			}
		}
		USEC_DELAY(1000);
		mswait -= 1;
	}

	/*
	 * If no other error occurred but we didn't finish,
	 * something bad happened.
	 */
	if (XS_CMD_DONE_P(xs) == 0) {
		if (isp_control(isp, ISPCTL_ABORT_CMD, xs)) {
			isp_reinit(isp);
		}
		if (XS_NOERR(xs)) {
			XS_SETERR(xs, HBA_BOTCH);
		}
	}
	scsipi_done(xs);
}

void
isp_done(XS_T *xs)
{
	XS_CMD_S_DONE(xs);
	if (XS_CMD_WDOG_P(xs) == 0) {
		struct ispsoftc *isp = XS_ISP(xs);
		callout_stop(&xs->xs_callout);
		if (XS_CMD_GRACE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1,
			    "finished command on borrowed time");
		}
		XS_CMD_S_CLEAR(xs);
		scsipi_done(xs);
	}
}

static void
isp_dog(void *arg)
{
	XS_T *xs = arg;
	struct ispsoftc *isp = XS_ISP(xs);
	u_int16_t handle;

	ISP_ILOCK(isp);
	/*
	 * We've decided this command is dead. Make sure we're not trying
	 * to kill a command that's already dead by getting its handle and
	 * seeing whether it's still alive.
	 */
	handle = isp_find_handle(isp, xs);
	if (handle) {
		u_int16_t r, r1, i;

		if (XS_CMD_DONE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1,
			    "watchdog found done cmd (handle 0x%x)", handle);
			ISP_IUNLOCK(isp);
			return;
		}

		if (XS_CMD_WDOG_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1,
			    "recursive watchdog (handle 0x%x)", handle);
			ISP_IUNLOCK(isp);
			return;
		}

		XS_CMD_S_WDOG(xs);

		i = 0;
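		/*
		 * Sample the interrupt status register twice, a microsecond
		 * apart, and retry (up to 1000 times) until both reads agree,
		 * so that we act on a stable value rather than one caught
		 * mid-change.
		 */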
		do {
			r = ISP_READ(isp, BIU_ISR);
			USEC_DELAY(1);
			r1 = ISP_READ(isp, BIU_ISR);
		} while (r != r1 && ++i < 1000);

		if (INT_PENDING(isp, r) && isp_intr(isp) && XS_CMD_DONE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1, "watchdog cleanup (%x, %x)",
			    handle, r);
			XS_CMD_C_WDOG(xs);
			isp_done(xs);
		} else if (XS_CMD_GRACE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1, "watchdog timeout (%x, %x)",
			    handle, r);
			/*
			 * Make sure the command is *really* dead before we
			 * release the handle (and DMA resources) for reuse.
			 */
			(void) isp_control(isp, ISPCTL_ABORT_CMD, arg);

			/*
			 * After this point, the command is really dead.
			 */
			if (XS_XFRLEN(xs)) {
				ISP_DMAFREE(isp, xs, handle);
			}
			isp_destroy_handle(isp, handle);
			XS_SETERR(xs, XS_TIMEOUT);
			XS_CMD_S_CLEAR(xs);
			isp_done(xs);
		} else {
			u_int16_t iptr, optr;
			ispreq_t *mp;
			isp_prt(isp, ISP_LOGDEBUG2,
			    "possible command timeout (%x, %x)", handle, r);
			XS_CMD_C_WDOG(xs);
			callout_reset(&xs->xs_callout, hz, isp_dog, xs);
			if (isp_getrqentry(isp, &iptr, &optr, (void **) &mp)) {
				ISP_IUNLOCK(isp);
				return;
			}
			XS_CMD_S_GRACE(xs);
			MEMZERO((void *) mp, sizeof (*mp));
			mp->req_header.rqs_entry_count = 1;
			mp->req_header.rqs_entry_type = RQSTYPE_MARKER;
			mp->req_modifier = SYNC_ALL;
			mp->req_target = XS_CHANNEL(xs) << 7;
			ISP_SWIZZLE_REQUEST(isp, mp);
			ISP_ADD_REQUEST(isp, iptr);
		}
	} else {
		isp_prt(isp, ISP_LOGDEBUG0, "watchdog with no command");
	}
	ISP_IUNLOCK(isp);
}

/*
 * Free any associated resources prior to decommissioning and
 * set the card to a known state (so it doesn't wake up and kick
 * us when we aren't expecting it to).
 *
 * Locks are held before coming here.
 */
void
isp_uninit(struct ispsoftc *isp)
{
	isp_lock(isp);
	/*
	 * Leave with interrupts disabled.
	 */
	DISABLE_INTS(isp);
	isp_unlock(isp);
}

int
isp_async(struct ispsoftc *isp, ispasync_t cmd, void *arg)
{
	int bus, tgt;
	int s = splbio();
	switch (cmd) {
	case ISPASYNC_NEW_TGT_PARAMS:
		if (IS_SCSI(isp) && isp->isp_dblev) {
			sdparam *sdp = isp->isp_param;
			int flags;
			struct scsipi_xfer_mode xm;

			tgt = *((int *) arg);
			bus = (tgt >> 16) & 0xffff;
			tgt &= 0xffff;
			sdp += bus;
			flags = sdp->isp_devparam[tgt].cur_dflags;

			xm.xm_mode = 0;
			xm.xm_period = sdp->isp_devparam[tgt].cur_period;
			xm.xm_offset = sdp->isp_devparam[tgt].cur_offset;
			xm.xm_target = tgt;

			if ((flags & DPARM_SYNC) && xm.xm_period && xm.xm_offset)
				xm.xm_mode |= PERIPH_CAP_SYNC;
			if (flags & DPARM_WIDE)
				xm.xm_mode |= PERIPH_CAP_WIDE16;
			if (flags & DPARM_TQING)
				xm.xm_mode |= PERIPH_CAP_TQING;
			scsipi_async_event(bus ?
			    &isp->isp_osinfo._chan_b : &isp->isp_osinfo._chan,
			    ASYNC_EVENT_XFER_MODE, &xm);
			break;
		}
	case ISPASYNC_BUS_RESET:
		if (arg)
			bus = *((int *) arg);
		else
			bus = 0;
		isp_prt(isp, ISP_LOGINFO, "SCSI bus %d reset detected", bus);
		break;
	case ISPASYNC_LOOP_DOWN:
		/*
		 * Hopefully we get here in time to minimize the number
		 * of commands we are firing off that are sure to die.
		 */
		scsipi_channel_freeze(&isp->isp_osinfo._chan, 1);
		if (IS_DUALBUS(isp))
			scsipi_channel_freeze(&isp->isp_osinfo._chan_b, 1);
		isp_prt(isp, ISP_LOGINFO, "Loop DOWN");
		break;
	case ISPASYNC_LOOP_UP:
		callout_reset(&isp->isp_osinfo._restart, 1,
		    scsipi_channel_timed_thaw, &isp->isp_osinfo._chan);
		if (IS_DUALBUS(isp)) {
			callout_reset(&isp->isp_osinfo._restart, 1,
			    scsipi_channel_timed_thaw,
			    &isp->isp_osinfo._chan_b);
		}
		isp_prt(isp, ISP_LOGINFO, "Loop UP");
		break;
	case ISPASYNC_PROMENADE:
		if (IS_FC(isp) && isp->isp_dblev) {
			const char fmt[] = "Target %d (Loop 0x%x) Port ID 0x%x "
			    "(role %s) %s\n Port WWN 0x%08x%08x\n"
			    " Node WWN 0x%08x%08x";
			const static char *roles[4] = {
				"No", "Target", "Initiator", "Target/Initiator"
			};
			fcparam *fcp = isp->isp_param;
			int tgt = *((int *) arg);
			struct lportdb *lp = &fcp->portdb[tgt];

			isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid,
			    lp->portid, roles[lp->roles & 0x3],
			    (lp->valid) ? "Arrived" : "Departed",
			    (u_int32_t) (lp->port_wwn >> 32),
			    (u_int32_t) (lp->port_wwn & 0xffffffffLL),
			    (u_int32_t) (lp->node_wwn >> 32),
			    (u_int32_t) (lp->node_wwn & 0xffffffffLL));
			break;
		}
	case ISPASYNC_CHANGE_NOTIFY:
		if (arg == (void *) 1) {
			isp_prt(isp, ISP_LOGINFO,
			    "Name Server Database Changed");
		} else {
			isp_prt(isp, ISP_LOGINFO, "Other Change Notify");
		}
		break;
	case ISPASYNC_FABRIC_DEV:
	{
		int target, lrange;
		struct lportdb *lp = NULL;
		char *pt;
		sns_ganrsp_t *resp = (sns_ganrsp_t *) arg;
		u_int32_t portid;
		u_int64_t wwpn, wwnn;
		fcparam *fcp = isp->isp_param;

		portid =
		    (((u_int32_t) resp->snscb_port_id[0]) << 16) |
		    (((u_int32_t) resp->snscb_port_id[1]) << 8) |
		    (((u_int32_t) resp->snscb_port_id[2]));
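		/*
		 * Illustrative example (hypothetical values): port ID bytes
		 * { 0x01, 0x02, 0x03 } assemble to portid 0x010203.  The port
		 * and node WWNs below are built the same way, most significant
		 * byte first, into 64-bit values.
		 */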

		wwpn =
		    (((u_int64_t) resp->snscb_portname[0]) << 56) |
		    (((u_int64_t) resp->snscb_portname[1]) << 48) |
		    (((u_int64_t) resp->snscb_portname[2]) << 40) |
		    (((u_int64_t) resp->snscb_portname[3]) << 32) |
		    (((u_int64_t) resp->snscb_portname[4]) << 24) |
		    (((u_int64_t) resp->snscb_portname[5]) << 16) |
		    (((u_int64_t) resp->snscb_portname[6]) << 8) |
		    (((u_int64_t) resp->snscb_portname[7]));

		wwnn =
		    (((u_int64_t) resp->snscb_nodename[0]) << 56) |
		    (((u_int64_t) resp->snscb_nodename[1]) << 48) |
		    (((u_int64_t) resp->snscb_nodename[2]) << 40) |
		    (((u_int64_t) resp->snscb_nodename[3]) << 32) |
		    (((u_int64_t) resp->snscb_nodename[4]) << 24) |
		    (((u_int64_t) resp->snscb_nodename[5]) << 16) |
		    (((u_int64_t) resp->snscb_nodename[6]) << 8) |
		    (((u_int64_t) resp->snscb_nodename[7]));
		if (portid == 0 || wwpn == 0) {
			break;
		}

		switch (resp->snscb_port_type) {
		case 1:
			pt = " N_Port";
			break;
		case 2:
			pt = " NL_Port";
			break;
		case 3:
			pt = "F/NL_Port";
			break;
		case 0x7f:
			pt = " Nx_Port";
			break;
		case 0x81:
			pt = " F_port";
			break;
		case 0x82:
			pt = " FL_Port";
			break;
		case 0x84:
			pt = " E_port";
			break;
		default:
			pt = "?";
			break;
		}
		isp_prt(isp, ISP_LOGINFO,
		    "%s @ 0x%x, Node 0x%08x%08x Port %08x%08x",
		    pt, portid, ((u_int32_t) (wwnn >> 32)), ((u_int32_t) wwnn),
		    ((u_int32_t) (wwpn >> 32)), ((u_int32_t) wwpn));
		/*
		 * We're only interested in SCSI_FCP types (for now)
		 */
		if ((resp->snscb_fc4_types[2] & 1) == 0) {
			break;
		}
		if (fcp->isp_topo != TOPO_F_PORT)
			lrange = FC_SNS_ID+1;
		else
			lrange = 0;
		/*
		 * Is it already in our list?
		 */
		for (target = lrange; target < MAX_FC_TARG; target++) {
			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
				continue;
			}
			lp = &fcp->portdb[target];
			if (lp->port_wwn == wwpn && lp->node_wwn == wwnn) {
				lp->fabric_dev = 1;
				break;
			}
		}
		if (target < MAX_FC_TARG) {
			break;
		}
		for (target = lrange; target < MAX_FC_TARG; target++) {
			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
				continue;
			}
			lp = &fcp->portdb[target];
			if (lp->port_wwn == 0) {
				break;
			}
		}
		if (target == MAX_FC_TARG) {
			isp_prt(isp, ISP_LOGWARN,
			    "no more space for fabric devices");
			break;
		}
		lp->node_wwn = wwnn;
		lp->port_wwn = wwpn;
		lp->portid = portid;
		lp->fabric_dev = 1;
		break;
	}
	default:
		break;
	}
	(void) splx(s);
	return (0);
}

#include <machine/stdarg.h>
void
isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
{
	va_list ap;
	if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
		return;
	}
	printf("%s: ", isp->isp_name);
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
	printf("\n");
}