/* $NetBSD: isp_netbsd.c,v 1.18.2.12 2001/03/27 13:08:12 bouyer Exp $ */
/*
 * This driver, which is contained in NetBSD in the files:
 *
 *	sys/dev/ic/isp.c
 *	sys/dev/ic/isp_inline.h
 *	sys/dev/ic/isp_netbsd.c
 *	sys/dev/ic/isp_netbsd.h
 *	sys/dev/ic/isp_target.c
 *	sys/dev/ic/isp_target.h
 *	sys/dev/ic/isp_tpublic.h
 *	sys/dev/ic/ispmbox.h
 *	sys/dev/ic/ispreg.h
 *	sys/dev/ic/ispvar.h
 *	sys/microcode/isp/asm_sbus.h
 *	sys/microcode/isp/asm_1040.h
 *	sys/microcode/isp/asm_1080.h
 *	sys/microcode/isp/asm_12160.h
 *	sys/microcode/isp/asm_2100.h
 *	sys/microcode/isp/asm_2200.h
 *	sys/pci/isp_pci.c
 *	sys/sbus/isp_sbus.c
 *
 * is being actively maintained by Matthew Jacob (mjacob (at) netbsd.org).
 * This driver also shares source with the FreeBSD, OpenBSD, Linux and
 * Solaris versions. This tends to be an interesting maintenance problem.
 *
 * Please coordinate with Matthew Jacob on changes you wish to make here.
 */
/*
 * Platform (NetBSD) dependent common attachment code for Qlogic adapters.
 * Matthew Jacob <mjacob (at) nas.nasa.gov>
 */
/*
 * Copyright (C) 1997, 1998, 1999 National Aeronautics & Space Administration
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <dev/ic/isp_netbsd.h>
#include <sys/scsiio.h>


/*
 * Set a timeout for the watchdogging of a command.
 *
 * The dimensional analysis is
 *
 *	milliseconds * (seconds/millisecond) * (ticks/second) = ticks
 *
 *	=
 *
 *	(milliseconds / 1000) * hz = ticks
 *
 *
 * For timeouts less than 1 second, we'll get zero. Because of this, and
 * because we want to establish *our* timeout to be longer than what the
 * firmware might do, we just add 3 seconds at the back end.
 */
#define	_XT(xs)	((((xs)->timeout/1000) * hz) + (3 * hz))
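/*
 * For example, with xs->timeout == 250 (milliseconds) and hz at a typical
 * value of 100, the integer division truncates to zero and _XT(xs)
 * evaluates to 0 + 3 * 100 == 300 ticks; the 3-second pad is what actually
 * arms the watchdog for sub-second timeouts.
 */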

static void ispminphys __P((struct buf *));
static void isprequest __P((struct scsipi_channel *,
    scsipi_adapter_req_t, void *));
static int
ispioctl __P((struct scsipi_channel *, u_long, caddr_t, int, struct proc *));

static void isp_polled_cmd __P((struct ispsoftc *, XS_T *));
static void isp_dog __P((void *));

/*
 * Complete attachment of hardware, including subdevices.
 */
void
isp_attach(isp)
	struct ispsoftc *isp;
{
	isp->isp_state = ISP_RUNSTATE;

	isp->isp_osinfo._adapter.adapt_dev = &isp->isp_osinfo._dev;
	isp->isp_osinfo._adapter.adapt_nchannels = IS_DUALBUS(isp) ? 2 : 1;
	isp->isp_osinfo._adapter.adapt_openings = isp->isp_maxcmds;	/* XXX per adapter or per channel? */
	isp->isp_osinfo._adapter.adapt_max_periph = isp->isp_maxcmds;
	isp->isp_osinfo._adapter.adapt_ioctl = ispioctl;
	isp->isp_osinfo._adapter.adapt_request = isprequest;
	isp->isp_osinfo._adapter.adapt_minphys = ispminphys;

	isp->isp_osinfo._chan.chan_adapter = &isp->isp_osinfo._adapter;
	isp->isp_osinfo._chan.chan_bustype = &scsi_bustype;
	isp->isp_osinfo._chan.chan_channel = 0;
	/*
	 * Until the midlayer is fixed to use REPORT LUNS, limit to 8 luns.
	 */
	isp->isp_osinfo._chan.chan_nluns =
	    (isp->isp_maxluns < 8) ? isp->isp_maxluns : 8;

	TAILQ_INIT(&isp->isp_osinfo.waitq);	/* The 2nd bus will share.. */

	if (IS_FC(isp)) {
		isp->isp_osinfo._chan.chan_ntargets = MAX_FC_TARG;
	} else {
		sdparam *sdp = isp->isp_param;
		isp->isp_osinfo._chan.chan_ntargets = MAX_TARGETS;
		isp->isp_osinfo._chan.chan_id = sdp->isp_initiator_id;
		isp->isp_osinfo.discovered[0] = 1 << sdp->isp_initiator_id;
		if (IS_DUALBUS(isp)) {
			isp->isp_osinfo._chan_b = isp->isp_osinfo._chan;
			sdp++;
			isp->isp_osinfo.discovered[1] =
			    1 << sdp->isp_initiator_id;
			isp->isp_osinfo._chan_b.chan_id = sdp->isp_initiator_id;
			isp->isp_osinfo._chan_b.chan_channel = 1;
		}
	}

	/*
	 * Send a SCSI Bus Reset.
	 */
	if (IS_SCSI(isp)) {
		int bus = 0;
		ISP_LOCK(isp);
		(void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
		if (IS_DUALBUS(isp)) {
			bus++;
			(void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
		}
		ISP_UNLOCK(isp);
	} else {
		int defid;
		fcparam *fcp = isp->isp_param;
		delay(2 * 1000000);
		defid = MAX_FC_TARG;
		ISP_LOCK(isp);
		/*
		 * Clock interrupts probably aren't running yet, so the
		 * link test we do here will be really short (a smoke
		 * test, really).
		 */
		if (isp_control(isp, ISPCTL_FCLINK_TEST, NULL)) {
			(void) isp_control(isp, ISPCTL_PDB_SYNC, NULL);
			if (fcp->isp_fwstate == FW_READY &&
			    fcp->isp_loopstate >= LOOP_PDB_RCVD) {
				defid = fcp->isp_loopid;
			}
		}
		ISP_UNLOCK(isp);
		isp->isp_osinfo._chan.chan_id = defid;
	}

	/*
	 * After this point, we'll be using the new configuration scheme,
	 * which allows interrupts, so we can do tsleep/wakeup for mailbox
	 * commands from here on.
	 */
	isp->isp_osinfo.no_mbox_ints = 0;

	/*
	 * And attach children (if any).
	 */
	config_found((void *)isp, &isp->isp_osinfo._chan, scsiprint);
	if (IS_DUALBUS(isp)) {
		config_found((void *)isp, &isp->isp_osinfo._chan_b, scsiprint);
	}
}

/*
 * minphys our xfers
 *
 * Unfortunately, the buffer pointer describes the target device, not the
 * adapter device, so we can't use the pointer to find out what kind of
 * adapter we are and adjust accordingly.
 */

static void
ispminphys(bp)
	struct buf *bp;
{
	/*
	 * XXX: Only the 1020 has a 24 bit limit.
	 */
	if (bp->b_bcount >= (1 << 24)) {
		bp->b_bcount = (1 << 24);
	}
	minphys(bp);
}

static int
ispioctl(chan, cmd, addr, flag, p)
	struct scsipi_channel *chan;
	u_long cmd;
	caddr_t addr;
	int flag;
	struct proc *p;
{
	struct ispsoftc *isp = (void *)chan->chan_adapter->adapt_dev;
	int s, retval = ENOTTY;

	switch (cmd) {
	case SCBUSIORESET:
		s = splbio();
		if (isp_control(isp, ISPCTL_RESET_BUS, &chan->chan_channel))
			retval = EIO;
		else
			retval = 0;
		(void) splx(s);
		break;
	default:
		break;
	}
	return (retval);
}


static void
isprequest(chan, req, arg)
	struct scsipi_channel *chan;
	scsipi_adapter_req_t req;
	void *arg;
{
	struct scsipi_periph *periph;
	struct ispsoftc *isp = (void *)chan->chan_adapter->adapt_dev;
	XS_T *xs;
	int s, result;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		xs = arg;
		periph = xs->xs_periph;
		s = splbio();
		if (isp->isp_state < ISP_RUNSTATE) {
			DISABLE_INTS(isp);
			isp_init(isp);
			if (isp->isp_state != ISP_INITSTATE) {
				ENABLE_INTS(isp);
				(void) splx(s);
				XS_SETERR(xs, HBA_BOTCH);
				scsipi_done(xs);
				return;
			}
			isp->isp_state = ISP_RUNSTATE;
			ENABLE_INTS(isp);
		}

		if (xs->xs_control & XS_CTL_POLL) {
			volatile u_int8_t ombi = isp->isp_osinfo.no_mbox_ints;
			isp->isp_osinfo.no_mbox_ints = 1;
			isp_polled_cmd(isp, xs);
			isp->isp_osinfo.no_mbox_ints = ombi;
			(void) splx(s);
			return;
		}

		result = isp_start(xs);
#if 0
		{
			static int na[16] = { 0 };
			if (na[isp->isp_unit] < isp->isp_nactive) {
				isp_prt(isp, ISP_LOGALL, "active hiwater %d", isp->isp_nactive);
				na[isp->isp_unit] = isp->isp_nactive;
			}
		}
#endif
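		/*
		 * Map the core driver's start status onto midlayer
		 * semantics: a queued command gets a watchdog timer,
		 * CMD_EAGAIN and CMD_RQLATER hand the request back to
		 * the midlayer for a retry, and CMD_COMPLETE means the
		 * command already finished (typically with an error
		 * recorded in the xfer).
		 */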
		switch (result) {
		case CMD_QUEUED:
			if (xs->timeout) {
				callout_reset(&xs->xs_callout, _XT(xs), isp_dog, xs);
			}
			break;
		case CMD_EAGAIN:
			xs->error = XS_REQUEUE;
			scsipi_done(xs);
			break;
		case CMD_RQLATER:
			xs->error = XS_RESOURCE_SHORTAGE;
			scsipi_done(xs);
			break;
		case CMD_COMPLETE:
			scsipi_done(xs);
			break;
		}
		(void) splx(s);
		return;

	case ADAPTER_REQ_GROW_RESOURCES:
		/* XXX Not supported. */
		return;

	case ADAPTER_REQ_SET_XFER_MODE:
	{
		struct scsipi_xfer_mode *xm = arg;
		if (IS_SCSI(isp)) {
			int dflags = 0;
			sdparam *sdp = SDPARAM(isp);

			sdp += chan->chan_channel;
			if (xm->xm_mode & PERIPH_CAP_TQING)
				dflags |= DPARM_TQING;
			if (xm->xm_mode & PERIPH_CAP_WIDE16)
				dflags |= DPARM_WIDE;
			if (xm->xm_mode & PERIPH_CAP_SYNC)
				dflags |= DPARM_SYNC;
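			/*
			 * Merge the requested capabilities into the stored
			 * device flags and mark this target (and its bus)
			 * for a parameter update, so the core driver can
			 * renegotiate on its next update pass.
			 */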
			s = splbio();
			sdp->isp_devparam[xm->xm_target].dev_flags |= dflags;
			dflags = sdp->isp_devparam[xm->xm_target].dev_flags;
			sdp->isp_devparam[xm->xm_target].dev_update = 1;
			isp->isp_update |= (1 << chan->chan_channel);
			splx(s);
			isp_prt(isp, ISP_LOGDEBUG1,
			    "isprequest: device flags 0x%x for %d.%d.X",
			    dflags, chan->chan_channel, xm->xm_target);
		}
	}
	}
}

static void
isp_polled_cmd(isp, xs)
	struct ispsoftc *isp;
	XS_T *xs;
{
	int result;
	int infinite = 0, mswait;

	result = isp_start(xs);

	switch (result) {
	case CMD_QUEUED:
		break;
	case CMD_RQLATER:
	case CMD_EAGAIN:
		if (XS_NOERR(xs)) {
			xs->error = XS_REQUEUE;
		}
		/* FALLTHROUGH */
	case CMD_COMPLETE:
		scsipi_done(xs);
		return;
	}

	/*
	 * If we can't use interrupts, poll on completion.
	 */
	if ((mswait = XS_TIME(xs)) == 0)
		infinite = 1;

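	/*
	 * Busy-wait in one-millisecond steps, calling isp_intr() directly
	 * to service the chip, until the command completes or the timeout
	 * expires.  A zero timeout means we poll forever.
	 */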
	while (mswait || infinite) {
		if (isp_intr((void *)isp)) {
			if (XS_CMD_DONE_P(xs)) {
				break;
			}
		}
		USEC_DELAY(1000);
		mswait -= 1;
	}

	/*
	 * If no other error occurred but we didn't finish,
	 * something bad happened.
	 */
	if (XS_CMD_DONE_P(xs) == 0) {
		if (isp_control(isp, ISPCTL_ABORT_CMD, xs)) {
			isp_reinit(isp);
		}
		if (XS_NOERR(xs)) {
			XS_SETERR(xs, HBA_BOTCH);
		}
	}
	scsipi_done(xs);
}

void
isp_done(xs)
	XS_T *xs;
{
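	/*
	 * Mark the command done.  If the watchdog currently owns it
	 * (XS_CMD_WDOG_P), leave the final scsipi_done() to isp_dog();
	 * otherwise cancel the watchdog callout and complete it here.
	 */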
	XS_CMD_S_DONE(xs);
	if (XS_CMD_WDOG_P(xs) == 0) {
		struct ispsoftc *isp = XS_ISP(xs);
		callout_stop(&xs->xs_callout);
		if (XS_CMD_GRACE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1,
			    "finished command on borrowed time");
		}
		XS_CMD_S_CLEAR(xs);
		scsipi_done(xs);
	}
}

static void
isp_dog(arg)
	void *arg;
{
	XS_T *xs = arg;
	struct ispsoftc *isp = XS_ISP(xs);
	u_int32_t handle;

	ISP_ILOCK(isp);
	/*
	 * We've decided this command is dead. Make sure we're not trying
	 * to kill a command that's already dead by getting its handle and
	 * seeing whether it's still alive.
	 */
	handle = isp_find_handle(isp, xs);
	if (handle) {
		u_int16_t r, r1, i;

		if (XS_CMD_DONE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1,
			    "watchdog found done cmd (handle 0x%x)", handle);
			ISP_IUNLOCK(isp);
			return;
		}

		if (XS_CMD_WDOG_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1,
			    "recursive watchdog (handle 0x%x)", handle);
			ISP_IUNLOCK(isp);
			return;
		}

		XS_CMD_S_WDOG(xs);

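		/*
		 * Read the interrupt status register until we get two
		 * consecutive identical values (or give up after 1000
		 * tries) so we act on a stable snapshot of the pending
		 * interrupt state.
		 */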
		i = 0;
		do {
			r = ISP_READ(isp, BIU_ISR);
			USEC_DELAY(1);
			r1 = ISP_READ(isp, BIU_ISR);
		} while (r != r1 && ++i < 1000);

		if (INT_PENDING(isp, r) && isp_intr(isp) && XS_CMD_DONE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1, "watchdog cleanup (%x, %x)",
			    handle, r);
			XS_CMD_C_WDOG(xs);
			isp_done(xs);
		} else if (XS_CMD_GRACE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1, "watchdog timeout (%x, %x)",
			    handle, r);
			/*
			 * Make sure the command is *really* dead before we
			 * release the handle (and DMA resources) for reuse.
			 */
			(void) isp_control(isp, ISPCTL_ABORT_CMD, arg);

			/*
			 * After this point, the command is really dead.
			 */
			if (XS_XFRLEN(xs)) {
				ISP_DMAFREE(isp, xs, handle);
			}
			isp_destroy_handle(isp, handle);
			XS_SETERR(xs, XS_TIMEOUT);
			XS_CMD_S_CLEAR(xs);
			isp_done(xs);
		} else {
			u_int16_t iptr, optr;
			ispreq_t *mp;
			isp_prt(isp, ISP_LOGDEBUG2,
			    "possible command timeout (%x, %x)", handle, r);
			XS_CMD_C_WDOG(xs);
			callout_reset(&xs->xs_callout, hz, isp_dog, xs);
			if (isp_getrqentry(isp, &iptr, &optr, (void **) &mp)) {
				ISP_IUNLOCK(isp);
				return;
			}
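			/*
			 * Give the command one more grace period: post a
			 * SYNC_ALL marker request for this channel to nudge
			 * the firmware, and if the command still hasn't
			 * completed when the rearmed callout fires, the
			 * grace-period branch above will abort it.
			 */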
			XS_CMD_S_GRACE(xs);
			MEMZERO((void *) mp, sizeof (*mp));
			mp->req_header.rqs_entry_count = 1;
			mp->req_header.rqs_entry_type = RQSTYPE_MARKER;
			mp->req_modifier = SYNC_ALL;
			mp->req_target = XS_CHANNEL(xs) << 7;
			ISP_SWIZZLE_REQUEST(isp, mp);
			ISP_ADD_REQUEST(isp, iptr);
		}
	} else {
		isp_prt(isp, ISP_LOGDEBUG0, "watchdog with no command");
	}
	ISP_IUNLOCK(isp);
}

/*
 * Free any associated resources prior to decommissioning and
 * set the card to a known state (so it doesn't wake up and kick
 * us when we aren't expecting it to).
 *
 * Locks are held before coming here.
 */
void
isp_uninit(isp)
	struct ispsoftc *isp;
{
	ISP_LOCK(isp);
	/*
	 * Leave with interrupts disabled.
	 */
	DISABLE_INTS(isp);
	ISP_UNLOCK(isp);
}

int
isp_async(isp, cmd, arg)
	struct ispsoftc *isp;
	ispasync_t cmd;
	void *arg;
{
	int bus, tgt;
	int s = splbio();
	switch (cmd) {
	case ISPASYNC_NEW_TGT_PARAMS:
		if (IS_SCSI(isp) && isp->isp_dblev) {
			sdparam *sdp = isp->isp_param;
			int flags;
			struct scsipi_xfer_mode xm;

			tgt = *((int *) arg);
			bus = (tgt >> 16) & 0xffff;
			tgt &= 0xffff;
			sdp += bus;
			flags = sdp->isp_devparam[tgt].cur_dflags;

			xm.xm_mode = 0;
			xm.xm_period = sdp->isp_devparam[tgt].cur_period;
			xm.xm_offset = sdp->isp_devparam[tgt].cur_offset;
			xm.xm_target = tgt;

			if ((flags & DPARM_SYNC) && xm.xm_period && xm.xm_offset)
				xm.xm_mode |= PERIPH_CAP_SYNC;
			if (flags & DPARM_WIDE)
				xm.xm_mode |= PERIPH_CAP_WIDE16;
			if (flags & DPARM_TQING)
				xm.xm_mode |= PERIPH_CAP_TQING;
			scsipi_async_event(bus ?
			    &isp->isp_osinfo._chan_b : &isp->isp_osinfo._chan,
			    ASYNC_EVENT_XFER_MODE, &xm);
		}
		break;
	case ISPASYNC_BUS_RESET:
		if (arg)
			bus = *((int *) arg);
		else
			bus = 0;
		isp_prt(isp, ISP_LOGINFO, "SCSI bus %d reset detected", bus);
		break;
	case ISPASYNC_LOOP_DOWN:
		/*
		 * Hopefully we get here in time to minimize the number
		 * of commands we are firing off that are sure to die.
		 */
		scsipi_channel_freeze(&isp->isp_osinfo._chan, 1);
		if (IS_DUALBUS(isp))
			scsipi_channel_freeze(&isp->isp_osinfo._chan_b, 1);
		isp_prt(isp, ISP_LOGINFO, "Loop DOWN");
		break;
	case ISPASYNC_LOOP_UP:
		callout_reset(&isp->isp_osinfo._restart, 1,
		    scsipi_channel_timed_thaw, &isp->isp_osinfo._chan);
		if (IS_DUALBUS(isp)) {
			callout_reset(&isp->isp_osinfo._restart, 1,
			    scsipi_channel_timed_thaw,
			    &isp->isp_osinfo._chan_b);
		}
		isp_prt(isp, ISP_LOGINFO, "Loop UP");
		break;
	case ISPASYNC_PROMENADE:
		if (IS_FC(isp) && isp->isp_dblev) {
			const char fmt[] = "Target %d (Loop 0x%x) Port ID 0x%x "
			    "(role %s) %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x";
			static const char *roles[4] = {
				"No", "Target", "Initiator", "Target/Initiator"
			};
			fcparam *fcp = isp->isp_param;
			int tgt = *((int *) arg);
			struct lportdb *lp = &fcp->portdb[tgt];

			isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid,
			    lp->portid, roles[lp->roles & 0x3],
			    (lp->valid) ? "Arrived" : "Departed",
			    (u_int32_t) (lp->port_wwn >> 32),
			    (u_int32_t) (lp->port_wwn & 0xffffffffLL),
			    (u_int32_t) (lp->node_wwn >> 32),
			    (u_int32_t) (lp->node_wwn & 0xffffffffLL));
		}
		break;
	case ISPASYNC_CHANGE_NOTIFY:
		/* Both flavors of change notification get the same report. */
		isp_prt(isp, ISP_LOGINFO, "Name Server Database Changed");
		break;
	case ISPASYNC_FABRIC_DEV:
	{
		int target, lrange;
		struct lportdb *lp = NULL;
		char *pt;
		sns_ganrsp_t *resp = (sns_ganrsp_t *) arg;
		u_int32_t portid;
		u_int64_t wwpn, wwnn;
		fcparam *fcp = isp->isp_param;

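		/*
		 * Assemble the 24-bit port ID and the 64-bit port and node
		 * world-wide names from the big-endian byte arrays in the
		 * SNS GA_NXT response.
		 */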
		portid =
		    (((u_int32_t) resp->snscb_port_id[0]) << 16) |
		    (((u_int32_t) resp->snscb_port_id[1]) << 8) |
		    (((u_int32_t) resp->snscb_port_id[2]));

		wwpn =
		    (((u_int64_t) resp->snscb_portname[0]) << 56) |
		    (((u_int64_t) resp->snscb_portname[1]) << 48) |
		    (((u_int64_t) resp->snscb_portname[2]) << 40) |
		    (((u_int64_t) resp->snscb_portname[3]) << 32) |
		    (((u_int64_t) resp->snscb_portname[4]) << 24) |
		    (((u_int64_t) resp->snscb_portname[5]) << 16) |
		    (((u_int64_t) resp->snscb_portname[6]) << 8) |
		    (((u_int64_t) resp->snscb_portname[7]));

		wwnn =
		    (((u_int64_t) resp->snscb_nodename[0]) << 56) |
		    (((u_int64_t) resp->snscb_nodename[1]) << 48) |
		    (((u_int64_t) resp->snscb_nodename[2]) << 40) |
		    (((u_int64_t) resp->snscb_nodename[3]) << 32) |
		    (((u_int64_t) resp->snscb_nodename[4]) << 24) |
		    (((u_int64_t) resp->snscb_nodename[5]) << 16) |
		    (((u_int64_t) resp->snscb_nodename[6]) << 8) |
		    (((u_int64_t) resp->snscb_nodename[7]));
		if (portid == 0 || wwpn == 0) {
			break;
		}

		switch (resp->snscb_port_type) {
		case 1:
			pt = " N_Port";
			break;
		case 2:
			pt = " NL_Port";
			break;
		case 3:
			pt = "F/NL_Port";
			break;
		case 0x7f:
			pt = " Nx_Port";
			break;
		case 0x81:
			pt = " F_port";
			break;
		case 0x82:
			pt = " FL_Port";
			break;
		case 0x84:
			pt = " E_port";
			break;
		default:
			pt = "?";
			break;
		}
		isp_prt(isp, ISP_LOGINFO,
		    "%s @ 0x%x, Node 0x%08x%08x Port %08x%08x",
		    pt, portid, ((u_int32_t) (wwnn >> 32)), ((u_int32_t) wwnn),
		    ((u_int32_t) (wwpn >> 32)), ((u_int32_t) wwpn));
		/*
		 * We're only interested in SCSI_FCP types (for now)
		 */
		if ((resp->snscb_fc4_types[2] & 1) == 0) {
			break;
		}
		if (fcp->isp_topo != TOPO_F_PORT)
			lrange = FC_SNS_ID+1;
		else
			lrange = 0;
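		/*
		 * On anything other than a pure F-port topology the low
		 * target numbers belong to local loop devices, so fabric
		 * devices are assigned slots above the reserved SNS ID;
		 * on an F-port they can start at zero.
		 */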
		/*
		 * Is it already in our list?
		 */
		for (target = lrange; target < MAX_FC_TARG; target++) {
			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
				continue;
			}
			lp = &fcp->portdb[target];
			if (lp->port_wwn == wwpn && lp->node_wwn == wwnn) {
				lp->fabric_dev = 1;
				break;
			}
		}
		if (target < MAX_FC_TARG) {
			break;
		}
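		/*
		 * Not found; claim the first free port database slot,
		 * again skipping the reserved loop/fabric IDs.
		 */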
		for (target = lrange; target < MAX_FC_TARG; target++) {
			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
				continue;
			}
			lp = &fcp->portdb[target];
			if (lp->port_wwn == 0) {
				break;
			}
		}
		if (target == MAX_FC_TARG) {
			isp_prt(isp, ISP_LOGWARN,
			    "no more space for fabric devices");
			break;
		}
		lp->node_wwn = wwnn;
		lp->port_wwn = wwpn;
		lp->portid = portid;
		lp->fabric_dev = 1;
		break;
	}
	default:
		break;
	}
	(void) splx(s);
	return (0);
}

#include <machine/stdarg.h>
void
#ifdef __STDC__
isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
#else
isp_prt(isp, level, fmt, va_alist)
	struct ispsoftc *isp;
	int level;
	char *fmt;
	va_dcl
#endif
{
	va_list ap;
	if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
		return;
	}
	printf("%s: ", isp->isp_name);
#ifdef __STDC__
	va_start(ap, fmt);
#else
	va_start(ap);
#endif
	vprintf(fmt, ap);
	va_end(ap);
	printf("\n");
}
