/* $NetBSD: isp_netbsd.c,v 1.18.2.14 2001/04/06 07:27:58 bouyer Exp $ */
/*
 * This driver, which is contained in NetBSD in the files:
 *
 *	sys/dev/ic/isp.c
 *	sys/dev/ic/isp_inline.h
 *	sys/dev/ic/isp_netbsd.c
 *	sys/dev/ic/isp_netbsd.h
 *	sys/dev/ic/isp_target.c
 *	sys/dev/ic/isp_target.h
 *	sys/dev/ic/isp_tpublic.h
 *	sys/dev/ic/ispmbox.h
 *	sys/dev/ic/ispreg.h
 *	sys/dev/ic/ispvar.h
 *	sys/microcode/isp/asm_sbus.h
 *	sys/microcode/isp/asm_1040.h
 *	sys/microcode/isp/asm_1080.h
 *	sys/microcode/isp/asm_12160.h
 *	sys/microcode/isp/asm_2100.h
 *	sys/microcode/isp/asm_2200.h
 *	sys/pci/isp_pci.c
 *	sys/sbus/isp_sbus.c
 *
 * is being actively maintained by Matthew Jacob (mjacob@netbsd.org).
 * This driver also shares source with the FreeBSD, OpenBSD, Linux, and
 * Solaris versions, which tends to be an interesting maintenance problem.
 *
 * Please coordinate with Matthew Jacob on changes you wish to make here.
 */
/*
 * Platform (NetBSD) dependent common attachment code for Qlogic adapters.
 * Matthew Jacob <mjacob@nas.nasa.gov>
 */
/*
 * Copyright (C) 1997, 1998, 1999 National Aeronautics & Space Administration
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <dev/ic/isp_netbsd.h>
#include <sys/scsiio.h>


/*
 * Set a timeout for the watchdogging of a command.
 *
 * The dimensional analysis is
 *
 *	milliseconds * (seconds/millisecond) * (ticks/second) = ticks
 *
 *	=
 *
 *	(milliseconds / 1000) * hz = ticks
 *
 *
 * For timeouts less than 1 second, we'll get zero. Because of this, and
 * because we want to establish *our* timeout to be longer than what the
 * firmware might do, we just add 3 seconds at the back end.
 */
#define	_XT(xs)	((((xs)->timeout/1000) * hz) + (3 * hz))

static void ispminphys(struct buf *);
static void isprequest(struct scsipi_channel *,
    scsipi_adapter_req_t, void *);
static int ispioctl(struct scsipi_channel *, u_long, caddr_t, int,
    struct proc *);

static void isp_polled_cmd(struct ispsoftc *, XS_T *);
static void isp_dog(void *);

/*
 * Complete attachment of hardware, including subdevices.
 */
void
isp_attach(struct ispsoftc *isp)
{
	isp->isp_state = ISP_RUNSTATE;

	isp->isp_osinfo._adapter.adapt_dev = &isp->isp_osinfo._dev;
	isp->isp_osinfo._adapter.adapt_nchannels = IS_DUALBUS(isp) ? 2 : 1;
	/* XXX per adapter or per channel? */
	isp->isp_osinfo._adapter.adapt_openings = isp->isp_maxcmds;
	isp->isp_osinfo._adapter.adapt_max_periph =
	    (isp->isp_maxcmds > 256) ? 256 : isp->isp_maxcmds;
	isp->isp_osinfo._adapter.adapt_ioctl = ispioctl;
	isp->isp_osinfo._adapter.adapt_request = isprequest;
	isp->isp_osinfo._adapter.adapt_minphys = ispminphys;

	isp->isp_osinfo._chan.chan_adapter = &isp->isp_osinfo._adapter;
	isp->isp_osinfo._chan.chan_bustype = &scsi_bustype;
	isp->isp_osinfo._chan.chan_channel = 0;
	/*
	 * Until the midlayer is fixed to use REPORT LUNS, limit to 8 luns.
	 */
	isp->isp_osinfo._chan.chan_nluns =
	    (isp->isp_maxluns < 8) ? isp->isp_maxluns : 8;

	TAILQ_INIT(&isp->isp_osinfo.waitq);	/* The 2nd bus will share.. */

	if (IS_FC(isp)) {
		isp->isp_osinfo._chan.chan_ntargets = MAX_FC_TARG;
	} else {
		sdparam *sdp = isp->isp_param;
		isp->isp_osinfo._chan.chan_ntargets = MAX_TARGETS;
		isp->isp_osinfo._chan.chan_id = sdp->isp_initiator_id;
		isp->isp_osinfo.discovered[0] = 1 << sdp->isp_initiator_id;
		if (IS_DUALBUS(isp)) {
			isp->isp_osinfo._chan_b = isp->isp_osinfo._chan;
			sdp++;
			isp->isp_osinfo.discovered[1] =
			    1 << sdp->isp_initiator_id;
			isp->isp_osinfo._chan_b.chan_id = sdp->isp_initiator_id;
			isp->isp_osinfo._chan_b.chan_channel = 1;
		}
	}

	/*
	 * Send a SCSI Bus Reset.
	 */
	if (IS_SCSI(isp)) {
		int bus = 0;
		ISP_LOCK(isp);
		(void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
		if (IS_DUALBUS(isp)) {
			bus++;
			(void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
		}
		ISP_UNLOCK(isp);
	} else {
		int defid;
		fcparam *fcp = isp->isp_param;
		delay(2 * 1000000);
		defid = MAX_FC_TARG;
		ISP_LOCK(isp);
		/*
		 * We probably won't have clock interrupts running yet,
		 * so this is little more than a quick smoke test of the
		 * link at this point.
		 */
		if (isp_control(isp, ISPCTL_FCLINK_TEST, NULL)) {
			(void) isp_control(isp, ISPCTL_PDB_SYNC, NULL);
			if (fcp->isp_fwstate == FW_READY &&
			    fcp->isp_loopstate >= LOOP_PDB_RCVD) {
				defid = fcp->isp_loopid;
			}
		}
		ISP_UNLOCK(isp);
		isp->isp_osinfo._chan.chan_id = defid;
	}

	/*
	 * After this point, we'll be doing the new configuration
	 * schema, which allows interrupts, so we can use tsleep/wakeup
	 * for mailbox operations from here on.
	 */
	isp->isp_osinfo.no_mbox_ints = 0;

	/*
	 * And attach children (if any).
	 */
	config_found((void *)isp, &isp->isp_osinfo._chan, scsiprint);
	if (IS_DUALBUS(isp)) {
		config_found((void *)isp, &isp->isp_osinfo._chan_b, scsiprint);
	}
}

/*
 * minphys our xfers
 *
 * Unfortunately, the buffer pointer describes the target device, not the
 * adapter device, so we can't use it to find out what kind of adapter we
 * are and adjust accordingly.
 */

static void
ispminphys(struct buf *bp)
{
	/*
	 * XXX: Only the 1020 has a 24 bit limit.
	 */
	if (bp->b_bcount >= (1 << 24)) {
		bp->b_bcount = (1 << 24);
	}
	minphys(bp);
}

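/*
 * Channel ioctl entry point. The only request handled here is
 * SCBUSIORESET, which asks the firmware to reset the SCSI bus behind
 * the given channel.
 */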
static int
ispioctl(struct scsipi_channel *chan, u_long cmd, caddr_t addr, int flag,
    struct proc *p)
{
	struct ispsoftc *isp = (void *)chan->chan_adapter->adapt_dev;
	int s, retval = ENOTTY;

	switch (cmd) {
	case SCBUSIORESET:
		s = splbio();
		if (isp_control(isp, ISPCTL_RESET_BUS, &chan->chan_channel))
			retval = EIO;
		else
			retval = 0;
		(void) splx(s);
		break;
	default:
		break;
	}
	return (retval);
}

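/*
 * Adapter request entry point for the scsipi midlayer: handles command
 * submission (ADAPTER_REQ_RUN_XFER), resource growth requests (not
 * supported), and sync/wide/tagged transfer mode settings.
 */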
static void
isprequest(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
{
	struct scsipi_periph *periph;
	struct ispsoftc *isp = (void *)chan->chan_adapter->adapt_dev;
	XS_T *xs;
	int s, result;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		xs = arg;
		periph = xs->xs_periph;
		s = splbio();
		if (isp->isp_state < ISP_RUNSTATE) {
			DISABLE_INTS(isp);
			isp_init(isp);
			if (isp->isp_state != ISP_INITSTATE) {
				ENABLE_INTS(isp);
				(void) splx(s);
				XS_SETERR(xs, HBA_BOTCH);
				scsipi_done(xs);
				return;
			}
			isp->isp_state = ISP_RUNSTATE;
			ENABLE_INTS(isp);
		}

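		/*
		 * Polled requests are run to completion right here,
		 * with mailbox interrupts temporarily disabled.
		 */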
		if (xs->xs_control & XS_CTL_POLL) {
			volatile u_int8_t ombi = isp->isp_osinfo.no_mbox_ints;
			isp->isp_osinfo.no_mbox_ints = 1;
			isp_polled_cmd(isp, xs);
			isp->isp_osinfo.no_mbox_ints = ombi;
			(void) splx(s);
			return;
		}

		result = isp_start(xs);
#if 0
		{
			static int na[16] = { 0 };
			if (na[isp->isp_unit] < isp->isp_nactive) {
				isp_prt(isp, ISP_LOGALL,
				    "active hiwater %d", isp->isp_nactive);
				na[isp->isp_unit] = isp->isp_nactive;
			}
		}
#endif
		switch (result) {
		case CMD_QUEUED:
			if (xs->timeout) {
				callout_reset(&xs->xs_callout, _XT(xs),
				    isp_dog, xs);
			}
			break;
		case CMD_EAGAIN:
			xs->error = XS_REQUEUE;
			scsipi_done(xs);
			break;
		case CMD_RQLATER:
			xs->error = XS_RESOURCE_SHORTAGE;
			scsipi_done(xs);
			break;
		case CMD_COMPLETE:
			scsipi_done(xs);
			break;
		}
		(void) splx(s);
		return;

	case ADAPTER_REQ_GROW_RESOURCES:
		/* XXX Not supported. */
		return;

	case ADAPTER_REQ_SET_XFER_MODE:
	{
		struct scsipi_xfer_mode *xm = arg;
		if (IS_SCSI(isp)) {
			int dflags = 0;
			sdparam *sdp = SDPARAM(isp);

			sdp += chan->chan_channel;
			if (xm->xm_mode & PERIPH_CAP_TQING)
				dflags |= DPARM_TQING;
			if (xm->xm_mode & PERIPH_CAP_WIDE16)
				dflags |= DPARM_WIDE;
			if (xm->xm_mode & PERIPH_CAP_SYNC)
				dflags |= DPARM_SYNC;
			s = splbio();
			sdp->isp_devparam[xm->xm_target].dev_flags |= dflags;
			dflags = sdp->isp_devparam[xm->xm_target].dev_flags;
			sdp->isp_devparam[xm->xm_target].dev_update = 1;
			isp->isp_update |= (1 << chan->chan_channel);
			splx(s);
			isp_prt(isp, ISP_LOGDEBUG1,
			    "isprequest: device flags 0x%x for %d.%d.X",
			    dflags, chan->chan_channel, xm->xm_target);
		}
	}
	}
}

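/*
 * Run a single command to completion by polling the interrupt handler.
 * Used for requests issued with XS_CTL_POLL (e.g. before interrupts
 * have been enabled).
 */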
static void
isp_polled_cmd(struct ispsoftc *isp, XS_T *xs)
{
	int result;
	int infinite = 0, mswait;

	result = isp_start(xs);

	switch (result) {
	case CMD_QUEUED:
		break;
	case CMD_RQLATER:
	case CMD_EAGAIN:
		if (XS_NOERR(xs)) {
			xs->error = XS_REQUEUE;
		}
		/* FALLTHROUGH */
	case CMD_COMPLETE:
		scsipi_done(xs);
		return;
	}

	/*
	 * If we can't use interrupts, poll on completion.
	 */
	if ((mswait = XS_TIME(xs)) == 0)
		infinite = 1;

	while (mswait || infinite) {
		if (isp_intr((void *)isp)) {
			if (XS_CMD_DONE_P(xs)) {
				break;
			}
		}
		USEC_DELAY(1000);
		mswait -= 1;
	}

	/*
	 * If no other error occurred but we didn't finish,
	 * something bad happened.
	 */
	if (XS_CMD_DONE_P(xs) == 0) {
		if (isp_control(isp, ISPCTL_ABORT_CMD, xs)) {
			isp_reinit(isp);
		}
		if (XS_NOERR(xs)) {
			XS_SETERR(xs, HBA_BOTCH);
		}
	}
	scsipi_done(xs);
}

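/*
 * Completion callback from the core driver. Stop the watchdog for the
 * command and hand it back to scsipi, unless the watchdog itself is
 * currently working on it.
 */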
void
isp_done(XS_T *xs)
{
	XS_CMD_S_DONE(xs);
	if (XS_CMD_WDOG_P(xs) == 0) {
		struct ispsoftc *isp = XS_ISP(xs);
		callout_stop(&xs->xs_callout);
		if (XS_CMD_GRACE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1,
			    "finished command on borrowed time");
		}
		XS_CMD_S_CLEAR(xs);
		scsipi_done(xs);
	}
}

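/*
 * Per-command watchdog, scheduled when a command is queued with a
 * timeout. If the command still has a handle outstanding we either
 * harvest a pending completion, grant one more grace period (pushing a
 * SYNC_ALL marker through the request queue), or abort the command and
 * complete it with XS_TIMEOUT.
 */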
static void
isp_dog(void *arg)
{
	XS_T *xs = arg;
	struct ispsoftc *isp = XS_ISP(xs);
	u_int16_t handle;

	ISP_ILOCK(isp);
	/*
	 * We've decided this command is dead. Make sure we're not trying
	 * to kill a command that's already dead by getting its handle and
	 * seeing whether it's still alive.
	 */
	handle = isp_find_handle(isp, xs);
	if (handle) {
		u_int16_t r, r1, i;

		if (XS_CMD_DONE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1,
			    "watchdog found done cmd (handle 0x%x)", handle);
			ISP_IUNLOCK(isp);
			return;
		}

		if (XS_CMD_WDOG_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1,
			    "recursive watchdog (handle 0x%x)", handle);
			ISP_IUNLOCK(isp);
			return;
		}

		XS_CMD_S_WDOG(xs);

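		/*
		 * Read the interrupt status register until two successive
		 * reads agree (or we give up), so that we don't act on a
		 * transient value.
		 */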
		i = 0;
		do {
			r = ISP_READ(isp, BIU_ISR);
			USEC_DELAY(1);
			r1 = ISP_READ(isp, BIU_ISR);
		} while (r != r1 && ++i < 1000);

		if (INT_PENDING(isp, r) && isp_intr(isp) && XS_CMD_DONE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1, "watchdog cleanup (%x, %x)",
			    handle, r);
			XS_CMD_C_WDOG(xs);
			isp_done(xs);
		} else if (XS_CMD_GRACE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1, "watchdog timeout (%x, %x)",
			    handle, r);
			/*
			 * Make sure the command is *really* dead before we
			 * release the handle (and DMA resources) for reuse.
			 */
			(void) isp_control(isp, ISPCTL_ABORT_CMD, arg);

			/*
			 * After this point, the command is really dead.
			 */
			if (XS_XFRLEN(xs)) {
				ISP_DMAFREE(isp, xs, handle);
			}
			isp_destroy_handle(isp, handle);
			XS_SETERR(xs, XS_TIMEOUT);
			XS_CMD_S_CLEAR(xs);
			isp_done(xs);
		} else {
			u_int16_t iptr, optr;
			ispreq_t *mp;
			isp_prt(isp, ISP_LOGDEBUG2,
			    "possible command timeout (%x, %x)", handle, r);
			XS_CMD_C_WDOG(xs);
			callout_reset(&xs->xs_callout, hz, isp_dog, xs);
			if (isp_getrqentry(isp, &iptr, &optr, (void **) &mp)) {
				ISP_IUNLOCK(isp);
				return;
			}
			XS_CMD_S_GRACE(xs);
			MEMZERO((void *) mp, sizeof (*mp));
			mp->req_header.rqs_entry_count = 1;
			mp->req_header.rqs_entry_type = RQSTYPE_MARKER;
			mp->req_modifier = SYNC_ALL;
			mp->req_target = XS_CHANNEL(xs) << 7;
			ISP_SWIZZLE_REQUEST(isp, mp);
			ISP_ADD_REQUEST(isp, iptr);
		}
	} else {
		isp_prt(isp, ISP_LOGDEBUG0, "watchdog with no command");
	}
	ISP_IUNLOCK(isp);
}

/*
 * Free any associated resources prior to decommissioning and
 * set the card to a known state (so it doesn't wake up and kick
 * us when we aren't expecting it to).
 *
 * Locks are held before coming here.
 */
void
isp_uninit(struct ispsoftc *isp)
{
	isp_lock(isp);
	/*
	 * Leave with interrupts disabled.
	 */
	DISABLE_INTS(isp);
	isp_unlock(isp);
}

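/*
 * Handle asynchronous events posted by the core driver: negotiated
 * target parameter changes, SCSI bus resets, Fibre Channel loop state
 * transitions, port database changes, and fabric device discovery.
 */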
int
isp_async(struct ispsoftc *isp, ispasync_t cmd, void *arg)
{
	int bus, tgt;
	int s = splbio();

	switch (cmd) {
	case ISPASYNC_NEW_TGT_PARAMS:
		if (IS_SCSI(isp) && isp->isp_dblev) {
			sdparam *sdp = isp->isp_param;
			int flags;
			struct scsipi_xfer_mode xm;

			tgt = *((int *) arg);
			bus = (tgt >> 16) & 0xffff;
			tgt &= 0xffff;
			sdp += bus;
			flags = sdp->isp_devparam[tgt].cur_dflags;

			xm.xm_mode = 0;
			xm.xm_period = sdp->isp_devparam[tgt].cur_period;
			xm.xm_offset = sdp->isp_devparam[tgt].cur_offset;
			xm.xm_target = tgt;

			if ((flags & DPARM_SYNC) && xm.xm_period &&
			    xm.xm_offset)
				xm.xm_mode |= PERIPH_CAP_SYNC;
			if (flags & DPARM_WIDE)
				xm.xm_mode |= PERIPH_CAP_WIDE16;
			if (flags & DPARM_TQING)
				xm.xm_mode |= PERIPH_CAP_TQING;
			scsipi_async_event(bus ? &isp->isp_osinfo._chan_b :
			    &isp->isp_osinfo._chan, ASYNC_EVENT_XFER_MODE,
			    &xm);
		}
		break;
	case ISPASYNC_BUS_RESET:
		if (arg)
			bus = *((int *) arg);
		else
			bus = 0;
		isp_prt(isp, ISP_LOGINFO, "SCSI bus %d reset detected", bus);
		break;
	case ISPASYNC_LOOP_DOWN:
		/*
		 * Hopefully we get here in time to minimize the number
		 * of commands we are firing off that are sure to die.
		 */
		scsipi_channel_freeze(&isp->isp_osinfo._chan, 1);
		if (IS_DUALBUS(isp))
			scsipi_channel_freeze(&isp->isp_osinfo._chan_b, 1);
		isp_prt(isp, ISP_LOGINFO, "Loop DOWN");
		break;
	case ISPASYNC_LOOP_UP:
		callout_reset(&isp->isp_osinfo._restart, 1,
		    scsipi_channel_timed_thaw, &isp->isp_osinfo._chan);
		if (IS_DUALBUS(isp)) {
			callout_reset(&isp->isp_osinfo._restart, 1,
			    scsipi_channel_timed_thaw,
			    &isp->isp_osinfo._chan_b);
		}
		isp_prt(isp, ISP_LOGINFO, "Loop UP");
		break;
	case ISPASYNC_PROMENADE:
		if (IS_FC(isp) && isp->isp_dblev) {
			const char fmt[] = "Target %d (Loop 0x%x) Port ID 0x%x "
			    "(role %s) %s\n Port WWN 0x%08x%08x\n"
			    " Node WWN 0x%08x%08x";
			static const char *roles[4] = {
			    "No", "Target", "Initiator", "Target/Initiator"
			};
			fcparam *fcp = isp->isp_param;
			int tgt = *((int *) arg);
			struct lportdb *lp = &fcp->portdb[tgt];

			isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid,
			    lp->portid, roles[lp->roles & 0x3],
			    (lp->valid)? "Arrived" : "Departed",
			    (u_int32_t) (lp->port_wwn >> 32),
			    (u_int32_t) (lp->port_wwn & 0xffffffffLL),
			    (u_int32_t) (lp->node_wwn >> 32),
			    (u_int32_t) (lp->node_wwn & 0xffffffffLL));
		}
		break;
	case ISPASYNC_CHANGE_NOTIFY:
		if (arg == (void *) 1) {
			isp_prt(isp, ISP_LOGINFO,
			    "Name Server Database Changed");
		} else {
			isp_prt(isp, ISP_LOGINFO, "Other Change Notify");
		}
		break;
	case ISPASYNC_FABRIC_DEV:
	{
		int target, lrange;
		struct lportdb *lp = NULL;
		char *pt;
		sns_ganrsp_t *resp = (sns_ganrsp_t *) arg;
		u_int32_t portid;
		u_int64_t wwpn, wwnn;
		fcparam *fcp = isp->isp_param;

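		/*
		 * Reassemble the 24-bit port ID and the 64-bit port and
		 * node WWNs from the byte arrays in the SNS GA_NXT
		 * response.
		 */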
		portid =
		    (((u_int32_t) resp->snscb_port_id[0]) << 16) |
		    (((u_int32_t) resp->snscb_port_id[1]) << 8) |
		    (((u_int32_t) resp->snscb_port_id[2]));

		wwpn =
		    (((u_int64_t)resp->snscb_portname[0]) << 56) |
		    (((u_int64_t)resp->snscb_portname[1]) << 48) |
		    (((u_int64_t)resp->snscb_portname[2]) << 40) |
		    (((u_int64_t)resp->snscb_portname[3]) << 32) |
		    (((u_int64_t)resp->snscb_portname[4]) << 24) |
		    (((u_int64_t)resp->snscb_portname[5]) << 16) |
		    (((u_int64_t)resp->snscb_portname[6]) << 8) |
		    (((u_int64_t)resp->snscb_portname[7]));

		wwnn =
		    (((u_int64_t)resp->snscb_nodename[0]) << 56) |
		    (((u_int64_t)resp->snscb_nodename[1]) << 48) |
		    (((u_int64_t)resp->snscb_nodename[2]) << 40) |
		    (((u_int64_t)resp->snscb_nodename[3]) << 32) |
		    (((u_int64_t)resp->snscb_nodename[4]) << 24) |
		    (((u_int64_t)resp->snscb_nodename[5]) << 16) |
		    (((u_int64_t)resp->snscb_nodename[6]) << 8) |
		    (((u_int64_t)resp->snscb_nodename[7]));
		if (portid == 0 || wwpn == 0) {
			break;
		}

		switch (resp->snscb_port_type) {
		case 1:
			pt = " N_Port";
			break;
		case 2:
			pt = " NL_Port";
			break;
		case 3:
			pt = "F/NL_Port";
			break;
		case 0x7f:
			pt = " Nx_Port";
			break;
		case 0x81:
			pt = " F_port";
			break;
		case 0x82:
			pt = " FL_Port";
			break;
		case 0x84:
			pt = " E_port";
			break;
		default:
			pt = "?";
			break;
		}
		isp_prt(isp, ISP_LOGINFO,
		    "%s @ 0x%x, Node 0x%08x%08x Port %08x%08x",
		    pt, portid, ((u_int32_t) (wwnn >> 32)), ((u_int32_t) wwnn),
		    ((u_int32_t) (wwpn >> 32)), ((u_int32_t) wwpn));
		/*
		 * We're only interested in SCSI_FCP types (for now)
		 */
		if ((resp->snscb_fc4_types[2] & 1) == 0) {
			break;
		}
		if (fcp->isp_topo != TOPO_F_PORT)
			lrange = FC_SNS_ID+1;
		else
			lrange = 0;
		/*
		 * Is it already in our list?
		 */
		for (target = lrange; target < MAX_FC_TARG; target++) {
			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
				continue;
			}
			lp = &fcp->portdb[target];
			if (lp->port_wwn == wwpn && lp->node_wwn == wwnn) {
				lp->fabric_dev = 1;
				break;
			}
		}
		if (target < MAX_FC_TARG) {
			break;
		}
		for (target = lrange; target < MAX_FC_TARG; target++) {
			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
				continue;
			}
			lp = &fcp->portdb[target];
			if (lp->port_wwn == 0) {
				break;
			}
		}
		if (target == MAX_FC_TARG) {
			isp_prt(isp, ISP_LOGWARN,
			    "no more space for fabric devices");
			break;
		}
		lp->node_wwn = wwnn;
		lp->port_wwn = wwpn;
		lp->portid = portid;
		lp->fabric_dev = 1;
		break;
	}
	default:
		break;
	}
	(void) splx(s);
	return (0);
}

#include <machine/stdarg.h>

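/*
 * Platform console output routine used by the core driver. Messages
 * are printed only if the level is ISP_LOGALL or is enabled in
 * isp_dblev.
 */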
void
isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
{
	va_list ap;

	if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
		return;
	}
	printf("%s: ", isp->isp_name);
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
	printf("\n");
}