/* $NetBSD: isp_netbsd.c,v 1.18 1999/10/17 01:23:21 mjacob Exp $ */
/*
 * Platform (NetBSD) dependent common attachment code for Qlogic adapters.
 * Matthew Jacob <mjacob (at) nas.nasa.gov>
 */
/*
 * Copyright (C) 1997, 1998, 1999 National Aeronautics & Space Administration
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <dev/ic/isp_netbsd.h>
#include <sys/scsiio.h>

static void ispminphys __P((struct buf *));
static int32_t ispcmd_slow __P((ISP_SCSI_XFER_T *));
static int32_t ispcmd __P((ISP_SCSI_XFER_T *));
static int
ispioctl __P((struct scsipi_link *, u_long, caddr_t, int, struct proc *));

static struct scsipi_device isp_dev = { NULL, NULL, NULL, NULL };
static int isp_poll __P((struct ispsoftc *, ISP_SCSI_XFER_T *, int));
static void isp_watch __P((void *));
static void isp_command_requeue __P((void *));
static void isp_internal_restart __P((void *));

/*
 * Complete attachment of hardware, including subdevices.
 */
void
isp_attach(isp)
    struct ispsoftc *isp;
{

    isp->isp_osinfo._adapter.scsipi_minphys = ispminphys;
    isp->isp_osinfo._adapter.scsipi_ioctl = ispioctl;

    isp->isp_state = ISP_RUNSTATE;
    isp->isp_osinfo._link.scsipi_scsi.channel = SCSI_CHANNEL_ONLY_ONE;
    isp->isp_osinfo._link.adapter_softc = isp;
    isp->isp_osinfo._link.device = &isp_dev;
    isp->isp_osinfo._link.adapter = &isp->isp_osinfo._adapter;
    isp->isp_osinfo._link.openings = isp->isp_maxcmds;
    TAILQ_INIT(&isp->isp_osinfo.waitq);    /* XXX 2nd Bus? */

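    /*
     * Fibre Channel adapters use the normal command entry point from the
     * start; parallel SCSI adapters begin with ispcmd_slow() so that
     * per-target negotiation settings can be derived from quirk bits
     * before switching over to ispcmd().
     */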
    if (IS_FC(isp)) {
        /*
         * Give it another chance here to come alive...
         */
        fcparam *fcp = isp->isp_param;
        isp->isp_osinfo._adapter.scsipi_cmd = ispcmd;
        if (fcp->isp_fwstate != FW_READY) {
            (void) isp_control(isp, ISPCTL_FCLINK_TEST, NULL);
        }
        isp->isp_osinfo._link.scsipi_scsi.max_target = MAX_FC_TARG-1;
#ifdef ISP2100_SCCLUN
        /*
         * 16 bits worth, but let's be reasonable..
         */
        isp->isp_osinfo._link.scsipi_scsi.max_lun = 255;
#else
        isp->isp_osinfo._link.scsipi_scsi.max_lun = 15;
#endif
        isp->isp_osinfo._link.scsipi_scsi.adapter_target =
            ((fcparam *)isp->isp_param)->isp_loopid;
    } else {
        sdparam *sdp = isp->isp_param;
        isp->isp_osinfo._adapter.scsipi_cmd = ispcmd_slow;
        isp->isp_osinfo._link.scsipi_scsi.max_target = MAX_TARGETS-1;
        if (isp->isp_bustype == ISP_BT_SBUS) {
            isp->isp_osinfo._link.scsipi_scsi.max_lun = 7;
        } else {
            /*
             * Too much target breakage at present.
             */
#if 0
            if (isp->isp_fwrev >= ISP_FW_REV(7,55,0))
                isp->isp_osinfo._link.scsipi_scsi.max_lun = 31;
            else
#endif
                isp->isp_osinfo._link.scsipi_scsi.max_lun = 7;
        }
        isp->isp_osinfo._link.scsipi_scsi.adapter_target =
            sdp->isp_initiator_id;
        isp->isp_osinfo.discovered[0] = 1 << sdp->isp_initiator_id;
        if (IS_12X0(isp)) {
            isp->isp_osinfo._link_b = isp->isp_osinfo._link;
            sdp++;
            isp->isp_osinfo.discovered[1] =
                1 << sdp->isp_initiator_id;
            isp->isp_osinfo._link_b.scsipi_scsi.adapter_target =
                sdp->isp_initiator_id;
            isp->isp_osinfo._link_b.scsipi_scsi.channel = 1;
        }
    }
    isp->isp_osinfo._link.type = BUS_SCSI;

    /*
     * Send a SCSI Bus Reset (used to be done as part of attach,
     * but now left to the OS outer layers).
     */
    if (IS_SCSI(isp)) {
        int bus = 0;
        (void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
        if (IS_12X0(isp)) {
            bus++;
            (void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
        }
        SYS_DELAY(2*1000000);
    }

    /*
     * Start the watchdog.
     */
    isp->isp_dogactive = 1;
    timeout(isp_watch, isp, WATCH_INTERVAL * hz);

    /*
     * And attach children (if any).
     */
    config_found((void *)isp, &isp->isp_osinfo._link, scsiprint);
    if (IS_12X0(isp)) {
        config_found((void *)isp, &isp->isp_osinfo._link_b, scsiprint);
    }
}

/*
 * minphys our xfers
 *
 * Unfortunately, the buffer pointer describes the target device, not the
 * adapter device, so we can't use the pointer to find out what kind of
 * adapter we are and adjust accordingly.
 */

static void
ispminphys(bp)
    struct buf *bp;
{
    /*
     * XXX: Only the 1020 has a 24 bit limit.
     */
    if (bp->b_bcount >= (1 << 24)) {
        bp->b_bcount = (1 << 24);
    }
    minphys(bp);
}

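/*
 * Command entry point used for parallel SCSI adapters until discovery is
 * complete: fold the target's scsipi quirk bits into the per-target
 * negotiation flags (sync/wide/tagged queueing) and mark the target as
 * discovered.  Once every target on every channel has been seen, switch
 * the adapter over to ispcmd() and request a device parameter update.
 */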
static int32_t
ispcmd_slow(xs)
    ISP_SCSI_XFER_T *xs;
{
    /*
     * Have we completed discovery for this adapter?
     */
    if ((xs->xs_control & XS_CTL_DISCOVERY) == 0) {
        struct ispsoftc *isp = XS_ISP(xs);
        sdparam *sdp = isp->isp_param;
        int s = splbio();
        int chan = XS_CHANNEL(xs), chmax = IS_12X0(isp)? 2 : 1;
        u_int16_t f = DPARM_DEFAULT;

        sdp += chan;
        if (xs->sc_link->quirks & SDEV_NOSYNC) {
            f ^= DPARM_SYNC;
        }
        if (xs->sc_link->quirks & SDEV_NOWIDE) {
            f ^= DPARM_WIDE;
        }
        if (xs->sc_link->quirks & SDEV_NOTAG) {
            f ^= DPARM_TQING;
        }
        sdp->isp_devparam[XS_TGT(xs)].dev_flags = f;
        sdp->isp_devparam[XS_TGT(xs)].dev_update = 1;
        isp->isp_osinfo.discovered[chan] |= (1 << XS_TGT(xs));
        f = 0xffff ^ (1 << sdp->isp_initiator_id);
        for (chan = 0; chan < chmax; chan++) {
            if (isp->isp_osinfo.discovered[chan] == f)
                break;
        }
        if (chan == chmax) {
            isp->isp_osinfo._adapter.scsipi_cmd = ispcmd;
            isp->isp_update = 1;
            if (IS_12X0(isp))
                isp->isp_update |= 2;
        }
        (void) splx(s);
    }
    return (ispcmd(xs));
}

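/*
 * ioctl entry point; the only request handled here is SCBUSIORESET,
 * which resets the SCSI bus associated with the given scsipi_link.
 */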
static int
ispioctl(sc_link, cmd, addr, flag, p)
    struct scsipi_link *sc_link;
    u_long cmd;
    caddr_t addr;
    int flag;
    struct proc *p;
{
    struct ispsoftc *isp = sc_link->adapter_softc;
    int s, chan, retval = ENOTTY;

    switch (cmd) {
    case SCBUSIORESET:
        chan = sc_link->scsipi_scsi.channel;
        s = splbio();
        if (isp_control(isp, ISPCTL_RESET_BUS, &chan))
            retval = EIO;
        else
            retval = 0;
        (void) splx(s);
        break;
    default:
        break;
    }
    return (retval);
}

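/*
 * Main command entry point: (re)initialize the firmware if it is not yet
 * running, defer the command if the channel is currently blocked (e.g.
 * loop down), otherwise hand it to the core code.  Polled commands are
 * then waited on via isp_poll().
 */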
static int32_t
ispcmd(xs)
    ISP_SCSI_XFER_T *xs;
{
    struct ispsoftc *isp;
    int result, s;

    isp = XS_ISP(xs);
    s = splbio();
    if (isp->isp_state < ISP_RUNSTATE) {
        DISABLE_INTS(isp);
        isp_init(isp);
        if (isp->isp_state != ISP_INITSTATE) {
            ENABLE_INTS(isp);
            (void) splx(s);
            XS_SETERR(xs, HBA_BOTCH);
            return (COMPLETE);
        }
        isp->isp_state = ISP_RUNSTATE;
        ENABLE_INTS(isp);
    }

    /*
     * Check for queue blockage...
     */
    if (isp->isp_osinfo.blocked) {
        if (xs->xs_control & XS_CTL_POLL) {
            xs->error = XS_DRIVER_STUFFUP;
            splx(s);
            return (TRY_AGAIN_LATER);
        }
        TAILQ_INSERT_TAIL(&isp->isp_osinfo.waitq, xs, adapter_q);
        splx(s);
        return (SUCCESSFULLY_QUEUED);
    }
    DISABLE_INTS(isp);
    result = ispscsicmd(xs);
    ENABLE_INTS(isp);

    if ((xs->xs_control & XS_CTL_POLL) == 0) {
        switch (result) {
        case CMD_QUEUED:
            result = SUCCESSFULLY_QUEUED;
            break;
        case CMD_EAGAIN:
            result = TRY_AGAIN_LATER;
            break;
        case CMD_RQLATER:
            result = SUCCESSFULLY_QUEUED;
            timeout(isp_command_requeue, xs, hz);
            break;
        case CMD_COMPLETE:
            result = COMPLETE;
            break;
        }
        (void) splx(s);
        return (result);
    }

    switch (result) {
    case CMD_QUEUED:
        result = SUCCESSFULLY_QUEUED;
        break;
    case CMD_RQLATER:
    case CMD_EAGAIN:
        if (XS_NOERR(xs)) {
            xs->error = XS_DRIVER_STUFFUP;
        }
        result = TRY_AGAIN_LATER;
        break;
    case CMD_COMPLETE:
        result = COMPLETE;
        break;
    }
    /*
     * If we can't use interrupts, poll on completion.
     */
    if (result == SUCCESSFULLY_QUEUED) {
        if (isp_poll(isp, xs, XS_TIME(xs))) {
            /*
             * If no other error occurred but we didn't finish,
             * something bad happened.
             */
            if (XS_IS_CMD_DONE(xs) == 0) {
                if (isp_control(isp, ISPCTL_ABORT_CMD, xs)) {
                    isp_restart(isp);
                }
                if (XS_NOERR(xs)) {
                    XS_SETERR(xs, HBA_BOTCH);
                }
            }
        }
        result = COMPLETE;
    }
    (void) splx(s);
    return (result);
}

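/*
 * Poll for completion of a command for up to mswait milliseconds by
 * calling the interrupt service routine directly.  Returns zero if the
 * command completed, nonzero if the wait timed out.
 */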
static int
isp_poll(isp, xs, mswait)
    struct ispsoftc *isp;
    ISP_SCSI_XFER_T *xs;
    int mswait;
{

    while (mswait) {
        /* Try the interrupt handling routine */
        (void)isp_intr((void *)isp);

        /* See if the xs is now done */
        if (XS_IS_CMD_DONE(xs)) {
            return (0);
        }
        SYS_DELAY(1000);    /* wait one millisecond */
        mswait--;
    }
    return (1);
}

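/*
 * Watchdog, rescheduled every WATCH_INTERVAL seconds: age the timeout of
 * each active, non-polled command and try to abort any command that is
 * roughly two watchdog intervals past its deadline.
 */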
359
360 static void
361 isp_watch(arg)
362 void *arg;
363 {
364 int i;
365 struct ispsoftc *isp = arg;
366 struct scsipi_xfer *xs;
367 int s;
368
369 /*
370 * Look for completely dead commands (but not polled ones).
371 */
372 s = splbio();
373 for (i = 0; i < isp->isp_maxcmds; i++) {
374 xs = isp->isp_xflist[i];
375 if (xs == NULL) {
376 continue;
377 }
378 if (xs->timeout == 0 || (xs->xs_control & XS_CTL_POLL)) {
379 continue;
380 }
381 xs->timeout -= (WATCH_INTERVAL * 1000);
382 /*
383 * Avoid later thinking that this
384 * transaction is not being timed.
385 * Then give ourselves to watchdog
386 * periods of grace.
387 */
388 if (xs->timeout == 0) {
389 xs->timeout = 1;
390 } else if (xs->timeout > -(2 * WATCH_INTERVAL * 1000)) {
391 continue;
392 }
393 if (isp_control(isp, ISPCTL_ABORT_CMD, xs)) {
394 printf("%s: isp_watch failed to abort command\n",
395 isp->isp_name);
396 isp_restart(isp);
397 break;
398 }
399 }
400 timeout(isp_watch, isp, WATCH_INTERVAL * hz);
401 isp->isp_dogactive = 1;
402 (void) splx(s);
403 }
404
405 /*
406 * Free any associated resources prior to decommissioning and
407 * set the card to a known state (so it doesn't wake up and kick
408 * us when we aren't expecting it to).
409 *
410 * Locks are held before coming here.
411 */
412 void
413 isp_uninit(isp)
414 struct ispsoftc *isp;
415 {
416 ISP_ILOCKVAL_DECL;
417 ISP_ILOCK(isp);
418 /*
419 * Leave with interrupts disabled.
420 */
421 DISABLE_INTS(isp);
422
423 /*
424 * Turn off the watchdog (if active).
425 */
426 if (isp->isp_dogactive) {
427 untimeout(isp_watch, isp);
428 isp->isp_dogactive = 0;
429 }
430
431 ISP_IUNLOCK(isp);
432 }
433
434 /*
435 * Restart function for a command to be requeued later.
436 */
437 static void
438 isp_command_requeue(arg)
439 void *arg;
440 {
441 struct scsipi_xfer *xs = arg;
442 struct ispsoftc *isp = XS_ISP(xs);
443 int s = splbio();
444 switch (ispcmd_slow(xs)) {
445 case SUCCESSFULLY_QUEUED:
446 printf("%s: isp_command_requeue: requeued for %d.%d\n",
447 isp->isp_name, XS_TGT(xs), XS_LUN(xs));
448 break;
449 case TRY_AGAIN_LATER:
450 printf("%s: EAGAIN for %d.%d\n",
451 isp->isp_name, XS_TGT(xs), XS_LUN(xs));
452 /* FALLTHROUGH */
453 case COMPLETE:
454 /* can only be an error */
455 xs->xs_status |= XS_STS_DONE;
456 if (XS_NOERR(xs)) {
457 XS_SETERR(xs, HBA_BOTCH);
458 }
459 scsipi_done(xs);
460 break;
461 }
462 (void) splx(s);
463 }
464
465 /*
466 * Restart function after a LOOP UP event (e.g.),
467 * done as a timeout for some hysteresis.
468 */
469 static void
470 isp_internal_restart(arg)
471 void *arg;
472 {
473 struct ispsoftc *isp = arg;
474 int result, nrestarted = 0, s;
475
476 s = splbio();
477 if (isp->isp_osinfo.blocked == 0) {
478 struct scsipi_xfer *xs;
479 while ((xs = TAILQ_FIRST(&isp->isp_osinfo.waitq)) != NULL) {
480 TAILQ_REMOVE(&isp->isp_osinfo.waitq, xs, adapter_q);
481 DISABLE_INTS(isp);
482 result = ispscsicmd(xs);
483 ENABLE_INTS(isp);
484 if (result != CMD_QUEUED) {
485 printf("%s: botched command restart (0x%x)\n",
486 isp->isp_name, result);
487 xs->xs_status |= XS_STS_DONE;
488 if (xs->error == XS_NOERROR)
489 xs->error = XS_DRIVER_STUFFUP;
490 scsipi_done(xs);
491 }
492 nrestarted++;
493 }
494 printf("%s: requeued %d commands\n", isp->isp_name, nrestarted);
495 }
496 (void) splx(s);
497 }
498
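/*
 * Handler for asynchronous events posted by the common code: negotiated
 * target parameters, bus resets, loop state changes, port database
 * changes and (optionally) fabric name server updates.
 */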
int
isp_async(isp, cmd, arg)
    struct ispsoftc *isp;
    ispasync_t cmd;
    void *arg;
{
    int bus, tgt;
    int s = splbio();
    switch (cmd) {
    case ISPASYNC_NEW_TGT_PARAMS:
        if (IS_SCSI(isp) && isp->isp_dblev) {
            sdparam *sdp = isp->isp_param;
            char *wt;
            int mhz, flags, period;

            tgt = *((int *) arg);
            bus = (tgt >> 16) & 0xffff;
            tgt &= 0xffff;

            flags = sdp->isp_devparam[tgt].cur_dflags;
            period = sdp->isp_devparam[tgt].cur_period;
            if ((flags & DPARM_SYNC) && period &&
                (sdp->isp_devparam[tgt].cur_offset) != 0) {
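                /*
                 * The sync period value is in units of 4ns, hence the
                 * 1000 / (period * 4) conversion to MHz below; in LVD
                 * mode the special period codes 0xa, 0xb and 0xc are
                 * mapped to their transfer rates explicitly.
                 */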
                if (sdp->isp_lvdmode) {
                    switch (period) {
                    case 0xa:
                        mhz = 40;
                        break;
                    case 0xb:
                        mhz = 33;
                        break;
                    case 0xc:
                        mhz = 25;
                        break;
                    default:
                        mhz = 1000 / (period * 4);
                        break;
                    }
                } else {
                    mhz = 1000 / (period * 4);
                }
            } else {
                mhz = 0;
            }
            switch (flags & (DPARM_WIDE|DPARM_TQING)) {
            case DPARM_WIDE:
                wt = ", 16 bit wide\n";
                break;
            case DPARM_TQING:
                wt = ", Tagged Queueing Enabled\n";
                break;
            case DPARM_WIDE|DPARM_TQING:
                wt = ", 16 bit wide, Tagged Queueing Enabled\n";
                break;
            default:
                wt = "\n";
                break;
            }
            if (mhz) {
                printf("%s: Bus %d Target %d at %dMHz Max "
                    "Offset %d%s", isp->isp_name, bus, tgt, mhz,
                    sdp->isp_devparam[tgt].cur_offset, wt);
            } else {
                printf("%s: Bus %d Target %d Async Mode%s",
                    isp->isp_name, bus, tgt, wt);
            }
        }
        break;
    case ISPASYNC_BUS_RESET:
        if (arg)
            bus = *((int *) arg);
        else
            bus = 0;
        printf("%s: SCSI bus %d reset detected\n", isp->isp_name, bus);
        break;
    case ISPASYNC_LOOP_DOWN:
        /*
         * Hopefully we get here in time to minimize the number
         * of commands we are firing off that are sure to die.
         */
        isp->isp_osinfo.blocked = 1;
        printf("%s: Loop DOWN\n", isp->isp_name);
        break;
    case ISPASYNC_LOOP_UP:
        isp->isp_osinfo.blocked = 0;
        timeout(isp_internal_restart, isp, 1);
        printf("%s: Loop UP\n", isp->isp_name);
        break;
    case ISPASYNC_PDB_CHANGED:
        if (IS_FC(isp) && isp->isp_dblev) {
            const char *fmt = "%s: Target %d (Loop 0x%x) Port ID 0x%x "
                "role %s %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x\n";
            const static char *roles[4] = {
                "No", "Target", "Initiator", "Target/Initiator"
            };
            char *ptr;
            fcparam *fcp = isp->isp_param;
            int tgt = *((int *) arg);
            struct lportdb *lp = &fcp->portdb[tgt];

            if (lp->valid) {
                ptr = "arrived";
            } else {
                ptr = "disappeared";
            }
            printf(fmt, isp->isp_name, tgt, lp->loopid, lp->portid,
                roles[lp->roles & 0x3], ptr,
                (u_int32_t) (lp->port_wwn >> 32),
                (u_int32_t) (lp->port_wwn & 0xffffffffLL),
                (u_int32_t) (lp->node_wwn >> 32),
                (u_int32_t) (lp->node_wwn & 0xffffffffLL));
        }
        break;
#ifdef ISP2100_FABRIC
    case ISPASYNC_CHANGE_NOTIFY:
        printf("%s: Name Server Database Changed\n", isp->isp_name);
        break;
    case ISPASYNC_FABRIC_DEV:
    {
        int target;
        struct lportdb *lp;
        sns_scrsp_t *resp = (sns_scrsp_t *) arg;
        u_int32_t portid;
        u_int64_t wwn;
        fcparam *fcp = isp->isp_param;

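        /*
         * Assemble the 24-bit port ID and the 64-bit port WWN from the
         * byte arrays in the name server response.
         */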
        portid =
            (((u_int32_t) resp->snscb_port_id[0]) << 16) |
            (((u_int32_t) resp->snscb_port_id[1]) << 8) |
            (((u_int32_t) resp->snscb_port_id[2]));
        wwn =
            (((u_int64_t)resp->snscb_portname[0]) << 56) |
            (((u_int64_t)resp->snscb_portname[1]) << 48) |
            (((u_int64_t)resp->snscb_portname[2]) << 40) |
            (((u_int64_t)resp->snscb_portname[3]) << 32) |
            (((u_int64_t)resp->snscb_portname[4]) << 24) |
            (((u_int64_t)resp->snscb_portname[5]) << 16) |
            (((u_int64_t)resp->snscb_portname[6]) << 8) |
            (((u_int64_t)resp->snscb_portname[7]));
        printf("%s: Fabric Device (Type 0x%x)@PortID 0x%x WWN "
            "0x%08x%08x\n", isp->isp_name, resp->snscb_port_type,
            portid, ((u_int32_t)(wwn >> 32)),
            ((u_int32_t)(wwn & 0xffffffff)));
        if (resp->snscb_port_type != 2)
            break;
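        /*
         * If this WWN is already in the local port database, there is
         * nothing more to do; otherwise claim the first free slot past
         * the SNS ID for the newly seen device.
         */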
        for (target = FC_SNS_ID+1; target < MAX_FC_TARG; target++) {
            lp = &fcp->portdb[target];
            if (lp->port_wwn == wwn)
                break;
        }
        if (target < MAX_FC_TARG) {
            break;
        }
        for (target = FC_SNS_ID+1; target < MAX_FC_TARG; target++) {
            lp = &fcp->portdb[target];
            if (lp->port_wwn == 0)
                break;
        }
        if (target == MAX_FC_TARG) {
            printf("%s: no more space for fabric devices\n",
                isp->isp_name);
            (void) splx(s);
            return (-1);
        }
        lp->port_wwn = lp->node_wwn = wwn;
        lp->portid = portid;
        break;
    }
#endif
    default:
        break;
    }
    (void) splx(s);
    return (0);
}