isp_netbsd.c revision 1.73.2.3 1 /* $NetBSD: isp_netbsd.c,v 1.73.2.3 2007/05/13 17:36:24 ad Exp $ */
2 /*
3 * This driver, which is contained in NetBSD in the files:
4 *
5 * sys/dev/ic/isp.c
6 * sys/dev/ic/isp_inline.h
7 * sys/dev/ic/isp_netbsd.c
8 * sys/dev/ic/isp_netbsd.h
9 * sys/dev/ic/isp_target.c
10 * sys/dev/ic/isp_target.h
11 * sys/dev/ic/isp_tpublic.h
12 * sys/dev/ic/ispmbox.h
13 * sys/dev/ic/ispreg.h
14 * sys/dev/ic/ispvar.h
15 * sys/microcode/isp/asm_sbus.h
16 * sys/microcode/isp/asm_1040.h
17 * sys/microcode/isp/asm_1080.h
18 * sys/microcode/isp/asm_12160.h
19 * sys/microcode/isp/asm_2100.h
20 * sys/microcode/isp/asm_2200.h
21 * sys/pci/isp_pci.c
22 * sys/sbus/isp_sbus.c
23 *
24 * Is being actively maintained by Matthew Jacob (mjacob (at) NetBSD.org).
 * This driver also is shared source with FreeBSD, OpenBSD, Linux, and
 * Solaris versions. This tends to be an interesting maintenance problem.
27 *
28 * Please coordinate with Matthew Jacob on changes you wish to make here.
29 */
30 /*
31 * Platform (NetBSD) dependent common attachment code for Qlogic adapters.
32 * Matthew Jacob <mjacob (at) nas.nasa.gov>
33 */
34 /*
35 * Copyright (C) 1997, 1998, 1999 National Aeronautics & Space Administration
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. The name of the author may not be used to endorse or promote products
47 * derived from this software without specific prior written permission
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
50 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
51 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
52 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
53 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
54 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
55 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
56 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
57 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
58 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
59 */
60
61 #include <sys/cdefs.h>
62 __KERNEL_RCSID(0, "$NetBSD: isp_netbsd.c,v 1.73.2.3 2007/05/13 17:36:24 ad Exp $");
63
64 #include <dev/ic/isp_netbsd.h>
65 #include <sys/scsiio.h>
66
67
68 /*
69 * Set a timeout for the watchdogging of a command.
70 *
71 * The dimensional analysis is
72 *
73 * milliseconds * (seconds/millisecond) * (ticks/second) = ticks
74 *
75 * =
76 *
77 * (milliseconds / 1000) * hz = ticks
78 *
79 *
80 * For timeouts less than 1 second, we'll get zero. Because of this, and
81 * because we want to establish *our* timeout to be longer than what the
82 * firmware might do, we just add 3 seconds at the back end.
83 */
84 #define _XT(xs) ((((xs)->timeout/1000) * hz) + (3 * hz))
85
86 static void isp_config_interrupts(struct device *);
87 static void ispminphys_1020(struct buf *);
88 static void ispminphys(struct buf *);
89 static INLINE void ispcmd(struct ispsoftc *, XS_T *);
90 static void isprequest(struct scsipi_channel *, scsipi_adapter_req_t, void *);
91 static int
92 ispioctl(struct scsipi_channel *, u_long, void *, int, struct proc *);
93
94 static void isp_polled_cmd(struct ispsoftc *, XS_T *);
95 static void isp_dog(void *);
96 static void isp_fc_worker(void *);
97
98 /*
99 * Complete attachment of hardware, include subdevices.
100 */
101 void
102 isp_attach(struct ispsoftc *isp)
103 {
104 isp->isp_state = ISP_RUNSTATE;
105
106 isp->isp_osinfo._adapter.adapt_dev = &isp->isp_osinfo._dev;
107 isp->isp_osinfo._adapter.adapt_nchannels = IS_DUALBUS(isp) ? 2 : 1;
108 isp->isp_osinfo._adapter.adapt_openings = isp->isp_maxcmds;
109 /*
110 * It's not stated whether max_periph is limited by SPI
111 * tag uage, but let's assume that it is.
112 */
113 isp->isp_osinfo._adapter.adapt_max_periph = min(isp->isp_maxcmds, 255);
114 isp->isp_osinfo._adapter.adapt_ioctl = ispioctl;
115 isp->isp_osinfo._adapter.adapt_request = isprequest;
116 if (isp->isp_type <= ISP_HA_SCSI_1020A) {
117 isp->isp_osinfo._adapter.adapt_minphys = ispminphys_1020;
118 } else {
119 isp->isp_osinfo._adapter.adapt_minphys = ispminphys;
120 }
121
122 isp->isp_osinfo._chan.chan_adapter = &isp->isp_osinfo._adapter;
123 isp->isp_osinfo._chan.chan_bustype = &scsi_bustype;
124 isp->isp_osinfo._chan.chan_channel = 0;
125
126 /*
127 * Until the midlayer is fixed to use REPORT LUNS, limit to 8 luns.
128 */
129 isp->isp_osinfo._chan.chan_nluns = min(isp->isp_maxluns, 8);
130
131 if (IS_FC(isp)) {
132 #if 0 /* XXX channel "settle" time seems to sidestep some nasty race */
133 isp->isp_osinfo._chan.chan_flags = SCSIPI_CHAN_NOSETTLE;
134 #endif
135 isp->isp_osinfo._chan.chan_ntargets = MAX_FC_TARG;
136 isp->isp_osinfo._chan.chan_id = MAX_FC_TARG;
137 isp->isp_osinfo.threadwork = 1;
138 #ifdef ISP_FW_CRASH_DUMP
139 if (IS_2200(isp)) {
140 FCPARAM(isp)->isp_dump_data =
141 malloc(QLA2200_RISC_IMAGE_DUMP_SIZE, M_DEVBUF,
142 M_NOWAIT);
143 } else if (IS_23XX(isp)) {
144 FCPARAM(isp)->isp_dump_data =
145 malloc(QLA2300_RISC_IMAGE_DUMP_SIZE, M_DEVBUF,
146 M_NOWAIT);
147 }
148 if (FCPARAM(isp)->isp_dump_data)
149 FCPARAM(isp)->isp_dump_data[0] = 0;
150 #endif
151 } else {
152 int bus = 0;
153 sdparam *sdp = isp->isp_param;
154
155 isp->isp_osinfo._chan.chan_ntargets = MAX_TARGETS;
156 isp->isp_osinfo._chan.chan_id = sdp->isp_initiator_id;
157 isp->isp_osinfo.discovered[0] = 1 << sdp->isp_initiator_id;
158 if (IS_DUALBUS(isp)) {
159 isp->isp_osinfo._chan_b = isp->isp_osinfo._chan;
160 sdp++;
161 isp->isp_osinfo.discovered[1] =
162 1 << sdp->isp_initiator_id;
163 isp->isp_osinfo._chan_b.chan_id = sdp->isp_initiator_id;
164 isp->isp_osinfo._chan_b.chan_channel = 1;
165 }
166 ISP_LOCK(isp);
167 (void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
168 if (IS_DUALBUS(isp)) {
169 bus++;
170 (void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
171 }
172 ISP_UNLOCK(isp);
173 }
174
175
176 /*
177 * Defer enabling mailbox interrupts until later.
178 */
179 config_interrupts((struct device *) isp, isp_config_interrupts);
180
181 /*
182 * And attach children (if any).
183 */
184 config_found((void *)isp, &isp->isp_chanA, scsiprint);
185 if (IS_DUALBUS(isp)) {
186 config_found((void *)isp, &isp->isp_chanB, scsiprint);
187 }
188 }
189
190
191 static void
192 isp_config_interrupts(struct device *self)
193 {
194 struct ispsoftc *isp = (struct ispsoftc *) self;
195
196 /*
197 * After this point, we'll be doing the new configuration
198 * schema which allows interrupts, so we can do tsleep/wakeup
199 * for mailbox stuff at that point, if that's allowed.
200 */
201 if (IS_FC(isp)) {
202 isp->isp_osinfo.no_mbox_ints = 0;
203
204 if (kthread_create(PRI_NONE, 0, NULL, isp_fc_worker, isp,
205 &isp->isp_osinfo.thread, "%s:fc_thrd", isp->isp_name)) {
206 isp_prt(isp, ISP_LOGERR,
207 "unable to create FC worker thread");
208 panic("isp_config_interrupts");
209 }
210 }
211 }
212
213
214 /*
215 * minphys our xfers
216 */
217
218 static void
219 ispminphys_1020(struct buf *bp)
220 {
221 if (bp->b_bcount >= (1 << 24)) {
222 bp->b_bcount = (1 << 24);
223 }
224 minphys(bp);
225 }
226
227 static void
228 ispminphys(struct buf *bp)
229 {
230 if (bp->b_bcount >= (1 << 30)) {
231 bp->b_bcount = (1 << 30);
232 }
233 minphys(bp);
234 }
235
236 static int
237 ispioctl(struct scsipi_channel *chan, u_long cmd, void *addr,
238 int flag, struct proc *p)
239 {
240 struct ispsoftc *isp = (void *)chan->chan_adapter->adapt_dev;
241 int retval = ENOTTY;
242
243 switch (cmd) {
244 #ifdef ISP_FW_CRASH_DUMP
245 case ISP_GET_FW_CRASH_DUMP:
246 {
247 u_int16_t *ptr = FCPARAM(isp)->isp_dump_data;
248 size_t sz;
249
250 retval = 0;
251 if (IS_2200(isp))
252 sz = QLA2200_RISC_IMAGE_DUMP_SIZE;
253 else
254 sz = QLA2300_RISC_IMAGE_DUMP_SIZE;
255 ISP_LOCK(isp);
256 if (ptr && *ptr) {
257 void *uaddr = *((void **) addr);
258 if (copyout(ptr, uaddr, sz)) {
259 retval = EFAULT;
260 } else {
261 *ptr = 0;
262 }
263 } else {
264 retval = ENXIO;
265 }
266 ISP_UNLOCK(isp);
267 break;
268 }
269
270 case ISP_FORCE_CRASH_DUMP:
271 ISP_LOCK(isp);
272 if (isp->isp_osinfo.blocked == 0) {
273 isp->isp_osinfo.blocked = 1;
274 scsipi_channel_freeze(&isp->isp_chanA, 1);
275 }
276 isp_fw_dump(isp);
277 isp_reinit(isp);
278 ISP_UNLOCK(isp);
279 retval = 0;
280 break;
281 #endif
282 case ISP_SDBLEV:
283 {
284 int olddblev = isp->isp_dblev;
285 isp->isp_dblev = *(int *)addr;
286 *(int *)addr = olddblev;
287 retval = 0;
288 break;
289 }
290 case ISP_RESETHBA:
291 ISP_LOCK(isp);
292 isp_reinit(isp);
293 ISP_UNLOCK(isp);
294 retval = 0;
295 break;
296 case ISP_RESCAN:
297 if (IS_FC(isp)) {
298 ISP_LOCK(isp);
299 if (isp_fc_runstate(isp, 5 * 1000000)) {
300 retval = EIO;
301 } else {
302 retval = 0;
303 }
304 ISP_UNLOCK(isp);
305 }
306 break;
307 case ISP_FC_LIP:
308 if (IS_FC(isp)) {
309 ISP_LOCK(isp);
310 if (isp_control(isp, ISPCTL_SEND_LIP, 0)) {
311 retval = EIO;
312 } else {
313 retval = 0;
314 }
315 ISP_UNLOCK(isp);
316 }
317 break;
318 case ISP_FC_GETDINFO:
319 {
320 struct isp_fc_device *ifc = (struct isp_fc_device *) addr;
321 struct lportdb *lp;
322
323 if (/* ifc->loopid < 0 || */ ifc->loopid >= MAX_FC_TARG) {
324 retval = EINVAL;
325 break;
326 }
327 ISP_LOCK(isp);
328 lp = &FCPARAM(isp)->portdb[ifc->loopid];
329 if (lp->valid) {
330 ifc->loopid = lp->loopid;
331 ifc->portid = lp->portid;
332 ifc->node_wwn = lp->node_wwn;
333 ifc->port_wwn = lp->port_wwn;
334 retval = 0;
335 } else {
336 retval = ENODEV;
337 }
338 ISP_UNLOCK(isp);
339 break;
340 }
341 case ISP_GET_STATS:
342 {
343 isp_stats_t *sp = (isp_stats_t *) addr;
344
345 MEMZERO(sp, sizeof (*sp));
346 sp->isp_stat_version = ISP_STATS_VERSION;
347 sp->isp_type = isp->isp_type;
348 sp->isp_revision = isp->isp_revision;
349 ISP_LOCK(isp);
350 sp->isp_stats[ISP_INTCNT] = isp->isp_intcnt;
351 sp->isp_stats[ISP_INTBOGUS] = isp->isp_intbogus;
352 sp->isp_stats[ISP_INTMBOXC] = isp->isp_intmboxc;
353 sp->isp_stats[ISP_INGOASYNC] = isp->isp_intoasync;
354 sp->isp_stats[ISP_RSLTCCMPLT] = isp->isp_rsltccmplt;
355 sp->isp_stats[ISP_FPHCCMCPLT] = isp->isp_fphccmplt;
356 sp->isp_stats[ISP_RSCCHIWAT] = isp->isp_rscchiwater;
357 sp->isp_stats[ISP_FPCCHIWAT] = isp->isp_fpcchiwater;
358 ISP_UNLOCK(isp);
359 retval = 0;
360 break;
361 }
362 case ISP_CLR_STATS:
363 ISP_LOCK(isp);
364 isp->isp_intcnt = 0;
365 isp->isp_intbogus = 0;
366 isp->isp_intmboxc = 0;
367 isp->isp_intoasync = 0;
368 isp->isp_rsltccmplt = 0;
369 isp->isp_fphccmplt = 0;
370 isp->isp_rscchiwater = 0;
371 isp->isp_fpcchiwater = 0;
372 ISP_UNLOCK(isp);
373 retval = 0;
374 break;
375 case ISP_FC_GETHINFO:
376 {
377 struct isp_hba_device *hba = (struct isp_hba_device *) addr;
378 MEMZERO(hba, sizeof (*hba));
379 ISP_LOCK(isp);
380 hba->fc_speed = FCPARAM(isp)->isp_gbspeed;
381 hba->fc_scsi_supported = 1;
382 hba->fc_topology = FCPARAM(isp)->isp_topo + 1;
383 hba->fc_loopid = FCPARAM(isp)->isp_loopid;
384 hba->nvram_node_wwn = FCPARAM(isp)->isp_nodewwn;
385 hba->nvram_port_wwn = FCPARAM(isp)->isp_portwwn;
386 hba->active_node_wwn = ISP_NODEWWN(isp);
387 hba->active_port_wwn = ISP_PORTWWN(isp);
388 ISP_UNLOCK(isp);
389 break;
390 }
391 case SCBUSIORESET:
392 ISP_LOCK(isp);
393 if (isp_control(isp, ISPCTL_RESET_BUS, &chan->chan_channel))
394 retval = EIO;
395 else
396 retval = 0;
397 ISP_UNLOCK(isp);
398 break;
399 default:
400 break;
401 }
402 return (retval);
403 }
404
405 static INLINE void
406 ispcmd(struct ispsoftc *isp, XS_T *xs)
407 {
408 ISP_LOCK(isp);
409 if (isp->isp_state < ISP_RUNSTATE) {
410 DISABLE_INTS(isp);
411 isp_init(isp);
412 if (isp->isp_state != ISP_INITSTATE) {
413 ENABLE_INTS(isp);
414 ISP_UNLOCK(isp);
415 isp_prt(isp, ISP_LOGERR, "isp not at init state");
416 XS_SETERR(xs, HBA_BOTCH);
417 scsipi_done(xs);
418 return;
419 }
420 isp->isp_state = ISP_RUNSTATE;
421 ENABLE_INTS(isp);
422 }
423 /*
424 * Handle the case of a FC card where the FC thread hasn't
425 * fired up yet and we have loop state to clean up. If we
426 * can't clear things up and we've never seen loop up, bounce
427 * the command.
428 */
429 if (IS_FC(isp) && isp->isp_osinfo.threadwork &&
430 isp->isp_osinfo.thread == 0) {
431 volatile u_int8_t ombi = isp->isp_osinfo.no_mbox_ints;
432 int delay_time;
433
434 if (xs->xs_control & XS_CTL_POLL) {
435 isp->isp_osinfo.no_mbox_ints = 1;
436 }
437
438 if (isp->isp_osinfo.loop_checked == 0) {
439 delay_time = 10 * 1000000;
440 isp->isp_osinfo.loop_checked = 1;
441 } else {
442 delay_time = 250000;
443 }
444
445 if (isp_fc_runstate(isp, delay_time) != 0) {
446 if (xs->xs_control & XS_CTL_POLL) {
447 isp->isp_osinfo.no_mbox_ints = ombi;
448 }
449 if (FCPARAM(isp)->loop_seen_once == 0) {
450 XS_SETERR(xs, HBA_SELTIMEOUT);
451 scsipi_done(xs);
452 ISP_UNLOCK(isp);
453 return;
454 }
455 /*
456 * Otherwise, fall thru to be queued up for later.
457 */
458 } else {
459 int wasblocked =
460 (isp->isp_osinfo.blocked || isp->isp_osinfo.paused);
461 isp->isp_osinfo.threadwork = 0;
462 isp->isp_osinfo.blocked =
463 isp->isp_osinfo.paused = 0;
464 if (wasblocked) {
465 scsipi_channel_thaw(&isp->isp_chanA, 1);
466 }
467 }
468 if (xs->xs_control & XS_CTL_POLL) {
469 isp->isp_osinfo.no_mbox_ints = ombi;
470 }
471 }
472
473 if (isp->isp_osinfo.paused) {
474 isp_prt(isp, ISP_LOGWARN, "I/O while paused");
475 xs->error = XS_RESOURCE_SHORTAGE;
476 scsipi_done(xs);
477 ISP_UNLOCK(isp);
478 return;
479 }
480 if (isp->isp_osinfo.blocked) {
481 isp_prt(isp, ISP_LOGWARN, "I/O while blocked");
482 xs->error = XS_REQUEUE;
483 scsipi_done(xs);
484 ISP_UNLOCK(isp);
485 return;
486 }
487
488 if (xs->xs_control & XS_CTL_POLL) {
489 volatile u_int8_t ombi = isp->isp_osinfo.no_mbox_ints;
490 isp->isp_osinfo.no_mbox_ints = 1;
491 isp_polled_cmd(isp, xs);
492 isp->isp_osinfo.no_mbox_ints = ombi;
493 ISP_UNLOCK(isp);
494 return;
495 }
496
497 switch (isp_start(xs)) {
498 case CMD_QUEUED:
499 if (xs->timeout) {
500 callout_reset(&xs->xs_callout, _XT(xs), isp_dog, xs);
501 }
502 break;
503 case CMD_EAGAIN:
504 isp->isp_osinfo.paused = 1;
505 xs->error = XS_RESOURCE_SHORTAGE;
506 scsipi_channel_freeze(&isp->isp_chanA, 1);
507 if (IS_DUALBUS(isp)) {
508 scsipi_channel_freeze(&isp->isp_chanB, 1);
509 }
510 scsipi_done(xs);
511 break;
512 case CMD_RQLATER:
513 /*
514 * We can only get RQLATER from FC devices (1 channel only)
515 *
516 * Also, if we've never seen loop up, bounce the command
517 * (somebody has booted with no FC cable connected)
518 */
519 if (FCPARAM(isp)->loop_seen_once == 0) {
520 XS_SETERR(xs, HBA_SELTIMEOUT);
521 scsipi_done(xs);
522 break;
523 }
524 if (isp->isp_osinfo.blocked == 0) {
525 isp->isp_osinfo.blocked = 1;
526 scsipi_channel_freeze(&isp->isp_chanA, 1);
527 }
528 xs->error = XS_REQUEUE;
529 scsipi_done(xs);
530 break;
531 case CMD_COMPLETE:
532 scsipi_done(xs);
533 break;
534 }
535 ISP_UNLOCK(isp);
536 }
537
538 static void
539 isprequest(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
540 {
541 struct ispsoftc *isp = (void *)chan->chan_adapter->adapt_dev;
542
543 switch (req) {
544 case ADAPTER_REQ_RUN_XFER:
545 ispcmd(isp, (XS_T *) arg);
546 break;
547
548 case ADAPTER_REQ_GROW_RESOURCES:
549 /* Not supported. */
550 break;
551
552 case ADAPTER_REQ_SET_XFER_MODE:
553 if (IS_SCSI(isp)) {
554 struct scsipi_xfer_mode *xm = arg;
555 int dflags = 0;
556 sdparam *sdp = SDPARAM(isp);
557
558 sdp += chan->chan_channel;
559 if (xm->xm_mode & PERIPH_CAP_TQING)
560 dflags |= DPARM_TQING;
561 if (xm->xm_mode & PERIPH_CAP_WIDE16)
562 dflags |= DPARM_WIDE;
563 if (xm->xm_mode & PERIPH_CAP_SYNC)
564 dflags |= DPARM_SYNC;
565 ISP_LOCK(isp);
566 sdp->isp_devparam[xm->xm_target].goal_flags |= dflags;
567 dflags = sdp->isp_devparam[xm->xm_target].goal_flags;
568 sdp->isp_devparam[xm->xm_target].dev_update = 1;
569 isp->isp_update |= (1 << chan->chan_channel);
570 ISP_UNLOCK(isp);
571 isp_prt(isp, ISP_LOGDEBUG1,
572 "ispioctl: device flags 0x%x for %d.%d.X",
573 dflags, chan->chan_channel, xm->xm_target);
574 break;
575 }
576 default:
577 break;
578 }
579 }
580
581 static void
582 isp_polled_cmd(struct ispsoftc *isp, XS_T *xs)
583 {
584 int result;
585 int infinite = 0, mswait;
586
587 result = isp_start(xs);
588
589 switch (result) {
590 case CMD_QUEUED:
591 break;
592 case CMD_RQLATER:
593 if (XS_NOERR(xs)) {
594 xs->error = XS_REQUEUE;
595 }
596 case CMD_EAGAIN:
597 if (XS_NOERR(xs)) {
598 xs->error = XS_RESOURCE_SHORTAGE;
599 }
600 /* FALLTHROUGH */
601 case CMD_COMPLETE:
602 scsipi_done(xs);
603 return;
604
605 }
606
607 /*
608 * If we can't use interrupts, poll on completion.
609 */
610 if ((mswait = XS_TIME(xs)) == 0)
611 infinite = 1;
612
613 while (mswait || infinite) {
614 u_int16_t isr, sema, mbox;
615 if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
616 isp_intr(isp, isr, sema, mbox);
617 if (XS_CMD_DONE_P(xs)) {
618 break;
619 }
620 }
621 USEC_DELAY(1000);
622 mswait -= 1;
623 }
624
625 /*
626 * If no other error occurred but we didn't finish,
627 * something bad happened.
628 */
629 if (XS_CMD_DONE_P(xs) == 0) {
630 if (isp_control(isp, ISPCTL_ABORT_CMD, xs)) {
631 isp_reinit(isp);
632 }
633 if (XS_NOERR(xs)) {
634 isp_prt(isp, ISP_LOGERR, "polled command timed out");
635 XS_SETERR(xs, HBA_BOTCH);
636 }
637 }
638 scsipi_done(xs);
639 }
640
641 void
642 isp_done(XS_T *xs)
643 {
644 if (XS_CMD_WDOG_P(xs) == 0) {
645 struct ispsoftc *isp = XS_ISP(xs);
646 callout_stop(&xs->xs_callout);
647 if (XS_CMD_GRACE_P(xs)) {
648 isp_prt(isp, ISP_LOGDEBUG1,
649 "finished command on borrowed time");
650 }
651 XS_CMD_S_CLEAR(xs);
652 /*
653 * Fixup- if we get a QFULL, we need
654 * to set XS_BUSY as the error.
655 */
656 if (xs->status == SCSI_QUEUE_FULL) {
657 xs->error = XS_BUSY;
658 }
659 if (isp->isp_osinfo.paused) {
660 isp->isp_osinfo.paused = 0;
661 scsipi_channel_timed_thaw(&isp->isp_chanA);
662 if (IS_DUALBUS(isp)) {
663 scsipi_channel_timed_thaw(&isp->isp_chanB);
664 }
665 }
666 if (xs->error == XS_DRIVER_STUFFUP) {
667 isp_prt(isp, ISP_LOGERR, "BOTCHED cmd for %d.%d.%d cmd 0x%x datalen %ld",
668 XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs), XS_CDBP(xs)[0], (long) XS_XFRLEN(xs));
669 }
670 scsipi_done(xs);
671 }
672 }
673
674 static void
675 isp_dog(void *arg)
676 {
677 XS_T *xs = arg;
678 struct ispsoftc *isp = XS_ISP(xs);
679 u_int16_t handle;
680
681 ISP_ILOCK(isp);
682 /*
683 * We've decided this command is dead. Make sure we're not trying
684 * to kill a command that's already dead by getting it's handle and
685 * and seeing whether it's still alive.
686 */
687 handle = isp_find_handle(isp, xs);
688 if (handle) {
689 u_int16_t isr, mbox, sema;
690
691 if (XS_CMD_DONE_P(xs)) {
692 isp_prt(isp, ISP_LOGDEBUG1,
693 "watchdog found done cmd (handle 0x%x)", handle);
694 ISP_IUNLOCK(isp);
695 return;
696 }
697
698 if (XS_CMD_WDOG_P(xs)) {
699 isp_prt(isp, ISP_LOGDEBUG1,
700 "recursive watchdog (handle 0x%x)", handle);
701 ISP_IUNLOCK(isp);
702 return;
703 }
704
705 XS_CMD_S_WDOG(xs);
706
707 if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
708 isp_intr(isp, isr, sema, mbox);
709
710 }
711 if (XS_CMD_DONE_P(xs)) {
712 isp_prt(isp, ISP_LOGDEBUG1,
713 "watchdog cleanup for handle 0x%x", handle);
714 XS_CMD_C_WDOG(xs);
715 isp_done(xs);
716 } else if (XS_CMD_GRACE_P(xs)) {
717 isp_prt(isp, ISP_LOGDEBUG1,
718 "watchdog timeout for handle 0x%x", handle);
719 /*
720 * Make sure the command is *really* dead before we
721 * release the handle (and DMA resources) for reuse.
722 */
723 (void) isp_control(isp, ISPCTL_ABORT_CMD, arg);
724
725 /*
726 * After this point, the command is really dead.
727 */
728 if (XS_XFRLEN(xs)) {
729 ISP_DMAFREE(isp, xs, handle);
730 }
731 isp_destroy_handle(isp, handle);
732 XS_SETERR(xs, XS_TIMEOUT);
733 XS_CMD_S_CLEAR(xs);
734 isp_done(xs);
735 } else {
736 u_int16_t nxti, optr;
737 ispreq_t local, *mp = &local, *qe;
738 isp_prt(isp, ISP_LOGDEBUG2,
739 "possible command timeout on handle %x", handle);
740 XS_CMD_C_WDOG(xs);
741 callout_reset(&xs->xs_callout, hz, isp_dog, xs);
742 if (isp_getrqentry(isp, &nxti, &optr, (void *) &qe)) {
743 ISP_UNLOCK(isp);
744 return;
745 }
746 XS_CMD_S_GRACE(xs);
747 MEMZERO((void *) mp, sizeof (*mp));
748 mp->req_header.rqs_entry_count = 1;
749 mp->req_header.rqs_entry_type = RQSTYPE_MARKER;
750 mp->req_modifier = SYNC_ALL;
751 mp->req_target = XS_CHANNEL(xs) << 7;
752 isp_put_request(isp, mp, qe);
753 ISP_ADD_REQUEST(isp, nxti);
754 }
755 } else {
756 isp_prt(isp, ISP_LOGDEBUG0, "watchdog with no command");
757 }
758 ISP_IUNLOCK(isp);
759 }
760
761 static void
762 isp_fc_worker(void *arg)
763 {
764 void scsipi_run_queue(struct scsipi_channel *);
765 struct ispsoftc *isp = arg;
766
767 for (;;) {
768 int s;
769
770 /*
771 * Note we do *not* use the ISP_LOCK/ISP_UNLOCK macros here.
772 */
773 s = splbio();
774 while (isp->isp_osinfo.threadwork) {
775 isp->isp_osinfo.threadwork = 0;
776 if (isp_fc_runstate(isp, 250000) == 0) {
777 break;
778 }
779 if (isp->isp_osinfo.loop_checked &&
780 FCPARAM(isp)->loop_seen_once == 0) {
781 splx(s);
782 goto skip;
783 }
784 isp->isp_osinfo.loop_checked = 1;
785 isp->isp_osinfo.threadwork = 1;
786 splx(s);
787 delay(500 * 1000);
788 s = splbio();
789 }
790 if (FCPARAM(isp)->isp_fwstate != FW_READY ||
791 FCPARAM(isp)->isp_loopstate != LOOP_READY) {
792 isp_prt(isp, ISP_LOGINFO, "isp_fc_runstate in vain");
793 isp->isp_osinfo.threadwork = 1;
794 splx(s);
795 continue;
796 }
797
798 if (isp->isp_osinfo.blocked) {
799 isp->isp_osinfo.blocked = 0;
800 isp_prt(isp, ISP_LOGDEBUG0,
801 "restarting queues (freeze count %d)",
802 isp->isp_chanA.chan_qfreeze);
803 scsipi_channel_thaw(&isp->isp_chanA, 1);
804 }
805
806 if (isp->isp_osinfo.thread == NULL)
807 break;
808
809 skip:
810 (void) tsleep(&isp->isp_osinfo.thread, PRIBIO, "fcclnup", 0);
811
812 splx(s);
813 }
814
815 /* In case parent is waiting for us to exit. */
816 wakeup(&isp->isp_osinfo.thread);
817
818 kthread_exit(0);
819 }
820
821 /*
822 * Free any associated resources prior to decommissioning and
823 * set the card to a known state (so it doesn't wake up and kick
824 * us when we aren't expecting it to).
825 *
826 * Locks are held before coming here.
827 */
828 void
829 isp_uninit(struct ispsoftc *isp)
830 {
831 isp_lock(isp);
832 /*
833 * Leave with interrupts disabled.
834 */
835 DISABLE_INTS(isp);
836 isp_unlock(isp);
837 }
838
839 int
840 isp_async(struct ispsoftc *isp, ispasync_t cmd, void *arg)
841 {
842 int bus, tgt;
843
844 switch (cmd) {
845 case ISPASYNC_NEW_TGT_PARAMS:
846 if (IS_SCSI(isp) && isp->isp_dblev) {
847 sdparam *sdp = isp->isp_param;
848 int flags;
849 struct scsipi_xfer_mode xm;
850
851 tgt = *((int *) arg);
852 bus = (tgt >> 16) & 0xffff;
853 tgt &= 0xffff;
854 sdp += bus;
855 flags = sdp->isp_devparam[tgt].actv_flags;
856
857 xm.xm_mode = 0;
858 xm.xm_period = sdp->isp_devparam[tgt].actv_period;
859 xm.xm_offset = sdp->isp_devparam[tgt].actv_offset;
860 xm.xm_target = tgt;
861
862 if ((flags & DPARM_SYNC) && xm.xm_period && xm.xm_offset)
863 xm.xm_mode |= PERIPH_CAP_SYNC;
864 if (flags & DPARM_WIDE)
865 xm.xm_mode |= PERIPH_CAP_WIDE16;
866 if (flags & DPARM_TQING)
867 xm.xm_mode |= PERIPH_CAP_TQING;
868 scsipi_async_event(bus? &isp->isp_chanB : &isp->isp_chanA,
869 ASYNC_EVENT_XFER_MODE, &xm);
870 break;
871 }
872 case ISPASYNC_BUS_RESET:
873 bus = *((int *) arg);
874 scsipi_async_event(bus? &isp->isp_chanB : &isp->isp_chanA,
875 ASYNC_EVENT_RESET, NULL);
876 isp_prt(isp, ISP_LOGINFO, "SCSI bus %d reset detected", bus);
877 break;
878 case ISPASYNC_LIP:
879 /*
880 * Don't do queue freezes or blockage until we have the
881 * thread running that can unfreeze/unblock us.
882 */
883 if (isp->isp_osinfo.blocked == 0) {
884 if (isp->isp_osinfo.thread) {
885 isp->isp_osinfo.blocked = 1;
886 scsipi_channel_freeze(&isp->isp_chanA, 1);
887 }
888 }
889 isp_prt(isp, ISP_LOGINFO, "LIP Received");
890 break;
891 case ISPASYNC_LOOP_RESET:
892 /*
893 * Don't do queue freezes or blockage until we have the
894 * thread running that can unfreeze/unblock us.
895 */
896 if (isp->isp_osinfo.blocked == 0) {
897 if (isp->isp_osinfo.thread) {
898 isp->isp_osinfo.blocked = 1;
899 scsipi_channel_freeze(&isp->isp_chanA, 1);
900 }
901 }
902 isp_prt(isp, ISP_LOGINFO, "Loop Reset Received");
903 break;
904 case ISPASYNC_LOOP_DOWN:
905 /*
906 * Don't do queue freezes or blockage until we have the
907 * thread running that can unfreeze/unblock us.
908 */
909 if (isp->isp_osinfo.blocked == 0) {
910 if (isp->isp_osinfo.thread) {
911 isp->isp_osinfo.blocked = 1;
912 scsipi_channel_freeze(&isp->isp_chanA, 1);
913 }
914 }
915 isp_prt(isp, ISP_LOGINFO, "Loop DOWN");
916 break;
917 case ISPASYNC_LOOP_UP:
918 /*
919 * Let the subsequent ISPASYNC_CHANGE_NOTIFY invoke
920 * the FC worker thread. When the FC worker thread
921 * is done, let *it* call scsipi_channel_thaw...
922 */
923 isp_prt(isp, ISP_LOGINFO, "Loop UP");
924 break;
925 case ISPASYNC_PROMENADE:
926 if (IS_FC(isp) && isp->isp_dblev) {
927 static const char fmt[] = "Target %d (Loop 0x%x) Port ID 0x%x "
928 "(role %s) %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x";
929 static const char *const roles[4] = {
930 "None", "Target", "Initiator", "Target/Initiator"
931 };
932 fcparam *fcp = isp->isp_param;
933 int tgt1 = *((int *) arg);
934 struct lportdb *lp = &fcp->portdb[tgt1];
935
936 isp_prt(isp, ISP_LOGINFO, fmt, tgt1, lp->loopid, lp->portid,
937 roles[lp->roles & 0x3],
938 (lp->valid)? "Arrived" : "Departed",
939 (u_int32_t) (lp->port_wwn >> 32),
940 (u_int32_t) (lp->port_wwn & 0xffffffffLL),
941 (u_int32_t) (lp->node_wwn >> 32),
942 (u_int32_t) (lp->node_wwn & 0xffffffffLL));
943 break;
944 }
945 case ISPASYNC_CHANGE_NOTIFY:
946 if (arg == ISPASYNC_CHANGE_PDB) {
947 isp_prt(isp, ISP_LOGINFO, "Port Database Changed");
948 } else if (arg == ISPASYNC_CHANGE_SNS) {
949 isp_prt(isp, ISP_LOGINFO,
950 "Name Server Database Changed");
951 }
952
953 /*
954 * We can set blocked here because we know it's now okay
955 * to try and run isp_fc_runstate (in order to build loop
956 * state). But we don't try and freeze the midlayer's queue
957 * if we have no thread that we can wake to later unfreeze
958 * it.
959 */
960 if (isp->isp_osinfo.blocked == 0) {
961 isp->isp_osinfo.blocked = 1;
962 if (isp->isp_osinfo.thread) {
963 scsipi_channel_freeze(&isp->isp_chanA, 1);
964 }
965 }
966 /*
967 * Note that we have work for the thread to do, and
968 * if the thread is here already, wake it up.
969 */
970 isp->isp_osinfo.threadwork++;
971 if (isp->isp_osinfo.thread) {
972 wakeup(&isp->isp_osinfo.thread);
973 } else {
974 isp_prt(isp, ISP_LOGDEBUG1, "no FC thread yet");
975 }
976 break;
977 case ISPASYNC_FABRIC_DEV:
978 {
979 int target, base, lim;
980 fcparam *fcp = isp->isp_param;
981 struct lportdb *lp = NULL;
982 struct lportdb *clp = (struct lportdb *) arg;
983 const char *pt;
984
985 switch (clp->port_type) {
986 case 1:
987 pt = " N_Port";
988 break;
989 case 2:
990 pt = " NL_Port";
991 break;
992 case 3:
993 pt = "F/NL_Port";
994 break;
995 case 0x7f:
996 pt = " Nx_Port";
997 break;
998 case 0x81:
999 pt = " F_port";
1000 break;
1001 case 0x82:
1002 pt = " FL_Port";
1003 break;
1004 case 0x84:
1005 pt = " E_port";
1006 break;
1007 default:
1008 pt = " ";
1009 break;
1010 }
1011
1012 isp_prt(isp, ISP_LOGINFO,
1013 "%s Fabric Device @ PortID 0x%x", pt, clp->portid);
1014
1015 /*
1016 * If we don't have an initiator role we bail.
1017 *
1018 * We just use ISPASYNC_FABRIC_DEV for announcement purposes.
1019 */
1020
1021 if ((isp->isp_role & ISP_ROLE_INITIATOR) == 0) {
1022 break;
1023 }
1024
1025 /*
1026 * Is this entry for us? If so, we bail.
1027 */
1028
1029 if (fcp->isp_portid == clp->portid) {
1030 break;
1031 }
1032
1033 /*
1034 * Else, the default policy is to find room for it in
1035 * our local port database. Later, when we execute
1036 * the call to isp_pdb_sync either this newly arrived
1037 * or already logged in device will be (re)announced.
1038 */
1039
1040 if (fcp->isp_topo == TOPO_FL_PORT)
1041 base = FC_SNS_ID+1;
1042 else
1043 base = 0;
1044
1045 if (fcp->isp_topo == TOPO_N_PORT)
1046 lim = 1;
1047 else
1048 lim = MAX_FC_TARG;
1049
1050 /*
1051 * Is it already in our list?
1052 */
1053 for (target = base; target < lim; target++) {
1054 if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
1055 continue;
1056 }
1057 lp = &fcp->portdb[target];
1058 if (lp->port_wwn == clp->port_wwn &&
1059 lp->node_wwn == clp->node_wwn) {
1060 lp->fabric_dev = 1;
1061 break;
1062 }
1063 }
1064 if (target < lim) {
1065 break;
1066 }
1067 for (target = base; target < lim; target++) {
1068 if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
1069 continue;
1070 }
1071 lp = &fcp->portdb[target];
1072 if (lp->port_wwn == 0) {
1073 break;
1074 }
1075 }
1076 if (target == lim) {
1077 isp_prt(isp, ISP_LOGWARN,
1078 "out of space for fabric devices");
1079 break;
1080 }
1081 lp->port_type = clp->port_type;
1082 lp->fc4_type = clp->fc4_type;
1083 lp->node_wwn = clp->node_wwn;
1084 lp->port_wwn = clp->port_wwn;
1085 lp->portid = clp->portid;
1086 lp->fabric_dev = 1;
1087 break;
1088 }
1089 case ISPASYNC_FW_CRASH:
1090 {
1091 u_int16_t mbox1, mbox6;
1092 mbox1 = ISP_READ(isp, OUTMAILBOX1);
1093 if (IS_DUALBUS(isp)) {
1094 mbox6 = ISP_READ(isp, OUTMAILBOX6);
1095 } else {
1096 mbox6 = 0;
1097 }
1098 isp_prt(isp, ISP_LOGERR,
1099 "Internal Firmware Error on bus %d @ RISC Address 0x%x",
1100 mbox6, mbox1);
1101 #ifdef ISP_FW_CRASH_DUMP
1102 if (IS_FC(isp)) {
1103 if (isp->isp_osinfo.blocked == 0) {
1104 isp->isp_osinfo.blocked = 1;
1105 scsipi_channel_freeze(&isp->isp_chanA, 1);
1106 }
1107 isp_fw_dump(isp);
1108 }
1109 isp_reinit(isp);
1110 isp_async(isp, ISPASYNC_FW_RESTARTED, NULL);
1111 #endif
1112 break;
1113 }
1114 default:
1115 break;
1116 }
1117 return (0);
1118 }
1119
1120 #include <machine/stdarg.h>
1121 void
1122 isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
1123 {
1124 va_list ap;
1125 if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
1126 return;
1127 }
1128 printf("%s: ", isp->isp_name);
1129 va_start(ap, fmt);
1130 vprintf(fmt, ap);
1131 va_end(ap);
1132 printf("\n");
1133 }
1134