/* $NetBSD: aic7xxx_osm.c,v 1.6 2003/04/21 16:52:07 fvdl Exp $ */
2
3 /*
4 * Bus independent FreeBSD shim for the aic7xxx based adaptec SCSI controllers
5 *
6 * Copyright (c) 1994-2001 Justin T. Gibbs.
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions, and the following disclaimer,
14 * without modification.
15 * 2. The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * Alternatively, this software may be distributed under the terms of the
19 * GNU Public License ("GPL").
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
25 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 * //depot/aic7xxx/freebsd/dev/aic7xxx/aic7xxx_osm.c#12 $
34 *
35 * $FreeBSD: src/sys/dev/aic7xxx/aic7xxx_osm.c,v 1.31 2002/11/30 19:08:58 scottl Exp $
36 */
37 /*
38 * Ported from FreeBSD by Pascal Renauld, Network Storage Solutions, Inc. - April 2003
39 */
40 #include <dev/ic/aic7xxx_osm.h>
41 #include <dev/ic/aic7xxx_inline.h>
42
43 #ifndef AHC_TMODE_ENABLE
44 #define AHC_TMODE_ENABLE 0
45 #endif
46
47
48 static void ahc_action(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg);
49 static void ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments);
50 static int ahc_poll(struct ahc_softc *ahc, int wait);
51 static void ahc_setup_data(struct ahc_softc *ahc,
52 struct scsipi_xfer *xs, struct scb *scb);
53 static void ahc_set_recoveryscb(struct ahc_softc *ahc, struct scb *scb);
54 static int ahc_ioctl(struct scsipi_channel *channel, u_long cmd, caddr_t addr, int flag,
55 struct proc *p);
56
57
58
/*
 * Attach all the sub-devices we can find.
 *
 * Fills in the scsipi adapter/channel structures for this controller
 * (two channels for "twin" parts), announces the controller, attaches
 * the child scsibus(es) and finally enables interrupts.  Returns 1 on
 * success (the only outcome here).
 */
int
ahc_attach(struct ahc_softc *ahc)
{
	u_long s;
	int i;
	char ahc_info[256];

	LIST_INIT(&ahc->pending_scbs);
	/* One untagged-transaction queue per possible target. */
	for (i = 0; i < AHC_NUM_TARGETS; i++)
		TAILQ_INIT(&ahc->untagged_queues[i]);

	ahc_lock(ahc, &s);

	ahc->sc_adapter.adapt_dev = &ahc->sc_dev;
	ahc->sc_adapter.adapt_nchannels = (ahc->features & AHC_TWIN) ? 2 : 1;

	ahc->sc_adapter.adapt_openings = AHC_MAX_QUEUE;
	ahc->sc_adapter.adapt_max_periph = 16;

	ahc->sc_adapter.adapt_ioctl = ahc_ioctl;
	ahc->sc_adapter.adapt_minphys = ahc_minphys;
	ahc->sc_adapter.adapt_request = ahc_action;

	ahc->sc_channel.chan_adapter = &ahc->sc_adapter;
	ahc->sc_channel.chan_bustype = &scsi_bustype;
	ahc->sc_channel.chan_channel = 0;
	ahc->sc_channel.chan_ntargets = (ahc->features & AHC_WIDE) ? 16 : 8;
	ahc->sc_channel.chan_nluns = 8 /*AHC_NUM_LUNS*/;
	ahc->sc_channel.chan_id = ahc->our_id;

	if (ahc->features & AHC_TWIN) {
		/* Second channel is a copy of the first with its own ID. */
		ahc->sc_channel_b = ahc->sc_channel;
		ahc->sc_channel_b.chan_id = ahc->our_id_b;
		ahc->sc_channel_b.chan_channel = 1;
	}

	ahc_controller_info(ahc, ahc_info);
	printf("%s: %s\n", ahc->sc_dev.dv_xname, ahc_info);

	/*
	 * Attach the busses in "primary first" order.  When the B channel
	 * is the primary one, attach it before channel A so that bus
	 * numbering matches user expectation.
	 */
	if ((ahc->flags & AHC_PRIMARY_CHANNEL) == 0) {
		ahc->sc_child = config_found((void *)&ahc->sc_dev,
		    &ahc->sc_channel, scsiprint);
		if (ahc->features & AHC_TWIN)
			ahc->sc_child_b = config_found((void *)&ahc->sc_dev,
			    &ahc->sc_channel_b, scsiprint);
	} else {
		ahc->sc_child = config_found((void *)&ahc->sc_dev,
		    &ahc->sc_channel_b, scsiprint);
		ahc->sc_child_b = config_found((void *)&ahc->sc_dev,
		    &ahc->sc_channel, scsiprint);
	}

	ahc_intr_enable(ahc, TRUE);

	ahc_unlock(ahc, &s);
	return (1);
}
119
/*
 * Interrupt entry point: forward the interrupt to the bus-independent
 * core handler.
 */
void
ahc_platform_intr(void *arg)
{
	struct ahc_softc *sc = arg;

	ahc_intr(sc);
}
131
/*
 * We have an scb which has been processed by the
 * adaptor, now we look to see how the operation
 * went.
 *
 * Tears down the DMA mapping, handles autosense "bounce", recovery-SCB
 * completion bookkeeping and queue thawing, then frees the SCB and
 * completes the transfer with scsipi_done().
 */
void
ahc_done(struct ahc_softc *ahc, struct scb *scb)
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	u_long s;

	xs = scb->xs;
	periph = xs->xs_periph;
	LIST_REMOVE(scb, pending_links);
	if ((scb->flags & SCB_UNTAGGEDQ) != 0) {
		/*
		 * Remove this SCB from its per-target untagged queue and
		 * kick off the next untagged transaction, if any.
		 */
		struct scb_tailq *untagged_q;
		int target_offset;

		target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
		untagged_q = &ahc->untagged_queues[target_offset];
		TAILQ_REMOVE(untagged_q, scb, links.tqe);
		scb->flags &= ~SCB_UNTAGGEDQ;
		ahc_run_untagged_queue(ahc, untagged_q);
	}

	/* The command completed; cancel its timeout. */
	callout_stop(&scb->xs->xs_callout);

	if (xs->datalen) {
		int op;

		if (xs->xs_control & XS_CTL_DATA_IN)
			op = BUS_DMASYNC_POSTREAD;
		else
			op = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(ahc->parent_dmat, scb->dmamap, 0,
				scb->dmamap->dm_mapsize, op);
		bus_dmamap_unload(ahc->parent_dmat, scb->dmamap);
	}

	/*
	 * If the recovery SCB completes, we have to be
	 * out of our timeout.
	 */
	if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
		struct scb *list_scb;

		/*
		 * We were able to complete the command successfully,
		 * so reinstate the timeouts for all other pending
		 * commands.
		 */
		LIST_FOREACH(list_scb, &ahc->pending_scbs, pending_links) {
			struct scsipi_xfer *xs = list_scb->xs;

			if (!(xs->xs_control & XS_CTL_POLL)) {
				/*
				 * timeout is in ms; divide before
				 * multiplying by hz for large values to
				 * avoid integer overflow.
				 */
				callout_reset(&list_scb->xs->xs_callout,
				    (list_scb->xs->timeout > 1000000) ?
				    (list_scb->xs->timeout / 1000) * hz :
				    (list_scb->xs->timeout * hz) / 1000,
				    ahc_timeout, list_scb);
			}
		}

		if (ahc_get_transaction_status(scb) == CAM_BDR_SENT
		    || ahc_get_transaction_status(scb) == CAM_REQ_ABORTED)
			ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
		scsipi_printaddr(xs->xs_periph);
		printf("%s: no longer in timeout, status = %x\n",
		       ahc_name(ahc), xs->status);
	}

	if (xs->error != XS_NOERROR) {
		/* Don't clobber any existing error state */
	} else if ((scb->flags & SCB_SENSE) != 0) {
		/*
		 * We performed autosense retrieval.
		 *
		 * Zero any sense not transferred by the
		 * device. The SCSI spec mandates that any
		 * untransfered data should be assumed to be
		 * zero. Complete the 'bounce' of sense information
		 * through buffers accessible via bus-space by
		 * copying it into the clients csio.
		 */
		memset(&xs->sense.scsi_sense, 0, sizeof(xs->sense.scsi_sense));
		memcpy(&xs->sense.scsi_sense,
		       ahc_get_sense_buf(ahc, scb),
		       sizeof(xs->sense.scsi_sense));
		xs->error = XS_SENSE;
	}
	if (scb->flags & SCB_FREEZE_QUEUE) {
		/* Undo the freeze taken when this SCB froze the periph. */
		scsipi_periph_thaw(periph, 1);
		scb->flags &= ~SCB_FREEZE_QUEUE;
	}

	ahc_lock(ahc, &s);
	ahc_free_scb(ahc, scb);
	ahc_unlock(ahc, &s);

	scsipi_done(xs);
}
235
236 static int
237 ahc_ioctl(struct scsipi_channel *channel, u_long cmd, caddr_t addr, int flag,
238 struct proc *p)
239 {
240 struct ahc_softc *ahc = (void *)channel->chan_adapter->adapt_dev;
241 int s, ret = ENOTTY;
242
243 switch (cmd) {
244 case SCBUSIORESET:
245 s = splbio();
246 ahc_reset_channel(ahc, channel->chan_channel == 1 ? 'B' : 'A',
247 TRUE);
248 splx(s);
249 ret = 0;
250 break;
251 default:
252 break;
253 }
254
255 return ret;
256 }
257
258 static void
259 ahc_action(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
260 {
261 struct ahc_softc *ahc;
262 int s;
263 struct ahc_initiator_tinfo *tinfo;
264 struct ahc_tmode_tstate *tstate;
265 char channel;
266
267 ahc = (void *)chan->chan_adapter->adapt_dev;
268
269 channel = chan->chan_channel == 0 ? 'A' : 'B';
270
271 if (ahc->inited_channels[channel - 'A'] == 0) {
272 if ((channel == 'A' && (ahc->flags & AHC_RESET_BUS_A)) ||
273 (channel == 'B' && (ahc->flags & AHC_RESET_BUS_B))) {
274 s = splbio();
275 ahc_reset_channel(ahc, channel, TRUE);
276 splx(s);
277 }
278 ahc->inited_channels[channel - 'A'] = 1;
279 }
280
281 switch (req) {
282
283 case ADAPTER_REQ_RUN_XFER:
284 {
285 struct scsipi_xfer *xs;
286 struct scsipi_periph *periph;
287 struct scb *scb;
288 struct hardware_scb *hscb;
289 u_int target_id;
290 u_int our_id;
291 u_long s;
292
293 xs = arg;
294 periph = xs->xs_periph;
295
296 target_id = periph->periph_target;
297 our_id = ahc->our_id;
298
299 SC_DEBUG(xs->xs_periph, SCSIPI_DB3, ("ahc_action\n"));
300
301 /*
302 * get an scb to use.
303 */
304 ahc_lock(ahc, &s);
305 if ((scb = ahc_get_scb(ahc)) == NULL) {
306 xs->error = XS_RESOURCE_SHORTAGE;
307 ahc_unlock(ahc, &s);
308 scsipi_done(xs);
309 return;
310 }
311 ahc_unlock(ahc, &s);
312
313 hscb = scb->hscb;
314
315 SC_DEBUG(periph, SCSIPI_DB3, ("start scb(%p)\n", scb));
316 scb->xs = xs;
317
318 /*
319 * Put all the arguments for the xfer in the scb
320 */
321 hscb->control = 0;
322 hscb->scsiid = BUILD_SCSIID(ahc, 0, target_id, our_id);
323 hscb->lun = periph->periph_lun;
324 if (xs->xs_control & XS_CTL_RESET) {
325 hscb->cdb_len = 0;
326 scb->flags |= SCB_DEVICE_RESET;
327 hscb->control |= MK_MESSAGE;
328 ahc_execute_scb(scb, NULL, 0);
329 }
330
331 ahc_setup_data(ahc, xs, scb);
332
333 break;
334 }
335 case ADAPTER_REQ_GROW_RESOURCES:
336 printf("%s: ADAPTER_REQ_GROW_RESOURCES\n", ahc_name(ahc));
337 return;
338
339 case ADAPTER_REQ_SET_XFER_MODE:
340 {
341 struct scsipi_xfer_mode *xm = arg;
342 struct ahc_devinfo devinfo;
343 int target_id, our_id, first;
344 u_int width;
345 char channel;
346
347 target_id = xm->xm_target;
348 our_id = chan->chan_id;
349 channel = (chan->chan_channel == 1) ? 'B' : 'A';
350 s = splbio();
351 tinfo = ahc_fetch_transinfo(ahc, channel, our_id, target_id,
352 &tstate);
353 ahc_compile_devinfo(&devinfo, our_id, target_id,
354 0, channel, ROLE_INITIATOR);
355
356 /*
357 * XXX since the period and offset are not provided here,
358 * fake things by forcing a renegotiation using the user
359 * settings if this is called for the first time (i.e.
360 * during probe). Also, cap various values at the user
361 * values, assuming that the user set it up that way.
362 */
363 if (ahc->inited_target[target_id] == 0) {
364 tinfo->goal = tinfo->user;
365 tstate->tagenable |=
366 (ahc->user_tagenable & devinfo.target_mask);
367 tstate->discenable |=
368 (ahc->user_discenable & devinfo.target_mask);
369 ahc->inited_target[target_id] = 1;
370 first = 1;
371 } else
372 first = 0;
373
374 if (xm->xm_mode & (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
375 width = MSG_EXT_WDTR_BUS_16_BIT;
376 else
377 width = MSG_EXT_WDTR_BUS_8_BIT;
378
379 ahc_validate_width(ahc, NULL, &width, ROLE_UNKNOWN);
380 if (width > tinfo->user.width)
381 width = tinfo->user.width;
382 tinfo->goal.width = width;
383
384 if (!(xm->xm_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT))) {
385 tinfo->goal.period = 0;
386 tinfo->goal.offset = 0;
387 tinfo->goal.ppr_options = 0;
388 }
389
390 if ((xm->xm_mode & PERIPH_CAP_DT) &&
391 (tinfo->user.ppr_options & MSG_EXT_PPR_DT_REQ))
392 tinfo->goal.ppr_options |= MSG_EXT_PPR_DT_REQ;
393 else
394 tinfo->goal.ppr_options &= ~MSG_EXT_PPR_DT_REQ;
395
396 if ((xm->xm_mode & PERIPH_CAP_TQING) &&
397 (ahc->user_tagenable & devinfo.target_mask))
398 tstate->tagenable |= devinfo.target_mask;
399 else
400 tstate->tagenable &= ~devinfo.target_mask;
401
402 /*
403 * If this is the first request, and no negotiation is
404 * needed, just confirm the state to the scsipi layer,
405 * so that it can print a message.
406 */
407 if (!ahc_update_neg_request(ahc, &devinfo, tstate,
408 tinfo, AHC_NEG_IF_NON_ASYNC) && first)
409 scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, xm);
410 splx(s);
411 }
412 }
413
414 return;
415 }
416
417 static void
418 ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments)
419 {
420 struct scb *scb;
421 struct scsipi_xfer *xs;
422 struct ahc_softc *ahc;
423 struct ahc_initiator_tinfo *tinfo;
424 struct ahc_tmode_tstate *tstate;
425
426 u_int mask;
427 long s;
428
429 scb = (struct scb *)arg;
430 xs = scb->xs;
431 xs->error = 0;
432 xs->status = 0;
433 xs->xs_status = 0;
434 ahc = (void *)xs->xs_periph->periph_channel->chan_adapter->adapt_dev;
435
436 if (nsegments != 0) {
437 struct ahc_dma_seg *sg;
438 bus_dma_segment_t *end_seg;
439 int op;
440
441 end_seg = dm_segs + nsegments;
442
443 /* Copy the segments into our SG list */
444 sg = scb->sg_list;
445 while (dm_segs < end_seg) {
446 uint32_t len;
447
448 sg->addr = ahc_htole32(dm_segs->ds_addr);
449 len = dm_segs->ds_len
450 | ((dm_segs->ds_addr >> 8) & 0x7F000000);
451 sg->len = ahc_htole32(len);
452 sg++;
453 dm_segs++;
454 }
455
456 /*
457 * Note where to find the SG entries in bus space.
458 * We also set the full residual flag which the
459 * sequencer will clear as soon as a data transfer
460 * occurs.
461 */
462 scb->hscb->sgptr = ahc_htole32(scb->sg_list_phys|SG_FULL_RESID);
463
464 if (xs->xs_control & XS_CTL_DATA_IN)
465 op = BUS_DMASYNC_PREREAD;
466 else
467 op = BUS_DMASYNC_PREWRITE;
468
469 bus_dmamap_sync(ahc->parent_dmat, scb->dmamap, 0,
470 scb->dmamap->dm_mapsize, op);
471
472 sg--;
473 sg->len |= ahc_htole32(AHC_DMA_LAST_SEG);
474
475 /* Copy the first SG into the "current" data pointer area */
476 scb->hscb->dataptr = scb->sg_list->addr;
477 scb->hscb->datacnt = scb->sg_list->len;
478 } else {
479 scb->hscb->sgptr = ahc_htole32(SG_LIST_NULL);
480 scb->hscb->dataptr = 0;
481 scb->hscb->datacnt = 0;
482 }
483
484 scb->sg_count = nsegments;
485
486 ahc_lock(ahc, &s);
487
488 /*
489 * Last time we need to check if this SCB needs to
490 * be aborted.
491 */
492 if (xs->xs_status & XS_STS_DONE) {
493 if (nsegments != 0)
494 bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap);
495 ahc_free_scb(ahc, scb);
496 ahc_unlock(ahc, &s);
497 scsipi_done(xs);
498 return;
499 }
500
501 tinfo = ahc_fetch_transinfo(ahc, ahc->channel,
502 SCSIID_OUR_ID(scb->hscb->scsiid),
503 SCSIID_TARGET(ahc, scb->hscb->scsiid),
504 &tstate);
505
506 mask = SCB_GET_TARGET_MASK(ahc, scb);
507 scb->hscb->scsirate = tinfo->scsirate;
508 scb->hscb->scsioffset = tinfo->curr.offset;
509
510 if ((tstate->ultraenb & mask) != 0)
511 scb->hscb->control |= ULTRAENB;
512
513 if ((tstate->discenable & mask) != 0)
514 scb->hscb->control |= DISCENB;
515
516 if (xs->xs_tag_type)
517 scb->hscb->control |= xs->xs_tag_type;
518
519 if ((xs->xs_control & XS_CTL_DISCOVERY) && (tinfo->goal.width == 0
520 && tinfo->goal.offset == 0
521 && tinfo->goal.ppr_options == 0)) {
522 scb->flags |= SCB_NEGOTIATE;
523 scb->hscb->control |= MK_MESSAGE;
524 } else if ((tstate->auto_negotiate & mask) != 0) {
525 scb->flags |= SCB_AUTO_NEGOTIATE;
526 scb->hscb->control |= MK_MESSAGE;
527 }
528
529 LIST_INSERT_HEAD(&ahc->pending_scbs, scb, pending_links);
530
531 if (!(xs->xs_control & XS_CTL_POLL)) {
532 callout_reset(&scb->xs->xs_callout, xs->timeout > 1000000 ?
533 (xs->timeout / 1000) * hz : (xs->timeout * hz) / 1000,
534 ahc_timeout, scb);
535 }
536
537 /*
538 * We only allow one untagged transaction
539 * per target in the initiator role unless
540 * we are storing a full busy target *lun*
541 * table in SCB space.
542 */
543 if ((scb->hscb->control & (TARGET_SCB|TAG_ENB)) == 0
544 && (ahc->flags & AHC_SCB_BTT) == 0) {
545 struct scb_tailq *untagged_q;
546 int target_offset;
547
548 target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
549 untagged_q = &(ahc->untagged_queues[target_offset]);
550 TAILQ_INSERT_TAIL(untagged_q, scb, links.tqe);
551 scb->flags |= SCB_UNTAGGEDQ;
552 if (TAILQ_FIRST(untagged_q) != scb) {
553 ahc_unlock(ahc, &s);
554 return;
555 }
556 }
557 scb->flags |= SCB_ACTIVE;
558
559 if ((scb->flags & SCB_TARGET_IMMEDIATE) != 0) {
560 /* Define a mapping from our tag to the SCB. */
561 ahc->scb_data->scbindex[scb->hscb->tag] = scb;
562 ahc_pause(ahc);
563 if ((ahc->flags & AHC_PAGESCBS) == 0)
564 ahc_outb(ahc, SCBPTR, scb->hscb->tag);
565 ahc_outb(ahc, TARG_IMMEDIATE_SCB, scb->hscb->tag);
566 ahc_unpause(ahc);
567 } else {
568 ahc_queue_scb(ahc, scb);
569 }
570
571 if (!(xs->xs_control & XS_CTL_POLL)) {
572 ahc_unlock(ahc, &s);
573 return;
574 }
575
576 /*
577 * If we can't use interrupts, poll for completion
578 */
579 SC_DEBUG(xs->xs_periph, SCSIPI_DB3, ("cmd_poll\n"));
580 do {
581 if (ahc_poll(ahc, xs->timeout)) {
582 if (!(xs->xs_control & XS_CTL_SILENT))
583 printf("cmd fail\n");
584 ahc_timeout(scb);
585 break;
586 }
587 } while (!(xs->xs_status & XS_STS_DONE));
588 ahc_unlock(ahc, &s);
589
590 return;
591 }
592
593 static int
594 ahc_poll(struct ahc_softc *ahc, int wait)
595 {
596 while (--wait) {
597 DELAY(1000);
598 if (ahc_inb(ahc, INTSTAT) & INT_PEND)
599 break;
600 }
601
602 if (wait == 0) {
603 printf("%s: board is not responding\n", ahc_name(ahc));
604 return (EIO);
605 }
606
607 ahc_intr((void *)ahc);
608 return (0);
609 }
610
/*
 * Copy the CDB into the hardware SCB and, when the transfer moves
 * data, map the client buffer for DMA; ahc_execute_scb() is then
 * invoked with the resulting segment list (or with no segments for
 * a data-less command).  On any failure the SCB is released and the
 * transfer is completed with an error.
 */
static void
ahc_setup_data(struct ahc_softc *ahc, struct scsipi_xfer *xs,
	       struct scb *scb)
{
	struct hardware_scb *hscb;

	hscb = scb->hscb;
	xs->resid = xs->status = 0;

	hscb->cdb_len = xs->cmdlen;
	if (hscb->cdb_len > sizeof(hscb->cdb32)) {
		/* CDB too large for even the 32-byte area: reject. */
		u_long s;

		ahc_set_transaction_status(scb, CAM_REQ_INVALID);
		ahc_lock(ahc, &s);
		ahc_free_scb(ahc, scb);
		ahc_unlock(ahc, &s);
		scsipi_done(xs);
		return;
	}

	if (hscb->cdb_len > 12) {
		/* Large CDBs live in the separate cdb32 area. */
		memcpy(hscb->cdb32, xs->cmd, hscb->cdb_len);
		scb->flags |= SCB_CDB32_PTR;
	} else {
		memcpy(hscb->shared_data.cdb, xs->cmd, hscb->cdb_len);
	}

	/* Only use S/G if there is a transfer */
	if (xs->datalen) {
		int error;

		error = bus_dmamap_load(ahc->parent_dmat,
					scb->dmamap, xs->data,
					xs->datalen, NULL,
					((xs->xs_control & XS_CTL_NOSLEEP) ?
					 BUS_DMA_NOWAIT : BUS_DMA_WAITOK) |
					BUS_DMA_STREAMING |
					((xs->xs_control & XS_CTL_DATA_IN) ?
					 BUS_DMA_READ : BUS_DMA_WRITE));
		if (error) {
#ifdef AHC_DEBUG
			printf("%s: in ahc_setup_data(): bus_dmamap_load() "
			       "= %d\n",
			       ahc_name(ahc), error);
#endif
			/*
			 * NOTE(review): the SCB is not freed on this
			 * path, unlike the oversize-CDB path above —
			 * confirm whether this leaks the SCB.
			 */
			xs->error = XS_RESOURCE_SHORTAGE;
			scsipi_done(xs);
			return;
		}
		ahc_execute_scb(scb,
				scb->dmamap->dm_segs,
				scb->dmamap->dm_nsegs);
	} else {
		ahc_execute_scb(scb, NULL, 0);
	}
}
668
669 static void
670 ahc_set_recoveryscb(struct ahc_softc *ahc, struct scb *scb) {
671
672 if ((scb->flags & SCB_RECOVERY_SCB) == 0) {
673 struct scb *list_scb;
674
675 scb->flags |= SCB_RECOVERY_SCB;
676
677 /*
678 * Take all queued, but not sent SCBs out of the equation.
679 * Also ensure that no new CCBs are queued to us while we
680 * try to fix this problem.
681 */
682 scsipi_channel_freeze(&ahc->sc_channel, 1);
683 if (ahc->features & AHC_TWIN)
684 scsipi_channel_freeze(&ahc->sc_channel_b, 1);
685
686 /*
687 * Go through all of our pending SCBs and remove
688 * any scheduled timeouts for them. We will reschedule
689 * them after we've successfully fixed this problem.
690 */
691 LIST_FOREACH(list_scb, &ahc->pending_scbs, pending_links) {
692 callout_stop(&list_scb->xs->xs_callout);
693 }
694 }
695 }
696
/*
 * Per-command timeout handler (scheduled via callout_reset when the
 * SCB was queued).  Pauses the sequencer, diagnoses the stuck state
 * and attempts escalating recovery: wait on another SCB's timeout,
 * queue a bus-device-reset to the target, or as a last resort reset
 * the whole bus.
 */
void
ahc_timeout(void *arg)
{
	struct scb *scb;
	struct ahc_softc *ahc;
	long s;
	int found;
	u_int last_phase;
	int target;
	int lun;
	int i;
	char channel;

	scb = (struct scb *)arg;
	ahc = (struct ahc_softc *)scb->ahc_softc;

	ahc_lock(ahc, &s);

	/* Stop the sequencer and drain any posted completions. */
	ahc_pause_and_flushwork(ahc);

	if ((scb->flags & SCB_ACTIVE) == 0) {
		/* Previous timeout took care of me already */
		printf("%s: Timedout SCB already complete. "
		       "Interrupts may not be functioning.\n", ahc_name(ahc));
		ahc_unpause(ahc);
		ahc_unlock(ahc, &s);
		return;
	}

	target = SCB_GET_TARGET(ahc, scb);
	channel = SCB_GET_CHANNEL(ahc, scb);
	lun = SCB_GET_LUN(scb);

	ahc_print_path(ahc, scb);
	printf("SCB 0x%x - timed out\n", scb->hscb->tag);
	ahc_dump_card_state(ahc);
	last_phase = ahc_inb(ahc, LASTPHASE);
	if (scb->sg_count > 0) {
		/* Dump the S/G list for diagnosis. */
		for (i = 0; i < scb->sg_count; i++) {
			printf("sg[%d] - Addr 0x%x : Length %d\n",
			       i,
			       scb->sg_list[i].addr,
			       scb->sg_list[i].len & AHC_SG_LEN_MASK);
		}
	}
	if (scb->flags & (SCB_DEVICE_RESET|SCB_ABORT)) {
		/*
		 * Been down this road before.
		 * Do a full bus reset.
		 */
bus_reset:
		ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
		found = ahc_reset_channel(ahc, channel, /*Initiate Reset*/TRUE);
		printf("%s: Issued Channel %c Bus Reset. "
		       "%d SCBs aborted\n", ahc_name(ahc), channel, found);
	} else {
		/*
		 * If we are a target, transition to bus free and report
		 * the timeout.
		 *
		 * The target/initiator that is holding up the bus may not
		 * be the same as the one that triggered this timeout
		 * (different commands have different timeout lengths).
		 * If the bus is idle and we are acting as the initiator
		 * for this request, queue a BDR message to the timed out
		 * target. Otherwise, if the timed out transaction is
		 * active:
		 *   Initiator transaction:
		 *	Stuff the message buffer with a BDR message and assert
		 *	ATN in the hopes that the target will let go of the bus
		 *	and go to the mesgout phase. If this fails, we'll
		 *	get another timeout 2 seconds later which will attempt
		 *	a bus reset.
		 *
		 *   Target transaction:
		 *	Transition to BUS FREE and report the error.
		 *	It's good to be the target!
		 */
		u_int active_scb_index;
		u_int saved_scbptr;

		saved_scbptr = ahc_inb(ahc, SCBPTR);
		active_scb_index = ahc_inb(ahc, SCB_TAG);

		if ((ahc_inb(ahc, SEQ_FLAGS) & NOT_IDENTIFIED) == 0
		  && (active_scb_index < ahc->scb_data->numscbs)) {
			struct scb *active_scb;

			/*
			 * If the active SCB is not us, assume that
			 * the active SCB has a longer timeout than
			 * the timedout SCB, and wait for the active
			 * SCB to timeout.
			 */
			active_scb = ahc_lookup_scb(ahc, active_scb_index);
			if (active_scb != scb) {
				uint64_t newtimeout;

				ahc_print_path(ahc, scb);
				printf("Other SCB Timeout%s",
				       (scb->flags & SCB_OTHERTCL_TIMEOUT) != 0
				       ? " again\n" : "\n");
				scb->flags |= SCB_OTHERTCL_TIMEOUT;
				newtimeout = MAX(active_scb->xs->timeout,
						 scb->xs->timeout);
				callout_reset(&scb->xs->xs_callout,
				    newtimeout > 1000000 ?
				    (newtimeout / 1000) * hz :
				    (newtimeout * hz) / 1000,
				    ahc_timeout, scb);
				ahc_unpause(ahc);
				ahc_unlock(ahc, &s);
				return;
			}

			/* It's us */
			if ((scb->flags & SCB_TARGET_SCB) != 0) {

				/*
				 * Send back any queued up transactions
				 * and properly record the error condition.
				 */
				ahc_abort_scbs(ahc, SCB_GET_TARGET(ahc, scb),
					       SCB_GET_CHANNEL(ahc, scb),
					       SCB_GET_LUN(scb),
					       scb->hscb->tag,
					       ROLE_TARGET,
					       CAM_CMD_TIMEOUT);

				/* Will clear us from the bus */
				ahc_restart(ahc);
				ahc_unlock(ahc, &s);
				return;
			}

			/*
			 * We are the active initiator transaction: ask
			 * the target to release the bus via a BDR message
			 * and give it 2 seconds to comply.
			 */
			ahc_set_recoveryscb(ahc, active_scb);
			ahc_outb(ahc, MSG_OUT, HOST_MSG);
			ahc_outb(ahc, SCSISIGO, last_phase|ATNO);
			ahc_print_path(ahc, active_scb);
			printf("BDR message in message buffer\n");
			active_scb->flags |= SCB_DEVICE_RESET;
			callout_reset(&active_scb->xs->xs_callout,
				      2 * hz, ahc_timeout, active_scb);
			ahc_unpause(ahc);
		} else {
			int disconnected;

			/* XXX Shouldn't panic.  Just punt instead? */
			if ((scb->flags & SCB_TARGET_SCB) != 0)
				panic("Timed-out target SCB but bus idle");

			if (last_phase != P_BUSFREE
			  && (ahc_inb(ahc, SSTAT0) & TARGET) != 0) {
				/* XXX What happened to the SCB? */
				/* Hung target selection.  Goto busfree */
				printf("%s: Hung target selection\n",
				       ahc_name(ahc));
				ahc_restart(ahc);
				ahc_unlock(ahc, &s);
				return;
			}

			/* Still in the input queue means not yet on the bus. */
			if (ahc_search_qinfifo(ahc, target, channel, lun,
					       scb->hscb->tag, ROLE_INITIATOR,
					       /*status*/0, SEARCH_COUNT) > 0) {
				disconnected = FALSE;
			} else {
				disconnected = TRUE;
			}

			if (disconnected) {

				ahc_set_recoveryscb(ahc, scb);
				/*
				 * Actually re-queue this SCB in an attempt
				 * to select the device before it reconnects.
				 * In either case (selection or reselection),
				 * we will now issue a target reset to the
				 * timed-out device.
				 *
				 * Set the MK_MESSAGE control bit indicating
				 * that we desire to send a message.  We
				 * also set the disconnected flag since
				 * in the paging case there is no guarantee
				 * that our SCB control byte matches the
				 * version on the card.  We don't want the
				 * sequencer to abort the command thinking
				 * an unsolicited reselection occurred.
				 */
				scb->hscb->control |= MK_MESSAGE|DISCONNECTED;
				scb->flags |= SCB_DEVICE_RESET;

				/*
				 * Remove any cached copy of this SCB in the
				 * disconnected list in preparation for the
				 * queuing of our abort SCB.  We use the
				 * same element in the SCB, SCB_NEXT, for
				 * both the qinfifo and the disconnected list.
				 */
				ahc_search_disc_list(ahc, target, channel,
						     lun, scb->hscb->tag,
						     /*stop_on_first*/TRUE,
						     /*remove*/TRUE,
						     /*save_state*/FALSE);

				/*
				 * In the non-paging case, the sequencer will
				 * never re-reference the in-core SCB.
				 * To make sure we are notified during
				 * reselection, set the MK_MESSAGE flag in
				 * the card's copy of the SCB.
				 */
				if ((ahc->flags & AHC_PAGESCBS) == 0) {
					ahc_outb(ahc, SCBPTR, scb->hscb->tag);
					ahc_outb(ahc, SCB_CONTROL,
						 ahc_inb(ahc, SCB_CONTROL)
						| MK_MESSAGE);
				}

				/*
				 * Clear out any entries in the QINFIFO first
				 * so we are the next SCB for this target
				 * to run.
				 */
				ahc_search_qinfifo(ahc,
						   SCB_GET_TARGET(ahc, scb),
						   channel, SCB_GET_LUN(scb),
						   SCB_LIST_NULL,
						   ROLE_INITIATOR,
						   CAM_REQUEUE_REQ,
						   SEARCH_COMPLETE);
				ahc_print_path(ahc, scb);
				printf("Queuing a BDR SCB\n");
				ahc_qinfifo_requeue_tail(ahc, scb);
				ahc_outb(ahc, SCBPTR, saved_scbptr);
				callout_reset(&scb->xs->xs_callout, 2 * hz,
					      ahc_timeout, scb);
				ahc_unpause(ahc);
			} else {
				/* Go "immediately" to the bus reset */
				/* This shouldn't happen */
				ahc_set_recoveryscb(ahc, scb);
				ahc_print_path(ahc, scb);
				printf("SCB %d: Immediate reset.  "
				       "Flags = 0x%x\n", scb->hscb->tag,
				       scb->flags);
				goto bus_reset;
			}
		}
	}
	ahc_unlock(ahc, &s);
}
949
950 void
951 ahc_platform_set_tags(struct ahc_softc *ahc,
952 struct ahc_devinfo *devinfo, int enable)
953 {
954 struct ahc_initiator_tinfo *tinfo;
955 struct ahc_tmode_tstate *tstate;
956
957 tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
958 devinfo->target, &tstate);
959
960 if (enable)
961 tstate->tagenable |= devinfo->target_mask;
962 else
963 tstate->tagenable &= ~devinfo->target_mask;
964 }
965
966 int
967 ahc_platform_alloc(struct ahc_softc *ahc, void *platform_arg)
968 {
969 if (sizeof(struct ahc_platform_data) == 0)
970 return 0;
971 ahc->platform_data = malloc(sizeof(struct ahc_platform_data), M_DEVBUF,
972 M_NOWAIT);
973 if (ahc->platform_data == NULL)
974 return (ENOMEM);
975 return (0);
976 }
977
978 void
979 ahc_platform_free(struct ahc_softc *ahc)
980 {
981 if (sizeof(struct ahc_platform_data) == 0)
982 return;
983 free(ahc->platform_data, M_DEVBUF);
984 }
985
/*
 * Softc ordering hook used by the core when sorting controllers;
 * this platform imposes no ordering, so report equality.
 */
int
ahc_softc_comp(struct ahc_softc *lahc, struct ahc_softc *rahc)
{
	return (0);
}
991
992 int
993 ahc_detach(struct device *self, int flags)
994 {
995 int rv = 0;
996
997 struct ahc_softc *ahc = (struct ahc_softc*)self;
998
999 ahc_intr_enable(ahc, FALSE);
1000 if (ahc->sc_child != NULL)
1001 rv = config_detach(ahc->sc_child, flags);
1002 if (rv == 0 && ahc->sc_child_b != NULL)
1003 rv = config_detach(ahc->sc_child_b, flags);
1004
1005 shutdownhook_disestablish(ahc->shutdown_hook);
1006
1007 ahc_free(ahc);
1008
1009 return (rv);
1010 }
1011
1012
/*
 * Deliver an asynchronous event from the core to the scsipi midlayer:
 * transfer-negotiation completion (AC_TRANSFER_NEG) is translated into
 * an ASYNC_EVENT_XFER_MODE notification, and bus resets into
 * ASYNC_EVENT_RESET.
 */
void
ahc_send_async(struct ahc_softc *ahc, char channel, u_int target, u_int lun,
	       ac_code code, void *opt_arg)
{
	struct ahc_tmode_tstate *tstate;
	struct ahc_initiator_tinfo *tinfo;
	struct ahc_devinfo devinfo;
	struct scsipi_channel *chan;
	struct scsipi_xfer_mode xm;

	chan = channel == 'B' ? &ahc->sc_channel_b : &ahc->sc_channel;
	switch (code) {
	case AC_TRANSFER_NEG:
		tinfo = ahc_fetch_transinfo(ahc, channel, ahc->our_id, target,
		    &tstate);
		ahc_compile_devinfo(&devinfo, ahc->our_id, target, lun,
		    channel, ROLE_UNKNOWN);
		/*
		 * Don't bother if negotiating. XXX?
		 * (Only report once current settings match the goal.)
		 */
		if (tinfo->curr.period != tinfo->goal.period
		    || tinfo->curr.width != tinfo->goal.width
		    || tinfo->curr.offset != tinfo->goal.offset
		    || tinfo->curr.ppr_options != tinfo->goal.ppr_options)
			break;
		/* Translate the negotiated parameters into xfer-mode bits. */
		xm.xm_target = target;
		xm.xm_mode = 0;
		xm.xm_period = tinfo->curr.period;
		xm.xm_offset = tinfo->curr.offset;
		if (tinfo->curr.width == MSG_EXT_WDTR_BUS_16_BIT)
			xm.xm_mode |= PERIPH_CAP_WIDE16;
		if (tinfo->curr.period)
			xm.xm_mode |= PERIPH_CAP_SYNC;
		if (tstate->tagenable & devinfo.target_mask)
			xm.xm_mode |= PERIPH_CAP_TQING;
		if (tinfo->curr.ppr_options & MSG_EXT_PPR_DT_REQ)
			xm.xm_mode |= PERIPH_CAP_DT;
		scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, &xm);
		break;
	case AC_BUS_RESET:
		scsipi_async_event(chan, ASYNC_EVENT_RESET, NULL);
		/* FALLTHROUGH */
	case AC_SENT_BDR:
	default:
		break;
	}
}
1059