/*	$NetBSD: aic7xxx_osm.c,v 1.10 2003/07/14 15:47:10 lukem Exp $	*/

/*
 * Bus independent FreeBSD shim for the aic7xxx based adaptec SCSI controllers
 *
 * Copyright (c) 1994-2001 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU Public License ("GPL").
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * //depot/aic7xxx/freebsd/dev/aic7xxx/aic7xxx_osm.c#12 $
 *
 * $FreeBSD: src/sys/dev/aic7xxx/aic7xxx_osm.c,v 1.31 2002/11/30 19:08:58 scottl Exp $
 */
/*
 * Ported from FreeBSD by Pascal Renauld, Network Storage Solutions, Inc. - April 2003
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: aic7xxx_osm.c,v 1.10 2003/07/14 15:47:10 lukem Exp $");

#include <dev/ic/aic7xxx_osm.h>
#include <dev/ic/aic7xxx_inline.h>

#ifndef AHC_TMODE_ENABLE
#define AHC_TMODE_ENABLE 0
#endif


static void	ahc_action(struct scsipi_channel *chan,
			   scsipi_adapter_req_t req, void *arg);
static void	ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs,
				int nsegments);
static int	ahc_poll(struct ahc_softc *ahc, int wait);
static void	ahc_setup_data(struct ahc_softc *ahc,
			       struct scsipi_xfer *xs, struct scb *scb);
static void	ahc_set_recoveryscb(struct ahc_softc *ahc, struct scb *scb);
static int	ahc_ioctl(struct scsipi_channel *channel, u_long cmd,
			  caddr_t addr, int flag, struct proc *p);


/*
 * Attach all the sub-devices we can find
 */
int
ahc_attach(struct ahc_softc *ahc)
{
	u_long	s;
	int	i;
	char	ahc_info[256];

	LIST_INIT(&ahc->pending_scbs);
	for (i = 0; i < AHC_NUM_TARGETS; i++)
		TAILQ_INIT(&ahc->untagged_queues[i]);

	ahc_lock(ahc, &s);

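	/*
	 * Describe this adapter and its channels to the scsipi
	 * mid-layer; all requests come back through ahc_action().
	 */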
	ahc->sc_adapter.adapt_dev = &ahc->sc_dev;
	ahc->sc_adapter.adapt_nchannels = (ahc->features & AHC_TWIN) ? 2 : 1;

	ahc->sc_adapter.adapt_openings = AHC_MAX_QUEUE;
	ahc->sc_adapter.adapt_max_periph = 16;

	ahc->sc_adapter.adapt_ioctl = ahc_ioctl;
	ahc->sc_adapter.adapt_minphys = ahc_minphys;
	ahc->sc_adapter.adapt_request = ahc_action;

	ahc->sc_channel.chan_adapter = &ahc->sc_adapter;
	ahc->sc_channel.chan_bustype = &scsi_bustype;
	ahc->sc_channel.chan_channel = 0;
	ahc->sc_channel.chan_ntargets = (ahc->features & AHC_WIDE) ? 16 : 8;
	ahc->sc_channel.chan_nluns = 8 /*AHC_NUM_LUNS*/;
	ahc->sc_channel.chan_id = ahc->our_id;

	if (ahc->features & AHC_TWIN) {
		ahc->sc_channel_b = ahc->sc_channel;
		ahc->sc_channel_b.chan_id = ahc->our_id_b;
		ahc->sc_channel_b.chan_channel = 1;
	}

	ahc_controller_info(ahc, ahc_info);
	printf("%s: %s\n", ahc->sc_dev.dv_xname, ahc_info);

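	/*
	 * Attach the busses in the order implied by the
	 * AHC_PRIMARY_CHANNEL flag, so that the primary bus of a
	 * twin-channel adapter is always probed first.
	 */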
	if ((ahc->flags & AHC_PRIMARY_CHANNEL) == 0) {
		ahc->sc_child = config_found((void *)&ahc->sc_dev,
		    &ahc->sc_channel, scsiprint);
		if (ahc->features & AHC_TWIN)
			ahc->sc_child_b = config_found((void *)&ahc->sc_dev,
			    &ahc->sc_channel_b, scsiprint);
	} else {
		if (ahc->features & AHC_TWIN)
			ahc->sc_child = config_found((void *)&ahc->sc_dev,
			    &ahc->sc_channel_b, scsiprint);
		ahc->sc_child_b = config_found((void *)&ahc->sc_dev,
		    &ahc->sc_channel, scsiprint);
	}

	ahc_intr_enable(ahc, TRUE);

	if (ahc->flags & AHC_RESET_BUS_A)
		ahc_reset_channel(ahc, 'A', TRUE);
	if ((ahc->features & AHC_TWIN) && ahc->flags & AHC_RESET_BUS_B)
		ahc_reset_channel(ahc, 'B', TRUE);

	ahc_unlock(ahc, &s);
	return (1);
}

/*
 * Catch an interrupt from the adapter
 */
void
ahc_platform_intr(void *arg)
{
	struct ahc_softc *ahc;

	ahc = (struct ahc_softc *)arg;
	ahc_intr(ahc);
}

/*
 * We have an scb which has been processed by the
 * adapter; now we look to see how the operation
 * went.
 */
void
ahc_done(struct ahc_softc *ahc, struct scb *scb)
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	u_long s;

	xs = scb->xs;
	periph = xs->xs_periph;
	LIST_REMOVE(scb, pending_links);
	if ((scb->flags & SCB_UNTAGGEDQ) != 0) {
		struct scb_tailq *untagged_q;
		int target_offset;

		target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
		untagged_q = &ahc->untagged_queues[target_offset];
		TAILQ_REMOVE(untagged_q, scb, links.tqe);
		scb->flags &= ~SCB_UNTAGGEDQ;
		ahc_run_untagged_queue(ahc, untagged_q);
	}

	callout_stop(&scb->xs->xs_callout);

	if (xs->datalen) {
		int op;

		if (xs->xs_control & XS_CTL_DATA_IN)
			op = BUS_DMASYNC_POSTREAD;
		else
			op = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(ahc->parent_dmat, scb->dmamap, 0,
		    scb->dmamap->dm_mapsize, op);
		bus_dmamap_unload(ahc->parent_dmat, scb->dmamap);
	}

	/*
	 * If the recovery SCB completes, the recovery action
	 * worked and we are out of our timeout.
	 */
	if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
		struct scb *list_scb;

		/*
		 * We were able to complete the command successfully,
		 * so reinstate the timeouts for all other pending
		 * commands.
		 */
		LIST_FOREACH(list_scb, &ahc->pending_scbs, pending_links) {
			struct scsipi_xfer *xs = list_scb->xs;

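			/*
			 * The timeout is given in milliseconds; for
			 * very long timeouts divide before multiplying
			 * by hz so the intermediate product cannot
			 * overflow an int.
			 */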
			if (!(xs->xs_control & XS_CTL_POLL)) {
				callout_reset(&list_scb->xs->xs_callout,
				    (list_scb->xs->timeout > 1000000) ?
				    (list_scb->xs->timeout / 1000) * hz :
				    (list_scb->xs->timeout * hz) / 1000,
				    ahc_timeout, list_scb);
			}
		}

		if (ahc_get_transaction_status(scb) == CAM_BDR_SENT
		 || ahc_get_transaction_status(scb) == CAM_REQ_ABORTED)
			ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
		scsipi_printaddr(xs->xs_periph);
		printf("%s: no longer in timeout, status = %x\n",
		    ahc_name(ahc), xs->status);
	}

	/* Don't clobber any existing error state */
	if (xs->error != XS_NOERROR) {
		/* Don't clobber any existing error state */
	} else if ((scb->flags & SCB_SENSE) != 0) {
		/*
		 * We performed autosense retrieval.
		 *
		 * Zero any sense not transferred by the
		 * device. The SCSI spec mandates that any
		 * untransferred data should be assumed to be
		 * zero. Complete the 'bounce' of sense information
		 * through buffers accessible via bus-space by
		 * copying it into the client's xfer.
		 */
		memset(&xs->sense.scsi_sense, 0, sizeof(xs->sense.scsi_sense));
		memcpy(&xs->sense.scsi_sense,
		    ahc_get_sense_buf(ahc, scb),
		    sizeof(xs->sense.scsi_sense));
		xs->error = XS_SENSE;
	}
	if (scb->flags & SCB_FREEZE_QUEUE) {
		scsipi_periph_thaw(periph, 1);
		scb->flags &= ~SCB_FREEZE_QUEUE;
	}

	ahc_lock(ahc, &s);
	ahc_free_scb(ahc, scb);
	ahc_unlock(ahc, &s);

	scsipi_done(xs);
}

static int
ahc_ioctl(struct scsipi_channel *channel, u_long cmd, caddr_t addr, int flag,
    struct proc *p)
{
	struct ahc_softc *ahc = (void *)channel->chan_adapter->adapt_dev;
	int s, ret = ENOTTY;

	switch (cmd) {
	case SCBUSIORESET:
		s = splbio();
		ahc_reset_channel(ahc, channel->chan_channel == 1 ? 'B' : 'A',
		    TRUE);
		splx(s);
		ret = 0;
		break;
	default:
		break;
	}

	return ret;
}

static void
ahc_action(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
{
	struct ahc_softc *ahc;
	int s;
	struct ahc_initiator_tinfo *tinfo;
	struct ahc_tmode_tstate *tstate;

	ahc = (void *)chan->chan_adapter->adapt_dev;

	switch (req) {

	case ADAPTER_REQ_RUN_XFER:
	    {
		struct scsipi_xfer *xs;
		struct scsipi_periph *periph;
		struct scb *scb;
		struct hardware_scb *hscb;
		u_int target_id;
		u_int our_id;
		u_long s;

		xs = arg;
		periph = xs->xs_periph;

		target_id = periph->periph_target;
		our_id = ahc->our_id;

		SC_DEBUG(xs->xs_periph, SCSIPI_DB3, ("ahc_action\n"));

		/*
		 * get an scb to use.
		 */
		ahc_lock(ahc, &s);
		if ((scb = ahc_get_scb(ahc)) == NULL) {
			xs->error = XS_RESOURCE_SHORTAGE;
			ahc_unlock(ahc, &s);
			scsipi_done(xs);
			return;
		}
		ahc_unlock(ahc, &s);

		hscb = scb->hscb;

		SC_DEBUG(periph, SCSIPI_DB3, ("start scb(%p)\n", scb));
		scb->xs = xs;

		/*
		 * Put all the arguments for the xfer in the scb
		 */
		hscb->control = 0;
		hscb->scsiid = BUILD_SCSIID(ahc, 0, target_id, our_id);
		hscb->lun = periph->periph_lun;
		if (xs->xs_control & XS_CTL_RESET) {
			hscb->cdb_len = 0;
			scb->flags |= SCB_DEVICE_RESET;
			hscb->control |= MK_MESSAGE;
			ahc_execute_scb(scb, NULL, 0);
			/* The reset has been queued; do not set up data. */
			break;
		}

		ahc_setup_data(ahc, xs, scb);

		break;
	    }
	case ADAPTER_REQ_GROW_RESOURCES:
		printf("%s: ADAPTER_REQ_GROW_RESOURCES\n", ahc_name(ahc));
		return;

	case ADAPTER_REQ_SET_XFER_MODE:
	    {
		struct scsipi_xfer_mode *xm = arg;
		struct ahc_devinfo devinfo;
		int target_id, our_id, first;
		u_int width;
		char channel;

		target_id = xm->xm_target;
		our_id = chan->chan_id;
		channel = (chan->chan_channel == 1) ? 'B' : 'A';
		s = splbio();
		tinfo = ahc_fetch_transinfo(ahc, channel, our_id, target_id,
		    &tstate);
		ahc_compile_devinfo(&devinfo, our_id, target_id,
		    0, channel, ROLE_INITIATOR);

		/*
		 * XXX since the period and offset are not provided here,
		 * fake things by forcing a renegotiation using the user
		 * settings if this is called for the first time (i.e.
		 * during probe). Also, cap various values at the user
		 * values, assuming that the user set it up that way.
		 */
		if (ahc->inited_target[target_id] == 0) {
			tinfo->goal = tinfo->user;
			tstate->tagenable |=
			    (ahc->user_tagenable & devinfo.target_mask);
			tstate->discenable |=
			    (ahc->user_discenable & devinfo.target_mask);
			ahc->inited_target[target_id] = 1;
			first = 1;
		} else
			first = 0;

		if (xm->xm_mode & (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
			width = MSG_EXT_WDTR_BUS_16_BIT;
		else
			width = MSG_EXT_WDTR_BUS_8_BIT;

		ahc_validate_width(ahc, NULL, &width, ROLE_UNKNOWN);
		if (width > tinfo->user.width)
			width = tinfo->user.width;
		tinfo->goal.width = width;

		if (!(xm->xm_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT))) {
			tinfo->goal.period = 0;
			tinfo->goal.offset = 0;
			tinfo->goal.ppr_options = 0;
		}

		if ((xm->xm_mode & PERIPH_CAP_DT) &&
		    (tinfo->user.ppr_options & MSG_EXT_PPR_DT_REQ))
			tinfo->goal.ppr_options |= MSG_EXT_PPR_DT_REQ;
		else
			tinfo->goal.ppr_options &= ~MSG_EXT_PPR_DT_REQ;

		if ((xm->xm_mode & PERIPH_CAP_TQING) &&
		    (ahc->user_tagenable & devinfo.target_mask))
			tstate->tagenable |= devinfo.target_mask;
		else
			tstate->tagenable &= ~devinfo.target_mask;

		/*
		 * If this is the first request, and no negotiation is
		 * needed, just confirm the state to the scsipi layer,
		 * so that it can print a message.
		 */
		if (!ahc_update_neg_request(ahc, &devinfo, tstate,
		    tinfo, AHC_NEG_IF_NON_ASYNC) && first) {
			xm->xm_mode = 0;
			xm->xm_period = tinfo->curr.period;
			xm->xm_offset = tinfo->curr.offset;
			if (tinfo->curr.width == MSG_EXT_WDTR_BUS_16_BIT)
				xm->xm_mode |= PERIPH_CAP_WIDE16;
			if (tinfo->curr.period)
				xm->xm_mode |= PERIPH_CAP_SYNC;
			if (tstate->tagenable & devinfo.target_mask)
				xm->xm_mode |= PERIPH_CAP_TQING;
			if (tinfo->curr.ppr_options & MSG_EXT_PPR_DT_REQ)
				xm->xm_mode |= PERIPH_CAP_DT;
			scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, xm);
		}
		splx(s);
	    }
	}

	return;
}

static void
ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments)
{
	struct scb *scb;
	struct scsipi_xfer *xs;
	struct ahc_softc *ahc;
	struct ahc_initiator_tinfo *tinfo;
	struct ahc_tmode_tstate *tstate;

	u_int mask;
	long s;

	scb = (struct scb *)arg;
	xs = scb->xs;
	xs->error = 0;
	xs->status = 0;
	xs->xs_status = 0;
	ahc = (void *)xs->xs_periph->periph_channel->chan_adapter->adapt_dev;

	if (nsegments != 0) {
		struct ahc_dma_seg *sg;
		bus_dma_segment_t *end_seg;
		int op;

		end_seg = dm_segs + nsegments;

		/* Copy the segments into our SG list */
		sg = scb->sg_list;
		while (dm_segs < end_seg) {
			uint32_t len;

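			/*
			 * Bits 32-38 of a 39-bit physical address are
			 * carried in bits 24-30 of the segment length
			 * word; the low 24 bits hold the true length
			 * (AHC_SG_LEN_MASK).
			 */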
			sg->addr = ahc_htole32(dm_segs->ds_addr);
			len = dm_segs->ds_len
			    | ((dm_segs->ds_addr >> 8) & 0x7F000000);
			sg->len = ahc_htole32(len);
			sg++;
			dm_segs++;
		}

		/*
		 * Note where to find the SG entries in bus space.
		 * We also set the full residual flag which the
		 * sequencer will clear as soon as a data transfer
		 * occurs.
		 */
		scb->hscb->sgptr = ahc_htole32(scb->sg_list_phys|SG_FULL_RESID);

		if (xs->xs_control & XS_CTL_DATA_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;

		bus_dmamap_sync(ahc->parent_dmat, scb->dmamap, 0,
		    scb->dmamap->dm_mapsize, op);

		sg--;
		sg->len |= ahc_htole32(AHC_DMA_LAST_SEG);

		/* Copy the first SG into the "current" data pointer area */
		scb->hscb->dataptr = scb->sg_list->addr;
		scb->hscb->datacnt = scb->sg_list->len;
	} else {
		scb->hscb->sgptr = ahc_htole32(SG_LIST_NULL);
		scb->hscb->dataptr = 0;
		scb->hscb->datacnt = 0;
	}

	scb->sg_count = nsegments;

	ahc_lock(ahc, &s);

	/*
	 * Last chance to check whether this SCB needs to
	 * be aborted.
	 */
	if (xs->xs_status & XS_STS_DONE) {
		if (nsegments != 0)
			bus_dmamap_unload(ahc->parent_dmat, scb->dmamap);
		ahc_free_scb(ahc, scb);
		ahc_unlock(ahc, &s);
		scsipi_done(xs);
		return;
	}

	tinfo = ahc_fetch_transinfo(ahc, ahc->channel,
	    SCSIID_OUR_ID(scb->hscb->scsiid),
	    SCSIID_TARGET(ahc, scb->hscb->scsiid),
	    &tstate);

	mask = SCB_GET_TARGET_MASK(ahc, scb);
	scb->hscb->scsirate = tinfo->scsirate;
	scb->hscb->scsioffset = tinfo->curr.offset;

	if ((tstate->ultraenb & mask) != 0)
		scb->hscb->control |= ULTRAENB;

	if ((tstate->discenable & mask) != 0)
		scb->hscb->control |= DISCENB;

	if (xs->xs_tag_type)
		scb->hscb->control |= xs->xs_tag_type;

	if ((xs->xs_control & XS_CTL_DISCOVERY) && (tinfo->goal.width == 0
	    && tinfo->goal.offset == 0
	    && tinfo->goal.ppr_options == 0)) {
		scb->flags |= SCB_NEGOTIATE;
		scb->hscb->control |= MK_MESSAGE;
	} else if ((tstate->auto_negotiate & mask) != 0) {
		scb->flags |= SCB_AUTO_NEGOTIATE;
		scb->hscb->control |= MK_MESSAGE;
	}

	LIST_INSERT_HEAD(&ahc->pending_scbs, scb, pending_links);

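	/*
	 * Arm the command's timeout; xs->timeout is in
	 * milliseconds and is converted to ticks as in
	 * ahc_done().
	 */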
	if (!(xs->xs_control & XS_CTL_POLL)) {
		callout_reset(&scb->xs->xs_callout, xs->timeout > 1000000 ?
		    (xs->timeout / 1000) * hz : (xs->timeout * hz) / 1000,
		    ahc_timeout, scb);
	}

	/*
	 * We only allow one untagged transaction
	 * per target in the initiator role unless
	 * we are storing a full busy target *lun*
	 * table in SCB space.
	 */
	if ((scb->hscb->control & (TARGET_SCB|TAG_ENB)) == 0
	 && (ahc->flags & AHC_SCB_BTT) == 0) {
		struct scb_tailq *untagged_q;
		int target_offset;

		target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
		untagged_q = &(ahc->untagged_queues[target_offset]);
		TAILQ_INSERT_TAIL(untagged_q, scb, links.tqe);
		scb->flags |= SCB_UNTAGGEDQ;
		if (TAILQ_FIRST(untagged_q) != scb) {
			ahc_unlock(ahc, &s);
			return;
		}
	}
	scb->flags |= SCB_ACTIVE;

	if ((scb->flags & SCB_TARGET_IMMEDIATE) != 0) {
		/* Define a mapping from our tag to the SCB. */
		ahc->scb_data->scbindex[scb->hscb->tag] = scb;
		ahc_pause(ahc);
		if ((ahc->flags & AHC_PAGESCBS) == 0)
			ahc_outb(ahc, SCBPTR, scb->hscb->tag);
		ahc_outb(ahc, TARG_IMMEDIATE_SCB, scb->hscb->tag);
		ahc_unpause(ahc);
	} else {
		ahc_queue_scb(ahc, scb);
	}

	if (!(xs->xs_control & XS_CTL_POLL)) {
		ahc_unlock(ahc, &s);
		return;
	}

	/*
	 * If we can't use interrupts, poll for completion
	 */
	SC_DEBUG(xs->xs_periph, SCSIPI_DB3, ("cmd_poll\n"));
	do {
		if (ahc_poll(ahc, xs->timeout)) {
			if (!(xs->xs_control & XS_CTL_SILENT))
				printf("cmd fail\n");
			ahc_timeout(scb);
			break;
		}
	} while (!(xs->xs_status & XS_STS_DONE));
	ahc_unlock(ahc, &s);

	return;
}

static int
ahc_poll(struct ahc_softc *ahc, int wait)
{
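	/*
	 * Busy-wait in 1ms steps until the controller posts an
	 * interrupt or `wait' milliseconds have elapsed, then
	 * run the interrupt handler by hand.
	 */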
	while (--wait) {
		DELAY(1000);
		if (ahc_inb(ahc, INTSTAT) & INT_PEND)
			break;
	}

	if (wait == 0) {
		printf("%s: board is not responding\n", ahc_name(ahc));
		return (EIO);
	}

	ahc_intr((void *)ahc);
	return (0);
}

static void
ahc_setup_data(struct ahc_softc *ahc, struct scsipi_xfer *xs,
    struct scb *scb)
{
	struct hardware_scb *hscb;

	hscb = scb->hscb;
	xs->resid = xs->status = 0;

	hscb->cdb_len = xs->cmdlen;
	if (hscb->cdb_len > sizeof(hscb->cdb32)) {
		u_long s;

		ahc_set_transaction_status(scb, CAM_REQ_INVALID);
		ahc_lock(ahc, &s);
		ahc_free_scb(ahc, scb);
		ahc_unlock(ahc, &s);
		scsipi_done(xs);
		return;
	}

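	/*
	 * CDBs longer than 12 bytes do not fit in the shared data
	 * area of the hardware SCB; SCB_CDB32_PTR flags the SCB so
	 * that the command is fetched from the 32-byte cdb32 area
	 * instead.
	 */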
	if (hscb->cdb_len > 12) {
		memcpy(hscb->cdb32, xs->cmd, hscb->cdb_len);
		scb->flags |= SCB_CDB32_PTR;
	} else {
		memcpy(hscb->shared_data.cdb, xs->cmd, hscb->cdb_len);
	}

	/* Only use S/G if there is a transfer */
	if (xs->datalen) {
		int error;
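
		/*
		 * Map the client's buffer for DMA.  XS_CTL_NOSLEEP
		 * requests must not block waiting for mapping
		 * resources, and BUS_DMA_STREAMING hints that this
		 * is a one-shot, sequential transfer.
		 */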
		error = bus_dmamap_load(ahc->parent_dmat,
		    scb->dmamap, xs->data,
		    xs->datalen, NULL,
		    ((xs->xs_control & XS_CTL_NOSLEEP) ?
		     BUS_DMA_NOWAIT : BUS_DMA_WAITOK) |
		    BUS_DMA_STREAMING |
		    ((xs->xs_control & XS_CTL_DATA_IN) ?
		     BUS_DMA_READ : BUS_DMA_WRITE));
		if (error) {
			u_long s;

#ifdef AHC_DEBUG
			printf("%s: in ahc_setup_data(): bus_dmamap_load() "
			    "= %d\n",
			    ahc_name(ahc), error);
#endif
			xs->error = XS_RESOURCE_SHORTAGE;
			/* Don't leak the just-allocated scb. */
			ahc_lock(ahc, &s);
			ahc_free_scb(ahc, scb);
			ahc_unlock(ahc, &s);
			scsipi_done(xs);
			return;
		}
		ahc_execute_scb(scb,
		    scb->dmamap->dm_segs,
		    scb->dmamap->dm_nsegs);
	} else {
		ahc_execute_scb(scb, NULL, 0);
	}
}

static void
ahc_set_recoveryscb(struct ahc_softc *ahc, struct scb *scb)
{
	if ((scb->flags & SCB_RECOVERY_SCB) == 0) {
		struct scb *list_scb;

		scb->flags |= SCB_RECOVERY_SCB;

		/*
		 * Take all queued, but not sent SCBs out of the equation.
		 * Also ensure that no new commands are queued to us while
		 * we try to fix this problem.
		 */
		scsipi_channel_freeze(&ahc->sc_channel, 1);
		if (ahc->features & AHC_TWIN)
			scsipi_channel_freeze(&ahc->sc_channel_b, 1);

		/*
		 * Go through all of our pending SCBs and remove
		 * any scheduled timeouts for them. We will reschedule
		 * them after we've successfully fixed this problem.
		 */
		LIST_FOREACH(list_scb, &ahc->pending_scbs, pending_links) {
			callout_stop(&list_scb->xs->xs_callout);
		}
	}
}

void
ahc_timeout(void *arg)
{
	struct scb *scb;
	struct ahc_softc *ahc;
	long s;
	int found;
	u_int last_phase;
	int target;
	int lun;
	int i;
	char channel;

	scb = (struct scb *)arg;
	ahc = (struct ahc_softc *)scb->ahc_softc;

	ahc_lock(ahc, &s);

	ahc_pause_and_flushwork(ahc);

	if ((scb->flags & SCB_ACTIVE) == 0) {
		/* Previous timeout took care of me already */
		printf("%s: Timedout SCB already complete. "
		    "Interrupts may not be functioning.\n", ahc_name(ahc));
		ahc_unpause(ahc);
		ahc_unlock(ahc, &s);
		return;
	}

	target = SCB_GET_TARGET(ahc, scb);
	channel = SCB_GET_CHANNEL(ahc, scb);
	lun = SCB_GET_LUN(scb);

	ahc_print_path(ahc, scb);
	printf("SCB 0x%x - timed out\n", scb->hscb->tag);
	ahc_dump_card_state(ahc);
	last_phase = ahc_inb(ahc, LASTPHASE);
	if (scb->sg_count > 0) {
		for (i = 0; i < scb->sg_count; i++) {
			printf("sg[%d] - Addr 0x%x : Length %d\n",
			    i,
			    scb->sg_list[i].addr,
			    scb->sg_list[i].len & AHC_SG_LEN_MASK);
		}
	}
	if (scb->flags & (SCB_DEVICE_RESET|SCB_ABORT)) {
		/*
		 * Been down this road before.
		 * Do a full bus reset.
		 */
bus_reset:
		ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
		found = ahc_reset_channel(ahc, channel, /*Initiate Reset*/TRUE);
		printf("%s: Issued Channel %c Bus Reset. "
		    "%d SCBs aborted\n", ahc_name(ahc), channel, found);
	} else {
		/*
		 * If we are a target, transition to bus free and report
		 * the timeout.
		 *
		 * The target/initiator that is holding up the bus may not
		 * be the same as the one that triggered this timeout
		 * (different commands have different timeout lengths).
		 * If the bus is idle and we are acting as the initiator
		 * for this request, queue a BDR message to the timed out
		 * target. Otherwise, if the timed out transaction is
		 * active:
		 *   Initiator transaction:
		 *	Stuff the message buffer with a BDR message and assert
		 *	ATN in the hopes that the target will let go of the bus
		 *	and go to the mesgout phase. If this fails, we'll
		 *	get another timeout 2 seconds later which will attempt
		 *	a bus reset.
		 *
		 *   Target transaction:
		 *	Transition to BUS FREE and report the error.
		 *	It's good to be the target!
		 */
		u_int active_scb_index;
		u_int saved_scbptr;

		saved_scbptr = ahc_inb(ahc, SCBPTR);
		active_scb_index = ahc_inb(ahc, SCB_TAG);

		if ((ahc_inb(ahc, SEQ_FLAGS) & NOT_IDENTIFIED) == 0
		 && (active_scb_index < ahc->scb_data->numscbs)) {
			struct scb *active_scb;

			/*
			 * If the active SCB is not us, assume that
			 * the active SCB has a longer timeout than
			 * the timedout SCB, and wait for the active
			 * SCB to timeout.
			 */
			active_scb = ahc_lookup_scb(ahc, active_scb_index);
			if (active_scb != scb) {
				uint64_t newtimeout;

				ahc_print_path(ahc, scb);
				printf("Other SCB Timeout%s",
				    (scb->flags & SCB_OTHERTCL_TIMEOUT) != 0
				    ? " again\n" : "\n");
				scb->flags |= SCB_OTHERTCL_TIMEOUT;
				newtimeout = MAX(active_scb->xs->timeout,
				    scb->xs->timeout);
				callout_reset(&scb->xs->xs_callout,
				    newtimeout > 1000000 ?
				    (newtimeout / 1000) * hz :
				    (newtimeout * hz) / 1000,
				    ahc_timeout, scb);
				ahc_unpause(ahc);
				ahc_unlock(ahc, &s);
				return;
			}

			/* It's us */
			if ((scb->flags & SCB_TARGET_SCB) != 0) {

				/*
				 * Send back any queued up transactions
				 * and properly record the error condition.
				 */
				ahc_abort_scbs(ahc, SCB_GET_TARGET(ahc, scb),
				    SCB_GET_CHANNEL(ahc, scb),
				    SCB_GET_LUN(scb),
				    scb->hscb->tag,
				    ROLE_TARGET,
				    CAM_CMD_TIMEOUT);

				/* Will clear us from the bus */
				ahc_restart(ahc);
				ahc_unlock(ahc, &s);
				return;
			}

			ahc_set_recoveryscb(ahc, active_scb);
			ahc_outb(ahc, MSG_OUT, HOST_MSG);
			ahc_outb(ahc, SCSISIGO, last_phase|ATNO);
			ahc_print_path(ahc, active_scb);
			printf("BDR message in message buffer\n");
			active_scb->flags |= SCB_DEVICE_RESET;
			callout_reset(&active_scb->xs->xs_callout,
			    2 * hz, ahc_timeout, active_scb);
			ahc_unpause(ahc);
		} else {
			int disconnected;

			/* XXX Shouldn't panic. Just punt instead? */
			if ((scb->flags & SCB_TARGET_SCB) != 0)
				panic("Timed-out target SCB but bus idle");

			if (last_phase != P_BUSFREE
			 && (ahc_inb(ahc, SSTAT0) & TARGET) != 0) {
				/* XXX What happened to the SCB? */
				/* Hung target selection. Goto busfree */
				printf("%s: Hung target selection\n",
				    ahc_name(ahc));
				ahc_restart(ahc);
				ahc_unlock(ahc, &s);
				return;
			}

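			/*
			 * If the SCB is still sitting in the QINFIFO,
			 * it never made it onto the bus; otherwise
			 * assume the target is holding it disconnected.
			 */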
			if (ahc_search_qinfifo(ahc, target, channel, lun,
			    scb->hscb->tag, ROLE_INITIATOR,
			    /*status*/0, SEARCH_COUNT) > 0) {
				disconnected = FALSE;
			} else {
				disconnected = TRUE;
			}

			if (disconnected) {

				ahc_set_recoveryscb(ahc, scb);
				/*
				 * Actually re-queue this SCB in an attempt
				 * to select the device before it reconnects.
				 * In either case (selection or reselection),
				 * we will now issue a target reset to the
				 * timed-out device.
				 *
				 * Set the MK_MESSAGE control bit indicating
				 * that we desire to send a message. We
				 * also set the disconnected flag since
				 * in the paging case there is no guarantee
				 * that our SCB control byte matches the
				 * version on the card. We don't want the
				 * sequencer to abort the command thinking
				 * an unsolicited reselection occurred.
				 */
				scb->hscb->control |= MK_MESSAGE|DISCONNECTED;
				scb->flags |= SCB_DEVICE_RESET;

				/*
				 * Remove any cached copy of this SCB in the
				 * disconnected list in preparation for the
				 * queuing of our abort SCB. We use the
				 * same element in the SCB, SCB_NEXT, for
				 * both the qinfifo and the disconnected list.
				 */
				ahc_search_disc_list(ahc, target, channel,
				    lun, scb->hscb->tag,
				    /*stop_on_first*/TRUE,
				    /*remove*/TRUE,
				    /*save_state*/FALSE);

				/*
				 * In the non-paging case, the sequencer will
				 * never re-reference the in-core SCB.
				 * To make sure we are notified during
				 * reselection, set the MK_MESSAGE flag in
				 * the card's copy of the SCB.
				 */
				if ((ahc->flags & AHC_PAGESCBS) == 0) {
					ahc_outb(ahc, SCBPTR, scb->hscb->tag);
					ahc_outb(ahc, SCB_CONTROL,
					    ahc_inb(ahc, SCB_CONTROL)
					    | MK_MESSAGE);
				}

				/*
				 * Clear out any entries in the QINFIFO first
				 * so we are the next SCB for this target
				 * to run.
				 */
				ahc_search_qinfifo(ahc,
				    SCB_GET_TARGET(ahc, scb),
				    channel, SCB_GET_LUN(scb),
				    SCB_LIST_NULL,
				    ROLE_INITIATOR,
				    CAM_REQUEUE_REQ,
				    SEARCH_COMPLETE);
				ahc_print_path(ahc, scb);
				printf("Queuing a BDR SCB\n");
				ahc_qinfifo_requeue_tail(ahc, scb);
				ahc_outb(ahc, SCBPTR, saved_scbptr);
				callout_reset(&scb->xs->xs_callout, 2 * hz,
				    ahc_timeout, scb);
				ahc_unpause(ahc);
			} else {
				/* Go "immediately" to the bus reset */
				/* This shouldn't happen */
				ahc_set_recoveryscb(ahc, scb);
				ahc_print_path(ahc, scb);
				printf("SCB %d: Immediate reset. "
				    "Flags = 0x%x\n", scb->hscb->tag,
				    scb->flags);
				goto bus_reset;
			}
		}
	}
	ahc_unlock(ahc, &s);
}

void
ahc_platform_set_tags(struct ahc_softc *ahc,
    struct ahc_devinfo *devinfo, int enable)
{
	struct ahc_initiator_tinfo *tinfo;
	struct ahc_tmode_tstate *tstate;

	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
	    devinfo->target, &tstate);

	if (enable)
		tstate->tagenable |= devinfo->target_mask;
	else
		tstate->tagenable &= ~devinfo->target_mask;
}

int
ahc_platform_alloc(struct ahc_softc *ahc, void *platform_arg)
{
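	/*
	 * struct ahc_platform_data may be an empty struct (sizeof
	 * 0 is a GCC extension), in which case there is nothing to
	 * allocate or free.
	 */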
	if (sizeof(struct ahc_platform_data) == 0)
		return 0;
	ahc->platform_data = malloc(sizeof(struct ahc_platform_data), M_DEVBUF,
	    M_NOWAIT);
	if (ahc->platform_data == NULL)
		return (ENOMEM);
	return (0);
}

void
ahc_platform_free(struct ahc_softc *ahc)
{
	if (sizeof(struct ahc_platform_data) == 0)
		return;
	free(ahc->platform_data, M_DEVBUF);
}

int
ahc_softc_comp(struct ahc_softc *lahc, struct ahc_softc *rahc)
{
	return (0);
}

int
ahc_detach(struct device *self, int flags)
{
	struct ahc_softc *ahc = (struct ahc_softc *)self;
	int rv = 0;

	ahc_intr_enable(ahc, FALSE);
	if (ahc->sc_child != NULL)
		rv = config_detach(ahc->sc_child, flags);
	if (rv == 0 && ahc->sc_child_b != NULL)
		rv = config_detach(ahc->sc_child_b, flags);

	shutdownhook_disestablish(ahc->shutdown_hook);

	ahc_free(ahc);

	return (rv);
}


void
ahc_send_async(struct ahc_softc *ahc, char channel, u_int target, u_int lun,
    ac_code code, void *opt_arg)
{
	struct ahc_tmode_tstate *tstate;
	struct ahc_initiator_tinfo *tinfo;
	struct ahc_devinfo devinfo;
	struct scsipi_channel *chan;
	struct scsipi_xfer_mode xm;

	chan = channel == 'B' ? &ahc->sc_channel_b : &ahc->sc_channel;
	switch (code) {
	case AC_TRANSFER_NEG:
		tinfo = ahc_fetch_transinfo(ahc, channel, ahc->our_id, target,
		    &tstate);
		ahc_compile_devinfo(&devinfo, ahc->our_id, target, lun,
		    channel, ROLE_UNKNOWN);
		/*
		 * Don't bother if negotiating. XXX?
		 */
		if (tinfo->curr.period != tinfo->goal.period
		 || tinfo->curr.width != tinfo->goal.width
		 || tinfo->curr.offset != tinfo->goal.offset
		 || tinfo->curr.ppr_options != tinfo->goal.ppr_options)
			break;
		xm.xm_target = target;
		xm.xm_mode = 0;
		xm.xm_period = tinfo->curr.period;
		xm.xm_offset = tinfo->curr.offset;
		if (tinfo->curr.width == MSG_EXT_WDTR_BUS_16_BIT)
			xm.xm_mode |= PERIPH_CAP_WIDE16;
		if (tinfo->curr.period)
			xm.xm_mode |= PERIPH_CAP_SYNC;
		if (tstate->tagenable & devinfo.target_mask)
			xm.xm_mode |= PERIPH_CAP_TQING;
		if (tinfo->curr.ppr_options & MSG_EXT_PPR_DT_REQ)
			xm.xm_mode |= PERIPH_CAP_DT;
		scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, &xm);
		break;
	case AC_BUS_RESET:
		scsipi_async_event(chan, ASYNC_EVENT_RESET, NULL);
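		/* FALLTHROUGH */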
	case AC_SENT_BDR:
	default:
		break;
	}
}