/* $NetBSD: aic7xxx_osm.c,v 1.17 2005/05/30 04:43:46 christos Exp $ */

/*
 * Bus independent FreeBSD shim for the aic7xxx based adaptec SCSI controllers
 *
 * Copyright (c) 1994-2001 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU Public License ("GPL").
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * //depot/aic7xxx/freebsd/dev/aic7xxx/aic7xxx_osm.c#12 $
 *
 * $FreeBSD: src/sys/dev/aic7xxx/aic7xxx_osm.c,v 1.31 2002/11/30 19:08:58 scottl Exp $
 */
/*
 * Ported from FreeBSD by Pascal Renauld, Network Storage Solutions, Inc. - April 2003
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: aic7xxx_osm.c,v 1.17 2005/05/30 04:43:46 christos Exp $");

#include <dev/ic/aic7xxx_osm.h>
#include <dev/ic/aic7xxx_inline.h>

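/*
 * AHC_TMODE_ENABLE is a bitmap of controller units on which target-mode
 * operation may be attempted; the default of 0 leaves target mode off.
 */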
#ifndef AHC_TMODE_ENABLE
#define AHC_TMODE_ENABLE 0
#endif


static void ahc_action(struct scsipi_channel *chan,
    scsipi_adapter_req_t req, void *arg);
static void ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs,
    int nsegments);
static int  ahc_poll(struct ahc_softc *ahc, int wait);
static void ahc_setup_data(struct ahc_softc *ahc,
    struct scsipi_xfer *xs, struct scb *scb);
static void ahc_set_recoveryscb(struct ahc_softc *ahc, struct scb *scb);
static int  ahc_ioctl(struct scsipi_channel *channel, u_long cmd,
    caddr_t addr, int flag, struct proc *p);


/*
 * Attach all the sub-devices we can find
 */
int
ahc_attach(struct ahc_softc *ahc)
{
        u_long s;
        int i;
        char ahc_info[256];

        LIST_INIT(&ahc->pending_scbs);
        for (i = 0; i < AHC_NUM_TARGETS; i++)
                TAILQ_INIT(&ahc->untagged_queues[i]);

        ahc_lock(ahc, &s);

        ahc->sc_adapter.adapt_dev = &ahc->sc_dev;
        ahc->sc_adapter.adapt_nchannels = (ahc->features & AHC_TWIN) ? 2 : 1;

        ahc->sc_adapter.adapt_openings = AHC_MAX_QUEUE;
        ahc->sc_adapter.adapt_max_periph = 16;

        ahc->sc_adapter.adapt_ioctl = ahc_ioctl;
        ahc->sc_adapter.adapt_minphys = ahc_minphys;
        ahc->sc_adapter.adapt_request = ahc_action;

        ahc->sc_channel.chan_adapter = &ahc->sc_adapter;
        ahc->sc_channel.chan_bustype = &scsi_bustype;
        ahc->sc_channel.chan_channel = 0;
        ahc->sc_channel.chan_ntargets = (ahc->features & AHC_WIDE) ? 16 : 8;
        ahc->sc_channel.chan_nluns = 8 /*AHC_NUM_LUNS*/;
        ahc->sc_channel.chan_id = ahc->our_id;

        if (ahc->features & AHC_TWIN) {
                ahc->sc_channel_b = ahc->sc_channel;
                ahc->sc_channel_b.chan_id = ahc->our_id_b;
                ahc->sc_channel_b.chan_channel = 1;
        }

        ahc_controller_info(ahc, ahc_info, sizeof(ahc_info));
        printf("%s: %s\n", ahc->sc_dev.dv_xname, ahc_info);

        if ((ahc->flags & AHC_PRIMARY_CHANNEL) == 0) {
                ahc->sc_child = config_found((void *)&ahc->sc_dev,
                    &ahc->sc_channel, scsiprint);
                if (ahc->features & AHC_TWIN)
                        ahc->sc_child_b = config_found((void *)&ahc->sc_dev,
                            &ahc->sc_channel_b, scsiprint);
        } else {
                if (ahc->features & AHC_TWIN)
                        ahc->sc_child = config_found((void *)&ahc->sc_dev,
                            &ahc->sc_channel_b, scsiprint);
                ahc->sc_child_b = config_found((void *)&ahc->sc_dev,
                    &ahc->sc_channel, scsiprint);
        }

        ahc_intr_enable(ahc, TRUE);

        if (ahc->flags & AHC_RESET_BUS_A)
                ahc_reset_channel(ahc, 'A', TRUE);
        if ((ahc->features & AHC_TWIN) && (ahc->flags & AHC_RESET_BUS_B))
                ahc_reset_channel(ahc, 'B', TRUE);

        ahc_unlock(ahc, &s);
        return (1);
}

/*
 * Catch an interrupt from the adapter
 */
void
ahc_platform_intr(void *arg)
{
        struct ahc_softc *ahc;

        ahc = (struct ahc_softc *)arg;
        ahc_intr(ahc);
}

/*
 * We have an scb which has been processed by the
 * adaptor, now we look to see how the operation
 * went.
 */
void
ahc_done(struct ahc_softc *ahc, struct scb *scb)
{
        struct scsipi_xfer *xs;
        struct scsipi_periph *periph;
        u_long s;

        xs = scb->xs;
        periph = xs->xs_periph;
        LIST_REMOVE(scb, pending_links);
        if ((scb->flags & SCB_UNTAGGEDQ) != 0) {
                struct scb_tailq *untagged_q;
                int target_offset;

                target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
                untagged_q = &ahc->untagged_queues[target_offset];
                TAILQ_REMOVE(untagged_q, scb, links.tqe);
                scb->flags &= ~SCB_UNTAGGEDQ;
                ahc_run_untagged_queue(ahc, untagged_q);
        }

        callout_stop(&scb->xs->xs_callout);

        if (xs->datalen) {
                int op;

                if (xs->xs_control & XS_CTL_DATA_IN)
                        op = BUS_DMASYNC_POSTREAD;
                else
                        op = BUS_DMASYNC_POSTWRITE;
                bus_dmamap_sync(ahc->parent_dmat, scb->dmamap, 0,
                    scb->dmamap->dm_mapsize, op);
                bus_dmamap_unload(ahc->parent_dmat, scb->dmamap);
        }

        /*
         * If the recovery SCB completes, we have to be
         * out of our timeout.
         */
        if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
                struct scb *list_scb;

                /*
                 * We were able to complete the command successfully,
                 * so reinstate the timeouts for all other pending
                 * commands.
                 */
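                /*
                 * Note: xs->timeout is in milliseconds.  The conversion
                 * below orders the arithmetic so that (timeout * hz)
                 * cannot overflow a 32-bit int for very long timeouts.
                 */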
                LIST_FOREACH(list_scb, &ahc->pending_scbs, pending_links) {
                        if (!(list_scb->xs->xs_control & XS_CTL_POLL)) {
                                callout_reset(&list_scb->xs->xs_callout,
                                    (list_scb->xs->timeout > 1000000) ?
                                    (list_scb->xs->timeout / 1000) * hz :
                                    (list_scb->xs->timeout * hz) / 1000,
                                    ahc_timeout, list_scb);
                        }
                }

                if (ahc_get_transaction_status(scb) == CAM_BDR_SENT
                    || ahc_get_transaction_status(scb) == CAM_REQ_ABORTED)
                        ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
                scsipi_printaddr(xs->xs_periph);
                printf("%s: no longer in timeout, status = %x\n",
                    ahc_name(ahc), xs->status);
        }

        if (xs->error != XS_NOERROR) {
                /* Don't clobber any existing error state */
        } else if ((scb->flags & SCB_SENSE) != 0) {
                /*
                 * We performed autosense retrieval.
                 *
                 * Zero any sense not transferred by the
                 * device.  The SCSI spec mandates that any
                 * untransferred data should be assumed to be
                 * zero.  Complete the 'bounce' of sense information
                 * through buffers accessible via bus-space by
                 * copying it into the client's csio.
                 */
                memset(&xs->sense.scsi_sense, 0, sizeof(xs->sense.scsi_sense));
                memcpy(&xs->sense.scsi_sense,
                    ahc_get_sense_buf(ahc, scb),
                    sizeof(xs->sense.scsi_sense));
                xs->error = XS_SENSE;
        }
        if (scb->flags & SCB_FREEZE_QUEUE) {
                scsipi_periph_thaw(periph, 1);
                scb->flags &= ~SCB_FREEZE_QUEUE;
        }

        ahc_lock(ahc, &s);
        ahc_free_scb(ahc, scb);
        ahc_unlock(ahc, &s);

        scsipi_done(xs);
}

static int
ahc_ioctl(struct scsipi_channel *channel, u_long cmd, caddr_t addr, int flag,
    struct proc *p)
{
        struct ahc_softc *ahc = (void *)channel->chan_adapter->adapt_dev;
        int s, ret = ENOTTY;

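        /*
         * SCBUSIORESET (from <sys/scsiio.h>) is the only ioctl handled
         * here: it forces a reset of the addressed SCSI bus.
         */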
        switch (cmd) {
        case SCBUSIORESET:
                s = splbio();
                ahc_reset_channel(ahc, channel->chan_channel == 1 ? 'B' : 'A',
                    TRUE);
                splx(s);
                ret = 0;
                break;
        default:
                break;
        }

        return ret;
}

static void
ahc_action(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
{
        struct ahc_softc *ahc;
        int s;
        struct ahc_initiator_tinfo *tinfo;
        struct ahc_tmode_tstate *tstate;

        ahc = (void *)chan->chan_adapter->adapt_dev;

        switch (req) {

        case ADAPTER_REQ_RUN_XFER:
        {
                struct scsipi_xfer *xs;
                struct scsipi_periph *periph;
                struct scb *scb;
                struct hardware_scb *hscb;
                u_int target_id;
                u_int our_id;
                u_long ss;

                xs = arg;
                periph = xs->xs_periph;

                target_id = periph->periph_target;
                our_id = ahc->our_id;

                SC_DEBUG(xs->xs_periph, SCSIPI_DB3, ("ahc_action\n"));

                /*
                 * get an scb to use.
                 */
                ahc_lock(ahc, &ss);
                if ((scb = ahc_get_scb(ahc)) == NULL) {
                        xs->error = XS_RESOURCE_SHORTAGE;
                        ahc_unlock(ahc, &ss);
                        scsipi_done(xs);
                        return;
                }
                ahc_unlock(ahc, &ss);

                hscb = scb->hscb;

                SC_DEBUG(periph, SCSIPI_DB3, ("start scb(%p)\n", scb));
                scb->xs = xs;

                /*
                 * Put all the arguments for the xfer in the scb
                 */
                hscb->control = 0;
                hscb->scsiid = BUILD_SCSIID(ahc, 0, target_id, our_id);
                hscb->lun = periph->periph_lun;
                if (xs->xs_control & XS_CTL_RESET) {
                        hscb->cdb_len = 0;
                        scb->flags |= SCB_DEVICE_RESET;
                        hscb->control |= MK_MESSAGE;
                        ahc_execute_scb(scb, NULL, 0);
                        /*
                         * The reset has already been queued; don't run
                         * ahc_setup_data() and queue it a second time.
                         */
                        break;
                }

                ahc_setup_data(ahc, xs, scb);

                break;
        }
        case ADAPTER_REQ_GROW_RESOURCES:
                printf("%s: ADAPTER_REQ_GROW_RESOURCES\n", ahc_name(ahc));
                return;

        case ADAPTER_REQ_SET_XFER_MODE:
        {
                struct scsipi_xfer_mode *xm = arg;
                struct ahc_devinfo devinfo;
                int target_id, our_id, first;
                u_int width;
                char channel;
                u_int ppr_options, period, offset;
                struct ahc_syncrate *syncrate;
                uint16_t old_autoneg;

                target_id = xm->xm_target;
                our_id = chan->chan_id;
                channel = (chan->chan_channel == 1) ? 'B' : 'A';
                s = splbio();
                tinfo = ahc_fetch_transinfo(ahc, channel, our_id, target_id,
                    &tstate);
                ahc_compile_devinfo(&devinfo, our_id, target_id,
                    0, channel, ROLE_INITIATOR);

                old_autoneg = tstate->auto_negotiate;

                /*
                 * XXX since the period and offset are not provided here,
                 * fake things by forcing a renegotiation using the user
                 * settings if this is called for the first time (i.e.
                 * during probe).  Also, cap various values at the user
                 * values, assuming that the user set it up that way.
                 */
                /* Start from the user limits so these are never used
                 * uninitialized on later calls. */
                period = tinfo->user.period;
                offset = tinfo->user.offset;
                ppr_options = tinfo->user.ppr_options;
                if (ahc->inited_target[target_id] == 0) {
                        width = tinfo->user.width;
                        tstate->tagenable |=
                            (ahc->user_tagenable & devinfo.target_mask);
                        tstate->discenable |=
                            (ahc->user_discenable & devinfo.target_mask);
                        ahc->inited_target[target_id] = 1;
                        first = 1;
                } else
                        first = 0;

                if (xm->xm_mode & (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
                        width = MSG_EXT_WDTR_BUS_16_BIT;
                else
                        width = MSG_EXT_WDTR_BUS_8_BIT;

                ahc_validate_width(ahc, NULL, &width, ROLE_UNKNOWN);
                if (width > tinfo->user.width)
                        width = tinfo->user.width;
                ahc_set_width(ahc, &devinfo, width, AHC_TRANS_GOAL, FALSE);

                if (!(xm->xm_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT))) {
                        period = 0;
                        offset = 0;
                        ppr_options = 0;
                }

                if ((xm->xm_mode & PERIPH_CAP_DT) &&
                    (ppr_options & MSG_EXT_PPR_DT_REQ))
                        ppr_options |= MSG_EXT_PPR_DT_REQ;
                else
                        ppr_options &= ~MSG_EXT_PPR_DT_REQ;
                if ((tstate->discenable & devinfo.target_mask) == 0 ||
                    (tstate->tagenable & devinfo.target_mask) == 0)
                        ppr_options &= ~MSG_EXT_PPR_IU_REQ;

                if ((xm->xm_mode & PERIPH_CAP_TQING) &&
                    (ahc->user_tagenable & devinfo.target_mask))
                        tstate->tagenable |= devinfo.target_mask;
                else
                        tstate->tagenable &= ~devinfo.target_mask;

                syncrate = ahc_find_syncrate(ahc, &period, &ppr_options,
                    AHC_SYNCRATE_MAX);
                ahc_validate_offset(ahc, NULL, syncrate, &offset,
                    width, ROLE_UNKNOWN);

                if (offset == 0) {
                        period = 0;
                        ppr_options = 0;
                }

                if (ppr_options != 0
                    && tinfo->user.transport_version >= 3) {
                        tinfo->goal.transport_version =
                            tinfo->user.transport_version;
                        tinfo->curr.transport_version =
                            tinfo->user.transport_version;
                }

                ahc_set_syncrate(ahc, &devinfo, syncrate, period, offset,
                    ppr_options, AHC_TRANS_GOAL, FALSE);

                /*
                 * If this is the first request, and no negotiation is
                 * needed, just confirm the state to the scsipi layer,
                 * so that it can print a message.
                 */
                if (old_autoneg == tstate->auto_negotiate && first) {
                        xm->xm_mode = 0;
                        xm->xm_period = tinfo->curr.period;
                        xm->xm_offset = tinfo->curr.offset;
                        if (tinfo->curr.width == MSG_EXT_WDTR_BUS_16_BIT)
                                xm->xm_mode |= PERIPH_CAP_WIDE16;
                        if (tinfo->curr.period)
                                xm->xm_mode |= PERIPH_CAP_SYNC;
                        if (tstate->tagenable & devinfo.target_mask)
                                xm->xm_mode |= PERIPH_CAP_TQING;
                        if (tinfo->curr.ppr_options & MSG_EXT_PPR_DT_REQ)
                                xm->xm_mode |= PERIPH_CAP_DT;
                        scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, xm);
                }
                splx(s);
        }
        }

        return;
}

static void
ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments)
{
        struct scb *scb;
        struct scsipi_xfer *xs;
        struct ahc_softc *ahc;
        struct ahc_initiator_tinfo *tinfo;
        struct ahc_tmode_tstate *tstate;

        u_int mask;
        long s;

        scb = (struct scb *)arg;
        xs = scb->xs;
        xs->error = 0;
        xs->status = 0;
        xs->xs_status = 0;
        ahc = (void *)xs->xs_periph->periph_channel->chan_adapter->adapt_dev;

        if (nsegments != 0) {
                struct ahc_dma_seg *sg;
                bus_dma_segment_t *end_seg;
                int op;

                end_seg = dm_segs + nsegments;

                /* Copy the segments into our SG list */
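                /*
                 * The top byte of each SG length word doubles as storage
                 * for bus-address bits 32-38; this is how the core driver
                 * supports controllers capable of 39-bit addressing.
                 */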
                sg = scb->sg_list;
                while (dm_segs < end_seg) {
                        uint32_t len;

                        sg->addr = ahc_htole32(dm_segs->ds_addr);
                        len = dm_segs->ds_len
                            | ((dm_segs->ds_addr >> 8) & 0x7F000000);
                        sg->len = ahc_htole32(len);
                        sg++;
                        dm_segs++;
                }

                /*
                 * Note where to find the SG entries in bus space.
                 * We also set the full residual flag which the
                 * sequencer will clear as soon as a data transfer
                 * occurs.
                 */
                scb->hscb->sgptr = ahc_htole32(scb->sg_list_phys|SG_FULL_RESID);

                if (xs->xs_control & XS_CTL_DATA_IN)
                        op = BUS_DMASYNC_PREREAD;
                else
                        op = BUS_DMASYNC_PREWRITE;

                bus_dmamap_sync(ahc->parent_dmat, scb->dmamap, 0,
                    scb->dmamap->dm_mapsize, op);

                sg--;
                sg->len |= ahc_htole32(AHC_DMA_LAST_SEG);

                /* Copy the first SG into the "current" data pointer area */
                scb->hscb->dataptr = scb->sg_list->addr;
                scb->hscb->datacnt = scb->sg_list->len;
        } else {
                scb->hscb->sgptr = ahc_htole32(SG_LIST_NULL);
                scb->hscb->dataptr = 0;
                scb->hscb->datacnt = 0;
        }

        scb->sg_count = nsegments;

        ahc_lock(ahc, &s);

        /*
         * Final opportunity to check whether this transaction
         * has been aborted before we commit it to the controller.
         */
        if (xs->xs_status & XS_STS_DONE) {
                if (nsegments != 0)
                        bus_dmamap_unload(ahc->parent_dmat, scb->dmamap);
                ahc_free_scb(ahc, scb);
                ahc_unlock(ahc, &s);
                scsipi_done(xs);
                return;
        }

        tinfo = ahc_fetch_transinfo(ahc, ahc->channel,
            SCSIID_OUR_ID(scb->hscb->scsiid),
            SCSIID_TARGET(ahc, scb->hscb->scsiid),
            &tstate);

        mask = SCB_GET_TARGET_MASK(ahc, scb);
        scb->hscb->scsirate = tinfo->scsirate;
        scb->hscb->scsioffset = tinfo->curr.offset;

        if ((tstate->ultraenb & mask) != 0)
                scb->hscb->control |= ULTRAENB;

        if ((tstate->discenable & mask) != 0)
                scb->hscb->control |= DISCENB;

        if (xs->xs_tag_type)
                scb->hscb->control |= xs->xs_tag_type;

#if 1   /* This looks like it makes sense at first, but it can loop */
        if ((xs->xs_control & XS_CTL_DISCOVERY) && (tinfo->goal.width == 0
            && tinfo->goal.offset == 0
            && tinfo->goal.ppr_options == 0)) {
                scb->flags |= SCB_NEGOTIATE;
                scb->hscb->control |= MK_MESSAGE;
        } else
#endif
        if ((tstate->auto_negotiate & mask) != 0) {
                scb->flags |= SCB_AUTO_NEGOTIATE;
                scb->hscb->control |= MK_MESSAGE;
        }

        LIST_INSERT_HEAD(&ahc->pending_scbs, scb, pending_links);

        if (!(xs->xs_control & XS_CTL_POLL)) {
                callout_reset(&scb->xs->xs_callout, xs->timeout > 1000000 ?
                    (xs->timeout / 1000) * hz : (xs->timeout * hz) / 1000,
                    ahc_timeout, scb);
        }

        /*
         * We only allow one untagged transaction
         * per target in the initiator role unless
         * we are storing a full busy target *lun*
         * table in SCB space.
         */
        if ((scb->hscb->control & (TARGET_SCB|TAG_ENB)) == 0
            && (ahc->flags & AHC_SCB_BTT) == 0) {
                struct scb_tailq *untagged_q;
                int target_offset;

                target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
                untagged_q = &(ahc->untagged_queues[target_offset]);
                TAILQ_INSERT_TAIL(untagged_q, scb, links.tqe);
                scb->flags |= SCB_UNTAGGEDQ;
                if (TAILQ_FIRST(untagged_q) != scb) {
                        ahc_unlock(ahc, &s);
                        return;
                }
        }
        scb->flags |= SCB_ACTIVE;

        if ((scb->flags & SCB_TARGET_IMMEDIATE) != 0) {
                /* Define a mapping from our tag to the SCB. */
                ahc->scb_data->scbindex[scb->hscb->tag] = scb;
                ahc_pause(ahc);
                if ((ahc->flags & AHC_PAGESCBS) == 0)
                        ahc_outb(ahc, SCBPTR, scb->hscb->tag);
                ahc_outb(ahc, TARG_IMMEDIATE_SCB, scb->hscb->tag);
                ahc_unpause(ahc);
        } else {
                ahc_queue_scb(ahc, scb);
        }

        if (!(xs->xs_control & XS_CTL_POLL)) {
                ahc_unlock(ahc, &s);
                return;
        }

        /*
         * If we can't use interrupts, poll for completion
         */
        SC_DEBUG(xs->xs_periph, SCSIPI_DB3, ("cmd_poll\n"));
        do {
                if (ahc_poll(ahc, xs->timeout)) {
                        if (!(xs->xs_control & XS_CTL_SILENT))
                                printf("cmd fail\n");
                        ahc_timeout(scb);
                        break;
                }
        } while (!(xs->xs_status & XS_STS_DONE));
        ahc_unlock(ahc, &s);

        return;
}

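/*
 * Spin for a pending controller interrupt and dispatch it.  DELAY(1000)
 * busy-waits for one millisecond per iteration, so `wait' is
 * approximately the timeout in milliseconds.
 */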
static int
ahc_poll(struct ahc_softc *ahc, int wait)
{
        while (--wait) {
                DELAY(1000);
                if (ahc_inb(ahc, INTSTAT) & INT_PEND)
                        break;
        }

        if (wait == 0) {
                printf("%s: board is not responding\n", ahc_name(ahc));
                return (EIO);
        }

        ahc_intr((void *)ahc);
        return (0);
}

static void
ahc_setup_data(struct ahc_softc *ahc, struct scsipi_xfer *xs,
    struct scb *scb)
{
        struct hardware_scb *hscb;

        hscb = scb->hscb;
        xs->resid = xs->status = 0;

        hscb->cdb_len = xs->cmdlen;
        if (hscb->cdb_len > sizeof(hscb->cdb32)) {
                u_long s;

                ahc_set_transaction_status(scb, CAM_REQ_INVALID);
                ahc_lock(ahc, &s);
                ahc_free_scb(ahc, scb);
                ahc_unlock(ahc, &s);
                scsipi_done(xs);
                return;
        }

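        /*
         * CDBs longer than the 12 bytes embedded in the shared data
         * area live in the hscb's cdb32[] buffer instead; SCB_CDB32_PTR
         * tells the core code to hand the sequencer a pointer to that
         * buffer rather than the CDB bytes themselves.
         */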
        if (hscb->cdb_len > 12) {
                memcpy(hscb->cdb32, xs->cmd, hscb->cdb_len);
                scb->flags |= SCB_CDB32_PTR;
        } else {
                memcpy(hscb->shared_data.cdb, xs->cmd, hscb->cdb_len);
        }

        /* Only use S/G if there is a transfer */
        if (xs->datalen) {
                int error;

                error = bus_dmamap_load(ahc->parent_dmat,
                    scb->dmamap, xs->data,
                    xs->datalen, NULL,
                    ((xs->xs_control & XS_CTL_NOSLEEP) ?
                        BUS_DMA_NOWAIT : BUS_DMA_WAITOK) |
                    BUS_DMA_STREAMING |
                    ((xs->xs_control & XS_CTL_DATA_IN) ?
                        BUS_DMA_READ : BUS_DMA_WRITE));
                if (error) {
                        u_long s;

#ifdef AHC_DEBUG
                        printf("%s: in ahc_setup_data(): bus_dmamap_load() "
                            "= %d\n", ahc_name(ahc), error);
#endif
                        xs->error = XS_RESOURCE_SHORTAGE;
                        /* Don't leak the scb on a failed map load. */
                        ahc_lock(ahc, &s);
                        ahc_free_scb(ahc, scb);
                        ahc_unlock(ahc, &s);
                        scsipi_done(xs);
                        return;
                }
                ahc_execute_scb(scb, scb->dmamap->dm_segs,
                    scb->dmamap->dm_nsegs);
        } else {
                ahc_execute_scb(scb, NULL, 0);
        }
}

static void
ahc_set_recoveryscb(struct ahc_softc *ahc, struct scb *scb)
{

        if ((scb->flags & SCB_RECOVERY_SCB) == 0) {
                struct scb *list_scb;

                scb->flags |= SCB_RECOVERY_SCB;

                /*
                 * Take all queued, but not sent SCBs out of the equation.
                 * Also ensure that no new CCBs are queued to us while we
                 * try to fix this problem.
                 */
                scsipi_channel_freeze(&ahc->sc_channel, 1);
                if (ahc->features & AHC_TWIN)
                        scsipi_channel_freeze(&ahc->sc_channel_b, 1);

                /*
                 * Go through all of our pending SCBs and remove
                 * any scheduled timeouts for them.  We will reschedule
                 * them after we've successfully fixed this problem.
                 */
                LIST_FOREACH(list_scb, &ahc->pending_scbs, pending_links) {
                        callout_stop(&list_scb->xs->xs_callout);
                }
        }
}

void
ahc_timeout(void *arg)
{
        struct scb *scb;
        struct ahc_softc *ahc;
        long s;
        int found;
        u_int last_phase;
        int target;
        int lun;
        int i;
        char channel;

        scb = (struct scb *)arg;
        ahc = (struct ahc_softc *)scb->ahc_softc;

        ahc_lock(ahc, &s);

        ahc_pause_and_flushwork(ahc);

        if ((scb->flags & SCB_ACTIVE) == 0) {
                /* Previous timeout took care of me already */
                printf("%s: Timedout SCB already complete. "
                    "Interrupts may not be functioning.\n", ahc_name(ahc));
                ahc_unpause(ahc);
                ahc_unlock(ahc, &s);
                return;
        }

        target = SCB_GET_TARGET(ahc, scb);
        channel = SCB_GET_CHANNEL(ahc, scb);
        lun = SCB_GET_LUN(scb);

        ahc_print_path(ahc, scb);
        printf("SCB 0x%x - timed out\n", scb->hscb->tag);
        ahc_dump_card_state(ahc);
        last_phase = ahc_inb(ahc, LASTPHASE);
        if (scb->sg_count > 0) {
                for (i = 0; i < scb->sg_count; i++) {
                        printf("sg[%d] - Addr 0x%x : Length %d\n",
                            i,
                            scb->sg_list[i].addr,
                            scb->sg_list[i].len & AHC_SG_LEN_MASK);
                }
        }
        if (scb->flags & (SCB_DEVICE_RESET|SCB_ABORT)) {
                /*
                 * Been down this road before.
                 * Do a full bus reset.
                 */
bus_reset:
                ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
                found = ahc_reset_channel(ahc, channel, /*Initiate Reset*/TRUE);
                printf("%s: Issued Channel %c Bus Reset. "
                    "%d SCBs aborted\n", ahc_name(ahc), channel, found);
        } else {
                /*
                 * If we are a target, transition to bus free and report
                 * the timeout.
                 *
                 * The target/initiator that is holding up the bus may not
                 * be the same as the one that triggered this timeout
                 * (different commands have different timeout lengths).
                 * If the bus is idle and we are acting as the initiator
                 * for this request, queue a BDR message to the timed out
                 * target.  Otherwise, if the timed out transaction is
                 * active:
                 *   Initiator transaction:
                 *     Stuff the message buffer with a BDR message and assert
                 *     ATN in the hopes that the target will let go of the bus
                 *     and go to the mesgout phase.  If this fails, we'll
                 *     get another timeout 2 seconds later which will attempt
                 *     a bus reset.
                 *
                 *   Target transaction:
                 *     Transition to BUS FREE and report the error.
                 *     It's good to be the target!
                 */
                u_int active_scb_index;
                u_int saved_scbptr;

                saved_scbptr = ahc_inb(ahc, SCBPTR);
                active_scb_index = ahc_inb(ahc, SCB_TAG);

                if ((ahc_inb(ahc, SEQ_FLAGS) & NOT_IDENTIFIED) == 0
                    && (active_scb_index < ahc->scb_data->numscbs)) {
                        struct scb *active_scb;

                        /*
                         * If the active SCB is not us, assume that
                         * the active SCB has a longer timeout than
                         * the timedout SCB, and wait for the active
                         * SCB to timeout.
                         */
                        active_scb = ahc_lookup_scb(ahc, active_scb_index);
                        if (active_scb != scb) {
                                uint64_t newtimeout;

                                ahc_print_path(ahc, scb);
                                printf("Other SCB Timeout%s",
                                    (scb->flags & SCB_OTHERTCL_TIMEOUT) != 0
                                    ? " again\n" : "\n");
                                scb->flags |= SCB_OTHERTCL_TIMEOUT;
                                newtimeout = MAX(active_scb->xs->timeout,
                                    scb->xs->timeout);
                                callout_reset(&scb->xs->xs_callout,
                                    newtimeout > 1000000 ?
                                    (newtimeout / 1000) * hz :
                                    (newtimeout * hz) / 1000,
                                    ahc_timeout, scb);
                                ahc_unpause(ahc);
                                ahc_unlock(ahc, &s);
                                return;
                        }

                        /* It's us */
                        if ((scb->flags & SCB_TARGET_SCB) != 0) {

                                /*
                                 * Send back any queued up transactions
                                 * and properly record the error condition.
                                 */
                                ahc_abort_scbs(ahc, SCB_GET_TARGET(ahc, scb),
                                    SCB_GET_CHANNEL(ahc, scb),
                                    SCB_GET_LUN(scb),
                                    scb->hscb->tag,
                                    ROLE_TARGET,
                                    CAM_CMD_TIMEOUT);

                                /* Will clear us from the bus */
                                ahc_restart(ahc);
                                ahc_unlock(ahc, &s);
                                return;
                        }

                        ahc_set_recoveryscb(ahc, active_scb);
                        ahc_outb(ahc, MSG_OUT, HOST_MSG);
                        ahc_outb(ahc, SCSISIGO, last_phase|ATNO);
                        ahc_print_path(ahc, active_scb);
                        printf("BDR message in message buffer\n");
                        active_scb->flags |= SCB_DEVICE_RESET;
                        callout_reset(&active_scb->xs->xs_callout,
                            2 * hz, ahc_timeout, active_scb);
                        ahc_unpause(ahc);
                } else {
                        int disconnected;

                        /* XXX Shouldn't panic.  Just punt instead? */
                        if ((scb->flags & SCB_TARGET_SCB) != 0)
                                panic("Timed-out target SCB but bus idle");

                        if (last_phase != P_BUSFREE
                            && (ahc_inb(ahc, SSTAT0) & TARGET) != 0) {
                                /* XXX What happened to the SCB? */
                                /* Hung target selection.  Goto busfree */
                                printf("%s: Hung target selection\n",
                                    ahc_name(ahc));
                                ahc_restart(ahc);
                                ahc_unlock(ahc, &s);
                                return;
                        }

                        if (ahc_search_qinfifo(ahc, target, channel, lun,
                            scb->hscb->tag, ROLE_INITIATOR,
                            /*status*/0, SEARCH_COUNT) > 0) {
                                disconnected = FALSE;
                        } else {
                                disconnected = TRUE;
                        }

                        if (disconnected) {

                                ahc_set_recoveryscb(ahc, scb);
                                /*
                                 * Actually re-queue this SCB in an attempt
                                 * to select the device before it reconnects.
                                 * In either case (selection or reselection),
                                 * we will now issue a target reset to the
                                 * timed-out device.
                                 *
                                 * Set the MK_MESSAGE control bit indicating
                                 * that we desire to send a message.  We
                                 * also set the disconnected flag since
                                 * in the paging case there is no guarantee
                                 * that our SCB control byte matches the
                                 * version on the card.  We don't want the
                                 * sequencer to abort the command thinking
                                 * an unsolicited reselection occurred.
                                 */
                                scb->hscb->control |= MK_MESSAGE|DISCONNECTED;
                                scb->flags |= SCB_DEVICE_RESET;

                                /*
                                 * Remove any cached copy of this SCB in the
                                 * disconnected list in preparation for the
                                 * queuing of our abort SCB.  We use the
                                 * same element in the SCB, SCB_NEXT, for
                                 * both the qinfifo and the disconnected list.
                                 */
                                ahc_search_disc_list(ahc, target, channel,
                                    lun, scb->hscb->tag,
                                    /*stop_on_first*/TRUE,
                                    /*remove*/TRUE,
                                    /*save_state*/FALSE);

                                /*
                                 * In the non-paging case, the sequencer will
                                 * never re-reference the in-core SCB.
                                 * To make sure we are notified during
                                 * reselection, set the MK_MESSAGE flag in
                                 * the card's copy of the SCB.
                                 */
                                if ((ahc->flags & AHC_PAGESCBS) == 0) {
                                        ahc_outb(ahc, SCBPTR, scb->hscb->tag);
                                        ahc_outb(ahc, SCB_CONTROL,
                                            ahc_inb(ahc, SCB_CONTROL)
                                            | MK_MESSAGE);
                                }

                                /*
                                 * Clear out any entries in the QINFIFO first
                                 * so we are the next SCB for this target
                                 * to run.
                                 */
                                ahc_search_qinfifo(ahc,
                                    SCB_GET_TARGET(ahc, scb),
                                    channel, SCB_GET_LUN(scb),
                                    SCB_LIST_NULL,
                                    ROLE_INITIATOR,
                                    CAM_REQUEUE_REQ,
                                    SEARCH_COMPLETE);
                                ahc_print_path(ahc, scb);
                                printf("Queuing a BDR SCB\n");
                                ahc_qinfifo_requeue_tail(ahc, scb);
                                ahc_outb(ahc, SCBPTR, saved_scbptr);
                                callout_reset(&scb->xs->xs_callout, 2 * hz,
                                    ahc_timeout, scb);
                                ahc_unpause(ahc);
                        } else {
                                /* Go "immediately" to the bus reset */
                                /* This shouldn't happen */
                                ahc_set_recoveryscb(ahc, scb);
                                ahc_print_path(ahc, scb);
                                printf("SCB %d: Immediate reset. "
                                    "Flags = 0x%x\n", scb->hscb->tag,
                                    scb->flags);
                                goto bus_reset;
                        }
                }
        }
        ahc_unlock(ahc, &s);
}

void
ahc_platform_set_tags(struct ahc_softc *ahc,
    struct ahc_devinfo *devinfo, int enable)
{
        struct ahc_tmode_tstate *tstate;

        ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
            devinfo->target, &tstate);

        if (enable)
                tstate->tagenable |= devinfo->target_mask;
        else
                tstate->tagenable &= ~devinfo->target_mask;
}

int
ahc_platform_alloc(struct ahc_softc *ahc, void *platform_arg)
{
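        /*
         * With GCC in C mode, sizeof() an empty structure is 0, so this
         * quietly skips the allocation when struct ahc_platform_data
         * carries no members on this platform.
         */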
        if (sizeof(struct ahc_platform_data) == 0)
                return 0;
        ahc->platform_data = malloc(sizeof(struct ahc_platform_data),
            M_DEVBUF, M_NOWAIT);
        if (ahc->platform_data == NULL)
                return (ENOMEM);
        return (0);
}

void
ahc_platform_free(struct ahc_softc *ahc)
{
        if (sizeof(struct ahc_platform_data) == 0)
                return;
        free(ahc->platform_data, M_DEVBUF);
}

int
ahc_softc_comp(struct ahc_softc *lahc, struct ahc_softc *rahc)
{
        return (0);
}

int
ahc_detach(struct device *self, int flags)
{
        int rv = 0;

        struct ahc_softc *ahc = (struct ahc_softc *)self;

        ahc_intr_enable(ahc, FALSE);
        if (ahc->sc_child != NULL)
                rv = config_detach(ahc->sc_child, flags);
        if (rv == 0 && ahc->sc_child_b != NULL)
                rv = config_detach(ahc->sc_child_b, flags);

        shutdownhook_disestablish(ahc->shutdown_hook);

        ahc_free(ahc);

        return (rv);
}

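/*
 * Called by the core driver to report asynchronous events (negotiation
 * changes, bus resets) so they can be translated into their scsipi
 * equivalents.
 */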
void
ahc_send_async(struct ahc_softc *ahc, char channel, u_int target, u_int lun,
    ac_code code, void *opt_arg)
{
        struct ahc_tmode_tstate *tstate;
        struct ahc_initiator_tinfo *tinfo;
        struct ahc_devinfo devinfo;
        struct scsipi_channel *chan;
        struct scsipi_xfer_mode xm;

        chan = channel == 'B' ? &ahc->sc_channel_b : &ahc->sc_channel;
        switch (code) {
        case AC_TRANSFER_NEG:
                tinfo = ahc_fetch_transinfo(ahc, channel, ahc->our_id, target,
                    &tstate);
                ahc_compile_devinfo(&devinfo, ahc->our_id, target, lun,
                    channel, ROLE_UNKNOWN);
                /*
                 * Don't bother if negotiating. XXX?
                 */
                if (tinfo->curr.period != tinfo->goal.period
                    || tinfo->curr.width != tinfo->goal.width
                    || tinfo->curr.offset != tinfo->goal.offset
                    || tinfo->curr.ppr_options != tinfo->goal.ppr_options)
                        break;
                xm.xm_target = target;
                xm.xm_mode = 0;
                xm.xm_period = tinfo->curr.period;
                xm.xm_offset = tinfo->curr.offset;
                if (tinfo->curr.width == MSG_EXT_WDTR_BUS_16_BIT)
                        xm.xm_mode |= PERIPH_CAP_WIDE16;
                if (tinfo->curr.period)
                        xm.xm_mode |= PERIPH_CAP_SYNC;
                if (tstate->tagenable & devinfo.target_mask)
                        xm.xm_mode |= PERIPH_CAP_TQING;
                if (tinfo->curr.ppr_options & MSG_EXT_PPR_DT_REQ)
                        xm.xm_mode |= PERIPH_CAP_DT;
                scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, &xm);
                break;
        case AC_BUS_RESET:
                scsipi_async_event(chan, ASYNC_EVENT_RESET, NULL);
                /* FALLTHROUGH */
        case AC_SENT_BDR:
        default:
                break;
        }
}