/*	$NetBSD: aic7xxx_osm.c,v 1.34 2009/09/12 19:16:35 tsutsui Exp $	*/

/*
 * Bus independent FreeBSD shim for the aic7xxx based adaptec SCSI controllers
 *
 * Copyright (c) 1994-2001 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU Public License ("GPL").
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * //depot/aic7xxx/freebsd/dev/aic7xxx/aic7xxx_osm.c#12 $
 *
 * $FreeBSD: src/sys/dev/aic7xxx/aic7xxx_osm.c,v 1.31 2002/11/30 19:08:58 scottl Exp $
 */
/*
 * Ported from FreeBSD by Pascal Renauld, Network Storage Solutions, Inc. - April 2003
 */

40
41 #include <sys/cdefs.h>
42 __KERNEL_RCSID(0, "$NetBSD: aic7xxx_osm.c,v 1.34 2009/09/12 19:16:35 tsutsui Exp $");
43
44 #include <dev/ic/aic7xxx_osm.h>
45 #include <dev/ic/aic7xxx_inline.h>
46
47 #ifndef AHC_TMODE_ENABLE
48 #define AHC_TMODE_ENABLE 0
49 #endif
50
51
52 static void ahc_action(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg);
53 static void ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments);
54 static int ahc_poll(struct ahc_softc *ahc, int wait);
55 static void ahc_setup_data(struct ahc_softc *ahc,
56 struct scsipi_xfer *xs, struct scb *scb);
57 static void ahc_set_recoveryscb(struct ahc_softc *ahc, struct scb *scb);
58 static int ahc_ioctl(struct scsipi_channel *channel, u_long cmd,
59 void *addr, int flag, struct proc *p);
60
61 static bool ahc_pmf_suspend(device_t PMF_FN_PROTO);
62 static bool ahc_pmf_resume(device_t PMF_FN_PROTO);
63 static bool ahc_pmf_shutdown(device_t, int);
64
65
66 /*
67 * Attach all the sub-devices we can find
68 */
69 int
70 ahc_attach(struct ahc_softc *ahc)
71 {
72 u_long s;
73 int i;
74 char ahc_info[256];
75
76 LIST_INIT(&ahc->pending_scbs);
77 for (i = 0; i < AHC_NUM_TARGETS; i++)
78 TAILQ_INIT(&ahc->untagged_queues[i]);
79
80 ahc_lock(ahc, &s);
81
82 ahc->sc_adapter.adapt_dev = ahc->sc_dev;
83 ahc->sc_adapter.adapt_nchannels = (ahc->features & AHC_TWIN) ? 2 : 1;
84
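	/*
	 * Advertise one opening fewer than we have SCBs, so that
	 * one SCB is always left in reserve.
	 */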
	ahc->sc_adapter.adapt_openings = ahc->scb_data->numscbs - 1;
	ahc->sc_adapter.adapt_max_periph = 16;

	ahc->sc_adapter.adapt_ioctl = ahc_ioctl;
	ahc->sc_adapter.adapt_minphys = ahc_minphys;
	ahc->sc_adapter.adapt_request = ahc_action;

	ahc->sc_channel.chan_adapter = &ahc->sc_adapter;
	ahc->sc_channel.chan_bustype = &scsi_bustype;
	ahc->sc_channel.chan_channel = 0;
	ahc->sc_channel.chan_ntargets = (ahc->features & AHC_WIDE) ? 16 : 8;
	ahc->sc_channel.chan_nluns = 8 /*AHC_NUM_LUNS*/;
	ahc->sc_channel.chan_id = ahc->our_id;
	ahc->sc_channel.chan_flags |= SCSIPI_CHAN_CANGROW;

	if (ahc->features & AHC_TWIN) {
		ahc->sc_channel_b = ahc->sc_channel;
		ahc->sc_channel_b.chan_id = ahc->our_id_b;
		ahc->sc_channel_b.chan_channel = 1;
	}

	ahc_controller_info(ahc, ahc_info, sizeof(ahc_info));
	printf("%s: %s\n", device_xname(ahc->sc_dev), ahc_info);

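	/*
	 * Attach the bus serving the primary channel first; on
	 * twin-channel adapters the other bus is attached second.
	 */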
	if ((ahc->flags & AHC_PRIMARY_CHANNEL) == 0) {
		ahc->sc_child = config_found(ahc->sc_dev,
		    &ahc->sc_channel, scsiprint);
		if (ahc->features & AHC_TWIN)
			ahc->sc_child_b = config_found(ahc->sc_dev,
			    &ahc->sc_channel_b, scsiprint);
	} else {
		if (ahc->features & AHC_TWIN)
			ahc->sc_child = config_found(ahc->sc_dev,
			    &ahc->sc_channel_b, scsiprint);
		ahc->sc_child_b = config_found(ahc->sc_dev,
		    &ahc->sc_channel, scsiprint);
	}

	ahc_intr_enable(ahc, TRUE);

	if (ahc->flags & AHC_RESET_BUS_A)
		ahc_reset_channel(ahc, 'A', TRUE);
	if ((ahc->features & AHC_TWIN) && ahc->flags & AHC_RESET_BUS_B)
		ahc_reset_channel(ahc, 'B', TRUE);

	if (!pmf_device_register1(ahc->sc_dev,
	    ahc_pmf_suspend, ahc_pmf_resume, ahc_pmf_shutdown))
		aprint_error_dev(ahc->sc_dev,
		    "couldn't establish power handler\n");

	ahc_unlock(ahc, &s);
	return (1);
}

/*
 * XXX We should call the real suspend and resume functions here,
 * but the pmf(9) support in the cardbus backend is still untested.
 */

static bool
ahc_pmf_suspend(device_t dev PMF_FN_ARGS)
{
	struct ahc_softc *sc = device_private(dev);
#if 0
	return (ahc_suspend(sc) == 0);
#else
	ahc_shutdown(sc);
	return true;
#endif
}

static bool
ahc_pmf_resume(device_t dev PMF_FN_ARGS)
{
#if 0
	struct ahc_softc *sc = device_private(dev);

	return (ahc_resume(sc) == 0);
#else
	return true;
#endif
}

static bool
ahc_pmf_shutdown(device_t dev, int howto)
{
	struct ahc_softc *sc = device_private(dev);

	/* Disable all interrupt sources by resetting the controller */
	ahc_shutdown(sc);

	return true;
}

/*
 * Catch an interrupt from the adapter
 */
void
ahc_platform_intr(void *arg)
{
	struct ahc_softc *ahc;

	ahc = (struct ahc_softc *)arg;
	ahc_intr(ahc);
}

/*
 * We have an SCB which has been processed by the
 * adapter; now we look to see how the operation
 * went.
 */
void
ahc_done(struct ahc_softc *ahc, struct scb *scb)
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	u_long s;

	xs = scb->xs;
	periph = xs->xs_periph;
	LIST_REMOVE(scb, pending_links);
	if ((scb->flags & SCB_UNTAGGEDQ) != 0) {
		struct scb_tailq *untagged_q;
		int target_offset;

		target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
		untagged_q = &ahc->untagged_queues[target_offset];
		TAILQ_REMOVE(untagged_q, scb, links.tqe);
		scb->flags &= ~SCB_UNTAGGEDQ;
		ahc_run_untagged_queue(ahc, untagged_q);
	}

	callout_stop(&scb->xs->xs_callout);

	if (xs->datalen) {
		int op;

		if (xs->xs_control & XS_CTL_DATA_IN)
			op = BUS_DMASYNC_POSTREAD;
		else
			op = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(ahc->parent_dmat, scb->dmamap, 0,
		    scb->dmamap->dm_mapsize, op);
		bus_dmamap_unload(ahc->parent_dmat, scb->dmamap);
	}

	/*
	 * If the recovery SCB completed, recovery succeeded and we
	 * are no longer handling a timeout.
	 */
	if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
		struct scb *list_scb;

		/*
		 * We were able to complete the command successfully,
		 * so reinstate the timeouts for all other pending
		 * commands.
		 */
		LIST_FOREACH(list_scb, &ahc->pending_scbs, pending_links) {
			if (!(list_scb->xs->xs_control & XS_CTL_POLL)) {
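				/*
				 * Convert the timeout from milliseconds
				 * to ticks, dividing first for large
				 * values so the multiplication by hz
				 * cannot overflow an int.
				 */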
				callout_reset(&list_scb->xs->xs_callout,
				    (list_scb->xs->timeout > 1000000) ?
				    (list_scb->xs->timeout / 1000) * hz :
				    (list_scb->xs->timeout * hz) / 1000,
				    ahc_timeout, list_scb);
			}
		}

		if (ahc_get_transaction_status(scb) == CAM_BDR_SENT
		 || ahc_get_transaction_status(scb) == CAM_REQ_ABORTED)
			ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
		scsipi_printaddr(xs->xs_periph);
		printf("%s: no longer in timeout, status = %x\n",
		    ahc_name(ahc), xs->status);
	}

	if (xs->error != XS_NOERROR) {
		/* Don't clobber any existing error state */
	} else if ((scb->flags & SCB_SENSE) != 0) {
		/*
		 * We performed autosense retrieval.
		 *
		 * Zero any sense not transferred by the
		 * device. The SCSI spec mandates that any
		 * untransferred data should be assumed to be
		 * zero. Complete the 'bounce' of sense information
		 * through buffers accessible via bus-space by
		 * copying it into the client's scsipi_xfer.
		 */
		memset(&xs->sense.scsi_sense, 0, sizeof(xs->sense.scsi_sense));
		memcpy(&xs->sense.scsi_sense,
		    ahc_get_sense_buf(ahc, scb),
		    sizeof(xs->sense.scsi_sense));
		xs->error = XS_SENSE;
	}
	if (scb->flags & SCB_FREEZE_QUEUE) {
		scsipi_periph_thaw(periph, 1);
		scb->flags &= ~SCB_FREEZE_QUEUE;
	}

	ahc_lock(ahc, &s);
	ahc_free_scb(ahc, scb);
	ahc_unlock(ahc, &s);

	scsipi_done(xs);
}

static int
ahc_ioctl(struct scsipi_channel *channel, u_long cmd, void *addr,
    int flag, struct proc *p)
{
	struct ahc_softc *ahc = device_private(channel->chan_adapter->adapt_dev);
	int s, ret = ENOTTY;

	switch (cmd) {
	case SCBUSIORESET:
		s = splbio();
		ahc_reset_channel(ahc, channel->chan_channel == 1 ? 'B' : 'A',
		    TRUE);
		splx(s);
		ret = 0;
		break;
	default:
		break;
	}

	return ret;
}

static void
ahc_action(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
{
	struct ahc_softc *ahc;
	int s;
	struct ahc_initiator_tinfo *tinfo;
	struct ahc_tmode_tstate *tstate;

	ahc = device_private(chan->chan_adapter->adapt_dev);

	switch (req) {

	case ADAPTER_REQ_RUN_XFER:
	  {
		struct scsipi_xfer *xs;
		struct scsipi_periph *periph;
		struct scb *scb;
		struct hardware_scb *hscb;
		u_int target_id;
		u_int our_id;
		u_long ss;

		xs = arg;
		periph = xs->xs_periph;

		target_id = periph->periph_target;
		our_id = ahc->our_id;

		SC_DEBUG(xs->xs_periph, SCSIPI_DB3, ("ahc_action\n"));

		/*
		 * get an scb to use.
		 */
		ahc_lock(ahc, &ss);
		if ((scb = ahc_get_scb(ahc)) == NULL) {
			xs->error = XS_RESOURCE_SHORTAGE;
			ahc_unlock(ahc, &ss);
			scsipi_done(xs);
			return;
		}
		ahc_unlock(ahc, &ss);

		hscb = scb->hscb;

		SC_DEBUG(periph, SCSIPI_DB3, ("start scb(%p)\n", scb));
		scb->xs = xs;

		/*
		 * Put all the arguments for the xfer in the scb
		 */
		hscb->control = 0;
		hscb->scsiid = BUILD_SCSIID(ahc, 0, target_id, our_id);
		hscb->lun = periph->periph_lun;
		if (xs->xs_control & XS_CTL_RESET) {
			hscb->cdb_len = 0;
			scb->flags |= SCB_DEVICE_RESET;
			hscb->control |= MK_MESSAGE;
			ahc_execute_scb(scb, NULL, 0);
			/*
			 * A device reset carries no CDB or data; the
			 * SCB has been queued, so skip the normal
			 * setup below.
			 */
			break;
		}

		ahc_setup_data(ahc, xs, scb);

		break;
	  }
	case ADAPTER_REQ_GROW_RESOURCES:
#ifdef AHC_DEBUG
		printf("%s: ADAPTER_REQ_GROW_RESOURCES\n", ahc_name(ahc));
#endif
		chan->chan_adapter->adapt_openings += ahc_alloc_scbs(ahc);
		if (ahc->scb_data->numscbs >= AHC_SCB_MAX_ALLOC)
			chan->chan_flags &= ~SCSIPI_CHAN_CANGROW;
		return;

	case ADAPTER_REQ_SET_XFER_MODE:
	  {
		struct scsipi_xfer_mode *xm = arg;
		struct ahc_devinfo devinfo;
		int target_id, our_id, first;
		u_int width;
		char channel;
		u_int ppr_options = 0, period, offset;
		struct ahc_syncrate *syncrate;
		uint16_t old_autoneg;

		target_id = xm->xm_target;
		our_id = chan->chan_id;
		channel = (chan->chan_channel == 1) ? 'B' : 'A';
		s = splbio();
		tinfo = ahc_fetch_transinfo(ahc, channel, our_id, target_id,
		    &tstate);
		ahc_compile_devinfo(&devinfo, our_id, target_id,
		    0, channel, ROLE_INITIATOR);

		old_autoneg = tstate->auto_negotiate;

		/*
		 * XXX since the period and offset are not provided here,
		 * fake things by forcing a renegotiation using the user
		 * settings if this is called for the first time (i.e.
		 * during probe). Also, cap various values at the user
		 * values, assuming that the user set it up that way.
		 */
		if (ahc->inited_target[target_id] == 0) {
			period = tinfo->user.period;
			offset = tinfo->user.offset;
			ppr_options = tinfo->user.ppr_options;
			width = tinfo->user.width;
			tstate->tagenable |=
			    (ahc->user_tagenable & devinfo.target_mask);
			tstate->discenable |=
			    (ahc->user_discenable & devinfo.target_mask);
			ahc->inited_target[target_id] = 1;
			first = 1;
		} else
			first = 0;

		if (xm->xm_mode & (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
			width = MSG_EXT_WDTR_BUS_16_BIT;
		else
			width = MSG_EXT_WDTR_BUS_8_BIT;

		ahc_validate_width(ahc, NULL, &width, ROLE_UNKNOWN);
		if (width > tinfo->user.width)
			width = tinfo->user.width;
		ahc_set_width(ahc, &devinfo, width, AHC_TRANS_GOAL, FALSE);

		if (!(xm->xm_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT))) {
			period = 0;
			offset = 0;
			ppr_options = 0;
		}

		if ((xm->xm_mode & PERIPH_CAP_DT) &&
		    (ppr_options & MSG_EXT_PPR_DT_REQ))
			ppr_options |= MSG_EXT_PPR_DT_REQ;
		else
			ppr_options &= ~MSG_EXT_PPR_DT_REQ;
		if ((tstate->discenable & devinfo.target_mask) == 0 ||
		    (tstate->tagenable & devinfo.target_mask) == 0)
			ppr_options &= ~MSG_EXT_PPR_IU_REQ;

		if ((xm->xm_mode & PERIPH_CAP_TQING) &&
		    (ahc->user_tagenable & devinfo.target_mask))
			tstate->tagenable |= devinfo.target_mask;
		else
			tstate->tagenable &= ~devinfo.target_mask;

		syncrate = ahc_find_syncrate(ahc, &period, &ppr_options,
		    AHC_SYNCRATE_MAX);
		ahc_validate_offset(ahc, NULL, syncrate, &offset,
		    width, ROLE_UNKNOWN);

		if (offset == 0) {
			period = 0;
			ppr_options = 0;
		}

		if (ppr_options != 0
		    && tinfo->user.transport_version >= 3) {
			tinfo->goal.transport_version =
			    tinfo->user.transport_version;
			tinfo->curr.transport_version =
			    tinfo->user.transport_version;
		}

		ahc_set_syncrate(ahc, &devinfo, syncrate, period, offset,
		    ppr_options, AHC_TRANS_GOAL, FALSE);

		/*
		 * If this is the first request, and no negotiation is
		 * needed, just confirm the state to the scsipi layer,
		 * so that it can print a message.
		 */
		if (old_autoneg == tstate->auto_negotiate && first) {
			xm->xm_mode = 0;
			xm->xm_period = tinfo->curr.period;
			xm->xm_offset = tinfo->curr.offset;
			if (tinfo->curr.width == MSG_EXT_WDTR_BUS_16_BIT)
				xm->xm_mode |= PERIPH_CAP_WIDE16;
			if (tinfo->curr.period)
				xm->xm_mode |= PERIPH_CAP_SYNC;
			if (tstate->tagenable & devinfo.target_mask)
				xm->xm_mode |= PERIPH_CAP_TQING;
			if (tinfo->curr.ppr_options & MSG_EXT_PPR_DT_REQ)
				xm->xm_mode |= PERIPH_CAP_DT;
			scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, xm);
		}
		splx(s);
	  }
	}

	return;
}

static void
ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments)
{
	struct scb *scb;
	struct scsipi_xfer *xs;
	struct ahc_softc *ahc;
	struct ahc_initiator_tinfo *tinfo;
	struct ahc_tmode_tstate *tstate;

	u_int mask;
	u_long s;

	scb = (struct scb *)arg;
	xs = scb->xs;
	xs->error = 0;
	xs->status = 0;
	xs->xs_status = 0;
	ahc = device_private(xs->xs_periph->periph_channel->chan_adapter->adapt_dev);

	if (nsegments != 0) {
		struct ahc_dma_seg *sg;
		bus_dma_segment_t *end_seg;
		int op;

		end_seg = dm_segs + nsegments;

		/* Copy the segments into our SG list */
		sg = scb->sg_list;
		while (dm_segs < end_seg) {
			uint32_t len;

			sg->addr = ahc_htole32(dm_segs->ds_addr);
			len = dm_segs->ds_len
			    | ((dm_segs->ds_addr >> 8) & AHC_SG_HIGH_ADDR_MASK);
			sg->len = ahc_htole32(len);
			sg++;
			dm_segs++;
		}

		/*
		 * Note where to find the SG entries in bus space.
		 * We also set the full residual flag which the
		 * sequencer will clear as soon as a data transfer
		 * occurs.
		 */
		scb->hscb->sgptr = ahc_htole32(scb->sg_list_phys|SG_FULL_RESID);

		if (xs->xs_control & XS_CTL_DATA_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;

		bus_dmamap_sync(ahc->parent_dmat, scb->dmamap, 0,
		    scb->dmamap->dm_mapsize, op);

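		/*
		 * Flag the final segment so the sequencer can tell
		 * where the SG list ends.
		 */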
		sg--;
		sg->len |= ahc_htole32(AHC_DMA_LAST_SEG);

		/* Copy the first SG into the "current" data pointer area */
		scb->hscb->dataptr = scb->sg_list->addr;
		scb->hscb->datacnt = scb->sg_list->len;
	} else {
		scb->hscb->sgptr = ahc_htole32(SG_LIST_NULL);
		scb->hscb->dataptr = 0;
		scb->hscb->datacnt = 0;
	}

	scb->sg_count = nsegments;

	ahc_lock(ahc, &s);

	/*
	 * Last time we need to check if this SCB needs to
	 * be aborted.
	 */
	if (xs->xs_status & XS_STS_DONE) {
		if (nsegments != 0)
			bus_dmamap_unload(ahc->parent_dmat, scb->dmamap);
		ahc_free_scb(ahc, scb);
		ahc_unlock(ahc, &s);
		scsipi_done(xs);
		return;
	}

	tinfo = ahc_fetch_transinfo(ahc, ahc->channel,
	    SCSIID_OUR_ID(scb->hscb->scsiid),
	    SCSIID_TARGET(ahc, scb->hscb->scsiid),
	    &tstate);

	mask = SCB_GET_TARGET_MASK(ahc, scb);
	scb->hscb->scsirate = tinfo->scsirate;
	scb->hscb->scsioffset = tinfo->curr.offset;

	if ((tstate->ultraenb & mask) != 0)
		scb->hscb->control |= ULTRAENB;

	if ((tstate->discenable & mask) != 0)
		scb->hscb->control |= DISCENB;

	if (xs->xs_tag_type)
		scb->hscb->control |= xs->xs_tag_type;

#if 1	/* This looks like it makes sense at first, but it can loop */
	if ((xs->xs_control & XS_CTL_DISCOVERY) && (tinfo->goal.width == 0
	    && tinfo->goal.offset == 0
	    && tinfo->goal.ppr_options == 0)) {
		scb->flags |= SCB_NEGOTIATE;
		scb->hscb->control |= MK_MESSAGE;
	} else
#endif
	if ((tstate->auto_negotiate & mask) != 0) {
		scb->flags |= SCB_AUTO_NEGOTIATE;
		scb->hscb->control |= MK_MESSAGE;
	}

	LIST_INSERT_HEAD(&ahc->pending_scbs, scb, pending_links);

	if (!(xs->xs_control & XS_CTL_POLL)) {
		callout_reset(&scb->xs->xs_callout, xs->timeout > 1000000 ?
		    (xs->timeout / 1000) * hz : (xs->timeout * hz) / 1000,
		    ahc_timeout, scb);
	}

	/*
	 * We only allow one untagged transaction
	 * per target in the initiator role unless
	 * we are storing a full busy target *lun*
	 * table in SCB space.
	 */
	if ((scb->hscb->control & (TARGET_SCB|TAG_ENB)) == 0
	    && (ahc->flags & AHC_SCB_BTT) == 0) {
		struct scb_tailq *untagged_q;
		int target_offset;

		target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
		untagged_q = &(ahc->untagged_queues[target_offset]);
		TAILQ_INSERT_TAIL(untagged_q, scb, links.tqe);
		scb->flags |= SCB_UNTAGGEDQ;
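		/*
		 * If another untagged command is already active on
		 * this target, leave this SCB on the untagged queue;
		 * ahc_run_untagged_queue() will start it once the
		 * current command completes.
		 */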
		if (TAILQ_FIRST(untagged_q) != scb) {
			ahc_unlock(ahc, &s);
			return;
		}
	}
	scb->flags |= SCB_ACTIVE;

	if ((scb->flags & SCB_TARGET_IMMEDIATE) != 0) {
		/* Define a mapping from our tag to the SCB. */
		ahc->scb_data->scbindex[scb->hscb->tag] = scb;
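		/*
		 * Pause the sequencer before touching SCBPTR and the
		 * immediate-SCB register.
		 */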
		ahc_pause(ahc);
		if ((ahc->flags & AHC_PAGESCBS) == 0)
			ahc_outb(ahc, SCBPTR, scb->hscb->tag);
		ahc_outb(ahc, TARG_IMMEDIATE_SCB, scb->hscb->tag);
		ahc_unpause(ahc);
	} else {
		ahc_queue_scb(ahc, scb);
	}

	if (!(xs->xs_control & XS_CTL_POLL)) {
		ahc_unlock(ahc, &s);
		return;
	}

	/*
	 * If we can't use interrupts, poll for completion
	 */

	SC_DEBUG(xs->xs_periph, SCSIPI_DB3, ("cmd_poll\n"));
	do {
		if (ahc_poll(ahc, xs->timeout)) {
			if (!(xs->xs_control & XS_CTL_SILENT))
				printf("cmd fail\n");
			ahc_timeout(scb);
			break;
		}
	} while (!(xs->xs_status & XS_STS_DONE));
	ahc_unlock(ahc, &s);

	return;
}

static int
ahc_poll(struct ahc_softc *ahc, int wait)
{
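	/* 'wait' is in milliseconds; each iteration delays one. */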
	while (--wait) {
		DELAY(1000);
		if (ahc_inb(ahc, INTSTAT) & INT_PEND)
			break;
	}

	if (wait == 0) {
		printf("%s: board is not responding\n", ahc_name(ahc));
		return (EIO);
	}

	ahc_intr((void *)ahc);
	return (0);
}

static void
ahc_setup_data(struct ahc_softc *ahc, struct scsipi_xfer *xs,
    struct scb *scb)
{
	struct hardware_scb *hscb;

	hscb = scb->hscb;
	xs->resid = xs->status = 0;

	hscb->cdb_len = xs->cmdlen;
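	/* Fail any request whose CDB will not fit in the hardware SCB. */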
	if (hscb->cdb_len > sizeof(hscb->cdb32)) {
		u_long s;

		ahc_set_transaction_status(scb, CAM_REQ_INVALID);
		ahc_lock(ahc, &s);
		ahc_free_scb(ahc, scb);
		ahc_unlock(ahc, &s);
		scsipi_done(xs);
		return;
	}

	if (hscb->cdb_len > 12) {
		memcpy(hscb->cdb32, xs->cmd, hscb->cdb_len);
		scb->flags |= SCB_CDB32_PTR;
	} else {
		memcpy(hscb->shared_data.cdb, xs->cmd, hscb->cdb_len);
	}

	/* Only use S/G if there is a transfer */
	if (xs->datalen) {
		int error;

		error = bus_dmamap_load(ahc->parent_dmat,
		    scb->dmamap, xs->data,
		    xs->datalen, NULL,
		    ((xs->xs_control & XS_CTL_NOSLEEP) ?
		    BUS_DMA_NOWAIT : BUS_DMA_WAITOK) |
		    BUS_DMA_STREAMING |
		    ((xs->xs_control & XS_CTL_DATA_IN) ?
		    BUS_DMA_READ : BUS_DMA_WRITE));
		if (error) {
#ifdef AHC_DEBUG
			printf("%s: in ahc_setup_data(): bus_dmamap_load() "
			    "= %d\n",
			    ahc_name(ahc), error);
#endif
			xs->error = XS_RESOURCE_SHORTAGE;
			scsipi_done(xs);
			return;
		}
		ahc_execute_scb(scb,
		    scb->dmamap->dm_segs,
		    scb->dmamap->dm_nsegs);
	} else {
		ahc_execute_scb(scb, NULL, 0);
	}
}

static void
ahc_set_recoveryscb(struct ahc_softc *ahc, struct scb *scb)
{
	if ((scb->flags & SCB_RECOVERY_SCB) == 0) {
		struct scb *list_scb;

		scb->flags |= SCB_RECOVERY_SCB;

		/*
		 * Take all queued, but not sent SCBs out of the equation.
		 * Also ensure that no new CCBs are queued to us while we
		 * try to fix this problem.
		 */
		scsipi_channel_freeze(&ahc->sc_channel, 1);
		if (ahc->features & AHC_TWIN)
			scsipi_channel_freeze(&ahc->sc_channel_b, 1);

		/*
		 * Go through all of our pending SCBs and remove
		 * any scheduled timeouts for them. We will reschedule
		 * them after we've successfully fixed this problem.
		 */
		LIST_FOREACH(list_scb, &ahc->pending_scbs, pending_links) {
			callout_stop(&list_scb->xs->xs_callout);
		}
	}
}

void
ahc_timeout(void *arg)
{
	struct scb *scb;
	struct ahc_softc *ahc;
	u_long s;
	int found;
	u_int last_phase;
	int target;
	int lun;
	int i;
	char channel;

	scb = (struct scb *)arg;
	ahc = (struct ahc_softc *)scb->ahc_softc;

	ahc_lock(ahc, &s);

	ahc_pause_and_flushwork(ahc);

	if ((scb->flags & SCB_ACTIVE) == 0) {
		/* Previous timeout took care of me already */
		printf("%s: Timedout SCB already complete. "
		    "Interrupts may not be functioning.\n", ahc_name(ahc));
		ahc_unpause(ahc);
		ahc_unlock(ahc, &s);
		return;
	}

	target = SCB_GET_TARGET(ahc, scb);
	channel = SCB_GET_CHANNEL(ahc, scb);
	lun = SCB_GET_LUN(scb);

	ahc_print_path(ahc, scb);
	printf("SCB 0x%x - timed out\n", scb->hscb->tag);
	ahc_dump_card_state(ahc);
	last_phase = ahc_inb(ahc, LASTPHASE);
	if (scb->sg_count > 0) {
		for (i = 0; i < scb->sg_count; i++) {
			printf("sg[%d] - Addr 0x%x : Length %d\n",
			    i,
			    scb->sg_list[i].addr,
			    scb->sg_list[i].len & AHC_SG_LEN_MASK);
		}
	}
	if (scb->flags & (SCB_DEVICE_RESET|SCB_ABORT)) {
		/*
		 * Been down this road before.
		 * Do a full bus reset.
		 */
bus_reset:
		ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
		found = ahc_reset_channel(ahc, channel, /*Initiate Reset*/TRUE);
		printf("%s: Issued Channel %c Bus Reset. "
		    "%d SCBs aborted\n", ahc_name(ahc), channel, found);
	} else {
		/*
		 * If we are a target, transition to bus free and report
		 * the timeout.
		 *
		 * The target/initiator that is holding up the bus may not
		 * be the same as the one that triggered this timeout
		 * (different commands have different timeout lengths).
		 * If the bus is idle and we are acting as the initiator
		 * for this request, queue a BDR message to the timed out
		 * target. Otherwise, if the timed out transaction is
		 * active:
		 *   Initiator transaction:
		 *     Stuff the message buffer with a BDR message and assert
		 *     ATN in the hopes that the target will let go of the bus
		 *     and go to the mesgout phase. If this fails, we'll
		 *     get another timeout 2 seconds later which will attempt
		 *     a bus reset.
		 *
		 *   Target transaction:
		 *     Transition to BUS FREE and report the error.
		 *     It's good to be the target!
		 */
		u_int active_scb_index;
		u_int saved_scbptr;

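		/*
		 * Save the sequencer's SCB pointer so it can be
		 * restored once we are done poking at the card's
		 * SCB array below.
		 */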
		saved_scbptr = ahc_inb(ahc, SCBPTR);
		active_scb_index = ahc_inb(ahc, SCB_TAG);

		if ((ahc_inb(ahc, SEQ_FLAGS) & NOT_IDENTIFIED) == 0
		    && (active_scb_index < ahc->scb_data->numscbs)) {
			struct scb *active_scb;

			/*
			 * If the active SCB is not us, assume that
			 * the active SCB has a longer timeout than
			 * the timedout SCB, and wait for the active
			 * SCB to timeout.
			 */
			active_scb = ahc_lookup_scb(ahc, active_scb_index);
			if (active_scb != scb) {
				uint64_t newtimeout;

				ahc_print_path(ahc, scb);
				printf("Other SCB Timeout%s",
				    (scb->flags & SCB_OTHERTCL_TIMEOUT) != 0
				    ? " again\n" : "\n");
				scb->flags |= SCB_OTHERTCL_TIMEOUT;
				newtimeout = MAX(active_scb->xs->timeout,
				    scb->xs->timeout);
				callout_reset(&scb->xs->xs_callout,
				    newtimeout > 1000000 ?
				    (newtimeout / 1000) * hz :
				    (newtimeout * hz) / 1000,
				    ahc_timeout, scb);
				ahc_unpause(ahc);
				ahc_unlock(ahc, &s);
				return;
			}

			/* It's us */
			if ((scb->flags & SCB_TARGET_SCB) != 0) {

				/*
				 * Send back any queued up transactions
				 * and properly record the error condition.
				 */
				ahc_abort_scbs(ahc, SCB_GET_TARGET(ahc, scb),
				    SCB_GET_CHANNEL(ahc, scb),
				    SCB_GET_LUN(scb),
				    scb->hscb->tag,
				    ROLE_TARGET,
				    CAM_CMD_TIMEOUT);

				/* Will clear us from the bus */
				ahc_restart(ahc);
				ahc_unlock(ahc, &s);
				return;
			}

			ahc_set_recoveryscb(ahc, active_scb);
			ahc_outb(ahc, MSG_OUT, HOST_MSG);
			ahc_outb(ahc, SCSISIGO, last_phase|ATNO);
			ahc_print_path(ahc, active_scb);
			printf("BDR message in message buffer\n");
			active_scb->flags |= SCB_DEVICE_RESET;
			callout_reset(&active_scb->xs->xs_callout,
			    2 * hz, ahc_timeout, active_scb);
			ahc_unpause(ahc);
		} else {
			int disconnected;

			/* XXX Shouldn't panic. Just punt instead? */
			if ((scb->flags & SCB_TARGET_SCB) != 0)
				panic("Timed-out target SCB but bus idle");

			if (last_phase != P_BUSFREE
			    && (ahc_inb(ahc, SSTAT0) & TARGET) != 0) {
				/* XXX What happened to the SCB? */
				/* Hung target selection. Goto busfree */
				printf("%s: Hung target selection\n",
				    ahc_name(ahc));
				ahc_restart(ahc);
				ahc_unlock(ahc, &s);
				return;
			}

			if (ahc_search_qinfifo(ahc, target, channel, lun,
			    scb->hscb->tag, ROLE_INITIATOR,
			    /*status*/0, SEARCH_COUNT) > 0) {
				disconnected = FALSE;
			} else {
				disconnected = TRUE;
			}

			if (disconnected) {

				ahc_set_recoveryscb(ahc, scb);
				/*
				 * Actually re-queue this SCB in an attempt
				 * to select the device before it reconnects.
				 * In either case (selection or reselection),
				 * we will now issue a target reset to the
				 * timed-out device.
				 *
				 * Set the MK_MESSAGE control bit indicating
				 * that we desire to send a message. We
				 * also set the disconnected flag since
				 * in the paging case there is no guarantee
				 * that our SCB control byte matches the
				 * version on the card. We don't want the
				 * sequencer to abort the command thinking
				 * an unsolicited reselection occurred.
				 */
				scb->hscb->control |= MK_MESSAGE|DISCONNECTED;
				scb->flags |= SCB_DEVICE_RESET;

				/*
				 * Remove any cached copy of this SCB in the
				 * disconnected list in preparation for the
				 * queuing of our abort SCB. We use the
				 * same element in the SCB, SCB_NEXT, for
				 * both the qinfifo and the disconnected list.
				 */
				ahc_search_disc_list(ahc, target, channel,
				    lun, scb->hscb->tag,
				    /*stop_on_first*/TRUE,
				    /*remove*/TRUE,
				    /*save_state*/FALSE);

				/*
				 * In the non-paging case, the sequencer will
				 * never re-reference the in-core SCB.
				 * To make sure we are notified during
				 * reselection, set the MK_MESSAGE flag in
				 * the card's copy of the SCB.
				 */
				if ((ahc->flags & AHC_PAGESCBS) == 0) {
					ahc_outb(ahc, SCBPTR, scb->hscb->tag);
					ahc_outb(ahc, SCB_CONTROL,
					    ahc_inb(ahc, SCB_CONTROL)
					    | MK_MESSAGE);
				}

				/*
				 * Clear out any entries in the QINFIFO first
				 * so we are the next SCB for this target
				 * to run.
				 */
				ahc_search_qinfifo(ahc,
				    SCB_GET_TARGET(ahc, scb),
				    channel, SCB_GET_LUN(scb),
				    SCB_LIST_NULL,
				    ROLE_INITIATOR,
				    CAM_REQUEUE_REQ,
				    SEARCH_COMPLETE);
				ahc_print_path(ahc, scb);
				printf("Queuing a BDR SCB\n");
				ahc_qinfifo_requeue_tail(ahc, scb);
				ahc_outb(ahc, SCBPTR, saved_scbptr);
				callout_reset(&scb->xs->xs_callout, 2 * hz,
				    ahc_timeout, scb);
				ahc_unpause(ahc);
			} else {
				/* Go "immediately" to the bus reset */
				/* This shouldn't happen */
				ahc_set_recoveryscb(ahc, scb);
				ahc_print_path(ahc, scb);
				printf("SCB %d: Immediate reset. "
				    "Flags = 0x%x\n", scb->hscb->tag,
				    scb->flags);
				goto bus_reset;
			}
		}
	}
	ahc_unlock(ahc, &s);
}

void
ahc_platform_set_tags(struct ahc_softc *ahc,
    struct ahc_devinfo *devinfo, int enable)
{
	struct ahc_tmode_tstate *tstate;

	ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
	    devinfo->target, &tstate);

	if (enable)
		tstate->tagenable |= devinfo->target_mask;
	else
		tstate->tagenable &= ~devinfo->target_mask;
}

int
ahc_platform_alloc(struct ahc_softc *ahc, void *platform_arg)
{
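	/*
	 * struct ahc_platform_data is currently empty; if the compiler
	 * gives it zero size there is nothing to allocate.
	 */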
	if (sizeof(struct ahc_platform_data) == 0)
		return 0;
	ahc->platform_data = malloc(sizeof(struct ahc_platform_data), M_DEVBUF,
	    M_NOWAIT);
	if (ahc->platform_data == NULL)
		return (ENOMEM);
	return (0);
}

void
ahc_platform_free(struct ahc_softc *ahc)
{
	if (sizeof(struct ahc_platform_data) == 0)
		return;
	free(ahc->platform_data, M_DEVBUF);
}

int
ahc_softc_comp(struct ahc_softc *lahc, struct ahc_softc *rahc)
{
	return (0);
}

int
ahc_detach(struct ahc_softc *ahc, int flags)
{
	int rv = 0;

	ahc_intr_enable(ahc, FALSE);
	if (ahc->sc_child != NULL)
		rv = config_detach(ahc->sc_child, flags);
	if (rv == 0 && ahc->sc_child_b != NULL)
		rv = config_detach(ahc->sc_child_b, flags);

	pmf_device_deregister(ahc->sc_dev);
	ahc_free(ahc);

	return (rv);
}


void
ahc_send_async(struct ahc_softc *ahc, char channel, u_int target, u_int lun,
    ac_code code, void *opt_arg)
{
	struct ahc_tmode_tstate *tstate;
	struct ahc_initiator_tinfo *tinfo;
	struct ahc_devinfo devinfo;
	struct scsipi_channel *chan;
	struct scsipi_xfer_mode xm;

	chan = channel == 'B' ? &ahc->sc_channel_b : &ahc->sc_channel;
	switch (code) {
	case AC_TRANSFER_NEG:
		tinfo = ahc_fetch_transinfo(ahc, channel, ahc->our_id, target,
		    &tstate);
		ahc_compile_devinfo(&devinfo, ahc->our_id, target, lun,
		    channel, ROLE_UNKNOWN);
		/*
		 * Don't bother if negotiating. XXX?
		 */
		if (tinfo->curr.period != tinfo->goal.period
		    || tinfo->curr.width != tinfo->goal.width
		    || tinfo->curr.offset != tinfo->goal.offset
		    || tinfo->curr.ppr_options != tinfo->goal.ppr_options)
			break;
		xm.xm_target = target;
		xm.xm_mode = 0;
		xm.xm_period = tinfo->curr.period;
		xm.xm_offset = tinfo->curr.offset;
		if (tinfo->curr.width == MSG_EXT_WDTR_BUS_16_BIT)
			xm.xm_mode |= PERIPH_CAP_WIDE16;
		if (tinfo->curr.period)
			xm.xm_mode |= PERIPH_CAP_SYNC;
		if (tstate->tagenable & devinfo.target_mask)
			xm.xm_mode |= PERIPH_CAP_TQING;
		if (tinfo->curr.ppr_options & MSG_EXT_PPR_DT_REQ)
			xm.xm_mode |= PERIPH_CAP_DT;
		scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, &xm);
		break;
	case AC_BUS_RESET:
		scsipi_async_event(chan, ASYNC_EVENT_RESET, NULL);
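		/* FALLTHROUGH */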
	case AC_SENT_BDR:
	default:
		break;
	}
}