/*	$NetBSD: aic7xxx_osm.c,v 1.32 2009/09/02 11:10:37 tsutsui Exp $	*/

/*
 * Bus independent FreeBSD shim for the aic7xxx based adaptec SCSI controllers
 *
 * Copyright (c) 1994-2001 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU Public License ("GPL").
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * //depot/aic7xxx/freebsd/dev/aic7xxx/aic7xxx_osm.c#12 $
 *
 * $FreeBSD: src/sys/dev/aic7xxx/aic7xxx_osm.c,v 1.31 2002/11/30 19:08:58 scottl Exp $
 */
/*
 * Ported from FreeBSD by Pascal Renauld, Network Storage Solutions, Inc. - April 2003
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: aic7xxx_osm.c,v 1.32 2009/09/02 11:10:37 tsutsui Exp $");

#include <dev/ic/aic7xxx_osm.h>
#include <dev/ic/aic7xxx_inline.h>

#ifndef AHC_TMODE_ENABLE
#define AHC_TMODE_ENABLE 0
#endif


static void	ahc_action(struct scsipi_channel *chan,
			   scsipi_adapter_req_t req, void *arg);
static void	ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs,
				int nsegments);
static int	ahc_poll(struct ahc_softc *ahc, int wait);
static void	ahc_setup_data(struct ahc_softc *ahc,
			       struct scsipi_xfer *xs, struct scb *scb);
static void	ahc_set_recoveryscb(struct ahc_softc *ahc, struct scb *scb);
static int	ahc_ioctl(struct scsipi_channel *channel, u_long cmd,
			  void *addr, int flag, struct proc *p);

static bool	ahc_pmf_suspend(device_t PMF_FN_PROTO);
static bool	ahc_pmf_resume(device_t PMF_FN_PROTO);


/*
 * Attach all the sub-devices we can find
 */
int
ahc_attach(struct ahc_softc *ahc)
{
	u_long	s;
	int	i;
	char	ahc_info[256];

	LIST_INIT(&ahc->pending_scbs);
	for (i = 0; i < AHC_NUM_TARGETS; i++)
		TAILQ_INIT(&ahc->untagged_queues[i]);

	ahc_lock(ahc, &s);

	ahc->sc_adapter.adapt_dev = ahc->sc_dev;
	ahc->sc_adapter.adapt_nchannels = (ahc->features & AHC_TWIN) ? 2 : 1;

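	/*
	 * One SCB is held back from adapt_openings; presumably this
	 * keeps an SCB available for error recovery (XXX editorial
	 * guess -- the rationale is not spelled out in this driver).
	 */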
	ahc->sc_adapter.adapt_openings = ahc->scb_data->numscbs - 1;
	ahc->sc_adapter.adapt_max_periph = 16;

	ahc->sc_adapter.adapt_ioctl = ahc_ioctl;
	ahc->sc_adapter.adapt_minphys = ahc_minphys;
	ahc->sc_adapter.adapt_request = ahc_action;

	ahc->sc_channel.chan_adapter = &ahc->sc_adapter;
	ahc->sc_channel.chan_bustype = &scsi_bustype;
	ahc->sc_channel.chan_channel = 0;
	ahc->sc_channel.chan_ntargets = (ahc->features & AHC_WIDE) ? 16 : 8;
	ahc->sc_channel.chan_nluns = 8 /*AHC_NUM_LUNS*/;
	ahc->sc_channel.chan_id = ahc->our_id;
	ahc->sc_channel.chan_flags |= SCSIPI_CHAN_CANGROW;

	if (ahc->features & AHC_TWIN) {
		ahc->sc_channel_b = ahc->sc_channel;
		ahc->sc_channel_b.chan_id = ahc->our_id_b;
		ahc->sc_channel_b.chan_channel = 1;
	}

	ahc_controller_info(ahc, ahc_info, sizeof(ahc_info));
	printf("%s: %s\n", device_xname(ahc->sc_dev), ahc_info);

	if ((ahc->flags & AHC_PRIMARY_CHANNEL) == 0) {
		ahc->sc_child = config_found(ahc->sc_dev,
		    &ahc->sc_channel, scsiprint);
		if (ahc->features & AHC_TWIN)
			ahc->sc_child_b = config_found(ahc->sc_dev,
			    &ahc->sc_channel_b, scsiprint);
	} else {
		if (ahc->features & AHC_TWIN)
			ahc->sc_child = config_found(ahc->sc_dev,
			    &ahc->sc_channel_b, scsiprint);
		ahc->sc_child_b = config_found(ahc->sc_dev,
		    &ahc->sc_channel, scsiprint);
	}

	ahc_intr_enable(ahc, TRUE);

	if (ahc->flags & AHC_RESET_BUS_A)
		ahc_reset_channel(ahc, 'A', TRUE);
	if ((ahc->features & AHC_TWIN) && ahc->flags & AHC_RESET_BUS_B)
		ahc_reset_channel(ahc, 'B', TRUE);

	if (!pmf_device_register(ahc->sc_dev, ahc_pmf_suspend, ahc_pmf_resume))
		aprint_error_dev(ahc->sc_dev,
		    "couldn't establish power handler\n");

	ahc_unlock(ahc, &s);
	return (1);
}

/*
 * XXX we should call the real suspend and resume functions here
 * but for some reason ahc_suspend() panics on shutdown
 */

static bool
ahc_pmf_suspend(device_t dev PMF_FN_ARGS)
{
	struct ahc_softc *sc = device_private(dev);
#if 0
	return (ahc_suspend(sc) == 0);
#else
	ahc_shutdown(sc);
	return true;
#endif
}

static bool
ahc_pmf_resume(device_t dev PMF_FN_ARGS)
{
#if 0
	struct ahc_softc *sc = device_private(dev);

	return (ahc_resume(sc) == 0);
#else
	return true;
#endif
}

/*
 * Catch an interrupt from the adapter
 */
void
ahc_platform_intr(void *arg)
{
	struct ahc_softc *ahc;

	ahc = (struct ahc_softc *)arg;
	ahc_intr(ahc);
}

/*
 * We have an SCB which has been processed by the
 * adapter; now we look to see how the operation
 * went.
 */
void
ahc_done(struct ahc_softc *ahc, struct scb *scb)
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	u_long s;

	xs = scb->xs;
	periph = xs->xs_periph;
	LIST_REMOVE(scb, pending_links);
	if ((scb->flags & SCB_UNTAGGEDQ) != 0) {
		struct scb_tailq *untagged_q;
		int target_offset;

		target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
		untagged_q = &ahc->untagged_queues[target_offset];
		TAILQ_REMOVE(untagged_q, scb, links.tqe);
		scb->flags &= ~SCB_UNTAGGEDQ;
		ahc_run_untagged_queue(ahc, untagged_q);
	}

	callout_stop(&scb->xs->xs_callout);

	if (xs->datalen) {
		int op;

		if (xs->xs_control & XS_CTL_DATA_IN)
			op = BUS_DMASYNC_POSTREAD;
		else
			op = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(ahc->parent_dmat, scb->dmamap, 0,
		    scb->dmamap->dm_mapsize, op);
		bus_dmamap_unload(ahc->parent_dmat, scb->dmamap);
	}

	/*
	 * If the recovery SCB completes, then we have
	 * recovered from the timeout.
	 */
	if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
		struct scb *list_scb;

		/*
		 * We were able to complete the command successfully,
		 * so reinstate the timeouts for all other pending
		 * commands.
		 */
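		/*
		 * The scsipi timeout is in milliseconds; callout_reset()
		 * wants ticks.  For large values divide before multiplying
		 * by hz so that (timeout * hz) cannot overflow an int:
		 * e.g. with hz = 100, 30000 ms -> (30000 * 100) / 1000
		 * = 3000 ticks.
		 */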
		LIST_FOREACH(list_scb, &ahc->pending_scbs, pending_links) {
			if (!(list_scb->xs->xs_control & XS_CTL_POLL)) {
				callout_reset(&list_scb->xs->xs_callout,
				    (list_scb->xs->timeout > 1000000) ?
				    (list_scb->xs->timeout / 1000) * hz :
				    (list_scb->xs->timeout * hz) / 1000,
				    ahc_timeout, list_scb);
			}
		}

		if (ahc_get_transaction_status(scb) == CAM_BDR_SENT
		 || ahc_get_transaction_status(scb) == CAM_REQ_ABORTED)
			ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
		scsipi_printaddr(xs->xs_periph);
		printf("%s: no longer in timeout, status = %x\n",
		    ahc_name(ahc), xs->status);
	}

	if (xs->error != XS_NOERROR) {
		/* Don't clobber any existing error state */
	} else if ((scb->flags & SCB_SENSE) != 0) {
		/*
		 * We performed autosense retrieval.
		 *
		 * Zero any sense not transferred by the
		 * device.  The SCSI spec mandates that any
		 * untransferred data should be assumed to be
		 * zero.  Complete the 'bounce' of sense information
		 * through buffers accessible via bus-space by
		 * copying it into the client's xs->sense.
		 */
		memset(&xs->sense.scsi_sense, 0, sizeof(xs->sense.scsi_sense));
		memcpy(&xs->sense.scsi_sense,
		    ahc_get_sense_buf(ahc, scb),
		    sizeof(xs->sense.scsi_sense));
		xs->error = XS_SENSE;
	}
	if (scb->flags & SCB_FREEZE_QUEUE) {
		scsipi_periph_thaw(periph, 1);
		scb->flags &= ~SCB_FREEZE_QUEUE;
	}

	ahc_lock(ahc, &s);
	ahc_free_scb(ahc, scb);
	ahc_unlock(ahc, &s);

	scsipi_done(xs);
}

static int
ahc_ioctl(struct scsipi_channel *channel, u_long cmd, void *addr,
    int flag, struct proc *p)
{
	struct ahc_softc *ahc = device_private(channel->chan_adapter->adapt_dev);
	int s, ret = ENOTTY;

	switch (cmd) {
	case SCBUSIORESET:
		s = splbio();
		ahc_reset_channel(ahc, channel->chan_channel == 1 ? 'B' : 'A',
		    TRUE);
		splx(s);
		ret = 0;
		break;
	default:
		break;
	}

	return ret;
}

static void
ahc_action(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
{
	struct ahc_softc *ahc;
	int s;
	struct ahc_initiator_tinfo *tinfo;
	struct ahc_tmode_tstate *tstate;

	ahc = device_private(chan->chan_adapter->adapt_dev);

	switch (req) {

	case ADAPTER_REQ_RUN_XFER:
	{
		struct scsipi_xfer *xs;
		struct scsipi_periph *periph;
		struct scb *scb;
		struct hardware_scb *hscb;
		u_int target_id;
		u_int our_id;
		u_long ss;

		xs = arg;
		periph = xs->xs_periph;

		target_id = periph->periph_target;
		our_id = ahc->our_id;

		SC_DEBUG(xs->xs_periph, SCSIPI_DB3, ("ahc_action\n"));

		/*
		 * Get an SCB to use.
		 */
		ahc_lock(ahc, &ss);
		if ((scb = ahc_get_scb(ahc)) == NULL) {
			xs->error = XS_RESOURCE_SHORTAGE;
			ahc_unlock(ahc, &ss);
			scsipi_done(xs);
			return;
		}
		ahc_unlock(ahc, &ss);

		hscb = scb->hscb;

		SC_DEBUG(periph, SCSIPI_DB3, ("start scb(%p)\n", scb));
		scb->xs = xs;

		/*
		 * Put all the arguments for the xfer in the scb
		 */
		hscb->control = 0;
		hscb->scsiid = BUILD_SCSIID(ahc, 0, target_id, our_id);
		hscb->lun = periph->periph_lun;
		if (xs->xs_control & XS_CTL_RESET) {
			hscb->cdb_len = 0;
			scb->flags |= SCB_DEVICE_RESET;
			hscb->control |= MK_MESSAGE;
			ahc_execute_scb(scb, NULL, 0);
		}

		ahc_setup_data(ahc, xs, scb);

		break;
	}
	case ADAPTER_REQ_GROW_RESOURCES:
#ifdef AHC_DEBUG
		printf("%s: ADAPTER_REQ_GROW_RESOURCES\n", ahc_name(ahc));
#endif
		chan->chan_adapter->adapt_openings += ahc_alloc_scbs(ahc);
		if (ahc->scb_data->numscbs >= AHC_SCB_MAX_ALLOC)
			chan->chan_flags &= ~SCSIPI_CHAN_CANGROW;
		return;

	case ADAPTER_REQ_SET_XFER_MODE:
	{
		struct scsipi_xfer_mode *xm = arg;
		struct ahc_devinfo devinfo;
		int target_id, our_id, first;
		u_int width;
		char channel;
		u_int ppr_options = 0, period, offset;
		struct ahc_syncrate *syncrate;
		uint16_t old_autoneg;

		target_id = xm->xm_target;
		our_id = chan->chan_id;
		channel = (chan->chan_channel == 1) ? 'B' : 'A';
		s = splbio();
		tinfo = ahc_fetch_transinfo(ahc, channel, our_id, target_id,
		    &tstate);
		ahc_compile_devinfo(&devinfo, our_id, target_id,
		    0, channel, ROLE_INITIATOR);

		old_autoneg = tstate->auto_negotiate;

		/*
		 * XXX since the period and offset are not provided here,
		 * fake things by forcing a renegotiation using the user
		 * settings if this is called for the first time (i.e.
		 * during probe). Also, cap various values at the user
		 * values, assuming that the user set it up that way.
		 */
		if (ahc->inited_target[target_id] == 0) {
			period = tinfo->user.period;
			offset = tinfo->user.offset;
			ppr_options = tinfo->user.ppr_options;
			width = tinfo->user.width;
			tstate->tagenable |=
			    (ahc->user_tagenable & devinfo.target_mask);
			tstate->discenable |=
			    (ahc->user_discenable & devinfo.target_mask);
			ahc->inited_target[target_id] = 1;
			first = 1;
		} else
			first = 0;

		if (xm->xm_mode & (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
			width = MSG_EXT_WDTR_BUS_16_BIT;
		else
			width = MSG_EXT_WDTR_BUS_8_BIT;

		ahc_validate_width(ahc, NULL, &width, ROLE_UNKNOWN);
		if (width > tinfo->user.width)
			width = tinfo->user.width;
		ahc_set_width(ahc, &devinfo, width, AHC_TRANS_GOAL, FALSE);

		if (!(xm->xm_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT))) {
			period = 0;
			offset = 0;
			ppr_options = 0;
		}

		if ((xm->xm_mode & PERIPH_CAP_DT) &&
		    (ppr_options & MSG_EXT_PPR_DT_REQ))
			ppr_options |= MSG_EXT_PPR_DT_REQ;
		else
			ppr_options &= ~MSG_EXT_PPR_DT_REQ;
		if ((tstate->discenable & devinfo.target_mask) == 0 ||
		    (tstate->tagenable & devinfo.target_mask) == 0)
			ppr_options &= ~MSG_EXT_PPR_IU_REQ;

		if ((xm->xm_mode & PERIPH_CAP_TQING) &&
		    (ahc->user_tagenable & devinfo.target_mask))
			tstate->tagenable |= devinfo.target_mask;
		else
			tstate->tagenable &= ~devinfo.target_mask;

		syncrate = ahc_find_syncrate(ahc, &period, &ppr_options,
		    AHC_SYNCRATE_MAX);
		ahc_validate_offset(ahc, NULL, syncrate, &offset,
		    width, ROLE_UNKNOWN);

		if (offset == 0) {
			period = 0;
			ppr_options = 0;
		}

		if (ppr_options != 0
		    && tinfo->user.transport_version >= 3) {
			tinfo->goal.transport_version =
			    tinfo->user.transport_version;
			tinfo->curr.transport_version =
			    tinfo->user.transport_version;
		}

		ahc_set_syncrate(ahc, &devinfo, syncrate, period, offset,
		    ppr_options, AHC_TRANS_GOAL, FALSE);

		/*
		 * If this is the first request, and no negotiation is
		 * needed, just confirm the state to the scsipi layer,
		 * so that it can print a message.
		 */
		if (old_autoneg == tstate->auto_negotiate && first) {
			xm->xm_mode = 0;
			xm->xm_period = tinfo->curr.period;
			xm->xm_offset = tinfo->curr.offset;
			if (tinfo->curr.width == MSG_EXT_WDTR_BUS_16_BIT)
				xm->xm_mode |= PERIPH_CAP_WIDE16;
			if (tinfo->curr.period)
				xm->xm_mode |= PERIPH_CAP_SYNC;
			if (tstate->tagenable & devinfo.target_mask)
				xm->xm_mode |= PERIPH_CAP_TQING;
			if (tinfo->curr.ppr_options & MSG_EXT_PPR_DT_REQ)
				xm->xm_mode |= PERIPH_CAP_DT;
			scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, xm);
		}
		splx(s);
	}
	}

	return;
}

static void
ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments)
{
	struct scb *scb;
	struct scsipi_xfer *xs;
	struct ahc_softc *ahc;
	struct ahc_initiator_tinfo *tinfo;
	struct ahc_tmode_tstate *tstate;

	u_int mask;
	long s;

	scb = (struct scb *)arg;
	xs = scb->xs;
	xs->error = 0;
	xs->status = 0;
	xs->xs_status = 0;
	ahc = device_private(
	    xs->xs_periph->periph_channel->chan_adapter->adapt_dev);

	if (nsegments != 0) {
		struct ahc_dma_seg *sg;
		bus_dma_segment_t *end_seg;
		int op;

		end_seg = dm_segs + nsegments;

		/* Copy the segments into our SG list */
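		/*
		 * Each element packs the low 32 address bits into
		 * sg->addr, little-endian (ahc_htole32()).  On parts
		 * with the 39-bit addressing feature, address bits
		 * 32-38 travel in the upper bits of sg->len
		 * (AHC_SG_HIGH_ADDR_MASK); the low bits of sg->len
		 * hold the segment length.
		 */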
		sg = scb->sg_list;
		while (dm_segs < end_seg) {
			uint32_t len;

			sg->addr = ahc_htole32(dm_segs->ds_addr);
			len = dm_segs->ds_len |
			    ((dm_segs->ds_addr >> 8) & AHC_SG_HIGH_ADDR_MASK);
			sg->len = ahc_htole32(len);
			sg++;
			dm_segs++;
		}

		/*
		 * Note where to find the SG entries in bus space.
		 * We also set the full residual flag which the
		 * sequencer will clear as soon as a data transfer
		 * occurs.
		 */
		scb->hscb->sgptr = ahc_htole32(scb->sg_list_phys|SG_FULL_RESID);

		if (xs->xs_control & XS_CTL_DATA_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;

		bus_dmamap_sync(ahc->parent_dmat, scb->dmamap, 0,
		    scb->dmamap->dm_mapsize, op);

		sg--;
		sg->len |= ahc_htole32(AHC_DMA_LAST_SEG);

		/* Copy the first SG into the "current" data pointer area */
		scb->hscb->dataptr = scb->sg_list->addr;
		scb->hscb->datacnt = scb->sg_list->len;
	} else {
		scb->hscb->sgptr = ahc_htole32(SG_LIST_NULL);
		scb->hscb->dataptr = 0;
		scb->hscb->datacnt = 0;
	}

	scb->sg_count = nsegments;

	ahc_lock(ahc, &s);

	/*
	 * Last time we need to check if this SCB needs to
	 * be aborted.
	 */
	if (xs->xs_status & XS_STS_DONE) {
		if (nsegments != 0)
			bus_dmamap_unload(ahc->parent_dmat, scb->dmamap);
		ahc_free_scb(ahc, scb);
		ahc_unlock(ahc, &s);
		scsipi_done(xs);
		return;
	}

	tinfo = ahc_fetch_transinfo(ahc, ahc->channel,
	    SCSIID_OUR_ID(scb->hscb->scsiid),
	    SCSIID_TARGET(ahc, scb->hscb->scsiid),
	    &tstate);

	mask = SCB_GET_TARGET_MASK(ahc, scb);
	scb->hscb->scsirate = tinfo->scsirate;
	scb->hscb->scsioffset = tinfo->curr.offset;

	if ((tstate->ultraenb & mask) != 0)
		scb->hscb->control |= ULTRAENB;

	if ((tstate->discenable & mask) != 0)
		scb->hscb->control |= DISCENB;

	if (xs->xs_tag_type)
		scb->hscb->control |= xs->xs_tag_type;

#if 1	/* This looks like it makes sense at first, but it can loop */
	if ((xs->xs_control & XS_CTL_DISCOVERY) &&
	    (tinfo->goal.width == 0
	     && tinfo->goal.offset == 0
	     && tinfo->goal.ppr_options == 0)) {
		scb->flags |= SCB_NEGOTIATE;
		scb->hscb->control |= MK_MESSAGE;
	} else
#endif
	if ((tstate->auto_negotiate & mask) != 0) {
		scb->flags |= SCB_AUTO_NEGOTIATE;
		scb->hscb->control |= MK_MESSAGE;
	}

	LIST_INSERT_HEAD(&ahc->pending_scbs, scb, pending_links);

	if (!(xs->xs_control & XS_CTL_POLL)) {
		callout_reset(&scb->xs->xs_callout, xs->timeout > 1000000 ?
		    (xs->timeout / 1000) * hz : (xs->timeout * hz) / 1000,
		    ahc_timeout, scb);
	}

	/*
	 * We only allow one untagged transaction
	 * per target in the initiator role unless
	 * we are storing a full busy target *lun*
	 * table in SCB space.
	 */
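	/*
	 * Only the head of each per-target untagged queue is handed
	 * to the sequencer; any followers are started from
	 * ahc_run_untagged_queue() when the head completes (see
	 * ahc_done() above).
	 */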
	if ((scb->hscb->control & (TARGET_SCB|TAG_ENB)) == 0
	    && (ahc->flags & AHC_SCB_BTT) == 0) {
		struct scb_tailq *untagged_q;
		int target_offset;

		target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
		untagged_q = &(ahc->untagged_queues[target_offset]);
		TAILQ_INSERT_TAIL(untagged_q, scb, links.tqe);
		scb->flags |= SCB_UNTAGGEDQ;
		if (TAILQ_FIRST(untagged_q) != scb) {
			ahc_unlock(ahc, &s);
			return;
		}
	}
	scb->flags |= SCB_ACTIVE;

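	/*
	 * The sequencer must be paused before we touch SCBPTR or
	 * TARG_IMMEDIATE_SCB behind its back; ahc_pause()/ahc_unpause()
	 * bracket the register accesses below.
	 */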
	if ((scb->flags & SCB_TARGET_IMMEDIATE) != 0) {
		/* Define a mapping from our tag to the SCB. */
		ahc->scb_data->scbindex[scb->hscb->tag] = scb;
		ahc_pause(ahc);
		if ((ahc->flags & AHC_PAGESCBS) == 0)
			ahc_outb(ahc, SCBPTR, scb->hscb->tag);
		ahc_outb(ahc, TARG_IMMEDIATE_SCB, scb->hscb->tag);
		ahc_unpause(ahc);
	} else {
		ahc_queue_scb(ahc, scb);
	}

	if (!(xs->xs_control & XS_CTL_POLL)) {
		ahc_unlock(ahc, &s);
		return;
	}

	/*
	 * If we can't use interrupts, poll for completion
	 */

	SC_DEBUG(xs->xs_periph, SCSIPI_DB3, ("cmd_poll\n"));
	do {
		if (ahc_poll(ahc, xs->timeout)) {
			if (!(xs->xs_control & XS_CTL_SILENT))
				printf("cmd fail\n");
			ahc_timeout(scb);
			break;
		}
	} while (!(xs->xs_status & XS_STS_DONE));
	ahc_unlock(ahc, &s);

	return;
}

static int
ahc_poll(struct ahc_softc *ahc, int wait)
{
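	/*
	 * Busy-wait for an interrupt to be posted.  'wait' is the
	 * timeout in milliseconds; DELAY(1000) burns one millisecond
	 * per iteration.  Used only for polled (XS_CTL_POLL) requests.
	 */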
	while (--wait) {
		DELAY(1000);
		if (ahc_inb(ahc, INTSTAT) & INT_PEND)
			break;
	}

	if (wait == 0) {
		printf("%s: board is not responding\n", ahc_name(ahc));
		return (EIO);
	}

	ahc_intr((void *)ahc);
	return (0);
}

static void
ahc_setup_data(struct ahc_softc *ahc, struct scsipi_xfer *xs,
    struct scb *scb)
{
	struct hardware_scb *hscb;

	hscb = scb->hscb;
	xs->resid = xs->status = 0;

	hscb->cdb_len = xs->cmdlen;
	if (hscb->cdb_len > sizeof(hscb->cdb32)) {
		u_long s;

		ahc_set_transaction_status(scb, CAM_REQ_INVALID);
		ahc_lock(ahc, &s);
		ahc_free_scb(ahc, scb);
		ahc_unlock(ahc, &s);
		scsipi_done(xs);
		return;
	}

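	/*
	 * CDBs longer than 12 bytes do not fit in the hardware SCB's
	 * shared data area, so they are kept in the separate cdb32
	 * array and fetched indirectly by the sequencer
	 * (SCB_CDB32_PTR).
	 */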
	if (hscb->cdb_len > 12) {
		memcpy(hscb->cdb32, xs->cmd, hscb->cdb_len);
		scb->flags |= SCB_CDB32_PTR;
	} else {
		memcpy(hscb->shared_data.cdb, xs->cmd, hscb->cdb_len);
	}

	/* Only use S/G if there is a transfer */
	if (xs->datalen) {
		int error;

		error = bus_dmamap_load(ahc->parent_dmat,
		    scb->dmamap, xs->data,
		    xs->datalen, NULL,
		    ((xs->xs_control & XS_CTL_NOSLEEP) ?
			BUS_DMA_NOWAIT : BUS_DMA_WAITOK) |
		    BUS_DMA_STREAMING |
		    ((xs->xs_control & XS_CTL_DATA_IN) ?
			BUS_DMA_READ : BUS_DMA_WRITE));
		if (error) {
#ifdef AHC_DEBUG
			printf("%s: in ahc_setup_data(): bus_dmamap_load() "
			    "= %d\n", ahc_name(ahc), error);
#endif
			xs->error = XS_RESOURCE_SHORTAGE;
			scsipi_done(xs);
			return;
		}
		ahc_execute_scb(scb, scb->dmamap->dm_segs,
		    scb->dmamap->dm_nsegs);
	} else {
		ahc_execute_scb(scb, NULL, 0);
	}
}

static void
ahc_set_recoveryscb(struct ahc_softc *ahc, struct scb *scb)
{

	if ((scb->flags & SCB_RECOVERY_SCB) == 0) {
		struct scb *list_scb;

		scb->flags |= SCB_RECOVERY_SCB;

		/*
		 * Take all queued, but not sent SCBs out of the equation.
		 * Also ensure that no new CCBs are queued to us while we
		 * try to fix this problem.
		 */
		scsipi_channel_freeze(&ahc->sc_channel, 1);
		if (ahc->features & AHC_TWIN)
			scsipi_channel_freeze(&ahc->sc_channel_b, 1);

		/*
		 * Go through all of our pending SCBs and remove
		 * any scheduled timeouts for them. We will reschedule
		 * them after we've successfully fixed this problem.
		 */
		LIST_FOREACH(list_scb, &ahc->pending_scbs, pending_links) {
			callout_stop(&list_scb->xs->xs_callout);
		}
	}
}

void
ahc_timeout(void *arg)
{
	struct scb *scb;
	struct ahc_softc *ahc;
	long s;
	int found;
	u_int last_phase;
	int target;
	int lun;
	int i;
	char channel;

	scb = (struct scb *)arg;
	ahc = (struct ahc_softc *)scb->ahc_softc;

	ahc_lock(ahc, &s);

	ahc_pause_and_flushwork(ahc);

	if ((scb->flags & SCB_ACTIVE) == 0) {
		/* A previous timeout already took care of this SCB */
		printf("%s: Timed-out SCB already complete. "
		    "Interrupts may not be functioning.\n", ahc_name(ahc));
		ahc_unpause(ahc);
		ahc_unlock(ahc, &s);
		return;
	}

	target = SCB_GET_TARGET(ahc, scb);
	channel = SCB_GET_CHANNEL(ahc, scb);
	lun = SCB_GET_LUN(scb);

	ahc_print_path(ahc, scb);
	printf("SCB 0x%x - timed out\n", scb->hscb->tag);
	ahc_dump_card_state(ahc);
	last_phase = ahc_inb(ahc, LASTPHASE);
	if (scb->sg_count > 0) {
		for (i = 0; i < scb->sg_count; i++) {
			printf("sg[%d] - Addr 0x%x : Length %d\n",
			    i,
			    scb->sg_list[i].addr,
			    scb->sg_list[i].len & AHC_SG_LEN_MASK);
		}
	}
	if (scb->flags & (SCB_DEVICE_RESET|SCB_ABORT)) {
		/*
		 * Been down this road before.
		 * Do a full bus reset.
		 */
bus_reset:
		ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
		found = ahc_reset_channel(ahc, channel, /*Initiate Reset*/TRUE);
		printf("%s: Issued Channel %c Bus Reset. "
		    "%d SCBs aborted\n", ahc_name(ahc), channel, found);
	} else {
		/*
		 * If we are a target, transition to bus free and report
		 * the timeout.
		 *
		 * The target/initiator that is holding up the bus may not
		 * be the same as the one that triggered this timeout
		 * (different commands have different timeout lengths).
		 * If the bus is idle and we are acting as the initiator
		 * for this request, queue a BDR message to the timed out
		 * target. Otherwise, if the timed out transaction is
		 * active:
		 *   Initiator transaction:
		 *	Stuff the message buffer with a BDR message and assert
		 *	ATN in the hopes that the target will let go of the bus
		 *	and go to the mesgout phase. If this fails, we'll
		 *	get another timeout 2 seconds later which will attempt
		 *	a bus reset.
		 *
		 *   Target transaction:
		 *	Transition to BUS FREE and report the error.
		 *	It's good to be the target!
		 */
		u_int active_scb_index;
		u_int saved_scbptr;

		saved_scbptr = ahc_inb(ahc, SCBPTR);
		active_scb_index = ahc_inb(ahc, SCB_TAG);

		if ((ahc_inb(ahc, SEQ_FLAGS) & NOT_IDENTIFIED) == 0
		    && (active_scb_index < ahc->scb_data->numscbs)) {
			struct scb *active_scb;

			/*
			 * If the active SCB is not ours, assume that
			 * the active SCB has a longer timeout than
			 * the timed-out SCB, and wait for the active
			 * SCB to timeout.
			 */
			active_scb = ahc_lookup_scb(ahc, active_scb_index);
			if (active_scb != scb) {
				uint64_t newtimeout;

				ahc_print_path(ahc, scb);
				printf("Other SCB Timeout%s",
				    (scb->flags & SCB_OTHERTCL_TIMEOUT) != 0
				    ? " again\n" : "\n");
				scb->flags |= SCB_OTHERTCL_TIMEOUT;
				newtimeout = MAX(active_scb->xs->timeout,
				    scb->xs->timeout);
				callout_reset(&scb->xs->xs_callout,
				    newtimeout > 1000000 ?
				    (newtimeout / 1000) * hz :
				    (newtimeout * hz) / 1000,
				    ahc_timeout, scb);
				ahc_unpause(ahc);
				ahc_unlock(ahc, &s);
				return;
			}

			/* It's us */
			if ((scb->flags & SCB_TARGET_SCB) != 0) {

				/*
				 * Send back any queued up transactions
				 * and properly record the error condition.
				 */
				ahc_abort_scbs(ahc, SCB_GET_TARGET(ahc, scb),
				    SCB_GET_CHANNEL(ahc, scb),
				    SCB_GET_LUN(scb),
				    scb->hscb->tag,
				    ROLE_TARGET,
				    CAM_CMD_TIMEOUT);

				/* Will clear us from the bus */
				ahc_restart(ahc);
				ahc_unlock(ahc, &s);
				return;
			}

			ahc_set_recoveryscb(ahc, active_scb);
			ahc_outb(ahc, MSG_OUT, HOST_MSG);
			ahc_outb(ahc, SCSISIGO, last_phase|ATNO);
			ahc_print_path(ahc, active_scb);
			printf("BDR message in message buffer\n");
			active_scb->flags |= SCB_DEVICE_RESET;
			callout_reset(&active_scb->xs->xs_callout,
			    2 * hz, ahc_timeout, active_scb);
			ahc_unpause(ahc);
		} else {
			int disconnected;

			/* XXX Shouldn't panic. Just punt instead? */
			if ((scb->flags & SCB_TARGET_SCB) != 0)
				panic("Timed-out target SCB but bus idle");

			if (last_phase != P_BUSFREE
			    && (ahc_inb(ahc, SSTAT0) & TARGET) != 0) {
				/* XXX What happened to the SCB? */
				/* Hung target selection. Go to busfree */
				printf("%s: Hung target selection\n",
				    ahc_name(ahc));
				ahc_restart(ahc);
				ahc_unlock(ahc, &s);
				return;
			}

			if (ahc_search_qinfifo(ahc, target, channel, lun,
			    scb->hscb->tag, ROLE_INITIATOR,
			    /*status*/0, SEARCH_COUNT) > 0) {
				disconnected = FALSE;
			} else {
				disconnected = TRUE;
			}

			if (disconnected) {

				ahc_set_recoveryscb(ahc, scb);
				/*
				 * Actually re-queue this SCB in an attempt
				 * to select the device before it reconnects.
				 * In either case (selection or reselection),
				 * we will now issue a target reset to the
				 * timed-out device.
				 *
				 * Set the MK_MESSAGE control bit indicating
				 * that we desire to send a message. We
				 * also set the disconnected flag since
				 * in the paging case there is no guarantee
				 * that our SCB control byte matches the
				 * version on the card. We don't want the
				 * sequencer to abort the command thinking
				 * an unsolicited reselection occurred.
				 */
				scb->hscb->control |= MK_MESSAGE|DISCONNECTED;
				scb->flags |= SCB_DEVICE_RESET;

				/*
				 * Remove any cached copy of this SCB in the
				 * disconnected list in preparation for the
				 * queuing of our abort SCB. We use the
				 * same element in the SCB, SCB_NEXT, for
				 * both the qinfifo and the disconnected list.
				 */
				ahc_search_disc_list(ahc, target, channel,
				    lun, scb->hscb->tag,
				    /*stop_on_first*/TRUE,
				    /*remove*/TRUE,
				    /*save_state*/FALSE);

				/*
				 * In the non-paging case, the sequencer will
				 * never re-reference the in-core SCB.
				 * To make sure we are notified during
				 * reselection, set the MK_MESSAGE flag in
				 * the card's copy of the SCB.
				 */
				if ((ahc->flags & AHC_PAGESCBS) == 0) {
					ahc_outb(ahc, SCBPTR, scb->hscb->tag);
					ahc_outb(ahc, SCB_CONTROL,
					    ahc_inb(ahc, SCB_CONTROL)
					    | MK_MESSAGE);
				}

				/*
				 * Clear out any entries in the QINFIFO first
				 * so we are the next SCB for this target
				 * to run.
				 */
				ahc_search_qinfifo(ahc,
				    SCB_GET_TARGET(ahc, scb),
				    channel, SCB_GET_LUN(scb),
				    SCB_LIST_NULL,
				    ROLE_INITIATOR,
				    CAM_REQUEUE_REQ,
				    SEARCH_COMPLETE);
				ahc_print_path(ahc, scb);
				printf("Queuing a BDR SCB\n");
				ahc_qinfifo_requeue_tail(ahc, scb);
				ahc_outb(ahc, SCBPTR, saved_scbptr);
				callout_reset(&scb->xs->xs_callout, 2 * hz,
				    ahc_timeout, scb);
				ahc_unpause(ahc);
			} else {
				/* Go "immediately" to the bus reset */
				/* This shouldn't happen */
				ahc_set_recoveryscb(ahc, scb);
				ahc_print_path(ahc, scb);
				printf("SCB %d: Immediate reset. "
				    "Flags = 0x%x\n", scb->hscb->tag,
				    scb->flags);
				goto bus_reset;
			}
		}
	}
	ahc_unlock(ahc, &s);
}

void
ahc_platform_set_tags(struct ahc_softc *ahc,
    struct ahc_devinfo *devinfo, int enable)
{
	struct ahc_tmode_tstate *tstate;

	ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
	    devinfo->target, &tstate);

	if (enable)
		tstate->tagenable |= devinfo->target_mask;
	else
		tstate->tagenable &= ~devinfo->target_mask;
}

int
ahc_platform_alloc(struct ahc_softc *ahc, void *platform_arg)
{
	if (sizeof(struct ahc_platform_data) == 0)
		return 0;
	ahc->platform_data = malloc(sizeof(struct ahc_platform_data), M_DEVBUF,
	    M_NOWAIT);
	if (ahc->platform_data == NULL)
		return (ENOMEM);
	return (0);
}

void
ahc_platform_free(struct ahc_softc *ahc)
{
	if (sizeof(struct ahc_platform_data) == 0)
		return;
	free(ahc->platform_data, M_DEVBUF);
}

int
ahc_softc_comp(struct ahc_softc *lahc, struct ahc_softc *rahc)
{
	return (0);
}

int
ahc_detach(struct ahc_softc *ahc, int flags)
{
	int rv = 0;

	ahc_intr_enable(ahc, FALSE);
	if (ahc->sc_child != NULL)
		rv = config_detach(ahc->sc_child, flags);
	if (rv == 0 && ahc->sc_child_b != NULL)
		rv = config_detach(ahc->sc_child_b, flags);

	pmf_device_deregister(ahc->sc_dev);
	ahc_free(ahc);

	return (rv);
}


void
ahc_send_async(struct ahc_softc *ahc, char channel, u_int target, u_int lun,
    ac_code code, void *opt_arg)
{
	struct ahc_tmode_tstate *tstate;
	struct ahc_initiator_tinfo *tinfo;
	struct ahc_devinfo devinfo;
	struct scsipi_channel *chan;
	struct scsipi_xfer_mode xm;

	chan = channel == 'B' ? &ahc->sc_channel_b : &ahc->sc_channel;
	switch (code) {
	case AC_TRANSFER_NEG:
		tinfo = ahc_fetch_transinfo(ahc, channel, ahc->our_id, target,
		    &tstate);
		ahc_compile_devinfo(&devinfo, ahc->our_id, target, lun,
		    channel, ROLE_UNKNOWN);
		/*
		 * Don't bother if negotiating. XXX?
		 */
		if (tinfo->curr.period != tinfo->goal.period
		    || tinfo->curr.width != tinfo->goal.width
		    || tinfo->curr.offset != tinfo->goal.offset
		    || tinfo->curr.ppr_options != tinfo->goal.ppr_options)
			break;
		xm.xm_target = target;
		xm.xm_mode = 0;
		xm.xm_period = tinfo->curr.period;
		xm.xm_offset = tinfo->curr.offset;
		if (tinfo->curr.width == MSG_EXT_WDTR_BUS_16_BIT)
			xm.xm_mode |= PERIPH_CAP_WIDE16;
		if (tinfo->curr.period)
			xm.xm_mode |= PERIPH_CAP_SYNC;
		if (tstate->tagenable & devinfo.target_mask)
			xm.xm_mode |= PERIPH_CAP_TQING;
		if (tinfo->curr.ppr_options & MSG_EXT_PPR_DT_REQ)
			xm.xm_mode |= PERIPH_CAP_DT;
		scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, &xm);
		break;
	case AC_BUS_RESET:
		scsipi_async_event(chan, ASYNC_EVENT_RESET, NULL);
		/* FALLTHROUGH */
	case AC_SENT_BDR:
	default:
		break;
	}
}