aic79xx_osm.c revision 1.26 1 /* $NetBSD: aic79xx_osm.c,v 1.26 2009/09/05 12:43:56 tsutsui Exp $ */
2
3 /*
4 * Bus independent NetBSD shim for the aic7xxx based adaptec SCSI controllers
5 *
6 * Copyright (c) 1994-2002 Justin T. Gibbs.
7 * Copyright (c) 2001-2002 Adaptec Inc.
8 * All rights reserved.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions, and the following disclaimer,
15 * without modification.
16 * 2. The name of the author may not be used to endorse or promote products
17 * derived from this software without specific prior written permission.
18 *
19 * Alternatively, this software may be distributed under the terms of the
20 * GNU Public License ("GPL").
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
26 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * //depot/aic7xxx/freebsd/dev/aic7xxx/aic79xx_osm.c#26 $
35 *
36 * $FreeBSD: src/sys/dev/aic7xxx/aic79xx_osm.c,v 1.11 2003/05/04 00:20:07 gibbs Exp $
37 */
38 /*
39 * Ported from FreeBSD by Pascal Renauld, Network Storage Solutions, Inc.
40 * - April 2003
41 */
42
43 #include <sys/cdefs.h>
44 __KERNEL_RCSID(0, "$NetBSD: aic79xx_osm.c,v 1.26 2009/09/05 12:43:56 tsutsui Exp $");
45
46 #include <dev/ic/aic79xx_osm.h>
47 #include <dev/ic/aic79xx_inline.h>
48
49 #ifndef AHD_TMODE_ENABLE
50 #define AHD_TMODE_ENABLE 0
51 #endif
52
53 static int ahd_ioctl(struct scsipi_channel *channel, u_long cmd,
54 void *addr, int flag, struct proc *p);
55 static void ahd_action(struct scsipi_channel *chan,
56 scsipi_adapter_req_t req, void *arg);
57 static void ahd_execute_scb(void *arg, bus_dma_segment_t *dm_segs,
58 int nsegments);
59 static int ahd_poll(struct ahd_softc *ahd, int wait);
60 static void ahd_setup_data(struct ahd_softc *ahd, struct scsipi_xfer *xs,
61 struct scb *scb);
62
63 #if NOT_YET
64 static void ahd_set_recoveryscb(struct ahd_softc *ahd, struct scb *scb);
65 #endif
66
67 static bool ahd_pmf_suspend(device_t PMF_FN_PROTO);
68 static bool ahd_pmf_resume(device_t PMF_FN_PROTO);
69 static bool ahd_pmf_shutdown(device_t, int);
70
71 /*
72 * Attach all the sub-devices we can find
73 */
int
ahd_attach(struct ahd_softc *ahd)
{
	int s;
	char ahd_info[256];

	/* Announce the controller on the console before attaching children. */
	ahd_controller_info(ahd, ahd_info, sizeof(ahd_info));
	printf("%s: %s\n", ahd_name(ahd), ahd_info);

	ahd_lock(ahd, &s);

	/*
	 * Fill in the scsipi_adapter: one channel, with our ioctl,
	 * minphys and request entry points.
	 */
	ahd->sc_adapter.adapt_dev = &ahd->sc_dev;
	ahd->sc_adapter.adapt_nchannels = 1;

	/* One SCB is held back from the pool for internal use. */
	ahd->sc_adapter.adapt_openings = ahd->scb_data.numscbs - 1;
	ahd->sc_adapter.adapt_max_periph = 32;

	ahd->sc_adapter.adapt_ioctl = ahd_ioctl;
	ahd->sc_adapter.adapt_minphys = ahd_minphys;
	ahd->sc_adapter.adapt_request = ahd_action;

	/*
	 * Fill in the scsipi_channel.  The SCB pool can be grown on
	 * demand, hence SCSIPI_CHAN_CANGROW (cleared again in
	 * ahd_action() once AHD_SCB_MAX_ALLOC is reached).
	 */
	ahd->sc_channel.chan_adapter = &ahd->sc_adapter;
	ahd->sc_channel.chan_bustype = &scsi_bustype;
	ahd->sc_channel.chan_channel = 0;
	ahd->sc_channel.chan_ntargets = AHD_NUM_TARGETS;
	ahd->sc_channel.chan_nluns = 8 /*AHD_NUM_LUNS*/;
	ahd->sc_channel.chan_id = ahd->our_id;
	ahd->sc_channel.chan_flags |= SCSIPI_CHAN_CANGROW;

	/* Attach the scsibus child. */
	ahd->sc_child = config_found(&ahd->sc_dev, &ahd->sc_channel, scsiprint);

	ahd_intr_enable(ahd, TRUE);

	/* Optionally reset the bus, as requested by configuration. */
	if (ahd->flags & AHD_RESET_BUS_A)
		ahd_reset_channel(ahd, 'A', TRUE);

	if (!pmf_device_register1(&ahd->sc_dev,
	    ahd_pmf_suspend, ahd_pmf_resume, ahd_pmf_shutdown))
		aprint_error_dev(&ahd->sc_dev,
		    "couldn't establish power handler\n");

	ahd_unlock(ahd, &s);

	return (1);
}
119
120 static bool
121 ahd_pmf_suspend(device_t dev PMF_FN_ARGS)
122 {
123 struct ahd_softc *sc = device_private(dev);
124 #if 0
125 return (ahd_suspend(sc) == 0);
126 #else
127 ahd_shutdown(sc);
128 return true;
129 #endif
130 }
131
/*
 * PMF resume hook.  Resume support is not implemented; report success
 * unconditionally.  The disabled branch shows the intended call once
 * ahd_resume() is available.
 */
static bool
ahd_pmf_resume(device_t dev PMF_FN_ARGS)
{
#if 0
	struct ahd_softc *sc = device_private(dev);

	return (ahd_resume(sc) == 0);
#else
	return true;
#endif
}
143
144 static bool
145 ahd_pmf_shutdown(device_t dev, int howto)
146 {
147 struct ahd_softc *sc = device_private(dev);
148
149 /* Disable all interrupt sources by resetting the controller */
150 ahd_shutdown(sc);
151
152 return true;
153 }
154
155 static int
156 ahd_ioctl(struct scsipi_channel *channel, u_long cmd,
157 void *addr, int flag, struct proc *p)
158 {
159 struct ahd_softc *ahd = (void *)channel->chan_adapter->adapt_dev;
160 int s, ret = ENOTTY;
161
162 switch (cmd) {
163 case SCBUSIORESET:
164 s = splbio();
165 ahd_reset_channel(ahd, channel->chan_channel == 1 ? 'B' : 'A', TRUE);
166 splx(s);
167 ret = 0;
168 break;
169 default:
170 break;
171 }
172
173 return ret;
174 }
175
176 /*
177 * Catch an interrupt from the adapter
178 */
/*
 * Platform interrupt wrapper: log the event and hand off to the core
 * interrupt handler.
 *
 * Fixed: the log message used "%s;" (semicolon) where every other
 * message in this file uses "%s:" — a typo in the separator.
 *
 * NOTE(review): this printf fires on every interrupt and is very
 * noisy; consider wrapping it in #ifdef AHD_DEBUG.
 */
void
ahd_platform_intr(void *arg)
{
	struct ahd_softc *ahd;

	ahd = arg;

	printf("%s: ahd_platform_intr\n", ahd_name(ahd));

	ahd_intr(ahd);
}
190
191 /*
192 * We have an scb which has been processed by the
193 * adaptor, now we look to see how the operation * went.
194 */
void
ahd_done(struct ahd_softc *ahd, struct scb *scb)
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	int s;

	/* The command is no longer outstanding on the controller. */
	LIST_REMOVE(scb, pending_links);

	xs = scb->xs;
	periph = xs->xs_periph;

	/* The transfer finished, so its watchdog no longer applies. */
	callout_stop(&scb->xs->xs_callout);

	if (xs->datalen) {
		int op;

		/* Post-sync and unload the data DMA map set up at start. */
		if (xs->xs_control & XS_CTL_DATA_IN)
			op = BUS_DMASYNC_POSTREAD;
		else
			op = BUS_DMASYNC_POSTWRITE;

		bus_dmamap_sync(ahd->parent_dmat, scb->dmamap, 0,
				scb->dmamap->dm_mapsize, op);
		bus_dmamap_unload(ahd->parent_dmat, scb->dmamap);
	}

	/*
	 * If the recovery SCB completes, we have to be
	 * out of our timeout.
	 */
	if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
		struct scb *list_scb;

		/*
		 * We were able to complete the command successfully,
		 * so reinstate the timeouts for all other pending
		 * commands.
		 */
		LIST_FOREACH(list_scb, &ahd->pending_scbs, pending_links) {
			struct scsipi_xfer *txs = list_scb->xs;

			if (!(txs->xs_control & XS_CTL_POLL)) {
				/*
				 * Convert ms to ticks; divide first for
				 * very large timeouts so timeout * hz
				 * cannot overflow.
				 */
				callout_reset(&txs->xs_callout,
				    (txs->timeout > 1000000) ?
				    (txs->timeout / 1000) * hz :
				    (txs->timeout * hz) / 1000,
				    ahd_timeout, list_scb);
			}
		}

		/*
		 * Flag the recovery command itself as timed out unless a
		 * more specific error was already recorded for it.
		 */
		if (ahd_get_transaction_status(scb) != XS_NOERROR)
			ahd_set_transaction_status(scb, XS_TIMEOUT);
		scsipi_printaddr(xs->xs_periph);
		printf("%s: no longer in timeout, status = %x\n",
		       ahd_name(ahd), xs->status);
	}

	if (xs->error != XS_NOERROR) {
		/* Don't clobber any existing error state */
	} else if ((xs->status == SCSI_STATUS_BUSY) ||
		   (xs->status == SCSI_STATUS_QUEUE_FULL)) {
		/* Tell the midlayer to retry BUSY/QUEUE FULL responses. */
		ahd_set_transaction_status(scb, XS_BUSY);
		printf("%s: drive (ID %d, LUN %d) queue full (SCB 0x%x)\n",
		       ahd_name(ahd), SCB_GET_TARGET(ahd,scb), SCB_GET_LUN(scb), SCB_GET_TAG(scb));
	} else if ((scb->flags & SCB_SENSE) != 0) {
		/*
		 * We performed autosense retrieval.
		 *
		 * zero the sense data before having
		 * the drive fill it.  The SCSI spec mandates
		 * that any untransferred data should be
		 * assumed to be zero.  Complete the 'bounce'
		 * of sense information through buffers accessible
		 * via bus-space by copying it into the clients
		 * csio.
		 */
		memset(&xs->sense.scsi_sense, 0, sizeof(xs->sense.scsi_sense));
		memcpy(&xs->sense.scsi_sense, ahd_get_sense_buf(ahd, scb),
		       sizeof(struct scsi_sense_data));

		ahd_set_transaction_status(scb, XS_SENSE);
	} else if ((scb->flags & SCB_PKT_SENSE) != 0) {
		struct scsi_status_iu_header *siu;
		u_int sense_len;
#ifdef AHD_DEBUG
		int i;
#endif
		/*
		 * Copy only the sense data into the provided buffer.
		 * In packetized mode the sense bytes follow the status
		 * IU header at SIU_SENSE_OFFSET().
		 */
		siu = (struct scsi_status_iu_header *)scb->sense_data;
		sense_len = MIN(scsi_4btoul(siu->sense_length),
				sizeof(xs->sense.scsi_sense));
		memset(&xs->sense.scsi_sense, 0, sizeof(xs->sense.scsi_sense));
		memcpy(&xs->sense.scsi_sense,
		       scb->sense_data + SIU_SENSE_OFFSET(siu), sense_len);
#ifdef AHD_DEBUG
		printf("Copied %d bytes of sense data offset %d:", sense_len,
		       SIU_SENSE_OFFSET(siu));
		for (i = 0; i < sense_len; i++)
			printf(" 0x%x", ((uint8_t *)&xs->sense.scsi_sense)[i]);
		printf("\n");
#endif
		ahd_set_transaction_status(scb, XS_SENSE);
	}

	/* Undo any periph queue freeze this command was responsible for. */
	if (scb->flags & SCB_FREEZE_QUEUE) {
		scsipi_periph_thaw(periph, 1);
		scb->flags &= ~SCB_FREEZE_QUEUE;
	}

	if (scb->flags & SCB_REQUEUE)
		ahd_set_transaction_status(scb, XS_REQUEUE);

	/* Return the SCB to the free pool, then complete the xfer. */
	ahd_lock(ahd, &s);
	ahd_free_scb(ahd, scb);
	ahd_unlock(ahd, &s);

	scsipi_done(xs);
}
316
317 static void
318 ahd_action(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
319 {
320 struct ahd_softc *ahd;
321 struct ahd_initiator_tinfo *tinfo;
322 struct ahd_tmode_tstate *tstate;
323
324 ahd = (void *)chan->chan_adapter->adapt_dev;
325
326 switch(req) {
327
328 case ADAPTER_REQ_RUN_XFER:
329 {
330 struct scsipi_xfer *xs;
331 struct scsipi_periph *periph;
332 struct scb *scb;
333 struct hardware_scb *hscb;
334 u_int target_id;
335 u_int our_id;
336 u_int col_idx;
337 char channel;
338 int s;
339
340 xs = arg;
341 periph = xs->xs_periph;
342
343 SC_DEBUG(periph, SCSIPI_DB3, ("ahd_action\n"));
344
345 target_id = periph->periph_target;
346 our_id = ahd->our_id;
347 channel = (chan->chan_channel == 1) ? 'B' : 'A';
348
349 /*
350 * get an scb to use.
351 */
352 ahd_lock(ahd, &s);
353 tinfo = ahd_fetch_transinfo(ahd, channel, our_id,
354 target_id, &tstate);
355
356 if (xs->xs_tag_type != 0 ||
357 (tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0)
358 col_idx = AHD_NEVER_COL_IDX;
359 else
360 col_idx = AHD_BUILD_COL_IDX(target_id,
361 periph->periph_lun);
362
363 if ((scb = ahd_get_scb(ahd, col_idx)) == NULL) {
364 xs->error = XS_RESOURCE_SHORTAGE;
365 ahd_unlock(ahd, &s);
366 scsipi_done(xs);
367 return;
368 }
369 ahd_unlock(ahd, &s);
370
371 hscb = scb->hscb;
372
373 SC_DEBUG(periph, SCSIPI_DB3, ("start scb(%p)\n", scb));
374 scb->xs = xs;
375
376 /*
377 * Put all the arguments for the xfer in the scb
378 */
379 hscb->control = 0;
380 hscb->scsiid = BUILD_SCSIID(ahd, sim, target_id, our_id);
381 hscb->lun = periph->periph_lun;
382 if (xs->xs_control & XS_CTL_RESET) {
383 hscb->cdb_len = 0;
384 scb->flags |= SCB_DEVICE_RESET;
385 hscb->control |= MK_MESSAGE;
386 hscb->task_management = SIU_TASKMGMT_LUN_RESET;
387 ahd_execute_scb(scb, NULL, 0);
388 } else {
389 hscb->task_management = 0;
390 }
391
392 ahd_setup_data(ahd, xs, scb);
393 break;
394 }
395
396 case ADAPTER_REQ_GROW_RESOURCES:
397 #ifdef AHC_DEBUG
398 printf("%s: ADAPTER_REQ_GROW_RESOURCES\n", ahd_name(ahd));
399 #endif
400 chan->chan_adapter->adapt_openings += ahd_alloc_scbs(ahd);
401 if (ahd->scb_data.numscbs >= AHD_SCB_MAX_ALLOC)
402 chan->chan_flags &= ~SCSIPI_CHAN_CANGROW;
403 break;
404
405 case ADAPTER_REQ_SET_XFER_MODE:
406 {
407 struct scsipi_xfer_mode *xm = arg;
408 struct ahd_devinfo devinfo;
409 int target_id, our_id, first;
410 u_int width;
411 int s;
412 char channel;
413 u_int ppr_options = 0, period, offset;
414 uint16_t old_autoneg;
415
416 target_id = xm->xm_target;
417 our_id = chan->chan_id;
418 channel = 'A';
419 s = splbio();
420 tinfo = ahd_fetch_transinfo(ahd, channel, our_id, target_id,
421 &tstate);
422 ahd_compile_devinfo(&devinfo, our_id, target_id,
423 0, channel, ROLE_INITIATOR);
424
425 old_autoneg = tstate->auto_negotiate;
426
427 /*
428 * XXX since the period and offset are not provided here,
429 * fake things by forcing a renegotiation using the user
430 * settings if this is called for the first time (i.e.
431 * during probe). Also, cap various values at the user
432 * values, assuming that the user set it up that way.
433 */
434 if (ahd->inited_target[target_id] == 0) {
435 period = tinfo->user.period;
436 offset = tinfo->user.offset;
437 ppr_options = tinfo->user.ppr_options;
438 width = tinfo->user.width;
439 tstate->tagenable |=
440 (ahd->user_tagenable & devinfo.target_mask);
441 tstate->discenable |=
442 (ahd->user_discenable & devinfo.target_mask);
443 ahd->inited_target[target_id] = 1;
444 first = 1;
445 } else
446 first = 0;
447
448 if (xm->xm_mode & (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
449 width = MSG_EXT_WDTR_BUS_16_BIT;
450 else
451 width = MSG_EXT_WDTR_BUS_8_BIT;
452
453 ahd_validate_width(ahd, NULL, &width, ROLE_UNKNOWN);
454 if (width > tinfo->user.width)
455 width = tinfo->user.width;
456 ahd_set_width(ahd, &devinfo, width, AHD_TRANS_GOAL, FALSE);
457
458 if (!(xm->xm_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT))) {
459 period = 0;
460 offset = 0;
461 ppr_options = 0;
462 }
463
464 if ((xm->xm_mode & PERIPH_CAP_DT) &&
465 (tinfo->user.ppr_options & MSG_EXT_PPR_DT_REQ))
466 ppr_options |= MSG_EXT_PPR_DT_REQ;
467 else
468 ppr_options &= ~MSG_EXT_PPR_DT_REQ;
469
470 if ((tstate->discenable & devinfo.target_mask) == 0 ||
471 (tstate->tagenable & devinfo.target_mask) == 0)
472 ppr_options &= ~MSG_EXT_PPR_IU_REQ;
473
474 if ((xm->xm_mode & PERIPH_CAP_TQING) &&
475 (ahd->user_tagenable & devinfo.target_mask))
476 tstate->tagenable |= devinfo.target_mask;
477 else
478 tstate->tagenable &= ~devinfo.target_mask;
479
480 ahd_find_syncrate(ahd, &period, &ppr_options, AHD_SYNCRATE_MAX);
481 ahd_validate_offset(ahd, NULL, period, &offset,
482 MSG_EXT_WDTR_BUS_8_BIT, ROLE_UNKNOWN);
483 if (offset == 0) {
484 period = 0;
485 ppr_options = 0;
486 }
487 if (ppr_options != 0
488 && tinfo->user.transport_version >= 3) {
489 tinfo->goal.transport_version =
490 tinfo->user.transport_version;
491 tinfo->curr.transport_version =
492 tinfo->user.transport_version;
493 }
494
495 ahd_set_syncrate(ahd, &devinfo, period, offset,
496 ppr_options, AHD_TRANS_GOAL, FALSE);
497
498 /*
499 * If this is the first request, and no negotiation is
500 * needed, just confirm the state to the scsipi layer,
501 * so that it can print a message.
502 */
503 if (old_autoneg == tstate->auto_negotiate && first) {
504 xm->xm_mode = 0;
505 xm->xm_period = tinfo->curr.period;
506 xm->xm_offset = tinfo->curr.offset;
507 if (tinfo->curr.width == MSG_EXT_WDTR_BUS_16_BIT)
508 xm->xm_mode |= PERIPH_CAP_WIDE16;
509 if (tinfo->curr.period)
510 xm->xm_mode |= PERIPH_CAP_SYNC;
511 if (tstate->tagenable & devinfo.target_mask)
512 xm->xm_mode |= PERIPH_CAP_TQING;
513 if (tinfo->curr.ppr_options & MSG_EXT_PPR_DT_REQ)
514 xm->xm_mode |= PERIPH_CAP_DT;
515 scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, xm);
516 }
517 splx(s);
518 }
519 }
520
521 return;
522 }
523
/*
 * Finish setting up an SCB (S/G list, transfer options) and queue it
 * to the controller.  Called directly for zero-data commands and as
 * the continuation after DMA mapping in ahd_setup_data().
 */
static void
ahd_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments)
{
	struct scb *scb;
	struct scsipi_xfer *xs;
	struct ahd_softc *ahd;
	struct ahd_initiator_tinfo *tinfo;
	struct ahd_tmode_tstate *tstate;
	u_int mask;
	int s;

	scb = arg;
	xs = scb->xs;
	xs->error = 0;
	xs->status = 0;
	xs->xs_status = 0;
	ahd = (void*)xs->xs_periph->periph_channel->chan_adapter->adapt_dev;

	scb->sg_count = 0;
	if (nsegments != 0) {
		void *sg;
		int op;
		u_int i;

		ahd_setup_data_scb(ahd, scb);

		/* Copy the segments into our SG list */
		for (i = nsegments, sg = scb->sg_list; i > 0; i--) {

			sg = ahd_sg_setup(ahd, scb, sg, dm_segs->ds_addr,
					  dm_segs->ds_len,
					  /*last*/i == 1);
			dm_segs++;
		}

		/* Pre-sync the data buffer for the upcoming DMA. */
		if (xs->xs_control & XS_CTL_DATA_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;

		bus_dmamap_sync(ahd->parent_dmat, scb->dmamap, 0,
				scb->dmamap->dm_mapsize, op);
	}

	ahd_lock(ahd, &s);

	/*
	 * Last time we need to check if this SCB needs to
	 * be aborted.
	 */
	/*
	 * NOTE(review): this compares the value from
	 * ahd_get_scsi_status() against XS_STS_DONE, which lives in a
	 * different namespace than SCSI status codes — confirm intent.
	 */
	if (ahd_get_scsi_status(scb) == XS_STS_DONE) {
		if (nsegments != 0)
			bus_dmamap_unload(ahd->parent_dmat,
					  scb->dmamap);
		ahd_free_scb(ahd, scb);
		ahd_unlock(ahd, &s);
		return;
	}

	tinfo = ahd_fetch_transinfo(ahd, SCSIID_CHANNEL(ahd, scb->hscb->scsiid),
				    SCSIID_OUR_ID(scb->hscb->scsiid),
				    SCSIID_TARGET(ahd, scb->hscb->scsiid),
				    &tstate);

	mask = SCB_GET_TARGET_MASK(ahd, scb);

	/* Apply per-target disconnect and tagged-queueing settings. */
	if ((tstate->discenable & mask) != 0)
		scb->hscb->control |= DISCENB;

	if ((tstate->tagenable & mask) != 0)
		scb->hscb->control |= xs->xs_tag_type|TAG_ENB;

	if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU) != 0) {
		scb->flags |= SCB_PACKETIZED;
		if (scb->hscb->task_management != 0)
			scb->hscb->control &= ~MK_MESSAGE;
	}

#if 0	/* This looks like it makes sense at first, but it can loop */
	if ((xs->xs_control & XS_CTL_DISCOVERY) &&
	    (tinfo->goal.width != 0
	     || tinfo->goal.period != 0
	     || tinfo->goal.ppr_options != 0)) {
		scb->flags |= SCB_NEGOTIATE;
		scb->hscb->control |= MK_MESSAGE;
	} else
#endif
	if ((tstate->auto_negotiate & mask) != 0) {
		/* A (re)negotiation is pending for this target. */
		scb->flags |= SCB_AUTO_NEGOTIATE;
		scb->hscb->control |= MK_MESSAGE;
	}

	LIST_INSERT_HEAD(&ahd->pending_scbs, scb, pending_links);

	scb->flags |= SCB_ACTIVE;

	if (!(xs->xs_control & XS_CTL_POLL)) {
		/* ms -> ticks; divide first for huge values (overflow). */
		callout_reset(&scb->xs->xs_callout, xs->timeout > 1000000 ?
			      (xs->timeout / 1000) * hz : (xs->timeout * hz) / 1000,
			      ahd_timeout, scb);
	}

	if ((scb->flags & SCB_TARGET_IMMEDIATE) != 0) {
		/* Define a mapping from our tag to the SCB. */
		ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = scb;
		ahd_pause(ahd);
		ahd_set_scbptr(ahd, SCB_GET_TAG(scb));
		ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_TARG);
		ahd_unpause(ahd);
	} else {
		ahd_queue_scb(ahd, scb);
	}

	if (!(xs->xs_control & XS_CTL_POLL)) {
		ahd_unlock(ahd, &s);
		return;
	}
	/*
	 * If we can't use interrupts, poll for completion
	 */
	SC_DEBUG(xs->xs_periph, SCSIPI_DB3, ("cmd_poll\n"));
	do {
		if (ahd_poll(ahd, xs->timeout)) {
			if (!(xs->xs_control & XS_CTL_SILENT))
				printf("cmd fail\n");
			ahd_timeout(scb);
			break;
		}
	} while (!(xs->xs_status & XS_STS_DONE));

	ahd_unlock(ahd, &s);
}
656
657 static int
658 ahd_poll(struct ahd_softc *ahd, int wait)
659 {
660
661 while (--wait) {
662 DELAY(1000);
663 if (ahd_inb(ahd, INTSTAT) & INT_PEND)
664 break;
665 }
666
667 if (wait == 0) {
668 printf("%s: board is not responding\n", ahd_name(ahd));
669 return (EIO);
670 }
671
672 ahd_intr(ahd);
673 return (0);
674 }
675
676
677 static void
678 ahd_setup_data(struct ahd_softc *ahd, struct scsipi_xfer *xs,
679 struct scb *scb)
680 {
681 struct hardware_scb *hscb;
682
683 hscb = scb->hscb;
684 xs->resid = xs->status = 0;
685
686 hscb->cdb_len = xs->cmdlen;
687 if (hscb->cdb_len > MAX_CDB_LEN) {
688 int s;
689 /*
690 * Should CAM start to support CDB sizes
691 * greater than 16 bytes, we could use
692 * the sense buffer to store the CDB.
693 */
694 ahd_set_transaction_status(scb,
695 XS_DRIVER_STUFFUP);
696
697 ahd_lock(ahd, &s);
698 ahd_free_scb(ahd, scb);
699 ahd_unlock(ahd, &s);
700 scsipi_done(xs);
701 }
702 memcpy(hscb->shared_data.idata.cdb, xs->cmd, hscb->cdb_len);
703
704 /* Only use S/G if there is a transfer */
705 if (xs->datalen) {
706 int error;
707
708 error = bus_dmamap_load(ahd->parent_dmat,
709 scb->dmamap, xs->data,
710 xs->datalen, NULL,
711 ((xs->xs_control & XS_CTL_NOSLEEP) ?
712 BUS_DMA_NOWAIT : BUS_DMA_WAITOK) |
713 BUS_DMA_STREAMING |
714 ((xs->xs_control & XS_CTL_DATA_IN) ?
715 BUS_DMA_READ : BUS_DMA_WRITE));
716 if (error) {
717 #ifdef AHD_DEBUG
718 printf("%s: in ahd_setup_data(): bus_dmamap_load() "
719 "= %d\n",
720 ahd_name(ahd), error);
721 #endif
722 xs->error = XS_RESOURCE_SHORTAGE;
723 scsipi_done(xs);
724 return;
725 }
726 ahd_execute_scb(scb,
727 scb->dmamap->dm_segs,
728 scb->dmamap->dm_nsegs);
729 } else {
730 ahd_execute_scb(scb, NULL, 0);
731 }
732 }
733
/*
 * Watchdog for a command that did not complete in time.  Pauses the
 * chip, and unless a previous timeout already completed the SCB,
 * dumps state and hard-resets the channel to recover.
 */
void
ahd_timeout(void *arg)
{
	struct scb *scb;
	struct ahd_softc *ahd;
	ahd_mode_state saved_modes;
	int s;

	scb = arg;
	ahd = scb->ahd_softc;

	printf("%s: ahd_timeout\n", ahd_name(ahd));

	ahd_lock(ahd, &s);

	/* Quiesce the sequencer before inspecting controller state. */
	ahd_pause_and_flushwork(ahd);
	/* saved_modes is only consumed by the disabled block below. */
	saved_modes = ahd_save_modes(ahd);
#if 0
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	ahd_outb(ahd, SCSISIGO, ACKO);
	printf("set ACK\n");
	ahd_outb(ahd, SCSISIGO, 0);
	printf("clearing Ack\n");
	ahd_restore_modes(ahd, saved_modes);
#endif
	if ((scb->flags & SCB_ACTIVE) == 0) {
		/* Previous timeout took care of me already */
		printf("%s: Timedout SCB already complete. "
		       "Interrupts may not be functioning.\n", ahd_name(ahd));
		ahd_unpause(ahd);
		ahd_unlock(ahd, &s);
		return;
	}

	/* Reset the channel; completion of the SCB happens from there. */
	ahd_print_path(ahd, scb);
	printf("SCB 0x%x - timed out\n", SCB_GET_TAG(scb));
	ahd_dump_card_state(ahd);
	ahd_reset_channel(ahd, SIM_CHANNEL(ahd, sim),
			  /*initiate reset*/TRUE);
	ahd_unlock(ahd, &s);
	return;
}
776
777 int
778 ahd_platform_alloc(struct ahd_softc *ahd, void *platform_arg)
779 {
780 ahd->platform_data = malloc(sizeof(struct ahd_platform_data), M_DEVBUF,
781 M_NOWAIT /*| M_ZERO*/);
782 if (ahd->platform_data == NULL)
783 return (ENOMEM);
784
785 memset(ahd->platform_data, 0, sizeof(struct ahd_platform_data));
786
787 return (0);
788 }
789
/*
 * Release the platform-specific state allocated by
 * ahd_platform_alloc().
 */
void
ahd_platform_free(struct ahd_softc *ahd)
{
	free(ahd->platform_data, M_DEVBUF);
}
795
/*
 * Softc comparison callback used by the core when ordering
 * controllers during attach.
 */
int
ahd_softc_comp(struct ahd_softc *lahd, struct ahd_softc *rahd)
{
	/* We don't sort softcs under NetBSD so report equal always */
	return (0);
}
802
803 int
804 ahd_detach(device_t self, int flags)
805 {
806 int rv = 0;
807
808 struct ahd_softc *ahd = (struct ahd_softc*)self;
809
810 if (ahd->sc_child != NULL)
811 rv = config_detach(ahd->sc_child, flags);
812
813 pmf_device_deregister(&ahd->sc_dev);
814
815 ahd_free(ahd);
816
817 return rv;
818 }
819
820 void
821 ahd_platform_set_tags(struct ahd_softc *ahd,
822 struct ahd_devinfo *devinfo, ahd_queue_alg alg)
823 {
824 struct ahd_tmode_tstate *tstate;
825
826 ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid,
827 devinfo->target, &tstate);
828
829 if (alg != AHD_QUEUE_NONE)
830 tstate->tagenable |= devinfo->target_mask;
831 else
832 tstate->tagenable &= ~devinfo->target_mask;
833 }
834
/*
 * Deliver an asynchronous event from the core driver to the scsipi
 * layer: transfer-mode negotiation results and bus resets.
 */
void
ahd_send_async(struct ahd_softc *ahd, char channel, u_int target, u_int lun,
	       ac_code code, void *opt_arg)
{
	struct ahd_tmode_tstate *tstate;
	struct ahd_initiator_tinfo *tinfo;
	struct ahd_devinfo devinfo;
	struct scsipi_channel *chan;
	struct scsipi_xfer_mode xm;

#ifdef DIAGNOSTIC
	if (channel != 'A')
		panic("ahd_send_async: not channel A");
#endif
	chan = &ahd->sc_channel;
	switch (code) {
	case AC_TRANSFER_NEG:
		tinfo = ahd_fetch_transinfo(ahd, channel, ahd->our_id, target,
					    &tstate);
		ahd_compile_devinfo(&devinfo, ahd->our_id, target, lun,
				    channel, ROLE_UNKNOWN);
		/*
		 * Don't bother if negotiating. XXX?
		 */
		if (tinfo->curr.period != tinfo->goal.period
		    || tinfo->curr.width != tinfo->goal.width
		    || tinfo->curr.offset != tinfo->goal.offset
		    || tinfo->curr.ppr_options != tinfo->goal.ppr_options)
			break;
		/* Translate the negotiated settings to scsipi capabilities. */
		xm.xm_target = target;
		xm.xm_mode = 0;
		xm.xm_period = tinfo->curr.period;
		xm.xm_offset = tinfo->curr.offset;
		if (tinfo->goal.ppr_options & MSG_EXT_PPR_DT_REQ)
			xm.xm_mode |= PERIPH_CAP_DT;
		if (tinfo->curr.width == MSG_EXT_WDTR_BUS_16_BIT)
			xm.xm_mode |= PERIPH_CAP_WIDE16;
		if (tinfo->curr.period)
			xm.xm_mode |= PERIPH_CAP_SYNC;
		if (tstate->tagenable & devinfo.target_mask)
			xm.xm_mode |= PERIPH_CAP_TQING;
		scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, &xm);
		break;
	case AC_BUS_RESET:
		scsipi_async_event(chan, ASYNC_EVENT_RESET, NULL);
		/* FALLTHROUGH */
	case AC_SENT_BDR:
	default:
		break;
	}
}
885