aic79xx_osm.c revision 1.6 1 /* $NetBSD: aic79xx_osm.c,v 1.6 2003/09/02 21:02:57 fvdl Exp $ */
2
3 /*
4 * Bus independent NetBSD shim for the aic7xxx based adaptec SCSI controllers
5 *
6 * Copyright (c) 1994-2002 Justin T. Gibbs.
7 * Copyright (c) 2001-2002 Adaptec Inc.
8 * All rights reserved.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions, and the following disclaimer,
15 * without modification.
16 * 2. The name of the author may not be used to endorse or promote products
17 * derived from this software without specific prior written permission.
18 *
19 * Alternatively, this software may be distributed under the terms of the
20 * GNU Public License ("GPL").
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
26 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * //depot/aic7xxx/freebsd/dev/aic7xxx/aic79xx_osm.c#26 $
35 *
36 * $FreeBSD: src/sys/dev/aic7xxx/aic79xx_osm.c,v 1.11 2003/05/04 00:20:07 gibbs Exp $
37 */
38 /*
39 * Ported from FreeBSD by Pascal Renauld, Network Storage Solutions, Inc.
40 * - April 2003
41 */
42
43 #include <sys/cdefs.h>
44 __KERNEL_RCSID(0, "$NetBSD: aic79xx_osm.c,v 1.6 2003/09/02 21:02:57 fvdl Exp $");
45
46 #include <dev/ic/aic79xx_osm.h>
47 #include <dev/ic/aic7xxx_cam.h>
48 #include <dev/ic/aic79xx_inline.h>
49
50 #ifndef AHD_TMODE_ENABLE
51 #define AHD_TMODE_ENABLE 0
52 #endif
53
54 static int ahd_ioctl(struct scsipi_channel *channel, u_long cmd,
55 caddr_t addr, int flag, struct proc *p);
56 static void ahd_action(struct scsipi_channel *chan,
57 scsipi_adapter_req_t req, void *arg);
58 static void ahd_execute_scb(void *arg, bus_dma_segment_t *dm_segs,
59 int nsegments);
60 static int ahd_poll(struct ahd_softc *ahd, int wait);
61 static void ahd_setup_data(struct ahd_softc *ahd, struct scsipi_xfer *xs,
62 struct scb *scb);
63
64 #if NOT_YET
65 static void ahd_set_recoveryscb(struct ahd_softc *ahd, struct scb *scb);
66 #endif
67
68 /*
69 * Attach all the sub-devices we can find
70 */
71 int
72 ahd_attach(struct ahd_softc *ahd)
73 {
74 int s;
75 char ahd_info[256];
76
77 ahd_controller_info(ahd, ahd_info);
78 printf("%s: %s\n", ahd->sc_dev.dv_xname, ahd_info);
79
80 ahd_lock(ahd, &s);
81
82 ahd->sc_adapter.adapt_dev = &ahd->sc_dev;
83 ahd->sc_adapter.adapt_nchannels = 1;
84
85 ahd->sc_adapter.adapt_openings = AHD_MAX_QUEUE;
86 ahd->sc_adapter.adapt_max_periph = 32;
87
88 ahd->sc_adapter.adapt_ioctl = ahd_ioctl;
89 ahd->sc_adapter.adapt_minphys = ahd_minphys;
90 ahd->sc_adapter.adapt_request = ahd_action;
91
92 ahd->sc_channel.chan_adapter = &ahd->sc_adapter;
93 ahd->sc_channel.chan_bustype = &scsi_bustype;
94 ahd->sc_channel.chan_channel = 0;
95 ahd->sc_channel.chan_ntargets = AHD_NUM_TARGETS;
96 ahd->sc_channel.chan_nluns = 8 /*AHD_NUM_LUNS*/;
97 ahd->sc_channel.chan_id = ahd->our_id;
98
99 ahd->sc_child = config_found((void *)ahd, &ahd->sc_channel, scsiprint);
100
101 ahd_intr_enable(ahd, TRUE);
102
103 if (ahd->flags & AHD_RESET_BUS_A)
104 ahd_reset_channel(ahd, 'A', TRUE);
105
106 ahd_unlock(ahd, &s);
107
108 return (1);
109 }
110
111 static int
112 ahd_ioctl(struct scsipi_channel *channel, u_long cmd,
113 caddr_t addr, int flag, struct proc *p)
114 {
115 struct ahd_softc *ahd = (void *)channel->chan_adapter->adapt_dev;
116 int s, ret = ENOTTY;
117
118 switch (cmd) {
119 case SCBUSIORESET:
120 s = splbio();
121 ahd_reset_channel(ahd, channel->chan_channel == 1 ? 'B' : 'A', TRUE);
122 splx(s);
123 ret = 0;
124 break;
125 default:
126 break;
127 }
128
129 return ret;
130 }
131
132 /*
133 * Catch an interrupt from the adapter
134 */
135 void
136 ahd_platform_intr(void *arg)
137 {
138 struct ahd_softc *ahd;
139
140 ahd = (struct ahd_softc *)arg;
141
142 printf("%s; ahd_platform_intr\n", ahd_name(ahd));
143
144 ahd_intr(ahd);
145 }
146
147 /*
148 * We have an scb which has been processed by the
149 * adaptor, now we look to see how the operation * went.
150 */
151 void
152 ahd_done(struct ahd_softc *ahd, struct scb *scb)
153 {
154 struct scsipi_xfer *xs;
155 struct scsipi_periph *periph;
156 int target;
157 int s;
158
159 LIST_REMOVE(scb, pending_links);
160
161 xs = scb->xs;
162 periph = xs->xs_periph;
163
164 callout_stop(&scb->xs->xs_callout);
165
166 target = periph->periph_target;
167
168 if (xs->datalen) {
169 int op;
170
171 if (xs->xs_control & XS_CTL_DATA_IN)
172 op = BUS_DMASYNC_POSTREAD;
173 else
174 op = BUS_DMASYNC_POSTWRITE;
175
176 bus_dmamap_sync(ahd->parent_dmat, scb->dmamap, 0,
177 scb->dmamap->dm_mapsize, op);
178 bus_dmamap_unload(ahd->parent_dmat, scb->dmamap);
179 }
180
181 /*
182 * If the recovery SCB completes, we have to be
183 * out of our timeout.
184 */
185 if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
186 struct scb *list_scb;
187
188 /*
189 * We were able to complete the command successfully,
190 * so reinstate the timeouts for all other pending
191 * commands.
192 */
193 LIST_FOREACH(list_scb, &ahd->pending_scbs, pending_links) {
194 struct scsipi_xfer *txs = list_scb->xs;
195
196 if (!(txs->xs_control & XS_CTL_POLL)) {
197 callout_reset(&txs->xs_callout,
198 (txs->timeout > 1000000) ?
199 (txs->timeout / 1000) * hz :
200 (txs->timeout * hz) / 1000,
201 ahd_timeout, list_scb);
202 }
203 }
204
205 if (ahd_get_transaction_status(scb) != XS_NOERROR)
206 ahd_set_transaction_status(scb, XS_TIMEOUT);
207 scsipi_printaddr(xs->xs_periph);
208 printf("%s: no longer in timeout, status = %x\n",
209 ahd_name(ahd), xs->status);
210 }
211
212 if (xs->error != XS_NOERROR) {
213 /* Don't clobber any existing error state */
214 } else if ((xs->status == SCSI_STATUS_BUSY) ||
215 (xs->status == SCSI_STATUS_QUEUE_FULL)) {
216 ahd_set_transaction_status(scb, XS_BUSY);
217 printf("%s: drive (ID %d, LUN %d) queue full (SCB 0x%x)\n",
218 ahd_name(ahd), SCB_GET_TARGET(ahd,scb), SCB_GET_LUN(scb), SCB_GET_TAG(scb));
219 } else if ((scb->flags & SCB_SENSE) != 0) {
220 /*
221 * We performed autosense retrieval.
222 *
223 * zero the sense data before having
224 * the drive fill it. The SCSI spec mandates
225 * that any untransferred data should be
226 * assumed to be zero. Complete the 'bounce'
227 * of sense information through buffers accessible
228 * via bus-space by copying it into the clients
229 * csio.
230 */
231 memset(&xs->sense.scsi_sense, 0, sizeof(xs->sense.scsi_sense));
232 memcpy(&xs->sense.scsi_sense, ahd_get_sense_buf(ahd, scb),
233 sizeof(struct scsipi_sense_data));
234
235 ahd_set_transaction_status(scb, XS_SENSE);
236 } else if ((scb->flags & SCB_PKT_SENSE) != 0) {
237 struct scsi_status_iu_header *siu;
238 u_int sense_len;
239 int i;
240
241 /*
242 * Copy only the sense data into the provided buffer.
243 */
244 siu = (struct scsi_status_iu_header *)scb->sense_data;
245 sense_len = MIN(scsi_4btoul(siu->sense_length),
246 sizeof(&xs->sense.scsi_sense));
247 memset(&xs->sense.scsi_sense, 0, sizeof(xs->sense.scsi_sense));
248 memcpy(&xs->sense.scsi_sense,
249 scb->sense_data + SIU_SENSE_OFFSET(siu), sense_len);
250 printf("Copied %d bytes of sense data offset %d:", sense_len,
251 SIU_SENSE_OFFSET(siu));
252 for (i = 0; i < sense_len; i++)
253 printf(" 0x%x", ((uint8_t *)&xs->sense.scsi_sense)[i]);
254 printf("\n");
255
256 ahd_set_transaction_status(scb, XS_SENSE);
257 }
258
259 if (scb->flags & SCB_FREEZE_QUEUE) {
260 scsipi_periph_thaw(periph, 1);
261 scb->flags &= ~SCB_FREEZE_QUEUE;
262 }
263
264 if (scb->flags & SCB_REQUEUE)
265 ahd_set_transaction_status(scb, XS_REQUEUE);
266
267 ahd_lock(ahd, &s);
268 ahd_free_scb(ahd, scb);
269 ahd_unlock(ahd, &s);
270
271 scsipi_done(xs);
272 }
273
274 static void
275 ahd_action(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
276 {
277 struct ahd_softc *ahd;
278 struct ahd_initiator_tinfo *tinfo;
279 struct ahd_tmode_tstate *tstate;
280
281 ahd = (void *)chan->chan_adapter->adapt_dev;
282
283 switch(req) {
284
285 case ADAPTER_REQ_RUN_XFER:
286 {
287 struct scsipi_xfer *xs;
288 struct scsipi_periph *periph;
289 struct scb *scb;
290 struct hardware_scb *hscb;
291 u_int target_id;
292 u_int our_id;
293 u_int col_idx;
294 char channel;
295 int s;
296
297 xs = arg;
298 periph = xs->xs_periph;
299
300 SC_DEBUG(periph, SCSIPI_DB3, ("ahd_action\n"));
301
302 target_id = periph->periph_target;
303 our_id = ahd->our_id;
304 channel = (chan->chan_channel == 1) ? 'B' : 'A';
305
306 /*
307 * get an scb to use.
308 */
309 ahd_lock(ahd, &s);
310 tinfo = ahd_fetch_transinfo(ahd, channel, our_id,
311 target_id, &tstate);
312
313 if (xs->xs_tag_type != 0 ||
314 (tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0)
315 col_idx = AHD_NEVER_COL_IDX;
316 else
317 col_idx = AHD_BUILD_COL_IDX(target_id,
318 periph->periph_lun);
319
320 if ((scb = ahd_get_scb(ahd, col_idx)) == NULL) {
321 xs->error = XS_RESOURCE_SHORTAGE;
322 ahd_unlock(ahd, &s);
323 scsipi_done(xs);
324 return;
325 }
326 ahd_unlock(ahd, &s);
327
328 hscb = scb->hscb;
329
330 SC_DEBUG(periph, SCSIPI_DB3, ("start scb(%p)\n", scb));
331 scb->xs = xs;
332
333 /*
334 * Put all the arguments for the xfer in the scb
335 */
336 hscb->control = 0;
337 hscb->scsiid = BUILD_SCSIID(ahd, sim, target_id, our_id);
338 hscb->lun = periph->periph_lun;
339 if (xs->xs_control & XS_CTL_RESET) {
340 hscb->cdb_len = 0;
341 scb->flags |= SCB_DEVICE_RESET;
342 hscb->control |= MK_MESSAGE;
343 hscb->task_management = SIU_TASKMGMT_LUN_RESET;
344 ahd_execute_scb(scb, NULL, 0);
345 } else {
346 hscb->task_management = 0;
347 }
348
349 ahd_setup_data(ahd, xs, scb);
350 break;
351 }
352
353 case ADAPTER_REQ_GROW_RESOURCES:
354 printf("%s: ADAPTER_REQ_GROW_RESOURCES\n", ahd_name(ahd));
355 break;
356
357 case ADAPTER_REQ_SET_XFER_MODE:
358 {
359 struct scsipi_xfer_mode *xm = arg;
360 struct ahd_devinfo devinfo;
361 int target_id, our_id, first;
362 u_int width;
363 int s;
364 char channel;
365 u_int ppr_options, period, offset;
366 uint16_t old_autoneg;
367
368 target_id = xm->xm_target;
369 our_id = chan->chan_id;
370 channel = 'A';
371 s = splbio();
372 tinfo = ahd_fetch_transinfo(ahd, channel, our_id, target_id,
373 &tstate);
374 ahd_compile_devinfo(&devinfo, our_id, target_id,
375 0, channel, ROLE_INITIATOR);
376
377 old_autoneg = tstate->auto_negotiate;
378
379 /*
380 * XXX since the period and offset are not provided here,
381 * fake things by forcing a renegotiation using the user
382 * settings if this is called for the first time (i.e.
383 * during probe). Also, cap various values at the user
384 * values, assuming that the user set it up that way.
385 */
386 if (ahd->inited_target[target_id] == 0) {
387 period = tinfo->user.period;
388 offset = tinfo->user.offset;
389 ppr_options = tinfo->user.ppr_options;
390 width = tinfo->user.width;
391 tstate->tagenable |=
392 (ahd->user_tagenable & devinfo.target_mask);
393 tstate->discenable |=
394 (ahd->user_discenable & devinfo.target_mask);
395 ahd->inited_target[target_id] = 1;
396 first = 1;
397 } else
398 first = 0;
399
400 if (xm->xm_mode & (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
401 width = MSG_EXT_WDTR_BUS_16_BIT;
402 else
403 width = MSG_EXT_WDTR_BUS_8_BIT;
404
405 ahd_validate_width(ahd, NULL, &width, ROLE_UNKNOWN);
406 if (width > tinfo->user.width)
407 width = tinfo->user.width;
408 ahd_set_width(ahd, &devinfo, width, AHD_TRANS_GOAL, FALSE);
409
410 if (!(xm->xm_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT))) {
411 period = 0;
412 offset = 0;
413 ppr_options = 0;
414 }
415
416 if ((xm->xm_mode & PERIPH_CAP_DT) &&
417 (tinfo->user.ppr_options & MSG_EXT_PPR_DT_REQ))
418 ppr_options |= MSG_EXT_PPR_DT_REQ;
419 else
420 ppr_options &= ~MSG_EXT_PPR_DT_REQ;
421
422 if ((tstate->discenable & devinfo.target_mask) == 0 ||
423 (tstate->tagenable & devinfo.target_mask) == 0)
424 ppr_options &= ~MSG_EXT_PPR_IU_REQ;
425
426 if ((xm->xm_mode & PERIPH_CAP_TQING) &&
427 (ahd->user_tagenable & devinfo.target_mask))
428 tstate->tagenable |= devinfo.target_mask;
429 else
430 tstate->tagenable &= ~devinfo.target_mask;
431
432 ahd_find_syncrate(ahd, &period, &ppr_options, AHD_SYNCRATE_MAX);
433 ahd_validate_offset(ahd, NULL, period, &offset,
434 MSG_EXT_WDTR_BUS_8_BIT, ROLE_UNKNOWN);
435 if (offset == 0) {
436 period = 0;
437 ppr_options = 0;
438 }
439 if (ppr_options != 0
440 && tinfo->user.transport_version >= 3) {
441 tinfo->goal.transport_version =
442 tinfo->user.transport_version;
443 tinfo->curr.transport_version =
444 tinfo->user.transport_version;
445 }
446
447 ahd_set_syncrate(ahd, &devinfo, period, offset,
448 ppr_options, AHD_TRANS_GOAL, FALSE);
449
450 /*
451 * If this is the first request, and no negotiation is
452 * needed, just confirm the state to the scsipi layer,
453 * so that it can print a message.
454 */
455 if (old_autoneg == tstate->auto_negotiate && first) {
456 xm->xm_mode = 0;
457 xm->xm_period = tinfo->curr.period;
458 xm->xm_offset = tinfo->curr.offset;
459 if (tinfo->curr.width == MSG_EXT_WDTR_BUS_16_BIT)
460 xm->xm_mode |= PERIPH_CAP_WIDE16;
461 if (tinfo->curr.period)
462 xm->xm_mode |= PERIPH_CAP_SYNC;
463 if (tstate->tagenable & devinfo.target_mask)
464 xm->xm_mode |= PERIPH_CAP_TQING;
465 if (tinfo->curr.ppr_options & MSG_EXT_PPR_DT_REQ)
466 xm->xm_mode |= PERIPH_CAP_DT;
467 scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, xm);
468 }
469 splx(s);
470 }
471 }
472
473 return;
474 }
475
/*
 * Second half of command submission: called directly (no-data and
 * reset commands) or as the continuation after bus_dmamap_load()
 * with the resulting DMA segments.  Builds the hardware S/G list,
 * applies per-target disconnect/tag/packetized settings, queues the
 * SCB to the controller, and either returns (interrupt-driven) or
 * polls for completion (XS_CTL_POLL).
 */
static void
ahd_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments)
{
	struct scb *scb;
	struct scsipi_xfer *xs;
	struct ahd_softc *ahd;
	struct ahd_initiator_tinfo *tinfo;
	struct ahd_tmode_tstate *tstate;
	u_int mask;
	int s;

	scb = (struct scb*)arg;
	xs = scb->xs;
	/* Start from a clean completion state for this attempt. */
	xs->error = 0;
	xs->status = 0;
	xs->xs_status = 0;
	ahd = (void*)xs->xs_periph->periph_channel->chan_adapter->adapt_dev;

	scb->sg_count = 0;
	if (nsegments != 0) {
		void *sg;
		int op;
		u_int i;

		ahd_setup_data_scb(ahd, scb);

		/* Copy the segments into our SG list */
		for (i = nsegments, sg = scb->sg_list; i > 0; i--) {

			/* ahd_sg_setup() returns the next free slot;
			 * the last segment is flagged for the sequencer. */
			sg = ahd_sg_setup(ahd, scb, sg, dm_segs->ds_addr,
			    dm_segs->ds_len,
			    /*last*/i == 1);
			dm_segs++;
		}

		if (xs->xs_control & XS_CTL_DATA_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;

		bus_dmamap_sync(ahd->parent_dmat, scb->dmamap, 0,
				scb->dmamap->dm_mapsize, op);
	}

	ahd_lock(ahd, &s);

	/*
	 * Last time we need to check if this SCB needs to
	 * be aborted.
	 */
	if (ahd_get_scsi_status(scb) == XS_STS_DONE) {
		if (nsegments != 0)
			bus_dmamap_unload(ahd->parent_dmat,
			    scb->dmamap);
		ahd_free_scb(ahd, scb);
		ahd_unlock(ahd, &s);
		return;
	}

	tinfo = ahd_fetch_transinfo(ahd, SCSIID_CHANNEL(ahd, scb->hscb->scsiid),
	    SCSIID_OUR_ID(scb->hscb->scsiid),
	    SCSIID_TARGET(ahd, scb->hscb->scsiid),
	    &tstate);

	mask = SCB_GET_TARGET_MASK(ahd, scb);

	if ((tstate->discenable & mask) != 0)
		scb->hscb->control |= DISCENB;

	if ((tstate->tagenable & mask) != 0)
		scb->hscb->control |= xs->xs_tag_type|TAG_ENB;

	if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU) != 0) {
		scb->flags |= SCB_PACKETIZED;
		/* Task management functions are delivered in the IU,
		 * not as a message. */
		if (scb->hscb->task_management != 0)
			scb->hscb->control &= ~MK_MESSAGE;
	}

#if 0	/* This looks like it makes sense at first, but it can loop */
	if ((xs->xs_control & XS_CTL_DISCOVERY) &&
	    (tinfo->goal.width != 0
	     || tinfo->goal.period != 0
	     || tinfo->goal.ppr_options != 0)) {
		scb->flags |= SCB_NEGOTIATE;
		scb->hscb->control |= MK_MESSAGE;
	} else
#endif
	if ((tstate->auto_negotiate & mask) != 0) {
		scb->flags |= SCB_AUTO_NEGOTIATE;
		scb->hscb->control |= MK_MESSAGE;
	}

	LIST_INSERT_HEAD(&ahd->pending_scbs, scb, pending_links);

	scb->flags |= SCB_ACTIVE;

	if (!(xs->xs_control & XS_CTL_POLL)) {
		/* Arm the watchdog; timeout is in ms, callout in ticks. */
		callout_reset(&scb->xs->xs_callout, xs->timeout > 1000000 ?
		    (xs->timeout / 1000) * hz : (xs->timeout * hz) / 1000,
		    ahd_timeout, scb);
	}

	if ((scb->flags & SCB_TARGET_IMMEDIATE) != 0) {
		/* Define a mapping from our tag to the SCB. */
		ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = scb;
		ahd_pause(ahd);
		ahd_set_scbptr(ahd, SCB_GET_TAG(scb));
		ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_TARG);
		ahd_unpause(ahd);
	} else {
		ahd_queue_scb(ahd, scb);
	}

	if (!(xs->xs_control & XS_CTL_POLL)) {
		ahd_unlock(ahd, &s);
		return;
	}
	/*
	 * If we can't use interrupts, poll for completion
	 */
	SC_DEBUG(xs->xs_periph, SCSIPI_DB3, ("cmd_poll\n"));
	do {
		if (ahd_poll(ahd, xs->timeout)) {
			if (!(xs->xs_control & XS_CTL_SILENT))
				printf("cmd fail\n");
			ahd_timeout(scb);
			break;
		}
	} while (!(xs->xs_status & XS_STS_DONE));

	ahd_unlock(ahd, &s);
}
608
609 static int
610 ahd_poll(struct ahd_softc *ahd, int wait)
611 {
612
613 while (--wait) {
614 DELAY(1000);
615 if (ahd_inb(ahd, INTSTAT) & INT_PEND)
616 break;
617 }
618
619 if (wait == 0) {
620 printf("%s: board is not responding\n", ahd_name(ahd));
621 return (EIO);
622 }
623
624 ahd_intr((void *)ahd);
625 return (0);
626 }
627
628
629 static void
630 ahd_setup_data(struct ahd_softc *ahd, struct scsipi_xfer *xs,
631 struct scb *scb)
632 {
633 struct hardware_scb *hscb;
634
635 hscb = scb->hscb;
636 xs->resid = xs->status = 0;
637
638 hscb->cdb_len = xs->cmdlen;
639 if (hscb->cdb_len > MAX_CDB_LEN) {
640 int s;
641 /*
642 * Should CAM start to support CDB sizes
643 * greater than 16 bytes, we could use
644 * the sense buffer to store the CDB.
645 */
646 ahd_set_transaction_status(scb,
647 XS_DRIVER_STUFFUP);
648
649 ahd_lock(ahd, &s);
650 ahd_free_scb(ahd, scb);
651 ahd_unlock(ahd, &s);
652 scsipi_done(xs);
653 }
654 memcpy(hscb->shared_data.idata.cdb, xs->cmd, hscb->cdb_len);
655
656 /* Only use S/G if there is a transfer */
657 if (xs->datalen) {
658 int error;
659
660 error = bus_dmamap_load(ahd->parent_dmat,
661 scb->dmamap, xs->data,
662 xs->datalen, NULL,
663 ((xs->xs_control & XS_CTL_NOSLEEP) ?
664 BUS_DMA_NOWAIT : BUS_DMA_WAITOK) |
665 BUS_DMA_STREAMING |
666 ((xs->xs_control & XS_CTL_DATA_IN) ?
667 BUS_DMA_READ : BUS_DMA_WRITE));
668 if (error) {
669 #ifdef AHD_DEBUG
670 printf("%s: in ahc_setup_data(): bus_dmamap_load() "
671 "= %d\n",
672 ahd_name(ahd), error);
673 #endif
674 xs->error = XS_RESOURCE_SHORTAGE;
675 scsipi_done(xs);
676 return;
677 }
678 ahd_execute_scb(scb,
679 scb->dmamap->dm_segs,
680 scb->dmamap->dm_nsegs);
681 } else {
682 ahd_execute_scb(scb, NULL, 0);
683 }
684 }
685
686 void
687 ahd_timeout(void *arg)
688 {
689 struct scb *scb;
690 struct ahd_softc *ahd;
691 ahd_mode_state saved_modes;
692 int s;
693 int target;
694 int lun;
695 char channel;
696
697 scb = (struct scb *)arg;
698 ahd = (struct ahd_softc *)scb->ahd_softc;
699
700 printf("%s: ahd_timeout\n", ahd_name(ahd));
701
702 ahd_lock(ahd, &s);
703
704 ahd_pause_and_flushwork(ahd);
705 saved_modes = ahd_save_modes(ahd);
706 #if 0
707 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
708 ahd_outb(ahd, SCSISIGO, ACKO);
709 printf("set ACK\n");
710 ahd_outb(ahd, SCSISIGO, 0);
711 printf("clearing Ack\n");
712 ahd_restore_modes(ahd, saved_modes);
713 #endif
714 if ((scb->flags & SCB_ACTIVE) == 0) {
715 /* Previous timeout took care of me already */
716 printf("%s: Timedout SCB already complete. "
717 "Interrupts may not be functioning.\n", ahd_name(ahd));
718 ahd_unpause(ahd);
719 ahd_unlock(ahd, &s);
720 return;
721 }
722
723 target = SCB_GET_TARGET(ahd, scb);
724 channel = SCB_GET_CHANNEL(ahd, scb);
725 lun = SCB_GET_LUN(scb);
726
727 ahd_print_path(ahd, scb);
728 printf("SCB 0x%x - timed out\n", SCB_GET_TAG(scb));
729 ahd_dump_card_state(ahd);
730 ahd_reset_channel(ahd, SIM_CHANNEL(ahd, sim),
731 /*initiate reset*/TRUE);
732 ahd_unlock(ahd, &s);
733 return;
734 }
735
736 int
737 ahd_platform_alloc(struct ahd_softc *ahd, void *platform_arg)
738 {
739 ahd->platform_data = malloc(sizeof(struct ahd_platform_data), M_DEVBUF,
740 M_NOWAIT /*| M_ZERO*/);
741 if (ahd->platform_data == NULL)
742 return (ENOMEM);
743
744 memset(ahd->platform_data, 0, sizeof(struct ahd_platform_data));
745
746 return (0);
747 }
748
749 void
750 ahd_platform_free(struct ahd_softc *ahd)
751 {
752 free(ahd->platform_data, M_DEVBUF);
753 }
754
/*
 * Softc ordering comparator required by the core driver.
 */
int
ahd_softc_comp(struct ahd_softc *lahd, struct ahd_softc *rahd)
{

	/* We don't sort softcs under NetBSD so report equal always */
	return (0);
}
761
762 int
763 ahd_detach(struct device *self, int flags)
764 {
765 int rv = 0;
766
767 struct ahd_softc *ahd = (struct ahd_softc*)self;
768
769 if (ahd->sc_child != NULL)
770 rv = config_detach((void *)ahd->sc_child, flags);
771
772 shutdownhook_disestablish(ahd->shutdown_hook);
773
774 ahd_free(ahd);
775
776 return rv;
777 }
778
779 void
780 ahd_platform_set_tags(struct ahd_softc *ahd,
781 struct ahd_devinfo *devinfo, ahd_queue_alg alg)
782 {
783 struct ahd_initiator_tinfo *tinfo;
784 struct ahd_tmode_tstate *tstate;
785
786 tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid,
787 devinfo->target, &tstate);
788
789 if (alg != AHD_QUEUE_NONE)
790 tstate->tagenable |= devinfo->target_mask;
791 else
792 tstate->tagenable &= ~devinfo->target_mask;
793 }
794
/*
 * Deliver an asynchronous event from the core driver to the scsipi
 * midlayer.  Transfer-negotiation completions are translated into an
 * ASYNC_EVENT_XFER_MODE notification; bus resets into
 * ASYNC_EVENT_RESET.  Only channel 'A' exists on this hardware.
 */
void
ahd_send_async(struct ahd_softc *ahc, char channel, u_int target, u_int lun,
	       ac_code code, void *opt_arg)
{
	struct ahd_tmode_tstate *tstate;
	struct ahd_initiator_tinfo *tinfo;
	struct ahd_devinfo devinfo;
	struct scsipi_channel *chan;
	struct scsipi_xfer_mode xm;

#ifdef DIAGNOSTIC
	if (channel != 'A')
		panic("ahd_send_async: not channel A");
#endif
	chan = &ahc->sc_channel;
	switch (code) {
	case AC_TRANSFER_NEG:
		tinfo = ahd_fetch_transinfo(ahc, channel, ahc->our_id, target,
		    &tstate);
		ahd_compile_devinfo(&devinfo, ahc->our_id, target, lun,
		    channel, ROLE_UNKNOWN);
		/*
		 * Don't bother if negotiating. XXX?
		 *
		 * (i.e. only report once current settings have reached
		 * the negotiation goal.)
		 */
		if (tinfo->curr.period != tinfo->goal.period
		    || tinfo->curr.width != tinfo->goal.width
		    || tinfo->curr.offset != tinfo->goal.offset
		    || tinfo->curr.ppr_options != tinfo->goal.ppr_options)
			break;
		/* Translate the negotiated settings into scsipi caps. */
		xm.xm_target = target;
		xm.xm_mode = 0;
		xm.xm_period = tinfo->curr.period;
		xm.xm_offset = tinfo->curr.offset;
		if (tinfo->goal.ppr_options & MSG_EXT_PPR_DT_REQ)
			xm.xm_mode |= PERIPH_CAP_DT;
		if (tinfo->curr.width == MSG_EXT_WDTR_BUS_16_BIT)
			xm.xm_mode |= PERIPH_CAP_WIDE16;
		if (tinfo->curr.period)
			xm.xm_mode |= PERIPH_CAP_SYNC;
		if (tstate->tagenable & devinfo.target_mask)
			xm.xm_mode |= PERIPH_CAP_TQING;
		scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, &xm);
		break;
	case AC_BUS_RESET:
		scsipi_async_event(chan, ASYNC_EVENT_RESET, NULL);
		/* FALLTHROUGH -- nothing further to do for a reset. */
	case AC_SENT_BDR:
	default:
		break;
	}
}
845