aic79xx_osm.c revision 1.9 1 /* $NetBSD: aic79xx_osm.c,v 1.9 2004/10/04 11:08:47 fvdl Exp $ */
2
3 /*
4 * Bus independent NetBSD shim for the aic7xxx based adaptec SCSI controllers
5 *
6 * Copyright (c) 1994-2002 Justin T. Gibbs.
7 * Copyright (c) 2001-2002 Adaptec Inc.
8 * All rights reserved.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions, and the following disclaimer,
15 * without modification.
16 * 2. The name of the author may not be used to endorse or promote products
17 * derived from this software without specific prior written permission.
18 *
19 * Alternatively, this software may be distributed under the terms of the
20 * GNU Public License ("GPL").
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
26 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * //depot/aic7xxx/freebsd/dev/aic7xxx/aic79xx_osm.c#26 $
35 *
36 * $FreeBSD: src/sys/dev/aic7xxx/aic79xx_osm.c,v 1.11 2003/05/04 00:20:07 gibbs Exp $
37 */
38 /*
39 * Ported from FreeBSD by Pascal Renauld, Network Storage Solutions, Inc.
40 * - April 2003
41 */
42
43 #include <sys/cdefs.h>
44 __KERNEL_RCSID(0, "$NetBSD: aic79xx_osm.c,v 1.9 2004/10/04 11:08:47 fvdl Exp $");
45
46 #include <dev/ic/aic79xx_osm.h>
47 #include <dev/ic/aic7xxx_cam.h>
48 #include <dev/ic/aic79xx_inline.h>
49
50 #ifndef AHD_TMODE_ENABLE
51 #define AHD_TMODE_ENABLE 0
52 #endif
53
54 static int ahd_ioctl(struct scsipi_channel *channel, u_long cmd,
55 caddr_t addr, int flag, struct proc *p);
56 static void ahd_action(struct scsipi_channel *chan,
57 scsipi_adapter_req_t req, void *arg);
58 static void ahd_execute_scb(void *arg, bus_dma_segment_t *dm_segs,
59 int nsegments);
60 static int ahd_poll(struct ahd_softc *ahd, int wait);
61 static void ahd_setup_data(struct ahd_softc *ahd, struct scsipi_xfer *xs,
62 struct scb *scb);
63
64 #if NOT_YET
65 static void ahd_set_recoveryscb(struct ahd_softc *ahd, struct scb *scb);
66 #endif
67
68 /*
69 * Attach all the sub-devices we can find
70 */
71 int
72 ahd_attach(struct ahd_softc *ahd)
73 {
74 int s;
75 char ahd_info[256];
76
77 ahd_controller_info(ahd, ahd_info, sizeof(ahd_info));
78 printf("%s: %s\n", ahd->sc_dev.dv_xname, ahd_info);
79
80 ahd_lock(ahd, &s);
81
82 ahd->sc_adapter.adapt_dev = &ahd->sc_dev;
83 ahd->sc_adapter.adapt_nchannels = 1;
84
85 ahd->sc_adapter.adapt_openings = AHD_MAX_QUEUE;
86 ahd->sc_adapter.adapt_max_periph = 32;
87
88 ahd->sc_adapter.adapt_ioctl = ahd_ioctl;
89 ahd->sc_adapter.adapt_minphys = ahd_minphys;
90 ahd->sc_adapter.adapt_request = ahd_action;
91
92 ahd->sc_channel.chan_adapter = &ahd->sc_adapter;
93 ahd->sc_channel.chan_bustype = &scsi_bustype;
94 ahd->sc_channel.chan_channel = 0;
95 ahd->sc_channel.chan_ntargets = AHD_NUM_TARGETS;
96 ahd->sc_channel.chan_nluns = 8 /*AHD_NUM_LUNS*/;
97 ahd->sc_channel.chan_id = ahd->our_id;
98
99 ahd->sc_child = config_found((void *)ahd, &ahd->sc_channel, scsiprint);
100
101 ahd_intr_enable(ahd, TRUE);
102
103 if (ahd->flags & AHD_RESET_BUS_A)
104 ahd_reset_channel(ahd, 'A', TRUE);
105
106 ahd_unlock(ahd, &s);
107
108 return (1);
109 }
110
111 static int
112 ahd_ioctl(struct scsipi_channel *channel, u_long cmd,
113 caddr_t addr, int flag, struct proc *p)
114 {
115 struct ahd_softc *ahd = (void *)channel->chan_adapter->adapt_dev;
116 int s, ret = ENOTTY;
117
118 switch (cmd) {
119 case SCBUSIORESET:
120 s = splbio();
121 ahd_reset_channel(ahd, channel->chan_channel == 1 ? 'B' : 'A', TRUE);
122 splx(s);
123 ret = 0;
124 break;
125 default:
126 break;
127 }
128
129 return ret;
130 }
131
132 /*
133 * Catch an interrupt from the adapter
134 */
/*
 * Catch an interrupt from the adapter and hand it to the core
 * interrupt handler.
 *
 * Fix: the log message used "%s;" (semicolon) instead of the
 * conventional "%s:" separator used everywhere else in this file.
 * NOTE(review): this printf fires on every interrupt; it looks like
 * debug leftover and should probably be under #ifdef AHD_DEBUG —
 * left as-is to preserve behavior.
 */
void
ahd_platform_intr(void *arg)
{
	struct ahd_softc *ahd;

	ahd = (struct ahd_softc *)arg;

	printf("%s: ahd_platform_intr\n", ahd_name(ahd));

	ahd_intr(ahd);
}
146
/*
 * We have an scb which has been processed by the
 * adaptor, now we look to see how the operation
 * went.
 */
151 void
152 ahd_done(struct ahd_softc *ahd, struct scb *scb)
153 {
154 struct scsipi_xfer *xs;
155 struct scsipi_periph *periph;
156 int s;
157
158 LIST_REMOVE(scb, pending_links);
159
160 xs = scb->xs;
161 periph = xs->xs_periph;
162
163 callout_stop(&scb->xs->xs_callout);
164
165 if (xs->datalen) {
166 int op;
167
168 if (xs->xs_control & XS_CTL_DATA_IN)
169 op = BUS_DMASYNC_POSTREAD;
170 else
171 op = BUS_DMASYNC_POSTWRITE;
172
173 bus_dmamap_sync(ahd->parent_dmat, scb->dmamap, 0,
174 scb->dmamap->dm_mapsize, op);
175 bus_dmamap_unload(ahd->parent_dmat, scb->dmamap);
176 }
177
178 /*
179 * If the recovery SCB completes, we have to be
180 * out of our timeout.
181 */
182 if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
183 struct scb *list_scb;
184
185 /*
186 * We were able to complete the command successfully,
187 * so reinstate the timeouts for all other pending
188 * commands.
189 */
190 LIST_FOREACH(list_scb, &ahd->pending_scbs, pending_links) {
191 struct scsipi_xfer *txs = list_scb->xs;
192
193 if (!(txs->xs_control & XS_CTL_POLL)) {
194 callout_reset(&txs->xs_callout,
195 (txs->timeout > 1000000) ?
196 (txs->timeout / 1000) * hz :
197 (txs->timeout * hz) / 1000,
198 ahd_timeout, list_scb);
199 }
200 }
201
202 if (ahd_get_transaction_status(scb) != XS_NOERROR)
203 ahd_set_transaction_status(scb, XS_TIMEOUT);
204 scsipi_printaddr(xs->xs_periph);
205 printf("%s: no longer in timeout, status = %x\n",
206 ahd_name(ahd), xs->status);
207 }
208
209 if (xs->error != XS_NOERROR) {
210 /* Don't clobber any existing error state */
211 } else if ((xs->status == SCSI_STATUS_BUSY) ||
212 (xs->status == SCSI_STATUS_QUEUE_FULL)) {
213 ahd_set_transaction_status(scb, XS_BUSY);
214 printf("%s: drive (ID %d, LUN %d) queue full (SCB 0x%x)\n",
215 ahd_name(ahd), SCB_GET_TARGET(ahd,scb), SCB_GET_LUN(scb), SCB_GET_TAG(scb));
216 } else if ((scb->flags & SCB_SENSE) != 0) {
217 /*
218 * We performed autosense retrieval.
219 *
220 * zero the sense data before having
221 * the drive fill it. The SCSI spec mandates
222 * that any untransferred data should be
223 * assumed to be zero. Complete the 'bounce'
224 * of sense information through buffers accessible
225 * via bus-space by copying it into the clients
226 * csio.
227 */
228 memset(&xs->sense.scsi_sense, 0, sizeof(xs->sense.scsi_sense));
229 memcpy(&xs->sense.scsi_sense, ahd_get_sense_buf(ahd, scb),
230 sizeof(struct scsipi_sense_data));
231
232 ahd_set_transaction_status(scb, XS_SENSE);
233 } else if ((scb->flags & SCB_PKT_SENSE) != 0) {
234 struct scsi_status_iu_header *siu;
235 u_int sense_len;
236 #ifdef AHD_DEBUG
237 int i;
238 #endif
239 /*
240 * Copy only the sense data into the provided buffer.
241 */
242 siu = (struct scsi_status_iu_header *)scb->sense_data;
243 sense_len = MIN(scsi_4btoul(siu->sense_length),
244 sizeof(&xs->sense.scsi_sense));
245 memset(&xs->sense.scsi_sense, 0, sizeof(xs->sense.scsi_sense));
246 memcpy(&xs->sense.scsi_sense,
247 scb->sense_data + SIU_SENSE_OFFSET(siu), sense_len);
248 #ifdef AHD_DEBUG
249 printf("Copied %d bytes of sense data offset %d:", sense_len,
250 SIU_SENSE_OFFSET(siu));
251 for (i = 0; i < sense_len; i++)
252 printf(" 0x%x", ((uint8_t *)&xs->sense.scsi_sense)[i]);
253 printf("\n");
254 #endif
255 ahd_set_transaction_status(scb, XS_SENSE);
256 }
257
258 if (scb->flags & SCB_FREEZE_QUEUE) {
259 scsipi_periph_thaw(periph, 1);
260 scb->flags &= ~SCB_FREEZE_QUEUE;
261 }
262
263 if (scb->flags & SCB_REQUEUE)
264 ahd_set_transaction_status(scb, XS_REQUEUE);
265
266 ahd_lock(ahd, &s);
267 ahd_free_scb(ahd, scb);
268 ahd_unlock(ahd, &s);
269
270 scsipi_done(xs);
271 }
272
/*
 * ahd_action - scsipi "adapter request" entry point.  Dispatches the
 * three request types issued by the scsipi mid-layer:
 *   ADAPTER_REQ_RUN_XFER      - allocate an SCB for the scsipi_xfer,
 *                               fill in addressing, then hand off to
 *                               ahd_setup_data().
 *   ADAPTER_REQ_GROW_RESOURCES - unsupported; just logged.
 *   ADAPTER_REQ_SET_XFER_MODE - translate scsipi capability bits
 *                               (wide/sync/tagged/DT) into a transfer
 *                               negotiation goal for the core driver.
 */
static void
ahd_action(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
{
	struct ahd_softc *ahd;
	struct ahd_initiator_tinfo *tinfo;
	struct ahd_tmode_tstate *tstate;

	ahd = (void *)chan->chan_adapter->adapt_dev;

	switch(req) {

	case ADAPTER_REQ_RUN_XFER:
	{
		struct scsipi_xfer *xs;
		struct scsipi_periph *periph;
		struct scb *scb;
		struct hardware_scb *hscb;
		u_int target_id;
		u_int our_id;
		u_int col_idx;
		char channel;
		int s;

		xs = arg;
		periph = xs->xs_periph;

		SC_DEBUG(periph, SCSIPI_DB3, ("ahd_action\n"));

		target_id = periph->periph_target;
		our_id = ahd->our_id;
		channel = (chan->chan_channel == 1) ? 'B' : 'A';

		/*
		 * get an scb to use.
		 */
		ahd_lock(ahd, &s);
		tinfo = ahd_fetch_transinfo(ahd, channel, our_id,
		    target_id, &tstate);

		/*
		 * Tagged or packetized (IU) commands must never share
		 * a collision column; otherwise group by target/lun.
		 */
		if (xs->xs_tag_type != 0 ||
		    (tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0)
			col_idx = AHD_NEVER_COL_IDX;
		else
			col_idx = AHD_BUILD_COL_IDX(target_id,
			    periph->periph_lun);

		if ((scb = ahd_get_scb(ahd, col_idx)) == NULL) {
			/* Out of SCBs: tell the mid-layer to retry later. */
			xs->error = XS_RESOURCE_SHORTAGE;
			ahd_unlock(ahd, &s);
			scsipi_done(xs);
			return;
		}
		ahd_unlock(ahd, &s);

		hscb = scb->hscb;

		SC_DEBUG(periph, SCSIPI_DB3, ("start scb(%p)\n", scb));
		scb->xs = xs;

		/*
		 * Put all the arguments for the xfer in the scb
		 */
		hscb->control = 0;
		/*
		 * NOTE(review): "sim" is not declared in this scope;
		 * BUILD_SCSIID is presumably a macro that ignores its
		 * second argument on NetBSD — confirm in aic79xx_osm.h.
		 */
		hscb->scsiid = BUILD_SCSIID(ahd, sim, target_id, our_id);
		hscb->lun = periph->periph_lun;
		if (xs->xs_control & XS_CTL_RESET) {
			hscb->cdb_len = 0;
			scb->flags |= SCB_DEVICE_RESET;
			hscb->control |= MK_MESSAGE;
			hscb->task_management = SIU_TASKMGMT_LUN_RESET;
			/*
			 * NOTE(review): after executing the reset SCB we
			 * still fall through to ahd_setup_data() below,
			 * which executes it a second time — verify this
			 * is intentional for the reset path.
			 */
			ahd_execute_scb(scb, NULL, 0);
		} else {
			hscb->task_management = 0;
		}

		ahd_setup_data(ahd, xs, scb);
		break;
	}

	case ADAPTER_REQ_GROW_RESOURCES:
		/* Not supported: SCB pool is sized at attach time. */
		printf("%s: ADAPTER_REQ_GROW_RESOURCES\n", ahd_name(ahd));
		break;

	case ADAPTER_REQ_SET_XFER_MODE:
	{
		struct scsipi_xfer_mode *xm = arg;
		struct ahd_devinfo devinfo;
		int target_id, our_id, first;
		u_int width;
		int s;
		char channel;
		u_int ppr_options, period, offset;
		uint16_t old_autoneg;

		target_id = xm->xm_target;
		our_id = chan->chan_id;
		channel = 'A';
		s = splbio();
		tinfo = ahd_fetch_transinfo(ahd, channel, our_id, target_id,
		    &tstate);
		ahd_compile_devinfo(&devinfo, our_id, target_id,
		    0, channel, ROLE_INITIATOR);

		old_autoneg = tstate->auto_negotiate;

		/*
		 * XXX since the period and offset are not provided here,
		 * fake things by forcing a renegotiation using the user
		 * settings if this is called for the first time (i.e.
		 * during probe). Also, cap various values at the user
		 * values, assuming that the user set it up that way.
		 */
		if (ahd->inited_target[target_id] == 0) {
			period = tinfo->user.period;
			offset = tinfo->user.offset;
			ppr_options = tinfo->user.ppr_options;
			width = tinfo->user.width;
			tstate->tagenable |=
			    (ahd->user_tagenable & devinfo.target_mask);
			tstate->discenable |=
			    (ahd->user_discenable & devinfo.target_mask);
			ahd->inited_target[target_id] = 1;
			first = 1;
		} else
			first = 0;
		/*
		 * NOTE(review): when first == 0 and the peripheral
		 * advertises sync/DT capability, period/offset/
		 * ppr_options are read below without having been
		 * initialized on this path — confirm against later
		 * revisions of this file.
		 */

		if (xm->xm_mode & (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
			width = MSG_EXT_WDTR_BUS_16_BIT;
		else
			width = MSG_EXT_WDTR_BUS_8_BIT;

		/* Clamp the width to both hardware and user limits. */
		ahd_validate_width(ahd, NULL, &width, ROLE_UNKNOWN);
		if (width > tinfo->user.width)
			width = tinfo->user.width;
		ahd_set_width(ahd, &devinfo, width, AHD_TRANS_GOAL, FALSE);

		if (!(xm->xm_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT))) {
			/* Async only: no sync parameters at all. */
			period = 0;
			offset = 0;
			ppr_options = 0;
		}

		if ((xm->xm_mode & PERIPH_CAP_DT) &&
		    (tinfo->user.ppr_options & MSG_EXT_PPR_DT_REQ))
			ppr_options |= MSG_EXT_PPR_DT_REQ;
		else
			ppr_options &= ~MSG_EXT_PPR_DT_REQ;

		/* Packetized (IU) requires both disconnect and tags. */
		if ((tstate->discenable & devinfo.target_mask) == 0 ||
		    (tstate->tagenable & devinfo.target_mask) == 0)
			ppr_options &= ~MSG_EXT_PPR_IU_REQ;

		if ((xm->xm_mode & PERIPH_CAP_TQING) &&
		    (ahd->user_tagenable & devinfo.target_mask))
			tstate->tagenable |= devinfo.target_mask;
		else
			tstate->tagenable &= ~devinfo.target_mask;

		/* Pick the fastest rate the options allow, then
		 * validate the offset for that rate. */
		ahd_find_syncrate(ahd, &period, &ppr_options, AHD_SYNCRATE_MAX);
		ahd_validate_offset(ahd, NULL, period, &offset,
		    MSG_EXT_WDTR_BUS_8_BIT, ROLE_UNKNOWN);
		if (offset == 0) {
			/* No offset means async: drop sync/PPR goals. */
			period = 0;
			ppr_options = 0;
		}
		if (ppr_options != 0
		    && tinfo->user.transport_version >= 3) {
			tinfo->goal.transport_version =
			    tinfo->user.transport_version;
			tinfo->curr.transport_version =
			    tinfo->user.transport_version;
		}

		ahd_set_syncrate(ahd, &devinfo, period, offset,
		    ppr_options, AHD_TRANS_GOAL, FALSE);

		/*
		 * If this is the first request, and no negotiation is
		 * needed, just confirm the state to the scsipi layer,
		 * so that it can print a message.
		 */
		if (old_autoneg == tstate->auto_negotiate && first) {
			xm->xm_mode = 0;
			xm->xm_period = tinfo->curr.period;
			xm->xm_offset = tinfo->curr.offset;
			if (tinfo->curr.width == MSG_EXT_WDTR_BUS_16_BIT)
				xm->xm_mode |= PERIPH_CAP_WIDE16;
			if (tinfo->curr.period)
				xm->xm_mode |= PERIPH_CAP_SYNC;
			if (tstate->tagenable & devinfo.target_mask)
				xm->xm_mode |= PERIPH_CAP_TQING;
			if (tinfo->curr.ppr_options & MSG_EXT_PPR_DT_REQ)
				xm->xm_mode |= PERIPH_CAP_DT;
			scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, xm);
		}
		splx(s);
	}
	}

	return;
}
474
/*
 * ahd_execute_scb - finish building an SCB once its DMA segments are
 * known, then queue it to the adapter.  Called either with a loaded
 * DMA map (dm_segs/nsegments from bus_dmamap_load) or with
 * (NULL, 0) for commands without data transfer.  For polled
 * (XS_CTL_POLL) commands this routine also spins until completion.
 */
static void
ahd_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments)
{
	struct scb *scb;
	struct scsipi_xfer *xs;
	struct ahd_softc *ahd;
	struct ahd_initiator_tinfo *tinfo;
	struct ahd_tmode_tstate *tstate;
	u_int mask;
	int s;

	scb = (struct scb*)arg;
	xs = scb->xs;
	/* Reset all completion state before (re)issuing. */
	xs->error = 0;
	xs->status = 0;
	xs->xs_status = 0;
	ahd = (void*)xs->xs_periph->periph_channel->chan_adapter->adapt_dev;

	scb->sg_count = 0;
	if (nsegments != 0) {
		void *sg;
		int op;
		u_int i;

		ahd_setup_data_scb(ahd, scb);

		/* Copy the segments into our SG list */
		for (i = nsegments, sg = scb->sg_list; i > 0; i--) {

			sg = ahd_sg_setup(ahd, scb, sg, dm_segs->ds_addr,
			    dm_segs->ds_len,
			    /*last*/i == 1);
			dm_segs++;
		}

		if (xs->xs_control & XS_CTL_DATA_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;

		bus_dmamap_sync(ahd->parent_dmat, scb->dmamap, 0,
		    scb->dmamap->dm_mapsize, op);
	}

	ahd_lock(ahd, &s);

	/*
	 * Last time we need to check if this SCB needs to
	 * be aborted.
	 *
	 * NOTE(review): this compares the SCSI status returned by
	 * ahd_get_scsi_status() against XS_STS_DONE, which is an
	 * xs_status flag — looks like it was meant to test
	 * xs->xs_status; confirm against later revisions.
	 */
	if (ahd_get_scsi_status(scb) == XS_STS_DONE) {
		if (nsegments != 0)
			bus_dmamap_unload(ahd->parent_dmat,
			    scb->dmamap);
		ahd_free_scb(ahd, scb);
		ahd_unlock(ahd, &s);
		return;
	}

	tinfo = ahd_fetch_transinfo(ahd, SCSIID_CHANNEL(ahd, scb->hscb->scsiid),
	    SCSIID_OUR_ID(scb->hscb->scsiid),
	    SCSIID_TARGET(ahd, scb->hscb->scsiid),
	    &tstate);

	mask = SCB_GET_TARGET_MASK(ahd, scb);

	/* Apply per-target disconnect/tagging policy to this SCB. */
	if ((tstate->discenable & mask) != 0)
		scb->hscb->control |= DISCENB;

	if ((tstate->tagenable & mask) != 0)
		scb->hscb->control |= xs->xs_tag_type|TAG_ENB;

	if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU) != 0) {
		scb->flags |= SCB_PACKETIZED;
		if (scb->hscb->task_management != 0)
			scb->hscb->control &= ~MK_MESSAGE;
	}

#if 0	/* This looks like it makes sense at first, but it can loop */
	if ((xs->xs_control & XS_CTL_DISCOVERY) &&
	    (tinfo->goal.width != 0
	     || tinfo->goal.period != 0
	     || tinfo->goal.ppr_options != 0)) {
		scb->flags |= SCB_NEGOTIATE;
		scb->hscb->control |= MK_MESSAGE;
	} else
#endif
	if ((tstate->auto_negotiate & mask) != 0) {
		/* Core driver wants to renegotiate with this target. */
		scb->flags |= SCB_AUTO_NEGOTIATE;
		scb->hscb->control |= MK_MESSAGE;
	}

	LIST_INSERT_HEAD(&ahd->pending_scbs, scb, pending_links);

	scb->flags |= SCB_ACTIVE;

	if (!(xs->xs_control & XS_CTL_POLL)) {
		/* timeout is in ms; convert to ticks, dividing first
		 * for large values to avoid integer overflow. */
		callout_reset(&scb->xs->xs_callout, xs->timeout > 1000000 ?
		    (xs->timeout / 1000) * hz : (xs->timeout * hz) / 1000,
		    ahd_timeout, scb);
	}

	if ((scb->flags & SCB_TARGET_IMMEDIATE) != 0) {
		/* Define a mapping from our tag to the SCB. */
		ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = scb;
		ahd_pause(ahd);
		ahd_set_scbptr(ahd, SCB_GET_TAG(scb));
		ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_TARG);
		ahd_unpause(ahd);
	} else {
		ahd_queue_scb(ahd, scb);
	}

	if (!(xs->xs_control & XS_CTL_POLL)) {
		ahd_unlock(ahd, &s);
		return;
	}
	/*
	 * If we can't use interrupts, poll for completion
	 */
	SC_DEBUG(xs->xs_periph, SCSIPI_DB3, ("cmd_poll\n"));
	do {
		if (ahd_poll(ahd, xs->timeout)) {
			if (!(xs->xs_control & XS_CTL_SILENT))
				printf("cmd fail\n");
			ahd_timeout(scb);
			break;
		}
	} while (!(xs->xs_status & XS_STS_DONE));

	ahd_unlock(ahd, &s);
}
607
608 static int
609 ahd_poll(struct ahd_softc *ahd, int wait)
610 {
611
612 while (--wait) {
613 DELAY(1000);
614 if (ahd_inb(ahd, INTSTAT) & INT_PEND)
615 break;
616 }
617
618 if (wait == 0) {
619 printf("%s: board is not responding\n", ahd_name(ahd));
620 return (EIO);
621 }
622
623 ahd_intr((void *)ahd);
624 return (0);
625 }
626
627
628 static void
629 ahd_setup_data(struct ahd_softc *ahd, struct scsipi_xfer *xs,
630 struct scb *scb)
631 {
632 struct hardware_scb *hscb;
633
634 hscb = scb->hscb;
635 xs->resid = xs->status = 0;
636
637 hscb->cdb_len = xs->cmdlen;
638 if (hscb->cdb_len > MAX_CDB_LEN) {
639 int s;
640 /*
641 * Should CAM start to support CDB sizes
642 * greater than 16 bytes, we could use
643 * the sense buffer to store the CDB.
644 */
645 ahd_set_transaction_status(scb,
646 XS_DRIVER_STUFFUP);
647
648 ahd_lock(ahd, &s);
649 ahd_free_scb(ahd, scb);
650 ahd_unlock(ahd, &s);
651 scsipi_done(xs);
652 }
653 memcpy(hscb->shared_data.idata.cdb, xs->cmd, hscb->cdb_len);
654
655 /* Only use S/G if there is a transfer */
656 if (xs->datalen) {
657 int error;
658
659 error = bus_dmamap_load(ahd->parent_dmat,
660 scb->dmamap, xs->data,
661 xs->datalen, NULL,
662 ((xs->xs_control & XS_CTL_NOSLEEP) ?
663 BUS_DMA_NOWAIT : BUS_DMA_WAITOK) |
664 BUS_DMA_STREAMING |
665 ((xs->xs_control & XS_CTL_DATA_IN) ?
666 BUS_DMA_READ : BUS_DMA_WRITE));
667 if (error) {
668 #ifdef AHD_DEBUG
669 printf("%s: in ahc_setup_data(): bus_dmamap_load() "
670 "= %d\n",
671 ahd_name(ahd), error);
672 #endif
673 xs->error = XS_RESOURCE_SHORTAGE;
674 scsipi_done(xs);
675 return;
676 }
677 ahd_execute_scb(scb,
678 scb->dmamap->dm_segs,
679 scb->dmamap->dm_nsegs);
680 } else {
681 ahd_execute_scb(scb, NULL, 0);
682 }
683 }
684
/*
 * ahd_timeout - callout handler fired when an SCB's watchdog expires.
 * Pauses the controller; if the SCB already completed (a previous
 * timeout or interrupt handled it) just resumes, otherwise dumps the
 * card state and resets the channel to recover.
 */
void
ahd_timeout(void *arg)
{
	struct scb *scb;
	struct ahd_softc *ahd;
	ahd_mode_state saved_modes;	/* only used by the #if 0 block below */
	int s;

	scb = (struct scb *)arg;
	ahd = (struct ahd_softc *)scb->ahd_softc;

	printf("%s: ahd_timeout\n", ahd_name(ahd));

	ahd_lock(ahd, &s);

	/* Quiesce the chip so its state can be examined safely. */
	ahd_pause_and_flushwork(ahd);
	saved_modes = ahd_save_modes(ahd);
#if 0
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	ahd_outb(ahd, SCSISIGO, ACKO);
	printf("set ACK\n");
	ahd_outb(ahd, SCSISIGO, 0);
	printf("clearing Ack\n");
	ahd_restore_modes(ahd, saved_modes);
#endif
	if ((scb->flags & SCB_ACTIVE) == 0) {
		/* Previous timeout took care of me already */
		printf("%s: Timedout SCB already complete. "
		    "Interrupts may not be functioning.\n", ahd_name(ahd));
		ahd_unpause(ahd);
		ahd_unlock(ahd, &s);
		return;
	}

	ahd_print_path(ahd, scb);
	printf("SCB 0x%x - timed out\n", SCB_GET_TAG(scb));
	ahd_dump_card_state(ahd);
	/*
	 * NOTE(review): "sim" is not declared here; SIM_CHANNEL is
	 * presumably a macro that ignores its second argument on
	 * NetBSD — confirm in aic79xx_osm.h.
	 */
	ahd_reset_channel(ahd, SIM_CHANNEL(ahd, sim),
	    /*initiate reset*/TRUE);
	ahd_unlock(ahd, &s);
	return;
}
727
728 int
729 ahd_platform_alloc(struct ahd_softc *ahd, void *platform_arg)
730 {
731 ahd->platform_data = malloc(sizeof(struct ahd_platform_data), M_DEVBUF,
732 M_NOWAIT /*| M_ZERO*/);
733 if (ahd->platform_data == NULL)
734 return (ENOMEM);
735
736 memset(ahd->platform_data, 0, sizeof(struct ahd_platform_data));
737
738 return (0);
739 }
740
/*
 * Release the platform-specific softc data allocated by
 * ahd_platform_alloc().
 */
void
ahd_platform_free(struct ahd_softc *ahd)
{
	free(ahd->platform_data, M_DEVBUF);
}
746
/*
 * Comparison hook used by the core driver when ordering softcs.
 */
int
ahd_softc_comp(struct ahd_softc *lahd, struct ahd_softc *rahd)
{
	/* We don't sort softcs under NetBSD so report equal always */
	return (0);
}
753
754 int
755 ahd_detach(struct device *self, int flags)
756 {
757 int rv = 0;
758
759 struct ahd_softc *ahd = (struct ahd_softc*)self;
760
761 if (ahd->sc_child != NULL)
762 rv = config_detach((void *)ahd->sc_child, flags);
763
764 shutdownhook_disestablish(ahd->shutdown_hook);
765
766 ahd_free(ahd);
767
768 return rv;
769 }
770
771 void
772 ahd_platform_set_tags(struct ahd_softc *ahd,
773 struct ahd_devinfo *devinfo, ahd_queue_alg alg)
774 {
775 struct ahd_tmode_tstate *tstate;
776
777 ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid,
778 devinfo->target, &tstate);
779
780 if (alg != AHD_QUEUE_NONE)
781 tstate->tagenable |= devinfo->target_mask;
782 else
783 tstate->tagenable &= ~devinfo->target_mask;
784 }
785
/*
 * ahd_send_async - forward an asynchronous event from the core driver
 * to the scsipi layer.  Only transfer-negotiation completion and bus
 * reset events are propagated; BDR notifications are ignored.
 * (The first parameter is named "ahc" — naming carried over from the
 * aic7xxx driver this file was derived from.)
 */
void
ahd_send_async(struct ahd_softc *ahc, char channel, u_int target, u_int lun,
	       ac_code code, void *opt_arg)
{
	struct ahd_tmode_tstate *tstate;
	struct ahd_initiator_tinfo *tinfo;
	struct ahd_devinfo devinfo;
	struct scsipi_channel *chan;
	struct scsipi_xfer_mode xm;

#ifdef DIAGNOSTIC
	if (channel != 'A')
		panic("ahd_send_async: not channel A");
#endif
	chan = &ahc->sc_channel;
	switch (code) {
	case AC_TRANSFER_NEG:
		tinfo = ahd_fetch_transinfo(ahc, channel, ahc->our_id, target,
		    &tstate);
		ahd_compile_devinfo(&devinfo, ahc->our_id, target, lun,
		    channel, ROLE_UNKNOWN);
		/*
		 * Don't bother if negotiating. XXX?
		 */
		if (tinfo->curr.period != tinfo->goal.period
		    || tinfo->curr.width != tinfo->goal.width
		    || tinfo->curr.offset != tinfo->goal.offset
		    || tinfo->curr.ppr_options != tinfo->goal.ppr_options)
			break;
		/* Translate the negotiated settings into scsipi
		 * capability bits and report them upward. */
		xm.xm_target = target;
		xm.xm_mode = 0;
		xm.xm_period = tinfo->curr.period;
		xm.xm_offset = tinfo->curr.offset;
		if (tinfo->goal.ppr_options & MSG_EXT_PPR_DT_REQ)
			xm.xm_mode |= PERIPH_CAP_DT;
		if (tinfo->curr.width == MSG_EXT_WDTR_BUS_16_BIT)
			xm.xm_mode |= PERIPH_CAP_WIDE16;
		if (tinfo->curr.period)
			xm.xm_mode |= PERIPH_CAP_SYNC;
		if (tstate->tagenable & devinfo.target_mask)
			xm.xm_mode |= PERIPH_CAP_TQING;
		scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, &xm);
		break;
	case AC_BUS_RESET:
		scsipi_async_event(chan, ASYNC_EVENT_RESET, NULL);
		/* FALLTHROUGH */
	case AC_SENT_BDR:
	default:
		break;
	}
}
836