aic79xx_osm.c revision 1.7 1 /* $NetBSD: aic79xx_osm.c,v 1.7 2003/10/30 01:58:17 simonb Exp $ */
2
3 /*
4 * Bus independent NetBSD shim for the aic7xxx based adaptec SCSI controllers
5 *
6 * Copyright (c) 1994-2002 Justin T. Gibbs.
7 * Copyright (c) 2001-2002 Adaptec Inc.
8 * All rights reserved.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions, and the following disclaimer,
15 * without modification.
16 * 2. The name of the author may not be used to endorse or promote products
17 * derived from this software without specific prior written permission.
18 *
19 * Alternatively, this software may be distributed under the terms of the
20 * GNU Public License ("GPL").
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
26 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * //depot/aic7xxx/freebsd/dev/aic7xxx/aic79xx_osm.c#26 $
35 *
36 * $FreeBSD: src/sys/dev/aic7xxx/aic79xx_osm.c,v 1.11 2003/05/04 00:20:07 gibbs Exp $
37 */
38 /*
39 * Ported from FreeBSD by Pascal Renauld, Network Storage Solutions, Inc.
40 * - April 2003
41 */
42
43 #include <sys/cdefs.h>
44 __KERNEL_RCSID(0, "$NetBSD: aic79xx_osm.c,v 1.7 2003/10/30 01:58:17 simonb Exp $");
45
46 #include <dev/ic/aic79xx_osm.h>
47 #include <dev/ic/aic7xxx_cam.h>
48 #include <dev/ic/aic79xx_inline.h>
49
50 #ifndef AHD_TMODE_ENABLE
51 #define AHD_TMODE_ENABLE 0
52 #endif
53
54 static int ahd_ioctl(struct scsipi_channel *channel, u_long cmd,
55 caddr_t addr, int flag, struct proc *p);
56 static void ahd_action(struct scsipi_channel *chan,
57 scsipi_adapter_req_t req, void *arg);
58 static void ahd_execute_scb(void *arg, bus_dma_segment_t *dm_segs,
59 int nsegments);
60 static int ahd_poll(struct ahd_softc *ahd, int wait);
61 static void ahd_setup_data(struct ahd_softc *ahd, struct scsipi_xfer *xs,
62 struct scb *scb);
63
64 #if NOT_YET
65 static void ahd_set_recoveryscb(struct ahd_softc *ahd, struct scb *scb);
66 #endif
67
68 /*
69 * Attach all the sub-devices we can find
70 */
int
ahd_attach(struct ahd_softc *ahd)
{
	int s;
	char ahd_info[256];

	/* Announce the controller before touching the scsipi glue. */
	ahd_controller_info(ahd, ahd_info);
	printf("%s: %s\n", ahd->sc_dev.dv_xname, ahd_info);

	/* Hold the controller lock across adapter/channel setup. */
	ahd_lock(ahd, &s);

	/* Describe this adapter to the scsipi mid-layer. */
	ahd->sc_adapter.adapt_dev = &ahd->sc_dev;
	ahd->sc_adapter.adapt_nchannels = 1;

	ahd->sc_adapter.adapt_openings = AHD_MAX_QUEUE;
	ahd->sc_adapter.adapt_max_periph = 32;

	ahd->sc_adapter.adapt_ioctl = ahd_ioctl;
	ahd->sc_adapter.adapt_minphys = ahd_minphys;
	ahd->sc_adapter.adapt_request = ahd_action;

	/* Single SCSI channel ('A'); LUN count hard-wired to 8. */
	ahd->sc_channel.chan_adapter = &ahd->sc_adapter;
	ahd->sc_channel.chan_bustype = &scsi_bustype;
	ahd->sc_channel.chan_channel = 0;
	ahd->sc_channel.chan_ntargets = AHD_NUM_TARGETS;
	ahd->sc_channel.chan_nluns = 8 /*AHD_NUM_LUNS*/;
	ahd->sc_channel.chan_id = ahd->our_id;

	/* Attach the scsibus child; scsiprint formats the attach line. */
	ahd->sc_child = config_found((void *)ahd, &ahd->sc_channel, scsiprint);

	/* Interrupts come on only after the child bus exists. */
	ahd_intr_enable(ahd, TRUE);

	/* Optionally reset the bus, as requested by SEEPROM settings. */
	if (ahd->flags & AHD_RESET_BUS_A)
		ahd_reset_channel(ahd, 'A', TRUE);

	ahd_unlock(ahd, &s);

	return (1);
}
110
111 static int
112 ahd_ioctl(struct scsipi_channel *channel, u_long cmd,
113 caddr_t addr, int flag, struct proc *p)
114 {
115 struct ahd_softc *ahd = (void *)channel->chan_adapter->adapt_dev;
116 int s, ret = ENOTTY;
117
118 switch (cmd) {
119 case SCBUSIORESET:
120 s = splbio();
121 ahd_reset_channel(ahd, channel->chan_channel == 1 ? 'B' : 'A', TRUE);
122 splx(s);
123 ret = 0;
124 break;
125 default:
126 break;
127 }
128
129 return ret;
130 }
131
132 /*
133 * Catch an interrupt from the adapter
134 */
/*
 * Catch an interrupt from the adapter and hand it to the core driver.
 */
void
ahd_platform_intr(void *arg)
{
	struct ahd_softc *ahd;

	ahd = (struct ahd_softc *)arg;

	/*
	 * Fixed message punctuation ("%s;" -> "%s:") to match the
	 * driver's other diagnostics.  NOTE(review): this prints on
	 * EVERY interrupt and looks like leftover debug output --
	 * consider removing it or guarding it with AHD_DEBUG.
	 */
	printf("%s: ahd_platform_intr\n", ahd_name(ahd));

	ahd_intr(ahd);
}
146
/*
 * We have an scb which has been processed by the
 * adaptor; now we look to see how the operation went.
 */
151 void
152 ahd_done(struct ahd_softc *ahd, struct scb *scb)
153 {
154 struct scsipi_xfer *xs;
155 struct scsipi_periph *periph;
156 int s;
157
158 LIST_REMOVE(scb, pending_links);
159
160 xs = scb->xs;
161 periph = xs->xs_periph;
162
163 callout_stop(&scb->xs->xs_callout);
164
165 if (xs->datalen) {
166 int op;
167
168 if (xs->xs_control & XS_CTL_DATA_IN)
169 op = BUS_DMASYNC_POSTREAD;
170 else
171 op = BUS_DMASYNC_POSTWRITE;
172
173 bus_dmamap_sync(ahd->parent_dmat, scb->dmamap, 0,
174 scb->dmamap->dm_mapsize, op);
175 bus_dmamap_unload(ahd->parent_dmat, scb->dmamap);
176 }
177
178 /*
179 * If the recovery SCB completes, we have to be
180 * out of our timeout.
181 */
182 if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
183 struct scb *list_scb;
184
185 /*
186 * We were able to complete the command successfully,
187 * so reinstate the timeouts for all other pending
188 * commands.
189 */
190 LIST_FOREACH(list_scb, &ahd->pending_scbs, pending_links) {
191 struct scsipi_xfer *txs = list_scb->xs;
192
193 if (!(txs->xs_control & XS_CTL_POLL)) {
194 callout_reset(&txs->xs_callout,
195 (txs->timeout > 1000000) ?
196 (txs->timeout / 1000) * hz :
197 (txs->timeout * hz) / 1000,
198 ahd_timeout, list_scb);
199 }
200 }
201
202 if (ahd_get_transaction_status(scb) != XS_NOERROR)
203 ahd_set_transaction_status(scb, XS_TIMEOUT);
204 scsipi_printaddr(xs->xs_periph);
205 printf("%s: no longer in timeout, status = %x\n",
206 ahd_name(ahd), xs->status);
207 }
208
209 if (xs->error != XS_NOERROR) {
210 /* Don't clobber any existing error state */
211 } else if ((xs->status == SCSI_STATUS_BUSY) ||
212 (xs->status == SCSI_STATUS_QUEUE_FULL)) {
213 ahd_set_transaction_status(scb, XS_BUSY);
214 printf("%s: drive (ID %d, LUN %d) queue full (SCB 0x%x)\n",
215 ahd_name(ahd), SCB_GET_TARGET(ahd,scb), SCB_GET_LUN(scb), SCB_GET_TAG(scb));
216 } else if ((scb->flags & SCB_SENSE) != 0) {
217 /*
218 * We performed autosense retrieval.
219 *
220 * zero the sense data before having
221 * the drive fill it. The SCSI spec mandates
222 * that any untransferred data should be
223 * assumed to be zero. Complete the 'bounce'
224 * of sense information through buffers accessible
225 * via bus-space by copying it into the clients
226 * csio.
227 */
228 memset(&xs->sense.scsi_sense, 0, sizeof(xs->sense.scsi_sense));
229 memcpy(&xs->sense.scsi_sense, ahd_get_sense_buf(ahd, scb),
230 sizeof(struct scsipi_sense_data));
231
232 ahd_set_transaction_status(scb, XS_SENSE);
233 } else if ((scb->flags & SCB_PKT_SENSE) != 0) {
234 struct scsi_status_iu_header *siu;
235 u_int sense_len;
236 int i;
237
238 /*
239 * Copy only the sense data into the provided buffer.
240 */
241 siu = (struct scsi_status_iu_header *)scb->sense_data;
242 sense_len = MIN(scsi_4btoul(siu->sense_length),
243 sizeof(&xs->sense.scsi_sense));
244 memset(&xs->sense.scsi_sense, 0, sizeof(xs->sense.scsi_sense));
245 memcpy(&xs->sense.scsi_sense,
246 scb->sense_data + SIU_SENSE_OFFSET(siu), sense_len);
247 printf("Copied %d bytes of sense data offset %d:", sense_len,
248 SIU_SENSE_OFFSET(siu));
249 for (i = 0; i < sense_len; i++)
250 printf(" 0x%x", ((uint8_t *)&xs->sense.scsi_sense)[i]);
251 printf("\n");
252
253 ahd_set_transaction_status(scb, XS_SENSE);
254 }
255
256 if (scb->flags & SCB_FREEZE_QUEUE) {
257 scsipi_periph_thaw(periph, 1);
258 scb->flags &= ~SCB_FREEZE_QUEUE;
259 }
260
261 if (scb->flags & SCB_REQUEUE)
262 ahd_set_transaction_status(scb, XS_REQUEUE);
263
264 ahd_lock(ahd, &s);
265 ahd_free_scb(ahd, scb);
266 ahd_unlock(ahd, &s);
267
268 scsipi_done(xs);
269 }
270
/*
 * scsipi adapter request entry point: run a transfer, grow resources,
 * or (re)negotiate transfer modes for a target.
 */
static void
ahd_action(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
{
	struct ahd_softc *ahd;
	struct ahd_initiator_tinfo *tinfo;
	struct ahd_tmode_tstate *tstate;

	ahd = (void *)chan->chan_adapter->adapt_dev;

	switch(req) {

	case ADAPTER_REQ_RUN_XFER:
	{
		struct scsipi_xfer *xs;
		struct scsipi_periph *periph;
		struct scb *scb;
		struct hardware_scb *hscb;
		u_int target_id;
		u_int our_id;
		u_int col_idx;
		char channel;
		int s;

		xs = arg;
		periph = xs->xs_periph;

		SC_DEBUG(periph, SCSIPI_DB3, ("ahd_action\n"));

		target_id = periph->periph_target;
		our_id = ahd->our_id;
		channel = (chan->chan_channel == 1) ? 'B' : 'A';

		/*
		 * get an scb to use.
		 */
		ahd_lock(ahd, &s);
		tinfo = ahd_fetch_transinfo(ahd, channel, our_id,
		    target_id, &tstate);

		/*
		 * Tagged or packetized (IU) commands must not share a
		 * collision column; untagged commands collide per
		 * target/LUN so only one is active at a time.
		 */
		if (xs->xs_tag_type != 0 ||
		    (tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0)
			col_idx = AHD_NEVER_COL_IDX;
		else
			col_idx = AHD_BUILD_COL_IDX(target_id,
			    periph->periph_lun);

		/* Out of SCBs: tell the mid-layer to retry later. */
		if ((scb = ahd_get_scb(ahd, col_idx)) == NULL) {
			xs->error = XS_RESOURCE_SHORTAGE;
			ahd_unlock(ahd, &s);
			scsipi_done(xs);
			return;
		}
		ahd_unlock(ahd, &s);

		hscb = scb->hscb;

		SC_DEBUG(periph, SCSIPI_DB3, ("start scb(%p)\n", scb));
		scb->xs = xs;

		/*
		 * Put all the arguments for the xfer in the scb
		 */
		hscb->control = 0;
		/*
		 * NOTE(review): `sim` is not declared in this function;
		 * presumably the NetBSD BUILD_SCSIID macro ignores that
		 * argument -- confirm against aic7xxx_cam.h.
		 */
		hscb->scsiid = BUILD_SCSIID(ahd, sim, target_id, our_id);
		hscb->lun = periph->periph_lun;
		if (xs->xs_control & XS_CTL_RESET) {
			hscb->cdb_len = 0;
			scb->flags |= SCB_DEVICE_RESET;
			hscb->control |= MK_MESSAGE;
			hscb->task_management = SIU_TASKMGMT_LUN_RESET;
			/*
			 * NOTE(review): after this call we still fall
			 * through to ahd_setup_data() below, which calls
			 * ahd_execute_scb() a second time for the same
			 * SCB -- looks like a missing return; confirm
			 * against later revisions of this file.
			 */
			ahd_execute_scb(scb, NULL, 0);
		} else {
			hscb->task_management = 0;
		}

		ahd_setup_data(ahd, xs, scb);
		break;
	}

	case ADAPTER_REQ_GROW_RESOURCES:
		/* Not implemented; openings are fixed at attach time. */
		printf("%s: ADAPTER_REQ_GROW_RESOURCES\n", ahd_name(ahd));
		break;

	case ADAPTER_REQ_SET_XFER_MODE:
	{
		struct scsipi_xfer_mode *xm = arg;
		struct ahd_devinfo devinfo;
		int target_id, our_id, first;
		u_int width;
		int s;
		char channel;
		u_int ppr_options, period, offset;
		uint16_t old_autoneg;

		target_id = xm->xm_target;
		our_id = chan->chan_id;
		channel = 'A';
		s = splbio();
		tinfo = ahd_fetch_transinfo(ahd, channel, our_id, target_id,
		    &tstate);
		ahd_compile_devinfo(&devinfo, our_id, target_id,
		    0, channel, ROLE_INITIATOR);

		old_autoneg = tstate->auto_negotiate;

		/*
		 * XXX since the period and offset are not provided here,
		 * fake things by forcing a renegotiation using the user
		 * settings if this is called for the first time (i.e.
		 * during probe). Also, cap various values at the user
		 * values, assuming that the user set it up that way.
		 *
		 * NOTE(review): when this is NOT the first call
		 * (inited_target already set) and xm_mode requests
		 * SYNC/DT, period/offset/ppr_options are read below
		 * without ever being initialized -- verify against a
		 * later revision of this file.
		 */
		if (ahd->inited_target[target_id] == 0) {
			period = tinfo->user.period;
			offset = tinfo->user.offset;
			ppr_options = tinfo->user.ppr_options;
			width = tinfo->user.width;
			tstate->tagenable |=
			    (ahd->user_tagenable & devinfo.target_mask);
			tstate->discenable |=
			    (ahd->user_discenable & devinfo.target_mask);
			ahd->inited_target[target_id] = 1;
			first = 1;
		} else
			first = 0;

		if (xm->xm_mode & (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
			width = MSG_EXT_WDTR_BUS_16_BIT;
		else
			width = MSG_EXT_WDTR_BUS_8_BIT;

		/* Clamp the width to hardware and user limits. */
		ahd_validate_width(ahd, NULL, &width, ROLE_UNKNOWN);
		if (width > tinfo->user.width)
			width = tinfo->user.width;
		ahd_set_width(ahd, &devinfo, width, AHD_TRANS_GOAL, FALSE);

		/* No sync/DT capability: force async transfers. */
		if (!(xm->xm_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT))) {
			period = 0;
			offset = 0;
			ppr_options = 0;
		}

		if ((xm->xm_mode & PERIPH_CAP_DT) &&
		    (tinfo->user.ppr_options & MSG_EXT_PPR_DT_REQ))
			ppr_options |= MSG_EXT_PPR_DT_REQ;
		else
			ppr_options &= ~MSG_EXT_PPR_DT_REQ;

		/* Packetized (IU) mode needs both disconnect and tags. */
		if ((tstate->discenable & devinfo.target_mask) == 0 ||
		    (tstate->tagenable & devinfo.target_mask) == 0)
			ppr_options &= ~MSG_EXT_PPR_IU_REQ;

		if ((xm->xm_mode & PERIPH_CAP_TQING) &&
		    (ahd->user_tagenable & devinfo.target_mask))
			tstate->tagenable |= devinfo.target_mask;
		else
			tstate->tagenable &= ~devinfo.target_mask;

		/* Validate the rate/offset pair against the hardware. */
		ahd_find_syncrate(ahd, &period, &ppr_options, AHD_SYNCRATE_MAX);
		ahd_validate_offset(ahd, NULL, period, &offset,
		    MSG_EXT_WDTR_BUS_8_BIT, ROLE_UNKNOWN);
		if (offset == 0) {
			period = 0;
			ppr_options = 0;
		}
		/* PPR requires SPI transport version >= 3. */
		if (ppr_options != 0
		    && tinfo->user.transport_version >= 3) {
			tinfo->goal.transport_version =
			    tinfo->user.transport_version;
			tinfo->curr.transport_version =
			    tinfo->user.transport_version;
		}

		ahd_set_syncrate(ahd, &devinfo, period, offset,
		    ppr_options, AHD_TRANS_GOAL, FALSE);

		/*
		 * If this is the first request, and no negotiation is
		 * needed, just confirm the state to the scsipi layer,
		 * so that it can print a message.
		 */
		if (old_autoneg == tstate->auto_negotiate && first) {
			xm->xm_mode = 0;
			xm->xm_period = tinfo->curr.period;
			xm->xm_offset = tinfo->curr.offset;
			if (tinfo->curr.width == MSG_EXT_WDTR_BUS_16_BIT)
				xm->xm_mode |= PERIPH_CAP_WIDE16;
			if (tinfo->curr.period)
				xm->xm_mode |= PERIPH_CAP_SYNC;
			if (tstate->tagenable & devinfo.target_mask)
				xm->xm_mode |= PERIPH_CAP_TQING;
			if (tinfo->curr.ppr_options & MSG_EXT_PPR_DT_REQ)
				xm->xm_mode |= PERIPH_CAP_DT;
			scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, xm);
		}
		splx(s);
	}
	}

	return;
}
472
/*
 * Second half of command submission: build the hardware SG list from
 * the loaded DMA segments, apply per-target transfer options, queue
 * the SCB to the controller, and poll for completion if the transfer
 * is marked XS_CTL_POLL.  Also used as the reset-submission path with
 * dm_segs == NULL / nsegments == 0.
 */
static void
ahd_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments)
{
	struct scb *scb;
	struct scsipi_xfer *xs;
	struct ahd_softc *ahd;
	struct ahd_initiator_tinfo *tinfo;
	struct ahd_tmode_tstate *tstate;
	u_int mask;
	int s;

	scb = (struct scb*)arg;
	xs = scb->xs;
	xs->error = 0;
	xs->status = 0;
	xs->xs_status = 0;
	ahd = (void*)xs->xs_periph->periph_channel->chan_adapter->adapt_dev;

	scb->sg_count = 0;
	if (nsegments != 0) {
		void *sg;
		int op;
		u_int i;

		ahd_setup_data_scb(ahd, scb);

		/* Copy the segments into our SG list */
		for (i = nsegments, sg = scb->sg_list; i > 0; i--) {

			/* ahd_sg_setup returns the next free SG slot;
			 * the final segment is flagged as "last". */
			sg = ahd_sg_setup(ahd, scb, sg, dm_segs->ds_addr,
			    dm_segs->ds_len,
			    /*last*/i == 1);
			dm_segs++;
		}

		if (xs->xs_control & XS_CTL_DATA_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;

		/* Make the data buffer visible to the device. */
		bus_dmamap_sync(ahd->parent_dmat, scb->dmamap, 0,
		    scb->dmamap->dm_mapsize, op);
	}

	ahd_lock(ahd, &s);

	/*
	 * Last time we need to check if this SCB needs to
	 * be aborted.
	 */
	if (ahd_get_scsi_status(scb) == XS_STS_DONE) {
		if (nsegments != 0)
			bus_dmamap_unload(ahd->parent_dmat,
			    scb->dmamap);
		ahd_free_scb(ahd, scb);
		ahd_unlock(ahd, &s);
		return;
	}

	tinfo = ahd_fetch_transinfo(ahd, SCSIID_CHANNEL(ahd, scb->hscb->scsiid),
	    SCSIID_OUR_ID(scb->hscb->scsiid),
	    SCSIID_TARGET(ahd, scb->hscb->scsiid),
	    &tstate);

	mask = SCB_GET_TARGET_MASK(ahd, scb);

	/* Allow disconnection if enabled for this target. */
	if ((tstate->discenable & mask) != 0)
		scb->hscb->control |= DISCENB;

	/* Tagged queueing: pass the mid-layer's tag type through. */
	if ((tstate->tagenable & mask) != 0)
		scb->hscb->control |= xs->xs_tag_type|TAG_ENB;

	/*
	 * NOTE(review): the FreeBSD original tests MSG_EXT_PPR_IU_REQ
	 * here; confirm MSG_EXT_PPR_IU is the intended constant.
	 */
	if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU) != 0) {
		scb->flags |= SCB_PACKETIZED;
		if (scb->hscb->task_management != 0)
			scb->hscb->control &= ~MK_MESSAGE;
	}

#if 0 /* This looks like it makes sense at first, but it can loop */
	if ((xs->xs_control & XS_CTL_DISCOVERY) &&
	    (tinfo->goal.width != 0
	    || tinfo->goal.period != 0
	    || tinfo->goal.ppr_options != 0)) {
		scb->flags |= SCB_NEGOTIATE;
		scb->hscb->control |= MK_MESSAGE;
	} else
#endif
	/* Piggy-back an auto-negotiation message if one is pending. */
	if ((tstate->auto_negotiate & mask) != 0) {
		scb->flags |= SCB_AUTO_NEGOTIATE;
		scb->hscb->control |= MK_MESSAGE;
	}

	LIST_INSERT_HEAD(&ahd->pending_scbs, scb, pending_links);

	scb->flags |= SCB_ACTIVE;

	/* Interrupt-driven commands get a watchdog timeout (ms -> ticks). */
	if (!(xs->xs_control & XS_CTL_POLL)) {
		callout_reset(&scb->xs->xs_callout, xs->timeout > 1000000 ?
		    (xs->timeout / 1000) * hz : (xs->timeout * hz) / 1000,
		    ahd_timeout, scb);
	}

	if ((scb->flags & SCB_TARGET_IMMEDIATE) != 0) {
		/* Define a mapping from our tag to the SCB. */
		ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = scb;
		ahd_pause(ahd);
		ahd_set_scbptr(ahd, SCB_GET_TAG(scb));
		ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_TARG);
		ahd_unpause(ahd);
	} else {
		ahd_queue_scb(ahd, scb);
	}

	if (!(xs->xs_control & XS_CTL_POLL)) {
		ahd_unlock(ahd, &s);
		return;
	}
	/*
	 * If we can't use interrupts, poll for completion
	 */
	SC_DEBUG(xs->xs_periph, SCSIPI_DB3, ("cmd_poll\n"));
	do {
		if (ahd_poll(ahd, xs->timeout)) {
			if (!(xs->xs_control & XS_CTL_SILENT))
				printf("cmd fail\n");
			ahd_timeout(scb);
			break;
		}
	} while (!(xs->xs_status & XS_STS_DONE));

	ahd_unlock(ahd, &s);
}
605
606 static int
607 ahd_poll(struct ahd_softc *ahd, int wait)
608 {
609
610 while (--wait) {
611 DELAY(1000);
612 if (ahd_inb(ahd, INTSTAT) & INT_PEND)
613 break;
614 }
615
616 if (wait == 0) {
617 printf("%s: board is not responding\n", ahd_name(ahd));
618 return (EIO);
619 }
620
621 ahd_intr((void *)ahd);
622 return (0);
623 }
624
625
626 static void
627 ahd_setup_data(struct ahd_softc *ahd, struct scsipi_xfer *xs,
628 struct scb *scb)
629 {
630 struct hardware_scb *hscb;
631
632 hscb = scb->hscb;
633 xs->resid = xs->status = 0;
634
635 hscb->cdb_len = xs->cmdlen;
636 if (hscb->cdb_len > MAX_CDB_LEN) {
637 int s;
638 /*
639 * Should CAM start to support CDB sizes
640 * greater than 16 bytes, we could use
641 * the sense buffer to store the CDB.
642 */
643 ahd_set_transaction_status(scb,
644 XS_DRIVER_STUFFUP);
645
646 ahd_lock(ahd, &s);
647 ahd_free_scb(ahd, scb);
648 ahd_unlock(ahd, &s);
649 scsipi_done(xs);
650 }
651 memcpy(hscb->shared_data.idata.cdb, xs->cmd, hscb->cdb_len);
652
653 /* Only use S/G if there is a transfer */
654 if (xs->datalen) {
655 int error;
656
657 error = bus_dmamap_load(ahd->parent_dmat,
658 scb->dmamap, xs->data,
659 xs->datalen, NULL,
660 ((xs->xs_control & XS_CTL_NOSLEEP) ?
661 BUS_DMA_NOWAIT : BUS_DMA_WAITOK) |
662 BUS_DMA_STREAMING |
663 ((xs->xs_control & XS_CTL_DATA_IN) ?
664 BUS_DMA_READ : BUS_DMA_WRITE));
665 if (error) {
666 #ifdef AHD_DEBUG
667 printf("%s: in ahc_setup_data(): bus_dmamap_load() "
668 "= %d\n",
669 ahd_name(ahd), error);
670 #endif
671 xs->error = XS_RESOURCE_SHORTAGE;
672 scsipi_done(xs);
673 return;
674 }
675 ahd_execute_scb(scb,
676 scb->dmamap->dm_segs,
677 scb->dmamap->dm_nsegs);
678 } else {
679 ahd_execute_scb(scb, NULL, 0);
680 }
681 }
682
/*
 * Watchdog callout: a command queued by ahd_execute_scb() failed to
 * complete in time.  Dumps controller state and resets the channel,
 * which will complete (and error out) all pending commands.
 */
void
ahd_timeout(void *arg)
{
	struct scb *scb;
	struct ahd_softc *ahd;
	/* Only consumed inside the #if 0 block below. */
	ahd_mode_state saved_modes;
	int s;

	scb = (struct scb *)arg;
	ahd = (struct ahd_softc *)scb->ahd_softc;

	printf("%s: ahd_timeout\n", ahd_name(ahd));

	ahd_lock(ahd, &s);

	/* Quiesce the sequencer before inspecting/resetting. */
	ahd_pause_and_flushwork(ahd);
	saved_modes = ahd_save_modes(ahd);
#if 0
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	ahd_outb(ahd, SCSISIGO, ACKO);
	printf("set ACK\n");
	ahd_outb(ahd, SCSISIGO, 0);
	printf("clearing Ack\n");
	ahd_restore_modes(ahd, saved_modes);
#endif
	if ((scb->flags & SCB_ACTIVE) == 0) {
		/* Previous timeout took care of me already */
		printf("%s: Timedout SCB already complete. "
		    "Interrupts may not be functioning.\n", ahd_name(ahd));
		ahd_unpause(ahd);
		ahd_unlock(ahd, &s);
		return;
	}

	ahd_print_path(ahd, scb);
	printf("SCB 0x%x - timed out\n", SCB_GET_TAG(scb));
	ahd_dump_card_state(ahd);
	/*
	 * NOTE(review): `sim` is not declared here; presumably the
	 * NetBSD SIM_CHANNEL macro ignores its second argument --
	 * confirm against aic7xxx_cam.h.
	 */
	ahd_reset_channel(ahd, SIM_CHANNEL(ahd, sim),
	    /*initiate reset*/TRUE);
	ahd_unlock(ahd, &s);
	return;
}
725
726 int
727 ahd_platform_alloc(struct ahd_softc *ahd, void *platform_arg)
728 {
729 ahd->platform_data = malloc(sizeof(struct ahd_platform_data), M_DEVBUF,
730 M_NOWAIT /*| M_ZERO*/);
731 if (ahd->platform_data == NULL)
732 return (ENOMEM);
733
734 memset(ahd->platform_data, 0, sizeof(struct ahd_platform_data));
735
736 return (0);
737 }
738
739 void
740 ahd_platform_free(struct ahd_softc *ahd)
741 {
742 free(ahd->platform_data, M_DEVBUF);
743 }
744
/*
 * Softc ordering hook for the core driver.  NetBSD does not sort
 * softcs, so every pair of controllers compares as equal.
 */
int
ahd_softc_comp(struct ahd_softc *lahd, struct ahd_softc *rahd)
{
	return (0);
}
751
752 int
753 ahd_detach(struct device *self, int flags)
754 {
755 int rv = 0;
756
757 struct ahd_softc *ahd = (struct ahd_softc*)self;
758
759 if (ahd->sc_child != NULL)
760 rv = config_detach((void *)ahd->sc_child, flags);
761
762 shutdownhook_disestablish(ahd->shutdown_hook);
763
764 ahd_free(ahd);
765
766 return rv;
767 }
768
769 void
770 ahd_platform_set_tags(struct ahd_softc *ahd,
771 struct ahd_devinfo *devinfo, ahd_queue_alg alg)
772 {
773 struct ahd_tmode_tstate *tstate;
774
775 ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid,
776 devinfo->target, &tstate);
777
778 if (alg != AHD_QUEUE_NONE)
779 tstate->tagenable |= devinfo->target_mask;
780 else
781 tstate->tagenable &= ~devinfo->target_mask;
782 }
783
/*
 * Core-driver async event callback: forward transfer-negotiation
 * results and bus resets to the scsipi mid-layer.
 */
void
ahd_send_async(struct ahd_softc *ahc, char channel, u_int target, u_int lun,
	       ac_code code, void *opt_arg)
{
	struct ahd_tmode_tstate *tstate;
	struct ahd_initiator_tinfo *tinfo;
	struct ahd_devinfo devinfo;
	struct scsipi_channel *chan;
	struct scsipi_xfer_mode xm;

#ifdef DIAGNOSTIC
	/* This shim only ever attaches channel A. */
	if (channel != 'A')
		panic("ahd_send_async: not channel A");
#endif
	chan = &ahc->sc_channel;
	switch (code) {
	case AC_TRANSFER_NEG:
		tinfo = ahd_fetch_transinfo(ahc, channel, ahc->our_id, target,
		    &tstate);
		ahd_compile_devinfo(&devinfo, ahc->our_id, target, lun,
		    channel, ROLE_UNKNOWN);
		/*
		 * Don't bother if negotiating. XXX?
		 * (curr != goal means negotiation is still in flight;
		 * report only the settled result.)
		 */
		if (tinfo->curr.period != tinfo->goal.period
		    || tinfo->curr.width != tinfo->goal.width
		    || tinfo->curr.offset != tinfo->goal.offset
		    || tinfo->curr.ppr_options != tinfo->goal.ppr_options)
			break;
		/* Translate the negotiated settings into scsipi caps. */
		xm.xm_target = target;
		xm.xm_mode = 0;
		xm.xm_period = tinfo->curr.period;
		xm.xm_offset = tinfo->curr.offset;
		if (tinfo->goal.ppr_options & MSG_EXT_PPR_DT_REQ)
			xm.xm_mode |= PERIPH_CAP_DT;
		if (tinfo->curr.width == MSG_EXT_WDTR_BUS_16_BIT)
			xm.xm_mode |= PERIPH_CAP_WIDE16;
		if (tinfo->curr.period)
			xm.xm_mode |= PERIPH_CAP_SYNC;
		if (tstate->tagenable & devinfo.target_mask)
			xm.xm_mode |= PERIPH_CAP_TQING;
		scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, &xm);
		break;
	case AC_BUS_RESET:
		scsipi_async_event(chan, ASYNC_EVENT_RESET, NULL);
		/* FALLTHROUGH -- harmless; the next cases do nothing. */
	case AC_SENT_BDR:
	default:
		break;
	}
}
834