/* $NetBSD: aic79xx_osm.c,v 1.2 2003/04/21 16:52:07 fvdl Exp $ */

/*
 * Bus independent NetBSD shim for the aic7xxx based adaptec SCSI controllers
 *
 * Copyright (c) 1994-2002 Justin T. Gibbs.
 * Copyright (c) 2001-2002 Adaptec Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU Public License ("GPL").
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * //depot/aic7xxx/freebsd/dev/aic7xxx/aic79xx_osm.c#26 $
 *
 * $FreeBSD: src/sys/dev/aic7xxx/aic79xx_osm.c,v 1.8 2003/02/27 23:23:16 gibbs Exp $
 */
/*
 * Ported from FreeBSD by Pascal Renauld, Network Storage Solutions, Inc.
 * - April 2003
 */

#include <dev/ic/aic79xx_osm.h>
#include <dev/ic/aic7xxx_cam.h>
#include <dev/ic/aic79xx_inline.h>

#ifndef AHD_TMODE_ENABLE
#define AHD_TMODE_ENABLE 0
#endif

static int	ahd_ioctl(struct scsipi_channel *channel, u_long cmd,
		    caddr_t addr, int flag, struct proc *p);
static void	ahd_action(struct scsipi_channel *chan,
		    scsipi_adapter_req_t req, void *arg);
static void	ahd_execute_scb(void *arg, bus_dma_segment_t *dm_segs,
		    int nsegments);
static int	ahd_poll(struct ahd_softc *ahd, int wait);
static void	ahd_setup_data(struct ahd_softc *ahd, struct scsipi_xfer *xs,
		    struct scb *scb);

#if NOT_YET
static void	ahd_set_recoveryscb(struct ahd_softc *ahd, struct scb *scb);
#endif
/*
 * Attach all the sub-devices we can find
 */
int
ahd_attach(struct ahd_softc *ahd)
{
	int s;
	char ahd_info[256];

	ahd_controller_info(ahd, ahd_info);
	printf("%s: %s\n", ahd->sc_dev.dv_xname, ahd_info);

	ahd_lock(ahd, &s);

	ahd->sc_adapter.adapt_dev = &ahd->sc_dev;
	ahd->sc_adapter.adapt_nchannels = 1;

	ahd->sc_adapter.adapt_openings = AHD_MAX_QUEUE;
	ahd->sc_adapter.adapt_max_periph = 32;

	ahd->sc_adapter.adapt_ioctl = ahd_ioctl;
	ahd->sc_adapter.adapt_minphys = ahd_minphys;
	ahd->sc_adapter.adapt_request = ahd_action;

	ahd->sc_channel.chan_adapter = &ahd->sc_adapter;
	ahd->sc_channel.chan_bustype = &scsi_bustype;
	ahd->sc_channel.chan_channel = 0;
	ahd->sc_channel.chan_ntargets = AHD_NUM_TARGETS;
	ahd->sc_channel.chan_nluns = 8 /*AHD_NUM_LUNS*/;
	ahd->sc_channel.chan_id = ahd->our_id;

	ahd->sc_child = config_found((void *)ahd, &ahd->sc_channel, scsiprint);

	ahd_intr_enable(ahd, TRUE);

	ahd_unlock(ahd, &s);

	return (1);
}

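/*
 * Adapter ioctl entry point.  Only SCBUSIORESET is handled; it forces a
 * reset of the SCSI channel.
 */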
static int
ahd_ioctl(struct scsipi_channel *channel, u_long cmd,
    caddr_t addr, int flag, struct proc *p)
{
	struct ahd_softc *ahd = (void *)channel->chan_adapter->adapt_dev;
	int s, ret = ENOTTY;

	switch (cmd) {
	case SCBUSIORESET:
		s = splbio();
		ahd_reset_channel(ahd, channel->chan_channel == 1 ? 'B' : 'A',
		    TRUE);
		splx(s);
		ret = 0;
		break;
	default:
		break;
	}

	return ret;
}

/*
 * Catch an interrupt from the adapter
 */
void
ahd_platform_intr(void *arg)
{
	struct ahd_softc *ahd;

	ahd = (struct ahd_softc *)arg;

	printf("%s: ahd_platform_intr\n", ahd_name(ahd));

	ahd_intr(ahd);
}

/*
 * We have an scb which has been processed by the
 * adapter, now we look to see how the operation went.
 */
void
ahd_done(struct ahd_softc *ahd, struct scb *scb)
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	int target;
	int s;

	LIST_REMOVE(scb, pending_links);

	xs = scb->xs;
	periph = xs->xs_periph;

	callout_stop(&scb->xs->xs_callout);

	target = periph->periph_target;

	if (xs->datalen) {
		int op;

		if (xs->xs_control & XS_CTL_DATA_IN)
			op = BUS_DMASYNC_POSTREAD;
		else
			op = BUS_DMASYNC_POSTWRITE;

		bus_dmamap_sync(ahd->parent_dmat, scb->dmamap, 0,
		    scb->dmamap->dm_mapsize, op);
		bus_dmamap_unload(ahd->parent_dmat, scb->dmamap);
	}

	/*
	 * If the recovery SCB completes, we have to be
	 * out of our timeout.
	 */
	if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
		struct scb *list_scb;

		/*
		 * We were able to complete the command successfully,
		 * so reinstate the timeouts for all other pending
		 * commands.
		 */
		LIST_FOREACH(list_scb, &ahd->pending_scbs, pending_links) {
			struct scsipi_xfer *txs = list_scb->xs;

			if (!(txs->xs_control & XS_CTL_POLL)) {
				callout_reset(&txs->xs_callout,
				    (txs->timeout > 1000000) ?
				    (txs->timeout / 1000) * hz :
				    (txs->timeout * hz) / 1000,
				    ahd_timeout, list_scb);
			}
		}

		if (ahd_get_transaction_status(scb) != XS_NOERROR)
			ahd_set_transaction_status(scb, XS_TIMEOUT);
		scsipi_printaddr(xs->xs_periph);
		printf("%s: no longer in timeout, status = %x\n",
		    ahd_name(ahd), xs->status);
	}

	if (xs->error != XS_NOERROR) {
		/* Don't clobber any existing error state */
	} else if ((xs->status == SCSI_STATUS_BUSY) ||
	    (xs->status == SCSI_STATUS_QUEUE_FULL)) {
		ahd_set_transaction_status(scb, XS_BUSY);
		printf("%s: drive (ID %d, LUN %d) queue full (SCB 0x%x)\n",
		    ahd_name(ahd), SCB_GET_TARGET(ahd, scb), SCB_GET_LUN(scb),
		    SCB_GET_TAG(scb));
	} else if ((scb->flags & SCB_SENSE) != 0) {
		/*
		 * We performed autosense retrieval.
		 *
		 * Zero the sense data before having
		 * the drive fill it.  The SCSI spec mandates
		 * that any untransferred data should be
		 * assumed to be zero.  Complete the 'bounce'
		 * of sense information through buffers accessible
		 * via bus-space by copying it into the client's
		 * scsipi_xfer.
		 */
		memset(&xs->sense.scsi_sense, 0, sizeof(xs->sense.scsi_sense));
		memcpy(&xs->sense.scsi_sense, ahd_get_sense_buf(ahd, scb),
		    sizeof(struct scsipi_sense_data));

		ahd_set_transaction_status(scb, XS_SENSE);
	} else if ((scb->flags & SCB_PKT_SENSE) != 0) {
		struct scsi_status_iu_header *siu;
		u_int sense_len;
		int i;

		/*
		 * Copy only the sense data into the provided buffer.
		 */
		siu = (struct scsi_status_iu_header *)scb->sense_data;
		sense_len = MIN(scsi_4btoul(siu->sense_length),
		    sizeof(xs->sense.scsi_sense));
		memset(&xs->sense.scsi_sense, 0, sizeof(xs->sense.scsi_sense));
		memcpy(&xs->sense.scsi_sense,
		    scb->sense_data + SIU_SENSE_OFFSET(siu), sense_len);
		printf("Copied %d bytes of sense data offset %d:", sense_len,
		    SIU_SENSE_OFFSET(siu));
		for (i = 0; i < sense_len; i++)
			printf(" 0x%x", ((uint8_t *)&xs->sense.scsi_sense)[i]);
		printf("\n");

		ahd_set_transaction_status(scb, XS_SENSE);
	}

	if (scb->flags & SCB_FREEZE_QUEUE) {
		scsipi_periph_thaw(periph, 1);
		scb->flags &= ~SCB_FREEZE_QUEUE;
	}

	if (scb->flags & SCB_REQUEUE)
		ahd_set_transaction_status(scb, XS_REQUEUE);

	ahd_lock(ahd, &s);
	ahd_free_scb(ahd, scb);
	ahd_unlock(ahd, &s);

	scsipi_done(xs);
}

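/*
 * Entry point for requests from the scsipi layer: start a transfer,
 * grow adapter resources, or update transfer mode negotiation settings.
 */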
static void
ahd_action(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
{
	struct ahd_softc *ahd;
	struct ahd_initiator_tinfo *tinfo;
	struct ahd_tmode_tstate *tstate;
	int s;

	ahd = (void *)chan->chan_adapter->adapt_dev;

	if (ahd->inited_channels[0] == 0) {
		if (ahd->flags & AHD_RESET_BUS_A) {
			s = splbio();
			ahd_reset_channel(ahd, 'A', TRUE);
			splx(s);
		}
		ahd->inited_channels[0] = 1;
	}

	switch (req) {

	case ADAPTER_REQ_RUN_XFER:
	    {
		struct scsipi_xfer *xs;
		struct scsipi_periph *periph;
		struct scb *scb;
		struct hardware_scb *hscb;
		u_int target_id;
		u_int our_id;
		u_int col_idx;
		char channel;
		int s;

		xs = arg;
		periph = xs->xs_periph;

		SC_DEBUG(periph, SCSIPI_DB3, ("ahd_action\n"));

		target_id = periph->periph_target;
		our_id = ahd->our_id;
		channel = (chan->chan_channel == 1) ? 'B' : 'A';

		/*
		 * Get an scb to use.
		 */
		ahd_lock(ahd, &s);
		tinfo = ahd_fetch_transinfo(ahd, channel, our_id,
		    target_id, &tstate);

		col_idx = AHD_NEVER_COL_IDX; /* ??? */

		if ((scb = ahd_get_scb(ahd, col_idx)) == NULL) {
			xs->error = XS_RESOURCE_SHORTAGE;
			ahd_unlock(ahd, &s);
			scsipi_done(xs);
			return;
		}
		ahd_unlock(ahd, &s);

		hscb = scb->hscb;

		SC_DEBUG(periph, SCSIPI_DB3, ("start scb(%p)\n", scb));
		scb->xs = xs;

		/*
		 * Put all the arguments for the xfer in the scb
		 */
		hscb->control = 0;
		hscb->scsiid = BUILD_SCSIID(ahd, sim, target_id, our_id);
		hscb->lun = periph->periph_lun;
		if (xs->xs_control & XS_CTL_RESET) {
			hscb->cdb_len = 0;
			scb->flags |= SCB_DEVICE_RESET;
			hscb->control |= MK_MESSAGE;
			hscb->task_management = SIU_TASKMGMT_LUN_RESET;
			ahd_execute_scb(scb, NULL, 0);
		} else {
			hscb->task_management = 0;
		}

		ahd_setup_data(ahd, xs, scb);
		break;
	    }

	case ADAPTER_REQ_GROW_RESOURCES:
		printf("%s: ADAPTER_REQ_GROW_RESOURCES\n", ahd_name(ahd));
		break;

	case ADAPTER_REQ_SET_XFER_MODE:
	    {
		struct scsipi_xfer_mode *xm = arg;
		struct ahd_devinfo devinfo;
		int target_id, our_id, first;
		u_int width;
		int s;
		char channel;

		target_id = xm->xm_target;
		our_id = chan->chan_id;
		channel = 'A';
		s = splbio();
		tinfo = ahd_fetch_transinfo(ahd, channel, our_id, target_id,
		    &tstate);
		ahd_compile_devinfo(&devinfo, our_id, target_id,
		    0, channel, ROLE_INITIATOR);

		/*
		 * XXX since the period and offset are not provided here,
		 * fake things by forcing a renegotiation using the user
		 * settings if this is called for the first time (i.e.
		 * during probe). Also, cap various values at the user
		 * values, assuming that the user set it up that way.
		 */
		if (ahd->inited_target[target_id] == 0) {
			tinfo->goal = tinfo->user;
			tstate->tagenable |=
			    (ahd->user_tagenable & devinfo.target_mask);
			tstate->discenable |=
			    (ahd->user_discenable & devinfo.target_mask);
			ahd->inited_target[target_id] = 1;
			first = 1;
		} else
			first = 0;

		if (xm->xm_mode & (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
			width = MSG_EXT_WDTR_BUS_16_BIT;
		else
			width = MSG_EXT_WDTR_BUS_8_BIT;

		ahd_validate_width(ahd, NULL, &width, ROLE_UNKNOWN);
		if (width > tinfo->user.width)
			width = tinfo->user.width;
		tinfo->goal.width = width;

		if (!(xm->xm_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT))) {
			tinfo->goal.period = 0;
			tinfo->goal.offset = 0;
			tinfo->goal.ppr_options = 0;
		}

		if ((xm->xm_mode & PERIPH_CAP_DT) &&
		    (tinfo->user.ppr_options & MSG_EXT_PPR_DT_REQ))
			tinfo->goal.ppr_options |= MSG_EXT_PPR_DT_REQ;
		else
			tinfo->goal.ppr_options &= ~MSG_EXT_PPR_DT_REQ;

		if ((xm->xm_mode & PERIPH_CAP_TQING) &&
		    (ahd->user_tagenable & devinfo.target_mask))
			tstate->tagenable |= devinfo.target_mask;
		else
			tstate->tagenable &= ~devinfo.target_mask;

		/*
		 * If this is the first request, and no negotiation is
		 * needed, just confirm the state to the scsipi layer,
		 * so that it can print a message.
		 */
		if (!ahd_update_neg_request(ahd, &devinfo, tstate,
		    tinfo, AHD_NEG_IF_NON_ASYNC) && first)
			scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, xm);
		splx(s);
	    }
	}

	return;
}

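/*
 * Finish setting up an scb once its data buffer (if any) has been
 * DMA-mapped: build the S/G list, apply the per-target transfer options,
 * queue the scb to the controller and, for polled transfers, wait for
 * completion.
 */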
static void
ahd_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments)
{
	struct scb *scb;
	struct scsipi_xfer *xs;
	struct ahd_softc *ahd;
	struct ahd_initiator_tinfo *tinfo;
	struct ahd_tmode_tstate *tstate;
	u_int mask;
	int s;

	scb = (struct scb *)arg;
	xs = scb->xs;
	xs->error = 0;
	xs->status = 0;
	xs->xs_status = 0;
	ahd = (void *)xs->xs_periph->periph_channel->chan_adapter->adapt_dev;

	scb->sg_count = 0;
	if (nsegments != 0) {
		void *sg;
		int op;
		u_int i;

		ahd_setup_data_scb(ahd, scb);

		/* Copy the segments into our SG list */
		for (i = nsegments, sg = scb->sg_list; i > 0; i--) {

			sg = ahd_sg_setup(ahd, scb, sg, dm_segs->ds_addr,
			    dm_segs->ds_len,
			    /*last*/i == 1);
			dm_segs++;
		}

		if (xs->xs_control & XS_CTL_DATA_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;

		bus_dmamap_sync(ahd->parent_dmat, scb->dmamap, 0,
		    scb->dmamap->dm_mapsize, op);
	}

	ahd_lock(ahd, &s);

	/*
	 * Last time we need to check if this SCB needs to
	 * be aborted.
	 */
	if (ahd_get_scsi_status(scb) == XS_STS_DONE) {
		if (nsegments != 0)
			bus_dmamap_unload(ahd->parent_dmat, scb->dmamap);
		ahd_free_scb(ahd, scb);
		ahd_unlock(ahd, &s);
		return;
	}

	tinfo = ahd_fetch_transinfo(ahd, SCSIID_CHANNEL(ahd, scb->hscb->scsiid),
	    SCSIID_OUR_ID(scb->hscb->scsiid),
	    SCSIID_TARGET(ahd, scb->hscb->scsiid),
	    &tstate);

	mask = SCB_GET_TARGET_MASK(ahd, scb);

	if ((tstate->discenable & mask) != 0)
		scb->hscb->control |= DISCENB;

	if ((tstate->tagenable & mask) != 0)
		scb->hscb->control |= xs->xs_tag_type | TAG_ENB;

	if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU) != 0) {
		scb->flags |= SCB_PACKETIZED;
		if (scb->hscb->task_management != 0)
			scb->hscb->control &= ~MK_MESSAGE;
	}

	if ((xs->xs_control & XS_CTL_DISCOVERY) &&
	    (tinfo->goal.width != 0
	     || tinfo->goal.period != 0
	     || tinfo->goal.ppr_options != 0)) {
		scb->flags |= SCB_NEGOTIATE;
		scb->hscb->control |= MK_MESSAGE;
	} else if ((tstate->auto_negotiate & mask) != 0) {
		scb->flags |= SCB_AUTO_NEGOTIATE;
		scb->hscb->control |= MK_MESSAGE;
	}

	LIST_INSERT_HEAD(&ahd->pending_scbs, scb, pending_links);

	scb->flags |= SCB_ACTIVE;

	if (!(xs->xs_control & XS_CTL_POLL)) {
		callout_reset(&scb->xs->xs_callout, xs->timeout > 1000000 ?
		    (xs->timeout / 1000) * hz : (xs->timeout * hz) / 1000,
		    ahd_timeout, scb);
	}

	if ((scb->flags & SCB_TARGET_IMMEDIATE) != 0) {
		/* Define a mapping from our tag to the SCB. */
		ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = scb;
		ahd_pause(ahd);
		ahd_set_scbptr(ahd, SCB_GET_TAG(scb));
		ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_TARG);
		ahd_unpause(ahd);
	} else {
		ahd_queue_scb(ahd, scb);
	}

	if (!(xs->xs_control & XS_CTL_POLL)) {
		ahd_unlock(ahd, &s);
		return;
	}
	/*
	 * If we can't use interrupts, poll for completion
	 */
	SC_DEBUG(xs->xs_periph, SCSIPI_DB3, ("cmd_poll\n"));
	do {
		if (ahd_poll(ahd, xs->timeout)) {
			if (!(xs->xs_control & XS_CTL_SILENT))
				printf("cmd fail\n");
			ahd_timeout(scb);
			break;
		}
	} while (!(xs->xs_status & XS_STS_DONE));

	ahd_unlock(ahd, &s);
}

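/*
 * Busy-wait, in 1ms steps and for at most 'wait' milliseconds, for the
 * controller to post an interrupt, then run the interrupt handler by hand.
 * Returns EIO if the controller never responded, 0 otherwise.
 */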
static int
ahd_poll(struct ahd_softc *ahd, int wait)
{

	while (--wait) {
		DELAY(1000);
		if (ahd_inb(ahd, INTSTAT) & INT_PEND)
			break;
	}

	if (wait == 0) {
		printf("%s: board is not responding\n", ahd_name(ahd));
		return (EIO);
	}

	ahd_intr((void *)ahd);
	return (0);
}

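/*
 * Copy the CDB into the hardware scb and DMA-map the data buffer (if any),
 * then hand the scb to ahd_execute_scb().
 */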
static void
ahd_setup_data(struct ahd_softc *ahd, struct scsipi_xfer *xs,
    struct scb *scb)
{
	struct hardware_scb *hscb;

	hscb = scb->hscb;
	xs->resid = xs->status = 0;

	hscb->cdb_len = xs->cmdlen;
	if (hscb->cdb_len > MAX_CDB_LEN) {
		int s;
		/*
		 * Should CAM start to support CDB sizes
		 * greater than 16 bytes, we could use
		 * the sense buffer to store the CDB.
		 */
		ahd_set_transaction_status(scb,
		    XS_DRIVER_STUFFUP);

		ahd_lock(ahd, &s);
		ahd_free_scb(ahd, scb);
		ahd_unlock(ahd, &s);
		scsipi_done(xs);
		return;
	}
	memcpy(hscb->shared_data.idata.cdb, xs->cmd, hscb->cdb_len);

	/* Only use S/G if there is a transfer */
	if (xs->datalen) {
		int error;

		error = bus_dmamap_load(ahd->parent_dmat,
		    scb->dmamap, xs->data,
		    xs->datalen, NULL,
		    ((xs->xs_control & XS_CTL_NOSLEEP) ?
		     BUS_DMA_NOWAIT : BUS_DMA_WAITOK) |
		    BUS_DMA_STREAMING |
		    ((xs->xs_control & XS_CTL_DATA_IN) ?
		     BUS_DMA_READ : BUS_DMA_WRITE));
		if (error) {
#ifdef AHD_DEBUG
			printf("%s: in ahd_setup_data(): bus_dmamap_load() "
			    "= %d\n",
			    ahd_name(ahd), error);
#endif
			xs->error = XS_RESOURCE_SHORTAGE;
			scsipi_done(xs);
			return;
		}
		ahd_execute_scb(scb,
		    scb->dmamap->dm_segs,
		    scb->dmamap->dm_nsegs);
	} else {
		ahd_execute_scb(scb, NULL, 0);
	}
}

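/*
 * Transfer timeout handler: dump the controller state and reset the
 * channel the timed-out scb was issued on.
 */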
void
ahd_timeout(void *arg)
{
	struct scb *scb;
	struct ahd_softc *ahd;
	ahd_mode_state saved_modes;
	int s;
	int target;
	int lun;
	char channel;

	scb = (struct scb *)arg;
	ahd = (struct ahd_softc *)scb->ahd_softc;

	printf("%s: ahd_timeout\n", ahd_name(ahd));

	ahd_lock(ahd, &s);

	ahd_pause_and_flushwork(ahd);
	saved_modes = ahd_save_modes(ahd);
#if 0
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	ahd_outb(ahd, SCSISIGO, ACKO);
	printf("set ACK\n");
	ahd_outb(ahd, SCSISIGO, 0);
	printf("clearing Ack\n");
	ahd_restore_modes(ahd, saved_modes);
#endif
	if ((scb->flags & SCB_ACTIVE) == 0) {
		/* Previous timeout took care of me already */
		printf("%s: Timedout SCB already complete. "
		    "Interrupts may not be functioning.\n", ahd_name(ahd));
		ahd_unpause(ahd);
		ahd_unlock(ahd, &s);
		return;
	}

	target = SCB_GET_TARGET(ahd, scb);
	channel = SCB_GET_CHANNEL(ahd, scb);
	lun = SCB_GET_LUN(scb);

	ahd_print_path(ahd, scb);
	printf("SCB 0x%x - timed out\n", SCB_GET_TAG(scb));
	ahd_dump_card_state(ahd);
	ahd_reset_channel(ahd, channel,
	    /*initiate reset*/TRUE);
	ahd_unlock(ahd, &s);
	return;
}

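/*
 * Allocate and zero the platform-specific portion of the softc.
 */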
int
ahd_platform_alloc(struct ahd_softc *ahd, void *platform_arg)
{
	ahd->platform_data = malloc(sizeof(struct ahd_platform_data), M_DEVBUF,
	    M_NOWAIT /*| M_ZERO*/);
	if (ahd->platform_data == NULL)
		return (ENOMEM);

	memset(ahd->platform_data, 0, sizeof(struct ahd_platform_data));

	return (0);
}

void
ahd_platform_free(struct ahd_softc *ahd)
{
	free(ahd->platform_data, M_DEVBUF);
}

int
ahd_softc_comp(struct ahd_softc *lahd, struct ahd_softc *rahd)
{
	/* We don't sort softcs under NetBSD so report equal always */
	return (0);
}

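/*
 * Detach the controller: detach the child scsibus, remove the shutdown
 * hook and release the softc resources.
 */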
int
ahd_detach(struct device *self, int flags)
{
	int rv = 0;

	struct ahd_softc *ahd = (struct ahd_softc *)self;

	if (ahd->sc_child != NULL)
		rv = config_detach((void *)ahd->sc_child, flags);

	shutdownhook_disestablish(ahd->shutdown_hook);

	ahd_free(ahd);

	return rv;
}

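/*
 * Enable or disable tagged queueing for the device described by 'devinfo',
 * as requested by the core driver.
 */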
void
ahd_platform_set_tags(struct ahd_softc *ahd,
    struct ahd_devinfo *devinfo, ahd_queue_alg alg)
{
	struct ahd_initiator_tinfo *tinfo;
	struct ahd_tmode_tstate *tstate;

	tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid,
	    devinfo->target, &tstate);

	if (alg != AHD_QUEUE_NONE)
		tstate->tagenable |= devinfo->target_mask;
	else
		tstate->tagenable &= ~devinfo->target_mask;
}

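/*
 * Deliver asynchronous event notifications (completed transfer
 * negotiations, bus resets) from the core driver to the scsipi layer.
 */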
void
ahd_send_async(struct ahd_softc *ahc, char channel, u_int target, u_int lun,
    ac_code code, void *opt_arg)
{
	struct ahd_tmode_tstate *tstate;
	struct ahd_initiator_tinfo *tinfo;
	struct ahd_devinfo devinfo;
	struct scsipi_channel *chan;
	struct scsipi_xfer_mode xm;

#ifdef DIAGNOSTIC
	if (channel != 'A')
		panic("ahd_send_async: not channel A");
#endif
	chan = &ahc->sc_channel;
	switch (code) {
	case AC_TRANSFER_NEG:
		tinfo = ahd_fetch_transinfo(ahc, channel, ahc->our_id, target,
		    &tstate);
		ahd_compile_devinfo(&devinfo, ahc->our_id, target, lun,
		    channel, ROLE_UNKNOWN);
		/*
		 * Don't bother if negotiating. XXX?
		 */
		if (tinfo->curr.period != tinfo->goal.period
		    || tinfo->curr.width != tinfo->goal.width
		    || tinfo->curr.offset != tinfo->goal.offset
		    || tinfo->curr.ppr_options != tinfo->goal.ppr_options)
			break;
		xm.xm_target = target;
		xm.xm_mode = 0;
		xm.xm_period = tinfo->curr.period;
		xm.xm_offset = tinfo->curr.offset;
		if (tinfo->goal.ppr_options & MSG_EXT_PPR_DT_REQ)
			xm.xm_mode |= PERIPH_CAP_DT;
		if (tinfo->curr.width == MSG_EXT_WDTR_BUS_16_BIT)
			xm.xm_mode |= PERIPH_CAP_WIDE16;
		if (tinfo->curr.period)
			xm.xm_mode |= PERIPH_CAP_SYNC;
		if (tstate->tagenable & devinfo.target_mask)
			xm.xm_mode |= PERIPH_CAP_TQING;
		scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, &xm);
		break;
	case AC_BUS_RESET:
		scsipi_async_event(chan, ASYNC_EVENT_RESET, NULL);
	case AC_SENT_BDR:
	default:
		break;
	}
}