aic79xx_osm.c revision 1.4 1 /* $NetBSD: aic79xx_osm.c,v 1.4 2003/07/14 15:47:06 lukem Exp $ */
2
3 /*
4 * Bus independent NetBSD shim for the aic7xxx based adaptec SCSI controllers
5 *
6 * Copyright (c) 1994-2002 Justin T. Gibbs.
7 * Copyright (c) 2001-2002 Adaptec Inc.
8 * All rights reserved.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions, and the following disclaimer,
15 * without modification.
16 * 2. The name of the author may not be used to endorse or promote products
17 * derived from this software without specific prior written permission.
18 *
19 * Alternatively, this software may be distributed under the terms of the
20 * GNU Public License ("GPL").
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
26 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * //depot/aic7xxx/freebsd/dev/aic7xxx/aic79xx_osm.c#26 $
35 *
36 * $FreeBSD: src/sys/dev/aic7xxx/aic79xx_osm.c,v 1.8 2003/02/27 23:23:16 gibbs Exp $
37 */
38 /*
39 * Ported from FreeBSD by Pascal Renauld, Network Storage Solutions, Inc.
40 * - April 2003
41 */
42
43 #include <sys/cdefs.h>
44 __KERNEL_RCSID(0, "$NetBSD: aic79xx_osm.c,v 1.4 2003/07/14 15:47:06 lukem Exp $");
45
46 #include <dev/ic/aic79xx_osm.h>
47 #include <dev/ic/aic7xxx_cam.h>
48 #include <dev/ic/aic79xx_inline.h>
49
50 #ifndef AHD_TMODE_ENABLE
51 #define AHD_TMODE_ENABLE 0
52 #endif
53
54 static int ahd_ioctl(struct scsipi_channel *channel, u_long cmd,
55 caddr_t addr, int flag, struct proc *p);
56 static void ahd_action(struct scsipi_channel *chan,
57 scsipi_adapter_req_t req, void *arg);
58 static void ahd_execute_scb(void *arg, bus_dma_segment_t *dm_segs,
59 int nsegments);
60 static int ahd_poll(struct ahd_softc *ahd, int wait);
61 static void ahd_setup_data(struct ahd_softc *ahd, struct scsipi_xfer *xs,
62 struct scb *scb);
63
64 #if NOT_YET
65 static void ahd_set_recoveryscb(struct ahd_softc *ahd, struct scb *scb);
66 #endif
67
/*
 * Attach all the sub-devices we can find
 */
int
ahd_attach(struct ahd_softc *ahd)
{
	int s;
	char ahd_info[256];

	/* Announce the controller model/feature string on the console. */
	ahd_controller_info(ahd, ahd_info);
	printf("%s: %s\n", ahd->sc_dev.dv_xname, ahd_info);

	ahd_lock(ahd, &s);

	/*
	 * Fill in the scsipi_adapter: one channel, AHD_MAX_QUEUE
	 * command openings shared among at most 32 periphs.
	 */
	ahd->sc_adapter.adapt_dev = &ahd->sc_dev;
	ahd->sc_adapter.adapt_nchannels = 1;

	ahd->sc_adapter.adapt_openings = AHD_MAX_QUEUE;
	ahd->sc_adapter.adapt_max_periph = 32;

	ahd->sc_adapter.adapt_ioctl = ahd_ioctl;
	ahd->sc_adapter.adapt_minphys = ahd_minphys;
	ahd->sc_adapter.adapt_request = ahd_action;

	/* Describe the single SCSI bus hanging off this adapter. */
	ahd->sc_channel.chan_adapter = &ahd->sc_adapter;
	ahd->sc_channel.chan_bustype = &scsi_bustype;
	ahd->sc_channel.chan_channel = 0;
	ahd->sc_channel.chan_ntargets = AHD_NUM_TARGETS;
	ahd->sc_channel.chan_nluns = 8 /*AHD_NUM_LUNS*/;
	ahd->sc_channel.chan_id = ahd->our_id;

	/* Attach the scsibus child; scsiprint handles autoconf output. */
	ahd->sc_child = config_found((void *)ahd, &ahd->sc_channel, scsiprint);

	ahd_intr_enable(ahd, TRUE);

	/* Optionally reset the bus once, if requested by the flags. */
	if (ahd->flags & AHD_RESET_BUS_A)
		ahd_reset_channel(ahd, 'A', TRUE);

	ahd_unlock(ahd, &s);

	return (1);
}
110
111 static int
112 ahd_ioctl(struct scsipi_channel *channel, u_long cmd,
113 caddr_t addr, int flag, struct proc *p)
114 {
115 struct ahd_softc *ahd = (void *)channel->chan_adapter->adapt_dev;
116 int s, ret = ENOTTY;
117
118 switch (cmd) {
119 case SCBUSIORESET:
120 s = splbio();
121 ahd_reset_channel(ahd, channel->chan_channel == 1 ? 'B' : 'A', TRUE);
122 splx(s);
123 ret = 0;
124 break;
125 default:
126 break;
127 }
128
129 return ret;
130 }
131
/*
 * Catch an interrupt from the adapter
 */
void
ahd_platform_intr(void *arg)
{
	struct ahd_softc *ahd;

	ahd = (struct ahd_softc *)arg;

	/*
	 * Trace message; note this fires on *every* interrupt and is
	 * probably debug residue that belongs under AHD_DEBUG.  The
	 * "%s;" typo is corrected to the conventional "%s:" prefix.
	 */
	printf("%s: ahd_platform_intr\n", ahd_name(ahd));

	ahd_intr(ahd);
}
146
147 /*
148 * We have an scb which has been processed by the
149 * adaptor, now we look to see how the operation * went.
150 */
151 void
152 ahd_done(struct ahd_softc *ahd, struct scb *scb)
153 {
154 struct scsipi_xfer *xs;
155 struct scsipi_periph *periph;
156 int target;
157 int s;
158
159 LIST_REMOVE(scb, pending_links);
160
161 xs = scb->xs;
162 periph = xs->xs_periph;
163
164 callout_stop(&scb->xs->xs_callout);
165
166 target = periph->periph_target;
167
168 if (xs->datalen) {
169 int op;
170
171 if (xs->xs_control & XS_CTL_DATA_IN)
172 op = BUS_DMASYNC_POSTREAD;
173 else
174 op = BUS_DMASYNC_POSTWRITE;
175
176 bus_dmamap_sync(ahd->parent_dmat, scb->dmamap, 0,
177 scb->dmamap->dm_mapsize, op);
178 bus_dmamap_unload(ahd->parent_dmat, scb->dmamap);
179 }
180
181 /*
182 * If the recovery SCB completes, we have to be
183 * out of our timeout.
184 */
185 if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
186 struct scb *list_scb;
187
188 /*
189 * We were able to complete the command successfully,
190 * so reinstate the timeouts for all other pending
191 * commands.
192 */
193 LIST_FOREACH(list_scb, &ahd->pending_scbs, pending_links) {
194 struct scsipi_xfer *txs = list_scb->xs;
195
196 if (!(txs->xs_control & XS_CTL_POLL)) {
197 callout_reset(&txs->xs_callout,
198 (txs->timeout > 1000000) ?
199 (txs->timeout / 1000) * hz :
200 (txs->timeout * hz) / 1000,
201 ahd_timeout, list_scb);
202 }
203 }
204
205 if (ahd_get_transaction_status(scb) != XS_NOERROR)
206 ahd_set_transaction_status(scb, XS_TIMEOUT);
207 scsipi_printaddr(xs->xs_periph);
208 printf("%s: no longer in timeout, status = %x\n",
209 ahd_name(ahd), xs->status);
210 }
211
212 if (xs->error != XS_NOERROR) {
213 /* Don't clobber any existing error state */
214 } else if ((xs->status == SCSI_STATUS_BUSY) ||
215 (xs->status == SCSI_STATUS_QUEUE_FULL)) {
216 ahd_set_transaction_status(scb, XS_BUSY);
217 printf("%s: drive (ID %d, LUN %d) queue full (SCB 0x%x)\n",
218 ahd_name(ahd), SCB_GET_TARGET(ahd,scb), SCB_GET_LUN(scb), SCB_GET_TAG(scb));
219 } else if ((scb->flags & SCB_SENSE) != 0) {
220 /*
221 * We performed autosense retrieval.
222 *
223 * zero the sense data before having
224 * the drive fill it. The SCSI spec mandates
225 * that any untransferred data should be
226 * assumed to be zero. Complete the 'bounce'
227 * of sense information through buffers accessible
228 * via bus-space by copying it into the clients
229 * csio.
230 */
231 memset(&xs->sense.scsi_sense, 0, sizeof(xs->sense.scsi_sense));
232 memcpy(&xs->sense.scsi_sense, ahd_get_sense_buf(ahd, scb),
233 sizeof(struct scsipi_sense_data));
234
235 ahd_set_transaction_status(scb, XS_SENSE);
236 } else if ((scb->flags & SCB_PKT_SENSE) != 0) {
237 struct scsi_status_iu_header *siu;
238 u_int sense_len;
239 int i;
240
241 /*
242 * Copy only the sense data into the provided buffer.
243 */
244 siu = (struct scsi_status_iu_header *)scb->sense_data;
245 sense_len = MIN(scsi_4btoul(siu->sense_length),
246 sizeof(&xs->sense.scsi_sense));
247 memset(&xs->sense.scsi_sense, 0, sizeof(xs->sense.scsi_sense));
248 memcpy(&xs->sense.scsi_sense,
249 scb->sense_data + SIU_SENSE_OFFSET(siu), sense_len);
250 printf("Copied %d bytes of sense data offset %d:", sense_len,
251 SIU_SENSE_OFFSET(siu));
252 for (i = 0; i < sense_len; i++)
253 printf(" 0x%x", ((uint8_t *)&xs->sense.scsi_sense)[i]);
254 printf("\n");
255
256 ahd_set_transaction_status(scb, XS_SENSE);
257 }
258
259 if (scb->flags & SCB_FREEZE_QUEUE) {
260 scsipi_periph_thaw(periph, 1);
261 scb->flags &= ~SCB_FREEZE_QUEUE;
262 }
263
264 if (scb->flags & SCB_REQUEUE)
265 ahd_set_transaction_status(scb, XS_REQUEUE);
266
267 ahd_lock(ahd, &s);
268 ahd_free_scb(ahd, scb);
269 ahd_unlock(ahd, &s);
270
271 scsipi_done(xs);
272 }
273
/*
 * scsipi adapter request entry point: run a transfer, grow resources
 * (unsupported), or set transfer (sync/wide/tag) mode for a target.
 */
static void
ahd_action(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
{
	struct ahd_softc *ahd;
	struct ahd_initiator_tinfo *tinfo;
	struct ahd_tmode_tstate *tstate;

	ahd = (void *)chan->chan_adapter->adapt_dev;

	switch(req) {

	case ADAPTER_REQ_RUN_XFER:
	{
		struct scsipi_xfer *xs;
		struct scsipi_periph *periph;
		struct scb *scb;
		struct hardware_scb *hscb;
		u_int target_id;
		u_int our_id;
		u_int col_idx;
		char channel;
		int s;

		xs = arg;
		periph = xs->xs_periph;

		SC_DEBUG(periph, SCSIPI_DB3, ("ahd_action\n"));

		target_id = periph->periph_target;
		our_id = ahd->our_id;
		channel = (chan->chan_channel == 1) ? 'B' : 'A';

		/*
		 * get an scb to use.
		 */
		ahd_lock(ahd, &s);
		tinfo = ahd_fetch_transinfo(ahd, channel, our_id,
					    target_id, &tstate);

		col_idx = AHD_NEVER_COL_IDX; /* ??? */

		if ((scb = ahd_get_scb(ahd, col_idx)) == NULL) {
			/* Out of SCBs: ask the midlayer to retry later. */
			xs->error = XS_RESOURCE_SHORTAGE;
			ahd_unlock(ahd, &s);
			scsipi_done(xs);
			return;
		}
		ahd_unlock(ahd, &s);

		hscb = scb->hscb;

		SC_DEBUG(periph, SCSIPI_DB3, ("start scb(%p)\n", scb));
		scb->xs = xs;

		/*
		 * Put all the arguments for the xfer in the scb
		 */
		hscb->control = 0;
		/*
		 * NOTE(review): "sim" is not declared here; BUILD_SCSIID
		 * is presumably a macro that ignores that argument in the
		 * NetBSD port -- confirm against aic79xx_osm.h.
		 */
		hscb->scsiid = BUILD_SCSIID(ahd, sim, target_id, our_id);
		hscb->lun = periph->periph_lun;
		if (xs->xs_control & XS_CTL_RESET) {
			/* Turn this xfer into a LUN-reset task management. */
			hscb->cdb_len = 0;
			scb->flags |= SCB_DEVICE_RESET;
			hscb->control |= MK_MESSAGE;
			hscb->task_management = SIU_TASKMGMT_LUN_RESET;
			ahd_execute_scb(scb, NULL, 0);
			/*
			 * NOTE(review): the scb was executed above, yet
			 * control falls through to ahd_setup_data() below,
			 * which executes it again.  Looks like a missing
			 * break/return -- confirm before changing.
			 */
		} else {
			hscb->task_management = 0;
		}

		ahd_setup_data(ahd, xs, scb);
		break;
	}

	case ADAPTER_REQ_GROW_RESOURCES:
		/* Openings are fixed at attach time; nothing to grow. */
		printf("%s: ADAPTER_REQ_GROW_RESOURCES\n", ahd_name(ahd));
		break;

	case ADAPTER_REQ_SET_XFER_MODE:
	{
		struct scsipi_xfer_mode *xm = arg;
		struct ahd_devinfo devinfo;
		int target_id, our_id, first;
		u_int width;
		int s;
		char channel;

		target_id = xm->xm_target;
		our_id = chan->chan_id;
		channel = 'A';
		s = splbio();
		tinfo = ahd_fetch_transinfo(ahd, channel, our_id, target_id,
					    &tstate);
		ahd_compile_devinfo(&devinfo, our_id, target_id,
				    0, channel, ROLE_INITIATOR);

		/*
		 * XXX since the period and offset are not provided here,
		 * fake things by forcing a renegotiation using the user
		 * settings if this is called for the first time (i.e.
		 * during probe). Also, cap various values at the user
		 * values, assuming that the user set it up that way.
		 */
		if (ahd->inited_target[target_id] == 0) {
			tinfo->goal = tinfo->user;
			tstate->tagenable |=
			    (ahd->user_tagenable & devinfo.target_mask);
			tstate->discenable |=
			    (ahd->user_discenable & devinfo.target_mask);
			ahd->inited_target[target_id] = 1;
			first = 1;
		} else
			first = 0;

		/* Derive bus width from the periph's capabilities. */
		if (xm->xm_mode & (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
			width = MSG_EXT_WDTR_BUS_16_BIT;
		else
			width = MSG_EXT_WDTR_BUS_8_BIT;

		ahd_validate_width(ahd, NULL, &width, ROLE_UNKNOWN);
		if (width > tinfo->user.width)
			width = tinfo->user.width;
		tinfo->goal.width = width;

		/* No sync/DT capability: fall back to async transfers. */
		if (!(xm->xm_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT))) {
			tinfo->goal.period = 0;
			tinfo->goal.offset = 0;
			tinfo->goal.ppr_options = 0;
		}

		if ((xm->xm_mode & PERIPH_CAP_DT) &&
		    (tinfo->user.ppr_options & MSG_EXT_PPR_DT_REQ))
			tinfo->goal.ppr_options |= MSG_EXT_PPR_DT_REQ;
		else
			tinfo->goal.ppr_options &= ~MSG_EXT_PPR_DT_REQ;

		if ((xm->xm_mode & PERIPH_CAP_TQING) &&
		    (ahd->user_tagenable & devinfo.target_mask))
			tstate->tagenable |= devinfo.target_mask;
		else
			tstate->tagenable &= ~devinfo.target_mask;

		/*
		 * If this is the first request, and no negotiation is
		 * needed, just confirm the state to the scsipi layer,
		 * so that it can print a message.
		 */
		if (!ahd_update_neg_request(ahd, &devinfo, tstate,
		    tinfo, AHD_NEG_IF_NON_ASYNC) && first)
			scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, xm);
		splx(s);
	}
	}

	return;
}
430
/*
 * Finish setting up an scb (S/G list, tagging, negotiation flags) and
 * hand it to the controller.  Also serves as the bus_dmamap_load
 * callback shape: dm_segs/nsegments describe the mapped data buffer,
 * or nsegments == 0 for a transfer with no data phase.
 */
static void
ahd_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments)
{
	struct scb *scb;
	struct scsipi_xfer *xs;
	struct ahd_softc *ahd;
	struct ahd_initiator_tinfo *tinfo;
	struct ahd_tmode_tstate *tstate;
	u_int mask;
	int s;

	scb = (struct scb*)arg;
	xs = scb->xs;
	/* Reset per-xfer status before (re)issuing the command. */
	xs->error = 0;
	xs->status = 0;
	xs->xs_status = 0;
	ahd = (void*)xs->xs_periph->periph_channel->chan_adapter->adapt_dev;

	scb->sg_count = 0;
	if (nsegments != 0) {
		void *sg;
		int op;
		u_int i;

		ahd_setup_data_scb(ahd, scb);

		/* Copy the segments into our SG list */
		for (i = nsegments, sg = scb->sg_list; i > 0; i--) {

			sg = ahd_sg_setup(ahd, scb, sg, dm_segs->ds_addr,
					  dm_segs->ds_len,
					  /*last*/i == 1);
			dm_segs++;
		}

		/* Pre-DMA sync of the data buffer. */
		if (xs->xs_control & XS_CTL_DATA_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;

		bus_dmamap_sync(ahd->parent_dmat, scb->dmamap, 0,
				scb->dmamap->dm_mapsize, op);
	}

	ahd_lock(ahd, &s);

	/*
	 * Last time we need to check if this SCB needs to
	 * be aborted.
	 */
	if (ahd_get_scsi_status(scb) == XS_STS_DONE) {
		if (nsegments != 0)
			bus_dmamap_unload(ahd->parent_dmat,
					  scb->dmamap);
		ahd_free_scb(ahd, scb);
		ahd_unlock(ahd, &s);
		return;
	}

	tinfo = ahd_fetch_transinfo(ahd, SCSIID_CHANNEL(ahd, scb->hscb->scsiid),
				    SCSIID_OUR_ID(scb->hscb->scsiid),
				    SCSIID_TARGET(ahd, scb->hscb->scsiid),
				    &tstate);

	mask = SCB_GET_TARGET_MASK(ahd, scb);

	/* Apply per-target disconnect/tagging policy to the hscb. */
	if ((tstate->discenable & mask) != 0)
		scb->hscb->control |= DISCENB;

	if ((tstate->tagenable & mask) != 0)
		scb->hscb->control |= xs->xs_tag_type|TAG_ENB;

	if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU) != 0) {
		scb->flags |= SCB_PACKETIZED;
		if (scb->hscb->task_management != 0)
			scb->hscb->control &= ~MK_MESSAGE;
	}

	/* Request (re)negotiation when goals differ or auto-neg is due. */
	if ((xs->xs_control & XS_CTL_DISCOVERY) &&
	    (tinfo->goal.width != 0
	     || tinfo->goal.period != 0
	     || tinfo->goal.ppr_options != 0)) {
		scb->flags |= SCB_NEGOTIATE;
		scb->hscb->control |= MK_MESSAGE;
	} else if ((tstate->auto_negotiate & mask) != 0) {
		scb->flags |= SCB_AUTO_NEGOTIATE;
		scb->hscb->control |= MK_MESSAGE;
	}

	LIST_INSERT_HEAD(&ahd->pending_scbs, scb, pending_links);

	scb->flags |= SCB_ACTIVE;

	if (!(xs->xs_control & XS_CTL_POLL)) {
		/*
		 * Arm the watchdog (ms -> ticks; divide first for large
		 * timeouts to avoid integer overflow).
		 */
		callout_reset(&scb->xs->xs_callout, xs->timeout > 1000000 ?
			      (xs->timeout / 1000) * hz :
			      (xs->timeout * hz) / 1000,
			      ahd_timeout, scb);
	}

	if ((scb->flags & SCB_TARGET_IMMEDIATE) != 0) {
		/* Define a mapping from our tag to the SCB. */
		ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = scb;
		ahd_pause(ahd);
		ahd_set_scbptr(ahd, SCB_GET_TAG(scb));
		ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_TARG);
		ahd_unpause(ahd);
	} else {
		ahd_queue_scb(ahd, scb);
	}

	if (!(xs->xs_control & XS_CTL_POLL)) {
		ahd_unlock(ahd, &s);
		return;
	}
	/*
	 * If we can't use interrupts, poll for completion
	 */
	SC_DEBUG(xs->xs_periph, SCSIPI_DB3, ("cmd_poll\n"));
	do {
		if (ahd_poll(ahd, xs->timeout)) {
			if (!(xs->xs_control & XS_CTL_SILENT))
				printf("cmd fail\n");
			ahd_timeout(scb);
			break;
		}
	} while (!(xs->xs_status & XS_STS_DONE));

	ahd_unlock(ahd, &s);
}
560
561 static int
562 ahd_poll(struct ahd_softc *ahd, int wait)
563 {
564
565 while (--wait) {
566 DELAY(1000);
567 if (ahd_inb(ahd, INTSTAT) & INT_PEND)
568 break;
569 }
570
571 if (wait == 0) {
572 printf("%s: board is not responding\n", ahd_name(ahd));
573 return (EIO);
574 }
575
576 ahd_intr((void *)ahd);
577 return (0);
578 }
579
580
581 static void
582 ahd_setup_data(struct ahd_softc *ahd, struct scsipi_xfer *xs,
583 struct scb *scb)
584 {
585 struct hardware_scb *hscb;
586
587 hscb = scb->hscb;
588 xs->resid = xs->status = 0;
589
590 hscb->cdb_len = xs->cmdlen;
591 if (hscb->cdb_len > MAX_CDB_LEN) {
592 int s;
593 /*
594 * Should CAM start to support CDB sizes
595 * greater than 16 bytes, we could use
596 * the sense buffer to store the CDB.
597 */
598 ahd_set_transaction_status(scb,
599 XS_DRIVER_STUFFUP);
600
601 ahd_lock(ahd, &s);
602 ahd_free_scb(ahd, scb);
603 ahd_unlock(ahd, &s);
604 scsipi_done(xs);
605 }
606 memcpy(hscb->shared_data.idata.cdb, xs->cmd, hscb->cdb_len);
607
608 /* Only use S/G if there is a transfer */
609 if (xs->datalen) {
610 int error;
611
612 error = bus_dmamap_load(ahd->parent_dmat,
613 scb->dmamap, xs->data,
614 xs->datalen, NULL,
615 ((xs->xs_control & XS_CTL_NOSLEEP) ?
616 BUS_DMA_NOWAIT : BUS_DMA_WAITOK) |
617 BUS_DMA_STREAMING |
618 ((xs->xs_control & XS_CTL_DATA_IN) ?
619 BUS_DMA_READ : BUS_DMA_WRITE));
620 if (error) {
621 #ifdef AHD_DEBUG
622 printf("%s: in ahc_setup_data(): bus_dmamap_load() "
623 "= %d\n",
624 ahd_name(ahd), error);
625 #endif
626 xs->error = XS_RESOURCE_SHORTAGE;
627 scsipi_done(xs);
628 return;
629 }
630 ahd_execute_scb(scb,
631 scb->dmamap->dm_segs,
632 scb->dmamap->dm_nsegs);
633 } else {
634 ahd_execute_scb(scb, NULL, 0);
635 }
636 }
637
/*
 * Watchdog fired for an scb: dump state and reset the channel, which
 * completes all outstanding commands with an error.
 */
void
ahd_timeout(void *arg)
{
	struct scb *scb;
	struct ahd_softc *ahd;
	ahd_mode_state saved_modes;	/* only consumed by the #if 0 code */
	int s;
	int target;
	int lun;
	char channel;

	scb = (struct scb *)arg;
	ahd = (struct ahd_softc *)scb->ahd_softc;

	printf("%s: ahd_timeout\n", ahd_name(ahd));

	ahd_lock(ahd, &s);

	/* Quiesce the chip before poking at its state. */
	ahd_pause_and_flushwork(ahd);
	saved_modes = ahd_save_modes(ahd);
#if 0
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	ahd_outb(ahd, SCSISIGO, ACKO);
	printf("set ACK\n");
	ahd_outb(ahd, SCSISIGO, 0);
	printf("clearing Ack\n");
	ahd_restore_modes(ahd, saved_modes);
#endif
	if ((scb->flags & SCB_ACTIVE) == 0) {
		/* Previous timeout took care of me already */
		printf("%s: Timedout SCB already complete. "
		       "Interrupts may not be functioning.\n", ahd_name(ahd));
		ahd_unpause(ahd);
		ahd_unlock(ahd, &s);
		return;
	}

	target = SCB_GET_TARGET(ahd, scb);
	channel = SCB_GET_CHANNEL(ahd, scb);
	lun = SCB_GET_LUN(scb);

	ahd_print_path(ahd, scb);
	printf("SCB 0x%x - timed out\n", SCB_GET_TAG(scb));
	ahd_dump_card_state(ahd);
	/*
	 * NOTE(review): "sim" is not declared in this function;
	 * SIM_CHANNEL is presumably a macro ignoring that argument
	 * in the NetBSD port (the local `channel' above looks like
	 * the intended value) -- confirm against aic79xx_osm.h.
	 */
	ahd_reset_channel(ahd, SIM_CHANNEL(ahd, sim),
			  /*initiate reset*/TRUE);
	ahd_unlock(ahd, &s);
	return;
}
687
688 int
689 ahd_platform_alloc(struct ahd_softc *ahd, void *platform_arg)
690 {
691 ahd->platform_data = malloc(sizeof(struct ahd_platform_data), M_DEVBUF,
692 M_NOWAIT /*| M_ZERO*/);
693 if (ahd->platform_data == NULL)
694 return (ENOMEM);
695
696 memset(ahd->platform_data, 0, sizeof(struct ahd_platform_data));
697
698 return (0);
699 }
700
/* Release the platform-private data allocated by ahd_platform_alloc(). */
void
ahd_platform_free(struct ahd_softc *ahd)
{
	free(ahd->platform_data, M_DEVBUF);
}
706
/*
 * Softc ordering hook used by the shared core.  NetBSD keeps no
 * sorted softc list, so any two controllers compare as equal.
 */
int
ahd_softc_comp(struct ahd_softc *lahd, struct ahd_softc *rahd)
{
	return 0;
}
713
714 int
715 ahd_detach(struct device *self, int flags)
716 {
717 int rv = 0;
718
719 struct ahd_softc *ahd = (struct ahd_softc*)self;
720
721 if (ahd->sc_child != NULL)
722 rv = config_detach((void *)ahd->sc_child, flags);
723
724 shutdownhook_disestablish(ahd->shutdown_hook);
725
726 ahd_free(ahd);
727
728 return rv;
729 }
730
731 void
732 ahd_platform_set_tags(struct ahd_softc *ahd,
733 struct ahd_devinfo *devinfo, ahd_queue_alg alg)
734 {
735 struct ahd_initiator_tinfo *tinfo;
736 struct ahd_tmode_tstate *tstate;
737
738 tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid,
739 devinfo->target, &tstate);
740
741 if (alg != AHD_QUEUE_NONE)
742 tstate->tagenable |= devinfo->target_mask;
743 else
744 tstate->tagenable &= ~devinfo->target_mask;
745 }
746
/*
 * Forward an asynchronous event from the core driver to the scsipi
 * midlayer.  Only transfer-negotiation updates and bus resets are
 * translated; everything else is ignored.
 *
 * Note: the parameter is named "ahc" (aic7xxx legacy), but it is an
 * aic79xx softc.
 */
void
ahd_send_async(struct ahd_softc *ahc, char channel, u_int target, u_int lun,
	       ac_code code, void *opt_arg)
{
	struct ahd_tmode_tstate *tstate;
	struct ahd_initiator_tinfo *tinfo;
	struct ahd_devinfo devinfo;
	struct scsipi_channel *chan;
	struct scsipi_xfer_mode xm;

#ifdef DIAGNOSTIC
	if (channel != 'A')
		panic("ahd_send_async: not channel A");
#endif
	chan = &ahc->sc_channel;
	switch (code) {
	case AC_TRANSFER_NEG:
		tinfo = ahd_fetch_transinfo(ahc, channel, ahc->our_id, target,
					    &tstate);
		ahd_compile_devinfo(&devinfo, ahc->our_id, target, lun,
				    channel, ROLE_UNKNOWN);
		/*
		 * Don't bother if negotiating. XXX?
		 */
		if (tinfo->curr.period != tinfo->goal.period
		    || tinfo->curr.width != tinfo->goal.width
		    || tinfo->curr.offset != tinfo->goal.offset
		    || tinfo->curr.ppr_options != tinfo->goal.ppr_options)
			break;
		/* Translate the negotiated settings into scsipi terms. */
		xm.xm_target = target;
		xm.xm_mode = 0;
		xm.xm_period = tinfo->curr.period;
		xm.xm_offset = tinfo->curr.offset;
		if (tinfo->goal.ppr_options & MSG_EXT_PPR_DT_REQ)
			xm.xm_mode |= PERIPH_CAP_DT;
		if (tinfo->curr.width == MSG_EXT_WDTR_BUS_16_BIT)
			xm.xm_mode |= PERIPH_CAP_WIDE16;
		if (tinfo->curr.period)
			xm.xm_mode |= PERIPH_CAP_SYNC;
		if (tstate->tagenable & devinfo.target_mask)
			xm.xm_mode |= PERIPH_CAP_TQING;
		scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, &xm);
		break;
	case AC_BUS_RESET:
		scsipi_async_event(chan, ASYNC_EVENT_RESET, NULL);
		/* FALLTHROUGH -- nothing further to do for a reset. */
	case AC_SENT_BDR:
	default:
		break;
	}
}
797