Lines Matching defs:ecb
108 * Offset of an ECB from the beginning of the ECB DMA mapping.
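A comment like this normally sits directly above the offset macro itself. A minimal sketch of what AHB_ECB_OFF (used at lines 452, 538 and 937 below) could look like, assuming the ECBs are carved out of one contiguous, DMA-mapped array; the array field name sc_ecbs and the cast are assumptions, not taken from this listing:

    /*
     * Sketch: an ECB's offset inside the shared ECB DMA mapping is just
     * its distance from the start of the ECB array covered by that map.
     * The softc pointer "sc" is expected to be in scope at the use sites.
     */
    #define AHB_ECB_OFF(e)  (((u_long)(e)) - ((u_long)(sc)->sc_ecbs))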
279 ahb_send_mbox(struct ahb_softc *sc, int opcode, struct ahb_ecb *ecb)
296 bus_space_write_4(iot, ioh, MBOXOUT0, ecb->ecb_dma_addr);
298 ecb->xs->xs_periph->periph_target);
300 if ((ecb->xs->xs_control & XS_CTL_POLL) == 0)
301 callout_reset(&ecb->xs->xs_callout,
302 mstohz(ecb->timeout), ahb_timeout, ecb);
309 ahb_send_immed(struct ahb_softc *sc, u_int32_t cmd, struct ahb_ecb *ecb)
329 ecb->xs->xs_periph->periph_target);
331 if ((ecb->xs->xs_control & XS_CTL_POLL) == 0)
332 callout_reset(&ecb->xs->xs_callout,
333 mstohz(ecb->timeout), ahb_timeout, ecb);
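Both send routines above arm the per-command watchdog the same way, and the interrupt path disarms it at line 401 before completing the request. A condensed sketch of that pattern, taken from the fragments shown here:

    /*
     * Interrupt-driven requests get a watchdog callout; polled requests
     * (XS_CTL_POLL) are timed by the submitter via ahb_poll() instead.
     * mstohz() converts the command timeout in milliseconds to ticks.
     */
    if ((ecb->xs->xs_control & XS_CTL_POLL) == 0)
        callout_reset(&ecb->xs->xs_callout,
            mstohz(ecb->timeout), ahb_timeout, ecb);

    /* ... on normal completion, before ahb_done(): */
    callout_stop(&ecb->xs->xs_callout);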
345 struct ahb_ecb *ecb;
376 ecb = ahb_ecb_lookup(sc, mboxval);
377 if (!ecb) {
379 "BAD ECB RETURNED!\n");
385 ecb = sc->sc_immed_ecb;
387 ecb->flags |= ECB_IMMED_FAIL;
391 ecb = sc->sc_immed_ecb;
401 callout_stop(&ecb->xs->xs_callout);
402 ahb_done(sc, ecb);
411 ahb_reset_ecb(struct ahb_softc *sc, struct ahb_ecb *ecb)
414 ecb->flags = 0;
418 * An ecb (and hence a mbx-out) is put onto the
422 ahb_free_ecb(struct ahb_softc *sc, struct ahb_ecb *ecb)
427 ahb_reset_ecb(sc, ecb);
428 TAILQ_INSERT_HEAD(&sc->sc_free_ecb, ecb, chain);
436 ahb_init_ecb(struct ahb_softc *sc, struct ahb_ecb *ecb)
442 * Create the DMA map for this ECB.
445 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &ecb->dmamap_xfer);
447 aprint_error_dev(sc->sc_dev, "can't create ecb dmamap_xfer\n");
451 ecb->ecb_dma_addr = sc->sc_dmamap_ecb->dm_segs[0].ds_addr +
452 AHB_ECB_OFF(ecb);
458 hashnum = ECB_HASH(ecb->ecb_dma_addr);
459 ecb->nexthash = sc->sc_ecbhash[hashnum];
460 sc->sc_ecbhash[hashnum] = ecb;
461 ahb_reset_ecb(sc, ecb);
468 struct ahb_ecb *ecb;
473 ecb = &ecbstore[i];
474 if ((error = ahb_init_ecb(sc, ecb)) != 0) {
476 "unable to initialize ecb, error = %d\n", error);
479 TAILQ_INSERT_TAIL(&sc->sc_free_ecb, ecb, chain);
486 * Get a free ecb
494 struct ahb_ecb *ecb;
498 ecb = TAILQ_FIRST(&sc->sc_free_ecb);
499 if (ecb != NULL) {
500 TAILQ_REMOVE(&sc->sc_free_ecb, ecb, chain);
501 ecb->flags |= ECB_ALLOC;
504 return (ecb);
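Together with ahb_free_ecb above, ECB allocation is a plain TAILQ free list guarded by the ECB_ALLOC flag; condensed from the fragments at lines 427-428 and 498-504 (any locking the driver does around the queue is not visible in this listing):

    /* allocate (ahb_get_ecb) */
    ecb = TAILQ_FIRST(&sc->sc_free_ecb);
    if (ecb != NULL) {
        TAILQ_REMOVE(&sc->sc_free_ecb, ecb, chain);
        ecb->flags |= ECB_ALLOC;    /* ahb_done() refuses ECBs without it */
    }

    /* release (ahb_free_ecb) */
    ahb_reset_ecb(sc, ecb);         /* clears ecb->flags, line 414 */
    TAILQ_INSERT_HEAD(&sc->sc_free_ecb, ecb, chain);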
508 * Lookup and return the ECB that has the specified DMA address.
514 struct ahb_ecb *ecb = sc->sc_ecbhash[hashnum];
516 while (ecb) {
517 if (ecb->ecb_dma_addr == ecb_phys)
519 ecb = ecb->nexthash;
521 return ecb;
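The lookup walks the same hash table that ahb_init_ecb chains new ECBs into at lines 458-460, keyed on the ECB's DMA address as returned in the mailbox. A condensed sketch; the ECB_HASH definition and the bucket count are illustrative, since the real macro is not part of this listing:

    #define ECB_HASH_SIZE   32                                  /* assumed */
    #define ECB_HASH(x)     (((u_long)(x) >> 6) & (ECB_HASH_SIZE - 1))  /* assumed */

    /* ahb_ecb_lookup, condensed from lines 514-521 */
    struct ahb_ecb *ecb = sc->sc_ecbhash[ECB_HASH(ecb_phys)];

    while (ecb != NULL && ecb->ecb_dma_addr != ecb_phys)
        ecb = ecb->nexthash;
    /* NULL here is what ahb_intr reports as "BAD ECB RETURNED!" */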
525 * We have an ecb which has been processed by the adaptor, now we look to see
529 ahb_done(struct ahb_softc *sc, struct ahb_ecb *ecb)
533 struct scsipi_xfer *xs = ecb->xs;
538 AHB_ECB_OFF(ecb), sizeof(struct ahb_ecb),
546 bus_dmamap_sync(dmat, ecb->dmamap_xfer, 0,
547 ecb->dmamap_xfer->dm_mapsize,
550 bus_dmamap_unload(dmat, ecb->dmamap_xfer);
557 if ((ecb->flags & ECB_ALLOC) == 0) {
558 aprint_error_dev(sc->sc_dev, "exiting ecb not allocated!\n");
561 if (ecb->flags & ECB_IMMED) {
562 if (ecb->flags & ECB_IMMED_FAIL)
567 if (ecb->ecb_status.host_stat != HS_OK) {
568 switch (ecb->ecb_status.host_stat) {
575 ecb->ecb_status.host_stat);
578 } else if (ecb->ecb_status.target_stat != SCSI_OK) {
579 switch (ecb->ecb_status.target_stat) {
581 s1 = &ecb->ecb_sense;
592 ecb->ecb_status.target_stat);
599 ahb_free_ecb(sc, ecb);
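The two switches above turn the adapter (host_stat) and target (target_stat) status bytes into a scsipi error before the ECB is released. This listing does not show the individual cases, so the sketch below is only a plausible mapping using generic scsipi constants, not the driver's actual case list:

    /* Sketch: typical host/target status decoding in a done routine. */
    if (ecb->ecb_status.host_stat != HS_OK) {
        /* adapter-detected failure (selection timeout, DMA error, ...) */
        xs->error = XS_DRIVER_STUFFUP;
    } else if (ecb->ecb_status.target_stat != SCSI_OK) {
        switch (ecb->ecb_status.target_stat) {
        case SCSI_CHECK:
            /* hand ecb->ecb_sense back in xs->sense.scsi_sense */
            xs->error = XS_SENSE;
            break;
        case SCSI_BUSY:
            xs->error = XS_BUSY;
            break;
        default:
            xs->error = XS_DRIVER_STUFFUP;
            break;
        }
    } else
        xs->error = XS_NOERROR;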
742 "unable to create ecb DMA map, error = %d\n", error);
748 "unable to load ecb DMA map, error = %d\n", error);
792 struct ahb_ecb *ecb;
803 /* Get an ECB to use. */
804 ecb = ahb_get_ecb(sc);
810 if (ecb == NULL) {
812 printf("unable to allocate ecb\n");
817 ecb->xs = xs;
818 ecb->timeout = xs->timeout;
822 * command, and store its ecb for later
827 ecb->flags |= ECB_IMMED;
829 ahb_free_ecb(sc, ecb);
834 sc->sc_immed_ecb = ecb;
837 ahb_send_immed(sc, AHB_TARG_RESET, ecb);
846 if (ahb_poll(sc, xs, ecb->timeout))
847 ahb_timeout(ecb);
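Lines 817-847 handle a target-reset request as an "immediate" command: the ECB is remembered in sc_immed_ecb so ahb_intr (lines 385-391) can match the eventual interrupt to it, because an immediate command does not come back through the normal mailbox lookup. A condensed sketch of that submission path, using only names that appear in the listing:

    ecb->flags |= ECB_IMMED;
    sc->sc_immed_ecb = ecb;             /* ahb_intr() finds it here */
    ahb_send_immed(sc, AHB_TARG_RESET, ecb);

    if ((xs->xs_control & XS_CTL_POLL) == 0)
        return;                         /* completion arrives via interrupt */
    /* polled request: spin, and fall into the timeout path on expiry */
    if (ahb_poll(sc, xs, ecb->timeout))
        ahb_timeout(ecb);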
852 * Put all the arguments for the xfer in the ecb
854 if (xs->cmdlen > sizeof(ecb->scsi_cmd)) {
856 "cmdlen %d too large for ECB\n", xs->cmdlen);
860 ecb->opcode = ECB_SCSI_OP;
861 ecb->opt1 = ECB_SES /*| ECB_DSB*/ | ECB_ARS;
862 ecb->opt2 = periph->periph_lun | ECB_NRB;
863 memcpy(&ecb->scsi_cmd, xs->cmd,
864 ecb->scsi_cmd_length = xs->cmdlen);
865 ecb->sense_ptr = ecb->ecb_dma_addr +
867 ecb->req_sense_length = sizeof(ecb->ecb_sense);
868 ecb->status = ecb->ecb_dma_addr +
870 ecb->ecb_status.host_stat = 0x00;
871 ecb->ecb_status.target_stat = 0x00;
880 ecb->dmamap_xfer, (struct uio *)xs->data,
886 ecb->dmamap_xfer, xs->data, xs->datalen,
904 ahb_free_ecb(sc, ecb);
909 bus_dmamap_sync(dmat, ecb->dmamap_xfer, 0,
910 ecb->dmamap_xfer->dm_mapsize,
918 for (seg = 0; seg < ecb->dmamap_xfer->dm_nsegs; seg++) {
919 ecb->ahb_dma[seg].seg_addr =
920 ecb->dmamap_xfer->dm_segs[seg].ds_addr;
921 ecb->ahb_dma[seg].seg_len =
922 ecb->dmamap_xfer->dm_segs[seg].ds_len;
925 ecb->data_addr = ecb->ecb_dma_addr +
927 ecb->data_length = ecb->dmamap_xfer->dm_nsegs *
929 ecb->opt1 |= ECB_S_G;
931 ecb->data_addr = 0;
932 ecb->data_length = 0;
934 ecb->link_addr = 0;
937 AHB_ECB_OFF(ecb), sizeof(struct ahb_ecb),
941 ahb_send_mbox(sc, OP_START_ECB, ecb);
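The syncs at lines 909-910 and 936-937 make the data buffer and the ECB itself visible to the adapter before the mailbox write above, and ahb_done (lines 538-550) performs the mirror-image post-transfer syncs and the unload. A condensed sketch of that bracketing; choosing the sync direction from XS_CTL_DATA_IN is an assumption, and the real driver may sync for both directions:

    /* before submission */
    bus_dmamap_sync(dmat, ecb->dmamap_xfer, 0,
        ecb->dmamap_xfer->dm_mapsize,
        (xs->xs_control & XS_CTL_DATA_IN) ?
        BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
    bus_dmamap_sync(dmat, sc->sc_dmamap_ecb,
        AHB_ECB_OFF(ecb), sizeof(struct ahb_ecb),
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

    /* after completion, in ahb_done() */
    bus_dmamap_sync(dmat, sc->sc_dmamap_ecb,
        AHB_ECB_OFF(ecb), sizeof(struct ahb_ecb),
        BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
    bus_dmamap_sync(dmat, ecb->dmamap_xfer, 0,
        ecb->dmamap_xfer->dm_mapsize,
        (xs->xs_control & XS_CTL_DATA_IN) ?
        BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
    bus_dmamap_unload(dmat, ecb->dmamap_xfer);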
950 if (ahb_poll(sc, xs, ecb->timeout)) {
951 ahb_timeout(ecb);
952 if (ahb_poll(sc, xs, ecb->timeout))
953 ahb_timeout(ecb);
994 struct ahb_ecb *ecb = arg;
995 struct scsipi_xfer *xs = ecb->xs;
1006 if (ecb->flags & ECB_IMMED) {
1008 ecb->flags |= ECB_IMMED_FAIL;
1017 if (ecb->flags & ECB_ABORT) {
1024 ecb->xs->error = XS_TIMEOUT;
1025 ecb->timeout = AHB_ABORT_TIMEOUT;
1026 ecb->flags |= ECB_ABORT;
1027 ahb_send_mbox(sc, OP_ABORT_ECB, ecb);
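The timeout handler escalates in two stages: on first expiry it marks the ECB, shortens its timeout, and queues an OP_ABORT_ECB mailbox command; if that abort itself times out, the ECB_ABORT test at line 1017 catches it. A condensed sketch; what the driver actually does in the already-aborting branch is not shown in this listing, so that part is only a comment:

    if (ecb->flags & ECB_ABORT) {
        /* the abort we queued earlier also expired; the recovery taken
         * here (e.g. adapter/bus reset) is not visible in this listing */
    } else {
        /* first expiry: turn the stuck command into an abort request */
        ecb->xs->error = XS_TIMEOUT;
        ecb->timeout = AHB_ABORT_TIMEOUT;
        ecb->flags |= ECB_ABORT;
        ahb_send_mbox(sc, OP_ABORT_ECB, ecb);
    }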