/* $NetBSD: isp_sbus.c,v 1.38 2001/02/25 01:44:02 mjacob Exp $ */
/*
 * This driver, which is contained in NetBSD in the files:
 *
 *	sys/dev/ic/isp.c
 *	sys/dev/ic/isp_inline.h
 *	sys/dev/ic/isp_netbsd.c
 *	sys/dev/ic/isp_netbsd.h
 *	sys/dev/ic/isp_target.c
 *	sys/dev/ic/isp_target.h
 *	sys/dev/ic/isp_tpublic.h
 *	sys/dev/ic/ispmbox.h
 *	sys/dev/ic/ispreg.h
 *	sys/dev/ic/ispvar.h
 *	sys/microcode/isp/asm_sbus.h
 *	sys/microcode/isp/asm_1040.h
 *	sys/microcode/isp/asm_1080.h
 *	sys/microcode/isp/asm_12160.h
 *	sys/microcode/isp/asm_2100.h
 *	sys/microcode/isp/asm_2200.h
 *	sys/pci/isp_pci.c
 *	sys/sbus/isp_sbus.c
 *
 * is being actively maintained by Matthew Jacob (mjacob@netbsd.org).
 * This driver is also shared source with the FreeBSD, OpenBSD, Linux,
 * and Solaris versions. This tends to be an interesting maintenance
 * problem.
 *
 * Please coordinate with Matthew Jacob on changes you wish to make here.
 */
/*
 * SBus specific probe and attach routines for Qlogic ISP SCSI adapters.
 *
 * Copyright (c) 1997, 2001 by Matthew Jacob
 * NASA AMES Research Center
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/queue.h>

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/autoconf.h>

#include <dev/ic/isp_netbsd.h>
#include <dev/microcode/isp/asm_sbus.h>
#include <dev/sbus/sbusvar.h>
#include <sys/reboot.h>

static int isp_sbus_intr __P((void *));
static u_int16_t isp_sbus_rd_reg __P((struct ispsoftc *, int));
static void isp_sbus_wr_reg __P((struct ispsoftc *, int, u_int16_t));
static int isp_sbus_mbxdma __P((struct ispsoftc *));
static int isp_sbus_dmasetup __P((struct ispsoftc *, struct scsipi_xfer *,
	ispreq_t *, u_int16_t *, u_int16_t));
static void isp_sbus_dmateardown __P((struct ispsoftc *, struct scsipi_xfer *,
	u_int32_t));

#ifndef	ISP_1000_RISC_CODE
#define	ISP_1000_RISC_CODE	NULL
#endif

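/*
 * Machine-dependent vector handed to the MI core: register accessors,
 * mailbox/queue DMA setup, per-command DMA map and unmap routines, and
 * the ISP1000 firmware image (if compiled in).
 */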
static struct ispmdvec mdvec = {
	isp_sbus_rd_reg,
	isp_sbus_wr_reg,
	isp_sbus_mbxdma,
	isp_sbus_dmasetup,
	isp_sbus_dmateardown,
	NULL,
	NULL,
	NULL,
	ISP_1000_RISC_CODE
};

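/*
 * SBus-specific softc: wraps the machine-independent ispsoftc with the
 * bus and DMA tags, the register handle, per-command DMA maps, and the
 * register block offset table used by the accessor routines below.
 */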
struct isp_sbussoftc {
	struct ispsoftc		sbus_isp;
	struct sbusdev		sbus_sd;
	sdparam			sbus_dev;
	bus_space_tag_t		sbus_bustag;
	bus_dma_tag_t		sbus_dmatag;
	bus_space_handle_t	sbus_reg;
	int			sbus_node;
	int			sbus_pri;
	struct ispmdvec		sbus_mdvec;
	bus_dmamap_t		*sbus_dmamap;
	bus_dmamap_t		sbus_rquest_dmamap;
	bus_dmamap_t		sbus_result_dmamap;
	int16_t			sbus_poff[_NREG_BLKS];
};


static int isp_match __P((struct device *, struct cfdata *, void *));
static void isp_sbus_attach __P((struct device *, struct device *, void *));
struct cfattach isp_sbus_ca = {
	sizeof (struct isp_sbussoftc), isp_match, isp_sbus_attach
};

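/*
 * Match on any of the OpenBoot names under which Qlogic, Sun, and PTI
 * SBus ISP boards appear.
 */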
static int
isp_match(parent, cf, aux)
	struct device *parent;
	struct cfdata *cf;
	void *aux;
{
	int rv;
#ifdef DEBUG
	static int oneshot = 1;
#endif
	struct sbus_attach_args *sa = aux;

	rv = (strcmp(cf->cf_driver->cd_name, sa->sa_name) == 0 ||
	    strcmp("PTI,ptisp", sa->sa_name) == 0 ||
	    strcmp("ptisp", sa->sa_name) == 0 ||
	    strcmp("SUNW,isp", sa->sa_name) == 0 ||
	    strcmp("QLGC,isp", sa->sa_name) == 0);
#ifdef DEBUG
	if (rv && oneshot) {
		oneshot = 0;
		printf("Qlogic ISP Driver, NetBSD (sbus) Platform Version "
		    "%d.%d Core Version %d.%d\n",
		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
	}
#endif
	return (rv);
}


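/*
 * Attach: map the chip registers, derive a burst/FIFO configuration from
 * the "burst-sizes" properties, establish the interrupt, and then run the
 * machine-independent isp_reset/isp_init/isp_attach sequence.
 */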
static void
isp_sbus_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	int freq, ispburst, sbusburst;
	struct sbus_attach_args *sa = aux;
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) self;
	struct ispsoftc *isp = &sbc->sbus_isp;

	printf(" for %s\n", sa->sa_name);

	sbc->sbus_bustag = sa->sa_bustag;
	sbc->sbus_dmatag = sa->sa_dmatag;
	if (sa->sa_nintr != 0)
		sbc->sbus_pri = sa->sa_pri;
	sbc->sbus_mdvec = mdvec;

	if (sa->sa_npromvaddrs != 0) {
		sbc->sbus_reg = (bus_space_handle_t)sa->sa_promvaddrs[0];
	} else {
		if (sbus_bus_map(sa->sa_bustag, sa->sa_slot, sa->sa_offset,
		    sa->sa_size, BUS_SPACE_MAP_LINEAR, 0,
		    &sbc->sbus_reg) != 0) {
			printf("%s: cannot map registers\n", self->dv_xname);
			return;
		}
	}
	sbc->sbus_node = sa->sa_node;

	freq = getpropint(sa->sa_node, "clock-frequency", 0);
	if (freq) {
		/*
		 * Convert from Hz to MHz, rounding to the nearest MHz.
		 */
		freq = (freq + 500000)/1000000;
#if 0
		printf("%s: %d MHz\n", self->dv_xname, freq);
#endif
	}
	sbc->sbus_mdvec.dv_clock = freq;

	/*
	 * Now figure out the proper burst sizes, etc., to use.
	 * Unfortunately, there is no ddi_dma_burstsizes here which
	 * walks up the tree finding the limiting burst size node (if
	 * any).
	 */
	sbusburst = ((struct sbus_softc *)parent)->sc_burst;
	if (sbusburst == 0)
		sbusburst = SBUS_BURST_32 - 1;
	ispburst = getpropint(sa->sa_node, "burst-sizes", -1);
	if (ispburst == -1) {
		ispburst = sbusburst;
	}
	ispburst &= sbusburst;
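	/*
	 * "burst-sizes" is a bit mask in which bit n set means bursts of
	 * 2^n bytes are supported.  Mask off 64- and 128-byte bursts; the
	 * largest FIFO/burst configuration the BIU config register can
	 * express is 32 bytes.
	 */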
	ispburst &= ~(1 << 7);
	ispburst &= ~(1 << 6);
	sbc->sbus_mdvec.dv_conf1 = 0;
	if (ispburst & (1 << 5)) {
		sbc->sbus_mdvec.dv_conf1 = BIU_SBUS_CONF1_FIFO_32;
	} else if (ispburst & (1 << 4)) {
		sbc->sbus_mdvec.dv_conf1 = BIU_SBUS_CONF1_FIFO_16;
	} else if (ispburst & (1 << 3)) {
		sbc->sbus_mdvec.dv_conf1 =
		    BIU_SBUS_CONF1_BURST8 | BIU_SBUS_CONF1_FIFO_8;
	}
	if (sbc->sbus_mdvec.dv_conf1) {
		sbc->sbus_mdvec.dv_conf1 |= BIU_BURST_ENABLE;
	}

	/*
	 * Some early versions of the PTI SBus adapter would fail when
	 * firmware was downloaded to them (via poking), so don't try
	 * to download firmware to those boards at all.
	 */
	if (strcmp("PTI,ptisp", sa->sa_name) == 0 ||
	    strcmp("ptisp", sa->sa_name) == 0) {
		sbc->sbus_mdvec.dv_ispfw = NULL;
	}

	isp->isp_mdvec = &sbc->sbus_mdvec;
	isp->isp_bustype = ISP_BT_SBUS;
	isp->isp_type = ISP_HA_SCSI_UNKNOWN;
	isp->isp_param = &sbc->sbus_dev;
	bzero(isp->isp_param, sizeof (sdparam));

	sbc->sbus_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
	sbc->sbus_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = SBUS_MBOX_REGS_OFF;
	sbc->sbus_poff[SXP_BLOCK >> _BLK_REG_SHFT] = SBUS_SXP_REGS_OFF;
	sbc->sbus_poff[RISC_BLOCK >> _BLK_REG_SHFT] = SBUS_RISC_REGS_OFF;
	sbc->sbus_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;

	/* Establish the interrupt channel */
	bus_intr_establish(sbc->sbus_bustag, sbc->sbus_pri, IPL_BIO, 0,
	    isp_sbus_intr, sbc);
	sbus_establish(&sbc->sbus_sd, &sbc->sbus_isp.isp_osinfo._dev);

	/*
	 * Set up logging levels.
	 */
#ifdef ISP_LOGDEFAULT
	isp->isp_dblev = ISP_LOGDEFAULT;
#else
	isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
	if (bootverbose)
		isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
#ifdef SCSIDEBUG
	isp->isp_dblev |= ISP_LOGDEBUG1|ISP_LOGDEBUG2;
#endif
#ifdef DEBUG
	isp->isp_dblev |= ISP_LOGDEBUG0;
#endif
#endif

	isp->isp_confopts = self->dv_cfdata->cf_flags;
	isp->isp_role = ISP_DEFAULT_ROLES;

	/*
	 * There's no tool on sparc to set NVRAM for ISPs, so ignore it.
	 */
	isp->isp_confopts |= ISP_CFG_NONVRAM;
	ISP_LOCK(isp);
	isp->isp_osinfo.no_mbox_ints = 1;
	isp_reset(isp);
	if (isp->isp_state != ISP_RESETSTATE) {
		ISP_UNLOCK(isp);
		return;
	}
	ENABLE_INTS(isp);
	isp_init(isp);
	if (isp->isp_state != ISP_INITSTATE) {
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		return;
	}

	/*
	 * Do the generic attach.
	 */
	ISP_UNLOCK(isp);
	isp_attach(isp);
	if (isp->isp_state != ISP_RUNSTATE) {
		ISP_LOCK(isp);
		isp_uninit(isp);
		ISP_UNLOCK(isp);
	}
}

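/*
 * Interrupt handler: make the result queue visible to the CPU before
 * handing off to the machine-independent isp_intr(), and note that we
 * are on the interrupt stack while it runs.
 */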
static int
isp_sbus_intr(arg)
	void *arg;
{
	int rv;
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *)arg;
	bus_dmamap_sync(sbc->sbus_dmatag, sbc->sbus_result_dmamap, 0,
	    sbc->sbus_result_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
	sbc->sbus_isp.isp_osinfo.onintstack = 1;
	rv = isp_intr(arg);
	sbc->sbus_isp.isp_osinfo.onintstack = 0;
	return (rv);
}

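/*
 * Register accessors: the MI code passes an abstract register offset whose
 * upper bits select a register block; translate that through the per-block
 * offset table set up at attach time.
 */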
static u_int16_t
isp_sbus_rd_reg(isp, regoff)
	struct ispsoftc *isp;
	int regoff;
{
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
	int offset = sbc->sbus_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	return (bus_space_read_2(sbc->sbus_bustag, sbc->sbus_reg, offset));
}

static void
isp_sbus_wr_reg(isp, regoff, val)
	struct ispsoftc *isp;
	int regoff;
	u_int16_t val;
{
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
	int offset = sbc->sbus_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	bus_space_write_2(sbc->sbus_bustag, sbc->sbus_reg, offset, val);
}

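/*
 * Allocate the per-command bookkeeping (the xflist array and one DMA map
 * per command) and the request and result queues, and load the queues so
 * the ISP can DMA to and from them.
 */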
static int
isp_sbus_mbxdma(isp)
	struct ispsoftc *isp;
{
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
	bus_dma_tag_t dmatag = sbc->sbus_dmatag;
	bus_dma_segment_t seg;
	int rs, i;
	size_t n;
	bus_size_t len;

	if (isp->isp_rquest_dma)
		return (0);

	n = sizeof (XS_T **) * isp->isp_maxcmds;
	isp->isp_xflist = (XS_T **) malloc(n, M_DEVBUF, M_WAITOK);
	if (isp->isp_xflist == NULL) {
		isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array");
		return (1);
	}
	bzero(isp->isp_xflist, n);
	n = sizeof (bus_dmamap_t) * isp->isp_maxcmds;
	sbc->sbus_dmamap = (bus_dmamap_t *) malloc(n, M_DEVBUF, M_WAITOK);
	if (sbc->sbus_dmamap == NULL) {
		free(isp->isp_xflist, M_DEVBUF);
		isp->isp_xflist = NULL;
		isp_prt(isp, ISP_LOGERR, "cannot alloc dmamap array");
		return (1);
	}
	for (i = 0; i < isp->isp_maxcmds; i++) {
		/* Allocate a DMA handle */
		if (bus_dmamap_create(dmatag, MAXPHYS, 1, MAXPHYS, 0,
		    BUS_DMA_NOWAIT, &sbc->sbus_dmamap[i]) != 0) {
			isp_prt(isp, ISP_LOGERR, "cmd DMA maps create error");
			break;
		}
	}
	if (i < isp->isp_maxcmds) {
		while (--i >= 0) {
			bus_dmamap_destroy(dmatag, sbc->sbus_dmamap[i]);
		}
		free(isp->isp_xflist, M_DEVBUF);
		free(sbc->sbus_dmamap, M_DEVBUF);
		isp->isp_xflist = NULL;
		sbc->sbus_dmamap = NULL;
		return (1);
	}

	/*
	 * Allocate and map the request queue.
	 */
	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	/* Allocate DMA map */
	if (bus_dmamap_create(dmatag, len, 1, len, 0, BUS_DMA_NOWAIT,
	    &sbc->sbus_rquest_dmamap) != 0) {
		goto dmafail;
	}

	/* Allocate DMA buffer */
	if (bus_dmamem_alloc(dmatag, len, 0, 0, &seg, 1, &rs, BUS_DMA_NOWAIT)) {
		goto dmafail;
	}

	/* Load the buffer */
	if (bus_dmamap_load_raw(dmatag, sbc->sbus_rquest_dmamap,
	    &seg, rs, len, BUS_DMA_NOWAIT) != 0) {
		bus_dmamem_free(dmatag, &seg, rs);
		goto dmafail;
	}
	isp->isp_rquest_dma = sbc->sbus_rquest_dmamap->dm_segs[0].ds_addr;

	/* Map DMA buffer in CPU addressable space */
	if (bus_dmamem_map(dmatag, &seg, rs, len, (caddr_t *)&isp->isp_rquest,
	    BUS_DMA_NOWAIT)) {
		bus_dmamap_unload(dmatag, sbc->sbus_rquest_dmamap);
		bus_dmamem_free(dmatag, &seg, rs);
		goto dmafail;
	}

	/*
	 * Allocate and map the result queue.
	 */
	len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
	/* Allocate DMA map */
	if (bus_dmamap_create(dmatag, len, 1, len, 0, BUS_DMA_NOWAIT,
	    &sbc->sbus_result_dmamap) != 0) {
		goto dmafail;
	}

	/* Allocate DMA buffer */
	if (bus_dmamem_alloc(dmatag, len, 0, 0, &seg, 1, &rs, BUS_DMA_NOWAIT)) {
		goto dmafail;
	}

	/* Load the buffer */
	if (bus_dmamap_load_raw(dmatag, sbc->sbus_result_dmamap,
	    &seg, rs, len, BUS_DMA_NOWAIT|BUS_DMA_COHERENT) != 0) {
		bus_dmamem_free(dmatag, &seg, rs);
		goto dmafail;
	}

	/* Map DMA buffer in CPU addressable space */
	if (bus_dmamem_map(dmatag, &seg, rs, len, (caddr_t *)&isp->isp_result,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
		bus_dmamap_unload(dmatag, sbc->sbus_result_dmamap);
		bus_dmamem_free(dmatag, &seg, rs);
		goto dmafail;
	}
	isp->isp_result_dma = sbc->sbus_result_dmamap->dm_segs[0].ds_addr;

	return (0);

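	/*
	 * Unwind after a failure: destroy the per-command DMA maps and
	 * free the arrays allocated above.
	 */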
dmafail:
	for (i = 0; i < isp->isp_maxcmds; i++) {
		bus_dmamap_destroy(dmatag, sbc->sbus_dmamap[i]);
	}
	free(sbc->sbus_dmamap, M_DEVBUF);
	free(isp->isp_xflist, M_DEVBUF);
	isp->isp_xflist = NULL;
	sbc->sbus_dmamap = NULL;
	return (1);
}

/*
 * Map a DMA request.
 * We're guaranteed that rq->req_handle is a value from 1 to isp->isp_maxcmds.
 */

static int
isp_sbus_dmasetup(isp, xs, rq, iptrp, optr)
	struct ispsoftc *isp;
	struct scsipi_xfer *xs;
	ispreq_t *rq;
	u_int16_t *iptrp;
	u_int16_t optr;
{
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
	bus_dmamap_t dmap;
	ispcontreq_t *crq;
	int cansleep = (xs->xs_control & XS_CTL_NOSLEEP) == 0;
	int in = (xs->xs_control & XS_CTL_DATA_IN) != 0;

	if (xs->datalen == 0) {
		rq->req_seg_count = 1;
		goto mbxsync;
	}

	dmap = sbc->sbus_dmamap[isp_handle_index(rq->req_handle)];
	if (dmap->dm_nsegs != 0) {
		panic("%s: dma map already allocated\n", isp->isp_name);
		/* NOTREACHED */
	}
	if (bus_dmamap_load(sbc->sbus_dmatag, dmap, xs->data, xs->datalen,
	    NULL, cansleep? BUS_DMA_WAITOK : BUS_DMA_NOWAIT) != 0) {
		XS_SETERR(xs, HBA_BOTCH);
		return (CMD_COMPLETE);
	}

	bus_dmamap_sync(sbc->sbus_dmatag, dmap, 0, xs->datalen,
	    in? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

	if (in) {
		rq->req_flags |= REQFLAG_DATA_IN;
	} else {
		rq->req_flags |= REQFLAG_DATA_OUT;
	}

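	/*
	 * For CDBs longer than 12 bytes the data segment is not placed in
	 * the request entry itself; describe the (single) data segment in
	 * a continuation (RQSTYPE_DATASEG) entry on the request queue
	 * instead.
	 */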
	if (XS_CDBLEN(xs) > 12) {
		crq = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, *iptrp);
		*iptrp = ISP_NXT_QENTRY(*iptrp, RQUEST_QUEUE_LEN(isp));
		if (*iptrp == optr) {
			isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
			bus_dmamap_unload(sbc->sbus_dmatag, dmap);
			XS_SETERR(xs, HBA_BOTCH);
			return (CMD_EAGAIN);
		}
		rq->req_seg_count = 2;
		rq->req_dataseg[0].ds_count = 0;
		rq->req_dataseg[0].ds_base = 0;
		bzero((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
		crq->req_dataseg[0].ds_count = xs->datalen;
		crq->req_dataseg[0].ds_base = dmap->dm_segs[0].ds_addr;
		ISP_SBUSIFY_ISPHDR(isp, &crq->req_header)
	} else {
		rq->req_dataseg[0].ds_count = xs->datalen;
		rq->req_dataseg[0].ds_base = dmap->dm_segs[0].ds_addr;
		rq->req_seg_count = 1;
	}

mbxsync:
	ISP_SWIZZLE_REQUEST(isp, rq);
	bus_dmamap_sync(sbc->sbus_dmatag, sbc->sbus_rquest_dmamap, 0,
	    sbc->sbus_rquest_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
	return (CMD_QUEUED);
}

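/*
 * Post-command cleanup: sync the data buffer for the CPU and unload the
 * per-command DMA map identified by `handle'.
 */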
static void
isp_sbus_dmateardown(isp, xs, handle)
	struct ispsoftc *isp;
	struct scsipi_xfer *xs;
	u_int32_t handle;
{
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
	bus_dmamap_t dmap;

	dmap = sbc->sbus_dmamap[isp_handle_index(handle)];

	if (dmap->dm_nsegs == 0) {
		panic("%s: dma map not already allocated\n", isp->isp_name);
		/* NOTREACHED */
	}
	bus_dmamap_sync(sbc->sbus_dmatag, dmap, 0,
	    xs->datalen, (xs->xs_control & XS_CTL_DATA_IN)?
	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sbc->sbus_dmatag, dmap);
}
