/* $NetBSD: isp_sbus.c,v 1.26.4.3 2001/01/25 18:25:12 jhawk Exp $ */
2 /*
3 * This driver, which is contained in NetBSD in the files:
4 *
5 * sys/dev/ic/isp.c
6 * sys/dev/ic/isp_inline.h
7 * sys/dev/ic/isp_netbsd.c
8 * sys/dev/ic/isp_netbsd.h
9 * sys/dev/ic/isp_target.c
10 * sys/dev/ic/isp_target.h
11 * sys/dev/ic/isp_tpublic.h
12 * sys/dev/ic/ispmbox.h
13 * sys/dev/ic/ispreg.h
14 * sys/dev/ic/ispvar.h
15 * sys/microcode/isp/asm_sbus.h
16 * sys/microcode/isp/asm_1040.h
17 * sys/microcode/isp/asm_1080.h
18 * sys/microcode/isp/asm_12160.h
19 * sys/microcode/isp/asm_2100.h
20 * sys/microcode/isp/asm_2200.h
21 * sys/pci/isp_pci.c
22 * sys/sbus/isp_sbus.c
23 *
24 * Is being actively maintained by Matthew Jacob (mjacob (at) netbsd.org).
 * This driver also is shared source with the FreeBSD, OpenBSD, Linux and
 * Solaris versions. This tends to be an interesting maintenance problem.
27 *
28 * Please coordinate with Matthew Jacob on changes you wish to make here.
29 */
30 /*
31 * SBus specific probe and attach routines for Qlogic ISP SCSI adapters.
32 *
33 * Copyright (c) 1997 by Matthew Jacob
34 * NASA AMES Research Center
35 * All rights reserved.
36 *
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
39 * are met:
40 * 1. Redistributions of source code must retain the above copyright
41 * notice immediately at the beginning of the file, without modification,
42 * this list of conditions, and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. The name of the author may not be used to endorse or promote products
47 * derived from this software without specific prior written permission.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
50 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
53 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59 * SUCH DAMAGE.
60 *
61 */
62
63 #include <sys/param.h>
64 #include <sys/systm.h>
65 #include <sys/device.h>
66 #include <sys/kernel.h>
67 #include <sys/malloc.h>
68 #include <sys/queue.h>
69
70 #include <machine/bus.h>
71 #include <machine/intr.h>
72 #include <machine/autoconf.h>
73
74 #include <dev/ic/isp_netbsd.h>
75 #include <dev/microcode/isp/asm_sbus.h>
76 #include <dev/sbus/sbusvar.h>
77
78 static int isp_sbus_intr __P((void *));
79 static u_int16_t isp_sbus_rd_reg __P((struct ispsoftc *, int));
80 static void isp_sbus_wr_reg __P((struct ispsoftc *, int, u_int16_t));
81 static int isp_sbus_mbxdma __P((struct ispsoftc *));
82 static int isp_sbus_dmasetup __P((struct ispsoftc *, struct scsipi_xfer *,
83 ispreq_t *, u_int16_t *, u_int16_t));
84 static void isp_sbus_dmateardown __P((struct ispsoftc *, struct scsipi_xfer *,
85 u_int32_t));
86
87 #ifndef ISP_1000_RISC_CODE
88 #define ISP_1000_RISC_CODE NULL
89 #endif
90
/*
 * Machine-dependent vector handed to the ISP core: how to touch registers,
 * set up DMA, and which firmware image (if any) to download.
 * Entries are positional; the three NULLs are optional hooks this SBus
 * front-end does not need.
 */
static struct ispmdvec mdvec = {
	isp_sbus_rd_reg,	/* 16-bit register read */
	isp_sbus_wr_reg,	/* 16-bit register write */
	isp_sbus_mbxdma,	/* request/result queue DMA setup */
	isp_sbus_dmasetup,	/* per-command DMA mapping */
	isp_sbus_dmateardown,	/* per-command DMA unmapping */
	NULL,			/* optional hooks, unused on SBus */
	NULL,
	NULL,
	ISP_1000_RISC_CODE	/* firmware; NULL unless compiled in above */
};
102
/*
 * SBus-specific softc.  The generic ispsoftc must come first so the
 * core code's (struct ispsoftc *) casts of this structure work.
 */
struct isp_sbussoftc {
	struct ispsoftc sbus_isp;	/* generic ISP softc; MUST be first */
	struct sbusdev sbus_sd;		/* SBus device glue */
	sdparam sbus_dev;		/* SCSI parameter block for the core */
	bus_space_tag_t sbus_bustag;	/* register access tag */
	bus_dma_tag_t sbus_dmatag;	/* DMA tag from autoconfig */
	bus_space_handle_t sbus_reg;	/* mapped chip registers */
	int sbus_node;			/* OpenPROM node */
	int sbus_pri;			/* interrupt priority */
	struct ispmdvec sbus_mdvec;	/* per-instance copy of mdvec */
	bus_dmamap_t *sbus_dmamap;	/* one map per outstanding command */
	bus_dmamap_t sbus_request_dmamap;	/* request queue map */
	bus_dmamap_t sbus_result_dmamap;	/* result queue map */
	int16_t sbus_poff[_NREG_BLKS];	/* register-block offsets */
};
118
119
static int isp_match __P((struct device *, struct cfdata *, void *));
static void isp_sbus_attach __P((struct device *, struct device *, void *));
/* Autoconfiguration glue: softc size plus match/attach entry points. */
struct cfattach isp_sbus_ca = {
	sizeof (struct isp_sbussoftc), isp_match, isp_sbus_attach
};
125
126 static int
127 isp_match(parent, cf, aux)
128 struct device *parent;
129 struct cfdata *cf;
130 void *aux;
131 {
132 int rv;
133 #ifdef DEBUG
134 static int oneshot = 1;
135 #endif
136 struct sbus_attach_args *sa = aux;
137
138 rv = (strcmp(cf->cf_driver->cd_name, sa->sa_name) == 0 ||
139 strcmp("PTI,ptisp", sa->sa_name) == 0 ||
140 strcmp("ptisp", sa->sa_name) == 0 ||
141 strcmp("SUNW,isp", sa->sa_name) == 0 ||
142 strcmp("QLGC,isp", sa->sa_name) == 0);
143 #ifdef DEBUG
144 if (rv && oneshot) {
145 oneshot = 0;
146 printf("Qlogic ISP Driver, NetBSD (sbus) Platform Version "
147 "%d.%d Core Version %d.%d\n",
148 ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
149 ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
150 }
151 #endif
152 return (rv);
153 }
154
155
/*
 * Attach an SBus ISP: map registers, size the SBus burst capability,
 * initialize the chip via the shared core (isp_reset/isp_init), hook
 * up the interrupt, and run the generic isp_attach().
 */
static void
isp_sbus_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	int freq, ispburst, sbusburst;
	struct sbus_attach_args *sa = aux;
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) self;
	struct ispsoftc *isp = &sbc->sbus_isp;

	printf(" for %s\n", sa->sa_name);

	sbc->sbus_bustag = sa->sa_bustag;
	sbc->sbus_dmatag = sa->sa_dmatag;
	if (sa->sa_nintr != 0)
		sbc->sbus_pri = sa->sa_pri;
	/* Private copy of mdvec so per-instance fields can be tuned below. */
	sbc->sbus_mdvec = mdvec;

	/* Reuse the PROM's register mapping when one exists. */
	if (sa->sa_npromvaddrs != 0) {
		sbc->sbus_reg = (bus_space_handle_t)sa->sa_promvaddrs[0];
	} else {
		if (sbus_bus_map(sa->sa_bustag, sa->sa_slot, sa->sa_offset,
		    sa->sa_size, BUS_SPACE_MAP_LINEAR, 0,
		    &sbc->sbus_reg) != 0) {
			printf("%s: cannot map registers\n", self->dv_xname);
			return;
		}
	}
	sbc->sbus_node = sa->sa_node;

	freq = getpropint(sa->sa_node, "clock-frequency", 0);
	if (freq) {
		/*
		 * Convert from HZ to MHz, rounding up.
		 */
		freq = (freq + 500000)/1000000;
#if 0
		printf("%s: %d MHz\n", self->dv_xname, freq);
#endif
	}
	sbc->sbus_mdvec.dv_clock = freq;

	/*
	 * Now figure out what the proper burst sizes, etc., to use.
	 * Unfortunately, there is no ddi_dma_burstsizes here which
	 * walks up the tree finding the limiting burst size node (if
	 * any).
	 */
	sbusburst = ((struct sbus_softc *)parent)->sc_burst;
	if (sbusburst == 0)
		sbusburst = SBUS_BURST_32 - 1;
	ispburst = getpropint(sa->sa_node, "burst-sizes", -1);
	if (ispburst == -1) {
		ispburst = sbusburst;
	}
	/* Intersect with the bus's capability; drop 64/128-byte bursts. */
	ispburst &= sbusburst;
	ispburst &= ~(1 << 7);
	ispburst &= ~(1 << 6);
	/* Pick the largest remaining burst size (32, 16 or 8 bytes). */
	sbc->sbus_mdvec.dv_conf1 = 0;
	if (ispburst & (1 << 5)) {
		sbc->sbus_mdvec.dv_conf1 = BIU_SBUS_CONF1_FIFO_32;
	} else if (ispburst & (1 << 4)) {
		sbc->sbus_mdvec.dv_conf1 = BIU_SBUS_CONF1_FIFO_16;
	} else if (ispburst & (1 << 3)) {
		sbc->sbus_mdvec.dv_conf1 =
		    BIU_SBUS_CONF1_BURST8 | BIU_SBUS_CONF1_FIFO_8;
	}
	if (sbc->sbus_mdvec.dv_conf1) {
		sbc->sbus_mdvec.dv_conf1 |= BIU_BURST_ENABLE;
	}

	/*
	 * Some early versions of the PTI SBus adapter
	 * would fail in trying to download (via poking)
	 * FW. We give up on them.
	 */
	if (strcmp("PTI,ptisp", sa->sa_name) == 0 ||
	    strcmp("ptisp", sa->sa_name) == 0) {
		sbc->sbus_mdvec.dv_ispfw = NULL;
	}

	isp->isp_mdvec = &sbc->sbus_mdvec;
	isp->isp_bustype = ISP_BT_SBUS;
	isp->isp_type = ISP_HA_SCSI_UNKNOWN;
	isp->isp_param = &sbc->sbus_dev;
	bzero(isp->isp_param, sizeof (sdparam));

	/* Per-block register offsets used by isp_sbus_rd_reg/wr_reg. */
	sbc->sbus_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
	sbc->sbus_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = SBUS_MBOX_REGS_OFF;
	sbc->sbus_poff[SXP_BLOCK >> _BLK_REG_SHFT] = SBUS_SXP_REGS_OFF;
	sbc->sbus_poff[RISC_BLOCK >> _BLK_REG_SHFT] = SBUS_RISC_REGS_OFF;
	sbc->sbus_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;

	/*
	 * Set up logging levels.
	 */
#ifdef ISP_LOGDEFAULT
	isp->isp_dblev = ISP_LOGDEFAULT;
#else
	isp->isp_dblev = ISP_LOGCONFIG|ISP_LOGWARN|ISP_LOGERR;
#ifdef SCSIDEBUG
	isp->isp_dblev |= ISP_LOGDEBUG1|ISP_LOGDEBUG2;
#endif
#ifdef DEBUG
	isp->isp_dblev |= ISP_LOGDEBUG0|ISP_LOGINFO;
#endif
#endif
	isp->isp_confopts = self->dv_cfdata->cf_flags;
	/*
	 * There's no tool on sparc to set NVRAM for ISPs, so ignore it.
	 */
	isp->isp_confopts |= ISP_CFG_NONVRAM;
	ISP_LOCK(isp);
	/* Interrupt not hooked up yet, so mailbox commands must poll. */
	isp->isp_osinfo.no_mbox_ints = 1;
	isp_reset(isp);
	if (isp->isp_state != ISP_RESETSTATE) {
		ISP_UNLOCK(isp);
		return;
	}
	isp_init(isp);
	if (isp->isp_state != ISP_INITSTATE) {
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		return;
	}
	/* Establish interrupt channel */
	bus_intr_establish(sbc->sbus_bustag, sbc->sbus_pri, IPL_BIO, 0,
	    isp_sbus_intr, sbc);
	ENABLE_INTS(isp);
	ISP_UNLOCK(isp);

	sbus_establish(&sbc->sbus_sd, &sbc->sbus_isp.isp_osinfo._dev);

	/*
	 * do generic attach.
	 */
	isp_attach(isp);
	if (isp->isp_state != ISP_RUNSTATE) {
		isp_uninit(isp);
	}
}
297
298 static int
299 isp_sbus_intr(arg)
300 void *arg;
301 {
302 int rv;
303 struct isp_sbussoftc *sbc = (struct isp_sbussoftc *)arg;
304 bus_dmamap_sync(sbc->sbus_dmatag, sbc->sbus_result_dmamap, 0,
305 sbc->sbus_result_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
306 sbc->sbus_isp.isp_osinfo.onintstack = 1;
307 rv = isp_intr(arg);
308 sbc->sbus_isp.isp_osinfo.onintstack = 0;
309 return (rv);
310 }
311
312 static u_int16_t
313 isp_sbus_rd_reg(isp, regoff)
314 struct ispsoftc *isp;
315 int regoff;
316 {
317 struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
318 int offset = sbc->sbus_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
319 offset += (regoff & 0xff);
320 return (bus_space_read_2(sbc->sbus_bustag, sbc->sbus_reg, offset));
321 }
322
323 static void
324 isp_sbus_wr_reg(isp, regoff, val)
325 struct ispsoftc *isp;
326 int regoff;
327 u_int16_t val;
328 {
329 struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
330 int offset = sbc->sbus_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
331 offset += (regoff & 0xff);
332 bus_space_write_2(sbc->sbus_bustag, sbc->sbus_reg, offset, val);
333 }
334
335 static int
336 isp_sbus_mbxdma(isp)
337 struct ispsoftc *isp;
338 {
339 struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
340 bus_dma_tag_t dmatag = sbc->sbus_dmatag;
341 bus_dma_segment_t seg;
342 int rs, i;
343 size_t n;
344 bus_size_t len;
345
346 if (isp->isp_rquest_dma)
347 return (0);
348
349 n = sizeof (XS_T **) * isp->isp_maxcmds;
350 isp->isp_xflist = (XS_T **) malloc(n, M_DEVBUF, M_WAITOK);
351 if (isp->isp_xflist == NULL) {
352 isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array");
353 return (1);
354 }
355 bzero(isp->isp_xflist, n);
356 n = sizeof (bus_dmamap_t) * isp->isp_maxcmds;
357 sbc->sbus_dmamap = (bus_dmamap_t *) malloc(n, M_DEVBUF, M_WAITOK);
358 if (sbc->sbus_dmamap == NULL) {
359 free(isp->isp_xflist, M_DEVBUF);
360 isp->isp_xflist = NULL;
361 isp_prt(isp, ISP_LOGERR, "cannot alloc dmamap array");
362 return (1);
363 }
364 for (i = 0; i < isp->isp_maxcmds; i++) {
365 /* Allocate a DMA handle */
366 if (bus_dmamap_create(dmatag, MAXPHYS, 1, MAXPHYS, 0,
367 BUS_DMA_NOWAIT, &sbc->sbus_dmamap[i]) != 0) {
368 isp_prt(isp, ISP_LOGERR, "cmd DMA maps create error");
369 break;
370 }
371 }
372 if (i < isp->isp_maxcmds) {
373 while (--i >= 0) {
374 bus_dmamap_destroy(dmatag, sbc->sbus_dmamap[i]);
375 }
376 free(isp->isp_xflist, M_DEVBUF);
377 free(sbc->sbus_dmamap, M_DEVBUF);
378 isp->isp_xflist = NULL;
379 sbc->sbus_dmamap = NULL;
380 return (1);
381 }
382
383 /*
384 * Allocate and map the request queue.
385 */
386 len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
387 /* Allocate DMA map */
388 if (bus_dmamap_create(dmatag, len, 1, len, 0, BUS_DMA_NOWAIT,
389 &sbc->sbus_request_dmamap) != 0) {
390 goto dmafail;
391 }
392
393 /* Allocate DMA buffer */
394 if (bus_dmamem_alloc(dmatag, len, 0, 0, &seg, 1, &rs, BUS_DMA_NOWAIT)) {
395 goto dmafail;
396 }
397
398 /* Load the buffer */
399 if (bus_dmamap_load_raw(dmatag, sbc->sbus_request_dmamap,
400 &seg, rs, len, BUS_DMA_NOWAIT) != 0) {
401 bus_dmamem_free(dmatag, &seg, rs);
402 goto dmafail;
403 }
404 isp->isp_rquest_dma = sbc->sbus_request_dmamap->dm_segs[0].ds_addr;
405
406 /* Map DMA buffer in CPU addressable space */
407 if (bus_dmamem_map(dmatag, &seg, rs, len, (caddr_t *)&isp->isp_rquest,
408 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
409 bus_dmamap_unload(dmatag, sbc->sbus_request_dmamap);
410 bus_dmamem_free(dmatag, &seg, rs);
411 goto dmafail;
412 }
413
414 /*
415 * Allocate and map the result queue.
416 */
417 len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
418 /* Allocate DMA map */
419 if (bus_dmamap_create(dmatag, len, 1, len, 0, BUS_DMA_NOWAIT,
420 &sbc->sbus_result_dmamap) != 0) {
421 goto dmafail;
422 }
423
424 /* Allocate DMA buffer */
425 if (bus_dmamem_alloc(dmatag, len, 0, 0, &seg, 1, &rs, BUS_DMA_NOWAIT)) {
426 goto dmafail;
427 }
428
429 /* Load the buffer */
430 if (bus_dmamap_load_raw(dmatag, sbc->sbus_result_dmamap,
431 &seg, rs, len, BUS_DMA_NOWAIT) != 0) {
432 bus_dmamem_free(dmatag, &seg, rs);
433 goto dmafail;
434 }
435
436 /* Map DMA buffer in CPU addressable space */
437 if (bus_dmamem_map(dmatag, &seg, rs, len, (caddr_t *)&isp->isp_result,
438 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
439 bus_dmamap_unload(dmatag, sbc->sbus_result_dmamap);
440 bus_dmamem_free(dmatag, &seg, rs);
441 goto dmafail;
442 }
443 isp->isp_result_dma = sbc->sbus_result_dmamap->dm_segs[0].ds_addr;
444
445 return (0);
446
447 dmafail:
448 for (i = 0; i < isp->isp_maxcmds; i++) {
449 bus_dmamap_destroy(dmatag, sbc->sbus_dmamap[i]);
450 }
451 free(sbc->sbus_dmamap, M_DEVBUF);
452 free(isp->isp_xflist, M_DEVBUF);
453 isp->isp_xflist = NULL;
454 sbc->sbus_dmamap = NULL;
455 return (1);
456 }
457
458 /*
459 * Map a DMA request.
460 * We're guaranteed that rq->req_handle is a value from 1 to isp->isp_maxcmds.
461 */
462
463 static int
464 isp_sbus_dmasetup(isp, xs, rq, iptrp, optr)
465 struct ispsoftc *isp;
466 struct scsipi_xfer *xs;
467 ispreq_t *rq;
468 u_int16_t *iptrp;
469 u_int16_t optr;
470 {
471 struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
472 bus_dmamap_t dmap;
473 ispcontreq_t *crq;
474 int cansleep = (xs->xs_control & XS_CTL_NOSLEEP) == 0;
475 int in = (xs->xs_control & XS_CTL_DATA_IN) != 0;
476
477 if (xs->datalen == 0) {
478 rq->req_seg_count = 1;
479 goto mbxsync;
480 }
481
482 dmap = sbc->sbus_dmamap[isp_handle_index(rq->req_handle)];
483 if (dmap->dm_nsegs != 0) {
484 panic("%s: dma map already allocated\n", isp->isp_name);
485 /* NOTREACHED */
486 }
487 if (bus_dmamap_load(sbc->sbus_dmatag, dmap, xs->data, xs->datalen,
488 NULL, cansleep? BUS_DMA_WAITOK : BUS_DMA_NOWAIT) != 0) {
489 XS_SETERR(xs, HBA_BOTCH);
490 return (CMD_COMPLETE);
491 }
492
493 bus_dmamap_sync(sbc->sbus_dmatag, dmap, dmap->dm_segs[0].ds_addr,
494 xs->datalen, in? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
495
496 if (in) {
497 rq->req_flags |= REQFLAG_DATA_IN;
498 } else {
499 rq->req_flags |= REQFLAG_DATA_OUT;
500 }
501
502 if (XS_CDBLEN(xs) > 12) {
503 crq = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, *iptrp);
504 *iptrp = ISP_NXT_QENTRY(*iptrp, RQUEST_QUEUE_LEN(isp));
505 if (*iptrp == optr) {
506 isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
507 bus_dmamap_unload(sbc->sbus_dmatag, dmap);
508 XS_SETERR(xs, HBA_BOTCH);
509 return (CMD_EAGAIN);
510 }
511 rq->req_seg_count = 2;
512 rq->req_dataseg[0].ds_count = 0;
513 rq->req_dataseg[0].ds_base = 0;
514 bzero((void *)crq, sizeof (*crq));
515 crq->req_header.rqs_entry_count = 1;
516 crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
517 crq->req_dataseg[0].ds_count = xs->datalen;
518 crq->req_dataseg[0].ds_base = dmap->dm_segs[0].ds_addr;
519 ISP_SBUSIFY_ISPHDR(isp, &crq->req_header)
520 } else {
521 rq->req_dataseg[0].ds_count = xs->datalen;
522 rq->req_dataseg[0].ds_base = dmap->dm_segs[0].ds_addr;
523 rq->req_seg_count = 1;
524 }
525
526 mbxsync:
527 ISP_SWIZZLE_REQUEST(isp, rq);
528 #if 0
529 /*
530 * If we ever map cacheable memory, we need to do something like this.
531 */
532 bus_dmamap_sync(sbc->sbus_dmat, sbc->sbus_rquest_dmap, 0,
533 sbc->sbus_rquest_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
534 #endif
535 return (CMD_QUEUED);
536 }
537
538 static void
539 isp_sbus_dmateardown(isp, xs, handle)
540 struct ispsoftc *isp;
541 struct scsipi_xfer *xs;
542 u_int32_t handle;
543 {
544 struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
545 bus_dmamap_t dmap;
546
547 dmap = sbc->sbus_dmamap[isp_handle_index(handle)];
548
549 if (dmap->dm_nsegs == 0) {
550 panic("%s: dma map not already allocated\n", isp->isp_name);
551 /* NOTREACHED */
552 }
553 bus_dmamap_sync(sbc->sbus_dmatag, dmap, dmap->dm_segs[0].ds_addr,
554 xs->datalen, (xs->xs_control & XS_CTL_DATA_IN)?
555 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
556 bus_dmamap_unload(sbc->sbus_dmatag, dmap);
557 }
558