/* $NetBSD: isp_sbus.c,v 1.26.4.4 2001/03/16 19:09:28 he Exp $ */
/*
 * This driver, which is contained in NetBSD in the files:
 *
 *	sys/dev/ic/isp.c
 *	sys/dev/ic/isp_inline.h
 *	sys/dev/ic/isp_netbsd.c
 *	sys/dev/ic/isp_netbsd.h
 *	sys/dev/ic/isp_target.c
 *	sys/dev/ic/isp_target.h
 *	sys/dev/ic/isp_tpublic.h
 *	sys/dev/ic/ispmbox.h
 *	sys/dev/ic/ispreg.h
 *	sys/dev/ic/ispvar.h
 *	sys/microcode/isp/asm_sbus.h
 *	sys/microcode/isp/asm_1040.h
 *	sys/microcode/isp/asm_1080.h
 *	sys/microcode/isp/asm_12160.h
 *	sys/microcode/isp/asm_2100.h
 *	sys/microcode/isp/asm_2200.h
 *	sys/pci/isp_pci.c
 *	sys/sbus/isp_sbus.c
 *
 * is being actively maintained by Matthew Jacob (mjacob (at) netbsd.org).
 * This driver also is shared source with FreeBSD, OpenBSD, Linux, and
 * Solaris versions. This tends to be an interesting maintenance problem.
 *
 * Please coordinate with Matthew Jacob on changes you wish to make here.
 */
/*
 * SBus specific probe and attach routines for Qlogic ISP SCSI adapters.
 *
 * Copyright (c) 1997, 2001 by Matthew Jacob
 * NASA AMES Research Center
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/queue.h>

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/autoconf.h>

#include <dev/ic/isp_netbsd.h>
#include <dev/microcode/isp/asm_sbus.h>
#include <dev/sbus/sbusvar.h>
#include <sys/reboot.h>

/*
 * Gross! But there's no way around this until either bus_dma is corrected
 * or sparc64 iommu.c is fixed.
 */
#if _MACHINE == sparc64
#define LMAP_FLAGS	BUS_DMA_NOWAIT|BUS_DMA_COHERENT
#else
#define LMAP_FLAGS	BUS_DMA_NOWAIT
#endif
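
/*
 * LMAP_FLAGS is only used for the bus_dmamap_load_raw() calls on the
 * request and response queue memory below.  On sparc64 it adds
 * BUS_DMA_COHERENT, presumably so the queue memory ends up with a
 * consistent (uncached) mapping; other platforms just get BUS_DMA_NOWAIT.
 */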

static int isp_sbus_intr __P((void *));
static u_int16_t isp_sbus_rd_reg __P((struct ispsoftc *, int));
static void isp_sbus_wr_reg __P((struct ispsoftc *, int, u_int16_t));
static int isp_sbus_mbxdma __P((struct ispsoftc *));
static int isp_sbus_dmasetup __P((struct ispsoftc *, struct scsipi_xfer *,
    ispreq_t *, u_int16_t *, u_int16_t));
static void isp_sbus_dmateardown __P((struct ispsoftc *, struct scsipi_xfer *,
    u_int32_t));

#ifndef ISP_1000_RISC_CODE
#define ISP_1000_RISC_CODE	NULL
#endif

static struct ispmdvec mdvec = {
        isp_sbus_rd_reg,
        isp_sbus_wr_reg,
        isp_sbus_mbxdma,
        isp_sbus_dmasetup,
        isp_sbus_dmateardown,
        NULL,
        NULL,
        NULL,
        ISP_1000_RISC_CODE
};

struct isp_sbussoftc {
        struct ispsoftc         sbus_isp;
        struct sbusdev          sbus_sd;
        sdparam                 sbus_dev;
        bus_space_tag_t         sbus_bustag;
        bus_dma_tag_t           sbus_dmatag;
        bus_space_handle_t      sbus_reg;
        int                     sbus_node;
        int                     sbus_pri;
        struct ispmdvec         sbus_mdvec;
        bus_dmamap_t            *sbus_dmamap;
        bus_dmamap_t            sbus_rquest_dmamap;
        bus_dmamap_t            sbus_result_dmamap;
        int16_t                 sbus_poff[_NREG_BLKS];
};


static int isp_match __P((struct device *, struct cfdata *, void *));
static void isp_sbus_attach __P((struct device *, struct device *, void *));

struct cfattach isp_sbus_ca = {
        sizeof (struct isp_sbussoftc), isp_match, isp_sbus_attach
};

static int
isp_match(parent, cf, aux)
        struct device *parent;
        struct cfdata *cf;
        void *aux;
{
        int rv;
#ifdef DEBUG
        static int oneshot = 1;
#endif
        struct sbus_attach_args *sa = aux;

        rv = (strcmp(cf->cf_driver->cd_name, sa->sa_name) == 0 ||
            strcmp("PTI,ptisp", sa->sa_name) == 0 ||
            strcmp("ptisp", sa->sa_name) == 0 ||
            strcmp("SUNW,isp", sa->sa_name) == 0 ||
            strcmp("QLGC,isp", sa->sa_name) == 0);
#ifdef DEBUG
        if (rv && oneshot) {
                oneshot = 0;
                printf("Qlogic ISP Driver, NetBSD (sbus) Platform Version "
                    "%d.%d Core Version %d.%d\n",
                    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
                    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
        }
#endif
        return (rv);
}


static void
isp_sbus_attach(parent, self, aux)
        struct device *parent, *self;
        void *aux;
{
        int freq, ispburst, sbusburst;
        struct sbus_attach_args *sa = aux;
        struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) self;
        struct ispsoftc *isp = &sbc->sbus_isp;

        printf(" for %s\n", sa->sa_name);

        sbc->sbus_bustag = sa->sa_bustag;
        sbc->sbus_dmatag = sa->sa_dmatag;
        if (sa->sa_nintr != 0)
                sbc->sbus_pri = sa->sa_pri;
        sbc->sbus_mdvec = mdvec;

        if (sa->sa_npromvaddrs != 0) {
                sbc->sbus_reg = (bus_space_handle_t)sa->sa_promvaddrs[0];
        } else {
                if (sbus_bus_map(sa->sa_bustag, sa->sa_slot, sa->sa_offset,
                    sa->sa_size, BUS_SPACE_MAP_LINEAR, 0,
                    &sbc->sbus_reg) != 0) {
                        printf("%s: cannot map registers\n", self->dv_xname);
                        return;
                }
        }
        sbc->sbus_node = sa->sa_node;

        freq = getpropint(sa->sa_node, "clock-frequency", 0);
        if (freq) {
                /*
                 * Convert from Hz to MHz, rounding to the nearest MHz.
                 */
                freq = (freq + 500000)/1000000;
#if 0
                printf("%s: %d MHz\n", self->dv_xname, freq);
#endif
        }
        sbc->sbus_mdvec.dv_clock = freq;
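        /*
         * Worked example: a clock-frequency property of 25000000 (25 MHz)
         * gives (25000000 + 500000) / 1000000 == 25, while 21250000 gives
         * 21750000 / 1000000 == 21, i.e. the value is rounded to the
         * nearest MHz.
         */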

        /*
         * Now figure out the proper burst sizes, etc., to use.
         * Unfortunately, there is no ddi_dma_burstsizes here which
         * walks up the tree finding the limiting burst size node (if
         * any).
         */
        sbusburst = ((struct sbus_softc *)parent)->sc_burst;
        if (sbusburst == 0)
                sbusburst = SBUS_BURST_32 - 1;
        ispburst = getpropint(sa->sa_node, "burst-sizes", -1);
        if (ispburst == -1) {
                ispburst = sbusburst;
        }
        ispburst &= sbusburst;
        ispburst &= ~(1 << 7);
        ispburst &= ~(1 << 6);
        sbc->sbus_mdvec.dv_conf1 = 0;
        if (ispburst & (1 << 5)) {
                sbc->sbus_mdvec.dv_conf1 = BIU_SBUS_CONF1_FIFO_32;
        } else if (ispburst & (1 << 4)) {
                sbc->sbus_mdvec.dv_conf1 = BIU_SBUS_CONF1_FIFO_16;
        } else if (ispburst & (1 << 3)) {
                sbc->sbus_mdvec.dv_conf1 =
                    BIU_SBUS_CONF1_BURST8 | BIU_SBUS_CONF1_FIFO_8;
        }
        if (sbc->sbus_mdvec.dv_conf1) {
                sbc->sbus_mdvec.dv_conf1 |= BIU_BURST_ENABLE;
        }
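        /*
         * In the SBus "burst-sizes" encoding, bit n set means 2^n-byte
         * bursts are supported, so bits 3/4/5 above select 8/16/32-byte
         * bursts.  Bits 6 and 7 (64- and 128-byte bursts) are masked off,
         * presumably because the chip cannot use them; the largest burst
         * size both sides agree on picks the FIFO setting written to CONF1.
         */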

        /*
         * Some early versions of the PTI SBus adapter would fail when we
         * tried to download firmware to them (via poking), so we don't
         * even try.
         */
        if (strcmp("PTI,ptisp", sa->sa_name) == 0 ||
            strcmp("ptisp", sa->sa_name) == 0) {
                sbc->sbus_mdvec.dv_ispfw = NULL;
        }

        isp->isp_mdvec = &sbc->sbus_mdvec;
        isp->isp_bustype = ISP_BT_SBUS;
        isp->isp_type = ISP_HA_SCSI_UNKNOWN;
        isp->isp_param = &sbc->sbus_dev;
        bzero(isp->isp_param, sizeof (sdparam));

        sbc->sbus_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
        sbc->sbus_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = SBUS_MBOX_REGS_OFF;
        sbc->sbus_poff[SXP_BLOCK >> _BLK_REG_SHFT] = SBUS_SXP_REGS_OFF;
        sbc->sbus_poff[RISC_BLOCK >> _BLK_REG_SHFT] = SBUS_RISC_REGS_OFF;
        sbc->sbus_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;

        /* Establish interrupt channel */
        bus_intr_establish(sbc->sbus_bustag, sbc->sbus_pri, IPL_BIO, 0,
            isp_sbus_intr, sbc);
        sbus_establish(&sbc->sbus_sd, &sbc->sbus_isp.isp_osinfo._dev);

        /*
         * Set up logging levels.
         */
#ifdef ISP_LOGDEFAULT
        isp->isp_dblev = ISP_LOGDEFAULT;
#else
        isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
#ifdef SCSIDEBUG
        isp->isp_dblev |= ISP_LOGDEBUG1|ISP_LOGDEBUG2;
#endif
#ifdef DEBUG
        isp->isp_dblev |= ISP_LOGDEBUG0;
        isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
#endif
#endif

        isp->isp_confopts = self->dv_cfdata->cf_flags;
        isp->isp_role = ISP_DEFAULT_ROLES;

        /*
         * There's no tool on sparc to set NVRAM for ISPs, so ignore it.
         */
        isp->isp_confopts |= ISP_CFG_NONVRAM;
        ISP_LOCK(isp);
        isp->isp_osinfo.no_mbox_ints = 1;
        isp_reset(isp);
        if (isp->isp_state != ISP_RESETSTATE) {
                ISP_UNLOCK(isp);
                return;
        }
        ENABLE_INTS(isp);
        isp_init(isp);
        if (isp->isp_state != ISP_INITSTATE) {
                isp_uninit(isp);
                ISP_UNLOCK(isp);
                return;
        }

        /*
         * do generic attach.
         */
        ISP_UNLOCK(isp);
        isp_attach(isp);
        if (isp->isp_state != ISP_RUNSTATE) {
                ISP_LOCK(isp);
                isp_uninit(isp);
                ISP_UNLOCK(isp);
        }
}

static int
isp_sbus_intr(arg)
        void *arg;
{
        int rv;
        struct isp_sbussoftc *sbc = (struct isp_sbussoftc *)arg;

        /*
         * Sync the response queue so the CPU sees any completions the
         * HBA has DMA'd into it before isp_intr() reads them.
         */
        bus_dmamap_sync(sbc->sbus_dmatag, sbc->sbus_result_dmamap, 0,
            sbc->sbus_result_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
        sbc->sbus_isp.isp_osinfo.onintstack = 1;
        rv = isp_intr(arg);
        sbc->sbus_isp.isp_osinfo.onintstack = 0;
        return (rv);
}

static u_int16_t
isp_sbus_rd_reg(isp, regoff)
        struct ispsoftc *isp;
        int regoff;
{
        struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
        int offset = sbc->sbus_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
        offset += (regoff & 0xff);
        return (bus_space_read_2(sbc->sbus_bustag, sbc->sbus_reg, offset));
}

static void
isp_sbus_wr_reg(isp, regoff, val)
        struct ispsoftc *isp;
        int regoff;
        u_int16_t val;
{
        struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
        int offset = sbc->sbus_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
        offset += (regoff & 0xff);
        bus_space_write_2(sbc->sbus_bustag, sbc->sbus_reg, offset, val);
}

static int
isp_sbus_mbxdma(isp)
        struct ispsoftc *isp;
{
        struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
        bus_dma_tag_t dmatag = sbc->sbus_dmatag;
        bus_dma_segment_t reqseg, rspseg;
        int reqrs, rsprs, i, progress;
        size_t n;
        bus_size_t len;

        if (isp->isp_rquest_dma)
                return (0);

        n = sizeof (XS_T **) * isp->isp_maxcmds;
        isp->isp_xflist = (XS_T **) malloc(n, M_DEVBUF, M_WAITOK);
        if (isp->isp_xflist == NULL) {
                isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array");
                return (1);
        }
        bzero(isp->isp_xflist, n);
        n = sizeof (bus_dmamap_t) * isp->isp_maxcmds;
        sbc->sbus_dmamap = (bus_dmamap_t *) malloc(n, M_DEVBUF, M_WAITOK);
        if (sbc->sbus_dmamap == NULL) {
                free(isp->isp_xflist, M_DEVBUF);
                isp->isp_xflist = NULL;
                isp_prt(isp, ISP_LOGERR, "cannot alloc dmamap array");
                return (1);
        }
        for (i = 0; i < isp->isp_maxcmds; i++) {
                /* Allocate a DMA handle */
                if (bus_dmamap_create(dmatag, MAXPHYS, 1, MAXPHYS, 0,
                    BUS_DMA_NOWAIT, &sbc->sbus_dmamap[i]) != 0) {
                        isp_prt(isp, ISP_LOGERR, "cmd DMA maps create error");
                        break;
                }
        }
        if (i < isp->isp_maxcmds) {
                while (--i >= 0) {
                        bus_dmamap_destroy(dmatag, sbc->sbus_dmamap[i]);
                }
                free(isp->isp_xflist, M_DEVBUF);
                free(sbc->sbus_dmamap, M_DEVBUF);
                isp->isp_xflist = NULL;
                sbc->sbus_dmamap = NULL;
                return (1);
        }

        /*
         * Allocate and map the request and response queues.
         */
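        /*
         * Each queue goes through the same four bus_dma(9) steps: create a
         * DMA map, allocate DMA-safe memory, load that memory into the map
         * (which yields the bus address handed to the chip), and map it
         * into kernel virtual space.  The progress counter counts completed
         * steps so that the dmafail path can unwind them in reverse.
         */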
        progress = 0;
        len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
        if (bus_dmamap_create(dmatag, len, 1, len, 0, BUS_DMA_NOWAIT,
            &sbc->sbus_rquest_dmamap) != 0) {
                goto dmafail;
        }
        progress++;
        if (bus_dmamem_alloc(dmatag, len, 0, 0, &reqseg, 1, &reqrs,
            BUS_DMA_NOWAIT)) {
                goto dmafail;
        }
        progress++;
        if (bus_dmamap_load_raw(dmatag, sbc->sbus_rquest_dmamap, &reqseg, reqrs,
            len, LMAP_FLAGS) != 0) {
                goto dmafail;
        }
        isp->isp_rquest_dma = sbc->sbus_rquest_dmamap->dm_segs[0].ds_addr;
        progress++;
        if (bus_dmamem_map(dmatag, &reqseg, reqrs, len,
            (caddr_t *)&isp->isp_rquest, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
                goto dmafail;
        }
        progress++;

        len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
        if (bus_dmamap_create(dmatag, len, 1, len, 0, BUS_DMA_NOWAIT,
            &sbc->sbus_result_dmamap) != 0) {
                goto dmafail;
        }
        progress++;
        if (bus_dmamem_alloc(dmatag, len, 0, 0, &rspseg, 1, &rsprs,
            BUS_DMA_NOWAIT)) {
                goto dmafail;
        }
        progress++;
        if (bus_dmamap_load_raw(dmatag, sbc->sbus_result_dmamap, &rspseg, rsprs,
            len, LMAP_FLAGS) != 0) {
                goto dmafail;
        }
        progress++;
        if (bus_dmamem_map(dmatag, &rspseg, rsprs, len,
            (caddr_t *)&isp->isp_result, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
                goto dmafail;
        }
        isp->isp_result_dma = sbc->sbus_result_dmamap->dm_segs[0].ds_addr;
        return (0);

dmafail:
        isp_prt(isp, ISP_LOGERR, "Mailbox DMA Setup Failure");

        /* Unwind, in reverse order, whatever progress says was set up. */
        if (progress >= 8) {
                bus_dmamem_unmap(dmatag,
                    isp->isp_result, ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)));
        }
        if (progress >= 7) {
                bus_dmamap_unload(dmatag, sbc->sbus_result_dmamap);
        }
        if (progress >= 6) {
                bus_dmamem_free(dmatag, &rspseg, rsprs);
        }
        if (progress >= 5) {
                bus_dmamap_destroy(dmatag, sbc->sbus_result_dmamap);
        }

        if (progress >= 4) {
                bus_dmamem_unmap(dmatag,
                    isp->isp_rquest, ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)));
        }
        if (progress >= 3) {
                bus_dmamap_unload(dmatag, sbc->sbus_rquest_dmamap);
        }
        if (progress >= 2) {
                bus_dmamem_free(dmatag, &reqseg, reqrs);
        }
        if (progress >= 1) {
                bus_dmamap_destroy(dmatag, sbc->sbus_rquest_dmamap);
        }

        for (i = 0; i < isp->isp_maxcmds; i++) {
                bus_dmamap_destroy(dmatag, sbc->sbus_dmamap[i]);
        }
        free(sbc->sbus_dmamap, M_DEVBUF);
        free(isp->isp_xflist, M_DEVBUF);
        isp->isp_xflist = NULL;
        sbc->sbus_dmamap = NULL;
        return (1);
}

/*
 * Map a DMA request.
 * We're guaranteed that rq->req_handle is a value from 1 to isp->isp_maxcmds.
 */
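/*
 * isp_handle_index() is assumed here to map that 1..isp_maxcmds handle to a
 * zero-based index into sbus_dmamap[], so each outstanding command uses its
 * own preallocated DMA map.
 */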

static int
isp_sbus_dmasetup(isp, xs, rq, iptrp, optr)
        struct ispsoftc *isp;
        struct scsipi_xfer *xs;
        ispreq_t *rq;
        u_int16_t *iptrp;
        u_int16_t optr;
{
        struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
        bus_dmamap_t dmap;
        ispcontreq_t *crq;
        int cansleep = (xs->xs_control & XS_CTL_NOSLEEP) == 0;
        int in = (xs->xs_control & XS_CTL_DATA_IN) != 0;

        if (xs->datalen == 0) {
                rq->req_seg_count = 1;
                goto mbxsync;
        }

        dmap = sbc->sbus_dmamap[isp_handle_index(rq->req_handle)];
        if (dmap->dm_nsegs != 0) {
                panic("%s: dma map already allocated\n", isp->isp_name);
                /* NOTREACHED */
        }
        if (bus_dmamap_load(sbc->sbus_dmatag, dmap, xs->data, xs->datalen,
            NULL, cansleep ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT) != 0) {
                XS_SETERR(xs, HBA_BOTCH);
                return (CMD_COMPLETE);
        }

        bus_dmamap_sync(sbc->sbus_dmatag, dmap, 0, xs->datalen,
            in ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

        if (in) {
                rq->req_flags |= REQFLAG_DATA_IN;
        } else {
                rq->req_flags |= REQFLAG_DATA_OUT;
        }

        if (XS_CDBLEN(xs) > 12) {
                /*
                 * Long CDB: describe the data in a separate continuation
                 * (RQSTYPE_DATASEG) entry rather than in the request entry
                 * itself (which, presumably, has its dataseg area taken up
                 * by the extended CDB).
                 */
                crq = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, *iptrp);
                *iptrp = ISP_NXT_QENTRY(*iptrp, RQUEST_QUEUE_LEN(isp));
                if (*iptrp == optr) {
                        isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
                        bus_dmamap_unload(sbc->sbus_dmatag, dmap);
                        XS_SETERR(xs, HBA_BOTCH);
                        return (CMD_EAGAIN);
                }
                rq->req_seg_count = 2;
                rq->req_dataseg[0].ds_count = 0;
                rq->req_dataseg[0].ds_base = 0;
                bzero((void *)crq, sizeof (*crq));
                crq->req_header.rqs_entry_count = 1;
                crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
                crq->req_dataseg[0].ds_count = xs->datalen;
                crq->req_dataseg[0].ds_base = dmap->dm_segs[0].ds_addr;
                ISP_SBUSIFY_ISPHDR(isp, &crq->req_header)
        } else {
                rq->req_dataseg[0].ds_count = xs->datalen;
                rq->req_dataseg[0].ds_base = dmap->dm_segs[0].ds_addr;
                rq->req_seg_count = 1;
        }

mbxsync:
        ISP_SWIZZLE_REQUEST(isp, rq);
        bus_dmamap_sync(sbc->sbus_dmatag, sbc->sbus_rquest_dmamap, 0,
            sbc->sbus_rquest_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
        return (CMD_QUEUED);
}

static void
isp_sbus_dmateardown(isp, xs, handle)
        struct ispsoftc *isp;
        struct scsipi_xfer *xs;
        u_int32_t handle;
{
        struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
        bus_dmamap_t dmap;

        dmap = sbc->sbus_dmamap[isp_handle_index(handle)];

        if (dmap->dm_nsegs == 0) {
                panic("%s: dma map not already allocated\n", isp->isp_name);
                /* NOTREACHED */
        }
        bus_dmamap_sync(sbc->sbus_dmatag, dmap, 0,
            xs->datalen, (xs->xs_control & XS_CTL_DATA_IN) ?
            BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
        bus_dmamap_unload(sbc->sbus_dmatag, dmap);
}