/* $NetBSD: isp_sbus.c,v 1.40 2001/02/28 05:46:46 mjacob Exp $ */
2 /*
3 * This driver, which is contained in NetBSD in the files:
4 *
5 * sys/dev/ic/isp.c
6 * sys/dev/ic/isp_inline.h
7 * sys/dev/ic/isp_netbsd.c
8 * sys/dev/ic/isp_netbsd.h
9 * sys/dev/ic/isp_target.c
10 * sys/dev/ic/isp_target.h
11 * sys/dev/ic/isp_tpublic.h
12 * sys/dev/ic/ispmbox.h
13 * sys/dev/ic/ispreg.h
14 * sys/dev/ic/ispvar.h
15 * sys/microcode/isp/asm_sbus.h
16 * sys/microcode/isp/asm_1040.h
17 * sys/microcode/isp/asm_1080.h
18 * sys/microcode/isp/asm_12160.h
19 * sys/microcode/isp/asm_2100.h
20 * sys/microcode/isp/asm_2200.h
21 * sys/pci/isp_pci.c
22 * sys/sbus/isp_sbus.c
23 *
24 * Is being actively maintained by Matthew Jacob (mjacob (at) netbsd.org).
 * This driver also is shared source with FreeBSD, OpenBSD, Linux, and
 * Solaris versions. This tends to be an interesting maintenance problem.
27 *
28 * Please coordinate with Matthew Jacob on changes you wish to make here.
29 */
30 /*
31 * SBus specific probe and attach routines for Qlogic ISP SCSI adapters.
32 *
33 * Copyright (c) 1997, 2001 by Matthew Jacob
34 * NASA AMES Research Center
35 * All rights reserved.
36 *
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
39 * are met:
40 * 1. Redistributions of source code must retain the above copyright
41 * notice immediately at the beginning of the file, without modification,
42 * this list of conditions, and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 *
47 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
48 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
49 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
50 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
51 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
52 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
53 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
54 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
55 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
56 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
57 * SUCH DAMAGE.
58 *
59 */
60
61 #include <sys/param.h>
62 #include <sys/systm.h>
63 #include <sys/device.h>
64 #include <sys/kernel.h>
65 #include <sys/malloc.h>
66 #include <sys/queue.h>
67
68 #include <machine/bus.h>
69 #include <machine/intr.h>
70 #include <machine/autoconf.h>
71
72 #include <dev/ic/isp_netbsd.h>
73 #include <dev/microcode/isp/asm_sbus.h>
74 #include <dev/sbus/sbusvar.h>
75 #include <sys/reboot.h>
76
/*
 * Gross! But there's no way around this until either bus_dma is corrected
 * or sparc64 iommu.c is fixed.
 *
 * NOTE: the original conditional here was "#if _MACHINE == sparc64".
 * Neither _MACHINE nor sparc64 is a defined macro, and undefined
 * identifiers evaluate to 0 in preprocessor arithmetic, so the test was
 * "0 == 0" -- always true -- and BUS_DMA_COHERENT was requested on every
 * platform, not just sparc64 as the comment above intends.  Test the
 * compiler-predefined __sparc64__ macro instead.
 */
#ifdef __sparc64__
#define	LMAP_FLAGS	(BUS_DMA_NOWAIT|BUS_DMA_COHERENT)
#else
#define	LMAP_FLAGS	(BUS_DMA_NOWAIT)
#endif
86
/*
 * Forward declarations for the bus-specific backend routines that are
 * installed in the machine-dependent vector (mdvec) below.
 */
static int isp_sbus_intr __P((void *));
static u_int16_t isp_sbus_rd_reg __P((struct ispsoftc *, int));
static void isp_sbus_wr_reg __P((struct ispsoftc *, int, u_int16_t));
static int isp_sbus_mbxdma __P((struct ispsoftc *));
static int isp_sbus_dmasetup __P((struct ispsoftc *, struct scsipi_xfer *,
	ispreq_t *, u_int16_t *, u_int16_t));
static void isp_sbus_dmateardown __P((struct ispsoftc *, struct scsipi_xfer *,
	u_int32_t));

/*
 * If no ISP1000 RISC firmware image was compiled in (asm_sbus.h),
 * attach with a NULL firmware pointer.
 */
#ifndef ISP_1000_RISC_CODE
#define ISP_1000_RISC_CODE NULL
#endif
99
/*
 * Machine-dependent vector: the entry points through which the core
 * isp(4) driver reaches this SBus front-end.  Slot order is fixed by
 * struct ispmdvec; the NULL slots are optional hooks this front-end
 * does not provide.
 */
static struct ispmdvec mdvec = {
	isp_sbus_rd_reg,	/* register read */
	isp_sbus_wr_reg,	/* register write */
	isp_sbus_mbxdma,	/* allocate/map request & result queue DMA */
	isp_sbus_dmasetup,	/* map a command's data for DMA */
	isp_sbus_dmateardown,	/* unmap a command's data */
	NULL,
	NULL,
	NULL,
	ISP_1000_RISC_CODE	/* downloadable firmware (may be NULL) */
};
111
/*
 * Per-instance softc.  The generic struct ispsoftc is the first member
 * so this structure can be cast to and from (struct ispsoftc *), as the
 * backend routines below do.
 */
struct isp_sbussoftc {
	struct ispsoftc sbus_isp;	/* generic ISP softc; must be first */
	struct sbusdev sbus_sd;		/* SBus device linkage */
	sdparam sbus_dev;		/* SCSI parameter block for the core */
	bus_space_tag_t sbus_bustag;	/* tag for register access */
	bus_dma_tag_t sbus_dmatag;	/* tag for all DMA maps below */
	bus_space_handle_t sbus_reg;	/* mapped chip registers */
	int sbus_node;			/* OpenPROM node */
	int sbus_pri;			/* interrupt priority */
	struct ispmdvec sbus_mdvec;	/* per-instance copy of mdvec */
	bus_dmamap_t *sbus_dmamap;	/* one data map per command slot */
	bus_dmamap_t sbus_rquest_dmamap;	/* request queue map */
	bus_dmamap_t sbus_result_dmamap;	/* result queue map */
	int16_t sbus_poff[_NREG_BLKS];	/* per-block register offsets */
};
127
128
/* Autoconfiguration glue: match/attach entry points and cfattach. */
static int isp_match __P((struct device *, struct cfdata *, void *));
static void isp_sbus_attach __P((struct device *, struct device *, void *));
struct cfattach isp_sbus_ca = {
	sizeof (struct isp_sbussoftc), isp_match, isp_sbus_attach
};
134
135 static int
136 isp_match(parent, cf, aux)
137 struct device *parent;
138 struct cfdata *cf;
139 void *aux;
140 {
141 int rv;
142 #ifdef DEBUG
143 static int oneshot = 1;
144 #endif
145 struct sbus_attach_args *sa = aux;
146
147 rv = (strcmp(cf->cf_driver->cd_name, sa->sa_name) == 0 ||
148 strcmp("PTI,ptisp", sa->sa_name) == 0 ||
149 strcmp("ptisp", sa->sa_name) == 0 ||
150 strcmp("SUNW,isp", sa->sa_name) == 0 ||
151 strcmp("QLGC,isp", sa->sa_name) == 0);
152 #ifdef DEBUG
153 if (rv && oneshot) {
154 oneshot = 0;
155 printf("Qlogic ISP Driver, NetBSD (sbus) Platform Version "
156 "%d.%d Core Version %d.%d\n",
157 ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
158 ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
159 }
160 #endif
161 return (rv);
162 }
163
164
/*
 * Attach: map registers, size the SBus bursts, wire the interrupt,
 * then run the generic reset/init/attach sequence under the ISP lock.
 */
static void
isp_sbus_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	int freq, ispburst, sbusburst;
	struct sbus_attach_args *sa = aux;
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) self;
	struct ispsoftc *isp = &sbc->sbus_isp;

	printf(" for %s\n", sa->sa_name);

	sbc->sbus_bustag = sa->sa_bustag;
	sbc->sbus_dmatag = sa->sa_dmatag;
	if (sa->sa_nintr != 0)
		sbc->sbus_pri = sa->sa_pri;
	/* Per-instance copy of the template; clock/burst fields set below. */
	sbc->sbus_mdvec = mdvec;

	/* Reuse the PROM's register mapping if it made one; else map now. */
	if (sa->sa_npromvaddrs != 0) {
		sbc->sbus_reg = (bus_space_handle_t)sa->sa_promvaddrs[0];
	} else {
		if (sbus_bus_map(sa->sa_bustag, sa->sa_slot, sa->sa_offset,
		    sa->sa_size, BUS_SPACE_MAP_LINEAR, 0,
		    &sbc->sbus_reg) != 0) {
			printf("%s: cannot map registers\n", self->dv_xname);
			return;
		}
	}
	sbc->sbus_node = sa->sa_node;

	freq = getpropint(sa->sa_node, "clock-frequency", 0);
	if (freq) {
		/*
		 * Convert from HZ to MHz, rounding up.
		 */
		freq = (freq + 500000)/1000000;
#if 0
		printf("%s: %d MHz\n", self->dv_xname, freq);
#endif
	}
	sbc->sbus_mdvec.dv_clock = freq;

	/*
	 * Now figure out what the proper burst sizes, etc., to use.
	 * Unfortunately, there is no ddi_dma_burstsizes here which
	 * walks up the tree finding the limiting burst size node (if
	 * any).
	 */
	sbusburst = ((struct sbus_softc *)parent)->sc_burst;
	if (sbusburst == 0)
		sbusburst = SBUS_BURST_32 - 1;
	ispburst = getpropint(sa->sa_node, "burst-sizes", -1);
	if (ispburst == -1) {
		ispburst = sbusburst;
	}
	/* Limit to what the parent bus supports; drop bits 6 and 7. */
	ispburst &= sbusburst;
	ispburst &= ~(1 << 7);
	ispburst &= ~(1 << 6);
	/* Select the largest remaining burst/FIFO configuration. */
	sbc->sbus_mdvec.dv_conf1 = 0;
	if (ispburst & (1 << 5)) {
		sbc->sbus_mdvec.dv_conf1 = BIU_SBUS_CONF1_FIFO_32;
	} else if (ispburst & (1 << 4)) {
		sbc->sbus_mdvec.dv_conf1 = BIU_SBUS_CONF1_FIFO_16;
	} else if (ispburst & (1 << 3)) {
		sbc->sbus_mdvec.dv_conf1 =
		    BIU_SBUS_CONF1_BURST8 | BIU_SBUS_CONF1_FIFO_8;
	}
	if (sbc->sbus_mdvec.dv_conf1) {
		sbc->sbus_mdvec.dv_conf1 |= BIU_BURST_ENABLE;
	}

	/*
	 * Some early versions of the PTI SBus adapter
	 * would fail in trying to download (via poking)
	 * FW. We give up on them.
	 */
	if (strcmp("PTI,ptisp", sa->sa_name) == 0 ||
	    strcmp("ptisp", sa->sa_name) == 0) {
		sbc->sbus_mdvec.dv_ispfw = NULL;
	}

	isp->isp_mdvec = &sbc->sbus_mdvec;
	isp->isp_bustype = ISP_BT_SBUS;
	isp->isp_type = ISP_HA_SCSI_UNKNOWN;
	isp->isp_param = &sbc->sbus_dev;
	bzero(isp->isp_param, sizeof (sdparam));

	/* Register block offsets within the SBus register window. */
	sbc->sbus_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
	sbc->sbus_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = SBUS_MBOX_REGS_OFF;
	sbc->sbus_poff[SXP_BLOCK >> _BLK_REG_SHFT] = SBUS_SXP_REGS_OFF;
	sbc->sbus_poff[RISC_BLOCK >> _BLK_REG_SHFT] = SBUS_RISC_REGS_OFF;
	sbc->sbus_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;

	/* Establish interrupt channel */
	bus_intr_establish(sbc->sbus_bustag, sbc->sbus_pri, IPL_BIO, 0,
	    isp_sbus_intr, sbc);
	sbus_establish(&sbc->sbus_sd, &sbc->sbus_isp.isp_osinfo._dev);

	/*
	 * Set up logging levels.
	 */
#ifdef ISP_LOGDEFAULT
	isp->isp_dblev = ISP_LOGDEFAULT;
#else
	isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
	if (bootverbose)
		isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
#ifdef SCSIDEBUG
	isp->isp_dblev |= ISP_LOGDEBUG1|ISP_LOGDEBUG2;
#endif
#ifdef DEBUG
	isp->isp_dblev |= ISP_LOGDEBUG0;
#endif
#endif

	isp->isp_confopts = self->dv_cfdata->cf_flags;
	isp->isp_role = ISP_DEFAULT_ROLES;

	/*
	 * There's no tool on sparc to set NVRAM for ISPs, so ignore it.
	 */
	isp->isp_confopts |= ISP_CFG_NONVRAM;
	ISP_LOCK(isp);
	/* NOTE(review): presumably forces polled mailbox completion
	   during bring-up; confirm against isp_netbsd.h. */
	isp->isp_osinfo.no_mbox_ints = 1;
	isp_reset(isp);
	if (isp->isp_state != ISP_RESETSTATE) {
		ISP_UNLOCK(isp);
		return;
	}
	ENABLE_INTS(isp);
	isp_init(isp);
	if (isp->isp_state != ISP_INITSTATE) {
		/* init failed: shut the chip back down and bail */
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		return;
	}

	/*
	 * do generic attach.
	 */
	ISP_UNLOCK(isp);
	isp_attach(isp);
	if (isp->isp_state != ISP_RUNSTATE) {
		ISP_LOCK(isp);
		isp_uninit(isp);
		ISP_UNLOCK(isp);
	}
}
313
314 static int
315 isp_sbus_intr(arg)
316 void *arg;
317 {
318 int rv;
319 struct isp_sbussoftc *sbc = (struct isp_sbussoftc *)arg;
320 bus_dmamap_sync(sbc->sbus_dmatag, sbc->sbus_result_dmamap, 0,
321 sbc->sbus_result_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
322 sbc->sbus_isp.isp_osinfo.onintstack = 1;
323 rv = isp_intr(arg);
324 sbc->sbus_isp.isp_osinfo.onintstack = 0;
325 return (rv);
326 }
327
328 static u_int16_t
329 isp_sbus_rd_reg(isp, regoff)
330 struct ispsoftc *isp;
331 int regoff;
332 {
333 struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
334 int offset = sbc->sbus_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
335 offset += (regoff & 0xff);
336 return (bus_space_read_2(sbc->sbus_bustag, sbc->sbus_reg, offset));
337 }
338
339 static void
340 isp_sbus_wr_reg(isp, regoff, val)
341 struct ispsoftc *isp;
342 int regoff;
343 u_int16_t val;
344 {
345 struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
346 int offset = sbc->sbus_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
347 offset += (regoff & 0xff);
348 bus_space_write_2(sbc->sbus_bustag, sbc->sbus_reg, offset, val);
349 }
350
/*
 * Allocate DMA resources: the transfer bookkeeping array, one data DMA
 * map per command slot, and the request and result queues (each needs
 * map-create, mem-alloc, map-load, and KVA mapping -- "progress" counts
 * completed steps so the error path knows exactly what to undo).
 * Returns 0 on success, 1 on failure (with everything released).
 */
static int
isp_sbus_mbxdma(isp)
	struct ispsoftc *isp;
{
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
	bus_dma_tag_t dmatag = sbc->sbus_dmatag;
	bus_dma_segment_t reqseg, rspseg;
	int reqrs, rsprs, i, progress;
	size_t n;
	bus_size_t len;

	/* Already set up?  isp_rquest_dma is only set on success below. */
	if (isp->isp_rquest_dma)
		return (0);

	/* Handle-indexed array of active transfers for the core driver. */
	n = sizeof (XS_T **) * isp->isp_maxcmds;
	isp->isp_xflist = (XS_T **) malloc(n, M_DEVBUF, M_WAITOK);
	if (isp->isp_xflist == NULL) {
		isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array");
		return (1);
	}
	bzero(isp->isp_xflist, n);
	/* One data DMA map per outstanding command slot. */
	n = sizeof (bus_dmamap_t) * isp->isp_maxcmds;
	sbc->sbus_dmamap = (bus_dmamap_t *) malloc(n, M_DEVBUF, M_WAITOK);
	if (sbc->sbus_dmamap == NULL) {
		free(isp->isp_xflist, M_DEVBUF);
		isp->isp_xflist = NULL;
		isp_prt(isp, ISP_LOGERR, "cannot alloc dmamap array");
		return (1);
	}
	for (i = 0; i < isp->isp_maxcmds; i++) {
		/* Allocate a DMA handle */
		if (bus_dmamap_create(dmatag, MAXPHYS, 1, MAXPHYS, 0,
		    BUS_DMA_NOWAIT, &sbc->sbus_dmamap[i]) != 0) {
			isp_prt(isp, ISP_LOGERR, "cmd DMA maps create error");
			break;
		}
	}
	/* Stopped early?  Destroy the maps already created and bail. */
	if (i < isp->isp_maxcmds) {
		while (--i >= 0) {
			bus_dmamap_destroy(dmatag, sbc->sbus_dmamap[i]);
		}
		free(isp->isp_xflist, M_DEVBUF);
		free(sbc->sbus_dmamap, M_DEVBUF);
		isp->isp_xflist = NULL;
		sbc->sbus_dmamap = NULL;
		return (1);
	}

	/*
	 * Allocate and map the request and response queues
	 */
	progress = 0;
	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	if (bus_dmamap_create(dmatag, len, 1, len, 0, BUS_DMA_NOWAIT,
	    &sbc->sbus_rquest_dmamap) != 0) {
		goto dmafail;
	}
	progress++;		/* 1: request queue map created */
	if (bus_dmamem_alloc(dmatag, len, 0, 0, &reqseg, 1, &reqrs,
	    BUS_DMA_NOWAIT)) {
		goto dmafail;
	}
	progress++;		/* 2: request queue memory allocated */
	if (bus_dmamap_load_raw(dmatag, sbc->sbus_rquest_dmamap, &reqseg, reqrs,
	    len, LMAP_FLAGS) != 0) {
		goto dmafail;
	}
	isp->isp_rquest_dma = sbc->sbus_rquest_dmamap->dm_segs[0].ds_addr;
	progress++;		/* 3: request queue map loaded */
	if (bus_dmamem_map(dmatag, &reqseg, reqrs, len,
	    (caddr_t *)&isp->isp_rquest, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
		goto dmafail;
	}
	progress++;		/* 4: request queue mapped into KVA */

	len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
	if (bus_dmamap_create(dmatag, len, 1, len, 0, BUS_DMA_NOWAIT,
	    &sbc->sbus_result_dmamap) != 0) {
		goto dmafail;
	}
	progress++;		/* 5: result queue map created */
	if (bus_dmamem_alloc(dmatag, len, 0, 0, &rspseg, 1, &rsprs,
	    BUS_DMA_NOWAIT)) {
		goto dmafail;
	}
	progress++;		/* 6: result queue memory allocated */
	if (bus_dmamap_load_raw(dmatag, sbc->sbus_result_dmamap, &rspseg, rsprs,
	    len, LMAP_FLAGS) != 0) {
		goto dmafail;
	}
	progress++;		/* 7: result queue map loaded */
	if (bus_dmamem_map(dmatag, &rspseg, rsprs, len,
	    (caddr_t *)&isp->isp_result, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
		goto dmafail;
	}
	isp->isp_result_dma = sbc->sbus_result_dmamap->dm_segs[0].ds_addr;
	return (0);

dmafail:
	isp_prt(isp, ISP_LOGERR, "Mailbox DMA Setup Failure");

	/* Undo, in reverse order, exactly the steps that completed. */
	if (progress >= 8) {
		/* NOTE(review): unreachable -- progress never exceeds 7
		   on a failure path; kept for symmetry. */
		bus_dmamem_unmap(dmatag,
		    isp->isp_result, ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)));
	}
	if (progress >= 7) {
		bus_dmamap_unload(dmatag, sbc->sbus_result_dmamap);
	}
	if (progress >= 6) {
		bus_dmamem_free(dmatag, &rspseg, rsprs);
	}
	if (progress >= 5) {
		bus_dmamap_destroy(dmatag, sbc->sbus_result_dmamap);
	}


	if (progress >= 4) {
		bus_dmamem_unmap(dmatag,
		    isp->isp_rquest, ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)));
	}
	if (progress >= 3) {
		bus_dmamap_unload(dmatag, sbc->sbus_rquest_dmamap);
	}
	if (progress >= 2) {
		bus_dmamem_free(dmatag, &reqseg, reqrs);
	}
	if (progress >= 1) {
		bus_dmamap_destroy(dmatag, sbc->sbus_rquest_dmamap);
	}

	/* Release the per-command maps and the bookkeeping arrays. */
	for (i = 0; i < isp->isp_maxcmds; i++) {
		bus_dmamap_destroy(dmatag, sbc->sbus_dmamap[i]);
	}
	free(sbc->sbus_dmamap, M_DEVBUF);
	free(isp->isp_xflist, M_DEVBUF);
	isp->isp_xflist = NULL;
	sbc->sbus_dmamap = NULL;
	return (1);
}
490
491 /*
492 * Map a DMA request.
493 * We're guaranteed that rq->req_handle is a value from 1 to isp->isp_maxcmds.
494 */
495
/*
 * Map a command's data buffer for DMA and fill in the request's data
 * segment(s).  Returns CMD_QUEUED on success, CMD_COMPLETE on a map
 * failure (error set in xs), or CMD_EAGAIN when the request queue has
 * no room for a needed continuation entry.
 */
static int
isp_sbus_dmasetup(isp, xs, rq, iptrp, optr)
	struct ispsoftc *isp;
	struct scsipi_xfer *xs;
	ispreq_t *rq;
	u_int16_t *iptrp;
	u_int16_t optr;
{
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
	bus_dmamap_t dmap;
	ispcontreq_t *crq;
	int cansleep = (xs->xs_control & XS_CTL_NOSLEEP) == 0;
	int in = (xs->xs_control & XS_CTL_DATA_IN) != 0;

	/* No data phase: nothing to map; just swizzle and sync. */
	if (xs->datalen == 0) {
		rq->req_seg_count = 1;
		goto mbxsync;
	}

	/* Per-command map allocated in isp_sbus_mbxdma, handle-indexed. */
	dmap = sbc->sbus_dmamap[isp_handle_index(rq->req_handle)];
	if (dmap->dm_nsegs != 0) {
		panic("%s: dma map already allocated\n", isp->isp_name);
		/* NOTREACHED */
	}
	if (bus_dmamap_load(sbc->sbus_dmatag, dmap, xs->data, xs->datalen,
	    NULL, cansleep? BUS_DMA_WAITOK : BUS_DMA_NOWAIT) != 0) {
		XS_SETERR(xs, HBA_BOTCH);
		return (CMD_COMPLETE);
	}

	bus_dmamap_sync(sbc->sbus_dmatag, dmap, 0, xs->datalen,
	    in? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

	if (in) {
		rq->req_flags |= REQFLAG_DATA_IN;
	} else {
		rq->req_flags |= REQFLAG_DATA_OUT;
	}

	/*
	 * Long (> 12 byte) CDBs leave no room for the data segment in
	 * the request itself, so it goes into a continuation entry and
	 * the request's own segment 0 is zeroed.  NOTE(review): only
	 * dm_segs[0] is used -- maps are created with nsegments == 1
	 * in isp_sbus_mbxdma, so the load yields a single segment.
	 */
	if (XS_CDBLEN(xs) > 12) {
		crq = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, *iptrp);
		*iptrp = ISP_NXT_QENTRY(*iptrp, RQUEST_QUEUE_LEN(isp));
		if (*iptrp == optr) {
			/* Queue full: back out the data mapping. */
			isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
			bus_dmamap_unload(sbc->sbus_dmatag, dmap);
			XS_SETERR(xs, HBA_BOTCH);
			return (CMD_EAGAIN);
		}
		rq->req_seg_count = 2;
		rq->req_dataseg[0].ds_count = 0;
		rq->req_dataseg[0].ds_base = 0;
		bzero((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
		crq->req_dataseg[0].ds_count = xs->datalen;
		crq->req_dataseg[0].ds_base = dmap->dm_segs[0].ds_addr;
		ISP_SBUSIFY_ISPHDR(isp, &crq->req_header)
	} else {
		rq->req_dataseg[0].ds_count = xs->datalen;
		rq->req_dataseg[0].ds_base = dmap->dm_segs[0].ds_addr;
		rq->req_seg_count = 1;
	}

mbxsync:
	/* Swizzle the request for the chip and flush the request queue. */
	ISP_SWIZZLE_REQUEST(isp, rq);
	bus_dmamap_sync(sbc->sbus_dmatag, sbc->sbus_rquest_dmamap, 0,
	    sbc->sbus_rquest_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
	return (CMD_QUEUED);
}
565
566 static void
567 isp_sbus_dmateardown(isp, xs, handle)
568 struct ispsoftc *isp;
569 struct scsipi_xfer *xs;
570 u_int32_t handle;
571 {
572 struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
573 bus_dmamap_t dmap;
574
575 dmap = sbc->sbus_dmamap[isp_handle_index(handle)];
576
577 if (dmap->dm_nsegs == 0) {
578 panic("%s: dma map not already allocated\n", isp->isp_name);
579 /* NOTREACHED */
580 }
581 bus_dmamap_sync(sbc->sbus_dmatag, dmap, 0,
582 xs->datalen, (xs->xs_control & XS_CTL_DATA_IN)?
583 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
584 bus_dmamap_unload(sbc->sbus_dmatag, dmap);
585 }
586