/*	$NetBSD: isp_sbus.c,v 1.73.4.2 2009/06/20 07:20:28 yamt Exp $	*/
/*
 * SBus specific probe and attach routines for Qlogic ISP SCSI adapters.
 *
 * Copyright (C) 1997, 1998, 1999 National Aeronautics & Space Administration
 * All rights reserved.
 *
 * Additional Copyright (C) 2000-2007 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
34
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: isp_sbus.c,v 1.73.4.2 2009/06/20 07:20:28 yamt Exp $");
37
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/device.h>
41 #include <sys/kernel.h>
42 #include <sys/malloc.h>
43 #include <sys/queue.h>
44 #include <dev/ic/isp_netbsd.h>
45 #include <sys/intr.h>
46 #include <machine/autoconf.h>
47 #include <dev/sbus/sbusvar.h>
48 #include <sys/reboot.h>
49
/*
 * Forward declarations for the SBus-specific hooks installed in the
 * mdvec dispatch table below.
 */
static void isp_sbus_reset0(ispsoftc_t *);
static void isp_sbus_reset1(ispsoftc_t *);
static int isp_sbus_intr(void *);
static int
isp_sbus_rd_isr(ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *);
static uint32_t isp_sbus_rd_reg(ispsoftc_t *, int);
static void isp_sbus_wr_reg (ispsoftc_t *, int, uint32_t);
static int isp_sbus_mbxdma(ispsoftc_t *);
static int isp_sbus_dmasetup(ispsoftc_t *, XS_T *, ispreq_t *, uint32_t *,
    uint32_t);
static void isp_sbus_dmateardown(ispsoftc_t *, XS_T *, uint32_t);

#ifndef ISP_DISABLE_FW
/* Downloadable ISP1000 RISC firmware image. */
#include <dev/microcode/isp/asm_sbus.h>
#else
/* Firmware download compiled out; NULL tells the core not to download. */
#define ISP_1000_RISC_CODE NULL
#endif
67
/*
 * Bus-specific method dispatch vector handed to the common ISP core.
 * Slot meanings follow struct ispmdvec in dev/ic/isp* headers; the
 * NULL slot and trailing zeros take the core's defaults.
 */
static const struct ispmdvec mdvec = {
	isp_sbus_rd_isr,	/* read interrupt status */
	isp_sbus_rd_reg,	/* register read */
	isp_sbus_wr_reg,	/* register write */
	isp_sbus_mbxdma,	/* allocate queue/command DMA resources */
	isp_sbus_dmasetup,	/* map a command's data for DMA */
	isp_sbus_dmateardown,	/* unmap a command's data */
	isp_sbus_reset0,	/* pre-reset hook: disable interrupts */
	isp_sbus_reset1,	/* post-reset hook: re-enable interrupts */
	NULL,			/* optional hook unused here -- see ispmdvec */
	ISP_1000_RISC_CODE,	/* firmware image (or NULL) */
	0,
	0
};
82
/*
 * Per-instance softc.  The generic ispsoftc_t MUST be the first member:
 * the code below freely casts between ispsoftc_t * and
 * struct isp_sbussoftc * (see isp_sbus_intr, isp_sbus_rd_reg, etc.).
 */
struct isp_sbussoftc {
	ispsoftc_t sbus_isp;		/* common ISP state; must be first */
	struct sbusdev sbus_sd;		/* SBus device linkage */
	sdparam sbus_dev;		/* SCSI parameter block (isp_param) */
	struct scsipi_channel sbus_chan; /* scsipi channel for this HBA */
	bus_space_tag_t sbus_bustag;	/* register access tag */
	bus_space_handle_t sbus_reg;	/* mapped register window */
	int sbus_node;			/* OpenPROM node */
	int sbus_pri;			/* interrupt priority from PROM */
	struct ispmdvec sbus_mdvec;	/* private, writable copy of mdvec */
	bus_dmamap_t *sbus_dmamap;	/* one DMA map per command slot */
	int16_t sbus_poff[_NREG_BLKS];	/* per-block register offsets */
};
96
97
/* Autoconfiguration glue: match/attach entry points for isp@sbus. */
static int isp_match(device_t, cfdata_t, void *);
static void isp_sbus_attach(device_t, device_t, void *);
CFATTACH_DECL(isp_sbus, sizeof (struct isp_sbussoftc),
    isp_match, isp_sbus_attach, NULL, NULL);
102
103 static int
104 isp_match(device_t parent, cfdata_t cf, void *aux)
105 {
106 int rv;
107 struct sbus_attach_args *sa = aux;
108
109 rv = (strcmp(cf->cf_name, sa->sa_name) == 0 ||
110 strcmp("PTI,ptisp", sa->sa_name) == 0 ||
111 strcmp("ptisp", sa->sa_name) == 0 ||
112 strcmp("SUNW,isp", sa->sa_name) == 0 ||
113 strcmp("QLGC,isp", sa->sa_name) == 0);
114
115 return (rv);
116 }
117
118
/*
 * Attach routine: map the chip's registers, derive clock and burst-size
 * settings from the PROM, hook up the interrupt, then reset/initialize
 * the chip and hand off to the bus-independent isp_attach().
 *
 * NOTE(review): the statement order here matters -- registers must be
 * mapped and the mdvec copy finalized before isp_reset() runs.
 */
static void
isp_sbus_attach(device_t parent, device_t self, void *aux)
{
	int freq, ispburst, sbusburst;
	struct sbus_attach_args *sa = aux;
	/* Old-style autoconf: device_t doubles as the softc here. */
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) self;
	struct sbus_softc *sbsc = device_private(parent);
	ispsoftc_t *isp = &sbc->sbus_isp;

	printf(" for %s\n", sa->sa_name);

	/* Single-channel SCSI adapter. */
	isp->isp_nchan = isp->isp_osinfo.adapter.adapt_nchannels = 1;

	sbc->sbus_bustag = sa->sa_bustag;
	if (sa->sa_nintr != 0)
		sbc->sbus_pri = sa->sa_pri;
	/* Private copy so we can tune dv_clock/dv_conf1 per instance. */
	sbc->sbus_mdvec = mdvec;

	/* Reuse the PROM's mapping if it left one, else map ourselves. */
	if (sa->sa_npromvaddrs) {
		sbus_promaddr_to_handle(sa->sa_bustag,
		    sa->sa_promvaddrs[0], &sbc->sbus_reg);
	} else {
		if (sbus_bus_map(sa->sa_bustag, sa->sa_slot, sa->sa_offset,
		    sa->sa_size, 0, &sbc->sbus_reg) != 0) {
			aprint_error_dev(self, "cannot map registers\n");
			return;
		}
	}
	sbc->sbus_node = sa->sa_node;

	freq = prom_getpropint(sa->sa_node, "clock-frequency", 0);
	if (freq) {
		/*
		 * Convert from HZ to MHz, rounding up.
		 */
		freq = (freq + 500000)/1000000;
	}
	sbc->sbus_mdvec.dv_clock = freq;

	/*
	 * Now figure out what the proper burst sizes, etc., to use.
	 * Unfortunately, there is no ddi_dma_burstsizes here which
	 * walks up the tree finding the limiting burst size node (if
	 * any).
	 */
	sbusburst = sbsc->sc_burst;
	if (sbusburst == 0)
		sbusburst = SBUS_BURST_32 - 1;
	ispburst = prom_getpropint(sa->sa_node, "burst-sizes", -1);
	if (ispburst == -1) {
		ispburst = sbusburst;
	}
	/* Intersect chip and bus capabilities; drop 64/128-byte bursts. */
	ispburst &= sbusburst;
	ispburst &= ~(1 << 7);
	ispburst &= ~(1 << 6);
	sbc->sbus_mdvec.dv_conf1 = 0;
	/* Pick the largest supported burst/FIFO configuration. */
	if (ispburst & (1 << 5)) {
		sbc->sbus_mdvec.dv_conf1 = BIU_SBUS_CONF1_FIFO_32;
	} else if (ispburst & (1 << 4)) {
		sbc->sbus_mdvec.dv_conf1 = BIU_SBUS_CONF1_FIFO_16;
	} else if (ispburst & (1 << 3)) {
		sbc->sbus_mdvec.dv_conf1 =
		    BIU_SBUS_CONF1_BURST8 | BIU_SBUS_CONF1_FIFO_8;
	}
	if (sbc->sbus_mdvec.dv_conf1) {
		sbc->sbus_mdvec.dv_conf1 |= BIU_BURST_ENABLE;
	}

	isp->isp_mdvec = &sbc->sbus_mdvec;
	isp->isp_bustype = ISP_BT_SBUS;
	isp->isp_type = ISP_HA_SCSI_UNKNOWN;
	isp->isp_param = &sbc->sbus_dev;
	isp->isp_dmatag = sa->sa_dmatag;
	MEMZERO(isp->isp_param, sizeof (sdparam));
	isp->isp_osinfo.chan = &sbc->sbus_chan;

	/* Register-block offset table used by IspVirt2Off()/rd_reg/wr_reg. */
	sbc->sbus_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
	sbc->sbus_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = SBUS_MBOX_REGS_OFF;
	sbc->sbus_poff[SXP_BLOCK >> _BLK_REG_SHFT] = SBUS_SXP_REGS_OFF;
	sbc->sbus_poff[RISC_BLOCK >> _BLK_REG_SHFT] = SBUS_RISC_REGS_OFF;
	sbc->sbus_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;

	/* Establish interrupt channel */
	/* NOTE(review): return value unchecked -- confirm this is intended. */
	bus_intr_establish(sbc->sbus_bustag, sbc->sbus_pri, IPL_BIO,
	    isp_sbus_intr, sbc);
	sbus_establish(&sbc->sbus_sd, &sbc->sbus_isp.isp_osinfo.dev);

	/*
	 * Set up logging levels.
	 */
#ifdef ISP_LOGDEFAULT
	isp->isp_dblev = ISP_LOGDEFAULT;
#else
	isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
	if (bootverbose)
		isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
#ifdef SCSIDEBUG
	isp->isp_dblev |= ISP_LOGDEBUG1|ISP_LOGDEBUG2;
#endif
#ifdef DEBUG
	isp->isp_dblev |= ISP_LOGDEBUG0;
#endif
#endif

	/* Per-device config flags become driver config options. */
	isp->isp_confopts = device_cfdata(self)->cf_flags;
	SDPARAM(isp, 0)->role = ISP_DEFAULT_ROLES;

	/*
	 * There's no tool on sparc to set NVRAM for ISPs, so ignore it.
	 */
	isp->isp_confopts |= ISP_CFG_NONVRAM;

	/*
	 * Mark things if we're a PTI SBus adapter.
	 */
	if (strcmp("PTI,ptisp", sa->sa_name) == 0 ||
	    strcmp("ptisp", sa->sa_name) == 0) {
		SDPARAM(isp, 0)->isp_ptisp = 1;
	}
	/* Reset and initialize the chip under the driver lock. */
	ISP_LOCK(isp);
	isp_reset(isp);
	if (isp->isp_state != ISP_RESETSTATE) {
		ISP_UNLOCK(isp);
		return;
	}
	ISP_ENABLE_INTS(isp);
	isp_init(isp);
	if (isp->isp_state != ISP_INITSTATE) {
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		return;
	}

	/*
	 * do generic attach.
	 */
	ISP_UNLOCK(isp);
	isp_attach(isp);
}
258
259
/*
 * Pre-reset hook: mask chip interrupts before the core resets the chip.
 */
static void
isp_sbus_reset0(ispsoftc_t *isp)
{
	ISP_DISABLE_INTS(isp);
}
265
/*
 * Post-reset hook: re-enable chip interrupts after a reset completes.
 */
static void
isp_sbus_reset1(ispsoftc_t *isp)
{
	ISP_ENABLE_INTS(isp);
}
271
272 static int
273 isp_sbus_intr(void *arg)
274 {
275 uint32_t isr;
276 uint16_t sema, mbox;
277 ispsoftc_t *isp = arg;
278
279 if (ISP_READ_ISR(isp, &isr, &sema, &mbox) == 0) {
280 isp->isp_intbogus++;
281 return (0);
282 } else {
283 struct isp_sbussoftc *sbc = arg;
284 sbc->sbus_isp.isp_osinfo.onintstack = 1;
285 isp_intr(isp, isr, sema, mbox);
286 sbc->sbus_isp.isp_osinfo.onintstack = 0;
287 return (1);
288 }
289 }
290
/*
 * Translate a virtual register token (block selector in the high bits,
 * offset in the low byte) into a raw bus-space offset, using the
 * per-block offset table filled in at attach time.
 */
#define IspVirt2Off(a, x) \
	(((struct isp_sbussoftc *)a)->sbus_poff[((x) & _BLK_REG_MASK) >> \
	_BLK_REG_SHFT] + ((x) & 0xff))

/* 16-bit register read at a raw bus-space offset. */
#define BXR2(sbc, off) \
	bus_space_read_2(sbc->sbus_bustag, sbc->sbus_reg, off)
297
298 static int
299 isp_sbus_rd_isr(ispsoftc_t *isp, uint32_t *isrp,
300 uint16_t *semap, uint16_t *mbp)
301 {
302 struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
303 uint32_t isr;
304 uint16_t sema;
305
306 isr = BXR2(sbc, IspVirt2Off(isp, BIU_ISR));
307 sema = BXR2(sbc, IspVirt2Off(isp, BIU_SEMA));
308 isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema);
309 isr &= INT_PENDING_MASK(isp);
310 sema &= BIU_SEMA_LOCK;
311 if (isr == 0 && sema == 0) {
312 return (0);
313 }
314 *isrp = isr;
315 if ((*semap = sema) != 0) {
316 *mbp = BXR2(sbc, IspVirt2Off(isp, OUTMAILBOX0));
317 }
318 return (1);
319 }
320
321 static uint32_t
322 isp_sbus_rd_reg(ispsoftc_t *isp, int regoff)
323 {
324 struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
325 int offset = sbc->sbus_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
326 offset += (regoff & 0xff);
327 return (bus_space_read_2(sbc->sbus_bustag, sbc->sbus_reg, offset));
328 }
329
330 static void
331 isp_sbus_wr_reg(ispsoftc_t *isp, int regoff, uint32_t val)
332 {
333 struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
334 int offset = sbc->sbus_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
335 offset += (regoff & 0xff);
336 bus_space_write_2(sbc->sbus_bustag, sbc->sbus_reg, offset, val);
337 }
338
/*
 * Allocate all DMA resources used for command submission: the xflist
 * bookkeeping array, one DMA map per command slot, and the DMA-safe
 * request and response queues.  Returns 0 on success, 1 on failure
 * (with everything allocated so far released).
 *
 * The 'progress' counter pairs each successful step with exactly one
 * undo action in the dmafail ladder below -- keep them in sync when
 * modifying this function.
 */
static int
isp_sbus_mbxdma(ispsoftc_t *isp)
{
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
	bus_dma_segment_t reqseg, rspseg;
	int reqrs, rsprs, i, progress;
	size_t n;
	bus_size_t len;

	/* Already set up?  Nothing to do. */
	if (isp->isp_rquest_dma)
		return (0);

	n = isp->isp_maxcmds * sizeof (XS_T *);
	isp->isp_xflist = (XS_T **) malloc(n, M_DEVBUF, M_WAITOK);
	if (isp->isp_xflist == NULL) {
		isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array");
		return (1);
	}
	MEMZERO(isp->isp_xflist, n);
	n = sizeof (bus_dmamap_t) * isp->isp_maxcmds;
	sbc->sbus_dmamap = (bus_dmamap_t *) malloc(n, M_DEVBUF, M_WAITOK);
	if (sbc->sbus_dmamap == NULL) {
		free(isp->isp_xflist, M_DEVBUF);
		isp->isp_xflist = NULL;
		isp_prt(isp, ISP_LOGERR, "cannot alloc dmamap array");
		return (1);
	}
	/*
	 * One single-segment map per command slot.  The 1 << 24 (16MB)
	 * boundary presumably reflects an SBus DMA constraint -- TODO
	 * confirm against the hardware documentation.
	 */
	for (i = 0; i < isp->isp_maxcmds; i++) {
		/* Allocate a DMA handle */
		if (bus_dmamap_create(isp->isp_dmatag, MAXPHYS, 1, MAXPHYS,
		    1 << 24, BUS_DMA_NOWAIT, &sbc->sbus_dmamap[i]) != 0) {
			isp_prt(isp, ISP_LOGERR, "cmd DMA maps create error");
			break;
		}
	}
	/* Partial failure: tear down the maps created so far. */
	if (i < isp->isp_maxcmds) {
		while (--i >= 0) {
			bus_dmamap_destroy(isp->isp_dmatag,
			    sbc->sbus_dmamap[i]);
		}
		free(isp->isp_xflist, M_DEVBUF);
		free(sbc->sbus_dmamap, M_DEVBUF);
		isp->isp_xflist = NULL;
		sbc->sbus_dmamap = NULL;
		return (1);
	}

	/*
	 * Allocate and map the request and response queues
	 */
	progress = 0;
	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	if (bus_dmamem_alloc(isp->isp_dmatag, len, 0, 0, &reqseg, 1, &reqrs,
	    BUS_DMA_NOWAIT)) {
		goto dmafail;
	}
	progress++;	/* 1: request queue memory allocated */
	if (bus_dmamem_map(isp->isp_dmatag, &reqseg, reqrs, len,
	    (void *)&isp->isp_rquest, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
		goto dmafail;
	}
	progress++;	/* 2: request queue mapped into KVA */
	if (bus_dmamap_create(isp->isp_dmatag, len, 1, len, 1 << 24,
	    BUS_DMA_NOWAIT, &isp->isp_rqdmap) != 0) {
		goto dmafail;
	}
	progress++;	/* 3: request queue DMA map created */
	if (bus_dmamap_load(isp->isp_dmatag, isp->isp_rqdmap,
	    isp->isp_rquest, len, NULL, BUS_DMA_NOWAIT) != 0) {
		goto dmafail;
	}
	progress++;	/* 4: request queue DMA map loaded */
	isp->isp_rquest_dma = isp->isp_rqdmap->dm_segs[0].ds_addr;

	len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
	if (bus_dmamem_alloc(isp->isp_dmatag, len, 0, 0, &rspseg, 1, &rsprs,
	    BUS_DMA_NOWAIT)) {
		goto dmafail;
	}
	progress++;	/* 5: response queue memory allocated */
	if (bus_dmamem_map(isp->isp_dmatag, &rspseg, rsprs, len,
	    (void *)&isp->isp_result, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
		goto dmafail;
	}
	progress++;	/* 6: response queue mapped into KVA */
	if (bus_dmamap_create(isp->isp_dmatag, len, 1, len, 1 << 24,
	    BUS_DMA_NOWAIT, &isp->isp_rsdmap) != 0) {
		goto dmafail;
	}
	progress++;	/* 7: response queue DMA map created */
	if (bus_dmamap_load(isp->isp_dmatag, isp->isp_rsdmap,
	    isp->isp_result, len, NULL, BUS_DMA_NOWAIT) != 0) {
		goto dmafail;
	}
	isp->isp_result_dma = isp->isp_rsdmap->dm_segs[0].ds_addr;

	return (0);

dmafail:
	isp_prt(isp, ISP_LOGERR, "Mailbox DMA Setup Failure");

	/*
	 * Unwind in strict reverse order of the progress increments.
	 * NOTE(review): progress never reaches 8 (the final load has no
	 * increment before the success return), so this arm is dead but
	 * harmless.
	 */
	if (progress >= 8) {
		bus_dmamap_unload(isp->isp_dmatag, isp->isp_rsdmap);
	}
	if (progress >= 7) {
		bus_dmamap_destroy(isp->isp_dmatag, isp->isp_rsdmap);
	}
	if (progress >= 6) {
		bus_dmamem_unmap(isp->isp_dmatag,
		    isp->isp_result, ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)));
	}
	if (progress >= 5) {
		bus_dmamem_free(isp->isp_dmatag, &rspseg, rsprs);
	}

	if (progress >= 4) {
		bus_dmamap_unload(isp->isp_dmatag, isp->isp_rqdmap);
	}
	if (progress >= 3) {
		bus_dmamap_destroy(isp->isp_dmatag, isp->isp_rqdmap);
	}
	if (progress >= 2) {
		bus_dmamem_unmap(isp->isp_dmatag,
		    isp->isp_rquest, ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)));
	}
	if (progress >= 1) {
		bus_dmamem_free(isp->isp_dmatag, &reqseg, reqrs);
	}

	/* All per-command maps were successfully created; destroy them. */
	for (i = 0; i < isp->isp_maxcmds; i++) {
		bus_dmamap_destroy(isp->isp_dmatag, sbc->sbus_dmamap[i]);
	}
	free(sbc->sbus_dmamap, M_DEVBUF);
	free(isp->isp_xflist, M_DEVBUF);
	isp->isp_xflist = NULL;
	sbc->sbus_dmamap = NULL;
	return (1);
}
477
478 /*
479 * Map a DMA request.
480 * We're guaranteed that rq->req_handle is a value from 1 to isp->isp_maxcmds.
481 */
482
/*
 * Map a command's data buffer for DMA and fill in the data-segment
 * portion of the request queue entry.  Returns CMD_QUEUED on success,
 * CMD_EAGAIN if resources are temporarily short, CMD_COMPLETE on a
 * hard mapping failure (with XS_SETERR already called).
 *
 * We're guaranteed that rq->req_handle is a value from 1 to
 * isp->isp_maxcmds, so it can index the per-command map array.
 */
static int
isp_sbus_dmasetup(ispsoftc_t *isp, XS_T *xs, ispreq_t *rq,
    uint32_t *nxtip, uint32_t optr)
{
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
	bus_dmamap_t dmap;
	ispreq_t *qep;
	int error, cansleep = (xs->xs_control & XS_CTL_NOSLEEP) == 0;
	int in = (xs->xs_control & XS_CTL_DATA_IN) != 0;

	qep = (ispreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, isp->isp_reqidx);
	/* No data phase: nothing to map, just emit the request entry. */
	if (xs->datalen == 0) {
		rq->req_seg_count = 1;
		goto mbxsync;
	}

	dmap = sbc->sbus_dmamap[isp_handle_index(rq->req_handle)];
	/* A loaded map here means the slot was never torn down -- fatal. */
	if (dmap->dm_nsegs != 0) {
		panic("%s: DMA map already allocated", device_xname(&isp->isp_osinfo.dev));
		/* NOTREACHED */
	}
	error = bus_dmamap_load(isp->isp_dmatag, dmap, xs->data, xs->datalen,
	    NULL, (cansleep ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT) |
	    BUS_DMA_STREAMING);
	if (error != 0) {
		XS_SETERR(xs, HBA_BOTCH);
		/* Transient shortage: let the caller retry later. */
		if (error == EAGAIN || error == ENOMEM)
			return (CMD_EAGAIN);
		else
			return (CMD_COMPLETE);
	}

	bus_dmamap_sync(isp->isp_dmatag, dmap, 0, xs->datalen,
	    in? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

	if (in) {
		rq->req_flags |= REQFLAG_DATA_IN;
	} else {
		rq->req_flags |= REQFLAG_DATA_OUT;
	}

	/*
	 * CDBs longer than 12 bytes use an extended request entry, which
	 * has no room for a data segment, so the segment goes into a
	 * separate continuation entry on the request queue.
	 */
	if (XS_CDBLEN(xs) > 12) {
		uint32_t onxti;
		ispcontreq_t local, *crq = &local, *cqe;

		onxti = *nxtip;
		cqe = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, onxti);
		*nxtip = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
		/* Queue full: undo the mapping and ask for a retry. */
		if (*nxtip == optr) {
			isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
			bus_dmamap_unload(isp->isp_dmatag, dmap);
			XS_SETERR(xs, HBA_BOTCH);
			return (CMD_EAGAIN);
		}
		rq->req_seg_count = 2;
		MEMZERO((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
		crq->req_dataseg[0].ds_count = xs->datalen;
		crq->req_dataseg[0].ds_base = dmap->dm_segs[0].ds_addr;
		isp_put_cont_req(isp, crq, cqe);
		MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
	} else {
		/* Single segment fits directly in the request entry. */
		rq->req_seg_count = 1;
		rq->req_dataseg[0].ds_count = xs->datalen;
		rq->req_dataseg[0].ds_base = dmap->dm_segs[0].ds_addr;
	}

mbxsync:
	/* Byte-swap/copy the request into the shared queue entry. */
	if (XS_CDBLEN(xs) > 12) {
		isp_put_extended_request(isp,
		    (ispextreq_t *)rq, (ispextreq_t *) qep);
	} else {
		isp_put_request(isp, rq, qep);
	}
	return (CMD_QUEUED);
}
560
561 static void
562 isp_sbus_dmateardown(ispsoftc_t *isp, XS_T *xs, uint32_t handle)
563 {
564 struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
565 bus_dmamap_t dmap;
566
567 dmap = sbc->sbus_dmamap[isp_handle_index(handle)];
568
569 if (dmap->dm_nsegs == 0) {
570 panic("%s: DMA map not already allocated", device_xname(&isp->isp_osinfo.dev));
571 /* NOTREACHED */
572 }
573 bus_dmamap_sync(isp->isp_dmatag, dmap, 0,
574 xs->datalen, (xs->xs_control & XS_CTL_DATA_IN)?
575 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
576 bus_dmamap_unload(isp->isp_dmatag, dmap);
577 }
578