/* $NetBSD: isp_sbus.c,v 1.40.2.6 2001/11/14 19:15:58 nathanw Exp $ */
/*
 * This driver, which is contained in NetBSD in the files:
 *
 *    sys/dev/ic/isp.c
 *    sys/dev/ic/isp_inline.h
 *    sys/dev/ic/isp_netbsd.c
 *    sys/dev/ic/isp_netbsd.h
 *    sys/dev/ic/isp_target.c
 *    sys/dev/ic/isp_target.h
 *    sys/dev/ic/isp_tpublic.h
 *    sys/dev/ic/ispmbox.h
 *    sys/dev/ic/ispreg.h
 *    sys/dev/ic/ispvar.h
 *    sys/microcode/isp/asm_sbus.h
 *    sys/microcode/isp/asm_1040.h
 *    sys/microcode/isp/asm_1080.h
 *    sys/microcode/isp/asm_12160.h
 *    sys/microcode/isp/asm_2100.h
 *    sys/microcode/isp/asm_2200.h
 *    sys/pci/isp_pci.c
 *    sys/sbus/isp_sbus.c
 *
 * is being actively maintained by Matthew Jacob (mjacob@netbsd.org).
 * This driver also is shared source with the FreeBSD, OpenBSD, Linux, and
 * Solaris versions.  This tends to be an interesting maintenance problem.
 *
 * Please coordinate with Matthew Jacob on changes you wish to make here.
 */
/*
 * SBus specific probe and attach routines for Qlogic ISP SCSI adapters.
 *
 * Copyright (c) 1997, 2001 by Matthew Jacob
 * NASA AMES Research Center
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: isp_sbus.c,v 1.40.2.6 2001/11/14 19:15:58 nathanw Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/reboot.h>

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/autoconf.h>

#include <dev/ic/isp_netbsd.h>
#include <dev/microcode/isp/asm_sbus.h>
#include <dev/sbus/sbusvar.h>

static int isp_sbus_intr(void *);
static int
isp_sbus_rd_isr(struct ispsoftc *, u_int16_t *, u_int16_t *, u_int16_t *);
static u_int16_t isp_sbus_rd_reg(struct ispsoftc *, int);
static void isp_sbus_wr_reg(struct ispsoftc *, int, u_int16_t);
static int isp_sbus_mbxdma(struct ispsoftc *);
static int isp_sbus_dmasetup(struct ispsoftc *, XS_T *, ispreq_t *, u_int16_t *,
    u_int16_t);
static void isp_sbus_dmateardown(struct ispsoftc *, XS_T *, u_int16_t);

#ifndef ISP_1000_RISC_CODE
#define ISP_1000_RISC_CODE NULL
#endif

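/*
 * Bus-specific operations vector handed to the machine-independent ISP
 * core via isp->isp_mdvec: interrupt status and register accessors, DMA
 * setup and teardown, and (optionally) firmware to download.
 */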
static struct ispmdvec mdvec = {
        isp_sbus_rd_isr,
        isp_sbus_rd_reg,
        isp_sbus_wr_reg,
        isp_sbus_mbxdma,
        isp_sbus_dmasetup,
        isp_sbus_dmateardown,
        NULL,
        NULL,
        NULL,
        ISP_1000_RISC_CODE
};

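/*
 * Per-instance software state.  The generic ispsoftc must come first so
 * that pointers to it and to this structure can be cast back and forth.
 */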
struct isp_sbussoftc {
        struct ispsoftc         sbus_isp;
        struct sbusdev          sbus_sd;
        sdparam                 sbus_dev;
        bus_space_tag_t         sbus_bustag;
        bus_dma_tag_t           sbus_dmatag;
        bus_space_handle_t      sbus_reg;
        int                     sbus_node;
        int                     sbus_pri;
        struct ispmdvec         sbus_mdvec;
        bus_dmamap_t            *sbus_dmamap;
        bus_dmamap_t            sbus_rquest_dmamap;
        bus_dmamap_t            sbus_result_dmamap;
        int16_t                 sbus_poff[_NREG_BLKS];
};


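/*
 * Autoconfiguration glue: match against the SBus node name and attach.
 */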
static int isp_match(struct device *, struct cfdata *, void *);
static void isp_sbus_attach(struct device *, struct device *, void *);
struct cfattach isp_sbus_ca = {
        sizeof (struct isp_sbussoftc), isp_match, isp_sbus_attach
};

static int
isp_match(struct device *parent, struct cfdata *cf, void *aux)
{
        int rv;
#ifdef DEBUG
        static int oneshot = 1;
#endif
        struct sbus_attach_args *sa = aux;

        rv = (strcmp(cf->cf_driver->cd_name, sa->sa_name) == 0 ||
            strcmp("PTI,ptisp", sa->sa_name) == 0 ||
            strcmp("ptisp", sa->sa_name) == 0 ||
            strcmp("SUNW,isp", sa->sa_name) == 0 ||
            strcmp("QLGC,isp", sa->sa_name) == 0);
#ifdef DEBUG
        if (rv && oneshot) {
                oneshot = 0;
                printf("Qlogic ISP Driver, NetBSD (sbus) Platform Version "
                    "%d.%d Core Version %d.%d\n",
                    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
                    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
        }
#endif
        return (rv);
}


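/*
 * Attach: map the registers, derive the FIFO/burst configuration from the
 * SBus "burst-sizes" properties, hook up the interrupt, and then reset,
 * initialize and attach the common ISP core.
 */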
static void
isp_sbus_attach(struct device *parent, struct device *self, void *aux)
{
        int freq, ispburst, sbusburst;
        struct sbus_attach_args *sa = aux;
        struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) self;
        struct ispsoftc *isp = &sbc->sbus_isp;

        printf(" for %s\n", sa->sa_name);

        sbc->sbus_bustag = sa->sa_bustag;
        sbc->sbus_dmatag = sa->sa_dmatag;
        if (sa->sa_nintr != 0)
                sbc->sbus_pri = sa->sa_pri;
        sbc->sbus_mdvec = mdvec;

        if (sa->sa_npromvaddrs != 0) {
                sbc->sbus_reg = (bus_space_handle_t)sa->sa_promvaddrs[0];
        } else {
                if (sbus_bus_map(sa->sa_bustag, sa->sa_slot, sa->sa_offset,
                    sa->sa_size, BUS_SPACE_MAP_LINEAR, 0,
                    &sbc->sbus_reg) != 0) {
                        printf("%s: cannot map registers\n", self->dv_xname);
                        return;
                }
        }
        sbc->sbus_node = sa->sa_node;

        freq = PROM_getpropint(sa->sa_node, "clock-frequency", 0);
        if (freq) {
                /*
                 * Convert from Hz to MHz, rounding to the nearest MHz.
                 */
                freq = (freq + 500000) / 1000000;
#if 0
                printf("%s: %d MHz\n", self->dv_xname, freq);
#endif
        }
        sbc->sbus_mdvec.dv_clock = freq;

        /*
         * Now figure out the proper burst sizes, etc., to use.
         * Unfortunately, there is no ddi_dma_burstsizes here which
         * walks up the tree finding the limiting burst size node (if
         * any).
         */
        sbusburst = ((struct sbus_softc *)parent)->sc_burst;
        if (sbusburst == 0)
                sbusburst = SBUS_BURST_32 - 1;
        ispburst = PROM_getpropint(sa->sa_node, "burst-sizes", -1);
        if (ispburst == -1) {
                ispburst = sbusburst;
        }
        ispburst &= sbusburst;
        ispburst &= ~(1 << 7);
        ispburst &= ~(1 << 6);
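        /*
         * "burst-sizes" is a bitmask in which bit n set means 2^n-byte
         * bursts are supported.  The 64- and 128-byte burst bits are
         * stripped above; the FIFO threshold is then programmed below
         * from the largest remaining burst size (32, 16 or 8 bytes).
         */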
        sbc->sbus_mdvec.dv_conf1 = 0;
        if (ispburst & (1 << 5)) {
                sbc->sbus_mdvec.dv_conf1 = BIU_SBUS_CONF1_FIFO_32;
        } else if (ispburst & (1 << 4)) {
                sbc->sbus_mdvec.dv_conf1 = BIU_SBUS_CONF1_FIFO_16;
        } else if (ispburst & (1 << 3)) {
                sbc->sbus_mdvec.dv_conf1 =
                    BIU_SBUS_CONF1_BURST8 | BIU_SBUS_CONF1_FIFO_8;
        }
        if (sbc->sbus_mdvec.dv_conf1) {
                sbc->sbus_mdvec.dv_conf1 |= BIU_BURST_ENABLE;
        }

        /*
         * Some early versions of the PTI SBus adapter fail when the driver
         * tries to download firmware into them (via register poking), so
         * give up on downloading and let them run their resident firmware.
         */
        if (strcmp("PTI,ptisp", sa->sa_name) == 0 ||
            strcmp("ptisp", sa->sa_name) == 0) {
                sbc->sbus_mdvec.dv_ispfw = NULL;
        }

        isp->isp_mdvec = &sbc->sbus_mdvec;
        isp->isp_bustype = ISP_BT_SBUS;
        isp->isp_type = ISP_HA_SCSI_UNKNOWN;
        isp->isp_param = &sbc->sbus_dev;
        bzero(isp->isp_param, sizeof (sdparam));

        sbc->sbus_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
        sbc->sbus_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = SBUS_MBOX_REGS_OFF;
        sbc->sbus_poff[SXP_BLOCK >> _BLK_REG_SHFT] = SBUS_SXP_REGS_OFF;
        sbc->sbus_poff[RISC_BLOCK >> _BLK_REG_SHFT] = SBUS_RISC_REGS_OFF;
        sbc->sbus_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;

        /* Establish interrupt channel */
        bus_intr_establish(sbc->sbus_bustag, sbc->sbus_pri, IPL_BIO, 0,
            isp_sbus_intr, sbc);
        sbus_establish(&sbc->sbus_sd, &sbc->sbus_isp.isp_osinfo._dev);

        /*
         * Set up logging levels.
         */
#ifdef ISP_LOGDEFAULT
        isp->isp_dblev = ISP_LOGDEFAULT;
#else
        isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
        if (bootverbose)
                isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
#ifdef SCSIDEBUG
        isp->isp_dblev |= ISP_LOGDEBUG1|ISP_LOGDEBUG2;
#endif
#ifdef DEBUG
        isp->isp_dblev |= ISP_LOGDEBUG0;
#endif
#endif

        isp->isp_confopts = self->dv_cfdata->cf_flags;
        isp->isp_role = ISP_DEFAULT_ROLES;

        /*
         * There's no tool on sparc to set NVRAM for ISPs, so ignore it.
         */
        isp->isp_confopts |= ISP_CFG_NONVRAM;
        ISP_LOCK(isp);
        isp->isp_osinfo.no_mbox_ints = 1;
        isp_reset(isp);
        if (isp->isp_state != ISP_RESETSTATE) {
                ISP_UNLOCK(isp);
                return;
        }
        ENABLE_INTS(isp);
        isp_init(isp);
        if (isp->isp_state != ISP_INITSTATE) {
                isp_uninit(isp);
                ISP_UNLOCK(isp);
                return;
        }

        /*
         * Do generic attach.
         */
        ISP_UNLOCK(isp);
        isp_attach(isp);
        if (isp->isp_state != ISP_RUNSTATE) {
                ISP_LOCK(isp);
                isp_uninit(isp);
                ISP_UNLOCK(isp);
        }
}

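/*
 * Interrupt handler: sync the result queue so the CPU sees the adapter's
 * writes, then read the interrupt status and hand any pending work to the
 * common isp_intr() code.
 */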
static int
isp_sbus_intr(void *arg)
{
        u_int16_t isr, sema, mbox;
        struct ispsoftc *isp = arg;
        struct isp_sbussoftc *sbc = arg;

        bus_dmamap_sync(sbc->sbus_dmatag, sbc->sbus_result_dmamap, 0,
            sbc->sbus_result_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
        if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
                sbc->sbus_isp.isp_osinfo.onintstack = 1;
                isp_intr(isp, isr, sema, mbox);
                sbc->sbus_isp.isp_osinfo.onintstack = 0;
                return (1);
        } else {
                return (0);
        }
}

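/*
 * IspVirt2Off translates the core driver's virtual register offsets into
 * offsets within the SBus register mapping, using the per-block offsets
 * stored in sbus_poff at attach time.  BXR2 is shorthand for a 16-bit
 * register read through that mapping.
 */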
#define IspVirt2Off(a, x) \
        (((struct isp_sbussoftc *)a)->sbus_poff[((x) & _BLK_REG_MASK) >> \
        _BLK_REG_SHFT] + ((x) & 0xff))

#define BXR2(sbc, off) \
        bus_space_read_2(sbc->sbus_bustag, sbc->sbus_reg, off)

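/*
 * Read the interrupt status and semaphore registers.  Returns nonzero if
 * an interrupt is pending; when the semaphore is held, the outgoing
 * mailbox 0 register is also fetched for the caller.
 */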
static int
isp_sbus_rd_isr(struct ispsoftc *isp, u_int16_t *isrp,
    u_int16_t *semap, u_int16_t *mbp)
{
        struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
        u_int16_t isr, sema;

        isr = BXR2(sbc, IspVirt2Off(isp, BIU_ISR));
        sema = BXR2(sbc, IspVirt2Off(isp, BIU_SEMA));
        isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema);
        isr &= INT_PENDING_MASK(isp);
        sema &= BIU_SEMA_LOCK;
        if (isr == 0 && sema == 0) {
                return (0);
        }
        *isrp = isr;
        if ((*semap = sema) != 0) {
                *mbp = BXR2(sbc, IspVirt2Off(isp, OUTMAILBOX0));
        }
        return (1);
}

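/*
 * 16-bit register accessors used by the core driver; regoff encodes the
 * register block in its upper bits and the register offset in its low byte.
 */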
static u_int16_t
isp_sbus_rd_reg(struct ispsoftc *isp, int regoff)
{
        struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
        int offset = sbc->sbus_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
        offset += (regoff & 0xff);
        return (bus_space_read_2(sbc->sbus_bustag, sbc->sbus_reg, offset));
}

static void
isp_sbus_wr_reg(struct ispsoftc *isp, int regoff, u_int16_t val)
{
        struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
        int offset = sbc->sbus_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
        offset += (regoff & 0xff);
        bus_space_write_2(sbc->sbus_bustag, sbc->sbus_reg, offset, val);
}

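/*
 * Allocate the command lookup table, a DMA map per outstanding command,
 * and DMA-safe memory for the request and response queues.  Returns
 * immediately if the queues have already been set up.
 */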
static int
isp_sbus_mbxdma(struct ispsoftc *isp)
{
        struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
        bus_dma_tag_t dmatag = sbc->sbus_dmatag;
        bus_dma_segment_t reqseg, rspseg;
        int reqrs, rsprs, i, progress;
        size_t n;
        bus_size_t len;

        if (isp->isp_rquest_dma)
                return (0);

        n = isp->isp_maxcmds * sizeof (XS_T *);
        isp->isp_xflist = (XS_T **) malloc(n, M_DEVBUF, M_WAITOK);
        if (isp->isp_xflist == NULL) {
                isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array");
                return (1);
        }
        bzero(isp->isp_xflist, n);
        n = sizeof (bus_dmamap_t) * isp->isp_maxcmds;
        sbc->sbus_dmamap = (bus_dmamap_t *) malloc(n, M_DEVBUF, M_WAITOK);
        if (sbc->sbus_dmamap == NULL) {
                free(isp->isp_xflist, M_DEVBUF);
                isp->isp_xflist = NULL;
                isp_prt(isp, ISP_LOGERR, "cannot alloc dmamap array");
                return (1);
        }
        for (i = 0; i < isp->isp_maxcmds; i++) {
                /* Allocate a DMA handle */
                if (bus_dmamap_create(dmatag, MAXPHYS, 1, MAXPHYS, 0,
                    BUS_DMA_NOWAIT, &sbc->sbus_dmamap[i]) != 0) {
                        isp_prt(isp, ISP_LOGERR, "cmd DMA maps create error");
                        break;
                }
        }
        if (i < isp->isp_maxcmds) {
                while (--i >= 0) {
                        bus_dmamap_destroy(dmatag, sbc->sbus_dmamap[i]);
                }
                free(isp->isp_xflist, M_DEVBUF);
                free(sbc->sbus_dmamap, M_DEVBUF);
                isp->isp_xflist = NULL;
                sbc->sbus_dmamap = NULL;
                return (1);
        }

        /*
         * Allocate and map the request and response queues.
         */
        progress = 0;
        len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
        if (bus_dmamem_alloc(dmatag, len, 0, 0, &reqseg, 1, &reqrs,
            BUS_DMA_NOWAIT)) {
                goto dmafail;
        }
        progress++;
        if (bus_dmamem_map(dmatag, &reqseg, reqrs, len,
            (caddr_t *)&isp->isp_rquest, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
                goto dmafail;
        }
        progress++;
        if (bus_dmamap_create(dmatag, len, 1, len, 0, BUS_DMA_NOWAIT,
            &sbc->sbus_rquest_dmamap) != 0) {
                goto dmafail;
        }
        progress++;
        if (bus_dmamap_load(dmatag, sbc->sbus_rquest_dmamap,
            isp->isp_rquest, len, NULL, BUS_DMA_NOWAIT) != 0) {
                goto dmafail;
        }
        progress++;
        isp->isp_rquest_dma = sbc->sbus_rquest_dmamap->dm_segs[0].ds_addr;

        len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
        if (bus_dmamem_alloc(dmatag, len, 0, 0, &rspseg, 1, &rsprs,
            BUS_DMA_NOWAIT)) {
                goto dmafail;
        }
        progress++;
        if (bus_dmamem_map(dmatag, &rspseg, rsprs, len,
            (caddr_t *)&isp->isp_result, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
                goto dmafail;
        }
        progress++;
        if (bus_dmamap_create(dmatag, len, 1, len, 0, BUS_DMA_NOWAIT,
            &sbc->sbus_result_dmamap) != 0) {
                goto dmafail;
        }
        progress++;
        if (bus_dmamap_load(dmatag, sbc->sbus_result_dmamap,
            isp->isp_result, len, NULL, BUS_DMA_NOWAIT) != 0) {
                goto dmafail;
        }
        isp->isp_result_dma = sbc->sbus_result_dmamap->dm_segs[0].ds_addr;

        return (0);

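        /*
         * Unwind whatever succeeded, in reverse order; "progress" counts
         * the number of setup steps that completed above.
         */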
dmafail:
        isp_prt(isp, ISP_LOGERR, "Mailbox DMA Setup Failure");

        if (progress >= 8) {
                bus_dmamap_unload(dmatag, sbc->sbus_result_dmamap);
        }
        if (progress >= 7) {
                bus_dmamap_destroy(dmatag, sbc->sbus_result_dmamap);
        }
        if (progress >= 6) {
                bus_dmamem_unmap(dmatag,
                    isp->isp_result, ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)));
        }
        if (progress >= 5) {
                bus_dmamem_free(dmatag, &rspseg, rsprs);
        }

        if (progress >= 4) {
                bus_dmamap_unload(dmatag, sbc->sbus_rquest_dmamap);
        }
        if (progress >= 3) {
                bus_dmamap_destroy(dmatag, sbc->sbus_rquest_dmamap);
        }
        if (progress >= 2) {
                bus_dmamem_unmap(dmatag,
                    isp->isp_rquest, ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)));
        }
        if (progress >= 1) {
                bus_dmamem_free(dmatag, &reqseg, reqrs);
        }

        for (i = 0; i < isp->isp_maxcmds; i++) {
                bus_dmamap_destroy(dmatag, sbc->sbus_dmamap[i]);
        }
        free(sbc->sbus_dmamap, M_DEVBUF);
        free(isp->isp_xflist, M_DEVBUF);
        isp->isp_xflist = NULL;
        sbc->sbus_dmamap = NULL;
        return (1);
}

/*
 * Map a DMA request.
 * We're guaranteed that rq->req_handle is a value from 1 to isp->isp_maxcmds.
 */

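/*
 * Zero-length transfers are marked as a single (empty) segment and queued
 * directly.  Otherwise the data buffer is loaded into the per-command DMA
 * map; for CDBs longer than 12 bytes the data segment is placed in a
 * continuation (RQSTYPE_DATASEG) entry following the request entry.
 */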
static int
isp_sbus_dmasetup(struct ispsoftc *isp, XS_T *xs, ispreq_t *rq,
    u_int16_t *iptrp, u_int16_t optr)
{
        struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
        bus_dmamap_t dmap;
        ispcontreq_t *crq;
        int cansleep = (xs->xs_control & XS_CTL_NOSLEEP) == 0;
        int in = (xs->xs_control & XS_CTL_DATA_IN) != 0;

        if (xs->datalen == 0) {
                rq->req_seg_count = 1;
                goto mbxsync;
        }

        dmap = sbc->sbus_dmamap[isp_handle_index(rq->req_handle)];
        if (dmap->dm_nsegs != 0) {
                panic("%s: dma map already allocated\n", isp->isp_name);
                /* NOTREACHED */
        }
        if (bus_dmamap_load(sbc->sbus_dmatag, dmap, xs->data, xs->datalen,
            NULL, (cansleep ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT) |
            BUS_DMA_STREAMING) != 0) {
                XS_SETERR(xs, HBA_BOTCH);
                return (CMD_COMPLETE);
        }

        bus_dmamap_sync(sbc->sbus_dmatag, dmap, 0, xs->datalen,
            in ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

        if (in) {
                rq->req_flags |= REQFLAG_DATA_IN;
        } else {
                rq->req_flags |= REQFLAG_DATA_OUT;
        }

        if (XS_CDBLEN(xs) > 12) {
                crq = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, *iptrp);
                *iptrp = ISP_NXT_QENTRY(*iptrp, RQUEST_QUEUE_LEN(isp));
                if (*iptrp == optr) {
                        isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
                        bus_dmamap_unload(sbc->sbus_dmatag, dmap);
                        XS_SETERR(xs, HBA_BOTCH);
                        return (CMD_EAGAIN);
                }
                rq->req_seg_count = 2;
                rq->req_dataseg[0].ds_count = 0;
                rq->req_dataseg[0].ds_base = 0;
                bzero((void *)crq, sizeof (*crq));
                crq->req_header.rqs_entry_count = 1;
                crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
                crq->req_dataseg[0].ds_count = xs->datalen;
                crq->req_dataseg[0].ds_base = dmap->dm_segs[0].ds_addr;
                ISP_SBUSIFY_ISPHDR(isp, &crq->req_header)
        } else {
                rq->req_dataseg[0].ds_count = xs->datalen;
                rq->req_dataseg[0].ds_base = dmap->dm_segs[0].ds_addr;
                rq->req_seg_count = 1;
        }

mbxsync:
        ISP_SWIZZLE_REQUEST(isp, rq);
        bus_dmamap_sync(sbc->sbus_dmatag, sbc->sbus_rquest_dmamap, 0,
            sbc->sbus_rquest_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
        return (CMD_QUEUED);
}

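/*
 * Undo isp_sbus_dmasetup() once the command has completed: sync the data
 * buffer for the CPU and unload the per-command DMA map.
 */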
static void
isp_sbus_dmateardown(struct ispsoftc *isp, XS_T *xs, u_int16_t handle)
{
        struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
        bus_dmamap_t dmap;

        dmap = sbc->sbus_dmamap[isp_handle_index(handle)];

        if (dmap->dm_nsegs == 0) {
                panic("%s: dma map not already allocated\n", isp->isp_name);
                /* NOTREACHED */
        }
        bus_dmamap_sync(sbc->sbus_dmatag, dmap, 0,
            xs->datalen, (xs->xs_control & XS_CTL_DATA_IN) ?
            BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
        bus_dmamap_unload(sbc->sbus_dmatag, dmap);
}