/*	$NetBSD: if_le_ioasic.c,v 1.13 1999/09/09 06:33:38 nisimura Exp $	*/

/*
 * Copyright (c) 1996 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Chris G. Demetriou
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * LANCE on DEC IOCTL ASIC.
 */

#include <sys/cdefs.h>			/* RCS ID & macro defns */
__KERNEL_RCSID(0, "$NetBSD: if_le_ioasic.c,v 1.13 1999/09/09 06:33:38 nisimura Exp $");

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/device.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_inarp.h>
#endif

#include <dev/ic/lancereg.h>
#include <dev/ic/lancevar.h>
#include <dev/ic/am7990reg.h>
#include <dev/ic/am7990var.h>

#include <dev/tc/if_levar.h>
#include <dev/tc/tcvar.h>
#include <dev/tc/ioasicreg.h>
#include <dev/tc/ioasicvar.h>

#if defined(_KERNEL) && !defined(_LKM)
#include "opt_ddb.h"
#endif

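/* Shared LANCE DMA buffer, allocated and mapped by ioasic_lance_dma_setup(). */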
caddr_t le_iomem;

static int  le_ioasic_match __P((struct device *, struct cfdata *, void *));
static void le_ioasic_attach __P((struct device *, struct device *, void *));

struct cfattach le_ioasic_ca = {
	sizeof(struct le_softc), le_ioasic_match, le_ioasic_attach
};

static void ioasic_lance_dma_setup __P((struct device *));

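/*
 * Without DDB the buffer-copy helpers below can be static; with DDB they are
 * left global so their names stay visible to the debugger.
 */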
#ifdef DDB
#define	integrate
#define	hide
#else
#define	integrate	static __inline
#define	hide		static
#endif

hide void le_ioasic_copytobuf_gap2 __P((struct lance_softc *, void *,
	    int, int));
hide void le_ioasic_copyfrombuf_gap2 __P((struct lance_softc *, void *,
	    int, int));

hide void le_ioasic_copytobuf_gap16 __P((struct lance_softc *, void *,
	    int, int));
hide void le_ioasic_copyfrombuf_gap16 __P((struct lance_softc *, void *,
	    int, int));
hide void le_ioasic_zerobuf_gap16 __P((struct lance_softc *, int, int));

int
le_ioasic_match(parent, match, aux)
	struct device *parent;
	struct cfdata *match;
	void *aux;
{
	struct ioasicdev_attach_args *d = aux;

	if (!ioasic_submatch(match, aux))
		return (0);
	if (strncmp("lance", d->iada_modname, TC_ROM_LLEN))
		return (0);

	return (1);
}

void
le_ioasic_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct ioasicdev_attach_args *d = aux;
	register struct le_softc *lesc = (void *)self;
	register struct lance_softc *sc = &lesc->sc_am7990.lsc;

	ioasic_lance_dma_setup(parent);

	if (le_iomem == 0) {
		printf("%s: DMA area not set up\n", sc->sc_dev.dv_xname);
		return;
	}

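	/*
	 * Chip registers are reached through TC sparse space; the shared
	 * buffer is accessed uncached.
	 */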
	lesc->sc_r1 = (struct lereg1 *)
	    TC_DENSE_TO_SPARSE(TC_PHYS_TO_UNCACHED(d->iada_addr));
	sc->sc_mem = (void *)TC_PHYS_TO_UNCACHED(le_iomem);

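	/*
	 * Descriptor memory is laid out gap2 (2 data bytes in every 4);
	 * packet buffer memory is gap16 (16 data bytes in every 32).
	 * Use the copy routines below that understand that layout.
	 */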
	sc->sc_copytodesc = le_ioasic_copytobuf_gap2;
	sc->sc_copyfromdesc = le_ioasic_copyfrombuf_gap2;
	sc->sc_copytobuf = le_ioasic_copytobuf_gap16;
	sc->sc_copyfrombuf = le_ioasic_copyfrombuf_gap16;
	sc->sc_zerobuf = le_ioasic_zerobuf_gap16;

	dec_le_common_attach(&lesc->sc_am7990, ioasic_lance_ether_address());

	ioasic_intr_establish(parent, d->iada_cookie, TC_IPL_NET,
	    am7990_intr, sc);
}

/*
 * Special memory access functions needed by ioasic-attached LANCE
 * chips.
 */

/*
 * gap2: two bytes of data followed by two bytes of pad.
 *
 * Buffers must be 4-byte aligned.  The code doesn't worry about
 * touching an extra byte past an odd length.
 */

void
le_ioasic_copytobuf_gap2(sc, fromv, boff, len)
	struct lance_softc *sc;
	void *fromv;
	int boff;
	register int len;
{
	volatile caddr_t buf = sc->sc_mem;
	register caddr_t from = fromv;
	register volatile u_int16_t *bptr;

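	/*
	 * A data byte at offset boff lives in the u_int16_t at index
	 * (boff & ~1); stepping bptr by 2 skips the pad word that follows
	 * each data word.
	 */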
	if (boff & 0x1) {
		/* handle unaligned first byte */
		bptr = ((volatile u_int16_t *)buf) + (boff - 1);
		*bptr = (*from++ << 8) | (*bptr & 0xff);
		bptr += 2;
		len--;
	} else
		bptr = ((volatile u_int16_t *)buf) + boff;
	while (len > 1) {
		*bptr = (from[1] << 8) | (from[0] & 0xff);
		bptr += 2;
		from += 2;
		len -= 2;
	}
	if (len == 1)
		*bptr = (u_int16_t)*from;
}

void
le_ioasic_copyfrombuf_gap2(sc, tov, boff, len)
	struct lance_softc *sc;
	void *tov;
	int boff, len;
{
	volatile caddr_t buf = sc->sc_mem;
	register caddr_t to = tov;
	register volatile u_int16_t *bptr;
	register u_int16_t tmp;

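	/* Same gap2 indexing as le_ioasic_copytobuf_gap2 above. */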
	if (boff & 0x1) {
		/* handle unaligned first byte */
		bptr = ((volatile u_int16_t *)buf) + (boff - 1);
		*to++ = (*bptr >> 8) & 0xff;
		bptr += 2;
		len--;
	} else
		bptr = ((volatile u_int16_t *)buf) + boff;
	while (len > 1) {
		tmp = *bptr;
		*to++ = tmp & 0xff;
		*to++ = (tmp >> 8) & 0xff;
		bptr += 2;
		len -= 2;
	}
	if (len == 1)
		*to = *bptr & 0xff;
}

/*
 * gap16: 16 bytes of data followed by 16 bytes of pad.
 *
 * Buffers must be 32-byte aligned.
 */

void
le_ioasic_copytobuf_gap16(sc, fromv, boff, len)
	struct lance_softc *sc;
	void *fromv;
	int boff;
	register int len;
{
	volatile caddr_t buf = sc->sc_mem;
	register caddr_t from = fromv;
	register caddr_t bptr;

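	/*
	 * boff is an offset into the packed data: 32 * (boff / 16) locates
	 * the 32-byte chunk that holds it, and (boff & 0xf) is the offset
	 * within that chunk's 16 data bytes.
	 */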
	bptr = buf + ((boff << 1) & ~0x1f);
	boff &= 0xf;

	/*
	 * Dispose of boff so destination of subsequent copies is
	 * 16-byte aligned.
	 */
	if (boff) {
		register int xfer;
		xfer = min(len, 16 - boff);
		bcopy(from, bptr + boff, xfer);
		from += xfer;
		bptr += 32;
		len -= xfer;
	}

	/* Destination of copies is now 16-byte aligned. */
	if (len >= 16)
	switch ((u_long)from & (sizeof(u_int32_t) -1)) {
	case 2:
		/* Ethernet headers make this the dominant case. */
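		/* The 16-byte word reassembly below assumes a little-endian CPU. */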
		do {
			register u_int32_t *dst = (u_int32_t*)bptr;
			register u_int16_t t0;
			register u_int32_t t1, t2, t3, t4;

			/* read from odd-16-bit-aligned, cached src */
			t0 = *(u_int16_t*)from;
			t1 = *(u_int32_t*)(from+2);
			t2 = *(u_int32_t*)(from+6);
			t3 = *(u_int32_t*)(from+10);
			t4 = *(u_int16_t*)(from+14);

			/* DMA buffer is uncached on mips */
			dst[0] = t0 | (t1 << 16);
			dst[1] = (t1 >> 16) | (t2 << 16);
			dst[2] = (t2 >> 16) | (t3 << 16);
			dst[3] = (t3 >> 16) | (t4 << 16);

			from += 16;
			bptr += 32;
			len -= 16;
		} while (len >= 16);
		break;

	case 0:
		do {
			register u_int32_t *src = (u_int32_t*)from;
			register u_int32_t *dst = (u_int32_t*)bptr;
			register u_int32_t t0, t1, t2, t3;

			t0 = src[0]; t1 = src[1]; t2 = src[2]; t3 = src[3];
			dst[0] = t0; dst[1] = t1; dst[2] = t2; dst[3] = t3;

			from += 16;
			bptr += 32;
			len -= 16;
		} while (len >= 16);
		break;

	default:
		/* Does odd-aligned case ever happen? */
		do {
			bcopy(from, bptr, 16);
			from += 16;
			bptr += 32;
			len -= 16;
		} while (len >= 16);
		break;
	}
	if (len)
		bcopy(from, bptr, len);
}

void
le_ioasic_copyfrombuf_gap16(sc, tov, boff, len)
	struct lance_softc *sc;
	void *tov;
	int boff, len;
{
	volatile caddr_t buf = sc->sc_mem;
	register caddr_t to = tov;
	register caddr_t bptr;

	bptr = buf + ((boff << 1) & ~0x1f);
	boff &= 0xf;

	/* Dispose of boff; the source of subsequent copies is 16-byte aligned. */
	if (boff) {
		register int xfer;
		xfer = min(len, 16 - boff);
		bcopy(bptr+boff, to, xfer);
		to += xfer;
		bptr += 32;
		len -= xfer;
	}
	if (len >= 16)
	switch ((u_long)to & (sizeof(u_int32_t) -1)) {
	case 2:
		/*
		 * to is aligned to an odd 16-bit boundary.  Ethernet headers
		 * make this the dominant case (98% or more).
		 */
		do {
			register u_int32_t *src = (u_int32_t*)bptr;
			register u_int32_t t0, t1, t2, t3;

			/* read from uncached aligned DMA buf */
			t0 = src[0]; t1 = src[1]; t2 = src[2]; t3 = src[3];

			/* write to odd-16-bit-word aligned dst */
			*(u_int16_t *) (to+0)  = (u_short) t0;
			*(u_int32_t *) (to+2)  = (t0 >> 16) | (t1 << 16);
			*(u_int32_t *) (to+6)  = (t1 >> 16) | (t2 << 16);
			*(u_int32_t *) (to+10) = (t2 >> 16) | (t3 << 16);
			*(u_int16_t *) (to+14) = (t3 >> 16);
			bptr += 32;
			to += 16;
			len -= 16;
		} while (len > 16);
		break;
	case 0:
		/* 32-bit aligned copy.  Rare. */
		do {
			register u_int32_t *src = (u_int32_t*)bptr;
			register u_int32_t *dst = (u_int32_t*)to;
			register u_int32_t t0, t1, t2, t3;

			t0 = src[0]; t1 = src[1]; t2 = src[2]; t3 = src[3];
			dst[0] = t0; dst[1] = t1; dst[2] = t2; dst[3] = t3;
			to += 16;
			bptr += 32;
			len -= 16;
		} while (len > 16);
		break;

	/* XXX Does odd-byte-aligned case ever happen? */
	default:
		do {
			bcopy(bptr, to, 16);
			to += 16;
			bptr += 32;
			len -= 16;
		} while (len > 16);
		break;
	}
	if (len)
		bcopy(bptr, to, len);
}

void
le_ioasic_zerobuf_gap16(sc, boff, len)
	struct lance_softc *sc;
	int boff, len;
{
	volatile caddr_t buf = sc->sc_mem;
	register caddr_t bptr;
	register int xfer;

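	/*
	 * Zero len data bytes starting at packed offset boff, 16 data
	 * bytes per 32-byte chunk.
	 */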
	bptr = buf + ((boff << 1) & ~0x1f);
	boff &= 0xf;
	xfer = min(len, 16 - boff);
	while (len > 0) {
		bzero(bptr + boff, xfer);
		bptr += 32;
		boff = 0;
		len -= xfer;
		xfer = min(len, 16);
	}
}

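/*
 * 128KB of host memory, 128KB aligned; because of the gap16 layout the
 * chip sees only half of it as usable buffer space.
 */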
#define	LE_IOASIC_MEMSIZE	(128*1024)
#define	LE_IOASIC_MEMALIGN	(128*1024)

void
ioasic_lance_dma_setup(parent)
	struct device *parent;
{
	struct ioasic_softc *sc = (void *)parent;
	bus_dma_tag_t dmat = sc->sc_dmat;
	bus_dma_segment_t seg;
	tc_addr_t tca;
	int rseg;

	/*
	 * Allocate a DMA area for the chip.
	 */
	if (bus_dmamem_alloc(dmat, LE_IOASIC_MEMSIZE, LE_IOASIC_MEMALIGN,
	    0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		printf("%s: can't allocate DMA area for LANCE\n",
		    sc->sc_dv.dv_xname);
		return;
	}
	if (bus_dmamem_map(dmat, &seg, rseg, LE_IOASIC_MEMSIZE,
	    &le_iomem, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
		printf("%s: can't map DMA area for LANCE\n",
		    sc->sc_dv.dv_xname);
		bus_dmamem_free(dmat, &seg, rseg);
		return;
	}

	/*
	 * Create and load the DMA map for the DMA area.
	 */
	if (bus_dmamap_create(dmat, LE_IOASIC_MEMSIZE, 1,
	    LE_IOASIC_MEMSIZE, 0, BUS_DMA_NOWAIT, &sc->sc_lance_dmam)) {
		printf("%s: can't create DMA map\n", sc->sc_dv.dv_xname);
		goto bad;
	}
	if (bus_dmamap_load(dmat, sc->sc_lance_dmam,
	    le_iomem, LE_IOASIC_MEMSIZE, NULL, BUS_DMA_NOWAIT)) {
		printf("%s: can't load DMA map\n", sc->sc_dv.dv_xname);
		goto bad;
	}

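	/*
	 * Point the IOASIC LANCE DMA engine at the buffer, then enable it.
	 * The DMA pointer register takes the address in a rotated format:
	 * shifted left by 3, with the high-order bits wrapped into the
	 * low bits.
	 */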
	tca = (tc_addr_t)sc->sc_lance_dmam->dm_segs[0].ds_addr;
	*(u_int32_t *)(ioasic_base + IOASIC_LANCE_DMAPTR)
	    = ((tca << 3) & ~(tc_addr_t)0x1f) | ((tca >> 29) & 0x1f);
	tc_wmb();

	*(u_int32_t *)(ioasic_base + IOASIC_CSR) |= IOASIC_CSR_DMAEN_LANCE;
	tc_wmb();
	return;

 bad:
	bus_dmamem_unmap(dmat, le_iomem, LE_IOASIC_MEMSIZE);
	bus_dmamem_free(dmat, &seg, rseg);
	le_iomem = 0;
}