/* $NetBSD: cia_dma.c,v 1.2.6.1 1997/09/04 00:53:28 thorpej Exp $ */

/*-
 * Copyright (c) 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */

__KERNEL_RCSID(0, "$NetBSD: cia_dma.c,v 1.2.6.1 1997/09/04 00:53:28 thorpej Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <vm/vm.h>

#define _ALPHA_BUS_DMA_PRIVATE
#include <machine/bus.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <alpha/pci/ciareg.h>
#include <alpha/pci/ciavar.h>

bus_dma_tag_t cia_dma_get_tag __P((bus_dma_tag_t, alpha_bus_t));

int	cia_bus_dmamap_create_sgmap __P((bus_dma_tag_t, bus_size_t, int,
	    bus_size_t, bus_size_t, int, bus_dmamap_t *));

void	cia_bus_dmamap_destroy_sgmap __P((bus_dma_tag_t, bus_dmamap_t));

int	cia_bus_dmamap_load_direct __P((bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct proc *, int));
int	cia_bus_dmamap_load_sgmap __P((bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct proc *, int));

int	cia_bus_dmamap_load_mbuf_direct __P((bus_dma_tag_t, bus_dmamap_t,
	    struct mbuf *, int));
int	cia_bus_dmamap_load_mbuf_sgmap __P((bus_dma_tag_t, bus_dmamap_t,
	    struct mbuf *, int));

int	cia_bus_dmamap_load_uio_direct __P((bus_dma_tag_t, bus_dmamap_t,
	    struct uio *, int));
int	cia_bus_dmamap_load_uio_sgmap __P((bus_dma_tag_t, bus_dmamap_t,
	    struct uio *, int));

int	cia_bus_dmamap_load_raw_direct __P((bus_dma_tag_t, bus_dmamap_t,
	    bus_dma_segment_t *, int, bus_size_t, int));
int	cia_bus_dmamap_load_raw_sgmap __P((bus_dma_tag_t, bus_dmamap_t,
	    bus_dma_segment_t *, int, bus_size_t, int));

void	cia_bus_dmamap_unload_sgmap __P((bus_dma_tag_t, bus_dmamap_t));

/*
 * The direct-mapped DMA window begins at this PCI address.
 */
#define	CIA_DIRECT_MAPPED_BASE	0x40000000
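
/*
 * DMA through the direct-mapped window is a fixed offset: PCI address
 * CIA_DIRECT_MAPPED_BASE + x reaches system memory address x, which is
 * why the *_direct load routines below simply hand this base to the
 * common direct-mapped load functions.
 */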

/*
 * The 8M SGMAP-mapped DMA window begins at this PCI address.
 */
#define	CIA_SGMAP_MAPPED_BASE	(8*1024*1024)
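
/*
 * Note that this window sits entirely below the 16MB ISA DMA limit,
 * which is what makes SGMAP-mapped DMA reachable from ISA devices;
 * see cia_dma_get_tag() below.
 */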

/*
 * Macro to flush the CIA scatter/gather TLB.  The memory barriers
 * order the invalidate after any preceding SGMAP page table updates
 * and force the TBIA write out before DMA is allowed to proceed.
 */
#define	CIA_TLB_INVALIDATE() \
do { \
	alpha_mb(); \
	REGVAL(CIA_PCI_TBIA) = CIA_PCI_TBIA_ALL; \
	alpha_mb(); \
} while (0)

void
cia_dma_init(ccp)
	struct cia_config *ccp;
{
	bus_addr_t tbase;
	u_int32_t memcs_en;
	bus_dma_tag_t t;

	/*
	 * Initialize the DMA tag used for direct-mapped DMA.
	 */
	t = &ccp->cc_dmat_direct;
	t->_cookie = ccp;
	t->_get_tag = cia_dma_get_tag;
	t->_dmamap_create = _bus_dmamap_create;
	t->_dmamap_destroy = _bus_dmamap_destroy;
	t->_dmamap_load = cia_bus_dmamap_load_direct;
	t->_dmamap_load_mbuf = cia_bus_dmamap_load_mbuf_direct;
	t->_dmamap_load_uio = cia_bus_dmamap_load_uio_direct;
	t->_dmamap_load_raw = cia_bus_dmamap_load_raw_direct;
	t->_dmamap_unload = _bus_dmamap_unload;
	t->_dmamap_sync = NULL;			/* Nothing to do. */

	t->_dmamem_alloc = _bus_dmamem_alloc;
	t->_dmamem_free = _bus_dmamem_free;
	t->_dmamem_map = _bus_dmamem_map;
	t->_dmamem_unmap = _bus_dmamem_unmap;
	t->_dmamem_mmap = _bus_dmamem_mmap;

	/*
	 * Initialize the DMA tag used for sgmap-mapped DMA.
	 */
	t = &ccp->cc_dmat_sgmap;
	t->_cookie = ccp;
	t->_get_tag = cia_dma_get_tag;
	t->_dmamap_create = cia_bus_dmamap_create_sgmap;
	t->_dmamap_destroy = cia_bus_dmamap_destroy_sgmap;
	t->_dmamap_load = cia_bus_dmamap_load_sgmap;
	t->_dmamap_load_mbuf = cia_bus_dmamap_load_mbuf_sgmap;
	t->_dmamap_load_uio = cia_bus_dmamap_load_uio_sgmap;
	t->_dmamap_load_raw = cia_bus_dmamap_load_raw_sgmap;
	t->_dmamap_unload = cia_bus_dmamap_unload_sgmap;
	t->_dmamap_sync = NULL;			/* Nothing to do. */

	t->_dmamem_alloc = _bus_dmamem_alloc;
	t->_dmamem_free = _bus_dmamem_free;
	t->_dmamem_map = _bus_dmamem_map;
	t->_dmamem_unmap = _bus_dmamem_unmap;
	t->_dmamem_mmap = _bus_dmamem_mmap;

	/*
	 * The firmware has set up window 1 as a 1G direct-mapped DMA
	 * window beginning at 1G.  We leave it alone.  Leave window
	 * 0 alone until we reconfigure it for SGMAP-mapped DMA.
	 * Windows 2 and 3 are already disabled.
	 */

	/*
	 * Initialize the SGMAP if safe to do so.
	 */
	if (ccp->cc_mallocsafe) {
		alpha_sgmap_init(t, &ccp->cc_sgmap, "cia_sgmap",
		    CIA_SGMAP_MAPPED_BASE, 0, (8*1024*1024),
		    sizeof(u_int64_t), NULL);

		/* Remember the MEMCS value. */
		alpha_mb();
		memcs_en = REGVAL(CIA_PCI_W0BASE) & CIA_PCI_WnBASE_MEMCS_EN;

		/* Now disable window 0. */
		REGVAL(CIA_PCI_W0BASE) = 0;
		alpha_mb();

		/*
		 * Set up window 0 as an 8MB SGMAP-mapped window
		 * starting at 8MB.
		 */
		tbase = ccp->cc_sgmap.aps_ptpa >> CIA_PCI_TnBASE_SHIFT;
		if ((tbase & CIA_PCI_TnBASE_MASK) != tbase)
			panic("cia_dma_init: bad page table address");
		REGVAL(CIA_PCI_T0BASE) = tbase;
		REGVAL(CIA_PCI_W0MASK) = CIA_PCI_WnMASK_8M;
		alpha_mb();

		REGVAL(CIA_PCI_W0BASE) = CIA_SGMAP_MAPPED_BASE |
		    CIA_PCI_WnBASE_SG_EN | CIA_PCI_WnBASE_W_EN | memcs_en;
		alpha_mb();

		CIA_TLB_INVALIDATE();
	}
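
	/*
	 * The XXX block below publishes the direct-mapped window base
	 * for legacy code that has not yet been converted to bus_dma:
	 * alpha_XXX_dmamap_or is (as its name suggests) OR'd into
	 * physical addresses by such code to form PCI DMA addresses.
	 */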
	/* XXX XXX BEGIN XXX XXX */
	{							/* XXX */
		extern vm_offset_t alpha_XXX_dmamap_or;	/* XXX */
		alpha_XXX_dmamap_or = CIA_DIRECT_MAPPED_BASE;	/* XXX */
	}							/* XXX */
	/* XXX XXX END XXX XXX */
}

/*
 * Return the bus dma tag to be used for the specified bus type.
 * INTERNAL USE ONLY!
 */
bus_dma_tag_t
cia_dma_get_tag(t, bustype)
	bus_dma_tag_t t;
	alpha_bus_t bustype;
{
	struct cia_config *ccp = t->_cookie;

	switch (bustype) {
	case ALPHA_BUS_PCI:
	case ALPHA_BUS_EISA:
		/*
		 * Systems with a CIA can only support 1G
		 * of memory, so we use the direct-mapped window
		 * on busses that have 32-bit DMA.
		 */
		return (&ccp->cc_dmat_direct);

	case ALPHA_BUS_ISA:
		/*
		 * ISA doesn't have enough address bits to use
		 * the direct-mapped DMA window, so we must use
		 * SGMAPs.
		 */
		return (&ccp->cc_dmat_sgmap);

	default:
		panic("cia_dma_get_tag: shouldn't be here, really...");
	}
}
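
/*
 * Usage sketch (illustrative only, not part of this file's interface):
 * a PCI driver normally picks up its DMA tag from its attach arguments
 * (e.g. the pa_dmat member of struct pci_attach_args) and then goes
 * through the machine-independent bus_dma(9) interface, roughly:
 *
 *	bus_dmamap_t map;
 *	int error;
 *
 *	error = bus_dmamap_create(dmat, size, 1, size, 0,
 *	    BUS_DMA_NOWAIT, &map);
 *	if (error == 0)
 *		error = bus_dmamap_load(dmat, map, buf, size,
 *		    NULL, BUS_DMA_NOWAIT);
 *
 * Those calls are dispatched through the function pointers installed
 * in cia_dma_init() and end up in the *_direct or *_sgmap routines
 * below.
 */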

/*
 * Create a CIA SGMAP-mapped DMA map.
 */
int
cia_bus_dmamap_create_sgmap(t, size, nsegments, maxsegsz, boundary,
    flags, dmamp)
	bus_dma_tag_t t;
	bus_size_t size;
	int nsegments;
	bus_size_t maxsegsz;
	bus_size_t boundary;
	int flags;
	bus_dmamap_t *dmamp;
{
	struct cia_config *ccp = t->_cookie;
	struct alpha_sgmap_cookie *a;
	bus_dmamap_t map;
	int error;

	error = _bus_dmamap_create(t, size, nsegments, maxsegsz,
	    boundary, flags, dmamp);
	if (error)
		return (error);

	map = *dmamp;

	a = malloc(sizeof(struct alpha_sgmap_cookie), M_DEVBUF,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK);
	if (a == NULL) {
		_bus_dmamap_destroy(t, map);
		return (ENOMEM);
	}
	bzero(a, sizeof(struct alpha_sgmap_cookie));
	map->_dm_sgcookie = a;

	if (flags & BUS_DMA_ALLOCNOW) {
		error = alpha_sgmap_alloc(map, round_page(size),
		    &ccp->cc_sgmap, flags);
		if (error)
			cia_bus_dmamap_destroy_sgmap(t, map);
	}

	return (error);
}

/*
 * Destroy a CIA SGMAP-mapped DMA map.
 */
void
cia_bus_dmamap_destroy_sgmap(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	struct cia_config *ccp = t->_cookie;
	struct alpha_sgmap_cookie *a = map->_dm_sgcookie;

	if (a->apdc_flags & APDC_HAS_SGMAP)
		alpha_sgmap_free(&ccp->cc_sgmap, a);

	free(a, M_DEVBUF);
	_bus_dmamap_destroy(t, map);
}

/*
 * Load a CIA direct-mapped DMA map with a linear buffer.
 */
int
cia_bus_dmamap_load_direct(t, map, buf, buflen, p, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
{

	return (_bus_dmamap_load_direct_common(t, map, buf, buflen, p,
	    flags, CIA_DIRECT_MAPPED_BASE));
}

/*
 * Load a CIA SGMAP-mapped DMA map with a linear buffer.
 */
int
cia_bus_dmamap_load_sgmap(t, map, buf, buflen, p, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
{
	struct cia_config *ccp = t->_cookie;
	int error;

	error = pci_sgmap_pte64_load(t, map, buf, buflen, p, flags,
	    &ccp->cc_sgmap);
	if (error == 0)
		CIA_TLB_INVALIDATE();

	return (error);
}

/*
 * Load a CIA direct-mapped DMA map with an mbuf chain.
 */
int
cia_bus_dmamap_load_mbuf_direct(t, map, m, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct mbuf *m;
	int flags;
{

	return (_bus_dmamap_load_mbuf_direct_common(t, map, m,
	    flags, CIA_DIRECT_MAPPED_BASE));
}

/*
 * Load a CIA SGMAP-mapped DMA map with an mbuf chain.
 */
int
cia_bus_dmamap_load_mbuf_sgmap(t, map, m, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct mbuf *m;
	int flags;
{
	struct cia_config *ccp = t->_cookie;
	int error;

	error = pci_sgmap_pte64_load_mbuf(t, map, m, flags, &ccp->cc_sgmap);
	if (error == 0)
		CIA_TLB_INVALIDATE();

	return (error);
}

/*
 * Load a CIA direct-mapped DMA map with a uio.
 */
int
cia_bus_dmamap_load_uio_direct(t, map, uio, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct uio *uio;
	int flags;
{

	return (_bus_dmamap_load_uio_direct_common(t, map, uio,
	    flags, CIA_DIRECT_MAPPED_BASE));
}

/*
 * Load a CIA SGMAP-mapped DMA map with a uio.
 */
int
cia_bus_dmamap_load_uio_sgmap(t, map, uio, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct uio *uio;
	int flags;
{
	struct cia_config *ccp = t->_cookie;
	int error;

	error = pci_sgmap_pte64_load_uio(t, map, uio, flags, &ccp->cc_sgmap);
	if (error == 0)
		CIA_TLB_INVALIDATE();

	return (error);
}

/*
 * Load a CIA direct-mapped DMA map with raw memory.
 */
int
cia_bus_dmamap_load_raw_direct(t, map, segs, nsegs, size, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_dma_segment_t *segs;
	int nsegs;
	bus_size_t size;
	int flags;
{

	return (_bus_dmamap_load_raw_direct_common(t, map, segs, nsegs,
	    size, flags, CIA_DIRECT_MAPPED_BASE));
}

/*
 * Load a CIA SGMAP-mapped DMA map with raw memory.
 */
int
cia_bus_dmamap_load_raw_sgmap(t, map, segs, nsegs, size, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_dma_segment_t *segs;
	int nsegs;
	bus_size_t size;
	int flags;
{
	struct cia_config *ccp = t->_cookie;
	int error;

	error = pci_sgmap_pte64_load_raw(t, map, segs, nsegs, size, flags,
	    &ccp->cc_sgmap);
	if (error == 0)
		CIA_TLB_INVALIDATE();

	return (error);
}

/*
 * Unload a CIA SGMAP-mapped DMA map.
 */
void
cia_bus_dmamap_unload_sgmap(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	struct cia_config *ccp = t->_cookie;

	/*
	 * Invalidate any SGMAP page table entries used by this
	 * mapping.
	 */
	pci_sgmap_pte64_unload(t, map, &ccp->cc_sgmap);
	CIA_TLB_INVALIDATE();

	/*
	 * Do the generic bits of the unload.
	 */
	_bus_dmamap_unload(t, map);
}