cia_dma.c revision 1.4 1 /* $NetBSD: cia_dma.c,v 1.4 1998/01/17 03:43:59 thorpej Exp $ */
2
3 /*-
4 * Copyright (c) 1997 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 #include <sys/cdefs.h> /* RCS ID & Copyright macro defns */
41
42 __KERNEL_RCSID(0, "$NetBSD: cia_dma.c,v 1.4 1998/01/17 03:43:59 thorpej Exp $");
43
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/kernel.h>
47 #include <sys/device.h>
48 #include <sys/malloc.h>
49 #include <vm/vm.h>
50
51 #define _ALPHA_BUS_DMA_PRIVATE
52 #include <machine/bus.h>
53
54 #include <dev/pci/pcireg.h>
55 #include <dev/pci/pcivar.h>
56 #include <alpha/pci/ciareg.h>
57 #include <alpha/pci/ciavar.h>
58
/*
 * Forward declarations for the CIA bus_dma method implementations.
 * Two parallel families exist: "_direct" methods use the chipset's
 * direct-mapped DMA window, "_sgmap" methods use the scatter/gather
 * map (SGMAP) window programmed by cia_dma_init() below.
 */
bus_dma_tag_t cia_dma_get_tag __P((bus_dma_tag_t, alpha_bus_t));

/* Map create/destroy for SGMAP-mapped DMA (direct uses the generic ones). */
int	cia_bus_dmamap_create_sgmap __P((bus_dma_tag_t, bus_size_t, int,
	    bus_size_t, bus_size_t, int, bus_dmamap_t *));

void	cia_bus_dmamap_destroy_sgmap __P((bus_dma_tag_t, bus_dmamap_t));

/* Linear-buffer loaders. */
int	cia_bus_dmamap_load_direct __P((bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct proc *, int));
int	cia_bus_dmamap_load_sgmap __P((bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct proc *, int));

/* mbuf-chain loaders. */
int	cia_bus_dmamap_load_mbuf_direct __P((bus_dma_tag_t, bus_dmamap_t,
	    struct mbuf *, int));
int	cia_bus_dmamap_load_mbuf_sgmap __P((bus_dma_tag_t, bus_dmamap_t,
	    struct mbuf *, int));

/* uio loaders. */
int	cia_bus_dmamap_load_uio_direct __P((bus_dma_tag_t, bus_dmamap_t,
	    struct uio *, int));
int	cia_bus_dmamap_load_uio_sgmap __P((bus_dma_tag_t, bus_dmamap_t,
	    struct uio *, int));

/* Raw (pre-allocated segment) loaders. */
int	cia_bus_dmamap_load_raw_direct __P((bus_dma_tag_t, bus_dmamap_t,
	    bus_dma_segment_t *, int, bus_size_t, int));
int	cia_bus_dmamap_load_raw_sgmap __P((bus_dma_tag_t, bus_dmamap_t,
	    bus_dma_segment_t *, int, bus_size_t, int));

void	cia_bus_dmamap_unload_sgmap __P((bus_dma_tag_t, bus_dmamap_t));

/*
 * The direct-mapped DMA window begins at this PCI address (1G).
 */
#define	CIA_DIRECT_MAPPED_BASE	0x40000000

/*
 * The 8M SGMAP-mapped DMA window begins at this PCI address (8M).
 */
#define	CIA_SGMAP_MAPPED_BASE	(8*1024*1024)

/*
 * Macro to flush the CIA scatter/gather TLB.  Memory barriers
 * bracket the register write so the invalidate is ordered with
 * respect to surrounding PTE updates.
 */
#define	CIA_TLB_INVALIDATE() \
do { \
	alpha_mb(); \
	REGVAL(CIA_PCI_TBIA) = CIA_PCI_TBIA_ALL; \
	alpha_mb(); \
} while (0)
107
108 void
109 cia_dma_init(ccp)
110 struct cia_config *ccp;
111 {
112 bus_addr_t tbase;
113 bus_dma_tag_t t;
114
115 /*
116 * Initialize the DMA tag used for direct-mapped DMA.
117 */
118 t = &ccp->cc_dmat_direct;
119 t->_cookie = ccp;
120 t->_get_tag = cia_dma_get_tag;
121 t->_dmamap_create = _bus_dmamap_create;
122 t->_dmamap_destroy = _bus_dmamap_destroy;
123 t->_dmamap_load = cia_bus_dmamap_load_direct;
124 t->_dmamap_load_mbuf = cia_bus_dmamap_load_mbuf_direct;
125 t->_dmamap_load_uio = cia_bus_dmamap_load_uio_direct;
126 t->_dmamap_load_raw = cia_bus_dmamap_load_raw_direct;
127 t->_dmamap_unload = _bus_dmamap_unload;
128 t->_dmamap_sync = NULL; /* Nothing to do. */
129
130 t->_dmamem_alloc = _bus_dmamem_alloc;
131 t->_dmamem_free = _bus_dmamem_free;
132 t->_dmamem_map = _bus_dmamem_map;
133 t->_dmamem_unmap = _bus_dmamem_unmap;
134 t->_dmamem_mmap = _bus_dmamem_mmap;
135
136 /*
137 * Initialize the DMA tag used for sgmap-mapped DMA.
138 */
139 t = &ccp->cc_dmat_sgmap;
140 t->_cookie = ccp;
141 t->_get_tag = cia_dma_get_tag;
142 t->_dmamap_create = cia_bus_dmamap_create_sgmap;
143 t->_dmamap_destroy = cia_bus_dmamap_destroy_sgmap;
144 t->_dmamap_load = cia_bus_dmamap_load_sgmap;
145 t->_dmamap_load_mbuf = cia_bus_dmamap_load_mbuf_sgmap;
146 t->_dmamap_load_uio = cia_bus_dmamap_load_uio_sgmap;
147 t->_dmamap_load_raw = cia_bus_dmamap_load_raw_sgmap;
148 t->_dmamap_unload = cia_bus_dmamap_unload_sgmap;
149 t->_dmamap_sync = NULL; /* Nothing to do. */
150
151 t->_dmamem_alloc = _bus_dmamem_alloc;
152 t->_dmamem_free = _bus_dmamem_free;
153 t->_dmamem_map = _bus_dmamem_map;
154 t->_dmamem_unmap = _bus_dmamem_unmap;
155 t->_dmamem_mmap = _bus_dmamem_mmap;
156
157 /*
158 * The firmware has set up window 1 as a 1G direct-mapped DMA
159 * window beginning at 1G. We leave it alone. Leave window
160 * 0 alone until we reconfigure it for SGMAP-mapped DMA.
161 * Windows 2 and 3 are already disabled.
162 */
163
164 /*
165 * Initialize the SGMAP if safe to do so. Must align page
166 * table to 32k (hardware bug?).
167 */
168 if (ccp->cc_mallocsafe) {
169 alpha_sgmap_init(t, &ccp->cc_sgmap, "cia_sgmap",
170 CIA_SGMAP_MAPPED_BASE, 0, (8*1024*1024),
171 sizeof(u_int64_t), NULL, (32*1024*1024));
172
173 /*
174 * Set up window 0 as an 8MB SGMAP-mapped window
175 * starting at 8MB.
176 */
177 REGVAL(CIA_PCI_W0BASE) = CIA_SGMAP_MAPPED_BASE |
178 CIA_PCI_WnBASE_SG_EN | CIA_PCI_WnBASE_W_EN;
179 alpha_mb();
180
181 REGVAL(CIA_PCI_W0MASK) = CIA_PCI_WnMASK_8M;
182 alpha_mb();
183
184 tbase = ccp->cc_sgmap.aps_ptpa >> CIA_PCI_TnBASE_SHIFT;
185 if ((tbase & CIA_PCI_TnBASE_MASK) != tbase)
186 panic("cia_dma_init: bad page table address");
187 REGVAL(CIA_PCI_T0BASE) = tbase;
188 alpha_mb();
189
190 CIA_TLB_INVALIDATE();
191 }
192
193 /* XXX XXX BEGIN XXX XXX */
194 { /* XXX */
195 extern vm_offset_t alpha_XXX_dmamap_or; /* XXX */
196 alpha_XXX_dmamap_or = CIA_DIRECT_MAPPED_BASE; /* XXX */
197 } /* XXX */
198 /* XXX XXX END XXX XXX */
199 }
200
201 /*
202 * Return the bus dma tag to be used for the specified bus type.
203 * INTERNAL USE ONLY!
204 */
205 bus_dma_tag_t
206 cia_dma_get_tag(t, bustype)
207 bus_dma_tag_t t;
208 alpha_bus_t bustype;
209 {
210 struct cia_config *ccp = t->_cookie;
211
212 switch (bustype) {
213 case ALPHA_BUS_PCI:
214 case ALPHA_BUS_EISA:
215 /*
216 * Systems with a CIA can only support 1G
217 * of memory, so we use the direct-mapped window
218 * on busses that have 32-bit DMA.
219 */
220 return (&ccp->cc_dmat_direct);
221
222 case ALPHA_BUS_ISA:
223 /*
224 * ISA doesn't have enough address bits to use
225 * the direct-mapped DMA window, so we must use
226 * SGMAPs.
227 */
228 return (&ccp->cc_dmat_sgmap);
229
230 default:
231 panic("cia_dma_get_tag: shouldn't be here, really...");
232 }
233 }
234
235 /*
236 * Create a CIA SGMAP-mapped DMA map.
237 */
/*
 * Create a CIA SGMAP-mapped DMA map.
 *
 * Builds a generic DMA map, then attaches an SGMAP cookie to it.
 * If BUS_DMA_ALLOCNOW is requested, SGMAP address space is reserved
 * immediately.  Returns 0 on success or an errno; on failure all
 * partially-created state is torn down before returning.
 */
int
cia_bus_dmamap_create_sgmap(t, size, nsegments, maxsegsz, boundary,
    flags, dmamp)
	bus_dma_tag_t t;
	bus_size_t size;	/* maximum DMA transfer size for this map */
	int nsegments;		/* maximum number of DMA segments */
	bus_size_t maxsegsz;	/* maximum size of a single segment */
	bus_size_t boundary;	/* boundary segments may not cross */
	int flags;		/* BUS_DMA_* flags */
	bus_dmamap_t *dmamp;	/* OUT: the created map */
{
	struct cia_config *ccp = t->_cookie;
	struct alpha_sgmap_cookie *a;
	bus_dmamap_t map;
	int error;

	/* Create the generic map first; the cookie hangs off of it. */
	error = _bus_dmamap_create(t, size, nsegments, maxsegsz,
	    boundary, flags, dmamp);
	if (error)
		return (error);

	map = *dmamp;

	/*
	 * Allocate and zero the SGMAP cookie.  M_NOWAIT only if the
	 * caller asked for a non-sleeping create.
	 */
	a = malloc(sizeof(struct alpha_sgmap_cookie), M_DEVBUF,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK);
	if (a == NULL) {
		/* Undo the generic create before failing. */
		_bus_dmamap_destroy(t, map);
		return (ENOMEM);
	}
	bzero(a, sizeof(struct alpha_sgmap_cookie));
	map->_dm_sgcookie = a;

	if (flags & BUS_DMA_ALLOCNOW) {
		/*
		 * Reserve SGMAP space up front.  On failure the
		 * sgmap destroy frees both the cookie and the map.
		 */
		error = alpha_sgmap_alloc(map, round_page(size),
		    &ccp->cc_sgmap, flags);
		if (error)
			cia_bus_dmamap_destroy_sgmap(t, map);
	}

	return (error);
}
279
280 /*
281 * Destroy a CIA SGMAP-mapped DMA map.
282 */
283 void
284 cia_bus_dmamap_destroy_sgmap(t, map)
285 bus_dma_tag_t t;
286 bus_dmamap_t map;
287 {
288 struct cia_config *ccp = t->_cookie;
289 struct alpha_sgmap_cookie *a = map->_dm_sgcookie;
290
291 if (a->apdc_flags & APDC_HAS_SGMAP)
292 alpha_sgmap_free(&ccp->cc_sgmap, a);
293
294 free(a, M_DEVBUF);
295 _bus_dmamap_destroy(t, map);
296 }
297
298 /*
299 * Load a CIA direct-mapped DMA map with a linear buffer.
300 */
301 int
302 cia_bus_dmamap_load_direct(t, map, buf, buflen, p, flags)
303 bus_dma_tag_t t;
304 bus_dmamap_t map;
305 void *buf;
306 bus_size_t buflen;
307 struct proc *p;
308 int flags;
309 {
310
311 return (_bus_dmamap_load_direct_common(t, map, buf, buflen, p,
312 flags, CIA_DIRECT_MAPPED_BASE));
313 }
314
315 /*
316 * Load a CIA SGMAP-mapped DMA map with a linear buffer.
317 */
318 int
319 cia_bus_dmamap_load_sgmap(t, map, buf, buflen, p, flags)
320 bus_dma_tag_t t;
321 bus_dmamap_t map;
322 void *buf;
323 bus_size_t buflen;
324 struct proc *p;
325 int flags;
326 {
327 struct cia_config *ccp = t->_cookie;
328 int error;
329
330 error = pci_sgmap_pte64_load(t, map, buf, buflen, p, flags,
331 &ccp->cc_sgmap);
332 if (error == 0)
333 CIA_TLB_INVALIDATE();
334
335 return (error);
336 }
337
338 /*
339 * Load a CIA direct-mapped DMA map with an mbuf chain.
340 */
341 int
342 cia_bus_dmamap_load_mbuf_direct(t, map, m, flags)
343 bus_dma_tag_t t;
344 bus_dmamap_t map;
345 struct mbuf *m;
346 int flags;
347 {
348
349 return (_bus_dmamap_load_mbuf_direct_common(t, map, m,
350 flags, CIA_DIRECT_MAPPED_BASE));
351 }
352
353 /*
354 * Load a CIA SGMAP-mapped DMA map with an mbuf chain.
355 */
356 int
357 cia_bus_dmamap_load_mbuf_sgmap(t, map, m, flags)
358 bus_dma_tag_t t;
359 bus_dmamap_t map;
360 struct mbuf *m;
361 int flags;
362 {
363 struct cia_config *ccp = t->_cookie;
364 int error;
365
366 error = pci_sgmap_pte64_load_mbuf(t, map, m, flags, &ccp->cc_sgmap);
367 if (error == 0)
368 CIA_TLB_INVALIDATE();
369
370 return (error);
371 }
372
373 /*
374 * Load a CIA direct-mapped DMA map with a uio.
375 */
376 int
377 cia_bus_dmamap_load_uio_direct(t, map, uio, flags)
378 bus_dma_tag_t t;
379 bus_dmamap_t map;
380 struct uio *uio;
381 int flags;
382 {
383
384 return (_bus_dmamap_load_uio_direct_common(t, map, uio,
385 flags, CIA_DIRECT_MAPPED_BASE));
386 }
387
388 /*
389 * Load a CIA SGMAP-mapped DMA map with a uio.
390 */
391 int
392 cia_bus_dmamap_load_uio_sgmap(t, map, uio, flags)
393 bus_dma_tag_t t;
394 bus_dmamap_t map;
395 struct uio *uio;
396 int flags;
397 {
398 struct cia_config *ccp = t->_cookie;
399 int error;
400
401 error = pci_sgmap_pte64_load_uio(t, map, uio, flags, &ccp->cc_sgmap);
402 if (error == 0)
403 CIA_TLB_INVALIDATE();
404
405 return (error);
406 }
407
408 /*
409 * Load a CIA direct-mapped DMA map with raw memory.
410 */
411 int
412 cia_bus_dmamap_load_raw_direct(t, map, segs, nsegs, size, flags)
413 bus_dma_tag_t t;
414 bus_dmamap_t map;
415 bus_dma_segment_t *segs;
416 int nsegs;
417 bus_size_t size;
418 int flags;
419 {
420
421 return (_bus_dmamap_load_raw_direct_common(t, map, segs, nsegs,
422 size, flags, CIA_DIRECT_MAPPED_BASE));
423 }
424
425 /*
426 * Load a CIA SGMAP-mapped DMA map with raw memory.
427 */
428 int
429 cia_bus_dmamap_load_raw_sgmap(t, map, segs, nsegs, size, flags)
430 bus_dma_tag_t t;
431 bus_dmamap_t map;
432 bus_dma_segment_t *segs;
433 int nsegs;
434 bus_size_t size;
435 int flags;
436 {
437 struct cia_config *ccp = t->_cookie;
438 int error;
439
440 error = pci_sgmap_pte64_load_raw(t, map, segs, nsegs, size, flags,
441 &ccp->cc_sgmap);
442 if (error == 0)
443 CIA_TLB_INVALIDATE();
444
445 return (error);
446 }
447
448 /*
449 * Unload a CIA DMA map.
450 */
/*
 * Unload a CIA SGMAP-mapped DMA map.
 *
 * Order matters here: the SGMAP page table entries must be
 * invalidated and the chipset's scatter/gather TLB flushed
 * BEFORE the generic unload releases the map's resources, so
 * no stale translations can be used by in-flight DMA.
 */
void
cia_bus_dmamap_unload_sgmap(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	struct cia_config *ccp = t->_cookie;

	/*
	 * Invalidate any SGMAP page table entries used by this
	 * mapping.
	 */
	pci_sgmap_pte64_unload(t, map, &ccp->cc_sgmap);
	CIA_TLB_INVALIDATE();

	/*
	 * Do the generic bits of the unload.
	 */
	_bus_dmamap_unload(t, map);
}
470