/* $NetBSD: cia_dma.c,v 1.10 1998/06/04 18:11:23 thorpej Exp $ */
2
3 /*-
4 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 #include <sys/cdefs.h> /* RCS ID & Copyright macro defns */
41
42 __KERNEL_RCSID(0, "$NetBSD: cia_dma.c,v 1.10 1998/06/04 18:11:23 thorpej Exp $");
43
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/kernel.h>
47 #include <sys/device.h>
48 #include <sys/malloc.h>
49 #include <vm/vm.h>
50
51 #define _ALPHA_BUS_DMA_PRIVATE
52 #include <machine/bus.h>
53
54 #include <dev/pci/pcireg.h>
55 #include <dev/pci/pcivar.h>
56 #include <alpha/pci/ciareg.h>
57 #include <alpha/pci/ciavar.h>
58
/* Tag selector: picks direct-mapped vs. SGMAP tag per bus type. */
bus_dma_tag_t cia_dma_get_tag __P((bus_dma_tag_t, alpha_bus_t));

/* SGMAP-mapped DMA map methods installed in cc_dmat_sgmap below. */
int	cia_bus_dmamap_create_sgmap __P((bus_dma_tag_t, bus_size_t, int,
	    bus_size_t, bus_size_t, int, bus_dmamap_t *));

void	cia_bus_dmamap_destroy_sgmap __P((bus_dma_tag_t, bus_dmamap_t));

int	cia_bus_dmamap_load_sgmap __P((bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct proc *, int));

int	cia_bus_dmamap_load_mbuf_sgmap __P((bus_dma_tag_t, bus_dmamap_t,
	    struct mbuf *, int));

int	cia_bus_dmamap_load_uio_sgmap __P((bus_dma_tag_t, bus_dmamap_t,
	    struct uio *, int));

int	cia_bus_dmamap_load_raw_sgmap __P((bus_dma_tag_t, bus_dmamap_t,
	    bus_dma_segment_t *, int, bus_size_t, int));

void	cia_bus_dmamap_unload_sgmap __P((bus_dma_tag_t, bus_dmamap_t));

/*
 * Direct-mapped window: 1G at 1G
 */
#define	CIA_DIRECT_MAPPED_BASE	(1*1024*1024*1024)
#define	CIA_DIRECT_MAPPED_SIZE	(1*1024*1024*1024)

/*
 * SGMAP window: 8M at 8M
 */
#define	CIA_SGMAP_MAPPED_BASE	(8*1024*1024)
#define	CIA_SGMAP_MAPPED_SIZE	(8*1024*1024)

/* Two TLB-invalidate implementations; broken-Pyxis chips need a workaround. */
void	cia_tlb_invalidate __P((void));
void	cia_broken_pyxis_tlb_invalidate __P((void));

/* Set once by cia_dma_init() to the variant appropriate for this chip. */
void	(*cia_tlb_invalidate_fn) __P((void));

#define	CIA_TLB_INVALIDATE()	(*cia_tlb_invalidate_fn)()

/*
 * Dedicated SGMAP used only by the broken-Pyxis workaround: a 2M
 * window at 128k, read in loopback mode to force S/G TLB misses.
 */
struct alpha_sgmap cia_pyxis_bug_sgmap;
#define	CIA_PYXIS_BUG_BASE	(128*1024)
#define	CIA_PYXIS_BUG_SIZE	(2*1024*1024)
102
/*
 * cia_dma_init --
 *	Set up both DMA tags (direct-mapped and SGMAP-mapped) for this
 *	CIA/Pyxis instance and program the chipset's PCI DMA windows.
 *	Called from chipset attach; may be called before malloc is safe,
 *	in which case the SGMAP hardware setup is deferred (see
 *	cc_mallocsafe test below).
 */
void
cia_dma_init(ccp)
	struct cia_config *ccp;
{
	bus_addr_t tbase;
	bus_dma_tag_t t;

	/*
	 * Initialize the DMA tag used for direct-mapped DMA.
	 */
	t = &ccp->cc_dmat_direct;
	t->_cookie = ccp;
	t->_wbase = CIA_DIRECT_MAPPED_BASE;
	t->_wsize = CIA_DIRECT_MAPPED_SIZE;
	t->_next_window = NULL;
	t->_boundary = 0;
	t->_sgmap = NULL;
	t->_get_tag = cia_dma_get_tag;
	/* Direct window uses the generic (MI alpha) map methods. */
	t->_dmamap_create = _bus_dmamap_create;
	t->_dmamap_destroy = _bus_dmamap_destroy;
	t->_dmamap_load = _bus_dmamap_load_direct;
	t->_dmamap_load_mbuf = _bus_dmamap_load_mbuf_direct;
	t->_dmamap_load_uio = _bus_dmamap_load_uio_direct;
	t->_dmamap_load_raw = _bus_dmamap_load_raw_direct;
	t->_dmamap_unload = _bus_dmamap_unload;
	t->_dmamap_sync = _bus_dmamap_sync;

	t->_dmamem_alloc = _bus_dmamem_alloc;
	t->_dmamem_free = _bus_dmamem_free;
	t->_dmamem_map = _bus_dmamem_map;
	t->_dmamem_unmap = _bus_dmamem_unmap;
	t->_dmamem_mmap = _bus_dmamem_mmap;

	/*
	 * Initialize the DMA tag used for sgmap-mapped DMA.
	 */
	t = &ccp->cc_dmat_sgmap;
	t->_cookie = ccp;
	t->_wbase = CIA_SGMAP_MAPPED_BASE;
	t->_wsize = CIA_SGMAP_MAPPED_SIZE;
	t->_next_window = NULL;
	t->_boundary = 0;
	t->_sgmap = &ccp->cc_sgmap;
	t->_get_tag = cia_dma_get_tag;
	/* SGMAP window uses the CIA-specific map methods in this file. */
	t->_dmamap_create = cia_bus_dmamap_create_sgmap;
	t->_dmamap_destroy = cia_bus_dmamap_destroy_sgmap;
	t->_dmamap_load = cia_bus_dmamap_load_sgmap;
	t->_dmamap_load_mbuf = cia_bus_dmamap_load_mbuf_sgmap;
	t->_dmamap_load_uio = cia_bus_dmamap_load_uio_sgmap;
	t->_dmamap_load_raw = cia_bus_dmamap_load_raw_sgmap;
	t->_dmamap_unload = cia_bus_dmamap_unload_sgmap;
	t->_dmamap_sync = _bus_dmamap_sync;

	t->_dmamem_alloc = _bus_dmamem_alloc;
	t->_dmamem_free = _bus_dmamem_free;
	t->_dmamem_map = _bus_dmamem_map;
	t->_dmamem_unmap = _bus_dmamem_unmap;
	t->_dmamem_mmap = _bus_dmamem_mmap;

	/*
	 * The firmware has set up window 1 as a 1G direct-mapped DMA
	 * window beginning at 1G.  We leave it alone.  Leave window
	 * 0 alone until we reconfigure it for SGMAP-mapped DMA.
	 * Windows 2 and 3 are already disabled.
	 */

	/*
	 * Initialize the SGMAP if safe to do so.  Must align page
	 * table to 32k (hardware bug?).
	 *
	 * NOTE(review): the comment above says 32k, but the minalign
	 * argument passed below is (32*1024*1024) == 32M -- verify
	 * which one is intended.
	 */
	if (ccp->cc_mallocsafe) {
		alpha_sgmap_init(t, &ccp->cc_sgmap, "cia_sgmap",
		    CIA_SGMAP_MAPPED_BASE, 0, CIA_SGMAP_MAPPED_SIZE,
		    sizeof(u_int64_t), NULL, (32*1024*1024));

		/*
		 * Set up window 0 as an 8MB SGMAP-mapped window
		 * starting at 8MB.
		 */
		REGVAL(CIA_PCI_W0BASE) = CIA_SGMAP_MAPPED_BASE |
		    CIA_PCI_WnBASE_SG_EN | CIA_PCI_WnBASE_W_EN;
		alpha_mb();	/* order the chipset register writes */

		REGVAL(CIA_PCI_W0MASK) = CIA_PCI_WnMASK_8M;
		alpha_mb();

		/* Page table physical address, as seen by the chipset. */
		tbase = ccp->cc_sgmap.aps_ptpa >> CIA_PCI_TnBASE_SHIFT;
		if ((tbase & CIA_PCI_TnBASE_MASK) != tbase)
			panic("cia_dma_init: bad page table address");
		REGVAL(CIA_PCI_T0BASE) = tbase;
		alpha_mb();

		/*
		 * Pass 1 and 2 (i.e. revision <= 1) of the Pyxis have a
		 * broken scatter/gather TLB; it cannot be invalidated.  To
		 * work around this problem, we configure window 2 as an SG
		 * 2M window at 128M, which we use in DMA loopback mode to
		 * read a spill page.  This works by causing TLB misses,
		 * causing the old entries to be purged to make room for
		 * the new entries coming in for the spill page.
		 */
		if ((ccp->cc_flags & CCF_ISPYXIS) != 0 && ccp->cc_rev <= 1) {
			u_int64_t *page_table;
			int i;

			cia_tlb_invalidate_fn =
			    cia_broken_pyxis_tlb_invalidate;

			alpha_sgmap_init(t, &cia_pyxis_bug_sgmap,
			    "pyxis_bug_sgmap", CIA_PYXIS_BUG_BASE, 0,
			    CIA_PYXIS_BUG_SIZE, sizeof(u_int64_t), NULL,
			    (32*1024*1024));

			/* Window 2: SG-enabled, 2M, at the bug base. */
			REGVAL(CIA_PCI_W2BASE) = CIA_PYXIS_BUG_BASE |
			    CIA_PCI_WnBASE_SG_EN | CIA_PCI_WnBASE_W_EN;
			alpha_mb();

			REGVAL(CIA_PCI_W2MASK) = CIA_PCI_WnMASK_2M;
			alpha_mb();

			tbase = cia_pyxis_bug_sgmap.aps_ptpa >>
			    CIA_PCI_TnBASE_SHIFT;
			if ((tbase & CIA_PCI_TnBASE_MASK) != tbase)
				panic("cia_dma_init: bad page table address");
			REGVAL(CIA_PCI_T2BASE) = tbase;
			alpha_mb();

			/*
			 * Initialize the page table to point at the spill
			 * page.  Leave the last entry invalid.
			 */
			pci_sgmap_pte64_init_spill_page_pte();
			for (i = 0, page_table = cia_pyxis_bug_sgmap.aps_pt;
			     i < (CIA_PYXIS_BUG_SIZE / PAGE_SIZE) - 1; i++) {
				page_table[i] =
				    pci_sgmap_pte64_prefetch_spill_page_pte;
			}
			alpha_mb();
		} else
			cia_tlb_invalidate_fn = cia_tlb_invalidate;

		/* Drop any stale S/G translations before first use. */
		CIA_TLB_INVALIDATE();
	}

	/* XXX XXX BEGIN XXX XXX */
	{							/* XXX */
		extern vm_offset_t alpha_XXX_dmamap_or;	/* XXX */
		alpha_XXX_dmamap_or = CIA_DIRECT_MAPPED_BASE;	/* XXX */
	}							/* XXX */
	/* XXX XXX END XXX XXX */
}
254
255 /*
256 * Return the bus dma tag to be used for the specified bus type.
257 * INTERNAL USE ONLY!
258 */
259 bus_dma_tag_t
260 cia_dma_get_tag(t, bustype)
261 bus_dma_tag_t t;
262 alpha_bus_t bustype;
263 {
264 struct cia_config *ccp = t->_cookie;
265
266 switch (bustype) {
267 case ALPHA_BUS_PCI:
268 case ALPHA_BUS_EISA:
269 /*
270 * Systems with a CIA can only support 1G
271 * of memory, so we use the direct-mapped window
272 * on busses that have 32-bit DMA.
273 */
274 return (&ccp->cc_dmat_direct);
275
276 case ALPHA_BUS_ISA:
277 /*
278 * ISA doesn't have enough address bits to use
279 * the direct-mapped DMA window, so we must use
280 * SGMAPs.
281 */
282 return (&ccp->cc_dmat_sgmap);
283
284 default:
285 panic("cia_dma_get_tag: shouldn't be here, really...");
286 }
287 }
288
289 /*
290 * Create a CIA SGMAP-mapped DMA map.
291 */
292 int
293 cia_bus_dmamap_create_sgmap(t, size, nsegments, maxsegsz, boundary,
294 flags, dmamp)
295 bus_dma_tag_t t;
296 bus_size_t size;
297 int nsegments;
298 bus_size_t maxsegsz;
299 bus_size_t boundary;
300 int flags;
301 bus_dmamap_t *dmamp;
302 {
303 bus_dmamap_t map;
304 int error;
305
306 error = _bus_dmamap_create(t, size, nsegments, maxsegsz,
307 boundary, flags, dmamp);
308 if (error)
309 return (error);
310
311 map = *dmamp;
312
313 if (flags & BUS_DMA_ALLOCNOW) {
314 error = alpha_sgmap_alloc(map, round_page(size),
315 t->_sgmap, flags);
316 if (error)
317 cia_bus_dmamap_destroy_sgmap(t, map);
318 }
319
320 return (error);
321 }
322
323 /*
324 * Destroy a CIA SGMAP-mapped DMA map.
325 */
326 void
327 cia_bus_dmamap_destroy_sgmap(t, map)
328 bus_dma_tag_t t;
329 bus_dmamap_t map;
330 {
331
332 if (map->_dm_flags & DMAMAP_HAS_SGMAP)
333 alpha_sgmap_free(map, t->_sgmap);
334
335 _bus_dmamap_destroy(t, map);
336 }
337
338 /*
339 * Load a CIA SGMAP-mapped DMA map with a linear buffer.
340 */
341 int
342 cia_bus_dmamap_load_sgmap(t, map, buf, buflen, p, flags)
343 bus_dma_tag_t t;
344 bus_dmamap_t map;
345 void *buf;
346 bus_size_t buflen;
347 struct proc *p;
348 int flags;
349 {
350 int error;
351
352 error = pci_sgmap_pte64_load(t, map, buf, buflen, p, flags,
353 t->_sgmap);
354 if (error == 0)
355 CIA_TLB_INVALIDATE();
356
357 return (error);
358 }
359
360 /*
361 * Load a CIA SGMAP-mapped DMA map with an mbuf chain.
362 */
363 int
364 cia_bus_dmamap_load_mbuf_sgmap(t, map, m, flags)
365 bus_dma_tag_t t;
366 bus_dmamap_t map;
367 struct mbuf *m;
368 int flags;
369 {
370 int error;
371
372 error = pci_sgmap_pte64_load_mbuf(t, map, m, flags, t->_sgmap);
373 if (error == 0)
374 CIA_TLB_INVALIDATE();
375
376 return (error);
377 }
378
379 /*
380 * Load a CIA SGMAP-mapped DMA map with a uio.
381 */
382 int
383 cia_bus_dmamap_load_uio_sgmap(t, map, uio, flags)
384 bus_dma_tag_t t;
385 bus_dmamap_t map;
386 struct uio *uio;
387 int flags;
388 {
389 int error;
390
391 error = pci_sgmap_pte64_load_uio(t, map, uio, flags, t->_sgmap);
392 if (error == 0)
393 CIA_TLB_INVALIDATE();
394
395 return (error);
396 }
397
398 /*
399 * Load a CIA SGMAP-mapped DMA map with raw memory.
400 */
401 int
402 cia_bus_dmamap_load_raw_sgmap(t, map, segs, nsegs, size, flags)
403 bus_dma_tag_t t;
404 bus_dmamap_t map;
405 bus_dma_segment_t *segs;
406 int nsegs;
407 bus_size_t size;
408 int flags;
409 {
410 int error;
411
412 error = pci_sgmap_pte64_load_raw(t, map, segs, nsegs, size, flags,
413 t->_sgmap);
414 if (error == 0)
415 CIA_TLB_INVALIDATE();
416
417 return (error);
418 }
419
420 /*
421 * Unload a CIA DMA map.
422 */
423 void
424 cia_bus_dmamap_unload_sgmap(t, map)
425 bus_dma_tag_t t;
426 bus_dmamap_t map;
427 {
428
429 /*
430 * Invalidate any SGMAP page table entries used by this
431 * mapping.
432 */
433 pci_sgmap_pte64_unload(t, map, t->_sgmap);
434 CIA_TLB_INVALIDATE();
435
436 /*
437 * Do the generic bits of the unload.
438 */
439 _bus_dmamap_unload(t, map);
440 }
441
442 /*
443 * Flush the CIA scatter/gather TLB.
444 */
/*
 * Flush the CIA scatter/gather TLB.
 *
 * A single write of CIA_PCI_TBIA_ALL to the TBIA register drops all
 * S/G translations; the write is bracketed by memory barriers so it
 * is ordered with respect to surrounding PTE updates.
 */
void
cia_tlb_invalidate()
{

	alpha_mb();
	REGVAL(CIA_PCI_TBIA) = CIA_PCI_TBIA_ALL;
	alpha_mb();
}
453
454 /*
455 * Flush the scatter/gather TLB on broken Pyxis chips.
456 */
/*
 * Flush the scatter/gather TLB on broken Pyxis chips.
 *
 * Pass 1/2 Pyxis cannot invalidate its S/G TLB directly.  Instead,
 * with the chip in PCI loopback mode, we perform reads through the
 * 2M "bug" SG window set up in cia_dma_init(); each read forces a
 * TLB miss, evicting old entries in favor of spill-page entries.
 * Runs at splhigh() since the loopback window must not be disturbed.
 */
void
cia_broken_pyxis_tlb_invalidate()
{
	volatile u_int64_t dummy;	/* sink for the loopback reads */
	u_int32_t ctrl;			/* saved CSR_CTRL to restore */
	int i, s;

	s = splhigh();

	/*
	 * Put the Pyxis into PCI loopback mode.
	 */
	alpha_mb();
	ctrl = REGVAL(CIA_CSR_CTRL);
	REGVAL(CIA_CSR_CTRL) = ctrl | CTRL_PCI_LOOP_EN;
	alpha_mb();

	/*
	 * Now, read from PCI dense memory space at offset 128M (our
	 * target window base), skipping 64k on each read.  This forces
	 * S/G TLB misses.
	 *
	 * XXX Looks like the TLB entries are `not quite LRU'.  We need
	 * XXX to read more times than there are actual tags!
	 */
	for (i = 0; i < CIA_TLB_NTAGS + 4; i++) {
		dummy = *((volatile u_int64_t *)
		    ALPHA_PHYS_TO_K0SEG(CIA_PCI_DENSE + CIA_PYXIS_BUG_BASE +
		    (i * 65536)));
	}

	/*
	 * Restore normal PCI operation.
	 */
	alpha_mb();
	REGVAL(CIA_CSR_CTRL) = ctrl;
	alpha_mb();

	splx(s);
}
497