/* $NetBSD: cia_dma.c,v 1.17 2001/01/03 19:16:00 thorpej Exp $ */

/*-
 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
39
40 #include <sys/cdefs.h> /* RCS ID & Copyright macro defns */
41
42 __KERNEL_RCSID(0, "$NetBSD: cia_dma.c,v 1.17 2001/01/03 19:16:00 thorpej Exp $");
43
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/kernel.h>
47 #include <sys/device.h>
48 #include <sys/malloc.h>
49
50 #include <uvm/uvm_extern.h>
51
52 #define _ALPHA_BUS_DMA_PRIVATE
53 #include <machine/bus.h>
54
55 #include <dev/pci/pcireg.h>
56 #include <dev/pci/pcivar.h>
57 #include <alpha/pci/ciareg.h>
58 #include <alpha/pci/ciavar.h>
59
/*
 * Internal methods installed into the two bus_dma tags built by
 * cia_dma_init() below.  The "direct" tag uses the generic alpha
 * bus_dma back ends; the "sgmap" tag routes loads/unloads through
 * the CIA scatter/gather map wrappers declared here.
 */
bus_dma_tag_t cia_dma_get_tag __P((bus_dma_tag_t, alpha_bus_t));

int	cia_bus_dmamap_create_direct __P((bus_dma_tag_t, bus_size_t, int,
	    bus_size_t, bus_size_t, int, bus_dmamap_t *));

int	cia_bus_dmamap_load_sgmap __P((bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct proc *, int));

int	cia_bus_dmamap_load_mbuf_sgmap __P((bus_dma_tag_t, bus_dmamap_t,
	    struct mbuf *, int));

int	cia_bus_dmamap_load_uio_sgmap __P((bus_dma_tag_t, bus_dmamap_t,
	    struct uio *, int));

int	cia_bus_dmamap_load_raw_sgmap __P((bus_dma_tag_t, bus_dmamap_t,
	    bus_dma_segment_t *, int, bus_size_t, int));

void	cia_bus_dmamap_unload_sgmap __P((bus_dma_tag_t, bus_dmamap_t));

/*
 * Direct-mapped window: 1G at 1G
 */
#define	CIA_DIRECT_MAPPED_BASE	(1*1024*1024*1024)
#define	CIA_DIRECT_MAPPED_SIZE	(1*1024*1024*1024)

/*
 * SGMAP window: 8M at 8M
 */
#define	CIA_SGMAP_MAPPED_BASE	(8*1024*1024)
#define	CIA_SGMAP_MAPPED_SIZE	(8*1024*1024)

/* TLB flush routines: normal CIA vs. the broken-Pyxis workaround. */
void	cia_tlb_invalidate __P((void));
void	cia_broken_pyxis_tlb_invalidate __P((void));

/*
 * Selected once at cia_dma_init() time, depending on whether the
 * broken-Pyxis S/G TLB workaround is needed on this chip.
 */
void	(*cia_tlb_invalidate_fn) __P((void));

#define	CIA_TLB_INVALIDATE()	(*cia_tlb_invalidate_fn)()

/* SGMAP and window parameters for the broken-Pyxis TLB workaround. */
struct alpha_sgmap cia_pyxis_bug_sgmap;
#define	CIA_PYXIS_BUG_BASE	(128*1024*1024)
#define	CIA_PYXIS_BUG_SIZE	(2*1024*1024)
101
/*
 * cia_dma_init --
 *	Set up the two bus_dma tags provided by the CIA core logic
 *	(one direct-mapped, one SGMAP-mapped) and program the CIA's
 *	PCI DMA windows to match.  Called once during attach.
 */
void
cia_dma_init(ccp)
	struct cia_config *ccp;
{
	bus_addr_t tbase;
	bus_dma_tag_t t;

	/*
	 * Initialize the DMA tag used for direct-mapped DMA.
	 */
	t = &ccp->cc_dmat_direct;
	t->_cookie = ccp;
	t->_wbase = CIA_DIRECT_MAPPED_BASE;
	t->_wsize = CIA_DIRECT_MAPPED_SIZE;
	t->_next_window = NULL;
	t->_boundary = 0;
	t->_sgmap = NULL;
	t->_get_tag = cia_dma_get_tag;
	/*
	 * Map creation is overridden so the Pyxis page-crossing bug
	 * workaround can be applied per-map; everything else uses the
	 * generic direct-mapped back ends.
	 */
	t->_dmamap_create = cia_bus_dmamap_create_direct;
	t->_dmamap_destroy = _bus_dmamap_destroy;
	t->_dmamap_load = _bus_dmamap_load_direct;
	t->_dmamap_load_mbuf = _bus_dmamap_load_mbuf_direct;
	t->_dmamap_load_uio = _bus_dmamap_load_uio_direct;
	t->_dmamap_load_raw = _bus_dmamap_load_raw_direct;
	t->_dmamap_unload = _bus_dmamap_unload;
	t->_dmamap_sync = _bus_dmamap_sync;

	t->_dmamem_alloc = _bus_dmamem_alloc;
	t->_dmamem_free = _bus_dmamem_free;
	t->_dmamem_map = _bus_dmamem_map;
	t->_dmamem_unmap = _bus_dmamem_unmap;
	t->_dmamem_mmap = _bus_dmamem_mmap;

	/*
	 * Initialize the DMA tag used for sgmap-mapped DMA.
	 */
	t = &ccp->cc_dmat_sgmap;
	t->_cookie = ccp;
	t->_wbase = CIA_SGMAP_MAPPED_BASE;
	t->_wsize = CIA_SGMAP_MAPPED_SIZE;
	t->_next_window = NULL;
	t->_boundary = 0;
	t->_sgmap = &ccp->cc_sgmap;
	t->_get_tag = cia_dma_get_tag;
	/* Load/unload go through the S/G wrappers so the TLB is flushed. */
	t->_dmamap_create = alpha_sgmap_dmamap_create;
	t->_dmamap_destroy = alpha_sgmap_dmamap_destroy;
	t->_dmamap_load = cia_bus_dmamap_load_sgmap;
	t->_dmamap_load_mbuf = cia_bus_dmamap_load_mbuf_sgmap;
	t->_dmamap_load_uio = cia_bus_dmamap_load_uio_sgmap;
	t->_dmamap_load_raw = cia_bus_dmamap_load_raw_sgmap;
	t->_dmamap_unload = cia_bus_dmamap_unload_sgmap;
	t->_dmamap_sync = _bus_dmamap_sync;

	t->_dmamem_alloc = _bus_dmamem_alloc;
	t->_dmamem_free = _bus_dmamem_free;
	t->_dmamem_map = _bus_dmamem_map;
	t->_dmamem_unmap = _bus_dmamem_unmap;
	t->_dmamem_mmap = _bus_dmamem_mmap;

	/*
	 * The firmware has set up window 1 as a 1G direct-mapped DMA
	 * window beginning at 1G.  We leave it alone.  Leave window
	 * 0 alone until we reconfigure it for SGMAP-mapped DMA.
	 * Windows 2 and 3 are already disabled.
	 */

	/*
	 * Initialize the SGMAP.  Must align page table to 32k
	 * (hardware bug?).
	 */
	alpha_sgmap_init(t, &ccp->cc_sgmap, "cia_sgmap",
	    CIA_SGMAP_MAPPED_BASE, 0, CIA_SGMAP_MAPPED_SIZE,
	    sizeof(u_int64_t), NULL, (32*1024));

	/*
	 * Set up window 0 as an 8MB SGMAP-mapped window
	 * starting at 8MB.  Each register write is followed by a
	 * memory barrier so the CSR updates reach the chip in order.
	 */
	REGVAL(CIA_PCI_W0BASE) = CIA_SGMAP_MAPPED_BASE |
	    CIA_PCI_WnBASE_SG_EN | CIA_PCI_WnBASE_W_EN;
	alpha_mb();

	REGVAL(CIA_PCI_W0MASK) = CIA_PCI_WnMASK_8M;
	alpha_mb();

	/*
	 * Point the window's translation base at the SGMAP page table.
	 * The physical address must fit in the TnBASE field; if it
	 * doesn't, the SGMAP was allocated somewhere unusable.
	 */
	tbase = ccp->cc_sgmap.aps_ptpa >> CIA_PCI_TnBASE_SHIFT;
	if ((tbase & CIA_PCI_TnBASE_MASK) != tbase)
		panic("cia_dma_init: bad page table address");
	REGVAL(CIA_PCI_T0BASE) = tbase;
	alpha_mb();

	/*
	 * Pass 1 and 2 (i.e. revision <= 1) of the Pyxis have a
	 * broken scatter/gather TLB; it cannot be invalidated.  To
	 * work around this problem, we configure window 2 as an SG
	 * 2M window at 128M, which we use in DMA loopback mode to
	 * read a spill page.  This works by causing TLB misses,
	 * causing the old entries to be purged to make room for
	 * the new entries coming in for the spill page.
	 */
	if ((ccp->cc_flags & CCF_ISPYXIS) != 0 && ccp->cc_rev <= 1) {
		u_int64_t *page_table;
		int i;

		cia_tlb_invalidate_fn =
		    cia_broken_pyxis_tlb_invalidate;

		alpha_sgmap_init(t, &cia_pyxis_bug_sgmap,
		    "pyxis_bug_sgmap", CIA_PYXIS_BUG_BASE, 0,
		    CIA_PYXIS_BUG_SIZE, sizeof(u_int64_t), NULL,
		    (32*1024));

		/* Window 2: 2M SGMAP window at 128M, as described above. */
		REGVAL(CIA_PCI_W2BASE) = CIA_PYXIS_BUG_BASE |
		    CIA_PCI_WnBASE_SG_EN | CIA_PCI_WnBASE_W_EN;
		alpha_mb();

		REGVAL(CIA_PCI_W2MASK) = CIA_PCI_WnMASK_2M;
		alpha_mb();

		tbase = cia_pyxis_bug_sgmap.aps_ptpa >>
		    CIA_PCI_TnBASE_SHIFT;
		if ((tbase & CIA_PCI_TnBASE_MASK) != tbase)
			panic("cia_dma_init: bad page table address");
		REGVAL(CIA_PCI_T2BASE) = tbase;
		alpha_mb();

		/*
		 * Initialize the page table to point at the spill
		 * page.  Leave the last entry invalid.
		 */
		pci_sgmap_pte64_init_spill_page_pte();
		for (i = 0, page_table = cia_pyxis_bug_sgmap.aps_pt;
		     i < (CIA_PYXIS_BUG_SIZE / PAGE_SIZE) - 1; i++) {
			page_table[i] =
			    pci_sgmap_pte64_prefetch_spill_page_pte;
		}
		alpha_mb();
	} else
		cia_tlb_invalidate_fn = cia_tlb_invalidate;

	/* Start with a clean S/G TLB. */
	CIA_TLB_INVALIDATE();

	/* XXX XXX BEGIN XXX XXX */
	{							/* XXX */
		/* Legacy hook: offset applied by old-style DMA code. */
		extern paddr_t alpha_XXX_dmamap_or;		/* XXX */
		alpha_XXX_dmamap_or = CIA_DIRECT_MAPPED_BASE;	/* XXX */
	}							/* XXX */
	/* XXX XXX END XXX XXX */
}
251
252 /*
253 * Return the bus dma tag to be used for the specified bus type.
254 * INTERNAL USE ONLY!
255 */
256 bus_dma_tag_t
257 cia_dma_get_tag(t, bustype)
258 bus_dma_tag_t t;
259 alpha_bus_t bustype;
260 {
261 struct cia_config *ccp = t->_cookie;
262
263 switch (bustype) {
264 case ALPHA_BUS_PCI:
265 case ALPHA_BUS_EISA:
266 /*
267 * Systems with a CIA can only support 1G
268 * of memory, so we use the direct-mapped window
269 * on busses that have 32-bit DMA.
270 */
271 return (&ccp->cc_dmat_direct);
272
273 case ALPHA_BUS_ISA:
274 /*
275 * ISA doesn't have enough address bits to use
276 * the direct-mapped DMA window, so we must use
277 * SGMAPs.
278 */
279 return (&ccp->cc_dmat_sgmap);
280
281 default:
282 panic("cia_dma_get_tag: shouldn't be here, really...");
283 }
284 }
285
286 /*
287 * Create a CIA direct-mapped DMA map.
288 */
289 int
290 cia_bus_dmamap_create_direct(t, size, nsegments, maxsegsz, boundary,
291 flags, dmamp)
292 bus_dma_tag_t t;
293 bus_size_t size;
294 int nsegments;
295 bus_size_t maxsegsz;
296 bus_size_t boundary;
297 int flags;
298 bus_dmamap_t *dmamp;
299 {
300 struct cia_config *ccp = t->_cookie;
301 bus_dmamap_t map;
302 int error;
303
304 error = _bus_dmamap_create(t, size, nsegments, maxsegsz,
305 boundary, flags, dmamp);
306 if (error)
307 return (error);
308
309 map = *dmamp;
310
311 if ((ccp->cc_flags & CCF_PYXISBUG) != 0 &&
312 map->_dm_segcnt > 1) {
313 /*
314 * We have a Pyxis with the DMA page crossing bug, make
315 * sure we don't coalesce adjacent DMA segments.
316 *
317 * NOTE: We can only do this if the max segment count
318 * is greater than 1. This is because many network
319 * drivers allocate large contiguous blocks of memory
320 * for control data structures, even though they won't
321 * do any single DMA that crosses a page coundary.
322 * -- thorpej (at) netbsd.org, 2/5/2000
323 */
324 map->_dm_flags |= DMAMAP_NO_COALESCE;
325 }
326
327 return (0);
328 }
329
330 /*
331 * Load a CIA SGMAP-mapped DMA map with a linear buffer.
332 */
333 int
334 cia_bus_dmamap_load_sgmap(t, map, buf, buflen, p, flags)
335 bus_dma_tag_t t;
336 bus_dmamap_t map;
337 void *buf;
338 bus_size_t buflen;
339 struct proc *p;
340 int flags;
341 {
342 int error;
343
344 error = pci_sgmap_pte64_load(t, map, buf, buflen, p, flags,
345 t->_sgmap);
346 if (error == 0)
347 CIA_TLB_INVALIDATE();
348
349 return (error);
350 }
351
352 /*
353 * Load a CIA SGMAP-mapped DMA map with an mbuf chain.
354 */
355 int
356 cia_bus_dmamap_load_mbuf_sgmap(t, map, m, flags)
357 bus_dma_tag_t t;
358 bus_dmamap_t map;
359 struct mbuf *m;
360 int flags;
361 {
362 int error;
363
364 error = pci_sgmap_pte64_load_mbuf(t, map, m, flags, t->_sgmap);
365 if (error == 0)
366 CIA_TLB_INVALIDATE();
367
368 return (error);
369 }
370
371 /*
372 * Load a CIA SGMAP-mapped DMA map with a uio.
373 */
374 int
375 cia_bus_dmamap_load_uio_sgmap(t, map, uio, flags)
376 bus_dma_tag_t t;
377 bus_dmamap_t map;
378 struct uio *uio;
379 int flags;
380 {
381 int error;
382
383 error = pci_sgmap_pte64_load_uio(t, map, uio, flags, t->_sgmap);
384 if (error == 0)
385 CIA_TLB_INVALIDATE();
386
387 return (error);
388 }
389
390 /*
391 * Load a CIA SGMAP-mapped DMA map with raw memory.
392 */
393 int
394 cia_bus_dmamap_load_raw_sgmap(t, map, segs, nsegs, size, flags)
395 bus_dma_tag_t t;
396 bus_dmamap_t map;
397 bus_dma_segment_t *segs;
398 int nsegs;
399 bus_size_t size;
400 int flags;
401 {
402 int error;
403
404 error = pci_sgmap_pte64_load_raw(t, map, segs, nsegs, size, flags,
405 t->_sgmap);
406 if (error == 0)
407 CIA_TLB_INVALIDATE();
408
409 return (error);
410 }
411
412 /*
413 * Unload a CIA DMA map.
414 */
415 void
416 cia_bus_dmamap_unload_sgmap(t, map)
417 bus_dma_tag_t t;
418 bus_dmamap_t map;
419 {
420
421 /*
422 * Invalidate any SGMAP page table entries used by this
423 * mapping.
424 */
425 pci_sgmap_pte64_unload(t, map, t->_sgmap);
426 CIA_TLB_INVALIDATE();
427
428 /*
429 * Do the generic bits of the unload.
430 */
431 _bus_dmamap_unload(t, map);
432 }
433
434 /*
435 * Flush the CIA scatter/gather TLB.
436 */
437 void
438 cia_tlb_invalidate()
439 {
440
441 alpha_mb();
442 REGVAL(CIA_PCI_TBIA) = CIA_PCI_TBIA_ALL;
443 alpha_mb();
444 }
445
446 /*
447 * Flush the scatter/gather TLB on broken Pyxis chips.
448 */
/*
 * Flush the scatter/gather TLB on broken Pyxis chips (pass 1/2),
 * whose TLB cannot be invalidated directly.  Instead, force TLB
 * misses through the window-2 spill-page SGMAP set up in
 * cia_dma_init(), evicting all existing entries.
 */
void
cia_broken_pyxis_tlb_invalidate()
{
	volatile u_int64_t dummy;
	u_int32_t ctrl;
	int i, s;

	/* Block interrupts while the chip is in loopback mode. */
	s = splhigh();

	/*
	 * Put the Pyxis into PCI loopback mode.
	 */
	alpha_mb();
	ctrl = REGVAL(CIA_CSR_CTRL);
	REGVAL(CIA_CSR_CTRL) = ctrl | CTRL_PCI_LOOP_EN;
	alpha_mb();

	/*
	 * Now, read from PCI dense memory space at offset 128M (our
	 * target window base), skipping 64k on each read.  This forces
	 * S/G TLB misses.
	 *
	 * XXX Looks like the TLB entries are `not quite LRU'.  We need
	 * XXX to read more times than there are actual tags!
	 */
	for (i = 0; i < CIA_TLB_NTAGS + 4; i++) {
		/* Result is discarded; the read exists only for its miss. */
		dummy = *((volatile u_int64_t *)
		    ALPHA_PHYS_TO_K0SEG(CIA_PCI_DENSE + CIA_PYXIS_BUG_BASE +
		    (i * 65536)));
	}

	/*
	 * Restore normal PCI operation.
	 */
	alpha_mb();
	REGVAL(CIA_CSR_CTRL) = ctrl;
	alpha_mb();

	splx(s);
}
489