/* $NetBSD: cia_dma.c,v 1.15 2000/02/06 01:26:50 thorpej Exp $ */

/*-
 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
39
40 #include <sys/cdefs.h> /* RCS ID & Copyright macro defns */
41
42 __KERNEL_RCSID(0, "$NetBSD: cia_dma.c,v 1.15 2000/02/06 01:26:50 thorpej Exp $");
43
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/kernel.h>
47 #include <sys/device.h>
48 #include <sys/malloc.h>
49 #include <vm/vm.h>
50
51 #define _ALPHA_BUS_DMA_PRIVATE
52 #include <machine/bus.h>
53
54 #include <dev/pci/pcireg.h>
55 #include <dev/pci/pcivar.h>
56 #include <alpha/pci/ciareg.h>
57 #include <alpha/pci/ciavar.h>
58
/*
 * Method implementations for the two DMA tags set up by cia_dma_init():
 * a direct-mapped tag and an SGMAP (scatter/gather map) tag.
 */
bus_dma_tag_t cia_dma_get_tag __P((bus_dma_tag_t, alpha_bus_t));

int	cia_bus_dmamap_create_direct __P((bus_dma_tag_t, bus_size_t, int,
	    bus_size_t, bus_size_t, int, bus_dmamap_t *));

int	cia_bus_dmamap_create_sgmap __P((bus_dma_tag_t, bus_size_t, int,
	    bus_size_t, bus_size_t, int, bus_dmamap_t *));

void	cia_bus_dmamap_destroy_sgmap __P((bus_dma_tag_t, bus_dmamap_t));

int	cia_bus_dmamap_load_sgmap __P((bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct proc *, int));

int	cia_bus_dmamap_load_mbuf_sgmap __P((bus_dma_tag_t, bus_dmamap_t,
	    struct mbuf *, int));

int	cia_bus_dmamap_load_uio_sgmap __P((bus_dma_tag_t, bus_dmamap_t,
	    struct uio *, int));

int	cia_bus_dmamap_load_raw_sgmap __P((bus_dma_tag_t, bus_dmamap_t,
	    bus_dma_segment_t *, int, bus_size_t, int));

void	cia_bus_dmamap_unload_sgmap __P((bus_dma_tag_t, bus_dmamap_t));

/*
 * Direct-mapped window: 1G at 1G
 */
#define	CIA_DIRECT_MAPPED_BASE	(1*1024*1024*1024)
#define	CIA_DIRECT_MAPPED_SIZE	(1*1024*1024*1024)

/*
 * SGMAP window: 8M at 8M
 */
#define	CIA_SGMAP_MAPPED_BASE	(8*1024*1024)
#define	CIA_SGMAP_MAPPED_SIZE	(8*1024*1024)

/* S/G TLB invalidation: the normal routine and the broken-Pyxis variant. */
void	cia_tlb_invalidate __P((void));
void	cia_broken_pyxis_tlb_invalidate __P((void));

/*
 * Set once by cia_dma_init() to whichever invalidate routine this
 * chip revision needs; invoked via CIA_TLB_INVALIDATE().
 */
void	(*cia_tlb_invalidate_fn) __P((void));

#define	CIA_TLB_INVALIDATE()	(*cia_tlb_invalidate_fn)()

/*
 * Spill SGMAP used by the broken-Pyxis TLB workaround:
 * a 2M SG window at 128M (see cia_dma_init()).
 */
struct alpha_sgmap cia_pyxis_bug_sgmap;
#define	CIA_PYXIS_BUG_BASE	(128*1024*1024)
#define	CIA_PYXIS_BUG_SIZE	(2*1024*1024)
105
/*
 * Initialize DMA support for the CIA/Pyxis: set up the direct-mapped
 * and SGMAP bus_dma tags, program PCI window 0 as an 8M SGMAP window,
 * and install the TLB-invalidate workaround on broken Pyxis revisions.
 */
void
cia_dma_init(ccp)
	struct cia_config *ccp;
{
	bus_addr_t tbase;
	bus_dma_tag_t t;

	/*
	 * Initialize the DMA tag used for direct-mapped DMA.
	 */
	t = &ccp->cc_dmat_direct;
	t->_cookie = ccp;
	t->_wbase = CIA_DIRECT_MAPPED_BASE;
	t->_wsize = CIA_DIRECT_MAPPED_SIZE;
	t->_next_window = NULL;
	t->_boundary = 0;
	t->_sgmap = NULL;
	t->_get_tag = cia_dma_get_tag;
	/* Chipset-specific create hook (applies the Pyxis no-coalesce fix). */
	t->_dmamap_create = cia_bus_dmamap_create_direct;
	t->_dmamap_destroy = _bus_dmamap_destroy;
	t->_dmamap_load = _bus_dmamap_load_direct;
	t->_dmamap_load_mbuf = _bus_dmamap_load_mbuf_direct;
	t->_dmamap_load_uio = _bus_dmamap_load_uio_direct;
	t->_dmamap_load_raw = _bus_dmamap_load_raw_direct;
	t->_dmamap_unload = _bus_dmamap_unload;
	t->_dmamap_sync = _bus_dmamap_sync;

	t->_dmamem_alloc = _bus_dmamem_alloc;
	t->_dmamem_free = _bus_dmamem_free;
	t->_dmamem_map = _bus_dmamem_map;
	t->_dmamem_unmap = _bus_dmamem_unmap;
	t->_dmamem_mmap = _bus_dmamem_mmap;

	/*
	 * Initialize the DMA tag used for sgmap-mapped DMA.
	 */
	t = &ccp->cc_dmat_sgmap;
	t->_cookie = ccp;
	t->_wbase = CIA_SGMAP_MAPPED_BASE;
	t->_wsize = CIA_SGMAP_MAPPED_SIZE;
	t->_next_window = NULL;
	t->_boundary = 0;
	t->_sgmap = &ccp->cc_sgmap;
	t->_get_tag = cia_dma_get_tag;
	/* All map operations go through the SGMAP-aware wrappers below. */
	t->_dmamap_create = cia_bus_dmamap_create_sgmap;
	t->_dmamap_destroy = cia_bus_dmamap_destroy_sgmap;
	t->_dmamap_load = cia_bus_dmamap_load_sgmap;
	t->_dmamap_load_mbuf = cia_bus_dmamap_load_mbuf_sgmap;
	t->_dmamap_load_uio = cia_bus_dmamap_load_uio_sgmap;
	t->_dmamap_load_raw = cia_bus_dmamap_load_raw_sgmap;
	t->_dmamap_unload = cia_bus_dmamap_unload_sgmap;
	t->_dmamap_sync = _bus_dmamap_sync;

	t->_dmamem_alloc = _bus_dmamem_alloc;
	t->_dmamem_free = _bus_dmamem_free;
	t->_dmamem_map = _bus_dmamem_map;
	t->_dmamem_unmap = _bus_dmamem_unmap;
	t->_dmamem_mmap = _bus_dmamem_mmap;

	/*
	 * The firmware has set up window 1 as a 1G direct-mapped DMA
	 * window beginning at 1G.  We leave it alone.  Leave window
	 * 0 alone until we reconfigure it for SGMAP-mapped DMA.
	 * Windows 2 and 3 are already disabled.
	 */

	/*
	 * Initialize the SGMAP.  Must align page table to 32k
	 * (hardware bug?).
	 */
	alpha_sgmap_init(t, &ccp->cc_sgmap, "cia_sgmap",
	    CIA_SGMAP_MAPPED_BASE, 0, CIA_SGMAP_MAPPED_SIZE,
	    sizeof(u_int64_t), NULL, (32*1024));

	/*
	 * Set up window 0 as an 8MB SGMAP-mapped window
	 * starting at 8MB.  Each register write is followed by a
	 * memory barrier to ensure ordering at the chip.
	 */
	REGVAL(CIA_PCI_W0BASE) = CIA_SGMAP_MAPPED_BASE |
	    CIA_PCI_WnBASE_SG_EN | CIA_PCI_WnBASE_W_EN;
	alpha_mb();

	REGVAL(CIA_PCI_W0MASK) = CIA_PCI_WnMASK_8M;
	alpha_mb();

	/* Point the window's translation base at the SGMAP page table. */
	tbase = ccp->cc_sgmap.aps_ptpa >> CIA_PCI_TnBASE_SHIFT;
	if ((tbase & CIA_PCI_TnBASE_MASK) != tbase)
		panic("cia_dma_init: bad page table address");
	REGVAL(CIA_PCI_T0BASE) = tbase;
	alpha_mb();

	/*
	 * Pass 1 and 2 (i.e. revision <= 1) of the Pyxis have a
	 * broken scatter/gather TLB; it cannot be invalidated.  To
	 * work around this problem, we configure window 2 as an SG
	 * 2M window at 128M, which we use in DMA loopback mode to
	 * read a spill page.  This works by causing TLB misses,
	 * causing the old entries to be purged to make room for
	 * the new entries coming in for the spill page.
	 */
	if ((ccp->cc_flags & CCF_ISPYXIS) != 0 && ccp->cc_rev <= 1) {
		u_int64_t *page_table;
		int i;

		cia_tlb_invalidate_fn =
		    cia_broken_pyxis_tlb_invalidate;

		alpha_sgmap_init(t, &cia_pyxis_bug_sgmap,
		    "pyxis_bug_sgmap", CIA_PYXIS_BUG_BASE, 0,
		    CIA_PYXIS_BUG_SIZE, sizeof(u_int64_t), NULL,
		    (32*1024));

		/* Program window 2: 2M SG window at 128M. */
		REGVAL(CIA_PCI_W2BASE) = CIA_PYXIS_BUG_BASE |
		    CIA_PCI_WnBASE_SG_EN | CIA_PCI_WnBASE_W_EN;
		alpha_mb();

		REGVAL(CIA_PCI_W2MASK) = CIA_PCI_WnMASK_2M;
		alpha_mb();

		tbase = cia_pyxis_bug_sgmap.aps_ptpa >>
		    CIA_PCI_TnBASE_SHIFT;
		if ((tbase & CIA_PCI_TnBASE_MASK) != tbase)
			panic("cia_dma_init: bad page table address");
		REGVAL(CIA_PCI_T2BASE) = tbase;
		alpha_mb();

		/*
		 * Initialize the page table to point at the spill
		 * page.  Leave the last entry invalid.
		 */
		pci_sgmap_pte64_init_spill_page_pte();
		for (i = 0, page_table = cia_pyxis_bug_sgmap.aps_pt;
		     i < (CIA_PYXIS_BUG_SIZE / PAGE_SIZE) - 1; i++) {
			page_table[i] =
			    pci_sgmap_pte64_prefetch_spill_page_pte;
		}
		alpha_mb();
	} else
		cia_tlb_invalidate_fn = cia_tlb_invalidate;

	/* Flush any stale S/G translations left over from firmware. */
	CIA_TLB_INVALIDATE();

	/* XXX XXX BEGIN XXX XXX */
	{							/* XXX */
		extern paddr_t alpha_XXX_dmamap_or;		/* XXX */
		alpha_XXX_dmamap_or = CIA_DIRECT_MAPPED_BASE;	/* XXX */
	}							/* XXX */
	/* XXX XXX END XXX XXX */
}
255
256 /*
257 * Return the bus dma tag to be used for the specified bus type.
258 * INTERNAL USE ONLY!
259 */
260 bus_dma_tag_t
261 cia_dma_get_tag(t, bustype)
262 bus_dma_tag_t t;
263 alpha_bus_t bustype;
264 {
265 struct cia_config *ccp = t->_cookie;
266
267 switch (bustype) {
268 case ALPHA_BUS_PCI:
269 case ALPHA_BUS_EISA:
270 /*
271 * Systems with a CIA can only support 1G
272 * of memory, so we use the direct-mapped window
273 * on busses that have 32-bit DMA.
274 */
275 return (&ccp->cc_dmat_direct);
276
277 case ALPHA_BUS_ISA:
278 /*
279 * ISA doesn't have enough address bits to use
280 * the direct-mapped DMA window, so we must use
281 * SGMAPs.
282 */
283 return (&ccp->cc_dmat_sgmap);
284
285 default:
286 panic("cia_dma_get_tag: shouldn't be here, really...");
287 }
288 }
289
290 /*
291 * Create a CIA direct-mapped DMA map.
292 */
293 int
294 cia_bus_dmamap_create_direct(t, size, nsegments, maxsegsz, boundary,
295 flags, dmamp)
296 bus_dma_tag_t t;
297 bus_size_t size;
298 int nsegments;
299 bus_size_t maxsegsz;
300 bus_size_t boundary;
301 int flags;
302 bus_dmamap_t *dmamp;
303 {
304 struct cia_config *ccp = t->_cookie;
305 bus_dmamap_t map;
306 int error;
307
308 error = _bus_dmamap_create(t, size, nsegments, maxsegsz,
309 boundary, flags, dmamp);
310 if (error)
311 return (error);
312
313 map = *dmamp;
314
315 if ((ccp->cc_flags & CCF_PYXISBUG) != 0 &&
316 map->_dm_segcnt > 1) {
317 /*
318 * We have a Pyxis with the DMA page crossing bug, make
319 * sure we don't coalesce adjacent DMA segments.
320 *
321 * NOTE: We can only do this if the max segment count
322 * is greater than 1. This is because many network
323 * drivers allocate large contiguous blocks of memory
324 * for control data structures, even though they won't
325 * do any single DMA that crosses a page coundary.
326 * -- thorpej (at) netbsd.org, 2/5/2000
327 */
328 map->_dm_flags |= DMAMAP_NO_COALESCE;
329 }
330
331 return (0);
332 }
333
334 /*
335 * Create a CIA SGMAP-mapped DMA map.
336 */
337 int
338 cia_bus_dmamap_create_sgmap(t, size, nsegments, maxsegsz, boundary,
339 flags, dmamp)
340 bus_dma_tag_t t;
341 bus_size_t size;
342 int nsegments;
343 bus_size_t maxsegsz;
344 bus_size_t boundary;
345 int flags;
346 bus_dmamap_t *dmamp;
347 {
348 bus_dmamap_t map;
349 int error;
350
351 error = _bus_dmamap_create(t, size, nsegments, maxsegsz,
352 boundary, flags, dmamp);
353 if (error)
354 return (error);
355
356 map = *dmamp;
357
358 if (flags & BUS_DMA_ALLOCNOW) {
359 error = alpha_sgmap_alloc(map, round_page(size),
360 t->_sgmap, flags);
361 if (error)
362 cia_bus_dmamap_destroy_sgmap(t, map);
363 }
364
365 return (error);
366 }
367
368 /*
369 * Destroy a CIA SGMAP-mapped DMA map.
370 */
371 void
372 cia_bus_dmamap_destroy_sgmap(t, map)
373 bus_dma_tag_t t;
374 bus_dmamap_t map;
375 {
376
377 if (map->_dm_flags & DMAMAP_HAS_SGMAP)
378 alpha_sgmap_free(map, t->_sgmap);
379
380 _bus_dmamap_destroy(t, map);
381 }
382
383 /*
384 * Load a CIA SGMAP-mapped DMA map with a linear buffer.
385 */
386 int
387 cia_bus_dmamap_load_sgmap(t, map, buf, buflen, p, flags)
388 bus_dma_tag_t t;
389 bus_dmamap_t map;
390 void *buf;
391 bus_size_t buflen;
392 struct proc *p;
393 int flags;
394 {
395 int error;
396
397 error = pci_sgmap_pte64_load(t, map, buf, buflen, p, flags,
398 t->_sgmap);
399 if (error == 0)
400 CIA_TLB_INVALIDATE();
401
402 return (error);
403 }
404
405 /*
406 * Load a CIA SGMAP-mapped DMA map with an mbuf chain.
407 */
408 int
409 cia_bus_dmamap_load_mbuf_sgmap(t, map, m, flags)
410 bus_dma_tag_t t;
411 bus_dmamap_t map;
412 struct mbuf *m;
413 int flags;
414 {
415 int error;
416
417 error = pci_sgmap_pte64_load_mbuf(t, map, m, flags, t->_sgmap);
418 if (error == 0)
419 CIA_TLB_INVALIDATE();
420
421 return (error);
422 }
423
424 /*
425 * Load a CIA SGMAP-mapped DMA map with a uio.
426 */
427 int
428 cia_bus_dmamap_load_uio_sgmap(t, map, uio, flags)
429 bus_dma_tag_t t;
430 bus_dmamap_t map;
431 struct uio *uio;
432 int flags;
433 {
434 int error;
435
436 error = pci_sgmap_pte64_load_uio(t, map, uio, flags, t->_sgmap);
437 if (error == 0)
438 CIA_TLB_INVALIDATE();
439
440 return (error);
441 }
442
443 /*
444 * Load a CIA SGMAP-mapped DMA map with raw memory.
445 */
446 int
447 cia_bus_dmamap_load_raw_sgmap(t, map, segs, nsegs, size, flags)
448 bus_dma_tag_t t;
449 bus_dmamap_t map;
450 bus_dma_segment_t *segs;
451 int nsegs;
452 bus_size_t size;
453 int flags;
454 {
455 int error;
456
457 error = pci_sgmap_pte64_load_raw(t, map, segs, nsegs, size, flags,
458 t->_sgmap);
459 if (error == 0)
460 CIA_TLB_INVALIDATE();
461
462 return (error);
463 }
464
465 /*
466 * Unload a CIA DMA map.
467 */
/*
 * Unload a CIA DMA map.  Order matters here: the SGMAP PTEs are torn
 * down first, the S/G TLB is flushed so no stale translations remain,
 * and only then is the generic map state released.
 */
void
cia_bus_dmamap_unload_sgmap(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{

	/*
	 * Invalidate any SGMAP page table entries used by this
	 * mapping.
	 */
	pci_sgmap_pte64_unload(t, map, t->_sgmap);
	CIA_TLB_INVALIDATE();

	/*
	 * Do the generic bits of the unload.
	 */
	_bus_dmamap_unload(t, map);
}
486
487 /*
488 * Flush the CIA scatter/gather TLB.
489 */
/*
 * Flush the CIA scatter/gather TLB by writing the invalidate-all
 * register.  The write is bracketed by memory barriers so that it is
 * ordered with respect to surrounding PTE updates and DMA accesses.
 */
void
cia_tlb_invalidate()
{

	alpha_mb();
	REGVAL(CIA_PCI_TBIA) = CIA_PCI_TBIA_ALL;
	alpha_mb();
}
498
499 /*
500 * Flush the scatter/gather TLB on broken Pyxis chips.
501 */
/*
 * Flush the scatter/gather TLB on broken Pyxis chips (pass 1/2), whose
 * TLB cannot be invalidated directly.  Instead, with interrupts blocked,
 * the chip is put into PCI loopback mode and enough reads are issued
 * through the spill-page SG window (set up in cia_dma_init()) to force
 * TLB misses that evict every stale entry.
 */
void
cia_broken_pyxis_tlb_invalidate()
{
	volatile u_int64_t dummy;	/* sink for the loopback reads */
	u_int32_t ctrl;
	int i, s;

	/* Block interrupts while the chip is in loopback mode. */
	s = splhigh();

	/*
	 * Put the Pyxis into PCI loopback mode.
	 */
	alpha_mb();
	ctrl = REGVAL(CIA_CSR_CTRL);
	REGVAL(CIA_CSR_CTRL) = ctrl | CTRL_PCI_LOOP_EN;
	alpha_mb();

	/*
	 * Now, read from PCI dense memory space at offset 128M (our
	 * target window base), skipping 64k on each read.  This forces
	 * S/G TLB misses.
	 *
	 * XXX Looks like the TLB entries are `not quite LRU'.  We need
	 * XXX to read more times than there are actual tags!
	 */
	for (i = 0; i < CIA_TLB_NTAGS + 4; i++) {
		dummy = *((volatile u_int64_t *)
		    ALPHA_PHYS_TO_K0SEG(CIA_PCI_DENSE + CIA_PYXIS_BUG_BASE +
		    (i * 65536)));
	}

	/*
	 * Restore normal PCI operation.
	 */
	alpha_mb();
	REGVAL(CIA_CSR_CTRL) = ctrl;
	alpha_mb();

	splx(s);
}
542