cia_dma.c revision 1.16 1 /* $NetBSD: cia_dma.c,v 1.16 2000/06/29 08:58:46 mrg Exp $ */
2
3 /*-
4 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 #include <sys/cdefs.h> /* RCS ID & Copyright macro defns */
41
42 __KERNEL_RCSID(0, "$NetBSD: cia_dma.c,v 1.16 2000/06/29 08:58:46 mrg Exp $");
43
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/kernel.h>
47 #include <sys/device.h>
48 #include <sys/malloc.h>
49
50 #include <uvm/uvm_extern.h>
51
52 #define _ALPHA_BUS_DMA_PRIVATE
53 #include <machine/bus.h>
54
55 #include <dev/pci/pcireg.h>
56 #include <dev/pci/pcivar.h>
57 #include <alpha/pci/ciareg.h>
58 #include <alpha/pci/ciavar.h>
59
/* Select the per-bus DMA tag (direct-mapped vs. SGMAP). */
bus_dma_tag_t cia_dma_get_tag __P((bus_dma_tag_t, alpha_bus_t));

/* bus_dmamap method for the direct-mapped window. */
int	cia_bus_dmamap_create_direct __P((bus_dma_tag_t, bus_size_t, int,
	    bus_size_t, bus_size_t, int, bus_dmamap_t *));

/* bus_dmamap methods for the SGMAP-mapped window. */
int	cia_bus_dmamap_create_sgmap __P((bus_dma_tag_t, bus_size_t, int,
	    bus_size_t, bus_size_t, int, bus_dmamap_t *));

void	cia_bus_dmamap_destroy_sgmap __P((bus_dma_tag_t, bus_dmamap_t));

int	cia_bus_dmamap_load_sgmap __P((bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct proc *, int));

int	cia_bus_dmamap_load_mbuf_sgmap __P((bus_dma_tag_t, bus_dmamap_t,
	    struct mbuf *, int));

int	cia_bus_dmamap_load_uio_sgmap __P((bus_dma_tag_t, bus_dmamap_t,
	    struct uio *, int));

int	cia_bus_dmamap_load_raw_sgmap __P((bus_dma_tag_t, bus_dmamap_t,
	    bus_dma_segment_t *, int, bus_size_t, int));

void	cia_bus_dmamap_unload_sgmap __P((bus_dma_tag_t, bus_dmamap_t));

/*
 * Direct-mapped window: 1G at 1G
 */
#define	CIA_DIRECT_MAPPED_BASE	(1*1024*1024*1024)
#define	CIA_DIRECT_MAPPED_SIZE	(1*1024*1024*1024)

/*
 * SGMAP window: 8M at 8M
 */
#define	CIA_SGMAP_MAPPED_BASE	(8*1024*1024)
#define	CIA_SGMAP_MAPPED_SIZE	(8*1024*1024)

/* S/G TLB flush implementations; one is selected in cia_dma_init(). */
void	cia_tlb_invalidate __P((void));
void	cia_broken_pyxis_tlb_invalidate __P((void));

/* Points at the TLB-flush routine appropriate for this chipset rev. */
void	(*cia_tlb_invalidate_fn) __P((void));

#define	CIA_TLB_INVALIDATE()	(*cia_tlb_invalidate_fn)()

/* Loopback SGMAP used only by the broken-Pyxis TLB workaround below. */
struct alpha_sgmap cia_pyxis_bug_sgmap;
#define	CIA_PYXIS_BUG_BASE	(128*1024*1024)
#define	CIA_PYXIS_BUG_SIZE	(2*1024*1024)
106
/*
 * cia_dma_init --
 *	Set up the two bus_dma tags (direct-mapped and SGMAP) for a
 *	CIA/Pyxis chipset and program the PCI DMA windows to match.
 *	Called once at autoconfiguration time.
 */
void
cia_dma_init(ccp)
	struct cia_config *ccp;
{
	bus_addr_t tbase;
	bus_dma_tag_t t;

	/*
	 * Initialize the DMA tag used for direct-mapped DMA.
	 */
	t = &ccp->cc_dmat_direct;
	t->_cookie = ccp;
	t->_wbase = CIA_DIRECT_MAPPED_BASE;
	t->_wsize = CIA_DIRECT_MAPPED_SIZE;
	t->_next_window = NULL;
	t->_boundary = 0;
	t->_sgmap = NULL;
	t->_get_tag = cia_dma_get_tag;
	/* Map create is chipset-specific (Pyxis bug); the rest is generic. */
	t->_dmamap_create = cia_bus_dmamap_create_direct;
	t->_dmamap_destroy = _bus_dmamap_destroy;
	t->_dmamap_load = _bus_dmamap_load_direct;
	t->_dmamap_load_mbuf = _bus_dmamap_load_mbuf_direct;
	t->_dmamap_load_uio = _bus_dmamap_load_uio_direct;
	t->_dmamap_load_raw = _bus_dmamap_load_raw_direct;
	t->_dmamap_unload = _bus_dmamap_unload;
	t->_dmamap_sync = _bus_dmamap_sync;

	t->_dmamem_alloc = _bus_dmamem_alloc;
	t->_dmamem_free = _bus_dmamem_free;
	t->_dmamem_map = _bus_dmamem_map;
	t->_dmamem_unmap = _bus_dmamem_unmap;
	t->_dmamem_mmap = _bus_dmamem_mmap;

	/*
	 * Initialize the DMA tag used for sgmap-mapped DMA.
	 */
	t = &ccp->cc_dmat_sgmap;
	t->_cookie = ccp;
	t->_wbase = CIA_SGMAP_MAPPED_BASE;
	t->_wsize = CIA_SGMAP_MAPPED_SIZE;
	t->_next_window = NULL;
	t->_boundary = 0;
	t->_sgmap = &ccp->cc_sgmap;
	t->_get_tag = cia_dma_get_tag;
	/* All map operations go through the SGMAP-aware wrappers below. */
	t->_dmamap_create = cia_bus_dmamap_create_sgmap;
	t->_dmamap_destroy = cia_bus_dmamap_destroy_sgmap;
	t->_dmamap_load = cia_bus_dmamap_load_sgmap;
	t->_dmamap_load_mbuf = cia_bus_dmamap_load_mbuf_sgmap;
	t->_dmamap_load_uio = cia_bus_dmamap_load_uio_sgmap;
	t->_dmamap_load_raw = cia_bus_dmamap_load_raw_sgmap;
	t->_dmamap_unload = cia_bus_dmamap_unload_sgmap;
	t->_dmamap_sync = _bus_dmamap_sync;

	t->_dmamem_alloc = _bus_dmamem_alloc;
	t->_dmamem_free = _bus_dmamem_free;
	t->_dmamem_map = _bus_dmamem_map;
	t->_dmamem_unmap = _bus_dmamem_unmap;
	t->_dmamem_mmap = _bus_dmamem_mmap;

	/*
	 * The firmware has set up window 1 as a 1G direct-mapped DMA
	 * window beginning at 1G.  We leave it alone.  Leave window
	 * 0 alone until we reconfigure it for SGMAP-mapped DMA.
	 * Windows 2 and 3 are already disabled.
	 */

	/*
	 * Initialize the SGMAP.  Must align page table to 32k
	 * (hardware bug?).
	 */
	alpha_sgmap_init(t, &ccp->cc_sgmap, "cia_sgmap",
	    CIA_SGMAP_MAPPED_BASE, 0, CIA_SGMAP_MAPPED_SIZE,
	    sizeof(u_int64_t), NULL, (32*1024));

	/*
	 * Set up window 0 as an 8MB SGMAP-mapped window
	 * starting at 8MB.  Each register write is followed by a
	 * memory barrier so the writes reach the chipset in order.
	 */
	REGVAL(CIA_PCI_W0BASE) = CIA_SGMAP_MAPPED_BASE |
	    CIA_PCI_WnBASE_SG_EN | CIA_PCI_WnBASE_W_EN;
	alpha_mb();

	REGVAL(CIA_PCI_W0MASK) = CIA_PCI_WnMASK_8M;
	alpha_mb();

	/* Page table base must fit in the TnBASE field after shifting. */
	tbase = ccp->cc_sgmap.aps_ptpa >> CIA_PCI_TnBASE_SHIFT;
	if ((tbase & CIA_PCI_TnBASE_MASK) != tbase)
		panic("cia_dma_init: bad page table address");
	REGVAL(CIA_PCI_T0BASE) = tbase;
	alpha_mb();

	/*
	 * Pass 1 and 2 (i.e. revision <= 1) of the Pyxis have a
	 * broken scatter/gather TLB; it cannot be invalidated.  To
	 * work around this problem, we configure window 2 as an SG
	 * 2M window at 128M, which we use in DMA loopback mode to
	 * read a spill page.  This works by causing TLB misses,
	 * causing the old entries to be purged to make room for
	 * the new entries coming in for the spill page.
	 */
	if ((ccp->cc_flags & CCF_ISPYXIS) != 0 && ccp->cc_rev <= 1) {
		u_int64_t *page_table;
		int i;

		cia_tlb_invalidate_fn =
		    cia_broken_pyxis_tlb_invalidate;

		alpha_sgmap_init(t, &cia_pyxis_bug_sgmap,
		    "pyxis_bug_sgmap", CIA_PYXIS_BUG_BASE, 0,
		    CIA_PYXIS_BUG_SIZE, sizeof(u_int64_t), NULL,
		    (32*1024));

		REGVAL(CIA_PCI_W2BASE) = CIA_PYXIS_BUG_BASE |
		    CIA_PCI_WnBASE_SG_EN | CIA_PCI_WnBASE_W_EN;
		alpha_mb();

		REGVAL(CIA_PCI_W2MASK) = CIA_PCI_WnMASK_2M;
		alpha_mb();

		tbase = cia_pyxis_bug_sgmap.aps_ptpa >>
		    CIA_PCI_TnBASE_SHIFT;
		if ((tbase & CIA_PCI_TnBASE_MASK) != tbase)
			panic("cia_dma_init: bad page table address");
		REGVAL(CIA_PCI_T2BASE) = tbase;
		alpha_mb();

		/*
		 * Initialize the page table to point at the spill
		 * page.  Leave the last entry invalid.
		 */
		pci_sgmap_pte64_init_spill_page_pte();
		for (i = 0, page_table = cia_pyxis_bug_sgmap.aps_pt;
		     i < (CIA_PYXIS_BUG_SIZE / PAGE_SIZE) - 1; i++) {
			page_table[i] =
			    pci_sgmap_pte64_prefetch_spill_page_pte;
		}
		alpha_mb();
	} else
		cia_tlb_invalidate_fn = cia_tlb_invalidate;

	CIA_TLB_INVALIDATE();

	/* XXX XXX BEGIN XXX XXX */
	{							/* XXX */
		extern paddr_t alpha_XXX_dmamap_or;		/* XXX */
		alpha_XXX_dmamap_or = CIA_DIRECT_MAPPED_BASE;	/* XXX */
	}							/* XXX */
	/* XXX XXX END XXX XXX */
}
256
257 /*
258 * Return the bus dma tag to be used for the specified bus type.
259 * INTERNAL USE ONLY!
260 */
261 bus_dma_tag_t
262 cia_dma_get_tag(t, bustype)
263 bus_dma_tag_t t;
264 alpha_bus_t bustype;
265 {
266 struct cia_config *ccp = t->_cookie;
267
268 switch (bustype) {
269 case ALPHA_BUS_PCI:
270 case ALPHA_BUS_EISA:
271 /*
272 * Systems with a CIA can only support 1G
273 * of memory, so we use the direct-mapped window
274 * on busses that have 32-bit DMA.
275 */
276 return (&ccp->cc_dmat_direct);
277
278 case ALPHA_BUS_ISA:
279 /*
280 * ISA doesn't have enough address bits to use
281 * the direct-mapped DMA window, so we must use
282 * SGMAPs.
283 */
284 return (&ccp->cc_dmat_sgmap);
285
286 default:
287 panic("cia_dma_get_tag: shouldn't be here, really...");
288 }
289 }
290
291 /*
292 * Create a CIA direct-mapped DMA map.
293 */
294 int
295 cia_bus_dmamap_create_direct(t, size, nsegments, maxsegsz, boundary,
296 flags, dmamp)
297 bus_dma_tag_t t;
298 bus_size_t size;
299 int nsegments;
300 bus_size_t maxsegsz;
301 bus_size_t boundary;
302 int flags;
303 bus_dmamap_t *dmamp;
304 {
305 struct cia_config *ccp = t->_cookie;
306 bus_dmamap_t map;
307 int error;
308
309 error = _bus_dmamap_create(t, size, nsegments, maxsegsz,
310 boundary, flags, dmamp);
311 if (error)
312 return (error);
313
314 map = *dmamp;
315
316 if ((ccp->cc_flags & CCF_PYXISBUG) != 0 &&
317 map->_dm_segcnt > 1) {
318 /*
319 * We have a Pyxis with the DMA page crossing bug, make
320 * sure we don't coalesce adjacent DMA segments.
321 *
322 * NOTE: We can only do this if the max segment count
323 * is greater than 1. This is because many network
324 * drivers allocate large contiguous blocks of memory
325 * for control data structures, even though they won't
326 * do any single DMA that crosses a page coundary.
327 * -- thorpej (at) netbsd.org, 2/5/2000
328 */
329 map->_dm_flags |= DMAMAP_NO_COALESCE;
330 }
331
332 return (0);
333 }
334
335 /*
336 * Create a CIA SGMAP-mapped DMA map.
337 */
338 int
339 cia_bus_dmamap_create_sgmap(t, size, nsegments, maxsegsz, boundary,
340 flags, dmamp)
341 bus_dma_tag_t t;
342 bus_size_t size;
343 int nsegments;
344 bus_size_t maxsegsz;
345 bus_size_t boundary;
346 int flags;
347 bus_dmamap_t *dmamp;
348 {
349 bus_dmamap_t map;
350 int error;
351
352 error = _bus_dmamap_create(t, size, nsegments, maxsegsz,
353 boundary, flags, dmamp);
354 if (error)
355 return (error);
356
357 map = *dmamp;
358
359 if (flags & BUS_DMA_ALLOCNOW) {
360 error = alpha_sgmap_alloc(map, round_page(size),
361 t->_sgmap, flags);
362 if (error)
363 cia_bus_dmamap_destroy_sgmap(t, map);
364 }
365
366 return (error);
367 }
368
/*
 * Destroy a CIA SGMAP-mapped DMA map.
 */
void
cia_bus_dmamap_destroy_sgmap(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{

	/* Release any SGMAP space held (e.g. via BUS_DMA_ALLOCNOW). */
	if (map->_dm_flags & DMAMAP_HAS_SGMAP)
		alpha_sgmap_free(map, t->_sgmap);

	/* Then do the generic bits of the destroy. */
	_bus_dmamap_destroy(t, map);
}
383
384 /*
385 * Load a CIA SGMAP-mapped DMA map with a linear buffer.
386 */
387 int
388 cia_bus_dmamap_load_sgmap(t, map, buf, buflen, p, flags)
389 bus_dma_tag_t t;
390 bus_dmamap_t map;
391 void *buf;
392 bus_size_t buflen;
393 struct proc *p;
394 int flags;
395 {
396 int error;
397
398 error = pci_sgmap_pte64_load(t, map, buf, buflen, p, flags,
399 t->_sgmap);
400 if (error == 0)
401 CIA_TLB_INVALIDATE();
402
403 return (error);
404 }
405
406 /*
407 * Load a CIA SGMAP-mapped DMA map with an mbuf chain.
408 */
409 int
410 cia_bus_dmamap_load_mbuf_sgmap(t, map, m, flags)
411 bus_dma_tag_t t;
412 bus_dmamap_t map;
413 struct mbuf *m;
414 int flags;
415 {
416 int error;
417
418 error = pci_sgmap_pte64_load_mbuf(t, map, m, flags, t->_sgmap);
419 if (error == 0)
420 CIA_TLB_INVALIDATE();
421
422 return (error);
423 }
424
425 /*
426 * Load a CIA SGMAP-mapped DMA map with a uio.
427 */
428 int
429 cia_bus_dmamap_load_uio_sgmap(t, map, uio, flags)
430 bus_dma_tag_t t;
431 bus_dmamap_t map;
432 struct uio *uio;
433 int flags;
434 {
435 int error;
436
437 error = pci_sgmap_pte64_load_uio(t, map, uio, flags, t->_sgmap);
438 if (error == 0)
439 CIA_TLB_INVALIDATE();
440
441 return (error);
442 }
443
444 /*
445 * Load a CIA SGMAP-mapped DMA map with raw memory.
446 */
447 int
448 cia_bus_dmamap_load_raw_sgmap(t, map, segs, nsegs, size, flags)
449 bus_dma_tag_t t;
450 bus_dmamap_t map;
451 bus_dma_segment_t *segs;
452 int nsegs;
453 bus_size_t size;
454 int flags;
455 {
456 int error;
457
458 error = pci_sgmap_pte64_load_raw(t, map, segs, nsegs, size, flags,
459 t->_sgmap);
460 if (error == 0)
461 CIA_TLB_INVALIDATE();
462
463 return (error);
464 }
465
/*
 * Unload a CIA DMA map.  The S/G TLB must be flushed after the PTEs
 * are invalidated and before the generic unload releases the map's
 * resources, so no stale translations can be used.
 */
void
cia_bus_dmamap_unload_sgmap(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{

	/*
	 * Invalidate any SGMAP page table entries used by this
	 * mapping.
	 */
	pci_sgmap_pte64_unload(t, map, t->_sgmap);
	CIA_TLB_INVALIDATE();

	/*
	 * Do the generic bits of the unload.
	 */
	_bus_dmamap_unload(t, map);
}
487
/*
 * Flush the CIA scatter/gather TLB.  Used on chips whose TBIA
 * register works (see cia_broken_pyxis_tlb_invalidate() otherwise).
 */
void
cia_tlb_invalidate()
{

	/*
	 * Barriers on both sides order the TBIA write with respect
	 * to surrounding PTE updates and register accesses.
	 */
	alpha_mb();
	REGVAL(CIA_PCI_TBIA) = CIA_PCI_TBIA_ALL;
	alpha_mb();
}
499
/*
 * Flush the scatter/gather TLB on broken Pyxis chips.
 *
 * Pass 1/2 Pyxis cannot invalidate its S/G TLB directly, so instead
 * we force TLB misses through the loopback window set up in
 * cia_dma_init(), evicting whatever entries were there.
 */
void
cia_broken_pyxis_tlb_invalidate()
{
	volatile u_int64_t dummy;
	u_int32_t ctrl;
	int i, s;

	/* Block interrupts: loopback mode must not be observed by others. */
	s = splhigh();

	/*
	 * Put the Pyxis into PCI loopback mode.
	 */
	alpha_mb();
	ctrl = REGVAL(CIA_CSR_CTRL);
	REGVAL(CIA_CSR_CTRL) = ctrl | CTRL_PCI_LOOP_EN;
	alpha_mb();

	/*
	 * Now, read from PCI dense memory space at offset 128M (our
	 * target window base), skipping 64k on each read.  This forces
	 * S/G TLB misses.
	 *
	 * XXX Looks like the TLB entries are `not quite LRU'.  We need
	 * XXX to read more times than there are actual tags!
	 */
	for (i = 0; i < CIA_TLB_NTAGS + 4; i++) {
		dummy = *((volatile u_int64_t *)
		    ALPHA_PHYS_TO_K0SEG(CIA_PCI_DENSE + CIA_PYXIS_BUG_BASE +
		    (i * 65536)));
	}

	/*
	 * Restore normal PCI operation.
	 */
	alpha_mb();
	REGVAL(CIA_CSR_CTRL) = ctrl;
	alpha_mb();

	splx(s);
}
543