/*	$NetBSD: dvma.c,v 1.3 1997/02/24 01:37:52 jeremy Exp $	*/

/*-
 * Copyright (c) 1996 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Gordon W. Ross and Jeremy Cooper.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * DVMA (Direct Virtual Memory Access - like DMA)
 *
 * In the Sun3 architecture, memory cycles initiated by secondary bus
 * masters (DVMA devices) passed through the same MMU that governed CPU
 * accesses.  All DVMA devices were wired in such a way that an offset
 * was added to the addresses they issued, causing them to access virtual
 * memory starting at address 0x0FF00000 (the offset).  Enabling a DVMA
 * device to access main memory therefore only involved creating valid
 * mappings in the MMU that translated these high addresses into the
 * appropriate physical addresses.
 *
 * The Sun3x presents a challenge to programming DVMA because the MMU is no
 * longer shared by both secondary bus masters and the CPU.  The MC68030's
 * built-in MMU serves only to manage virtual memory accesses initiated by
 * the CPU.  Secondary bus master accesses pass through a different MMU,
 * aptly named the 'I/O Mapper'.  Teaching every device driver that uses
 * DVMA that these two address spaces are disconnected would require a
 * tremendous amount of code rewriting.  To avoid this, we ensure that the
 * I/O Mapper and the MC68030 MMU are programmed together, so that DVMA
 * mappings are consistent in both the CPU virtual address space and the
 * secondary bus master address space - creating an environment just like
 * the Sun3 system.
 *
 * The maximum address space that any DVMA device in the Sun3x architecture
 * is capable of addressing is 24 bits wide (16 Megabytes).  We can alias
 * all of the mappings that exist in the I/O Mapper by duplicating them in
 * a specially reserved section of the CPU's virtual address space, 16
 * Megabytes in size.  Whenever a DVMA buffer is allocated, the allocation
 * code enters a mapping both in the MC68030 MMU page tables and in the
 * I/O Mapper.
 *
 * The address returned by the allocation routine is a virtual address that
 * the requesting driver must use to access the buffer.  It is up to the
 * device driver to convert this virtual address into the appropriate slave
 * address that its device should issue to access the buffer.  (There will
 * be routines that assist the driver in doing so.)
 */
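
/*
 * A minimal sketch of how a driver is expected to use the routines in
 * this file (illustrative only; `buf', `len', `canwait' and the
 * BUS_OBIO bus type are placeholders for whatever the particular
 * driver actually uses):
 *
 *	void *dva;
 *	u_long slave;
 *
 *	dva = dvma_mapin(buf, len, canwait);
 *	if (dva == NULL)
 *		...fail: no DVMA space available...
 *	slave = dvma_kvtopa(dva, BUS_OBIO);
 *	...hand `slave' to the device and run the transfer...
 *	dvma_mapout(dva, len);
 */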
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/map.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/user.h>
#include <sys/core.h>
#include <sys/exec.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>

#include <machine/autoconf.h>
#include <machine/cpu.h>
#include <machine/enable.h>
#include <machine/reg.h>
#include <machine/pmap.h>
#include <machine/dvma.h>
#include <machine/machdep.h>

#include "iommu.h"

/*
 * Use a resource map to manage DVMA scratch-memory pages.
 */

/* Number of slots in dvmamap. */
int dvma_max_segs = 256;
struct map *dvmamap;

void
dvma_init()
{

	/*
	 * Create the resource map for DVMA pages.
	 */
	dvmamap = malloc((sizeof(struct map) * dvma_max_segs),
		M_DEVBUF, M_WAITOK);

	rminit(dvmamap, btoc(DVMA_SPACE_LENGTH), btoc(0xFF000000),
		"dvmamap", dvma_max_segs);

	/*
	 * Enable DVMA in the System Enable register.
	 * Note: This is only necessary for VME slave accesses.
	 *       On-board devices are always capable of DVMA.
	 * *enable_reg |= ENA_SDVMA;
	 */
}
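
/*
 * For reference, the rminit() call above sets the map up to manage
 * DVMA_SPACE_LENGTH bytes' worth of pages starting at the page number
 * of the DVMA base address, 0xFF000000.  Assuming an 8K page size
 * (NBPG == 0x2000) and the 16MB DVMA space described above:
 *
 *	size = btoc(0x01000000) = 0x800 pages
 *	base = btoc(0xFF000000) = 0x7F800
 *
 * so the page numbers rmalloc() hands back convert directly to CPU
 * virtual addresses within DVMA space via ctob(), as dvma_mapin()
 * does below.
 */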

/*
 * Given a DVMA address, return the physical address that
 * would be used by some OTHER bus-master besides the CPU.
 * (Examples: on-board ie/le, VME xy board).
 */
u_long
dvma_kvtopa(kva, bustype)
	void *	kva;
	int	bustype;
{
	u_long	addr, mask;

	addr = (u_long)kva;
	if ((addr & DVMA_SPACE_START) != DVMA_SPACE_START)
		panic("dvma_kvtopa: bad dvma addr=0x%lx", addr);

	/* Everything has just 24 bits. */
	mask = DVMA_SLAVE_MASK;

	return(addr & mask);
}
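
/*
 * Example of the masking above, with assumed values
 * DVMA_SPACE_START == 0xFF000000 and DVMA_SLAVE_MASK == 0x00FFFFFF:
 * a buffer double-mapped at CPU virtual address 0xFF01A000 yields
 *
 *	0xFF01A000 & 0x00FFFFFF == 0x0001A000
 *
 * which is the 24-bit slave address the device must issue to reach it.
 */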

/*
 * Map a range [kva, kva+len] of wired kernel virtual addresses
 * to a kernel address in DVMA space.
 */
void *
dvma_mapin(kmem_va, len, canwait)
	void *	kmem_va;
	int	len, canwait;
{
	void *	dvma_addr;
	vm_offset_t	kva, tva;
	register int npf, s;
	register vm_offset_t pa;
	long off, pn;

	kva = (u_long)kmem_va;
#ifdef	DIAGNOSTIC
	/*
	 * Addresses below VM_MIN_KERNEL_ADDRESS are not part of the kernel
	 * map and should not participate in DVMA.
	 */
	if (kva < VM_MIN_KERNEL_ADDRESS)
		panic("dvma_mapin: bad kva");
#endif

	/*
	 * Calculate the offset of the data buffer from a page boundary.
	 */
	off = (int)kva & PGOFSET;
	kva -= off;		/* Truncate starting address to nearest page. */
	len = round_page(len + off);	/* Round the buffer length to pages. */
	npf = btoc(len);	/* Determine the number of pages to be mapped. */
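
	/*
	 * Worked example of the arithmetic above, assuming an 8K page
	 * size (PGOFSET == 0x1FFF) and hypothetical values
	 * kmem_va == 0xFEA02204, len == 0x2100:
	 *
	 *	off = 0x0204, kva = 0xFEA02000,
	 *	len = round_page(0x2304) = 0x4000, npf = 2 pages,
	 *
	 * so the two pages covering the buffer are double-mapped and
	 * the caller gets back (DVMA address of those pages) + 0x0204.
	 */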

	s = splimp();
	for (;;) {
		/*
		 * Try to allocate DVMA space of the appropriate size
		 * in which to do a transfer.
		 */
		pn = rmalloc(dvmamap, npf);

		if (pn != 0)
			break;
		if (canwait) {
			(void)tsleep(dvmamap, PRIBIO+1, "physio", 0);
			continue;
		}
		splx(s);
		return NULL;
	}
	splx(s);

	/*
	 * Tva is the starting page to which the data buffer will be double
	 * mapped.  Dvma_addr is the starting address of the buffer within
	 * that page and is the return value of the function.
	 */
	tva = ctob(pn);
	dvma_addr = (void *) (tva + off);

	for (;npf--; kva += NBPG, tva += NBPG) {
		/*
		 * Retrieve the physical address of each page in the buffer
		 * and enter mappings into the I/O MMU so they may be seen
		 * by external bus masters and into the special DVMA space
		 * in the MC68030 MMU so they may be seen by the CPU.
		 * PMAP_NC maps the page non-cached, keeping the CPU's and
		 * the device's views of the buffer consistent.
		 */
		pa = pmap_extract(pmap_kernel(), kva);
#ifdef	DEBUG
		if (pa == 0)
			panic("dvma_mapin: null page frame");
#endif	/* DEBUG */

		iommu_enter((tva & DVMA_SLAVE_MASK), pa);
		pmap_enter(pmap_kernel(), tva, pa | PMAP_NC,
			VM_PROT_READ|VM_PROT_WRITE, 1);
	}

	return (dvma_addr);
}

/*
 * Remove the double map of the buffer at `dvma_addr' in DVMA space.
 *
 * TODO - This function might be the perfect place to handle the
 *        synchronization between the DVMA cache and central RAM
 *        on the 3/470.
 */
void
dvma_mapout(dvma_addr, len)
	void *	dvma_addr;
	int	len;
{
	u_long	kva;
	int	s, off;

	kva = (u_long)dvma_addr;
	off = (int)kva & PGOFSET;
	kva -= off;
	len = round_page(len + off);

	iommu_remove((kva & DVMA_SLAVE_MASK), len);

	/*
	 * XXX - don't call pmap_remove() with DVMA space yet.
	 * XXX   It cannot (currently) handle the removal
	 * XXX   of address ranges which do not participate in the
	 * XXX   PV system by virtue of their _virtual_ addresses.
	 * XXX   DVMA is one of these special address spaces.
	 */
#ifdef	DVMA_ON_PVLIST
	pmap_remove(pmap_kernel(), kva, kva + len);
#endif	/* DVMA_ON_PVLIST */

	s = splimp();
	rmfree(dvmamap, btoc(len), btoc(kva));
	wakeup(dvmamap);
	splx(s);
}