/*	$NetBSD: sgmap.c,v 1.4 2000/03/07 00:04:13 matt Exp $	*/
2 1.1 ragge
3 1.1 ragge /*-
4 1.1 ragge * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
5 1.1 ragge * All rights reserved.
6 1.1 ragge *
7 1.1 ragge * This code is derived from software contributed to The NetBSD Foundation
8 1.1 ragge * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 1.1 ragge * NASA Ames Research Center.
10 1.1 ragge *
11 1.1 ragge * Redistribution and use in source and binary forms, with or without
12 1.1 ragge * modification, are permitted provided that the following conditions
13 1.1 ragge * are met:
14 1.1 ragge * 1. Redistributions of source code must retain the above copyright
15 1.1 ragge * notice, this list of conditions and the following disclaimer.
16 1.1 ragge * 2. Redistributions in binary form must reproduce the above copyright
17 1.1 ragge * notice, this list of conditions and the following disclaimer in the
18 1.1 ragge * documentation and/or other materials provided with the distribution.
19 1.1 ragge * 3. All advertising materials mentioning features or use of this software
20 1.1 ragge * must display the following acknowledgement:
21 1.1 ragge * This product includes software developed by the NetBSD
22 1.1 ragge * Foundation, Inc. and its contributors.
23 1.1 ragge * 4. Neither the name of The NetBSD Foundation nor the names of its
24 1.1 ragge * contributors may be used to endorse or promote products derived
25 1.1 ragge * from this software without specific prior written permission.
26 1.1 ragge *
27 1.1 ragge * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 1.1 ragge * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 1.1 ragge * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 1.1 ragge * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 1.1 ragge * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 1.1 ragge * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 1.1 ragge * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 1.1 ragge * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 1.1 ragge * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 1.1 ragge * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 1.1 ragge * POSSIBILITY OF SUCH DAMAGE.
38 1.1 ragge */
39 1.1 ragge
40 1.1 ragge #include <sys/param.h>
41 1.1 ragge #include <sys/systm.h>
42 1.1 ragge #include <sys/kernel.h>
43 1.1 ragge #include <sys/proc.h>
44 1.1 ragge #include <sys/malloc.h>
45 1.1 ragge
46 1.1 ragge #include <vm/vm.h>
47 1.1 ragge
48 1.1 ragge #include <machine/bus.h>
49 1.1 ragge #include <machine/sgmap.h>
50 1.1 ragge
51 1.1 ragge void
52 1.1 ragge vax_sgmap_init(t, sgmap, name, sgvabase, sgvasize, ptva, minptalign)
53 1.1 ragge bus_dma_tag_t t;
54 1.1 ragge struct vax_sgmap *sgmap;
55 1.1 ragge const char *name;
56 1.1 ragge bus_addr_t sgvabase;
57 1.1 ragge bus_size_t sgvasize;
58 1.1 ragge struct pte *ptva;
59 1.1 ragge bus_size_t minptalign;
60 1.1 ragge {
61 1.1 ragge bus_dma_segment_t seg;
62 1.1 ragge size_t ptsize;
63 1.1 ragge int rseg;
64 1.1 ragge
65 1.1 ragge if (sgvasize & PGOFSET) {
66 1.1 ragge printf("size botch for sgmap `%s'\n", name);
67 1.1 ragge goto die;
68 1.1 ragge }
69 1.1 ragge
70 1.1 ragge sgmap->aps_sgvabase = sgvabase;
71 1.1 ragge sgmap->aps_sgvasize = sgvasize;
72 1.1 ragge
73 1.1 ragge if (ptva != NULL) {
74 1.1 ragge /*
75 1.1 ragge * We already have a page table; this may be a system
76 1.1 ragge * where the page table resides in bridge-resident SRAM.
77 1.1 ragge */
78 1.1 ragge sgmap->aps_pt = ptva;
79 1.1 ragge } else {
80 1.1 ragge /*
81 1.1 ragge * Compute the page table size and allocate it. At minimum,
82 1.1 ragge * this must be aligned to the page table size. However,
83 1.1 ragge * some platforms have more strict alignment reqirements.
84 1.1 ragge */
85 1.1 ragge ptsize = (sgvasize / VAX_NBPG) * sizeof(struct pte);
86 1.1 ragge if (minptalign != 0) {
87 1.1 ragge if (minptalign < ptsize)
88 1.1 ragge minptalign = ptsize;
89 1.1 ragge } else
90 1.1 ragge minptalign = ptsize;
91 1.1 ragge if (bus_dmamem_alloc(t, ptsize, minptalign, 0, &seg, 1, &rseg,
92 1.1 ragge BUS_DMA_NOWAIT)) {
93 1.1 ragge panic("unable to allocate page table for sgmap `%s'\n",
94 1.1 ragge name);
95 1.1 ragge goto die;
96 1.1 ragge }
97 1.1 ragge sgmap->aps_pt = (struct pte *)(seg.ds_addr | KERNBASE);
98 1.1 ragge }
99 1.1 ragge
100 1.1 ragge /*
101 1.1 ragge * Create the extent map used to manage the virtual address
102 1.1 ragge * space.
103 1.1 ragge */
104 1.1 ragge sgmap->aps_ex = extent_create((char *)name, sgvabase, sgvasize - 1,
105 1.1 ragge M_DMAMAP, NULL, 0, EX_NOWAIT|EX_NOCOALESCE);
106 1.1 ragge if (sgmap->aps_ex == NULL) {
107 1.1 ragge printf("unable to create extent map for sgmap `%s'\n", name);
108 1.1 ragge goto die;
109 1.1 ragge }
110 1.1 ragge
111 1.1 ragge return;
112 1.1 ragge die:
113 1.1 ragge panic("vax_sgmap_init");
114 1.1 ragge }
115 1.1 ragge
116 1.1 ragge int
117 1.1 ragge vax_sgmap_alloc(map, origlen, sgmap, flags)
118 1.1 ragge bus_dmamap_t map;
119 1.1 ragge bus_size_t origlen;
120 1.1 ragge struct vax_sgmap *sgmap;
121 1.1 ragge int flags;
122 1.1 ragge {
123 1.1 ragge int error;
124 1.1 ragge bus_size_t len = origlen;
125 1.1 ragge
126 1.1 ragge #ifdef DIAGNOSTIC
127 1.1 ragge if (map->_dm_flags & DMAMAP_HAS_SGMAP)
128 1.1 ragge panic("vax_sgmap_alloc: already have sgva space");
129 1.1 ragge #endif
130 1.1 ragge
131 1.2 ragge map->_dm_sgvalen = vax_round_page(len);
132 1.1 ragge
133 1.1 ragge #if 0
134 1.1 ragge printf("len %x -> %x, _dm_sgvalen %x _dm_boundary %x boundary %x -> ",
135 1.1 ragge origlen, len, map->_dm_sgvalen, map->_dm_boundary, boundary);
136 1.1 ragge #endif
137 1.1 ragge
138 1.1 ragge error = extent_alloc(sgmap->aps_ex, map->_dm_sgvalen, VAX_NBPG,
139 1.1 ragge 0, (flags & BUS_DMA_NOWAIT) ? EX_NOWAIT : EX_WAITOK,
140 1.1 ragge &map->_dm_sgva);
141 1.1 ragge #if 0
142 1.1 ragge printf("error %d _dm_sgva %x\n", error, map->_dm_sgva);
143 1.1 ragge #endif
144 1.1 ragge
145 1.1 ragge if (error == 0)
146 1.1 ragge map->_dm_flags |= DMAMAP_HAS_SGMAP;
147 1.1 ragge else
148 1.1 ragge map->_dm_flags &= ~DMAMAP_HAS_SGMAP;
149 1.1 ragge
150 1.1 ragge return (error);
151 1.1 ragge }
152 1.1 ragge
153 1.1 ragge void
154 1.1 ragge vax_sgmap_free(map, sgmap)
155 1.1 ragge bus_dmamap_t map;
156 1.1 ragge struct vax_sgmap *sgmap;
157 1.1 ragge {
158 1.1 ragge
159 1.1 ragge #ifdef DIAGNOSTIC
160 1.1 ragge if ((map->_dm_flags & DMAMAP_HAS_SGMAP) == 0)
161 1.1 ragge panic("vax_sgmap_free: no sgva space to free");
162 1.1 ragge #endif
163 1.1 ragge
164 1.1 ragge if (extent_free(sgmap->aps_ex, map->_dm_sgva, map->_dm_sgvalen,
165 1.1 ragge EX_NOWAIT))
166 1.1 ragge panic("vax_sgmap_free");
167 1.1 ragge
168 1.1 ragge map->_dm_flags &= ~DMAMAP_HAS_SGMAP;
169 1.1 ragge }
170 1.1 ragge
/*
 * vax_sgmap_load --
 *	Load a linear buffer into the DMA map: allocate sg virtual
 *	address space if needed, then fill the bus page table with one
 *	PTE per VAX page of the buffer so the bus sees the buffer as a
 *	single contiguous segment at _dm_sgva + page offset.
 *
 *	p is the owning process for user-space buffers, or NULL for
 *	kernel virtual addresses.  Returns 0 on success, EINVAL if the
 *	buffer exceeds the map's size, or an allocation error.
 */
int
vax_sgmap_load(t, map, buf, buflen, p, flags, sgmap)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
	struct vax_sgmap *sgmap;
{
	vaddr_t endva, va = (vaddr_t)buf;
	paddr_t pa;
	bus_addr_t dmaoffset;
	bus_size_t dmalen;
	long *pte, *page_table = (long *)sgmap->aps_pt;
	int pteidx, error;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	if (buflen > map->_dm_size)
		return (EINVAL);

	/*
	 * Remember the offset into the first page and the total
	 * transfer length.
	 */
	dmaoffset = ((u_long)buf) & VAX_PGOFSET;
	dmalen = buflen;


	/*
	 * Allocate the necessary virtual address space for the
	 * mapping.  Round the size, since we deal with whole pages.
	 */
	endva = vax_round_page(va + buflen);
	va = vax_trunc_page(va);
	/* Skip allocation if this map already holds sg space. */
	if ((map->_dm_flags & DMAMAP_HAS_SGMAP) == 0) {
		error = vax_sgmap_alloc(map, (endva - va), sgmap, flags);
		if (error)
			return (error);
	}

	/* First PTE for our range: one table slot per VAX page. */
	pteidx = map->_dm_sgva >> VAX_PGSHIFT;
	pte = &page_table[pteidx];

	/*
	 * Generate the DMA address.
	 */
	map->dm_segs[0].ds_addr = map->_dm_sgva + dmaoffset;
	map->dm_segs[0].ds_len = dmalen;


	/* Record where the PTEs live so unload can invalidate them. */
	map->_dm_pteidx = pteidx;
	map->_dm_ptecnt = 0;

	/*
	 * Create the bus-specific page tables.
	 * Can be done much more efficient than this.
	 */
	for (; va < endva; va += VAX_NBPG, pte++, map->_dm_ptecnt++) {
		/*
		 * Get the physical address for this segment.
		 */
		if (p != NULL)
			/*
			 * NOTE(review): the return value is discarded; if
			 * pmap_extract fails, pa is stale/uninitialized and
			 * a bogus PTE is written.  Callers presumably wire
			 * the buffer first -- confirm.
			 */
			(void) pmap_extract(p->p_vmspace->vm_map.pmap, va, &pa);
		else
			pa = kvtophys(va);

		/*
		 * Load the current PTE with this page.
		 */
		*pte = (pa >> VAX_PGSHIFT) | PG_V;
	}

	/* Mapping is valid: exactly one contiguous bus segment. */
	map->dm_mapsize = buflen;
	map->dm_nsegs = 1;
	return (0);
}
253 1.1 ragge
/*
 * vax_sgmap_load_mbuf --
 *	Load an mbuf chain into the DMA map.  Unimplemented on this
 *	port: always panics; never returns.
 */
int
vax_sgmap_load_mbuf(t, map, m, flags, sgmap)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct mbuf *m;
	int flags;
	struct vax_sgmap *sgmap;
{

	panic("vax_sgmap_load_mbuf : not implemented");
	/* NOTREACHED */
}
265 1.1 ragge
/*
 * vax_sgmap_load_uio --
 *	Load a uio (scatter/gather I/O request) into the DMA map.
 *	Unimplemented on this port: always panics; never returns.
 */
int
vax_sgmap_load_uio(t, map, uio, flags, sgmap)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct uio *uio;
	int flags;
	struct vax_sgmap *sgmap;
{

	panic("vax_sgmap_load_uio : not implemented");
	/* NOTREACHED */
}
277 1.1 ragge
/*
 * vax_sgmap_load_raw --
 *	Load raw (pre-allocated) DMA segments into the DMA map.
 *	Unimplemented on this port: always panics; never returns.
 */
int
vax_sgmap_load_raw(t, map, segs, nsegs, size, flags, sgmap)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_dma_segment_t *segs;
	int nsegs;
	bus_size_t size;
	int flags;
	struct vax_sgmap *sgmap;
{

	panic("vax_sgmap_load_raw : not implemented");
	/* NOTREACHED */
}
291 1.1 ragge
292 1.1 ragge void
293 1.1 ragge vax_sgmap_unload(t, map, sgmap)
294 1.1 ragge bus_dma_tag_t t;
295 1.1 ragge bus_dmamap_t map;
296 1.1 ragge struct vax_sgmap *sgmap;
297 1.1 ragge {
298 1.1 ragge long *pte, *page_table = (long *)sgmap->aps_pt;
299 1.4 matt int ptecnt;
300 1.1 ragge
301 1.1 ragge /*
302 1.1 ragge * Invalidate the PTEs for the mapping.
303 1.1 ragge */
304 1.4 matt for (ptecnt = map->_dm_ptecnt, pte = &page_table[map->_dm_pteidx];
305 1.4 matt ptecnt-- != 0; ) {
306 1.4 matt *pte++ = 0;
307 1.1 ragge }
308 1.1 ragge
309 1.1 ragge /*
310 1.1 ragge * Free the virtual address space used by the mapping
311 1.1 ragge * if necessary.
312 1.1 ragge */
313 1.1 ragge if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
314 1.1 ragge vax_sgmap_free(map, sgmap);
315 1.1 ragge /*
316 1.1 ragge * Mark the mapping invalid.
317 1.1 ragge */
318 1.1 ragge map->dm_mapsize = 0;
319 1.1 ragge map->dm_nsegs = 0;
320 1.1 ragge }
321