/* $NetBSD: sgmap.c,v 1.14 2008/03/11 05:34:03 matt Exp $ */
2
3 /*-
4 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 #include <sys/cdefs.h>
41 __KERNEL_RCSID(0, "$NetBSD: sgmap.c,v 1.14 2008/03/11 05:34:03 matt Exp $");
42
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/kernel.h>
46 #include <sys/proc.h>
47 #include <sys/malloc.h>
48
49 #include <uvm/uvm_extern.h>
50
51 #include <machine/bus.h>
52 #include <machine/sgmap.h>
53
54 void
55 vax_sgmap_init(bus_dma_tag_t t, struct vax_sgmap *sgmap, const char *name,
56 bus_addr_t sgvabase, bus_size_t sgvasize, struct pte *ptva,
57 bus_size_t minptalign)
58 {
59 bus_dma_segment_t seg;
60 size_t ptsize;
61 int rseg;
62
63 if (sgvasize & PGOFSET) {
64 printf("size botch for sgmap `%s'\n", name);
65 goto die;
66 }
67
68 sgmap->aps_sgvabase = sgvabase;
69 sgmap->aps_sgvasize = sgvasize;
70
71 if (ptva != NULL) {
72 /*
73 * We already have a page table; this may be a system
74 * where the page table resides in bridge-resident SRAM.
75 */
76 sgmap->aps_pt = ptva;
77 } else {
78 /*
79 * Compute the page table size and allocate it. At minimum,
80 * this must be aligned to the page table size. However,
81 * some platforms have more strict alignment reqirements.
82 */
83 ptsize = (sgvasize / VAX_NBPG) * sizeof(struct pte);
84 if (minptalign != 0) {
85 if (minptalign < ptsize)
86 minptalign = ptsize;
87 } else
88 minptalign = ptsize;
89 if (bus_dmamem_alloc(t, ptsize, minptalign, 0, &seg, 1, &rseg,
90 BUS_DMA_NOWAIT)) {
91 panic("unable to allocate page table for sgmap `%s'",
92 name);
93 goto die;
94 }
95 sgmap->aps_pt = (struct pte *)(seg.ds_addr | KERNBASE);
96 }
97
98 /*
99 * Create the extent map used to manage the virtual address
100 * space.
101 */
102 sgmap->aps_ex = extent_create(name, sgvabase, sgvasize - 1,
103 M_DMAMAP, NULL, 0, EX_NOWAIT|EX_NOCOALESCE);
104 if (sgmap->aps_ex == NULL) {
105 printf("unable to create extent map for sgmap `%s'\n", name);
106 goto die;
107 }
108
109 return;
110 die:
111 panic("vax_sgmap_init");
112 }
113
114 int
115 vax_sgmap_alloc(bus_dmamap_t map, bus_size_t origlen, struct vax_sgmap *sgmap,
116 int flags)
117 {
118 int error;
119 bus_size_t len = origlen;
120
121 #ifdef DIAGNOSTIC
122 if (map->_dm_flags & DMAMAP_HAS_SGMAP)
123 panic("vax_sgmap_alloc: already have sgva space");
124 #endif
125
126 /* If we need a spill page (for the VS4000 SCSI), make sure we
127 * allocate enough space for an extra page.
128 */
129 if (flags & VAX_BUS_DMA_SPILLPAGE) {
130 len += VAX_NBPG;
131 }
132
133 map->_dm_sgvalen = vax_round_page(len);
134 #if 0
135 printf("len %x -> %x, _dm_sgvalen %x _dm_boundary %x boundary %x -> ",
136 origlen, len, map->_dm_sgvalen, map->_dm_boundary, boundary);
137 #endif
138
139 error = extent_alloc(sgmap->aps_ex, map->_dm_sgvalen, VAX_NBPG,
140 0, (flags & BUS_DMA_NOWAIT) ? EX_NOWAIT : EX_WAITOK,
141 &map->_dm_sgva);
142 #if 0
143 printf("error %d _dm_sgva %x\n", error, map->_dm_sgva);
144 #endif
145
146 if (error == 0)
147 map->_dm_flags |= DMAMAP_HAS_SGMAP;
148 else
149 map->_dm_flags &= ~DMAMAP_HAS_SGMAP;
150
151 return (error);
152 }
153
154 void
155 vax_sgmap_free(bus_dmamap_t map, struct vax_sgmap *sgmap)
156 {
157
158 #ifdef DIAGNOSTIC
159 if ((map->_dm_flags & DMAMAP_HAS_SGMAP) == 0)
160 panic("vax_sgmap_free: no sgva space to free");
161 #endif
162
163 if (extent_free(sgmap->aps_ex, map->_dm_sgva, map->_dm_sgvalen,
164 EX_NOWAIT))
165 panic("vax_sgmap_free");
166
167 map->_dm_flags &= ~DMAMAP_HAS_SGMAP;
168 }
169
/*
 * vax_sgmap_load --
 *	Load a linear buffer into a DMA map via the scatter/gather map.
 *
 *	t	DMA tag (unused here, kept for the bus_dma interface).
 *	map	The DMA map to load.
 *	buf	Kernel or user virtual address of the buffer.
 *	buflen	Length of the buffer in bytes.
 *	p	Owning process for a user buffer, or NULL for a kernel
 *		buffer.
 *	flags	BUS_DMA_* flags; VAX_BUS_DMA_SPILLPAGE requests an
 *		extra trailing page (VS4000 SCSI prefetch workaround).
 *	sgmap	The scatter/gather map supplying SG VA space and PTEs.
 *
 *	Produces a single DMA segment (dm_nsegs == 1) whose address is
 *	the allocated SG virtual address plus the buffer's in-page
 *	offset.  Returns 0 on success, EINVAL if buflen exceeds the
 *	map's size, or an allocation error from vax_sgmap_alloc().
 */
int
vax_sgmap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf, bus_size_t buflen,
    struct proc *p, int flags, struct vax_sgmap *sgmap)
{
	vaddr_t endva, va = (vaddr_t)buf;
	paddr_t pa;
	bus_addr_t dmaoffset;
	bus_size_t dmalen;
	/* The SG page table is manipulated as raw longwords. */
	long *pte, *page_table = (long *)sgmap->aps_pt;
	int pteidx, error;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	if (buflen > map->_dm_size)
		return (EINVAL);

	/*
	 * Remember the offset into the first page and the total
	 * transfer length.
	 */
	dmaoffset = ((u_long)buf) & VAX_PGOFSET;
	dmalen = buflen;


	/*
	 * Allocate the necessary virtual address space for the
	 * mapping.  Round the size, since we deal with whole pages.
	 * If the map already has SG VA space (e.g. allocated at map
	 * creation time), reuse it.
	 */
	endva = vax_round_page(va + buflen);
	va = vax_trunc_page(va);
	if ((map->_dm_flags & DMAMAP_HAS_SGMAP) == 0) {
		error = vax_sgmap_alloc(map, (endva - va), sgmap, flags);
		if (error)
			return (error);
	}

	/* Index of the first PTE backing the allocated SG VA region. */
	pteidx = map->_dm_sgva >> VAX_PGSHIFT;
	pte = &page_table[pteidx];

	/*
	 * Generate the DMA address.
	 */
	map->dm_segs[0].ds_addr = map->_dm_sgva + dmaoffset;
	map->dm_segs[0].ds_len = dmalen;


	/* Record the PTE range so vax_sgmap_unload() can invalidate it. */
	map->_dm_pteidx = pteidx;
	map->_dm_ptecnt = 0;

	/*
	 * Create the bus-specific page tables.
	 * Can be done much more efficient than this.
	 */
	for (; va < endva; va += VAX_NBPG, pte++, map->_dm_ptecnt++) {
		/*
		 * Get the physical address for this segment.
		 */
		if (p != NULL)
			/*
			 * NOTE(review): the pmap_extract() result is
			 * discarded; if the user VA is unmapped, pa is
			 * used uninitialized here — presumably callers
			 * guarantee the buffer is wired. Verify.
			 */
			(void) pmap_extract(p->p_vmspace->vm_map.pmap, va, &pa);
		else
			pa = kvtophys(va);

		/*
		 * Load the current PTE with this page.
		 * PTE format: page frame number with the valid bit set.
		 */
		*pte = (pa >> VAX_PGSHIFT) | PG_V;
	}
	/*
	 * The VS4000 SCSI prefetcher doesn't like to end on a page boundary
	 * so add an extra page to quiet it down.  On loop exit `pte' points
	 * one past the last written entry, so pte[-1] duplicates the final
	 * page's mapping (vax_sgmap_alloc reserved the extra slot).
	 */
	if (flags & VAX_BUS_DMA_SPILLPAGE) {
		*pte = pte[-1];
		map->_dm_ptecnt++;
	}

	map->dm_mapsize = buflen;
	map->dm_nsegs = 1;
	return (0);
}
253
/*
 * vax_sgmap_load_mbuf --
 *	Load an mbuf chain into a DMA map.  Not implemented on vax;
 *	always panics (panic() does not return, so the missing return
 *	value is harmless).
 */
int
vax_sgmap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m,
    int flags, struct vax_sgmap *sgmap)
{

	panic("vax_sgmap_load_mbuf : not implemented");
}
261
/*
 * vax_sgmap_load_uio --
 *	Load a uio into a DMA map.  Not implemented on vax; always
 *	panics.
 */
int
vax_sgmap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
    int flags, struct vax_sgmap *sgmap)
{

	panic("vax_sgmap_load_uio : not implemented");
}
269
/*
 * vax_sgmap_load_raw --
 *	Load raw DMA segments into a DMA map.  Not implemented on vax;
 *	always panics.
 */
int
vax_sgmap_load_raw(bus_dma_tag_t t, bus_dmamap_t map, bus_dma_segment_t *segs,
    int nsegs, bus_size_t size, int flags, struct vax_sgmap *sgmap)
{

	panic("vax_sgmap_load_raw : not implemented");
}
277
278 void
279 vax_sgmap_unload(bus_dma_tag_t t, bus_dmamap_t map, struct vax_sgmap *sgmap)
280 {
281 long *pte, *page_table = (long *)sgmap->aps_pt;
282 int ptecnt;
283
284 /*
285 * Invalidate the PTEs for the mapping.
286 */
287 for (ptecnt = map->_dm_ptecnt, pte = &page_table[map->_dm_pteidx];
288 ptecnt-- != 0; ) {
289 *pte++ = 0;
290 }
291
292 /*
293 * Free the virtual address space used by the mapping
294 * if necessary.
295 */
296 if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
297 vax_sgmap_free(map, sgmap);
298 /*
299 * Mark the mapping invalid.
300 */
301 map->dm_mapsize = 0;
302 map->dm_nsegs = 0;
303 }
304