/* $NetBSD: sgmap.c,v 1.16 2010/12/14 23:44:50 matt Exp $ */

/*-
 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sgmap.c,v 1.16 2010/12/14 23:44:50 matt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>

#include <uvm/uvm_extern.h>

#include <machine/sgmap.h>
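
/*
 * A VAX scatter/gather map ("sgmap") lets a DMA engine that addresses a
 * fixed bus window see a physically scattered buffer as contiguous.  The
 * sgmap owns a page table of hardware PTEs: each VAX_NBPG-sized page of
 * bus (DMA) address space is translated through one PTE to an arbitrary
 * physical page.  vax_sgmap_init() sets up the page table and an
 * extent(9) map for the bus address window, vax_sgmap_alloc() and
 * vax_sgmap_free() hand out chunks of that window per bus_dmamap_t, and
 * vax_sgmap_load() fills in the PTEs for a particular buffer.
 */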

void
vax_sgmap_init(bus_dma_tag_t t, struct vax_sgmap *sgmap, const char *name,
	bus_addr_t sgvabase, bus_size_t sgvasize, struct pte *ptva,
	bus_size_t minptalign)
{
	bus_dma_segment_t seg;
	size_t ptsize;
	int rseg;

	if (sgvasize & PGOFSET) {
		printf("size botch for sgmap `%s'\n", name);
		goto die;
	}

	sgmap->aps_sgvabase = sgvabase;
	sgmap->aps_sgvasize = sgvasize;

	if (ptva != NULL) {
		/*
		 * We already have a page table; this may be a system
		 * where the page table resides in bridge-resident SRAM.
		 */
		sgmap->aps_pt = ptva;
	} else {
		/*
		 * Compute the page table size and allocate it.  At minimum,
		 * this must be aligned to the page table size.  However,
		 * some platforms have stricter alignment requirements.
		 */
		ptsize = (sgvasize / VAX_NBPG) * sizeof(struct pte);
		if (minptalign != 0) {
			if (minptalign < ptsize)
				minptalign = ptsize;
		} else
			minptalign = ptsize;
		if (bus_dmamem_alloc(t, ptsize, minptalign, 0, &seg, 1, &rseg,
		    BUS_DMA_NOWAIT)) {
			panic("unable to allocate page table for sgmap `%s'",
			    name);
		}
		sgmap->aps_pt = (struct pte *)(seg.ds_addr | KERNBASE);
	}
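
	/*
	 * Illustrative sizing (example numbers only): a 4 MB SG window of
	 * 512-byte VAX pages needs 4 MB / VAX_NBPG = 8192 PTEs; with a
	 * 4-byte hardware PTE that is a 32 KB page table, allocated above
	 * with at least page-table-sized (here 32 KB) alignment.
	 */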

	/*
	 * Create the extent map used to manage the virtual address
	 * space.
	 */
	sgmap->aps_ex = extent_create(name, sgvabase, sgvasize - 1,
	    M_DMAMAP, NULL, 0, EX_NOWAIT|EX_NOCOALESCE);
	if (sgmap->aps_ex == NULL) {
		printf("unable to create extent map for sgmap `%s'\n", name);
		goto die;
	}

	return;
 die:
	panic("vax_sgmap_init");
}

int
vax_sgmap_alloc(bus_dmamap_t map, bus_size_t origlen, struct vax_sgmap *sgmap,
	int flags)
{
	int error;
	bus_size_t len = origlen;

#ifdef DIAGNOSTIC
	if (map->_dm_flags & DMAMAP_HAS_SGMAP)
		panic("vax_sgmap_alloc: already have sgva space");
#endif

	/*
	 * If we need a spill page (for the VS4000 SCSI), make sure we
	 * allocate enough space for an extra page.
	 */
	if (flags & VAX_BUS_DMA_SPILLPAGE) {
		len += VAX_NBPG;
	}

	map->_dm_sgvalen = vax_round_page(len);
#if 0
	printf("len %x -> %x, _dm_sgvalen %x _dm_boundary %x boundary %x -> ",
	    origlen, len, map->_dm_sgvalen, map->_dm_boundary, boundary);
#endif

	error = extent_alloc(sgmap->aps_ex, map->_dm_sgvalen, VAX_NBPG,
	    0, (flags & BUS_DMA_NOWAIT) ? EX_NOWAIT : EX_WAITOK,
	    &map->_dm_sgva);
#if 0
	printf("error %d _dm_sgva %x\n", error, map->_dm_sgva);
#endif

	if (error == 0)
		map->_dm_flags |= DMAMAP_HAS_SGMAP;
	else
		map->_dm_flags &= ~DMAMAP_HAS_SGMAP;

	return (error);
}
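
/*
 * After a successful vax_sgmap_alloc(), map->_dm_sgva names a
 * VAX_NBPG-aligned, map->_dm_sgvalen-byte chunk of the sgmap's bus
 * address window; vax_sgmap_load() below fills the corresponding
 * page-table entries with real physical pages.
 */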

void
vax_sgmap_free(bus_dmamap_t map, struct vax_sgmap *sgmap)
{

#ifdef DIAGNOSTIC
	if ((map->_dm_flags & DMAMAP_HAS_SGMAP) == 0)
		panic("vax_sgmap_free: no sgva space to free");
#endif

	if (extent_free(sgmap->aps_ex, map->_dm_sgva, map->_dm_sgvalen,
	    EX_NOWAIT))
		panic("vax_sgmap_free");

	map->_dm_flags &= ~DMAMAP_HAS_SGMAP;
}

int
vax_sgmap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf, bus_size_t buflen,
	struct proc *p, int flags, struct vax_sgmap *sgmap)
{
	vaddr_t endva, va = (vaddr_t)buf;
	paddr_t pa;
	bus_addr_t dmaoffset;
	bus_size_t dmalen;
	long *pte, *page_table = (long *)sgmap->aps_pt;
	int pteidx, error;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	if (buflen > map->_dm_size)
		return (EINVAL);

	/*
	 * Remember the offset into the first page and the total
	 * transfer length.
	 */
	dmaoffset = ((u_long)buf) & VAX_PGOFSET;
	dmalen = buflen;

	/*
	 * Allocate the necessary virtual address space for the
	 * mapping.  Round the size, since we deal with whole pages.
	 */
	endva = vax_round_page(va + buflen);
	va = vax_trunc_page(va);
	if ((map->_dm_flags & DMAMAP_HAS_SGMAP) == 0) {
		error = vax_sgmap_alloc(map, (endva - va), sgmap, flags);
		if (error)
			return (error);
	}

	pteidx = map->_dm_sgva >> VAX_PGSHIFT;
	pte = &page_table[pteidx];

	/*
	 * Generate the DMA address.
	 */
	map->dm_segs[0].ds_addr = map->_dm_sgva + dmaoffset;
	map->dm_segs[0].ds_len = dmalen;
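
	/*
	 * Illustrative example (addresses invented): with _dm_sgva =
	 * 0x2000 and a buffer that starts 0x36 bytes into a VAX page,
	 * dmaoffset = 0x36, so the device is handed ds_addr = 0x2036
	 * while the PTEs filled in below map bus pages 0x2000, 0x2200,
	 * ... onto the buffer's physical pages.
	 */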

	map->_dm_pteidx = pteidx;
	map->_dm_ptecnt = 0;

	/*
	 * Create the bus-specific page tables.
	 * This could be done much more efficiently than it is here.
	 */
	for (; va < endva; va += VAX_NBPG, pte++, map->_dm_ptecnt++) {
		/*
		 * Get the physical address for this segment.
		 */
		if (p != NULL)
			(void) pmap_extract(p->p_vmspace->vm_map.pmap, va, &pa);
		else
			pa = kvtophys(va);

		/*
		 * Load the current PTE with this page.
		 */
		*pte = (pa >> VAX_PGSHIFT) | PG_V;
	}
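
	/*
	 * Each PTE simply holds the VAX page frame number with the valid
	 * bit (PG_V) set; e.g. (example value) a page at physical address
	 * 0x00200000 is entered as (0x00200000 >> VAX_PGSHIFT) | PG_V =
	 * 0x1000 | PG_V.
	 */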

	/*
	 * The VS4000 SCSI prefetcher doesn't like to end on a page boundary
	 * so add an extra page to quiet it down.
	 */
	if (flags & VAX_BUS_DMA_SPILLPAGE) {
		*pte = pte[-1];
		map->_dm_ptecnt++;
	}

	map->dm_mapsize = buflen;
	map->dm_nsegs = 1;
	return (0);
}
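
/*
 * Rough caller-side sketch (hypothetical driver code, for illustration
 * only): a bus front end such as vsbus wires these routines into its
 * bus_dma_tag_t methods, so a driver only sees the MI bus_dma API:
 *
 *	if (bus_dmamap_create(tag, MAXPHYS, 1, MAXPHYS, 0,
 *	    BUS_DMA_NOWAIT, &map) == 0 &&
 *	    bus_dmamap_load(tag, map, buf, len, NULL, BUS_DMA_NOWAIT) == 0) {
 *		bus_dmamap_sync(tag, map, 0, len, BUS_DMASYNC_PREWRITE);
 *		... start the transfer at map->dm_segs[0].ds_addr ...
 *		bus_dmamap_unload(tag, map);
 *	}
 *
 * with the tag's load and unload hooks ending up in vax_sgmap_load()
 * above and vax_sgmap_unload() below.
 */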

int
vax_sgmap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m,
	int flags, struct vax_sgmap *sgmap)
{

	panic("vax_sgmap_load_mbuf : not implemented");
}

int
vax_sgmap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
	int flags, struct vax_sgmap *sgmap)
{

	panic("vax_sgmap_load_uio : not implemented");
}

int
vax_sgmap_load_raw(bus_dma_tag_t t, bus_dmamap_t map, bus_dma_segment_t *segs,
	int nsegs, bus_size_t size, int flags, struct vax_sgmap *sgmap)
{

	panic("vax_sgmap_load_raw : not implemented");
}

void
vax_sgmap_unload(bus_dma_tag_t t, bus_dmamap_t map, struct vax_sgmap *sgmap)
{
	long *pte, *page_table = (long *)sgmap->aps_pt;
	int ptecnt;

	/*
	 * Invalidate the PTEs for the mapping.
	 */
	for (ptecnt = map->_dm_ptecnt, pte = &page_table[map->_dm_pteidx];
	    ptecnt-- != 0; ) {
		*pte++ = 0;
	}

	/*
	 * Free the virtual address space used by the mapping
	 * if necessary.
	 */
	if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
		vax_sgmap_free(map, sgmap);

	/*
	 * Mark the mapping invalid.
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
}