/* $NetBSD: sgmap.c,v 1.18 2015/07/05 02:03:36 matt Exp $ */
2
3 /*-
4 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 #include <sys/cdefs.h>
34 __KERNEL_RCSID(0, "$NetBSD: sgmap.c,v 1.18 2015/07/05 02:03:36 matt Exp $");
35
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/bus.h>
39 #include <sys/kernel.h>
40 #include <sys/proc.h>
41 #include <sys/malloc.h>
42
43 #include <uvm/uvm_extern.h>
44
45 #include <machine/sgmap.h>
46
47 void
48 vax_sgmap_init(bus_dma_tag_t t, struct vax_sgmap *sgmap, const char *name,
49 bus_addr_t sgvabase, bus_size_t sgvasize, struct pte *ptva,
50 bus_size_t minptalign)
51 {
52 bus_dma_segment_t seg;
53 size_t ptsize;
54 int rseg;
55
56 if (sgvasize & PGOFSET) {
57 printf("size botch for sgmap `%s'\n", name);
58 goto die;
59 }
60
61 sgmap->aps_sgvabase = sgvabase;
62 sgmap->aps_sgvasize = sgvasize;
63
64 if (ptva != NULL) {
65 /*
66 * We already have a page table; this may be a system
67 * where the page table resides in bridge-resident SRAM.
68 */
69 sgmap->aps_pt = ptva;
70 } else {
71 /*
72 * Compute the page table size and allocate it. At minimum,
73 * this must be aligned to the page table size. However,
74 * some platforms have more strict alignment reqirements.
75 */
76 ptsize = (sgvasize / VAX_NBPG) * sizeof(struct pte);
77 if (minptalign != 0) {
78 if (minptalign < ptsize)
79 minptalign = ptsize;
80 } else
81 minptalign = ptsize;
82 if (bus_dmamem_alloc(t, ptsize, minptalign, 0, &seg, 1, &rseg,
83 BUS_DMA_NOWAIT)) {
84 panic("unable to allocate page table for sgmap `%s'",
85 name);
86 goto die;
87 }
88 sgmap->aps_pt = (struct pte *)(seg.ds_addr | KERNBASE);
89 }
90
91 /*
92 * Create the extent map used to manage the virtual address
93 * space.
94 */
95 sgmap->aps_ex = extent_create(name, sgvabase, sgvasize - 1,
96 NULL, 0, EX_NOWAIT|EX_NOCOALESCE);
97 if (sgmap->aps_ex == NULL) {
98 printf("unable to create extent map for sgmap `%s'\n", name);
99 goto die;
100 }
101
102 return;
103 die:
104 panic("vax_sgmap_init");
105 }
106
107 int
108 vax_sgmap_alloc(bus_dmamap_t map, bus_size_t origlen, struct vax_sgmap *sgmap,
109 int flags)
110 {
111 int error;
112 bus_size_t len = origlen;
113
114 #ifdef DIAGNOSTIC
115 if (map->_dm_flags & DMAMAP_HAS_SGMAP)
116 panic("vax_sgmap_alloc: already have sgva space");
117 #endif
118
119 /* If we need a spill page (for the VS4000 SCSI), make sure we
120 * allocate enough space for an extra page.
121 */
122 if (flags & VAX_BUS_DMA_SPILLPAGE) {
123 len += VAX_NBPG;
124 }
125
126 map->_dm_sgvalen = vax_round_page(len);
127 #define DEBUG_SGMAP 0
128 #if DEBUG_SGMAP
129 printf("len %x -> %x, _dm_sgvalen %x _dm_boundary %x boundary %x -> ",
130 //origlen, len, map->_dm_sgvalen, map->_dm_boundary, boundary);
131 (unsigned int)origlen, (unsigned int)len, (unsigned int)map->_dm_sgvalen, (unsigned int)map->_dm_boundary, 1);
132 #endif
133
134 error = extent_alloc(sgmap->aps_ex, map->_dm_sgvalen, VAX_NBPG,
135 0, (flags & BUS_DMA_NOWAIT) ? EX_NOWAIT : EX_WAITOK,
136 &map->_dm_sgva);
137 #if DEBUG_SGMAP
138 printf("error %d _dm_sgva %lx\n", error, map->_dm_sgva);
139 #endif
140
141 if (error == 0)
142 map->_dm_flags |= DMAMAP_HAS_SGMAP;
143 else
144 map->_dm_flags &= ~DMAMAP_HAS_SGMAP;
145
146 return (error);
147 }
148
149 void
150 vax_sgmap_free(bus_dmamap_t map, struct vax_sgmap *sgmap)
151 {
152
153 #ifdef DIAGNOSTIC
154 if ((map->_dm_flags & DMAMAP_HAS_SGMAP) == 0)
155 panic("vax_sgmap_free: no sgva space to free");
156 #endif
157
158 if (extent_free(sgmap->aps_ex, map->_dm_sgva, map->_dm_sgvalen,
159 EX_NOWAIT))
160 panic("vax_sgmap_free");
161
162 map->_dm_flags &= ~DMAMAP_HAS_SGMAP;
163 }
164
165 int
166 vax_sgmap_reserve(bus_addr_t ba, bus_size_t len, struct vax_sgmap *sgmap)
167 {
168 return extent_alloc_region(sgmap->aps_ex, ba, len, EX_NOWAIT);
169 }
170
/*
 * vax_sgmap_load --
 *	Load a linear buffer into a DMA map by programming the bus
 *	scatter/gather PTEs so the (possibly physically discontiguous)
 *	buffer appears contiguous in bus space.  The result is always a
 *	single DMA segment (dm_nsegs == 1) whose address is the map's
 *	sgva plus the buffer's offset into its first page.
 *
 *	t      - DMA tag (unused here)
 *	map    - the map to load; sgva space is allocated on demand
 *	buf    - kernel or user virtual address of the buffer
 *	buflen - transfer length; must not exceed map->_dm_size
 *	p      - owning process for user buffers, or NULL for kernel VAs
 *	flags  - BUS_DMA_* flags plus VAX_BUS_DMA_SPILLPAGE
 *	sgmap  - the sgmap whose page table is programmed
 *
 *	Returns 0 on success, EINVAL if the buffer is too large for the
 *	map, or the error from vax_sgmap_alloc().
 */
int
vax_sgmap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf, bus_size_t buflen,
	struct proc *p, int flags, struct vax_sgmap *sgmap)
{
	vaddr_t endva, va = (vaddr_t)buf;
	paddr_t pa;
	bus_addr_t dmaoffset;
	bus_size_t dmalen;
	long *pte, *page_table = (long *)sgmap->aps_pt;
	int pteidx, error;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	if (buflen > map->_dm_size)
		return (EINVAL);

	/*
	 * Remember the offset into the first page and the total
	 * transfer length.
	 */
	dmaoffset = ((u_long)buf) & VAX_PGOFSET;
	dmalen = buflen;


	/*
	 * Allocate the necessary virtual address space for the
	 * mapping. Round the size, since we deal with whole pages.
	 */
	endva = vax_round_page(va + buflen);
	va = vax_trunc_page(va);
	/* Reuse existing sgva space if the map already holds some. */
	if ((map->_dm_flags & DMAMAP_HAS_SGMAP) == 0) {
		error = vax_sgmap_alloc(map, (endva - va), sgmap, flags);
		if (error)
			return (error);
	}

	/* Index of the first bus PTE backing the allocated sgva range. */
	pteidx = map->_dm_sgva >> VAX_PGSHIFT;
	pte = &page_table[pteidx];

	/*
	 * Generate the DMA address.
	 */
	map->dm_segs[0].ds_addr = map->_dm_sgva + dmaoffset;
	map->dm_segs[0].ds_len = dmalen;


	/* Record the PTE range for vax_sgmap_unload(). */
	map->_dm_pteidx = pteidx;
	map->_dm_ptecnt = 0;

	/*
	 * Create the bus-specific page tables.
	 * Can be done much more efficient than this.
	 */
	for (; va < endva; va += VAX_NBPG, pte++, map->_dm_ptecnt++) {
		/*
		 * Get the physical address for this segment.
		 *
		 * NOTE(review): the pmap_extract() return value is
		 * discarded, so `pa' would be used uninitialized if the
		 * user VA were unmapped — presumably callers guarantee
		 * the buffer is resident/wired.  TODO confirm.
		 */
		if (p != NULL)
			(void) pmap_extract(p->p_vmspace->vm_map.pmap, va, &pa);
		else
			pa = kvtophys(va);

		/*
		 * Load the current PTE with this page.
		 */
		*pte = (pa >> VAX_PGSHIFT) | PG_V;
	}
	/* The VS4000 SCSI prefetcher doesn't like to end on a page boundary
	 * so add an extra page to quiet it down.  The extra sgva page was
	 * reserved by vax_sgmap_alloc() for this flag; `pte' now points one
	 * past the last entry written, so duplicate the final mapping there.
	 */
	if (flags & VAX_BUS_DMA_SPILLPAGE) {
		*pte = pte[-1];
		map->_dm_ptecnt++;
	}

	map->dm_mapsize = buflen;
	map->dm_nsegs = 1;
	return (0);
}
254
/*
 * vax_sgmap_load_mbuf --
 *	Load an mbuf chain into a DMA map.  Not implemented on this
 *	port; calling it is a fatal error.
 */
int
vax_sgmap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m,
	int flags, struct vax_sgmap *sgmap)
{

	panic("vax_sgmap_load_mbuf : not implemented");
}
262
/*
 * vax_sgmap_load_uio --
 *	Load a uio (scatter of iovecs) into a DMA map.  Not implemented
 *	on this port; calling it is a fatal error.
 */
int
vax_sgmap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
	int flags, struct vax_sgmap *sgmap)
{

	panic("vax_sgmap_load_uio : not implemented");
}
270
/*
 * vax_sgmap_load_raw --
 *	Load raw (previously allocated) DMA memory into a DMA map.
 *	Not implemented on this port; calling it is a fatal error.
 */
int
vax_sgmap_load_raw(bus_dma_tag_t t, bus_dmamap_t map, bus_dma_segment_t *segs,
	int nsegs, bus_size_t size, int flags, struct vax_sgmap *sgmap)
{

	panic("vax_sgmap_load_raw : not implemented");
}
278
279 void
280 vax_sgmap_unload(bus_dma_tag_t t, bus_dmamap_t map, struct vax_sgmap *sgmap)
281 {
282 long *pte, *page_table = (long *)sgmap->aps_pt;
283 int ptecnt;
284
285 /*
286 * Invalidate the PTEs for the mapping.
287 */
288 for (ptecnt = map->_dm_ptecnt, pte = &page_table[map->_dm_pteidx];
289 ptecnt-- != 0; ) {
290 *pte++ = 0;
291 }
292
293 /*
294 * Free the virtual address space used by the mapping
295 * if necessary.
296 */
297 if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
298 vax_sgmap_free(map, sgmap);
299 /*
300 * Mark the mapping invalid.
301 */
302 map->dm_mapsize = 0;
303 map->dm_nsegs = 0;
304 }
305