/* $NetBSD: sgmap.c,v 1.20 2023/12/03 00:49:46 thorpej Exp $ */

/*-
 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sgmap.c,v 1.20 2023/12/03 00:49:46 thorpej Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>

#include <uvm/uvm_extern.h>

#include <machine/sgmap.h>

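/*
 * Scatter/gather DMA map support for the VAX.  A per-bus page table maps
 * a window of bus addresses ("sgva" space) onto arbitrary physical pages,
 * one VAX page (VAX_NBPG bytes) at a time; the sgva space itself is handed
 * out by a vmem(9) arena created in vax_sgmap_init().
 *
 * A minimal usage sketch (illustrative only; sc_dmat, sc_sgmap, map, buf,
 * buflen, sgvabase and sgvasize are hypothetical names supplied by a bus
 * front-end):
 *
 *      struct vax_sgmap sc_sgmap;
 *
 *      vax_sgmap_init(sc_dmat, &sc_sgmap, "examplebus_sgmap",
 *          sgvabase, sgvasize, NULL, 0);
 *      ...
 *      vax_sgmap_load(sc_dmat, map, buf, buflen, NULL, BUS_DMA_NOWAIT,
 *          &sc_sgmap);
 *      ... perform the DMA transfer ...
 *      vax_sgmap_unload(sc_dmat, map, &sc_sgmap);
 */
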
void
vax_sgmap_init(bus_dma_tag_t t, struct vax_sgmap *sgmap, const char *name,
    bus_addr_t sgvabase, bus_size_t sgvasize, struct pte *ptva,
    bus_size_t minptalign)
{
        bus_dma_segment_t seg;
        size_t ptsize;
        int rseg;

        if (sgvasize & PGOFSET) {
                printf("size botch for sgmap `%s'\n", name);
                goto die;
        }

        sgmap->aps_sgvabase = sgvabase;
        sgmap->aps_sgvasize = sgvasize;

        if (ptva != NULL) {
                /*
                 * We already have a page table; this may be a system
                 * where the page table resides in bridge-resident SRAM.
                 */
                sgmap->aps_pt = ptva;
        } else {
                /*
                 * Compute the page table size and allocate it.  At minimum,
                 * this must be aligned to the page table size.  However,
                 * some platforms have more strict alignment requirements.
                 */
                ptsize = (sgvasize / VAX_NBPG) * sizeof(struct pte);
                if (minptalign != 0) {
                        if (minptalign < ptsize)
                                minptalign = ptsize;
                } else
                        minptalign = ptsize;
                if (bus_dmamem_alloc(t, ptsize, minptalign, 0, &seg, 1, &rseg,
                    BUS_DMA_NOWAIT)) {
                        panic("unable to allocate page table for sgmap `%s'",
                            name);
                        goto die;
                }
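                /*
                 * Address the page table through system space; on the VAX,
                 * oring KERNBASE into the physical address yields the
                 * corresponding kernel virtual address.
                 */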
                sgmap->aps_pt = (struct pte *)(seg.ds_addr | KERNBASE);
        }

        /*
         * Create the arena used to manage the virtual address
         * space.
         */
        sgmap->aps_arena = vmem_create(name, sgvabase, sgvasize,
            VAX_NBPG,           /* quantum */
            NULL,               /* importfn */
            NULL,               /* releasefn */
            NULL,               /* source */
            0,                  /* qcache_max */
            VM_SLEEP,
            IPL_VM);
        KASSERT(sgmap->aps_arena != NULL);
        return;
 die:
        panic("vax_sgmap_init");
}

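/*
 * Allocate sgva space for a DMA map.  The length is rounded to whole
 * VAX pages and the resulting range is recorded in the map so that
 * vax_sgmap_load() and vax_sgmap_free() can find it later.
 */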
int
vax_sgmap_alloc(bus_dmamap_t map, bus_size_t origlen, struct vax_sgmap *sgmap,
    int flags)
{
        int error;
        bus_size_t len = origlen;

#ifdef DIAGNOSTIC
        if (map->_dm_flags & DMAMAP_HAS_SGMAP)
                panic("vax_sgmap_alloc: already have sgva space");
#endif

        /*
         * If we need a spill page (for the VS4000 SCSI), make sure we
         * allocate enough space for an extra page.
         */
        if (flags & VAX_BUS_DMA_SPILLPAGE) {
                len += VAX_NBPG;
        }

        map->_dm_sgvalen = vax_round_page(len);
#define DEBUG_SGMAP 0
#if DEBUG_SGMAP
        printf("len %x -> %x, _dm_sgvalen %x _dm_boundary %x boundary %x -> ",
        //origlen, len, map->_dm_sgvalen, map->_dm_boundary, boundary);
            (unsigned int)origlen, (unsigned int)len,
            (unsigned int)map->_dm_sgvalen,
            (unsigned int)map->_dm_boundary, 1);
#endif

        const vm_flag_t vmflags = VM_BESTFIT |
            ((flags & BUS_DMA_NOWAIT) ? VM_NOSLEEP : VM_SLEEP);

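        /*
         * The map's boundary constraint is passed as the vmem "nocross"
         * argument, so the allocated sgva range will not cross it.
         */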
        error = vmem_xalloc(sgmap->aps_arena, map->_dm_sgvalen,
            0,                  /* alignment */
            0,                  /* phase */
            map->_dm_boundary,  /* nocross */
            VMEM_ADDR_MIN,      /* minaddr */
            VMEM_ADDR_MAX,      /* maxaddr */
            vmflags,
            &map->_dm_sgva);

#if DEBUG_SGMAP
        printf("error %d _dm_sgva %lx\n", error, map->_dm_sgva);
#endif

        if (error == 0)
                map->_dm_flags |= DMAMAP_HAS_SGMAP;
        else
                map->_dm_flags &= ~DMAMAP_HAS_SGMAP;

        return (error);
}

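/*
 * Return the sgva space previously allocated to a DMA map to the arena
 * and clear the map's DMAMAP_HAS_SGMAP flag.
 */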
void
vax_sgmap_free(bus_dmamap_t map, struct vax_sgmap *sgmap)
{

#ifdef DIAGNOSTIC
        if ((map->_dm_flags & DMAMAP_HAS_SGMAP) == 0)
                panic("vax_sgmap_free: no sgva space to free");
#endif

        vmem_xfree(sgmap->aps_arena, map->_dm_sgva, map->_dm_sgvalen);

        map->_dm_flags &= ~DMAMAP_HAS_SGMAP;
}

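/*
 * Reserve a specific range of sgva space so the arena will never hand
 * it out to a DMA map.
 */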
int
vax_sgmap_reserve(bus_addr_t ba, bus_size_t len, struct vax_sgmap *sgmap)
{
        return vmem_xalloc_addr(sgmap->aps_arena, ba, len, VM_NOSLEEP);
}

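/*
 * Load a linear buffer into a DMA map: allocate sgva space if the map
 * does not already have some, then fill in one bus PTE per VAX page of
 * the buffer.  The result is always a single DMA segment.
 */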
int
vax_sgmap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf, bus_size_t buflen,
    struct proc *p, int flags, struct vax_sgmap *sgmap)
{
        vaddr_t endva, va = (vaddr_t)buf;
        paddr_t pa;
        bus_addr_t dmaoffset;
        bus_size_t dmalen;
        long *pte, *page_table = (long *)sgmap->aps_pt;
        int pteidx, error;

        /*
         * Make sure that on error condition we return "no valid mappings".
         */
        map->dm_mapsize = 0;
        map->dm_nsegs = 0;

        if (buflen > map->_dm_size)
                return (EINVAL);

        /*
         * Remember the offset into the first page and the total
         * transfer length.
         */
        dmaoffset = ((u_long)buf) & VAX_PGOFSET;
        dmalen = buflen;

        /*
         * Allocate the necessary virtual address space for the
         * mapping.  Round the size, since we deal with whole pages.
         */
        endva = vax_round_page(va + buflen);
        va = vax_trunc_page(va);
        if ((map->_dm_flags & DMAMAP_HAS_SGMAP) == 0) {
                error = vax_sgmap_alloc(map, (endva - va), sgmap, flags);
                if (error)
                        return (error);
        }

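        /*
         * Index of the first bus PTE backing the allocated sgva range.
         */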
        pteidx = map->_dm_sgva >> VAX_PGSHIFT;
        pte = &page_table[pteidx];

        /*
         * Generate the DMA address.
         */
        map->dm_segs[0].ds_addr = map->_dm_sgva + dmaoffset;
        map->dm_segs[0].ds_len = dmalen;

        map->_dm_pteidx = pteidx;
        map->_dm_ptecnt = 0;

        /*
         * Create the bus-specific page table entries.
         * This could be done much more efficiently than it is here.
         */
        for (; va < endva; va += VAX_NBPG, pte++, map->_dm_ptecnt++) {
                /*
                 * Get the physical address for this segment.
                 */
                if (p != NULL)
                        (void) pmap_extract(p->p_vmspace->vm_map.pmap, va,
                            &pa);
                else
                        pa = kvtophys(va);

                /*
                 * Load the current PTE with this page.
                 */
                *pte = (pa >> VAX_PGSHIFT) | PG_V;
        }

        /*
         * The VS4000 SCSI prefetcher doesn't like to end on a page boundary
         * so add an extra page to quiet it down.
         */
        if (flags & VAX_BUS_DMA_SPILLPAGE) {
                *pte = pte[-1];
                map->_dm_ptecnt++;
        }

        map->dm_mapsize = buflen;
        map->dm_nsegs = 1;
        return (0);
}

int
vax_sgmap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m,
    int flags, struct vax_sgmap *sgmap)
{

        panic("vax_sgmap_load_mbuf : not implemented");
}

int
vax_sgmap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
    int flags, struct vax_sgmap *sgmap)
{

        panic("vax_sgmap_load_uio : not implemented");
}

int
vax_sgmap_load_raw(bus_dma_tag_t t, bus_dmamap_t map, bus_dma_segment_t *segs,
    int nsegs, bus_size_t size, int flags, struct vax_sgmap *sgmap)
{

        panic("vax_sgmap_load_raw : not implemented");
}

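/*
 * Unload a DMA map: invalidate the bus PTEs it used and, unless the
 * sgva space was allocated up front (BUS_DMA_ALLOCNOW), return that
 * space to the arena.
 */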
void
vax_sgmap_unload(bus_dma_tag_t t, bus_dmamap_t map, struct vax_sgmap *sgmap)
{
        long *pte, *page_table = (long *)sgmap->aps_pt;
        int ptecnt;

        /*
         * Invalidate the PTEs for the mapping.
         */
        for (ptecnt = map->_dm_ptecnt, pte = &page_table[map->_dm_pteidx];
            ptecnt-- != 0; ) {
                *pte++ = 0;
        }

        /*
         * Free the virtual address space used by the mapping
         * if necessary.
         */
        if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
                vax_sgmap_free(map, sgmap);

        /*
         * Mark the mapping invalid.
         */
        map->dm_mapsize = 0;
        map->dm_nsegs = 0;
}