/*	$NetBSD: sgmap.c,v 1.21 2023/12/20 15:34:45 thorpej Exp $	*/

/*-
 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sgmap.c,v 1.21 2023/12/20 15:34:45 thorpej Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/proc.h>

#include <uvm/uvm_extern.h>

#include <machine/sgmap.h>

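/*
 * vax_sgmap_init --
 *	Initialize an sgmap: record the bus virtual address range it
 *	covers, allocate (or adopt) the page table that backs it, and
 *	create the vmem arena that manages allocations from it.
 */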
void
vax_sgmap_init(bus_dma_tag_t t, struct vax_sgmap *sgmap, const char *name,
    bus_addr_t sgvabase, bus_size_t sgvasize, struct pte *ptva,
    bus_size_t minptalign)
{
	bus_dma_segment_t seg;
	size_t ptsize;
	int rseg;

	if (sgvasize & PGOFSET) {
		printf("size botch for sgmap `%s'\n", name);
		goto die;
	}

	sgmap->aps_sgvabase = sgvabase;
	sgmap->aps_sgvasize = sgvasize;

	if (ptva != NULL) {
		/*
		 * We already have a page table; this may be a system
		 * where the page table resides in bridge-resident SRAM.
		 */
		sgmap->aps_pt = ptva;
	} else {
		/*
		 * Compute the page table size and allocate it.  At minimum,
		 * this must be aligned to the page table size.  However,
		 * some platforms have more strict alignment requirements.
		 */
		ptsize = (sgvasize / VAX_NBPG) * sizeof(struct pte);
		if (minptalign != 0) {
			if (minptalign < ptsize)
				minptalign = ptsize;
		} else
			minptalign = ptsize;
		if (bus_dmamem_alloc(t, ptsize, minptalign, 0, &seg, 1, &rseg,
		    BUS_DMA_NOWAIT)) {
			panic("unable to allocate page table for sgmap `%s'",
			    name);
			/* NOTREACHED */
		}
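		/*
		 * Physical memory is mapped into system space starting
		 * at KERNBASE, so ORing KERNBASE into the table's
		 * physical address yields its kernel virtual address.
		 */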
		sgmap->aps_pt = (struct pte *)(seg.ds_addr | KERNBASE);
	}

	/*
	 * Create the arena used to manage the virtual address space.
	 */
	sgmap->aps_arena = vmem_create(name, sgvabase, sgvasize,
	    VAX_NBPG,		/* quantum */
	    NULL,		/* importfn */
	    NULL,		/* releasefn */
	    NULL,		/* source */
	    0,			/* qcache_max */
	    VM_SLEEP,
	    IPL_VM);
	KASSERT(sgmap->aps_arena != NULL);
	return;
 die:
	panic("vax_sgmap_init");
}

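/*
 * vax_sgmap_alloc --
 *	Allocate bus virtual space for a DMA map from the sgmap's arena.
 */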
int
vax_sgmap_alloc(bus_dmamap_t map, bus_size_t origlen, struct vax_sgmap *sgmap,
    int flags)
{
	int error;
	bus_size_t len = origlen;

#ifdef DIAGNOSTIC
	if (map->_dm_flags & DMAMAP_HAS_SGMAP)
		panic("vax_sgmap_alloc: already have sgva space");
#endif

	/*
	 * If we need a spill page (for the VS4000 SCSI), make sure we
	 * allocate enough space for an extra page.
	 */
	if (flags & VAX_BUS_DMA_SPILLPAGE) {
		len += VAX_NBPG;
	}

	map->_dm_sgvalen = vax_round_page(len);
#define DEBUG_SGMAP 0
#if DEBUG_SGMAP
	printf("len %x -> %x, _dm_sgvalen %x _dm_boundary %x -> ",
	    (unsigned int)origlen, (unsigned int)len,
	    (unsigned int)map->_dm_sgvalen, (unsigned int)map->_dm_boundary);
#endif

	const vm_flag_t vmflags = VM_BESTFIT |
	    ((flags & BUS_DMA_NOWAIT) ? VM_NOSLEEP : VM_SLEEP);

	error = vmem_xalloc(sgmap->aps_arena, map->_dm_sgvalen,
	    0,			/* alignment */
	    0,			/* phase */
	    map->_dm_boundary,	/* nocross */
	    VMEM_ADDR_MIN,	/* minaddr */
	    VMEM_ADDR_MAX,	/* maxaddr */
	    vmflags,
	    &map->_dm_sgva);

#if DEBUG_SGMAP
	printf("error %d _dm_sgva %lx\n", error, map->_dm_sgva);
#endif

	if (error == 0)
		map->_dm_flags |= DMAMAP_HAS_SGMAP;
	else
		map->_dm_flags &= ~DMAMAP_HAS_SGMAP;

	return (error);
}

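/*
 * vax_sgmap_free --
 *	Release the bus virtual space assigned to a DMA map.
 */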
void
vax_sgmap_free(bus_dmamap_t map, struct vax_sgmap *sgmap)
{

#ifdef DIAGNOSTIC
	if ((map->_dm_flags & DMAMAP_HAS_SGMAP) == 0)
		panic("vax_sgmap_free: no sgva space to free");
#endif

	vmem_xfree(sgmap->aps_arena, map->_dm_sgva, map->_dm_sgvalen);

	map->_dm_flags &= ~DMAMAP_HAS_SGMAP;
}

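/*
 * vax_sgmap_reserve --
 *	Claim a fixed range of bus virtual space so that the arena
 *	never hands it out to a DMA map.
 */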
int
vax_sgmap_reserve(bus_addr_t ba, bus_size_t len, struct vax_sgmap *sgmap)
{
	return vmem_xalloc_addr(sgmap->aps_arena, ba, len, VM_NOSLEEP);
}

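/*
 * vax_sgmap_load --
 *	Load a contiguous buffer into the DMA map: allocate bus virtual
 *	space if the map doesn't already have some, then point the
 *	sgmap PTEs at the buffer's physical pages.
 */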
int
vax_sgmap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf, bus_size_t buflen,
    struct proc *p, int flags, struct vax_sgmap *sgmap)
{
	vaddr_t endva, va = (vaddr_t)buf;
	paddr_t pa;
	bus_addr_t dmaoffset;
	bus_size_t dmalen;
	long *pte, *page_table = (long *)sgmap->aps_pt;
	int pteidx, error;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	if (buflen > map->_dm_size)
		return (EINVAL);

	/*
	 * Remember the offset into the first page and the total
	 * transfer length.
	 */
	dmaoffset = ((u_long)buf) & VAX_PGOFSET;
	dmalen = buflen;

	/*
	 * Allocate the necessary virtual address space for the
	 * mapping.  Round the size, since we deal with whole pages.
	 */
	endva = vax_round_page(va + buflen);
	va = vax_trunc_page(va);
	if ((map->_dm_flags & DMAMAP_HAS_SGMAP) == 0) {
		error = vax_sgmap_alloc(map, (endva - va), sgmap, flags);
		if (error)
			return (error);
	}

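	/*
	 * One PTE describes each VAX page (VAX_NBPG bytes) of bus
	 * virtual space, so the index of the first map register is
	 * simply the SGVA's page number.
	 */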
	pteidx = map->_dm_sgva >> VAX_PGSHIFT;
	pte = &page_table[pteidx];

	/*
	 * Generate the DMA address.
	 */
	map->dm_segs[0].ds_addr = map->_dm_sgva + dmaoffset;
	map->dm_segs[0].ds_len = dmalen;

	map->_dm_pteidx = pteidx;
	map->_dm_ptecnt = 0;

	/*
	 * Create the bus-specific page tables.
	 * This could be done much more efficiently.
	 */
	for (; va < endva; va += VAX_NBPG, pte++, map->_dm_ptecnt++) {
		/*
		 * Get the physical address for this segment.
		 */
		if (p != NULL)
			(void) pmap_extract(p->p_vmspace->vm_map.pmap, va,
			    &pa);
		else
			pa = kvtophys(va);

		/*
		 * Load the current PTE with this page: the page frame
		 * number plus the valid bit.
		 */
		*pte = (pa >> VAX_PGSHIFT) | PG_V;
	}
	/*
	 * The VS4000 SCSI prefetcher doesn't like to end on a page
	 * boundary, so add an extra page to quiet it down.
	 */
	if (flags & VAX_BUS_DMA_SPILLPAGE) {
		*pte = pte[-1];
		map->_dm_ptecnt++;
	}

	map->dm_mapsize = buflen;
	map->dm_nsegs = 1;
	return (0);
}

int
vax_sgmap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m,
    int flags, struct vax_sgmap *sgmap)
{

	panic("vax_sgmap_load_mbuf: not implemented");
}

int
vax_sgmap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
    int flags, struct vax_sgmap *sgmap)
{

	panic("vax_sgmap_load_uio: not implemented");
}

int
vax_sgmap_load_raw(bus_dma_tag_t t, bus_dmamap_t map, bus_dma_segment_t *segs,
    int nsegs, bus_size_t size, int flags, struct vax_sgmap *sgmap)
{

	panic("vax_sgmap_load_raw: not implemented");
}

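/*
 * vax_sgmap_unload --
 *	Invalidate the PTEs for the mapping and release its bus virtual
 *	space unless it was allocated when the map was created.
 */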
void
vax_sgmap_unload(bus_dma_tag_t t, bus_dmamap_t map, struct vax_sgmap *sgmap)
{
	long *pte, *page_table = (long *)sgmap->aps_pt;
	int ptecnt;

	/*
	 * Invalidate the PTEs for the mapping.
	 */
	for (ptecnt = map->_dm_ptecnt, pte = &page_table[map->_dm_pteidx];
	    ptecnt-- != 0; ) {
		*pte++ = 0;
	}

	/*
	 * Free the virtual address space used by the mapping
	 * if necessary.
	 */
	if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
		vax_sgmap_free(map, sgmap);

	/*
	 * Mark the mapping invalid.
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
}