/* $NetBSD: sgmap_common.c,v 1.27 2020/06/17 04:12:39 thorpej Exp $ */
2
3 /*-
4 * Copyright (c) 1997, 1998, 2001 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 #include <sys/cdefs.h> /* RCS ID & Copyright macro defns */
34
35 __KERNEL_RCSID(0, "$NetBSD: sgmap_common.c,v 1.27 2020/06/17 04:12:39 thorpej Exp $");
36
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
40 #include <sys/malloc.h>
41 #include <sys/proc.h>
42
43 #include <uvm/uvm_extern.h>
44
45 #define _ALPHA_BUS_DMA_PRIVATE
46 #include <sys/bus.h>
47
48 #include <alpha/common/sgmapvar.h>
49
/*
 * Some systems will prefetch the next page during a memory -> device DMA.
 * This can cause machine checks if there is not a spill page after the
 * last page of the DMA (thus avoiding hitting an invalid SGMAP PTE).
 *
 * The spill page is allocated once, by the first call to
 * alpha_sgmap_init(), and is shared by all sgmaps thereafter.
 */
vaddr_t alpha_sgmap_prefetch_spill_page_va;	/* K0SEG VA of the spill page */
bus_addr_t alpha_sgmap_prefetch_spill_page_pa;	/* physical address of same */
57
58 void
59 alpha_sgmap_init(bus_dma_tag_t t, struct alpha_sgmap *sgmap, const char *name,
60 bus_addr_t wbase, bus_addr_t sgvabase, bus_size_t sgvasize, size_t ptesize,
61 void *ptva, bus_size_t minptalign)
62 {
63 bus_dma_segment_t seg;
64 size_t ptsize;
65 int rseg;
66
67 if (sgvasize & PGOFSET) {
68 printf("size botch for sgmap `%s'\n", name);
69 goto die;
70 }
71
72 sgmap->aps_wbase = wbase;
73 sgmap->aps_sgvabase = sgvabase;
74 sgmap->aps_sgvasize = sgvasize;
75
76 if (ptva != NULL) {
77 /*
78 * We already have a page table; this may be a system
79 * where the page table resides in bridge-resident SRAM.
80 */
81 sgmap->aps_pt = ptva;
82 sgmap->aps_ptpa = 0;
83 } else {
84 /*
85 * Compute the page table size and allocate it. At minimum,
86 * this must be aligned to the page table size. However,
87 * some platforms have more strict alignment reqirements.
88 */
89 ptsize = (sgvasize / PAGE_SIZE) * ptesize;
90 if (minptalign != 0) {
91 if (minptalign < ptsize)
92 minptalign = ptsize;
93 } else
94 minptalign = ptsize;
95 if (bus_dmamem_alloc(t, ptsize, minptalign, 0, &seg, 1, &rseg,
96 BUS_DMA_NOWAIT)) {
97 panic("unable to allocate page table for sgmap `%s'",
98 name);
99 goto die;
100 }
101 sgmap->aps_ptpa = seg.ds_addr;
102 sgmap->aps_pt = (void *)ALPHA_PHYS_TO_K0SEG(sgmap->aps_ptpa);
103 }
104
105 /*
106 * Create the arena used to manage the virtual address
107 * space.
108 *
109 * XXX Consider using a quantum cache up to MAXPHYS+PAGE_SIZE
110 * XXX (extra page to handle the spill page). For now, we don't,
111 * XXX because we are using constrained allocations everywhere.
112 */
113 sgmap->aps_arena = vmem_create(name, sgvabase, sgvasize,
114 PAGE_SIZE, /* quantum */
115 NULL, /* importfn */
116 NULL, /* releasefn */
117 NULL, /* source */
118 0, /* qcache_max */
119 VM_SLEEP,
120 IPL_VM);
121 KASSERT(sgmap->aps_arena != NULL);
122
123 /*
124 * Allocate a spill page if that hasn't already been done.
125 */
126 if (alpha_sgmap_prefetch_spill_page_va == 0) {
127 if (bus_dmamem_alloc(t, PAGE_SIZE, 0, 0, &seg, 1, &rseg,
128 BUS_DMA_NOWAIT)) {
129 printf("unable to allocate spill page for sgmap `%s'\n",
130 name);
131 goto die;
132 }
133 alpha_sgmap_prefetch_spill_page_pa = seg.ds_addr;
134 alpha_sgmap_prefetch_spill_page_va =
135 ALPHA_PHYS_TO_K0SEG(alpha_sgmap_prefetch_spill_page_pa);
136 memset((void *)alpha_sgmap_prefetch_spill_page_va, 0,
137 PAGE_SIZE);
138 }
139
140 return;
141 die:
142 panic("alpha_sgmap_init");
143 }
144
145 int
146 alpha_sgmap_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
147 bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
148 {
149 bus_dmamap_t map;
150 int error;
151
152 error = _bus_dmamap_create(t, size, nsegments, maxsegsz,
153 boundary, flags, &map);
154 if (error)
155 return (error);
156
157 /* XXX BUS_DMA_ALLOCNOW */
158
159 if (error == 0)
160 *dmamp = map;
161 else
162 alpha_sgmap_dmamap_destroy(t, map);
163
164 return (error);
165 }
166
/*
 * alpha_sgmap_dmamap_destroy:
 *
 *	Destroy a DMA map created by alpha_sgmap_dmamap_create().
 *	The map must already be unloaded (dm_mapsize == 0); destroying
 *	a loaded map would leak its SGMAP mappings.
 */
void
alpha_sgmap_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{

	/* Caller must have unloaded the map first. */
	KASSERT(map->dm_mapsize == 0);

	_bus_dmamap_destroy(t, map);
}
175