/*	$NetBSD: io-mapping.h,v 1.10 2021/12/19 12:03:30 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

32 #ifndef _LINUX_IO_MAPPING_H_
33 #define _LINUX_IO_MAPPING_H_
34
35 #include <sys/param.h>
36 #include <sys/bus.h>
37 #include <sys/kmem.h>
38 #include <sys/systm.h>
39 #include <sys/mman.h>
40
41 #include <uvm/uvm_extern.h>
42
/*
 * Linux-compatible io_mapping, describing a bus-space region intended
 * for write-combining access.  base and size are part of the Linux API
 * and are read directly by callers; the diom_-prefixed members are
 * private to this implementation.
 */
struct io_mapping {
	bus_space_tag_t	diom_bst;	/* tag used for bus_space_mmap */
	bus_addr_t	base;		/* Linux API: region bus address */
	bus_size_t	size;		/* Linux API: region size in bytes */
	vaddr_t		diom_va;	/* one page of kva reserved at init,
					 * used by the atomic map/unmap pair */
	bus_size_t	diom_mapsize;	/* bytes currently mapped; 0 = none */
	bool		diom_atomic;	/* true if the active mapping was made
					 * by io_mapping_map_atomic_wc */
};
51
52 static inline bool
53 bus_space_io_mapping_init_wc(bus_space_tag_t bst, struct io_mapping *mapping,
54 bus_addr_t addr, bus_size_t size)
55 {
56 bus_size_t offset;
57
58 KASSERT(PAGE_SIZE <= size);
59 KASSERT(0 == (size & (PAGE_SIZE - 1)));
60 KASSERT(__type_fit(off_t, size));
61
62 /*
63 * XXX For x86: Reserve the region (bus_space_reserve) and set
64 * an MTRR to make it write-combining. Doesn't matter if we
65 * have PAT and we use pmap_kenter_pa, but matters if we don't
66 * have PAT or if we later make this use direct map.
67 */
68
69 /* Make sure the region is mappable. */
70 for (offset = 0; offset < size; offset += PAGE_SIZE) {
71 if (bus_space_mmap(bst, addr, offset, PROT_READ|PROT_WRITE,
72 BUS_SPACE_MAP_LINEAR|BUS_SPACE_MAP_PREFETCHABLE)
73 == (paddr_t)-1)
74 return false;
75 }
76
77 /* Initialize the mapping record. */
78 mapping->diom_bst = bst;
79 mapping->base = addr;
80 mapping->size = size;
81 mapping->diom_mapsize = 0;
82 mapping->diom_atomic = false;
83
84 /* Allocate kva for one page. */
85 mapping->diom_va = uvm_km_alloc(kernel_map, PAGE_SIZE, PAGE_SIZE,
86 UVM_KMF_VAONLY | UVM_KMF_WAITVA);
87 KASSERT(mapping->diom_va != 0);
88
89 return true;
90 }
91
92 static inline void
93 io_mapping_fini(struct io_mapping *mapping)
94 {
95
96 KASSERT(mapping->diom_mapsize == 0);
97 KASSERT(!mapping->diom_atomic);
98
99 uvm_km_free(kernel_map, mapping->diom_va, PAGE_SIZE, UVM_KMF_VAONLY);
100 mapping->diom_va = 0; /* paranoia */
101 }
102
103 static inline struct io_mapping *
104 bus_space_io_mapping_create_wc(bus_space_tag_t bst, bus_addr_t addr,
105 bus_size_t size)
106 {
107 struct io_mapping *mapping;
108
109 mapping = kmem_alloc(sizeof(*mapping), KM_SLEEP);
110 if (!bus_space_io_mapping_init_wc(bst, mapping, addr, size)) {
111 kmem_free(mapping, sizeof(*mapping));
112 return NULL;
113 }
114
115 return mapping;
116 }
117
118 static inline void
119 io_mapping_free(struct io_mapping *mapping)
120 {
121
122 io_mapping_fini(mapping);
123 kmem_free(mapping, sizeof(*mapping));
124 }
125
126 static inline void *
127 io_mapping_map_wc(struct io_mapping *mapping, bus_addr_t offset,
128 bus_size_t size)
129 {
130 bus_size_t pg, npgs = size >> PAGE_SHIFT;
131 vaddr_t va;
132 paddr_t cookie;
133
134 KASSERT(0 == (offset & (PAGE_SIZE - 1)));
135 KASSERT(PAGE_SIZE <= mapping->size);
136 KASSERT(offset <= (mapping->size - PAGE_SIZE));
137 KASSERT(__type_fit(off_t, offset));
138 KASSERT(mapping->diom_mapsize == 0);
139 KASSERT(!mapping->diom_atomic);
140
141 va = uvm_km_alloc(kernel_map, size, PAGE_SIZE,
142 UVM_KMF_VAONLY|UVM_KMF_WAITVA);
143 for (pg = 0; pg < npgs; pg++) {
144 cookie = bus_space_mmap(mapping->diom_bst, mapping->base,
145 offset + pg*PAGE_SIZE,
146 PROT_READ|PROT_WRITE,
147 BUS_SPACE_MAP_LINEAR|BUS_SPACE_MAP_PREFETCHABLE);
148 KASSERT(cookie != (paddr_t)-1);
149
150 pmap_kenter_pa(va, pmap_phys_address(cookie),
151 PROT_READ|PROT_WRITE, pmap_mmap_flags(cookie));
152 }
153 pmap_update(pmap_kernel());
154
155 mapping->diom_mapsize = size;
156 mapping->diom_atomic = false;
157 return (void *)va;
158 }
159
160 static inline void
161 io_mapping_unmap(struct io_mapping *mapping, void *ptr __diagused)
162 {
163 vaddr_t va = (vaddr_t)ptr;
164
165 KASSERT(mapping->diom_mapsize);
166 KASSERT(!mapping->diom_atomic);
167 KASSERT(mapping->diom_va != va);
168
169 pmap_kremove(va, PAGE_SIZE);
170 pmap_update(pmap_kernel());
171
172 uvm_km_free(kernel_map, va, mapping->diom_mapsize, UVM_KMF_VAONLY);
173
174 mapping->diom_mapsize = 0;
175 mapping->diom_atomic = false;
176 }
177
178 static inline void *
179 io_mapping_map_atomic_wc(struct io_mapping *mapping, bus_addr_t offset)
180 {
181 paddr_t cookie;
182
183 KASSERT(0 == (offset & (PAGE_SIZE - 1)));
184 KASSERT(PAGE_SIZE <= mapping->size);
185 KASSERT(offset <= (mapping->size - PAGE_SIZE));
186 KASSERT(__type_fit(off_t, offset));
187 KASSERT(mapping->diom_mapsize == 0);
188 KASSERT(!mapping->diom_atomic);
189
190 cookie = bus_space_mmap(mapping->diom_bst, mapping->base, offset,
191 PROT_READ|PROT_WRITE,
192 BUS_SPACE_MAP_LINEAR|BUS_SPACE_MAP_PREFETCHABLE);
193 KASSERT(cookie != (paddr_t)-1);
194
195 pmap_kenter_pa(mapping->diom_va, pmap_phys_address(cookie),
196 PROT_READ|PROT_WRITE, pmap_mmap_flags(cookie));
197 pmap_update(pmap_kernel());
198
199 mapping->diom_mapsize = PAGE_SIZE;
200 mapping->diom_atomic = true;
201 return (void *)mapping->diom_va;
202 }
203
204 static inline void
205 io_mapping_unmap_atomic(struct io_mapping *mapping, void *ptr __diagused)
206 {
207
208 KASSERT(mapping->diom_mapsize);
209 KASSERT(mapping->diom_atomic);
210 KASSERT(mapping->diom_va == (vaddr_t)ptr);
211
212 pmap_kremove(mapping->diom_va, PAGE_SIZE);
213 pmap_update(pmap_kernel());
214
215 mapping->diom_mapsize = 0;
216 mapping->diom_atomic = false;
217 }
218
219 #endif /* _LINUX_IO_MAPPING_H_ */
220