/*	$NetBSD: linux_kmap.c,v 1.14 2018/08/27 15:09:58 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_kmap.c,v 1.14 2018/08/27 15:09:58 riastradh Exp $");

#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/rbtree.h>
#include <sys/sdt.h>

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
#include <dev/mm.h>
#endif

#include <uvm/uvm_extern.h>

#include <linux/highmem.h>

#undef	linux_kmap		/* symbol vs trace point namespace clash */

SDT_PROBE_DEFINE2(sdt, linux, kmap, map,
    "paddr_t"/*paddr*/, "vaddr_t"/*vaddr*/);
SDT_PROBE_DEFINE2(sdt, linux, kmap, unmap,
    "paddr_t"/*paddr*/, "vaddr_t"/*vaddr*/);
SDT_PROBE_DEFINE2(sdt, linux, kmap, map__atomic,
    "paddr_t"/*paddr*/, "vaddr_t"/*vaddr*/);
SDT_PROBE_DEFINE2(sdt, linux, kmap, unmap__atomic,
    "paddr_t"/*paddr*/, "vaddr_t"/*vaddr*/);

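/*
 * Each probe fires with the physical and virtual address of the
 * mapping being created or torn down.  As an illustrative, untested
 * sketch -- assuming the stock sdt provider exposes the
 * module/function/name triple in the usual way -- a dtrace one-liner
 * along these lines could watch every non-direct-map kmap:
 *
 *	dtrace -n 'sdt:linux:kmap:map { printf("pa=%p va=%p", arg0, arg1); }'
 *
 * The exact probe spelling may differ on a given kernel.
 */
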
/*
 * XXX Kludgerific implementation of Linux kmap_atomic, which is
 * required not to fail.  To accommodate this, we reserve one page of
 * kva at boot (or load) and limit the system to at most one
 * kmap_atomic in use at a time.
 */
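
/*
 * Illustrative (hypothetical) caller, showing the intended pairing:
 * because only one atomic mapping exists system-wide, the window
 * between kmap_atomic and kunmap_atomic must be short and must not
 * sleep.
 *
 *	void *va = kmap_atomic(page);
 *	memcpy(buf, va, PAGE_SIZE);
 *	kunmap_atomic(va);
 */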

static kmutex_t linux_kmap_atomic_lock;
static vaddr_t linux_kmap_atomic_vaddr;

static kmutex_t linux_kmap_lock;
static rb_tree_t linux_kmap_entries;

struct linux_kmap_entry {
	paddr_t		lke_paddr;
	vaddr_t		lke_vaddr;
	unsigned int	lke_refcnt;
	rb_node_t	lke_node;
};

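/*
 * The sleepable kmap path tracks its mappings in a red-black tree of
 * linux_kmap_entry records keyed by physical address, so that kunmap
 * can recover the kva it must release from the page alone.  The
 * compare callbacks below order nodes by lke_paddr.
 */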
static int
lke_compare_nodes(void *ctx __unused, const void *an, const void *bn)
{
	const struct linux_kmap_entry *const a = an;
	const struct linux_kmap_entry *const b = bn;

	if (a->lke_paddr < b->lke_paddr)
		return -1;
	else if (a->lke_paddr > b->lke_paddr)
		return +1;
	else
		return 0;
}

static int
lke_compare_key(void *ctx __unused, const void *node, const void *key)
{
	const struct linux_kmap_entry *const lke = node;
	const paddr_t *const paddrp = key;

	if (lke->lke_paddr < *paddrp)
		return -1;
	else if (lke->lke_paddr > *paddrp)
		return +1;
	else
		return 0;
}

static const rb_tree_ops_t linux_kmap_entry_ops = {
	.rbto_compare_nodes = &lke_compare_nodes,
	.rbto_compare_key = &lke_compare_key,
	.rbto_node_offset = offsetof(struct linux_kmap_entry, lke_node),
	.rbto_context = NULL,
};

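/*
 * linux_kmap_init: reserve the single page of kva used by kmap_atomic
 * and set up the lock and tree behind the sleepable kmap path.  Always
 * returns 0: the UVM_KMF_WAITVA allocation waits rather than fails.
 */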
int
linux_kmap_init(void)
{

	/* IPL_VM since interrupt handlers use kmap_atomic.  */
	mutex_init(&linux_kmap_atomic_lock, MUTEX_DEFAULT, IPL_VM);

	linux_kmap_atomic_vaddr = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
	    (UVM_KMF_VAONLY | UVM_KMF_WAITVA));

	KASSERT(linux_kmap_atomic_vaddr != 0);
	KASSERT(!pmap_extract(pmap_kernel(), linux_kmap_atomic_vaddr, NULL));

	mutex_init(&linux_kmap_lock, MUTEX_DEFAULT, IPL_NONE);
	rb_tree_init(&linux_kmap_entries, &linux_kmap_entry_ops);

	return 0;
}

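/*
 * linux_kmap_fini: tear down what linux_kmap_init set up.  All
 * mappings must already have been released: the tree must be empty and
 * the reserved atomic page unmapped, which the assertions below check.
 */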
void
linux_kmap_fini(void)
{

	KASSERT(RB_TREE_MIN(&linux_kmap_entries) == NULL);
#if 0				/* XXX no rb_tree_destroy */
	rb_tree_destroy(&linux_kmap_entries);
#endif
	mutex_destroy(&linux_kmap_lock);

	KASSERT(linux_kmap_atomic_vaddr != 0);
	KASSERT(!pmap_extract(pmap_kernel(), linux_kmap_atomic_vaddr, NULL));

	uvm_km_free(kernel_map, linux_kmap_atomic_vaddr, PAGE_SIZE,
	    (UVM_KMF_VAONLY | UVM_KMF_WAITVA));

	mutex_destroy(&linux_kmap_atomic_lock);
}

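/*
 * kmap_atomic: map page into kva without sleeping; cannot fail.  If
 * the architecture direct-maps physical memory, return the existing
 * direct-map address.  Otherwise take the spin lock -- held until the
 * matching kunmap_atomic -- and enter the page at the one reserved
 * kva.  Callers must not sleep between kmap_atomic and kunmap_atomic.
 */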
void *
kmap_atomic(struct page *page)
{
	const paddr_t paddr = VM_PAGE_TO_PHYS(&page->p_vmp);
	vaddr_t vaddr;

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
	if (mm_md_direct_mapped_phys(paddr, &vaddr))
		goto out;
#endif

	mutex_spin_enter(&linux_kmap_atomic_lock);
	KASSERT(linux_kmap_atomic_vaddr != 0);
	KASSERT(!pmap_extract(pmap_kernel(), linux_kmap_atomic_vaddr, NULL));
	vaddr = linux_kmap_atomic_vaddr;
	pmap_kenter_pa(vaddr, paddr, (VM_PROT_READ | VM_PROT_WRITE), 0);
	pmap_update(pmap_kernel());

out:	SDT_PROBE2(sdt, linux, kmap, map__atomic, paddr, vaddr);
	return (void *)vaddr;
}

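/*
 * kunmap_atomic: release a mapping created by kmap_atomic.  A
 * direct-map address needs no teardown; otherwise remove the reserved
 * kva mapping and drop the spin lock taken in kmap_atomic, allowing
 * the next atomic mapping to proceed.
 */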
void
kunmap_atomic(void *addr)
{
	const vaddr_t vaddr = (vaddr_t)addr;
	paddr_t paddr;
	bool ok __diagused;

	ok = pmap_extract(pmap_kernel(), vaddr, &paddr);
	KASSERT(ok);

	SDT_PROBE2(sdt, linux, kmap, unmap__atomic, paddr, vaddr);

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
	{
		vaddr_t vaddr1;
		if (mm_md_direct_mapped_phys(paddr, &vaddr1) && vaddr1 == vaddr)
			return;
	}
#endif

	KASSERT(mutex_owned(&linux_kmap_atomic_lock));
	KASSERT(linux_kmap_atomic_vaddr == vaddr);

	pmap_kremove(vaddr, PAGE_SIZE);
	pmap_update(pmap_kernel());

	mutex_spin_exit(&linux_kmap_atomic_lock);
}

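/*
 * linux_kmap: sleepable variant of kmap_atomic.  Linux callers reach
 * this as kmap(); the #undef above suggests <linux/highmem.h> maps the
 * kmap symbol here to keep it out of the trace point namespace.  On
 * direct-map architectures this is free; otherwise allocate fresh kva,
 * record the paddr-to-vaddr association in the tree for kunmap, and
 * enter the mapping.  May sleep, so it must not be used from interrupt
 * context.  An illustrative (hypothetical) caller:
 *
 *	void *va = kmap(page);
 *	... use va, possibly sleeping ...
 *	kunmap(page);
 */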
void *
linux_kmap(struct page *page)
{
	const paddr_t paddr = VM_PAGE_TO_PHYS(&page->p_vmp);
	vaddr_t vaddr;

	ASSERT_SLEEPABLE();

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
	if (mm_md_direct_mapped_phys(paddr, &vaddr))
		goto out;
#endif

	vaddr = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
	    (UVM_KMF_VAONLY | UVM_KMF_WAITVA));
	KASSERT(vaddr != 0);

	struct linux_kmap_entry *const lke = kmem_alloc(sizeof(*lke),
	    KM_SLEEP);
	lke->lke_paddr = paddr;
	lke->lke_vaddr = vaddr;

	mutex_enter(&linux_kmap_lock);
	struct linux_kmap_entry *const collision __diagused =
	    rb_tree_insert_node(&linux_kmap_entries, lke);
	KASSERT(collision == lke);
	mutex_exit(&linux_kmap_lock);

	KASSERT(!pmap_extract(pmap_kernel(), vaddr, NULL));
	pmap_kenter_pa(vaddr, paddr, (VM_PROT_READ | VM_PROT_WRITE), 0);
	pmap_update(pmap_kernel());

out:	SDT_PROBE2(sdt, linux, kmap, map, paddr, vaddr);
	return (void *)vaddr;
}

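/*
 * kunmap: undo linux_kmap.  Look the entry up by physical address,
 * remove it from the tree, then tear down the pmap entry and return
 * the kva to the kernel map.  Like linux_kmap, this may sleep.
 */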
void
kunmap(struct page *page)
{
	const paddr_t paddr = VM_PAGE_TO_PHYS(&page->p_vmp);
	vaddr_t vaddr;

	ASSERT_SLEEPABLE();

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
	if (mm_md_direct_mapped_phys(paddr, &vaddr))
		goto out;
#endif

	mutex_enter(&linux_kmap_lock);
	struct linux_kmap_entry *const lke =
	    rb_tree_find_node(&linux_kmap_entries, &paddr);
	KASSERT(lke != NULL);
	rb_tree_remove_node(&linux_kmap_entries, lke);
	mutex_exit(&linux_kmap_lock);

	vaddr = lke->lke_vaddr;
	kmem_free(lke, sizeof(*lke));

	KASSERT(pmap_extract(pmap_kernel(), vaddr, NULL));

	pmap_kremove(vaddr, PAGE_SIZE);
	pmap_update(pmap_kernel());

	uvm_km_free(kernel_map, vaddr, PAGE_SIZE, UVM_KMF_VAONLY);

out:	SDT_PROBE2(sdt, linux, kmap, unmap, paddr, vaddr);
}