/*	$NetBSD: linux_kmap.c,v 1.16 2018/08/27 15:28:53 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_kmap.c,v 1.16 2018/08/27 15:28:53 riastradh Exp $");

#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/rbtree.h>
#include <sys/sdt.h>

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
#include <dev/mm.h>
#endif

#include <uvm/uvm_extern.h>

#include <linux/highmem.h>

SDT_PROBE_DEFINE2(sdt, linux, kmap, map,
    "paddr_t"/*paddr*/, "vaddr_t"/*vaddr*/);
SDT_PROBE_DEFINE2(sdt, linux, kmap, unmap,
    "paddr_t"/*paddr*/, "vaddr_t"/*vaddr*/);
SDT_PROBE_DEFINE2(sdt, linux, kmap, map__atomic,
    "paddr_t"/*paddr*/, "vaddr_t"/*vaddr*/);
SDT_PROBE_DEFINE2(sdt, linux, kmap, unmap__atomic,
    "paddr_t"/*paddr*/, "vaddr_t"/*vaddr*/);

/*
 * XXX Kludgerific implementation of Linux kmap_atomic, which is
 * required not to fail.  To accommodate this, we reserve one page of
 * kva at boot (or load) and limit the system to at most one
 * kmap_atomic in use at a time.
 */
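
/*
 * Illustrative usage only, not part of this file: a hypothetical
 * caller, sketched to show the pattern the kludge above must support.
 * `page' and `buf' are assumptions for the sake of the example.
 *
 *	void *va = kmap_atomic(page);	// must not fail, may not sleep
 *	memcpy(buf, va, PAGE_SIZE);
 *	kunmap_atomic(va);		// releases the one reserved page
 *
 * Because only one page of kva is reserved, a second concurrent
 * kmap_atomic (on hardware without a direct map) spins on
 * linux_kmap_atomic_lock until the first is unmapped.
 */
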
63 1.2 riastrad */ 64 1.2 riastrad 65 1.2 riastrad static kmutex_t linux_kmap_atomic_lock; 66 1.2 riastrad static vaddr_t linux_kmap_atomic_vaddr; 67 1.2 riastrad 68 1.2 riastrad static kmutex_t linux_kmap_lock; 69 1.2 riastrad static rb_tree_t linux_kmap_entries; 70 1.2 riastrad 71 1.2 riastrad struct linux_kmap_entry { 72 1.2 riastrad paddr_t lke_paddr; 73 1.2 riastrad vaddr_t lke_vaddr; 74 1.2 riastrad unsigned int lke_refcnt; 75 1.2 riastrad rb_node_t lke_node; 76 1.2 riastrad }; 77 1.2 riastrad 78 1.2 riastrad static int 79 1.2 riastrad lke_compare_nodes(void *ctx __unused, const void *an, const void *bn) 80 1.2 riastrad { 81 1.2 riastrad const struct linux_kmap_entry *const a = an; 82 1.2 riastrad const struct linux_kmap_entry *const b = bn; 83 1.2 riastrad 84 1.2 riastrad if (a->lke_paddr < b->lke_paddr) 85 1.2 riastrad return -1; 86 1.2 riastrad else if (a->lke_paddr > b->lke_paddr) 87 1.2 riastrad return +1; 88 1.2 riastrad else 89 1.2 riastrad return 0; 90 1.2 riastrad } 91 1.2 riastrad 92 1.2 riastrad static int 93 1.2 riastrad lke_compare_key(void *ctx __unused, const void *node, const void *key) 94 1.2 riastrad { 95 1.2 riastrad const struct linux_kmap_entry *const lke = node; 96 1.2 riastrad const paddr_t *const paddrp = key; 97 1.2 riastrad 98 1.2 riastrad if (lke->lke_paddr < *paddrp) 99 1.2 riastrad return -1; 100 1.2 riastrad else if (lke->lke_paddr > *paddrp) 101 1.2 riastrad return +1; 102 1.2 riastrad else 103 1.2 riastrad return 0; 104 1.2 riastrad } 105 1.2 riastrad 106 1.2 riastrad static const rb_tree_ops_t linux_kmap_entry_ops = { 107 1.2 riastrad .rbto_compare_nodes = &lke_compare_nodes, 108 1.2 riastrad .rbto_compare_key = &lke_compare_key, 109 1.2 riastrad .rbto_node_offset = offsetof(struct linux_kmap_entry, lke_node), 110 1.2 riastrad .rbto_context = NULL, 111 1.2 riastrad }; 112 1.2 riastrad 113 1.2 riastrad int 114 1.2 riastrad linux_kmap_init(void) 115 1.2 riastrad { 116 1.2 riastrad 117 1.12 mrg /* IPL_VM since interrupt handlers use kmap_atomic. 
int
linux_kmap_init(void)
{

	/* IPL_VM since interrupt handlers use kmap_atomic.  */
	mutex_init(&linux_kmap_atomic_lock, MUTEX_DEFAULT, IPL_VM);

	linux_kmap_atomic_vaddr = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
	    (UVM_KMF_VAONLY | UVM_KMF_WAITVA));

	KASSERT(linux_kmap_atomic_vaddr != 0);
	KASSERT(!pmap_extract(pmap_kernel(), linux_kmap_atomic_vaddr, NULL));

	mutex_init(&linux_kmap_lock, MUTEX_DEFAULT, IPL_NONE);
	rb_tree_init(&linux_kmap_entries, &linux_kmap_entry_ops);

	return 0;
}

void
linux_kmap_fini(void)
{

	KASSERT(RB_TREE_MIN(&linux_kmap_entries) == NULL);
#if 0				/* XXX no rb_tree_destroy */
	rb_tree_destroy(&linux_kmap_entries);
#endif
	mutex_destroy(&linux_kmap_lock);

	KASSERT(linux_kmap_atomic_vaddr != 0);
	KASSERT(!pmap_extract(pmap_kernel(), linux_kmap_atomic_vaddr, NULL));

	uvm_km_free(kernel_map, linux_kmap_atomic_vaddr, PAGE_SIZE,
	    (UVM_KMF_VAONLY | UVM_KMF_WAITVA));

	mutex_destroy(&linux_kmap_atomic_lock);
}

void *
kmap_atomic(struct page *page)
{
	const paddr_t paddr = uvm_vm_page_to_phys(&page->p_vmp);
	vaddr_t vaddr;

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
	if (mm_md_direct_mapped_phys(paddr, &vaddr))
		goto out;
#endif

	mutex_spin_enter(&linux_kmap_atomic_lock);
	KASSERT(linux_kmap_atomic_vaddr != 0);
	KASSERT(!pmap_extract(pmap_kernel(), linux_kmap_atomic_vaddr, NULL));
	vaddr = linux_kmap_atomic_vaddr;
	pmap_kenter_pa(vaddr, paddr, (VM_PROT_READ | VM_PROT_WRITE), 0);
	pmap_update(pmap_kernel());

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
out:
#endif
	SDT_PROBE2(sdt, linux, kmap, map__atomic, paddr, vaddr);
	return (void *)vaddr;
}

void
kunmap_atomic(void *addr)
{
	const vaddr_t vaddr = (vaddr_t)addr;
	paddr_t paddr;
	bool ok __diagused;

	ok = pmap_extract(pmap_kernel(), vaddr, &paddr);
	KASSERT(ok);

	SDT_PROBE2(sdt, linux, kmap, unmap__atomic, paddr, vaddr);

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
	{
		vaddr_t vaddr1;

		if (mm_md_direct_mapped_phys(paddr, &vaddr1) &&
		    vaddr1 == vaddr)
			return;
	}
#endif

	KASSERT(mutex_owned(&linux_kmap_atomic_lock));
	KASSERT(linux_kmap_atomic_vaddr == vaddr);

	pmap_kremove(vaddr, PAGE_SIZE);
	pmap_update(pmap_kernel());

	mutex_spin_exit(&linux_kmap_atomic_lock);
}
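
/*
 * Sleepable variant.  Unlike kmap_atomic above, kmap may sleep: it
 * allocates fresh kva per page (when the architecture lacks a direct
 * map) and records the paddr -> vaddr mapping in linux_kmap_entries so
 * that kunmap can find it again by physical address alone.
 */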
void *
kmap(struct page *page)
{
	const paddr_t paddr = VM_PAGE_TO_PHYS(&page->p_vmp);
	vaddr_t vaddr;

	ASSERT_SLEEPABLE();

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
	if (mm_md_direct_mapped_phys(paddr, &vaddr))
		goto out;
#endif

	vaddr = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
	    (UVM_KMF_VAONLY | UVM_KMF_WAITVA));
	KASSERT(vaddr != 0);

	struct linux_kmap_entry *const lke = kmem_alloc(sizeof(*lke),
	    KM_SLEEP);
	lke->lke_paddr = paddr;
	lke->lke_vaddr = vaddr;

	mutex_enter(&linux_kmap_lock);
	struct linux_kmap_entry *const collision __diagused =
	    rb_tree_insert_node(&linux_kmap_entries, lke);
	KASSERT(collision == lke);
	mutex_exit(&linux_kmap_lock);

	KASSERT(!pmap_extract(pmap_kernel(), vaddr, NULL));
	pmap_kenter_pa(vaddr, paddr, (VM_PROT_READ | VM_PROT_WRITE), 0);
	pmap_update(pmap_kernel());

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
out:
#endif
	SDT_PROBE2(sdt, linux, kmap, map, paddr, vaddr);
	return (void *)vaddr;
}

void
kunmap(struct page *page)
{
	const paddr_t paddr = VM_PAGE_TO_PHYS(&page->p_vmp);
	vaddr_t vaddr;

	ASSERT_SLEEPABLE();

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
	if (mm_md_direct_mapped_phys(paddr, &vaddr))
		goto out;
#endif

	mutex_enter(&linux_kmap_lock);
	struct linux_kmap_entry *const lke =
	    rb_tree_find_node(&linux_kmap_entries, &paddr);
	KASSERT(lke != NULL);
	rb_tree_remove_node(&linux_kmap_entries, lke);
	mutex_exit(&linux_kmap_lock);

	vaddr = lke->lke_vaddr;
	kmem_free(lke, sizeof(*lke));

	KASSERT(pmap_extract(pmap_kernel(), vaddr, NULL));

	pmap_kremove(vaddr, PAGE_SIZE);
	pmap_update(pmap_kernel());

	uvm_km_free(kernel_map, vaddr, PAGE_SIZE, UVM_KMF_VAONLY);

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
out:
#endif
	SDT_PROBE2(sdt, linux, kmap, unmap, paddr, vaddr);
}
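
/*
 * Usage sketch for the sleepable API (hypothetical caller; the page
 * and the store through `va' are illustrative assumptions):
 *
 *	char *va = kmap(page);		// may sleep in uvm_km_alloc
 *	va[0] = 0xff;
 *	kunmap(page);			// note: takes the page, not va
 *
 * kunmap is keyed by the struct page (hence the rb-tree lookup by
 * paddr), matching the Linux API, whereas kunmap_atomic takes the
 * virtual address returned by kmap_atomic.
 */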