/*	$NetBSD: linux_kmap.c,v 1.16 2018/08/27 15:28:53 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_kmap.c,v 1.16 2018/08/27 15:28:53 riastradh Exp $");

#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/rbtree.h>
#include <sys/sdt.h>

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
#include <dev/mm.h>
#endif

#include <uvm/uvm_extern.h>

#include <linux/highmem.h>

SDT_PROBE_DEFINE2(sdt, linux, kmap, map,
    "paddr_t"/*paddr*/, "vaddr_t"/*vaddr*/);
SDT_PROBE_DEFINE2(sdt, linux, kmap, unmap,
    "paddr_t"/*paddr*/, "vaddr_t"/*vaddr*/);
SDT_PROBE_DEFINE2(sdt, linux, kmap, map__atomic,
    "paddr_t"/*paddr*/, "vaddr_t"/*vaddr*/);
SDT_PROBE_DEFINE2(sdt, linux, kmap, unmap__atomic,
    "paddr_t"/*paddr*/, "vaddr_t"/*vaddr*/);

/*
 * XXX Kludgerific implementation of Linux kmap_atomic, which is
 * required not to fail.  To accommodate this, we reserve one page of
 * kva at boot (or load) and limit the system to at most one
 * kmap_atomic mapping in use at a time.
 */
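/*
 * Illustrative usage, following the usual Linux highmem idiom (a
 * sketch, not code from this driver; `buf' and the copy size are
 * hypothetical):
 *
 *	void *va = kmap_atomic(page);
 *	memcpy(va, buf, PAGE_SIZE);
 *	kunmap_atomic(va);
 *
 * Because a single reserved kva page backs the non-direct-map path, a
 * caller must not sleep or start a second kmap_atomic between the map
 * and the unmap, or it may deadlock on linux_kmap_atomic_lock.
 */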

static kmutex_t linux_kmap_atomic_lock;
static vaddr_t linux_kmap_atomic_vaddr;

static kmutex_t linux_kmap_lock;
static rb_tree_t linux_kmap_entries;

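/*
 * One entry per live kmap() mapping that went through the kva
 * reservation path, keyed by physical address in the
 * linux_kmap_entries tree.  (lke_refcnt appears unused in this
 * version of the file.)
 */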
struct linux_kmap_entry {
	paddr_t		lke_paddr;
	vaddr_t		lke_vaddr;
	unsigned int	lke_refcnt;
	rb_node_t	lke_node;
};

static int
lke_compare_nodes(void *ctx __unused, const void *an, const void *bn)
{
	const struct linux_kmap_entry *const a = an;
	const struct linux_kmap_entry *const b = bn;

	if (a->lke_paddr < b->lke_paddr)
		return -1;
	else if (a->lke_paddr > b->lke_paddr)
		return +1;
	else
		return 0;
}

static int
lke_compare_key(void *ctx __unused, const void *node, const void *key)
{
	const struct linux_kmap_entry *const lke = node;
	const paddr_t *const paddrp = key;

	if (lke->lke_paddr < *paddrp)
		return -1;
	else if (lke->lke_paddr > *paddrp)
		return +1;
	else
		return 0;
}

static const rb_tree_ops_t linux_kmap_entry_ops = {
	.rbto_compare_nodes = &lke_compare_nodes,
	.rbto_compare_key = &lke_compare_key,
	.rbto_node_offset = offsetof(struct linux_kmap_entry, lke_node),
	.rbto_context = NULL,
};
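/*
 * With these ops the tree can be searched using a bare paddr_t as the
 * key, as kunmap() does below:
 *
 *	struct linux_kmap_entry *lke =
 *	    rb_tree_find_node(&linux_kmap_entries, &paddr);
 */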

int
linux_kmap_init(void)
{

	/* IPL_VM since interrupt handlers use kmap_atomic.  */
	mutex_init(&linux_kmap_atomic_lock, MUTEX_DEFAULT, IPL_VM);

	linux_kmap_atomic_vaddr = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
	    (UVM_KMF_VAONLY | UVM_KMF_WAITVA));

	KASSERT(linux_kmap_atomic_vaddr != 0);
	KASSERT(!pmap_extract(pmap_kernel(), linux_kmap_atomic_vaddr, NULL));

	mutex_init(&linux_kmap_lock, MUTEX_DEFAULT, IPL_NONE);
	rb_tree_init(&linux_kmap_entries, &linux_kmap_entry_ops);

	return 0;
}

void
linux_kmap_fini(void)
{

	KASSERT(RB_TREE_MIN(&linux_kmap_entries) == NULL);
#if 0				/* XXX no rb_tree_destroy */
	rb_tree_destroy(&linux_kmap_entries);
#endif
	mutex_destroy(&linux_kmap_lock);

	KASSERT(linux_kmap_atomic_vaddr != 0);
	KASSERT(!pmap_extract(pmap_kernel(), linux_kmap_atomic_vaddr, NULL));

	/* UVM_KMF_WAITVA is an allocation-time flag; free with VAONLY.  */
	uvm_km_free(kernel_map, linux_kmap_atomic_vaddr, PAGE_SIZE,
	    UVM_KMF_VAONLY);

	mutex_destroy(&linux_kmap_atomic_lock);
}

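/*
 * Map a single page for short-term access without sleeping.  The
 * direct map is used when available; otherwise the one reserved kva
 * page is mapped under linux_kmap_atomic_lock, which stays held until
 * the matching kunmap_atomic() call.
 */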
void *
kmap_atomic(struct page *page)
{
	const paddr_t paddr = VM_PAGE_TO_PHYS(&page->p_vmp);
	vaddr_t vaddr;

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
	if (mm_md_direct_mapped_phys(paddr, &vaddr))
		goto out;
#endif

	mutex_spin_enter(&linux_kmap_atomic_lock);
	KASSERT(linux_kmap_atomic_vaddr != 0);
	KASSERT(!pmap_extract(pmap_kernel(), linux_kmap_atomic_vaddr, NULL));
	vaddr = linux_kmap_atomic_vaddr;
	pmap_kenter_pa(vaddr, paddr, (VM_PROT_READ | VM_PROT_WRITE), 0);
	pmap_update(pmap_kernel());

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
out:
#endif
	SDT_PROBE2(sdt, linux, kmap, map__atomic,  paddr, vaddr);
	return (void *)vaddr;
}

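/*
 * Undo kmap_atomic().  The argument must be the pointer that
 * kmap_atomic() returned; in the reserved-page case this also drops
 * linux_kmap_atomic_lock taken in kmap_atomic().
 */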
void
kunmap_atomic(void *addr)
{
	const vaddr_t vaddr = (vaddr_t)addr;
	paddr_t paddr;
	bool ok __diagused;

	ok = pmap_extract(pmap_kernel(), vaddr, &paddr);
	KASSERT(ok);

	SDT_PROBE2(sdt, linux, kmap, unmap__atomic,  paddr, vaddr);

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
    {
	vaddr_t vaddr1;
	if (mm_md_direct_mapped_phys(paddr, &vaddr1) && vaddr1 == vaddr)
		return;
    }
#endif

	KASSERT(mutex_owned(&linux_kmap_atomic_lock));
	KASSERT(linux_kmap_atomic_vaddr == vaddr);

	pmap_kremove(vaddr, PAGE_SIZE);
	pmap_update(pmap_kernel());

	mutex_spin_exit(&linux_kmap_atomic_lock);
}

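/*
 * Map a page for long-term access; may sleep.  A typical caller
 * pattern (a sketch; `p' is hypothetical):
 *
 *	char *p = kmap(page);
 *	... use p[0..PAGE_SIZE-1], possibly sleeping ...
 *	kunmap(page);
 */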
void *
kmap(struct page *page)
{
	const paddr_t paddr = VM_PAGE_TO_PHYS(&page->p_vmp);
	vaddr_t vaddr;

	ASSERT_SLEEPABLE();

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
	if (mm_md_direct_mapped_phys(paddr, &vaddr))
		goto out;
#endif

	vaddr = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
	    (UVM_KMF_VAONLY | UVM_KMF_WAITVA));
	KASSERT(vaddr != 0);

	struct linux_kmap_entry *const lke = kmem_alloc(sizeof(*lke),
	    KM_SLEEP);
	lke->lke_paddr = paddr;
	lke->lke_vaddr = vaddr;

	mutex_enter(&linux_kmap_lock);
	struct linux_kmap_entry *const collision __diagused =
	    rb_tree_insert_node(&linux_kmap_entries, lke);
	KASSERT(collision == lke);
	mutex_exit(&linux_kmap_lock);

	KASSERT(!pmap_extract(pmap_kernel(), vaddr, NULL));
	pmap_kenter_pa(vaddr, paddr, (VM_PROT_READ | VM_PROT_WRITE), 0);
	pmap_update(pmap_kernel());

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
out:
#endif
	SDT_PROBE2(sdt, linux, kmap, map,  paddr, vaddr);
	return (void *)vaddr;
}

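/*
 * Undo kmap().  Note that, unlike kunmap_atomic(), this takes the
 * struct page rather than the mapped address; the kva is recovered
 * from the linux_kmap_entries tree.
 */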
void
kunmap(struct page *page)
{
	const paddr_t paddr = VM_PAGE_TO_PHYS(&page->p_vmp);
	vaddr_t vaddr;

	ASSERT_SLEEPABLE();

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
	if (mm_md_direct_mapped_phys(paddr, &vaddr))
		goto out;
#endif

	mutex_enter(&linux_kmap_lock);
	struct linux_kmap_entry *const lke =
	    rb_tree_find_node(&linux_kmap_entries, &paddr);
	KASSERT(lke != NULL);
	rb_tree_remove_node(&linux_kmap_entries, lke);
	mutex_exit(&linux_kmap_lock);

	vaddr = lke->lke_vaddr;
	kmem_free(lke, sizeof(*lke));

	KASSERT(pmap_extract(pmap_kernel(), vaddr, NULL));

	pmap_kremove(vaddr, PAGE_SIZE);
	pmap_update(pmap_kernel());

	uvm_km_free(kernel_map, vaddr, PAGE_SIZE, UVM_KMF_VAONLY);

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
out:
#endif
	SDT_PROBE2(sdt, linux, kmap, unmap,  paddr, vaddr);
}
    279