/*	$NetBSD: vmalloc.h,v 1.7 2018/08/27 14:40:56 riastradh Exp $	*/

/*-
 * Copyright (c) 2013, 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_VMALLOC_H_
#define _LINUX_VMALLOC_H_

#include <sys/malloc.h>

#include <uvm/uvm_extern.h>

#include <linux/mm_types.h>

#include <asm/page.h>

/*
 * XXX vmalloc and kmalloc both use malloc(9).  If you change this, be
 * sure to update kmalloc in <linux/slab.h> and kvfree in <linux/mm.h>.
 */

/*
 * is_vmalloc_addr(addr)
 *
 *	Trivially true: since vmalloc and kmalloc are both backed by
 *	malloc(9) (see XXX above), callers may treat any allocation
 *	as if it came from vmalloc.
 */
static inline bool
is_vmalloc_addr(void *addr)
{
	return true;
}
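
/*
 * An illustration of the dependency the XXX comment above describes
 * (a sketch only, not the actual <linux/mm.h> definition): a
 * kvfree-style caller dispatches on is_vmalloc_addr, like so:
 *
 *	static inline void
 *	kvfree(void *ptr)
 *	{
 *		if (is_vmalloc_addr(ptr))
 *			vfree(ptr);
 *		else
 *			kfree(ptr);
 *	}
 *
 * Because is_vmalloc_addr is always true here, every such free goes
 * through vfree, which is correct exactly as long as both allocators
 * remain malloc(9).
 */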

static inline void *
vmalloc(unsigned long size)
{
	return malloc(size, M_TEMP, M_WAITOK);
}

/*
 * Zeroed on purpose: Linux's vmalloc_user returns memory that may be
 * mapped into userland, so it must not leak stale kernel data.
 */
static inline void *
vmalloc_user(unsigned long size)
{
	return malloc(size, M_TEMP, (M_WAITOK | M_ZERO));
}

static inline void *
vzalloc(unsigned long size)
{
	return malloc(size, M_TEMP, (M_WAITOK | M_ZERO));
}

/*
 * Linux's vfree(NULL) is a no-op, so filter out NULL before handing
 * the pointer to free(9).
 */
static inline void
vfree(void *ptr)
{
	if (ptr == NULL)
		return;
	free(ptr, M_TEMP);
}
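
/*
 * Typical usage, for illustration only (the buffer and length names
 * are hypothetical):
 *
 *	void *buf = vzalloc(len);
 *	if (buf == NULL)
 *		return -ENOMEM;
 *	...
 *	vfree(buf);
 *
 * With M_WAITOK the allocation sleeps rather than failing, so the
 * NULL check here mirrors portable Linux code rather than a failure
 * mode of this implementation.
 */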

#define PAGE_KERNEL	UVM_PROT_RW

/*
 * vmap(pages, npages, flags, prot)
 *
 *	Map pages[0], pages[1], ..., pages[npages-1] into contiguous
 *	kernel virtual address space with the specified protection, and
 *	return a KVA pointer to the start.
 *
 *	prot may be a bitwise ior of UVM_PROT_READ/WRITE/EXEC and
 *	PMAP_* cache flags accepted by pmap_enter().
 *
 *	flags is accepted for source compatibility with Linux's vmap
 *	and is otherwise ignored here.
 */
static inline void *
vmap(struct page **pages, unsigned npages, unsigned long flags,
    pgprot_t protflags)
{
	vm_prot_t justprot = protflags & UVM_PROT_ALL;
	vaddr_t va;
	unsigned i;

	/* Allocate some KVA, or return NULL if we can't. */
	va = uvm_km_alloc(kernel_map, (vsize_t)npages << PAGE_SHIFT, PAGE_SIZE,
	    UVM_KMF_VAONLY|UVM_KMF_NOWAIT);
	if (va == 0)
		return NULL;

	/* Ask pmap to map the KVA to the specified page addresses. */
	for (i = 0; i < npages; i++) {
		pmap_kenter_pa(va + i*PAGE_SIZE, page_to_phys(pages[i]),
		    justprot, protflags);
	}

	/* Commit the pmap updates. */
	pmap_update(pmap_kernel());

	return (void *)va;
}

/*
 * vunmap(ptr, npages)
 *
 *	Unmap the KVA pages starting at ptr that were mapped by a call
 *	to vmap with the same npages parameter.
 */
static inline void
vunmap(void *ptr, unsigned npages)
{
	vaddr_t va = (vaddr_t)ptr;

	/* Ask pmap to unmap the KVA. */
	pmap_kremove(va, (vsize_t)npages << PAGE_SHIFT);

	/* Commit the pmap updates. */
	pmap_update(pmap_kernel());

	/*
	 * Now that the pmap is no longer mapping the KVA we allocated
	 * on any CPU, it is safe to free the KVA.
	 */
	uvm_km_free(kernel_map, va, (vsize_t)npages << PAGE_SHIFT,
	    UVM_KMF_VAONLY);
}
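
/*
 * Example round trip through vmap/vunmap, for illustration only (the
 * page array, NPAGES, and their origin are hypothetical; PMAP_NOCACHE
 * is one of the MI pmap cache flags and may be ior'd into prot where
 * the pmap honors it):
 *
 *	struct page *pgs[NPAGES];
 *	void *kva;
 *
 *	kva = vmap(pgs, NPAGES, 0, PAGE_KERNEL);
 *	if (kva == NULL)
 *		return -ENOMEM;
 *	memset(kva, 0, (size_t)NPAGES << PAGE_SHIFT);
 *	vunmap(kva, NPAGES);
 *
 * Note that vmap allocates KVA with UVM_KMF_NOWAIT, so unlike the
 * allocators above it can genuinely fail and the NULL check is
 * required.
 */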

#endif	/* _LINUX_VMALLOC_H_ */