/*	$NetBSD: vmalloc.h,v 1.10 2021/12/19 10:51:24 riastradh Exp $	*/

/*-
 * Copyright (c) 2013, 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_VMALLOC_H_
#define _LINUX_VMALLOC_H_

#include <sys/malloc.h>

#include <uvm/uvm_extern.h>

#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/overflow.h>

#include <asm/page.h>

struct notifier_block;

/*
 * XXX vmalloc and kmalloc both use malloc(9).  If you change this, be
 * sure to update kmalloc in <linux/slab.h> and kvfree in <linux/mm.h>.
 */

/*
 * vmalloc and kmalloc both go through malloc(9) here, so there is no
 * separate vmalloc arena and every pointer is treated as a vmalloc
 * address.
 */
static inline bool
is_vmalloc_addr(void *addr)
{
	return true;
}

static inline void *
vmalloc(unsigned long size)
{
	return malloc(size, M_TEMP, M_WAITOK);
}

static inline void *
vmalloc_user(unsigned long size)
{
	return malloc(size, M_TEMP, (M_WAITOK | M_ZERO));
}

static inline void *
vzalloc(unsigned long size)
{
	return malloc(size, M_TEMP, (M_WAITOK | M_ZERO));
}

static inline void
vfree(void *ptr)
{
	if (ptr == NULL)
		return;
	free(ptr, M_TEMP);
}
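
/*
 * Illustrative sketch (not part of this header): since the wrappers
 * above all go through malloc(9), a typical caller allocates, checks
 * for failure in the usual Linux style, and releases with vfree.  The
 * names buf and nbytes are hypothetical.
 *
 *	void *buf = vzalloc(nbytes);
 *	if (buf == NULL)
 *		return -ENOMEM;
 *	...use buf...
 *	vfree(buf);
 */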

#define	PAGE_KERNEL	UVM_PROT_RW

/*
 * vmap(pages, npages, flags, prot)
 *
 *	Map pages[0], pages[1], ..., pages[npages-1] into contiguous
 *	kernel virtual address space with the specified protection, and
 *	return a KVA pointer to the start.
 *
 *	prot may be a bitwise ior of UVM_PROT_READ/WRITE/EXEC and
 *	PMAP_* cache flags accepted by pmap_enter().
 */
static inline void *
vmap(struct page **pages, unsigned npages, unsigned long flags,
    pgprot_t protflags)
{
	vm_prot_t justprot = protflags & UVM_PROT_ALL;
	vaddr_t va;
	unsigned i;

	/* Allocate some KVA, or return NULL if we can't.  */
	va = uvm_km_alloc(kernel_map, (vsize_t)npages << PAGE_SHIFT, PAGE_SIZE,
	    UVM_KMF_VAONLY|UVM_KMF_NOWAIT);
	if (va == 0)
		return NULL;

	/* Ask pmap to map the KVA to the specified page addresses.  */
	for (i = 0; i < npages; i++) {
		pmap_kenter_pa(va + i*PAGE_SIZE, page_to_phys(pages[i]),
		    justprot, protflags);
	}

	/* Commit the pmap updates.  */
	pmap_update(pmap_kernel());

	return (void *)va;
}
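
/*
 * Illustrative sketch (not part of this header): map an array of
 * struct page pointers read/write with PAGE_KERNEL; the prot argument
 * may also carry a machine-dependent PMAP_* cache flag on ports that
 * define one.  The names pages and npages are hypothetical.
 *
 *	void *kva = vmap(pages, npages, 0, PAGE_KERNEL);
 *	if (kva == NULL)
 *		return -ENOMEM;
 */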

/*
 * vunmap(ptr, npages)
 *
 *	Unmap the KVA pages starting at ptr that were mapped by a call
 *	to vmap with the same npages parameter.
 */
static inline void
vunmap(void *ptr, unsigned npages)
{
	vaddr_t va = (vaddr_t)ptr;

	/* Ask pmap to unmap the KVA.  */
	pmap_kremove(va, (vsize_t)npages << PAGE_SHIFT);

	/* Commit the pmap updates.  */
	pmap_update(pmap_kernel());

	/*
	 * Now that the pmap is no longer mapping the KVA we allocated
	 * on any CPU, it is safe to free the KVA.
	 */
	uvm_km_free(kernel_map, va, (vsize_t)npages << PAGE_SHIFT,
	    UVM_KMF_VAONLY);
}
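
/*
 * Illustrative sketch (not part of this header): a mapping created by
 * vmap must be torn down with vunmap using the same page count.
 * Continuing the hypothetical names from the vmap sketch above:
 *
 *	vunmap(kva, npages);
 *	kva = NULL;
 */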

static inline int
register_vmap_purge_notifier(struct notifier_block *nb __unused)
{
	return 0;
}

static inline int
unregister_vmap_purge_notifier(struct notifier_block *nb __unused)
{
	return 0;
}

#endif	/* _LINUX_VMALLOC_H_ */