/*	$NetBSD: vmalloc.h,v 1.6 2018/08/27 13:44:54 riastradh Exp $	*/

/*-
 * Copyright (c) 2013, 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_VMALLOC_H_
#define _LINUX_VMALLOC_H_

#include <sys/malloc.h>

#include <uvm/uvm_extern.h>

#include <linux/mm_types.h>

#include <asm/page.h>

/*
 * XXX vmalloc and kmalloc both use malloc(9).  If you change this, be
 * sure to update kmalloc in <linux/slab.h> and kvfree in <linux/mm.h>.
 */
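
/*
 * For example, the shared malloc(9) backend is what makes the
 * following safe (a sketch, not code in this file; kmalloc and kvfree
 * live in <linux/slab.h> and <linux/mm.h> as noted above):
 *
 *	void *p = kmalloc(len, GFP_KERNEL);
 *	void *q = vmalloc(len);
 *	kvfree(p);		// both pointers came from malloc(9),
 *	kvfree(q);		// so kvfree can free either one
 */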
/*
 * Since vmalloc and kmalloc above both use malloc(9), every
 * allocation looks like a vmalloc address for kvfree's purposes.
 */
static inline bool
is_vmalloc_addr(void *addr)
{
	return true;
}

static inline void *
vmalloc(unsigned long size)
{
	return malloc(size, M_TEMP, M_WAITOK);
}

static inline void *
vmalloc_user(unsigned long size)
{
	return malloc(size, M_TEMP, (M_WAITOK | M_ZERO));
}

static inline void *
vzalloc(unsigned long size)
{
	return malloc(size, M_TEMP, (M_WAITOK | M_ZERO));
}

static inline void
vfree(void *ptr)
{
	/* Linux's vfree accepts NULL; free(9) may not, so guard it.  */
	if (ptr == NULL)
		return;
	free(ptr, M_TEMP);
}
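
/*
 * Usage sketch (hypothetical caller): these wrappers sleep with
 * M_WAITOK and so never return NULL here, but Linux-derived callers
 * conventionally check anyway.
 *
 *	struct foo *tab = vzalloc(n * sizeof(*tab));
 *	if (tab == NULL)
 *		return -ENOMEM;
 *	...
 *	vfree(tab);
 */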

#define	PAGE_KERNEL	UVM_PROT_RW

/*
 * vmap(pages, npages, flags, prot)
 *
 *	Map pages[0], pages[1], ..., pages[npages-1] into contiguous
 *	kernel virtual address space with the specified protection, and
 *	return a KVA pointer to the start.
 *
 *	prot may be a bitwise ior of UVM_PROT_READ/WRITE/EXEC and
 *	PMAP_* cache flags accepted by pmap_enter().
 */
static inline void *
vmap(struct page **pages, unsigned npages, unsigned long flags,
    pgprot_t protflags)
{
	vm_prot_t justprot = protflags & UVM_PROT_ALL;
	vaddr_t va;
	unsigned i;

	/* Allocate some KVA, or return NULL if we can't.  */
	va = uvm_km_alloc(kernel_map, (vsize_t)npages << PAGE_SHIFT, PAGE_SIZE,
	    UVM_KMF_VAONLY|UVM_KMF_NOWAIT);
	if (va == 0)
		return NULL;

	/* Ask pmap to map the KVA to the specified page addresses.  */
	for (i = 0; i < npages; i++) {
		pmap_kenter_pa(va + i*PAGE_SIZE, page_to_phys(pages[i]),
		    justprot, protflags);
	}

	/* Commit the pmap updates.  */
	pmap_update(pmap_kernel());

	return (void *)va;
}

/*
 * vunmap(ptr, npages)
 *
 *	Unmap the KVA pages starting at ptr that were mapped by a call
 *	to vmap with the same npages parameter.
 */
static inline void
vunmap(void *ptr, unsigned npages)
{
	vaddr_t va = (vaddr_t)ptr;

	/* Ask pmap to unmap the KVA.  */
	pmap_kremove(va, (vsize_t)npages << PAGE_SHIFT);

	/* Commit the pmap updates.  */
	pmap_update(pmap_kernel());

	/*
	 * Now that the pmap is no longer mapping the KVA we allocated
	 * on any CPU, it is safe to free the KVA.
	 */
	uvm_km_free(kernel_map, va, (vsize_t)npages << PAGE_SHIFT,
	    UVM_KMF_VAONLY);
}
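
/*
 * Usage sketch (hypothetical caller): map an array of struct page
 * pointers into one contiguous KVA window, use it, and tear it down
 * with the same page count.  vmap can fail because the KVA allocation
 * above uses UVM_KMF_NOWAIT.
 *
 *	void *kva = vmap(pages, npages, 0, PAGE_KERNEL);
 *	if (kva == NULL)
 *		return -ENOMEM;
 *	memset(kva, 0, (size_t)npages << PAGE_SHIFT);
 *	vunmap(kva, npages);
 */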

#endif	/* _LINUX_VMALLOC_H_ */