/*	$NetBSD: vmalloc.h,v 1.5 2018/08/06 00:30:07 riastradh Exp $	*/

/*-
 * Copyright (c) 2013, 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_VMALLOC_H_
#define _LINUX_VMALLOC_H_

#include <sys/malloc.h>

#include <uvm/uvm_extern.h>

#include <linux/mm_types.h>

#include <asm/page.h>

static inline bool
is_vmalloc_addr(void *addr)
{
	/* XXX Assumes vmalloc and kmalloc both use malloc(9).  */
	return true;
}

static inline void *
vmalloc(unsigned long size)
{
	return malloc(size, M_TEMP, M_WAITOK);
}

static inline void *
vmalloc_user(unsigned long size)
{
	return malloc(size, M_TEMP, (M_WAITOK | M_ZERO));
}

static inline void *
vzalloc(unsigned long size)
{
	return malloc(size, M_TEMP, (M_WAITOK | M_ZERO));
}

static inline void
vfree(void *ptr)
{
	/* Void function: no return of the (void) free expression.  */
	free(ptr, M_TEMP);
}
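
/*
 * Example: a minimal usage sketch of the allocators above; buf and
 * nelem are hypothetical names, not part of this header.  Since these
 * wrappers pass M_WAITOK, the allocation sleeps rather than failing,
 * so the NULL check only mirrors the Linux calling convention.
 *
 *	uint32_t *buf = vzalloc(nelem * sizeof(*buf));
 *	if (buf == NULL)
 *		return -ENOMEM;
 *	...
 *	vfree(buf);
 */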

#define	PAGE_KERNEL	UVM_PROT_RW

/*
 * vmap(pages, npages, flags, prot)
 *
 *	Map pages[0], pages[1], ..., pages[npages-1] into contiguous
 *	kernel virtual address space with the specified protection, and
 *	return a KVA pointer to the start.
 *
 *	prot may be a bitwise ior of UVM_PROT_READ/WRITE/EXEC and
 *	PMAP_* cache flags accepted by pmap_enter().
 */
static inline void *
vmap(struct page **pages, unsigned npages, unsigned long flags,
    pgprot_t protflags)
{
	vm_prot_t justprot = protflags & UVM_PROT_ALL;
	vaddr_t va;
	unsigned i;

	/* Allocate some KVA, or return NULL if we can't.  */
	va = uvm_km_alloc(kernel_map, (vsize_t)npages << PAGE_SHIFT, PAGE_SIZE,
	    UVM_KMF_VAONLY|UVM_KMF_NOWAIT);
	if (va == 0)
		return NULL;

	/* Ask pmap to map the KVA to the specified page addresses.  */
	for (i = 0; i < npages; i++) {
		pmap_kenter_pa(va + i*PAGE_SIZE, page_to_phys(pages[i]),
		    justprot, protflags);
	}

	/* Commit the pmap updates.  */
	pmap_update(pmap_kernel());

	return (void *)va;
}

/*
 * vunmap(ptr, npages)
 *
 *	Unmap the KVA pages starting at ptr that were mapped by a call
 *	to vmap with the same npages parameter.
 */
static inline void
vunmap(void *ptr, unsigned npages)
{
	vaddr_t va = (vaddr_t)ptr;

	/* Ask pmap to unmap the KVA.  */
	pmap_kremove(va, (vsize_t)npages << PAGE_SHIFT);

	/* Commit the pmap updates.  */
	pmap_update(pmap_kernel());

	/*
	 * Now that the pmap is no longer mapping the KVA we allocated
	 * on any CPU, it is safe to free the KVA.
	 */
	uvm_km_free(kernel_map, va, (vsize_t)npages << PAGE_SHIFT,
	    UVM_KMF_VAONLY);
}
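
/*
 * Example: a minimal sketch of pairing vmap with vunmap, assuming the
 * caller already has an array of struct page pointers; pages, npages,
 * and len are hypothetical names.  The same npages must be passed to
 * both calls, and since this vmap ignores the Linux flags argument,
 * 0 suffices there.
 *
 *	void *kva = vmap(pages, npages, 0, PAGE_KERNEL);
 *	if (kva == NULL)
 *		return -ENOMEM;
 *	memset(kva, 0, len);
 *	vunmap(kva, npages);
 */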

#endif	/* _LINUX_VMALLOC_H_ */