/*	$NetBSD: mm.h,v 1.22 2021/12/19 11:46:58 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_MM_H_
#define _LINUX_MM_H_

#include <sys/malloc.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_object.h>

#include <asm/page.h>
#include <linux/shrinker.h>
#include <linux/slab.h>
#include <linux/sizes.h>

struct file;

/* XXX Ugh bletch!  Whattakludge!  Linux's sense is reversed...  */
#undef PAGE_MASK
#define PAGE_MASK		(~(PAGE_SIZE-1))

#define PAGE_ALIGN(x)		(((x) + (PAGE_SIZE-1)) & ~(PAGE_SIZE-1))
#define offset_in_page(x)	((uintptr_t)(x) & (PAGE_SIZE-1))
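
/*
 * Worked example (illustrative only), assuming 4 KiB pages, i.e.
 * PAGE_SIZE == 0x1000, so PAGE_MASK here is ~0xfff (the Linux sense,
 * not NetBSD's usual PAGE_SIZE-1):
 *
 *	PAGE_ALIGN(0x1234)     == 0x2000	round up to a page boundary
 *	offset_in_page(0x1234) == 0x234		byte offset within the page
 *	(0x1234 & PAGE_MASK)   == 0x1000	address of the page base
 */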

#define untagged_addr(x)	(x)

struct sysinfo {
	unsigned long totalram;
	unsigned long totalhigh;
	uint32_t mem_unit;
};

static inline void
si_meminfo(struct sysinfo *si)
{

	si->totalram = uvmexp.npages;
	si->totalhigh = kernel_map->size >> PAGE_SHIFT;
	si->mem_unit = PAGE_SIZE;
	/* XXX Fill in more as needed.  */
}
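
/*
 * Illustrative sketch only: Linux-style callers scale by mem_unit to
 * get byte counts, along the lines of
 *
 *	struct sysinfo si;
 *
 *	si_meminfo(&si);
 *	total_bytes = (uint64_t)si.totalram * si.mem_unit;
 */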

static inline size_t
si_mem_available(void)
{

	/* XXX ? */
	return uvmexp.free;
}

static inline unsigned long
vm_mmap(struct file *file __unused, unsigned long base __unused,
    unsigned long size __unused, unsigned long prot __unused,
    unsigned long flags __unused, unsigned long token __unused)
{

	return -ENODEV;
}

static inline unsigned long
totalram_pages(void)
{

	return uvmexp.npages;
}

static inline unsigned long
get_num_physpages(void)
{

	return uvmexp.npages;
}

static inline void *
kvmalloc(size_t size, gfp_t gfp)
{

	return kmalloc(size, gfp);
}

static inline void *
kvzalloc(size_t size, gfp_t gfp)
{

	return kmalloc(size, gfp | __GFP_ZERO);
}

static inline void *
kvcalloc(size_t nelem, size_t elemsize, gfp_t gfp)
{

	KASSERT(elemsize > 0);
	if (SIZE_MAX/elemsize < nelem)
		return NULL;
	return kvzalloc(nelem * elemsize, gfp);
}

static inline void *
kvmalloc_array(size_t nelem, size_t elemsize, gfp_t gfp)
{

	KASSERT(elemsize != 0);
	if (nelem > SIZE_MAX/elemsize)
		return NULL;
	return kmalloc(nelem * elemsize, gfp);
}

/*
 * XXX Requires that kmalloc in <linux/slab.h> and vmalloc in
 * <linux/vmalloc.h> both use malloc(9).  If you change either of
 * those, be sure to update this.
 */
static inline void
kvfree(void *ptr)
{

	if (ptr != NULL)
		free(ptr, M_TEMP);
}
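
/*
 * Illustrative sketch only (hypothetical caller): the kv* allocators
 * above are meant to be paired with kvfree, e.g.
 *
 *	tab = kvmalloc_array(n, sizeof(*tab), GFP_KERNEL);
 *	if (tab == NULL)
 *		return -ENOMEM;
 *	...
 *	kvfree(tab);
 */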

static inline void
set_page_dirty(struct page *page)
{
	struct vm_page *pg = &page->p_vmp;

	/* XXX */
	if (pg->uobject != NULL) {
		rw_enter(pg->uobject->vmobjlock, RW_WRITER);
		uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
		rw_exit(pg->uobject->vmobjlock);
	} else {
		uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
	}
}

#endif	/* _LINUX_MM_H_ */