mm.h revision 1.23 1 1.23 riastrad /* $NetBSD: mm.h,v 1.23 2021/12/19 12:07:55 riastradh Exp $ */
2 1.2 riastrad
3 1.2 riastrad /*-
4 1.2 riastrad * Copyright (c) 2013 The NetBSD Foundation, Inc.
5 1.2 riastrad * All rights reserved.
6 1.2 riastrad *
7 1.2 riastrad * This code is derived from software contributed to The NetBSD Foundation
8 1.2 riastrad * by Taylor R. Campbell.
9 1.2 riastrad *
10 1.2 riastrad * Redistribution and use in source and binary forms, with or without
11 1.2 riastrad * modification, are permitted provided that the following conditions
12 1.2 riastrad * are met:
13 1.2 riastrad * 1. Redistributions of source code must retain the above copyright
14 1.2 riastrad * notice, this list of conditions and the following disclaimer.
15 1.2 riastrad * 2. Redistributions in binary form must reproduce the above copyright
16 1.2 riastrad * notice, this list of conditions and the following disclaimer in the
17 1.2 riastrad * documentation and/or other materials provided with the distribution.
18 1.2 riastrad *
19 1.2 riastrad * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 1.2 riastrad * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 1.2 riastrad * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 1.2 riastrad * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 1.2 riastrad * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 1.2 riastrad * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 1.2 riastrad * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 1.2 riastrad * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 1.2 riastrad * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 1.2 riastrad * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 1.2 riastrad * POSSIBILITY OF SUCH DAMAGE.
30 1.2 riastrad */
31 1.2 riastrad
32 1.2 riastrad #ifndef _LINUX_MM_H_
33 1.2 riastrad #define _LINUX_MM_H_
34 1.2 riastrad
35 1.2 riastrad #include <uvm/uvm_extern.h>
36 1.12 skrll #include <uvm/uvm_object.h>
37 1.3 riastrad
38 1.3 riastrad #include <asm/page.h>
39 1.7 riastrad #include <linux/shrinker.h>
40 1.14 riastrad #include <linux/slab.h>
41 1.17 riastrad #include <linux/sizes.h>
42 1.7 riastrad
43 1.5 riastrad struct file;
44 1.5 riastrad
/* XXX Ugh bletch! Whattakludge! Linux's sense is reversed... */
/*
 * NetBSD defines PAGE_MASK as (PAGE_SIZE-1); Linux defines it as the
 * complement, ~(PAGE_SIZE-1).  Redefine it here in the Linux sense so
 * that ported Linux code masking addresses with PAGE_MASK works.
 */
#undef PAGE_MASK
#define PAGE_MASK (~(PAGE_SIZE-1))

/* Round x up to the next multiple of PAGE_SIZE. */
#define PAGE_ALIGN(x) (((x) + (PAGE_SIZE-1)) & ~(PAGE_SIZE-1))
/* Byte offset of x within its page; x may be a pointer or integer. */
#define offset_in_page(x) ((uintptr_t)(x) & (PAGE_SIZE-1))

/* Identity map: no pointer-tagging (e.g. ARM top-byte-ignore) here. */
#define untagged_addr(x) (x)
53 1.18 riastrad
/*
 * Subset of Linux's struct sysinfo, filled in by si_meminfo below.
 * Memory quantities are expressed in units of mem_unit bytes.
 */
struct sysinfo {
	unsigned long totalram;		/* total RAM, in mem_unit units */
	unsigned long totalhigh;	/* "high" memory, in mem_unit units */
	uint32_t mem_unit;		/* size of one memory unit, bytes */
};
59 1.3 riastrad
60 1.3 riastrad static inline void
61 1.3 riastrad si_meminfo(struct sysinfo *si)
62 1.3 riastrad {
63 1.3 riastrad
64 1.3 riastrad si->totalram = uvmexp.npages;
65 1.3 riastrad si->totalhigh = kernel_map->size >> PAGE_SHIFT;
66 1.3 riastrad si->mem_unit = PAGE_SIZE;
67 1.3 riastrad /* XXX Fill in more as needed. */
68 1.3 riastrad }
69 1.3 riastrad
70 1.16 riastrad static inline size_t
71 1.16 riastrad si_mem_available(void)
72 1.16 riastrad {
73 1.16 riastrad
74 1.16 riastrad /* XXX ? */
75 1.16 riastrad return uvmexp.free;
76 1.16 riastrad }
77 1.16 riastrad
78 1.2 riastrad static inline unsigned long
79 1.5 riastrad vm_mmap(struct file *file __unused, unsigned long base __unused,
80 1.5 riastrad unsigned long size __unused, unsigned long prot __unused,
81 1.5 riastrad unsigned long flags __unused, unsigned long token __unused)
82 1.2 riastrad {
83 1.2 riastrad
84 1.5 riastrad return -ENODEV;
85 1.2 riastrad }
86 1.2 riastrad
87 1.6 jmcneill static inline unsigned long
88 1.21 riastrad totalram_pages(void)
89 1.21 riastrad {
90 1.21 riastrad
91 1.21 riastrad return uvmexp.npages;
92 1.21 riastrad }
93 1.21 riastrad
94 1.21 riastrad static inline unsigned long
95 1.6 jmcneill get_num_physpages(void)
96 1.6 jmcneill {
97 1.21 riastrad
98 1.6 jmcneill return uvmexp.npages;
99 1.6 jmcneill }
100 1.6 jmcneill
101 1.14 riastrad static inline void *
102 1.19 riastrad kvmalloc(size_t size, gfp_t gfp)
103 1.19 riastrad {
104 1.19 riastrad
105 1.19 riastrad return kmalloc(size, gfp);
106 1.19 riastrad }
107 1.19 riastrad
108 1.19 riastrad static inline void *
109 1.15 riastrad kvzalloc(size_t size, gfp_t gfp)
110 1.15 riastrad {
111 1.15 riastrad
112 1.15 riastrad return kmalloc(size, gfp | __GFP_ZERO);
113 1.15 riastrad }
114 1.15 riastrad
115 1.15 riastrad static inline void *
116 1.20 riastrad kvcalloc(size_t nelem, size_t elemsize, gfp_t gfp)
117 1.20 riastrad {
118 1.20 riastrad
119 1.20 riastrad KASSERT(elemsize > 0);
120 1.20 riastrad if (SIZE_MAX/elemsize < nelem)
121 1.20 riastrad return NULL;
122 1.20 riastrad return kvzalloc(nelem * elemsize, gfp);
123 1.20 riastrad }
124 1.20 riastrad
125 1.20 riastrad static inline void *
126 1.14 riastrad kvmalloc_array(size_t nelem, size_t elemsize, gfp_t gfp)
127 1.14 riastrad {
128 1.14 riastrad
129 1.14 riastrad KASSERT(elemsize != 0);
130 1.14 riastrad if (nelem > SIZE_MAX/elemsize)
131 1.14 riastrad return NULL;
132 1.14 riastrad return kmalloc(nelem * elemsize, gfp);
133 1.14 riastrad }
134 1.14 riastrad
/*
 * kvfree: free memory obtained from kvmalloc/kvzalloc/kvcalloc/
 * kvmalloc_array.  ptr may be NULL.
 *
 * XXX kvfree must additionally work on kmalloc (linux/slab.h) and
 * vmalloc (linux/vmalloc.h).  If you change either of those, be sure
 * to change this too.
 */

static inline void
kvfree(void *ptr)
{
	kfree(ptr);
}
146 1.7 riastrad
147 1.8 riastrad static inline void
148 1.8 riastrad set_page_dirty(struct page *page)
149 1.8 riastrad {
150 1.11 ad struct vm_page *pg = &page->p_vmp;
151 1.8 riastrad
152 1.11 ad /* XXX */
153 1.11 ad if (pg->uobject != NULL) {
154 1.13 ad rw_enter(pg->uobject->vmobjlock, RW_WRITER);
155 1.11 ad uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
156 1.13 ad rw_exit(pg->uobject->vmobjlock);
157 1.11 ad } else {
158 1.11 ad uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
159 1.11 ad }
160 1.8 riastrad }
161 1.8 riastrad
162 1.2 riastrad #endif /* _LINUX_MM_H_ */
163