/*	$NetBSD: uvm_extern.h,v 1.224 2020/04/23 21:47:09 ad Exp $	*/
2 1.1 mrg
3 1.1 mrg /*
4 1.1 mrg * Copyright (c) 1997 Charles D. Cranor and Washington University.
5 1.1 mrg * All rights reserved.
6 1.1 mrg *
7 1.1 mrg * Redistribution and use in source and binary forms, with or without
8 1.1 mrg * modification, are permitted provided that the following conditions
9 1.1 mrg * are met:
10 1.1 mrg * 1. Redistributions of source code must retain the above copyright
11 1.1 mrg * notice, this list of conditions and the following disclaimer.
12 1.1 mrg * 2. Redistributions in binary form must reproduce the above copyright
13 1.1 mrg * notice, this list of conditions and the following disclaimer in the
14 1.1 mrg * documentation and/or other materials provided with the distribution.
15 1.1 mrg *
16 1.1 mrg * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 1.1 mrg * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 1.1 mrg * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 1.1 mrg * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 1.1 mrg * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 1.1 mrg * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 1.1 mrg * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 1.1 mrg * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 1.1 mrg * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 1.1 mrg * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 1.4 mrg *
27 1.4 mrg * from: Id: uvm_extern.h,v 1.1.2.21 1998/02/07 01:16:53 chs Exp
28 1.1 mrg */
29 1.1 mrg
30 1.44 mrg /*-
31 1.45 mrg * Copyright (c) 1991, 1992, 1993
32 1.44 mrg * The Regents of the University of California. All rights reserved.
33 1.44 mrg *
34 1.44 mrg * Redistribution and use in source and binary forms, with or without
35 1.44 mrg * modification, are permitted provided that the following conditions
36 1.44 mrg * are met:
37 1.44 mrg * 1. Redistributions of source code must retain the above copyright
38 1.44 mrg * notice, this list of conditions and the following disclaimer.
39 1.44 mrg * 2. Redistributions in binary form must reproduce the above copyright
40 1.44 mrg * notice, this list of conditions and the following disclaimer in the
41 1.44 mrg * documentation and/or other materials provided with the distribution.
42 1.83 agc * 3. Neither the name of the University nor the names of its contributors
43 1.44 mrg * may be used to endorse or promote products derived from this software
44 1.44 mrg * without specific prior written permission.
45 1.44 mrg *
46 1.44 mrg * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
47 1.44 mrg * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
48 1.44 mrg * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
49 1.44 mrg * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
50 1.44 mrg * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
51 1.44 mrg * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
52 1.44 mrg * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
53 1.44 mrg * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
54 1.44 mrg * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
55 1.44 mrg * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
56 1.44 mrg * SUCH DAMAGE.
57 1.44 mrg *
58 1.44 mrg * @(#)vm_extern.h 8.5 (Berkeley) 5/3/95
59 1.44 mrg */
60 1.44 mrg
61 1.8 perry #ifndef _UVM_UVM_EXTERN_H_
62 1.8 perry #define _UVM_UVM_EXTERN_H_
63 1.1 mrg
64 1.1 mrg /*
65 1.1 mrg * uvm_extern.h: this file defines the external interface to the VM system.
66 1.1 mrg *
67 1.1 mrg * this should be the only file included by non-VM parts of the kernel
68 1.1 mrg * which need access to VM services. if you want to know the interface
69 1.1 mrg * to the MI VM layer without knowing the details, this is the file to
70 1.1 mrg * learn.
71 1.1 mrg *
72 1.1 mrg * NOTE: vm system calls are prototyped in syscallargs.h
73 1.1 mrg */
74 1.1 mrg
75 1.1 mrg /*
76 1.1 mrg * defines
77 1.1 mrg */
78 1.1 mrg
79 1.1 mrg /*
80 1.1 mrg * the following defines are for uvm_map and functions which call it.
81 1.1 mrg */
82 1.1 mrg
83 1.1 mrg /* protections bits */
84 1.1 mrg #define UVM_PROT_MASK 0x07 /* protection mask */
85 1.1 mrg #define UVM_PROT_NONE 0x00 /* protection none */
86 1.1 mrg #define UVM_PROT_ALL 0x07 /* everything */
87 1.1 mrg #define UVM_PROT_READ 0x01 /* read */
88 1.1 mrg #define UVM_PROT_WRITE 0x02 /* write */
89 1.1 mrg #define UVM_PROT_EXEC 0x04 /* exec */
90 1.1 mrg
91 1.1 mrg /* protection short codes */
92 1.1 mrg #define UVM_PROT_R 0x01 /* read */
93 1.1 mrg #define UVM_PROT_W 0x02 /* write */
94 1.1 mrg #define UVM_PROT_RW 0x03 /* read-write */
95 1.1 mrg #define UVM_PROT_X 0x04 /* exec */
96 1.1 mrg #define UVM_PROT_RX 0x05 /* read-exec */
97 1.1 mrg #define UVM_PROT_WX 0x06 /* write-exec */
98 1.1 mrg #define UVM_PROT_RWX 0x07 /* read-write-exec */
99 1.1 mrg
100 1.1 mrg /* 0x08: not used */
101 1.1 mrg
102 1.1 mrg /* inherit codes */
103 1.1 mrg #define UVM_INH_MASK 0x30 /* inherit mask */
104 1.1 mrg #define UVM_INH_SHARE 0x00 /* "share" */
105 1.1 mrg #define UVM_INH_COPY 0x10 /* "copy" */
106 1.1 mrg #define UVM_INH_NONE 0x20 /* "none" */
107 1.1 mrg #define UVM_INH_DONATE 0x30 /* "donate" << not used */
108 1.1 mrg
109 1.1 mrg /* 0x40, 0x80: not used */
110 1.1 mrg
111 1.1 mrg /* bits 0x700: max protection, 0x800: not used */
112 1.1 mrg
113 1.1 mrg /* bits 0x7000: advice, 0x8000: not used */
114 1.107 yamt /* advice: matches MADV_* from sys/mman.h and POSIX_FADV_* from sys/fcntl.h */
115 1.1 mrg #define UVM_ADV_NORMAL 0x0 /* 'normal' */
116 1.1 mrg #define UVM_ADV_RANDOM 0x1 /* 'random' */
117 1.1 mrg #define UVM_ADV_SEQUENTIAL 0x2 /* 'sequential' */
118 1.156 pooka #define UVM_ADV_WILLNEED 0x3 /* pages will be needed */
119 1.156 pooka #define UVM_ADV_DONTNEED 0x4 /* pages won't be needed */
120 1.156 pooka #define UVM_ADV_NOREUSE 0x5 /* pages will be used only once */
121 1.1 mrg #define UVM_ADV_MASK 0x7 /* mask */
122 1.1 mrg
123 1.98 yamt /* bits 0xffff0000: mapping flags */
124 1.206 chs #define UVM_FLAG_FIXED 0x00010000 /* find space */
125 1.206 chs #define UVM_FLAG_OVERLAY 0x00020000 /* establish overlay */
126 1.206 chs #define UVM_FLAG_NOMERGE 0x00040000 /* don't merge map entries */
127 1.206 chs #define UVM_FLAG_COPYONW 0x00080000 /* set copy_on_write flag */
128 1.206 chs #define UVM_FLAG_AMAPPAD 0x00100000 /* for bss: pad amap */
129 1.206 chs #define UVM_FLAG_TRYLOCK 0x00200000 /* fail if we can not lock map */
130 1.206 chs #define UVM_FLAG_NOWAIT 0x00400000 /* not allowed to sleep */
131 1.206 chs #define UVM_FLAG_WAITVA 0x00800000 /* wait for va */
132 1.206 chs #define UVM_FLAG_VAONLY 0x02000000 /* unmap: no pages are mapped */
133 1.206 chs #define UVM_FLAG_COLORMATCH 0x04000000 /* match color given in off */
134 1.206 chs #define UVM_FLAG_UNMAP 0x08000000 /* unmap existing entries */
135 1.1 mrg
/*
 * snprintb(3) format string for decoding a uvm_flag_t built with
 * UVM_MAPFLAG(): protection in bits 0-2, inheritance in bits 4-5,
 * max protection in bits 8-10, advice in bits 12-14, and the
 * UVM_FLAG_* mapping flags from bit 16 up.
 *
 * Note: UVM_FLAG_VAONLY is 0x02000000 (bit 25, octal \31); bit 24
 * (0x01000000) is unused, so the single-bit entries below start at
 * \31 for VAONLY, \32 for COLORMATCH and \33 for UNMAP.
 */
#define UVM_FLAG_BITS "\177\020\
F\0\3\
:\0PROT=NONE\0\
:\1PROT=R\0\
:\2PROT=W\0\
:\3PROT=RW\0\
:\4PROT=X\0\
:\5PROT=RX\0\
:\6PROT=WX\0\
:\7PROT=RWX\0\
F\4\2\
:\0INH=SHARE\0\
:\1INH=COPY\0\
:\2INH=NONE\0\
:\3INH=DONATE\0\
F\10\3\
:\0MAXPROT=NONE\0\
:\1MAXPROT=R\0\
:\2MAXPROT=W\0\
:\3MAXPROT=RW\0\
:\4MAXPROT=X\0\
:\5MAXPROT=RX\0\
:\6MAXPROT=WX\0\
:\7MAXPROT=RWX\0\
F\14\3\
:\0ADV=NORMAL\0\
:\1ADV=RANDOM\0\
:\2ADV=SEQUENTIAL\0\
:\3ADV=WILLNEED\0\
:\4ADV=DONTNEED\0\
:\5ADV=NOREUSE\0\
b\20FIXED\0\
b\21OVERLAY\0\
b\22NOMERGE\0\
b\23COPYONW\0\
b\24AMAPPAD\0\
b\25TRYLOCK\0\
b\26NOWAIT\0\
b\27WAITVA\0\
b\31VAONLY\0\
b\32COLORMATCH\0\
b\33UNMAP\0\
"
179 1.205 christos
180 1.1 mrg /* macros to extract info */
181 1.1 mrg #define UVM_PROTECTION(X) ((X) & UVM_PROT_MASK)
182 1.1 mrg #define UVM_INHERIT(X) (((X) & UVM_INH_MASK) >> 4)
183 1.1 mrg #define UVM_MAXPROTECTION(X) (((X) >> 8) & UVM_PROT_MASK)
184 1.1 mrg #define UVM_ADVICE(X) (((X) >> 12) & UVM_ADV_MASK)
185 1.1 mrg
186 1.1 mrg #define UVM_MAPFLAG(PROT,MAXPROT,INH,ADVICE,FLAGS) \
187 1.114 yamt (((MAXPROT) << 8)|(PROT)|(INH)|((ADVICE) << 12)|(FLAGS))
188 1.1 mrg
189 1.67 chs /* magic offset value: offset not known(obj) or don't care(!obj) */
190 1.38 kleink #define UVM_UNKNOWN_OFFSET ((voff_t) -1)
191 1.1 mrg
192 1.1 mrg /*
193 1.100 yamt * the following defines are for uvm_km_alloc/free's flags
194 1.1 mrg */
195 1.100 yamt #define UVM_KMF_WIRED 0x1 /* allocation type: wired */
196 1.100 yamt #define UVM_KMF_PAGEABLE 0x2 /* allocation type: pageable */
197 1.100 yamt #define UVM_KMF_VAONLY 0x4 /* allocation type: VA only */
198 1.100 yamt #define UVM_KMF_TYPEMASK (UVM_KMF_VAONLY | UVM_KMF_PAGEABLE | UVM_KMF_WIRED)
199 1.100 yamt #define UVM_KMF_CANFAIL 0x8 /* caller handles failure */
200 1.100 yamt #define UVM_KMF_ZERO 0x10 /* want zero filled memory */
201 1.115 drochner #define UVM_KMF_EXEC 0x20 /* need executable mapping */
202 1.1 mrg #define UVM_KMF_TRYLOCK UVM_FLAG_TRYLOCK /* try locking only */
203 1.75 thorpej #define UVM_KMF_NOWAIT UVM_FLAG_NOWAIT /* not allowed to sleep */
204 1.100 yamt #define UVM_KMF_WAITVA UVM_FLAG_WAITVA /* sleep for va */
205 1.176 matt #define UVM_KMF_COLORMATCH UVM_FLAG_COLORMATCH /* start at color in align */
206 1.1 mrg
207 1.1 mrg /*
208 1.15 thorpej * the following defines the strategies for uvm_pagealloc_strat()
209 1.15 thorpej */
210 1.152 abs #define UVM_PGA_STRAT_NORMAL 0 /* priority (low id to high) walk */
211 1.15 thorpej #define UVM_PGA_STRAT_ONLY 1 /* only specified free list */
212 1.15 thorpej #define UVM_PGA_STRAT_FALLBACK 2 /* ONLY falls back on NORMAL */
213 1.216 ad #define UVM_PGA_STRAT_NUMA 3 /* strongly prefer ideal bucket */
214 1.15 thorpej
215 1.15 thorpej /*
216 1.24 chs * flags for uvm_pagealloc_strat()
217 1.24 chs */
218 1.39 thorpej #define UVM_PGA_USERESERVE 0x0001 /* ok to use reserve pages */
219 1.39 thorpej #define UVM_PGA_ZERO 0x0002 /* returned page must be zero'd */
220 1.24 chs
221 1.24 chs /*
222 1.209 jdolecek * flags for ubc_uiomove()
223 1.53 chs */
224 1.194 riastrad #define UBC_READ 0x001 /* reading from object */
225 1.194 riastrad #define UBC_WRITE 0x002 /* writing to object */
226 1.194 riastrad #define UBC_FAULTBUSY 0x004 /* nobody else is using these pages, so busy
227 1.194 riastrad * them at alloc and unbusy at release (e.g.,
228 1.194 riastrad * for writes extending a file) */
229 1.224 ad #define UBC_ISMAPPED 0x008 /* object may be mapped by a process */
230 1.53 chs
231 1.53 chs /*
232 1.97 chs * flags for ubc_release()
233 1.97 chs */
234 1.194 riastrad #define UBC_UNMAP 0x010 /* unmap pages now -- don't leave the
235 1.194 riastrad * mappings cached indefinitely */
236 1.130 yamt
237 1.130 yamt /*
238 1.194 riastrad * flags for ubc_uiomove()
239 1.130 yamt */
240 1.194 riastrad #define UBC_PARTIALOK 0x100 /* return early on error; otherwise, zero all
241 1.194 riastrad * remaining bytes after error */
242 1.97 chs
243 1.97 chs /*
244 1.53 chs * flags for uvn_findpages().
245 1.53 chs */
246 1.67 chs #define UFP_ALL 0x00
247 1.67 chs #define UFP_NOWAIT 0x01
248 1.67 chs #define UFP_NOALLOC 0x02
249 1.67 chs #define UFP_NOCACHE 0x04
250 1.67 chs #define UFP_NORDONLY 0x08
251 1.67 chs #define UFP_DIRTYONLY 0x10
252 1.67 chs #define UFP_BACKWARD 0x20
253 1.222 ad #define UFP_NOBUSY 0x40
254 1.53 chs
255 1.53 chs /*
256 1.33 thorpej * lockflags that control the locking behavior of various functions.
257 1.33 thorpej */
258 1.33 thorpej #define UVM_LK_ENTER 0x00000001 /* map locked on entry */
259 1.33 thorpej #define UVM_LK_EXIT 0x00000002 /* leave map locked on exit */
260 1.33 thorpej
261 1.33 thorpej /*
262 1.149 christos * Default number of pages to allocate on the stack
263 1.149 christos */
264 1.149 christos #define UBC_MAX_PAGES 8
265 1.149 christos
266 1.149 christos /*
267 1.155 rmind * Value representing inactive emap.
268 1.155 rmind */
269 1.155 rmind #define UVM_EMAP_INACTIVE (0)
270 1.155 rmind
271 1.155 rmind /*
272 1.1 mrg * structures
273 1.1 mrg */
274 1.1 mrg
275 1.97 chs struct buf;
276 1.1 mrg struct core;
277 1.97 chs struct loadavg;
278 1.1 mrg struct mount;
279 1.1 mrg struct pglist;
280 1.1 mrg struct proc;
281 1.1 mrg struct uio;
282 1.1 mrg struct uvm_object;
283 1.1 mrg struct vm_anon;
284 1.1 mrg struct vmspace;
285 1.10 thorpej struct pmap;
286 1.1 mrg struct vnode;
287 1.65 chs struct vm_map_entry;
288 1.65 chs struct vm_map;
289 1.65 chs struct vm_page;
290 1.97 chs struct vmtotal;
291 1.53 chs
292 1.1 mrg /*
293 1.108 yamt * uvm_pctparam: parameter to be shown as percentage to user.
294 1.108 yamt */
295 1.108 yamt
296 1.108 yamt #define UVM_PCTPARAM_SHIFT 8
297 1.108 yamt #define UVM_PCTPARAM_SCALE (1 << UVM_PCTPARAM_SHIFT)
298 1.108 yamt #define UVM_PCTPARAM_APPLY(pct, x) \
299 1.108 yamt (((x) * (pct)->pct_scaled) >> UVM_PCTPARAM_SHIFT)
struct uvm_pctparam {
	int pct_pct;	/* percent [0, 100] */ /* should be the first member */
	int pct_scaled;	/* pct_pct pre-scaled by UVM_PCTPARAM_SCALE,
			 * consumed by UVM_PCTPARAM_APPLY() */
	int (*pct_check)(struct uvm_pctparam *, int);
			/* NOTE(review): presumably an optional hook to vet a
			 * newly supplied pct value -- confirm at call sites */
};
305 1.108 yamt
306 1.108 yamt /*
307 1.1 mrg * uvmexp: global data structures that are exported to parts of the kernel
308 1.1 mrg * other than the vm system.
309 1.1 mrg */
310 1.1 mrg
struct uvmexp {
	/* vm_page constants */
	int pagesize;	/* size of a page (PAGE_SIZE): must be power of 2 */
	int pagemask;	/* page mask */
	int pageshift;	/* page shift */

	/* vm_page counters */
	int npages;	/* number of pages we manage */
	int free;	/* number of free pages */
	int paging;	/* number of pages in the process of being paged out */
	int wired;	/* number of wired pages */

	/*
	 * Adding anything before this line will break binary compatibility
	 * with top(1) on NetBSD 1.5.
	 */

	int ncolors;	/* number of page color buckets: must be p-o-2 */
	int colormask;	/* color bucket mask */

	int zeropages;		/* number of zero'd pages */
	int reserve_pagedaemon;	/* number of pages reserved for pagedaemon */
	int reserve_kernel;	/* number of pages reserved for kernel */
	unsigned anonpages;	/* number of pages used by anon mappings */
	unsigned filepages;	/* number of pages used by cached file data */
	unsigned execpages;	/* number of pages used by cached exec data */

	/* pageout params */
	int freemin;	/* min number of free pages */
	int freetarg;	/* target number of free pages */
	int wiredmax;	/* max number of wired pages */

	/* swap */
	int nswapdev;	/* number of configured swap devices in system */
	int swpages;	/* number of PAGE_SIZE'ed swap pages */
	int swpgavail;	/* number of swap pages currently available */
	int swpginuse;	/* number of swap pages in use */
	int swpgonly;	/* number of swap pages in use, not also in RAM */
	int nswget;	/* number of times fault calls uvm_swap_get() */

	/* stat counters.  XXX: should be 64-bit counters */
	int faults;	/* page fault count */
	int traps;	/* trap count */
	int intrs;	/* interrupt count */
	int swtch;	/* context switch count */
	int softs;	/* software interrupt count */
	int syscalls;	/* system calls */
	int pageins;	/* pagein operation count */
			/* pageouts are in pdpageouts below */
	int _unused1;	/* unused; kept to preserve struct layout */
	int _unused2;	/* unused; kept to preserve struct layout */
	int pgswapin;	/* pages swapped in */
	int pgswapout;	/* pages swapped out */
	int forks;	/* forks */
	int forks_ppwait;	/* forks where parent waits */
	int forks_sharevm;	/* forks where vmspace is shared */
	int pga_zerohit;	/* pagealloc where zero wanted and zero
				   was available */
	int pga_zeromiss;	/* pagealloc where zero wanted and zero
				   not available */
	int zeroaborts;		/* number of times page zeroing was
				   aborted */
	int colorhit;		/* pagealloc where we got optimal color */
	int colormiss;		/* pagealloc where we didn't */
	int cpuhit;		/* pagealloc where we allocated locally */
	int cpumiss;		/* pagealloc where we didn't */

	/* fault subcounters.  XXX: should be 64-bit counters */
	int fltnoram;	/* number of times fault was out of ram */
	int fltnoanon;	/* number of times fault was out of anons */
	int fltpgwait;	/* number of times fault had to wait on a page */
	int fltpgrele;	/* number of times fault found a released page */
	int fltrelck;	/* number of times fault relock called */
	int fltrelckok;	/* number of times fault relock is a success */
	int fltanget;	/* number of times fault gets anon page */
	int fltanretry;	/* number of times fault retrys an anon get */
	int fltamcopy;	/* number of times fault clears "needs copy" */
	int fltnamap;	/* number of times fault maps a neighbor anon page */
	int fltnomap;	/* number of times fault maps a neighbor obj page */
	int fltlget;	/* number of times fault does a locked pgo_get */
	int fltget;	/* number of times fault does an unlocked get */
	int flt_anon;	/* number of times fault anon (case 1a) */
	int flt_acow;	/* number of times fault anon cow (case 1b) */
	int flt_obj;	/* number of times fault is on object page (2a) */
	int flt_prcopy;	/* number of times fault promotes with copy (2b) */
	int flt_przero;	/* number of times fault promotes with zerofill (2b) */

	/* daemon counters.  XXX: should be 64-bit counters */
	int pdwoke;	/* number of times daemon woke up */
	int pdrevs;	/* number of times daemon rev'd clock hand */
	int _unused3;	/* unused; kept to preserve struct layout */
	int pdfreed;	/* number of pages daemon freed since boot */
	int pdscans;	/* number of pages daemon scanned since boot */
	int pdanscan;	/* number of anonymous pages scanned by daemon */
	int pdobscan;	/* number of object pages scanned by daemon */
	int pdreact;	/* number of pages daemon reactivated since boot */
	int pdbusy;	/* number of times daemon found a busy page */
	int pdpageouts;	/* number of times daemon started a pageout */
	int pdpending;	/* number of times daemon got a pending pageout */
	int pddeact;	/* number of pages daemon deactivates */
	int pdreanon;	/* anon pages reactivated due to thresholds */
	int pdrefile;	/* file pages reactivated due to thresholds */
	int pdreexec;	/* executable pages reactivated due to thresholds */

	int bootpages;	/* number of pages stolen at boot */
};
417 1.54 simonb
418 1.54 simonb /*
419 1.54 simonb * The following structure is 64-bit alignment safe. New elements
420 1.54 simonb * should only be added to the end of this structure so binary
421 1.54 simonb * compatibility can be preserved.
422 1.54 simonb */
struct uvmexp_sysctl {
	/* field meanings mirror the like-named members of struct uvmexp */
	int64_t	pagesize;
	int64_t	pagemask;
	int64_t	pageshift;
	int64_t	npages;
	int64_t	free;
	int64_t	active;
	int64_t	inactive;
	int64_t	paging;
	int64_t	wired;
	int64_t	zeropages;
	int64_t	reserve_pagedaemon;
	int64_t	reserve_kernel;
	int64_t	freemin;
	int64_t	freetarg;
	int64_t	inactarg;		/* unused */
	int64_t	wiredmax;
	int64_t	nswapdev;
	int64_t	swpages;
	int64_t	swpginuse;
	int64_t	swpgonly;
	int64_t	nswget;
	int64_t	unused1;		/* unused; was nanon */
	int64_t	cpuhit;
	int64_t	cpumiss;
	int64_t	faults;
	int64_t	traps;
	int64_t	intrs;
	int64_t	swtch;
	int64_t	softs;
	int64_t	syscalls;
	int64_t	pageins;
	int64_t	swapins;		/* unused */
	int64_t	swapouts;		/* unused */
	int64_t	pgswapin;		/* unused */
	int64_t	pgswapout;
	int64_t	forks;
	int64_t	forks_ppwait;
	int64_t	forks_sharevm;
	int64_t	pga_zerohit;
	int64_t	pga_zeromiss;
	int64_t	zeroaborts;
	int64_t	fltnoram;
	int64_t	fltnoanon;
	int64_t	fltpgwait;
	int64_t	fltpgrele;
	int64_t	fltrelck;
	int64_t	fltrelckok;
	int64_t	fltanget;
	int64_t	fltanretry;
	int64_t	fltamcopy;
	int64_t	fltnamap;
	int64_t	fltnomap;
	int64_t	fltlget;
	int64_t	fltget;
	int64_t	flt_anon;
	int64_t	flt_acow;
	int64_t	flt_obj;
	int64_t	flt_prcopy;
	int64_t	flt_przero;
	int64_t	pdwoke;
	int64_t	pdrevs;
	int64_t	unused4;		/* unused */
	int64_t	pdfreed;
	int64_t	pdscans;
	int64_t	pdanscan;
	int64_t	pdobscan;
	int64_t	pdreact;
	int64_t	pdbusy;
	int64_t	pdpageouts;
	int64_t	pdpending;
	int64_t	pddeact;
	int64_t	anonpages;
	int64_t	filepages;
	int64_t	execpages;
	int64_t	colorhit;
	int64_t	colormiss;
	int64_t	ncolors;
	int64_t	bootpages;
	int64_t	poolpages;
	int64_t	countsyncone;
	int64_t	countsyncall;
	int64_t	anonunknown;
	int64_t	anonclean;
	int64_t	anondirty;
	int64_t	fileunknown;
	int64_t	fileclean;
	int64_t	filedirty;
	int64_t	fltup;
	int64_t	fltnoup;
};
514 1.1 mrg
515 1.31 thorpej #ifdef _KERNEL
516 1.97 chs /* we need this before including uvm_page.h on some platforms */
517 1.45 mrg extern struct uvmexp uvmexp;
518 1.168 matt /* MD code needs this without including <uvm/uvm.h> */
519 1.168 matt extern bool vm_page_zero_enable;
520 1.45 mrg #endif
521 1.1 mrg
522 1.45 mrg /*
523 1.45 mrg * Finally, bring in standard UVM headers.
524 1.45 mrg */
525 1.45 mrg #include <sys/vmmeter.h>
526 1.45 mrg #include <sys/queue.h>
527 1.116 he #include <sys/lock.h>
528 1.180 para #ifdef _KERNEL
529 1.180 para #include <sys/vmem.h>
530 1.180 para #endif
531 1.45 mrg #include <uvm/uvm_param.h>
532 1.45 mrg #include <uvm/uvm_prot.h>
533 1.45 mrg #include <uvm/uvm_pmap.h>
534 1.196 christos #if defined(_KERNEL) || defined(_KMEMUSER)
535 1.45 mrg #include <uvm/uvm_map.h>
536 1.45 mrg #include <uvm/uvm_pager.h>
537 1.203 christos #endif
538 1.45 mrg
539 1.203 christos #ifdef _KERNEL
540 1.45 mrg /*
541 1.200 cherry * Include the uvm_hotplug(9) API unconditionally until
542 1.200 cherry * uvm_page_physload() et. al. are obsoleted
543 1.200 cherry *
544 1.200 cherry * After this, MD code will have to explicitly include it if needed.
545 1.200 cherry */
546 1.200 cherry #include <uvm/uvm_physseg.h>
547 1.201 cherry #endif
548 1.200 cherry
549 1.200 cherry /*
550 1.138 yamt * helpers for calling ubc_release()
551 1.138 yamt */
#ifdef PMAP_CACHE_VIVT
/*
 * On PMAP_CACHE_VIVT machines, executable (VI_TEXT) vnodes additionally
 * request UBC_UNMAP so the mapping is torn down immediately rather than
 * left cached.  Note the line-continuation backslash after the '|' --
 * without it the macro is truncated mid-expression and the second line
 * becomes a stray top-level token sequence (compile error).
 */
#define	UBC_VNODE_FLAGS(vp) \
	((((vp)->v_iflag & VI_TEXT) != 0 ? UBC_UNMAP : 0) | \
	 (((vp)->v_vflag & VV_MAPPED) != 0 ? UBC_ISMAPPED : 0))
#else
#define	UBC_VNODE_FLAGS(vp) \
	(((vp)->v_vflag & VV_MAPPED) != 0 ? UBC_ISMAPPED : 0)
#endif
560 1.138 yamt
561 1.196 christos #if defined(_KERNEL) || defined(_KMEMUSER)
562 1.138 yamt /*
563 1.45 mrg * Shareable process virtual address space.
564 1.45 mrg * May eventually be merged with vm_map.
565 1.45 mrg * Several fields are temporary (text, data stuff).
566 1.45 mrg */
struct vmspace {
	struct vm_map	vm_map;		/* VM address map */
	int		vm_refcnt;	/* number of references;
					 * note: protected by vm_map.misc_lock */
	void *		vm_shm;		/* SYS5 shared memory private data XXX */
/* we copy from vm_startcopy to the end of the structure on fork */
#define vm_startcopy vm_rssize
	segsz_t vm_rssize;	/* current resident set size in pages
				 * (first field copied on fork) */
	segsz_t vm_rssmax;	/* max resident size in pages */
	segsz_t vm_tsize;	/* text size (pages) XXX */
	segsz_t vm_dsize;	/* data size (pages) XXX */
	segsz_t vm_ssize;	/* stack size (pages) */
	segsz_t vm_issize;	/* initial unmapped stack size (pages) */
	void *	vm_taddr;	/* user virtual address of text XXX */
	void *	vm_daddr;	/* user virtual address of data XXX */
	void *vm_maxsaddr;	/* user VA at max stack growth */
	void *vm_minsaddr;	/* user VA at top of stack */
	size_t vm_aslr_delta_mmap;	/* mmap() random delta for ASLR */
};
586 1.111 yamt #define VMSPACE_IS_KERNEL_P(vm) VM_MAP_IS_KERNEL(&(vm)->vm_map)
587 1.196 christos #endif
588 1.45 mrg
589 1.45 mrg #ifdef _KERNEL
590 1.43 mrg
591 1.43 mrg /*
592 1.70 thorpej * used to keep state while iterating over the map for a core dump.
593 1.70 thorpej */
struct uvm_coredump_state {
	void *cookie;		/* opaque for the caller */
	vaddr_t start;		/* start of region */
	vaddr_t realend;	/* real end of region */
	vaddr_t end;		/* virtual end of region */
				/* NOTE(review): presumably [start, realend)
				 * holds data worth dumping while
				 * [realend, end) can be represented as a
				 * hole -- confirm against the
				 * uvm_coredump_walkmap() callback users */
	vm_prot_t prot;		/* protection of region */
	int flags;		/* flags; see below */
};
602 1.70 thorpej
603 1.70 thorpej #define UVM_COREDUMP_STACK 0x01 /* region is user stack */
604 1.70 thorpej
605 1.70 thorpej /*
606 1.43 mrg * the various kernel maps, owned by MD code
607 1.43 mrg */
608 1.65 chs extern struct vm_map *kernel_map;
609 1.65 chs extern struct vm_map *phys_map;
610 1.79 thorpej
611 1.79 thorpej /*
612 1.223 thorpej * uvm_voaddr:
613 1.223 thorpej *
614 1.223 thorpej * This structure encapsulates UVM's unique virtual object address
615 1.223 thorpej * for an individual byte inside a pageable page. Pageable pages can
616 1.223 thorpej * be owned by either a uvm_object (UVM_VOADDR_TYPE_OBJECT) or a
617 1.223 thorpej * vm_anon (UVM_VOADDR_TYPE_ANON).
618 1.223 thorpej *
619 1.223 thorpej * In each case, the byte offset into the owning object
620 1.223 thorpej * (uvm_object or vm_anon) is included in the ID, so that
621 1.223 thorpej * two different offsets into the same page have distinct
622 1.223 thorpej * IDs.
623 1.223 thorpej *
624 1.223 thorpej * Note that the page does not necessarily have to be resident
625 1.223 thorpej * in order to know the virtual object address. However, it
626 1.223 thorpej * is required that any pending copy-on-write is resolved.
627 1.223 thorpej *
628 1.223 thorpej * When someone wants a virtual object address, an extra reference
629 1.223 thorpej * is taken on the owner while the caller uses the ID. This
630 1.223 thorpej * ensures that the identity is stable for the duration of its
631 1.223 thorpej * use.
632 1.223 thorpej */
struct uvm_voaddr {
	enum {
		UVM_VOADDR_TYPE_OBJECT = 1,
		UVM_VOADDR_TYPE_ANON = 2,
	} type;			/* discriminant for the anonymous union */
	union {
		struct uvm_object *uobj;	/* owner iff TYPE_OBJECT */
		struct vm_anon *anon;		/* owner iff TYPE_ANON */
	};
	voff_t offset;		/* byte offset into the owning object */
};
644 1.223 thorpej
645 1.223 thorpej /*
646 1.1 mrg * macros
647 1.1 mrg */
648 1.1 mrg
649 1.45 mrg #define vm_resident_count(vm) (pmap_resident_count((vm)->vm_map.pmap))
650 1.1 mrg
651 1.44 mrg
652 1.44 mrg /* vm_machdep.c */
653 1.170 pooka int vmapbuf(struct buf *, vsize_t);
654 1.91 junyoung void vunmapbuf(struct buf *, vsize_t);
655 1.213 chs void ktext_write(void *, const void *, size_t);
656 1.31 thorpej
657 1.1 mrg /* uvm_aobj.c */
658 1.213 chs struct uvm_object *uao_create(voff_t, int);
659 1.190 riastrad void uao_set_pgfl(struct uvm_object *, int);
660 1.91 junyoung void uao_detach(struct uvm_object *);
661 1.91 junyoung void uao_reference(struct uvm_object *);
662 1.1 mrg
663 1.53 chs /* uvm_bio.c */
664 1.91 junyoung void ubc_init(void);
665 1.191 riastrad void ubchist_init(void);
666 1.130 yamt int ubc_uiomove(struct uvm_object *, struct uio *, vsize_t,
667 1.134 yamt int, int);
668 1.174 hannken void ubc_zerorange(struct uvm_object *, off_t, size_t, int);
669 1.173 rmind void ubc_purge(struct uvm_object *);
670 1.53 chs
671 1.1 mrg /* uvm_fault.c */
672 1.112 drochner #define uvm_fault(m, a, p) uvm_fault_internal(m, a, p, 0)
673 1.112 drochner int uvm_fault_internal(struct vm_map *, vaddr_t, vm_prot_t, int);
674 1.112 drochner /* handle a page fault */
675 1.1 mrg
676 1.1 mrg /* uvm_glue.c */
/*
 * Glue between UVM and process/LWP management: fork/exec/exit hooks,
 * u-area (kernel stack) allocation, and temporary wiring of user
 * memory (uvm_vslock/uvm_vsunlock) around I/O.
 */
677 1.1 mrg #if defined(KGDB)
678 1.128 christos void uvm_chgkprot(void *, size_t, int);
679 1.1 mrg #endif
680 1.126 thorpej void uvm_proc_fork(struct proc *, struct proc *, bool);
681 1.91 junyoung void uvm_lwp_fork(struct lwp *, struct lwp *,
682 1.91 junyoung void *, size_t, void (*)(void *), void *);
683 1.188 dsl int uvm_coredump_walkmap(struct proc *,
684 1.188 dsl int (*)(struct uvm_coredump_state *), void *);
685 1.187 dsl int uvm_coredump_count_segs(struct proc *);
686 1.91 junyoung void uvm_proc_exit(struct proc *);
687 1.91 junyoung void uvm_lwp_exit(struct lwp *);
688 1.218 ad void uvm_idle(void);
689 1.91 junyoung void uvm_init_limits(struct proc *);
/* uvm_kernacc: check whether the kernel may access a range with the given protection. */
690 1.173 rmind bool uvm_kernacc(void *, size_t, vm_prot_t);
691 1.141 perry __dead void uvm_scheduler(void);
/*
 * u-area allocators: plain variants for normal LWPs, "_system"
 * variants for system LWPs (the latter take/return per-CPU context).
 */
692 1.160 rmind vaddr_t uvm_uarea_alloc(void);
693 1.160 rmind void uvm_uarea_free(vaddr_t);
694 1.184 matt vaddr_t uvm_uarea_system_alloc(struct cpu_info *);
695 1.171 matt void uvm_uarea_system_free(vaddr_t);
696 1.161 rmind vaddr_t uvm_lwp_getuarea(lwp_t *);
697 1.161 rmind void uvm_lwp_setuarea(lwp_t *, vaddr_t);
698 1.119 chs int uvm_vslock(struct vmspace *, void *, size_t, vm_prot_t);
699 1.119 chs void uvm_vsunlock(struct vmspace *, void *, size_t);
700 1.135 ad void uvm_cpu_attach(struct cpu_info *);
701 1.1 mrg
702 1.1 mrg
703 1.1 mrg /* uvm_init.c */
/* uvm_md_init(): machine-dependent early setup; uvm_init(): main UVM bootstrap. */
704 1.199 cherry void uvm_md_init(void);
705 1.91 junyoung void uvm_init(void);
706 1.1 mrg
707 1.1 mrg /* uvm_io.c */
/* uvm_io: perform uio-style I/O on an arbitrary map (e.g. another process's). */
708 1.197 christos int uvm_io(struct vm_map *, struct uio *, int);
709 1.1 mrg
710 1.1 mrg /* uvm_km.c */
/*
 * Kernel memory allocation: uvm_km_alloc/free manage kernel virtual
 * address space; uvm_km_suballoc carves a submap out of a parent map;
 * the uvm_km_kmem_* functions back a vmem(9) arena.
 */
711 1.100 yamt vaddr_t uvm_km_alloc(struct vm_map *, vsize_t, vsize_t,
712 1.100 yamt uvm_flag_t);
713 1.198 maxv int uvm_km_protect(struct vm_map *, vaddr_t, vsize_t,
714 1.198 maxv vm_prot_t);
715 1.100 yamt void uvm_km_free(struct vm_map *, vaddr_t, vsize_t,
716 1.100 yamt uvm_flag_t);
717 1.100 yamt
718 1.91 junyoung struct vm_map *uvm_km_suballoc(struct vm_map *, vaddr_t *,
719 1.126 thorpej vaddr_t *, vsize_t, int, bool,
720 1.180 para struct vm_map *);
721 1.180 para int uvm_km_kmem_alloc(vmem_t *, vmem_size_t, vm_flag_t,
722 1.180 para vmem_addr_t *);
723 1.180 para void uvm_km_kmem_free(vmem_t *, vmem_addr_t, vmem_size_t);
724 1.180 para bool uvm_km_va_starved_p(void);
725 1.65 chs
726 1.1 mrg /* uvm_map.c */
/*
 * Map management: establish/protect/wire mappings, and create,
 * share, fork and destroy vmspace structures (one per process).
 */
727 1.91 junyoung int uvm_map(struct vm_map *, vaddr_t *, vsize_t,
728 1.91 junyoung struct uvm_object *, voff_t, vsize_t,
729 1.91 junyoung uvm_flag_t);
730 1.91 junyoung int uvm_map_pageable(struct vm_map *, vaddr_t,
731 1.126 thorpej vaddr_t, bool, int);
732 1.91 junyoung int uvm_map_pageable_all(struct vm_map *, int, vsize_t);
733 1.126 thorpej bool uvm_map_checkprot(struct vm_map *, vaddr_t,
734 1.91 junyoung vaddr_t, vm_prot_t);
735 1.91 junyoung int uvm_map_protect(struct vm_map *, vaddr_t,
736 1.126 thorpej vaddr_t, vm_prot_t, bool);
737 1.204 joerg int uvm_map_protect_user(struct lwp *, vaddr_t, vaddr_t,
738 1.204 joerg vm_prot_t);
739 1.185 martin struct vmspace *uvmspace_alloc(vaddr_t, vaddr_t, bool);
740 1.91 junyoung void uvmspace_init(struct vmspace *, struct pmap *,
741 1.185 martin vaddr_t, vaddr_t, bool);
742 1.185 martin void uvmspace_exec(struct lwp *, vaddr_t, vaddr_t, bool);
743 1.185 martin void uvmspace_spawn(struct lwp *, vaddr_t, vaddr_t, bool);
744 1.91 junyoung struct vmspace *uvmspace_fork(struct vmspace *);
745 1.111 yamt void uvmspace_addref(struct vmspace *);
746 1.91 junyoung void uvmspace_free(struct vmspace *);
747 1.91 junyoung void uvmspace_share(struct proc *, struct proc *);
748 1.91 junyoung void uvmspace_unshare(struct lwp *);
749 1.1 mrg
/*
 * Virtual-object addresses: acquire a stable reference to the
 * object/offset backing a virtual address, compare two such
 * references, and release one.
 */
750 1.223 thorpej bool uvm_voaddr_acquire(struct vm_map *, vaddr_t,
751 1.223 thorpej struct uvm_voaddr *);
752 1.223 thorpej void uvm_voaddr_release(struct uvm_voaddr *);
753 1.223 thorpej int uvm_voaddr_compare(const struct uvm_voaddr *,
754 1.223 thorpej const struct uvm_voaddr *);
755 1.223 thorpej
/* Debugger helper: identify what a kernel address is, printing via the callback. */
756 1.140 yamt void uvm_whatis(uintptr_t, void (*)(const char *, ...));
757 1.1 mrg
758 1.1 mrg /* uvm_meter.c */
/*
 * VM statistics and sysctl support.  The uvm_pctparam_* functions
 * manage percentage-valued tunables (struct uvm_pctparam) and their
 * sysctl nodes.
 */
759 1.91 junyoung int uvm_sysctl(int *, u_int, void *, size_t *,
760 1.91 junyoung void *, size_t, struct proc *);
761 1.118 yamt int uvm_pctparam_check(struct uvm_pctparam *, int);
762 1.108 yamt void uvm_pctparam_set(struct uvm_pctparam *, int);
763 1.118 yamt int uvm_pctparam_get(struct uvm_pctparam *);
764 1.118 yamt void uvm_pctparam_init(struct uvm_pctparam *, int,
765 1.118 yamt int (*)(struct uvm_pctparam *, int));
766 1.118 yamt int uvm_pctparam_createsysctlnode(struct uvm_pctparam *,
767 1.118 yamt const char *, const char *);
768 1.214 ad void uvm_update_uvmexp(void);
769 1.1 mrg
770 1.1 mrg /* uvm_mmap.c */
/*
 * Kernel-internal mmap helpers: map a device or anonymous memory
 * into a process, and compute the default placement address for a
 * new mapping.
 */
771 1.192 chs int uvm_mmap_dev(struct proc *, void **, size_t, dev_t,
772 1.192 chs off_t);
773 1.192 chs int uvm_mmap_anon(struct proc *, void **, size_t);
774 1.195 martin vaddr_t uvm_default_mapaddr(struct proc *, vaddr_t, vsize_t,
775 1.195 martin int);
776 1.1 mrg
777 1.109 yamt /* uvm_mremap.c */
/* Resize/move an existing mapping, possibly into a different map. */
778 1.109 yamt int uvm_mremap(struct vm_map *, vaddr_t, vsize_t,
779 1.109 yamt struct vm_map *, vaddr_t *, vsize_t,
780 1.109 yamt struct proc *, int);
781 1.109 yamt
782 1.120 yamt /* uvm_object.c */
/*
 * Generic uvm_object lifecycle: init/destroy, replace the object's
 * lock (a krwlock_t as of rev 1.221), and wire/unwire page ranges.
 */
783 1.173 rmind void uvm_obj_init(struct uvm_object *,
784 1.173 rmind const struct uvm_pagerops *, bool, u_int);
785 1.221 ad void uvm_obj_setlock(struct uvm_object *, krwlock_t *);
786 1.173 rmind void uvm_obj_destroy(struct uvm_object *, bool);
787 1.175 christos int uvm_obj_wirepages(struct uvm_object *, off_t, off_t,
788 1.175 christos struct pglist *);
789 1.173 rmind void uvm_obj_unwirepages(struct uvm_object *, off_t, off_t);
790 1.120 yamt
791 1.1 mrg /* uvm_page.c */
/*
 * Physical page allocation.  uvm_pagealloc() is the common case:
 * uvm_pagealloc_strat() with the normal allocation strategy and
 * free list 0.  uvm_page_numa_load() records a physical range's
 * NUMA node for allocator affinity.
 */
792 1.217 ad int uvm_availmem(void);
793 1.216 ad void uvm_page_numa_load(paddr_t, paddr_t, u_int);
794 1.91 junyoung struct vm_page *uvm_pagealloc_strat(struct uvm_object *,
795 1.91 junyoung voff_t, struct vm_anon *, int, int, int);
796 1.24 chs #define uvm_pagealloc(obj, off, anon, flags) \
797 1.24 chs uvm_pagealloc_strat((obj), (off), (anon), (flags), \
798 1.24 chs UVM_PGA_STRAT_NORMAL, 0)
799 1.91 junyoung void uvm_pagereplace(struct vm_page *,
800 1.91 junyoung struct vm_page *);
801 1.91 junyoung void uvm_pagerealloc(struct vm_page *,
802 1.91 junyoung struct uvm_object *, voff_t);
803 1.91 junyoung void uvm_setpagesize(void);
804 1.1 mrg
805 1.53 chs /* uvm_pager.c */
/* Async-I/O completion: finish a pager buf and clean up its pages. */
806 1.91 junyoung void uvm_aio_aiodone(struct buf *);
807 1.145 yamt void uvm_aio_aiodone_pages(struct vm_page **, int, bool,
808 1.145 yamt int);
809 1.53 chs
810 1.1 mrg /* uvm_pdaemon.c */
/*
 * Pagedaemon interface: the pageout thread itself, the workqueue
 * worker that completes async pageouts, and accounting of pageouts
 * in flight (start/done).
 */
811 1.91 junyoung void uvm_pageout(void *);
812 1.124 yamt struct work;
813 1.124 yamt void uvm_aiodone_worker(struct work *, void *);
814 1.143 ad void uvm_pageout_start(int);
815 1.143 ad void uvm_pageout_done(int);
816 1.118 yamt void uvm_estimatepageable(int *, int *);
817 1.1 mrg
818 1.1 mrg /* uvm_pglist.c */
/* Contiguous/constrained physical page-list allocation and release. */
819 1.91 junyoung int uvm_pglistalloc(psize_t, paddr_t, paddr_t,
820 1.91 junyoung paddr_t, paddr_t, struct pglist *, int, int);
821 1.91 junyoung void uvm_pglistfree(struct pglist *);
822 1.1 mrg
823 1.1 mrg /* uvm_swap.c */
824 1.91 junyoung void uvm_swap_init(void);
825 1.1 mrg
826 1.1 mrg /* uvm_unix.c */
/* Grow the process stack/data segment to cover the given address. */
827 1.91 junyoung int uvm_grow(struct proc *, vaddr_t);
828 1.1 mrg
829 1.1 mrg /* uvm_user.c */
/* Remove mappings in the given address range from a map. */
830 1.91 junyoung void uvm_deallocate(struct vm_map *, vaddr_t, vsize_t);
831 1.1 mrg
832 1.1 mrg /* uvm_vnode.c */
/*
 * Vnode pager hooks: track vnode size changes and look up pages
 * belonging to a vnode's uvm_object.  The uvn_*_p predicates report
 * text/clean/needs-writefault state of the object.
 */
833 1.219 ad struct uvm_page_array;
834 1.91 junyoung void uvm_vnp_setsize(struct vnode *, voff_t);
835 1.130 yamt void uvm_vnp_setwritesize(struct vnode *, voff_t);
836 1.91 junyoung int uvn_findpages(struct uvm_object *, voff_t,
837 1.219 ad unsigned int *, struct vm_page **,
838 1.219 ad struct uvm_page_array *, unsigned int);
839 1.126 thorpej bool uvn_text_p(struct uvm_object *);
840 1.126 thorpej bool uvn_clean_p(struct uvm_object *);
841 1.126 thorpej bool uvn_needs_writefault_p(struct uvm_object *);
842 1.37 thorpej
843 1.37 thorpej /* kern_malloc.c */
/* Compute nkmempages, the sizing parameter for kernel memory allocation. */
844 1.181 para void kmeminit_nkmempages(void);
845 1.181 para extern int nkmempages;
846 1.1 mrg
847 1.31 thorpej #endif /* _KERNEL */
848 1.31 thorpej
849 1.8 perry #endif /* _UVM_UVM_EXTERN_H_ */
850