/*	$Id: uvm_amap.h,v 1.1 1998/02/05 06:25:10 mrg Exp $	*/

/*
 * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
 *         >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
 */
/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uvm_amap.h: anonymous memory map interface
 */

/* UVM history buffer for map-related events (no-op unless UVMHIST is on) */
UVMHIST_DECL(maphist);

/*
 * defines for handling of large sparse amaps.
 *
 * currently the kernel likes to allocate large chunks of VM to reserve
 * them for possible use.  for example, it allocates (reserves) a large
 * chunk of user VM for possible stack growth.  most of the time only
 * a page or two of this VM is actually used.  since the stack is anonymous
 * memory it makes sense for it to live in an amap, but if we allocated
 * an amap for the entire stack range we could end up wasting a large
 * amount of malloc'd KVM.
 *
 * for example, on the i386 at boot time we allocate two amaps for the stack
 * of /sbin/init:
 *  1. a 7680 slot amap at protection 0 (reserve space for stack)
 *  2. a 512 slot amap at protection 7 (top of stack)
 *
 * most of that VM is never mapped or used.
 *
 * to avoid allocating amap resources for the whole range we have the
 * VM system look for maps that are larger than UVM_AMAP_LARGE slots
 * (note that 1 slot = 1 vm_page).  for maps that are large, we attempt
 * to break them up into UVM_AMAP_CHUNK slot sized amaps.
 *
 * so, in the i386 example, the 7680 slot area is never referenced so
 * nothing gets allocated.  the 512 slot area is referenced, and it
 * gets divided into 16 slot chunks (hopefully with one 16 slot chunk
 * being enough to handle the whole stack...).
 */

#define UVM_AMAP_LARGE	256	/* # of slots in "large" amap */
#define UVM_AMAP_CHUNK	16	/* # of slots to chunk large amaps in */


/*
 * AMAP_B2SLOT: convert a byte count B into an amap slot count S
 * (1 slot == 1 vm_page).  under DIAGNOSTIC, B must be page aligned
 * or we panic.  wrapped in do/while(0) so the macro behaves as a
 * single statement (safe in unbraced if/else bodies).
 */
#ifdef DIAGNOSTIC
#define AMAP_B2SLOT(S,B) do {						\
	if ((B) % PAGE_SIZE)						\
		panic("AMAP_B2SLOT: invalid byte count");		\
	(S) = (B) / PAGE_SIZE;						\
} while (0)
#else
#define AMAP_B2SLOT(S,B) (S) = (B) / PAGE_SIZE
#endif

#ifdef VM_AMAP_PPREF
/* sentinel: per-page reference count array not allocated */
#define PPREF_NONE ((int *) -1)
#endif

/*
 * handle inline options: AMAP_INLINE expands to "static __inline" when
 * the amap functions are being compiled inline, and to nothing otherwise.
 */

#ifdef UVM_AMAP_INLINE
#define AMAP_INLINE static __inline
#else
#define AMAP_INLINE /* nothing */
#endif /* UVM_AMAP_INLINE */

102 /*
103 * prototypes: the following prototypes define the interface to amaps
104 */
105
106 AMAP_INLINE vm_offset_t amap_add __P((struct vm_aref *, vm_offset_t,
107 struct vm_anon *, int));
108 struct vm_amap *amap_alloc __P((vm_offset_t, vm_offset_t, int));
109 void amap_copy __P((vm_map_t, vm_map_entry_t, int, boolean_t,
110 vm_offset_t, vm_offset_t));
111 void amap_cow_now __P((vm_map_t, vm_map_entry_t));
112 void amap_extend __P((vm_map_entry_t, vm_size_t));
113 void amap_free __P((struct vm_amap *));
114 AMAP_INLINE struct vm_anon *amap_lookup __P((struct vm_aref *, vm_offset_t));
115 AMAP_INLINE void amap_lookups __P((struct vm_aref *, vm_offset_t,
116 struct vm_anon **, int));
117 #ifdef VM_AMAP_PPREF
118 void amap_pp_adjref __P((struct vm_amap *, int, vm_size_t, int));
119 void amap_pp_establish __P((struct vm_amap *));
120 #endif
121 AMAP_INLINE void amap_ref __P((vm_map_entry_t, int));
122 void amap_share_protect __P((vm_map_entry_t, vm_prot_t));
123 void amap_splitref __P((struct vm_aref *, struct vm_aref *, vm_offset_t));
124 AMAP_INLINE void amap_unadd __P((struct vm_amap *, vm_offset_t));
125 AMAP_INLINE void amap_unref __P((vm_map_entry_t, int));
126 void amap_wipeout __P((struct vm_amap *));
127 #ifdef VM_AMAP_PPREF
128 void amap_wiperange __P((struct vm_amap *, int, int));
129 #endif
130
131 struct vm_anon *uvm_analloc __P((void));
132 void uvm_anfree __P((struct vm_anon *));
133 void uvm_anon_init __P((void));
134 void uvm_anon_add __P((int));
135 struct vm_page *uvm_anon_lockloanpg __P((struct vm_anon *));
136