/*	$NetBSD: uvm_amap.h,v 1.4 1998/02/10 02:34:20 perry Exp $	*/

/*
 * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
 *         >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
 */
/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_amap.h,v 1.1.2.7 1998/01/05 18:12:56 chuck Exp
 */

#ifndef _UVM_UVM_AMAP_H_
#define _UVM_UVM_AMAP_H_

/*
 * uvm_amap.h
 */

UVMHIST_DECL(maphist);

/*
 * defines for handling of large sparse amaps.
 *
 * currently the kernel likes to allocate large chunks of VM to reserve
 * them for possible use.  for example, it allocates (reserves) a large
 * chunk of user VM for possible stack growth.  most of the time only
 * a page or two of this VM is actually used.  since the stack is anonymous
 * memory it makes sense for it to live in an amap, but if we allocated
 * an amap for the entire stack range we could end up wasting a large
 * amount of malloc'd KVM.
 *
 * for example, on the i386 at boot time we allocate two amaps for the stack
 * of /sbin/init:
 *  1. a 7680 slot amap at protection 0 (reserve space for stack)
 *  2. a 512 slot amap at protection 7 (top of stack)
 *
 * most of that VM is never mapped or used.
 *
 * to avoid allocating amap resources for the whole range we have the
 * VM system look for maps that are larger than UVM_AMAP_LARGE slots
 * (note that 1 slot = 1 vm_page).  for maps that are large, we attempt
 * to break them up into UVM_AMAP_CHUNK slot sized amaps.
 *
 * so, in the i386 example, the 7680 slot area is never referenced so
 * nothing gets allocated.  the 512 slot area is referenced, and it
 * gets divided into 16 slot chunks (hopefully with one 16 slot chunk
 * being enough to handle the whole stack...).
 */

#define UVM_AMAP_LARGE	256	/* # of slots in "large" amap */
#define UVM_AMAP_CHUNK	16	/* # of slots to chunk large amaps in */

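/*
 * to make the chunking concrete, a worked example (an editorial
 * sketch, assuming the usual i386 PAGE_SIZE of 4096 bytes):
 *
 *	512 slots * 4096 bytes/slot	= 2MB of stack VM reserved
 *	512 slots / UVM_AMAP_CHUNK	= 32 candidate 16-slot amaps
 *
 * of those 32 candidate chunks, typically only the one covering the
 * top of the stack is ever faulted on, so only one 16-slot amap
 * (covering 16 * 4096 = 64KB of VM) actually gets allocated.
 */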

#ifdef DIAGNOSTIC
#define AMAP_B2SLOT(S,B) { \
	if ((B) % PAGE_SIZE) \
		panic("AMAP_B2SLOT: invalid byte count"); \
	(S) = (B) / PAGE_SIZE; \
}
#else
#define AMAP_B2SLOT(S,B) (S) = (B) / PAGE_SIZE
#endif

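/*
 * a hypothetical use of AMAP_B2SLOT (illustration only, not part of
 * this interface): convert a page-aligned byte count into a slot
 * count.  the byte count must be a multiple of PAGE_SIZE; under
 * DIAGNOSTIC a misaligned count panics rather than silently
 * rounding down.
 *
 *	int slots;
 *
 *	AMAP_B2SLOT(slots, entry->end - entry->start);
 *
 * ("entry" here is assumed to be a vm_map_entry_t, whose start and
 * end addresses are page aligned.)
 */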
#ifdef VM_AMAP_PPREF
#define PPREF_NONE ((int *) -1)	/* not using per-page refs (ppref) */
#endif

/*
 * handle inline options
 */

#ifdef UVM_AMAP_INLINE
#define AMAP_INLINE static __inline
#else
#define AMAP_INLINE	/* nothing */
#endif /* UVM_AMAP_INLINE */

/*
 * prototypes: the following prototypes define the interface to amaps
 */

AMAP_INLINE vm_offset_t amap_add __P((struct vm_aref *, vm_offset_t,
			struct vm_anon *, int));
struct vm_amap *amap_alloc __P((vm_offset_t, vm_offset_t, int));
void amap_copy __P((vm_map_t, vm_map_entry_t, int, boolean_t,
			vm_offset_t, vm_offset_t));
void amap_cow_now __P((vm_map_t, vm_map_entry_t));
void amap_extend __P((vm_map_entry_t, vm_size_t));
void amap_free __P((struct vm_amap *));
AMAP_INLINE struct vm_anon *amap_lookup __P((struct vm_aref *, vm_offset_t));
AMAP_INLINE void amap_lookups __P((struct vm_aref *, vm_offset_t,
			struct vm_anon **, int));
#ifdef VM_AMAP_PPREF
void amap_pp_adjref __P((struct vm_amap *, int, vm_size_t, int));
void amap_pp_establish __P((struct vm_amap *));
#endif
AMAP_INLINE void amap_ref __P((vm_map_entry_t, int));
void amap_share_protect __P((vm_map_entry_t, vm_prot_t));
void amap_splitref __P((struct vm_aref *, struct vm_aref *, vm_offset_t));
AMAP_INLINE void amap_unadd __P((struct vm_amap *, vm_offset_t));
AMAP_INLINE void amap_unref __P((vm_map_entry_t, int));
void amap_wipeout __P((struct vm_amap *));
#ifdef VM_AMAP_PPREF
void amap_wiperange __P((struct vm_amap *, int, int));
#endif

struct vm_anon *uvm_analloc __P((void));
void uvm_anfree __P((struct vm_anon *));
void uvm_anon_init __P((void));
void uvm_anon_add __P((int));
struct vm_page *uvm_anon_lockloanpg __P((struct vm_anon *));
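
/*
 * a hypothetical sketch tying the above prototypes together into an
 * allocate/add/lookup/teardown lifecycle.  illustration only: the
 * vm_aref field names, the M_WAITOK wait flag, and the meaning of
 * amap_add's final int argument as a "replace" flag are assumptions,
 * and all locking is omitted.
 *
 *	struct vm_aref aref;
 *	struct vm_anon *anon;
 *
 *	aref.ar_pageoff = 0;			   <- assumed field names
 *	aref.ar_amap = amap_alloc(sz, 0, M_WAITOK);
 *	anon = uvm_analloc();			   <- get a fresh anon
 *	amap_add(&aref, offset, anon, FALSE);	   <- install, don't replace
 *	anon = amap_lookup(&aref, offset);	   <- fetch it back
 *	amap_wipeout(aref.ar_amap);		   <- drop anons, free amap
 */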

#endif /* _UVM_UVM_AMAP_H_ */