vmparam.h revision 1.12.10.2
1 1.1 matt /*-
2 1.1 matt * Copyright (c) 2001 The NetBSD Foundation, Inc.
3 1.1 matt * All rights reserved.
4 1.1 matt *
5 1.1 matt * This code is derived from software contributed to The NetBSD Foundation
6 1.1 matt * by Matt Thomas <matt (at) 3am-softwre.com> of Allegro Networks, Inc.
7 1.1 matt *
8 1.1 matt * Redistribution and use in source and binary forms, with or without
9 1.1 matt * modification, are permitted provided that the following conditions
10 1.1 matt * are met:
11 1.1 matt * 1. Redistributions of source code must retain the above copyright
12 1.1 matt * notice, this list of conditions and the following disclaimer.
13 1.1 matt * 2. Redistributions in binary form must reproduce the above copyright
14 1.1 matt * notice, this list of conditions and the following disclaimer in the
15 1.1 matt * documentation and/or other materials provided with the distribution.
16 1.1 matt *
17 1.1 matt * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
18 1.1 matt * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
19 1.1 matt * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
20 1.1 matt * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
21 1.1 matt * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22 1.1 matt * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 1.1 matt * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 1.1 matt * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25 1.1 matt * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 1.1 matt * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27 1.1 matt * POSSIBILITY OF SUCH DAMAGE.
28 1.1 matt */
29 1.1 matt
30 1.1 matt #ifndef _POWERPC_OEA_VMPARAM_H_
31 1.1 matt #define _POWERPC_OEA_VMPARAM_H_
32 1.1 matt
33 1.1 matt #include <sys/queue.h>
34 1.1 matt
35 1.1 matt /*
36 1.1 matt * Most of the definitions in this can be overridden by a machine-specific
37 1.1 matt * vmparam.h if required. Otherwise a port can just include this file
38 1.1 matt * to get the right thing to happen.
39 1.1 matt */
40 1.1 matt
41 1.6 thorpej /*
42 1.6 thorpej * OEA processors have 4K pages. Override the PAGE_* definitions
43 1.6 thorpej * to be compile-time constants.
44 1.6 thorpej */
45 1.6 thorpej #define PAGE_SHIFT 12 /* log2 of the page size */
46 1.6 thorpej #define PAGE_SIZE (1 << PAGE_SHIFT) /* 4096 bytes */
47 1.6 thorpej #define PAGE_MASK (PAGE_SIZE - 1) /* byte-offset-within-page mask */
48 1.6 thorpej
49 1.1 matt #ifndef USRSTACK
50 1.1 matt #define USRSTACK VM_MAXUSER_ADDRESS /* top of the user stack */
51 1.1 matt #endif
52 1.1 matt
53 1.2 matt #ifndef USRSTACK32
54 1.2 matt #define USRSTACK32 ((uint32_t)VM_MAXUSER_ADDRESS) /* 32-bit stack top */
55 1.2 matt #endif
56 1.2 matt
57 1.1 matt #ifndef MAXTSIZ
58 1.1 matt #define MAXTSIZ (64*1024*1024) /* maximum text size */
59 1.1 matt #endif
60 1.1 matt
61 1.1 matt #ifndef MAXDSIZ
62 1.1 matt #define MAXDSIZ (1024*1024*1024) /* maximum data size */
63 1.1 matt #endif
64 1.1 matt
65 1.1 matt #ifndef MAXSSIZ
66 1.1 matt #define MAXSSIZ (32*1024*1024) /* maximum stack size */
67 1.1 matt #endif
68 1.1 matt
69 1.1 matt #ifndef DFLDSIZ
70 1.11 aymeric #define DFLDSIZ (256*1024*1024) /* default data size */
71 1.1 matt #endif
72 1.1 matt
73 1.1 matt #ifndef DFLSSIZ
74 1.1 matt #define DFLSSIZ (2*1024*1024) /* default stack size */
75 1.1 matt #endif
76 1.1 matt
77 1.1 matt /*
78 1.1 matt * Default number of pages in the user raw I/O map.
79 1.1 matt */
80 1.1 matt #ifndef USRIOSIZE
81 1.1 matt #define USRIOSIZE 1024 /* pages */
82 1.1 matt #endif
83 1.1 matt
84 1.1 matt /*
85 1.1 matt * The number of seconds for a process to be blocked before being
86 1.1 matt * considered very swappable.
87 1.1 matt */
88 1.1 matt #ifndef MAXSLP
89 1.1 matt #define MAXSLP 20 /* seconds */
90 1.1 matt #endif
91 1.1 matt
92 1.1 matt /*
93 1.1 matt * Segment handling stuff. Each segment covers 0x10000000 (256MB)
94 1.1 matt * of address space.
95 1.1 matt */
95 1.2 matt #define SEGMENT_LENGTH ( 0x10000000L) /* 256MB per segment */
96 1.2 matt #define SEGMENT_MASK (~0x0fffffffL) /* mask off within-segment offset */
97 1.1 matt
/*
 * Macros to manipulate VSIDs (virtual segment identifiers).
 *
 * A VSID is built from a segment-register key (sr) and a hash value;
 * the composed value lives in the SR_VSID field (shifted by
 * SR_VSID_SHFT).  The two branches below differ only in which bits of
 * the VSID hold the key: the disabled branch puts the SR# in the top
 * bits, the active branch keeps it in the low SR_KEY_LEN bits.
 */
#if 0
/*
 * Move the SR# to the top bits to make the lower bits entirely random
 * so as to give better PTE distribution.
 */
#define VSID__KEYSHFT		(SR_VSID_WIDTH - SR_KEY_LEN)
/*
 * Step between VSIDs of adjacent SRs; must be a power of two so the
 * hash mask below is a proper all-ones mask.
 * (was "((1L << VSID__KEYSHFT) - 1)", which made VSID__HASHMASK the
 * non-mask value (1 << VSID__KEYSHFT) - 2)
 */
#define VSID_SR_INCREMENT	(1L << VSID__KEYSHFT)
#define VSID__HASHMASK		(VSID_SR_INCREMENT - 1)	/* low (hash) bits */
#define VSID_MAKE(sr, hash) \
	(( \
	    (((sr) << VSID__KEYSHFT) | ((hash) & VSID__HASHMASK)) \
		<< SR_VSID_SHFT) & SR_VSID)
#define VSID_TO_SR(vsid) \
	(((vsid) & SR_VSID) >> (SR_VSID_SHFT + VSID__KEYSHFT))
#define VSID_TO_HASH(vsid) \
	((((vsid) & SR_VSID) >> SR_VSID_SHFT) & VSID__HASHMASK)
#else
#define VSID__HASHSHFT		(SR_KEY_LEN)
#define VSID_SR_INCREMENT	(1L << 0)	/* SR# is in the low bits */
#define VSID__KEYMASK		((1L << VSID__HASHSHFT) - 1)
#define VSID_MAKE(sr, hash) \
	(( \
	    (((hash) << VSID__HASHSHFT) | ((sr) & VSID__KEYMASK)) \
		<< SR_VSID_SHFT) & SR_VSID)
#define VSID_TO_SR(vsid) \
	(((vsid) >> SR_VSID_SHFT) & VSID__KEYMASK)
#define VSID_TO_HASH(vsid) \
	(((vsid) & SR_VSID) >> (SR_VSID_SHFT + VSID__HASHSHFT))
#endif /* 0 */
130 1.1 matt
131 1.1 matt /*
132 1.1 matt * Fixed segments
133 1.1 matt */
134 1.1 matt #ifndef USER_SR
135 1.1 matt #define USER_SR 12 /* SR index for the user-access window */
136 1.1 matt #endif
137 1.3 matt #ifndef KERNEL_SR
138 1.1 matt #define KERNEL_SR 13 /* SR index for the primary kernel segment */
139 1.3 matt #endif
140 1.3 matt #ifndef KERNEL2_SR
141 1.1 matt #define KERNEL2_SR 14 /* SR index for the secondary kernel segment */
142 1.3 matt #endif
143 1.1 matt #define KERNEL2_SEGMENT VSID_MAKE(KERNEL2_SR, KERNEL_VSIDBITS)
144 1.1 matt #define KERNEL_VSIDBITS 0xfffff /* hash bits used for kernel VSIDs */
145 1.10 matt #define PHYSMAP_VSIDBITS 0xffffe /* hash bits used for physmap VSIDs */
146 1.10 matt #define PHYSMAPN_SEGMENT(s) VSID_MAKE(s, PHYSMAP_VSIDBITS)
147 1.1 matt #define KERNEL_SEGMENT VSID_MAKE(KERNEL_SR, KERNEL_VSIDBITS)
148 1.9 sanjayl #define KERNELN_SEGMENT(s) VSID_MAKE(s, KERNEL_VSIDBITS)
149 1.9 sanjayl /* XXXSL: need something here that will never be mapped */
150 1.9 sanjayl #define EMPTY_SEGMENT VSID_MAKE(0, 0xffffe)
151 1.1 matt #define USER_ADDR ((void *)(USER_SR << ADDR_SR_SHFT)) /* base VA of USER_SR */
152 1.1 matt
153 1.1 matt /*
154 1.1 matt * Some system constants
155 1.1 matt */
156 1.1 matt #ifndef NPMAPS
157 1.1 matt #define NPMAPS 32768 /* Number of pmaps in system */
158 1.1 matt #endif
159 1.1 matt
160 1.1 matt #define VM_MIN_ADDRESS ((vaddr_t) 0)
161 1.2 matt #define VM_MAXUSER_ADDRESS ((vaddr_t) ~0xfffL) /* top of VA space less one page */
162 1.1 matt #define VM_MAX_ADDRESS VM_MAXUSER_ADDRESS
163 1.1 matt #define VM_MIN_KERNEL_ADDRESS ((vaddr_t) (KERNEL_SR << ADDR_SR_SHFT))
164 1.1 matt #define VM_MAX_KERNEL_ADDRESS (VM_MIN_KERNEL_ADDRESS + 2*SEGMENT_LENGTH) /* two kernel segments */
165 1.1 matt
166 1.4 matt /*
167 1.4 matt * The address to which unspecified mapping requests default.
168 1.4 matt * Put the stack in its own segment and start mmapping at the
169 1.4 matt * top of the next lower segment.
170 1.4 matt */
171 1.5 atatat #ifdef _KERNEL_OPT
172 1.5 atatat #include "opt_uvm.h"
173 1.5 atatat #endif
174 1.8 matt #define __USE_TOPDOWN_VM
175 1.4 matt #define VM_DEFAULT_ADDRESS(da, sz) \
176 1.4 matt (((VM_MAXUSER_ADDRESS - MAXSSIZ) & SEGMENT_MASK) - round_page(sz))
177 1.4 matt
178 1.1 matt #ifndef VM_PHYSSEG_MAX
179 1.1 matt #define VM_PHYSSEG_MAX 16 /* max physical memory segments */
180 1.1 matt #endif
181 1.1 matt #define VM_PHYSSEG_STRAT VM_PSTRAT_BIGFIRST /* allocate from largest segment first */
182 1.1 matt #define VM_PHYSSEG_NOADD
183 1.1 matt
184 1.1 matt #ifndef VM_PHYS_SIZE
185 1.1 matt #define VM_PHYS_SIZE (USRIOSIZE * PAGE_SIZE) /* user raw I/O map, in bytes */
186 1.1 matt #endif
187 1.1 matt
188 1.1 matt #ifndef VM_MAX_KERNEL_BUF
189 1.1 matt #define VM_MAX_KERNEL_BUF (SEGMENT_LENGTH * 3 / 4)
190 1.1 matt #endif
191 1.1 matt
192 1.1 matt #define VM_NFREELIST 16 /* 16 distinct memory segments */
193 1.1 matt #define VM_FREELIST_DEFAULT 0
194 1.1 matt #define VM_FREELIST_FIRST256 1
195 1.1 matt #define VM_FREELIST_FIRST16 2
196 1.1 matt #define VM_FREELIST_MAX 3
197 1.1 matt
198 1.1 matt #ifndef _LOCORE
199 1.1 matt
200 1.1 matt LIST_HEAD(pvo_head, pvo_entry); /* head type for lists of pvo_entry */
201 1.1 matt
202 1.1 matt #define __HAVE_VM_PAGE_MD
203 1.1 matt
204 1.1 matt /* Machine-dependent data attached to each vm_page. */
204 1.1 matt struct vm_page_md {
205 1.1 matt struct pvo_head mdpg_pvoh; /* pvo_entry list for this page */
206 1.1 matt unsigned int mdpg_attrs; /* page attribute bits */
207 1.1 matt };
208 1.1 matt
209 1.1 matt /* Initialize a page's MD fields: empty pvo list, no attributes. */
209 1.1 matt #define VM_MDPAGE_INIT(pg) do { \
210 1.1 matt LIST_INIT(&(pg)->mdpage.mdpg_pvoh); \
211 1.1 matt (pg)->mdpage.mdpg_attrs = 0; \
212 1.1 matt } while (/*CONSTCOND*/0)
213 1.1 matt
214 1.1 matt #endif /* _LOCORE */
215 1.1 matt
216 1.1 matt #endif /* _POWERPC_OEA_VMPARAM_H_ */
217