/*	$NetBSD: uvm_physseg.c,v 1.8.12.2 2017/12/03 11:39:22 jdolecek Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_page.h   7.3 (Berkeley) 4/21/91
 * from: Id: uvm_page.h,v 1.1.2.6 1998/02/04 02:31:42 chuck Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * Consolidated API from uvm_page.c and others.
 * Consolidated and designed by Cherry G. Mathew <cherry@zyx.in>
 * rbtree(3) backing implementation by:
 * Santhosh N. Raju <santhosh.raju@gmail.com>
 */

#ifdef _KERNEL_OPT
#include "opt_uvm.h"
#endif

#include <sys/param.h>
#include <sys/types.h>
#include <sys/extent.h>
#include <sys/kmem.h>

#include <uvm/uvm.h>
#include <uvm/uvm_page.h>
#include <uvm/uvm_param.h>
#include <uvm/uvm_pdpolicy.h>
#include <uvm/uvm_physseg.h>

/*
 * uvm_physseg: describes one segment of physical memory
 */
struct uvm_physseg {
	struct rb_node rb_node;		/* tree information */
	paddr_t	start;			/* PF# of first page in segment */
	paddr_t	end;			/* (PF# of last page in segment) + 1 */
	paddr_t	avail_start;		/* PF# of first free page in segment */
	paddr_t	avail_end;		/* (PF# of last free page in segment) +1 */
	struct vm_page *pgs;		/* vm_page structures (from start) */
	struct extent *ext;		/* extent(9) structure to manage pgs[] */
	int	free_list;		/* which free list they belong on */
	u_int	start_hint;		/* start looking for free pages here */
					/* protected by uvm_fpageqlock */
#ifdef __HAVE_PMAP_PHYSSEG
	struct pmap_physseg pmseg;	/* pmap specific (MD) data */
#endif
};

/*
 * These functions are reserved for uvm(9) internal use and are not
 * exported in the header file uvm_physseg.h
 *
 * Thus they are redefined here.
 */
void uvm_physseg_init_seg(uvm_physseg_t, struct vm_page *);
void uvm_physseg_seg_chomp_slab(uvm_physseg_t, struct vm_page *, size_t);

/* returns a pgs array */
struct vm_page *uvm_physseg_seg_alloc_from_slab(uvm_physseg_t, size_t);

#if defined(UVM_HOTPLUG) /* rbtree implementation */

#define	HANDLE_TO_PHYSSEG_NODE(h)	((struct uvm_physseg *)(h))
#define	PHYSSEG_NODE_TO_HANDLE(u)	((uvm_physseg_t)(u))

struct uvm_physseg_graph {
	struct rb_tree rb_tree;		/* Tree for entries */
	int nentries;			/* Number of entries */
};

static struct uvm_physseg_graph uvm_physseg_graph;

/*
 * Note on kmem(9) allocator usage:
 * We take the conservative approach that plug/unplug are allowed to
 * fail in high memory stress situations.
 *
 * We want to avoid re-entrant situations in which one plug/unplug
 * operation is waiting on a previous one to complete, since this
 * makes the design more complicated than necessary.
 *
 * We may review this and change its behaviour, once the use cases
 * become more obvious.
 */

/*
 * Special alloc()/free() functions for boot time support:
 * We assume that alloc() at boot time is only for new 'vm_physseg's.
 * This allows us to use a static array for memory allocation at boot
 * time. Thus we avoid using kmem(9) which is not ready at this point
 * in boot.
 *
 * After kmem(9) is ready, we use it. We currently discard any free()s
 * to this static array, since the size is small enough to be a
 * trivial waste on all architectures we run on.
 */

static size_t nseg = 0;
static struct uvm_physseg uvm_physseg[VM_PHYSSEG_MAX];

static void *
uvm_physseg_alloc(size_t sz)
{
	/*
	 * During boot time, we only support allocating vm_physseg
	 * entries from the static array.
	 * We need to assert for this.
	 */

	if (__predict_false(uvm.page_init_done == false)) {
		if (sz % sizeof(struct uvm_physseg))
			panic("%s: tried to alloc size other than multiple"
			    " of struct uvm_physseg at boot\n", __func__);

		size_t n = sz / sizeof(struct uvm_physseg);
		nseg += n;

		KASSERT(nseg > 0 && nseg <= VM_PHYSSEG_MAX);

		return &uvm_physseg[nseg - n];
	}

	return kmem_zalloc(sz, KM_NOSLEEP);
}

static void
uvm_physseg_free(void *p, size_t sz)
{
	/*
	 * This is a bit tricky. We do allow simulation of free()
	 * during boot (e.g. when MD code is "steal"ing memory and
	 * the segment has been exhausted, and thus needs to be
	 * free()-ed).
	 * free() also complicates things because we leak the
	 * free(). Therefore calling code can't assume that free()-ed
	 * memory is available for alloc() again, at boot time.
	 *
	 * Thus we can't explicitly disallow free()s during
	 * boot time. However, the same restriction for alloc()
	 * applies to free(). We only allow uvm_physseg related free()s
	 * via this function during boot time.
	 */

	if (__predict_false(uvm.page_init_done == false)) {
		if (sz % sizeof(struct uvm_physseg))
			panic("%s: tried to free size other than struct uvm_physseg"
			    " at boot\n", __func__);

	}

	/*
	 * Could have been in a single if(){} block - split for
	 * clarity
	 */

	if ((struct uvm_physseg *)p >= uvm_physseg &&
	    (struct uvm_physseg *)p < (uvm_physseg + VM_PHYSSEG_MAX)) {
		if (sz % sizeof(struct uvm_physseg))
			panic("%s: tried to free() other than struct uvm_physseg"
			    " from static array\n", __func__);

		if ((sz / sizeof(struct uvm_physseg)) >= VM_PHYSSEG_MAX)
			panic("%s: tried to free() the entire static array!", __func__);
		return; /* Nothing to free */
	}

	kmem_free(p, sz);
}

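/*
 * uvm_physseg_plug: plug the span [pfn, pfn + pages) of physical memory
 * into the system.  A segment descriptor is allocated, and, when the VM
 * system is already running, a slab of vm_page structures is found for
 * the span (scavenged from an earlier unplug() or freshly kmem(9)
 * allocated).  On success the new segment handle is returned via *psp.
 */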
/* XXX: Multi page size */
bool
uvm_physseg_plug(paddr_t pfn, size_t pages, uvm_physseg_t *psp)
{
	int preload;
	size_t slabpages;
	struct uvm_physseg *ps, *current_ps = NULL;
	struct vm_page *slab = NULL, *pgs = NULL;

#ifdef DEBUG
	paddr_t off;
	uvm_physseg_t upm;
	upm = uvm_physseg_find(pfn, &off);

	ps = HANDLE_TO_PHYSSEG_NODE(upm);

	if (ps != NULL) /* XXX; do we allow "update" plugs ? */
		return false;
#endif

	/*
	 * do we have room?
	 */

	ps = uvm_physseg_alloc(sizeof (struct uvm_physseg));
	if (ps == NULL) {
		printf("uvm_page_physload: unable to load physical memory "
		    "segment\n");
		printf("\t%d segments allocated, ignoring 0x%"PRIxPADDR" -> 0x%"PRIxPADDR"\n",
		    VM_PHYSSEG_MAX, pfn, pfn + pages + 1);
		printf("\tincrease VM_PHYSSEG_MAX\n");
		return false;
	}

	/* span init */
	ps->start = pfn;
	ps->end = pfn + pages;

	/*
	 * XXX: Ugly hack because uvmexp.npages accounts for only
	 * those pages in the segment included below as well - this
	 * should be legacy and removed.
	 */

	ps->avail_start = ps->start;
	ps->avail_end = ps->end;

	/*
	 * check to see if this is a "preload" (i.e. uvm_page_init hasn't been
	 * called yet, so kmem is not available).
	 */

	preload = 1; /* We are going to assume it is a preload */

	RB_TREE_FOREACH(current_ps, &(uvm_physseg_graph.rb_tree)) {
		/* If there are non NULL pages then we are not in a preload */
		if (current_ps->pgs != NULL) {
			preload = 0;
			/* Try to scavenge from earlier unplug()s. */
			pgs = uvm_physseg_seg_alloc_from_slab(current_ps, pages);

			if (pgs != NULL) {
				break;
			}
		}
	}


	/*
	 * if VM is already running, attempt to kmem_alloc vm_page structures
	 */

	if (!preload) {
		if (pgs == NULL) { /* Brand new */
			/* Iteratively try alloc down from uvmexp.npages */
			for (slabpages = (size_t) uvmexp.npages; slabpages >= pages; slabpages--) {
				slab = kmem_zalloc(sizeof *pgs * (long unsigned int)slabpages, KM_NOSLEEP);
				if (slab != NULL)
					break;
			}

			if (slab == NULL) {
				uvm_physseg_free(ps, sizeof(struct uvm_physseg));
				return false;
			}

			uvm_physseg_seg_chomp_slab(ps, slab, (size_t) slabpages);
			/* We allocate enough for this plug */
			pgs = uvm_physseg_seg_alloc_from_slab(ps, pages);

			if (pgs == NULL) {
				printf("unable to uvm_physseg_seg_alloc_from_slab() from backend\n");
				return false;
			}
		} else {
			/* Reuse scavenged extent */
			ps->ext = current_ps->ext;
		}

		physmem += pages;
		uvmpdpol_reinit();
	} else { /* Boot time - see uvm_page.c:uvm_page_init() */
		pgs = NULL;
		ps->pgs = pgs;
	}

	/*
	 * now insert us in the proper place in uvm_physseg_graph.rb_tree
	 */

	current_ps = rb_tree_insert_node(&(uvm_physseg_graph.rb_tree), ps);
	if (current_ps != ps) {
		panic("uvm_page_physload: Duplicate address range detected!");
	}
	uvm_physseg_graph.nentries++;

	/*
	 * uvm_pagefree() requires the PHYS_TO_VM_PAGE(pgs[i]) on the
	 * newly allocated pgs[] to return the correct value. This is
	 * a bit of a chicken and egg problem, since it needs
	 * uvm_physseg_find() to succeed. For this, the node needs to
	 * be inserted *before* uvm_physseg_init_seg() happens.
	 *
	 * During boot, this happens anyway, since
	 * uvm_physseg_init_seg() is called later on and separately
	 * from uvm_page.c:uvm_page_init().
	 * In the case of hotplug we need to ensure this.
	 */

	if (__predict_true(!preload))
		uvm_physseg_init_seg(ps, pgs);

	if (psp != NULL)
		*psp = ps;

	return true;
}

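/*
 * rb_tree(3) comparison callbacks: segments are ordered by their start
 * frame, and a key lookup matches the segment whose [start, end) range
 * contains the physical frame being looked up.
 */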
static int
uvm_physseg_compare_nodes(void *ctx, const void *nnode1, const void *nnode2)
{
	const struct uvm_physseg *enode1 = nnode1;
	const struct uvm_physseg *enode2 = nnode2;

	KASSERT(enode1->start < enode2->start || enode1->start >= enode2->end);
	KASSERT(enode2->start < enode1->start || enode2->start >= enode1->end);

	if (enode1->start < enode2->start)
		return -1;
	if (enode1->start >= enode2->end)
		return 1;
	return 0;
}

static int
uvm_physseg_compare_key(void *ctx, const void *nnode, const void *pkey)
{
	const struct uvm_physseg *enode = nnode;
	const paddr_t pa = *(const paddr_t *) pkey;

	if (enode->start <= pa && pa < enode->end)
		return 0;
	if (enode->start < pa)
		return -1;
	if (enode->end > pa)
		return 1;

	return 0;
}

static const rb_tree_ops_t uvm_physseg_tree_ops = {
	.rbto_compare_nodes = uvm_physseg_compare_nodes,
	.rbto_compare_key = uvm_physseg_compare_key,
	.rbto_node_offset = offsetof(struct uvm_physseg, rb_node),
	.rbto_context = NULL
};

/*
 * uvm_physseg_init: init the physmem
 *
 * => physmem unit should not be in use at this point
 */

void
uvm_physseg_init(void)
{
	rb_tree_init(&(uvm_physseg_graph.rb_tree), &uvm_physseg_tree_ops);
	uvm_physseg_graph.nentries = 0;
}

uvm_physseg_t
uvm_physseg_get_next(uvm_physseg_t upm)
{
	/* next of invalid is invalid, not fatal */
	if (uvm_physseg_valid_p(upm) == false)
		return UVM_PHYSSEG_TYPE_INVALID;

	return (uvm_physseg_t) rb_tree_iterate(&(uvm_physseg_graph.rb_tree), upm,
	    RB_DIR_RIGHT);
}

uvm_physseg_t
uvm_physseg_get_prev(uvm_physseg_t upm)
{
	/* prev of invalid is invalid, not fatal */
	if (uvm_physseg_valid_p(upm) == false)
		return UVM_PHYSSEG_TYPE_INVALID;

	return (uvm_physseg_t) rb_tree_iterate(&(uvm_physseg_graph.rb_tree), upm,
	    RB_DIR_LEFT);
}

uvm_physseg_t
uvm_physseg_get_last(void)
{
	return (uvm_physseg_t) RB_TREE_MAX(&(uvm_physseg_graph.rb_tree));
}

uvm_physseg_t
uvm_physseg_get_first(void)
{
	return (uvm_physseg_t) RB_TREE_MIN(&(uvm_physseg_graph.rb_tree));
}

paddr_t
uvm_physseg_get_highest_frame(void)
{
	struct uvm_physseg *ps =
	    (uvm_physseg_t) RB_TREE_MAX(&(uvm_physseg_graph.rb_tree));

	return ps->end - 1;
}

/*
 * uvm_page_physunload: unload physical memory and return it to
 * caller.
 */
bool
uvm_page_physunload(uvm_physseg_t upm, int freelist, paddr_t *paddrp)
{
	struct uvm_physseg *seg;

	if (__predict_true(uvm.page_init_done == true))
		panic("%s: unload attempted after uvm_page_init()\n", __func__);

	seg = HANDLE_TO_PHYSSEG_NODE(upm);

	if (seg->free_list != freelist) {
		paddrp = NULL;
		return false;
	}

	/*
	 * During cold boot, what we're about to unplug hasn't been
	 * put on the uvm freelist, nor has uvmexp.npages been
	 * updated. (This happens in uvm_page.c:uvm_page_init())
	 *
	 * For hotplug, we assume here that the pages being unloaded
	 * here are completely out of sight of uvm (ie; not on any uvm
	 * lists), and that uvmexp.npages has been suitably
	 * decremented before we're called.
	 *
	 * XXX: will avail_end == start if avail_start < avail_end?
	 */

	/* try from front */
	if (seg->avail_start == seg->start &&
	    seg->avail_start < seg->avail_end) {
		*paddrp = ctob(seg->avail_start);
		return uvm_physseg_unplug(seg->avail_start, 1);
	}

	/* try from rear */
	if (seg->avail_end == seg->end &&
	    seg->avail_start < seg->avail_end) {
		*paddrp = ctob(seg->avail_end - 1);
		return uvm_physseg_unplug(seg->avail_end - 1, 1);
	}

	return false;
}

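/*
 * uvm_page_physunload_force: unconditionally unplug a single page from
 * the front of the segment's available range, returning its physical
 * address via *paddrp.
 */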
bool
uvm_page_physunload_force(uvm_physseg_t upm, int freelist, paddr_t *paddrp)
{
	struct uvm_physseg *seg;

	seg = HANDLE_TO_PHYSSEG_NODE(upm);

	if (__predict_true(uvm.page_init_done == true))
		panic("%s: unload attempted after uvm_page_init()\n", __func__);
	/* any room in this bank? */
	if (seg->avail_start >= seg->avail_end) {
		paddrp = NULL;
		return false; /* nope */
	}

	*paddrp = ctob(seg->avail_start);

	/* Always unplug from front */
	return uvm_physseg_unplug(seg->avail_start, 1);
}


/*
 * vm_physseg_find: find vm_physseg structure that belongs to a PA
 */
uvm_physseg_t
uvm_physseg_find(paddr_t pframe, psize_t *offp)
{
	struct uvm_physseg *ps = NULL;

	ps = rb_tree_find_node(&(uvm_physseg_graph.rb_tree), &pframe);

	if (ps != NULL && offp != NULL)
		*offp = pframe - ps->start;

	return ps;
}

#else /* UVM_HOTPLUG */

/*
 * physical memory config is stored in vm_physmem.
 */

#define	VM_PHYSMEM_PTR(i)	(&vm_physmem[i])
#if VM_PHYSSEG_MAX == 1
#define	VM_PHYSMEM_PTR_SWAP(i, j) /* impossible */
#else
#define	VM_PHYSMEM_PTR_SWAP(i, j) \
	do { vm_physmem[(i)] = vm_physmem[(j)]; } while (0)
#endif

#define	HANDLE_TO_PHYSSEG_NODE(h)	(VM_PHYSMEM_PTR((int)h))
#define	PHYSSEG_NODE_TO_HANDLE(u)	((int)((vsize_t) (u - vm_physmem) / sizeof(struct uvm_physseg)))

static struct uvm_physseg vm_physmem[VM_PHYSSEG_MAX];	/* XXXCDC: uvm.physmem */
static int vm_nphysseg = 0;				/* XXXCDC: uvm.nphysseg */
#define	vm_nphysmem	vm_nphysseg

void
uvm_physseg_init(void)
{
	/* XXX: Provisioning for rb_tree related init(s) */
	return;
}

int
uvm_physseg_get_next(uvm_physseg_t lcv)
{
	/* next of invalid is invalid, not fatal */
	if (uvm_physseg_valid_p(lcv) == false)
		return UVM_PHYSSEG_TYPE_INVALID;

	return (lcv + 1);
}

int
uvm_physseg_get_prev(uvm_physseg_t lcv)
{
	/* prev of invalid is invalid, not fatal */
	if (uvm_physseg_valid_p(lcv) == false)
		return UVM_PHYSSEG_TYPE_INVALID;

	return (lcv - 1);
}

int
uvm_physseg_get_last(void)
{
	return (vm_nphysseg - 1);
}

int
uvm_physseg_get_first(void)
{
	return 0;
}

paddr_t
uvm_physseg_get_highest_frame(void)
{
	int lcv;
	paddr_t last = 0;
	struct uvm_physseg *ps;

	for (lcv = 0; lcv < vm_nphysseg; lcv++) {
		ps = VM_PHYSMEM_PTR(lcv);
		if (last < ps->end)
			last = ps->end;
	}

	return last;
}


static struct vm_page *
uvm_post_preload_check(void)
{
	int preload, lcv;

	/*
	 * check to see if this is a "preload" (i.e. uvm_page_init hasn't been
	 * called yet, so kmem is not available).
	 */

	for (lcv = 0 ; lcv < vm_nphysmem ; lcv++) {
		if (VM_PHYSMEM_PTR(lcv)->pgs)
			break;
	}
	preload = (lcv == vm_nphysmem);

	/*
	 * if VM is already running, attempt to kmem_alloc vm_page structures
	 */

	if (!preload) {
		panic("Tried to add RAM after uvm_page_init");
	}

	return NULL;
}

/*
 * uvm_page_physunload: unload physical memory and return it to
 * caller.
 */
bool
uvm_page_physunload(uvm_physseg_t psi, int freelist, paddr_t *paddrp)
{
	int x;
	struct uvm_physseg *seg;

	uvm_post_preload_check();

	seg = VM_PHYSMEM_PTR(psi);

	if (seg->free_list != freelist) {
		paddrp = NULL;
		return false;
	}

	/* try from front */
	if (seg->avail_start == seg->start &&
	    seg->avail_start < seg->avail_end) {
		*paddrp = ctob(seg->avail_start);
		seg->avail_start++;
		seg->start++;
		/* nothing left?   nuke it */
		if (seg->avail_start == seg->end) {
			if (vm_nphysmem == 1)
				panic("uvm_page_physget: out of memory!");
			vm_nphysmem--;
			for (x = psi ; x < vm_nphysmem ; x++)
				/* structure copy */
				VM_PHYSMEM_PTR_SWAP(x, x + 1);
		}
		return (true);
	}

	/* try from rear */
	if (seg->avail_end == seg->end &&
	    seg->avail_start < seg->avail_end) {
		*paddrp = ctob(seg->avail_end - 1);
		seg->avail_end--;
		seg->end--;
		/* nothing left?   nuke it */
		if (seg->avail_end == seg->start) {
			if (vm_nphysmem == 1)
				panic("uvm_page_physget: out of memory!");
			vm_nphysmem--;
			for (x = psi ; x < vm_nphysmem ; x++)
				/* structure copy */
				VM_PHYSMEM_PTR_SWAP(x, x + 1);
		}
		return (true);
	}

	return false;
}

bool
uvm_page_physunload_force(uvm_physseg_t psi, int freelist, paddr_t *paddrp)
{
	int x;
	struct uvm_physseg *seg;

	uvm_post_preload_check();

	seg = VM_PHYSMEM_PTR(psi);

	/* any room in this bank? */
	if (seg->avail_start >= seg->avail_end) {
		paddrp = NULL;
		return false; /* nope */
	}

	*paddrp = ctob(seg->avail_start);
	seg->avail_start++;
	/* truncate! */
	seg->start = seg->avail_start;

	/* nothing left?   nuke it */
	if (seg->avail_start == seg->end) {
		if (vm_nphysmem == 1)
			panic("uvm_page_physget: out of memory!");
		vm_nphysmem--;
		for (x = psi ; x < vm_nphysmem ; x++)
			/* structure copy */
			VM_PHYSMEM_PTR_SWAP(x, x + 1);
	}
	return (true);
}

bool
uvm_physseg_plug(paddr_t pfn, size_t pages, uvm_physseg_t *psp)
{
	int lcv;
	struct vm_page *pgs;
	struct uvm_physseg *ps;

#ifdef DEBUG
	paddr_t off;
	uvm_physseg_t upm;
	upm = uvm_physseg_find(pfn, &off);

	if (uvm_physseg_valid_p(upm)) /* XXX; do we allow "update" plugs ? */
		return false;
#endif

	paddr_t start = pfn;
	paddr_t end = pfn + pages;
	paddr_t avail_start = start;
	paddr_t avail_end = end;

	if (uvmexp.pagesize == 0)
		panic("uvm_page_physload: page size not set!");

	/*
	 * do we have room?
	 */

	if (vm_nphysmem == VM_PHYSSEG_MAX) {
		printf("uvm_page_physload: unable to load physical memory "
		    "segment\n");
		printf("\t%d segments allocated, ignoring 0x%llx -> 0x%llx\n",
		    VM_PHYSSEG_MAX, (long long)start, (long long)end);
		printf("\tincrease VM_PHYSSEG_MAX\n");
		if (psp != NULL)
			*psp = UVM_PHYSSEG_TYPE_INVALID_OVERFLOW;
		return false;
	}

	/*
	 * check to see if this is a "preload" (i.e. uvm_page_init hasn't been
	 * called yet, so kmem is not available).
	 */
	pgs = uvm_post_preload_check();

	/*
	 * now insert us in the proper place in vm_physmem[]
	 */

#if (VM_PHYSSEG_STRAT == VM_PSTRAT_RANDOM)
	/* random: put it at the end (easy!) */
	ps = VM_PHYSMEM_PTR(vm_nphysmem);
	lcv = vm_nphysmem;
#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	{
		int x;
		/* sort by address for binary search */
		for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
			if (start < VM_PHYSMEM_PTR(lcv)->start)
				break;
		ps = VM_PHYSMEM_PTR(lcv);
		/* move back other entries, if necessary ... */
		for (x = vm_nphysmem ; x > lcv ; x--)
			/* structure copy */
			VM_PHYSMEM_PTR_SWAP(x, x - 1);
	}
#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
	{
		int x;
		/* sort by largest segment first */
		for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
			if ((end - start) >
			    (VM_PHYSMEM_PTR(lcv)->end - VM_PHYSMEM_PTR(lcv)->start))
				break;
		ps = VM_PHYSMEM_PTR(lcv);
		/* move back other entries, if necessary ... */
		for (x = vm_nphysmem ; x > lcv ; x--)
			/* structure copy */
			VM_PHYSMEM_PTR_SWAP(x, x - 1);
	}
#else
	panic("uvm_page_physload: unknown physseg strategy selected!");
#endif

	ps->start = start;
	ps->end = end;
	ps->avail_start = avail_start;
	ps->avail_end = avail_end;

	ps->pgs = pgs;

	vm_nphysmem++;

	if (psp != NULL)
		*psp = lcv;

	return true;
}

/*
 * when VM_PHYSSEG_MAX is 1, we can simplify these functions
 */

#if VM_PHYSSEG_MAX == 1
static inline int vm_physseg_find_contig(struct uvm_physseg *, int, paddr_t, psize_t *);
#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
static inline int vm_physseg_find_bsearch(struct uvm_physseg *, int, paddr_t, psize_t *);
#else
static inline int vm_physseg_find_linear(struct uvm_physseg *, int, paddr_t, psize_t *);
#endif

/*
 * vm_physseg_find: find vm_physseg structure that belongs to a PA
 */
int
uvm_physseg_find(paddr_t pframe, psize_t *offp)
{

#if VM_PHYSSEG_MAX == 1
	return vm_physseg_find_contig(vm_physmem, vm_nphysseg, pframe, offp);
#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	return vm_physseg_find_bsearch(vm_physmem, vm_nphysseg, pframe, offp);
#else
	return vm_physseg_find_linear(vm_physmem, vm_nphysseg, pframe, offp);
#endif
}

#if VM_PHYSSEG_MAX == 1
static inline int
vm_physseg_find_contig(struct uvm_physseg *segs, int nsegs, paddr_t pframe, psize_t *offp)
{

	/* 'contig' case */
	if (pframe >= segs[0].start && pframe < segs[0].end) {
		if (offp)
			*offp = pframe - segs[0].start;
		return(0);
	}
	return(-1);
}

#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)

static inline int
vm_physseg_find_bsearch(struct uvm_physseg *segs, int nsegs, paddr_t pframe, psize_t *offp)
{
	/* binary search for it */
	int	start, len, guess;

	/*
	 * if try is too large (thus target is less than try) we reduce
	 * the length to trunc(len/2) [i.e. everything smaller than "try"]
	 *
	 * if the try is too small (thus target is greater than try) then
	 * we set the new start to be (try + 1).   this means we need to
	 * reduce the length to (round(len/2) - 1).
	 *
	 * note "adjust" below which takes advantage of the fact that
	 *  (round(len/2) - 1) == trunc((len - 1) / 2)
	 * for any value of len we may have
	 */

	for (start = 0, len = nsegs ; len != 0 ; len = len / 2) {
		guess = start + (len / 2);	/* try in the middle */

		/* start past our try? */
		if (pframe >= segs[guess].start) {
			/* was try correct? */
			if (pframe < segs[guess].end) {
				if (offp)
					*offp = pframe - segs[guess].start;
				return guess;		/* got it */
			}
			start = guess + 1;	/* next time, start here */
			len--;			/* "adjust" */
		} else {
			/*
			 * pframe before try, just reduce length of
			 * region, done in "for" loop
			 */
		}
	}
	return(-1);
}

#else

static inline int
vm_physseg_find_linear(struct uvm_physseg *segs, int nsegs, paddr_t pframe, psize_t *offp)
{
	/* linear search for it */
	int	lcv;

	for (lcv = 0; lcv < nsegs; lcv++) {
		if (pframe >= segs[lcv].start &&
		    pframe < segs[lcv].end) {
			if (offp)
				*offp = pframe - segs[lcv].start;
			return(lcv);		/* got it */
		}
	}
	return(-1);
}
#endif
#endif /* UVM_HOTPLUG */

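/*
 * uvm_physseg_valid_p: return true if the handle refers to a usable
 * segment.  Before uvm_page_init() a handle without a backing pgs[]
 * array is still considered valid, since boot-time preloads register
 * segments before their vm_page structures exist.
 */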
bool
uvm_physseg_valid_p(uvm_physseg_t upm)
{
	struct uvm_physseg *ps;

	if (upm == UVM_PHYSSEG_TYPE_INVALID ||
	    upm == UVM_PHYSSEG_TYPE_INVALID_EMPTY ||
	    upm == UVM_PHYSSEG_TYPE_INVALID_OVERFLOW)
		return false;

	/*
	 * This is part of the delicate boot-time init dance -
	 * the extra checks below only make sense once that dance
	 * is complete.
	 */
	if (uvm.page_init_done != true)
		return true;

	ps = HANDLE_TO_PHYSSEG_NODE(upm);

	/* Extra checks needed only post uvm_page_init() */
	if (ps->pgs == NULL)
		return false;

	/* XXX: etc. */

	return true;

}

/*
 * Boot protocol dictates that these must be able to return partially
 * initialised segments.
 */
paddr_t
uvm_physseg_get_start(uvm_physseg_t upm)
{
	if (uvm_physseg_valid_p(upm) == false)
		return (paddr_t) -1;

	return HANDLE_TO_PHYSSEG_NODE(upm)->start;
}

paddr_t
uvm_physseg_get_end(uvm_physseg_t upm)
{
	if (uvm_physseg_valid_p(upm) == false)
		return (paddr_t) -1;

	return HANDLE_TO_PHYSSEG_NODE(upm)->end;
}

paddr_t
uvm_physseg_get_avail_start(uvm_physseg_t upm)
{
	if (uvm_physseg_valid_p(upm) == false)
		return (paddr_t) -1;

	return HANDLE_TO_PHYSSEG_NODE(upm)->avail_start;
}

#if defined(UVM_PHYSSEG_LEGACY)
void
uvm_physseg_set_avail_start(uvm_physseg_t upm, paddr_t avail_start)
{
	struct uvm_physseg *ps = HANDLE_TO_PHYSSEG_NODE(upm);

#if defined(DIAGNOSTIC)
	paddr_t avail_end;
	avail_end = uvm_physseg_get_avail_end(upm);
	KASSERT(uvm_physseg_valid_p(upm));
	KASSERT(avail_start < avail_end && avail_start >= ps->start);
#endif

	ps->avail_start = avail_start;
}

void
uvm_physseg_set_avail_end(uvm_physseg_t upm, paddr_t avail_end)
{
	struct uvm_physseg *ps = HANDLE_TO_PHYSSEG_NODE(upm);

#if defined(DIAGNOSTIC)
	paddr_t avail_start;
	avail_start = uvm_physseg_get_avail_start(upm);
	KASSERT(uvm_physseg_valid_p(upm));
	KASSERT(avail_end > avail_start && avail_end <= ps->end);
#endif

	ps->avail_end = avail_end;
}

#endif /* UVM_PHYSSEG_LEGACY */

paddr_t
uvm_physseg_get_avail_end(uvm_physseg_t upm)
{
	if (uvm_physseg_valid_p(upm) == false)
		return (paddr_t) -1;

	return HANDLE_TO_PHYSSEG_NODE(upm)->avail_end;
}

struct vm_page *
uvm_physseg_get_pg(uvm_physseg_t upm, paddr_t idx)
{
	KASSERT(uvm_physseg_valid_p(upm));
	return &HANDLE_TO_PHYSSEG_NODE(upm)->pgs[idx];
}

#ifdef __HAVE_PMAP_PHYSSEG
struct pmap_physseg *
uvm_physseg_get_pmseg(uvm_physseg_t upm)
{
	KASSERT(uvm_physseg_valid_p(upm));
	return &(HANDLE_TO_PHYSSEG_NODE(upm)->pmseg);
}
#endif

int
uvm_physseg_get_free_list(uvm_physseg_t upm)
{
	KASSERT(uvm_physseg_valid_p(upm));
	return HANDLE_TO_PHYSSEG_NODE(upm)->free_list;
}

u_int
uvm_physseg_get_start_hint(uvm_physseg_t upm)
{
	KASSERT(uvm_physseg_valid_p(upm));
	return HANDLE_TO_PHYSSEG_NODE(upm)->start_hint;
}

bool
uvm_physseg_set_start_hint(uvm_physseg_t upm, u_int start_hint)
{
	if (uvm_physseg_valid_p(upm) == false)
		return false;

	HANDLE_TO_PHYSSEG_NODE(upm)->start_hint = start_hint;
	return true;
}

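/*
 * uvm_physseg_init_seg: attach a (zeroed) vm_page[] array to a segment
 * and release every page within [avail_start, avail_end) to the free
 * page pool.
 */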
void
uvm_physseg_init_seg(uvm_physseg_t upm, struct vm_page *pgs)
{
	psize_t i;
	psize_t n;
	paddr_t paddr;
	struct uvm_physseg *seg;

	KASSERT(upm != UVM_PHYSSEG_TYPE_INVALID && pgs != NULL);

	seg = HANDLE_TO_PHYSSEG_NODE(upm);
	KASSERT(seg != NULL);
	KASSERT(seg->pgs == NULL);

	n = seg->end - seg->start;
	seg->pgs = pgs;

	/* init and free vm_pages (we've already zeroed them) */
	paddr = ctob(seg->start);
	for (i = 0 ; i < n ; i++, paddr += PAGE_SIZE) {
		seg->pgs[i].phys_addr = paddr;
#ifdef __HAVE_VM_PAGE_MD
		VM_MDPAGE_INIT(&seg->pgs[i]);
#endif
		if (atop(paddr) >= seg->avail_start &&
		    atop(paddr) < seg->avail_end) {
			uvmexp.npages++;
			mutex_enter(&uvm_pageqlock);
			/* add page to free pool */
			uvm_pagefree(&seg->pgs[i]);
			mutex_exit(&uvm_pageqlock);
		}
	}
}

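/*
 * uvm_physseg_seg_chomp_slab: wrap a freshly allocated vm_page slab in
 * an extent(9) arena so that plug()/unplug() can later carve page
 * ranges out of it.  A small static storage area backs the boot-time
 * arena, since kmem(9) is not available yet at that point.
 */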
void
uvm_physseg_seg_chomp_slab(uvm_physseg_t upm, struct vm_page *pgs, size_t n)
{
	struct uvm_physseg *seg = HANDLE_TO_PHYSSEG_NODE(upm);

	/* max number of pre-boot unplug()s allowed */
#define UVM_PHYSSEG_BOOT_UNPLUG_MAX VM_PHYSSEG_MAX

	static char btslab_ex_storage[EXTENT_FIXED_STORAGE_SIZE(UVM_PHYSSEG_BOOT_UNPLUG_MAX)];

	if (__predict_false(uvm.page_init_done == false)) {
		seg->ext = extent_create("Boot time slab", (u_long) pgs, (u_long) (pgs + n),
		    (void *)btslab_ex_storage, sizeof(btslab_ex_storage), 0);
	} else {
		seg->ext = extent_create("Hotplug slab", (u_long) pgs, (u_long) (pgs + n), NULL, 0, 0);
	}

	KASSERT(seg->ext != NULL);

}

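/*
 * uvm_physseg_seg_alloc_from_slab: carve 'pages' vm_page structures out
 * of the segment's extent(9) arena.  Returns NULL if the arena has no
 * free range of the requested size.
 */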
struct vm_page *
uvm_physseg_seg_alloc_from_slab(uvm_physseg_t upm, size_t pages)
{
	int err;
	struct uvm_physseg *seg;
	struct vm_page *pgs = NULL;

	seg = HANDLE_TO_PHYSSEG_NODE(upm);

	KASSERT(pages > 0);

	if (__predict_false(seg->ext == NULL)) {
		/*
		 * This is a situation unique to boot time.
		 * It shouldn't happen at any point other than from
		 * the first uvm_page.c:uvm_page_init() call.
		 * Since we're in a loop, we can get away with the
		 * below.
		 */
		KASSERT(uvm.page_init_done != true);

		seg->ext = HANDLE_TO_PHYSSEG_NODE(uvm_physseg_get_prev(upm))->ext;

		KASSERT(seg->ext != NULL);
	}

	/* We allocate enough for this segment */
	err = extent_alloc(seg->ext, sizeof(*pgs) * pages, 1, 0, EX_BOUNDZERO, (u_long *)&pgs);

	if (err != 0) {
#ifdef DEBUG
		printf("%s: extent_alloc failed with error: %d \n",
		    __func__, err);
#endif
	}

	return pgs;
}

/*
 * uvm_page_physload: load physical memory into VM system
 *
 * => all args are PFs
 * => all pages in start/end get vm_page structures
 * => areas marked by avail_start/avail_end get added to the free page pool
 * => we are limited to VM_PHYSSEG_MAX physical memory segments
 */

uvm_physseg_t
uvm_page_physload(paddr_t start, paddr_t end, paddr_t avail_start,
    paddr_t avail_end, int free_list)
{
	struct uvm_physseg *ps;
	uvm_physseg_t upm;

	if (__predict_true(uvm.page_init_done == true))
		panic("%s: called after uvm_page_init()\n", __func__);
	if (uvmexp.pagesize == 0)
		panic("uvm_page_physload: page size not set!");
	if (free_list >= VM_NFREELIST || free_list < VM_FREELIST_DEFAULT)
		panic("uvm_page_physload: bad free list %d", free_list);
	if (start >= end)
		panic("uvm_page_physload: start >= end");

	if (uvm_physseg_plug(start, end - start, &upm) == false) {
		panic("uvm_physseg_plug() failed at boot.");
		/* NOTREACHED */
		return UVM_PHYSSEG_TYPE_INVALID; /* XXX: correct type */
	}

	ps = HANDLE_TO_PHYSSEG_NODE(upm);

	/* Legacy */
	ps->avail_start = avail_start;
	ps->avail_end = avail_end;

	ps->free_list = free_list; /* XXX: */


	return upm;
}

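/*
 * uvm_physseg_unplug: remove the span [pfn, pfn + pages) from the
 * segment that contains it.  Removing a whole segment deletes it;
 * removing from the front or rear shrinks it; removing from the
 * middle splits the segment in two.
 */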
1225 1.8.12.2 jdolecek bool
1226 1.8.12.2 jdolecek uvm_physseg_unplug(paddr_t pfn, size_t pages)
1227 1.8.12.2 jdolecek {
1228 1.8.12.2 jdolecek uvm_physseg_t upm;
1229 1.8.12.2 jdolecek paddr_t off = 0, start __diagused, end;
1230 1.8.12.2 jdolecek struct uvm_physseg *seg;
1231 1.8.12.2 jdolecek
1232 1.8.12.2 jdolecek upm = uvm_physseg_find(pfn, &off);
1233 1.8.12.2 jdolecek
1234 1.8.12.2 jdolecek if (!uvm_physseg_valid_p(upm)) {
1235 1.8.12.2 jdolecek 		printf("%s: Tried to unplug from an unknown pfn\n", __func__);
1236 1.8.12.2 jdolecek return false;
1237 1.8.12.2 jdolecek }
1238 1.8.12.2 jdolecek
1239 1.8.12.2 jdolecek seg = HANDLE_TO_PHYSSEG_NODE(upm);
1240 1.8.12.2 jdolecek
1241 1.8.12.2 jdolecek start = uvm_physseg_get_start(upm);
1242 1.8.12.2 jdolecek end = uvm_physseg_get_end(upm);
1243 1.8.12.2 jdolecek
1244 1.8.12.2 jdolecek if (end < (pfn + pages)) {
1245 1.8.12.2 jdolecek 		printf("%s: Tried to unplug oversized span\n", __func__);
1246 1.8.12.2 jdolecek return false;
1247 1.8.12.2 jdolecek }
1248 1.8.12.2 jdolecek
1249 1.8.12.2 jdolecek KASSERT(pfn == start + off); /* sanity */
1250 1.8.12.2 jdolecek
1251 1.8.12.2 jdolecek if (__predict_true(uvm.page_init_done == true)) {
1252 1.8.12.2 jdolecek /* XXX: KASSERT() that seg->pgs[] are not on any uvm lists */
1253 1.8.12.2 jdolecek if (extent_free(seg->ext, (u_long)(seg->pgs + off), sizeof(struct vm_page) * pages, EX_MALLOCOK | EX_NOWAIT) != 0)
1254 1.8.12.2 jdolecek return false;
1255 1.8.12.2 jdolecek }
1256 1.8.12.2 jdolecek
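	/*
	 * Four cases follow, depending on where [pfn, pfn + pages)
	 * falls within the segment:
	 *	1. the entire segment	-> remove the segment itself
	 *	2. a middle chunk	-> split into two segments
	 *	3. a front chunk	-> truncate the segment start
	 *	4. a back chunk		-> truncate the segment end
	 */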
1257 1.8.12.2 jdolecek if (off == 0 && (pfn + pages) == end) {
1258 1.8.12.2 jdolecek #if defined(UVM_HOTPLUG) /* rbtree implementation */
1259 1.8.12.2 jdolecek int segcount = 0;
1260 1.8.12.2 jdolecek struct uvm_physseg *current_ps;
1261 1.8.12.2 jdolecek /* Complete segment */
1262 1.8.12.2 jdolecek if (uvm_physseg_graph.nentries == 1)
1263 1.8.12.2 jdolecek panic("%s: out of memory!", __func__);
1264 1.8.12.2 jdolecek
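		/*
		 * Several segments can share one extent (boot-time
		 * segments inherit it and middle-chunk splits reuse the
		 * parent's), so only destroy the extent once the last
		 * segment using it goes away.
		 */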
1265 1.8.12.2 jdolecek if (__predict_true(uvm.page_init_done == true)) {
1266 1.8.12.2 jdolecek RB_TREE_FOREACH(current_ps, &(uvm_physseg_graph.rb_tree)) {
1267 1.8.12.2 jdolecek if (seg->ext == current_ps->ext)
1268 1.8.12.2 jdolecek segcount++;
1269 1.8.12.2 jdolecek }
1270 1.8.12.2 jdolecek KASSERT(segcount > 0);
1271 1.8.12.2 jdolecek
1272 1.8.12.2 jdolecek if (segcount == 1) {
1273 1.8.12.2 jdolecek extent_destroy(seg->ext);
1274 1.8.12.2 jdolecek }
1275 1.8.12.2 jdolecek
1276 1.8.12.2 jdolecek /*
1277 1.8.12.2 jdolecek * We assume that the unplug will succeed from
1278 1.8.12.2 jdolecek * this point onwards
1279 1.8.12.2 jdolecek */
1280 1.8.12.2 jdolecek uvmexp.npages -= (int) pages;
1281 1.8.12.2 jdolecek }
1282 1.8.12.2 jdolecek
1283 1.8.12.2 jdolecek rb_tree_remove_node(&(uvm_physseg_graph.rb_tree), upm);
1284 1.8.12.2 jdolecek memset(seg, 0, sizeof(struct uvm_physseg));
1285 1.8.12.2 jdolecek uvm_physseg_free(seg, sizeof(struct uvm_physseg));
1286 1.8.12.2 jdolecek uvm_physseg_graph.nentries--;
1287 1.8.12.2 jdolecek #else /* UVM_HOTPLUG */
1288 1.8.12.2 jdolecek int x;
1289 1.8.12.2 jdolecek if (vm_nphysmem == 1)
1290 1.8.12.2 jdolecek 			panic("%s: out of memory!", __func__);
1291 1.8.12.2 jdolecek vm_nphysmem--;
1292 1.8.12.2 jdolecek for (x = upm ; x < vm_nphysmem ; x++)
1293 1.8.12.2 jdolecek /* structure copy */
1294 1.8.12.2 jdolecek VM_PHYSMEM_PTR_SWAP(x, x + 1);
1295 1.8.12.2 jdolecek #endif /* UVM_HOTPLUG */
1296 1.8.12.2 jdolecek /* XXX: KASSERT() that seg->pgs[] are not on any uvm lists */
1297 1.8.12.2 jdolecek return true;
1298 1.8.12.2 jdolecek }
1299 1.8.12.2 jdolecek
1300 1.8.12.2 jdolecek if (off > 0 &&
1301 1.8.12.2 jdolecek (pfn + pages) < end) {
1302 1.8.12.2 jdolecek #if defined(UVM_HOTPLUG) /* rbtree implementation */
1303 1.8.12.2 jdolecek /* middle chunk - need a new segment */
1304 1.8.12.2 jdolecek struct uvm_physseg *ps, *current_ps;
1305 1.8.12.2 jdolecek ps = uvm_physseg_alloc(sizeof (struct uvm_physseg));
1306 1.8.12.2 jdolecek if (ps == NULL) {
1307 1.8.12.2 jdolecek 			printf("%s: Unable to allocate new fragment vm_physseg\n",
1308 1.8.12.2 jdolecek __func__);
1309 1.8.12.2 jdolecek return false;
1310 1.8.12.2 jdolecek }
1311 1.8.12.2 jdolecek
1312 1.8.12.2 jdolecek /* Remove middle chunk */
1313 1.8.12.2 jdolecek if (__predict_true(uvm.page_init_done == true)) {
1314 1.8.12.2 jdolecek KASSERT(seg->ext != NULL);
1315 1.8.12.2 jdolecek ps->ext = seg->ext;
1316 1.8.12.2 jdolecek
1317 1.8.12.2 jdolecek /* XXX: KASSERT() that seg->pgs[] are not on any uvm lists */
1318 1.8.12.2 jdolecek /*
1319 1.8.12.2 jdolecek * We assume that the unplug will succeed from
1320 1.8.12.2 jdolecek * this point onwards
1321 1.8.12.2 jdolecek */
1322 1.8.12.2 jdolecek uvmexp.npages -= (int) pages;
1323 1.8.12.2 jdolecek }
1324 1.8.12.2 jdolecek
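		/*
		 * Split layout: "seg" keeps [start, pfn) and the new
		 * "ps" takes over [pfn + pages, end); the hole in
		 * between is what gets unplugged.
		 */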
1325 1.8.12.2 jdolecek ps->start = pfn + pages;
1326 1.8.12.2 jdolecek ps->avail_start = ps->start; /* XXX: Legacy */
1327 1.8.12.2 jdolecek
1328 1.8.12.2 jdolecek ps->end = seg->end;
1329 1.8.12.2 jdolecek ps->avail_end = ps->end; /* XXX: Legacy */
1330 1.8.12.2 jdolecek
1331 1.8.12.2 jdolecek seg->end = pfn;
1332 1.8.12.2 jdolecek seg->avail_end = seg->end; /* XXX: Legacy */
1333 1.8.12.2 jdolecek
1334 1.8.12.2 jdolecek
1335 1.8.12.2 jdolecek /*
1336 1.8.12.2 jdolecek * The new pgs array points to the beginning of the
1337 1.8.12.2 jdolecek * tail fragment.
1338 1.8.12.2 jdolecek */
1339 1.8.12.2 jdolecek if (__predict_true(uvm.page_init_done == true))
1340 1.8.12.2 jdolecek ps->pgs = seg->pgs + off + pages;
1341 1.8.12.2 jdolecek
1342 1.8.12.2 jdolecek current_ps = rb_tree_insert_node(&(uvm_physseg_graph.rb_tree), ps);
1343 1.8.12.2 jdolecek if (current_ps != ps) {
1344 1.8.12.2 jdolecek 			panic("%s: Duplicate address range detected!", __func__);
1345 1.8.12.2 jdolecek }
1346 1.8.12.2 jdolecek uvm_physseg_graph.nentries++;
1347 1.8.12.2 jdolecek #else /* UVM_HOTPLUG */
1348 1.8.12.2 jdolecek panic("%s: can't unplug() from the middle of a segment without"
1349 1.8.12.2 jdolecek " UVM_HOTPLUG\n", __func__);
1350 1.8.12.2 jdolecek /* NOTREACHED */
1351 1.8.12.2 jdolecek #endif /* UVM_HOTPLUG */
1352 1.8.12.2 jdolecek return true;
1353 1.8.12.2 jdolecek }
1354 1.8.12.2 jdolecek
1355 1.8.12.2 jdolecek if (off == 0 && (pfn + pages) < end) {
1356 1.8.12.2 jdolecek /* Remove front chunk */
1357 1.8.12.2 jdolecek if (__predict_true(uvm.page_init_done == true)) {
1358 1.8.12.2 jdolecek /* XXX: KASSERT() that seg->pgs[] are not on any uvm lists */
1359 1.8.12.2 jdolecek /*
1360 1.8.12.2 jdolecek * We assume that the unplug will succeed from
1361 1.8.12.2 jdolecek * this point onwards
1362 1.8.12.2 jdolecek */
1363 1.8.12.2 jdolecek uvmexp.npages -= (int) pages;
1364 1.8.12.2 jdolecek }
1365 1.8.12.2 jdolecek
1366 1.8.12.2 jdolecek /* Truncate */
1367 1.8.12.2 jdolecek seg->start = pfn + pages;
1368 1.8.12.2 jdolecek seg->avail_start = seg->start; /* XXX: Legacy */
1369 1.8.12.2 jdolecek
1370 1.8.12.2 jdolecek /*
1371 1.8.12.2 jdolecek * Move the pgs array start to the beginning of the
1372 1.8.12.2 jdolecek * tail end.
1373 1.8.12.2 jdolecek */
1374 1.8.12.2 jdolecek if (__predict_true(uvm.page_init_done == true))
1375 1.8.12.2 jdolecek seg->pgs += pages;
1376 1.8.12.2 jdolecek
1377 1.8.12.2 jdolecek return true;
1378 1.8.12.2 jdolecek }
1379 1.8.12.2 jdolecek
1380 1.8.12.2 jdolecek if (off > 0 && (pfn + pages) == end) {
1381 1.8.12.2 jdolecek /* back chunk */
1382 1.8.12.2 jdolecek
1383 1.8.12.2 jdolecek
1384 1.8.12.2 jdolecek /* Truncate! */
1385 1.8.12.2 jdolecek seg->end = pfn;
1386 1.8.12.2 jdolecek seg->avail_end = seg->end; /* XXX: Legacy */
1387 1.8.12.2 jdolecek 		if (__predict_true(uvm.page_init_done == true))
1388 1.8.12.2 jdolecek 			uvmexp.npages -= (int) pages;
1389 1.8.12.2 jdolecek
1390 1.8.12.2 jdolecek return true;
1391 1.8.12.2 jdolecek }
1392 1.8.12.2 jdolecek
1393 1.8.12.2 jdolecek 	printf("%s: Tried to unplug unknown range\n", __func__);
1394 1.8.12.2 jdolecek
1395 1.8.12.2 jdolecek return false;
1396 1.8.12.2 jdolecek }
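
/*
 * Example (an illustrative sketch; "hot_pfn" and "hot_npages" are
 * hypothetical): a memory hot-remove path would release a previously
 * plugged span with:
 *
 *	if (uvm_physseg_unplug(hot_pfn, hot_npages) == false)
 *		printf("hot-remove of %zu pages at pfn %#" PRIxPADDR
 *		    " failed\n", hot_npages, hot_pfn);
 *
 * The span must lie entirely within a single segment, and none of its
 * pages may still be on any uvm lists (see the XXX KASSERT notes
 * above).
 */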
1397