1 1.85.2.4 ad 		case -1:
2 1.85.2.4 ad goto wraparound;
3 1.85.2.4 ad }
4 1.85.2.4 ad if (tmp->ownspace >= length)
5 1.85.2.4 ad goto listsearch;
6 1.85.2.4 ad }
7 1.85.2.4 ad if (prev == NULL)
8 1.85.2.4 ad goto notfound;
9 1.14 thorpej
10 1.85.2.4 ad if (topdown) {
11 1.85.2.4 ad KASSERT(orig_hint >= prev->next->start - length ||
12 1.85.2.4 ad prev->next->start - length > prev->next->start);
13 1.85.2.4 ad hint = prev->next->start - length;
14 1.85.2.4 ad } else {
15 1.85.2.4 ad KASSERT(orig_hint <= prev->end);
16 1.85.2.4 ad hint = prev->end;
17 1.85.2.4 ad }
18 1.85.2.4 ad switch (uvm_map_space_avail(&hint, length, uoffset, align,
19 1.85.2.4 ad topdown, prev)) {
20 1.85.2.4 ad case 1:
21 1.85.2.4 ad entry = prev;
22 1.85.2.4 ad goto found;
23 1.85.2.4 ad case -1:
24 1.85.2.4 ad goto wraparound;
25 1.85.2.4 ad }
26 1.85.2.4 ad if (prev->ownspace >= length)
27 1.85.2.4 ad goto listsearch;
28 1.84 martin
29 1.85.2.4 ad if (topdown)
30 1.85.2.4 ad tmp = RB_LEFT(prev, rb_entry);
31 1.85.2.4 ad else
32 1.85.2.4 ad tmp = RB_RIGHT(prev, rb_entry);
33 1.85.2.4 ad for (;;) {
34 1.85.2.4 ad KASSERT(tmp && tmp->space >= length);
35 1.85.2.4 ad if (topdown)
36 1.85.2.4 ad child = RB_RIGHT(tmp, rb_entry);
37 1.85.2.4 ad else
38 1.85.2.4 ad child = RB_LEFT(tmp, rb_entry);
39 1.85.2.4 ad if (child && child->space >= length) {
40 1.85.2.4 ad tmp = child;
41 1.85.2.4 ad continue;
42 1.85.2.4 ad }
43 1.85.2.4 ad if (tmp->ownspace >= length)
44 1.85.2.4 ad break;
45 1.85.2.4 ad if (topdown)
46 1.85.2.4 ad tmp = RB_LEFT(tmp, rb_entry);
47 1.85.2.4 ad else
48 1.85.2.4 ad tmp = RB_RIGHT(tmp, rb_entry);
49 1.85.2.4 ad }
50 1.11 mycroft
51 1.85.2.4 ad if (topdown) {
52 1.85.2.4 ad KASSERT(orig_hint >= tmp->next->start - length ||
53 1.85.2.4 ad tmp->next->start - length > tmp->next->start);
54 1.85.2.4 ad hint = tmp->next->start - length;
55 1.85.2.4 ad } else {
56 1.85.2.4 ad KASSERT(orig_hint <= tmp->end);
57 1.85.2.4 ad hint = tmp->end;
58 1.85.2.4 ad }
59 1.85.2.4 ad switch (uvm_map_space_avail(&hint, length, uoffset, align,
60 1.85.2.4 ad topdown, tmp)) {
61 1.85.2.4 ad case 1:
62 1.85.2.4 ad entry = tmp;
63 1.85.2.4 ad goto found;
64 1.85.2.4 ad case -1:
65 1.85.2.4 ad goto wraparound;
66 1.85.2.4 ad }
67 1.11 mycroft
68 1.85.2.4 ad /*
69 1.85.2.4 ad * The tree fails to find an entry because of offset or alignment
70 1.85.2.4 ad * restrictions. Search the list instead.
71 1.85.2.4 ad */
72 1.85.2.4 ad listsearch:
73 1.85.2.4 ad /*
74 1.85.2.4 ad * Look through the rest of the map, trying to fit a new region in
75 1.85.2.4 ad * the gap between existing regions, or after the very last region.
76 1.85.2.4 ad * note: entry->end = base VA of current gap,
77 1.85.2.4 ad * entry->next->start = VA of end of current gap
78 1.85.2.4 ad */
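	/*
	 * Editor's illustration (not from the original source) of the gap
	 * bookkeeping the loop below relies on, for one gap between two
	 * entries:
	 *
	 *        entry->end                     entry->next->start
	 *            |<----------- current gap ----------->|
	 *
	 *   bottom-up:  hint = entry->end
	 *   topdown:    hint = entry->next->start - length
	 */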
79 1.11 mycroft
80 1.85.2.4 ad for (;;) {
81 1.85.2.4 ad /* Update hint for current gap. */
82 1.85.2.4 ad hint = topdown ? entry->next->start - length : entry->end;
83 1.85.2.4 ad
84 1.85.2.4 ad /* See if it fits. */
85 1.85.2.4 ad switch (uvm_map_space_avail(&hint, length, uoffset, align,
86 1.85.2.4 ad topdown, entry)) {
87 1.85.2.4 ad case 1:
88 1.85.2.4 ad goto found;
89 1.85.2.4 ad case -1:
90 1.85.2.4 ad goto wraparound;
91 1.85.2.4 ad }
92 1.11 mycroft
93 1.85.2.4 ad /* Advance to next/previous gap */
94 1.85.2.4 ad if (topdown) {
95 1.85.2.4 ad if (entry == &map->header) {
96 1.85.2.4 ad UVMHIST_LOG(maphist, "<- failed (off start)",
97 1.85.2.4 ad 0,0,0,0);
98 1.85.2.4 ad goto notfound;
99 1.85.2.4 ad }
100 1.85.2.4 ad entry = entry->prev;
101 1.85.2.4 ad } else {
102 1.85.2.4 ad entry = entry->next;
103 1.85.2.4 ad if (entry == &map->header) {
104 1.85.2.4 ad UVMHIST_LOG(maphist, "<- failed (off end)",
105 1.85.2.4 ad 0,0,0,0);
106 1.85.2.4 ad goto notfound;
107 1.85.2.4 ad }
108 1.11 mycroft }
109 1.11 mycroft }
110 1.15 thorpej
111 1.85.2.4 ad found:
112 1.85.2.4 ad SAVE_HINT(map, map->hint, entry);
113 1.85.2.4 ad *result = hint;
114 1.85.2.4 ad UVMHIST_LOG(maphist,"<- got it! (result=0x%x)", hint, 0,0,0);
115 1.85.2.4 ad KASSERT( topdown || hint >= orig_hint);
116 1.85.2.4 ad KASSERT(!topdown || hint <= orig_hint);
117 1.85.2.4 ad KASSERT(entry->end <= hint);
118 1.85.2.4 ad KASSERT(hint + length <= entry->next->start);
119 1.85.2.4 ad return (entry);
120 1.15 thorpej
121 1.85.2.4 ad wraparound:
122 1.85.2.4 ad UVMHIST_LOG(maphist, "<- failed (wrap around)", 0,0,0,0);
123 1.15 thorpej
124 1.76 yamt return (NULL);
125 1.15 thorpej
126 1.85.2.4 ad notfound:
127 1.85.2.4 ad UVMHIST_LOG(maphist, "<- failed (notfound)", 0,0,0,0);
128 1.70 yamt
129 1.85.2.4 ad return (NULL);
130 1.70 yamt }
131 1.70 yamt
132 1.85.2.3 ad /*
133 1.85.2.4 ad * U N M A P - m a i n h e l p e r f u n c t i o n s
134 1.85.2.3 ad */
135 1.85.2.4 ad
136 1.85.2.4 ad /*
137 1.85.2.4 ad * uvm_unmap_remove: remove mappings from a vm_map (from "start" up to "end")
138 1.85.2.4 ad *
139 1.85.2.4 ad * => caller must check alignment and size
140 1.85.2.4 ad * => map must be locked by caller
141 1.85.2.4 ad * => we return a list of map entries that we've removed from the map
142 1.85.2.4 ad * in "entry_list"
143 1.85.2.4 ad */
144 1.85.2.4 ad
145 1.85.2.3 ad void
146 1.85.2.4 ad uvm_unmap_remove(struct vm_map *map, vaddr_t start, vaddr_t end,
147 1.85.2.4 ad struct vm_map_entry **entry_list /* OUT */,
148 1.85.2.4 ad struct uvm_mapent_reservation *umr, int flags)
149 1.85.2.4 ad {
150 1.85.2.4 ad struct vm_map_entry *entry, *first_entry, *next;
151 1.85.2.4 ad vaddr_t len;
152 1.85.2.4 ad UVMHIST_FUNC("uvm_unmap_remove"); UVMHIST_CALLED(maphist);
153 1.85.2.4 ad
154 1.85.2.4 ad UVMHIST_LOG(maphist,"(map=0x%x, start=0x%x, end=0x%x)",
155 1.85.2.4 ad map, start, end, 0);
156 1.85.2.4 ad VM_MAP_RANGE_CHECK(map, start, end);
157 1.85.2.4 ad
158 1.85.2.4 ad uvm_map_check(map, "unmap_remove entry");
159 1.79 kardel
160 1.15 thorpej /*
161 1.85.2.4 ad * find first entry
162 1.15 thorpej */
163 1.15 thorpej
164 1.85.2.4 ad if (uvm_map_lookup_entry(map, start, &first_entry) == true) {
165 1.85.2.4 ad /* clip and go... */
166 1.85.2.4 ad entry = first_entry;
167 1.85.2.4 ad UVM_MAP_CLIP_START(map, entry, start, umr);
168 1.85.2.4 ad /* critical! prevents stale hint */
169 1.85.2.4 ad SAVE_HINT(map, entry, entry->prev);
170 1.85.2.4 ad } else {
171 1.85.2.4 ad entry = first_entry->next;
172 1.85.2.4 ad }
173 1.15 thorpej
174 1.15 thorpej /*
175 1.85.2.4 ad * Save the free space hint
176 1.15 thorpej */
177 1.15 thorpej
178 1.85.2.4 ad if (map->first_free != &map->header && map->first_free->start >= start)
179 1.85.2.4 ad map->first_free = entry->prev;
180 1.15 thorpej
181 1.15 thorpej /*
182 1.85.2.4 ad * note: we now re-use first_entry for a different task. we remove
183 1.85.2.4 ad * a number of map entries from the map and save them in a linked
184 1.85.2.4 ad * list headed by "first_entry". once we remove them from the map
185 1.85.2.4 ad * the caller should unlock the map and drop the references to the
186 1.85.2.4 ad * backing objects [cf. uvm_unmap_detach]. the goal is to
187 1.85.2.4 ad * separate unmapping from reference dropping. why?
188 1.85.2.4 ad * [1] the map has to be locked for unmapping
189 1.85.2.4 ad * [2] the map need not be locked for reference dropping
190 1.85.2.4 ad * [3] dropping references may trigger pager I/O, and if we hit
191 1.85.2.4 ad * a pager that does synchronous I/O we may have to wait for it.
192 1.85.2.4 ad * [4] we would like all waiting for I/O to occur with maps unlocked
193 1.85.2.4 ad * so that we don't block other threads.
194 1.23 thorpej */
195 1.85.2.4 ad
196 1.85.2.4 ad first_entry = NULL;
197 1.85.2.4 ad *entry_list = NULL;
198 1.23 thorpej
199 1.23 thorpej /*
200 1.85.2.4 ad * break up the area into map entry sized regions and unmap. note
201 1.85.2.4 ad * that all mappings have to be removed before we can even consider
202 1.85.2.4 ad * dropping references to amaps or VM objects (otherwise we could end
203 1.85.2.4 ad * up with a mapping to a page on the free list which would be very bad)
204 1.81 thorpej */
205 1.85.2.4 ad
206 1.85.2.4 ad while ((entry != &map->header) && (entry->start < end)) {
207 1.85.2.4 ad KASSERT((entry->flags & UVM_MAP_FIRST) == 0);
208 1.85.2.4 ad
209 1.85.2.4 ad UVM_MAP_CLIP_END(map, entry, end, umr);
210 1.85.2.4 ad next = entry->next;
211 1.85.2.4 ad len = entry->end - entry->start;
212 1.85.2.4 ad
213 1.85.2.4 ad /*
214 1.85.2.4 ad * unwire before removing addresses from the pmap; otherwise
215 1.85.2.4 ad * unwiring will put the entries back into the pmap (XXX).
216 1.85.2.4 ad */
217 1.85.2.4 ad
218 1.85.2.4 ad if (VM_MAPENT_ISWIRED(entry)) {
219 1.85.2.4 ad uvm_map_entry_unwire(map, entry);
220 1.85.2.4 ad }
221 1.85.2.4 ad if (flags & UVM_FLAG_VAONLY) {
222 1.85.2.4 ad
223 1.85.2.4 ad /* nothing */
224 1.85.2.4 ad
225 1.85.2.4 ad } else if ((map->flags & VM_MAP_PAGEABLE) == 0) {
226 1.85.2.4 ad
227 1.85.2.4 ad /*
228 1.85.2.4 ad * if the map is non-pageable, any pages mapped there
229 1.85.2.4 ad * must be wired and entered with pmap_kenter_pa(),
230 1.85.2.4 ad * and we should free any such pages immediately.
231 1.85.2.4 ad * this is mostly used for kmem_map and mb_map.
232 1.85.2.4 ad */
233 1.85.2.4 ad
234 1.85.2.4 ad if ((entry->flags & UVM_MAP_KMAPENT) == 0) {
235 1.85.2.4 ad uvm_km_pgremove_intrsafe(entry->start,
236 1.85.2.4 ad entry->end);
237 1.85.2.4 ad pmap_kremove(entry->start, len);
238 1.85.2.4 ad }
239 1.85.2.4 ad } else if (UVM_ET_ISOBJ(entry) &&
240 1.85.2.4 ad UVM_OBJ_IS_KERN_OBJECT(entry->object.uvm_obj)) {
241 1.85.2.4 ad KASSERT(vm_map_pmap(map) == pmap_kernel());
242 1.85.2.4 ad
243 1.85.2.4 ad /*
244 1.85.2.4 ad * note: kernel object mappings are currently used in
245 1.85.2.4 ad * two ways:
246 1.85.2.4 ad * [1] "normal" mappings of pages in the kernel object
247 1.85.2.4 ad * [2] uvm_km_valloc'd allocations in which we
248 1.85.2.4 ad * pmap_enter in some non-kernel-object page
249 1.85.2.4 ad * (e.g. vmapbuf).
250 1.85.2.4 ad *
251 1.85.2.4 ad * for case [1], we need to remove the mapping from
252 1.85.2.4 ad * the pmap and then remove the page from the kernel
253 1.85.2.4 ad * object (because, once pages in a kernel object are
254 1.85.2.4 ad * unmapped they are no longer needed, unlike, say,
255 1.85.2.4 ad * a vnode where you might want the data to persist
256 1.85.2.4 ad * until flushed out of a queue).
257 1.85.2.4 ad *
258 1.85.2.4 ad * for case [2], we need to remove the mapping from
259 1.85.2.4 ad * the pmap. there shouldn't be any pages at the
260 1.85.2.4 ad * specified offset in the kernel object [but it
261 1.85.2.4 ad * doesn't hurt to call uvm_km_pgremove just to be
262 1.85.2.4 ad * safe?]
263 1.85.2.4 ad *
264 1.85.2.4 ad * uvm_km_pgremove currently does the following:
265 1.85.2.4 ad * for pages in the kernel object in range:
266 1.85.2.4 ad * - drops the swap slot
267 1.85.2.4 ad * - uvm_pagefree the page
268 1.85.2.4 ad */
269 1.85.2.4 ad
270 1.85.2.4 ad /*
271 1.85.2.4 ad * remove mappings from pmap and drop the pages
272 1.85.2.4 ad * from the object. offsets are always relative
273 1.85.2.4 ad * to vm_map_min(kernel_map).
274 1.85.2.4 ad */
275 1.85.2.4 ad
276 1.85.2.4 ad pmap_remove(pmap_kernel(), entry->start,
277 1.85.2.4 ad entry->start + len);
278 1.85.2.4 ad uvm_km_pgremove(entry->start, entry->end);
279 1.85.2.4 ad
280 1.85.2.4 ad /*
281 1.85.2.4 ad * null out kernel_object reference, we've just
282 1.85.2.4 ad * dropped it
283 1.85.2.4 ad */
284 1.85.2.4 ad
285 1.85.2.4 ad entry->etype &= ~UVM_ET_OBJ;
286 1.85.2.4 ad entry->object.uvm_obj = NULL;
287 1.85.2.4 ad } else if (UVM_ET_ISOBJ(entry) || entry->aref.ar_amap) {
288 1.85.2.4 ad
289 1.85.2.4 ad /*
290 1.85.2.4 ad * remove mappings the standard way.
291 1.85.2.4 ad */
292 1.85.2.4 ad
293 1.85.2.4 ad pmap_remove(map->pmap, entry->start, entry->end);
294 1.85.2.4 ad }
295 1.85.2.4 ad
296 1.85.2.4 ad #if defined(DEBUG)
297 1.85.2.4 ad if ((entry->flags & UVM_MAP_KMAPENT) == 0) {
298 1.85.2.4 ad
299 1.85.2.4 ad /*
300 1.85.2.4 ad * check if there's any remaining mapping,
301 1.85.2.4 ad * which would be a bug in the caller.
302 1.85.2.4 ad */
303 1.85.2.4 ad
304 1.85.2.4 ad vaddr_t va;
305 1.85.2.4 ad for (va = entry->start; va < entry->end;
306 1.85.2.4 ad va += PAGE_SIZE) {
307 1.85.2.4 ad if (pmap_extract(vm_map_pmap(map), va, NULL)) {
308 1.85.2.4 ad panic("uvm_unmap_remove: has mapping");
309 1.85.2.4 ad }
310 1.85.2.4 ad }
311 1.85.2.4 ad
312 1.85.2.4 ad if (VM_MAP_IS_KERNEL(map)) {
313 1.85.2.4 ad uvm_km_check_empty(entry->start, entry->end,
314 1.85.2.4 ad (map->flags & VM_MAP_INTRSAFE) != 0);
315 1.85.2.4 ad }
316 1.85.2.4 ad }
317 1.85.2.4 ad #endif /* defined(DEBUG) */
318 1.85.2.4 ad
319 1.85.2.4 ad /*
320 1.85.2.4 ad * remove entry from map and put it on our list of entries
321 1.85.2.4 ad * that we've nuked. then go to next entry.
322 1.85.2.4 ad */
323 1.85.2.4 ad
324 1.85.2.4 ad UVMHIST_LOG(maphist, " removed map entry 0x%x", entry, 0, 0,0);
325 1.85.2.4 ad
326 1.85.2.4 ad /* critical! prevents stale hint */
327 1.85.2.4 ad SAVE_HINT(map, entry, entry->prev);
328 1.85.2.4 ad
329 1.85.2.4 ad uvm_map_entry_unlink(map, entry);
330 1.85.2.4 ad KASSERT(map->size >= len);
331 1.85.2.4 ad map->size -= len;
332 1.85.2.4 ad entry->prev = NULL;
333 1.85.2.4 ad entry->next = first_entry;
334 1.85.2.4 ad first_entry = entry;
335 1.85.2.4 ad entry = next;
336 1.85.2.4 ad }
337 1.85.2.4 ad if ((map->flags & VM_MAP_DYING) == 0) {
338 1.85.2.4 ad pmap_update(vm_map_pmap(map));
339 1.81 thorpej }
340 1.81 thorpej
341 1.85.2.4 ad uvm_map_check(map, "unmap_remove leave");
342 1.85.2.4 ad
343 1.81 thorpej /*
344 1.85.2.4 ad * now we've cleaned up the map and are ready for the caller to drop
345 1.85.2.4 ad * references to the mapped objects.
346 1.15 thorpej */
347 1.15 thorpej
348 1.85.2.4 ad *entry_list = first_entry;
349 1.85.2.4 ad UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0);
350 1.70 yamt
351 1.85.2.4 ad if (map->flags & VM_MAP_WANTVA) {
352 1.85.2.4 ad mutex_enter(&map->misc_lock);
353 1.85.2.4 ad map->flags &= ~VM_MAP_WANTVA;
354 1.85.2.4 ad cv_broadcast(&map->cv);
355 1.85.2.4 ad mutex_exit(&map->misc_lock);
356 1.85.2.4 ad }
357 1.70 yamt }
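
/*
 * Editor's sketch (not part of the original file): the usual pairing of
 * uvm_unmap_remove() with uvm_unmap_detach(), following the locking
 * rationale in the comments inside uvm_unmap_remove() above.  The wrapper
 * name below is hypothetical; the umr argument and flags are simply left
 * at their defaults (NULL / 0).
 */
#if 0	/* illustrative only */
static void
example_unmap_range(struct vm_map *map, vaddr_t start, vaddr_t end)
{
	struct vm_map_entry *dead_entries;

	vm_map_lock(map);		/* [1] unmapping needs the map locked */
	uvm_unmap_remove(map, start, end, &dead_entries, NULL, 0);
	vm_map_unlock(map);		/* [2]-[4] drop references unlocked */

	if (dead_entries != NULL)
		uvm_unmap_detach(dead_entries, 0);
}
#endif	/* illustrative only */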
358 1.70 yamt
359 1.70 yamt /*
360 1.85.2.4 ad * uvm_unmap_detach: drop references in a chain of map entries
361 1.85.2.4 ad *
362 1.85.2.4 ad * => we will free the map entries as we traverse the list.
363 1.15 thorpej */
364 1.85.2.4 ad
365 1.15 thorpej void
366 1.85.2.4 ad uvm_unmap_detach(struct vm_map_entry *first_entry, int flags)
367 1.15 thorpej {
368 1.85.2.4 ad struct vm_map_entry *next_entry;
369 1.85.2.4 ad UVMHIST_FUNC("uvm_unmap_detach"); UVMHIST_CALLED(maphist);
370 1.76 yamt
371 1.85.2.4 ad while (first_entry) {
372 1.85.2.4 ad KASSERT(!VM_MAPENT_ISWIRED(first_entry));
373 1.85.2.4 ad UVMHIST_LOG(maphist,
374 1.85.2.4 ad " detach 0x%x: amap=0x%x, obj=0x%x, submap?=%d",
375 1.85.2.4 ad first_entry, first_entry->aref.ar_amap,
376 1.85.2.4 ad first_entry->object.uvm_obj,
377 1.85.2.4 ad UVM_ET_ISSUBMAP(first_entry));
378 1.15 thorpej
379 1.85.2.4 ad /*
380 1.85.2.4 ad * drop reference to amap, if we've got one
381 1.85.2.4 ad */
382 1.85.2.4 ad
383 1.85.2.4 ad if (first_entry->aref.ar_amap)
384 1.85.2.4 ad uvm_map_unreference_amap(first_entry, flags);
385 1.76 yamt
386 1.85.2.4 ad /*
387 1.85.2.4 ad * drop reference to our backing object, if we've got one
388 1.85.2.4 ad */
389 1.85.2.4 ad
390 1.85.2.4 ad KASSERT(!UVM_ET_ISSUBMAP(first_entry));
391 1.85.2.4 ad if (UVM_ET_ISOBJ(first_entry) &&
392 1.85.2.4 ad first_entry->object.uvm_obj->pgops->pgo_detach) {
393 1.85.2.4 ad (*first_entry->object.uvm_obj->pgops->pgo_detach)
394 1.85.2.4 ad (first_entry->object.uvm_obj);
395 1.85.2.4 ad }
396 1.85.2.4 ad next_entry = first_entry->next;
397 1.85.2.4 ad uvm_mapent_free(first_entry);
398 1.85.2.4 ad first_entry = next_entry;
399 1.85.2.4 ad }
400 1.85.2.4 ad UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
401 1.39 hannken }
402 1.39 hannken
403 1.40 hannken /*
404 1.85.2.4 ad * E X T R A C T I O N F U N C T I O N S
405 1.83 scw */
406 1.83 scw
407 1.83 scw /*
408 1.85.2.4 ad * uvm_map_reserve: reserve space in a vm_map for future use.
409 1.85.2.4 ad *
410 1.85.2.4 ad * => we reserve space in a map by putting a dummy map entry in the
411 1.85.2.4 ad * map (dummy means obj=NULL, amap=NULL, prot=VM_PROT_NONE)
412 1.85.2.4 ad * => map should be unlocked (we will write lock it)
413 1.85.2.4 ad * => we return true if we were able to reserve space
414 1.85.2.4 ad * => XXXCDC: should be inline?
415 1.50 fvdl */
416 1.85.2.4 ad
417 1.50 fvdl int
418 1.85.2.4 ad uvm_map_reserve(struct vm_map *map, vsize_t size,
419 1.85.2.4 ad vaddr_t offset /* hint for pmap_prefer */,
420 1.85.2.4 ad vsize_t align /* alignment hint */,
421 1.85.2.4 ad vaddr_t *raddr /* IN:hint, OUT: reserved VA */,
422 1.85.2.4 ad uvm_flag_t flags /* UVM_FLAG_FIXED or 0 */)
423 1.50 fvdl {
424 1.85.2.4 ad UVMHIST_FUNC("uvm_map_reserve"); UVMHIST_CALLED(maphist);
425 1.50 fvdl
426 1.85.2.4 ad UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x, offset=0x%x,addr=0x%x)",
427 1.85.2.4 ad map,size,offset,raddr);
428 1.50 fvdl
429 1.85.2.4 ad size = round_page(size);
430 1.85.2.4 ad
431 1.85.2.4 ad /*
432 1.85.2.4 ad * reserve some virtual space.
433 1.85.2.4 ad */
434 1.85.2.4 ad
435 1.85.2.4 ad if (uvm_map(map, raddr, size, NULL, offset, 0,
436 1.85.2.4 ad UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
437 1.85.2.4 ad UVM_ADV_RANDOM, UVM_FLAG_NOMERGE|flags)) != 0) {
438 1.85.2.4 ad UVMHIST_LOG(maphist, "<- done (no VM)", 0,0,0,0);
439 1.85.2.4 ad return (false);
440 1.50 fvdl }
441 1.50 fvdl
442 1.85.2.4 ad UVMHIST_LOG(maphist, "<- done (*raddr=0x%x)", *raddr,0,0,0);
443 1.85.2.4 ad return (true);
444 1.11 mycroft }
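
/*
 * Editor's sketch (not part of the original file): reserving a page of
 * kernel VA with uvm_map_reserve() and noting how the blank entry can
 * later be filled in.  The function name is hypothetical; error handling
 * is minimal.
 */
#if 0	/* illustrative only */
static int
example_reserve_kva(vaddr_t *vap)
{
	vaddr_t va = 0;		/* no particular address hint */

	if (uvm_map_reserve(kernel_map, PAGE_SIZE, 0, 0, &va, 0) == false)
		return ENOMEM;

	/*
	 * Later, with the map write-locked, a chain of real map entries
	 * covering [va, va + PAGE_SIZE) can be installed over the blank
	 * reservation with uvm_map_replace() (see below).
	 */
	*vap = va;
	return 0;
}
#endif	/* illustrative only */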
445 1.82 thorpej
446 1.82 thorpej /*
447 1.85.2.4 ad * uvm_map_replace: replace a reserved (blank) area of memory with
448 1.85.2.4 ad * real mappings.
449 1.85.2.4 ad *
450 1.85.2.4 ad * => caller must WRITE-LOCK the map
451 1.85.2.4 ad * => we return true if replacement was a success
452 1.85.2.4 ad * => we expect the newents chain to have nnewents entries on it and
453 1.85.2.4 ad * we expect newents->prev to point to the last entry on the list
454 1.85.2.4 ad * => note newents is allowed to be NULL
455 1.83 scw */
456 1.85.2.4 ad
457 1.83 scw int
458 1.85.2.4 ad uvm_map_replace(struct vm_map *map, vaddr_t start, vaddr_t end,
459 1.85.2.4 ad struct vm_map_entry *newents, int nnewents)
460 1.83 scw {
461 1.85.2.4 ad struct vm_map_entry *oldent, *last;
462 1.83 scw
463 1.85.2.4 ad uvm_map_check(map, "map_replace entry");
464 1.83 scw
465 1.85.2.4 ad /*
466 1.85.2.4 ad * first find the blank map entry at the specified address
467 1.85.2.4 ad */
468 1.85.2.1 ad
469 1.85.2.4 ad if (!uvm_map_lookup_entry(map, start, &oldent)) {
470 1.85.2.4 ad return (false);
471 1.85.2.4 ad }
472 1.85.2.1 ad
473 1.85.2.4 ad /*
474 1.85.2.4 ad * check to make sure we have a proper blank entry
475 1.85.2.4 ad */
476 1.85.2.1 ad
477 1.85.2.4 ad if (end < oldent->end && !VM_MAP_USE_KMAPENT(map)) {
478 1.85.2.4 ad UVM_MAP_CLIP_END(map, oldent, end, NULL);
479 1.85.2.4 ad }
480 1.85.2.4 ad if (oldent->start != start || oldent->end != end ||
481 1.85.2.4 ad oldent->object.uvm_obj != NULL || oldent->aref.ar_amap != NULL) {
482 1.85.2.4 ad return (false);
483 1.85.2.4 ad }
484 1.85.2.1 ad
485 1.85.2.4 ad #ifdef DIAGNOSTIC
486 1.85.2.1 ad
487 1.85.2.4 ad /*
488 1.85.2.4 ad * sanity check the newents chain
489 1.85.2.4 ad */
490 1.85.2.1 ad
491 1.85.2.4 ad {
492 1.85.2.4 ad struct vm_map_entry *tmpent = newents;
493 1.85.2.4 ad int nent = 0;
494 1.85.2.4 ad vaddr_t cur = start;
495 1.85.2.4 ad
496 1.85.2.4 ad while (tmpent) {
497 1.85.2.4 ad nent++;
498 1.85.2.4 ad if (tmpent->start < cur)
499 1.85.2.4 ad panic("uvm_map_replace1");
500 1.85.2.4 ad if (tmpent->start > tmpent->end || tmpent->end > end) {
501 1.85.2.4 ad printf("tmpent->start=0x%lx, tmpent->end=0x%lx, end=0x%lx\n",
502 1.85.2.4 ad tmpent->start, tmpent->end, end);
503 1.85.2.4 ad panic("uvm_map_replace2");
504 1.85.2.4 ad }
505 1.85.2.4 ad cur = tmpent->end;
506 1.85.2.4 ad if (tmpent->next) {
507 1.85.2.4 ad if (tmpent->next->prev != tmpent)
508 1.85.2.4 ad panic("uvm_map_replace3");
509 1.85.2.4 ad } else {
510 1.85.2.4 ad if (newents->prev != tmpent)
511 1.85.2.4 ad panic("uvm_map_replace4");
512 1.85.2.4 ad }
513 1.85.2.4 ad tmpent = tmpent->next;
514 1.85.2.4 ad }
515 1.85.2.4 ad if (nent != nnewents)
516 1.85.2.4 ad panic("uvm_map_replace5");
517 1.85.2.4 ad }
518 1.85.2.4 ad #endif
519 1.82 thorpej
520 1.85.2.4 ad /*
521 1.85.2.4 ad * map entry is a valid blank! replace it. (this does all the
522 1.85.2.4 ad * work of map entry link/unlink...).
523 1.85.2.4 ad */
524 1.82 thorpej
525 1.85.2.4 ad if (newents) {
526 1.85.2.4 ad last = newents->prev;
527 1.82 thorpej
528 1.85.2.4 ad /* critical: flush stale hints out of map */
529 1.85.2.4 ad SAVE_HINT(map, map->hint, newents);
530 1.85.2.4 ad if (map->first_free == oldent)
531 1.85.2.4 ad map->first_free = last;
532 1.85.2.4 ad
533 1.85.2.4 ad last->next = oldent->next;
534 1.85.2.4 ad last->next->prev = last;
535 1.85.2.4 ad
536 1.85.2.4 ad /* Fix RB tree */
537 1.85.2.4 ad uvm_rb_remove(map, oldent);
538 1.85.2.4 ad
539 1.85.2.4 ad newents->prev = oldent->prev;
540 1.85.2.4 ad newents->prev->next = newents;
541 1.85.2.4 ad map->nentries = map->nentries + (nnewents - 1);
542 1.85.2.4 ad
543 1.85.2.4 ad /* Fixup the RB tree */
544 1.85.2.4 ad {
545 1.85.2.4 ad int i;
546 1.85.2.4 ad struct vm_map_entry *tmp;
547 1.85.2.4 ad
548 1.85.2.4 ad tmp = newents;
549 1.85.2.4 ad for (i = 0; i < nnewents && tmp; i++) {
550 1.85.2.4 ad uvm_rb_insert(map, tmp);
551 1.85.2.4 ad tmp = tmp->next;
552 1.85.2.4 ad }
553 1.85.2.4 ad }
554 1.85.2.4 ad } else {
555 1.85.2.4 ad /* NULL list of new entries: just remove the old one */
556 1.85.2.4 ad clear_hints(map, oldent);
557 1.85.2.4 ad uvm_map_entry_unlink(map, oldent);
558 1.82 thorpej }
559 1.82 thorpej
560 1.85.2.4 ad uvm_map_check(map, "map_replace leave");