		case -1:
			goto wraparound;
		}
		if (tmp->ownspace >= length)
			goto listsearch;
	}
	if (prev == NULL)
		goto notfound;

	if (topdown) {
		KASSERT(orig_hint >= prev->next->start - length ||
		    prev->next->start - length > prev->next->start);
		hint = prev->next->start - length;
	} else {
		KASSERT(orig_hint <= prev->end);
		hint = prev->end;
	}
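	/*
	 * Added commentary: uvm_map_space_avail() adjusts the hint for
	 * offset/alignment constraints and reports 1 if the request fits
	 * at *hint, -1 if the address arithmetic wrapped around, and 0
	 * if this gap cannot hold the request.
	 */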
	switch (uvm_map_space_avail(&hint, length, uoffset, align,
	    topdown, prev)) {
	case 1:
		entry = prev;
		goto found;
	case -1:
		goto wraparound;
	}
	if (prev->ownspace >= length)
		goto listsearch;

	if (topdown)
		tmp = RB_LEFT(prev, rb_entry);
	else
		tmp = RB_RIGHT(prev, rb_entry);
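	/*
	 * Added commentary: descend the augmented red-black tree.  Here
	 * tmp->space caches the largest gap anywhere in tmp's subtree and
	 * tmp->ownspace is the gap immediately after tmp itself; topdown
	 * searches favour the right child (higher addresses), bottom-up
	 * searches the left.
	 */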
	for (;;) {
		KASSERT(tmp && tmp->space >= length);
		if (topdown)
			child = RB_RIGHT(tmp, rb_entry);
		else
			child = RB_LEFT(tmp, rb_entry);
		if (child && child->space >= length) {
			tmp = child;
			continue;
		}
		if (tmp->ownspace >= length)
			break;
		if (topdown)
			tmp = RB_LEFT(tmp, rb_entry);
		else
			tmp = RB_RIGHT(tmp, rb_entry);
	}

	if (topdown) {
		KASSERT(orig_hint >= tmp->next->start - length ||
		    tmp->next->start - length > tmp->next->start);
		hint = tmp->next->start - length;
	} else {
		KASSERT(orig_hint <= tmp->end);
		hint = tmp->end;
	}
	switch (uvm_map_space_avail(&hint, length, uoffset, align,
	    topdown, tmp)) {
	case 1:
		entry = tmp;
		goto found;
	case -1:
		goto wraparound;
	}

	/*
	 * The tree fails to find an entry because of offset or alignment
	 * restrictions.  Search the list instead.
	 */
 listsearch:
	/*
	 * Look through the rest of the map, trying to fit a new region in
	 * the gap between existing regions, or after the very last region.
	 * note: entry->end = base VA of current gap,
	 *	 entry->next->start = VA of end of current gap
	 */
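	/*
	 * Worked example (added): with entry->end == 0x2000 and
	 * entry->next->start == 0x6000, a request of length 0x1000 gives
	 * hint = 0x6000 - 0x1000 = 0x5000 for topdown (top of the gap),
	 * and hint = 0x2000 (bottom of the gap) otherwise.
	 */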

	for (;;) {
		/* Update hint for current gap. */
		hint = topdown ? entry->next->start - length : entry->end;

		/* See if it fits. */
		switch (uvm_map_space_avail(&hint, length, uoffset, align,
		    topdown, entry)) {
		case 1:
			goto found;
		case -1:
			goto wraparound;
		}

		/* Advance to next/previous gap */
		if (topdown) {
			if (entry == &map->header) {
				UVMHIST_LOG(maphist, "<- failed (off start)",
				    0,0,0,0);
				goto notfound;
			}
			entry = entry->prev;
		} else {
			entry = entry->next;
			if (entry == &map->header) {
				UVMHIST_LOG(maphist, "<- failed (off end)",
				    0,0,0,0);
				goto notfound;
			}
		}
	}

 found:
	SAVE_HINT(map, map->hint, entry);
	*result = hint;
	UVMHIST_LOG(maphist,"<- got it!  (result=0x%x)", hint, 0,0,0);
	KASSERT(topdown || hint >= orig_hint);
	KASSERT(!topdown || hint <= orig_hint);
	KASSERT(entry->end <= hint);
	KASSERT(hint + length <= entry->next->start);
	return (entry);

 wraparound:
	UVMHIST_LOG(maphist, "<- failed (wrap around)", 0,0,0,0);

	return (NULL);

 notfound:
	UVMHIST_LOG(maphist, "<- failed (notfound)", 0,0,0,0);

	return (NULL);
}

/*
 *   U N M A P   -   m a i n   h e l p e r   f u n c t i o n s
 */

/*
 * uvm_unmap_remove: remove mappings from a vm_map (from "start" up to "end")
 *
 * => caller must check alignment and size
 * => map must be locked by caller
 * => we return a list of map entries that we've removed from the map
 *    in "entry_list"
 */

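/*
 * Example usage (added sketch, mirroring the usual caller pattern):
 * removal happens with the map locked; the references are dropped
 * afterwards with the map unlocked:
 *
 *	struct vm_map_entry *dead_entries;
 *
 *	vm_map_lock(map);
 *	uvm_unmap_remove(map, start, end, &dead_entries, NULL, 0);
 *	vm_map_unlock(map);
 *	if (dead_entries != NULL)
 *		uvm_unmap_detach(dead_entries, 0);
 */
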
void
uvm_unmap_remove(struct vm_map *map, vaddr_t start, vaddr_t end,
    struct vm_map_entry **entry_list /* OUT */,
    struct uvm_mapent_reservation *umr, int flags)
{
	struct vm_map_entry *entry, *first_entry, *next;
	vaddr_t len;
	UVMHIST_FUNC("uvm_unmap_remove"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(map=0x%x, start=0x%x, end=0x%x)",
	    map, start, end, 0);
	VM_MAP_RANGE_CHECK(map, start, end);

	uvm_map_check(map, "unmap_remove entry");

	/*
	 * find first entry
	 */

	if (uvm_map_lookup_entry(map, start, &first_entry) == true) {
		/* clip and go... */
		entry = first_entry;
		UVM_MAP_CLIP_START(map, entry, start, umr);
		/* critical!  prevents stale hint */
		SAVE_HINT(map, entry, entry->prev);
	} else {
		entry = first_entry->next;
	}

	/*
	 * Save the free space hint
	 */

	if (map->first_free != &map->header && map->first_free->start >= start)
		map->first_free = entry->prev;

	/*
	 * note: we now re-use first_entry for a different task.  we remove
	 * a number of map entries from the map and save them in a linked
	 * list headed by "first_entry".  once we remove them from the map
	 * the caller should unlock the map and drop the references to the
	 * backing objects [c.f. uvm_unmap_detach].  the goal is to
	 * separate unmapping from reference dropping.  why?
	 *   [1] the map has to be locked for unmapping
	 *   [2] the map need not be locked for reference dropping
	 *   [3] dropping references may trigger pager I/O, and if we hit
	 *       a pager that does synchronous I/O we may have to wait for it.
	 *   [4] we would like all waiting for I/O to occur with maps unlocked
	 *       so that we don't block other threads.
	 */

	first_entry = NULL;
	*entry_list = NULL;

	/*
	 * break up the area into map entry sized regions and unmap.  note
	 * that all mappings have to be removed before we can even consider
	 * dropping references to amaps or VM objects (otherwise we could end
	 * up with a mapping to a page on the free list which would be very bad)
	 */

	while ((entry != &map->header) && (entry->start < end)) {
		KASSERT((entry->flags & UVM_MAP_FIRST) == 0);

		UVM_MAP_CLIP_END(map, entry, end, umr);
		next = entry->next;
		len = entry->end - entry->start;

		/*
		 * unwire before removing addresses from the pmap; otherwise
		 * unwiring will put the entries back into the pmap (XXX).
		 */

		if (VM_MAPENT_ISWIRED(entry)) {
			uvm_map_entry_unwire(map, entry);
		}
		if (flags & UVM_FLAG_VAONLY) {

			/* nothing */

		} else if ((map->flags & VM_MAP_PAGEABLE) == 0) {

			/*
			 * if the map is non-pageable, any pages mapped there
			 * must be wired and entered with pmap_kenter_pa(),
			 * and we should free any such pages immediately.
			 * this is mostly used for kmem_map and mb_map.
			 */

			if ((entry->flags & UVM_MAP_KMAPENT) == 0) {
				uvm_km_pgremove_intrsafe(entry->start,
				    entry->end);
				pmap_kremove(entry->start, len);
			}
		} else if (UVM_ET_ISOBJ(entry) &&
			   UVM_OBJ_IS_KERN_OBJECT(entry->object.uvm_obj)) {
			KASSERT(vm_map_pmap(map) == pmap_kernel());

			/*
			 * note: kernel object mappings are currently used in
			 * two ways:
			 *  [1] "normal" mappings of pages in the kernel object
			 *  [2] uvm_km_valloc'd allocations in which we
			 *      pmap_enter in some non-kernel-object page
			 *      (e.g. vmapbuf).
			 *
			 * for case [1], we need to remove the mapping from
			 * the pmap and then remove the page from the kernel
			 * object (because, once pages in a kernel object are
			 * unmapped they are no longer needed, unlike, say,
			 * a vnode where you might want the data to persist
			 * until flushed out of a queue).
			 *
			 * for case [2], we need to remove the mapping from
			 * the pmap.  there shouldn't be any pages at the
			 * specified offset in the kernel object [but it
			 * doesn't hurt to call uvm_km_pgremove just to be
			 * safe?]
			 *
			 * uvm_km_pgremove currently does the following:
			 *   for pages in the kernel object in range:
			 *     - drops the swap slot
			 *     - uvm_pagefree the page
			 */

			/*
			 * remove mappings from pmap and drop the pages
			 * from the object.  offsets are always relative
			 * to vm_map_min(kernel_map).
			 */

			pmap_remove(pmap_kernel(), entry->start,
			    entry->start + len);
			uvm_km_pgremove(entry->start, entry->end);

			/*
			 * null out kernel_object reference, we've just
			 * dropped it
			 */

			entry->etype &= ~UVM_ET_OBJ;
			entry->object.uvm_obj = NULL;
		} else if (UVM_ET_ISOBJ(entry) || entry->aref.ar_amap) {

			/*
			 * remove mappings the standard way.
			 */

			pmap_remove(map->pmap, entry->start, entry->end);
		}

#if defined(DEBUG)
		if ((entry->flags & UVM_MAP_KMAPENT) == 0) {

			/*
			 * check for any remaining mappings; finding one
			 * here indicates a bug in the caller.
			 */

			vaddr_t va;
			for (va = entry->start; va < entry->end;
			    va += PAGE_SIZE) {
				if (pmap_extract(vm_map_pmap(map), va, NULL)) {
					panic("uvm_unmap_remove: has mapping");
				}
			}

			if (VM_MAP_IS_KERNEL(map)) {
				uvm_km_check_empty(entry->start, entry->end,
				    (map->flags & VM_MAP_INTRSAFE) != 0);
			}
		}
#endif /* defined(DEBUG) */

		/*
		 * remove entry from map and put it on our list of entries
		 * that we've nuked.  then go to next entry.
		 */

		UVMHIST_LOG(maphist, "  removed map entry 0x%x", entry, 0, 0, 0);

		/* critical!  prevents stale hint */
		SAVE_HINT(map, entry, entry->prev);

		uvm_map_entry_unlink(map, entry);
		KASSERT(map->size >= len);
		map->size -= len;
		entry->prev = NULL;
		entry->next = first_entry;
		first_entry = entry;
		entry = next;
	}
	if ((map->flags & VM_MAP_DYING) == 0) {
		pmap_update(vm_map_pmap(map));
	}

	uvm_map_check(map, "unmap_remove leave");

	/*
	 * now we've cleaned up the map and are ready for the caller to drop
	 * references to the mapped objects.
	 */

	*entry_list = first_entry;
	UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0);

	if (map->flags & VM_MAP_WANTVA) {
		mutex_enter(&map->misc_lock);
		map->flags &= ~VM_MAP_WANTVA;
		cv_broadcast(&map->cv);
		mutex_exit(&map->misc_lock);
	}
}

/*
 * uvm_unmap_detach: drop references in a chain of map entries
 *
 * => we will free the map entries as we traverse the list.
 */

void
uvm_unmap_detach(struct vm_map_entry *first_entry, int flags)
{
	struct vm_map_entry *next_entry;
	UVMHIST_FUNC("uvm_unmap_detach"); UVMHIST_CALLED(maphist);

	while (first_entry) {
		KASSERT(!VM_MAPENT_ISWIRED(first_entry));
		UVMHIST_LOG(maphist,
		    "  detach 0x%x: amap=0x%x, obj=0x%x, submap?=%d",
		    first_entry, first_entry->aref.ar_amap,
		    first_entry->object.uvm_obj,
		    UVM_ET_ISSUBMAP(first_entry));

		/*
		 * drop reference to amap, if we've got one
		 */

		if (first_entry->aref.ar_amap)
			uvm_map_unreference_amap(first_entry, flags);

		/*
		 * drop reference to our backing object, if we've got one
		 */

		KASSERT(!UVM_ET_ISSUBMAP(first_entry));
		if (UVM_ET_ISOBJ(first_entry) &&
		    first_entry->object.uvm_obj->pgops->pgo_detach) {
			(*first_entry->object.uvm_obj->pgops->pgo_detach)
				(first_entry->object.uvm_obj);
		}
		next_entry = first_entry->next;
		uvm_mapent_free(first_entry);
		first_entry = next_entry;
	}
	UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
}

/*
 *   E X T R A C T I O N   F U N C T I O N S
 */

/*
 * uvm_map_reserve: reserve space in a vm_map for future use.
 *
 * => we reserve space in a map by putting a dummy map entry in the
 *    map (dummy means obj=NULL, amap=NULL, prot=VM_PROT_NONE)
 * => map should be unlocked (we will write lock it)
 * => we return true if we were able to reserve space
 * => XXXCDC: should be inline?
 */

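/*
 * Example usage (added sketch; "hint_va" and the ENOMEM handling are
 * illustrative): reserve a chunk of VA to be populated later, e.g.
 * via uvm_map_replace():
 *
 *	vaddr_t va = hint_va;
 *
 *	if (uvm_map_reserve(map, size, 0, 0, &va, 0) == false)
 *		return ENOMEM;
 *	(on success, [va, va + round_page(size)) holds a blank entry)
 */
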
int
uvm_map_reserve(struct vm_map *map, vsize_t size,
    vaddr_t offset	/* hint for pmap_prefer */,
    vsize_t align	/* alignment hint */,
    vaddr_t *raddr	/* IN:hint, OUT: reserved VA */,
    uvm_flag_t flags	/* UVM_FLAG_FIXED or 0 */)
{
	UVMHIST_FUNC("uvm_map_reserve"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x, offset=0x%x, addr=0x%x)",
	    map, size, offset, raddr);

	size = round_page(size);

	/*
	 * reserve some virtual space.
	 */

	if (uvm_map(map, raddr, size, NULL, offset, 0,
	    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
	    UVM_ADV_RANDOM, UVM_FLAG_NOMERGE|flags)) != 0) {
		UVMHIST_LOG(maphist, "<- done (no VM)", 0,0,0,0);
		return (false);
	}

	UVMHIST_LOG(maphist, "<- done (*raddr=0x%x)", *raddr,0,0,0);
	return (true);
}

/*
 * uvm_map_replace: replace a reserved (blank) area of memory with
 * real mappings.
 *
 * => caller must WRITE-LOCK the map
 * => we return true if replacement was a success
 * => we expect the newents chain to have nnewents entries on it and
 *    we expect newents->prev to point to the last entry on the list
 * => note newents is allowed to be NULL
 */

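/*
 * Illustrative shape of a valid newents chain (added commentary),
 * here with nnewents == 3, covering [start, end) in ascending order:
 *
 *	newents == e1 -> e2 -> e3 -> NULL
 *	e2->prev == e1, e3->prev == e2, newents->prev == e3
 *
 * i.e. the head's prev pointer gives O(1) access to the tail.
 */
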
int
uvm_map_replace(struct vm_map *map, vaddr_t start, vaddr_t end,
    struct vm_map_entry *newents, int nnewents)
{
	struct vm_map_entry *oldent, *last;

	uvm_map_check(map, "map_replace entry");

	/*
	 * first find the blank map entry at the specified address
	 */

	if (!uvm_map_lookup_entry(map, start, &oldent)) {
		return (false);
	}

	/*
	 * check to make sure we have a proper blank entry
	 */

	if (end < oldent->end && !VM_MAP_USE_KMAPENT(map)) {
		UVM_MAP_CLIP_END(map, oldent, end, NULL);
	}
	if (oldent->start != start || oldent->end != end ||
	    oldent->object.uvm_obj != NULL || oldent->aref.ar_amap != NULL) {
		return (false);
	}

#ifdef DIAGNOSTIC

	/*
	 * sanity check the newents chain
	 */

	{
		struct vm_map_entry *tmpent = newents;
		int nent = 0;
		vaddr_t cur = start;

		while (tmpent) {
			nent++;
			if (tmpent->start < cur)
				panic("uvm_map_replace1");
			if (tmpent->start > tmpent->end || tmpent->end > end) {
				printf("tmpent->start=0x%lx, "
				    "tmpent->end=0x%lx, end=0x%lx\n",
				    tmpent->start, tmpent->end, end);
				panic("uvm_map_replace2");
			}
			cur = tmpent->end;
			if (tmpent->next) {
				if (tmpent->next->prev != tmpent)
					panic("uvm_map_replace3");
			} else {
				if (newents->prev != tmpent)
					panic("uvm_map_replace4");
			}
			tmpent = tmpent->next;
		}
		if (nent != nnewents)
			panic("uvm_map_replace5");
	}
#endif

	/*
	 * map entry is a valid blank!   replace it.   (this does all the
	 * work of map entry link/unlink...).
	 */

	if (newents) {
		last = newents->prev;

		/* critical: flush stale hints out of map */
		SAVE_HINT(map, map->hint, newents);
		if (map->first_free == oldent)
			map->first_free = last;

		last->next = oldent->next;
		last->next->prev = last;

		/* Fix RB tree */
		uvm_rb_remove(map, oldent);

		newents->prev = oldent->prev;
		newents->prev->next = newents;
		map->nentries = map->nentries + (nnewents - 1);

		/* Fixup the RB tree */
		{
			int i;
			struct vm_map_entry *tmp;

			tmp = newents;
			for (i = 0; i < nnewents && tmp; i++) {
				uvm_rb_insert(map, tmp);
				tmp = tmp->next;
			}
		}
	} else {
		/* NULL list of new entries: just remove the old one */
		clear_hints(map, oldent);
		uvm_map_entry_unlink(map, oldent);
	}

	uvm_map_check(map, "map_replace leave");