Lines matching defs:old_entry (each match is prefixed with its line number in the source file):
4418 uvm_mapent_clone(struct vm_map *new_map, struct vm_map_entry *old_entry,
4424 /* old_entry -> new_entry */
4425 uvm_mapent_copy(old_entry, new_entry);
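
The clone step that the match list opens with ("old_entry -> new_entry") is used by all three fork helpers below: the parent's entry is copied field for field into a freshly allocated entry that is then linked into the child's map. A minimal sketch of that pattern, assuming a much-simplified entry type; struct mentry, struct cmap and mapent_clone() are illustrative stand-ins, not struct vm_map_entry or uvm_mapent_copy():

/* Illustrative sketch only: simplified stand-ins, not the UVM code. */
#include <stdlib.h>
#include <string.h>

struct mentry {
    unsigned long start, end;        /* mapped address range */
    int protection, etype;           /* protection and entry flags */
    struct mentry *next;             /* linkage inside a map */
};

struct cmap {
    struct mentry header;            /* circular list head: header.next is the first entry */
};

/* "old_entry -> new_entry": duplicate the parent's entry into the child map. */
static struct mentry *
mapent_clone(struct cmap *new_map, const struct mentry *old_entry)
{
    struct mentry *new_entry = malloc(sizeof(*new_entry));

    if (new_entry == NULL)
        return NULL;
    memcpy(new_entry, old_entry, sizeof(*new_entry));   /* field-for-field copy */

    new_entry->next = new_map->header.next;             /* link into the child map */
    new_map->header.next = new_entry;
    return new_entry;
}
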
4456 struct vm_map_entry *old_entry)
4459 * if the old_entry needs a new amap (due to prev fork)
4465 if (UVM_ET_ISNEEDSCOPY(old_entry)) {
4467 amap_copy(old_map, old_entry, AMAP_COPY_NOCHUNK,
4472 uvm_mapent_clone(new_map, old_entry, AMAP_SHARED);
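
uvm_mapent_forkshared() (the matches at 4456-4472) handles shared inheritance: if the parent entry still carries a deferred amap copy from an earlier fork (UVM_ET_ISNEEDSCOPY), amap_copy() is called with AMAP_COPY_NOCHUNK so the whole entry gets its amap up front, and only then is the entry cloned with AMAP_SHARED, leaving parent and child referencing the same amap. A hedged sketch of that ordering, again with stand-in names rather than the real UVM types:

/* Illustrative sketch only: simplified stand-ins, not the UVM code. */
#define ET_NEEDSCOPY 0x01            /* stand-in for UVM_ET_NEEDSCOPY */

struct sentry {
    unsigned long start, end;        /* mapped range */
    int etype;                       /* entry flags */
    void *amap;                      /* anonymous backing, may be NULL */
};

/* Stand-in for amap_copy(old_map, old_entry, AMAP_COPY_NOCHUNK, ...):
 * materialize a private amap for the whole entry right now, which clears
 * the deferred needs-copy state. */
static void
resolve_deferred_amap(struct sentry *e)
{
    e->etype &= ~ET_NEEDSCOPY;
}

/* Stand-in for uvm_mapent_clone(new_map, old_entry, AMAP_SHARED); a real
 * clone allocates a child entry that references the same amap. */
static struct sentry *
clone_shared(struct sentry *old_entry)
{
    return old_entry;
}

/* Shared inheritance: resolve any deferred amap copy first, then clone
 * the entry so parent and child end up referencing one amap. */
static struct sentry *
fork_shared(struct sentry *old_entry)
{
    if (old_entry->etype & ET_NEEDSCOPY)
        resolve_deferred_amap(old_entry);
    return clone_shared(old_entry);
}
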
4478 struct vm_map_entry *old_entry)
4490 new_entry = uvm_mapent_clone(new_map, old_entry, 0);
4527 if (old_entry->aref.ar_amap != NULL) {
4528 if ((amap_flags(old_entry->aref.ar_amap) & AMAP_SHARED) != 0 ||
4529 VM_MAPENT_ISWIRED(old_entry)) {
4548 if (VM_MAPENT_ISWIRED(old_entry)) {
4555 if (old_entry->aref.ar_amap)
4567 if (old_entry->aref.ar_amap &&
4568 !UVM_ET_ISNEEDSCOPY(old_entry)) {
4569 if (old_entry->max_protection & VM_PROT_WRITE) {
4571 uvm_map_lock_entry(old_entry, RW_WRITER);
4573 uvm_map_lock_entry(old_entry, RW_READER);
4576 old_entry->start, old_entry->end,
4577 old_entry->protection & ~VM_PROT_WRITE);
4578 uvm_map_unlock_entry(old_entry);
4580 old_entry->etype |= UVM_ET_NEEDSCOPY;
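
uvm_mapent_forkcopy() (4478-4580) is the copy-on-write decision. A parent amap that is already shared, or an entry that is wired, cannot use the deferred copy, so the copy is done eagerly; otherwise the parent's existing hardware mappings are downgraded with pmap_protect() to protection & ~VM_PROT_WRITE, with the per-entry lock taken as writer only when max_protection allows writes, and the entry is flagged UVM_ET_NEEDSCOPY so the real copy is left to the first write fault. The outline below is a simplified sketch of that branch; the types and helper names are stand-ins, and the eager-copy side (including the extra handling of wired entries at 4548) is only partly visible in the match list:

/* Illustrative sketch only: simplified stand-ins, not the UVM code. */
#include <stdbool.h>
#include <stddef.h>

#define PROT_WRITE   0x02            /* stand-in for VM_PROT_WRITE */
#define ET_NEEDSCOPY 0x01            /* stand-in for UVM_ET_NEEDSCOPY */

struct centry {
    unsigned long start, end;
    int protection;                  /* current protection */
    int max_protection;              /* in the real code, picks the lock mode */
    int etype;                       /* entry flags */
    bool wired;                      /* stand-in for VM_MAPENT_ISWIRED() */
    bool amap_shared;                /* stand-in for amap_flags() & AMAP_SHARED */
    void *amap;                      /* stand-in for aref.ar_amap */
};

/* Stand-ins for amap_copy() and pmap_protect(): the real calls copy the
 * anonymous pages and downgrade the hardware mappings, respectively. */
static void copy_amap_now(struct centry *e) { (void)e; }
static void write_protect(unsigned long s, unsigned long e, int prot)
{
    (void)s; (void)e; (void)prot;
}

static void
fork_copy_outline(struct centry *old_entry, struct centry *new_entry)
{
    /*
     * An amap that is already shared, or a wired entry, cannot rely on
     * the deferred copy, so the copy happens eagerly (the match list
     * shows only the condition, 4527-4529 and 4548, not the body).
     */
    if (old_entry->amap != NULL &&
        (old_entry->amap_shared || old_entry->wired))
        copy_amap_now(new_entry);

    /*
     * Deferred (copy-on-write) path: if the parent is not already in the
     * needs-copy state, strip write permission from its existing
     * hardware mappings so the first write faults, then flag the entry;
     * the fault handler performs the real copy later.  The real code
     * takes the per-entry lock around pmap_protect(), as writer only
     * when max_protection allows writes (4569-4578 above).
     */
    if (old_entry->amap != NULL && !(old_entry->etype & ET_NEEDSCOPY)) {
        write_protect(old_entry->start, old_entry->end,
            old_entry->protection & ~PROT_WRITE);
        old_entry->etype |= ET_NEEDSCOPY;
    }
}

The point of the deferred path is that fork() only pays for write-protecting the parent's mappings; pages are copied later, and only the ones that are actually written.
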
4590 struct vm_map_entry *old_entry)
4594 new_entry = uvm_mapent_clone(new_map, old_entry, 0);
4631 struct vm_map_entry *old_entry;
4642 old_entry = old_map->header.next;
4649 while (old_entry != &old_map->header) {
4655 KASSERT(!UVM_ET_ISSUBMAP(old_entry));
4656 KASSERT(UVM_ET_ISCOPYONWRITE(old_entry) ||
4657 !UVM_ET_ISNEEDSCOPY(old_entry));
4659 switch (old_entry->inheritance) {
4664 new_map->size -= old_entry->end - old_entry->start;
4668 uvm_mapent_forkshared(new_map, old_map, old_entry);
4672 uvm_mapent_forkcopy(new_map, old_map, old_entry);
4676 uvm_mapent_forkzero(new_map, old_map, old_entry);
4682 old_entry = old_entry->next;
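
Finally, uvmspace_fork() (4631-4682) drives the whole thing: it walks every entry of the parent map, asserts that no entry is a submap and that the copy-on-write flags are consistent, and dispatches on the entry's inheritance attribute, with non-inherited entries simply shrinking the child's size and the share, copy and zero cases going to the helpers above. A compact sketch of that dispatch loop, with stand-in names for the MAP_INHERIT_* values and the fork helpers:

/* Illustrative sketch only: simplified stand-ins, not the UVM code. */
enum inherit { INH_NONE, INH_SHARE, INH_COPY, INH_ZERO };   /* MAP_INHERIT_* stand-ins */

struct fentry {
    unsigned long start, end;
    enum inherit inheritance;
    struct fentry *next;
};

struct fmap {
    struct fentry header;            /* circular list: header.next is the first entry */
    unsigned long size;              /* total mapped bytes */
};

/* Stand-ins for uvm_mapent_forkshared/forkcopy/forkzero. */
static void forkshared_stub(struct fmap *n, struct fmap *o, struct fentry *e) { (void)n; (void)o; (void)e; }
static void forkcopy_stub(struct fmap *n, struct fmap *o, struct fentry *e)   { (void)n; (void)o; (void)e; }
static void forkzero_stub(struct fmap *n, struct fmap *o, struct fentry *e)   { (void)n; (void)o; (void)e; }

/* Walk the parent's entries and hand each one to the helper chosen by
 * its inheritance attribute.  new_map->size is assumed to start out
 * equal to old_map->size, which is why the not-inherited case only has
 * to subtract the entry's range. */
static void
space_fork(struct fmap *new_map, struct fmap *old_map)
{
    struct fentry *old_entry = old_map->header.next;

    while (old_entry != &old_map->header) {
        switch (old_entry->inheritance) {
        case INH_NONE:
            new_map->size -= old_entry->end - old_entry->start;
            break;
        case INH_SHARE:
            forkshared_stub(new_map, old_map, old_entry);
            break;
        case INH_COPY:
            forkcopy_stub(new_map, old_map, old_entry);
            break;
        case INH_ZERO:
            forkzero_stub(new_map, old_map, old_entry);
            break;
        }
        old_entry = old_entry->next;
    }
}
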