Lines matching defs:new_entry (a symbol cross-reference over what appears to be OpenBSD's sys/uvm/uvm_map.c; the number at the start of each line is its position in that file)
3539 struct vm_map_entry *new_entry, *first, *last;
3549 new_entry = uvm_map_mkentry(dstmap, first, last,
3551 if (new_entry == NULL)
3553 /* old_entry -> new_entry */
3554 new_entry->object = old_entry->object;
3555 new_entry->offset = old_entry->offset;
3556 new_entry->aref = old_entry->aref;
3557 new_entry->etype |= old_entry->etype & ~UVM_ET_FREEMAPPED;
3558 new_entry->protection = prot;
3559 new_entry->max_protection = maxprot;
3560 new_entry->inheritance = old_entry->inheritance;
3561 new_entry->advice = old_entry->advice;
3564 if (new_entry->aref.ar_amap) {
3565 new_entry->aref.ar_pageoff += off >> PAGE_SHIFT;
3566 amap_ref(new_entry->aref.ar_amap, new_entry->aref.ar_pageoff,
3567 (new_entry->end - new_entry->start) >> PAGE_SHIFT,
3571 if (UVM_ET_ISOBJ(new_entry) &&
3572 new_entry->object.uvm_obj->pgops->pgo_reference) {
3573 new_entry->offset += off;
3574 new_entry->object.uvm_obj->pgops->pgo_reference
3575 (new_entry->object.uvm_obj);
3578 return new_entry;
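The lines above are the body of the entry-clone path (uvm_mapent_clone, judging by the calls further down): the new entry takes over the old entry's object, offset, amap reference and attributes, then gains its own reference on the amap (amap_ref) and on the backing uvm_object (pgo_reference), so either entry can later be torn down without pulling the backing store out from under the other. Below is a minimal userland sketch of that copy-then-reference pattern; the types and names (backing_obj, map_ent, ent_clone) are illustrative, not the kernel's.

/*
 * Simplified model of the clone-and-reference pattern above.
 * backing_obj stands in for the amap/uvm_object pair.
 */
#include <stdio.h>
#include <stdlib.h>

struct backing_obj {
	int refcnt;			/* models the amap/object reference count */
};

struct map_ent {
	struct backing_obj *obj;	/* shared backing store */
	size_t offset;			/* offset into the backing store */
	int prot;			/* protection bits */
};

static void
obj_ref(struct backing_obj *o)
{
	o->refcnt++;			/* models amap_ref()/pgo_reference() */
}

/* Copy the descriptive fields, then take a new reference on the backing. */
static struct map_ent *
ent_clone(const struct map_ent *old, size_t off, int prot)
{
	struct map_ent *new = malloc(sizeof(*new));

	if (new == NULL)
		return NULL;
	*new = *old;			/* object, offset, prot copied wholesale */
	new->offset += off;		/* clone may start part-way into the object */
	new->prot = prot;
	if (new->obj != NULL)
		obj_ref(new->obj);	/* old and new now both hold a reference */
	return new;
}

int
main(void)
{
	struct backing_obj obj = { .refcnt = 1 };
	struct map_ent old = { .obj = &obj, .offset = 0, .prot = 3 };
	struct map_ent *new = ent_clone(&old, 4096, 1);

	printf("refcnt after clone: %d\n", obj.refcnt);	/* prints 2 */
	free(new);
	return 0;
}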
3592 * write later, old_entry and new_entry will refer to different memory
3617 struct vm_map_entry *new_entry;
3619 new_entry = uvm_mapent_share(new_map, old_entry->start,
3623 return (new_entry);
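The comment fragment above ("write later, old_entry and new_entry will refer to different memory") is the reason uvm_mapent_share resolves any pending needs-copy state before sharing: if it did not, a later copy-on-write fault would give parent and child private copies and the sharing would silently break. uvm_mapent_forkshared is the MAP_INHERIT_SHARE case of fork. The sketch below is a small userland demonstration of that inheritance mode, assuming an OpenBSD-style minherit(2) and MAP_INHERIT_SHARE in <sys/mman.h>; on other systems the call or constant may differ.

/*
 * MAP_INHERIT_SHARE demo: the child's write is visible to the parent
 * because fork goes through the forkshared path for this entry.
 */
#include <sys/mman.h>
#include <sys/wait.h>
#include <err.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int *p;
	pid_t pid;

	p = mmap(NULL, sizeof(*p), PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	if (p == MAP_FAILED)
		err(1, "mmap");
	*p = 1;

	/* Ask the kernel to share (not copy) this page across fork. */
	if (minherit(p, sizeof(*p), MAP_INHERIT_SHARE) == -1)
		err(1, "minherit");

	pid = fork();
	if (pid == -1)
		err(1, "fork");
	if (pid == 0) {
		*p = 2;			/* child writes through the shared entry */
		_exit(0);
	}
	waitpid(pid, NULL, 0);
	printf("parent sees %d\n", *p);	/* expected: 2, the mapping was shared */
	return 0;
}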
3630 * allocate new_entry, adjust reference counts.
3638 struct vm_map_entry *new_entry;
3641 new_entry = uvm_mapent_clone(new_map, old_entry->start,
3645 new_entry->etype |=
3683 amap_copy(new_map, new_entry, M_WAITOK, FALSE,
3705 amap_cow_now(new_map, new_entry);
3748 pmap_protect(new_map->pmap, new_entry->start,
3749 new_entry->end,
3750 new_entry->protection &
3755 return (new_entry);
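This fragment is uvm_mapent_forkcopy: the entry is cloned, marked copy-on-write/needs-copy via its etype bits, and the parent's range is write-protected (pmap_protect with the write bit masked off) so the first write by either side faults and gets a private page; amap_copy and amap_cow_now cover the case where the copy cannot be deferred. Below is a toy model of that arrangement, reduced to a reference count and a write routine; cow_page and cow_write are illustrative names, not kernel interfaces.

/*
 * Toy copy-on-write model: a shared page has refcnt > 1, and the first
 * write through a reference breaks the sharing before modifying data.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SZ 4096

struct cow_page {
	int refcnt;
	unsigned char data[PAGE_SZ];
};

/* Write through a page reference, copying first if the page is shared. */
static void
cow_write(struct cow_page **ref, size_t off, unsigned char val)
{
	struct cow_page *pg = *ref;

	if (pg->refcnt > 1) {
		struct cow_page *copy = malloc(sizeof(*copy));

		memcpy(copy->data, pg->data, PAGE_SZ);
		copy->refcnt = 1;
		pg->refcnt--;		/* the old page loses this reference */
		*ref = pg = copy;	/* this mapping now owns a private copy */
	}
	pg->data[off] = val;
}

int
main(void)
{
	struct cow_page *shared = calloc(1, sizeof(*shared));
	struct cow_page *parent = shared, *child = shared;

	shared->refcnt = 2;		/* both "entries" reference the page */
	cow_write(&child, 0, 7);	/* first write breaks the share */
	printf("parent=%d child=%d distinct=%d\n",
	    parent->data[0], child->data[0], parent != child);
	/* prints: parent=0 child=7 distinct=1 */
	return 0;
}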
3766 struct vm_map_entry *new_entry;
3768 new_entry = uvm_mapent_clone(new_map, old_entry->start,
3772 new_entry->etype |=
3775 if (new_entry->aref.ar_amap) {
3776 amap_unref(new_entry->aref.ar_amap, new_entry->aref.ar_pageoff,
3777 atop(new_entry->end - new_entry->start), 0);
3778 new_entry->aref.ar_amap = NULL;
3779 new_entry->aref.ar_pageoff = 0;
3782 if (UVM_ET_ISOBJ(new_entry)) {
3783 if (new_entry->object.uvm_obj->pgops->pgo_detach)
3784 new_entry->object.uvm_obj->pgops->pgo_detach(
3785 new_entry->object.uvm_obj);
3786 new_entry->object.uvm_obj = NULL;
3787 new_entry->etype &= ~UVM_ET_OBJ;
3790 return (new_entry);
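uvm_mapent_forkzero goes the other way: it clones the entry but drops the amap reference (amap_unref) and detaches the backing object (pgo_detach), leaving the child's entry with no backing at all, so it faults in fresh zero-filled pages. That is the MAP_INHERIT_ZERO behaviour, useful for keeping secrets out of children. The demo below assumes an OpenBSD-style minherit(2) with MAP_INHERIT_ZERO; the constant may not exist elsewhere.

/*
 * MAP_INHERIT_ZERO demo: the child keeps the mapping but none of its
 * contents, matching the forkzero path above.
 */
#include <sys/mman.h>
#include <sys/wait.h>
#include <err.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int *p, status;
	pid_t pid;

	p = mmap(NULL, sizeof(*p), PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	if (p == MAP_FAILED)
		err(1, "mmap");
	*p = 42;			/* a "secret" the child must not inherit */

	if (minherit(p, sizeof(*p), MAP_INHERIT_ZERO) == -1)
		err(1, "minherit");

	pid = fork();
	if (pid == -1)
		err(1, "fork");
	if (pid == 0)
		_exit(*p);		/* child should see zero-filled memory */
	waitpid(pid, &status, 0);
	printf("parent=%d child=%d\n", *p, WEXITSTATUS(status));
	/* expected: parent=42 child=0 */
	return 0;
}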
3806 struct vm_map_entry *old_entry, *new_entry;
3840 new_entry = uvm_mapent_forkshared(vm2, new_map,
3844 new_entry = uvm_mapent_forkcopy(vm2, new_map,
3848 new_entry = uvm_mapent_forkzero(vm2, new_map,
3856 if (!UVM_ET_ISHOLE(new_entry))
3857 new_map->size += new_entry->end - new_entry->start;
3858 if (!UVM_ET_ISOBJ(new_entry) && !UVM_ET_ISHOLE(new_entry) &&
3859 new_entry->protection != PROT_NONE) {
3861 new_map, new_entry->start, new_entry->end);
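The final fragment is the per-entry loop of the fork path (presumably uvm_map_fork): for each entry of the parent's map it picks forkshared, forkcopy or forkzero according to the entry's inheritance attribute, then adds the new entry's span to new_map->size unless the entry is a hole. The sketch below reproduces only that dispatch-and-account shape in userland; the enum values mirror MAP_INHERIT_*, but the types and helpers are illustrative, not the kernel's.

/*
 * Userland sketch of the per-entry fork dispatch and size accounting.
 */
#include <stddef.h>
#include <stdio.h>

enum inherit { INH_SHARE, INH_COPY, INH_ZERO, INH_NONE };

struct entry {
	size_t start, end;
	enum inherit inheritance;
	int is_hole;
};

static const char *
fork_one(const struct entry *e)
{
	switch (e->inheritance) {
	case INH_SHARE:	return "forkshared";	/* share the amap/object */
	case INH_COPY:	return "forkcopy";	/* set up copy-on-write */
	case INH_ZERO:	return "forkzero";	/* drop amap and object */
	default:	return NULL;		/* INH_NONE: not mapped in the child */
	}
}

int
main(void)
{
	struct entry map[] = {
		{ 0x1000, 0x3000, INH_COPY, 0 },
		{ 0x3000, 0x4000, INH_SHARE, 0 },
		{ 0x4000, 0x5000, INH_ZERO, 0 },
	};
	size_t i, new_size = 0;

	for (i = 0; i < sizeof(map) / sizeof(map[0]); i++) {
		const char *how = fork_one(&map[i]);

		if (how == NULL)
			continue;
		if (!map[i].is_hole)	/* holes don't count toward the map size */
			new_size += map[i].end - map[i].start;
		printf("%#zx-%#zx: %s\n", map[i].start, map[i].end, how);
	}
	printf("child map size: %#zx\n", new_size);
	return 0;
}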