Lines Matching defs:ufi

91  *     - init the "IN" params in the ufi structure
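
These matches are evidently from OpenBSD's sys/uvm/uvm_fault.c (the uvmfault_*/uvm_fault_* names and the two-layer amap/uobj handling below belong to that file). The "IN" params are the orig_* fields of struct uvm_faultinfo, the cursor that every routine in this listing passes around. A sketch of the structure as the matches imply it (the field names are taken from the lines below; the exact types are an assumption):

    struct uvm_faultinfo {
            vm_map_t orig_map;          /* IN: original map */
            vaddr_t orig_rvaddr;        /* IN: original rounded VA */
            vsize_t orig_size;          /* IN: original size of interest */
            vm_map_t map;               /* map (may be a submap) */
            unsigned int mapv;          /* map's version number */
            vm_map_entry_t entry;       /* map entry (from 'map') */
            vsize_t size;               /* size of interest */
    };

The orig_* fields are set once by the faulting code (lines 652-654) and never change; map, mapv, entry and size are the OUT values that uvmfault_lookup() (line 1787) fills in, possibly after descending through submaps.
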
229 uvmfault_amapcopy(struct uvm_faultinfo *ufi)
235 if (uvmfault_lookup(ufi, TRUE) == FALSE)
241 if (UVM_ET_ISNEEDSCOPY(ufi->entry))
242 amap_copy(ufi->map, ufi->entry, M_NOWAIT,
243 UVM_ET_ISSTACK(ufi->entry) ? FALSE : TRUE,
244 ufi->orig_rvaddr, ufi->orig_rvaddr + 1);
249 if (UVM_ET_ISNEEDSCOPY(ufi->entry)) {
250 uvmfault_unlockmaps(ufi, TRUE);
258 uvmfault_unlockmaps(ufi, TRUE);
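
uvmfault_amapcopy() gives a needs-copy map entry its own private amap so a write fault can proceed. The matched lines 229-258 reassemble into this loop (a sketch; the uvm_wait() call and the comments are reconstructed, not part of the match output):

    static void
    uvmfault_amapcopy(struct uvm_faultinfo *ufi)
    {
            for (;;) {
                    /* no mapping?  give up. */
                    if (uvmfault_lookup(ufi, TRUE) == FALSE)
                            return;

                    /* copy if needed (M_NOWAIT: we hold the map lock) */
                    if (UVM_ET_ISNEEDSCOPY(ufi->entry))
                            amap_copy(ufi->map, ufi->entry, M_NOWAIT,
                                UVM_ET_ISSTACK(ufi->entry) ? FALSE : TRUE,
                                ufi->orig_rvaddr, ufi->orig_rvaddr + 1);

                    /* still needs-copy?  out of RAM: unlock, sleep, retry */
                    if (UVM_ET_ISNEEDSCOPY(ufi->entry)) {
                            uvmfault_unlockmaps(ufi, TRUE);
                            uvm_wait("fltamapcopy");
                            continue;
                    }

                    /* got it!  unlock and return. */
                    uvmfault_unlockmaps(ufi, TRUE);
                    return;
            }
    }

Note the TRUE passed to uvmfault_lookup() and uvmfault_unlockmaps(): amap_copy() modifies the map entry, so this is the rare path that takes the map lock for writing.
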
276 uvmfault_anonget(struct uvm_faultinfo *ufi, struct vm_amap *amap,
325 uvmfault_unlockall(ufi, NULL, NULL);
334 uvmfault_unlockall(ufi, amap, NULL);
340 uvmfault_unlockall(ufi, amap, NULL);
363 locked = uvmfault_relock(ufi);
394 uvmfault_unlockall(ufi, NULL, NULL);
425 uvmfault_unlockall(ufi, NULL, NULL);
462 if (ufi != NULL && amap_lookup(&ufi->entry->aref,
463 ufi->orig_rvaddr - ufi->entry->start) != anon) {
464 uvmfault_unlockall(ufi, amap, NULL);
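
uvmfault_anonget() shows the fault handler's central discipline: whenever it must sleep (page busy, pagein I/O, no free memory), it drops every lock, sleeps, then relocks and re-verifies what it looked up. Lines 363 and 462-464 are that re-verification; in context it looks roughly like this (a sketch; the error return is an assumption):

    locked = uvmfault_relock(ufi);
    if (locked) {
            /*
             * while we slept, another thread may have replaced the
             * anon in this amap slot.  if so, everything we hold is
             * stale: unlock and make the caller refault.
             */
            if (ufi != NULL && amap_lookup(&ufi->entry->aref,
                ufi->orig_rvaddr - ufi->entry->start) != anon) {
                    uvmfault_unlockall(ufi, amap, NULL);
                    return (ERESTART);      /* assumed refault code */
            }
    }
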
490 uvmfault_promote(struct uvm_faultinfo *ufi,
495 struct vm_amap *amap = ufi->entry->aref.ar_amap;
514 uvmfault_unlockall(ufi, amap, uobj);
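
uvmfault_promote() allocates a fresh anon and page and copies the source page into it, promoting data from the lower (object) layer, or from a shared anon, up into the process-private amap layer. On failure it unlocks everything itself (line 514), which keeps the callers at lines 1035 and 1357 simple; a sketch of the calling convention (the error handling is assumed):

    /* promote: copy uobjpage into a brand new anon/page pair */
    error = uvmfault_promote(ufi, uobjpage, &anon, &pg);
    if (error != 0)
            return (error);     /* locks were already dropped for us */
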
551 uvmfault_update_stats(struct uvm_faultinfo *ufi)
557 map = ufi->orig_map;
639 struct uvm_faultinfo ufi;
650 * init the IN parameters in the ufi
652 ufi.orig_map = orig_map;
653 ufi.orig_rvaddr = trunc_page(vaddr);
654 ufi.orig_size = PAGE_SIZE; /* can't get any smaller than this */
664 error = uvm_fault_check(&ufi, &flt, &anons, fault_type);
669 shadowed = uvm_fault_upper_lookup(&ufi, &flt, anons, pages);
672 error = uvm_fault_upper(&ufi, &flt, anons);
674 struct uvm_object *uobj = ufi.entry->object.uvm_obj;
687 error = uobj->pgops->pgo_fault(&ufi,
694 error = uvm_fault_lower(&ufi, &flt, pages);
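
uvm_fault() itself (line 639 on) is just setup plus a dispatcher: fill in the ufi IN params once, then loop, handing off to uvm_fault_upper() when an anon shadows the address, to the object's own pgo_fault() when the pager supplies one, or to uvm_fault_lower() otherwise. A sketch of the shape (the retry loop and the elided declarations and arguments are reconstructed around the matched lines):

    int
    uvm_fault(vm_map_t orig_map, vaddr_t vaddr, vm_fault_t fault_type,
        vm_prot_t access_type)
    {
            struct uvm_faultinfo ufi;
            struct uvm_faultctx flt;
            struct vm_anon **anons;
            struct vm_page *pages[UVM_MAXRANGE];
            boolean_t shadowed;
            int error;

            /* init the IN parameters in the ufi */
            ufi.orig_map = orig_map;
            ufi.orig_rvaddr = trunc_page(vaddr);
            ufi.orig_size = PAGE_SIZE;  /* can't get any smaller than this */
            /* ... flt setup elided ... */

            error = ERESTART;
            while (error == ERESTART) { /* ERESTART: drop everything, refault */
                    error = uvm_fault_check(&ufi, &flt, &anons, fault_type);
                    if (error != 0)
                            continue;

                    /* premap resident anons; is the center page shadowed? */
                    shadowed = uvm_fault_upper_lookup(&ufi, &flt, anons, pages);
                    if (shadowed == TRUE) {
                            error = uvm_fault_upper(&ufi, &flt, anons);
                    } else {
                            struct uvm_object *uobj = ufi.entry->object.uvm_obj;

                            if (uobj != NULL && uobj->pgops->pgo_fault != NULL) {
                                    /* pager with its own fault routine */
                                    error = uobj->pgops->pgo_fault(&ufi,
                                        /* args elided in the match output */);
                            } else {
                                    error = uvm_fault_lower(&ufi, &flt, pages);
                            }
                    }
            }
            return (error);
    }
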
718 uvm_fault_check(struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
728 if (uvmfault_lookup(ufi, FALSE) == FALSE) {
734 if ((ufi->map->flags & VM_MAP_PAGEABLE) == 0)
736 ufi->map, ufi->orig_rvaddr);
742 if ((ufi->entry->protection & flt->access_type) != flt->access_type) {
743 uvmfault_unlockmaps(ufi, FALSE);
750 * be more strict than ufi->entry->protection. "wired" means either
753 flt->enter_prot = ufi->entry->protection;
754 flt->pa_flags = UVM_ET_ISWC(ufi->entry) ? PMAP_WC : 0;
755 if (VM_MAPENT_ISWIRED(ufi->entry) || (fault_type == VM_FAULT_WIRE)) {
763 if (UVM_ET_ISNEEDSCOPY(ufi->entry)) {
765 (ufi->entry->object.uvm_obj == NULL)) {
767 uvmfault_unlockmaps(ufi, FALSE);
768 uvmfault_amapcopy(ufi);
783 amap = ufi->entry->aref.ar_amap; /* upper layer */
784 uobj = ufi->entry->object.uvm_obj; /* lower layer */
791 uvmfault_unlockmaps(ufi, FALSE);
814 nback = min(uvmadvice[ufi->entry->advice].nback,
815 (ufi->orig_rvaddr - ufi->entry->start) >> PAGE_SHIFT);
816 flt->startva = ufi->orig_rvaddr - ((vsize_t)nback << PAGE_SHIFT);
817 nforw = min(uvmadvice[ufi->entry->advice].nforw,
818 ((ufi->entry->end - ufi->orig_rvaddr) >> PAGE_SHIFT) - 1);
830 flt->startva = ufi->orig_rvaddr;
840 amap_lookups(&ufi->entry->aref,
841 flt->startva - ufi->entry->start, *ranons, flt->npages);
850 if (ufi->entry->advice == MADV_SEQUENTIAL && nback != 0) {
861 uoff = (flt->startva - ufi->entry->start) + ufi->entry->offset;
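
uvm_fault_check() also decides how many pages around the faulting address to handle at once: the madvise() advice on the entry gives desired back/forward counts (uvmadvice[]), clamped so the range never leaves the map entry. From lines 814-818, plus the two derived fields the listing omits because they don't mention ufi (their exact names are an assumption):

    nback = min(uvmadvice[ufi->entry->advice].nback,
        (ufi->orig_rvaddr - ufi->entry->start) >> PAGE_SHIFT);
    flt->startva = ufi->orig_rvaddr - ((vsize_t)nback << PAGE_SHIFT);
    nforw = min(uvmadvice[ufi->entry->advice].nforw,
        ((ufi->entry->end - ufi->orig_rvaddr) >> PAGE_SHIFT) - 1);

    flt->npages = nback + nforw + 1;    /* pages in the range */
    flt->centeridx = nback;             /* index of the faulting page */

So with MADV_NORMAL a fault services a small neighborhood centered on orig_rvaddr, while line 850 shows MADV_SEQUENTIAL getting special handling for the pages behind the faulting address.
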
890 uvm_fault_upper_lookup(struct uvm_faultinfo *ufi,
894 struct vm_amap *amap = ufi->entry->aref.ar_amap;
941 !pmap_extract(ufi->orig_map->pmap, currva, &pa)) {
955 (void) pmap_enter(ufi->orig_map->pmap, currva,
964 pmap_update(ufi->orig_map->pmap);
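
uvm_fault_upper_lookup() walks the anons covering the chosen range: it records whether the center page is shadowed, and opportunistically pmap_enter()s any neighbor whose page is already resident, saving a future fault. The matched lines 941-964 fit a loop of roughly this shape (a sketch; the resident and busy checks are reconstructed):

    for (lcv = 0; lcv < flt->npages; lcv++, currva += PAGE_SIZE) {
            anon = anons[lcv];
            if (anon == NULL || anon->an_page == NULL)
                    continue;               /* nothing resident here */

            /* skip the center page and anything already mapped */
            if (lcv == flt->centeridx ||
                pmap_extract(ufi->orig_map->pmap, currva, &pa))
                    continue;

            /* map it in now; PMAP_CANFAIL: this is only a hint */
            (void) pmap_enter(ufi->orig_map->pmap, currva,
                VM_PAGE_TO_PHYS(anon->an_page) | flt->pa_flags,
                flt->enter_prot, PMAP_CANFAIL);
    }
    pmap_update(ufi->orig_map->pmap);   /* flush deferred pmap work */

(The real code also strips write permission when the anon is shared, so a later write still faults and triggers copy-on-write.)
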
978 uvm_fault_upper(struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
981 struct vm_amap *amap = ufi->entry->aref.ar_amap;
1000 error = uvmfault_anonget(ufi, amap, anon);
1035 error = uvmfault_promote(ufi, oanon->an_page, &anon, &pg);
1042 ret = amap_add(&ufi->entry->aref,
1043 ufi->orig_rvaddr - ufi->entry->start, anon, 1);
1083 if (pmap_enter(ufi->orig_map->pmap, ufi->orig_rvaddr,
1093 uvmfault_unlockall(ufi, amap, NULL);
1099 pmap_populate(ufi->orig_map->pmap, ufi->orig_rvaddr);
1132 uvmfault_unlockall(ufi, amap, NULL);
1133 pmap_update(ufi->orig_map->pmap);
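
uvm_fault_upper() resolves the fault when an anon already covers the address. On a write to an anon with other references it promotes first (line 1035 copies oanon's page into a fresh anon, line 1042 installs it in the amap), then enters the page into the pmap. The failure path at lines 1083-1099 is worth seeing in shape (a sketch; the exact flags and the pmap_populate() fallback vary by port and version):

    if (pmap_enter(ufi->orig_map->pmap, ufi->orig_rvaddr,
        VM_PAGE_TO_PHYS(pg) | flt->pa_flags, flt->enter_prot,
        flt->access_type | PMAP_CANFAIL |
        (flt->wired ? PMAP_WIRED : 0)) != 0) {
            /*
             * pmap_enter() can fail for lack of pmap structures;
             * drop every lock, let the pmap catch up, and refault.
             */
            uvmfault_unlockall(ufi, amap, NULL);
            pmap_populate(ufi->orig_map->pmap, ufi->orig_rvaddr);
            return (ERESTART);
    }

    uvmfault_unlockall(ufi, amap, NULL);
    pmap_update(ufi->orig_map->pmap);
    return (0);
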
1147 struct uvm_faultinfo *ufi, const struct uvm_faultctx *flt,
1150 struct uvm_object *uobj = ufi->entry->object.uvm_obj;
1161 ufi->entry->offset + (flt->startva - ufi->entry->start),
1163 flt->access_type & MASK(ufi->entry), ufi->entry->advice,
1193 if (pmap_extract(ufi->orig_map->pmap, currva, &pa))
1218 (void) pmap_enter(ufi->orig_map->pmap, currva,
1220 flt->enter_prot & MASK(ufi->entry), PMAP_CANFAIL);
1225 pmap_update(ufi->orig_map->pmap);
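
uvm_fault_lower_lookup() is the lower-layer mirror of upper_lookup: it asks the object's pager for whatever is already resident, without sleeping, and premaps the neighbors (lines 1193-1225). Lines 1161-1163 give away the pgo_get call almost completely (a sketch; the gotpages handling is reconstructed):

    gotpages = flt->npages;
    (void) uobj->pgops->pgo_get(uobj,
        ufi->entry->offset + (flt->startva - ufi->entry->start),
        pages, &gotpages, flt->centeridx,
        flt->access_type & MASK(ufi->entry), ufi->entry->advice,
        PGO_LOCKED);

PGO_LOCKED means "return only what you have resident, never sleep", and MASK(entry) strips write permission for copy-on-write entries, so the premapped neighbors (lines 1218-1220) stay read-only and still fault on write.
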
1243 uvm_fault_lower(struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
1246 struct vm_amap *amap = ufi->entry->aref.ar_amap;
1247 struct uvm_object *uobj = ufi->entry->object.uvm_obj;
1264 uobjpage = uvm_fault_lower_lookup(ufi, flt, pages);
1296 UVM_ET_ISCOPYONWRITE(ufi->entry);
1316 error = uvm_fault_lower_io(ufi, flt, &uobj, &uobjpage);
1336 if (UVM_ET_ISCOPYONWRITE(ufi->entry))
1357 error = uvmfault_promote(ufi, uobjpage, &anon, &pg);
1398 if (amap_add(&ufi->entry->aref,
1399 ufi->orig_rvaddr - ufi->entry->start, anon, 0)) {
1406 uvmfault_unlockall(ufi, amap, uobj);
1413 amap_populate(&ufi->entry->aref,
1414 ufi->orig_rvaddr - ufi->entry->start);
1434 if (pmap_enter(ufi->orig_map->pmap, ufi->orig_rvaddr,
1449 uvmfault_unlockall(ufi, amap, uobj);
1455 pmap_populate(ufi->orig_map->pmap, ufi->orig_rvaddr);
1495 uvmfault_unlockall(ufi, amap, uobj);
1496 pmap_update(ufi->orig_map->pmap);
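
uvm_fault_lower() finishes faults the amap layer doesn't cover: zero-fill, or pages from the backing object (fetched via uvm_fault_lower_io() at line 1316 when not already resident). Line 1296 carries the key decision, write access plus a copy-on-write entry forces promotion into the amap; the tail of that path (lines 1357-1414) looks roughly like this (a sketch):

    promote = (flt->access_type & PROT_WRITE) &&
        UVM_ET_ISCOPYONWRITE(ufi->entry);
    /* ... */
    if (promote) {
            /* copy the object's page into a fresh anon */
            error = uvmfault_promote(ufi, uobjpage, &anon, &pg);
            if (error != 0)
                    return (error);

            /* install the anon (0: adding, not replacing, a slot) */
            if (amap_add(&ufi->entry->aref,
                ufi->orig_rvaddr - ufi->entry->start, anon, 0)) {
                    /*
                     * amap ran out of memory: unlock, pre-allocate
                     * the slot so the retry cannot fail the same
                     * way, and refault.
                     */
                    uvmfault_unlockall(ufi, amap, uobj);
                    amap_populate(&ufi->entry->aref,
                        ufi->orig_rvaddr - ufi->entry->start);
                    return (ERESTART);
            }
    }

After that, the enter/unlock/pmap_update endgame (lines 1434-1496) is the same dance as in uvm_fault_upper().
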
1511 struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
1514 struct vm_amap * const amap = ufi->entry->aref.ar_amap;
1524 uoff = (ufi->orig_rvaddr - ufi->entry->start) + ufi->entry->offset;
1525 access_type = flt->access_type & MASK(ufi->entry);
1526 advice = ufi->entry->advice;
1528 uvmfault_unlockall(ufi, amap, NULL);
1552 if (!UVM_ET_ISNOFAULT(ufi->entry))
1561 locked = uvmfault_relock(ufi);
1577 if (locked && amap && amap_lookup(&ufi->entry->aref,
1578 ufi->orig_rvaddr - ufi->entry->start)) {
1580 uvmfault_unlockall(ufi, amap, NULL);
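
uvm_fault_lower_io() drops every lock before calling the pager (a pgo_get without PGO_LOCKED may sleep in I/O), so afterwards it must redo its checks, the same pattern as uvmfault_anonget(). Lines 1561-1580 in shape (a sketch):

    locked = uvmfault_relock(ufi);

    /*
     * while we slept, an anon may have been installed at our
     * offset.  if so, the upper layer now owns this address and
     * our object page is irrelevant: drop locks and refault.
     */
    if (locked && amap && amap_lookup(&ufi->entry->aref,
        ufi->orig_rvaddr - ufi->entry->start)) {
            uvmfault_unlockall(ufi, amap, NULL);
            locked = FALSE;
    }

Line 1552 shows the other subtlety: a pager I/O error propagates out of the fault only when the entry lacks UVM_ET_ISNOFAULT.
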
1737 uvmfault_unlockmaps(struct uvm_faultinfo *ufi, boolean_t write_locked)
1740 * ufi can be NULL when this isn't really a fault,
1743 if (ufi == NULL) {
1747 uvmfault_update_stats(ufi);
1749 vm_map_unlock(ufi->map);
1751 vm_map_unlock_read(ufi->map);
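
uvmfault_unlockmaps() is tiny, but its NULL convention matters: callers pass a NULL ufi when they want the unlock protocol without a real fault in progress. Assembled from lines 1737-1751 (the tail of the comment is reconstructed):

    void
    uvmfault_unlockmaps(struct uvm_faultinfo *ufi, boolean_t write_locked)
    {
            /*
             * ufi can be NULL when this isn't really a fault,
             * but merely paging in anon data.
             */
            if (ufi == NULL)
                    return;

            uvmfault_update_stats(ufi);
            if (write_locked)
                    vm_map_unlock(ufi->map);
            else
                    vm_map_unlock_read(ufi->map);
    }
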
1761 uvmfault_unlockall(struct uvm_faultinfo *ufi, struct vm_amap *amap,
1768 uvmfault_unlockmaps(ufi, FALSE);
1787 uvmfault_lookup(struct uvm_faultinfo *ufi, boolean_t write_lock)
1792 * init ufi values for lookup.
1794 ufi->map = ufi->orig_map;
1795 ufi->size = ufi->orig_size;
1802 if (ufi->orig_rvaddr < ufi->map->min_offset ||
1803 ufi->orig_rvaddr >= ufi->map->max_offset)
1808 vm_map_lock(ufi->map);
1810 vm_map_lock_read(ufi->map);
1814 if (!uvm_map_lookup_entry(ufi->map, ufi->orig_rvaddr,
1815 &ufi->entry)) {
1816 uvmfault_unlockmaps(ufi, write_lock);
1821 if (ufi->entry->end - ufi->orig_rvaddr < ufi->size)
1822 ufi->size = ufi->entry->end - ufi->orig_rvaddr;
1828 if (UVM_ET_ISSUBMAP(ufi->entry)) {
1829 tmpmap = ufi->entry->object.sub_map;
1830 uvmfault_unlockmaps(ufi, write_lock);
1831 ufi->map = tmpmap;
1838 ufi->mapv = ufi->map->timestamp;
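
uvmfault_lookup() turns the caller's orig_* values into a locked map plus entry, descending through submaps as needed, and records the map timestamp that uvmfault_relock() will check later. The matched lines 1794-1838 cover nearly the whole function; reassembled (a sketch, comments added):

    boolean_t
    uvmfault_lookup(struct uvm_faultinfo *ufi, boolean_t write_lock)
    {
            vm_map_t tmpmap;

            /* init ufi values for lookup. */
            ufi->map = ufi->orig_map;
            ufi->size = ufi->orig_size;

            /* loop over any submaps */
            while (1) {
                    /* address out of this map's range? */
                    if (ufi->orig_rvaddr < ufi->map->min_offset ||
                        ufi->orig_rvaddr >= ufi->map->max_offset)
                            return FALSE;

                    if (write_lock)
                            vm_map_lock(ufi->map);
                    else
                            vm_map_lock_read(ufi->map);

                    if (!uvm_map_lookup_entry(ufi->map, ufi->orig_rvaddr,
                        &ufi->entry)) {
                            uvmfault_unlockmaps(ufi, write_lock);
                            return FALSE;
                    }

                    /* reduce size if the entry ends before our range */
                    if (ufi->entry->end - ufi->orig_rvaddr < ufi->size)
                            ufi->size = ufi->entry->end - ufi->orig_rvaddr;

                    /* submap?  drop the lock and descend into it */
                    if (UVM_ET_ISSUBMAP(ufi->entry)) {
                            tmpmap = ufi->entry->object.sub_map;
                            uvmfault_unlockmaps(ufi, write_lock);
                            ufi->map = tmpmap;
                            continue;
                    }

                    /* got it: save the version for uvmfault_relock() */
                    ufi->mapv = ufi->map->timestamp;
                    return TRUE;
            }
    }
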
1853 uvmfault_relock(struct uvm_faultinfo *ufi)
1856 * ufi can be NULL when this isn't really a fault,
1859 if (ufi == NULL) {
1869 vm_map_lock_read(ufi->map);
1870 if (ufi->mapv != ufi->map->timestamp) {
1871 vm_map_unlock_read(ufi->map);
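
uvmfault_relock() closes the loop on the drop-locks-and-sleep pattern: rather than revalidating the cached entry field by field, it relocks the map read-only and compares the timestamp saved by uvmfault_lookup(); any map change at all fails the relock and forces a refault. Reassembled from lines 1853-1871 (a sketch; the real function also bumps fault counters):

    boolean_t
    uvmfault_relock(struct uvm_faultinfo *ufi)
    {
            /*
             * ufi can be NULL when this isn't really a fault,
             * but merely paging in anon data.
             */
            if (ufi == NULL)
                    return TRUE;

            /* relock map; fail on version mismatch, locking nothing */
            vm_map_lock_read(ufi->map);
            if (ufi->mapv != ufi->map->timestamp) {
                    vm_map_unlock_read(ufi->map);
                    return FALSE;
            }
            return TRUE;
    }
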