Lines Matching refs:uobj
62 * uobj | d/c | | d/c | | V | +----+ |
68 * no amap or uobj is present. this is an error.
72 * I/O takes place in upper level anon and uobj is not touched.
76 * case [2]: lower layer fault [uobj]
77 * 2A: [read on non-NULL uobj] or [write to non-copy_on_write area]
79 * 2B: [write to copy_on_write] or [read on NULL uobj]
80 * data is "promoted" from uobj to a new anon.
81 * if uobj is null, then we zero fill.
108 * - ensure source page is resident (if uobj)
110 * fill if uobj == NULL)
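The matched comment lines above (file lines 62-110) describe how uvm_fault() classifies a fault before touching either layer: case [0] is an error, case [1] is handled entirely in the upper-layer anon, and case [2] either uses the uobj's pages directly or "promotes" data into a new anon (zero-filling when there is no uobj). As a rough illustration only, a user-space model of that decision tree might look like the sketch below; the fake_* types and classify_fault() are invented for the example and do not exist in UVM.

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical, simplified stand-ins for the two layers. */
    struct fake_anon { int an_ref; };                /* upper-layer page */
    struct fake_amap { struct fake_anon *anon; };    /* upper layer */
    struct fake_uobj { int unused; };                /* lower layer */

    enum fault_case { CASE_0_ERROR, CASE_1A, CASE_1B, CASE_2A, CASE_2B };

    static enum fault_case
    classify_fault(const struct fake_amap *amap, const struct fake_uobj *uobj,
        bool write, bool cow)
    {
        if (amap == NULL && uobj == NULL)
            return CASE_0_ERROR;          /* case [0]: layerless fault */
        if (amap != NULL && amap->anon != NULL) {
            if (!write || amap->anon->an_ref == 1)
                return CASE_1A;           /* I/O confined to the anon */
            return CASE_1B;               /* shared write: COW to a new anon */
        }
        if ((write && cow) || uobj == NULL)
            return CASE_2B;               /* promote (zero-fill if no uobj) */
        return CASE_2A;                   /* read/write the uobj's pages */
    }

    int
    main(void)
    {
        struct fake_anon an = { .an_ref = 2 };
        struct fake_amap shared = { .anon = &an };
        struct fake_amap empty = { .anon = NULL };
        struct fake_uobj uo = { 0 };

        printf("%d\n", classify_fault(&shared, &uo, true, true));  /* CASE_1B */
        printf("%d\n", classify_fault(NULL, &uo, false, false));   /* CASE_2A */
        printf("%d\n", classify_fault(&empty, NULL, true, false)); /* CASE_2B */
        return 0;
    }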
496 struct uvm_object *uobj = NULL;
501 uobj = uobjpage->uobject;
503 KASSERT(uobj == NULL || rw_lock_held(uobj->vmobjlock));
514 uvmfault_unlockall(ufi, amap, uobj);
674 struct uvm_object *uobj = ufi.entry->object.uvm_obj;
684 if (uobj != NULL && uobj->pgops->pgo_fault != NULL) {
685 rw_enter(uobj->vmobjlock, RW_WRITE);
687 error = uobj->pgops->pgo_fault(&ufi,
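The matches around file lines 684-687 show the generic fault path first checking whether the backing object provides its own pgo_fault method; if it does, the object lock is taken and the entire fault is delegated to that hook instead of running the two-layer logic. A minimal user-space model of that delegation, with invented types and no real locking, could be:

    #include <stddef.h>
    #include <stdio.h>

    struct fake_uobj;
    struct fake_pagerops {
        int (*pgo_fault)(struct fake_uobj *, unsigned long);  /* optional hook */
    };
    struct fake_uobj {
        const struct fake_pagerops *pgops;
    };

    static int
    device_fault(struct fake_uobj *uobj, unsigned long va)
    {
        (void)uobj;
        printf("object's own handler services va %#lx\n", va);
        return 0;
    }

    static int
    handle_fault(struct fake_uobj *uobj, unsigned long va)
    {
        /* Objects that supply pgo_fault take over the whole fault. */
        if (uobj != NULL && uobj->pgops->pgo_fault != NULL)
            return uobj->pgops->pgo_fault(uobj, va);
        printf("generic two-layer path for va %#lx\n", va);
        return 0;
    }

    int
    main(void)
    {
        const struct fake_pagerops devops = { .pgo_fault = device_fault };
        struct fake_uobj dev = { .pgops = &devops };

        handle_fault(&dev, 0x1000UL);   /* delegated to the object */
        handle_fault(NULL, 0x2000UL);   /* generic path */
        return 0;
    }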
722 struct uvm_object *uobj;
784 uobj = ufi->entry->object.uvm_obj; /* lower layer */
790 if (amap == NULL && uobj == NULL) {
799 if (uobj != NULL && amap != NULL &&
858 if (uobj) {
862 rw_enter(uobj->vmobjlock, RW_WRITE);
863 (void) uobj->pgops->pgo_flush(uobj, uoff, uoff +
865 rw_exit(uobj->vmobjlock);
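The matches at file lines 858-865 are the "deactivate pages behind us" step for sequential access: when a backing object exists, its vmobjlock is taken for writing, pgo_flush() is asked to deactivate a range of pages behind the faulting offset, and the lock is released. In the kernel that range covers the pages already passed over; the sketch below models the same lock/flush-range/unlock pattern in user space with a pthread rwlock and invented names.

    #include <pthread.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    /* Invented stand-in for a uobj with its own lock and a flush hook. */
    struct fake_uobj {
        pthread_rwlock_t vmobjlock;
        void (*flush)(struct fake_uobj *, unsigned long, unsigned long);
    };

    static void
    deactivate_range(struct fake_uobj *uobj, unsigned long start, unsigned long end)
    {
        (void)uobj;
        printf("deactivating [%#lx, %#lx)\n", start, end);
    }

    /*
     * uoff is the start of the nback pages just behind the fault; the
     * flush runs while the object lock is held for writing.
     */
    static void
    deactivate_behind(struct fake_uobj *uobj, unsigned long uoff, int nback)
    {
        if (uobj == NULL || nback == 0)
            return;
        pthread_rwlock_wrlock(&uobj->vmobjlock);
        uobj->flush(uobj, uoff, uoff + ((unsigned long)nback << PAGE_SHIFT));
        pthread_rwlock_unlock(&uobj->vmobjlock);
    }

    int
    main(void)
    {
        struct fake_uobj uo = {
            .vmobjlock = PTHREAD_RWLOCK_INITIALIZER,
            .flush = deactivate_range,
        };

        deactivate_behind(&uo, 0x20000UL, 4);   /* 4 pages behind the fault */
        return 0;
    }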
1138 * uvm_fault_lower_lookup: look up on-memory uobj pages.
1150 struct uvm_object *uobj = ufi->entry->object.uvm_obj;
1156 rw_enter(uobj->vmobjlock, flt->lower_lock_type);
1160 (void) uobj->pgops->pgo_get(uobj,
1233 * 1. check uobj
1239 * 3. if uobj is not ZFOD and page is not found, do i/o.
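The matched lines from the uvm_fault_lower() comment (file lines 1233-1239) summarize the lower-layer flow: a NULL uobj means zero-fill-on-demand, a resident uobj page can be mapped (or promoted) directly, and a page that is not resident requires I/O. A toy model of that lookup decision, again with invented names, might be:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    enum lower_action { LOWER_ZERO_FILL, LOWER_USE_RESIDENT, LOWER_DO_IO };

    /* Invented stand-in: a uobj with a trivial residency check. */
    struct fake_uobj {
        bool (*page_resident)(const struct fake_uobj *, unsigned long off);
    };

    static enum lower_action
    lower_lookup(const struct fake_uobj *uobj, unsigned long off)
    {
        if (uobj == NULL)
            return LOWER_ZERO_FILL;        /* ZFOD: no backing object */
        if (uobj->page_resident(uobj, off))
            return LOWER_USE_RESIDENT;     /* map (or promote) the page */
        return LOWER_DO_IO;                /* unlock, read it in, refault */
    }

    static bool
    even_pages_resident(const struct fake_uobj *uobj, unsigned long off)
    {
        (void)uobj;
        return ((off >> 12) & 1UL) == 0;   /* arbitrary toy policy */
    }

    int
    main(void)
    {
        struct fake_uobj uo = { .page_resident = even_pages_resident };

        printf("%d\n", lower_lookup(NULL, 0x1000UL));   /* LOWER_ZERO_FILL */
        printf("%d\n", lower_lookup(&uo, 0x2000UL));    /* LOWER_USE_RESIDENT */
        printf("%d\n", lower_lookup(&uo, 0x3000UL));    /* LOWER_DO_IO */
        return 0;
    }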
1247 struct uvm_object *uobj = ufi->entry->object.uvm_obj;
1260 if (uobj == NULL) {
1270 * faulted on). if we have faulted on the bottom (uobj)
1281 KASSERT(uobj == NULL ||
1282 rw_status(uobj->vmobjlock) == flt->lower_lock_type);
1290 if (uobj == NULL) {
1316 error = uvm_fault_lower_io(ufi, flt, &uobj, &uobjpage);
1350 KASSERT(uobj == NULL ||
1351 rw_status(uobj->vmobjlock) == flt->lower_lock_type);
1388 rw_exit(uobj->vmobjlock);
1389 uobj = NULL;
1406 uvmfault_unlockall(ufi, amap, uobj);
1420 * anon must be write locked (promotion). uobj can be either.
1426 KASSERT(uobj == NULL ||
1427 rw_status(uobj->vmobjlock) == flt->lower_lock_type);
1449 uvmfault_unlockall(ufi, amap, uobj);
1477 KASSERT(uobj != NULL);
1478 KASSERT(uobj->vmobjlock == pg->uobject->vmobjlock);
1488 uao_dropswap(uobj, pg->offset >> PAGE_SHIFT);
1495 uvmfault_unlockall(ufi, amap, uobj);
1515 struct uvm_object *uobj = *ruobj;
1533 KASSERT(rw_write_held(uobj->vmobjlock));
1538 result = uobj->pgops->pgo_get(uobj, uoff, &pg, &gotpages,
1556 uobj = NULL;
1567 uobj = pg->uobject;
1568 rw_enter(uobj->vmobjlock, flt->lower_lock_type);
1598 rw_exit(uobj->vmobjlock);
1606 *ruobj = uobj;
1762 struct uvm_object *uobj)
1764 if (uobj)
1765 rw_exit(uobj->vmobjlock);
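The final matches (file lines 1762-1765) appear to be part of a teardown path that drops the object lock only when a uobj is actually present, the same idiom behind the uvmfault_unlockall() calls seen earlier in the listing: each lock is released only if the corresponding layer took part in the fault. A user-space sketch of that idiom, with pthread rwlocks and hypothetical names, is below.

    #include <pthread.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Invented stand-ins for the two lockable layers. */
    struct fake_amap { pthread_rwlock_t lock; };
    struct fake_uobj { pthread_rwlock_t vmobjlock; };

    /*
     * Release whatever was taken for this fault; callers pass NULL for a
     * layer that was never locked, mirroring "if (uobj) rw_exit(...)".
     */
    static void
    fault_unlockall(struct fake_amap *amap, struct fake_uobj *uobj)
    {
        if (uobj != NULL)
            pthread_rwlock_unlock(&uobj->vmobjlock);
        if (amap != NULL)
            pthread_rwlock_unlock(&amap->lock);
    }

    int
    main(void)
    {
        struct fake_uobj uo = { .vmobjlock = PTHREAD_RWLOCK_INITIALIZER };

        /* Lower-layer-only fault: only the object lock was taken. */
        pthread_rwlock_rdlock(&uo.vmobjlock);
        fault_unlockall(NULL, &uo);

        printf("all locks released\n");
        return 0;
    }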