Lines matching defs:uobj
Cross-reference hits for the uvm_object pointer uobj; judging by the uvn_*() names and the rwsleep_nsec() calls, the file is OpenBSD's sys/uvm/uvm_vnode.c. Each entry below is the source line number followed by the matching line.

275 uvn_reference(struct uvm_object *uobj)
278 struct uvm_vnode *uvn = (struct uvm_vnode *) uobj;
281 rw_enter(uobj->vmobjlock, RW_WRITE);
285 uobj->uo_refs, uvn->u_flags);
289 uobj->uo_refs++;
290 rw_exit(uobj->vmobjlock);
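
The 275-290 hits cover all of uvn_reference(): taking a reference is nothing more than an increment of uo_refs done with vmobjlock held for writing. A minimal compilable userland sketch of the same pattern, with pthreads standing in for the kernel rwlock (every name here is invented for illustration):

#include <pthread.h>

struct my_object {
	pthread_rwlock_t lock;	/* plays the role of uobj->vmobjlock */
	int refs;		/* plays the role of uobj->uo_refs */
};

/* Take a new reference, as uvn_reference() does at lines 281-290. */
void
obj_reference(struct my_object *o)
{
	pthread_rwlock_wrlock(&o->lock);	/* rw_enter(..., RW_WRITE) */
	o->refs++;				/* uobj->uo_refs++ */
	pthread_rwlock_unlock(&o->lock);	/* rw_exit(...) */
}
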
303 uvn_detach(struct uvm_object *uobj)
309 rw_enter(uobj->vmobjlock, RW_WRITE);
310 uobj->uo_refs--; /* drop ref! */
311 if (uobj->uo_refs) { /* still more refs */
312 rw_exit(uobj->vmobjlock);
318 uvn = (struct uvm_vnode *) uobj;
333 uvn_flush(uobj, 0, 0, PGO_DEACTIVATE|PGO_ALLPAGES);
349 (void) uvn_flush(uobj, 0, 0, PGO_CLEANIT|PGO_FREE|PGO_ALLPAGES);
361 while (uobj->uo_npages && uvn->u_flags & UVM_VNODE_RELKILL) {
363 rwsleep_nsec(&uvn->u_nio, uobj->vmobjlock, PVM, "uvn_term",
368 rw_exit(uobj->vmobjlock);
380 KASSERT(RBT_EMPTY(uvm_objtree, &uobj->memt));
388 rw_exit(uobj->vmobjlock);
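
uvn_detach() (303-388) is the mirror image: drop one reference under the write lock and return early while others remain; only the last detach goes on to deactivate the pages (333), clean and free them (349), wait for in-flight I/O to drain (361-363), and assert the page tree is empty (380). A sketch of that control flow, continuing the analogue above (the teardown body is compressed into comments):

/* Release a reference; tear the object down only on the last one. */
void
obj_detach(struct my_object *o)
{
	pthread_rwlock_wrlock(&o->lock);
	if (--o->refs > 0) {			/* "still more refs" */
		pthread_rwlock_unlock(&o->lock);
		return;
	}
	/*
	 * Last reference.  In uvn_detach() this is where pages are
	 * deactivated (PGO_DEACTIVATE|PGO_ALLPAGES), then cleaned and
	 * freed (PGO_CLEANIT|PGO_FREE|PGO_ALLPAGES), and where the
	 * thread sleeps until in-flight I/O drains; that drain loop is
	 * sketched after the 424-532 block below.
	 */
	pthread_rwlock_unlock(&o->lock);
}
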
424 struct uvm_object *uobj = &uvn->u_obj;
428 rw_enter(uobj->vmobjlock, RW_WRITE);
430 rw_exit(uobj->vmobjlock);
497 rwsleep_nsec(&uvn->u_nio, uobj->vmobjlock, PVM, "uvn_term",
532 rw_exit(uobj->vmobjlock);
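
The 424-532 block belongs to a routine that starts from a struct uvm_vnode and derives uobj via &uvn->u_obj (424); the "uvn_term" wait channel suggests the vnode termination path. Its key primitive is rwsleep_nsec(), which atomically releases vmobjlock while sleeping on &uvn->u_nio and retakes it on wakeup. pthread_cond_wait() has the same atomic unlock-and-sleep contract, so the drain loop can be sketched in compilable userland form (names invented):

#include <pthread.h>

struct my_vnode {
	pthread_mutex_t lock;	/* analogue of uobj->vmobjlock */
	pthread_cond_t nio_cv;	/* analogue of the &uvn->u_nio wait channel */
	int nio;		/* in-flight I/O count, cf. uvn->u_nio */
};

/* Block until all pending I/O on the vnode has drained. */
void
vnode_drain_io(struct my_vnode *vn)
{
	pthread_mutex_lock(&vn->lock);
	while (vn->nio > 0)
		/* atomically drops the lock, sleeps, retakes it --
		 * the same contract as rwsleep_nsec() at line 497 */
		pthread_cond_wait(&vn->nio_cv, &vn->lock);
	pthread_mutex_unlock(&vn->lock);
}
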
600 uvn_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
602 struct uvm_vnode *uvn = (struct uvm_vnode *) uobj;
610 KASSERT(rw_write_held(uobj->vmobjlock));
635 KASSERT(uobj->pgops->pgo_mk_pcluster != 0);
637 if ((pp = uvm_pagelookup(uobj, curoff)) != NULL)
647 if ((pp = uvm_pagelookup(uobj, curoff)) == NULL)
691 uvm_pagewait(pp, uobj->vmobjlock,
693 rw_enter(uobj->vmobjlock, RW_WRITE);
725 result = uvm_pager_put(uobj, pp, &ppsp, &npages,
786 if (result == VM_PAGER_PEND && ptmp->uobject != uobj)
848 rwsleep_nsec(&uvn->u_nio, uobj->vmobjlock, PVM,
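
uvn_flush() (600-848) is the densest cluster of hits, and the quoted lines already trace its shape: assert the write lock (610), walk the range looking pages up by offset (637, 647), sleep on busy pages and relock afterwards (691-693), push dirty pages out through uvm_pager_put() (725), skip pages that moved to another object while pending (786), and finally sleep on &uvn->u_nio until async I/O completes (848). The subtle step is the busy-page wait, since uvm_pagewait() releases vmobjlock. A hedged reconstruction of just that step, built from the calls quoted above (the wait-channel string is assumed; the real loop carries much more state):

/*
 * Sketch only: uvm_pagewait() drops vmobjlock while sleeping on the
 * busy page, so the lock must be re-entered and the lookup redone,
 * because the page may have been freed or replaced in the meantime.
 */
while ((pp = uvm_pagelookup(uobj, curoff)) != NULL &&
    (pp->pg_flags & PG_BUSY) != 0) {
	uvm_pagewait(pp, uobj->vmobjlock, "uvn_flsh");	/* assumed wmesg */
	rw_enter(uobj->vmobjlock, RW_WRITE);		/* line 693 */
}
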
870 uvn_cluster(struct uvm_object *uobj, voff_t offset, voff_t *loffset,
873 struct uvm_vnode *uvn = (struct uvm_vnode *) uobj;
876 KASSERT(rw_write_held(uobj->vmobjlock));
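
uvn_cluster() (870-876) merely computes the [*loffset, *hoffset) window around offset that the pager may push out in one I/O, and the hits show it too requires vmobjlock write-held (876). A sketch of the usual computation, assuming the window is MAXBSIZE wide and clamped to the vnode size uvn->u_size (neither appears in the listing, so both are assumptions):

/* Presumed body: cluster from 'offset' up to at most the object size. */
*loffset = offset;
*hoffset = *loffset + MAXBSIZE;			/* assumed cluster bound */
if (*hoffset > round_page(uvn->u_size))		/* don't run past EOF */
	*hoffset = round_page(uvn->u_size);
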
899 uvn_put(struct uvm_object *uobj, struct vm_page **pps, int npages, int flags)
901 struct uvm_vnode *uvn = (struct uvm_vnode *)uobj;
904 KASSERT(rw_write_held(uobj->vmobjlock));
925 retval = uvn_io((struct uvm_vnode*)uobj, pps, npages, flags, UIO_WRITE);
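
uvn_put() (899-925) is a thin wrapper: with the write lock asserted (904), it hands the page array straight to uvn_io() with UIO_WRITE (925). The whole function, reconstructed from the quoted lines (the uvn local declared at 901 and any intervening bookkeeping are elided):

int
uvn_put(struct uvm_object *uobj, struct vm_page **pps, int npages, int flags)
{
	int retval;

	KASSERT(rw_write_held(uobj->vmobjlock));	/* line 904 */
	/* hand the busy pages to the vnode I/O backend */
	retval = uvn_io((struct uvm_vnode *)uobj, pps, npages, flags,
	    UIO_WRITE);
	return retval;
}
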
943 uvn_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps,
951 KASSERT(rw_lock_held(uobj->vmobjlock));
952 KASSERT(rw_write_held(uobj->vmobjlock) ||
977 ptmp = uvm_pagelookup(uobj, current_offset);
1053 ptmp = uvm_pagelookup(uobj, current_offset);
1057 ptmp = uvm_pagealloc(uobj, current_offset,
1077 uvm_pagewait(ptmp, uobj->vmobjlock, "uvn_get");
1078 rw_enter(uobj->vmobjlock, RW_WRITE);
1105 result = uvn_io((struct uvm_vnode *) uobj, &ptmp, 1,
1122 rw_exit(uobj->vmobjlock);
1145 rw_exit(uobj->vmobjlock);
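
uvn_get() (943-1145) follows the classic two-pass pager pattern: pass 1 (977) collects pages that are already resident, and pass 2 (1053-1105) allocates the missing ones (1057), waits on busy pages and relocks (1077-1078), then reads the rest in via uvn_io(..., UIO_READ) (1105), unlocking on both the error (1122) and success (1145) paths. The entry assertions at 951-952 accept more than a plain write lock, but the listing truncates the condition. A hedged sketch of pass 2's lookup, allocate-or-wait step (the uvm_wait() wait-channel string and the uvm_pagealloc() flags are assumptions):

/* Find the page at current_offset, allocating or waiting as needed. */
for (;;) {
	ptmp = uvm_pagelookup(uobj, current_offset);	/* line 1053 */
	if (ptmp == NULL) {
		ptmp = uvm_pagealloc(uobj, current_offset, NULL, 0);
		if (ptmp != NULL)
			break;			/* fresh page: we must fill it */
		/* out of memory: drop the lock, wait, retry */
		rw_exit(uobj->vmobjlock);
		uvm_wait("uvn_getpg");		/* assumed wmesg */
		rw_enter(uobj->vmobjlock, RW_WRITE);
		continue;
	}
	if ((ptmp->pg_flags & PG_BUSY) == 0)
		break;				/* resident and usable */
	uvm_pagewait(ptmp, uobj->vmobjlock, "uvn_get");	/* line 1077 */
	rw_enter(uobj->vmobjlock, RW_WRITE);		/* line 1078 */
}
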
1161 struct uvm_object *uobj = &uvn->u_obj;
1173 KASSERT(rw_write_held(uobj->vmobjlock));
1186 rwsleep_nsec(&uvn->u_flags, uobj->vmobjlock, PVM, "uvn_iosync",
1212 rw_exit(uobj->vmobjlock);
1299 rw_enter(uobj->vmobjlock, RW_WRITE);
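
The 1161-1299 hits come from the I/O backend that both uvn_put() and uvn_get() call, apparently uvn_io(): wait while a synchronous flush is in progress by sleeping on &uvn->u_flags (1186), then drop vmobjlock across the actual vnode I/O (1212) and retake it afterwards (1299). Releasing the object lock around the VOP call is precisely why the u_nio in-flight count and its wakeups exist. A hedged outline of that sequence (the UVM_VNODE_IOSYNC flag test, the INFSLP timeout, and the wakeup() are assumptions; the VOP call itself is elided):

/* wait out any synchronous flush in progress (cf. line 1186) */
while (uvn->u_flags & UVM_VNODE_IOSYNC)		/* assumed flag */
	rwsleep_nsec(&uvn->u_flags, uobj->vmobjlock, PVM, "uvn_iosync",
	    INFSLP);				/* assumed timeout */

uvn->u_nio++;				/* count this I/O as in flight */
rw_exit(uobj->vmobjlock);		/* line 1212: unlock around the VOP */
/* ... VOP_READ()/VOP_WRITE() happens here, object lock not held ... */
rw_enter(uobj->vmobjlock, RW_WRITE);	/* line 1299 */
if (--uvn->u_nio == 0)
	wakeup(&uvn->u_nio);		/* release anyone draining I/O */
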
1363 struct uvm_object *uobj = &uvn->u_obj;
1367 rw_enter(uobj->vmobjlock, RW_WRITE);
1370 rw_exit(uobj->vmobjlock);
1380 rw_exit(uobj->vmobjlock);
1390 rw_exit(uobj->vmobjlock);
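
The 1363-1390 block takes vmobjlock once (1367) but releases it at three different points (1370, 1380, 1390): the multiple-early-return shape, where every exit path must drop the lock it entered with. A compilable userland sketch of keeping such paths balanced (the conditions are invented; only the lock discipline mirrors the listing):

#include <stdbool.h>
#include <pthread.h>

struct cached_object { pthread_rwlock_t lock; bool valid, persisting; };

/* Every return path drops the lock it took: cf. 1370/1380/1390. */
bool
obj_uncache(struct cached_object *o)
{
	pthread_rwlock_wrlock(&o->lock);	/* line 1367 */
	if (!o->valid) {			/* nothing cached */
		pthread_rwlock_unlock(&o->lock);	/* line 1370 */
		return true;
	}
	if (!o->persisting) {			/* already non-persistent */
		pthread_rwlock_unlock(&o->lock);	/* line 1380 */
		return true;
	}
	o->persisting = false;			/* drop the cached state */
	pthread_rwlock_unlock(&o->lock);	/* line 1390 */
	return false;
}
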
1442 struct uvm_object *uobj = &uvn->u_obj;
1446 rw_enter(uobj->vmobjlock, RW_WRITE);
1462 rw_exit(uobj->vmobjlock);
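
The final block (1442-1462) is one more short critical section on &uvn->u_obj. By shape and position it is plausibly the set-size path, which updates the pager's cached file size under vmobjlock and discards pages past a shrunken end-of-file. A hedged sketch (newsize, the size comparison, and the uvn_flush(PGO_FREE) call are all assumptions, not quoted in the listing):

rw_enter(uobj->vmobjlock, RW_WRITE);	/* line 1446 */
if (uvn->u_size > newsize)		/* file shrank: free dead pages */
	(void) uvn_flush(uobj, newsize, uvn->u_size, PGO_FREE);
uvn->u_size = newsize;			/* record the new size */
rw_exit(uobj->vmobjlock);		/* line 1462 */
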