Lines Matching defs:object in sys/vm/vnode_pager.c

89 static int vnode_pager_input_smlfs(vm_object_t object, vm_page_t m);
90 static int vnode_pager_input_old(vm_object_t object, vm_page_t m);
148 /* Create the VM system backing object for this vnode */
152 vm_object_t object;
156 object = vp->v_object;
157 if (object != NULL)
167 object = vnode_pager_alloc(vp, size, 0, 0, td->td_ucred);
170 * that the object is associated with the vp. We still have
174 VM_OBJECT_RLOCK(object);
175 last = refcount_release(&object->ref_count);
176 VM_OBJECT_RUNLOCK(object);
180 VNASSERT(vp->v_object != NULL, vp, ("%s: NULL object", __func__));
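The matches above are from vnode_create_vobject(), which a filesystem calls at open time so that vp->v_object exists before the file can be mapped; the reference returned by vnode_pager_alloc() is dropped again because the association with the vnode is what keeps the object around. A minimal sketch of the call pattern, with the filesystem name and inode size field hypothetical:

    static int
    myfs_open(struct vop_open_args *ap)
    {
            struct vnode *vp = ap->a_vp;
            struct myfs_inode *ip = vp->v_data;     /* hypothetical */

            /* Create vp->v_object if it does not exist yet. */
            return (vnode_create_vobject(vp, ip->i_size, ap->a_td));
    }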
221 ("vnode_destroy_vobject: Terminating dead object"));
256 vm_object_t object;
269 object = vp->v_object;
271 if (object == NULL) {
273 * Add an object of the appropriate size
275 object = vm_object_allocate(OBJT_VNODE,
278 object->un_pager.vnp.vnp_size = size;
279 object->un_pager.vnp.writemappings = 0;
280 object->domain.dr_policy = vnode_domainset;
281 object->handle = handle;
283 VM_OBJECT_WLOCK(object);
284 vm_object_set_flag(object, OBJ_SIZEVNLOCK);
285 VM_OBJECT_WUNLOCK(object);
293 VM_OBJECT_WLOCK(object);
294 KASSERT(object->ref_count == 1,
295 ("leaked ref %p %d", object, object->ref_count));
296 object->type = OBJT_DEAD;
297 refcount_init(&object->ref_count, 0);
298 VM_OBJECT_WUNLOCK(object);
299 vm_object_destroy(object);
302 vp->v_object = object;
306 vm_object_reference(object);
308 if ((object->flags & OBJ_COLORED) == 0) {
309 VM_OBJECT_WLOCK(object);
310 vm_object_color(object, 0);
311 VM_OBJECT_WUNLOCK(object);
315 return (object);
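vnode_pager_alloc() is the allocating half: it sizes a fresh OBJT_VNODE object from the byte count, or, if it lost a race and vp->v_object was set meanwhile, destroys the fresh object (the OBJT_DEAD/refcount_init lines above) and references the winner instead. A hedged sketch of a direct call, assuming vp is locked and va was filled in by VOP_GETATTR:

    vm_object_t obj;

    /* Returns a referenced object, also cached at vp->v_object. */
    obj = vnode_pager_alloc(vp, va.va_size, VM_PROT_READ, 0,
        curthread->td_ucred);
    vm_object_deallocate(obj);          /* release when done with it */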
319 * The object must be locked.
322 vnode_pager_dealloc(vm_object_t object)
327 vp = object->handle;
331 VM_OBJECT_ASSERT_WLOCKED(object);
332 vm_object_pip_wait(object, "vnpdea");
333 refs = object->ref_count;
335 object->handle = NULL;
336 object->type = OBJT_DEAD;
338 if (object->un_pager.vnp.writemappings > 0) {
339 object->un_pager.vnp.writemappings = 0;
349 * following object->handle. Clear all text references now.
357 VM_OBJECT_WUNLOCK(object);
360 VM_OBJECT_WLOCK(object);
364 vnode_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
367 struct vnode *vp = object->handle;
376 VM_OBJECT_ASSERT_LOCKED(object);
387 if (IDX_TO_OFF(pindex) >= object->un_pager.vnp.vnp_size)
399 lockstate = VM_OBJECT_DROP(object);
401 VM_OBJECT_PICKUP(object, lockstate);
420 roundup2(object->size, pagesperblock),
423 (uintmax_t)object->size));
426 if (pindex + *after >= object->size)
427 *after = object->size - 1 - pindex;
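vnode_pager_haspage() asks VOP_BMAP whether the block under pindex is allocated (dropping the object lock around the call, hence VM_OBJECT_DROP/PICKUP) and clips *after so it never runs past object->size. Callers normally probe through the generic wrapper; a sketch, holding the object lock as the assertion above requires:

    int before, after;

    VM_OBJECT_RLOCK(object);
    if (vm_pager_has_page(object, pindex, &before, &after)) {
            /*
             * Backing store can supply pages in
             * [pindex - before, pindex + after].
             */
    }
    VM_OBJECT_RUNLOCK(object);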
491 * the associated object that are affected by the size change.
494 * operation (possibly at object termination time), so we must be careful.
499 vm_object_t object;
503 if ((object = vp->v_object) == NULL)
515 VM_OBJECT_WLOCK(object);
516 if (object->type == OBJT_DEAD) {
517 VM_OBJECT_WUNLOCK(object);
520 KASSERT(object->type == OBJT_VNODE,
521 ("not vnode-backed object %p", object));
522 if (nsize == object->un_pager.vnp.vnp_size) {
526 VM_OBJECT_WUNLOCK(object);
530 if (nsize < object->un_pager.vnp.vnp_size) {
534 if (nobjsize < object->size)
535 vm_object_page_remove(object, nobjsize, object->size,
547 m = vm_page_grab(object, OFF_TO_IDX(nsize), VM_ALLOC_NOCREAT);
557 object->un_pager.vnp.vnp_size = nsize;
559 atomic_store_64(&object->un_pager.vnp.vnp_size, nsize);
561 object->size = nobjsize;
562 VM_OBJECT_WUNLOCK(object);
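vnode_pager_setsize() is what keeps vnp_size and object->size consistent with the file: on shrink, pages wholly past the new EOF are removed and the page straddling the new EOF is grabbed and partially zeroed. A filesystem truncation path calls it roughly like this, where myfs_truncate_ondisk is a hypothetical helper:

    /* Shrink the on-disk file, then trim the cached pages to match. */
    error = myfs_truncate_ondisk(ip, nsize);    /* hypothetical */
    if (error == 0)
            vnode_pager_setsize(vp, nsize);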
567 * cached pages in the associated object that are affected by the purge
575 struct vm_object *object;
582 object = vp->v_object;
588 if ((end != 0 && end <= start) || object == NULL)
591 VM_OBJECT_WLOCK(object);
594 vm_object_page_remove(object, pi, piend, 0);
599 m = vm_page_grab(object, pistart, VM_ALLOC_NOCREAT);
611 m = vm_page_grab(object, piend, VM_ALLOC_NOCREAT);
619 VM_OBJECT_WUNLOCK(object);
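These purge matches belong to vnode_pager_purge_range(), which throws away cached pages over a byte range and, like setsize, zero-fills the partial pages at the range boundaries (the two vm_page_grab() calls). A sketch of the call for a hole-punching path, assuming the caller holds the vnode lock:

    /* Invalidate cached pages backing a deallocated range. */
    vnode_pager_purge_range(vp, offset, offset + length);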
667 vnode_pager_input_smlfs(vm_object_t object, vm_page_t m)
679 vp = object->handle;
697 if (address >= object->un_pager.vnp.vnp_size) {
759 vnode_pager_input_old(vm_object_t object, vm_page_t m)
768 VM_OBJECT_ASSERT_WLOCKED(object);
774 if (IDX_TO_OFF(m->pindex) >= object->un_pager.vnp.vnp_size) {
778 if (IDX_TO_OFF(m->pindex) + size > object->un_pager.vnp.vnp_size)
779 size = object->un_pager.vnp.vnp_size - IDX_TO_OFF(m->pindex);
780 vp = object->handle;
781 VM_OBJECT_WUNLOCK(object);
811 VM_OBJECT_WLOCK(object);
832 vnode_pager_getpages(vm_object_t object, vm_page_t *m, int count, int *rbehind,
839 vp = object->handle;
847 vnode_pager_getpages_async(vm_object_t object, vm_page_t *m, int count,
853 vp = object->handle;
893 vm_object_t object;
910 object = vp->v_object;
915 KASSERT(foff < object->un_pager.vnp.vnp_size,
942 VM_OBJECT_WLOCK(object);
946 error = vnode_pager_input_old(object, m[i]);
950 VM_OBJECT_WUNLOCK(object);
966 error = vnode_pager_input_smlfs(object, m[i]);
1000 if (m[0]->pindex + after >= object->size)
1001 after = object->size - 1 - m[0]->pindex;
1012 rahead = min(rahead, object->size - m[count - 1]->pindex);
1047 VM_OBJECT_WLOCK(object);
1057 p = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL);
1083 if (!VM_OBJECT_WOWNED(object))
1084 VM_OBJECT_WLOCK(object);
1089 if (endpindex > object->size)
1090 endpindex = object->size;
1094 p = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL);
1105 if (VM_OBJECT_WOWNED(object))
1106 VM_OBJECT_WUNLOCK(object);
1134 if ((foff + bytecount) > object->un_pager.vnp.vnp_size)
1135 bytecount = object->un_pager.vnp.vnp_size - foff;
1218 vm_object_t object;
1225 object = bp->b_vp->v_object;
1249 VM_OBJECT_WLOCK(object);
1254 VM_OBJECT_WUNLOCK(object);
1259 VM_OBJECT_RLOCK(object);
1269 if (nextoff <= object->un_pager.vnp.vnp_size) {
1287 object->un_pager.vnp.vnp_size - tfoff);
1289 object->un_pager.vnp.vnp_size - tfoff)) == 0,
1296 VM_OBJECT_RUNLOCK(object);
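The read path above (vnode_pager_generic_getpages() and its _done handler) clusters the request with read-behind/read-ahead pages, clips everything against object->size and vnp_size, and zeroes the tail of the page at EOF before marking pages valid. A filesystem with no private logic wires the generic version into its VOP_GETPAGES, as the stock method does; sketched here with a hypothetical filesystem name:

    static int
    myfs_getpages(struct vop_getpages_args *ap)
    {
            /* Defer to the generic clustered read path. */
            return (vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
                ap->a_count, ap->a_rbehind, ap->a_rahead, NULL, NULL));
    }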
1310 vnode_pager_putpages(vm_object_t object, vm_page_t *m, int count,
1335 vp = object->handle;
1336 VM_OBJECT_WUNLOCK(object);
1340 VM_OBJECT_WLOCK(object);
1374 vm_object_t object;
1385 object = vp->v_object;
1416 VM_OBJECT_RLOCK(object);
1417 if (maxsize + poffset > object->un_pager.vnp.vnp_size) {
1418 if (object->un_pager.vnp.vnp_size > poffset) {
1419 maxsize = object->un_pager.vnp.vnp_size - poffset;
1445 VM_OBJECT_RUNLOCK(object);
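The write side mirrors it: vnode_pager_putpages() drops the object lock before calling into the filesystem, and vnode_pager_generic_putpages() clips maxsize against vnp_size under the read lock shown above. The corresponding VOP_PUTPAGES wrapper is equally thin:

    static int
    myfs_putpages(struct vop_putpages_args *ap)
    {
            /* Defer to the generic path, like the stock method. */
            return (vnode_pager_generic_putpages(ap->a_vp, ap->a_m,
                ap->a_count, ap->a_sync, ap->a_rtvals));
    }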
1627 vnode_pager_update_writecount(vm_object_t object, vm_offset_t start,
1633 VM_OBJECT_WLOCK(object);
1634 if (object->type != OBJT_VNODE) {
1635 VM_OBJECT_WUNLOCK(object);
1638 old_wm = object->un_pager.vnp.writemappings;
1639 object->un_pager.vnp.writemappings += (vm_ooffset_t)end - start;
1640 vp = object->handle;
1641 if (old_wm == 0 && object->un_pager.vnp.writemappings != 0) {
1646 } else if (old_wm != 0 && object->un_pager.vnp.writemappings == 0) {
1652 VM_OBJECT_WUNLOCK(object);
1656 vnode_pager_release_writecount(vm_object_t object, vm_offset_t start,
1663 VM_OBJECT_WLOCK(object);
1666 * First, recheck the object type to account for the race when
1669 if (object->type != OBJT_VNODE) {
1670 VM_OBJECT_WUNLOCK(object);
1679 if (object->un_pager.vnp.writemappings != inc) {
1680 object->un_pager.vnp.writemappings -= inc;
1681 VM_OBJECT_WUNLOCK(object);
1685 vp = object->handle;
1687 VM_OBJECT_WUNLOCK(object);
1693 * Decrement the object's writemappings, by swapping the start
1698 vnode_pager_update_writecount(object, end, start);
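vnode_pager_update_writecount() and vnode_pager_release_writecount() keep un_pager.vnp.writemappings balanced so the vnode's v_writecount reflects whether any writable mapping remains; note how release decrements by re-calling update with start and end swapped, making the added delta negative. The VM map code reaches them through the pager-op wrappers, paired like this sketch:

    /* A writable user mapping of [start, end) is established ... */
    vm_pager_update_writecount(object, start, end);

    /* ... and later torn down. */
    vm_pager_release_writecount(object, start, end);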
1706 vnode_pager_getvp(vm_object_t object, struct vnode **vpp, bool *vp_heldp)
1708 *vpp = object->handle;