Lines Matching defs:object
62 * Virtual memory object module.
113 static int vm_object_page_collect_flush(vm_object_t object, vm_page_t p,
118 static void vm_object_backing_remove(vm_object_t object);
123 * page of memory exists within exactly one object.
125 * An object is only deallocated when all "references"
127 * region of an object should be writeable.
129 * Associated with each object is a list of all resident
130 * memory pages belonging to that object; this list is
131 * maintained by the "vm_page" module, and locked by the object's
134 * Each object also records a "pager" routine which is
139 * The only items within the object structure which are
141 * reference count locked by object's lock
142 * pager routine locked by object's lock
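As a quick illustration of the reference rules described above, a consumer that needs an object to stay alive across unlocked work brackets that work with a reference. This is a minimal sketch only; example_use_object() and the work in the middle are hypothetical, while vm_object_reference() and vm_object_deallocate() are this module's own entry points.

    /* Sketch: pin an object across unlocked work (hypothetical helper). */
    static void
    example_use_object(vm_object_t object)
    {
            vm_object_reference(object);    /* object cannot be reclaimed */
            /* ... unlocked work against the object ... */
            vm_object_deallocate(object);   /* dropping the last reference
                                               terminates the object */
    }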
147 struct mtx vm_object_list_mtx; /* lock for object list and count */
151 static SYSCTL_NODE(_vm_stats, OID_AUTO, object, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
152 "VM object stats");
157 "VM object collapses");
162 "VM object bypasses");
179 vm_object_t object;
181 object = (vm_object_t)mem;
182 KASSERT(object->ref_count == 0,
183 ("object %p ref_count = %d", object, object->ref_count));
184 KASSERT(TAILQ_EMPTY(&object->memq),
185 ("object %p has resident pages in its memq", object));
186 KASSERT(vm_radix_is_empty(&object->rtree),
187 ("object %p has resident pages in its trie", object));
189 KASSERT(LIST_EMPTY(&object->rvq),
190 ("object %p has reservations",
191 object));
193 KASSERT(!vm_object_busied(object),
194 ("object %p busy = %d", object, blockcount_read(&object->busy)));
195 KASSERT(object->resident_page_count == 0,
196 ("object %p resident_page_count = %d",
197 object, object->resident_page_count));
198 KASSERT(atomic_load_int(&object->shadow_count) == 0,
199 ("object %p shadow_count = %d",
200 object, atomic_load_int(&object->shadow_count)));
201 KASSERT(object->type == OBJT_DEAD,
202 ("object %p has non-dead type %d",
203 object, object->type));
204 KASSERT(object->charge == 0 && object->cred == NULL,
205 ("object %p has non-zero charge %ju (%p)",
206 object, (uintmax_t)object->charge, object->cred));
213 vm_object_t object;
215 object = (vm_object_t)mem;
216 rw_init_flags(&object->lock, "vmobject", RW_DUPOK | RW_NEW);
218 /* These are true for any object that has been freed */
219 object->type = OBJT_DEAD;
220 vm_radix_init(&object->rtree);
221 refcount_init(&object->ref_count, 0);
222 blockcount_init(&object->paging_in_progress);
223 blockcount_init(&object->busy);
224 object->resident_page_count = 0;
225 atomic_store_int(&object->shadow_count, 0);
226 object->flags = OBJ_DEAD;
229 TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
236 vm_object_t object, void *handle)
239 TAILQ_INIT(&object->memq);
240 LIST_INIT(&object->shadow_head);
242 object->type = type;
243 object->flags = flags;
245 pctrie_init(&object->un_pager.swp.swp_blks);
246 object->un_pager.swp.writemappings = 0;
252 * non-dead object.
256 object->pg_color = 0;
257 object->size = size;
258 object->domain.dr_policy = NULL;
259 object->generation = 1;
260 object->cleangeneration = 1;
261 refcount_init(&object->ref_count, 1);
262 object->memattr = VM_MEMATTR_DEFAULT;
263 object->cred = NULL;
264 object->charge = 0;
265 object->handle = handle;
266 object->backing_object = NULL;
267 object->backing_object_offset = (vm_ooffset_t) 0;
269 LIST_INIT(&object->rvq);
271 umtx_shm_object_init(object);
285 rw_init(&kernel_object->lock, "kernel vm object");
297 * to vm_pageout_fallback_object_lock locking a vm object
315 vm_object_clear_flag(vm_object_t object, u_short bits)
318 VM_OBJECT_ASSERT_WLOCKED(object);
319 object->flags &= ~bits;
323 * Sets the default memory attribute for the specified object. Pages
324 * that are allocated to this object are by default assigned this memory
328 * to the object. In the future, this requirement may be relaxed for
332 vm_object_set_memattr(vm_object_t object, vm_memattr_t memattr)
335 VM_OBJECT_ASSERT_WLOCKED(object);
337 if (object->type == OBJT_DEAD)
339 if (!TAILQ_EMPTY(&object->memq))
342 object->memattr = memattr;
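To change the attribute, a caller must hold the object write lock, and the object must not yet have resident pages. A minimal sketch, assuming failure is reported as a non-KERN_SUCCESS value and using VM_MEMATTR_UNCACHEABLE purely as an example attribute:

    /* Sketch: request uncacheable memory for a freshly created object. */
    int error;

    VM_OBJECT_WLOCK(object);
    error = vm_object_set_memattr(object, VM_MEMATTR_UNCACHEABLE);
    VM_OBJECT_WUNLOCK(object);
    /* error != KERN_SUCCESS if the object is dead or already has pages. */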
347 vm_object_pip_add(vm_object_t object, short i)
351 blockcount_acquire(&object->paging_in_progress, i);
355 vm_object_pip_wakeup(vm_object_t object)
358 vm_object_pip_wakeupn(object, 1);
362 vm_object_pip_wakeupn(vm_object_t object, short i)
366 blockcount_release(&object->paging_in_progress, i);
370 * Atomically drop the object lock and wait for pip to drain. This protects
375 vm_object_pip_sleep(vm_object_t object, const char *waitid)
378 (void)blockcount_sleep(&object->paging_in_progress, &object->lock,
383 vm_object_pip_wait(vm_object_t object, const char *waitid)
386 VM_OBJECT_ASSERT_WLOCKED(object);
388 blockcount_wait(&object->paging_in_progress, &object->lock, waitid,
393 vm_object_pip_wait_unlocked(vm_object_t object, const char *waitid)
396 VM_OBJECT_ASSERT_UNLOCKED(object);
398 blockcount_wait(&object->paging_in_progress, NULL, waitid, PVM);
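The paging-in-progress (pip) count is what lets a thread drop the object lock for pager I/O while keeping termination and collapse at bay; vm_object_pip_wait() above is the waiter side. A minimal sketch of the producer side (the I/O in the middle is hypothetical):

    /* Sketch: hold a pip reference across an unlocked pager operation. */
    VM_OBJECT_WLOCK(object);
    vm_object_pip_add(object, 1);   /* terminate/collapse will wait on this */
    VM_OBJECT_WUNLOCK(object);
    /* ... pager I/O runs without the object lock held ... */
    vm_object_pip_wakeup(object);   /* release the paging-in-progress count */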
404 * Returns a new object with the given size.
409 vm_object_t object;
435 object = (vm_object_t)uma_zalloc(obj_zone, M_WAITOK);
436 _vm_object_allocate(type, size, flags, object, NULL);
438 return (object);
444 vm_object_t object;
447 object = (vm_object_t)uma_zalloc(obj_zone, M_WAITOK);
448 _vm_object_allocate(dyntype, size, flags, object, NULL);
450 return (object);
456 * Returns a new default object of the given size and marked as
464 vm_object_t handle, object;
472 object = uma_zalloc(obj_zone, M_WAITOK);
474 OBJ_ANON | OBJ_ONEMAPPING | OBJ_SWAP, object, handle);
475 object->cred = cred;
476 object->charge = cred != NULL ? charge : 0;
477 return (object);
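For reference, a minimal allocate/release sketch. The vm_object_allocate_anon() argument order (size in pages, backing object, cred, charge) is inferred from the fragments above, and 'len' is a hypothetical byte count:

    /* Sketch: create an anonymous, swap-backed object and release it. */
    vm_object_t obj;

    obj = vm_object_allocate_anon(atop(len), NULL, NULL, 0);
    /* ... install the object in a map entry, fault pages in, ... */
    vm_object_deallocate(obj);      /* last reference terminates the object */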
481 vm_object_reference_vnode(vm_object_t object)
489 if (!refcount_acquire_if_gt(&object->ref_count, 0)) {
490 VM_OBJECT_RLOCK(object);
491 old = refcount_acquire(&object->ref_count);
492 if (object->type == OBJT_VNODE && old == 0)
493 vref(object->handle);
494 VM_OBJECT_RUNLOCK(object);
501 * Acquires a reference to the given object.
504 vm_object_reference(vm_object_t object)
507 if (object == NULL)
510 if (object->type == OBJT_VNODE)
511 vm_object_reference_vnode(object);
513 refcount_acquire(&object->ref_count);
514 KASSERT((object->flags & OBJ_DEAD) == 0,
515 ("vm_object_reference: Referenced dead object."));
521 * Gets another reference to the given object.
523 * The object must be locked.
526 vm_object_reference_locked(vm_object_t object)
530 VM_OBJECT_ASSERT_LOCKED(object);
531 old = refcount_acquire(&object->ref_count);
532 if (object->type == OBJT_VNODE && old == 0)
533 vref(object->handle);
534 KASSERT((object->flags & OBJ_DEAD) == 0,
535 ("vm_object_reference: Referenced dead object."));
539 * Handle deallocating an object of type OBJT_VNODE.
542 vm_object_deallocate_vnode(vm_object_t object)
544 struct vnode *vp = (struct vnode *) object->handle;
547 KASSERT(object->type == OBJT_VNODE,
548 ("vm_object_deallocate_vnode: not a vnode object"));
552 last = refcount_release(&object->ref_count);
553 VM_OBJECT_RUNLOCK(object);
559 umtx_shm_object_terminated(object);
566 * We dropped a reference on an object and discovered that it had a
568 * dropped. Attempt to collapse the sibling and backing object.
573 vm_object_t object;
576 object = LIST_FIRST(&backing_object->shadow_head);
577 KASSERT(object != NULL &&
582 KASSERT((object->flags & OBJ_ANON) != 0,
583 ("invalid shadow object %p", object));
585 if (!VM_OBJECT_TRYWLOCK(object)) {
587 * Prevent object from disappearing since we do not have a
590 vm_object_pip_add(object, 1);
592 VM_OBJECT_WLOCK(object);
593 vm_object_pip_wakeup(object);
600 if ((object->flags & (OBJ_DEAD | OBJ_COLLAPSING)) != 0 ||
601 !refcount_acquire_if_not_zero(&object->ref_count)) {
602 VM_OBJECT_WUNLOCK(object);
605 backing_object = object->backing_object;
607 vm_object_collapse(object);
608 VM_OBJECT_WUNLOCK(object);
610 return (object);
616 * Release a reference to the specified object,
619 * are gone, storage associated with this object
622 * No object may be locked.
625 vm_object_deallocate(vm_object_t object)
630 while (object != NULL) {
633 * vm_object_terminate() on the object chain. A ref count
636 * object.
638 if ((object->flags & OBJ_ANON) == 0)
639 released = refcount_release_if_gt(&object->ref_count, 1);
641 released = refcount_release_if_gt(&object->ref_count, 2);
645 if (object->type == OBJT_VNODE) {
646 VM_OBJECT_RLOCK(object);
647 if (object->type == OBJT_VNODE) {
648 vm_object_deallocate_vnode(object);
651 VM_OBJECT_RUNLOCK(object);
654 VM_OBJECT_WLOCK(object);
655 KASSERT(object->ref_count > 0,
656 ("vm_object_deallocate: object deallocated too many times: %d",
657 object->type));
661 * object we may need to collapse the shadow chain.
663 if (!refcount_release(&object->ref_count)) {
664 if (object->ref_count > 1 ||
665 atomic_load_int(&object->shadow_count) == 0) {
666 if ((object->flags & OBJ_ANON) != 0 &&
667 object->ref_count == 1)
668 vm_object_set_flag(object,
670 VM_OBJECT_WUNLOCK(object);
675 object = vm_object_deallocate_anon(object);
680 * Handle the final reference to an object. We restart
681 * the loop with the backing object to avoid recursion.
683 umtx_shm_object_terminated(object);
684 temp = object->backing_object;
686 KASSERT(object->type == OBJT_SWAP,
687 ("shadowed tmpfs v_object 2 %p", object));
688 vm_object_backing_remove(object);
691 KASSERT((object->flags & OBJ_DEAD) == 0,
692 ("vm_object_deallocate: Terminating dead object."));
693 vm_object_set_flag(object, OBJ_DEAD);
694 vm_object_terminate(object);
695 object = temp;
700 vm_object_destroy(vm_object_t object)
702 uma_zfree(obj_zone, object);
706 vm_object_sub_shadow(vm_object_t object)
708 KASSERT(object->shadow_count >= 1,
709 ("object %p sub_shadow count zero", object));
710 atomic_subtract_int(&object->shadow_count, 1);
714 vm_object_backing_remove_locked(vm_object_t object)
718 backing_object = object->backing_object;
719 VM_OBJECT_ASSERT_WLOCKED(object);
722 KASSERT((object->flags & OBJ_COLLAPSING) == 0,
723 ("vm_object_backing_remove: Removing collapsing object."));
726 if ((object->flags & OBJ_SHADOWLIST) != 0) {
727 LIST_REMOVE(object, shadow_list);
728 vm_object_clear_flag(object, OBJ_SHADOWLIST);
730 object->backing_object = NULL;
734 vm_object_backing_remove(vm_object_t object)
738 VM_OBJECT_ASSERT_WLOCKED(object);
740 backing_object = object->backing_object;
741 if ((object->flags & OBJ_SHADOWLIST) != 0) {
743 vm_object_backing_remove_locked(object);
746 object->backing_object = NULL;
752 vm_object_backing_insert_locked(vm_object_t object, vm_object_t backing_object)
755 VM_OBJECT_ASSERT_WLOCKED(object);
760 LIST_INSERT_HEAD(&backing_object->shadow_head, object,
762 vm_object_set_flag(object, OBJ_SHADOWLIST);
764 object->backing_object = backing_object;
768 vm_object_backing_insert(vm_object_t object, vm_object_t backing_object)
771 VM_OBJECT_ASSERT_WLOCKED(object);
775 vm_object_backing_insert_locked(object, backing_object);
778 object->backing_object = backing_object;
784 * Insert an object into a backing_object's shadow list with an additional
788 vm_object_backing_insert_ref(vm_object_t object, vm_object_t backing_object)
791 VM_OBJECT_ASSERT_WLOCKED(object);
796 ("shadowing dead anonymous object"));
798 vm_object_backing_insert_locked(object, backing_object);
804 object->backing_object = backing_object;
809 * Transfer a backing reference from backing_object to object.
812 vm_object_backing_transfer(vm_object_t object, vm_object_t backing_object)
818 * moves from within backing_object to within object.
820 vm_object_backing_remove_locked(object);
827 vm_object_backing_insert_locked(object, new_backing_object);
833 * is replaced by object.
835 object->backing_object = new_backing_object;
844 vm_object_collapse_wait(vm_object_t object)
847 VM_OBJECT_ASSERT_WLOCKED(object);
849 while ((object->flags & OBJ_COLLAPSING) != 0) {
850 vm_object_pip_wait(object, "vmcolwait");
856 * Waits for a backing object to clear a pending collapse and returns
857 * it locked if it is an ANON object.
860 vm_object_backing_collapse_wait(vm_object_t object)
864 VM_OBJECT_ASSERT_WLOCKED(object);
867 backing_object = object->backing_object;
874 VM_OBJECT_WUNLOCK(object);
877 VM_OBJECT_WLOCK(object);
883 * vm_object_terminate_single_page removes a pageable page from the object,
890 vm_object_t object __diagused = objectv;
893 KASSERT(p->object == object &&
896 p->object = NULL;
898 KASSERT((object->flags & OBJ_UNMANAGED) != 0 ||
908 * from the object and resets the object to an empty state.
911 vm_object_terminate_pages(vm_object_t object)
913 VM_OBJECT_ASSERT_WLOCKED(object);
916 * If the object contained any pages, then reset it to an empty state.
917 * Rather than incrementally removing each page from the object, the
918 * page and object are reset to an empty state.
920 if (object->resident_page_count == 0)
923 vm_radix_reclaim_callback(&object->rtree,
924 vm_object_terminate_single_page, object);
925 TAILQ_INIT(&object->memq);
926 object->resident_page_count = 0;
927 if (object->type == OBJT_VNODE)
928 vdrop(object->handle);
932 * vm_object_terminate actually destroys the specified object, freeing
935 * The object must be locked.
939 vm_object_terminate(vm_object_t object)
942 VM_OBJECT_ASSERT_WLOCKED(object);
943 KASSERT((object->flags & OBJ_DEAD) != 0,
944 ("terminating non-dead obj %p", object));
945 KASSERT((object->flags & OBJ_COLLAPSING) == 0,
946 ("terminating collapsing obj %p", object));
947 KASSERT(object->backing_object == NULL,
948 ("terminating shadow obj %p", object));
952 * done with the object. Note that new paging_in_progress
954 * OBJ_DEAD flag set (without unlocking the object), and avoid
955 * the object being terminated.
957 vm_object_pip_wait(object, "objtrm");
959 KASSERT(object->ref_count == 0,
960 ("vm_object_terminate: object with references, ref_count=%d",
961 object->ref_count));
963 if ((object->flags & OBJ_PG_DTOR) == 0)
964 vm_object_terminate_pages(object);
967 if (__predict_false(!LIST_EMPTY(&object->rvq)))
968 vm_reserv_break_all(object);
971 KASSERT(object->cred == NULL || (object->flags & OBJ_SWAP) != 0,
972 ("%s: non-swap obj %p has cred", __func__, object));
975 * Let the pager know object is dead.
977 vm_pager_deallocate(object);
978 VM_OBJECT_WUNLOCK(object);
980 vm_object_destroy(object);
984 * Make the page read-only so that we can clear the object flags. However, if
985 * this is a nosync mmap then the object is likely to stay dirty so do not
986 * mess with the page and do not clear the object flags. Returns TRUE if the
997 * nosync page, skip it. Note that the object flags were not
1012 * Clean all dirty pages in the specified range of object. Leaves page
1015 * leaving the object dirty.
1026 * The object must be locked.
1032 vm_object_page_clean(vm_object_t object, vm_ooffset_t start, vm_ooffset_t end,
1040 VM_OBJECT_ASSERT_WLOCKED(object);
1042 if (!vm_object_mightbedirty(object) || object->resident_page_count == 0)
1050 tend = (end == 0) ? object->size : OFF_TO_IDX(end + PAGE_MASK);
1051 allclean = tstart == 0 && tend >= object->size;
1055 curgeneration = object->generation;
1057 for (p = vm_page_find_least(object, tstart); p != NULL; p = np) {
1065 if (object->generation != curgeneration &&
1068 np = vm_page_find_least(object, pi);
1075 if (object->type == OBJT_VNODE) {
1076 n = vm_object_page_collect_flush(object, p, pagerflags,
1082 if (object->generation != curgeneration &&
1106 np = vm_page_find_least(object, pi + n);
1117 if (allclean && object->type == OBJT_VNODE)
1118 object->cleangeneration = curgeneration;
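A typical caller write-locks the object and flushes the whole thing synchronously; end == 0 means "to the end of the object", as the code above shows. A minimal sketch using the OBJPC_SYNC pager flag:

    /* Sketch: synchronously flush every dirty page of a vnode-backed object. */
    VM_OBJECT_WLOCK(object);
    (void)vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
    VM_OBJECT_WUNLOCK(object);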
1123 vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int pagerflags,
1131 VM_OBJECT_ASSERT_WLOCKED(object);
1163 * anonymous objects, so we track down the vnode object
1168 * If the backing object is a device object with unmanaged pages, then any
1173 * may start out with a NULL object.
1176 vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
1185 if (object == NULL)
1189 VM_OBJECT_WLOCK(object);
1190 while ((backing_object = object->backing_object) != NULL) {
1192 offset += object->backing_object_offset;
1193 VM_OBJECT_WUNLOCK(object);
1194 object = backing_object;
1195 if (object->size < OFF_TO_IDX(offset + size))
1196 size = IDX_TO_OFF(object->size) - offset;
1210 if (object->type == OBJT_VNODE &&
1211 vm_object_mightbedirty(object) != 0 &&
1212 ((vp = object->handle)->v_vflag & VV_NOSYNC) == 0) {
1213 VM_OBJECT_WUNLOCK(object);
1217 atop(size) == object->size) {
1231 VM_OBJECT_WLOCK(object);
1232 res = vm_object_page_clean(object, offset, offset + size,
1234 VM_OBJECT_WUNLOCK(object);
1256 VM_OBJECT_WLOCK(object);
1258 if ((object->type == OBJT_VNODE ||
1259 object->type == OBJT_DEVICE) && invalidate) {
1260 if (object->type == OBJT_DEVICE)
1271 vm_object_page_remove(object, OFF_TO_IDX(offset),
1274 VM_OBJECT_WUNLOCK(object);
1279 * Determine whether the given advice can be applied to the object. Advice is
1285 vm_object_advice_applies(vm_object_t object, int advice)
1288 if ((object->flags & OBJ_UNMANAGED) != 0)
1292 return ((object->flags & (OBJ_ONEMAPPING | OBJ_ANON)) ==
1297 vm_object_madvise_freespace(vm_object_t object, int advice, vm_pindex_t pindex,
1302 vm_pager_freespace(object, pindex, size);
1308 * Implements the madvise function at the object/page level.
1310 * MADV_WILLNEED (any object)
1314 * MADV_DONTNEED (any object)
1326 vm_object_madvise(vm_object_t object, vm_pindex_t pindex, vm_pindex_t end,
1333 if (object == NULL)
1337 VM_OBJECT_WLOCK(object);
1338 if (!vm_object_advice_applies(object, advice)) {
1339 VM_OBJECT_WUNLOCK(object);
1342 for (m = vm_page_find_least(object, pindex); pindex < end; pindex++) {
1343 tobject = object;
1346 * If the next page isn't resident in the top-level object, we
1353 * Optimize a common case: if the top-level object has
1354 * no backing object, we can skip over the non-resident
1357 if (object->backing_object == NULL) {
1360 vm_object_madvise_freespace(object, advice,
1372 * Prepare to search the next object in the
1381 if (tobject != object)
1396 * can not be invalidated while the object lock is held.
1405 if (object != tobject)
1406 VM_OBJECT_WUNLOCK(object);
1423 if (tobject != object)
1426 VM_OBJECT_WUNLOCK(object);
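The routine is called with the object unlocked and takes the lock itself, as the fragments above show. A one-line sketch, where 'pindex' and 'end' are hypothetical page indices:

    /* Sketch: apply advice to the page index range [pindex, end). */
    vm_object_madvise(object, pindex, end, MADV_DONTNEED);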
1432 * Create a new object which is backed by the
1433 * specified existing object range. The source
1434 * object reference is deallocated.
1436 * The new object and offset into that object
1440 vm_object_shadow(vm_object_t *object, vm_ooffset_t *offset, vm_size_t length,
1446 source = *object;
1449 * Don't create the new object if the old object isn't shared.
1453 * harmless and we will end up with an extra shadow object that
1461 * Allocate a new object with the given length.
1466 * Store the offset into the source object, and fix up the offset into
1467 * the new object.
1475 * The new object shadows the source object, adding a
1477 * to point to the new object, removing a reference to
1478 * the source object. Net result: no change of
1488 * Try to optimize the result object's page color when
1490 * consistency in the combined shadowed object.
1510 *object = result;
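This is the copy-on-write entry point used by the map code. Only the first three parameters are visible above, so the trailing cred/shared arguments below are an assumption about the current KPI, and 'source' and 'length' are hypothetical:

    /* Sketch: interpose a shadow object in front of 'obj'. */
    vm_object_t obj = source;       /* caller-held reference */
    vm_ooffset_t off = 0;

    vm_object_shadow(&obj, &off, length, NULL /* cred */, false /* shared */);
    /* 'obj' now points at the shadow; the reference on 'source' was consumed. */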
1516 * Split the pages in a map entry into a new object. This affords
1517 * easier removal of unused pages, and keeps object inheritance from
1529 orig_object = entry->object.vm_object;
1531 ("vm_object_split: Splitting object with multiple mappings."));
1547 * additional reference on backing_object by new object will
1554 * At this point, the new object is still private, so the order in
1574 * that the object is in transition.
1622 * the original object, then transferring the reservation to
1623 * the new object is neither particularly beneficial nor
1625 * with the original object. If, however, all of the
1627 * object, then transferring the reservation is typically
1648 entry->object.vm_object = new_object;
1655 vm_object_collapse_scan_wait(struct pctrie_iter *pages, vm_object_t object,
1660 VM_OBJECT_ASSERT_WLOCKED(object);
1661 backing_object = object->backing_object;
1664 KASSERT(p == NULL || p->object == object || p->object == backing_object,
1665 ("invalid ownership %p %p %p", p, object, backing_object));
1668 VM_OBJECT_WUNLOCK(object);
1671 VM_OBJECT_WLOCK(object);
1672 } else if (p->object == object) {
1675 VM_OBJECT_WLOCK(object);
1677 VM_OBJECT_WUNLOCK(object);
1680 VM_OBJECT_WLOCK(object);
1688 vm_object_collapse_scan(vm_object_t object)
1695 VM_OBJECT_ASSERT_WLOCKED(object);
1696 VM_OBJECT_ASSERT_WLOCKED(object->backing_object);
1698 backing_object = object->backing_object;
1699 backing_offset_index = OFF_TO_IDX(object->backing_object_offset);
1713 next = vm_object_collapse_scan_wait(&pages, object, p);
1717 KASSERT(object->backing_object == backing_object,
1718 ("vm_object_collapse_scan: backing object mismatch %p != %p",
1719 object->backing_object, backing_object));
1720 KASSERT(p->object == backing_object,
1721 ("vm_object_collapse_scan: object mismatch %p != %p",
1722 p->object, backing_object));
1725 new_pindex >= object->size) {
1745 pp = vm_page_lookup(object, new_pindex);
1754 next = vm_object_collapse_scan_wait(&pages, object, pp);
1769 if (pp != NULL || vm_pager_has_page(object, new_pindex, NULL,
1775 * backing object.
1790 * backing object to the main object.
1796 if (!vm_page_iter_rename(&pages, p, object, new_pindex)) {
1798 next = vm_object_collapse_scan_wait(&pages, object,
1811 vm_reserv_rename(p, object, backing_object,
1823 * Collapse an object with the object backing it.
1824 * Pages in the backing object are moved into the
1825 * parent, and the backing object is deallocated.
1828 vm_object_collapse(vm_object_t object)
1832 VM_OBJECT_ASSERT_WLOCKED(object);
1835 KASSERT((object->flags & (OBJ_DEAD | OBJ_ANON)) == OBJ_ANON,
1836 ("collapsing invalid object"));
1843 backing_object = vm_object_backing_collapse_wait(object);
1847 KASSERT(object->ref_count > 0 &&
1848 object->ref_count > atomic_load_int(&object->shadow_count),
1850 object->ref_count, atomic_load_int(&object->shadow_count)));
1853 ("vm_object_collapse: Backing object already collapsing."));
1854 KASSERT((object->flags & (OBJ_COLLAPSING | OBJ_DEAD)) == 0,
1855 ("vm_object_collapse: object is already collapsing."));
1858 * We know that we can either collapse the backing object if
1860 * the parent bypass the object if the parent happens to shadow
1861 * all the resident pages in the entire backing object.
1868 vm_object_pip_add(object, 1);
1869 vm_object_set_flag(object, OBJ_COLLAPSING);
1875 * object, we can collapse it into the parent.
1877 vm_object_collapse_scan(object);
1880 * Move the pager from backing_object to object.
1883 * backing_object's and object's locks are released and
1886 swap_pager_copy(backing_object, object,
1887 OFF_TO_IDX(object->backing_object_offset), TRUE);
1892 vm_object_clear_flag(object, OBJ_COLLAPSING);
1893 vm_object_backing_transfer(object, backing_object);
1894 object->backing_object_offset +=
1896 VM_OBJECT_WUNLOCK(object);
1897 vm_object_pip_wakeup(object);
1902 * Since the backing object has no pages, no pager left,
1903 * and no object references within it, all that is
1914 VM_OBJECT_WLOCK(object);
1917 * If we do not entirely shadow the backing object,
1920 * The object lock and backing_object lock must not
1923 if (!swap_pager_scan_all_shadowed(object)) {
1929 * Make the parent shadow the next object in the
1933 vm_object_backing_remove_locked(object);
1936 vm_object_backing_insert_ref(object,
1938 object->backing_object_offset +=
1955 * Try again with this object's new backing object.
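Collapse is only legal on a write-locked, live anonymous object (see the KASSERT above). A minimal, somewhat contrived sketch of how the deallocation path drives it:

    /* Sketch: fold the backing chain into an anonymous object. */
    VM_OBJECT_WLOCK(object);
    vm_object_collapse(object);     /* object must be OBJ_ANON and not OBJ_DEAD */
    VM_OBJECT_WUNLOCK(object);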
1963 * For the given object, either frees or invalidates each of the
1966 * mapping, then it may be invalidated but not removed from the object.
1969 * extends from "start" to the end of the object. If the option
1984 * The object must be locked.
1987 vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
1993 VM_OBJECT_ASSERT_WLOCKED(object);
1994 KASSERT((object->flags & OBJ_UNMANAGED) == 0 ||
1996 ("vm_object_page_remove: illegal options for object %p", object));
1997 if (object->resident_page_count == 0)
1999 vm_object_pip_add(object, 1);
2000 vm_page_iter_limit_init(&pages, object, end);
2030 VM_OBJECT_WLOCK(object);
2040 object->ref_count != 0)
2054 object->ref_count != 0 &&
2063 object->ref_count != 0 && !vm_page_try_remove_all(p))
2067 vm_object_pip_wakeup(object);
2069 vm_pager_freespace(object, start, (end == 0 ? object->size : end) -
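Truncation-style callers hold the write lock and pass end == 0 to mean "to the end of the object"; the matching swap space is released through vm_pager_freespace() as shown above. A minimal sketch, with 'start' a hypothetical page index and options 0 selecting the default behaviour:

    /* Sketch: free all pages from 'start' to the end of the object. */
    VM_OBJECT_WLOCK(object);
    vm_object_page_remove(object, start, 0, 0);
    VM_OBJECT_WUNLOCK(object);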
2076 * For the given object, attempt to move the specified pages to
2082 * "start" to the end of the object.
2087 * The object must be locked.
2090 vm_object_page_noreuse(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
2094 VM_OBJECT_ASSERT_LOCKED(object);
2095 KASSERT((object->flags & (OBJ_FICTITIOUS | OBJ_UNMANAGED)) == 0,
2096 ("vm_object_page_noreuse: illegal object %p", object));
2097 if (object->resident_page_count == 0)
2099 p = vm_page_find_least(object, start);
2112 * Populate the specified range of the object with valid pages. Returns
2117 * OBJT_DEVICE object.
2119 * The object must be locked.
2122 vm_object_populate(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
2128 VM_OBJECT_ASSERT_WLOCKED(object);
2130 rv = vm_page_grab_valid(&m, object, pindex, VM_ALLOC_NORMAL);
2136 * the object.
2140 m = vm_page_lookup(object, start);
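A sketch of populating a range and checking the KERN_* result; 'start' and 'end' are hypothetical page indices, and the ENOMEM translation is just an example of caller-side error handling:

    /* Sketch: make every page in [start, end) resident and valid. */
    int rv;

    VM_OBJECT_WLOCK(object);
    rv = vm_object_populate(object, start, end);
    VM_OBJECT_WUNLOCK(object);
    if (rv != KERN_SUCCESS)
            return (ENOMEM);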
2152 * regions of memory into a single object.
2156 * NOTE: Only works at the moment if the second object is NULL -
2157 * if it's not, which object do we lock first?
2160 * prev_object First object to coalesce
2163 * next_size Size of reference to the second object
2168 * The object must *not* be locked.
2183 * Try to collapse the object first.
2189 * another object . has a copy elsewhere (any of which mean that the
2215 * later. Non-NULL cred in the object would prevent
2218 * cause allocation of the separate object for the map
2231 * Remove any pages that may still be in the object from a previous
2241 ("object %p overcharged 1 %jx %jx", prev_object,
2250 * Extend the object if necessary.
2260 vm_object_set_writeable_dirty_(vm_object_t object)
2262 atomic_add_int(&object->generation, 1);
2266 vm_object_mightbedirty_(vm_object_t object)
2268 return (object->generation != object->cleangeneration);
2274 * For each page offset within the specified range of the given object,
2280 vm_object_unwire(vm_object_t object, vm_ooffset_t offset, vm_size_t length,
2293 if ((object->flags & OBJ_FICTITIOUS) != 0)
2299 VM_OBJECT_RLOCK(object);
2300 m = vm_page_find_least(object, pindex);
2304 * The first object in the shadow chain doesn't
2306 * the page must exist in a backing object.
2308 tobject = object;
2331 for (tobject = object; locked_depth >= 1;
2334 if (tm->object != tobject)
2338 tobject = tm->object;
2349 /* Release the accumulated object locks. */
2350 for (tobject = object; locked_depth >= 1; locked_depth--) {
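A sketch of unwiring a previously wired byte range; treating the final argument as the page queue the unwired pages should be placed on (PQ_ACTIVE here) is an assumption about the part of the signature elided above:

    /* Sketch: unwire [offset, offset + length) and requeue the pages. */
    vm_object_unwire(object, offset, length, PQ_ACTIVE);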
2358 * Return the vnode for the given object, or NULL if none exists.
2363 vm_object_vnode(vm_object_t object)
2367 VM_OBJECT_ASSERT_LOCKED(object);
2368 vm_pager_getvp(object, &vp, NULL);
2373 * Busy the vm object. This prevents new pages belonging to the object from
2405 * This function aims to determine if the object is mapped,
2460 * after reacquiring the VM object lock.
2485 * A page may belong to the object but be
2487 * object lock is not held. This makes the
2611 _vm_object_in_map(vm_map_t map, vm_object_t object, vm_map_entry_t entry)
2622 if (_vm_object_in_map(map, object, tmpe)) {
2627 tmpm = entry->object.sub_map;
2629 if (_vm_object_in_map(tmpm, object, tmpe)) {
2633 } else if ((obj = entry->object.vm_object) != NULL) {
2635 if (obj == object) {
2643 vm_object_in_map(vm_object_t object)
2651 if (_vm_object_in_map(&p->p_vmspace->vm_map, object, 0)) {
2657 if (_vm_object_in_map(kernel_map, object, 0))
2664 vm_object_t object;
2670 TAILQ_FOREACH(object, &vm_object_list, object_list) {
2671 if ((object->flags & OBJ_ANON) != 0) {
2672 if (object->ref_count == 0) {
2674 (long)object->size);
2676 if (!vm_object_in_map(object)) {
2680 object->ref_count, (u_long)object->size,
2681 (u_long)object->size,
2682 (void *)object->backing_object);
2693 DB_SHOW_COMMAND(object, vm_object_print_static)
2696 vm_object_t object = (vm_object_t)addr;
2706 if (object == NULL)
2711 object, (int)object->type, (uintmax_t)object->size,
2712 object->resident_page_count, object->ref_count, object->flags,
2713 object->cred ? object->cred->cr_ruid : -1, (uintmax_t)object->charge);
2715 atomic_load_int(&object->shadow_count),
2716 object->backing_object ? object->backing_object->ref_count : 0,
2717 object->backing_object, (uintmax_t)object->backing_object_offset);
2724 TAILQ_FOREACH(p, &object->memq, listq) {
2762 vm_object_t object;
2768 TAILQ_FOREACH(object, &vm_object_list, object_list) {
2769 db_printf("new object: %p\n", (void *)object);
2776 TAILQ_FOREACH(m, &object->memq, listq) {