Lines Matching refs:object (references to the identifier "object" in vm_object.c)

99 static void	vm_object_page_collect_flush(vm_object_t object, vm_page_t p,
389 _vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object, in _vm_object_allocate() argument
394 RB_INIT(&object->rb_memq); in _vm_object_allocate()
395 lwkt_token_init(&object->token, ident); in _vm_object_allocate()
397 TAILQ_INIT(&object->backing_list); in _vm_object_allocate()
398 lockinit(&object->backing_lk, "baclk", 0, 0); in _vm_object_allocate()
400 object->type = type; in _vm_object_allocate()
401 object->size = size; in _vm_object_allocate()
402 object->ref_count = 1; in _vm_object_allocate()
403 object->memattr = VM_MEMATTR_DEFAULT; in _vm_object_allocate()
404 object->hold_count = 0; in _vm_object_allocate()
405 object->flags = 0; in _vm_object_allocate()
406 if ((object->type == OBJT_DEFAULT) || (object->type == OBJT_SWAP)) in _vm_object_allocate()
407 vm_object_set_flag(object, OBJ_ONEMAPPING); in _vm_object_allocate()
408 object->paging_in_progress = 0; in _vm_object_allocate()
409 object->resident_page_count = 0; in _vm_object_allocate()
411 object->pg_color = vm_quickcolor(); in _vm_object_allocate()
412 object->handle = NULL; in _vm_object_allocate()
414 atomic_add_int(&object->generation, 1); in _vm_object_allocate()
415 object->swblock_count = 0; in _vm_object_allocate()
416 RB_INIT(&object->swblock_root); in _vm_object_allocate()
417 vm_object_lock_init(object); in _vm_object_allocate()
418 pmap_object_init(object); in _vm_object_allocate()
420 vm_object_hold(object); in _vm_object_allocate()
422 hash = vmobj_hash(object); in _vm_object_allocate()
424 TAILQ_INSERT_TAIL(&hash->list, object, object_entry); in _vm_object_allocate()
432 vm_object_init(vm_object_t object, vm_pindex_t size) in vm_object_init() argument
434 _vm_object_allocate(OBJT_DEFAULT, size, object, "vmobj"); in vm_object_init()
435 vm_object_drop(object); in vm_object_init()
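
The _vm_object_allocate() matches above show the construction pattern: the object's fields are initialized, it starts life with one reference, and it is appended to the list of the bucket returned by vmobj_hash(). Below is a minimal userland sketch of that shape using the <sys/queue.h> TAILQ macros; the xobj types, the trivial pointer hash, and xobj_allocate() are hypothetical stand-ins, not the kernel API.

#include <stdint.h>
#include <sys/queue.h>

struct xobj {
	int			type;
	unsigned long		size;
	unsigned int		ref_count;
	TAILQ_ENTRY(xobj)	entry;
};

#define XOBJ_NBUCKETS	64
static TAILQ_HEAD(xobj_list, xobj) xobj_buckets[XOBJ_NBUCKETS];

static void
xobj_buckets_init(void)
{
	int i;

	for (i = 0; i < XOBJ_NBUCKETS; ++i)
		TAILQ_INIT(&xobj_buckets[i]);
}

/* Hypothetical pointer hash standing in for vmobj_hash(). */
static struct xobj_list *
xobj_hash(struct xobj *obj)
{
	return (&xobj_buckets[((uintptr_t)obj >> 8) % XOBJ_NBUCKETS]);
}

/* Initialize the new object with a single reference and make it findable. */
static void
xobj_allocate(struct xobj *obj, int type, unsigned long size)
{
	obj->type = type;
	obj->size = size;
	obj->ref_count = 1;
	TAILQ_INSERT_TAIL(xobj_hash(obj), obj, entry);
}
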
506 VMOBJDEBUG(vm_object_reference_locked)(vm_object_t object VMOBJDBARGS) in VMOBJDEBUG()
508 KKASSERT(object != NULL); in VMOBJDEBUG()
509 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object)); in VMOBJDEBUG()
510 atomic_add_int(&object->ref_count, 1); in VMOBJDEBUG()
511 if (object->type == OBJT_VNODE) { in VMOBJDEBUG()
512 vref(object->handle); in VMOBJDEBUG()
516 debugvm_object_add(object, file, line, 1); in VMOBJDEBUG()
527 VMOBJDEBUG(vm_object_reference_quick)(vm_object_t object VMOBJDBARGS) in VMOBJDEBUG()
529 KKASSERT(object->type == OBJT_VNODE || object->ref_count > 0); in VMOBJDEBUG()
530 atomic_add_int(&object->ref_count, 1); in VMOBJDEBUG()
531 if (object->type == OBJT_VNODE) in VMOBJDEBUG()
532 vref(object->handle); in VMOBJDEBUG()
534 debugvm_object_add(object, file, line, 1); in VMOBJDEBUG()
548 VMOBJDEBUG(vm_object_vndeallocate)(vm_object_t object, struct vnode **vpp in VMOBJDEBUG()
551 struct vnode *vp = (struct vnode *) object->handle; in VMOBJDEBUG()
554 KASSERT(object->type == OBJT_VNODE, in VMOBJDEBUG()
557 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object)); in VMOBJDEBUG()
559 if (object->ref_count == 0) { in VMOBJDEBUG()
564 count = object->ref_count; in VMOBJDEBUG()
568 vm_object_upgrade(object); in VMOBJDEBUG()
569 if (atomic_fcmpset_int(&object->ref_count, &count, 0)) { in VMOBJDEBUG()
574 if (atomic_fcmpset_int(&object->ref_count, in VMOBJDEBUG()
583 debugvm_object_add(object, file, line, -1); in VMOBJDEBUG()
615 VMOBJDEBUG(vm_object_deallocate)(vm_object_t object VMOBJDBARGS) in VMOBJDEBUG()
620 if (object == NULL) in VMOBJDEBUG()
623 count = object->ref_count; in VMOBJDEBUG()
634 if (count <= 3 || (object->type == OBJT_VNODE && count <= 1)) { in VMOBJDEBUG()
636 debugvm_object_add(object, file, line, 0); in VMOBJDEBUG()
638 vm_object_hold(object); in VMOBJDEBUG()
639 vm_object_deallocate_locked(object); in VMOBJDEBUG()
640 vm_object_drop(object); in VMOBJDEBUG()
651 if (object->type == OBJT_VNODE) { in VMOBJDEBUG()
652 vp = (struct vnode *)object->handle; in VMOBJDEBUG()
653 if (atomic_fcmpset_int(&object->ref_count, in VMOBJDEBUG()
656 debugvm_object_add(object, file, line, -1); in VMOBJDEBUG()
664 if (atomic_fcmpset_int(&object->ref_count, in VMOBJDEBUG()
667 debugvm_object_add(object, file, line, -1); in VMOBJDEBUG()
679 VMOBJDEBUG(vm_object_deallocate_locked)(vm_object_t object VMOBJDBARGS) in VMOBJDEBUG()
684 if (object == NULL) in VMOBJDEBUG()
695 if (object->type == OBJT_VNODE) { in VMOBJDEBUG()
696 ASSERT_LWKT_TOKEN_HELD(&object->token); in VMOBJDEBUG()
697 vm_object_vndeallocate(object, NULL); in VMOBJDEBUG()
700 ASSERT_LWKT_TOKEN_HELD_EXCL(&object->token); in VMOBJDEBUG()
705 if (object->ref_count == 0) { in VMOBJDEBUG()
707 "too many times: %d", object->type); in VMOBJDEBUG()
709 if (object->ref_count > 2) { in VMOBJDEBUG()
710 atomic_add_int(&object->ref_count, -1); in VMOBJDEBUG()
712 debugvm_object_add(object, file, line, -1); in VMOBJDEBUG()
721 KKASSERT(object->ref_count != 0); in VMOBJDEBUG()
722 if (object->ref_count >= 2) { in VMOBJDEBUG()
723 atomic_add_int(&object->ref_count, -1); in VMOBJDEBUG()
725 debugvm_object_add(object, file, line, -1); in VMOBJDEBUG()
730 atomic_add_int(&object->ref_count, -1); in VMOBJDEBUG()
731 if ((object->flags & OBJ_DEAD) == 0) in VMOBJDEBUG()
732 vm_object_terminate(object); in VMOBJDEBUG()
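
The reference paths above manipulate ref_count without the object token where they can: atomic_add_int() for unconditional bumps, and atomic_fcmpset_int() loops that drop a reference only when the drop cannot be the final one, leaving the terminating transition to the locked path. A sketch of that fast/slow split with C11 <stdatomic.h>; myobj_ref() and myobj_unref_fast() are hypothetical names, not the DragonFly functions.

#include <stdatomic.h>
#include <stdbool.h>

struct myobj {
	atomic_uint	ref_count;
};

/* Unconditional bump; valid while the caller already holds a reference. */
static void
myobj_ref(struct myobj *obj)
{
	atomic_fetch_add_explicit(&obj->ref_count, 1, memory_order_relaxed);
}

/*
 * Try to drop a reference without locking the object.  Succeeds only when
 * another reference remains afterwards; the final drop (which must tear
 * the object down) is deferred to a locked slow path.
 */
static bool
myobj_unref_fast(struct myobj *obj)
{
	unsigned int count;

	count = atomic_load_explicit(&obj->ref_count, memory_order_relaxed);
	while (count > 1) {
		/* On failure, count is reloaded with the current value. */
		if (atomic_compare_exchange_weak(&obj->ref_count,
						 &count, count - 1))
			return (true);
	}
	return (false);		/* caller must take the locked path */
}
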
746 vm_object_terminate(vm_object_t object) in vm_object_terminate() argument
755 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object)); in vm_object_terminate()
756 KKASSERT((object->flags & OBJ_DEAD) == 0); in vm_object_terminate()
757 vm_object_set_flag(object, OBJ_DEAD); in vm_object_terminate()
762 vm_object_pip_wait(object, "objtrm1"); in vm_object_terminate()
764 KASSERT(!object->paging_in_progress, in vm_object_terminate()
771 if (object->type == OBJT_VNODE) { in vm_object_terminate()
785 vp = (struct vnode *) object->handle; in vm_object_terminate()
787 vm_object_page_clean(object, 0, 0, OBJPC_SYNC); in vm_object_terminate()
795 vm_object_pip_wait(object, "objtrm2"); in vm_object_terminate()
797 if (object->ref_count != 0) { in vm_object_terminate()
799 "ref_count=%d", object->ref_count); in vm_object_terminate()
805 pmap_object_free(object); in vm_object_terminate()
813 info.object = object; in vm_object_terminate()
816 vm_page_rb_tree_RB_SCAN(&object->rb_memq, NULL, in vm_object_terminate()
823 vm_pager_deallocate(object); in vm_object_terminate()
832 if (RB_ROOT(&object->rb_memq) == NULL) in vm_object_terminate()
836 object, object->resident_page_count); in vm_object_terminate()
837 vm_page_rb_tree_RB_SCAN(&object->rb_memq, NULL, in vm_object_terminate()
844 KKASSERT(object->resident_page_count == 0); in vm_object_terminate()
849 hash = vmobj_hash(object); in vm_object_terminate()
851 TAILQ_REMOVE(&hash->list, object, object_entry); in vm_object_terminate()
854 if (object->ref_count != 0) { in vm_object_terminate()
856 "ref_count=%d", object->ref_count); in vm_object_terminate()
881 vm_object_t object; in vm_object_terminate_callback() local
883 object = p->object; in vm_object_terminate_callback()
884 KKASSERT(object == info->object); in vm_object_terminate_callback()
890 if (object != p->object) { in vm_object_terminate_callback()
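
vm_object_terminate_callback() compares p->object against info->object and repeats the check after busying the page, since the callback can block and the page may change hands in the meantime. The compilable sketch below shows that re-validate-after-blocking shape; every type and helper in it (xpage, xinfo, xpage_busy_try(), ...) is hypothetical.

#include <stdbool.h>
#include <stddef.h>

struct xvmobj;				/* opaque owning object */

struct xpage {
	struct xvmobj	*object;
	bool		 busy;
};

struct xinfo {
	struct xvmobj	*object;
	int		 error;
};

/* Trivial stand-ins for primitives that can block in the kernel. */
static bool
xpage_busy_try(struct xpage *p)
{
	if (p->busy)
		return (false);
	p->busy = true;
	return (true);
}

static void
xpage_unbusy(struct xpage *p)
{
	p->busy = false;
}

static void
xpage_free(struct xpage *p)
{
	p->object = NULL;
	p->busy = false;
}

static int
xobj_terminate_page_cb(struct xpage *p, void *data)
{
	struct xinfo *info = data;

	if (!xpage_busy_try(p)) {
		info->error = 1;	/* couldn't busy it; rescan later */
		return (0);
	}
	/*
	 * Re-check ownership: the page may have been taken away from the
	 * object while this callback was blocked on the busy state.
	 */
	if (p->object != info->object) {
		xpage_unbusy(p);
		info->error = 1;
		return (0);
	}
	xpage_free(p);
	return (0);
}
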
944 vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end, in vm_object_page_clean() argument
953 vm_object_hold(object); in vm_object_page_clean()
954 if (object->type != OBJT_VNODE || in vm_object_page_clean()
955 (object->flags & OBJ_MIGHTBEDIRTY) == 0) { in vm_object_page_clean()
956 vm_object_drop(object); in vm_object_page_clean()
964 vp = object->handle; in vm_object_page_clean()
970 vm_object_set_flag(object, OBJ_CLEANING); in vm_object_page_clean()
977 info.end_pindex = object->size - 1; in vm_object_page_clean()
981 wholescan = (start == 0 && info.end_pindex == object->size - 1); in vm_object_page_clean()
984 info.object = object; in vm_object_page_clean()
994 vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp, in vm_object_page_clean()
997 vm_object_clear_flag(object, in vm_object_page_clean()
999 if (object->type == OBJT_VNODE && in vm_object_page_clean()
1000 (vp = (struct vnode *)object->handle) != NULL) { in vm_object_page_clean()
1019 generation = object->generation; in vm_object_page_clean()
1020 vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp, in vm_object_page_clean()
1022 } while (info.error || generation != object->generation); in vm_object_page_clean()
1024 vm_object_clear_flag(object, OBJ_CLEANING); in vm_object_page_clean()
1025 vm_object_drop(object); in vm_object_page_clean()
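
vm_object_page_clean() wraps its second pass in a retry loop: it snapshots object->generation, scans the page tree, and repeats whenever the callback reported an error or the generation moved underneath it. A small sketch of that retry-until-stable idiom; struct xscan and scan_once() are hypothetical.

#include <stdbool.h>

struct xscan {
	unsigned int	generation;	/* bumped whenever the page set mutates */
	int		error;		/* set by a pass that must be repeated */
};

/* Placeholder pass; a real one would visit pages and may set sc->error. */
static void
scan_once(struct xscan *sc)
{
	(void)sc;
}

/* Repeat the pass until it runs cleanly over an unchanged collection. */
static void
scan_until_stable(struct xscan *sc)
{
	unsigned int generation;

	do {
		sc->error = 0;
		generation = sc->generation;
		scan_once(sc);
	} while (sc->error || generation != sc->generation);
}
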
1037 KKASSERT(p->object == info->object); in vm_object_page_clean_pass1()
1045 KKASSERT(p->object == info->object); in vm_object_page_clean_pass1()
1068 KKASSERT(p->object == info->object); in vm_object_page_clean_pass2()
1077 generation = info->object->generation; in vm_object_page_clean_pass2()
1085 KKASSERT(p->object == info->object && in vm_object_page_clean_pass2()
1086 info->object->generation == generation); in vm_object_page_clean_pass2()
1128 vm_object_page_collect_flush(info->object, p, info->pagerflags); in vm_object_page_clean_pass2()
1148 vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int pagerflags) in vm_object_page_collect_flush() argument
1158 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object)); in vm_object_page_collect_flush()
1169 tp = vm_page_lookup_busy_try(object, pi - page_base + ib, in vm_object_page_collect_flush()
1198 pi - page_base + is < object->size) { in vm_object_page_collect_flush()
1201 tp = vm_page_lookup_busy_try(object, pi - page_base + is, in vm_object_page_collect_flush()
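
vm_object_page_collect_flush() builds a write cluster around the page that triggered the flush, probing backward (ib) and then forward (is) from its pindex until it hits a page that cannot join the run, so a single pager write covers a contiguous span. The self-contained sketch below computes such a window over a flat array of dirty flags; the names and the XCLUSTER_MAX cap are hypothetical, and the trigger page is assumed dirty.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define XCLUSTER_MAX	16		/* hypothetical cluster cap */

/*
 * Given dirty[0..n) and a trigger index pi, return the start of the
 * contiguous dirty run around pi and store its length in *lenp.
 */
static size_t
xcollect_window(const bool *dirty, size_t n, size_t pi, size_t *lenp)
{
	size_t lo = pi, hi = pi;

	/* Probe backward while the previous page is present and dirty. */
	while (lo > 0 && dirty[lo - 1] && (hi - (lo - 1) + 1) <= XCLUSTER_MAX)
		lo--;
	/* Probe forward while the next page is present and dirty. */
	while (hi + 1 < n && dirty[hi + 1] && (hi + 1 - lo + 1) <= XCLUSTER_MAX)
		hi++;

	*lenp = hi - lo + 1;
	return (lo);
}

int
main(void)
{
	bool dirty[10] = { false, true, true, true, false,
			   true, true, false, false, true };
	size_t len;
	size_t start = xcollect_window(dirty, 10, 2, &len);

	printf("flush pindex %zu..%zu\n", start, start + len - 1);  /* 1..3 */
	return (0);
}
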
1261 vm_object_madvise(vm_object_t object, vm_pindex_t pindex, in vm_object_madvise() argument
1268 if (object == NULL) in vm_object_madvise()
1273 vm_object_hold(object); in vm_object_madvise()
1286 if ((object->type != OBJT_DEFAULT && in vm_object_madvise()
1287 object->type != OBJT_SWAP) || in vm_object_madvise()
1288 (object->flags & OBJ_ONEMAPPING) == 0) { in vm_object_madvise()
1293 m = vm_page_lookup_busy_try(object, pindex, TRUE, &error); in vm_object_madvise()
1303 if (advise == MADV_FREE && object->type == OBJT_SWAP) in vm_object_madvise()
1304 swap_pager_freespace(object, pindex, 1); in vm_object_madvise()
1351 if (object->type == OBJT_SWAP) in vm_object_madvise()
1352 swap_pager_freespace(object, pindex, 1); in vm_object_madvise()
1356 vm_object_drop(object); in vm_object_madvise()
1368 vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end, in vm_object_page_remove() argument
1381 vm_object_hold(object); in vm_object_page_remove()
1382 if (object == NULL || in vm_object_page_remove()
1383 (object->type != OBJT_MGTDEVICE && in vm_object_page_remove()
1384 object->resident_page_count == 0 && object->swblock_count == 0)) { in vm_object_page_remove()
1385 vm_object_drop(object); in vm_object_page_remove()
1388 KASSERT(object->type != OBJT_PHYS, in vm_object_page_remove()
1394 vm_object_pip_add(object, 1); in vm_object_page_remove()
1404 info.object = object; in vm_object_page_remove()
1408 end = object->size; in vm_object_page_remove()
1414 all = (start == 0 && info.end_pindex >= object->size - 1); in vm_object_page_remove()
1429 lockmgr(&object->backing_lk, LK_EXCLUSIVE); in vm_object_page_remove()
1430 TAILQ_FOREACH(ba, &object->backing_list, entry) { in vm_object_page_remove()
1466 lockmgr(&object->backing_lk, LK_RELEASE); in vm_object_page_remove()
1477 vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp, in vm_object_page_remove()
1485 if (object->type != OBJT_SWAP || clean_only == FALSE) { in vm_object_page_remove()
1487 swap_pager_freespace_all(object); in vm_object_page_remove()
1489 swap_pager_freespace(object, info.start_pindex, in vm_object_page_remove()
1496 vm_object_pip_wakeup(object); in vm_object_page_remove()
1497 vm_object_drop(object); in vm_object_page_remove()
1512 if (info->object != p->object || in vm_object_page_remove_callback()
1516 info->object, p); in vm_object_page_remove_callback()
1524 if (info->object != p->object) { in vm_object_page_remove_callback()
1527 info->object, p); in vm_object_page_remove_callback()
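
vm_object_page_remove() brackets its scan with vm_object_pip_add()/vm_object_pip_wakeup(), and vm_object_terminate() above drains the same counter with vm_object_pip_wait() before tearing the object down. A sketch of that paging-in-progress bracket using POSIX threads; struct pobj and the pobj_pip_*() helpers are hypothetical, not the kernel primitives.

#include <pthread.h>

struct pobj {
	pthread_mutex_t	lock;
	pthread_cond_t	cv;
	int		paging_in_progress;
};

static struct pobj pobj0 = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cv = PTHREAD_COND_INITIALIZER,
};

/* Announce an in-flight operation that teardown must wait for. */
static void
pobj_pip_add(struct pobj *obj)
{
	pthread_mutex_lock(&obj->lock);
	obj->paging_in_progress++;
	pthread_mutex_unlock(&obj->lock);
}

/* Retire the operation and wake anyone draining the count. */
static void
pobj_pip_wakeup(struct pobj *obj)
{
	pthread_mutex_lock(&obj->lock);
	if (--obj->paging_in_progress == 0)
		pthread_cond_broadcast(&obj->cv);
	pthread_mutex_unlock(&obj->lock);
}

/* Block until no operation is in flight. */
static void
pobj_pip_wait(struct pobj *obj)
{
	pthread_mutex_lock(&obj->lock);
	while (obj->paging_in_progress != 0)
		pthread_cond_wait(&obj->cv, &obj->lock);
	pthread_mutex_unlock(&obj->lock);
}
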
1688 vm_object_set_writeable_dirty(vm_object_t object) in vm_object_set_writeable_dirty() argument
1697 if ((object->flags & (OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY)) != in vm_object_set_writeable_dirty()
1699 vm_object_set_flag(object, OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY); in vm_object_set_writeable_dirty()
1701 if (object->type == OBJT_VNODE && in vm_object_set_writeable_dirty()
1702 (vp = (struct vnode *)object->handle) != NULL) { in vm_object_set_writeable_dirty()
1729 static int _vm_object_in_map (vm_map_t map, vm_object_t object,
1731 static int vm_object_in_map (vm_object_t object);
1737 _vm_object_in_map(vm_map_t map, vm_object_t object, vm_map_entry_t entry) in _vm_object_in_map() argument
1750 if( _vm_object_in_map(map, object, tmpe)) { in _vm_object_in_map()
1763 if( _vm_object_in_map(tmpm, object, tmpe)) { in _vm_object_in_map()
1772 if (ba->object == object) in _vm_object_in_map()
1786 vm_object_t object; member
1794 vm_object_in_map(vm_object_t object) in vm_object_in_map() argument
1799 info.object = object; in vm_object_in_map()
1804 if( _vm_object_in_map(kernel_map, object, 0)) in vm_object_in_map()
1806 if( _vm_object_in_map(pager_map, object, 0)) in vm_object_in_map()
1808 if( _vm_object_in_map(buffer_map, object, 0)) in vm_object_in_map()
1822 if (_vm_object_in_map(&p->p_vmspace->vm_map, info->object, 0)) { in vm_object_in_map_callback()
1833 vm_object_t object; in DB_SHOW_COMMAND() local
1842 for (object = TAILQ_FIRST(&hash->list); in DB_SHOW_COMMAND()
1843 object != NULL; in DB_SHOW_COMMAND()
1844 object = TAILQ_NEXT(object, object_entry)) { in DB_SHOW_COMMAND()
1845 if (object->type == OBJT_MARKER) in DB_SHOW_COMMAND()
1847 if (object->handle != NULL || in DB_SHOW_COMMAND()
1848 (object->type != OBJT_DEFAULT && in DB_SHOW_COMMAND()
1849 object->type != OBJT_SWAP)) { in DB_SHOW_COMMAND()
1852 if (object->ref_count == 0) { in DB_SHOW_COMMAND()
1855 (long)object->size); in DB_SHOW_COMMAND()
1857 if (vm_object_in_map(object)) in DB_SHOW_COMMAND()
1861 object->ref_count, (u_long)object->size, in DB_SHOW_COMMAND()
1862 (u_long)object->size); in DB_SHOW_COMMAND()
1870 DB_SHOW_COMMAND(object, vm_object_print_static) in DB_SHOW_COMMAND() argument
1873 vm_object_t object = (vm_object_t)addr; in DB_SHOW_COMMAND() local
1883 if (object == NULL) in DB_SHOW_COMMAND()
1888 object, (int)object->type, (u_long)object->size, in DB_SHOW_COMMAND()
1889 object->resident_page_count, object->ref_count, object->flags); in DB_SHOW_COMMAND()
1900 RB_FOREACH(p, vm_page_rb_tree, &object->rb_memq) { in DB_SHOW_COMMAND()
1942 vm_object_t object; in DB_SHOW_COMMAND() local
1949 for (object = TAILQ_FIRST(&hash->list); in DB_SHOW_COMMAND()
1950 object != NULL; in DB_SHOW_COMMAND()
1951 object = TAILQ_NEXT(object, object_entry)) { in DB_SHOW_COMMAND()
1958 if (object->type == OBJT_MARKER) in DB_SHOW_COMMAND()
1960 db_printf("new object: %p\n", (void *)object); in DB_SHOW_COMMAND()
1970 osize = object->size; in DB_SHOW_COMMAND()
1974 m = vm_page_lookup(object, idx); in DB_SHOW_COMMAND()
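
Both DB_SHOW_COMMAND() walkers above step through a hash bucket's object list and skip OBJT_MARKER entries, which are placeholder list elements rather than real VM objects. A sketch of that walk with the <sys/queue.h> TAILQ macros; wobj, WOBJ_MARKER, and the visit callback are hypothetical.

#include <sys/queue.h>

enum wobj_type { WOBJ_DEFAULT, WOBJ_VNODE, WOBJ_MARKER };

struct wobj {
	enum wobj_type		type;
	TAILQ_ENTRY(wobj)	entry;
};

TAILQ_HEAD(wobj_list, wobj);

/* Visit every real object in one bucket, skipping marker placeholders. */
static void
wobj_bucket_walk(struct wobj_list *list, void (*visit)(struct wobj *))
{
	struct wobj *obj;

	for (obj = TAILQ_FIRST(list); obj != NULL;
	     obj = TAILQ_NEXT(obj, entry)) {
		if (obj->type == WOBJ_MARKER)
			continue;
		visit(obj);
	}
}
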