Lines matching defs:m (FreeBSD sys/vm/vm_page.h)
369 #define vm_page_lock(m) vm_page_lock_KBI((m), LOCK_FILE, LOCK_LINE)
370 #define vm_page_unlock(m) vm_page_unlock_KBI((m), LOCK_FILE, LOCK_LINE)
371 #define vm_page_trylock(m) vm_page_trylock_KBI((m), LOCK_FILE, LOCK_LINE)
373 #define vm_page_lockptr(m) (PA_LOCKPTR(VM_PAGE_TO_PHYS((m))))
374 #define vm_page_lock(m) mtx_lock(vm_page_lockptr((m)))
375 #define vm_page_unlock(m) mtx_unlock(vm_page_lockptr((m)))
376 #define vm_page_trylock(m) mtx_trylock(vm_page_lockptr((m)))
379 #define vm_page_assert_locked(m) \
380 vm_page_assert_locked_KBI((m), __FILE__, __LINE__)
381 #define vm_page_lock_assert(m, a) \
382 vm_page_lock_assert_KBI((m), (a), __FILE__, __LINE__)
384 #define vm_page_assert_locked(m)
385 #define vm_page_lock_assert(m, a)
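Usage sketch (not from the header): the lock macros above resolve either to the
KBI wrappers or directly to a pool mutex keyed by the page's physical address,
so callers pair them like any mutex:

    vm_page_lock(m);
    /* ... touch state protected by the page lock ... */
    vm_page_unlock(m);

    if (vm_page_trylock(m)) {
        /* ... */
        vm_page_unlock(m);
    }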
598 bool vm_page_busy_acquire(vm_page_t m, int allocflags);
599 void vm_page_busy_downgrade(vm_page_t m);
600 int vm_page_busy_tryupgrade(vm_page_t m);
601 bool vm_page_busy_sleep(vm_page_t m, const char *msg, int allocflags);
602 void vm_page_busy_sleep_unlocked(vm_object_t obj, vm_page_t m,
603     vm_pindex_t pindex, const char *wmesg, int allocflags);
604 void vm_page_free(vm_page_t m);
605 void vm_page_free_zero(vm_page_t m);
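A hedged sketch of the busy/free pairing these declarations imply, assuming the
caller holds the object lock that vm_page_free() expects:

    if (vm_page_busy_acquire(m, VM_ALLOC_WAITFAIL)) {
        /* m is exclusively busied; freeing consumes the busy state. */
        vm_page_free(m);
    }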
608 void vm_page_advise(vm_page_t m, int advice);
628 void vm_page_bits_set(vm_page_t m, vm_page_bits_t *bits, vm_page_bits_t set);
642 void vm_page_dequeue(vm_page_t m);
643 void vm_page_dequeue_deferred(vm_page_t m);
648 void vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
650 void vm_page_init_page(vm_page_t m, vm_paddr_t pa, int segind, int pool);
652 void vm_page_invalid(vm_page_t m);
653 void vm_page_iter_free(struct pctrie_iter *pages, vm_page_t m);
657 bool vm_page_iter_remove(struct pctrie_iter *pages, vm_page_t m);
658 bool vm_page_iter_rename(struct pctrie_iter *old_pages, vm_page_t m,
659     vm_object_t new_object, vm_pindex_t new_pindex);
660 void vm_page_launder(vm_page_t m);
663 vm_page_t vm_page_next(vm_page_t m);
665 void vm_page_pqbatch_submit(vm_page_t m, uint8_t queue);
666 bool vm_page_pqstate_commit(vm_page_t m, vm_page_astate_t *old,
667     vm_page_astate_t new);
668 vm_page_t vm_page_prev(vm_page_t m);
669 bool vm_page_ps_test(vm_page_t m, int psind, int flags, vm_page_t skip_m);
670 void vm_page_putfake(vm_page_t m);
671 void vm_page_readahead_finish(vm_page_t m);
679 void vm_page_reference(vm_page_t m);
682 void vm_page_release(vm_page_t m, int flags);
683 void vm_page_release_locked(vm_page_t m, int flags);
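For example, a caller done with a wired page might drop its wiring and let the
release path try to free the page (VPR_TRYFREE is a flag defined elsewhere in
vm_page.h):

    vm_page_release(m, VPR_TRYFREE);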
689 int vm_page_sbusied(vm_page_t m);
690 vm_page_bits_t vm_page_set_dirty(vm_page_t m);
691 void vm_page_set_valid_range(vm_page_t m, int base, int size);
693 void vm_page_sunbusy(vm_page_t m);
694 bool vm_page_try_remove_all(vm_page_t m);
695 bool vm_page_try_remove_write(vm_page_t m);
696 int vm_page_trysbusy(vm_page_t m);
697 int vm_page_tryxbusy(vm_page_t m);
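A minimal sketch of the non-blocking busy calls, assuming m stays referenced
while the busy state is held:

    if (vm_page_trysbusy(m)) {
        /* read-only access to the page's contents */
        vm_page_sunbusy(m);
    }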
699 void vm_page_unswappable(vm_page_t m);
700 void vm_page_unwire(vm_page_t m, uint8_t queue);
701 bool vm_page_unwire_noq(vm_page_t m);
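The unwire pair splits queue policy from reference counting; a sketch:

    vm_page_unwire(m, PQ_INACTIVE);    /* drop a wiring, requeue */

    if (vm_page_unwire_noq(m)) {
        /* last wiring dropped; the caller chooses the queue, if any */
    }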
702 void vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
704 bool vm_page_wire_mapped(vm_page_t m);
705 void vm_page_xunbusy_hard(vm_page_t m);
706 void vm_page_xunbusy_hard_unchecked(vm_page_t m);
710 void vm_page_valid(vm_page_t m);
714 void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid);
717 void vm_page_dirty_KBI(vm_page_t m);
718 void vm_page_lock_KBI(vm_page_t m, const char *file, int line);
719 void vm_page_unlock_KBI(vm_page_t m, const char *file, int line);
720 int vm_page_trylock_KBI(vm_page_t m, const char *file, int line);
722 void vm_page_assert_locked_KBI(vm_page_t m, const char *file, int line);
723 void vm_page_lock_assert_KBI(vm_page_t m, int a, const char *file, int line);
726 #define vm_page_busy_fetch(m) atomic_load_int(&(m)->busy_lock)
728 #define vm_page_assert_busied(m) \
729     KASSERT(vm_page_busied(m), \
730         ("vm_page_assert_busied: page %p not busy @ %s:%d", \
731         (m), __FILE__, __LINE__))
733 #define vm_page_assert_sbusied(m) \
734     KASSERT(vm_page_sbusied(m), \
735         ("vm_page_assert_sbusied: page %p not shared busy @ %s:%d", \
736         (m), __FILE__, __LINE__))
738 #define vm_page_assert_unbusied(m) \
739     KASSERT((vm_page_busy_fetch(m) & ~VPB_BIT_WAITERS) != \
740         VPB_CURTHREAD_EXCLUSIVE, \
741         ("vm_page_assert_unbusied: page %p busy_lock %#x owned" \
742         " by me (%p) @ %s:%d", \
743         (m), (m)->busy_lock, curthread, __FILE__, __LINE__))
745 #define vm_page_assert_xbusied_unchecked(m) do { \
746     KASSERT(vm_page_xbusied(m), \
747         ("vm_page_assert_xbusied: page %p not exclusive busy @ %s:%d", \
748         (m), __FILE__, __LINE__)); \
749 } while (0)
750 #define vm_page_assert_xbusied(m) do { \
751     vm_page_assert_xbusied_unchecked(m); \
752     KASSERT((vm_page_busy_fetch(m) & ~VPB_BIT_WAITERS) == \
753         VPB_CURTHREAD_EXCLUSIVE, \
754         ("vm_page_assert_xbusied: page %p busy_lock %#x not owned" \
755         " by me @ %s:%d", \
756         (m), (m)->busy_lock, curthread, __FILE__, __LINE__)); \
757 } while (0)
759 #define vm_page_busied(m) \
760 (vm_page_busy_fetch(m) != VPB_UNBUSIED)
762 #define vm_page_xbusied(m) \
763 ((vm_page_busy_fetch(m) & VPB_SINGLE_EXCLUSIVE) != 0)
765 #define vm_page_busy_freed(m) \
766 (vm_page_busy_fetch(m) == VPB_FREED)
768 /* Note: page m's lock must not be owned by the caller. */
769 #define vm_page_xunbusy(m) do { \
770     if (!atomic_cmpset_rel_int(&(m)->busy_lock, \
771         VPB_CURTHREAD_EXCLUSIVE, VPB_UNBUSIED)) \
772         vm_page_xunbusy_hard(m); \
773 } while (0)
774 #define vm_page_xunbusy_unchecked(m) do { \
775     if (!atomic_cmpset_rel_int(&(m)->busy_lock, \
776         VPB_CURTHREAD_EXCLUSIVE, VPB_UNBUSIED)) \
777         vm_page_xunbusy_hard_unchecked(m); \
778 } while (0)
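A hedged pairing of the acquire/release primitives above (allocflags 0 asks
vm_page_busy_acquire() to sleep until the busy lock is obtained; it can still
fail if the page's identity changes):

    if (vm_page_busy_acquire(m, 0)) {
        /* exclusive access; vm_page_xbusied(m) is now true */
        vm_page_xunbusy(m);
    }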
781 void vm_page_object_busy_assert(vm_page_t m);
782 #define VM_PAGE_OBJECT_BUSY_ASSERT(m) vm_page_object_busy_assert(m)
783 void vm_page_assert_pga_writeable(vm_page_t m, uint16_t bits);
784 #define VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits) \
785 vm_page_assert_pga_writeable(m, bits)
791 #define vm_page_xbusy_claim(m) do { \
792     u_int _busy_lock; \
794     vm_page_assert_xbusied_unchecked((m)); \
795     do { \
796         _busy_lock = vm_page_busy_fetch(m); \
797     } while (!atomic_cmpset_int(&(m)->busy_lock, _busy_lock, \
798         (_busy_lock & VPB_BIT_WAITERS) | VPB_CURTHREAD_EXCLUSIVE)); \
799 } while (0)
801 #define VM_PAGE_OBJECT_BUSY_ASSERT(m) (void)0
802 #define VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits) (void)0
803 #define vm_page_xbusy_claim(m)
816 vm_page_astate_load(vm_page_t m)
820 a._bits = atomic_load_32(&m->a._bits);
828 vm_page_astate_fcmpset(vm_page_t m, vm_page_astate_t *old, vm_page_astate_t new)
832 ("%s: invalid head requeue request for page %p", __func__, m));
834 ("%s: setting PGA_ENQUEUED with PQ_NONE in page %p", __func__, m));
838 return (atomic_fcmpset_32(&m->a._bits, &old->_bits, new._bits) != 0);
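The fcmpset wrapper enables the standard lock-free update loop; on failure it
refreshes *old, so the new value is recomputed from current state. A sketch
(PGA_REFERENCED is illustrative):

    vm_page_astate_t old, new;

    old = vm_page_astate_load(m);
    do {
        new = old;
        new.flags |= PGA_REFERENCED;
    } while (!vm_page_astate_fcmpset(m, &old, new));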
845 vm_page_aflag_clear(vm_page_t m, uint16_t bits)
854 addr = (void *)&m->a;
863 vm_page_aflag_set(vm_page_t m, uint16_t bits)
867 VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits);
874 addr = (void *)&m->a;
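For instance, subject to the PGA_* write rules the assertion above enforces:

    vm_page_aflag_set(m, PGA_REFERENCED);
    vm_page_aflag_clear(m, PGA_REFERENCED);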
890 vm_page_dirty(vm_page_t m)
895 vm_page_dirty_KBI(m);
897 m->dirty = VM_PAGE_BITS_ALL;
907 vm_page_undirty(vm_page_t m)
910 VM_PAGE_OBJECT_BUSY_ASSERT(m);
911 m->dirty = 0;
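Typical callers, as a sketch (vm_page_dirty() marks every DEV_BSIZE block of
the page dirty; vm_page_undirty() clears them all):

    vm_page_dirty(m);      /* e.g. after a write fault completes */
    vm_page_undirty(m);    /* e.g. after a successful pageout */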
926 * Return the index of the queue containing m.
929 vm_page_queue(vm_page_t m)
932 return (_vm_page_queue(vm_page_astate_load(m)));
936 vm_page_active(vm_page_t m)
939 return (vm_page_queue(m) == PQ_ACTIVE);
943 vm_page_inactive(vm_page_t m)
946 return (vm_page_queue(m) == PQ_INACTIVE);
950 vm_page_in_laundry(vm_page_t m)
954 queue = vm_page_queue(m);
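These predicates compose for queue checks, e.g.:

    if (vm_page_active(m) || vm_page_inactive(m) || vm_page_in_laundry(m)) {
        /* the page currently sits on a paging queue */
    }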
959 vm_page_clearref(vm_page_t m)
963 r = m->ref_count;
964 while (atomic_fcmpset_int(&m->ref_count, &r, r & (VPRC_BLOCKED |
965     VPRC_OBJREF)) == 0)
966     ;
975 vm_page_drop(vm_page_t m, u_int val)
984 old = atomic_fetchadd_int(&m->ref_count, -val);
986 ("vm_page_drop: page %p has an invalid refcount value", m));
999 vm_page_wired(vm_page_t m)
1002 return (VPRC_WIRE_COUNT(m->ref_count) > 0);
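A wired page must stay resident, so reclamation paths gate on this test; a
sketch:

    if (!vm_page_wired(m)) {
        /* safe to consider the page for pageout */
    }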
1006 vm_page_all_valid(vm_page_t m)
1009 return (m->valid == VM_PAGE_BITS_ALL);
1013 vm_page_any_valid(vm_page_t m)
1016 return (m->valid != 0);
1020 vm_page_none_valid(vm_page_t m)
1023 return (m->valid == 0);
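The validity predicates distinguish the common fast paths; a sketch:

    if (vm_page_all_valid(m)) {
        /* fully resident, no I/O needed */
    } else if (vm_page_none_valid(m)) {
        /* must zero-fill or page in before use */
    }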
1027 vm_page_domain(vm_page_t m __numa_used)
1032 segind = m->segind;
1033 KASSERT(segind < vm_phys_nsegs, ("segind %d m %p", segind, m));
1034 domn = vm_phys_segs[segind].domain;
1035 KASSERT(domn >= 0 && domn < vm_ndomains, ("domain %d m %p", domn, m));
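A hedged caller sketch (in kernels built without NUMA this function simply
returns 0):

    int domain = vm_page_domain(m);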