Lines matching defs:m — occurrences of the symbol "m" (a vm_page_t) in FreeBSD's sys/vm/vm_page.c, each prefixed with its source line number.

167 static void vm_page_alloc_check(vm_page_t m);
169 static bool _vm_page_busy_sleep(vm_object_t obj, vm_page_t m,
171 static void vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits);
172 static void vm_page_enqueue(vm_page_t m, uint8_t queue);
173 static bool vm_page_free_prep(vm_page_t m);
174 static void vm_page_free_toq(vm_page_t m);
176 static int vm_page_insert_after(vm_page_t m, vm_object_t object,
178 static void vm_page_insert_radixdone(vm_page_t m, vm_object_t object,
180 static void vm_page_mvqueue(vm_page_t m, const uint8_t queue,
184 static void vm_page_release_toq(vm_page_t m, uint8_t nqueue, bool noreuse);
332 vm_page_t m;
335 m = vm_phys_paddr_to_vm_page(pa);
336 if (m == NULL)
345 TAILQ_INSERT_TAIL(&blacklist_head, m, listq);
406 vm_page_t m;
415 TAILQ_FOREACH(m, &blacklist_head, listq) {
417 (uintmax_t)m->phys_addr);
504 vm_page_init_page(vm_page_t m, vm_paddr_t pa, int segind, int pool)
506 m->object = NULL;
507 m->ref_count = 0;
508 m->busy_lock = VPB_FREED;
509 m->flags = m->a.flags = 0;
510 m->phys_addr = pa;
511 m->a.queue = PQ_NONE;
512 m->psind = 0;
513 m->segind = segind;
514 m->order = VM_NFREEORDER;
515 m->pool = pool;
516 m->valid = m->dirty = 0;
517 pmap_page_init(m);
558 vm_page_t m;
775 m = &vm_page_array[ii];
776 vm_page_init_page(m, (first_page + ii) << PAGE_SHIFT, 0,
778 m->flags = PG_FICTITIOUS;
800 m = vm_phys_seg_paddr_to_vm_page(seg, startp);
802 startp < endp; startp += PAGE_SIZE, m++) {
803 vm_page_init_page(m, startp, segind,
836 m = vm_phys_seg_paddr_to_vm_page(seg, startp);
837 vm_page_init_page(m, startp, segind, pool);
840 vm_page_init_page(&m[j],
846 vm_phys_enqueue_contig(m, pool, pagecount);
877 vm_page_reference(vm_page_t m)
880 vm_page_aflag_set(m, PGA_REFERENCED);
891 vm_page_trybusy(vm_page_t m, int allocflags)
895 return (vm_page_trysbusy(m));
897 return (vm_page_tryxbusy(m));
908 vm_page_tryacquire(vm_page_t m, int allocflags)
912 locked = vm_page_trybusy(m, allocflags);
914 vm_page_wire(m);
925 vm_page_busy_acquire(vm_page_t m, int allocflags)
937 obj = atomic_load_ptr(&m->object);
939 if (vm_page_tryacquire(m, allocflags))
947 MPASS(locked || vm_page_wired(m));
948 if (_vm_page_busy_sleep(obj, m, m->pindex, "vmpba", allocflags,
953 KASSERT(m->object == obj || m->object == NULL,
955 m, obj));
965 vm_page_busy_downgrade(vm_page_t m)
969 vm_page_assert_xbusied(m);
971 x = vm_page_busy_fetch(m);
973 if (atomic_fcmpset_rel_int(&m->busy_lock,
978 wakeup(m);
988 vm_page_busy_tryupgrade(vm_page_t m)
992 vm_page_assert_sbusied(m);
994 x = vm_page_busy_fetch(m);
1001 if (!atomic_fcmpset_acq_int(&m->busy_lock, &x,
1014 vm_page_sbusied(vm_page_t m)
1018 x = vm_page_busy_fetch(m);
1028 vm_page_sunbusy(vm_page_t m)
1032 vm_page_assert_sbusied(m);
1034 x = vm_page_busy_fetch(m);
1039 if (atomic_fcmpset_int(&m->busy_lock, &x,
1046 if (!atomic_fcmpset_rel_int(&m->busy_lock, &x, VPB_UNBUSIED))
1050 wakeup(m);
1070 vm_page_busy_sleep(vm_page_t m, const char *wmesg, int allocflags)
1074 obj = m->object;
1077 return (_vm_page_busy_sleep(obj, m, m->pindex, wmesg, allocflags,
1094 vm_page_busy_sleep_unlocked(vm_object_t obj, vm_page_t m, vm_pindex_t pindex,
1099 (void)_vm_page_busy_sleep(obj, m, pindex, wmesg, allocflags, false);
1115 _vm_page_busy_sleep(vm_object_t obj, vm_page_t m, vm_pindex_t pindex,
1132 if (!vm_page_busied(m))
1136 sleepq_lock(m);
1137 x = vm_page_busy_fetch(m);
1145 m->object != obj || m->pindex != pindex) {
1146 sleepq_release(m);
1151 } while (!atomic_fcmpset_int(&m->busy_lock, &x, x | VPB_BIT_WAITERS));
1155 sleepq_add(m, NULL, wmesg, 0, 0);
1156 sleepq_wait(m, PVM);
1169 vm_page_trysbusy(vm_page_t m)
1174 obj = m->object;
1175 x = vm_page_busy_fetch(m);
1185 if (atomic_fcmpset_acq_int(&m->busy_lock, &x,
1191 obj = m->object;
1193 vm_page_sunbusy(m);
1207 vm_page_tryxbusy(vm_page_t m)
1211 if (atomic_cmpset_acq_int(&m->busy_lock, VPB_UNBUSIED,
1215 obj = m->object;
1217 vm_page_xunbusy(m);
1224 vm_page_xunbusy_hard_tail(vm_page_t m)
1226 atomic_store_rel_int(&m->busy_lock, VPB_UNBUSIED);
1228 wakeup(m);
1237 vm_page_xunbusy_hard(vm_page_t m)
1239 vm_page_assert_xbusied(m);
1240 vm_page_xunbusy_hard_tail(m);
1244 vm_page_xunbusy_hard_unchecked(vm_page_t m)
1246 vm_page_assert_xbusied_unchecked(m);
1247 vm_page_xunbusy_hard_tail(m);
1251 vm_page_busy_free(vm_page_t m)
1256 x = atomic_swap_int(&m->busy_lock, VPB_FREED);
1258 wakeup(m);
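
The lines above index the page busy-lock primitives (vm_page_busy_acquire() through vm_page_busy_free()). As an illustration only, here is a minimal sketch of how a caller typically takes and drops the exclusive busy lock on a resident page; the helper name and calling context are hypothetical and assume the usual kernel vm headers.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>

/* Hypothetical helper: exclusive-busy a resident page, then release it. */
static bool
xbusy_and_touch(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	VM_OBJECT_WLOCK(object);
	m = vm_page_lookup(object, pindex);
	if (m == NULL) {
		VM_OBJECT_WUNLOCK(object);
		return (false);
	}
	/*
	 * With allocflags 0 this sleeps (dropping and re-taking the object
	 * lock) until the exclusive busy lock is held; VM_ALLOC_NOWAIT or
	 * VM_ALLOC_WAITFAIL would allow it to return false instead.
	 */
	if (!vm_page_busy_acquire(m, 0)) {
		VM_OBJECT_WUNLOCK(object);
		return (false);
	}
	VM_OBJECT_WUNLOCK(object);

	/* ... operate on the exclusively busied page ... */

	vm_page_xunbusy(m);	/* wakes any VPB_BIT_WAITERS sleepers */
	return (true);
}
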
1279 vm_page_t m;
1282 m = vm_phys_paddr_to_vm_page(pa);
1283 if (m == NULL)
1284 m = vm_phys_fictitious_to_vm_page(pa);
1285 return (m);
1291 m = &vm_page_array[pi - first_page];
1292 return (m);
1310 vm_page_t m;
1312 m = uma_zalloc(fakepg_zone, M_WAITOK | M_ZERO);
1313 vm_page_initfake(m, paddr, memattr);
1314 return (m);
1318 vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
1321 if ((m->flags & PG_FICTITIOUS) != 0) {
1329 m->phys_addr = paddr;
1330 m->a.queue = PQ_NONE;
1332 m->flags = PG_FICTITIOUS;
1334 m->oflags = VPO_UNMANAGED;
1335 m->busy_lock = VPB_CURTHREAD_EXCLUSIVE;
1337 m->ref_count = 1;
1338 pmap_page_init(m);
1340 pmap_page_set_memattr(m, memattr);
1349 vm_page_putfake(vm_page_t m)
1352 KASSERT((m->oflags & VPO_UNMANAGED) != 0, ("managed %p", m));
1353 KASSERT((m->flags & PG_FICTITIOUS) != 0,
1354 ("vm_page_putfake: bad page %p", m));
1355 vm_page_assert_xbusied(m);
1356 vm_page_busy_free(m);
1357 uma_zfree(fakepg_zone, m);
1367 vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
1370 KASSERT((m->flags & PG_FICTITIOUS) != 0,
1371 ("vm_page_updatefake: bad page %p", m));
1372 m->phys_addr = paddr;
1373 pmap_page_set_memattr(m, memattr);
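
vm_page_getfake(), vm_page_initfake(), vm_page_updatefake() and vm_page_putfake() above manage fictitious pages that shadow device memory. A hedged sketch of their life cycle follows; the helper name, physical address and memory attribute are placeholders.

#include <sys/param.h>
#include <vm/vm.h>
#include <vm/vm_page.h>

static void
fake_page_demo(vm_paddr_t bar_pa)
{
	vm_page_t m;

	/* Allocated from fakepg_zone, marked PG_FICTITIOUS and xbusied. */
	m = vm_page_getfake(bar_pa, VM_MEMATTR_UNCACHEABLE);

	/* ... typically handed to a device pager object here ... */

	/* Point the same fake page at a different physical address. */
	vm_page_updatefake(m, bar_pa + PAGE_SIZE, VM_MEMATTR_UNCACHEABLE);

	/* Must still be exclusively busied and not in any object. */
	vm_page_putfake(m);
}
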
1382 vm_page_free(vm_page_t m)
1385 m->flags &= ~PG_ZERO;
1386 vm_page_free_toq(m);
1395 vm_page_free_zero(vm_page_t m)
1398 m->flags |= PG_ZERO;
1399 vm_page_free_toq(m);
1407 vm_page_readahead_finish(vm_page_t m)
1411 KASSERT(!vm_page_none_valid(m), ("%s: %p is invalid", __func__, m));
1419 if ((vm_page_busy_fetch(m) & VPB_BIT_WAITERS) != 0)
1420 vm_page_activate(m);
1422 vm_page_deactivate(m);
1423 vm_page_xunbusy_unchecked(m);
1431 vm_page_free_invalid(vm_page_t m)
1434 KASSERT(vm_page_none_valid(m), ("page %p is valid", m));
1435 KASSERT(!pmap_page_is_mapped(m), ("page %p is mapped", m));
1436 KASSERT(m->object != NULL, ("page %p has no object", m));
1437 VM_OBJECT_ASSERT_WLOCKED(m->object);
1443 vm_page_xbusy_claim(m);
1452 if (vm_page_remove(m))
1453 vm_page_free(m);
1469 vm_page_dirty_KBI(vm_page_t m)
1473 KASSERT(vm_page_all_valid(m), ("vm_page_dirty: page is invalid!"));
1474 m->dirty = VM_PAGE_BITS_ALL;
1489 vm_page_insert_lookup(vm_page_t m, vm_object_t object, vm_pindex_t pindex,
1495 KASSERT(m->object == NULL,
1496 ("vm_page_insert: page %p already inserted", m));
1501 m->object = object;
1502 m->pindex = pindex;
1503 m->ref_count |= VPRC_OBJREF;
1511 error = vm_radix_iter_insert(pages, m);
1513 error = vm_radix_insert_lookup_lt(&object->rtree, m, &mpred);
1515 error = vm_radix_insert(&object->rtree, m);
1517 m->object = NULL;
1518 m->pindex = 0;
1519 m->ref_count &= ~VPRC_OBJREF;
1526 vm_page_insert_radixdone(m, object, mpred);
1527 vm_pager_page_inserted(object, m);
1539 vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
1541 return (vm_page_insert_lookup(m, object, pindex, NULL, false, NULL,
1548 * Inserts the page "m" into the specified object at offset "pindex".
1556 vm_page_insert_after(vm_page_t m, vm_object_t object, vm_pindex_t pindex,
1559 return (vm_page_insert_lookup(m, object, pindex, NULL, false, mpred,
1566 * Tries to insert the page "m" into the specified object at offset
1576 vm_page_iter_insert(struct pctrie_iter *pages, vm_page_t m, vm_object_t object,
1579 return (vm_page_insert_lookup(m, object, pindex, pages, true, mpred,
1586 * Complete page "m" insertion into the specified object after the
1589 * The page "mpred" must precede the offset "m->pindex" within the
1595 vm_page_insert_radixdone(vm_page_t m, vm_object_t object, vm_page_t mpred)
1599 KASSERT(object != NULL && m->object == object,
1600 ("vm_page_insert_radixdone: page %p has inconsistent object", m));
1601 KASSERT((m->ref_count & VPRC_OBJREF) != 0,
1602 ("vm_page_insert_radixdone: page %p is missing object ref", m));
1606 KASSERT(mpred->pindex < m->pindex,
1609 m->pindex < TAILQ_NEXT(mpred, listq)->pindex,
1613 m->pindex < TAILQ_FIRST(&object->memq)->pindex,
1618 TAILQ_INSERT_AFTER(&object->memq, mpred, m, listq);
1620 TAILQ_INSERT_HEAD(&object->memq, m, listq);
1637 if (pmap_page_is_write_mapped(m))
1644 * Complete page "m" removal from the specified object after the radix trie
1651 vm_page_remove_radixdone(vm_page_t m)
1655 vm_page_assert_xbusied(m);
1656 object = m->object;
1658 KASSERT((m->ref_count & VPRC_OBJREF) != 0,
1659 ("page %p is missing its object ref", m));
1662 if ((m->a.flags & PGA_SWAP_FREE) != 0)
1663 vm_pager_page_unswapped(m);
1665 vm_pager_page_removed(object, m);
1666 m->object = NULL;
1671 TAILQ_REMOVE(&object->memq, m, listq);
1693 vm_page_free_object_prep(vm_page_t m)
1695 KASSERT(((m->oflags & VPO_UNMANAGED) != 0) ==
1696 ((m->object->flags & OBJ_UNMANAGED) != 0),
1698 __func__, m));
1699 vm_page_assert_xbusied(m);
1705 KASSERT((m->flags & PG_FICTITIOUS) != 0 ||
1706 m->ref_count == VPRC_OBJREF,
1708 __func__, m, m->ref_count));
1709 vm_page_remove_radixdone(m);
1710 m->ref_count -= VPRC_OBJREF;
1720 vm_page_iter_free(struct pctrie_iter *pages, vm_page_t m)
1723 vm_page_free_object_prep(m);
1724 vm_page_xunbusy(m);
1725 m->flags &= ~PG_ZERO;
1726 vm_page_free_toq(m);
1742 vm_page_remove(vm_page_t m)
1746 dropped = vm_page_remove_xbusy(m);
1747 vm_page_xunbusy(m);
1759 vm_page_iter_remove(struct pctrie_iter *pages, vm_page_t m)
1764 vm_page_remove_radixdone(m);
1765 dropped = (vm_page_drop(m, VPRC_OBJREF) == VPRC_OBJREF);
1766 vm_page_xunbusy(m);
1777 vm_page_radix_remove(vm_page_t m)
1781 mrem = vm_radix_remove(&m->object->rtree, m->pindex);
1782 KASSERT(mrem == m,
1783 ("removed page %p, expected page %p", mrem, m));
1793 vm_page_remove_xbusy(vm_page_t m)
1796 vm_page_radix_remove(m);
1797 vm_page_remove_radixdone(m);
1798 return (vm_page_drop(m, VPRC_OBJREF) == VPRC_OBJREF);
1881 vm_page_t m;
1883 m = vm_page_lookup_unlocked(object, pindex);
1884 KASSERT(m != NULL && (vm_page_busied(m) || vm_page_wired(m)) &&
1885 m->object == object && m->pindex == pindex,
1886 ("vm_page_relookup: Invalid page %p", m));
1887 return (m);
1897 vm_page_busy_release(vm_page_t m)
1901 x = vm_page_busy_fetch(m);
1906 if (atomic_fcmpset_int(&m->busy_lock, &x,
1913 ("vm_page_busy_release: %p xbusy not owned.", m));
1914 if (!atomic_fcmpset_rel_int(&m->busy_lock, &x, VPB_UNBUSIED))
1917 wakeup(m);
1933 vm_page_t m;
1936 if ((m = TAILQ_FIRST(&object->memq)) != NULL && m->pindex < pindex)
1937 m = vm_radix_lookup_ge(&object->rtree, pindex);
1938 return (m);
1964 vm_page_next(vm_page_t m)
1968 VM_OBJECT_ASSERT_LOCKED(m->object);
1969 if ((next = TAILQ_NEXT(m, listq)) != NULL) {
1970 MPASS(next->object == m->object);
1971 if (next->pindex != m->pindex + 1)
1984 vm_page_prev(vm_page_t m)
1988 VM_OBJECT_ASSERT_LOCKED(m->object);
1989 if ((prev = TAILQ_PREV(m, pglist, listq)) != NULL) {
1990 MPASS(prev->object == m->object);
1991 if (prev->pindex != m->pindex - 1)
2088 vm_page_iter_rename(struct pctrie_iter *old_pages, vm_page_t m,
2094 KASSERT((m->ref_count & VPRC_OBJREF) != 0,
2095 ("%s: page %p is missing object ref", __func__, m));
2096 VM_OBJECT_ASSERT_WLOCKED(m->object);
2104 opidx = m->pindex;
2105 m->pindex = new_pindex;
2106 if (vm_radix_insert_lookup_lt(&new_object->rtree, m, &mpred) != 0) {
2107 m->pindex = opidx;
2115 m->pindex = opidx;
2117 vm_page_remove_radixdone(m);
2120 m->pindex = new_pindex;
2121 m->object = new_object;
2123 vm_page_insert_radixdone(m, new_object, mpred);
2124 vm_page_dirty(m);
2125 vm_pager_page_inserted(new_object, m);
2182 vm_page_t m;
2187 m = vm_page_alloc_domain_after(object, pindex, domain, req,
2189 if (m != NULL)
2193 return (m);
2254 vm_page_t m;
2273 m = NULL;
2278 m = vm_page_alloc_nofree_domain(domain, req);
2279 if (m != NULL)
2287 (m = vm_reserv_alloc_page(object, pindex, domain, req, mpred)) !=
2294 m = uma_zalloc(vmd->vmd_pgcache[VM_FREEPOOL_DEFAULT].zone,
2296 if (m != NULL) {
2306 m = vm_phys_alloc_pages(domain, VM_FREEPOOL_DEFAULT, 0);
2308 if (m == NULL) {
2316 if (m == NULL) {
2329 vm_page_dequeue(m);
2330 vm_page_alloc_check(m);
2335 flags |= m->flags & PG_ZERO;
2340 m->flags = flags;
2341 m->a.flags = 0;
2342 m->oflags = (object->flags & OBJ_UNMANAGED) != 0 ? VPO_UNMANAGED : 0;
2343 m->pool = VM_FREEPOOL_DEFAULT;
2345 m->busy_lock = VPB_CURTHREAD_EXCLUSIVE;
2347 m->busy_lock = VPB_SHARERS_WORD(1);
2349 m->busy_lock = VPB_UNBUSIED;
2352 m->ref_count = 1;
2354 m->a.act_count = 0;
2356 if (vm_page_insert_after(m, object, pindex, mpred)) {
2359 m->ref_count = 0;
2361 KASSERT(m->object == NULL, ("page %p has object", m));
2362 m->oflags = VPO_UNMANAGED;
2363 m->busy_lock = VPB_UNBUSIED;
2365 vm_page_free_toq(m);
2377 pmap_page_set_memattr(m, object->memattr);
2379 return (m);
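
The block above is the core of vm_page_alloc_domain_after(), the object-based allocator. A minimal, hypothetical caller is sketched below, assuming the object write lock is held; the flags and zeroing policy are illustrative.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>

static vm_page_t
alloc_zeroed_page(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	VM_OBJECT_ASSERT_WLOCKED(object);
	m = vm_page_alloc(object, pindex,
	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
	if (m == NULL)
		return (NULL);
	/* PG_ZERO is set only if the page came pre-zeroed from the free pool. */
	if ((m->flags & PG_ZERO) == 0)
		pmap_zero_page(m);
	vm_page_valid(m);
	/* The page is returned exclusively busied unless VM_ALLOC_NOBUSY. */
	vm_page_xunbusy(m);
	return (m);
}
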
2426 vm_page_t m;
2434 m = vm_page_alloc_contig_domain(object, pindex, domain, req,
2436 if (m != NULL)
2446 return (m);
2493 vm_page_t m, m_ret, mpred;
2551 for (m = m_ret; m < &m_ret[npages]; m++) {
2552 vm_page_dequeue(m);
2553 vm_page_alloc_check(m);
2554 m->a.flags = 0;
2555 m->flags = (m->flags | PG_NODUMP) & flags;
2556 m->busy_lock = busy_lock;
2558 m->ref_count = 1;
2559 m->a.act_count = 0;
2560 m->oflags = oflags;
2561 m->pool = VM_FREEPOOL_DEFAULT;
2562 if (vm_page_iter_insert(&pages, m, object, pindex, mpred)) {
2565 KASSERT(m->object == NULL,
2566 ("page %p has object", m));
2567 mpred = m;
2568 for (m = m_ret; m < &m_ret[npages]; m++) {
2569 if (m <= mpred &&
2571 m->ref_count = 0;
2572 m->oflags = VPO_UNMANAGED;
2573 m->busy_lock = VPB_UNBUSIED;
2575 vm_page_free_toq(m);
2584 mpred = m;
2586 pmap_page_set_memattr(m, memattr);
2600 vm_page_t m;
2616 m = vm_page_alloc_nofree_domain(domain, req);
2617 if (m != NULL)
2622 m = uma_zalloc(vmd->vmd_pgcache[VM_FREEPOOL_DIRECT].zone,
2624 if (m != NULL) {
2632 m = vm_phys_alloc_pages(domain, VM_FREEPOOL_DIRECT, 0);
2634 if (m == NULL) {
2642 if (m == NULL) {
2649 vm_page_dequeue(m);
2650 vm_page_alloc_check(m);
2655 m->pindex = 0xdeadc0dedeadc0de;
2656 m->flags = (m->flags & PG_ZERO) | flags;
2657 m->a.flags = 0;
2658 m->oflags = VPO_UNMANAGED;
2659 m->pool = VM_FREEPOOL_DIRECT;
2660 m->busy_lock = VPB_UNBUSIED;
2663 m->ref_count = 1;
2666 if ((req & VM_ALLOC_ZERO) != 0 && (m->flags & PG_ZERO) == 0)
2667 pmap_zero_page(m);
2669 return (m);
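
vm_page_alloc_noobj_domain() above hands out unmanaged pages that never belong to a VM object. A sketch of the usual allocate/use/free cycle for such a page (the mapping step is elided and the helper name is made up):

#include <sys/param.h>
#include <vm/vm.h>
#include <vm/vm_page.h>

static void
noobj_page_demo(void)
{
	vm_page_t m;

	m = vm_page_alloc_noobj(VM_ALLOC_WAITOK | VM_ALLOC_WIRED |
	    VM_ALLOC_ZERO);
	/* VM_ALLOC_WAITOK sleeps until a page is available, so m != NULL. */

	/* ... map it with pmap_qenter() or PHYS_TO_DMAP() and use it ... */

	vm_page_unwire_noq(m);
	vm_page_free(m);
}
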
2692 vm_page_t m;
2716 m = &nqp->ma[nqp->offs++];
2720 return (m);
2727 vm_page_t m;
2732 m = vm_page_alloc_noobj_domain(domain, req);
2733 if (m != NULL)
2737 return (m);
2746 vm_page_t m;
2751 m = vm_page_alloc_noobj_contig_domain(domain, req, npages, low,
2753 if (m != NULL)
2757 return (m);
2765 vm_page_t m, m_ret;
2793 for (m = m_ret; m < &m_ret[npages]; m++) {
2794 vm_page_dequeue(m);
2795 vm_page_alloc_check(m);
2800 m->pindex = 0xdeadc0dedeadc0de;
2801 m->a.flags = 0;
2802 m->flags = (m->flags | PG_NODUMP) & flags;
2803 m->busy_lock = VPB_UNBUSIED;
2805 m->ref_count = 1;
2806 m->a.act_count = 0;
2807 m->oflags = VPO_UNMANAGED;
2808 m->pool = VM_FREEPOOL_DIRECT;
2816 if ((req & VM_ALLOC_ZERO) != 0 && (m->flags & PG_ZERO) == 0)
2817 pmap_zero_page(m);
2819 pmap_page_set_memattr(m, memattr);
2828 vm_page_alloc_check(vm_page_t m)
2831 KASSERT(m->object == NULL, ("page %p has object", m));
2832 KASSERT(m->a.queue == PQ_NONE &&
2833 (m->a.flags & PGA_QUEUE_STATE_MASK) == 0,
2835 m, m->a.queue, (m->a.flags & PGA_QUEUE_STATE_MASK)));
2836 KASSERT(m->ref_count == 0, ("page %p has references", m));
2837 KASSERT(vm_page_busy_freed(m), ("page %p is not freed", m));
2838 KASSERT(m->dirty == 0, ("page %p is dirty", m));
2839 KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT,
2841 m, pmap_page_get_memattr(m)));
2842 KASSERT(vm_page_none_valid(m), ("free page %p is valid", m));
2843 pmap_vm_page_alloc_check(m);
2879 vm_page_t m;
2886 m = (vm_page_t)store[i];
2887 vm_phys_free_pages(m, pgcache->pool, 0);
2921 vm_page_t m, m_run;
2932 for (m = m_start; m < m_end && run_len < npages; m += m_inc) {
2933 KASSERT((m->flags & PG_MARKER) == 0,
2934 ("page %p is PG_MARKER", m));
2935 KASSERT((m->flags & PG_FICTITIOUS) == 0 || m->ref_count >= 1,
2936 ("fictitious page %p has invalid ref count", m));
2947 if (m + npages > m_end)
2949 pa = VM_PAGE_TO_PHYS(m);
2963 if (vm_page_wired(m))
2966 else if ((level = vm_reserv_level(m)) >= 0 &&
2970 pa = VM_PAGE_TO_PHYS(m);
2975 else if ((object = atomic_load_ptr(&m->object)) != NULL) {
2982 if (object != m->object) {
2992 (level = vm_reserv_level_iffullpop(m)) >= 0) {
2995 pa = VM_PAGE_TO_PHYS(m);
3000 vm_page_queue(m) != PQ_NONE && !vm_page_busied(m)) {
3006 KASSERT(pmap_page_get_memattr(m) ==
3008 ("page %p has an unexpected memattr", m));
3009 KASSERT((m->oflags & (VPO_SWAPINPROG |
3011 ("page %p has unexpected oflags", m));
3026 } else if ((order = m->order) < VM_NFREEORDER) {
3058 m_run = m;
3097 vm_page_t m, m_end, m_new;
3104 m = m_run;
3106 for (; error == 0 && m < m_end; m++) {
3107 KASSERT((m->flags & (PG_FICTITIOUS | PG_MARKER)) == 0,
3108 ("page %p is PG_FICTITIOUS or PG_MARKER", m));
3114 if (vm_page_wired(m))
3116 else if ((object = atomic_load_ptr(&m->object)) != NULL) {
3123 if (m->object != object ||
3129 else if (vm_page_queue(m) != PQ_NONE &&
3130 vm_page_tryxbusy(m) != 0) {
3131 if (vm_page_wired(m)) {
3132 vm_page_xunbusy(m);
3136 KASSERT(pmap_page_get_memattr(m) ==
3138 ("page %p has an unexpected memattr", m));
3139 KASSERT(m->oflags == 0,
3140 ("page %p has unexpected oflags", m));
3142 if (!vm_page_none_valid(m)) {
3153 if ((m->flags & PG_NODUMP) != 0)
3180 vm_page_xunbusy(m);
3191 !vm_page_try_remove_all(m)) {
3192 vm_page_xunbusy(m);
3199 * Replace "m" with the new page. For
3200 * vm_page_replace(), "m" must be busy
3201 * and dequeued. Finally, change "m"
3204 m_new->a.flags = m->a.flags &
3209 pmap_copy_page(m, m_new);
3210 m_new->valid = m->valid;
3211 m_new->dirty = m->dirty;
3212 m->flags &= ~PG_ZERO;
3213 vm_page_dequeue(m);
3215 m->pindex, m) &&
3216 vm_page_free_prep(m))
3217 SLIST_INSERT_HEAD(&free, m,
3226 m->flags &= ~PG_ZERO;
3227 vm_page_dequeue(m);
3228 if (vm_page_free_prep(m))
3229 SLIST_INSERT_HEAD(&free, m,
3231 KASSERT(m->dirty == 0,
3232 ("page %p is dirty", m));
3239 MPASS(vm_page_domain(m) == domain);
3242 order = m->order;
3252 m += (1 << order) - 1;
3255 else if (vm_reserv_is_page_free(m))
3263 if ((m = SLIST_FIRST(&free)) != NULL) {
3270 MPASS(vm_page_domain(m) == domain);
3272 vm_phys_free_pages(m, m->pool, 0);
3274 } while ((m = SLIST_FIRST(&free)) != NULL);
3756 _vm_page_pagequeue(vm_page_t m, uint8_t queue)
3759 return (&vm_pagequeue_domain(m)->vmd_pagequeues[queue]);
3764 vm_page_pagequeue(vm_page_t m)
3767 return (_vm_page_pagequeue(m, vm_page_astate_load(m).queue));
3772 vm_page_pqstate_fcmpset(vm_page_t m, vm_page_astate_t *old, vm_page_astate_t new)
3778 if (__predict_true(vm_page_astate_fcmpset(m, old, new)))
3791 _vm_page_pqstate_commit_dequeue(struct vm_pagequeue *pq, vm_page_t m,
3797 KASSERT(vm_page_pagequeue(m) == pq,
3798 ("%s: queue %p does not match page %p", __func__, pq, m));
3813 next = TAILQ_NEXT(m, plinks.q);
3814 TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
3816 if (!vm_page_pqstate_fcmpset(m, old, new)) {
3818 TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
3820 TAILQ_INSERT_BEFORE(next, m, plinks.q);
3827 return (vm_page_pqstate_fcmpset(m, old, new));
3832 vm_page_pqstate_commit_dequeue(vm_page_t m, vm_page_astate_t *old,
3839 pq = _vm_page_pagequeue(m, old->queue);
3846 as = vm_page_astate_load(m);
3851 ret = _vm_page_pqstate_commit_dequeue(pq, m, old, new);
3861 _vm_page_pqstate_commit_requeue(struct vm_pagequeue *pq, vm_page_t m,
3872 if (!vm_page_pqstate_fcmpset(m, old, new))
3876 TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
3886 vmd = vm_pagequeue_domain(m);
3888 ("%s: invalid page queue for page %p", __func__, m));
3889 TAILQ_INSERT_BEFORE(&vmd->vmd_inacthead, m, plinks.q);
3891 TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
3901 vm_page_pqstate_commit_request(vm_page_t m, vm_page_astate_t *old,
3910 !vm_page_pqstate_fcmpset(m, old, new))
3912 vm_page_pqbatch_submit(m, new.queue);
3921 vm_page_pqstate_commit(vm_page_t m, vm_page_astate_t *old, vm_page_astate_t new)
3928 if (!vm_page_pqstate_commit_dequeue(m, old, new))
3931 vm_page_pqbatch_submit(m, new.queue);
3933 if (!vm_page_pqstate_fcmpset(m, old, new))
3937 vm_page_pqbatch_submit(m, new.queue);
3946 vm_pqbatch_process_page(struct vm_pagequeue *pq, vm_page_t m, uint8_t queue)
3954 KASSERT(pq == _vm_page_pagequeue(m, queue),
3955 ("%s: page %p does not belong to queue %p", __func__, m, pq));
3957 for (old = vm_page_astate_load(m);;) {
3963 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3964 ("%s: page %p is unmanaged", __func__, m));
3971 m, &old, new))) {
3978 m, &old, new))) {
4005 vm_page_pqbatch_submit(vm_page_t m, uint8_t queue)
4013 domain = vm_page_domain(m);
4016 slots_remaining = vm_batchqueue_insert(bq, m);
4040 vm_pqbatch_process_page(pq, m, queue);
4092 vm_page_dequeue_deferred(vm_page_t m)
4096 old = vm_page_astate_load(m);
4101 __func__, m));
4106 } while (!vm_page_pqstate_commit_request(m, &old, new));
4116 vm_page_dequeue(vm_page_t m)
4120 old = vm_page_astate_load(m);
4125 __func__, m));
4131 } while (!vm_page_pqstate_commit_dequeue(m, &old, new));
4140 vm_page_enqueue(vm_page_t m, uint8_t queue)
4143 KASSERT(m->a.queue == PQ_NONE &&
4144 (m->a.flags & PGA_QUEUE_STATE_MASK) == 0,
4145 ("%s: page %p is already enqueued", __func__, m));
4146 KASSERT(m->ref_count > 0,
4147 ("%s: page %p does not carry any references", __func__, m));
4149 m->a.queue = queue;
4150 if ((m->a.flags & PGA_REQUEUE) == 0)
4151 vm_page_aflag_set(m, PGA_REQUEUE);
4152 vm_page_pqbatch_submit(m, queue);
4167 vm_page_free_prep(vm_page_t m)
4177 if (PMAP_HAS_DMAP && (m->flags & PG_ZERO) != 0) {
4180 p = (uint64_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
4183 m, i, (uintmax_t)*p));
4186 KASSERT((m->flags & PG_NOFREE) == 0,
4188 if ((m->oflags & VPO_UNMANAGED) == 0) {
4189 KASSERT(!pmap_page_is_mapped(m),
4190 ("vm_page_free_prep: freeing mapped page %p", m));
4191 KASSERT((m->a.flags & (PGA_EXECUTABLE | PGA_WRITEABLE)) == 0,
4192 ("vm_page_free_prep: mapping flags set in page %p", m));
4194 KASSERT(m->a.queue == PQ_NONE,
4195 ("vm_page_free_prep: unmanaged page %p is queued", m));
4199 if (m->object != NULL) {
4200 vm_page_radix_remove(m);
4201 vm_page_free_object_prep(m);
4203 vm_page_assert_unbusied(m);
4205 vm_page_busy_free(m);
4211 if ((m->flags & PG_FICTITIOUS) != 0) {
4212 KASSERT(m->ref_count == 1,
4213 ("fictitious page %p is referenced", m));
4214 KASSERT(m->a.queue == PQ_NONE,
4215 ("fictitious page %p is queued", m));
4224 if ((m->oflags & VPO_UNMANAGED) == 0)
4225 vm_page_dequeue_deferred(m);
4227 m->valid = 0;
4228 vm_page_undirty(m);
4230 if (m->ref_count != 0)
4231 panic("vm_page_free_prep: page %p has references", m);
4236 if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT)
4237 pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT);
4245 if ((m->flags & PG_PCPU_CACHE) == 0 && vm_reserv_free_page(m))
4262 vm_page_free_toq(vm_page_t m)
4267 if (!vm_page_free_prep(m))
4270 vmd = vm_pagequeue_domain(m);
4271 zone = vmd->vmd_pgcache[m->pool].zone;
4272 if ((m->flags & PG_PCPU_CACHE) != 0 && zone != NULL) {
4273 uma_zfree(zone, m);
4277 vm_phys_free_pages(m, m->pool, 0);
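
vm_page_free_prep() and vm_page_free_toq() above implement the free path. As a hedged sketch, the hypothetical helper below releases a resident, managed page back to the allocator, skipping the page if it cannot be busied without sleeping or is still wired.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>

static void
drop_page(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	VM_OBJECT_ASSERT_WLOCKED(object);
	m = vm_page_lookup(object, pindex);
	if (m == NULL || !vm_page_busy_acquire(m, VM_ALLOC_NOWAIT))
		return;
	if (vm_page_wired(m)) {
		vm_page_xunbusy(m);
		return;
	}
	pmap_remove_all(m);	/* vm_page_free_prep() requires it unmapped */
	vm_page_free(m);	/* removes it from the object, drops the busy lock */
}
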
4292 vm_page_t m;
4299 while ((m = SLIST_FIRST(free)) != NULL) {
4302 vm_page_free_toq(m);
4315 vm_page_wire(vm_page_t m)
4320 if (m->object != NULL && !vm_page_busied(m) &&
4321 !vm_object_busied(m->object))
4322 VM_OBJECT_ASSERT_LOCKED(m->object);
4324 KASSERT((m->flags & PG_FICTITIOUS) == 0 ||
4325 VPRC_WIRE_COUNT(m->ref_count) >= 1,
4326 ("vm_page_wire: fictitious page %p has zero wirings", m));
4328 old = atomic_fetchadd_int(&m->ref_count, 1);
4330 ("vm_page_wire: counter overflow for page %p", m));
4332 if ((m->oflags & VPO_UNMANAGED) == 0)
4333 vm_page_aflag_set(m, PGA_DEQUEUE);
4346 vm_page_wire_mapped(vm_page_t m)
4350 old = atomic_load_int(&m->ref_count);
4353 ("vm_page_wire_mapped: wiring unreferenced page %p", m));
4356 } while (!atomic_fcmpset_int(&m->ref_count, &old, old + 1));
4359 if ((m->oflags & VPO_UNMANAGED) == 0)
4360 vm_page_aflag_set(m, PGA_DEQUEUE);
4372 vm_page_unwire_managed(vm_page_t m, uint8_t nqueue, bool noreuse)
4376 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4377 ("%s: page %p is unmanaged", __func__, m));
4384 old = atomic_load_int(&m->ref_count);
4389 ("vm_page_unwire: wire count underflow for page %p", m));
4399 if ((vm_page_astate_load(m).flags & PGA_DEQUEUE) == 0)
4400 vm_page_aflag_set(m, PGA_DEQUEUE);
4409 vm_page_release_toq(m, nqueue, noreuse);
4411 vm_page_aflag_clear(m, PGA_DEQUEUE);
4413 } while (!atomic_fcmpset_rel_int(&m->ref_count, &old, old - 1));
4418 vm_page_free(m);
4432 vm_page_unwire(vm_page_t m, uint8_t nqueue)
4437 nqueue, m));
4439 if ((m->oflags & VPO_UNMANAGED) != 0) {
4440 if (vm_page_unwire_noq(m) && m->ref_count == 0)
4441 vm_page_free(m);
4444 vm_page_unwire_managed(m, nqueue, false);
4454 vm_page_unwire_noq(vm_page_t m)
4458 old = vm_page_drop(m, 1);
4460 ("%s: counter underflow for page %p", __func__, m));
4461 KASSERT((m->flags & PG_FICTITIOUS) == 0 || VPRC_WIRE_COUNT(old) > 1,
4462 ("%s: missing ref on fictitious page %p", __func__, m));
4466 if ((m->oflags & VPO_UNMANAGED) == 0)
4467 vm_page_aflag_clear(m, PGA_DEQUEUE);
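
The wiring interface above (vm_page_wire(), vm_page_wire_mapped(), vm_page_unwire(), vm_page_unwire_noq()) pins pages against reclamation. An illustrative pairing, assuming the caller already busies the page or holds the object lock as vm_page_wire() expects for managed pages:

#include <sys/param.h>
#include <vm/vm.h>
#include <vm/vm_page.h>

static void
pin_and_unpin(vm_page_t m)
{
	vm_page_wire(m);		/* new wiring; PGA_DEQUEUE is set */

	/* ... the page cannot be reclaimed while wired ... */

	/*
	 * PQ_ACTIVE is where vm_page_unwire() requeues the page once the
	 * last wiring is dropped; the page is freed instead if that wiring
	 * was also the last reference.
	 */
	vm_page_unwire(m, PQ_ACTIVE);
}
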
4478 vm_page_mvqueue(vm_page_t m, const uint8_t nqueue, const uint16_t nflag)
4482 KASSERT(m->ref_count > 0,
4483 ("%s: page %p does not carry any references", __func__, m));
4487 if ((m->oflags & VPO_UNMANAGED) != 0 || vm_page_wired(m))
4490 old = vm_page_astate_load(m);
4510 } while (!vm_page_pqstate_commit(m, &old, new));
4517 vm_page_activate(vm_page_t m)
4520 vm_page_mvqueue(m, PQ_ACTIVE, PGA_REQUEUE);
4528 vm_page_deactivate(vm_page_t m)
4531 vm_page_mvqueue(m, PQ_INACTIVE, PGA_REQUEUE);
4535 vm_page_deactivate_noreuse(vm_page_t m)
4538 vm_page_mvqueue(m, PQ_INACTIVE, PGA_REQUEUE_HEAD);
4545 vm_page_launder(vm_page_t m)
4548 vm_page_mvqueue(m, PQ_LAUNDRY, PGA_REQUEUE);
4555 vm_page_unswappable(vm_page_t m)
4558 VM_OBJECT_ASSERT_LOCKED(m->object);
4559 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4560 ("page %p already unswappable", m));
4562 vm_page_dequeue(m);
4563 vm_page_enqueue(m, PQ_UNSWAPPABLE);
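
vm_page_activate(), vm_page_deactivate(), vm_page_deactivate_noreuse() and vm_page_launder() above are thin wrappers around vm_page_mvqueue(). A small sketch of the common pattern of hinting reclaim order before unbusying a resident page (the helper name is made up):

#include <sys/param.h>
#include <vm/vm.h>
#include <vm/vm_page.h>

static void
done_with_page(vm_page_t m, bool will_reuse)
{
	if (will_reuse)
		vm_page_activate(m);		/* move toward PQ_ACTIVE */
	else
		vm_page_deactivate_noreuse(m);	/* head of PQ_INACTIVE */
	vm_page_xunbusy(m);
}
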
4570 vm_page_release_toq(vm_page_t m, uint8_t nqueue, const bool noreuse)
4585 if (noreuse || vm_page_none_valid(m)) {
4592 old = vm_page_astate_load(m);
4609 } while (!vm_page_pqstate_commit(m, &old, new));
4616 vm_page_release(vm_page_t m, int flags)
4620 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4621 ("vm_page_release: page %p is unmanaged", m));
4625 object = atomic_load_ptr(&m->object);
4629 if (vm_page_busied(m) || !VM_OBJECT_TRYWLOCK(object))
4631 if (object == m->object) {
4632 vm_page_release_locked(m, flags);
4639 vm_page_unwire_managed(m, PQ_INACTIVE, flags != 0);
4644 vm_page_release_locked(vm_page_t m, int flags)
4647 VM_OBJECT_ASSERT_WLOCKED(m->object);
4648 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4649 ("vm_page_release_locked: page %p is unmanaged", m));
4651 if (vm_page_unwire_noq(m)) {
4653 (m->object->ref_count == 0 || !pmap_page_is_mapped(m)) &&
4654 m->dirty == 0 && vm_page_tryxbusy(m)) {
4660 if (__predict_true(!vm_page_wired(m))) {
4661 vm_page_free(m);
4664 vm_page_xunbusy(m);
4666 vm_page_release_toq(m, PQ_INACTIVE, flags != 0);
4672 vm_page_try_blocked_op(vm_page_t m, void (*op)(vm_page_t))
4676 KASSERT(m->object != NULL && (m->oflags & VPO_UNMANAGED) == 0,
4677 ("vm_page_try_blocked_op: page %p has no object", m));
4678 KASSERT(vm_page_busied(m),
4679 ("vm_page_try_blocked_op: page %p is not busy", m));
4680 VM_OBJECT_ASSERT_LOCKED(m->object);
4682 old = atomic_load_int(&m->ref_count);
4685 ("vm_page_try_blocked_op: page %p has no references", m));
4687 ("vm_page_try_blocked_op: page %p blocks wirings", m));
4690 } while (!atomic_fcmpset_int(&m->ref_count, &old, old | VPRC_BLOCKED));
4692 (op)(m);
4698 old = vm_page_drop(m, VPRC_BLOCKED);
4699 KASSERT(!VM_OBJECT_WOWNED(m->object) ||
4702 old, m));
4710 vm_page_try_remove_all(vm_page_t m)
4713 return (vm_page_try_blocked_op(m, pmap_remove_all));
4720 vm_page_try_remove_write(vm_page_t m)
4723 return (vm_page_try_blocked_op(m, pmap_remove_write));
4732 vm_page_advise(vm_page_t m, int advice)
4735 VM_OBJECT_ASSERT_WLOCKED(m->object);
4736 vm_page_assert_xbusied(m);
4745 vm_page_undirty(m);
4748 vm_page_activate(m);
4752 if (advice != MADV_FREE && m->dirty == 0 && pmap_is_modified(m))
4753 vm_page_dirty(m);
4759 vm_page_aflag_clear(m, PGA_REFERENCED);
4767 if (m->dirty == 0)
4768 vm_page_deactivate_noreuse(m);
4769 else if (!vm_page_in_laundry(m))
4770 vm_page_launder(m);
4779 vm_page_grab_release(vm_page_t m, int allocflags)
4784 vm_page_sunbusy(m);
4786 vm_page_xunbusy(m);
4801 vm_page_grab_sleep(vm_object_t object, vm_page_t m, vm_pindex_t pindex,
4813 vm_page_reference(m);
4815 if (_vm_page_busy_sleep(object, m, pindex, wmesg, allocflags, locked) &&
4873 vm_page_t m;
4879 if ((m = vm_page_lookup(object, pindex)) != NULL) {
4880 if (!vm_page_tryacquire(m, allocflags)) {
4881 if (vm_page_grab_sleep(object, m, pindex, "pgrbwt",
4890 m = vm_page_alloc(object, pindex, vm_page_grab_pflags(allocflags));
4891 if (m == NULL) {
4896 if (allocflags & VM_ALLOC_ZERO && (m->flags & PG_ZERO) == 0)
4897 pmap_zero_page(m);
4900 vm_page_grab_release(m, allocflags);
4902 return (m);
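
vm_page_grab() above combines lookup, allocation and busying in one call. A hedged example that returns a wired, exclusively busied, valid page; zero-filling a newly created page is left to the caller here, and the helper name is illustrative.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>

static vm_page_t
grab_zeroed(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	VM_OBJECT_WLOCK(object);
	/* Without VM_ALLOC_NOWAIT/NOCREAT this sleeps until it succeeds. */
	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_WIRED);
	VM_OBJECT_WUNLOCK(object);

	if (vm_page_none_valid(m)) {
		pmap_zero_page(m);
		vm_page_valid(m);
	}
	return (m);		/* still wired and exclusively busied */
}
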
4920 vm_page_acquire_unlocked(vm_object_t object, vm_pindex_t pindex, vm_page_t m,
4923 if (m == NULL)
4924 m = vm_page_lookup_unlocked(object, pindex);
4925 for (; m != NULL; m = vm_page_lookup_unlocked(object, pindex)) {
4926 if (vm_page_trybusy(m, allocflags)) {
4927 if (m->object == object && m->pindex == pindex) {
4929 vm_page_wire(m);
4930 vm_page_grab_release(m, allocflags);
4934 vm_page_busy_release(m);
4938 if (!vm_page_grab_sleep(object, m, pindex, "pgnslp",
4942 return (m);
4952 vm_page_t m;
4955 m = vm_page_acquire_unlocked(object, pindex, NULL, allocflags);
4956 if (m == PAGE_NOT_ACQUIRED)
4958 if (m != NULL)
4959 return (m);
4971 m = vm_page_grab(object, pindex, allocflags);
4974 return (m);
4987 vm_page_t m;
5003 if ((m = vm_page_lookup(object, pindex)) != NULL) {
5013 if (!vm_page_trybusy(m,
5014 vm_page_all_valid(m) ? allocflags : 0)) {
5015 (void)vm_page_grab_sleep(object, m, pindex, "pgrbwt",
5019 if (vm_page_all_valid(m))
5022 vm_page_busy_release(m);
5029 } else if ((m = vm_page_alloc(object, pindex, pflags)) == NULL) {
5037 vm_page_assert_xbusied(m);
5042 ma[0] = m;
5049 ma[i] = vm_page_alloc(object, m->pindex + i,
5062 m = ma[0];
5075 MPASS(vm_page_all_valid(m));
5077 vm_page_zero_invalid(m, TRUE);
5081 vm_page_wire(m);
5082 if ((allocflags & VM_ALLOC_SBUSY) != 0 && vm_page_xbusied(m))
5083 vm_page_busy_downgrade(m);
5085 vm_page_busy_release(m);
5086 *mp = m;
5098 vm_page_t m;
5116 m = vm_page_acquire_unlocked(object, pindex, NULL, flags);
5117 if (m == PAGE_NOT_ACQUIRED)
5119 if (m != NULL) {
5120 if (vm_page_all_valid(m)) {
5122 vm_page_wire(m);
5123 vm_page_grab_release(m, allocflags);
5124 *mp = m;
5127 vm_page_busy_release(m);
5170 vm_page_t m, mpred;
5184 m = vm_page_mpred(object, pindex + i);
5185 if (m == NULL || m->pindex != pindex + i) {
5186 mpred = m;
5187 m = NULL;
5189 mpred = TAILQ_PREV(m, pglist, listq);
5191 if (m != NULL) {
5192 if (!vm_page_tryacquire(m, allocflags)) {
5193 if (vm_page_grab_sleep(object, m, pindex + i,
5201 m = vm_page_alloc_after(object, pindex + i,
5203 if (m == NULL) {
5210 if (vm_page_none_valid(m) &&
5212 if ((m->flags & PG_ZERO) == 0)
5213 pmap_zero_page(m);
5214 vm_page_valid(m);
5216 vm_page_grab_release(m, allocflags);
5217 ma[i] = mpred = m;
5218 m = vm_page_next(m);
5231 vm_page_t m;
5245 m = NULL;
5252 if (m == NULL || QMD_IS_TRASHED(m) || m->pindex != pindex ||
5253 atomic_load_ptr(&m->object) != object) {
5258 m = NULL;
5260 m = vm_page_acquire_unlocked(object, pindex, m, flags);
5261 if (m == PAGE_NOT_ACQUIRED)
5263 if (m == NULL)
5265 if ((flags & VM_ALLOC_ZERO) != 0 && vm_page_none_valid(m)) {
5266 if ((m->flags & PG_ZERO) == 0)
5267 pmap_zero_page(m);
5268 vm_page_valid(m);
5270 /* m will still be wired or busy according to flags. */
5271 vm_page_grab_release(m, allocflags);
5272 ma[i] = m;
5273 m = TAILQ_NEXT(m, listq);
5312 vm_page_bits_set(vm_page_t m, vm_page_bits_t *bits, vm_page_bits_t set)
5345 vm_page_bits_clear(vm_page_t m, vm_page_bits_t *bits, vm_page_bits_t clear)
5378 vm_page_bits_swap(vm_page_t m, vm_page_bits_t *bits, vm_page_bits_t newbits)
5444 vm_page_set_valid_range(vm_page_t m, int base, int size)
5449 vm_page_assert_busied(m);
5459 (m->valid & (1 << (base >> DEV_BSHIFT))) == 0)
5460 pmap_zero_page_area(m, frag, base - frag);
5469 (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0)
5470 pmap_zero_page_area(m, endoff,
5477 KASSERT((~m->valid & vm_page_bits(base, size) & m->dirty) == 0,
5478 ("vm_page_set_valid_range: page %p is dirty", m));
5484 if (vm_page_xbusied(m))
5485 m->valid |= pagebits;
5487 vm_page_bits_set(m, &m->valid, pagebits);
5495 vm_page_set_dirty(vm_page_t m)
5499 VM_PAGE_OBJECT_BUSY_ASSERT(m);
5501 if (vm_page_xbusied(m) && !pmap_page_is_write_mapped(m)) {
5502 old = m->dirty;
5503 m->dirty = VM_PAGE_BITS_ALL;
5505 old = vm_page_bits_swap(m, &m->dirty, VM_PAGE_BITS_ALL);
5506 if (old == 0 && (m->a.flags & PGA_SWAP_SPACE) != 0)
5507 vm_pager_page_unswapped(m);
5516 vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits)
5519 vm_page_assert_busied(m);
5528 if (vm_page_xbusied(m) && !pmap_page_is_write_mapped(m))
5529 m->dirty &= ~pagebits;
5531 vm_page_bits_clear(m, &m->dirty, pagebits);
5545 vm_page_set_validclean(vm_page_t m, int base, int size)
5550 vm_page_assert_busied(m);
5560 (m->valid & ((vm_page_bits_t)1 << (base >> DEV_BSHIFT))) == 0)
5561 pmap_zero_page_area(m, frag, base - frag);
5570 (m->valid & ((vm_page_bits_t)1 << (endoff >> DEV_BSHIFT))) == 0)
5571 pmap_zero_page_area(m, endoff,
5585 oldvalid = m->valid;
5587 if (vm_page_xbusied(m))
5588 m->valid |= pagebits;
5590 vm_page_bits_set(m, &m->valid, pagebits);
5616 pmap_clear_modify(m);
5617 m->dirty = 0;
5618 vm_page_aflag_clear(m, PGA_NOSYNC);
5619 } else if (oldvalid != VM_PAGE_BITS_ALL && vm_page_xbusied(m))
5620 m->dirty &= ~pagebits;
5622 vm_page_clear_dirty_mask(m, pagebits);
5626 vm_page_clear_dirty(vm_page_t m, int base, int size)
5629 vm_page_clear_dirty_mask(m, vm_page_bits(base, size));
5639 vm_page_set_invalid(vm_page_t m, int base, int size)
5648 object = m->object;
5650 vm_page_assert_busied(m);
5652 if (object->type == OBJT_VNODE && base == 0 && IDX_TO_OFF(m->pindex) +
5657 if (object->ref_count != 0 && vm_page_all_valid(m) && bits != 0)
5658 pmap_remove_all(m);
5659 KASSERT((bits == 0 && vm_page_all_valid(m)) ||
5660 !pmap_page_is_mapped(m),
5661 ("vm_page_set_invalid: page %p is mapped", m));
5662 if (vm_page_xbusied(m)) {
5663 m->valid &= ~bits;
5664 m->dirty &= ~bits;
5666 vm_page_bits_clear(m, &m->valid, bits);
5667 vm_page_bits_clear(m, &m->dirty, bits);
5680 vm_page_invalid(vm_page_t m)
5683 vm_page_assert_busied(m);
5684 VM_OBJECT_ASSERT_WLOCKED(m->object);
5685 MPASS(!pmap_page_is_mapped(m));
5687 if (vm_page_xbusied(m))
5688 m->valid = 0;
5690 vm_page_bits_clear(m, &m->valid, VM_PAGE_BITS_ALL);
5705 vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
5718 (m->valid & ((vm_page_bits_t)1 << i))) {
5720 pmap_zero_page_area(m,
5733 vm_page_valid(m);
5748 vm_page_is_valid(vm_page_t m, int base, int size)
5753 return (vm_page_any_valid(m) && (m->valid & bits) == bits);
5761 vm_page_ps_test(vm_page_t m, int psind, int flags, vm_page_t skip_m)
5766 object = m->object;
5770 KASSERT(psind <= m->psind,
5771 ("psind %d > psind %d of m %p", psind, m->psind, m));
5781 if (m[i].object != object)
5783 if (&m[i] == skip_m)
5785 if ((flags & PS_NONE_BUSY) != 0 && vm_page_busied(&m[i]))
5792 * on the object containing "m[i]".
5794 if (m[i].dirty != VM_PAGE_BITS_ALL)
5798 m[i].valid != VM_PAGE_BITS_ALL)
5808 vm_page_test_dirty(vm_page_t m)
5811 vm_page_assert_busied(m);
5812 if (m->dirty != VM_PAGE_BITS_ALL && pmap_is_modified(m))
5813 vm_page_dirty(m);
5817 vm_page_valid(vm_page_t m)
5820 vm_page_assert_busied(m);
5821 if (vm_page_xbusied(m))
5822 m->valid = VM_PAGE_BITS_ALL;
5824 vm_page_bits_set(m, &m->valid, VM_PAGE_BITS_ALL);
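
vm_page_valid(), vm_page_dirty() and the vm_page_bits_*() helpers above maintain the per-DEV_BSIZE valid and dirty masks. A minimal sketch of the usual sequence after filling a busied page with data (for instance on I/O completion); the helper name is hypothetical.

#include <sys/param.h>
#include <vm/vm.h>
#include <vm/vm_page.h>

static void
page_filled(vm_page_t m)
{
	vm_page_assert_busied(m);

	vm_page_valid(m);	/* sets m->valid to VM_PAGE_BITS_ALL */
	vm_page_dirty(m);	/* sets m->dirty to VM_PAGE_BITS_ALL */
}
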
5828 vm_page_lock_KBI(vm_page_t m, const char *file, int line)
5831 mtx_lock_flags_(vm_page_lockptr(m), 0, file, line);
5835 vm_page_unlock_KBI(vm_page_t m, const char *file, int line)
5838 mtx_unlock_flags_(vm_page_lockptr(m), 0, file, line);
5842 vm_page_trylock_KBI(vm_page_t m, const char *file, int line)
5845 return (mtx_trylock_flags_(vm_page_lockptr(m), 0, file, line));
5850 vm_page_assert_locked_KBI(vm_page_t m, const char *file, int line)
5853 vm_page_lock_assert_KBI(m, MA_OWNED, file, line);
5857 vm_page_lock_assert_KBI(vm_page_t m, int a, const char *file, int line)
5860 mtx_assert_(vm_page_lockptr(m), a, file, line);
5866 vm_page_object_busy_assert(vm_page_t m)
5873 if (m->object != NULL && !vm_page_busied(m))
5874 VM_OBJECT_ASSERT_BUSY(m->object);
5878 vm_page_assert_pga_writeable(vm_page_t m, uint16_t bits)
5889 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
5891 if (!vm_page_xbusied(m))
5892 VM_OBJECT_ASSERT_BUSY(m->object);
5936 vm_page_t m;
5947 m = PHYS_TO_VM_PAGE(pmap_kextract(addr));
5949 m = PHYS_TO_VM_PAGE(addr);
5951 m = (vm_page_t)addr;
5955 m, m->object, (uintmax_t)m->pindex, (uintmax_t)m->phys_addr,
5956 m->a.queue, m->ref_count, m->a.flags, m->oflags,
5957 m->flags, m->a.act_count, m->busy_lock, m->valid, m->dirty);