Lines matching defs:vmd (cross-reference hits for vmd, a struct vm_domain pointer; each hit keeps its original file line number from the search output)

185 static int vm_domain_alloc_fail(struct vm_domain *vmd, vm_object_t object,
214 struct vm_domain *vmd;
221 vmd = VM_DOMAIN(domain);
223 pgcache = &vmd->vmd_pgcache[pool];
236 vmd->vmd_page_count / 1000;
331 struct vm_domain *vmd;
339 vmd = VM_DOMAIN(vm_phys_domain(pa));
340 vm_domain_free_lock(vmd);
342 vm_domain_free_unlock(vmd);
344 vm_domain_freecnt_inc(vmd, -1);
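
The hits at lines 339-344 show the free-queue discipline in its smallest form: resolve the owning domain from a physical address, take vmd_free_mtx for the structural change to the free lists, drop it, and only then adjust vmd_free_count (here by -1, since a page is being pulled out of the free pool). A minimal user-space model of that ordering, with a pthread mutex and a C11 atomic standing in for the kernel primitives:

    /*
     * Model of lines 339-344: lock the domain's free-queue mutex for
     * the structural change, drop it, then fix up the free-page count
     * with an atomic add.  Field names mirror the kernel's; the types
     * are stand-ins.
     */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    struct vm_domain {
        pthread_mutex_t vmd_free_mtx;   /* protects the free lists */
        atomic_int      vmd_free_count; /* adjusted without the mutex */
    };

    void
    vm_domain_freecnt_inc(struct vm_domain *vmd, int adj)
    {
        /* The count is atomic, so the adjustment needs no lock. */
        atomic_fetch_add(&vmd->vmd_free_count, adj);
    }

    void
    remove_page_from_freelist(struct vm_domain *vmd)
    {
        pthread_mutex_lock(&vmd->vmd_free_mtx);
        /* ... unlink one page from the free lists here ... */
        pthread_mutex_unlock(&vmd->vmd_free_mtx);
        vm_domain_freecnt_inc(vmd, -1);  /* one fewer free page */
    }

    int
    main(void)
    {
        struct vm_domain d = {
            .vmd_free_mtx = PTHREAD_MUTEX_INITIALIZER,
            .vmd_free_count = 8,
        };
        remove_page_from_freelist(&d);
        printf("free pages: %d\n", atomic_load(&d.vmd_free_count));
        return (0);
    }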
444 struct vm_domain *vmd;
448 vmd = VM_DOMAIN(domain);
449 bzero(vmd, sizeof(*vmd));
450 *__DECONST(const char **, &vmd->vmd_pagequeues[PQ_INACTIVE].pq_name) =
452 *__DECONST(const char **, &vmd->vmd_pagequeues[PQ_ACTIVE].pq_name) =
454 *__DECONST(const char **, &vmd->vmd_pagequeues[PQ_LAUNDRY].pq_name) =
457 &vmd->vmd_pagequeues[PQ_UNSWAPPABLE].pq_name) =
459 vmd->vmd_domain = domain;
460 vmd->vmd_page_count = 0;
461 vmd->vmd_free_count = 0;
462 vmd->vmd_segs = 0;
463 vmd->vmd_oom = false;
464 vmd->vmd_helper_threads_enabled = true;
466 pq = &vmd->vmd_pagequeues[i];
471 vm_page_init_marker(&vmd->vmd_markers[i], i, 0);
473 mtx_init(&vmd->vmd_free_mtx, "vm page free queue", NULL, MTX_DEF);
474 mtx_init(&vmd->vmd_pageout_mtx, "vm pageout lock", NULL, MTX_DEF);
475 snprintf(vmd->vmd_name, sizeof(vmd->vmd_name), "%d", domain);
481 vm_page_init_marker(&vmd->vmd_inacthead, PQ_INACTIVE, PGA_ENQUEUED);
482 TAILQ_INSERT_HEAD(&vmd->vmd_pagequeues[PQ_INACTIVE].pq_pl,
483 &vmd->vmd_inacthead, plinks.q);
491 vm_page_init_marker(&vmd->vmd_clock[0], PQ_ACTIVE, PGA_ENQUEUED);
492 vm_page_init_marker(&vmd->vmd_clock[1], PQ_ACTIVE, PGA_ENQUEUED);
493 TAILQ_INSERT_HEAD(&vmd->vmd_pagequeues[PQ_ACTIVE].pq_pl,
494 &vmd->vmd_clock[0], plinks.q);
495 TAILQ_INSERT_TAIL(&vmd->vmd_pagequeues[PQ_ACTIVE].pq_pl,
496 &vmd->vmd_clock[1], plinks.q);
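
Lines 444-496 initialize a domain from scratch: zero the structure, name the page queues, seed per-queue scan markers, and plant the sentinels that later scans use as stable positions, vmd_inacthead near the front of the inactive queue and two clock hands bracketing the active queue. A condensed sketch using sys/queue.h; the struct layouts are stand-ins, the queue-name strings are truncated out of the listing so placeholders are used, and the real code also initializes vmd_free_mtx, vmd_pageout_mtx, and vmd_name (lines 473-475):

    #include <sys/queue.h>
    #include <stdbool.h>

    struct vm_page {
        TAILQ_ENTRY(vm_page) plinks;    /* the kernel nests this in a union */
        bool marker;                    /* true for queue sentinels */
    };
    TAILQ_HEAD(pglist, vm_page);

    struct vm_pagequeue {
        struct pglist pq_pl;
        const char   *pq_name;
    };

    enum { PQ_INACTIVE, PQ_ACTIVE, PQ_COUNT };

    struct vm_domain {
        struct vm_pagequeue vmd_pagequeues[PQ_COUNT];
        struct vm_page      vmd_inacthead;  /* inactive-queue insertion point */
        struct vm_page      vmd_clock[2];   /* active-queue clock hands */
    };

    void
    vm_page_domain_init(struct vm_domain *vmd)
    {
        TAILQ_INIT(&vmd->vmd_pagequeues[PQ_INACTIVE].pq_pl);
        TAILQ_INIT(&vmd->vmd_pagequeues[PQ_ACTIVE].pq_pl);
        vmd->vmd_pagequeues[PQ_INACTIVE].pq_name = "inactive"; /* placeholder */
        vmd->vmd_pagequeues[PQ_ACTIVE].pq_name = "active";     /* placeholder */
        vmd->vmd_inacthead.marker = true;
        vmd->vmd_clock[0].marker = vmd->vmd_clock[1].marker = true;
        /* One marker at the head of the inactive queue (lines 481-483)... */
        TAILQ_INSERT_HEAD(&vmd->vmd_pagequeues[PQ_INACTIVE].pq_pl,
            &vmd->vmd_inacthead, plinks);
        /* ...and two hands bracketing the active queue for clock scans
         * (lines 491-496). */
        TAILQ_INSERT_HEAD(&vmd->vmd_pagequeues[PQ_ACTIVE].pq_pl,
            &vmd->vmd_clock[0], plinks);
        TAILQ_INSERT_TAIL(&vmd->vmd_pagequeues[PQ_ACTIVE].pq_pl,
            &vmd->vmd_clock[1], plinks);
    }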
557 struct vm_domain *vmd;
844 vmd = VM_DOMAIN(seg->domain);
845 vm_domain_free_lock(vmd);
847 vm_domain_free_unlock(vmd);
848 vm_domain_freecnt_inc(vmd, pagecount);
850 vmd->vmd_page_count += (u_int)pagecount;
851 vmd->vmd_segs |= 1UL << segind;
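
Lines 844-851 do the bookkeeping when a physical segment's pages are handed to a domain: the free lists change under the lock, and then the free count, the total page count, and the vmd_segs segment bitmask are published. A sketch of just that accounting; enqueue_segment_pages() is a hypothetical stand-in for the vm_phys work done between the lock and unlock at 845-847:

    #include <pthread.h>
    #include <stdatomic.h>

    struct vm_domain {
        pthread_mutex_t vmd_free_mtx;
        atomic_uint     vmd_free_count;
        unsigned        vmd_page_count;
        unsigned long   vmd_segs;       /* bitmask of attached segments */
    };

    void enqueue_segment_pages(struct vm_domain *, int);   /* hypothetical */

    void
    attach_segment(struct vm_domain *vmd, int segind, int pagecount)
    {
        pthread_mutex_lock(&vmd->vmd_free_mtx);
        enqueue_segment_pages(vmd, pagecount);  /* pages onto the free lists */
        pthread_mutex_unlock(&vmd->vmd_free_mtx);
        atomic_fetch_add(&vmd->vmd_free_count, (unsigned)pagecount);
        vmd->vmd_page_count += (unsigned)pagecount;
        vmd->vmd_segs |= 1UL << segind;         /* remember this segment */
    }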
2201 _vm_domain_allocate(struct vm_domain *vmd, int req_class, int npages)
2208 limit = vmd->vmd_interrupt_free_min;
2210 limit = vmd->vmd_free_reserved;
2216 old = atomic_load_int(&vmd->vmd_free_count);
2221 } while (atomic_fcmpset_int(&vmd->vmd_free_count, &old, new) == 0);
2224 if (vm_paging_needed(vmd, new) && !vm_paging_needed(vmd, old))
2225 pagedaemon_wakeup(vmd->vmd_domain);
2228 if ((old >= vmd->vmd_free_min && new < vmd->vmd_free_min) ||
2229 (old >= vmd->vmd_free_severe && new < vmd->vmd_free_severe))
2230 vm_domain_set(vmd);
2236 vm_domain_allocate(struct vm_domain *vmd, int req, int npages)
2246 return (_vm_domain_allocate(vmd, req_class, npages));
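
Lines 2201-2246 are the allocator's admission control: vm_domain_allocate() extracts the request class from the flags, and _vm_domain_allocate() reserves npages with a compare-and-swap loop on vmd_free_count, no lock held. The class chooses how deep into the reserve a caller may dig (interrupt-time callers get the lower vmd_interrupt_free_min floor, lines 2208-2210), and a successful update that crosses a threshold wakes the pagedaemon (2224-2225) or latches the domain into the low-memory sets (2228-2230). A user-space model of the loop in C11 atomics; the post-update notifications are reduced to a comment:

    #include <stdatomic.h>
    #include <stdbool.h>

    struct vm_domain {
        atomic_uint vmd_free_count;
        unsigned    vmd_interrupt_free_min; /* floor for VM_ALLOC_INTERRUPT */
        unsigned    vmd_free_reserved;      /* floor for everyone else */
    };

    enum { VM_ALLOC_NORMAL, VM_ALLOC_SYSTEM, VM_ALLOC_INTERRUPT };

    bool
    reserve_pages(struct vm_domain *vmd, int req_class, int npages)
    {
        unsigned limit, old, new;

        /* Interrupt-time callers may dig deepest into the reserve. */
        limit = (req_class == VM_ALLOC_INTERRUPT) ?
            vmd->vmd_interrupt_free_min : vmd->vmd_free_reserved;

        old = atomic_load_explicit(&vmd->vmd_free_count,
            memory_order_relaxed);
        do {
            new = old - (unsigned)npages;
            if (old < (unsigned)npages || new < limit)
                return (false); /* would dip below this class's floor */
        } while (!atomic_compare_exchange_weak(&vmd->vmd_free_count,
            &old, new));

        /*
         * The kernel additionally wakes the pagedaemon and sets the
         * min/severe domain bits when `new` crosses a threshold that
         * `old` was still above.
         */
        return (true);
    }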
2253 struct vm_domain *vmd;
2292 vmd = VM_DOMAIN(domain);
2293 if (vmd->vmd_pgcache[VM_FREEPOOL_DEFAULT].zone != NULL) {
2294 m = uma_zalloc(vmd->vmd_pgcache[VM_FREEPOOL_DEFAULT].zone,
2301 if (vm_domain_allocate(vmd, req, 1)) {
2305 vm_domain_free_lock(vmd);
2307 vm_domain_free_unlock(vmd);
2309 vm_domain_freecnt_inc(vmd, 1);
2320 if (vm_domain_alloc_fail(vmd, object, req))
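
Lines 2292-2320 give vm_page_alloc's shape for the default free pool: try the per-domain UMA cache zone first (2293-2294); otherwise reserve one page of vmd_free_count, take the free lock, and pull a real page from the physical allocator, refunding the reservation if that comes up empty (2301-2309); on failure, let vm_domain_alloc_fail() decide whether to sleep and retry (2320). A control-flow sketch; the prototypes are stubs standing in for uma_zalloc(), the vm_phys allocator, and the rest:

    #include <stdbool.h>
    #include <stddef.h>

    struct vm_domain;
    struct vm_page;

    /* Stand-ins for the kernel primitives named in the listing. */
    struct vm_page *cache_zone_alloc(struct vm_domain *);
    bool vm_domain_allocate(struct vm_domain *, int req, int npages);
    void vm_domain_free_lock(struct vm_domain *);
    void vm_domain_free_unlock(struct vm_domain *);
    void vm_domain_freecnt_inc(struct vm_domain *, int);
    struct vm_page *phys_alloc_one(struct vm_domain *);
    bool vm_domain_alloc_fail_stub(struct vm_domain *, int); /* true: retry */

    struct vm_page *
    page_alloc_domain(struct vm_domain *vmd, int req)
    {
        struct vm_page *m;

    again:
        /* Fast path: a page cached in the per-domain UMA zone. */
        if ((m = cache_zone_alloc(vmd)) != NULL)
            return (m);

        /* Slow path: reserve one page's worth of vmd_free_count... */
        if (vm_domain_allocate(vmd, req, 1)) {
            vm_domain_free_lock(vmd);
            m = phys_alloc_one(vmd);    /* ...then take a real page */
            vm_domain_free_unlock(vmd);
            if (m != NULL)
                return (m);
            vm_domain_freecnt_inc(vmd, 1);  /* refund the reservation */
        }

        /* Record the deficit and maybe sleep; retry if told to. */
        if (vm_domain_alloc_fail_stub(vmd, req))
            goto again;
        return (NULL);
    }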
2453 struct vm_domain *vmd;
2460 vmd = VM_DOMAIN(domain);
2461 if (!vm_domain_allocate(vmd, req, npages))
2466 vm_domain_free_lock(vmd);
2469 vm_domain_free_unlock(vmd);
2483 vm_domain_freecnt_inc(vmd, npages);
2599 struct vm_domain *vmd;
2613 vmd = VM_DOMAIN(domain);
2621 if (vmd->vmd_pgcache[VM_FREEPOOL_DIRECT].zone != NULL) {
2622 m = uma_zalloc(vmd->vmd_pgcache[VM_FREEPOOL_DIRECT].zone,
2630 if (vm_domain_allocate(vmd, req, 1)) {
2631 vm_domain_free_lock(vmd);
2633 vm_domain_free_unlock(vmd);
2635 vm_domain_freecnt_inc(vmd, 1);
2643 if (vm_domain_alloc_fail(vmd, NULL, req))
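
The two paths that follow keep the same reserve/lock/refund shape: the contiguous allocator at 2453-2483 reserves npages up front and refunds all of them when vm_phys cannot supply a run, and the path at 2599-2643 repeats the single-page logic against the VM_FREEPOOL_DIRECT cache zone instead of the default pool.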
2693 struct vm_domain *vmd;
2698 vmd = VM_DOMAIN(domain);
2699 nqp = &vmd->vmd_nofreeq;
2700 vm_domain_free_lock(vmd);
2702 if (!vm_domain_allocate(vmd, req,
2704 vm_domain_free_unlock(vmd);
2710 vm_domain_freecnt_inc(vmd, 1 << VM_NOFREE_IMPORT_ORDER);
2711 vm_domain_free_unlock(vmd);
2717 vm_domain_free_unlock(vmd);
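
Lines 2693-2717 import a whole 2^VM_NOFREE_IMPORT_ORDER chunk for the domain's "nofree" queue. Note the inverted order relative to the paths above: here the free lock is already held when the count is reserved (2700-2702), and the refund at 2710 also happens before the unlock. A sketch; the order's value is not visible in the listing, so 8 below is only a placeholder:

    #include <stdbool.h>
    #include <stddef.h>

    #define VM_NOFREE_IMPORT_ORDER 8    /* placeholder, see vm_page.c */

    struct vm_domain;
    struct vm_page;

    bool vm_domain_allocate(struct vm_domain *, int, int);
    void vm_domain_free_lock(struct vm_domain *);
    void vm_domain_free_unlock(struct vm_domain *);
    void vm_domain_freecnt_inc(struct vm_domain *, int);
    struct vm_page *phys_alloc_chunk(struct vm_domain *, int order);

    struct vm_page *
    nofree_import(struct vm_domain *vmd, int req)
    {
        struct vm_page *m;

        vm_domain_free_lock(vmd);
        if (!vm_domain_allocate(vmd, req, 1 << VM_NOFREE_IMPORT_ORDER)) {
            vm_domain_free_unlock(vmd);
            return (NULL);
        }
        m = phys_alloc_chunk(vmd, VM_NOFREE_IMPORT_ORDER);
        if (m == NULL) {
            /* No chunk available: refund the whole reservation. */
            vm_domain_freecnt_inc(vmd, 1 << VM_NOFREE_IMPORT_ORDER);
            vm_domain_free_unlock(vmd);
            return (NULL);
        }
        /* ... queue the chunk's pages on vmd->vmd_nofreeq (line 2699) ... */
        vm_domain_free_unlock(vmd);
        return (m);
    }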
2849 struct vm_domain *vmd;
2854 vmd = VM_DOMAIN(pgcache->domain);
2860 if (vmd->vmd_severeset || curproc == pageproc ||
2861 !_vm_domain_allocate(vmd, VM_ALLOC_NORMAL, cnt))
2863 domain = vmd->vmd_domain;
2864 vm_domain_free_lock(vmd);
2867 vm_domain_free_unlock(vmd);
2869 vm_domain_freecnt_inc(vmd, cnt - i);
2877 struct vm_domain *vmd;
2883 vmd = VM_DOMAIN(pgcache->domain);
2884 vm_domain_free_lock(vmd);
2889 vm_domain_free_unlock(vmd);
2890 vm_domain_freecnt_inc(vmd, cnt);
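
Lines 2849-2890 are the import/release halves backing the vmd_pgcache UMA zones. Import bails out when the domain is already under severe pressure or when the pagedaemon itself is the caller (2860-2861, so that reclamation never creates fresh deficits through the cache), reserves cnt pages, fills the buffer under the free lock, and refunds whatever part of the reservation it could not fill; release is the mirror image. A sketch with stub prototypes; reserve_pages() is the loop from the earlier sketch:

    #include <stdbool.h>

    struct vm_domain;
    struct vm_page;

    bool domain_is_severe(struct vm_domain *);
    bool curproc_is_pagedaemon(void);
    bool reserve_pages(struct vm_domain *, int req_class, int npages);
    void vm_domain_free_lock(struct vm_domain *);
    void vm_domain_free_unlock(struct vm_domain *);
    void vm_domain_freecnt_inc(struct vm_domain *, int);
    int  phys_alloc_npages(struct vm_domain *, struct vm_page **, int);
    void phys_free_page(struct vm_domain *, struct vm_page *);

    int
    pgcache_import(struct vm_domain *vmd, struct vm_page **store, int cnt)
    {
        int i;

        /* Never drain a low domain, and never let the pagedaemon
         * create new deficits through the cache. */
        if (domain_is_severe(vmd) || curproc_is_pagedaemon() ||
            !reserve_pages(vmd, /* VM_ALLOC_NORMAL */ 0, cnt))
            return (0);
        vm_domain_free_lock(vmd);
        i = phys_alloc_npages(vmd, store, cnt);
        vm_domain_free_unlock(vmd);
        if (i != cnt)
            vm_domain_freecnt_inc(vmd, cnt - i);    /* refund shortfall */
        return (i);
    }

    void
    pgcache_release(struct vm_domain *vmd, struct vm_page **store, int cnt)
    {
        vm_domain_free_lock(vmd);
        for (int i = 0; i < cnt; i++)
            phys_free_page(vmd, store[i]);
        vm_domain_free_unlock(vmd);
        vm_domain_freecnt_inc(vmd, cnt);
    }

The batch-free code at lines 3240-3276 ends with the same unlock-then-freecnt_inc accounting as the release half here.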
3093 struct vm_domain *vmd;
3240 vmd = VM_DOMAIN(domain);
3241 vm_domain_free_lock(vmd);
3258 vm_domain_free_unlock(vmd);
3266 vmd = VM_DOMAIN(domain);
3268 vm_domain_free_lock(vmd);
3275 vm_domain_free_unlock(vmd);
3276 vm_domain_freecnt_inc(vmd, cnt);
3317 struct vm_domain *vmd;
3374 vmd = VM_DOMAIN(domain);
3375 count = vmd->vmd_free_count;
3376 if (count < npages + vmd->vmd_free_reserved || (count < npages +
3377 vmd->vmd_interrupt_free_min && req_class == VM_ALLOC_SYSTEM) ||
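
Lines 3374-3377 apply the same per-class floors as the reservation loop, this time as a plain threshold test on a count that was read without reserving anything. The two clauses visible in the listing, as a standalone predicate (the condition continues past 3377, so this is only the visible part):

    #include <stdbool.h>

    /* True when count is too low to give npages to this request class. */
    bool
    below_alloc_floor(unsigned count, unsigned npages,
        unsigned free_reserved, unsigned interrupt_free_min,
        bool system_class)
    {
        return (count < npages + free_reserved ||
            (count < npages + interrupt_free_min && system_class));
    }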
3492 vm_domain_set(struct vm_domain *vmd)
3496 if (!vmd->vmd_minset && vm_paging_min(vmd)) {
3497 vmd->vmd_minset = 1;
3498 DOMAINSET_SET(vmd->vmd_domain, &vm_min_domains);
3500 if (!vmd->vmd_severeset && vm_paging_severe(vmd)) {
3501 vmd->vmd_severeset = 1;
3502 DOMAINSET_SET(vmd->vmd_domain, &vm_severe_domains);
3511 vm_domain_clear(struct vm_domain *vmd)
3515 if (vmd->vmd_minset && !vm_paging_min(vmd)) {
3516 vmd->vmd_minset = 0;
3517 DOMAINSET_CLR(vmd->vmd_domain, &vm_min_domains);
3523 if (vmd->vmd_severeset && !vm_paging_severe(vmd)) {
3524 vmd->vmd_severeset = 0;
3525 DOMAINSET_CLR(vmd->vmd_domain, &vm_severe_domains);
3536 if (vmd->vmd_pageout_pages_needed &&
3537 vmd->vmd_free_count >= vmd->vmd_pageout_free_min) {
3538 wakeup(&vmd->vmd_pageout_pages_needed);
3539 vmd->vmd_pageout_pages_needed = 0;
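
Lines 3492-3539 latch domains into and out of the global low-memory sets: vm_domain_set() records a domain in vm_min_domains / vm_severe_domains the first time its free count drops below the corresponding threshold, vm_domain_clear() removes it once it recovers, and recovery also wakes any pageout-critical sleeper (3536-3539). A self-contained model using a plain bitmask in place of the kernel's DOMAINSET macros; the locking around the sets is omitted:

    #include <stdbool.h>

    struct vm_domain {
        int      vmd_domain;
        bool     vmd_minset, vmd_severeset;
        unsigned vmd_free_count, vmd_free_min, vmd_free_severe;
    };

    static unsigned long vm_min_domains, vm_severe_domains;

    static bool vm_paging_min(struct vm_domain *vmd)
    { return (vmd->vmd_free_count < vmd->vmd_free_min); }
    static bool vm_paging_severe(struct vm_domain *vmd)
    { return (vmd->vmd_free_count < vmd->vmd_free_severe); }

    void
    vm_domain_set(struct vm_domain *vmd)
    {
        if (!vmd->vmd_minset && vm_paging_min(vmd)) {
            vmd->vmd_minset = true;
            vm_min_domains |= 1UL << vmd->vmd_domain;
        }
        if (!vmd->vmd_severeset && vm_paging_severe(vmd)) {
            vmd->vmd_severeset = true;
            vm_severe_domains |= 1UL << vmd->vmd_domain;
        }
    }

    void
    vm_domain_clear(struct vm_domain *vmd)
    {
        if (vmd->vmd_minset && !vm_paging_min(vmd)) {
            vmd->vmd_minset = false;
            vm_min_domains &= ~(1UL << vmd->vmd_domain);
        }
        if (vmd->vmd_severeset && !vm_paging_severe(vmd)) {
            vmd->vmd_severeset = false;
            vm_severe_domains &= ~(1UL << vmd->vmd_domain);
        }
        /* The kernel also wakes threads sleeping on
         * vmd_pageout_pages_needed here once the count recovers. */
    }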
3636 struct vm_domain *vmd;
3639 vmd = VM_DOMAIN(domain);
3640 vm_domain_free_assert_unlocked(vmd);
3644 if (vmd->vmd_free_count < vmd->vmd_pageout_free_min) {
3645 vmd->vmd_pageout_pages_needed = 1;
3646 msleep(&vmd->vmd_pageout_pages_needed,
3652 DOMAINSET_SET(vmd->vmd_domain, &wdom);
3707 vm_domain_alloc_fail(struct vm_domain *vmd, vm_object_t object, int req)
3710 vm_domain_free_assert_unlocked(vmd);
3712 atomic_add_int(&vmd->vmd_pageout_deficit,
3717 vm_wait_domain(vmd->vmd_domain);
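
Lines 3636-3652 and 3707-3717 close the failure loop: vm_domain_alloc_fail() adds the shortfall to vmd_pageout_deficit and, when the request is allowed to wait, sleeps in vm_wait_domain() until memory recovers. A pageout-critical sleeper parks on vmd_pageout_pages_needed (3644-3646), the flag the previous sketch's wakeup clears, while ordinary callers join a domain wait set (3652). A pthread condvar model of the handshake; msleep()/wakeup() and the request-flag test req_can_wait() are stand-ins:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    struct vm_domain {
        pthread_mutex_t vmd_pageout_mtx;
        pthread_cond_t  vmd_pageout_cv;     /* stands in for wakeup() channel */
        int             vmd_pageout_pages_needed;
        atomic_int      vmd_pageout_deficit;
    };

    bool req_can_wait(int req);             /* hypothetical flag test */

    void
    vm_wait_domain_model(struct vm_domain *vmd)
    {
        /* Models only the pageout-critical branch at 3644-3646; the
         * kernel takes this path when the free count is below
         * vmd_pageout_free_min. */
        pthread_mutex_lock(&vmd->vmd_pageout_mtx);
        vmd->vmd_pageout_pages_needed = 1;
        while (vmd->vmd_pageout_pages_needed)
            pthread_cond_wait(&vmd->vmd_pageout_cv, &vmd->vmd_pageout_mtx);
        pthread_mutex_unlock(&vmd->vmd_pageout_mtx);
    }

    /* Returns true when the caller should retry its allocation. */
    bool
    alloc_fail_model(struct vm_domain *vmd, int req, int npages)
    {
        atomic_fetch_add(&vmd->vmd_pageout_deficit, npages);
        if (req_can_wait(req)) {
            vm_wait_domain_model(vmd);
            return (true);
        }
        return (false);
    }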
3864 struct vm_domain *vmd;
3886 vmd = vm_pagequeue_domain(m);
3887 KASSERT(pq == &vmd->vmd_pagequeues[PQ_INACTIVE],
3889 TAILQ_INSERT_BEFORE(&vmd->vmd_inacthead, m, plinks.q);
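
Lines 3886-3889 use the vmd_inacthead sentinel planted during domain init: a page bound for quick reclamation is linked immediately before the marker near the front of the inactive queue, so the next inactive scan encounters it early, and the insertion is O(1) regardless of queue length. The whole trick in sys/queue.h terms:

    #include <sys/queue.h>

    struct vm_page {
        TAILQ_ENTRY(vm_page) plinks;
    };

    /* Insert m just ahead of the marker near the queue's head. */
    void
    requeue_near_head(struct vm_page *inacthead_marker, struct vm_page *m)
    {
        TAILQ_INSERT_BEFORE(inacthead_marker, m, plinks);
    }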
4056 struct vm_domain *vmd;
4067 vmd = VM_DOMAIN(domain);
4069 pq = &vmd->vmd_pagequeues[queue];
4264 struct vm_domain *vmd;
4270 vmd = vm_pagequeue_domain(m);
4271 zone = vmd->vmd_pgcache[m->pool].zone;
4276 vm_domain_free_lock(vmd);
4278 vm_domain_free_unlock(vmd);
4279 vm_domain_freecnt_inc(vmd, 1);
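
Lines 4264-4279 are the mirror image of the allocation fast path: a freed page goes back to its pool's per-domain cache zone when one exists, otherwise to the physical allocator under the free lock, with the count bumped afterwards. A closing sketch with stubs; zone_free() stands in for uma_zfree():

    #include <stddef.h>

    struct vm_domain;
    struct vm_page;
    struct zone;

    struct zone *page_pool_zone(struct vm_domain *, struct vm_page *);
    void zone_free(struct zone *, struct vm_page *);
    void vm_domain_free_lock(struct vm_domain *);
    void vm_domain_free_unlock(struct vm_domain *);
    void vm_domain_freecnt_inc(struct vm_domain *, int);
    void phys_free_page(struct vm_domain *, struct vm_page *);

    void
    page_free_domain(struct vm_domain *vmd, struct vm_page *m)
    {
        struct zone *zone;

        if ((zone = page_pool_zone(vmd, m)) != NULL) {
            zone_free(zone, m);         /* fast path: cache the page */
            return;
        }
        vm_domain_free_lock(vmd);
        phys_free_page(vmd, m);         /* back to the physical queues */
        vm_domain_free_unlock(vmd);
        vm_domain_freecnt_inc(vmd, 1);
    }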