/*	$NetBSD: uvm_pdpolicy.h,v 1.7 2020/02/23 15:46:43 ad Exp $	*/

/*-
 * Copyright (c)2005, 2006 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _UVM_PDPOLICY_H_
#define _UVM_PDPOLICY_H_

struct vm_page;
struct vm_anon;

/*
 * these interfaces are for uvm internal use only.
 * don't use them directly from outside of /sys/uvm.
 */

/* module setup, idle-time maintenance and pagedaemon status queries */
void uvmpdpol_idle(struct uvm_cpu *);
void uvmpdpol_init(void);
void uvmpdpol_init_cpu(struct uvm_cpu *);
void uvmpdpol_reinit(void);
void uvmpdpol_estimatepageable(int *, int *);
bool uvmpdpol_needsscan_p(void);

/* per-page (and per-anon) queue state management */
void uvmpdpol_pageactivate(struct vm_page *);
void uvmpdpol_pagedeactivate(struct vm_page *);
void uvmpdpol_pagedequeue(struct vm_page *);
void uvmpdpol_pageenqueue(struct vm_page *);
bool uvmpdpol_pageisqueued_p(struct vm_page *);
void uvmpdpol_pagerealize(struct vm_page *);
void uvmpdpol_anfree(struct vm_anon *);

/* pagedaemon scan interface */
void uvmpdpol_tune(void);
void uvmpdpol_scaninit(void);
void uvmpdpol_scanfini(void);
struct vm_page *uvmpdpol_selectvictim(krwlock_t **lock);
void uvmpdpol_balancequeue(int);

void uvmpdpol_sysctlsetup(void);

/*
 * uvmpdpol_set_intent: set an intended state for the page, taking care not
 * to overwrite any of the other flags.
 */

static inline void
uvmpdpol_set_intent(struct vm_page *pg, uint32_t i)
{

	KASSERT(mutex_owned(&pg->interlock));
	pg->pqflags = PQ_INTENT_SET | (pg->pqflags & ~PQ_INTENT_MASK) | i;
}

#endif /* !_UVM_PDPOLICY_H_ */
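
/*
 * Minimal usage sketch: an intent is recorded on the page under
 * pg->interlock and later pushed out to the policy's queues via
 * uvmpdpol_pagerealize().  This assumes the PQ_INTENT_* definitions from
 * uvm_page.h; the PQ_INTENT_I value ("deactivate") and the exact calling
 * sequence below are assumptions for illustration, not a definitive recipe.
 *
 *	mutex_enter(&pg->interlock);
 *	uvmpdpol_set_intent(pg, PQ_INTENT_I);	record pending deactivation
 *	uvmpdpol_pagerealize(pg);		apply the pending intent
 *	mutex_exit(&pg->interlock);
 */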