xref: /netbsd-src/sys/uvm/uvm_pdpolicy.h (revision 82d56013d7b633d116a93943de88e08335357a7c)
1 /*	$NetBSD: uvm_pdpolicy.h,v 1.8 2020/05/17 19:38:17 ad Exp $	*/
2 
3 /*-
4  * Copyright (c)2005, 2006 YAMAMOTO Takashi,
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #ifndef _UVM_PDPOLICY_H_
30 #define _UVM_PDPOLICY_H_
31 
32 struct vm_page;
33 struct vm_anon;
34 
35 /*
36  * This API is for uvm internal use only.
37  * Do not use these functions directly from outside of /sys/uvm.
38  */
39 
/* Policy lifecycle: per-CPU idle hook, one-time and per-CPU init, re-init. */
40 void uvmpdpol_idle(struct uvm_cpu *);
41 void uvmpdpol_init(void);
42 void uvmpdpol_init_cpu(struct uvm_cpu *);
43 void uvmpdpol_reinit(void);
/* Global queries: pageable-page estimate and whether a scan is needed. */
44 void uvmpdpol_estimatepageable(int *, int *);
45 bool uvmpdpol_needsscan_p(void);
46 
/*
 * Per-page queue operations: activate/deactivate/enqueue/dequeue a page,
 * predicates on its queue state, and realizing a previously-set intent.
 * NOTE(review): exact queue semantics are defined by the policy
 * implementation (uvm_pdpolicy_*.c), not visible from this header.
 */
47 void uvmpdpol_pageactivate(struct vm_page *);
48 void uvmpdpol_pagedeactivate(struct vm_page *);
49 void uvmpdpol_pagedequeue(struct vm_page *);
50 void uvmpdpol_pageenqueue(struct vm_page *);
51 bool uvmpdpol_pageactivate_p(struct vm_page *);
52 bool uvmpdpol_pageisqueued_p(struct vm_page *);
53 void uvmpdpol_pagerealize(struct vm_page *);
54 void uvmpdpol_anfree(struct vm_anon *);
55 
/*
 * Pagedaemon scan driver: tuning, scan setup/teardown, victim selection
 * (returns the page and, via *lock, the lock the caller must release),
 * and queue rebalancing.
 */
56 void uvmpdpol_tune(void);
57 void uvmpdpol_scaninit(void);
58 void uvmpdpol_scanfini(void);
59 struct vm_page *uvmpdpol_selectvictim(krwlock_t **lock);
60 void uvmpdpol_balancequeue(int);
61 
/* Registers the policy's sysctl nodes. */
62 void uvmpdpol_sysctlsetup(void);
63 
64 /*
65  * uvmpdpol_set_intent: set an intended state for the page, taking care not
66  * to overwrite any of the other flags.
67  */
68 
69 static inline void
70 uvmpdpol_set_intent(struct vm_page *pg, uint32_t i)
71 {
72 
73 	KASSERT(mutex_owned(&pg->interlock));
74 	pg->pqflags = PQ_INTENT_SET | (pg->pqflags & ~PQ_INTENT_MASK) | i;
75 }
76 
77 #endif /* !_UVM_PDPOLICY_H_ */
78