xref: /netbsd-src/sys/uvm/pmap/vmpagemd.h (revision 413d532bcc3f62d122e56d92e13ac64825a40baf)
1 /*	$NetBSD: vmpagemd.h,v 1.2 2014/03/04 06:14:53 matt Exp $	*/
2 
3 /*-
4  * Copyright (c) 2011 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Raytheon BBN Technologies Corp and Defense Advanced Research Projects
9  * Agency and which was developed by Matt Thomas of 3am Software Foundry.
10  *
11  * This material is based upon work supported by the Defense Advanced Research
12  * Projects Agency and Space and Naval Warfare Systems Center, Pacific, under
13  * Contract No. N66001-09-C-2073.
14  * Approved for Public Release, Distribution Unlimited
15  *
16  * Redistribution and use in source and binary forms, with or without
17  * modification, are permitted provided that the following conditions
18  * are met:
19  * 1. Redistributions of source code must retain the above copyright
20  *    notice, this list of conditions and the following disclaimer.
21  * 2. Redistributions in binary form must reproduce the above copyright
22  *    notice, this list of conditions and the following disclaimer in the
23  *    documentation and/or other materials provided with the distribution.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
26  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35  * POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 #ifndef _COMMON_PMAP_TLB_VMPAGEMD_H_
39 #define _COMMON_PMAP_TLB_VMPAGEMD_H_
40 
41 #ifdef _LOCORE
42 #error use assym.h instead
43 #endif
44 
45 #ifdef _MODULE
46 #error this file should not be included by loadable kernel modules
47 #endif
48 
49 #ifdef _KERNEL_OPT
50 #include "opt_modular.h"
51 #include "opt_multiprocessor.h"
52 #endif
53 
54 #include <sys/mutex.h>
55 
56 #define	__HAVE_VM_PAGE_MD
57 
/*
 * A pv ("physical-to-virtual") entry records one virtual mapping of a
 * physical page: which pmap maps it and at what virtual address.  All
 * entries for the same physical page are chained through pv_next; the
 * head entry is embedded directly in struct vm_page_md.
 */
typedef struct pv_entry {
	struct pv_entry *pv_next;	/* next mapping of the same page */
	struct pmap *pv_pmap;		/* pmap that owns this mapping */
	vaddr_t pv_va;			/* virtual address of the mapping */
} *pv_entry_t;
63 
/*
 * Bits stored in the low 16 bits of mdpg_attrs (the high 16 bits hold
 * the pv-list generation count, see VM_PAGEMD_PVLIST_GEN below).
 */
#define	VM_PAGEMD_REFERENCED	0x0001	/* page has been recently referenced */
#define	VM_PAGEMD_MODIFIED	0x0002	/* page has been modified */
#define	VM_PAGEMD_POOLPAGE	0x0004	/* page is used as a poolpage */
#define	VM_PAGEMD_EXECPAGE	0x0008	/* page is exec mapped */
#ifdef __PMAP_VIRTUAL_CACHE_ALIASES
#define	VM_PAGEMD_UNCACHED	0x0010	/* page is mapped uncached */
#endif

/*
 * Predicates over mdpg_attrs.  Each takes a struct vm_page_md pointer.
 */
#ifdef __PMAP_VIRTUAL_CACHE_ALIASES
#define	VM_PAGEMD_CACHED_P(mdpg)	(((mdpg)->mdpg_attrs & VM_PAGEMD_UNCACHED) == 0)
#define	VM_PAGEMD_UNCACHED_P(mdpg)	(((mdpg)->mdpg_attrs & VM_PAGEMD_UNCACHED) != 0)
#endif
#define	VM_PAGEMD_MODIFIED_P(mdpg)	(((mdpg)->mdpg_attrs & VM_PAGEMD_MODIFIED) != 0)
#define	VM_PAGEMD_REFERENCED_P(mdpg)	(((mdpg)->mdpg_attrs & VM_PAGEMD_REFERENCED) != 0)
#define	VM_PAGEMD_POOLPAGE_P(mdpg)	(((mdpg)->mdpg_attrs & VM_PAGEMD_POOLPAGE) != 0)
#define	VM_PAGEMD_EXECPAGE_P(mdpg)	(((mdpg)->mdpg_attrs & VM_PAGEMD_EXECPAGE) != 0)
80 
81 struct vm_page_md {
82 	volatile u_int mdpg_attrs;	/* page attributes */
83 	struct pv_entry mdpg_first;	/* pv_entry first */
84 #if defined(MULTIPROCESSOR) || defined(MODULAR)
85 	kmutex_t *mdpg_lock;		/* pv list lock */
86 #define	VM_PAGEMD_PVLIST_LOCK_INIT(mdpg) 	\
87 	(mdpg)->mdpg_lock = NULL
88 #define	VM_PAGEMD_PVLIST_LOCK(pg, list_change)	\
89 	pmap_pvlist_lock(mdpg, list_change)
90 #define	VM_PAGEMD_PVLIST_UNLOCK(mdpg)		\
91 	mutex_spin_exit((mdpg)->mdpg_lock)
92 #define	VM_PAGEMD_PVLIST_LOCKED_P(mdpg)		\
93 	mutex_owner((mdpg)->mdpg_lock)
94 #define	VM_PAGEMD_PVLIST_GEN(mdpg)		\
95 	((uint16_t)((mdpg)->mdpg_attrs >> 16))
96 #else
97 #define	VM_PAGEMD_PVLIST_LOCK_INIT(mdpg)	do { } while (/*CONSTCOND*/ 0)
98 #define	VM_PAGEMD_PVLIST_LOCK(mdpg, lc)	(mutex_spin_enter(&pmap_pvlist_mutex), 0)
99 #define	VM_PAGEMD_PVLIST_UNLOCK(mdpg)	mutex_spin_exit(&pmap_pvlist_mutex)
100 #define	VM_PAGEMD_PVLIST_LOCKED_P(mdpg)	true
101 #define	VM_PAGEMD_PVLIST_GEN(mdpg)		(0)
102 #endif /* MULTIPROCESSOR || MODULAR */
103 };
104 
/*
 * Initialize the machine-dependent part of a vm_page: clear the
 * attribute bits, empty the embedded head pv_entry (recording the
 * page's physical address in pv_va), and set up the pv-list lock.
 */
#define VM_MDPAGE_INIT(pg)						\
do {									\
	(pg)->mdpage.mdpg_attrs = 0;					\
	(pg)->mdpage.mdpg_first.pv_pmap = NULL;				\
	(pg)->mdpage.mdpg_first.pv_next = NULL;				\
	(pg)->mdpage.mdpg_first.pv_va = (pg)->phys_addr;		\
	VM_PAGEMD_PVLIST_LOCK_INIT(&(pg)->mdpage);			\
} while (/* CONSTCOND */ 0)
113 
#endif /* _COMMON_PMAP_TLB_VMPAGEMD_H_ */
115