/*	$NetBSD: pmap.h,v 1.12 2002/09/11 01:46:33 mycroft Exp $	*/

/*
 * This file was taken from mvme68k/include/pmap.h and
 * should probably be re-synced when needed.
 * Darrin B Jewell <jewell@mit.edu>  Fri Aug 28 03:22:07 1998
 * original cvs id: NetBSD: pmap.h,v 1.12 1998/08/22 10:55:34 scw Exp
 */

/*
 * Copyright (c) 1987 Carnegie-Mellon University
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap.h	8.1 (Berkeley) 6/10/93
 */

#ifndef	_MACHINE_PMAP_H_
#define	_MACHINE_PMAP_H_

#include <machine/pte.h>

/*
 * Pmap stuff
 */
struct pmap {
	pt_entry_t		*pm_ptab;	/* KVA of page table */
	st_entry_t		*pm_stab;	/* KVA of segment table */
	int			pm_stfree;	/* 040: free lev2 blocks */
	st_entry_t		*pm_stpa;	/* 040: ST phys addr */
	short			pm_sref;	/* segment table ref count */
	short			pm_count;	/* pmap reference count */
	struct simplelock	pm_lock;	/* lock on pmap */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	long			pm_ptpages;	/* more stats: PT pages */
};

typedef struct pmap	*pmap_t;

/*
 * On the 040 we keep track of which level 2 blocks are already in use
 * with the pm_stfree mask.  Bits are arranged from LSB (block 0) to MSB
 * (block 31).  For convenience, the level 1 table is considered to be
 * block 0.
 *
 * MAX[KU]L2SIZE control how many pages of level 2 descriptors are allowed
 * for the kernel and users.  8 implies only the initial "segment table"
 * page is used.  WARNING: don't change MAXUL2SIZE unless you can allocate
 * physically contiguous pages for the ST in pmap.c!
 */
#define	MAXKL2SIZE	32
#define MAXUL2SIZE	8
#define l2tobm(n)	(1 << (n))
#define	bmtol2(n)	(ffs(n) - 1)
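
/*
 * Illustrative sketch only (not code from pmap.c; "pm" is a hypothetical
 * pmap pointer): taking one level 2 block out of the pm_stfree bitmap
 * with the macros above.  bmtol2() yields -1 when the mask is empty.
 *
 *	int ix;
 *
 *	ix = bmtol2(pm->pm_stfree);
 *	if (ix != -1)
 *		pm->pm_stfree &= ~l2tobm(ix);
 */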

/*
 * Macros for speed
 */
#define	PMAP_ACTIVATE(pmap, loadhw)					\
{									\
	if ((loadhw))							\
		loadustp(m68k_btop((paddr_t)(pmap)->pm_stpa));	\
}
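
/*
 * Hypothetical call-site sketch (the real activation path lives in
 * pmap.c and the machine-dependent switch code, not in this header):
 * switching to process "p" would activate p's pmap, loading the user
 * segment table root pointer only when p is the process currently on
 * the CPU.
 *
 *	PMAP_ACTIVATE(p->p_vmspace->vm_map.pmap, p == curproc);
 */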

/*
 * For each struct vm_page, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry, the list is pv_table.
 */
struct pv_entry {
	struct pv_entry	*pv_next;	/* next pv_entry */
	struct pmap	*pv_pmap;	/* pmap where mapping lies */
	vaddr_t		pv_va;		/* virtual address for mapping */
	st_entry_t	*pv_ptste;	/* non-zero if VA maps a PT page */
	struct pmap	*pv_ptpmap;	/* if pv_ptste, pmap for PT page */
	int		pv_flags;	/* flags */
};

#define	PV_CI		0x01	/* header: all entries are cache inhibited */
#define PV_PTPAGE	0x02	/* header: entry maps a page table page */
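
/*
 * Illustrative walk of one page's pv list (a sketch, not code from
 * pmap.c; "pvh" stands in for the page's head entry in pv_table, and
 * the lookup that produces it is assumed):
 *
 *	struct pv_entry *pv;
 *
 *	for (pv = pvh; pv != NULL; pv = pv->pv_next)
 *		examine pv->pv_pmap and pv->pv_va;
 */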

struct pv_page;

struct pv_page_info {
	TAILQ_ENTRY(pv_page) pgi_list;
	struct pv_entry *pgi_freelist;
	int pgi_nfree;
};

/*
 * This is basically:
 * ((NBPG - sizeof(struct pv_page_info)) / sizeof(struct pv_entry))
 */
#define	NPVPPG	170

struct pv_page {
	struct pv_page_info pvp_pgi;
	struct pv_entry pvp_pv[NPVPPG];
};
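
/*
 * Worked out, assuming 4KB pages (NBPG == 4096) and 32-bit m68k
 * pointers; the sizes below are assumptions of this note, not asserted
 * anywhere in the code:
 *
 *	sizeof(struct pv_entry)     == 6 * 4 == 24
 *	sizeof(struct pv_page_info) == 4 * 4 == 16
 *	(4096 - 16) / 24            == 170
 *
 * so one struct pv_page exactly fills a page with NPVPPG pv_entries.
 */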

#ifdef	_KERNEL

extern struct pmap	kernel_pmap_store;

#define pmap_kernel()	(&kernel_pmap_store)
#define	active_pmap(pm) \
	((pm) == pmap_kernel() || (pm) == curproc->p_vmspace->vm_map.pmap)
#define	active_user_pmap(pm) \
	(curproc && \
	 (pm) != pmap_kernel() && (pm) == curproc->p_vmspace->vm_map.pmap)

extern void _pmap_set_page_cacheable __P((struct pmap *, vaddr_t));
extern void _pmap_set_page_cacheinhibit __P((struct pmap *, vaddr_t));
extern int _pmap_page_is_cacheable __P((struct pmap *, vaddr_t));

extern struct pv_entry	*pv_table;	/* array of entries, one per page */

#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)

#define	pmap_update(pmap)		/* nothing (yet) */

extern pt_entry_t	*Sysmap;
extern char		*vmmap;		/* map for mem, dumps, etc. */

vaddr_t	pmap_map __P((vaddr_t, paddr_t, paddr_t, int));
void	pmap_procwr __P((struct proc *, vaddr_t, size_t));
#define PMAP_NEED_PROCWR

/*
 * Do idle page zeroing uncached to avoid polluting the cache.
 */
boolean_t pmap_zero_page_uncached(paddr_t);
#define	PMAP_PAGEIDLEZERO(pa)	pmap_zero_page_uncached((pa))
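
/*
 * Sketch of the intended caller, UVM's idle-time page zeroing (a
 * hypothetical loop body, not the actual uvm code; "pa" is the physical
 * address of a free page): zero free pages while the CPU has nothing
 * better to do, and stop as soon as the hook reports it could not
 * finish.
 *
 *	if (PMAP_PAGEIDLEZERO(pa) == FALSE)
 *		stop zeroing until the next idle period;
 */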

#endif /* _KERNEL */

#endif /* !_MACHINE_PMAP_H_ */