/*	$NetBSD: pmap.h,v 1.64 2020/09/06 10:48:21 mrg Exp $	*/

/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef	_MACHINE_PMAP_H_
#define	_MACHINE_PMAP_H_

#ifndef _LOCORE
#include <machine/pte.h>
#include <sys/queue.h>
struct vm_page;
#include <uvm/uvm_prot.h>
#include <uvm/uvm_pmap.h>
#include <uvm/uvm_object.h>
#ifdef _KERNEL
#include <machine/cpuset.h>
#ifdef SUN4V
#include <machine/hypervisor.h>
#endif
#endif
#endif
/*
 * This scheme uses 2-level page tables in 32-bit mode and 3-level
 * page tables in 64-bit mode.
 *
 * While we're still in 32-bit mode we do the following:
 *
 *   offset:						13 bits
 * 1st level: 1024 64-bit TTEs in an 8K page for	10 bits
 * 2nd level: 512 32-bit pointers in the pmap for 	 9 bits
 *							-------
 * total:						32 bits
 *
 * In 64-bit mode the Spitfire and Blackbird CPUs support only
 * 44-bit virtual addresses.  All addresses between
 * 0x0000 07ff ffff ffff and 0xffff f800 0000 0000 are in the
 * "VA hole" and trap, so we don't have to track them.  However,
 * we do need to keep them in mind during PT walking.  If the
 * size of the address "hole" ever changes we need to rework
 * all the page table handling.
 *
 *   offset:						13 bits
 * 1st level: 1024 64-bit TTEs in an 8K page for	10 bits
 * 2nd level: 1024 64-bit pointers in an 8K page for 	10 bits
 * 3rd level: 1024 64-bit pointers in the segmap for 	10 bits
 *							-------
 * total:						43 bits
 *
 * Of course, this means for 32-bit spaces we always have a (practically)
 * wasted page for the segmap (only one entry used) and half a page wasted
 * for the page directory.  We still have need of one extra bit 8^(.
 */

#define HOLESHIFT	(43)

#define PTSZ	(PAGE_SIZE/8)			/* 64-bit entries per page table page */
#define PDSZ	(PTSZ)				/* entries per page directory page */
#define STSZ	(PTSZ)				/* entries per segment table (psegs) */

#define PTSHIFT		(13)
#define	PDSHIFT		(10+PTSHIFT)
#define STSHIFT		(10+PDSHIFT)

#define PTMASK		(PTSZ-1)
#define PDMASK		(PDSZ-1)
#define STMASK		(STSZ-1)

#ifndef _LOCORE

#ifdef _LP64
int	sparc64_mmap_range_test(vaddr_t, vaddr_t);
#define	MD_MMAP_RANGE_TEST(MINVA, MAXVA)	sparc64_mmap_range_test(MINVA, MAXVA)
#endif

/*
 * Support for big page sizes.  This maps the page size to the
 * page bits.
 */
struct page_size_map {
	uint64_t mask;
	uint64_t code;
#if defined(DEBUG) || 1
	uint64_t use;
#endif
};
extern struct page_size_map page_size_map[];
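
/*
 * Illustrative sketch (editor's addition, not kernel code): one plausible
 * way a consumer could pick the largest usable page size, under the
 * assumptions that "mask" is (pagesize - 1), "code" holds the matching
 * TTE size bits, and the array is sorted largest-first and terminated by
 * a zero mask whose code selects the base 8K size.  The real table and
 * its users live in pmap.c and may differ.
 */
#if 0	/* example only */
static inline uint64_t
example_pick_page_code(paddr_t pa, vaddr_t va, vsize_t size)
{
	const struct page_size_map *psm;

	for (psm = page_size_map; psm->mask != 0; psm++) {
		/* The mapping must be aligned and large enough. */
		if (((pa | va) & psm->mask) == 0 && size > psm->mask)
			break;
	}
	return psm->code;	/* OR into the TTE data word */
}
#endif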

/*
 * Pmap stuff
 */

#define va_to_seg(v)	(int)((((paddr_t)(v))>>STSHIFT)&STMASK)
#define va_to_dir(v)	(int)((((paddr_t)(v))>>PDSHIFT)&PDMASK)
#define va_to_pte(v)	(int)((((paddr_t)(v))>>PTSHIFT)&PTMASK)
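
/*
 * Example (editor's addition): with 8K pages, PTSHIFT/PDSHIFT/STSHIFT
 * are 13/23/33, so va 0x123456000 decomposes as
 *	va_to_seg(va) == (va >> 33) & 0x3ff == 0x000
 *	va_to_dir(va) == (va >> 23) & 0x3ff == 0x246
 *	va_to_pte(va) == (va >> 13) & 0x3ff == 0x22b
 * with the remaining low 13 bits being the page offset (0 here).
 */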

#ifdef MULTIPROCESSOR
#define PMAP_LIST_MAXNUMCPU	CPUSET_MAXNUMCPU
#else
#define PMAP_LIST_MAXNUMCPU	1
#endif

struct pmap {
	unsigned int pm_refs;
	TAILQ_HEAD(, vm_page) pm_ptps;
	LIST_ENTRY(pmap) pm_list[PMAP_LIST_MAXNUMCPU];	/* per cpu ctx used list */

	struct pmap_statistics pm_stats;

	/*
	 * We record the context used on any cpu here. If the context
	 * is actually present in the TLB, it will be the plain context
	 * number. If the context is allocated, but has been flushed
	 * from the TLB, the number will be negative.
	 * If this pmap has no context allocated on that cpu, the entry
	 * will be 0.
	 */
	int pm_ctx[PMAP_LIST_MAXNUMCPU];	/* Current context per cpu */

	/*
	 * This contains 64-bit pointers to pages that contain
	 * 1024 64-bit pointers to page tables.  All addresses
	 * are physical.
	 *
	 * !!! Only touch this through pseg_get() and pseg_set() !!!
	 */
	paddr_t pm_physaddr;	/* physical address of pm_segs */
	int64_t *pm_segs;
};
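
/*
 * Illustrative sketch (editor's addition, not the kernel's pseg_get()):
 * the shape of a 3-level walk over the tree rooted at pm_segs.  Every
 * level holds physical addresses, so a real walk must use physical-ASI
 * loads; ldxa_phys() below is a hypothetical stand-in for such a load.
 */
#if 0	/* example only */
static inline int64_t
example_pseg_get(struct pmap *pm, vaddr_t va)
{
	paddr_t pdir, ptbl;

	pdir = (paddr_t)ldxa_phys(pm->pm_physaddr +
	    (paddr_t)va_to_seg(va) * sizeof(int64_t));
	if (pdir == 0)
		return (0);
	ptbl = (paddr_t)ldxa_phys(pdir +
	    (paddr_t)va_to_dir(va) * sizeof(int64_t));
	if (ptbl == 0)
		return (0);
	return ((int64_t)ldxa_phys(ptbl +
	    (paddr_t)va_to_pte(va) * sizeof(int64_t)));
}
#endif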

/*
 * This comes from the PROM and is used to map prom entries.
 */
struct prom_map {
	uint64_t	vstart;
	uint64_t	vsize;
	uint64_t	tte;
};

#define PMAP_NC		0x001	/* Don't cache, set the E bit in the page */
#define PMAP_NVC	0x002	/* Don't enable the virtual cache */
#define PMAP_LITTLE	0x004	/* Map in little endian mode */
/* Large page size hints --
   we really should use another param to pmap_enter() */
#define PMAP_8K		0x000
#define PMAP_64K	0x008	/* Use 64K page */
#define PMAP_512K	0x010
#define PMAP_4M		0x018
#define PMAP_SZ_TO_TTE(x)	(((x)&0x018)<<58)
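/*
 * Example (editor's addition): PMAP_SZ_TO_TTE(PMAP_4M) moves the hint
 * bits 0x018 up to bits <62:61>, the TTE data size field, selecting the
 * 4MB page size; PMAP_8K (0x000) leaves the field at 0 for 8K pages.
 */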
/* If these bits differ in VAs mapped to the same PA,
   there is aliasing in the D$ */
#define VA_ALIAS_MASK   (1 << 13)
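/*
 * Example (editor's addition): two mappings of one physical page alias
 * in the D$ when ((va1 ^ va2) & VA_ALIAS_MASK) != 0, i.e. when bit 13
 * of the two VAs differs.
 */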
#define PMAP_WC		0x20	/* allow write combining */

#ifdef	_KERNEL
#ifdef PMAP_COUNT_DEBUG
/* diagnostic versions if PMAP_COUNT_DEBUG option is used */
int pmap_count_res(struct pmap *);
int pmap_count_wired(struct pmap *);
#define	pmap_resident_count(pm)		pmap_count_res((pm))
#define	pmap_wired_count(pm)		pmap_count_wired((pm))
#else
#define	pmap_resident_count(pm)		((pm)->pm_stats.resident_count)
#define	pmap_wired_count(pm)		((pm)->pm_stats.wired_count)
#endif

#define	pmap_phys_address(x)		(x)

void pmap_activate_pmap(struct pmap *);
void pmap_update(struct pmap *);
void pmap_bootstrap(u_long, u_long);

/* make sure all page mappings are modulo 16K to prevent d$ aliasing */
#define	PMAP_PREFER(fo, va, sz, td)	pmap_prefer((fo), (va), (td))
static inline void
pmap_prefer(vaddr_t fo, vaddr_t *va, int td)
{
	vaddr_t newva;
	vaddr_t m;

	m = 2 * PAGE_SIZE;
	newva = (*va & ~(m - 1)) | (fo & (m - 1));

	if (td) {
		if (newva > *va)
			newva -= m;
	} else {
		if (newva < *va)
			newva += m;
	}
	*va = newva;
}
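
/*
 * Example (editor's addition): with 8K pages, m is 16K.  For a file
 * offset fo == 0x2000, *va == 0x10000 and td == 0 (search upward),
 * newva == (0x10000 & ~0x3fff) | 0x2000 == 0x12000 >= *va, so *va
 * becomes 0x12000: bit 13 of the mapping now matches bit 13 of the
 * offset, so mappings of the same pages land on the same D$ color.
 */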

#define	PMAP_GROWKERNEL         /* turn on pmap_growkernel interface */
#define PMAP_NEED_PROCWR

void pmap_procwr(struct proc *, vaddr_t, size_t);

/* SPARC specific? */
int             pmap_dumpsize(void);
int             pmap_dumpmmu(int (*)(dev_t, daddr_t, void *, size_t),
                                 daddr_t);
int		pmap_pa_exists(paddr_t);
void		switchexit(struct lwp *, int);
void		pmap_kprotect(vaddr_t, vm_prot_t);

/* SPARC64 specific */
void		pmap_copy_page_phys(paddr_t, paddr_t);
void		pmap_zero_page_phys(paddr_t);

#ifdef SUN4V
/* sun4v specific */
void		pmap_setup_intstack_sun4v(paddr_t);
void		pmap_setup_tsb_sun4v(struct tsb_desc*);
#endif

/* Installed physical memory, as discovered during bootstrap. */
extern int phys_installed_size;
extern struct mem_region *phys_installed;

#define	__HAVE_VM_PAGE_MD

/*
 * For each struct vm_page, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t.
 */
struct pmap;
typedef struct pv_entry {
	struct pv_entry	*pv_next;	/* next pv_entry */
	struct pmap	*pv_pmap;	/* pmap where mapping lies */
	vaddr_t		pv_va;		/* virtual address for mapping */
} *pv_entry_t;
/* PV flags encoded in the low bits of the VA of the first pv_entry */

struct vm_page_md {
	struct pv_entry mdpg_pvh;
};
#define	VM_MDPAGE_INIT(pg)						\
do {									\
	(pg)->mdpage.mdpg_pvh.pv_next = NULL;				\
	(pg)->mdpage.mdpg_pvh.pv_pmap = NULL;				\
	(pg)->mdpage.mdpg_pvh.pv_va = 0;				\
} while (/*CONSTCOND*/0)
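
/*
 * Illustrative sketch (editor's addition): walking every mapping of a
 * page.  The head pv_entry is embedded in the vm_page; a NULL pv_pmap
 * in the head means no mapping is recorded there.  Remember that the
 * head's pv_va also carries the PV flags in its low bits (see above),
 * so the VA proper is pv_va with those bits masked off.
 */
#if 0	/* example only */
static inline void
example_pv_walk(struct vm_page *pg)
{
	struct pv_entry *pv;

	for (pv = &pg->mdpage.mdpg_pvh; pv != NULL; pv = pv->pv_next) {
		if (pv->pv_pmap == NULL)
			continue;	/* unused head entry */
		/* pv->pv_pmap maps this page at pv->pv_va & ~PAGE_MASK */
	}
}
#endif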

#ifdef MULTIPROCESSOR
#define pmap_ctx_cpu(PM, C)	((PM)->pm_ctx[(C)])
#define pmap_ctx(PM)		pmap_ctx_cpu((PM), cpu_number())
#else
#define pmap_ctx(PM)		((PM)->pm_ctx[0])
#endif

#endif	/* _KERNEL */

#endif	/* _LOCORE */
#endif	/* _MACHINE_PMAP_H_ */