/*	$NetBSD: pmap.h,v 1.74 2020/08/17 03:19:35 mrg Exp $	*/

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Ralph Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap.h	8.1 (Berkeley) 6/10/93
 */

/*
 * Copyright (c) 1987 Carnegie-Mellon University
 *
 * This code is derived from software contributed to Berkeley by
 * Ralph Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap.h	8.1 (Berkeley) 6/10/93
 */

#ifndef	_MIPS_PMAP_H_
#define	_MIPS_PMAP_H_

#ifdef _KERNEL_OPT
#include "opt_multiprocessor.h"
#include "opt_uvmhist.h"
#include "opt_cputype.h"
#endif

#include <sys/evcnt.h>
#include <sys/kcpuset.h>
#include <sys/kernhist.h>

#ifndef __BSD_PTENTRY_T__
#define	__BSD_PTENTRY_T__
typedef uint32_t pt_entry_t;
#define	PRIxPTE		PRIx32
#endif /* __BSD_PTENTRY_T__ */

#define	KERNEL_PID			0

#if defined(__PMAP_PRIVATE)

#include <mips/locore.h>
#include <mips/cache.h>

#define	PMAP_VIRTUAL_CACHE_ALIASES
#define	PMAP_INVALID_SEGTAB_ADDRESS	((pmap_segtab_t *)NULL)
#define	PMAP_TLB_NEED_SHOOTDOWN
#define	PMAP_TLB_FLUSH_ASID_ON_RESET	false
#if UPAGES > 1
#define	PMAP_TLB_WIRED_UPAGES		MIPS3_TLB_WIRED_UPAGES
#endif
#define	pmap_md_tlb_asid_max()		(MIPS_TLB_NUM_PIDS - 1)
#ifdef MULTIPROCESSOR
#define	PMAP_NO_PV_UNCACHED
#endif

/*
 * We need the pmap_segtabs to be aligned on MIPS*R2 so that we can use
 * the EXT/INS instructions on their addresses.
 */
#if (MIPS32R2 + MIPS64R2 + MIPS64R2_RMIXL) > 0
#define	PMAP_SEGTAB_ALIGN __aligned(sizeof(void *)*NSEGPG) __section(".data1")
#endif
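
/*
 * Illustrative sketch only (the "example_" name below is hypothetical and
 * not part of this header; SEGSHIFT/NSEGPG are assumed visible, and the
 * multi-level LP64 case is ignored): with the segtab aligned to
 * sizeof(void *) * NSEGPG, the scaled segment index always fits within the
 * zero low-order bits of the base pointer, so the entry address can be
 * formed by merging the index bits into the base (a single EXT/INS pair on
 * MIPS*R2) rather than by a separate shift-and-add.
 */
static inline void **
example_segtab_entry(void **segtab_base, vaddr_t va)
{
	const vaddr_t idx = (va >> SEGSHIFT) & (NSEGPG - 1);

	/* base | offset == base + offset because the base is aligned */
	return (void **)((uintptr_t)segtab_base | (idx * sizeof(void *)));
}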

#include <uvm/uvm_physseg.h>

void	pmap_md_init(void);
void	pmap_md_icache_sync_all(void);
void	pmap_md_icache_sync_range_index(vaddr_t, vsize_t);
void	pmap_md_page_syncicache(struct vm_page *, const kcpuset_t *);
bool	pmap_md_vca_add(struct vm_page *, vaddr_t, pt_entry_t *);
void	pmap_md_vca_clean(struct vm_page *, int);
void	pmap_md_vca_remove(struct vm_page *, vaddr_t, bool, bool);
bool	pmap_md_ok_to_steal_p(const uvm_physseg_t, size_t);
bool	pmap_md_tlb_check_entry(void *, vaddr_t, tlb_asid_t, pt_entry_t);

static inline bool
pmap_md_virtual_cache_aliasing_p(void)
{
	return MIPS_CACHE_VIRTUAL_ALIAS;
}

static inline vsize_t
pmap_md_cache_prefer_mask(void)
{
	return MIPS_HAS_R4K_MMU ? mips_cache_info.mci_cache_prefer_mask : 0;
}

static inline void
pmap_md_xtab_activate(struct pmap *pm, struct lwp *l)
{

	/* nothing */
}

static inline void
pmap_md_xtab_deactivate(struct pmap *pm)
{

	/* nothing */
}

#endif /* __PMAP_PRIVATE */

struct tlbmask {
	vaddr_t	tlb_hi;		/* VA + ASID (cf. the CP0 EntryHi register) */
#ifdef __mips_o32
	uint32_t tlb_lo0;	/* even page: PFN + attributes (EntryLo0) */
	uint32_t tlb_lo1;	/* odd page: PFN + attributes (EntryLo1) */
#else
	uint64_t tlb_lo0;	/* even page: PFN + attributes (EntryLo0) */
	uint64_t tlb_lo1;	/* odd page: PFN + attributes (EntryLo1) */
#endif
	uint32_t tlb_mask;	/* page size (cf. the CP0 PageMask register) */
};

#ifdef _LP64
#define	PMAP_SEGTABSIZE		NSEGPG
#else
#define	PMAP_SEGTABSIZE		(1 << (31 - SEGSHIFT))
#endif

#include <uvm/uvm_pmap.h>
#include <uvm/pmap/vmpagemd.h>
#include <uvm/pmap/pmap.h>
#include <uvm/pmap/pmap_tlb.h>
#include <uvm/pmap/pmap_synci.h>

#ifdef _KERNEL
/*
 * Select CCA to use for unmanaged pages.
 */
#define	PMAP_CCA_FOR_PA(pa)	CCA_UNCACHED		/* uncached */

#if defined(_MIPS_PADDR_T_64BIT) || defined(_LP64)
#define	PGC_NOCACHE	0x4000000000000000ULL
#define	PGC_PREFETCH	0x2000000000000000ULL
#endif

#if defined(__PMAP_PRIVATE)
#include <mips/pte.h>
#endif

/*
 * The user address space is 2GB (0x0 - 0x80000000).
 * User programs are laid out in memory as follows:
 *			address
 *	USRTEXT		0x00001000
 *	USRDATA		USRTEXT + text_size
 *	USRSTACK	0x7FFFFFFF
 *
 * The user address space is mapped using a two-level structure where
 * virtual address bits 30..22 are used to index into a segment table
 * which points to a page worth of PTEs (a 4096-byte page holds 1024
 * PTEs).  Bits 21..12 are then used to index a PTE which describes a
 * page within a segment.
 *
 * The wired entries in the TLB will contain the following:
 *	0-1	(UPAGES)	for curproc user struct and kernel stack.
 *
 * Note: The kernel doesn't use the same data structures as user programs.
 * All the kernel PTEs are stored in a single array in Sysmap which is
 * dynamically allocated at boot time.
 */
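
/*
 * Illustrative sketch only (the "example_" names are hypothetical and not
 * part of this header): how a 32-bit user VA splits under the two-level
 * scheme described above, assuming 4 KB pages and 4-byte PTEs.
 */
static inline unsigned
example_segtab_index(uint32_t va)
{
	return (va >> 22) & 0x1ff;	/* bits 30..22: which segment */
}

static inline unsigned
example_pte_index(uint32_t va)
{
	return (va >> 12) & 0x3ff;	/* bits 21..12: which PTE in the segment */
}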

#define	pmap_phys_address(x)	mips_ptob(x)

/*
 *	Bootstrap the system enough to run with virtual memory.
 */
void	pmap_bootstrap(void);
void	pmap_md_alloc_ephemeral_address_space(struct cpu_info *);
void	pmap_procwr(struct proc *, vaddr_t, size_t);
#define	PMAP_NEED_PROCWR

/*
 * pmap_prefer() helps reduce virtual-coherency exceptions in
 * the virtually-indexed cache on mips3 CPUs.
 */
#ifdef MIPS3_PLUS
#define	PMAP_PREFER(pa, va, sz, td)	pmap_prefer((pa), (va), (sz), (td))
void	pmap_prefer(vaddr_t, vaddr_t *, vsize_t, int);
#endif /* MIPS3_PLUS */
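
/*
 * Illustrative sketch only (not the actual pmap_prefer() implementation;
 * "example_prefer" is a hypothetical name): the idea is to nudge the
 * candidate VA so that the bits covered by the cache prefer mask match
 * those of the object offset, giving every mapping of the same object the
 * same virtual-cache "color" and thereby avoiding aliases.
 */
static inline vaddr_t
example_prefer(vaddr_t foff, vaddr_t va, vsize_t prefer_mask, int topdown)
{
	const vsize_t d = (foff - va) & prefer_mask;	/* color difference */

	if (d == 0)
		return va;
	/* move down for top-down allocation, otherwise up */
	return topdown ? va - (prefer_mask + 1 - d) : va + d;
}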

#define	PMAP_ENABLE_PMAP_KMPAGE	/* enable the PMAP_KMPAGE flag */

// these use register_t so we can pass XKPHYS addresses to them on N32
bool	pmap_md_direct_mapped_vaddr_p(register_t);
paddr_t	pmap_md_direct_mapped_vaddr_to_paddr(register_t);
bool	pmap_md_io_vaddr_p(vaddr_t);

/*
 * Alternate mapping hooks for pool pages.  Avoids thrashing the TLB.
 */
vaddr_t pmap_md_map_poolpage(paddr_t, size_t);
paddr_t pmap_md_unmap_poolpage(vaddr_t, size_t);
struct vm_page *pmap_md_alloc_poolpage(int);
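
/*
 * Illustrative sketch only ("example_map_poolpage" is hypothetical): the
 * reason these hooks avoid TLB thrashing is that pool pages can be reached
 * through the cached direct-mapped segment (KSEG0 on 32-bit kernels,
 * XKPHYS on LP64), so "mapping" one is just address arithmetic and needs
 * no TLB entry at all.  The 32-bit case, assuming the page lies in the low
 * 512 MB of physical memory that KSEG0 covers:
 */
static inline vaddr_t
example_map_poolpage(paddr_t pa)
{
	return (vaddr_t)(0x80000000UL + pa);	/* i.e. MIPS_PHYS_TO_KSEG0(pa) */
}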

/*
 * Other hooks for the pool allocator.
 */
paddr_t	pmap_md_pool_vtophys(vaddr_t);
vaddr_t	pmap_md_pool_phystov(paddr_t);
#define	POOL_VTOPHYS(va)	pmap_md_pool_vtophys((vaddr_t)va)
#define	POOL_PHYSTOV(pa)	pmap_md_pool_phystov((paddr_t)pa)

#ifdef MIPS64_SB1
/* uncached accesses are bad; all accesses should be cached (and coherent) */
#undef PMAP_PAGEIDLEZERO
#define	PMAP_PAGEIDLEZERO(pa)   (pmap_zero_page(pa), true)

int sbmips_cca_for_pa(paddr_t);

#undef PMAP_CCA_FOR_PA
#define	PMAP_CCA_FOR_PA(pa)	sbmips_cca_for_pa(pa)
#endif

#endif	/* _KERNEL */
#endif	/* _MIPS_PMAP_H_ */