xref: /netbsd-src/sys/arch/powerpc/include/oea/pmap.h (revision d16b7486a53dcb8072b60ec6fcb4373a2d0c27b7)
1 /*	$NetBSD: pmap.h,v 1.37 2022/05/07 07:10:46 rin Exp $	*/
2 
3 /*-
4  * Copyright (C) 1995, 1996 Wolfgang Solfrank.
5  * Copyright (C) 1995, 1996 TooLs GmbH.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *	This product includes software developed by TooLs GmbH.
19  * 4. The name of TooLs GmbH may not be used to endorse or promote products
20  *    derived from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
23  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25  * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
27  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
28  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
29  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
30  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
31  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #ifndef	_POWERPC_OEA_PMAP_H_
35 #define	_POWERPC_OEA_PMAP_H_
36 
37 #ifdef _LOCORE
38 #error use assym.h instead
39 #endif
40 
41 #ifdef _MODULE
42 #error this file should not be included by loadable kernel modules
43 #endif
44 
45 #ifdef _KERNEL_OPT
46 #include "opt_ppcarch.h"
47 #include "opt_modular.h"
48 #endif
49 #include <powerpc/oea/pte.h>
50 
51 #define	__HAVE_PMAP_PV_TRACK
52 #include <uvm/pmap/pmap_pvt.h>
53 
54 /*
55  * Pmap stuff
56  */
/*
 * Machine-dependent pmap: one per address space.  Which fields exist
 * depends on the configured MMU flavor (PPC_OEA, PPC_OEA64, and/or
 * PPC_OEA64_BRIDGE).
 */
struct pmap {
#ifdef PPC_OEA64
	struct steg *pm_steg_table;		/* segment table pointer */
	/* XXX need way to track exec pages */
#endif

#if defined(PPC_OEA) || defined (PPC_OEA64_BRIDGE)
	register_t pm_sr[16];			/* segments used in this pmap */
	int pm_exec[16];			/* counts of exec mappings */
#endif
	register_t pm_vsid;			/* VSID bits */
	int pm_refs;				/* ref count */
	struct pmap_statistics pm_stats;	/* pmap statistics */
	unsigned int pm_evictions;		/* pvo's not in page table */

#ifdef PPC_OEA64
	unsigned int pm_ste_evictions;		/* STEs not in the segment
						   table (by analogy with
						   pm_evictions above) */
#endif
};
76 
/*
 * Indirection table of pmap entry points.  When more than one of
 * PPC_OEA/PPC_OEA64/PPC_OEA64_BRIDGE is configured (PMAP_NEEDS_FIXUP,
 * defined below), the kernel selects the proper implementation at boot
 * via pmap_setup32()/pmap_setup64()/pmap_setup64bridge(), which point
 * "pmapops" at one of pmap32_ops/pmap64_ops/pmap64bridge_ops.
 */
struct pmap_ops {
	/* MI pmap(9) interface entry points. */
	int (*pmapop_pte_spill)(struct pmap *, vaddr_t, bool);
	void (*pmapop_real_memory)(paddr_t *, psize_t *);
	void (*pmapop_init)(void);
	void (*pmapop_virtual_space)(vaddr_t *, vaddr_t *);
	pmap_t (*pmapop_create)(void);
	void (*pmapop_reference)(pmap_t);
	void (*pmapop_destroy)(pmap_t);
	void (*pmapop_copy)(pmap_t, pmap_t, vaddr_t, vsize_t, vaddr_t);
	void (*pmapop_update)(pmap_t);
	int (*pmapop_enter)(pmap_t, vaddr_t, paddr_t, vm_prot_t, u_int);
	void (*pmapop_remove)(pmap_t, vaddr_t, vaddr_t);
	void (*pmapop_kenter_pa)(vaddr_t, paddr_t, vm_prot_t, u_int);
	void (*pmapop_kremove)(vaddr_t, vsize_t);
	bool (*pmapop_extract)(pmap_t, vaddr_t, paddr_t *);

	void (*pmapop_protect)(pmap_t, vaddr_t, vaddr_t, vm_prot_t);
	void (*pmapop_unwire)(pmap_t, vaddr_t);
	void (*pmapop_page_protect)(struct vm_page *, vm_prot_t);
	void (*pmapop_pv_protect)(paddr_t, vm_prot_t);
	bool (*pmapop_query_bit)(struct vm_page *, int);
	bool (*pmapop_clear_bit)(struct vm_page *, int);

	void (*pmapop_activate)(struct lwp *);
	void (*pmapop_deactivate)(struct lwp *);

	void (*pmapop_pinit)(pmap_t);
	void (*pmapop_procwr)(struct proc *, vaddr_t, size_t);

	/* Debugger/diagnostic helpers. */
	void (*pmapop_pte_print)(volatile struct pte *);
	void (*pmapop_pteg_check)(void);
	void (*pmapop_print_mmuregs)(void);
	void (*pmapop_print_pte)(pmap_t, vaddr_t);
	void (*pmapop_pteg_dist)(void);
	void (*pmapop_pvo_verify)(void);
	/* Early-boot memory stealing and bootstrap. */
	vaddr_t (*pmapop_steal_memory)(vsize_t, vaddr_t *, vaddr_t *);
	void (*pmapop_bootstrap)(paddr_t, paddr_t);
	void (*pmapop_bootstrap1)(paddr_t, paddr_t);
	void (*pmapop_bootstrap2)(void);
};
117 
118 #ifdef	_KERNEL
119 #include <sys/cdefs.h>
120 __BEGIN_DECLS
121 #include <sys/param.h>
122 #include <sys/systm.h>
123 
124 #if defined (PPC_OEA) || defined (PPC_OEA64_BRIDGE)
125 extern register_t iosrtable[];
126 #endif
127 extern int pmap_use_altivec;
128 
/*
 * Modified/referenced tracking: the MI hooks map directly onto the
 * PTE_CHG (changed) and PTE_REF (referenced) bits (from
 * <powerpc/oea/pte.h>), maintained by pmap_query_bit()/pmap_clear_bit().
 */
#define pmap_clear_modify(pg)		(pmap_clear_bit((pg), PTE_CHG))
#define	pmap_clear_reference(pg)	(pmap_clear_bit((pg), PTE_REF))
#define	pmap_is_modified(pg)		(pmap_query_bit((pg), PTE_CHG))
#define	pmap_is_referenced(pg)		(pmap_query_bit((pg), PTE_REF))

/* Statistics accessors, backed by pm_stats in struct pmap. */
#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)
136 
/*
 * pmap_remove_all() hook: this port has no wholesale-teardown
 * optimization, so there is nothing to do here.  NOTE(review): the
 * false return presumably tells the caller the mappings were not
 * removed en masse — confirm against the MI pmap(9) contract.
 */
/* ARGSUSED */
static __inline bool
pmap_remove_all(struct pmap *pmap)
{
	/* Nothing. */
	return false;
}
144 
145 #if (defined(PPC_OEA) + defined(PPC_OEA64) + defined(PPC_OEA64_BRIDGE)) != 1
146 #define	PMAP_NEEDS_FIXUP
147 #endif
148 
149 extern volatile struct pteg *pmap_pteg_table;
150 extern unsigned int pmap_pteg_cnt;
151 extern unsigned int pmap_pteg_mask;
152 
153 void pmap_bootstrap(vaddr_t, vaddr_t);
154 void pmap_bootstrap1(vaddr_t, vaddr_t);
155 void pmap_bootstrap2(void);
156 bool pmap_extract(pmap_t, vaddr_t, paddr_t *);
157 bool pmap_query_bit(struct vm_page *, int);
158 bool pmap_clear_bit(struct vm_page *, int);
159 void pmap_real_memory(paddr_t *, psize_t *);
160 void pmap_procwr(struct proc *, vaddr_t, size_t);
161 int pmap_pte_spill(pmap_t, vaddr_t, bool);
162 int pmap_ste_spill(pmap_t, vaddr_t, bool);
163 void pmap_pinit(pmap_t);
164 
165 #ifdef PPC_OEA601
166 bool	pmap_extract_ioseg601(vaddr_t, paddr_t *);
167 #endif /* PPC_OEA601 */
168 #ifdef PPC_OEA
169 bool	pmap_extract_battable(vaddr_t, paddr_t *);
170 #endif /* PPC_OEA */
171 
/*
 * Device mmap(2) cookie handling: the low 4 bits of the cookie carry
 * mapping-attribute flags.  pmap_phys_address() strips them off to
 * recover the physical address; pmap_mmap_flags() converts them via
 * powerpc_mmap_flags().
 */
u_int powerpc_mmap_flags(paddr_t);
#define POWERPC_MMAP_FLAG_MASK	0xf
#define POWERPC_MMAP_FLAG_PREFETCHABLE	0x1
#define POWERPC_MMAP_FLAG_CACHEABLE	0x2

#define pmap_phys_address(ppn)		(ppn & ~POWERPC_MMAP_FLAG_MASK)
#define pmap_mmap_flags(ppn)		powerpc_mmap_flags(ppn)
179 
180 static __inline paddr_t vtophys (vaddr_t);
181 
182 /*
183  * Alternate mapping hooks for pool pages.  Avoids thrashing the TLB.
184  *
185  * Note: This won't work if we have more memory than can be direct-mapped
186  * VA==PA all at once.  But pmap_copy_page() and pmap_zero_page() will have
187  * this problem, too.
188  */
189 #if !defined(PPC_OEA64) && !defined (PPC_OEA64_BRIDGE)
190 #define	PMAP_MAP_POOLPAGE(pa)	(pa)
191 #define	PMAP_UNMAP_POOLPAGE(pa)	(pa)
192 #define POOL_VTOPHYS(va)	vtophys((vaddr_t) va)
193 #endif
194 
195 static __inline paddr_t
196 vtophys(vaddr_t va)
197 {
198 	paddr_t pa;
199 
200 	if (pmap_extract(pmap_kernel(), va, &pa))
201 		return pa;
202 	KASSERTMSG(0, "vtophys: pmap_extract of %#"PRIxVADDR" failed", va);
203 	return (paddr_t) -1;
204 }
205 
206 
207 #ifdef PMAP_NEEDS_FIXUP
208 extern const struct pmap_ops *pmapops;
209 extern const struct pmap_ops pmap32_ops;
210 extern const struct pmap_ops pmap64_ops;
211 extern const struct pmap_ops pmap64bridge_ops;
212 
/* Select the 32-bit OEA pmap implementation (boot-time fixup). */
static __inline void
pmap_setup32(void)
{
	pmapops = &pmap32_ops;
}

/* Select the OEA64 pmap implementation (boot-time fixup). */
static __inline void
pmap_setup64(void)
{
	pmapops = &pmap64_ops;
}

/* Select the OEA64-bridge pmap implementation (boot-time fixup). */
static __inline void
pmap_setup64bridge(void)
{
	pmapops = &pmap64bridge_ops;
}
230 #endif
231 
232 bool pmap_pageidlezero (paddr_t);
233 void pmap_syncicache (paddr_t, psize_t);
234 #ifdef PPC_OEA64
235 vaddr_t pmap_setusr (vaddr_t);
236 vaddr_t pmap_unsetusr (void);
237 #endif
238 
239 #ifdef PPC_OEA64_BRIDGE
240 int pmap_setup_segment0_map(int use_large_pages, ...);
241 #endif
242 
243 #define PMAP_MD_PREFETCHABLE		0x2000000
244 #define PMAP_STEAL_MEMORY
245 #define PMAP_NEED_PROCWR
246 
247 void pmap_zero_page(paddr_t);
248 void pmap_copy_page(paddr_t, paddr_t);
249 
250 LIST_HEAD(pvo_head, pvo_entry);
251 
252 #define	__HAVE_VM_PAGE_MD
253 
/*
 * Per-physical-page pmap state, embedded in struct vm_page_md below
 * (and usable for unmanaged pages via __HAVE_PMAP_PV_TRACK).
 */
struct pmap_page {
	unsigned int pp_attrs;		/* attribute bits (presumably the
					   PTE_CHG/PTE_REF summary) */
	struct pvo_head pp_pvoh;	/* list of mappings of this page */
#ifdef MODULAR
	/*
	 * NOTE(review): apparently reserved space so the layout stays
	 * compatible for loadable modules regardless of kernel options
	 * — confirm before relying on it.
	 */
	uintptr_t pp_dummy[3];
#endif
};
261 
/*
 * Machine-dependent per-vm_page data (__HAVE_VM_PAGE_MD).  The mdpg_*
 * defines alias the embedded struct pmap_page fields so existing code
 * can keep using the historical field names.
 */
struct vm_page_md {
	struct pmap_page mdpg_pp;
#define	mdpg_attrs	mdpg_pp.pp_attrs
#define	mdpg_pvoh	mdpg_pp.pp_pvoh
#ifdef MODULAR
#define	mdpg_dummy	mdpg_pp.pp_dummy
#endif
};
270 
/*
 * Initialize the machine-dependent part of a vm_page: clear the
 * attribute bits and start with an empty PVO (mapping) list.
 */
#define	VM_MDPAGE_INIT(pg) do {			\
	(pg)->mdpage.mdpg_attrs = 0;		\
	LIST_INIT(&(pg)->mdpage.mdpg_pvoh);	\
} while (/*CONSTCOND*/0)
275 
276 __END_DECLS
277 #endif	/* _KERNEL */
278 
279 #endif	/* _POWERPC_OEA_PMAP_H_ */
280