/*	$NetBSD: pmap.h,v 1.30 2020/03/14 14:05:43 ad Exp $	*/

/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef	_POWERPC_OEA_PMAP_H_
#define	_POWERPC_OEA_PMAP_H_

#ifdef _LOCORE
#error use assym.h instead
#endif

#if defined(_LKM) || defined(_MODULE)
#error this file should not be included by loadable kernel modules
#endif

#ifdef _KERNEL_OPT
#include "opt_ppcarch.h"
#endif
#include <powerpc/oea/pte.h>

/*
 * Pmap stuff
 */
struct pmap {
#ifdef PPC_OEA64
	struct steg *pm_steg_table;		/* segment table pointer */
	/* XXX need way to track exec pages */
#endif

#if defined(PPC_OEA) || defined (PPC_OEA64_BRIDGE)
	register_t pm_sr[16];			/* segments used in this pmap */
	int pm_exec[16];			/* counts of exec mappings */
#endif
	register_t pm_vsid;			/* VSID bits */
	int pm_refs;				/* ref count */
	struct pmap_statistics pm_stats;	/* pmap statistics */
	unsigned int pm_evictions;		/* PVOs not in the page table */

#ifdef PPC_OEA64
	unsigned int pm_ste_evictions;
#endif
};

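/*
 * Indirect pmap entry points.  When the kernel can be built for more than
 * one OEA MMU flavour (32-bit, 64-bit, 64-bit bridge), each flavour supplies
 * one of these ops vectors and the bootstrap code installs the appropriate
 * one at run time; see PMAP_NEEDS_FIXUP and the pmap_setup{32,64,64bridge}()
 * helpers below.
 */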
struct pmap_ops {
	int (*pmapop_pte_spill)(struct pmap *, vaddr_t, bool);
	void (*pmapop_real_memory)(paddr_t *, psize_t *);
	void (*pmapop_init)(void);
	void (*pmapop_virtual_space)(vaddr_t *, vaddr_t *);
	pmap_t (*pmapop_create)(void);
	void (*pmapop_reference)(pmap_t);
	void (*pmapop_destroy)(pmap_t);
	void (*pmapop_copy)(pmap_t, pmap_t, vaddr_t, vsize_t, vaddr_t);
	void (*pmapop_update)(pmap_t);
	int (*pmapop_enter)(pmap_t, vaddr_t, paddr_t, vm_prot_t, u_int);
	void (*pmapop_remove)(pmap_t, vaddr_t, vaddr_t);
	void (*pmapop_kenter_pa)(vaddr_t, paddr_t, vm_prot_t, u_int);
	void (*pmapop_kremove)(vaddr_t, vsize_t);
	bool (*pmapop_extract)(pmap_t, vaddr_t, paddr_t *);

	void (*pmapop_protect)(pmap_t, vaddr_t, vaddr_t, vm_prot_t);
	void (*pmapop_unwire)(pmap_t, vaddr_t);
	void (*pmapop_page_protect)(struct vm_page *, vm_prot_t);
	bool (*pmapop_query_bit)(struct vm_page *, int);
	bool (*pmapop_clear_bit)(struct vm_page *, int);

	void (*pmapop_activate)(struct lwp *);
	void (*pmapop_deactivate)(struct lwp *);

	void (*pmapop_pinit)(pmap_t);
	void (*pmapop_procwr)(struct proc *, vaddr_t, size_t);

	void (*pmapop_pte_print)(volatile struct pte *);
	void (*pmapop_pteg_check)(void);
	void (*pmapop_print_mmuregs)(void);
	void (*pmapop_print_pte)(pmap_t, vaddr_t);
	void (*pmapop_pteg_dist)(void);
	void (*pmapop_pvo_verify)(void);
	vaddr_t (*pmapop_steal_memory)(vsize_t, vaddr_t *, vaddr_t *);
	void (*pmapop_bootstrap)(paddr_t, paddr_t);
};

#ifdef	_KERNEL
#include <sys/cdefs.h>
__BEGIN_DECLS
#include <sys/param.h>
#include <sys/systm.h>

#if defined (PPC_OEA) || defined (PPC_OEA64_BRIDGE)
extern register_t iosrtable[];
#endif
extern int pmap_use_altivec;

#define pmap_clear_modify(pg)		(pmap_clear_bit((pg), PTE_CHG))
#define	pmap_clear_reference(pg)	(pmap_clear_bit((pg), PTE_REF))
#define	pmap_is_modified(pg)		(pmap_query_bit((pg), PTE_CHG))
#define	pmap_is_referenced(pg)		(pmap_query_bit((pg), PTE_REF))

#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)

/* ARGSUSED */
static __inline bool
pmap_remove_all(struct pmap *pmap)
{
	/* Nothing. */
	return false;
}

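/*
 * If this kernel is not configured for exactly one OEA MMU flavour, the
 * pmap entry points cannot be bound at compile time and are instead
 * dispatched through the pmap_ops vector selected at boot.
 */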
#if (defined(PPC_OEA) + defined(PPC_OEA64) + defined(PPC_OEA64_BRIDGE)) != 1
#define	PMAP_NEEDS_FIXUP
#endif

extern volatile struct pteg *pmap_pteg_table;
extern unsigned int pmap_pteg_cnt;
extern unsigned int pmap_pteg_mask;

void pmap_bootstrap(vaddr_t, vaddr_t);
bool pmap_extract(pmap_t, vaddr_t, paddr_t *);
bool pmap_query_bit(struct vm_page *, int);
bool pmap_clear_bit(struct vm_page *, int);
void pmap_real_memory(paddr_t *, psize_t *);
void pmap_procwr(struct proc *, vaddr_t, size_t);
int pmap_pte_spill(pmap_t, vaddr_t, bool);
int pmap_ste_spill(pmap_t, vaddr_t, bool);
void pmap_pinit(pmap_t);

u_int powerpc_mmap_flags(paddr_t);
#define POWERPC_MMAP_FLAG_MASK	0xf
#define POWERPC_MMAP_FLAG_PREFETCHABLE	0x1
#define POWERPC_MMAP_FLAG_CACHEABLE	0x2

#define pmap_phys_address(ppn)		(ppn & ~POWERPC_MMAP_FLAG_MASK)
#define pmap_mmap_flags(ppn)		powerpc_mmap_flags(ppn)
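/*
 * The low POWERPC_MMAP_FLAG_MASK bits of a device mmap cookie carry
 * cacheability hints; pmap_phys_address() strips them and
 * pmap_mmap_flags() decodes them.  A purely illustrative sketch:
 *
 *	cookie = pa | POWERPC_MMAP_FLAG_PREFETCHABLE;
 *	pa     = pmap_phys_address(cookie);
 *	flags  = pmap_mmap_flags(cookie);
 */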

static __inline paddr_t vtophys (vaddr_t);

/*
 * Alternate mapping hooks for pool pages.  Avoids thrashing the TLB.
 *
 * Note: This won't work if we have more memory than can be direct-mapped
 * VA==PA all at once.  But pmap_copy_page() and pmap_zero_page() will have
 * this problem, too.
 */
#if !defined(PPC_OEA64) && !defined (PPC_OEA64_BRIDGE)
#define	PMAP_MAP_POOLPAGE(pa)	(pa)
#define	PMAP_UNMAP_POOLPAGE(pa)	(pa)
#define POOL_VTOPHYS(va)	vtophys((vaddr_t) va)
#endif
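/*
 * On 32-bit OEA the pool hooks are identities: a hypothetical
 *
 *	vaddr_t va = PMAP_MAP_POOLPAGE(pa);
 *
 * simply reuses the direct VA==PA mapping of the page, so pool pages can be
 * used without creating and tearing down a separate kernel mapping.
 */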

static __inline paddr_t
vtophys(vaddr_t va)
{
	paddr_t pa;

	if (pmap_extract(pmap_kernel(), va, &pa))
		return pa;
	KASSERTMSG(0, "vtophys: pmap_extract of %#"PRIxVADDR" failed", va);
	return (paddr_t) -1;
}


#ifdef PMAP_NEEDS_FIXUP
extern const struct pmap_ops *pmapops;
extern const struct pmap_ops pmap32_ops;
extern const struct pmap_ops pmap64_ops;
extern const struct pmap_ops pmap64bridge_ops;

static __inline void
pmap_setup32(void)
{
	pmapops = &pmap32_ops;
}

static __inline void
pmap_setup64(void)
{
	pmapops = &pmap64_ops;
}

static __inline void
pmap_setup64bridge(void)
{
	pmapops = &pmap64bridge_ops;
}
#endif
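/*
 * The pmap_setup*() helpers are intended to be called from the early MD
 * bootstrap code, once the MMU flavour is known and before any other pmap
 * interface is used, so that calls dispatched through pmapops reach the
 * right implementation (for instance, a hypothetical early-boot path
 * calling pmap_setup64bridge() when running a bridge kernel on a 64-bit
 * CPU).
 */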

bool pmap_pageidlezero (paddr_t);
void pmap_syncicache (paddr_t, psize_t);
#ifdef PPC_OEA64
vaddr_t pmap_setusr (vaddr_t);
vaddr_t pmap_unsetusr (void);
#endif

#ifdef PPC_OEA64_BRIDGE
int pmap_setup_segment0_map(int use_large_pages, ...);
#endif

#define PMAP_MD_PREFETCHABLE		0x2000000
#define PMAP_STEAL_MEMORY
#define PMAP_NEED_PROCWR

void pmap_zero_page(paddr_t);
void pmap_copy_page(paddr_t, paddr_t);

LIST_HEAD(pvo_head, pvo_entry);

#define	__HAVE_VM_PAGE_MD

struct vm_page_md {
	unsigned int mdpg_attrs;
	struct pvo_head mdpg_pvoh;
#ifdef MODULAR
	uintptr_t mdpg_dummy[3];
#endif
};

#define	VM_MDPAGE_INIT(pg) do {			\
	(pg)->mdpage.mdpg_attrs = 0;		\
	LIST_INIT(&(pg)->mdpage.mdpg_pvoh);	\
} while (/*CONSTCOND*/0)

__END_DECLS
#endif	/* _KERNEL */

#endif	/* _POWERPC_OEA_PMAP_H_ */