/* xref: /openbsd-src/sys/arch/mips64/include/pmap.h (revision f2da64fbbbf1b03f09f390ab01267c93dfd77c4c) */
1 /*      $OpenBSD: pmap.h,v 1.44 2016/05/11 15:50:29 visa Exp $ */
2 
3 /*
4  * Copyright (c) 1987 Carnegie-Mellon University
5  * Copyright (c) 1992, 1993
6  *	The Regents of the University of California.  All rights reserved.
7  *
8  * This code is derived from software contributed to Berkeley by
9  * Ralph Campbell.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. Neither the name of the University nor the names of its contributors
20  *    may be used to endorse or promote products derived from this software
21  *    without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
27  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  *
35  *	from: @(#)pmap.h	8.1 (Berkeley) 6/10/93
36  */
37 
38 #ifndef	_MIPS64_PMAP_H_
39 #define	_MIPS64_PMAP_H_
40 
41 #include <sys/mutex.h>
42 
43 #ifdef	_KERNEL
44 
45 #include <machine/pte.h>
46 
47 /*
48  * The user address space is currently limited to 2Gb (0x0 - 0x80000000).
49  *
50  * The user address space is mapped using a two level structure where
51  * the virtual addresses bits are split in three groups:
52  *   segment:page:offset
53  * where:
54  * - offset are the in-page offsets (PAGE_SHIFT bits)
55  * - page are the second level page table index
56  *   (PMAP_L2SHIFT - Log2(pt_entry_t) bits)
57  * - segment are the first level page table (segment) index
58  *   (PMAP_L2SHIFT - Log2(void *) bits)
59  *
 * This scheme allows the segment table and the page tables to have the
 * same size (1 << PMAP_L2SHIFT bytes, regardless of the pt_entry_t size),
 * so that they can share the same allocator.
63  *
64  * Note: The kernel doesn't use the same data structures as user programs.
65  * All the PTE entries are stored in a single array in Sysmap which is
66  * dynamically allocated at boot time.
67  */
68 
69 /*
70  * Size of second level page structs (page tables, and segment table) used
71  * by this pmap.
72  */
73 
/*
 * PMAP_L2SHIFT is log2 of the size, in bytes, of the second level page
 * structures (page tables and the segment table).  With 64-bit PTEs
 * (MIPS_PTE64) the tables are larger so they still map the same number
 * of pages.
 */
#ifdef MIPS_PTE64
#define	PMAP_L2SHIFT		14
#else
#define	PMAP_L2SHIFT		12
#endif
/* size, in bytes, of a page table or of the segment table */
#define	PMAP_L2SIZE		(1UL << PMAP_L2SHIFT)

/* number of PTEs in one second level page table */
#define	NPTEPG			(PMAP_L2SIZE / sizeof(pt_entry_t))
82 
83 /*
84  * Segment sizes
85  */
86 
/*
 * SEGSHIFT is the number of virtual address bits covered by one segment
 * (first level) entry: PAGE_SHIFT in-page bits, plus one bit per PTE
 * index bit in a page table (PMAP_L2SHIFT - log2(sizeof(pt_entry_t))).
 */
#ifdef MIPS_PTE64
#define	SEGSHIFT		(PAGE_SHIFT + PMAP_L2SHIFT - 3)	/* 8-byte PTEs */
#else
#define	SEGSHIFT		(PAGE_SHIFT + PMAP_L2SHIFT - 2)	/* 4-byte PTEs */
#endif
/* bytes of virtual address space mapped by one segment table entry */
#define	NBSEG			(1UL << SEGSHIFT)
#define	SEGOFSET		(NBSEG - 1)

/* round a virtual address down (resp. up) to a segment boundary */
#define	mips_trunc_seg(x)	((vaddr_t)(x) & ~SEGOFSET)
#define	mips_round_seg(x)	(((vaddr_t)(x) + SEGOFSET) & ~SEGOFSET)
/* second level page table (or NULL if none) covering va `v' in pmap `m' */
#define	pmap_segmap(m, v)	((m)->pm_segtab->seg_tab[((v) >> SEGSHIFT)])

/* number of segment entries */
#define	PMAP_SEGTABSIZE		(PMAP_L2SIZE / sizeof(void *))
101 
/*
 * First level (segment) table: one pointer to a second level page table
 * per NBSEG-sized chunk of the user address space.
 */
struct segtab {
	pt_entry_t	*seg_tab[PMAP_SEGTABSIZE];
};
105 
/*
 * Per-pmap ASID state.  NOTE(review): struct pmap ends with a
 * variable-length array of these (see PMAP_SIZEOF() below), presumably
 * one per CPU -- confirm against pmap.c.
 */
struct pmap_asid_info {
	u_int			pma_asid;	/* address space tag */
	u_int			pma_asidgen;	/* TLB PID generation number */
};
110 
/*
 * Machine dependent pmap structure.
 */
typedef struct pmap {
	struct mutex		pm_mtx;		/* pmap lock */
	int			pm_count;	/* pmap reference count */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	struct segtab		*pm_segtab;	/* pointers to pages of PTEs */
	/* ASID information; really variable length, see PMAP_SIZEOF() */
	struct pmap_asid_info	pm_asid[1];
} *pmap_t;
121 
/*
 * Compute the sizeof of a pmap structure holding `x' ASID info
 * structures.  Subtract one because one ASID info structure is already
 * included in the pmap structure itself.
 */
#define	PMAP_SIZEOF(x)							\
	(ALIGN(sizeof(struct pmap) +					\
	       (sizeof(struct pmap_asid_info) * ((x) - 1))))
129 
130 
/* machine-dependent pg_flags */
#define	PGF_UNCACHED	PG_PMAP0	/* Page is explicitly uncached */
#define	PGF_CACHED	PG_PMAP1	/* Page is currently cached */
#define	PGF_ATTR_MOD	PG_PMAP2	/* page modified attribute */
#define	PGF_ATTR_REF	PG_PMAP3	/* page referenced attribute */
/*
 * NOTE(review): the EOP flags appear to track a per-page end-of-page
 * check and its result -- confirm exact semantics in pmap.c.
 */
#define	PGF_EOP_CHECKED	PG_PMAP4
#define	PGF_EOP_VULN	PG_PMAP5
/* attribute bits kept when the other MD flags are cleared */
#define	PGF_PRESERVE	(PGF_ATTR_MOD | PGF_ATTR_REF)
139 
/* MD pmap_enter(9) flag: create an uncached mapping */
#define	PMAP_NOCACHE	PMAP_MD0

/* the kernel pmap, defined in pmap.c */
extern	struct pmap *const kernel_pmap_ptr;

#define	pmap_resident_count(pmap)       ((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)
#define	pmap_kernel()			(kernel_pmap_ptr)

#define	PMAP_STEAL_MEMORY		/* Enable 'stealing' during boot */
149 
/* let uvm ask the pmap for a preferred virtual address */
#define	PMAP_PREFER(pa, va)		pmap_prefer(pa, va)

/*
 * NOTE(review): pmap_prefer_mask is presumably derived from the cache
 * geometry to avoid virtual aliases -- confirm in pmap_bootstrap().
 */
extern vaddr_t pmap_prefer_mask;
/* pmap prefer alignment */
#define	PMAP_PREFER_ALIGN()						\
	(pmap_prefer_mask ? pmap_prefer_mask + 1 : 0)
/* pmap prefer offset in alignment */
#define	PMAP_PREFER_OFFSET(of)		((of) & pmap_prefer_mask)
158 
void	pmap_bootstrap(void);
vaddr_t	pmap_prefer(vaddr_t, vaddr_t);
int	pmap_emulate_modify(pmap_t, vaddr_t);
void	pmap_page_cache(vm_page_t, u_int);

/* MI pmap interface hooks that this pmap does not need */
#define	pmap_collect(x)			do { /* nothing */ } while (0)
#define	pmap_unuse_final(p)		do { /* nothing yet */ } while (0)
#define	pmap_remove_holes(vm)		do { /* nothing */ } while (0)
167 
168 /*
169  * Most R5000 processors (and related families) have a silicon bug preventing
170  * the ll/sc (and lld/scd) instructions from honouring the caching mode
171  * when accessing XKPHYS addresses.
172  *
173  * Since pool memory is allocated with pmap_map_direct() if __HAVE_PMAP_DIRECT,
174  * and many structures containing fields which will be used with
175  * <machine/atomic.h> routines are allocated from pools, __HAVE_PMAP_DIRECT can
176  * not be defined on systems which may use flawed processors.
177  */
/* direct mapping is unsafe on R5000/RM7000, see the errata note above */
#if !defined(CPU_R5000) && !defined(CPU_RM7000)
#define	__HAVE_PMAP_DIRECT
vaddr_t	pmap_map_direct(vm_page_t);
vm_page_t pmap_unmap_direct(vaddr_t);
#endif
183 
/*
 * MD flags to pmap_enter:
 */

/*
 * Mask keeping the page frame bits of a physical address.
 * NOTE(review): the heading above suggests the low bits may carry MD
 * flags alongside the pa -- confirm against pmap_enter() in pmap.c.
 */
#define	PMAP_PA_MASK	~((paddr_t)PAGE_MASK)

/* Kernel virtual address to page table entry */
#define	kvtopte(va) \
	(Sysmap + (((vaddr_t)(va) - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT))
/* User virtual address to pte page entry (PTE index within a page table) */
#define	uvtopte(va)	(((va) >> PAGE_SHIFT) & (NPTEPG -1))

extern	pt_entry_t *Sysmap;		/* kernel pte table */
extern	u_int Sysmapsize;		/* number of pte's in Sysmap */
198 
199 #endif	/* _KERNEL */
200 
201 #if !defined(_LOCORE)
/*
 * Physical-to-virtual entry: records one mapping of a physical page,
 * chained from the page's vm_page_md below.
 */
typedef struct pv_entry {
	struct pv_entry	*pv_next;	/* next pv_entry */
	struct pmap	*pv_pmap;	/* pmap where mapping lies */
	vaddr_t		pv_va;		/* virtual address for mapping */
} *pv_entry_t;
207 
/* per-physical-page MD data: the pv list and its lock */
struct vm_page_md {
	struct mutex	pv_mtx;		/* pv list lock */
	struct pv_entry pv_ent;		/* head of this page's pv list */
};
212 
/*
 * Initialize the MD part of a vm_page: set up the pv list lock and
 * clear the embedded first pv entry (pv_pmap == NULL marks it unused).
 */
#define	VM_MDPAGE_INIT(pg) \
	do { \
		mtx_init(&(pg)->mdpage.pv_mtx, IPL_VM); \
		(pg)->mdpage.pv_ent.pv_next = NULL; \
		(pg)->mdpage.pv_ent.pv_pmap = NULL; \
		(pg)->mdpage.pv_ent.pv_va = 0; \
	} while (0)
220 
221 #endif	/* !_LOCORE */
222 
223 #endif	/* !_MIPS64_PMAP_H_ */
224