xref: /csrg-svn/sys/sparc/include/pmap.h (revision 55123)
/*
 * Copyright (c) 1992 The Regents of the University of California.
 * All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)pmap.h	7.1 (Berkeley) 07/13/92
 *
 * from: $Header: pmap.h,v 1.9 92/06/17 06:10:22 torek Exp $
 */

#ifndef	_SPARC_PMAP_H_
#define _SPARC_PMAP_H_

#include "machine/pte.h"

/*
 * Pmap structure.
 *
 * The pmap structure really comes in two variants, one---a single
 * instance---for kernel virtual memory and the other---up to nproc
 * instances---for user virtual memory.  Unfortunately, we have to mash
 * both into the same structure.  Fortunately, they are almost the same.
 *
 * The kernel begins at 0xf8000000 and runs to 0xffffffff (although
 * some of this is not actually used).  Kernel space, including DVMA
 * space (for now?), is mapped identically into all user contexts.
 * There is no point in duplicating this mapping in each user process,
 * so kernel mappings do not appear in the user pmap structures.
 *
 * User space begins at 0x00000000 and runs through 0x1fffffff,
 * then has a `hole', then resumes at 0xe0000000 and runs until it
 * hits the kernel space at 0xf8000000.  This can be mapped
 * contiguously by ignoring the top two bits and pretending the
 * space goes from 0 to 0x37ffffff.  Typically the lower range is
 * used for text+data and the upper for stack, but the code here
 * makes no such distinction.
 *
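 *	For example, folding an upper-range address by ignoring the
 *	top two address bits:
 *
 *		0xe0000000 & 0x3fffffff == 0x20000000
 *
 *	so the two user ranges form the single contiguous range
 *	0x00000000..0x37ffffff.
 *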
 * Since each virtual segment covers 256 kbytes, the user space
 * requires 3584 segments, while the kernel (including DVMA) requires
 * only 512 segments.
 *
 * The segment map entry for virtual segment vseg is offset in
 * pmap->pm_rsegmap by 0 if pmap is not the kernel pmap, or by
 * NUSEG if it is.  We keep a pointer called pmap->pm_segmap
 * pre-offset by this value.  pmap->pm_segmap thus contains the
 * values to be loaded into the user portion of the hardware segment
 * map so as to reach the proper PMEGs within the MMU.  The kernel
 * mappings are `set early' and are always valid in every context
 * (every change is always propagated immediately).
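 *
 *	Concretely: for a user pmap, pm_segmap == pm_rsegmap, while
 *	for the kernel pmap, pm_segmap == pm_rsegmap - NUSEG.  Kernel
 *	vsegs begin at number NUSEG (3584), so pm_segmap[vseg] reaches
 *	the right entry in either case.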
 *
 * The PMEGs within the MMU are loaded `on demand'; when a PMEG is
 * taken away from context `c', the pmap for context c has its
 * corresponding pm_segmap[vseg] entry marked invalid (the MMU segment
 * map entry is also made invalid at the same time).  Thus
 * pm_segmap[vseg] is the `invalid pmeg' number (127 or 511) whenever
 * the corresponding PTEs are not actually in the MMU.  On the other
 * hand, pm_pte[vseg] is NULL only if no pages in that virtual segment
 * are in core; otherwise it points to a copy of the 32 or 64 PTEs that
 * must be loaded in the MMU in order to reach those pages.
 * pm_npte[vseg] counts the number of valid pages in each vseg.
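 *
 *	To summarize, for any vseg of a user pmap:
 *
 *		pm_segmap[vseg] valid   <=>  its PTEs are loaded in the MMU
 *		pm_pte[vseg] != NULL    <=>  some page of vseg is in core
 *		pm_npte[vseg]            =   number of valid pages in vseg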
 *
 * XXX performance: faster to count valid bits?
 *
 * The kernel pmap cannot malloc() PTEs since malloc() will sometimes
 * allocate a new virtual segment.  Since kernel mappings are never
 * `stolen' out of the MMU, we just keep all its PTEs there, and
 * have no software copies.  Its mmu entries are nonetheless kept on lists
 * so that the code that fiddles with mmu lists has something to fiddle with.
 */
#define	NKSEG	((int)((-(unsigned)KERNBASE) / NBPSG))	/* i.e., 512 */
#define	NUSEG	(4096 - NKSEG)				/* i.e., 3584 */
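
/*
 * For example: KERNBASE is 0xf8000000, so -(unsigned)KERNBASE ==
 * 0x08000000, and with NBPSG == 0x40000 (the 256 kbyte segment size)
 * NKSEG == 0x08000000 / 0x40000 == 512; NUSEG is then 4096 - 512 ==
 * 3584.  A virtual address va lies in vseg ((va >> 18) & 0xfff),
 * the `& 0xfff' being what ignores the top two address bits (the
 * shift shown here is illustrative; cf. machine/pte.h).
 */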

/* data appearing in both user and kernel pmaps */
struct pmap_common {
	union	ctxinfo *pmc_ctx;	/* current context, if any */
	int	pmc_ctxnum;		/* current context's number */
#if NCPUS > 1
	simple_lock_data_t pmc_lock;	/* spinlock */
#endif
	int	pmc_refcount;		/* just what it says */
	struct	mmuentry *pmc_mmuforw;	/* pmap pmeg chain */
	struct	mmuentry **pmc_mmuback;	/* (two way street) */
	pmeg_t	*pmc_segmap;		/* points to pm_rsegmap per above */
	u_char	*pmc_npte;		/* points to pm_rnpte */
	int	**pmc_pte;		/* points to pm_rpte */
};

/* data appearing only in user pmaps */
struct pmap {
	struct	pmap_common pmc;
	pmeg_t	pm_rsegmap[NUSEG];	/* segment map */
	u_char	pm_rnpte[NUSEG];	/* number of valid PTEs per seg */
	int	*pm_rpte[NUSEG];	/* points to PTEs for valid segments */
};

/* data appearing only in the kernel pmap */
struct kpmap {
	struct	pmap_common pmc;
	pmeg_t	pm_rsegmap[NKSEG];	/* segment map */
	u_char	pm_rnpte[NKSEG];	/* number of valid PTEs per kseg */
	int	*pm_rpte[NKSEG];	/* always NULL */
};

#define	pm_ctx		pmc.pmc_ctx
#define	pm_ctxnum	pmc.pmc_ctxnum
#define	pm_lock		pmc.pmc_lock
#define	pm_refcount	pmc.pmc_refcount
#define	pm_mmuforw	pmc.pmc_mmuforw
#define	pm_mmuback	pmc.pmc_mmuback
#define	pm_segmap	pmc.pmc_segmap
#define	pm_npte		pmc.pmc_npte
#define	pm_pte		pmc.pmc_pte
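
/*
 * With the defines above, common code can use one spelling for both
 * pmap flavors; a sketch (`seginval' stands for the invalid pmeg
 * number and is illustrative, not from this file):
 *
 *	if (pm->pm_segmap[vseg] == seginval)
 *		panic("pmeg not resident");
 *
 * pm->pm_segmap expands to pm->pmc.pmc_segmap, which works whether
 * pm was derived from a struct pmap or a struct kpmap.
 */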

#ifdef KERNEL

typedef struct pmap *pmap_t;
#define PMAP_NULL	((pmap_t)0)

extern struct kpmap kernel_pmap_store;
#define	kernel_pmap ((struct pmap *)(&kernel_pmap_store))
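/*
 * The cast works because struct pmap and struct kpmap both begin
 * with a struct pmap_common, so the pm_* defines above resolve to
 * the same layout through either type.
 */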

#define PMAP_ACTIVATE(pmap, pcb, iscurproc)
#define PMAP_DEACTIVATE(pmap, pcb)
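/*
 * Both hooks above are empty on this port: a context is bound to a
 * pmap on demand (see pmc_ctx above) rather than at activate or
 * deactivate time.
 */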

/*
 * Since PTEs also contain type bits, we have to have some way
 * to tell pmap_enter `this is an IO page' or `this is not to
 * be cached'.  Since physical addresses are always aligned, we
 * can do this with the low order bits.
 *
 * The ordering below is important: each PMAP_* value, shifted left
 * by PG_TNC, must give exactly the corresponding PG_TYPE and PG_NC
 * bits.
 */
#define	PMAP_OBIO	1		/* tells pmap_enter to use PG_OBIO */
#define	PMAP_VME16	2		/* etc */
#define	PMAP_VME32	3		/* etc */
#define	PMAP_NC		4		/* tells pmap_enter to set PG_NC */
#define	PMAP_TNC	7		/* mask to get PG_TYPE & PG_NC */
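
/*
 * For example, given the shift relationship described above
 * (PMAP_OBIO << PG_TNC == PG_OBIO, PMAP_NC << PG_TNC == PG_NC),
 * a caller can request an uncached OBIO mapping with
 *
 *	pmap_enter(pmap, va, pa | PMAP_OBIO | PMAP_NC, prot, wired);
 *
 * and pmap_enter can recover the bits as ((pa & PMAP_TNC) << PG_TNC)
 * before masking PMAP_TNC off the physical address.
 */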

#endif /* KERNEL */

#endif /* _SPARC_PMAP_H_ */