/*	$NetBSD: pmap3.h,v 1.51 2020/03/14 14:05:44 ad Exp $	*/

/*-
 * Copyright (c) 1996 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Adam Glass and Gordon W. Ross.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifdef _KERNEL
/*
 * Physical map structures exported to the VM code.
 */

struct pmap {
	unsigned char	*pm_segmap;	/* soft copy of segmap */
	int		pm_ctxnum;	/* MMU context number */
	u_int		pm_refcount;	/* reference count */
	int		pm_version;
};

/*
 * We give the pmap code a chance to resolve faults by
 * reloading translations that it was forced to unload.
 * This function does that, and calls vm_fault if it
 * could not resolve the fault by reloading the MMU.
 */
int _pmap_fault(struct vm_map *, vaddr_t, vm_prot_t);
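
/*
 * Illustrative sketch only, not taken from the sun3 trap code: the MD
 * fault handler is expected to offer MMU faults to _pmap_fault() before
 * handling them itself, roughly as
 *
 *	rv = _pmap_fault(map, va, ftype);
 *	if (rv == 0)
 *		return;		(reloaded, or serviced by the VM fault path)
 *
 * The 0-on-success return convention is an assumption here, matching
 * the errno-style returns used by uvm_fault().
 */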

/* This lets us have some say in choosing VA locations. */
extern void pmap_prefer(vaddr_t, vaddr_t *, int);
#define PMAP_PREFER(fo, ap, sz, td) pmap_prefer((fo), (ap), (td))
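
/*
 * For illustration (hedged, not sun3-specific documentation): the MI VM
 * code (uvm_map) consults the macro roughly as
 *
 *	PMAP_PREFER(uoffset, &addr, size, topdown);
 *
 * and pmap_prefer() may then nudge the candidate address.  Note that
 * the size argument is accepted by the macro but not passed on.
 */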

/* This needs to be a macro for kern_sysctl.c */
extern segsz_t pmap_resident_pages(pmap_t);
#define	pmap_resident_count(pmap)	(pmap_resident_pages(pmap))

/* This needs to be a macro for vm_mmap.c */
extern segsz_t pmap_wired_pages(pmap_t);
#define	pmap_wired_count(pmap)	(pmap_wired_pages(pmap))

/* We use the PA plus some low bits for device mmap. */
#define pmap_phys_address(addr) 	(addr)

/* Map a given physical region to a virtual region */
extern vaddr_t pmap_map(vaddr_t, paddr_t, paddr_t, int);

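/*
 * Hedged note (paraphrasing the generic pmap(9) contract, not sun3
 * text): returning false tells the VM code that this pmap did not
 * empty itself, so callers must still do individual pmap_remove()s.
 */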
static __inline bool
pmap_remove_all(struct pmap *pmap)
{
	/* Nothing. */
	return false;
}

/*
 * Since PTEs also contain type bits, we have to have some way
 * to tell pmap_enter `this is an IO page' or `this is not to
 * be cached'.  Since physical addresses are always aligned, we
 * can do this with the low order bits.
 *
 * The values below must agree with pte.h such that:
 *	(PMAP_OBIO << PG_MOD_SHIFT) == PGT_OBIO
 */
#define	PMAP_OBMEM	0x00	/* unused */
#define	PMAP_OBIO	0x04	/* tells pmap_enter to use PG_OBIO */
#define	PMAP_VME16	0x08	/* etc */
#define	PMAP_VME32	0x0C	/* etc */
#define	PMAP_NC		0x10	/* tells pmap_enter to set PG_NC */
#define	PMAP_SPEC	0x1C	/* mask to get all above. */
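
/*
 * Hedged compile-time check of the pte.h agreement noted above.  It is
 * only meaningful where pte.h is also visible (PG_MOD_SHIFT and
 * PGT_OBIO are pte.h names) and assumes __CTASSERT from <sys/cdefs.h>,
 * hence the guard.
 */
#if defined(PG_MOD_SHIFT) && defined(PGT_OBIO)
__CTASSERT((PMAP_OBIO << PG_MOD_SHIFT) == PGT_OBIO);
#endif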

#endif	/* _KERNEL */

/* MMU specific segment size */
#define	SEGSHIFT	17		/* LOG2(NBSG) */
#define	NBSG		(1 << SEGSHIFT)	/* bytes/segment */
#define	SEGOFSET	(NBSG - 1)	/* byte offset into segment */

#define	sun3_round_seg(x)	((((vaddr_t)(x)) + SEGOFSET) & ~SEGOFSET)
#define	sun3_trunc_seg(x)	((vaddr_t)(x) & ~SEGOFSET)
#define	sun3_seg_offset(x)	((vaddr_t)(x) & SEGOFSET)
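
/*
 * Worked example (illustrative values only): with SEGSHIFT == 17,
 * NBSG is 0x20000 and SEGOFSET is 0x1ffff, so for x == 0x00123456:
 *
 *	sun3_trunc_seg(x)  == 0x00120000
 *	sun3_round_seg(x)  == 0x00140000
 *	sun3_seg_offset(x) == 0x00003456
 */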