/*      $NetBSD: pmap.h,v 1.23 1998/01/03 01:13:12 thorpej Exp $     */

/*
 * Copyright (c) 1987 Carnegie-Mellon University
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * Changed for the VAX port. /IC
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap.h	7.6 (Berkeley) 5/10/91
 */

#ifndef	PMAP_H
#define	PMAP_H

#include <machine/mtpr.h>

struct pte;

/*
 * Pmap structure
 *  pm_stack holds the lowest address allocated for the process
 *  stack's P1 pte space.
 */

typedef struct pmap {
	vm_offset_t		 pm_stack; /* Base of allocated P1 pte space */
	int			 ref_count; /* reference count */
	struct pte		*pm_p0br; /* page 0 base register */
	long			 pm_p0lr; /* page 0 length register */
	struct pte		*pm_p1br; /* page 1 base register */
	long			 pm_p1lr; /* page 1 length register */
} *pmap_t;
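
/*
 * Illustrative sketch, not part of the original header: how the four
 * per-process map registers above might be loaded into the hardware
 * when a process is given the CPU.  The PR_* processor-register names
 * are taken from <machine/mtpr.h>; the function itself is hypothetical.
 */
#if 0
static void
pmap_activate_sketch(pmap_t pm)
{
	mtpr((unsigned int)pm->pm_p0br, PR_P0BR);	/* P0 page table base */
	mtpr(pm->pm_p0lr, PR_P0LR);			/* P0 page table length */
	mtpr((unsigned int)pm->pm_p1br, PR_P1BR);	/* P1 page table base */
	mtpr(pm->pm_p1lr, PR_P1LR);			/* P1 page table length */
	mtpr(0, PR_TBIA);				/* drop old translations */
}
#endif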

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_table.
 */

typedef struct pv_entry {
	struct pv_entry	*pv_next;	/* next pv_entry */
	struct pte	*pv_pte;	/* pte for this physical page */
} *pv_entry_t;
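
/*
 * Illustrative sketch, not part of the original header: walking the pv
 * chain visits every PTE that currently maps a given physical page.
 * The pv_table[] array and the page-frame indexing used here are
 * assumptions made for the sake of the example.
 */
#if 0
static void
pv_walk_sketch(vm_offset_t pa)
{
	struct pv_entry *pv;

	for (pv = &pv_table[pa >> PAGE_SHIFT]; pv != NULL; pv = pv->pv_next) {
		if (pv->pv_pte == NULL)
			continue;	/* unused head entry */
		/* operate on *pv->pv_pte here, e.g. clear the modify bit */
	}
}
#endif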

/* ROUND_PAGE and TRUNC_PAGE are used before the VM system is initialized */
#define	ROUND_PAGE(x)	(((uint)(x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define	TRUNC_PAGE(x)	((uint)(x) & ~(PAGE_SIZE - 1))

/* Mapping macros used when allocating the system page table (SPT) */
#define	MAPVIRT(ptr, count)					\
	(vm_offset_t)ptr = virtual_avail;			\
	virtual_avail += (count) * NBPG;

#define	MAPPHYS(ptr, count, perm)				\
	(vm_offset_t)ptr = avail_start + KERNBASE;		\
	avail_start += (count) * NBPG;
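
/*
 * Illustrative sketch, not part of the original header: how bootstrap
 * code might use the macros above.  MAPPHYS hands out physical memory
 * that is already mapped at KERNBASE, while MAPVIRT only reserves
 * kernel virtual space.  The variables istack and scratch are
 * hypothetical; avail_start and virtual_avail are assumed to be the
 * usual bootstrap globals.  Note that both macros expand to two
 * statements, so they must not be used as the body of an unbraced if.
 */
#if 0
	char *istack, *scratch;

	avail_start = ROUND_PAGE(avail_start);		/* start on a page boundary */
	MAPPHYS(istack, 4, VM_PROT_READ|VM_PROT_WRITE);	/* 4 pages of backed memory */
	MAPVIRT(scratch, 8);				/* 8 pages of VA, no backing yet */
#endif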

#ifdef	_KERNEL

extern	struct pmap kernel_pmap_store;

#define	pmap_kernel()			(&kernel_pmap_store)

#endif	/* _KERNEL */

/* Routines that are best defined as macros */
#define	pmap_copy(a,b,c,d,e)		/* Don't do anything */
#define	pmap_update()	mtpr(0,PR_TBIA)	/* Flush the translation buffer */
#define	pmap_pageable(a,b,c,d)		/* Don't do anything */
#define	pmap_collect(pmap)		/* No need so far */
#define	pmap_reference(pmap)	if (pmap) (pmap)->ref_count++
#define	pmap_phys_address(phys)	((u_int)(phys) << PAGE_SHIFT)
#define	pmap_is_referenced(phys)	(FALSE)
#define	pmap_clear_reference(pa)	pmap_page_protect(pa, VM_PROT_NONE)
#define	pmap_change_wiring(pmap, v, w)	/* No need */
#define	pmap_remove(pmap, start, slut)	pmap_protect(pmap, start, slut, 0)
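
/*
 * Illustrative sketch, not part of the original header: a caller that
 * tears down a mapping is expected to flush the translation buffer
 * afterwards, since pmap_update() is just a TBIA.  The function name
 * and the single-page range are hypothetical.
 */
#if 0
static void
unmap_one_page_sketch(pmap_t pm, vm_offset_t va)
{
	pmap_remove(pm, va, va + NBPG);	/* expands to pmap_protect(..., 0) */
	pmap_update();			/* mtpr(0, PR_TBIA): drop stale TLB entries */
}
#endif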

/* These can be done as efficient inline macros */
#define	pmap_copy_page(src, dst)				\
	__asm__("addl3 $0x80000000,%0,r0;addl3 $0x80000000,%1,r1;	\
	    movc3 $1024,(r0),(r1)"				\
	    :: "r"(src),"r"(dst):"r0","r1","r2","r3","r4","r5");

#define	pmap_zero_page(phys)					\
	__asm__("addl3 $0x80000000,%0,r0;movc5 $0,(r0),$0,$1024,(r0)" \
	    :: "r"(phys): "r0","r1","r2","r3","r4","r5");

/* Prototypes */
void	pmap_bootstrap __P((void));

void	pmap_pinit __P((pmap_t));

#endif /* PMAP_H */