/*	$NetBSD: cacheops_40.h,v 1.11 2008/04/28 20:23:26 martin Exp $	*/

/*-
 * Copyright (c) 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Leo Weppelman
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Invalidate entire TLB.
 */
static __inline void __attribute__((__unused__))
TBIA_40(void)
{
	__asm volatile (" .word 0xf518" ); /*  pflusha */
}

/*
 * Invalidate any TLB entry for given VA (TB Invalidate Single)
 */
static __inline void __attribute__((__unused__))
TBIS_40(vaddr_t va)
{
	register uint8_t *r_va __asm("%a0") = (void *)va;
	int	tmp;

	__asm volatile (" movc   %1, %%dfc;"	/* select supervisor	*/
			  " .word 0xf508;"	/* pflush %a0@		*/
			  " moveq  %3, %1;"	/* select user		*/
			  " movc   %1, %%dfc;"
			  " .word 0xf508;" : "=d" (tmp) :
			  "0" (FC_SUPERD), "a" (r_va), "i" (FC_USERD));
}
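
/*
 * Editorial note (not in the original header): the 68040 pflush instruction
 * flushes the ATC entry whose address space matches the function code held
 * in DFC, so TBIS_40() above loads DFC with FC_SUPERD, flushes, then loads
 * FC_USERD and flushes again to cover both the supervisor and user data-space
 * mappings of the given VA.  The 0xf508 opcode is a hand-assembled
 * "pflush %a0@", presumably encoded as .word so the code does not depend on
 * the assembler accepting 68040 MMU mnemonics.
 */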

/*
 * Invalidate supervisor side of TLB
 */
static __inline void __attribute__((__unused__))
TBIAS_40(void)
{
	/*
	 * Cannot specify supervisor/user on pflusha, so we flush all
	 */
	__asm volatile (" .word 0xf518;");
}

/*
 * Invalidate user side of TLB
 */
static __inline void __attribute__((__unused__))
TBIAU_40(void)
{
	/*
	 * Cannot specify supervisor/user on pflusha, so we flush all
	 */
	__asm volatile (" .word 0xf518;");
}

/*
 * Invalidate instruction cache
 */
static __inline void __attribute__((__unused__))
ICIA_40(void)
{
	__asm volatile (" .word 0xf498;"); /* cinva ic */
}

static __inline void __attribute__((__unused__))
ICPA_40(void)
{
	__asm volatile (" .word 0xf498;"); /* cinva ic */
}
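
/*
 * Editorial note (not in the original header): ICIA_40() and ICPA_40() are
 * intentionally identical; both expand to "cinva ic".  The instruction cache
 * never holds dirty data, so a plain invalidate (cinv) is always safe there.
 */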

/*
 * Invalidate data cache.
 */
static __inline void __attribute__((__unused__))
DCIA_40(void)
{
	__asm volatile (" .word 0xf478;"); /* cpusha dc */
}

static __inline void __attribute__((__unused__))
DCIS_40(void)
{
	__asm volatile (" .word 0xf478;"); /* cpusha dc */
}

static __inline void __attribute__((__unused__))
DCIU_40(void)
{
	__asm volatile (" .word 0xf478;"); /* cpusha dc */
}
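
/*
 * Editorial note (not in the original header): DCIA_40(), DCIS_40() and
 * DCIU_40() all expand to the same "cpusha dc".  The 68040 cannot restrict
 * the operation to supervisor or user lines, and with a copyback data cache
 * an "invalidate" must be done as push-then-invalidate (cpush) so that
 * modified lines are written back rather than silently discarded.
 */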

static __inline void __attribute__((__unused__))
DCIAS_40(paddr_t pa)
{
	register uint8_t *r_pa __asm("%a0") = (void *)pa;

	__asm volatile (" .word 0xf468;" : : "a" (r_pa)); /* cpushl dc,%a0@ */
}
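
/*
 * Editorial note (not in the original header): DCIAS_40() likewise uses
 * "cpushl" rather than "cinvl" -- the line holding this physical address may
 * be dirty, so it is pushed to memory before being invalidated.  The purely
 * discarding per-line variant is DCPL_40() below.
 */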

static __inline void __attribute__((__unused__))
PCIA_40(void)
{
	__asm volatile (" .word 0xf478;"); /* cpusha dc */
}

static __inline void __attribute__((__unused__))
DCFA_40(void)
{
	__asm volatile (" .word 0xf478;"); /* cpusha dc */
}

/* invalidate instruction physical cache line */
static __inline void __attribute__((__unused__))
ICPL_40(paddr_t pa)
{
	register uint8_t *r_pa __asm("%a0") = (void *)pa;

	__asm volatile (" .word 0xf488;" : : "a" (r_pa)); /* cinvl ic,%a0@ */
}

/* invalidate instruction physical cache page */
static __inline void __attribute__((__unused__))
ICPP_40(paddr_t pa)
{
	register uint8_t *r_pa __asm("%a0") = (void *)pa;

	__asm volatile (" .word 0xf490;" : : "a" (r_pa)); /* cinvp ic,%a0@ */
}

/* invalidate data physical cache line */
static __inline void __attribute__((__unused__))
DCPL_40(paddr_t pa)
{
	register uint8_t *r_pa __asm("%a0") = (void *)pa;

	__asm volatile (" .word 0xf448;" : : "a" (r_pa)); /* cinvl dc,%a0@ */
}

/* invalidate data physical cache page */
static __inline void __attribute__((__unused__))
DCPP_40(paddr_t pa)
{
	register uint8_t *r_pa __asm("%a0") = (void *)pa;

	__asm volatile (" .word 0xf450;" : : "a" (r_pa)); /* cinvp dc,%a0@ */
}

/* invalidate data physical all */
static __inline void __attribute__((__unused__))
DCPA_40(void)
{
	__asm volatile (" .word 0xf458;"); /* cinva dc */
}
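
/*
 * Editorial note (not in the original header): the cinvl/cinvp/cinva forms
 * above (DCPL_40 .. DCPA_40) invalidate data-cache contents without writing
 * anything back, so they are only appropriate when the cached data is known
 * to be stale or irrelevant, e.g. a buffer that inbound DMA is about to
 * overwrite.  The cpush forms below write dirty lines back first.
 */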

/* data cache flush line */
static __inline void __attribute__((__unused__))
DCFL_40(paddr_t pa)
{
	register uint8_t *r_pa __asm("%a0") = (void *)pa;

	__asm volatile (" .word 0xf468;" : : "a" (r_pa)); /* cpushl dc,%a0@ */
}

/* data cache flush page */
static __inline void __attribute__((__unused__))
DCFP_40(paddr_t pa)
{
	register uint8_t *r_pa __asm("%a0") = (void *)pa;

	__asm volatile (" .word 0xf470;" : : "a" (r_pa)); /* cpushp dc,%a0@ */
}

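/*
 * Illustrative sketch only, not part of the original header: one way a caller
 * might combine the per-page primitives above, pushing the data cache and
 * invalidating the instruction cache over a physically contiguous region,
 * e.g. after storing code that is about to be executed.  The function name is
 * made up for this example, and PAGE_SIZE/psize_t are assumed to be available
 * from the usual NetBSD kernel headers.
 */
static __inline void __attribute__((__unused__))
cache_sync_range_40_example(paddr_t pa, psize_t len)
{
	paddr_t p = pa & ~((paddr_t)PAGE_SIZE - 1);	/* round down to a page */
	paddr_t end = pa + len;

	while (p < end) {
		DCFP_40(p);	/* cpushp dc: write back and invalidate the page */
		ICPP_40(p);	/* cinvp ic: drop any stale instruction lines */
		p += PAGE_SIZE;
	}
}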