/* xref: /netbsd-src/sys/arch/mips/include/cache.h (revision b8c616269f5ebf18ab2e35cb8099d683130a177c) */
1 /*	$NetBSD: cache.h,v 1.5 2002/12/17 12:04:29 simonb Exp $	*/
2 
3 /*
4  * Copyright 2001 Wasabi Systems, Inc.
5  * All rights reserved.
6  *
7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *	This product includes software developed for the NetBSD Project by
20  *	Wasabi Systems, Inc.
21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22  *    or promote products derived from this software without specific prior
23  *    written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35  * POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 /*
39  * Cache operations.
40  *
41  * We define the following primitives:
42  *
43  * --- Instruction cache synchronization (mandatory):
44  *
45  *	icache_sync_all		Synchronize I-cache
46  *
47  *	icache_sync_range	Synchronize I-cache range
48  *
49  *	icache_sync_range_index	(index ops)
50  *
51  * --- Primary data cache (mandatory):
52  *
53  *	pdcache_wbinv_all	Write-back Invalidate primary D-cache
54  *
55  *	pdcache_wbinv_range	Write-back Invalidate primary D-cache range
56  *
57  *	pdcache_wbinv_range_index (index ops)
58  *
59  *	pdcache_inv_range	Invalidate primary D-cache range
60  *
61  *	pdcache_wb_range	Write-back primary D-cache range
62  *
63  * --- Secondary data cache (optional):
64  *
65  *	sdcache_wbinv_all	Write-back Invalidate secondary D-cache
66  *
67  *	sdcache_wbinv_range	Write-back Invalidate secondary D-cache range
68  *
69  *	sdcache_wbinv_range_index (index ops)
70  *
71  *	sdcache_inv_range	Invalidate secondary D-cache range
72  *
73  *	sdcache_wb_range	Write-back secondary D-cache range
74  *
75  * There are some rules that must be followed:
76  *
77  *	I-cache Synch (all or range):
78  *		The goal is to synchronize the instruction stream,
79  *		so you may need to write-back dirty data cache
80  *		blocks first.  If a range is requested, and you
81  *		can't synchronize just a range, you have to hit
82  *		the whole thing.
83  *
84  *	D-cache Write-back Invalidate range:
85  *		If you can't WB-Inv a range, you must WB-Inv the
86  *		entire D-cache.
87  *
88  *	D-cache Invalidate:
89  *		If you can't Inv the D-cache without doing a
90  *		Write-back, YOU MUST PANIC.  This is to catch
91  *		errors in calling code.  Callers must be aware
92  *		of this scenario, and must handle it appropriately
93  *		(consider the bus_dma(9) operations).
94  *
95  *	D-cache Write-back:
96  *		If you can't Write-back without doing an invalidate,
97  *		that's fine.  Then treat this as a WB-Inv.  Skipping
98  *		the invalidate is merely an optimization.
99  *
100  *	All operations:
101  *		Valid virtual addresses must be passed to the
102  *		cache operation.
103  *
104  * Finally, these primitives are grouped together in reasonable
105  * ways.  For all operations described here, first the primary
106  * cache is frobbed, then the secondary cache frobbed, if the
107  * operation for the secondary cache exists.
108  *
109  *	mips_icache_sync_all	Synchronize I-cache
110  *
111  *	mips_icache_sync_range	Synchronize I-cache range
112  *
113  *	mips_icache_sync_range_index (index ops)
114  *
115  *	mips_dcache_wbinv_all	Write-back Invalidate D-cache
116  *
117  *	mips_dcache_wbinv_range	Write-back Invalidate D-cache range
118  *
119  *	mips_dcache_wbinv_range_index (index ops)
120  *
121  *	mips_dcache_inv_range	Invalidate D-cache range
122  *
123  *	mips_dcache_wb_range	Write-back D-cache range
124  */
125 
/*
 * Jump table of cache-maintenance primitives.  A machine-dependent
 * routine installs pointers appropriate to the CPU's cache design
 * (NOTE(review): presumably mips_config_cache(), declared below --
 * confirm in the MD cache code).  Per the rules in the comment above,
 * the I-cache and primary D-cache entries are mandatory; the
 * secondary-cache ("sdcache") entries are optional and may be NULL,
 * which the dispatch macros below test for before calling.
 */
struct mips_cache_ops {
	/* Instruction cache synchronization (mandatory). */
	void	(*mco_icache_sync_all)(void);
	void	(*mco_icache_sync_range)(vaddr_t, vsize_t);
	void	(*mco_icache_sync_range_index)(vaddr_t, vsize_t);

	/* Primary data cache maintenance (mandatory). */
	void	(*mco_pdcache_wbinv_all)(void);
	void	(*mco_pdcache_wbinv_range)(vaddr_t, vsize_t);
	void	(*mco_pdcache_wbinv_range_index)(vaddr_t, vsize_t);
	void	(*mco_pdcache_inv_range)(vaddr_t, vsize_t);
	void	(*mco_pdcache_wb_range)(vaddr_t, vsize_t);

	/* These are called only by the (mipsNN) icache functions. */
	void	(*mco_intern_pdcache_wbinv_all)(void);
	void	(*mco_intern_pdcache_wbinv_range_index)(vaddr_t, vsize_t);
	void	(*mco_intern_pdcache_wb_range)(vaddr_t, vsize_t);

	/* Secondary data cache maintenance (optional; may be NULL). */
	void	(*mco_sdcache_wbinv_all)(void);
	void	(*mco_sdcache_wbinv_range)(vaddr_t, vsize_t);
	void	(*mco_sdcache_wbinv_range_index)(vaddr_t, vsize_t);
	void	(*mco_sdcache_inv_range)(vaddr_t, vsize_t);
	void	(*mco_sdcache_wb_range)(vaddr_t, vsize_t);

	/* These are called only by the (mipsNN) icache functions. */
	void	(*mco_intern_sdcache_wbinv_all)(void);
	void	(*mco_intern_sdcache_wbinv_range_index)(vaddr_t, vsize_t);
	void	(*mco_intern_sdcache_wb_range)(vaddr_t, vsize_t);
};
153 
#ifdef _KERNEL
/* The ops vector that the dispatch macros below indirect through. */
extern struct mips_cache_ops mips_cache_ops;

/* PRIMARY CACHE VARIABLES */
/*
 * Cache geometry, per level: total size, line size, number of ways,
 * per-way size, and a way mask.  NOTE(review): units presumed to be
 * bytes, and values presumed to be filled in by mips_config_cache()
 * (prototype below) -- confirm in the MD cache-probe code.
 */
extern u_int mips_picache_size;
extern u_int mips_picache_line_size;
extern u_int mips_picache_ways;
extern u_int mips_picache_way_size;
extern u_int mips_picache_way_mask;

extern u_int mips_pdcache_size;		/* and unified */
extern u_int mips_pdcache_line_size;
extern u_int mips_pdcache_ways;
extern u_int mips_pdcache_way_size;
extern u_int mips_pdcache_way_mask;
extern int mips_pdcache_write_through;

/* Nonzero when the primary I- and D-caches are one unified cache. */
extern int mips_pcache_unified;

/* SECONDARY CACHE VARIABLES */
extern u_int mips_sicache_size;
extern u_int mips_sicache_line_size;
extern u_int mips_sicache_ways;
extern u_int mips_sicache_way_size;
extern u_int mips_sicache_way_mask;

extern u_int mips_sdcache_size;		/* and unified */
extern u_int mips_sdcache_line_size;
extern u_int mips_sdcache_ways;
extern u_int mips_sdcache_way_size;
extern u_int mips_sdcache_way_mask;
extern int mips_sdcache_write_through;

/* Nonzero when the secondary I- and D-caches are one unified cache. */
extern int mips_scache_unified;

/* TERTIARY CACHE VARIABLES */
extern u_int mips_tcache_size;		/* always unified */
extern u_int mips_tcache_line_size;
extern u_int mips_tcache_ways;
extern u_int mips_tcache_way_size;
extern u_int mips_tcache_way_mask;
extern int mips_tcache_write_through;

/* D-cache alignment and its mask (see mips_dcache_compute_align()). */
extern u_int mips_dcache_align;
extern u_int mips_dcache_align_mask;

/*
 * mips_cache_alias_mask is the mask applied by mips_cache_indexof()
 * below.  NOTE(review): mips_cache_prefer_mask's exact semantics are
 * inferred from its name (preferred-VA selection for virtually indexed
 * caches) -- confirm against the pmap code that consumes it.
 */
extern u_int mips_cache_alias_mask;
extern u_int mips_cache_prefer_mask;
202 
/*
 * XXX XXX XXX THIS SHOULD NOT EXIST XXX XXX XXX
 *
 * Extract the cache-alias index bits of a virtual address by masking
 * it with mips_cache_alias_mask.
 */
#define	mips_cache_indexof(x)	(((vaddr_t)(x)) & mips_cache_alias_mask)

/*
 * Dispatch a no-argument cache op named by pasting prefix + "p"/"s" + x:
 * always call the primary ("p") routine, then the secondary ("s")
 * routine only if its function pointer is non-NULL.
 */
#define	__mco_noargs(prefix, x)						\
do {									\
	(*mips_cache_ops.mco_ ## prefix ## p ## x )();			\
	if (*mips_cache_ops.mco_ ## prefix ## s ## x )			\
		(*mips_cache_ops.mco_ ## prefix ## s ## x )();		\
} while (/*CONSTCOND*/0)
214 
/*
 * Same as __mco_noargs, but for ops taking an (address, size) pair:
 * the primary ("p") routine is called unconditionally with (a, b),
 * then the secondary ("s") routine if its pointer is non-NULL.
 */
#define	__mco_2args(prefix, x, a, b)					\
do {									\
	(*mips_cache_ops.mco_ ## prefix ## p ## x )((a), (b));		\
	if (*mips_cache_ops.mco_ ## prefix ## s ## x )			\
		(*mips_cache_ops.mco_ ## prefix ## s ## x )((a), (b));	\
} while (/*CONSTCOND*/0)
221 
/*
 * Public cache-maintenance interface (see the grouping rules in the
 * big comment above).  The I-cache ops dispatch directly through the
 * ops vector; the D-cache ops go through the __mco_* helpers so the
 * primary cache is frobbed first and the secondary cache second,
 * when a secondary implementation exists.
 */
#define	mips_icache_sync_all()						\
	(*mips_cache_ops.mco_icache_sync_all)()

#define	mips_icache_sync_range(v, s)					\
	(*mips_cache_ops.mco_icache_sync_range)((v), (s))

#define	mips_icache_sync_range_index(v, s)				\
	(*mips_cache_ops.mco_icache_sync_range_index)((v), (s))

#define	mips_dcache_wbinv_all()						\
	__mco_noargs(, dcache_wbinv_all)

#define	mips_dcache_wbinv_range(v, s)					\
	__mco_2args(, dcache_wbinv_range, (v), (s))

#define	mips_dcache_wbinv_range_index(v, s)				\
	__mco_2args(, dcache_wbinv_range_index, (v), (s))

#define	mips_dcache_inv_range(v, s)					\
	__mco_2args(, dcache_inv_range, (v), (s))

#define	mips_dcache_wb_range(v, s)					\
	__mco_2args(, dcache_wb_range, (v), (s))
245 
246 
/*
 * Private D-cache functions only called from (currently only the
 * mipsNN) I-cache functions.  These expand to the mco_intern_*
 * members of the ops vector via the __mco_* helpers.
 */
#define	mips_intern_dcache_wbinv_all()					\
	__mco_noargs(intern_, dcache_wbinv_all)

#define	mips_intern_dcache_wbinv_range_index(v, s)			\
	__mco_2args(intern_, dcache_wbinv_range_index, (v), (s))

#define	mips_intern_dcache_wb_range(v, s)				\
	__mco_2args(intern_, dcache_wb_range, (v), (s))

/*
 * MD cache setup hooks.  NOTE(review): presumably mips_config_cache()
 * probes the caches and fills in mips_cache_ops plus the geometry
 * variables above, and mips_dcache_compute_align() derives
 * mips_dcache_align{,_mask} -- confirm in the MD cache code.
 */
void	mips_config_cache(void);
void	mips_dcache_compute_align(void);

#include <mips/cache_mipsNN.h>

#endif /* _KERNEL */
266