xref: /netbsd-src/sys/arch/mips/include/cache.h (revision 946379e7b37692fc43f68eb0d1c10daa0a7f3b6c)
1 /*	$NetBSD: cache.h,v 1.12 2015/06/07 07:14:47 matt Exp $	*/
2 
3 /*
4  * Copyright 2001 Wasabi Systems, Inc.
5  * All rights reserved.
6  *
7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *	This product includes software developed for the NetBSD Project by
20  *	Wasabi Systems, Inc.
21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22  *    or promote products derived from this software without specific prior
23  *    written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35  * POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 #ifndef _MIPS_CACHE_H_
39 #define _MIPS_CACHE_H_
40 
41 /*
42  * Cache operations.
43  *
44  * We define the following primitives:
45  *
46  * --- Instruction cache synchronization (mandatory):
47  *
48  *	icache_sync_all		Synchronize I-cache
49  *
50  *	icache_sync_range	Synchronize I-cache range
51  *
52  *	icache_sync_range_index	(index ops)
53  *
54  * --- Primary data cache (mandatory):
55  *
56  *	pdcache_wbinv_all	Write-back Invalidate primary D-cache
57  *
58  *	pdcache_wbinv_range	Write-back Invalidate primary D-cache range
59  *
60  *	pdcache_wbinv_range_index (index ops)
61  *
62  *	pdcache_inv_range	Invalidate primary D-cache range
63  *
64  *	pdcache_wb_range	Write-back primary D-cache range
65  *
66  * --- Secondary data cache (optional):
67  *
68  *	sdcache_wbinv_all	Write-back Invalidate secondary D-cache
69  *
70  *	sdcache_wbinv_range	Write-back Invalidate secondary D-cache range
71  *
72  *	sdcache_wbinv_range_index (index ops)
73  *
74  *	sdcache_inv_range	Invalidate secondary D-cache range
75  *
76  *	sdcache_wb_range	Write-back secondary D-cache range
77  *
78  * There are some rules that must be followed:
79  *
80  *	I-cache Synch (all or range):
81  *		The goal is to synchronize the instruction stream,
82  *		so you may need to write-back dirty data cache
83  *		blocks first.  If a range is requested, and you
84  *		can't synchronize just a range, you have to hit
85  *		the whole thing.
86  *
87  *	D-cache Write-back Invalidate range:
88  *		If you can't WB-Inv a range, you must WB-Inv the
89  *		entire D-cache.
90  *
91  *	D-cache Invalidate:
92  *		If you can't Inv the D-cache without doing a
93  *		Write-back, YOU MUST PANIC.  This is to catch
94  *		errors in calling code.  Callers must be aware
95  *		of this scenario, and must handle it appropriately
96  *		(consider the bus_dma(9) operations).
97  *
98  *	D-cache Write-back:
99  *		If you can't Write-back without doing an invalidate,
100  *		that's fine.  Then treat this as a WB-Inv.  Skipping
101  *		the invalidate is merely an optimization.
102  *
103  *	All operations:
104  *		Valid virtual addresses must be passed to the
105  *		cache operation.
106  *
107  * Finally, these primitives are grouped together in reasonable
108  * ways.  For all operations described here, first the primary
109  * cache is frobbed, then the secondary cache frobbed, if the
110  * operation for the secondary cache exists.
111  *
112  *	mips_icache_sync_all	Synchronize I-cache
113  *
114  *	mips_icache_sync_range	Synchronize I-cache range
115  *
116  *	mips_icache_sync_range_index (index ops)
117  *
118  *	mips_dcache_wbinv_all	Write-back Invalidate D-cache
119  *
120  *	mips_dcache_wbinv_range	Write-back Invalidate D-cache range
121  *
122  *	mips_dcache_wbinv_range_index (index ops)
123  *
124  *	mips_dcache_inv_range	Invalidate D-cache range
125  *
126  *	mips_dcache_wb_range	Write-back D-cache range
127  */
128 
/*
 * Jump table of cache primitives.  It is populated at boot with the
 * routines matching the detected CPU (see mips_config_cache()).
 *
 * The primary-cache (mco_icache_*, mco_pdcache_*) entries are mandatory;
 * the secondary-cache (mco_sdcache_*) entries are optional and may be
 * NULL — the __mco_noargs()/__mco_2args() wrapper macros below test the
 * "s" variants for NULL before calling them.
 */
struct mips_cache_ops {
	/* I-cache synchronization (mandatory). */
	void	(*mco_icache_sync_all)(void);
	void	(*mco_icache_sync_range)(vaddr_t, vsize_t);
	void	(*mco_icache_sync_range_index)(vaddr_t, vsize_t);

	/* Primary D-cache operations (mandatory). */
	void	(*mco_pdcache_wbinv_all)(void);
	void	(*mco_pdcache_wbinv_range)(vaddr_t, vsize_t);
	void	(*mco_pdcache_wbinv_range_index)(vaddr_t, vsize_t);
	void	(*mco_pdcache_inv_range)(vaddr_t, vsize_t);
	void	(*mco_pdcache_wb_range)(vaddr_t, vsize_t);

	/* These are called only by the (mipsNN) icache functions. */
	void	(*mco_intern_pdcache_wbinv_all)(void);
	void	(*mco_intern_pdcache_wbinv_range_index)(vaddr_t, vsize_t);
	void	(*mco_intern_pdcache_wb_range)(vaddr_t, vsize_t);

	/* Secondary D-cache operations (optional; may be NULL). */
	void	(*mco_sdcache_wbinv_all)(void);
	void	(*mco_sdcache_wbinv_range)(vaddr_t, vsize_t);
	void	(*mco_sdcache_wbinv_range_index)(vaddr_t, vsize_t);
	void	(*mco_sdcache_inv_range)(vaddr_t, vsize_t);
	void	(*mco_sdcache_wb_range)(vaddr_t, vsize_t);

	/* These are called only by the (mipsNN) icache functions. */
	void	(*mco_intern_sdcache_wbinv_all)(void);
	void	(*mco_intern_sdcache_wbinv_range_index)(vaddr_t, vsize_t);
	void	(*mco_intern_sdcache_wb_range)(vaddr_t, vsize_t);
};

/* The single system-wide dispatch table, defined in the cache MD code. */
extern struct mips_cache_ops mips_cache_ops;
158 
159 /* PRIMARY CACHE VARIABLES */
/* PRIMARY CACHE VARIABLES */
/*
 * Geometry of every cache level discovered at boot.  Sizes and line
 * sizes are in bytes; "way_size" is the span covered by one way
 * (size / ways) and "way_mask" is way_size - 1, usable for indexing.
 * A size of 0 means the cache level is not present.
 */
struct mips_cache_info {
	u_int mci_picache_size;
	u_int mci_picache_line_size;
	u_int mci_picache_ways;
	u_int mci_picache_way_size;
	u_int mci_picache_way_mask;

	u_int mci_pdcache_size;		/* and unified */
	u_int mci_pdcache_line_size;
	u_int mci_pdcache_ways;
	u_int mci_pdcache_way_size;
	u_int mci_pdcache_way_mask;
	bool mci_pdcache_write_through;

	/* true if the primary I- and D-caches are one unified cache */
	bool mci_pcache_unified;

	/* SECONDARY CACHE VARIABLES */
	u_int mci_sicache_size;
	u_int mci_sicache_line_size;
	u_int mci_sicache_ways;
	u_int mci_sicache_way_size;
	u_int mci_sicache_way_mask;

	u_int mci_sdcache_size;		/* and unified */
	u_int mci_sdcache_line_size;
	u_int mci_sdcache_ways;
	u_int mci_sdcache_way_size;
	u_int mci_sdcache_way_mask;
	bool mci_sdcache_write_through;

	/* true if the secondary I- and D-caches are one unified cache */
	bool mci_scache_unified;

	/* TERTIARY CACHE VARIABLES */
	u_int mci_tcache_size;		/* always unified */
	u_int mci_tcache_line_size;
	u_int mci_tcache_ways;
	u_int mci_tcache_way_size;
	u_int mci_tcache_way_mask;
	bool mci_tcache_write_through;

	/*
	 * These two variables inform the rest of the kernel about the
	 * size of the largest D-cache line present in the system.  The
	 * mask can be used to determine if a region of memory is cache
	 * line size aligned.
	 *
	 * Whenever any code updates a data cache line size, it should
	 * call mips_dcache_compute_align() to recompute these values.
	 */
	u_int mci_dcache_align;
	u_int mci_dcache_align_mask;

	u_int mci_cache_prefer_mask;
	/*
	 * Virtual-alias bookkeeping exists only on CPU families whose
	 * virtually-indexed caches can alias; on MIPS1 the macros
	 * collapse to constants so the aliasing code compiles away.
	 */
#if (MIPS2 + MIPS3 + MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) > 0
	u_int mci_cache_alias_mask;

	bool mci_cache_virtual_alias;

#define	MIPS_CACHE_ALIAS_MASK		mips_cache_info.mci_cache_alias_mask
#define	MIPS_CACHE_VIRTUAL_ALIAS	mips_cache_info.mci_cache_virtual_alias
#elif defined(MIPS1)
#define	MIPS_CACHE_ALIAS_MASK		0
#define	MIPS_CACHE_VIRTUAL_ALIAS	false
#else
#error mci_cache screw up
#endif
};

/* The single system-wide cache description, defined in the cache MD code. */
extern struct mips_cache_info mips_cache_info;
229 
230 
231 /*
232  * XXX XXX XXX THIS SHOULD NOT EXIST XXX XXX XXX
233  */
234 #define	mips_cache_indexof(x)	(((vaddr_t)(x)) & MIPS_CACHE_ALIAS_MASK)
235 #define	mips_cache_badalias(x,y) (((vaddr_t)(x)^(vaddr_t)(y)) & MIPS_CACHE_ALIAS_MASK)
236 
237 #define	__mco_noargs(prefix, x)						\
238 do {									\
239 	(*mips_cache_ops.mco_ ## prefix ## p ## x )();			\
240 	if (*mips_cache_ops.mco_ ## prefix ## s ## x )			\
241 		(*mips_cache_ops.mco_ ## prefix ## s ## x )();		\
242 } while (/*CONSTCOND*/0)
243 
244 #define	__mco_2args(prefix, x, a, b)					\
245 do {									\
246 	(*mips_cache_ops.mco_ ## prefix ## p ## x )((a), (b));		\
247 	if (*mips_cache_ops.mco_ ## prefix ## s ## x )			\
248 		(*mips_cache_ops.mco_ ## prefix ## s ## x )((a), (b));	\
249 } while (/*CONSTCOND*/0)
250 
/*
 * Public cache-operation entry points.  The icache macros call the
 * single installed routine directly; the dcache macros expand via
 * __mco_noargs()/__mco_2args() so the primary op runs first and the
 * secondary op runs afterwards when present.
 */
#define	mips_icache_sync_all()						\
	(*mips_cache_ops.mco_icache_sync_all)()

#define	mips_icache_sync_range(v, s)					\
	(*mips_cache_ops.mco_icache_sync_range)((v), (s))

#define	mips_icache_sync_range_index(v, s)				\
	(*mips_cache_ops.mco_icache_sync_range_index)((v), (s))

#define	mips_dcache_wbinv_all()						\
	__mco_noargs(, dcache_wbinv_all)

#define	mips_dcache_wbinv_range(v, s)					\
	__mco_2args(, dcache_wbinv_range, (v), (s))

#define	mips_dcache_wbinv_range_index(v, s)				\
	__mco_2args(, dcache_wbinv_range_index, (v), (s))

#define	mips_dcache_inv_range(v, s)					\
	__mco_2args(, dcache_inv_range, (v), (s))

#define	mips_dcache_wb_range(v, s)					\
	__mco_2args(, dcache_wb_range, (v), (s))


/*
 * Private D-cache functions only called from (currently only the
 * mipsNN) I-cache functions.  These expand to the mco_intern_* slots
 * of the dispatch table.
 */
#define	mips_intern_dcache_wbinv_all()					\
	__mco_noargs(intern_, dcache_wbinv_all)

#define	mips_intern_dcache_wbinv_range_index(v, s)			\
	__mco_2args(intern_, dcache_wbinv_range_index, (v), (s))

#define	mips_intern_dcache_wb_range(v, s)				\
	__mco_2args(intern_, dcache_wb_range, (v), (s))

/* Probe the CPU's caches and fill in mips_cache_ops/mips_cache_info. */
void	mips_config_cache(void);
/* Recompute mci_dcache_align/mci_dcache_align_mask from the line sizes. */
void	mips_dcache_compute_align(void);
291 
292 #include <mips/cache_mipsNN.h>
293 
294 #endif /* _MIPS_CACHE_H_ */
295