/*	$OpenBSD: cache_mips64r2.c,v 1.2 2016/01/05 05:27:54 visa Exp $	*/

/*
 * Copyright (c) 2014 Miodrag Vallat.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Cache handling code for mips64r2 compatible processors
 */

#include <sys/param.h>
#include <sys/systm.h>

#include <mips64/cache.h>
#include <machine/cpu.h>

#include <uvm/uvm_extern.h>

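/*
 * CACHE instruction operation codes. The low two bits of each value
 * select the target cache (0 = primary I$, 1 = primary D$, 2 = tertiary,
 * 3 = secondary); the upper bits select the operation.
 */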
#define	IndexInvalidate_I	0x00
#define	IndexWBInvalidate_D	0x01
#define	IndexWBInvalidate_T	0x02
#define	IndexWBInvalidate_S	0x03

#define	HitInvalidate_D		0x11
#define	HitInvalidate_T		0x12
#define	HitInvalidate_S		0x13

#define	HitWBInvalidate_D	0x15
#define	HitWBInvalidate_T	0x16
#define	HitWBInvalidate_S	0x17

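/*
 * Issue a single CACHE instruction. The operation code has to be a
 * compile-time constant since it is encoded in the instruction itself;
 * the "memory" clobber keeps the compiler from moving memory accesses
 * across the cache operation.
 */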
#define	cache(op,addr) \
    __asm__ __volatile__ \
      ("cache %0, 0(%1)" :: "i"(op), "r"(addr) : "memory")

static __inline__ void	mips64r2_hitinv_primary(vaddr_t, vsize_t, vsize_t);
static __inline__ void	mips64r2_hitinv_secondary(vaddr_t, vsize_t, vsize_t);
static __inline__ void	mips64r2_hitinv_ternary(vaddr_t, vsize_t, vsize_t);
static __inline__ void	mips64r2_hitwbinv_primary(vaddr_t, vsize_t, vsize_t);
static __inline__ void	mips64r2_hitwbinv_secondary(vaddr_t, vsize_t, vsize_t);
static __inline__ void	mips64r2_hitwbinv_ternary(vaddr_t, vsize_t, vsize_t);

void
mips64r2_ConfigCache(struct cpu_info *ci)
{
	uint32_t cfg, valias_mask;
	uint32_t s, l, a;

	cfg = cp0_get_config();
	if ((cfg & 0x80000000) == 0)
		panic("no M bit in cfg0.0");

	cfg = cp0_get_config_1();

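	/*
	 * Decode the L1 geometry from config1: for each cache, the
	 * associativity is the field value plus one, the line size is
	 * 2^(field + 1) bytes, and there are 64 * 2^field sets per way.
	 */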
	a = 1 + ((cfg >> 7) & 0x07);
	l = (cfg >> 10) & 0x07;
	s = (cfg >> 13) & 0x07;
	ci->ci_l1data.linesize = 2 << l;
	ci->ci_l1data.setsize = (64 << s) * ci->ci_l1data.linesize;
	ci->ci_l1data.sets = a;
	ci->ci_l1data.size = ci->ci_l1data.sets * ci->ci_l1data.setsize;

	a = 1 + ((cfg >> 16) & 0x07);
	l = (cfg >> 19) & 0x07;
	s = (cfg >> 22) & 0x07;
	ci->ci_l1inst.linesize = 2 << l;
	ci->ci_l1inst.setsize = (64 << s) * ci->ci_l1inst.linesize;
	ci->ci_l1inst.sets = a;
	ci->ci_l1inst.size = ci->ci_l1inst.sets * ci->ci_l1inst.setsize;

	memset(&ci->ci_l2, 0, sizeof(struct cache_info));
	memset(&ci->ci_l3, 0, sizeof(struct cache_info));

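	/*
	 * `cfg' still holds config1 here; its M bit tells whether config2,
	 * which describes the L2 and L3 caches, is implemented.
	 */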
	if ((cfg & 0x80000000) != 0) {
		cfg = cp0_get_config_2();

		a = 1 + ((cfg >> 0) & 0x0f);
		l = (cfg >> 4) & 0x0f;
		s = (cfg >> 8) & 0x0f;
		if (l != 0) {
			ci->ci_l2.linesize = 2 << l;
			ci->ci_l2.setsize = (64 << s) * ci->ci_l2.linesize;
			ci->ci_l2.sets = a;
			ci->ci_l2.size = ci->ci_l2.sets * ci->ci_l2.setsize;
		}

		a = 1 + ((cfg >> 16) & 0x0f);
		l = (cfg >> 20) & 0x0f;
		s = (cfg >> 24) & 0x0f;
		if (l != 0) {
			ci->ci_l3.linesize = 2 << l;
			ci->ci_l3.setsize = (64 << s) * ci->ci_l3.linesize;
			ci->ci_l3.sets = a;
			ci->ci_l3.size = ci->ci_l3.sets * ci->ci_l3.setsize;
		}
	}

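	/*
	 * A virtually-indexed cache whose way size exceeds the page size
	 * can hold the same physical page at several indices. Remember
	 * that span so pmap can prefer alias-free virtual addresses.
	 */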
	valias_mask = (max(ci->ci_l1inst.setsize, ci->ci_l1data.setsize) - 1) &
	    ~PAGE_MASK;

	if (valias_mask != 0) {
		valias_mask |= PAGE_MASK;
#ifdef MULTIPROCESSOR
		if (valias_mask > cache_valias_mask) {
#endif
			cache_valias_mask = valias_mask;
			pmap_prefer_mask = valias_mask;
#ifdef MULTIPROCESSOR
		}
#endif
	}

	ci->ci_SyncCache = mips64r2_SyncCache;
	ci->ci_InvalidateICache = mips64r2_InvalidateICache;
	ci->ci_InvalidateICachePage = mips64r2_InvalidateICachePage;
	ci->ci_SyncICache = mips64r2_SyncICache;
	ci->ci_SyncDCachePage = mips64r2_SyncDCachePage;
	ci->ci_HitSyncDCachePage = mips64r2_HitSyncDCachePage;
	ci->ci_HitSyncDCache = mips64r2_HitSyncDCache;
	ci->ci_HitInvalidateDCache = mips64r2_HitInvalidateDCache;
	ci->ci_IOSyncDCache = mips64r2_IOSyncDCache;
}

static __inline__ void
mips64r2_hitwbinv_primary(vaddr_t va, vsize_t sz, vsize_t line)
{
	vaddr_t eva;

	eva = va + sz;
	while (va != eva) {
		cache(HitWBInvalidate_D, va);
		va += line;
	}
}

static __inline__ void
mips64r2_hitwbinv_secondary(vaddr_t va, vsize_t sz, vsize_t line)
{
	vaddr_t eva;

	eva = va + sz;
	while (va != eva) {
		cache(HitWBInvalidate_S, va);
		va += line;
	}
}

static __inline__ void
mips64r2_hitwbinv_ternary(vaddr_t va, vsize_t sz, vsize_t line)
{
	vaddr_t eva;

	eva = va + sz;
	while (va != eva) {
		cache(HitWBInvalidate_T, va);
		va += line;
	}
}

static __inline__ void
mips64r2_hitinv_primary(vaddr_t va, vsize_t sz, vsize_t line)
{
	vaddr_t eva;

	eva = va + sz;
	while (va != eva) {
		cache(HitInvalidate_D, va);
		va += line;
	}
}

static __inline__ void
mips64r2_hitinv_secondary(vaddr_t va, vsize_t sz, vsize_t line)
{
	vaddr_t eva;

	eva = va + sz;
	while (va != eva) {
		cache(HitInvalidate_S, va);
		va += line;
	}
}

static __inline__ void
mips64r2_hitinv_ternary(vaddr_t va, vsize_t sz, vsize_t line)
{
	vaddr_t eva;

	eva = va + sz;
	while (va != eva) {
		cache(HitInvalidate_T, va);
		va += line;
	}
}

/*
 * Writeback and invalidate all caches.
 */
void
mips64r2_SyncCache(struct cpu_info *ci)
{
	vaddr_t sva, eva;

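	/*
	 * Index operations only use the index bits of the address, so
	 * walking a cached XKPHYS window the size of each cache touches
	 * every line, whatever it currently holds.
	 */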
	sva = PHYS_TO_XKPHYS(0, CCA_CACHED);
	eva = sva + ci->ci_l1inst.size;
	while (sva != eva) {
		cache(IndexInvalidate_I, sva);
		sva += ci->ci_l1inst.linesize;
	}

	sva = PHYS_TO_XKPHYS(0, CCA_CACHED);
	eva = sva + ci->ci_l1data.size;
	while (sva != eva) {
		cache(IndexWBInvalidate_D, sva);
		sva += ci->ci_l1data.linesize;
	}

	if (ci->ci_l2.size != 0) {
		sva = PHYS_TO_XKPHYS(0, CCA_CACHED);
		eva = sva + ci->ci_l2.size;
		while (sva != eva) {
			cache(IndexWBInvalidate_S, sva);
			sva += ci->ci_l2.linesize;
		}
	}

	if (ci->ci_l3.size != 0) {
		sva = PHYS_TO_XKPHYS(0, CCA_CACHED);
		eva = sva + ci->ci_l3.size;
		while (sva != eva) {
			cache(IndexWBInvalidate_T, sva);
			sva += ci->ci_l3.linesize;
		}
	}
}

/*
 * Invalidate I$ for the given range.
 */
void
mips64r2_InvalidateICache(struct cpu_info *ci, vaddr_t _va, size_t _sz)
{
	vaddr_t va, sva, eva, iva;
	vsize_t sz, offs;
	uint set, nsets;

	/* extend the range to integral cache lines */
	va = _va & ~(ci->ci_l1inst.linesize - 1);
	sz = ((_va + _sz + ci->ci_l1inst.linesize - 1) &
	    ~(ci->ci_l1inst.linesize - 1)) - va;

	sva = PHYS_TO_XKPHYS(0, CCA_CACHED);
	offs = ci->ci_l1inst.setsize;
	nsets = ci->ci_l1inst.sets;
	/* keep only the index bits */
	sva |= va & (offs - 1);
	eva = sva + sz;

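	/*
	 * The index operation is applied to every way at each index, since
	 * there is no way to know which way, if any, holds the given
	 * address.
	 */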
	while (sva != eva) {
		for (set = nsets, iva = sva; set != 0; set--, iva += offs)
			cache(IndexInvalidate_I, iva);
		sva += ci->ci_l1inst.linesize;
	}
}

/*
 * Register a given page for I$ invalidation.
 */
void
mips64r2_InvalidateICachePage(struct cpu_info *ci, vaddr_t va)
{
	/* this code is too generic to allow for lazy I$ invalidates, yet */
	mips64r2_InvalidateICache(ci, va, PAGE_SIZE);
}

/*
 * Perform postponed I$ invalidation.
 */
void
mips64r2_SyncICache(struct cpu_info *ci)
{
}

/*
 * Writeback D$ for the given page.
 */
void
mips64r2_SyncDCachePage(struct cpu_info *ci, vaddr_t va, paddr_t pa)
{
	vaddr_t sva, eva, iva;
	vsize_t line, offs;
	uint set, nsets;

	line = ci->ci_l1data.linesize;
	sva = PHYS_TO_XKPHYS(0, CCA_CACHED);
	offs = ci->ci_l1data.setsize;
	nsets = ci->ci_l1data.sets;
	/* keep only the index bits */
	sva |= va & (offs - 1);
	eva = sva + PAGE_SIZE;
	while (sva != eva) {
		for (set = nsets, iva = sva; set != 0; set--, iva += offs)
			cache(IndexWBInvalidate_D, iva);
		sva += line;
	}
}

/*
 * Writeback D$ for the given page, which is expected to be currently
 * mapped, allowing the use of `Hit' operations. This is less aggressive
 * than using `Index' operations.
 */

void
mips64r2_HitSyncDCachePage(struct cpu_info *ci, vaddr_t va, paddr_t pa)
{
	mips64r2_hitwbinv_primary(va, PAGE_SIZE, ci->ci_l1data.linesize);
}

/*
 * Writeback D$ for the given range. Range is expected to be currently
 * mapped, allowing the use of `Hit' operations. This is less aggressive
 * than using `Index' operations.
 */

void
mips64r2_HitSyncDCache(struct cpu_info *ci, vaddr_t _va, size_t _sz)
{
	vaddr_t va;
	vsize_t sz;

	/* extend the range to integral cache lines */
	va = _va & ~(ci->ci_l1data.linesize - 1);
	sz = ((_va + _sz + ci->ci_l1data.linesize - 1) &
	    ~(ci->ci_l1data.linesize - 1)) - va;
	mips64r2_hitwbinv_primary(va, sz, ci->ci_l1data.linesize);
}

/*
 * Invalidate D$ for the given range. Range is expected to be currently
 * mapped, allowing the use of `Hit' operations. This is less aggressive
 * than using `Index' operations.
 */

void
mips64r2_HitInvalidateDCache(struct cpu_info *ci, vaddr_t _va, size_t _sz)
{
	vaddr_t va;
	vsize_t sz;

	/* extend the range to integral cache lines */
	va = _va & ~(ci->ci_l1data.linesize - 1);
	sz = ((_va + _sz + ci->ci_l1data.linesize - 1) &
	    ~(ci->ci_l1data.linesize - 1)) - va;
	mips64r2_hitinv_primary(va, sz, ci->ci_l1data.linesize);
}

/*
 * Backend for bus_dmamap_sync(). Enforce coherency of the given range
 * by performing the necessary cache writeback and/or invalidate
 * operations.
 */
void
mips64r2_IOSyncDCache(struct cpu_info *ci, vaddr_t _va, size_t _sz, int how)
{
	vaddr_t va;
	vsize_t sz;
	int partial_start, partial_end;

	/*
	 * L1
	 */

	/* extend the range to integral cache lines */
	va = _va & ~(ci->ci_l1data.linesize - 1);
	sz = ((_va + _sz + ci->ci_l1data.linesize - 1) &
	    ~(ci->ci_l1data.linesize - 1)) - va;

	switch (how) {
	case CACHE_SYNC_R:
		/* writeback partial cachelines */
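		/*
		 * A partial line at either end of the range may also hold
		 * unrelated data that must not be lost, hence the writeback;
		 * the fully covered interior is simply invalidated.
		 */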
		if (((_va | _sz) & (ci->ci_l1data.linesize - 1)) != 0) {
			partial_start = va != _va;
			partial_end = va + sz != _va + _sz;
		} else {
			partial_start = partial_end = 0;
		}
		if (partial_start) {
			cache(HitWBInvalidate_D, va);
			va += ci->ci_l1data.linesize;
			sz -= ci->ci_l1data.linesize;
		}
		if (sz != 0 && partial_end) {
			sz -= ci->ci_l1data.linesize;
			cache(HitWBInvalidate_D, va + sz);
		}
		if (sz != 0)
			mips64r2_hitinv_primary(va, sz, ci->ci_l1data.linesize);
		break;
	case CACHE_SYNC_X:
	case CACHE_SYNC_W:
		mips64r2_hitwbinv_primary(va, sz, ci->ci_l1data.linesize);
		break;
	}

	/*
	 * L2
	 */

	if (ci->ci_l2.size != 0) {
		/* extend the range to integral cache lines */
		va = _va & ~(ci->ci_l2.linesize - 1);
		sz = ((_va + _sz + ci->ci_l2.linesize - 1) &
		    ~(ci->ci_l2.linesize - 1)) - va;

		switch (how) {
		case CACHE_SYNC_R:
			/* writeback partial cachelines */
			if (((_va | _sz) & (ci->ci_l2.linesize - 1)) != 0) {
				partial_start = va != _va;
				partial_end = va + sz != _va + _sz;
			} else {
				partial_start = partial_end = 0;
			}
			if (partial_start) {
				cache(HitWBInvalidate_S, va);
				va += ci->ci_l2.linesize;
				sz -= ci->ci_l2.linesize;
			}
			if (sz != 0 && partial_end) {
				sz -= ci->ci_l2.linesize;
				cache(HitWBInvalidate_S, va + sz);
			}
			if (sz != 0)
				mips64r2_hitinv_secondary(va, sz, ci->ci_l2.linesize);
			break;
		case CACHE_SYNC_X:
		case CACHE_SYNC_W:
			mips64r2_hitwbinv_secondary(va, sz, ci->ci_l2.linesize);
			break;
		}
	}

	/*
	 * L3
	 */

	if (ci->ci_l3.size != 0) {
		/* extend the range to integral cache lines */
		va = _va & ~(ci->ci_l3.linesize - 1);
		sz = ((_va + _sz + ci->ci_l3.linesize - 1) &
		    ~(ci->ci_l3.linesize - 1)) - va;

		switch (how) {
		case CACHE_SYNC_R:
			/* writeback partial cachelines */
			if (((_va | _sz) & (ci->ci_l3.linesize - 1)) != 0) {
				partial_start = va != _va;
				partial_end = va + sz != _va + _sz;
			} else {
				partial_start = partial_end = 0;
			}
			if (partial_start) {
				cache(HitWBInvalidate_T, va);
				va += ci->ci_l3.linesize;
				sz -= ci->ci_l3.linesize;
			}
			if (sz != 0 && partial_end) {
				sz -= ci->ci_l3.linesize;
				cache(HitWBInvalidate_T, va + sz);
			}
			if (sz != 0)
				mips64r2_hitinv_ternary(va, sz, ci->ci_l3.linesize);
			break;
		case CACHE_SYNC_X:
		case CACHE_SYNC_W:
			mips64r2_hitwbinv_ternary(va, sz, ci->ci_l3.linesize);
			break;
		}
	}
}