xref: /openbsd-src/sys/arch/sh/include/cache.h (revision d874cce4b1d9fe6b41c9e4f2117a77d8a4a37b92)
1*d874cce4Sray /*	$OpenBSD: cache.h,v 1.3 2008/06/26 05:42:12 ray Exp $	*/
295c7671fSmiod /*	$NetBSD: cache.h,v 1.7 2006/01/21 00:46:36 uwe Exp $	*/
395c7671fSmiod 
495c7671fSmiod /*-
595c7671fSmiod  * Copyright (c) 2002 The NetBSD Foundation, Inc.
695c7671fSmiod  * All rights reserved.
795c7671fSmiod  *
895c7671fSmiod  * This code is derived from software contributed to The NetBSD Foundation
995c7671fSmiod  * by UCHIYAMA Yasushi.
1095c7671fSmiod  *
1195c7671fSmiod  * Redistribution and use in source and binary forms, with or without
1295c7671fSmiod  * modification, are permitted provided that the following conditions
1395c7671fSmiod  * are met:
1495c7671fSmiod  * 1. Redistributions of source code must retain the above copyright
1595c7671fSmiod  *    notice, this list of conditions and the following disclaimer.
1695c7671fSmiod  * 2. Redistributions in binary form must reproduce the above copyright
1795c7671fSmiod  *    notice, this list of conditions and the following disclaimer in the
1895c7671fSmiod  *    documentation and/or other materials provided with the distribution.
1995c7671fSmiod  *
2095c7671fSmiod  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
2195c7671fSmiod  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
2295c7671fSmiod  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
2395c7671fSmiod  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
2495c7671fSmiod  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
2595c7671fSmiod  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
2695c7671fSmiod  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
2795c7671fSmiod  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
2895c7671fSmiod  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
2995c7671fSmiod  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
3095c7671fSmiod  * POSSIBILITY OF SUCH DAMAGE.
3195c7671fSmiod  */
3295c7671fSmiod 
3395c7671fSmiod /*
3495c7671fSmiod  * Cache configurations.
3595c7671fSmiod  *
3695c7671fSmiod  * SH3 I/D unified virtual-index physical-tag cache.
3795c7671fSmiod  * SH4 I/D separated virtual-index physical-tag cache.
3895c7671fSmiod  *
3995c7671fSmiod  *
4095c7671fSmiod  *         size       line-size entry way type
4195c7671fSmiod  * SH7708  4/8K       16B       128   2/4 P0,P2,U0 [1]
4295c7671fSmiod  *                                        P1 [2]
4395c7671fSmiod  * SH7709  4/8K       16B       128   2/4 [1]
4495c7671fSmiod  * SH7709A 16K        16B       256   4   [1]
4595c7671fSmiod  *
4695c7671fSmiod  * SH7750  I$  D$     line-size entry way
4795c7671fSmiod  *         8K  8/16K  32B       256   1   [1]
4895c7671fSmiod  * SH7750
4995c7671fSmiod  * SH7750S
5095c7671fSmiod  * SH7751  I$  D$     line-size entry way
5195c7671fSmiod  *         8K  8/16K  32B       256   1   [1]
5295c7671fSmiod  *
5395c7671fSmiod  * SH7750R
5495c7671fSmiod  * SH7751R I$  D$     line-size entry way
5595c7671fSmiod  *         16K 16/32K 32B       512   2   [1]
5695c7671fSmiod  *
5795c7671fSmiod  * [1]	write-through/back selectable
5895c7671fSmiod  * [2]	write-through only
5995c7671fSmiod  *
6095c7671fSmiod  * Cache operations.
6195c7671fSmiod  *
6295c7671fSmiod  * There are some rules that must be followed:
6395c7671fSmiod  *
6495c7671fSmiod  *	I-cache Sync (all or range):
6595c7671fSmiod  *		The goal is to synchronize the instruction stream,
6695c7671fSmiod  *		so you may need to write-back dirty data cache
6795c7671fSmiod  *		blocks first.  If a range is requested, and you
6895c7671fSmiod  *		can't synchronize just a range, you have to hit
6995c7671fSmiod  *		the whole thing.
7095c7671fSmiod  *
7195c7671fSmiod  *	D-cache Write-back Invalidate range:
7295c7671fSmiod  *		If you can't WB-Inv a range, you must WB-Inv the
7395c7671fSmiod  *		entire D-cache.
7495c7671fSmiod  *
7595c7671fSmiod  *	D-cache Invalidate:
7695c7671fSmiod  *		If you can't Inv the D-cache without doing a
7795c7671fSmiod  *		Write-back, YOU MUST PANIC.  This is to catch
7895c7671fSmiod  *		errors in calling code.  Callers must be aware
7995c7671fSmiod  *		of this scenario, and must handle it appropriately
8095c7671fSmiod  *		(consider the bus_dma(9) operations).
8195c7671fSmiod  *
8295c7671fSmiod  *	D-cache Write-back:
8395c7671fSmiod  *		If you can't Write-back without doing an invalidate,
8495c7671fSmiod  *		that's fine.  Then treat this as a WB-Inv.  Skipping
8595c7671fSmiod  *		the invalidate is merely an optimization.
8695c7671fSmiod  *
8795c7671fSmiod  *	All operations:
8895c7671fSmiod  *		Valid virtual addresses must be passed to the
8995c7671fSmiod  *		cache operation.
9095c7671fSmiod  *
9195c7671fSmiod  *
9295c7671fSmiod  *	sh_icache_sync_all	Synchronize I-cache
9395c7671fSmiod  *
9495c7671fSmiod  *	sh_icache_sync_range	Synchronize I-cache range
9595c7671fSmiod  *
9695c7671fSmiod  *	sh_icache_sync_range_index (index ops)
9795c7671fSmiod  *
9895c7671fSmiod  *	sh_dcache_wbinv_all	Write-back Invalidate D-cache
9995c7671fSmiod  *
10095c7671fSmiod  *	sh_dcache_wbinv_range	Write-back Invalidate D-cache range
10195c7671fSmiod  *
10295c7671fSmiod  *	sh_dcache_wbinv_range_index (index ops)
10395c7671fSmiod  *
10495c7671fSmiod  *	sh_dcache_inv_range	Invalidate D-cache range
10595c7671fSmiod  *
10695c7671fSmiod  *	sh_dcache_wb_range	Write-back D-cache range
10795c7671fSmiod  *
10895c7671fSmiod  *	If the I/D cache is unified (SH3), the I-cache ops are
10995c7671fSmiod  *	write-back invalidate operations.
11095c7671fSmiod  *	In write-through mode, sh_dcache_wb_range is a no-op.
11195c7671fSmiod  *
11295c7671fSmiod  */
11395c7671fSmiod 
11495c7671fSmiod #ifndef _SH_CACHE_H_
11595c7671fSmiod #define	_SH_CACHE_H_
11695c7671fSmiod 
11795c7671fSmiod #ifdef _KERNEL
/*
 * Per-CPU-model cache operation vector.  The sh_icache_*/sh_dcache_*
 * macros below dispatch through a single global instance of this
 * struct (declared further down); presumably sh_cache_init() selects
 * the model-specific implementations -- only the prototype is visible
 * here, so confirm in the corresponding cache .c files.
 *
 * Semantics of each operation (including the mandatory fallbacks when
 * a range op cannot be honored) are spelled out in the big comment at
 * the top of this file.
 */
11895c7671fSmiod struct sh_cache_ops {
	/* I-cache synchronization: whole cache, VA range, and index ops. */
11995c7671fSmiod 	void (*_icache_sync_all)(void);
12095c7671fSmiod 	void (*_icache_sync_range)(vaddr_t, vsize_t);
12195c7671fSmiod 	void (*_icache_sync_range_index)(vaddr_t, vsize_t);
12295c7671fSmiod 
	/* D-cache: write-back+invalidate (all/range/index), invalidate-only
	 * range, and write-back-only range. */
12395c7671fSmiod 	void (*_dcache_wbinv_all)(void);
12495c7671fSmiod 	void (*_dcache_wbinv_range)(vaddr_t, vsize_t);
12595c7671fSmiod 	void (*_dcache_wbinv_range_index)(vaddr_t, vsize_t);
12695c7671fSmiod 	void (*_dcache_inv_range)(vaddr_t, vsize_t);
12795c7671fSmiod 	void (*_dcache_wb_range)(vaddr_t, vsize_t);
12895c7671fSmiod };
12995c7671fSmiod 
13095c7671fSmiod /* Cache configurations */
/*
 * On SH3 the I/D cache is unified (see table above), so the "unified"
 * names simply alias the I-cache variables.
 */
13195c7671fSmiod #define	sh_cache_enable_unified		sh_cache_enable_icache
13295c7671fSmiod extern int sh_cache_enable_icache;
13395c7671fSmiod extern int sh_cache_enable_dcache;
13495c7671fSmiod extern int sh_cache_write_through;
/*
 * Per-region write-through flags.  The P0/U0/P3 vs. P1 split matches
 * the SH7708 table above: regions marked [1] are write-through/back
 * selectable, while P1 ([2]) is write-through only.
 */
13595c7671fSmiod extern int sh_cache_write_through_p0_u0_p3;
13695c7671fSmiod extern int sh_cache_write_through_p1;
13795c7671fSmiod extern int sh_cache_ways;
13895c7671fSmiod extern int sh_cache_unified;
13995c7671fSmiod #define	sh_cache_size_unified		sh_cache_size_icache
14095c7671fSmiod extern int sh_cache_size_icache;
14195c7671fSmiod extern int sh_cache_size_dcache;
14295c7671fSmiod extern int sh_cache_line_size;
14395c7671fSmiod /* for n-way set associative cache */
14495c7671fSmiod extern int sh_cache_way_size;
14595c7671fSmiod extern int sh_cache_way_shift;
14695c7671fSmiod extern int sh_cache_entry_mask;
14795c7671fSmiod 
14895c7671fSmiod /* Special mode */
14995c7671fSmiod extern int sh_cache_ram_mode;
15095c7671fSmiod extern int sh_cache_index_mode_icache;
15195c7671fSmiod extern int sh_cache_index_mode_dcache;
15295c7671fSmiod 
/*
 * NOTE(review): presumably the mask of cache-index bits used to avoid
 * virtual aliases on SH4 (caches are virtually indexed, see top
 * comment) -- confirm against the pmap code that consumes it.
 */
1533eaab126Smiod extern int sh_cache_prefer_mask;
1543eaab126Smiod 
/* The per-model operation vector the macros below indirect through. */
15595c7671fSmiod extern struct sh_cache_ops sh_cache_ops;
15695c7671fSmiod 
/*
 * Public cache-operation interface: each macro is a thin indirection
 * through the model-specific function installed in sh_cache_ops.
 * The contract of every operation (and the required fallback behavior
 * when a range cannot be honored) is documented in the comment at the
 * top of this file.
 */
15695c7671fSmiod #define	sh_icache_sync_all()						\
15795c7671fSmiod 	(*sh_cache_ops._icache_sync_all)()
15895c7671fSmiod 
15995c7671fSmiod #define	sh_icache_sync_range(v, s)					\
16095c7671fSmiod 	(*sh_cache_ops._icache_sync_range)((v), (s))
16195c7671fSmiod 
16295c7671fSmiod #define	sh_icache_sync_range_index(v, s)				\
16395c7671fSmiod 	(*sh_cache_ops._icache_sync_range_index)((v), (s))
16495c7671fSmiod 
16595c7671fSmiod #define	sh_dcache_wbinv_all()						\
16695c7671fSmiod 	(*sh_cache_ops._dcache_wbinv_all)()
16795c7671fSmiod 
16895c7671fSmiod #define	sh_dcache_wbinv_range(v, s)					\
16995c7671fSmiod 	(*sh_cache_ops._dcache_wbinv_range)((v), (s))
17095c7671fSmiod 
17195c7671fSmiod #define	sh_dcache_wbinv_range_index(v, s)				\
17295c7671fSmiod 	(*sh_cache_ops._dcache_wbinv_range_index)((v), (s))
17395c7671fSmiod 
17495c7671fSmiod #define	sh_dcache_inv_range(v, s)					\
17595c7671fSmiod 	(*sh_cache_ops._dcache_inv_range)((v), (s))
17695c7671fSmiod 
17795c7671fSmiod #define	sh_dcache_wb_range(v, s)					\
17895c7671fSmiod 	(*sh_cache_ops._dcache_wb_range)((v), (s))
17995c7671fSmiod 
/*
 * NOTE(review): only declarations are visible here; presumably
 * sh_cache_init() probes the CPU and fills in sh_cache_ops plus the
 * configuration variables above, and sh_cache_information() reports
 * the detected configuration -- confirm in the cache implementation.
 */
18095c7671fSmiod void sh_cache_init(void);
18195c7671fSmiod void sh_cache_information(void);
18295c7671fSmiod 
/* SH3 has a unified I/D cache; SH4 caches are virtually indexed and
 * physically tagged, hence may suffer virtual aliases (see table at
 * the top of this file). */
18395c7671fSmiod #define	SH_HAS_UNIFIED_CACHE	CPU_IS_SH3
18495c7671fSmiod #define	SH_HAS_VIRTUAL_ALIAS	CPU_IS_SH4
18595c7671fSmiod #define	SH_HAS_WRITEBACK_CACHE	(!sh_cache_write_through)
18695c7671fSmiod 
18795c7671fSmiod #endif /* _KERNEL */
18895c7671fSmiod #endif /* _SH_CACHE_H_ */
190