1 /* $OpenBSD: cache_sh3.c,v 1.3 2016/03/05 17:16:33 tobiasu Exp $ */
2 /* $NetBSD: cache_sh3.c,v 1.12 2006/03/04 01:13:35 uwe Exp $ */
3
4 /*-
5 * Copyright (c) 2002 The NetBSD Foundation, Inc.
6 * All rights reserved.
7 *
8 * This code is derived from software contributed to The NetBSD Foundation
9 * by UCHIYAMA Yasushi.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 #include <sys/param.h>
34 #include <sys/systm.h>
35
36 #include <sh/cache.h>
37 #include <sh/cache_sh3.h>
38
/* Round up / truncate to a 16-byte cache-line boundary. */
#define round_line(x) (((x) + 15) & ~15)
#define trunc_line(x) ((x) & ~15)

/* Cache operations installed into sh_cache_ops by sh3_cache_config(). */
void sh3_cache_wbinv_all(void);
void sh3_cache_wbinv_range(vaddr_t, vsize_t);
void sh3_cache_wbinv_range_index(vaddr_t, vsize_t);
void sh3_cache_panic(vaddr_t, vsize_t);
void sh3_cache_nop(vaddr_t, vsize_t);

/* Cache geometry, computed once by sh3_cache_config(). */
int sh_cache_way_size;		/* bytes of entries walked per way */
int sh_cache_way_shift;		/* shift positioning the way-select bits */
int sh_cache_entry_mask;	/* mask extracting the entry-select bits */

/* Index-operation helpers: clear control bits on lines across n ways. */
static inline void cache_sh3_op_line_16_nway(int, vaddr_t, uint32_t);
static inline void cache_sh3_op_8lines_16_nway(int, vaddr_t, uint32_t);
54
55 void
sh3_cache_config(void)56 sh3_cache_config(void)
57 {
58 size_t cache_size;
59 uint32_t r;
60
61 /* Determine cache size */
62 switch (cpu_product) {
63 default:
64 /* FALLTHROUGH */
65 case CPU_PRODUCT_7708:
66 /* FALLTHROUGH */
67 case CPU_PRODUCT_7708S:
68 /* FALLTHROUGH */
69 case CPU_PRODUCT_7708R:
70 cache_size = 8 * 1024;
71 break;
72 case CPU_PRODUCT_7709:
73 cache_size = 8 * 1024;
74 break;
75 case CPU_PRODUCT_7709A:
76 cache_size = 16 * 1024;
77 break;
78 }
79
80 r = _reg_read_4(SH3_CCR);
81
82 sh_cache_unified = 1;
83 sh_cache_enable_unified = (r & SH3_CCR_CE);
84 sh_cache_line_size = 16;
85 sh_cache_write_through_p0_u0_p3 = r & SH3_CCR_WT;
86 sh_cache_write_through_p1 = !(r & SH3_CCR_CB);
87 sh_cache_write_through = sh_cache_write_through_p0_u0_p3 &&
88 sh_cache_write_through_p1;
89
90 sh_cache_ram_mode = r & SH3_CCR_RA;
91 if (sh_cache_ram_mode) {
92 /*
93 * In RAM-mode, way 2 and 3 are used as RAM.
94 */
95 sh_cache_ways = 2;
96 sh_cache_size_unified = cache_size / 2;
97 } else {
98 sh_cache_ways = 4;
99 sh_cache_size_unified = cache_size;
100 }
101
102 /* size enough to access foreach entries */
103 sh_cache_way_size = sh_cache_size_unified / 4/*way*/;
104 /* mask for extracting entry select */
105 sh_cache_entry_mask = (sh_cache_way_size - 1) & ~15/*line-mask*/;
106 /* shift for way selection (16KB/8KB) */
107 sh_cache_way_shift =
108 /* entry bits */
109 ffs(sh_cache_size_unified / (4/*way*/ * 16/*line-size*/)) - 1
110 /* line bits */
111 + 4;
112
113 sh_cache_ops._icache_sync_all = sh3_cache_wbinv_all;
114 sh_cache_ops._icache_sync_range = sh3_cache_wbinv_range;
115 sh_cache_ops._icache_sync_range_index = sh3_cache_wbinv_range_index;
116 sh_cache_ops._dcache_wbinv_all = sh3_cache_wbinv_all;
117 sh_cache_ops._dcache_wbinv_range = sh3_cache_wbinv_range;
118 sh_cache_ops._dcache_wbinv_range_index = sh3_cache_wbinv_range_index;
119 /* SH3 can't invalidate without write-back */
120 sh_cache_ops._dcache_inv_range = sh3_cache_panic;
121 if (sh_cache_write_through) {
122 sh_cache_ops._dcache_wb_range = sh3_cache_nop;
123 } else {
124 /* SH3 can't write-back without invalidate */
125 sh_cache_ops._dcache_wb_range = sh3_cache_wbinv_range;
126 }
127 }
128
129 /*
130 * cache_sh3_op_line_16_nway: (index-operation)
131 *
132 * Clear the specified bits on single 16-byte cache line. n-ways.
133 *
134 */
135 static inline void
cache_sh3_op_line_16_nway(int n,vaddr_t va,uint32_t bits)136 cache_sh3_op_line_16_nway(int n, vaddr_t va, uint32_t bits)
137 {
138 vaddr_t cca;
139 int way;
140
141 /* extract entry # */
142 va &= sh_cache_entry_mask;
143
144 /* operate for each way */
145 for (way = 0; way < n; way++) {
146 cca = (SH3_CCA | way << sh_cache_way_shift | va);
147 _reg_bclr_4(cca, bits);
148 }
149 }
150
151 /*
152 * cache_sh3_op_8lines_16_nway: (index-operation)
153 *
154 * Clear the specified bits on 8 16-byte cache lines, n-ways.
155 *
156 */
157 static inline void
cache_sh3_op_8lines_16_nway(int n,vaddr_t va,uint32_t bits)158 cache_sh3_op_8lines_16_nway(int n, vaddr_t va, uint32_t bits)
159 {
160 volatile uint32_t *cca;
161 int way;
162
163 /* extract entry # */
164 va &= sh_cache_entry_mask;
165
166 /* operate for each way */
167 for (way = 0; way < n; way++) {
168 cca = (volatile uint32_t *)
169 (SH3_CCA | way << sh_cache_way_shift | va);
170 cca[ 0] &= ~bits;
171 cca[ 4] &= ~bits;
172 cca[ 8] &= ~bits;
173 cca[12] &= ~bits;
174 cca[16] &= ~bits;
175 cca[20] &= ~bits;
176 cca[24] &= ~bits;
177 cca[28] &= ~bits;
178 }
179 }
180
181 void
sh3_cache_wbinv_all(void)182 sh3_cache_wbinv_all(void)
183 {
184 vaddr_t va;
185
186 for (va = 0; va < sh_cache_way_size; va += 16 * 8)
187 cache_sh3_op_8lines_16_nway(sh_cache_ways, va, CCA_U | CCA_V);
188 }
189
190 void
sh3_cache_wbinv_range_index(vaddr_t va,vsize_t sz)191 sh3_cache_wbinv_range_index(vaddr_t va, vsize_t sz)
192 {
193 vaddr_t eva = round_line(va + sz);
194
195 va = trunc_line(va);
196
197 while ((eva - va) >= (8 * 16)) {
198 cache_sh3_op_8lines_16_nway(sh_cache_ways, va, CCA_U | CCA_V);
199 va += 16 * 8;
200 }
201
202 while (va < eva) {
203 cache_sh3_op_line_16_nway(sh_cache_ways, va, CCA_U | CCA_V);
204 va += 16;
205 }
206 }
207
208 void
sh3_cache_wbinv_range(vaddr_t va,vsize_t sz)209 sh3_cache_wbinv_range(vaddr_t va, vsize_t sz)
210 {
211 vaddr_t eva = round_line(va + sz);
212 vaddr_t cca;
213
214 va = trunc_line(va);
215
216 while (va < eva) {
217 cca = SH3_CCA | CCA_A | (va & sh_cache_entry_mask);
218 /*
219 * extract virtual tag-address.
220 * MMU translates it to physical address tag,
221 * and write to address-array.
222 * implicitly specified U = 0, V = 0.
223 */
224 _reg_write_4(cca, va & CCA_TAGADDR_MASK);
225 va += 16;
226 }
227 }
228
/*
 * sh3_cache_panic:
 *
 *	Installed as _dcache_inv_range: SH3 cannot invalidate a line
 *	without also writing it back, so reaching here is a bug.
 */
void
sh3_cache_panic(vaddr_t va, vsize_t size)
{
	panic("SH3 can't invalidate without write-back");
}
234
/*
 * sh3_cache_nop:
 *
 *	Installed as _dcache_wb_range when the cache is write-through:
 *	nothing is dirty, so write-back is a no-op.
 */
void
sh3_cache_nop(vaddr_t va, vsize_t sz)
{
	/* NO-OP */
}
240