/*	$NetBSD: cache_sh3.c,v 1.16 2012/02/12 16:34:10 matt Exp $	*/

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by UCHIYAMA Yasushi.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cache_sh3.c,v 1.16 2012/02/12 16:34:10 matt Exp $");

#include "opt_cache.h"

#include <sys/param.h>
#include <sys/systm.h>

#include <sh3/cache.h>
#include <sh3/cache_sh3.h>

#define	round_line(x)	(((x) + 15) & ~15)
#define	trunc_line(x)	((x) & ~15)
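/* e.g. round_line(0x1001) == 0x1010 and trunc_line(0x100f) == 0x1000 */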

void sh3_cache_wbinv_all(void);
void sh3_cache_wbinv_range(vaddr_t, vsize_t);
void sh3_cache_wbinv_range_index(vaddr_t, vsize_t);
void sh3_cache_panic(vaddr_t, vsize_t);
void sh3_cache_nop(vaddr_t, vsize_t);

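/*
 * Cache geometry, computed in sh3_cache_config(): the size in bytes of
 * one way, the bit position of the way-select field in a CCA address,
 * and the mask that extracts the entry-select bits from an address.
 */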
int sh_cache_way_size;
int sh_cache_way_shift;
int sh_cache_entry_mask;

static inline void cache_sh3_op_line_16_nway(int, vaddr_t, uint32_t);
static inline void cache_sh3_op_8lines_16_nway(int, vaddr_t, uint32_t);

void
sh3_cache_config(void)
{
	size_t cache_size;
	uint32_t r;

	/* Determine cache size */
	switch (cpu_product) {
	default:
		/* FALLTHROUGH */
	case CPU_PRODUCT_7708:
		/* FALLTHROUGH */
	case CPU_PRODUCT_7708S:
		/* FALLTHROUGH */
	case CPU_PRODUCT_7708R:
		cache_size = 8 * 1024;
		break;
	case CPU_PRODUCT_7709:
		cache_size = 8 * 1024;
		break;
	case CPU_PRODUCT_7709A:
		/* FALLTHROUGH */
	case CPU_PRODUCT_7706:
		cache_size = 16 * 1024;
		break;
	}

	r = _reg_read_4(SH3_CCR);

	sh_cache_unified = 1;
	sh_cache_enable_unified = (r & SH3_CCR_CE);
	sh_cache_line_size = 16;
	sh_cache_write_through_p0_u0_p3 = r & SH3_CCR_WT;
	sh_cache_write_through_p1 = !(r & SH3_CCR_CB);
	sh_cache_write_through = sh_cache_write_through_p0_u0_p3 &&
	    sh_cache_write_through_p1;

	sh_cache_ram_mode = r & SH3_CCR_RA;
	if (sh_cache_ram_mode) {
		/*
		 * In RAM mode, ways 2 and 3 are used as RAM, which
		 * halves the usable cache.
		 */
		sh_cache_ways = 2;
		sh_cache_size_unified = cache_size / 2;
	} else {
		sh_cache_ways = 4;
		sh_cache_size_unified = cache_size;
	}

	/* size enough to access every entry in a way */
	sh_cache_way_size = sh_cache_size_unified / 4/*way*/;
	/* mask for extracting the entry select bits */
	sh_cache_entry_mask = (sh_cache_way_size - 1) & ~15/*line-mask*/;
	/* shift for way selection (16KB/8KB) */
	sh_cache_way_shift =
	    /* entry bits */
	    ffs(sh_cache_size_unified / (4/*way*/ * 16/*line-size*/)) - 1
	    /* line bits */
	    + 4;
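	/*
	 * Worked example of the arithmetic above (RAM mode off):
	 * 16KB cache: way_size = 4KB, entry_mask = 0x0ff0,
	 *     way_shift = ffs(256) - 1 + 4 = 12;
	 *  8KB cache: way_size = 2KB, entry_mask = 0x07f0,
	 *     way_shift = ffs(128) - 1 + 4 = 11.
	 */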

	sh_cache_ops._icache_sync_all = sh3_cache_wbinv_all;
	sh_cache_ops._icache_sync_range = sh3_cache_wbinv_range;
	sh_cache_ops._icache_sync_range_index = sh3_cache_wbinv_range_index;
	sh_cache_ops._dcache_wbinv_all = sh3_cache_wbinv_all;
	sh_cache_ops._dcache_wbinv_range = sh3_cache_wbinv_range;
	sh_cache_ops._dcache_wbinv_range_index = sh3_cache_wbinv_range_index;
	/* SH3 can't invalidate without write-back */
	sh_cache_ops._dcache_inv_range = sh3_cache_panic;
	if (sh_cache_write_through) {
		sh_cache_ops._dcache_wb_range = sh3_cache_nop;
	} else {
		/* SH3 can't write-back without invalidate */
		sh_cache_ops._dcache_wb_range = sh3_cache_wbinv_range;
	}
}

/*
 * cache_sh3_op_line_16_nway: (index-operation)
 *
 *	Clear the specified bits on a single 16-byte cache line in each
 *	of the n ways.
 */
static inline void
cache_sh3_op_line_16_nway(int n, vaddr_t va, uint32_t bits)
{
	vaddr_t cca;
	int way;

	/* extract entry # */
	va &= sh_cache_entry_mask;

	/* operate for each way */
	for (way = 0; way < n; way++) {
		cca = (SH3_CCA | way << sh_cache_way_shift | va);
		_reg_bclr_4(cca, bits);
	}
}

/*
 * cache_sh3_op_8lines_16_nway: (index-operation)
 *
 *	Clear the specified bits on eight 16-byte cache lines in each
 *	of the n ways.
 */
static inline void
cache_sh3_op_8lines_16_nway(int n, vaddr_t va, uint32_t bits)
{
	volatile uint32_t *cca;
	int way;

	/* extract entry # */
	va &= sh_cache_entry_mask;

	/* operate for each way */
	for (way = 0; way < n; way++) {
		cca = (volatile uint32_t *)
		    (SH3_CCA | way << sh_cache_way_shift | va);
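		/*
		 * cca points at uint32_t, so an index step of 4 advances
		 * 16 bytes, i.e. one cache line per statement below.
		 */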
		cca[ 0] &= ~bits;
		cca[ 4] &= ~bits;
		cca[ 8] &= ~bits;
		cca[12] &= ~bits;
		cca[16] &= ~bits;
		cca[20] &= ~bits;
		cca[24] &= ~bits;
		cca[28] &= ~bits;
	}
}

void
sh3_cache_wbinv_all(void)
{
	vaddr_t va;

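	/*
	 * Clearing U and V by index writes back any dirty line and
	 * invalidates it; each call below handles 8 lines (128 bytes)
	 * in every way.
	 */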
	for (va = 0; va < sh_cache_way_size; va += 16 * 8)
		cache_sh3_op_8lines_16_nway(sh_cache_ways, va, CCA_U | CCA_V);
}

void
sh3_cache_wbinv_range_index(vaddr_t va, vsize_t sz)
{
	vaddr_t eva = round_line(va + sz);

	va = trunc_line(va);

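	/* purge 8 lines (128 bytes) per iteration while the range allows */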
	while ((eva - va) >= (8 * 16)) {
		cache_sh3_op_8lines_16_nway(sh_cache_ways, va, CCA_U | CCA_V);
		va += 16 * 8;
	}

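	/* then finish the remainder one line at a time */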
	while (va < eva) {
		cache_sh3_op_line_16_nway(sh_cache_ways, va, CCA_U | CCA_V);
		va += 16;
	}
}

void
sh3_cache_wbinv_range(vaddr_t va, vsize_t sz)
{
	vaddr_t eva = round_line(va + sz);
	vaddr_t cca;

	va = trunc_line(va);

	while (va < eva) {
		cca = SH3_CCA | CCA_A | (va & sh_cache_entry_mask);
		/*
		 * Associative write (CCA_A): the virtual tag address is
		 * written to the address array.  The MMU translates it
		 * to a physical tag for comparison; U = 0 and V = 0 are
		 * implied by the data written, so a matching line is
		 * written back (if dirty) and invalidated.
		 */
		_reg_write_4(cca, va & CCA_TAGADDR_MASK);
		va += 16;
	}
}

void
sh3_cache_panic(vaddr_t va, vsize_t size)
{

	panic("SH3 can't invalidate without write-back");
}

void
sh3_cache_nop(vaddr_t va, vsize_t sz)
{
	/* NO-OP */
}