/*	$NetBSD: cpufunc.c,v 1.180 2021/01/31 05:59:55 skrll Exp $	*/

/*
 * arm7tdmi support code Copyright (c) 2001 John Fremlin
 * arm8 support code Copyright (c) 1997 ARM Limited
 * arm8 support code Copyright (c) 1997 Causality Limited
 * arm9 support code Copyright (C) 2001 ARM Ltd
 * arm11 support code Copyright (c) 2007 Microsoft
 * cortexa8 support code Copyright (c) 2008 3am Software Foundry
 * cortexa8 improvements Copyright (c) Goeran Weinholt
 * Copyright (c) 1997 Mark Brinicombe.
 * Copyright (c) 1997 Causality Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Causality Limited.
 * 4. The name of Causality Limited may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpufunc.c
 *
 * C functions for supporting CPU / MMU / TLB specific operations.
 *
 * Created	: 30/01/97
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cpufunc.c,v 1.180 2021/01/31 05:59:55 skrll Exp $");

#include "opt_arm_start.h"
#include "opt_compat_netbsd.h"
#include "opt_cpuoptions.h"
#include "opt_cputypes.h"
#include "opt_multiprocessor.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <machine/cpu.h>
#include <machine/bootconfig.h>
#include <arch/arm/arm/disassem.h>

#include <uvm/uvm.h>

#include <arm/cpufunc_proto.h>
#include <arm/cpuconf.h>
#include <arm/locore.h>

#ifdef CPU_XSCALE_80200
#include <arm/xscale/i80200reg.h>
#include <arm/xscale/i80200var.h>
#endif

#ifdef CPU_XSCALE_80321
#include <arm/xscale/i80321reg.h>
#include <arm/xscale/i80321var.h>
#endif

#ifdef CPU_XSCALE_IXP425
#include <arm/xscale/ixp425reg.h>
#include <arm/xscale/ixp425var.h>
#endif

#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321)
#include <arm/xscale/xscalereg.h>
#endif

#if defined(CPU_PJ4B)
#include "opt_mvsoc.h"
#include <machine/bus_defs.h>
#if defined(ARMADAXP)
#include <arm/marvell/armadaxpreg.h>
#include <arm/marvell/armadaxpvar.h>
#endif
#endif

#if defined(CPU_ARMV7) && (defined(CPU_ARMV6) || defined(CPU_PRE_ARMV6))
bool cpu_armv7_p;
#endif

#if defined(CPU_ARMV6) && (defined(CPU_ARMV7) || defined(CPU_PRE_ARMV6))
bool cpu_armv6_p;
#endif


/* PRIMARY CACHE VARIABLES */
#if (ARM_MMU_V6 + ARM_MMU_V7) != 0
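/*
 * For a VIPT primary cache whose way size exceeds the page size this
 * holds the virtual-address bits that can alias; the pmap uses it to
 * colour pages.  It stays zero for PIPT (or small enough) caches.
 */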
u_int	arm_cache_prefer_mask;
#endif
struct	arm_cache_info arm_pcache;
struct	arm_cache_info arm_scache;

u_int	arm_dcache_align;
u_int	arm_dcache_align_mask;

// Define a TTB value that can never be used.
uint32_t cpu_ttb = ~0;
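// (An L1 translation table is 16KB-aligned and the low TTBR bits hold
// attribute flags, so an all-ones value can never match a real table base.)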

/* 1 == use cpu_sleep(), 0 == don't */
int cpu_do_powersave;

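/*
 * Each supported core family supplies a cpu_functions vector below;
 * set_cpufuncs() copies the matching vector into the global `cpufuncs',
 * which callers reach through thin wrappers in <arm/cpufunc.h>.  The
 * wrappers look roughly like this (an illustrative sketch, not the
 * literal definitions -- see the header for the exact names and
 * arguments):
 *
 *	#define	cpu_tlb_flushID()	cpufuncs.cf_tlb_flushID()
 *	#define	cpu_idcache_wbinv_all()	cpufuncs.cf_idcache_wbinv_all()
 *
 * Slots initialised through a (void *) cast reuse an implementation
 * whose prototype differs from the slot's; for cpufunc_nullop and
 * friends the arguments are simply ignored.
 */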
#ifdef CPU_ARM6
struct cpu_functions arm6_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm67_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm67_tlb_flush,
	.cf_tlb_flushID_SE	= arm67_tlb_purge,
	.cf_tlb_flushI		= arm67_tlb_flush,
	.cf_tlb_flushI_SE	= arm67_tlb_purge,
	.cf_tlb_flushD		= arm67_tlb_flush,
	.cf_tlb_flushD_SE	= arm67_tlb_purge,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm67_cache_flush,
	.cf_dcache_wbinv_range	= (void *)arm67_cache_flush,
	.cf_dcache_inv_range	= (void *)arm67_cache_flush,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm67_cache_flush,
	.cf_idcache_wbinv_range	= (void *)arm67_cache_flush,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

#ifdef ARM6_LATE_ABORT
	.cf_dataabt_fixup	= late_abort_fixup,
#else
	.cf_dataabt_fixup	= early_abort_fixup,
#endif
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm67_context_switch,

	.cf_setup		= arm6_setup

};
#endif	/* CPU_ARM6 */

#ifdef CPU_ARM7
struct cpu_functions arm7_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm67_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm67_tlb_flush,
	.cf_tlb_flushID_SE	= arm67_tlb_purge,
	.cf_tlb_flushI		= arm67_tlb_flush,
	.cf_tlb_flushI_SE	= arm67_tlb_purge,
	.cf_tlb_flushD		= arm67_tlb_flush,
	.cf_tlb_flushD_SE	= arm67_tlb_purge,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm67_cache_flush,
	.cf_dcache_wbinv_range	= (void *)arm67_cache_flush,
	.cf_dcache_inv_range	= (void *)arm67_cache_flush,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm67_cache_flush,
	.cf_idcache_wbinv_range	= (void *)arm67_cache_flush,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= late_abort_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm67_context_switch,

	.cf_setup		= arm7_setup

};
#endif	/* CPU_ARM7 */

#ifdef CPU_ARM7TDMI
struct cpu_functions arm7tdmi_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm7tdmi_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm7tdmi_tlb_flushID,
	.cf_tlb_flushID_SE	= arm7tdmi_tlb_flushID_SE,
	.cf_tlb_flushI		= arm7tdmi_tlb_flushID,
	.cf_tlb_flushI_SE	= arm7tdmi_tlb_flushID_SE,
	.cf_tlb_flushD		= arm7tdmi_tlb_flushID,
	.cf_tlb_flushD_SE	= arm7tdmi_tlb_flushID_SE,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm7tdmi_cache_flushID,
	.cf_dcache_wbinv_range	= (void *)arm7tdmi_cache_flushID,
	.cf_dcache_inv_range	= (void *)arm7tdmi_cache_flushID,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm7tdmi_cache_flushID,
	.cf_idcache_wbinv_range	= (void *)arm7tdmi_cache_flushID,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= late_abort_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm7tdmi_context_switch,

	.cf_setup		= arm7tdmi_setup

};
#endif	/* CPU_ARM7TDMI */

#ifdef CPU_ARM8
struct cpu_functions arm8_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm8_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm8_tlb_flushID,
	.cf_tlb_flushID_SE	= arm8_tlb_flushID_SE,
	.cf_tlb_flushI		= arm8_tlb_flushID,
	.cf_tlb_flushI_SE	= arm8_tlb_flushID_SE,
	.cf_tlb_flushD		= arm8_tlb_flushID,
	.cf_tlb_flushD_SE	= arm8_tlb_flushID_SE,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm8_cache_purgeID,
	.cf_dcache_wbinv_range	= (void *)arm8_cache_purgeID,
/*XXX*/	.cf_dcache_inv_range	= (void *)arm8_cache_purgeID,
	.cf_dcache_wb_range	= (void *)arm8_cache_cleanID,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm8_cache_purgeID,
	.cf_idcache_wbinv_range = (void *)arm8_cache_purgeID,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm8_context_switch,

	.cf_setup		= arm8_setup
};
#endif	/* CPU_ARM8 */

#ifdef CPU_ARM9
struct cpu_functions arm9_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm9_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= arm9_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= arm9_icache_sync_all,
	.cf_icache_sync_range	= arm9_icache_sync_range,

	.cf_dcache_wbinv_all	= arm9_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= arm9_dcache_wbinv_range,
/*XXX*/	.cf_dcache_inv_range	= arm9_dcache_wbinv_range,
	.cf_dcache_wb_range	= arm9_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm9_idcache_wbinv_all,
	.cf_idcache_wbinv_range = arm9_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm9_context_switch,

	.cf_setup		= arm9_setup

};
#endif /* CPU_ARM9 */

#if defined(CPU_ARM9E) || defined(CPU_ARM10)
struct cpu_functions armv5_ec_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= armv5_ec_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= arm10_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= arm10_tlb_flushI_SE,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv5_ec_icache_sync_all,
	.cf_icache_sync_range	= armv5_ec_icache_sync_range,

	.cf_dcache_wbinv_all	= armv5_ec_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= armv5_ec_dcache_wbinv_range,
/*XXX*/	.cf_dcache_inv_range	= armv5_ec_dcache_wbinv_range,
	.cf_dcache_wb_range	= armv5_ec_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= armv5_ec_idcache_wbinv_all,
	.cf_idcache_wbinv_range = armv5_ec_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm10_context_switch,

	.cf_setup		= arm10_setup

};
#endif /* CPU_ARM9E || CPU_ARM10 */

#ifdef CPU_ARM10
struct cpu_functions arm10_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= armv5_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= arm10_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= arm10_tlb_flushI_SE,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv5_icache_sync_all,
	.cf_icache_sync_range	= armv5_icache_sync_range,

	.cf_dcache_wbinv_all	= armv5_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= armv5_dcache_wbinv_range,
/*XXX*/	.cf_dcache_inv_range	= armv5_dcache_wbinv_range,
	.cf_dcache_wb_range	= armv5_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= armv5_idcache_wbinv_all,
	.cf_idcache_wbinv_range = armv5_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm10_context_switch,

	.cf_setup		= arm10_setup

};
#endif /* CPU_ARM10 */

#ifdef CPU_ARM11
struct cpu_functions arm11_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm11_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm11_tlb_flushID,
	.cf_tlb_flushID_SE	= arm11_tlb_flushID_SE,
	.cf_tlb_flushI		= arm11_tlb_flushI,
	.cf_tlb_flushI_SE	= arm11_tlb_flushI_SE,
	.cf_tlb_flushD		= arm11_tlb_flushD,
	.cf_tlb_flushD_SE	= arm11_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv6_icache_sync_all,
	.cf_icache_sync_range	= armv6_icache_sync_range,

	.cf_dcache_wbinv_all	= armv6_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= armv6_dcache_wbinv_range,
	.cf_dcache_inv_range	= armv6_dcache_inv_range,
	.cf_dcache_wb_range	= armv6_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= armv6_idcache_wbinv_all,
	.cf_idcache_wbinv_range = armv6_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= arm11_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= arm11_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm11_context_switch,

	.cf_setup		= arm11_setup

};
#endif /* CPU_ARM11 */

#ifdef CPU_ARM1136
struct cpu_functions arm1136_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm11_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm11_tlb_flushID,
	.cf_tlb_flushID_SE	= arm11_tlb_flushID_SE,
	.cf_tlb_flushI		= arm11_tlb_flushI,
	.cf_tlb_flushI_SE	= arm11_tlb_flushI_SE,
	.cf_tlb_flushD		= arm11_tlb_flushD,
	.cf_tlb_flushD_SE	= arm11_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= arm11x6_icache_sync_all,	/* 411920 */
	.cf_icache_sync_range	= arm11x6_icache_sync_range,	/* 371025 */

	.cf_dcache_wbinv_all	= arm11x6_dcache_wbinv_all,	/* 411920 */
	.cf_dcache_wbinv_range	= armv6_dcache_wbinv_range,
	.cf_dcache_inv_range	= armv6_dcache_inv_range,
	.cf_dcache_wb_range	= armv6_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm11x6_idcache_wbinv_all,	/* 411920 */
	.cf_idcache_wbinv_range = arm11x6_idcache_wbinv_range,	/* 371025 */

	/* Other functions */

	.cf_flush_prefetchbuf	= arm11x6_flush_prefetchbuf,
	.cf_drain_writebuf	= arm11_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= arm11_sleep,	/* arm1136_sleep_rev0 */

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm11_context_switch,

	.cf_setup		= arm11x6_setup

};
#endif /* CPU_ARM1136 */

#ifdef CPU_ARM1176
struct cpu_functions arm1176_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm11_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm11_tlb_flushID,
	.cf_tlb_flushID_SE	= arm11_tlb_flushID_SE,
	.cf_tlb_flushI		= arm11_tlb_flushI,
	.cf_tlb_flushI_SE	= arm11_tlb_flushI_SE,
	.cf_tlb_flushD		= arm11_tlb_flushD,
	.cf_tlb_flushD_SE	= arm11_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= arm11x6_icache_sync_all,	/* 415045 */
	.cf_icache_sync_range	= arm11x6_icache_sync_range,	/* 371367 */

	.cf_dcache_wbinv_all	= arm11x6_dcache_wbinv_all,	/* 415045 */
	.cf_dcache_wbinv_range	= armv6_dcache_wbinv_range,
	.cf_dcache_inv_range	= armv6_dcache_inv_range,
	.cf_dcache_wb_range	= armv6_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm11x6_idcache_wbinv_all,	/* 415045 */
	.cf_idcache_wbinv_range = arm11x6_idcache_wbinv_range,	/* 371367 */

	/* Other functions */

	.cf_flush_prefetchbuf	= arm11x6_flush_prefetchbuf,
	.cf_drain_writebuf	= arm11_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= arm11x6_sleep,		/* no ref. */

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm11_context_switch,

	.cf_setup		= arm11x6_setup

};
#endif /* CPU_ARM1176 */


#ifdef CPU_ARM11MPCORE
struct cpu_functions arm11mpcore_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm11_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm11_tlb_flushID,
	.cf_tlb_flushID_SE	= arm11_tlb_flushID_SE,
	.cf_tlb_flushI		= arm11_tlb_flushI,
	.cf_tlb_flushI_SE	= arm11_tlb_flushI_SE,
	.cf_tlb_flushD		= arm11_tlb_flushD,
	.cf_tlb_flushD_SE	= arm11_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv6_icache_sync_all,
	.cf_icache_sync_range	= armv5_icache_sync_range,

	.cf_dcache_wbinv_all	= armv6_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= armv5_dcache_wbinv_range,
	.cf_dcache_inv_range	= armv5_dcache_inv_range,
	.cf_dcache_wb_range	= armv5_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= armv6_idcache_wbinv_all,
	.cf_idcache_wbinv_range = armv5_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= arm11_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= arm11_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm11_context_switch,

	.cf_setup		= arm11mpcore_setup

};
#endif /* CPU_ARM11MPCORE */

#ifdef CPU_SA110
struct cpu_functions sa110_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= sa1_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= sa1_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= sa1_cache_syncI,
	.cf_icache_sync_range	= sa1_cache_syncI_rng,

	.cf_dcache_wbinv_all	= sa1_cache_purgeD,
	.cf_dcache_wbinv_range	= sa1_cache_purgeD_rng,
/*XXX*/	.cf_dcache_inv_range	= sa1_cache_purgeD_rng,
	.cf_dcache_wb_range	= sa1_cache_cleanD_rng,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= sa1_cache_purgeID,
	.cf_idcache_wbinv_range	= sa1_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= sa110_context_switch,

	.cf_setup		= sa110_setup
};
#endif	/* CPU_SA110 */

#if defined(CPU_SA1100) || defined(CPU_SA1110)
struct cpu_functions sa11x0_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= sa1_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= sa1_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= sa1_cache_syncI,
	.cf_icache_sync_range	= sa1_cache_syncI_rng,

	.cf_dcache_wbinv_all	= sa1_cache_purgeD,
	.cf_dcache_wbinv_range	= sa1_cache_purgeD_rng,
/*XXX*/	.cf_dcache_inv_range	= sa1_cache_purgeD_rng,
	.cf_dcache_wb_range	= sa1_cache_cleanD_rng,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= sa1_cache_purgeID,
	.cf_idcache_wbinv_range	= sa1_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf	= sa11x0_drain_readbuf,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= sa11x0_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= sa11x0_context_switch,

	.cf_setup		= sa11x0_setup
};
#endif	/* CPU_SA1100 || CPU_SA1110 */

#if defined(CPU_FA526)
struct cpu_functions fa526_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= fa526_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= fa526_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= fa526_tlb_flushI_SE,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= fa526_icache_sync_all,
	.cf_icache_sync_range	= fa526_icache_sync_range,

	.cf_dcache_wbinv_all	= fa526_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= fa526_dcache_wbinv_range,
	.cf_dcache_inv_range	= fa526_dcache_inv_range,
	.cf_dcache_wb_range	= fa526_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= fa526_idcache_wbinv_all,
	.cf_idcache_wbinv_range	= fa526_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= fa526_flush_prefetchbuf,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= fa526_flush_brnchtgt_E,

	.cf_sleep		= fa526_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= fa526_context_switch,

	.cf_setup		= fa526_setup
};
#endif	/* CPU_FA526 */

#ifdef CPU_IXP12X0
struct cpu_functions ixp12x0_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= sa1_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= sa1_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= sa1_cache_syncI,
	.cf_icache_sync_range	= sa1_cache_syncI_rng,

	.cf_dcache_wbinv_all	= sa1_cache_purgeD,
	.cf_dcache_wbinv_range	= sa1_cache_purgeD_rng,
/*XXX*/	.cf_dcache_inv_range	= sa1_cache_purgeD_rng,
	.cf_dcache_wb_range	= sa1_cache_cleanD_rng,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= sa1_cache_purgeID,
	.cf_idcache_wbinv_range	= sa1_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf	= ixp12x0_drain_readbuf,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= ixp12x0_context_switch,

	.cf_setup		= ixp12x0_setup
};
#endif	/* CPU_IXP12X0 */

#if defined(CPU_XSCALE)
struct cpu_functions xscale_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= xscale_cpwait,

	/* MMU functions */

	.cf_control		= xscale_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= xscale_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= xscale_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= xscale_cache_syncI,
	.cf_icache_sync_range	= xscale_cache_syncI_rng,

	.cf_dcache_wbinv_all	= xscale_cache_purgeD,
	.cf_dcache_wbinv_range	= xscale_cache_purgeD_rng,
	.cf_dcache_inv_range	= xscale_cache_flushD_rng,
	.cf_dcache_wb_range	= xscale_cache_cleanD_rng,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= xscale_cache_purgeID,
	.cf_idcache_wbinv_range = xscale_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= xscale_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= xscale_context_switch,

	.cf_setup		= xscale_setup
};
#endif /* CPU_XSCALE */

#if defined(CPU_ARMV7)
struct cpu_functions armv7_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= armv7_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv7up_tlb_flushID,
	.cf_tlb_flushID_SE	= armv7up_tlb_flushID_SE,
	.cf_tlb_flushI		= armv7up_tlb_flushI,
	.cf_tlb_flushI_SE	= armv7up_tlb_flushI_SE,
	.cf_tlb_flushD		= armv7up_tlb_flushD,
	.cf_tlb_flushD_SE	= armv7up_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv7_icache_sync_all,
	.cf_icache_sync_range	= armv7_icache_sync_range,

	.cf_dcache_wbinv_all	= armv7_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= armv7_dcache_wbinv_range,
	.cf_dcache_inv_range	= armv7_dcache_inv_range,
	.cf_dcache_wb_range	= armv7_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= armv7_idcache_wbinv_all,
	.cf_idcache_wbinv_range = armv7_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv7_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= armv7_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= armv7_context_switch,

	.cf_setup		= armv7_setup

};
#endif /* CPU_ARMV7 */

#ifdef CPU_PJ4B
struct cpu_functions pj4bv7_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= armv7_drain_writebuf,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= armv7_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv7up_tlb_flushID,
	.cf_tlb_flushID_SE	= armv7up_tlb_flushID_SE,
	.cf_tlb_flushI		= armv7up_tlb_flushID,
	.cf_tlb_flushI_SE	= armv7up_tlb_flushID_SE,
	.cf_tlb_flushD		= armv7up_tlb_flushID,
	.cf_tlb_flushD_SE	= armv7up_tlb_flushID_SE,

	/* Cache operations (see also pj4bv7_setup) */
	.cf_icache_sync_all	= armv7_idcache_wbinv_all,
	.cf_icache_sync_range	= armv7_icache_sync_range,

	.cf_dcache_wbinv_all	= armv7_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= armv7_dcache_wbinv_range,
	.cf_dcache_inv_range	= armv7_dcache_inv_range,
	.cf_dcache_wb_range	= armv7_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= armv7_idcache_wbinv_all,
	.cf_idcache_wbinv_range	= armv7_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv7_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= pj4b_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= armv7_context_switch,

	.cf_setup		= pj4bv7_setup
};
#endif /* CPU_PJ4B */

#ifdef CPU_SHEEVA
struct cpu_functions sheeva_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= armv5_ec_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= arm10_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= arm10_tlb_flushI_SE,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv5_ec_icache_sync_all,
	.cf_icache_sync_range	= armv5_ec_icache_sync_range,

	.cf_dcache_wbinv_all	= armv5_ec_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= sheeva_dcache_wbinv_range,
	.cf_dcache_inv_range	= sheeva_dcache_inv_range,
	.cf_dcache_wb_range	= sheeva_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= armv5_ec_idcache_wbinv_all,
	.cf_idcache_wbinv_range = sheeva_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)sheeva_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm10_context_switch,

	.cf_setup		= sheeva_setup
};
#endif /* CPU_SHEEVA */


/*
 * Global constants also used by locore.s
 */

struct cpu_functions cpufuncs;
u_int cputype;

#if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) || \
    defined(CPU_ARM9E) || defined(CPU_ARM10) || defined(CPU_FA526) || \
    defined(CPU_SHEEVA) || \
    defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
    defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425) || \
    defined(CPU_ARMV6) || defined(CPU_ARMV7)
static void get_cachetype_cp15(void);

/* Additional cache information local to this file.  Log2 of some of the
   above numbers.  */
static int	arm_dcache_log2_nsets;
static int	arm_dcache_log2_assoc;
static int	arm_dcache_log2_linesize;

#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
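/*
 * Select a cache level/side and read back its geometry: write the
 * cache size selection register, synchronise, then read the cache
 * size ID register.  The barrier between the two is required so the
 * read observes the new selection.
 */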
static inline u_int
get_cachesize_cp15(int cssr)
{
#if defined(CPU_ARMV7)
	__asm volatile(".arch\tarmv7a");

	armreg_csselr_write(cssr);
	isb();			 /* sync to the new cssr */

#else
	__asm volatile("mcr p15, 1, %0, c0, c0, 2" :: "r" (cssr) : "memory");
#endif
	return armreg_ccsidr_read();
}
#endif

#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
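/*
 * Decode one CLIDR Ctype field (already shifted down to bits [2:0]):
 * 0 none, 1 I-cache only, 2 D-cache only, 3 separate I and D, 4 unified.
 * Hence (clidr & 6) tests for a data or unified cache at this level,
 * and (clidr == 4) for a unified one.
 */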
static void
get_cacheinfo_clidr(struct arm_cache_info *info, u_int level, u_int clidr)
{
	u_int csid;

	if (clidr & 6) {
		csid = get_cachesize_cp15(level << 1); /* select dcache values */
		info->dcache_sets = CPU_CSID_NUMSETS(csid) + 1;
		info->dcache_ways = CPU_CSID_ASSOC(csid) + 1;
		info->dcache_line_size = 1U << (CPU_CSID_LEN(csid) + 4);
		info->dcache_way_size =
		    info->dcache_line_size * info->dcache_sets;
		info->dcache_size = info->dcache_way_size * info->dcache_ways;

		if (level == 0) {
			arm_dcache_log2_assoc = CPU_CSID_ASSOC(csid) + 1;
			arm_dcache_log2_linesize = CPU_CSID_LEN(csid) + 4;
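			/* 31 - clz(2n - 1) computes ceil(log2(n)). */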
			arm_dcache_log2_nsets =
			    31 - __builtin_clz(info->dcache_sets*2-1);
		}
	}

	info->cache_unified = (clidr == 4);

	if (level > 0) {
		info->dcache_type = CACHE_TYPE_PIPT;
		info->icache_type = CACHE_TYPE_PIPT;
	}

	if (info->cache_unified) {
		info->icache_ways = info->dcache_ways;
		info->icache_line_size = info->dcache_line_size;
		info->icache_way_size = info->dcache_way_size;
		info->icache_size = info->dcache_size;
	} else {
		csid = get_cachesize_cp15((level << 1)|CPU_CSSR_InD); /* select icache values */
		info->icache_sets = CPU_CSID_NUMSETS(csid) + 1;
		info->icache_ways = CPU_CSID_ASSOC(csid) + 1;
		info->icache_line_size = 1U << (CPU_CSID_LEN(csid) + 4);
		info->icache_way_size = info->icache_line_size * info->icache_sets;
		info->icache_size = info->icache_way_size * info->icache_ways;
	}
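	/* If both L1 ways fit within a page, no virtual aliasing is possible. */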
	if (level == 0
	    && info->dcache_way_size <= PAGE_SIZE
	    && info->icache_way_size <= PAGE_SIZE) {
		arm_cache_prefer_mask = 0;
	}
}
#endif /* (ARM_MMU_V6 + ARM_MMU_V7) > 0 */

static void
get_cachetype_cp15(void)
{
	u_int ctype, isize, dsize;
	u_int multiplier;

	ctype = armreg_ctr_read();

	/*
	 * ...and thus spake the ARM ARM:
	 *
	 * If an <opcode2> value corresponding to an unimplemented or
	 * reserved ID register is encountered, the System Control
	 * processor returns the value of the main ID register.
	 */
	if (ctype == cpu_idnum())
		goto out;

#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
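	/*
	 * Cache-type register format 4 is the ARMv7 layout; the cache
	 * geometry is then described by CLIDR/CCSIDR rather than by
	 * the CTR size fields themselves.
	 */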
	if (CPU_CT_FORMAT(ctype) == 4) {
		u_int clidr = armreg_clidr_read();

		if (CPU_CT4_L1IPOLICY(ctype) == CPU_CT4_L1_PIPT) {
			arm_pcache.icache_type = CACHE_TYPE_PIPT;
		} else {
			arm_pcache.icache_type = CACHE_TYPE_VIPT;
			arm_cache_prefer_mask = PAGE_SIZE;
		}
#ifdef CPU_CORTEX
		if (CPU_ID_CORTEX_P(cpu_idnum())) {
			arm_pcache.dcache_type = CACHE_TYPE_PIPT;
		} else
#endif
		{
			arm_pcache.dcache_type = CACHE_TYPE_VIPT;
		}
		arm_pcache.cache_type = CPU_CT_CTYPE_WB14;

		get_cacheinfo_clidr(&arm_pcache, 0, clidr & 7);
		arm_dcache_align = arm_pcache.dcache_line_size;
		clidr >>= 3;
		if (clidr & 7) {
			get_cacheinfo_clidr(&arm_scache, 1, clidr & 7);
			if (arm_scache.dcache_line_size < arm_dcache_align)
				arm_dcache_align = arm_scache.dcache_line_size;
		}
		/*
		 * The pmap cleans an entire way for an exec page so
		 * we don't care that it's VIPT anymore.
		 */
		if (arm_pcache.dcache_type == CACHE_TYPE_PIPT) {
			arm_cache_prefer_mask = 0;
		}
		goto out;
	}
#endif /* ARM_MMU_V6 + ARM_MMU_V7 > 0 */

	if ((ctype & CPU_CT_S) == 0)
		arm_pcache.cache_unified = 1;

	/*
	 * If you want to know how this code works, go read the ARM ARM.
	 */
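	/*
	 * Pre-v7 CTR layout: the ISIZE/DSIZE fields pack log2-encoded
	 * size, associativity and line length, and the M bit selects a
	 * multiplier of 3 instead of 2 (for the 0.75x cache sizes).
	 */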

	arm_pcache.cache_type = CPU_CT_CTYPE(ctype);

	if (arm_pcache.cache_unified == 0) {
		isize = CPU_CT_ISIZE(ctype);
		multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
		arm_pcache.icache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
		if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
			if (isize & CPU_CT_xSIZE_M)
				arm_pcache.icache_line_size = 0; /* not present */
			else
				arm_pcache.icache_ways = 1;
		} else {
			arm_pcache.icache_ways = multiplier <<
			    (CPU_CT_xSIZE_ASSOC(isize) - 1);
#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
			arm_pcache.icache_type = CACHE_TYPE_VIPT;
			if (CPU_CT_xSIZE_P & isize)
				arm_cache_prefer_mask |=
				    __BIT(9 + CPU_CT_xSIZE_SIZE(isize)
					  - CPU_CT_xSIZE_ASSOC(isize))
				    - PAGE_SIZE;
#endif
		}
		arm_pcache.icache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
		arm_pcache.icache_way_size =
		    __BIT(9 + CPU_CT_xSIZE_SIZE(isize) - CPU_CT_xSIZE_ASSOC(isize));
	}

	dsize = CPU_CT_DSIZE(ctype);
	multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
	arm_pcache.dcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
	if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
		if (dsize & CPU_CT_xSIZE_M)
			arm_pcache.dcache_line_size = 0; /* not present */
		else
			arm_pcache.dcache_ways = 1;
	} else {
		arm_pcache.dcache_ways = multiplier <<
		    (CPU_CT_xSIZE_ASSOC(dsize) - 1);
#if (ARM_MMU_V6) > 0
		arm_pcache.dcache_type = CACHE_TYPE_VIPT;
		if ((CPU_CT_xSIZE_P & dsize)
		    && CPU_ID_ARM11_P(curcpu()->ci_arm_cpuid)) {
			arm_cache_prefer_mask |=
			    __BIT(9 + CPU_CT_xSIZE_SIZE(dsize)
				  - CPU_CT_xSIZE_ASSOC(dsize)) - PAGE_SIZE;
		}
#endif
	}
	arm_pcache.dcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
	arm_pcache.dcache_way_size =
	    __BIT(9 + CPU_CT_xSIZE_SIZE(dsize) - CPU_CT_xSIZE_ASSOC(dsize));

	arm_dcache_align = arm_pcache.dcache_line_size;

	arm_dcache_log2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
	arm_dcache_log2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
	arm_dcache_log2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
	    CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);

 out:
	KASSERTMSG(arm_dcache_align <= CACHE_LINE_SIZE,
	    "arm_dcache_align=%u CACHE_LINE_SIZE=%u",
	    arm_dcache_align, CACHE_LINE_SIZE);
	arm_dcache_align_mask = arm_dcache_align - 1;
}
#endif /* ARM7TDMI || ARM8 || ARM9 || ARM9E || ARM10 || FA526 || SHEEVA || XSCALE || ARMV6 || ARMV7 */

#if defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_SA110) || \
    defined(CPU_SA1100) || defined(CPU_SA1110) || defined(CPU_IXP12X0)
/* Cache information for CPUs without cache type registers. */
struct cachetab {
	uint32_t ct_cpuid;
	int	ct_pcache_type;
	int	ct_pcache_unified;
	int	ct_pdcache_size;
	int	ct_pdcache_line_size;
	int	ct_pdcache_ways;
	int	ct_picache_size;
	int	ct_picache_line_size;
	int	ct_picache_ways;
};

struct cachetab cachetab[] = {
    /* cpuid,           cache type,       u,  dsiz, ls, wy,  isiz, ls, wy */
    { CPU_ID_ARM2,      0,                1,     0,  0,  0,     0,  0,  0 },
    { CPU_ID_ARM250,    0,                1,     0,  0,  0,     0,  0,  0 },
    { CPU_ID_ARM3,      CPU_CT_CTYPE_WT,  1,  4096, 16, 64,     0,  0,  0 },
    { CPU_ID_ARM610,	CPU_CT_CTYPE_WT,  1,  4096, 16, 64,     0,  0,  0 },
    { CPU_ID_ARM710,    CPU_CT_CTYPE_WT,  1,  8192, 32,  4,     0,  0,  0 },
    { CPU_ID_ARM7500,   CPU_CT_CTYPE_WT,  1,  4096, 16,  4,     0,  0,  0 },
    { CPU_ID_ARM710A,   CPU_CT_CTYPE_WT,  1,  8192, 16,  4,     0,  0,  0 },
    { CPU_ID_ARM7500FE, CPU_CT_CTYPE_WT,  1,  4096, 16,  4,     0,  0,  0 },
    /* XXX is this type right for SA-1? */
    { CPU_ID_SA110,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 },
    { CPU_ID_SA1100,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
    { CPU_ID_SA1110,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
    { CPU_ID_IXP1200,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 }, /* XXX */
    { 0, 0, 0, 0, 0, 0, 0, 0}
};

static void get_cachetype_table(void);

static void
get_cachetype_table(void)
{
	int i;
	uint32_t cpuid = cpu_idnum();

	for (i = 0; cachetab[i].ct_cpuid != 0; i++) {
		if (cachetab[i].ct_cpuid == (cpuid & CPU_ID_CPU_MASK)) {
			arm_pcache.cache_type = cachetab[i].ct_pcache_type;
			arm_pcache.cache_unified = cachetab[i].ct_pcache_unified;
			arm_pcache.dcache_size = cachetab[i].ct_pdcache_size;
			arm_pcache.dcache_line_size =
			    cachetab[i].ct_pdcache_line_size;
			arm_pcache.dcache_ways = cachetab[i].ct_pdcache_ways;
			if (arm_pcache.dcache_ways) {
				arm_pcache.dcache_way_size =
				    arm_pcache.dcache_size
				    / arm_pcache.dcache_ways;
			}
			arm_pcache.icache_size = cachetab[i].ct_picache_size;
			arm_pcache.icache_line_size =
			    cachetab[i].ct_picache_line_size;
			arm_pcache.icache_ways = cachetab[i].ct_picache_ways;
			if (arm_pcache.icache_ways) {
				arm_pcache.icache_way_size =
				    arm_pcache.icache_size
				    / arm_pcache.icache_ways;
			}
		}
	}

	arm_dcache_align = arm_pcache.dcache_line_size;
	arm_dcache_align_mask = arm_dcache_align - 1;
}

#endif /* ARM6 || ARM7 || SA110 || SA1100 || SA1110 || IXP12X0 */


#if defined(CPU_CORTEX) || defined(CPU_PJ4B)
static inline void
set_cpufuncs_mpfixup(void)
{
#ifdef MULTIPROCESSOR
	/* If MP extensions are present, patch in MP TLB ops */
	const uint32_t mpidr = armreg_mpidr_read();
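	/*
	 * MPIDR.M (bit 31) set with MPIDR.U (bit 30) clear means the core
	 * implements the multiprocessing extensions and is part of a
	 * multiprocessor system, so the broadcast (inner-shareable) TLB
	 * operations must be used in place of the local-only ones.
	 */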
	if ((mpidr & (MPIDR_MP|MPIDR_U)) == MPIDR_MP) {
		cpufuncs.cf_tlb_flushID = armv7mp_tlb_flushID;
		cpufuncs.cf_tlb_flushID_SE = armv7mp_tlb_flushID_SE;
		cpufuncs.cf_tlb_flushI = armv7mp_tlb_flushI;
		cpufuncs.cf_tlb_flushI_SE = armv7mp_tlb_flushI_SE;
		cpufuncs.cf_tlb_flushD = armv7mp_tlb_flushD;
		cpufuncs.cf_tlb_flushD_SE = armv7mp_tlb_flushD_SE;
	}
#endif
}
#endif

/*
 * Cannot panic here as we may not have a console yet ...
 */

int
set_cpufuncs(void)
{
	if (cputype == 0) {
		cputype = cpufunc_id();
		cputype &= CPU_ID_CPU_MASK;
	}

	/*
	 * NOTE: cpu_do_powersave defaults to off.  If we encounter a
	 * CPU type where we want to use it by default, then we set it.
	 */
#ifdef CPU_ARM6
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    (cputype & 0x00000f00) == 0x00000600) {
		cpufuncs = arm6_cpufuncs;
		get_cachetype_table();
		pmap_pte_init_generic();
		return 0;
	}
#endif	/* CPU_ARM6 */
#ifdef CPU_ARM7
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    CPU_ID_IS7(cputype) &&
	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V3) {
		cpufuncs = arm7_cpufuncs;
		get_cachetype_table();
		pmap_pte_init_generic();
		return 0;
	}
#endif	/* CPU_ARM7 */
#ifdef CPU_ARM7TDMI
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    CPU_ID_IS7(cputype) &&
	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V4T) {
		cpufuncs = arm7tdmi_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		return 0;
	}
#endif
#ifdef CPU_ARM8
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    (cputype & 0x0000f000) == 0x00008000) {
		cpufuncs = arm8_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_arm8();
		return 0;
	}
#endif	/* CPU_ARM8 */
#ifdef CPU_ARM9
	if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
	     (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
	    (cputype & 0x0000f000) == 0x00009000) {
		cpufuncs = arm9_cpufuncs;
		get_cachetype_cp15();
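		/*
		 * Precompute the set/way increments and wrap-around
		 * limits that the arm9 set/way cache routines iterate
		 * with.
		 */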
		arm9_dcache_sets_inc = 1U << arm_dcache_log2_linesize;
		arm9_dcache_sets_max =
		    (1U << (arm_dcache_log2_linesize + arm_dcache_log2_nsets)) -
		    arm9_dcache_sets_inc;
		arm9_dcache_index_inc = 1U << (32 - arm_dcache_log2_assoc);
		arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
#ifdef	ARM9_CACHE_WRITE_THROUGH
		pmap_pte_init_arm9();
#else
		pmap_pte_init_generic();
#endif
		return 0;
	}
#endif /* CPU_ARM9 */
#if defined(CPU_ARM9E) || defined(CPU_ARM10)
	if (cputype == CPU_ID_ARM926EJS ||
	    cputype == CPU_ID_ARM1026EJS) {
		cpufuncs = armv5_ec_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		return 0;
	}
#endif /* CPU_ARM9E || CPU_ARM10 */
#if defined(CPU_SHEEVA)
	if (cputype == CPU_ID_MV88SV131 ||
	    cputype == CPU_ID_MV88FR571_VD) {
		cpufuncs = sheeva_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		cpu_do_powersave = 1;			/* Enable powersave */
		return 0;
	}
#endif /* CPU_SHEEVA */
#ifdef CPU_ARM10
	if (/* cputype == CPU_ID_ARM1020T || */
	    cputype == CPU_ID_ARM1020E) {
		/*
		 * Select write-through caching (this isn't really an
		 * option on ARM1020T).
		 */
		cpufuncs = arm10_cpufuncs;
		get_cachetype_cp15();
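		/* Same set/way loop parameters, here for the armv5 routines. */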
1718 		armv5_dcache_sets_inc = 1U << arm_dcache_log2_linesize;
1719 		armv5_dcache_sets_max =
1720 		    (1U << (arm_dcache_log2_linesize + arm_dcache_log2_nsets)) -
1721 		    armv5_dcache_sets_inc;
1722 		armv5_dcache_index_inc = 1U << (32 - arm_dcache_log2_assoc);
1723 		armv5_dcache_index_max = 0U - armv5_dcache_index_inc;
1724 		pmap_pte_init_generic();
1725 		return 0;
1726 	}
1727 #endif /* CPU_ARM10 */
1728 
1729 
1730 #if defined(CPU_ARM11MPCORE)
1731 	if (cputype == CPU_ID_ARM11MPCORE) {
1732 		cpufuncs = arm11mpcore_cpufuncs;
1733 #if defined(CPU_ARMV7) || defined(CPU_PRE_ARMV6)
1734 		cpu_armv6_p = true;
1735 #endif
1736 		get_cachetype_cp15();
1737 		armv5_dcache_sets_inc = 1U << arm_dcache_log2_linesize;
1738 		armv5_dcache_sets_max = (1U << (arm_dcache_log2_linesize +
1739 			arm_dcache_log2_nsets)) - armv5_dcache_sets_inc;
1740 		armv5_dcache_index_inc = 1U << (32 - arm_dcache_log2_assoc);
1741 		armv5_dcache_index_max = 0U - armv5_dcache_index_inc;
1742 		cpu_do_powersave = 1;			/* Enable powersave */
1743 		pmap_pte_init_arm11mpcore();
1744 		if (arm_cache_prefer_mask)
1745 			uvmexp.ncolors = (arm_cache_prefer_mask >> PGSHIFT) + 1;
1746 
1747 		return 0;
1748 
1749 	}
1750 #endif	/* CPU_ARM11MPCORE */
1751 
1752 #if defined(CPU_ARM11)
1753 	if (cputype == CPU_ID_ARM1136JS ||
1754 	    cputype == CPU_ID_ARM1136JSR1 ||
1755 	    cputype == CPU_ID_ARM1176JZS) {
1756 		cpufuncs = arm11_cpufuncs;
1757 #if defined(CPU_ARM1136)
1758 		if (cputype == CPU_ID_ARM1136JS ||
1759 		    cputype == CPU_ID_ARM1136JSR1) {
1760 			cpufuncs = arm1136_cpufuncs;
1761 			if (cputype == CPU_ID_ARM1136JS)
1762 				cpufuncs.cf_sleep = arm1136_sleep_rev0;
1763 		}
1764 #endif
1765 #if defined(CPU_ARM1176)
1766 		if (cputype == CPU_ID_ARM1176JZS) {
1767 			cpufuncs = arm1176_cpufuncs;
1768 		}
1769 #endif
1770 #if defined(CPU_ARMV7) || defined(CPU_PRE_ARMV6)
1771 		cpu_armv6_p = true;
1772 #endif
1773 		cpu_do_powersave = 1;			/* Enable powersave */
1774 		get_cachetype_cp15();
1775 #ifdef ARM11_CACHE_WRITE_THROUGH
1776 		pmap_pte_init_arm11();
1777 #else
1778 		pmap_pte_init_armv6();
1779 #endif
1780 		if (arm_cache_prefer_mask)
1781 			uvmexp.ncolors = (arm_cache_prefer_mask >> PGSHIFT) + 1;
1782 
1783 		/*
1784 		 * Start and reset the PMC Cycle Counter.
1785 		 */
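		/* (E enables the counters; P and C reset the event and cycle counters.) */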
1786 		armreg_pmcrv6_write(ARM11_PMCCTL_E | ARM11_PMCCTL_P | ARM11_PMCCTL_C);
1787 		return 0;
1788 	}
1789 #endif /* CPU_ARM11 */
1790 #ifdef CPU_SA110
1791 	if (cputype == CPU_ID_SA110) {
1792 		cpufuncs = sa110_cpufuncs;
1793 		get_cachetype_table();
1794 		pmap_pte_init_sa1();
1795 		return 0;
1796 	}
1797 #endif	/* CPU_SA110 */
1798 #ifdef CPU_SA1100
1799 	if (cputype == CPU_ID_SA1100) {
1800 		cpufuncs = sa11x0_cpufuncs;
1801 		get_cachetype_table();
1802 		pmap_pte_init_sa1();
1803 
1804 		/* Use powersave on this CPU. */
1805 		cpu_do_powersave = 1;
1806 
1807 		return 0;
1808 	}
1809 #endif	/* CPU_SA1100 */
1810 #ifdef CPU_SA1110
1811 	if (cputype == CPU_ID_SA1110) {
1812 		cpufuncs = sa11x0_cpufuncs;
1813 		get_cachetype_table();
1814 		pmap_pte_init_sa1();
1815 
1816 		/* Use powersave on this CPU. */
1817 		cpu_do_powersave = 1;
1818 
1819 		return 0;
1820 	}
1821 #endif	/* CPU_SA1110 */
1822 #ifdef CPU_FA526
1823 	if (cputype == CPU_ID_FA526) {
1824 		cpufuncs = fa526_cpufuncs;
1825 		get_cachetype_cp15();
1826 		pmap_pte_init_generic();
1827 
1828 		/* Use powersave on this CPU. */
1829 		cpu_do_powersave = 1;
1830 
1831 		return 0;
1832 	}
1833 #endif	/* CPU_FA526 */
1834 #ifdef CPU_IXP12X0
1835 	if (cputype == CPU_ID_IXP1200) {
1836 		cpufuncs = ixp12x0_cpufuncs;
1837 		get_cachetype_table();
1838 		pmap_pte_init_sa1();
1839 		return 0;
1840 	}
1841 #endif  /* CPU_IXP12X0 */
1842 #ifdef CPU_XSCALE_80200
1843 	if (cputype == CPU_ID_80200) {
1844 		int rev = cpufunc_id() & CPU_ID_REVISION_MASK;
1845 
1846 		i80200_icu_init();
1847 
1848 		/*
1849 		 * Reset the Performance Monitoring Unit to a
1850 		 * pristine state:
1851 		 *	- CCNT, PMN0, PMN1 reset to 0
1852 		 *	- overflow indications cleared
1853 		 *	- all counters disabled
1854 		 */
1855 		__asm volatile("mcr p14, 0, %0, c0, c0, 0"
1856 			:
1857 			: "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
1858 			       PMNC_CC_IF));
1859 
1860 #if defined(XSCALE_CCLKCFG)
1861 		/*
1862 		 * Crank CCLKCFG to maximum legal value.
1863 		 */
1864 		__asm volatile ("mcr p14, 0, %0, c6, c0, 0"
1865 			:
1866 			: "r" (XSCALE_CCLKCFG));
1867 #endif
1868 
1869 		/*
1870 		 * XXX Disable ECC in the Bus Controller Unit; we
1871 		 * don't really support it, yet.  Clear any pending
1872 		 * error indications.
1873 		 */
1874 		__asm volatile("mcr p13, 0, %0, c0, c1, 0"
1875 			:
1876 			: "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));
1877 
1878 		cpufuncs = xscale_cpufuncs;
1879 
1880 		/*
1881 		 * i80200 errata: Step-A0 and A1 have a bug where
1882 		 * D$ dirty bits are not cleared on "invalidate by
1883 		 * address".
1884 		 *
1885 		 * Workaround: Clean cache line before invalidating.
1886 		 */
1887 		if (rev == 0 || rev == 1)
1888 			cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng;
1889 
1890 		get_cachetype_cp15();
1891 		pmap_pte_init_xscale();
1892 		return 0;
1893 	}
1894 #endif /* CPU_XSCALE_80200 */
1895 #ifdef CPU_XSCALE_80321
1896 	if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
1897 	    cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0 ||
1898 	    cputype == CPU_ID_80219_400 || cputype == CPU_ID_80219_600) {
1899 		i80321_icu_init();
1900 
1901 		/*
1902 		 * Reset the Performance Monitoring Unit to a
1903 		 * pristine state:
1904 		 *	- CCNT, PMN0, PMN1 reset to 0
1905 		 *	- overflow indications cleared
1906 		 *	- all counters disabled
1907 		 */
1908 		__asm volatile("mcr p14, 0, %0, c0, c0, 0"
1909 			:
1910 			: "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
1911 			       PMNC_CC_IF));
1912 
1913 		cpufuncs = xscale_cpufuncs;
1914 
1915 		get_cachetype_cp15();
1916 		pmap_pte_init_xscale();
1917 		return 0;
1918 	}
1919 #endif /* CPU_XSCALE_80321 */
1920 #ifdef __CPU_XSCALE_PXA2XX
1921 	/* Ignore the core revision when matching PXA2xx CPUs. */
1922 	if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X ||
1923 	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
1924 	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {
1925 
1926 		cpufuncs = xscale_cpufuncs;
1927 
1928 		get_cachetype_cp15();
1929 		pmap_pte_init_xscale();
1930 
1931 		/* Use powersave on this CPU. */
1932 		cpu_do_powersave = 1;
1933 
1934 		return 0;
1935 	}
1936 #endif /* __CPU_XSCALE_PXA2XX */
1937 #ifdef CPU_XSCALE_IXP425
1938 	if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
1939 	    cputype == CPU_ID_IXP425_266) {
1940 		ixp425_icu_init();
1941 
1942 		cpufuncs = xscale_cpufuncs;
1943 
1944 		get_cachetype_cp15();
1945 		pmap_pte_init_xscale();
1946 
1947 		return 0;
1948 	}
1949 #endif /* CPU_XSCALE_IXP425 */
1950 #if defined(CPU_CORTEX)
1951 	if (CPU_ID_CORTEX_P(cputype)) {
1952 		cpufuncs = armv7_cpufuncs;
1953 		set_cpufuncs_mpfixup();
1954 		cpu_do_powersave = 1;			/* Enable powersave */
1955 #if defined(CPU_ARMV6) || defined(CPU_PRE_ARMV6)
1956 		cpu_armv7_p = true;
1957 #endif
1958 		get_cachetype_cp15();
1959 		pmap_pte_init_armv7();
1960 		if (arm_cache_prefer_mask)
1961 			uvmexp.ncolors = (arm_cache_prefer_mask >> PGSHIFT) + 1;
1962 
1963 		/*
1964 		 * Start and reset the PMC Cycle Counter.
1965 		 */
1966 		armreg_pmcr_write(ARM11_PMCCTL_E | ARM11_PMCCTL_P | ARM11_PMCCTL_C);
1967 		armreg_pmcntenset_write(CORTEX_CNTENS_C);
1968 		return 0;
1969 	}
1970 #endif /* CPU_CORTEX */
1971 
1972 #if defined(CPU_PJ4B)
1973 	if ((cputype == CPU_ID_MV88SV581X_V6 ||
1974 	    cputype == CPU_ID_MV88SV581X_V7 ||
1975 	    cputype == CPU_ID_MV88SV584X_V7 ||
1976 	    cputype == CPU_ID_ARM_88SV581X_V6 ||
1977 	    cputype == CPU_ID_ARM_88SV581X_V7) &&
1978 	    (armreg_pfr0_read() & ARM_PFR0_THUMBEE_MASK)) {
1979 		cpufuncs = pj4bv7_cpufuncs;
1980 		set_cpufuncs_mpfixup();
1981 #if defined(CPU_ARMV6) || defined(CPU_PRE_ARMV6)
1982 		cpu_armv7_p = true;
1983 #endif
1984 		get_cachetype_cp15();
1985 		pmap_pte_init_armv7();
1986 		return 0;
1987 	}
1988 #endif /* CPU_PJ4B */
1989 
1990 	/*
1991 	 * Bzzzz. And the answer was ...
1992 	 */
1993 	panic("No support for this CPU type (%08x) in kernel", cputype);
1994 	return ARCHITECTURE_NOT_PRESENT;
1995 }
1996 
1997 /*
1998  * Fixup routines for data and prefetch aborts.
1999  *
2000  * Several compile-time symbols are used:
2001  *
2002  * DEBUG_FAULT_CORRECTION - Print debugging information during the
2003  * correction of registers after a fault.
2004  * ARM6_LATE_ABORT - ARM6 supports both early and late aborts;
2005  * when defined, late aborts are used.
2006  */
2007 
2008 
2009 /*
2010  * Null abort fixup routine.
2011  * For use when no fixup is required.
2012  */
2013 int
2014 cpufunc_null_fixup(void *arg)
2015 {
2016 	return(ABORT_FIXUP_OK);
2017 }
2018 
2019 
2020 #if defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI)
2021 
2022 #ifdef DEBUG_FAULT_CORRECTION
2023 #define DFC_PRINTF(x)		printf x
2024 #define DFC_DISASSEMBLE(x)	disassemble(x)
2025 #else
2026 #define DFC_PRINTF(x)		/* nothing */
2027 #define DFC_DISASSEMBLE(x)	/* nothing */
2028 #endif
2029 
2030 /*
2031  * "Early" data abort fixup.
2032  *
2033  * For ARM2, ARM2as, ARM3 and ARM6 (in early-abort mode).  Also used
2034  * indirectly by ARM6 (in late-abort mode) and ARM7[TDMI].
2035  *
2036  * In early aborts, we may have to fix up LDM, STM, LDC and STC.
2037  */
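/*
 * For example (illustrative): if "ldmia r4!, {r0-r3}" aborts after the
 * base writeback, r4 has advanced by 4 registers * 4 bytes; the code
 * below counts the registers in the transfer list and steps r4 back by
 * count * 4 so the instruction can be restarted.
 */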
2038 int
2039 early_abort_fixup(void *arg)
2040 {
2041 	trapframe_t *frame = arg;
2042 	u_int fault_pc;
2043 	u_int fault_instruction;
2044 	int saved_lr = 0;
2045 
2046 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
2047 
2048 		/* Ok an abort in SVC mode */
2049 
2050 		/*
2051 		 * Copy the SVC r14 into the usr r14 slot - the usr r14 is
2052 		 * garbage as the fault happened in SVC mode, but we need it
2053 		 * in the usr slot so we can treat the registers as an array
2054 		 * of ints during fixing.
2055 		 * NOTE: the PC occupies its usual slot, but writeback to
2056 		 * r15 is not allowed, so it is never fixed up.
2057 		 * Doing it like this is more efficient than trapping this
2058 		 * case in all possible locations in the following fixup code.
2059 		 */
2060 
2061 		saved_lr = frame->tf_usr_lr;
2062 		frame->tf_usr_lr = frame->tf_svc_lr;
2063 
2064 		/*
2065 		 * Note the trapframe does not have the SVC r13 so a fault
2066 		 * from an instruction with writeback to r13 in SVC mode is
2067 		 * not allowed. This should not happen as the kstack is
2068 		 * always valid.
2069 		 */
2070 	}
2071 
2072 	/* Get fault address and status from the CPU */
2073 	/* Get the faulting PC and fetch the faulting instruction */
2074 	fault_pc = frame->tf_pc;
2075 	fault_instruction = *((volatile unsigned int *)fault_pc);
2076 
2077 	/* Decode the fault instruction and fix the registers as needed */
2078 
2079 	if ((fault_instruction & 0x0e000000) == 0x08000000) {
2080 		int base;
2081 		int loop;
2082 		int count;
2083 		int *registers = &frame->tf_r0;
2084 
2085 		DFC_PRINTF(("LDM/STM\n"));
2086 		DFC_DISASSEMBLE(fault_pc);
2087 		if (fault_instruction & (1 << 21)) {
2088 			DFC_PRINTF(("This instruction must be corrected\n"));
2089 			base = (fault_instruction >> 16) & 0x0f;
2090 			if (base == 15)
2091 				return ABORT_FIXUP_FAILED;
2092 			/* Count registers transferred */
2093 			count = 0;
2094 			for (loop = 0; loop < 16; ++loop) {
2095 				if (fault_instruction & (1<<loop))
2096 					++count;
2097 			}
2098 			DFC_PRINTF(("%d registers used\n", count));
2099 			DFC_PRINTF(("Corrected r%d by %d bytes ",
2100 				       base, count * 4));
2101 			if (fault_instruction & (1 << 23)) {
2102 				DFC_PRINTF(("down\n"));
2103 				registers[base] -= count * 4;
2104 			} else {
2105 				DFC_PRINTF(("up\n"));
2106 				registers[base] += count * 4;
2107 			}
2108 		}
2109 	} else if ((fault_instruction & 0x0e000000) == 0x0c000000) {
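		/* Coprocessor data transfer (LDC/STC). */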
2110 		int base;
2111 		int offset;
2112 		int *registers = &frame->tf_r0;
2113 
2114 		/* REGISTER CORRECTION IS REQUIRED FOR THESE INSTRUCTIONS */
2115 
2116 		DFC_DISASSEMBLE(fault_pc);
2117 
2118 		/* Only need to fix registers if write back is turned on */
2119 
2120 		if ((fault_instruction & (1 << 21)) != 0) {
2121 			base = (fault_instruction >> 16) & 0x0f;
2122 			if (base == 13 &&
2123 			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
2124 				return ABORT_FIXUP_FAILED;
2125 			if (base == 15)
2126 				return ABORT_FIXUP_FAILED;
2127 
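			/* The LDC/STC immediate is a word count; scale it to bytes. */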
2128 			offset = (fault_instruction & 0xff) << 2;
2129 			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
2130 			if ((fault_instruction & (1 << 23)) != 0)
2131 				offset = -offset;
2132 			registers[base] += offset;
2133 			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
2134 		}
2135 	}
2136 
2137 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
2138 
2139 		/* Ok an abort in SVC mode */
2140 
2141 		/*
2142 		 * Copy the (possibly fixed-up) usr r14 back into the SVC
2143 		 * r14 slot and restore the original usr r14, undoing the
2144 		 * shuffle performed on entry above.  If the fixup modified
2145 		 * the r14 slot, the corrected value now lands in the SVC
2146 		 * r14 where it belongs.
2147 		 * NOTE: writeback to r15 is still not allowed, so the PC
2148 		 * slot needs no such restoration.
2149 		 * This matches the entry-side shuffle above.
2150 		 */
2151 
2152 		frame->tf_svc_lr = frame->tf_usr_lr;
2153 		frame->tf_usr_lr = saved_lr;
2154 
2155 		/*
2156 		 * Note the trapframe does not have the SVC r13 so a fault
2157 		 * from an instruction with writeback to r13 in SVC mode is
2158 		 * not allowed. This should not happen as the kstack is
2159 		 * always valid.
2160 		 */
2161 	}
2162 
2163 	return(ABORT_FIXUP_OK);
2164 }
2165 #endif	/* CPU_ARM6/7 */
2166 
2167 
2168 #if (defined(CPU_ARM6) && defined(ARM6_LATE_ABORT)) || defined(CPU_ARM7) || \
2169 	defined(CPU_ARM7TDMI)
2170 /*
2171  * "Late" (base updated) data abort fixup
2172  *
2173  * For ARM6 (in late-abort mode) and ARM7.
2174  *
2175  * In this model, all data-transfer instructions need fixing up.  We defer
2176  * LDM, STM, LDC and STC fixup to the early-abort handler.
2177  */
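/*
 * For example (illustrative): a faulting "ldr r0, [r1, #4]!" has already
 * added 4 to r1 by the time the abort is taken, so the code below
 * subtracts the decoded offset again before the instruction is retried.
 */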
2178 int
2179 late_abort_fixup(void *arg)
2180 {
2181 	trapframe_t *frame = arg;
2182 	u_int fault_pc;
2183 	u_int fault_instruction;
2184 	int saved_lr = 0;
2185 
2186 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
2187 
2188 		/* Ok an abort in SVC mode */
2189 
2190 		/*
2191 		 * Copy the SVC r14 into the usr r14 slot - the usr r14 is
2192 		 * garbage as the fault happened in SVC mode, but we need it
2193 		 * in the usr slot so we can treat the registers as an array
2194 		 * of ints during fixing.
2195 		 * NOTE: the PC occupies its usual slot, but writeback to
2196 		 * r15 is not allowed, so it is never fixed up.
2197 		 * Doing it like this is more efficient than trapping this
2198 		 * case in all possible locations in the following fixup code.
2199 		 */
2200 
2201 		saved_lr = frame->tf_usr_lr;
2202 		frame->tf_usr_lr = frame->tf_svc_lr;
2203 
2204 		/*
2205 		 * Note the trapframe does not have the SVC r13 so a fault
2206 		 * from an instruction with writeback to r13 in SVC mode is
2207 		 * not allowed. This should not happen as the kstack is
2208 		 * always valid.
2209 		 */
2210 	}
2211 
2212 	/* Get the faulting PC and fetch the faulting instruction */
2213 
2214 	fault_pc = frame->tf_pc;
2215 	fault_instruction = *((volatile unsigned int *)fault_pc);
2216 
2217 	/* Decode the fault instruction and fix the registers as needed */
2218 
2219 	/* Was it a swap (SWP/SWPB) instruction? */
2220 
2221 	if ((fault_instruction & 0x0fb00ff0) == 0x01000090) {
2222 		DFC_DISASSEMBLE(fault_pc);
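		/* SWP/SWPB never updates a base register, so there is nothing to undo. */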
2223 	} else if ((fault_instruction & 0x0c000000) == 0x04000000) {
2224 
2225 		/* Was it an ldr/str instruction? */
2226 		/* This is for late abort only */
2227 
2228 		int base;
2229 		int offset;
2230 		int *registers = &frame->tf_r0;
2231 
2232 		DFC_DISASSEMBLE(fault_pc);
2233 
2234 		/* This is for late abort only */
2235 
2236 		if ((fault_instruction & (1 << 24)) == 0
2237 		    || (fault_instruction & (1 << 21)) != 0) {
2238 			/* post-indexed, or pre-indexed with writeback */
2239 
2240 			base = (fault_instruction >> 16) & 0x0f;
2241 			if (base == 13 &&
2242 			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
2243 				return ABORT_FIXUP_FAILED;
2244 			if (base == 15)
2245 				return ABORT_FIXUP_FAILED;
2246 			DFC_PRINTF(("late abt fix: r%d=%08x : ",
2247 				       base, registers[base]));
2248 			if ((fault_instruction & (1 << 25)) == 0) {
2249 				/* Immediate offset - easy */
2250 
2251 				offset = fault_instruction & 0xfff;
2252 				if ((fault_instruction & (1 << 23)))
2253 					offset = -offset;
2254 				registers[base] += offset;
2255 				DFC_PRINTF(("imm=%08x ", offset));
2256 			} else {
2257 				/* offset is a shifted register */
2258 				int shift;
2259 
2260 				offset = fault_instruction & 0x0f;
2261 				if (offset == base)
2262 					return ABORT_FIXUP_FAILED;
2263 
2264 				/*
2265 				 * Register offset - harder: we have to
2266 				 * cope with shifts!
2267 				 */
2268 				offset = registers[offset];
2269 
2270 				if ((fault_instruction & (1 << 4)) == 0)
2271 					/* shift with amount */
2272 					shift = (fault_instruction >> 7) & 0x1f;
2273 				else {
2274 					/* shift with register */
2275 					if ((fault_instruction & (1 << 7)) != 0)
2276 						/* undefined for now so bail out */
2277 						return ABORT_FIXUP_FAILED;
2278 					shift = ((fault_instruction >> 8) & 0xf);
2279 					if (base == shift)
2280 						return ABORT_FIXUP_FAILED;
2281 					DFC_PRINTF(("shift reg=%d ", shift));
2282 					shift = registers[shift];
2283 				}
2284 				DFC_PRINTF(("shift=%08x ", shift));
2285 				switch (((fault_instruction >> 5) & 0x3)) {
2286 				case 0 : /* Logical left */
2287 					offset = (int)(((u_int)offset) << shift);
2288 					break;
2289 				case 1 : /* Logical Right */
2290 					if (shift == 0) shift = 32;
2291 					offset = (int)(((u_int)offset) >> shift);
2292 					break;
2293 				case 2 : /* Arithmetic Right */
2294 					if (shift == 0) shift = 32;
2295 					offset = (int)(((int)offset) >> shift);
2296 					break;
2297 				case 3 : /* Rotate right (ror or rrx) */
2298 					return ABORT_FIXUP_FAILED;
2299 					break;
2300 				}
2301 
2302 				DFC_PRINTF(("abt: fixed LDR/STR with "
2303 					       "register offset\n"));
2304 				if ((fault_instruction & (1 << 23)))
2305 					offset = -offset;
2306 				DFC_PRINTF(("offset=%08x ", offset));
2307 				registers[base] += offset;
2308 			}
2309 			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
2310 		}
2311 	}
2312 
2313 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
2314 
2315 		/* Ok an abort in SVC mode */
2316 
2317 		/*
2318 		 * Copy the (possibly fixed-up) usr r14 back into the SVC
2319 		 * r14 slot and restore the original usr r14, undoing the
2320 		 * shuffle performed on entry above.  If the fixup modified
2321 		 * the r14 slot, the corrected value now lands in the SVC
2322 		 * r14 where it belongs.
2323 		 * NOTE: writeback to r15 is still not allowed, so the PC
2324 		 * slot needs no such restoration.
2325 		 * This matches the entry-side shuffle above.
2326 		 */
2327 
2328 		frame->tf_svc_lr = frame->tf_usr_lr;
2329 		frame->tf_usr_lr = saved_lr;
2330 
2331 		/*
2332 		 * Note the trapframe does not have the SVC r13 so a fault
2333 		 * from an instruction with writeback to r13 in SVC mode is
2334 		 * not allowed. This should not happen as the kstack is
2335 		 * always valid.
2336 		 */
2337 	}
2338 
2339 	/*
2340 	 * Now let the early-abort fixup routine have a go, in case it
2341 	 * was an LDM, STM, LDC or STC that faulted.
2342 	 */
2343 
2344 	return early_abort_fixup(arg);
2345 }
2346 #endif	/* CPU_ARM6(LATE)/7/7TDMI */
2347 
2348 /*
2349  * CPU Setup code
2350  */
2351 
2352 #if defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) || \
2353 	defined(CPU_ARM8) || defined (CPU_ARM9) || defined (CPU_ARM9E) || \
2354 	defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
2355 	defined(CPU_FA526) || \
2356 	defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
2357 	defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425) || \
2358 	defined(CPU_ARM10) || defined(CPU_SHEEVA) || \
2359 	defined(CPU_ARMV6) || defined(CPU_ARMV7)
2360 
2361 #define IGN	0
2362 #define OR	1
2363 #define BIC	2
2364 
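
/*
 * Each cpu_option entry maps a boot argument onto a control-register
 * bit: co_falseop is applied when the option is false and co_trueop
 * when it is true, where OR sets co_value, BIC clears it and IGN leaves
 * it alone.  For example, "cpu.nocache=1" BICs the cache-enable bits
 * while "cpu.nocache=0" ORs them back in.
 */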
2365 struct cpu_option {
2366 	const char *co_name;
2367 	int	co_falseop;
2368 	int	co_trueop;
2369 	int	co_value;
2370 };
2371 
2372 static u_int parse_cpu_options(char *, struct cpu_option *, u_int);
2373 
2374 static u_int __noasan
2375 parse_cpu_options(char *args, struct cpu_option *optlist, u_int cpuctrl)
2376 {
2377 	int integer;
2378 
2379 	if (args == NULL)
2380 		return(cpuctrl);
2381 
2382 	while (optlist->co_name) {
2383 		if (get_bootconf_option(args, optlist->co_name,
2384 		    BOOTOPT_TYPE_BOOLEAN, &integer)) {
2385 			if (integer) {
2386 				if (optlist->co_trueop == OR)
2387 					cpuctrl |= optlist->co_value;
2388 				else if (optlist->co_trueop == BIC)
2389 					cpuctrl &= ~optlist->co_value;
2390 			} else {
2391 				if (optlist->co_falseop == OR)
2392 					cpuctrl |= optlist->co_value;
2393 				else if (optlist->co_falseop == BIC)
2394 					cpuctrl &= ~optlist->co_value;
2395 			}
2396 		}
2397 		++optlist;
2398 	}
2399 	return(cpuctrl);
2400 }
2401 #endif /* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 || ... || CPU_ARMV6 || CPU_ARMV7 */
2402 
2403 #if defined (CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) \
2404 	|| defined(CPU_ARM8)
2405 struct cpu_option arm678_options[] = {
2406 #ifdef COMPAT_12
2407 	{ "nocache",		IGN, BIC, CPU_CONTROL_IDC_ENABLE },
2408 	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
2409 #endif	/* COMPAT_12 */
2410 	{ "cpu.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
2411 	{ "cpu.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
2412 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2413 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2414 	{ NULL,			IGN, IGN, 0 }
2415 };
2416 
2417 #endif	/* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 */
2418 
2419 #ifdef CPU_ARM6
2420 struct cpu_option arm6_options[] = {
2421 	{ "arm6.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
2422 	{ "arm6.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
2423 	{ "arm6.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2424 	{ "arm6.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2425 	{ NULL,			IGN, IGN, 0 }
2426 };
2427 
2428 void
2429 arm6_setup(char *args)
2430 {
2431 
2432 	/* Set up default control registers bits */
2433 	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2434 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2435 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
2436 #if 0
2437 	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2438 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2439 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
2440 		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE
2441 		 | CPU_CONTROL_AFLT_ENABLE;
2442 #endif
2443 
2444 #ifdef ARM6_LATE_ABORT
2445 	cpuctrl |= CPU_CONTROL_LABT_ENABLE;
2446 #endif	/* ARM6_LATE_ABORT */
2447 
2448 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2449 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2450 #endif
2451 
2452 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
2453 	cpuctrl = parse_cpu_options(args, arm6_options, cpuctrl);
2454 
2455 #ifdef __ARMEB__
2456 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2457 #endif
2458 
2459 	/* Clear out the cache */
2460 	cpu_idcache_wbinv_all();
2461 
2462 	/* Set the control register */
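	/* (An all-ones mask writes every bit of cpuctrl.) */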
2463 	cpu_control(0xffffffff, cpuctrl);
2464 }
2465 #endif	/* CPU_ARM6 */
2466 
2467 #ifdef CPU_ARM7
2468 struct cpu_option arm7_options[] = {
2469 	{ "arm7.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
2470 	{ "arm7.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
2471 	{ "arm7.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2472 	{ "arm7.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2473 #ifdef COMPAT_12
2474 	{ "fpaclk2",		BIC, OR,  CPU_CONTROL_CPCLK },
2475 #endif	/* COMPAT_12 */
2476 	{ "arm700.fpaclk",	BIC, OR,  CPU_CONTROL_CPCLK },
2477 	{ NULL,			IGN, IGN, 0 }
2478 };
2479 
2480 void
2481 arm7_setup(char *args)
2482 {
2483 
2484 	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2485 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2486 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
2487 #if 0
2488 	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2489 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2490 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
2491 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_LABT_ENABLE
2492 		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE
2493 		 | CPU_CONTROL_AFLT_ENABLE;
2494 #endif
2495 
2496 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2497 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2498 #endif
2499 
2500 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
2501 	cpuctrl = parse_cpu_options(args, arm7_options, cpuctrl);
2502 
2503 #ifdef __ARMEB__
2504 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2505 #endif
2506 
2507 	/* Clear out the cache */
2508 	cpu_idcache_wbinv_all();
2509 
2510 	/* Set the control register */
2511 	cpu_control(0xffffffff, cpuctrl);
2512 }
2513 #endif	/* CPU_ARM7 */
2514 
2515 #ifdef CPU_ARM7TDMI
2516 struct cpu_option arm7tdmi_options[] = {
2517 	{ "arm7.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
2518 	{ "arm7.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
2519 	{ "arm7.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2520 	{ "arm7.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2521 #ifdef COMPAT_12
2522 	{ "fpaclk2",		BIC, OR,  CPU_CONTROL_CPCLK },
2523 #endif	/* COMPAT_12 */
2524 	{ "arm700.fpaclk",	BIC, OR,  CPU_CONTROL_CPCLK },
2525 	{ NULL,			IGN, IGN, 0 }
2526 };
2527 
2528 void
2529 arm7tdmi_setup(char *args)
2530 {
2531 	int cpuctrl;
2532 
2533 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2534 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2535 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
2536 
2537 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
2538 	cpuctrl = parse_cpu_options(args, arm7tdmi_options, cpuctrl);
2539 
2540 #ifdef __ARMEB__
2541 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2542 #endif
2543 
2544 	/* Clear out the cache */
2545 	cpu_idcache_wbinv_all();
2546 
2547 	/* Set the control register */
2548 	cpu_control(0xffffffff, cpuctrl);
2549 }
2550 #endif	/* CPU_ARM7TDMI */
2551 
2552 #ifdef CPU_ARM8
2553 struct cpu_option arm8_options[] = {
2554 	{ "arm8.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
2555 	{ "arm8.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
2556 	{ "arm8.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2557 	{ "arm8.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2558 #ifdef COMPAT_12
2559 	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2560 #endif	/* COMPAT_12 */
2561 	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2562 	{ "arm8.branchpredict",	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2563 	{ NULL,			IGN, IGN, 0 }
2564 };
2565 
2566 void
2567 arm8_setup(char *args)
2568 {
2569 	int integer;
2570 	int clocktest;
2571 	int setclock = 0;
2572 
2573 	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2574 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2575 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
2576 #if 0
2577 	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2578 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2579 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
2580 		 | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_ROM_ENABLE
2581 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE;
2582 #endif
2583 
2584 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2585 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2586 #endif
2587 
2588 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
2589 	cpuctrl = parse_cpu_options(args, arm8_options, cpuctrl);
2590 
2591 #ifdef __ARMEB__
2592 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2593 #endif
2594 
2595 	/* Get clock configuration */
2596 	clocktest = arm8_clock_config(0, 0) & 0x0f;
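	/* (a zero mask should change no bits, making this a pure read) */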
2597 
2598 	/* Special ARM8 clock and test configuration */
2599 	if (get_bootconf_option(args, "arm8.clock.reset", BOOTOPT_TYPE_BOOLEAN, &integer)) {
2600 		clocktest = 0;
2601 		setclock = 1;
2602 	}
2603 	if (get_bootconf_option(args, "arm8.clock.dynamic", BOOTOPT_TYPE_BOOLEAN, &integer)) {
2604 		if (integer)
2605 			clocktest |= 0x01;
2606 		else
2607 			clocktest &= ~(0x01);
2608 		setclock = 1;
2609 	}
2610 	if (get_bootconf_option(args, "arm8.clock.sync", BOOTOPT_TYPE_BOOLEAN, &integer)) {
2611 		if (integer)
2612 			clocktest |= 0x02;
2613 		else
2614 			clocktest &= ~(0x02);
2615 		setclock = 1;
2616 	}
2617 	if (get_bootconf_option(args, "arm8.clock.fast", BOOTOPT_TYPE_BININT, &integer)) {
2618 		clocktest = (clocktest & ~0x0c) | ((integer & 3) << 2);
2619 		setclock = 1;
2620 	}
2621 	if (get_bootconf_option(args, "arm8.test", BOOTOPT_TYPE_BININT, &integer)) {
2622 		clocktest |= (integer & 7) << 5;
2623 		setclock = 1;
2624 	}
2625 
2626 	/* Clear out the cache */
2627 	cpu_idcache_wbinv_all();
2628 
2629 	/* Set the control register */
2630 	cpu_control(0xffffffff, cpuctrl);
2631 
2632 	/* Set the clock/test register */
2633 	if (setclock)
2634 		arm8_clock_config(0x7f, clocktest);
2635 }
2636 #endif	/* CPU_ARM8 */
2637 
2638 #ifdef CPU_ARM9
2639 struct cpu_option arm9_options[] = {
2640 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2641 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2642 	{ "arm9.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2643 	{ "arm9.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2644 	{ "arm9.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2645 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2646 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2647 	{ "arm9.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2648 	{ NULL,			IGN, IGN, 0 }
2649 };
2650 
2651 void
2652 arm9_setup(char *args)
2653 {
2654 
2655 	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2656 	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2657 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2658 	    | CPU_CONTROL_WBUF_ENABLE;
2659 	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2660 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2661 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2662 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2663 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2664 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
2665 		 | CPU_CONTROL_ROUNDROBIN;
2666 
2667 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2668 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2669 #endif
2670 
2671 	cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);
2672 
2673 #ifdef __ARMEB__
2674 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2675 #endif
2676 
2677 #ifndef ARM_HAS_VBAR
2678 	if (vector_page == ARM_VECTORS_HIGH)
2679 		cpuctrl |= CPU_CONTROL_VECRELOC;
2680 #endif
2681 
2682 	/* Clear out the cache */
2683 	cpu_idcache_wbinv_all();
2684 
2685 	/* Set the control register */
2686 	cpu_control(cpuctrlmask, cpuctrl);
2687 
2688 }
2689 #endif	/* CPU_ARM9 */
2690 
2691 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
2692 struct cpu_option arm10_options[] = {
2693 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2694 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2695 	{ "arm10.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2696 	{ "arm10.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2697 	{ "arm10.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2698 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2699 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2700 	{ "arm10.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2701 	{ NULL,			IGN, IGN, 0 }
2702 };
2703 
2704 void
2705 arm10_setup(char *args)
2706 {
2707 
2708 	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2709 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2710 	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
2711 #if 0
2712 	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2713 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2714 	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2715 	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2716 	    | CPU_CONTROL_BPRD_ENABLE
2717 	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
2718 #endif
2719 
2720 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2721 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2722 #endif
2723 
2724 	cpuctrl = parse_cpu_options(args, arm10_options, cpuctrl);
2725 
2726 #ifdef __ARMEB__
2727 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2728 #endif
2729 
2730 #ifndef ARM_HAS_VBAR
2731 	if (vector_page == ARM_VECTORS_HIGH)
2732 		cpuctrl |= CPU_CONTROL_VECRELOC;
2733 #endif
2734 
2735 	/* Clear out the cache */
2736 	cpu_idcache_wbinv_all();
2737 
2738 	/* Now really make sure they are clean.  */
2739 	__asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
2740 
2741 	/* Set the control register */
2742 	cpu_control(0xffffffff, cpuctrl);
2743 
2744 	/* And again. */
2745 	cpu_idcache_wbinv_all();
2746 }
2747 #endif	/* CPU_ARM9E || CPU_ARM10 */
2748 
2749 #if defined(CPU_ARM11)
2750 struct cpu_option arm11_options[] = {
2751 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2752 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2753 	{ "arm11.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2754 	{ "arm11.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2755 	{ "arm11.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2756 	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2757 	{ "arm11.branchpredict", BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2758 	{ NULL,			IGN, IGN, 0 }
2759 };
2760 
2761 void
2762 arm11_setup(char *args)
2763 {
2764 
2765 	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2766 #ifdef ARM_MMU_EXTENDED
2767 	    | CPU_CONTROL_XP_ENABLE
2768 #endif
2769 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2770 	    /* | CPU_CONTROL_BPRD_ENABLE */;
2771 
2772 #ifdef __ARMEB__
2773 	cpuctrl |= CPU_CONTROL_EX_BEND;
2774 #endif
2775 
2776 	int cpuctrlmask = cpuctrl
2777 	    | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BPRD_ENABLE
2778 	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2779 	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
2780 
2781 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2782 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2783 #endif
2784 
2785 	cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);
2786 
2787 #ifndef ARM_HAS_VBAR
2788 	if (vector_page == ARM_VECTORS_HIGH)
2789 		cpuctrl |= CPU_CONTROL_VECRELOC;
2790 #endif
2791 
2792 	/* Clear out the cache */
2793 	cpu_idcache_wbinv_all();
2794 
2795 	/* Now really make sure they are clean.  */
2796 	__asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
2797 
2798 	/* Allow detection code to find the VFP if it's fitted.  */
2799 	armreg_cpacr_write(0x0fffffff);
2800 
2801 	/* Set the control register */
2802 	cpu_control(cpuctrlmask, cpuctrl);
2803 
2804 	/* And again. */
2805 	cpu_idcache_wbinv_all();
2806 }
2807 #endif	/* CPU_ARM11 */
2808 
2809 #if defined(CPU_ARM11MPCORE)
2810 
2811 void
2812 arm11mpcore_setup(char *args)
2813 {
2814 
2815 	int cpuctrl = CPU_CONTROL_IC_ENABLE
2816 	    | CPU_CONTROL_DC_ENABLE
2817 #ifdef ARM_MMU_EXTENDED
2818 	    | CPU_CONTROL_XP_ENABLE
2819 #endif
2820 	    | CPU_CONTROL_BPRD_ENABLE ;
2821 
2822 #ifdef __ARMEB__
2823 	cpuctrl |= CPU_CONTROL_EX_BEND;
2824 #endif
2825 
2826 	int cpuctrlmask = cpuctrl
2827 	    | CPU_CONTROL_AFLT_ENABLE
2828 	    | CPU_CONTROL_VECRELOC;
2829 
2830 #ifdef	ARM11MPCORE_MMU_COMPAT
2831 	/* XXX: S and R? */
2832 #endif
2833 
2834 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2835 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2836 #endif
2837 
2838 	cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);
2839 
2840 #ifndef ARM_HAS_VBAR
2841 	if (vector_page == ARM_VECTORS_HIGH)
2842 		cpuctrl |= CPU_CONTROL_VECRELOC;
2843 #endif
2844 
2845 	/* Clear out the cache */
2846 	cpu_idcache_wbinv_all();
2847 
2848 	/* Now really make sure they are clean.  */
2849 	__asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
2850 
2851 	/* Allow detection code to find the VFP if it's fitted.  */
2852 	armreg_cpacr_write(0x0fffffff);
2853 
2854 	/* Set the control register */
2855 	cpu_control(cpuctrlmask, cpuctrl);
2856 
2857 	/* And again. */
2858 	cpu_idcache_wbinv_all();
2859 }
2860 #endif	/* CPU_ARM11MPCORE */
2861 
2862 #ifdef CPU_PJ4B
2863 void
2864 pj4bv7_setup(char *args)
2865 {
2866 	int cpuctrl;
2867 
2868 	pj4b_config();
2869 
2870 	cpuctrl = CPU_CONTROL_MMU_ENABLE;
2871 #ifdef ARM32_DISABLE_ALIGNMENT_FAULTS
2872 	cpuctrl |= CPU_CONTROL_UNAL_ENABLE;
2873 #else
2874 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2875 #endif
2876 	cpuctrl |= CPU_CONTROL_DC_ENABLE;
2877 	cpuctrl |= CPU_CONTROL_IC_ENABLE;
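	/*
	 * The raw constants below set bits 3..6 and bits 16 and 18, which
	 * are should-be-one bits in the control register on this core (an
	 * assumption based on the ARMv7 SCTLR layout).
	 */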
2878 	cpuctrl |= (0xf << 3);
2879 	cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
2880 	cpuctrl |= (0x5 << 16);
2881 	cpuctrl |= CPU_CONTROL_XP_ENABLE;
2882 
2883 #ifndef ARM_HAS_VBAR
2884 	if (vector_page == ARM_VECTORS_HIGH)
2885 		cpuctrl |= CPU_CONTROL_VECRELOC;
2886 #endif
2887 
2888 #ifdef L2CACHE_ENABLE
2889 	/* Setup L2 cache */
2890 	arm_scache.cache_type = CPU_CT_CTYPE_WT;
2891 	arm_scache.cache_unified = 1;
2892 	arm_scache.dcache_type = arm_scache.icache_type = CACHE_TYPE_PIPT;
2893 	arm_scache.dcache_size = arm_scache.icache_size = ARMADAXP_L2_SIZE;
2894 	arm_scache.dcache_ways = arm_scache.icache_ways = ARMADAXP_L2_WAYS;
2895 	arm_scache.dcache_way_size = arm_scache.icache_way_size =
2896 	    ARMADAXP_L2_WAY_SIZE;
2897 	arm_scache.dcache_line_size = arm_scache.icache_line_size =
2898 	    ARMADAXP_L2_LINE_SIZE;
2899 	arm_scache.dcache_sets = arm_scache.icache_sets =
2900 	    ARMADAXP_L2_SETS;
2901 
2902 	cpufuncs.cf_sdcache_wbinv_range	= armadaxp_sdcache_wbinv_range;
2903 	cpufuncs.cf_sdcache_inv_range	= armadaxp_sdcache_inv_range;
2904 	cpufuncs.cf_sdcache_wb_range	= armadaxp_sdcache_wb_range;
2905 #endif
2906 
2907 #ifdef AURORA_IO_CACHE_COHERENCY
2908 	/* use AMBA and I/O Coherency Fabric to maintain cache */
2909 	cpufuncs.cf_dcache_wbinv_range	= pj4b_dcache_cfu_wbinv_range;
2910 	cpufuncs.cf_dcache_inv_range	= pj4b_dcache_cfu_inv_range;
2911 	cpufuncs.cf_dcache_wb_range	= pj4b_dcache_cfu_wb_range;
2912 
2913 	cpufuncs.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop;
2914 	cpufuncs.cf_sdcache_inv_range	= (void *)cpufunc_nullop;
2915 	cpufuncs.cf_sdcache_wb_range	= (void *)cpufunc_nullop;
2916 #endif
2917 
2918 	/* Clear out the cache */
2919 	cpu_idcache_wbinv_all();
2920 
2921 	/* Set the control register */
2922 	cpu_control(0xffffffff, cpuctrl);
2923 
2924 	/* And again. */
2925 	cpu_idcache_wbinv_all();
2926 #ifdef L2CACHE_ENABLE
2927 	armadaxp_sdcache_wbinv_all();
2928 #endif
2929 }
2930 #endif /* CPU_PJ4B */
2931 
2932 #if defined(CPU_ARMV7)
2933 struct cpu_option armv7_options[] = {
2934     { "cpu.cache",      BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2935     { "cpu.nocache",    OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2936     { "armv7.cache",    BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2937     { "armv7.icache",   BIC, OR,  CPU_CONTROL_IC_ENABLE },
2938     { "armv7.dcache",   BIC, OR,  CPU_CONTROL_DC_ENABLE },
2939     { NULL, 		IGN, IGN, 0}
2940 };
2941 
2942 void
2943 armv7_setup(char *args)
2944 {
2945 	int cpuctrl =
2946 	    CPU_CONTROL_MMU_ENABLE |
2947 	    CPU_CONTROL_IC_ENABLE |
2948 	    CPU_CONTROL_DC_ENABLE |
2949 	    CPU_CONTROL_BPRD_ENABLE |
2950 	    CPU_CONTROL_UNAL_ENABLE |
2951 	    0;
2952 #ifdef __ARMEB__
2953 	cpuctrl |= CPU_CONTROL_EX_BEND;
2954 #endif
2955 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2956 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2957 #endif
2958 #ifdef ARM_MMU_EXTENDED
2959 	cpuctrl |= CPU_CONTROL_XP_ENABLE;
2960 #endif
2961 
2962 	int cpuctrlmask = cpuctrl |
2963 	    CPU_CONTROL_EX_BEND |
2964 	    CPU_CONTROL_AFLT_ENABLE |
2965 	    CPU_CONTROL_TR_ENABLE |
2966 	    CPU_CONTROL_VECRELOC |
2967 	    CPU_CONTROL_XP_ENABLE |
2968 	    0;
2969 
2970 	cpuctrl = parse_cpu_options(args, armv7_options, cpuctrl);
2971 
2972 #ifndef ARM_HAS_VBAR
2973 	if (vector_page == ARM_VECTORS_HIGH)
2974 		cpuctrl |= CPU_CONTROL_VECRELOC;
2975 #endif
2976 
2977 #ifdef __HAVE_GENERIC_START
2978 	const u_int lcputype = cpufunc_id();
2979 	int actlr_set = 0;
2980 	int actlr_clr = 0;
2981 
2982 	if (CPU_ID_CORTEX_A5_P(lcputype)) {
2983 		/*
2984 		 * Disable exclusive L1/L2 cache control
2985 		 * Enable SMP mode
2986 		 * Enable Cache and TLB maintenance broadcast
2987 		 */
2988 		actlr_clr = CORTEXA5_ACTLR_EXCL;
2989 		actlr_set = CORTEXA5_ACTLR_SMP | CORTEXA5_ACTLR_FW;
2990 	} else if (CPU_ID_CORTEX_A7_P(lcputype)) {
2991 #ifdef MULTIPROCESSOR
2992 		actlr_set |= CORTEXA7_ACTLR_SMP;
2993 #endif
2994 	} else if (CPU_ID_CORTEX_A8_P(lcputype)) {
2995 		actlr_set = CORTEXA8_ACTLR_L2EN;
2996 		actlr_clr = CORTEXA8_ACTLR_L1ALIAS;
2997 	} else if (CPU_ID_CORTEX_A9_P(lcputype)) {
2998 		actlr_set =
2999 		    CORTEXA9_AUXCTL_FW |
3000 		    CORTEXA9_AUXCTL_L2PE |	// Not in FreeBSD
3001 		    CORTEXA9_AUXCTL_SMP |
3002 		    0;
3003 	} else if (CPU_ID_CORTEX_A15_P(lcputype)) {
3004 		actlr_set =
3005 		    CORTEXA15_ACTLR_SMP |
3006 		    CORTEXA15_ACTLR_SDEH |
3007 		    0;
3008 #if 0
3009 	} else if (CPU_ID_CORTEX_A12_P(lcputype) ||
3010 	    CPU_ID_CORTEX_A17_P(lcputype)) {
3011 		actlr_set =
3012 		    CORTEXA17_ACTLR_SMP;
3013 #endif
3014 	} else if (CPU_ID_CORTEX_A53_P(lcputype)) {
3015 	} else if (CPU_ID_CORTEX_A57_P(lcputype)) {
3016 	} else if (CPU_ID_CORTEX_A72_P(lcputype)) {
3017 	}
3018 
3019 	uint32_t actlr = armreg_auxctl_read();
3020 	actlr &= ~actlr_clr;
3021 	actlr |= actlr_set;
3022 
3023 	armreg_auxctl_write(actlr);
3024 
3025 	/* Set the control register - does dsb; isb */
3026 	cpu_control(cpuctrlmask, cpuctrl);
3027 
3028 	/* does tlb and branch predictor flush, and dsb; isb */
3029 	cpu_tlb_flushID();
3030 #else
3031 	/* Set the control register - does dsb; isb */
3032 	cpu_control(cpuctrlmask, cpuctrl);
3033 #endif
3034 
3035 }
3036 #endif /* CPU_ARMV7 */
3037 
3038 
3039 #if defined(CPU_ARM1136) || defined(CPU_ARM1176)
3040 void
3041 arm11x6_setup(char *args)
3042 {
3043 	int cpuctrl, cpuctrl_wax;
3044 	uint32_t auxctrl;
3045 	uint32_t sbz = 0;	/* "should be zero" MCR operand */
3046 	uint32_t cpuid;
3047 
3048 	cpuid = cpu_idnum();
3049 
3050 	cpuctrl =
3051 		CPU_CONTROL_MMU_ENABLE  |
3052 		CPU_CONTROL_DC_ENABLE   |
3053 		CPU_CONTROL_WBUF_ENABLE |
3054 		CPU_CONTROL_32BP_ENABLE |
3055 		CPU_CONTROL_32BD_ENABLE |
3056 		CPU_CONTROL_LABT_ENABLE |
3057 		CPU_CONTROL_UNAL_ENABLE |
3058 #ifdef ARM_MMU_EXTENDED
3059 		CPU_CONTROL_XP_ENABLE   |
3060 #else
3061 		CPU_CONTROL_SYST_ENABLE |
3062 #endif
3063 		CPU_CONTROL_IC_ENABLE;
3064 
3065 #ifdef __ARMEB__
3066 	cpuctrl |= CPU_CONTROL_EX_BEND;
3067 #endif
3068 
3069 	/*
3070 	 * "Write as existing" bits: these are preserved as read, so
3071 	 * the inverse of this word is the write mask for cpu_control().
3072 	 */
3073 	cpuctrl_wax =
3074 		(3 << 30) |
3075 		(1 << 29) |
3076 		(1 << 28) |
3077 		(3 << 26) |
3078 		(3 << 19) |
3079 		(1 << 17);
3080 
3081 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3082 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3083 #endif
3084 
3085 	cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);
3086 
3087 #ifndef ARM_HAS_VBAR
3088 	if (vector_page == ARM_VECTORS_HIGH)
3089 		cpuctrl |= CPU_CONTROL_VECRELOC;
3090 #endif
3091 
3092 	auxctrl = armreg_auxctl_read();
3093 	/*
3094 	 * This option enables the workaround for the 364296 ARM1136
3095 	 * r0pX errata (possible cache data corruption with
3096 	 * hit-under-miss enabled). It sets the undocumented bit 31 in
3097 	 * the auxiliary control register and the FI bit in the control
3098 	 * register, thus disabling hit-under-miss without putting the
3099 	 * processor into full low interrupt latency mode. ARM11MPCore
3100 	 * is not affected.
3101 	 */
3102 	if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1136JS) { /* ARM1136JSr0pX */
3103 		cpuctrl |= CPU_CONTROL_FI_ENABLE;
3104 		auxctrl |= ARM1136_AUXCTL_PFI;
3105 	}
3106 
3107 	/*
3108 	 * This enables the workaround for the following ARM1176 r0pX
3109 	 * errata.
3110 	 *
3111 	 * 394601: In low interrupt latency configuration, interrupted clean
3112 	 * and invalidate operation may not clean dirty data.
3113 	 *
3114 	 * 716151: Clean Data Cache line by MVA can corrupt subsequent
3115 	 * stores to the same cache line.
3116 	 *
3117 	 * 714068: Prefetch Instruction Cache Line or Invalidate Instruction
3118 	 * Cache Line by MVA can cause deadlock.
3119 	 */
3120 	if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */
3121 		/* 394601 and 716151 */
3122 		cpuctrl |= CPU_CONTROL_FI_ENABLE;
3123 		auxctrl |= ARM1176_AUXCTL_FIO;
3124 
3125 		/* 714068 */
3126 		auxctrl |= ARM1176_AUXCTL_PHD;
3127 	}
3128 
3129 	/* Clear out the cache */
3130 	cpu_idcache_wbinv_all();
3131 
3132 	/* Now really make sure they are clean.  */
3133 	__asm volatile ("mcr\tp15, 0, %0, c7, c7, 0" : : "r"(sbz));
3134 
3135 	/* Allow detection code to find the VFP if it's fitted.  */
3136 	armreg_cpacr_write(0x0fffffff);
3137 
3138 	/* Set the control register */
3139 	cpu_control(~cpuctrl_wax, cpuctrl);
3140 
3141 	/* Update auxctlr */
3142 	armreg_auxctl_write(auxctrl);
3143 
3144 	/* And again. */
3145 	cpu_idcache_wbinv_all();
3146 }
3147 #endif	/* CPU_ARM1136 || CPU_ARM1176 */
3148 
3149 #ifdef CPU_SA110
3150 struct cpu_option sa110_options[] = {
3151 #ifdef COMPAT_12
3152 	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3153 	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
3154 #endif	/* COMPAT_12 */
3155 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3156 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3157 	{ "sa110.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3158 	{ "sa110.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
3159 	{ "sa110.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
3160 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3161 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
3162 	{ "sa110.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3163 	{ NULL,			IGN, IGN, 0 }
3164 };
3165 
3166 void
3167 sa110_setup(char *args)
3168 {
3169 	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3170 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3171 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3172 		 | CPU_CONTROL_WBUF_ENABLE;
3173 #if 0
3174 	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3175 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3176 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3177 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
3178 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
3179 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
3180 		 | CPU_CONTROL_CPCLK;
3181 #endif
3182 
3183 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3184 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3185 #endif
3186 
3187 	cpuctrl = parse_cpu_options(args, sa110_options, cpuctrl);
3188 
3189 #ifdef __ARMEB__
3190 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3191 #endif
3192 
3193 #ifndef ARM_HAS_VBAR
3194 	if (vector_page == ARM_VECTORS_HIGH)
3195 		cpuctrl |= CPU_CONTROL_VECRELOC;
3196 #endif
3197 
3198 	/* Clear out the cache */
3199 	cpu_idcache_wbinv_all();
3200 
3201 	/* Set the control register */
3202 #if 0
3203 	cpu_control(cpuctrlmask, cpuctrl);
3204 #endif
3205 	cpu_control(0xffffffff, cpuctrl);
3206 
3207 	/*
3208 	 * Enable clock switching.  Note that this doesn't read or write
3209 	 * r0; r0 is only named to make the asm valid.
3210 	 */
3211 	__asm volatile ("mcr p15, 0, r0, c15, c1, 2");
3212 }
3213 #endif	/* CPU_SA110 */
3214 
3215 #if defined(CPU_SA1100) || defined(CPU_SA1110)
3216 struct cpu_option sa11x0_options[] = {
3217 #ifdef COMPAT_12
3218 	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3219 	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
3220 #endif	/* COMPAT_12 */
3221 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3222 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3223 	{ "sa11x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3224 	{ "sa11x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
3225 	{ "sa11x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
3226 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3227 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
3228 	{ "sa11x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3229 	{ NULL,			IGN, IGN, 0 }
3230 };
3231 
3232 void
3233 sa11x0_setup(char *args)
3234 {
3235 
3236 	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3237 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3238 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3239 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
3240 #if 0
3241 	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3242 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3243 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3244 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
3245 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
3246 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
3247 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
3248 #endif
3249 
3250 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3251 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3252 #endif
3253 
3254 	cpuctrl = parse_cpu_options(args, sa11x0_options, cpuctrl);
3255 
3256 #ifdef __ARMEB__
3257 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3258 #endif
3259 
3260 #ifndef ARM_HAS_VBAR
3261 	if (vector_page == ARM_VECTORS_HIGH)
3262 		cpuctrl |= CPU_CONTROL_VECRELOC;
3263 #endif
3264 
3265 	/* Clear out the cache */
3266 	cpu_idcache_wbinv_all();
3267 
3268 	/* Set the control register */
3269 	cpu_control(0xffffffff, cpuctrl);
3270 }
3271 #endif	/* CPU_SA1100 || CPU_SA1110 */
3272 
3273 #if defined(CPU_FA526)
3274 struct cpu_option fa526_options[] = {
3275 #ifdef COMPAT_12
3276 	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3277 	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
3278 #endif	/* COMPAT_12 */
3279 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3280 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3281 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3282 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
3283 	{ NULL,			IGN, IGN, 0 }
3284 };
3285 
3286 void
3287 fa526_setup(char *args)
3288 {
3289 
3290 	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3291 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3292 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3293 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
3294 #if 0
3295 	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3296 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3297 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3298 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
3299 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
3300 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
3301 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
3302 #endif
3303 
3304 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3305 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3306 #endif
3307 
3308 	cpuctrl = parse_cpu_options(args, fa526_options, cpuctrl);
3309 
3310 #ifdef __ARMEB__
3311 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3312 #endif
3313 
3314 #ifndef ARM_HAS_VBAR
3315 	if (vector_page == ARM_VECTORS_HIGH)
3316 		cpuctrl |= CPU_CONTROL_VECRELOC;
3317 #endif
3318 
3319 	/* Clear out the cache */
3320 	cpu_idcache_wbinv_all();
3321 
3322 	/* Set the control register */
3323 	cpu_control(0xffffffff, cpuctrl);
3324 }
3325 #endif	/* CPU_FA526 */
3326 
3327 #if defined(CPU_IXP12X0)
3328 struct cpu_option ixp12x0_options[] = {
3329 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3330 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3331 	{ "ixp12x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3332 	{ "ixp12x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
3333 	{ "ixp12x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
3334 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3335 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
3336 	{ "ixp12x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3337 	{ NULL,			IGN, IGN, 0 }
3338 };
3339 
3340 void
3341 ixp12x0_setup(char *args)
3342 {
3343 
3344 	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE
3345 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_SYST_ENABLE
3346 		 | CPU_CONTROL_IC_ENABLE;
3347 
3348 	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_AFLT_ENABLE
3349 		 | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE
3350 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_SYST_ENABLE
3351 		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_IC_ENABLE
3352 		 | CPU_CONTROL_VECRELOC;
3353 
3354 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3355 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3356 #endif
3357 
3358 	cpuctrl = parse_cpu_options(args, ixp12x0_options, cpuctrl);
3359 
3360 #ifdef __ARMEB__
3361 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3362 #endif
3363 
3364 #ifndef ARM_HAS_VBAR
3365 	if (vector_page == ARM_VECTORS_HIGH)
3366 		cpuctrl |= CPU_CONTROL_VECRELOC;
3367 #endif
3368 
3369 	/* Clear out the cache */
3370 	cpu_idcache_wbinv_all();
3371 
3372 	/* Set the control register */
3373 	/* cpu_control(0xffffffff, cpuctrl); */
3374 	cpu_control(cpuctrlmask, cpuctrl);
3375 }
3376 #endif /* CPU_IXP12X0 */
3377 
3378 #if defined(CPU_XSCALE)
3379 struct cpu_option xscale_options[] = {
3380 #ifdef COMPAT_12
3381 	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
3382 	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3383 #endif	/* COMPAT_12 */
3384 	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
3385 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3386 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3387 	{ "xscale.branchpredict", BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
3388 	{ "xscale.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3389 	{ "xscale.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
3390 	{ "xscale.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
3391 	{ NULL,			IGN, IGN, 0 }
3392 };
3393 
3394 void
3395 xscale_setup(char *args)
3396 {
3397 	uint32_t auxctl;
3398 
3399 	/*
3400 	 * The XScale Write Buffer is always enabled.  Our option
3401 	 * is to enable/disable coalescing.  Note that bits 6:3 of the
3402 	 * control register must always be set to 1.
3403 	 */
3404 
3405 	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3406 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3407 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3408 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
3409 		 | CPU_CONTROL_BPRD_ENABLE;
3410 #if 0
3411 	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3412 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3413 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3414 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
3415 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
3416 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
3417 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
3418 #endif
3419 
3420 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3421 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3422 #endif
3423 
3424 	cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl);
3425 
3426 #ifdef __ARMEB__
3427 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3428 #endif
3429 
3430 #ifndef ARM_HAS_VBAR
3431 	if (vector_page == ARM_VECTORS_HIGH)
3432 		cpuctrl |= CPU_CONTROL_VECRELOC;
3433 #endif
3434 
3435 	/* Clear out the cache */
3436 	cpu_idcache_wbinv_all();
3437 
3438 	/*
3439 	 * Set the control register.  Note that bits 6:3 must always
3440 	 * be set to 1.
3441 	 */
3442 #if 0
3443 	cpu_control(cpuctrlmask, cpuctrl);
3444 #endif
3445 	cpu_control(0xffffffff, cpuctrl);
3446 
3447 	/* Make sure write coalescing is turned on */
3448 	auxctl = armreg_auxctl_read();
3449 #ifdef XSCALE_NO_COALESCE_WRITES
3450 	auxctl |= XSCALE_AUXCTL_K;
3451 #else
3452 	auxctl &= ~XSCALE_AUXCTL_K;
3453 #endif
3454 	armreg_auxctl_write(auxctl);
3455 }
3456 #endif	/* CPU_XSCALE */
3457 
3458 #if defined(CPU_SHEEVA)
3459 struct cpu_option sheeva_options[] = {
3460 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3461 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3462 	{ "sheeva.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3463 	{ "sheeva.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
3464 	{ "sheeva.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
3465 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3466 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
3467 	{ "sheeva.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3468 	{ NULL,			IGN, IGN, 0 }
3469 };
3470 
3471 void
3472 sheeva_setup(char *args)
3473 {
3474 	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
3475 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3476 	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
3477 #if 0
3478 	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
3479 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3480 	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
3481 	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
3482 	    | CPU_CONTROL_BPRD_ENABLE
3483 	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
3484 #endif
3485 
3486 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3487 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3488 #endif
3489 
3490 	cpuctrl = parse_cpu_options(args, sheeva_options, cpuctrl);
3491 
3492 	/* Enable DCache Streaming Switch and Write Allocate */
3493 	uint32_t sheeva_ext = armreg_sheeva_xctrl_read();
3494 
3495 	sheeva_ext |= FC_DCACHE_STREAM_EN | FC_WR_ALLOC_EN;
3496 #ifdef SHEEVA_L2_CACHE
3497 	sheeva_ext |= FC_L2CACHE_EN;
3498 	sheeva_ext &= ~FC_L2_PREF_DIS;
3499 #endif
3500 
3501 	armreg_sheeva_xctrl_write(sheeva_ext);
3502 
3503 #ifdef SHEEVA_L2_CACHE
3504 #ifndef SHEEVA_L2_CACHE_WT
3505 	arm_scache.cache_type = CPU_CT_CTYPE_WB2;
3506 #elif CPU_CT_CTYPE_WT != 0
3507 	arm_scache.cache_type = CPU_CT_CTYPE_WT;
3508 #endif
3509 	arm_scache.cache_unified = 1;
3510 	arm_scache.dcache_type = arm_scache.icache_type = CACHE_TYPE_PIPT;
3511 	arm_scache.dcache_size = arm_scache.icache_size = 256*1024;
3512 	arm_scache.dcache_ways = arm_scache.icache_ways = 4;
3513 	arm_scache.dcache_way_size = arm_scache.icache_way_size =
3514 	    arm_scache.dcache_size / arm_scache.dcache_ways;
3515 	arm_scache.dcache_line_size = arm_scache.icache_line_size = 32;
3516 	arm_scache.dcache_sets = arm_scache.icache_sets =
3517 	    arm_scache.dcache_way_size / arm_scache.dcache_line_size;
3518 
3519 	cpufuncs.cf_sdcache_wb_range = sheeva_sdcache_wb_range;
3520 	cpufuncs.cf_sdcache_inv_range = sheeva_sdcache_inv_range;
3521 	cpufuncs.cf_sdcache_wbinv_range = sheeva_sdcache_wbinv_range;
3522 #endif /* SHEEVA_L2_CACHE */
3523 
3524 #ifdef __ARMEB__
3525 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3526 #endif
3527 
3528 #ifndef ARM_HAS_VBAR
3529 	if (vector_page == ARM_VECTORS_HIGH)
3530 		cpuctrl |= CPU_CONTROL_VECRELOC;
3531 #endif
3532 
3533 	/* Clear out the cache */
3534 	cpu_idcache_wbinv_all();
3535 
3536 	/* Now really make sure they are clean.  */
3537 	__asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
3538 
3539 	/* Set the control register */
3540 	cpu_control(0xffffffff, cpuctrl);
3541 
3542 	/* And again. */
3543 	cpu_idcache_wbinv_all();
3544 #ifdef SHEEVA_L2_CACHE
3545 	sheeva_sdcache_wbinv_all();
3546 #endif
3547 }
3548 #endif	/* CPU_SHEEVA */
3549 
3550 bool
3551 cpu_gtmr_exists_p(void)
3552 {
3553 	return armreg_pfr1_read() & ARM_PFR1_GTIMER_MASK;
3554 }
3555 
3556 u_int
3557 cpu_clusterid(void)
3558 {
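	/* The cluster number is affinity level 1 of the MPIDR. */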
3559 	return __SHIFTOUT(armreg_mpidr_read(), MPIDR_AFF1);
3560 }
3561 
3562 bool
3563 cpu_earlydevice_va_p(void)
3564 {
3565 	const bool mmu_enabled_p =
3566 	    armreg_sctlr_read() & CPU_CONTROL_MMU_ENABLE;
3567 
3568 	if (!mmu_enabled_p)
3569 		return false;
3570 
3571 	/* Don't access cpu_ttb unless the mmu is enabled */
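	/* (masking with -L1_TABLE_SIZE rounds each TTBR down to its table base) */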
3572 	const bool cpul1pt_p =
3573 	    ((armreg_ttbr_read() & -L1_TABLE_SIZE) == cpu_ttb) ||
3574 	    ((armreg_ttbr1_read() & -L1_TABLE_SIZE) == cpu_ttb);
3575 
3576 	return cpul1pt_p;
3577 }
3578