/*	$NetBSD: cpufunc.c,v 1.177 2020/07/10 12:25:08 skrll Exp $	*/

/*
 * arm7tdmi support code Copyright (c) 2001 John Fremlin
 * arm8 support code Copyright (c) 1997 ARM Limited
 * arm8 support code Copyright (c) 1997 Causality Limited
 * arm9 support code Copyright (C) 2001 ARM Ltd
 * arm11 support code Copyright (c) 2007 Microsoft
 * cortexa8 support code Copyright (c) 2008 3am Software Foundry
 * cortexa8 improvements Copyright (c) Goeran Weinholt
 * Copyright (c) 1997 Mark Brinicombe.
 * Copyright (c) 1997 Causality Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Causality Limited.
 * 4. The name of Causality Limited may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpufuncs.c
 *
 * C functions for supporting CPU / MMU / TLB specific operations.
 *
 * Created	: 30/01/97
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cpufunc.c,v 1.177 2020/07/10 12:25:08 skrll Exp $");

#include "opt_arm_start.h"
#include "opt_compat_netbsd.h"
#include "opt_cpuoptions.h"
#include "opt_cputypes.h"
#include "opt_multiprocessor.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <machine/cpu.h>
#include <machine/bootconfig.h>
#include <arch/arm/arm/disassem.h>

#include <uvm/uvm.h>

#include <arm/cpufunc_proto.h>
#include <arm/cpuconf.h>
#include <arm/locore.h>

#ifdef CPU_XSCALE_80200
#include <arm/xscale/i80200reg.h>
#include <arm/xscale/i80200var.h>
#endif

#ifdef CPU_XSCALE_80321
#include <arm/xscale/i80321reg.h>
#include <arm/xscale/i80321var.h>
#endif

#ifdef CPU_XSCALE_IXP425
#include <arm/xscale/ixp425reg.h>
#include <arm/xscale/ixp425var.h>
#endif

#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321)
#include <arm/xscale/xscalereg.h>
#endif

#if defined(CPU_PJ4B)
#include "opt_cputypes.h"
#include "opt_mvsoc.h"
#include <machine/bus_defs.h>
#if defined(ARMADAXP)
#include <arm/marvell/armadaxpreg.h>
#include <arm/marvell/armadaxpvar.h>
#endif
#endif

#if defined(CPU_ARMV7) && (defined(CPU_ARMV6) || defined(CPU_PRE_ARMV6))
bool cpu_armv7_p;
#endif

#if defined(CPU_ARMV6) && (defined(CPU_ARMV7) || defined(CPU_PRE_ARMV6))
bool cpu_armv6_p;
#endif


/* PRIMARY CACHE VARIABLES */
#if (ARM_MMU_V6 + ARM_MMU_V7) != 0
u_int	arm_cache_prefer_mask;
#endif
struct	arm_cache_info arm_pcache;
struct	arm_cache_info arm_scache;

u_int	arm_dcache_align;
u_int	arm_dcache_align_mask;

// Define a TTB value that can never be used.
uint32_t cpu_ttb = ~0;

/* 1 == use cpu_sleep(), 0 == don't */
int cpu_do_powersave;
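
/*
 * Each supported core is described by one of the cpu_functions tables
 * below; set_cpufuncs() copies the table matching the running CPU into
 * the global `cpufuncs' vector.  Callers normally reach these hooks
 * through the cpu_*() wrapper macros (see <arm/cpufunc.h>) rather than
 * indirecting through the structure by hand, e.g. (illustrative):
 *
 *	cpu_tlb_flushID()	->	cpufuncs.cf_tlb_flushID()
 *	cpu_idcache_wbinv_all()	->	cpufuncs.cf_idcache_wbinv_all()
 */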

#ifdef CPU_ARM6
struct cpu_functions arm6_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm67_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm67_tlb_flush,
	.cf_tlb_flushID_SE	= arm67_tlb_purge,
	.cf_tlb_flushI		= arm67_tlb_flush,
	.cf_tlb_flushI_SE	= arm67_tlb_purge,
	.cf_tlb_flushD		= arm67_tlb_flush,
	.cf_tlb_flushD_SE	= arm67_tlb_purge,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *) cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm67_cache_flush,
	.cf_dcache_wbinv_range	= (void *)arm67_cache_flush,
	.cf_dcache_inv_range	= (void *)arm67_cache_flush,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm67_cache_flush,
	.cf_idcache_wbinv_range	= (void *)arm67_cache_flush,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

#ifdef ARM6_LATE_ABORT
	.cf_dataabt_fixup	= late_abort_fixup,
#else
	.cf_dataabt_fixup	= early_abort_fixup,
#endif
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm67_context_switch,

	.cf_setup		= arm6_setup

};
#endif	/* CPU_ARM6 */

#ifdef CPU_ARM7
struct cpu_functions arm7_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm67_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm67_tlb_flush,
	.cf_tlb_flushID_SE	= arm67_tlb_purge,
	.cf_tlb_flushI		= arm67_tlb_flush,
	.cf_tlb_flushI_SE	= arm67_tlb_purge,
	.cf_tlb_flushD		= arm67_tlb_flush,
	.cf_tlb_flushD_SE	= arm67_tlb_purge,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm67_cache_flush,
	.cf_dcache_wbinv_range	= (void *)arm67_cache_flush,
	.cf_dcache_inv_range	= (void *)arm67_cache_flush,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm67_cache_flush,
	.cf_idcache_wbinv_range	= (void *)arm67_cache_flush,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= late_abort_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm67_context_switch,

	.cf_setup		= arm7_setup

};
#endif	/* CPU_ARM7 */

#ifdef CPU_ARM7TDMI
struct cpu_functions arm7tdmi_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm7tdmi_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm7tdmi_tlb_flushID,
	.cf_tlb_flushID_SE	= arm7tdmi_tlb_flushID_SE,
	.cf_tlb_flushI		= arm7tdmi_tlb_flushID,
	.cf_tlb_flushI_SE	= arm7tdmi_tlb_flushID_SE,
	.cf_tlb_flushD		= arm7tdmi_tlb_flushID,
	.cf_tlb_flushD_SE	= arm7tdmi_tlb_flushID_SE,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm7tdmi_cache_flushID,
	.cf_dcache_wbinv_range	= (void *)arm7tdmi_cache_flushID,
	.cf_dcache_inv_range	= (void *)arm7tdmi_cache_flushID,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm7tdmi_cache_flushID,
	.cf_idcache_wbinv_range	= (void *)arm7tdmi_cache_flushID,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= late_abort_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm7tdmi_context_switch,

	.cf_setup		= arm7tdmi_setup

};
#endif	/* CPU_ARM7TDMI */

#ifdef CPU_ARM8
struct cpu_functions arm8_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm8_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm8_tlb_flushID,
	.cf_tlb_flushID_SE	= arm8_tlb_flushID_SE,
	.cf_tlb_flushI		= arm8_tlb_flushID,
	.cf_tlb_flushI_SE	= arm8_tlb_flushID_SE,
	.cf_tlb_flushD		= arm8_tlb_flushID,
	.cf_tlb_flushD_SE	= arm8_tlb_flushID_SE,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm8_cache_purgeID,
	.cf_dcache_wbinv_range	= (void *)arm8_cache_purgeID,
/*XXX*/	.cf_dcache_inv_range	= (void *)arm8_cache_purgeID,
	.cf_dcache_wb_range	= (void *)arm8_cache_cleanID,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm8_cache_purgeID,
	.cf_idcache_wbinv_range = (void *)arm8_cache_purgeID,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm8_context_switch,

	.cf_setup		= arm8_setup
};
#endif	/* CPU_ARM8 */

#ifdef CPU_ARM9
struct cpu_functions arm9_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm9_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= arm9_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= arm9_icache_sync_all,
	.cf_icache_sync_range	= arm9_icache_sync_range,

	.cf_dcache_wbinv_all	= arm9_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= arm9_dcache_wbinv_range,
/*XXX*/	.cf_dcache_inv_range	= arm9_dcache_wbinv_range,
	.cf_dcache_wb_range	= arm9_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm9_idcache_wbinv_all,
	.cf_idcache_wbinv_range = arm9_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm9_context_switch,

	.cf_setup		= arm9_setup

};
#endif /* CPU_ARM9 */

#if defined(CPU_ARM9E) || defined(CPU_ARM10)
struct cpu_functions armv5_ec_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= armv5_ec_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= arm10_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= arm10_tlb_flushI_SE,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv5_ec_icache_sync_all,
	.cf_icache_sync_range	= armv5_ec_icache_sync_range,

	.cf_dcache_wbinv_all	= armv5_ec_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= armv5_ec_dcache_wbinv_range,
/*XXX*/	.cf_dcache_inv_range	= armv5_ec_dcache_wbinv_range,
	.cf_dcache_wb_range	= armv5_ec_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= armv5_ec_idcache_wbinv_all,
	.cf_idcache_wbinv_range = armv5_ec_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm10_context_switch,

	.cf_setup		= arm10_setup

};
#endif /* CPU_ARM9E || CPU_ARM10 */

#ifdef CPU_ARM10
struct cpu_functions arm10_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= armv5_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= arm10_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= arm10_tlb_flushI_SE,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv5_icache_sync_all,
	.cf_icache_sync_range	= armv5_icache_sync_range,

	.cf_dcache_wbinv_all	= armv5_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= armv5_dcache_wbinv_range,
/*XXX*/	.cf_dcache_inv_range	= armv5_dcache_wbinv_range,
	.cf_dcache_wb_range	= armv5_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= armv5_idcache_wbinv_all,
	.cf_idcache_wbinv_range = armv5_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm10_context_switch,

	.cf_setup		= arm10_setup

};
#endif /* CPU_ARM10 */

#ifdef CPU_ARM11
struct cpu_functions arm11_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm11_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm11_tlb_flushID,
	.cf_tlb_flushID_SE	= arm11_tlb_flushID_SE,
	.cf_tlb_flushI		= arm11_tlb_flushI,
	.cf_tlb_flushI_SE	= arm11_tlb_flushI_SE,
	.cf_tlb_flushD		= arm11_tlb_flushD,
	.cf_tlb_flushD_SE	= arm11_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv6_icache_sync_all,
	.cf_icache_sync_range	= armv6_icache_sync_range,

	.cf_dcache_wbinv_all	= armv6_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= armv6_dcache_wbinv_range,
	.cf_dcache_inv_range	= armv6_dcache_inv_range,
	.cf_dcache_wb_range	= armv6_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= armv6_idcache_wbinv_all,
	.cf_idcache_wbinv_range = armv6_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= arm11_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= arm11_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm11_context_switch,

	.cf_setup		= arm11_setup

};
#endif /* CPU_ARM11 */

#ifdef CPU_ARM1136
struct cpu_functions arm1136_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm11_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm11_tlb_flushID,
	.cf_tlb_flushID_SE	= arm11_tlb_flushID_SE,
	.cf_tlb_flushI		= arm11_tlb_flushI,
	.cf_tlb_flushI_SE	= arm11_tlb_flushI_SE,
	.cf_tlb_flushD		= arm11_tlb_flushD,
	.cf_tlb_flushD_SE	= arm11_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= arm11x6_icache_sync_all,	/* 411920 */
	.cf_icache_sync_range	= arm11x6_icache_sync_range,	/* 371025 */

	.cf_dcache_wbinv_all	= arm11x6_dcache_wbinv_all,	/* 411920 */
	.cf_dcache_wbinv_range	= armv6_dcache_wbinv_range,
	.cf_dcache_inv_range	= armv6_dcache_inv_range,
	.cf_dcache_wb_range	= armv6_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm11x6_idcache_wbinv_all,	/* 411920 */
	.cf_idcache_wbinv_range = arm11x6_idcache_wbinv_range,	/* 371025 */

	/* Other functions */

	.cf_flush_prefetchbuf	= arm11x6_flush_prefetchbuf,
	.cf_drain_writebuf	= arm11_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= arm11_sleep,	/* arm1136_sleep_rev0 */

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm11_context_switch,

	.cf_setup		= arm11x6_setup

};
#endif /* CPU_ARM1136 */

#ifdef CPU_ARM1176
struct cpu_functions arm1176_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm11_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm11_tlb_flushID,
	.cf_tlb_flushID_SE	= arm11_tlb_flushID_SE,
	.cf_tlb_flushI		= arm11_tlb_flushI,
	.cf_tlb_flushI_SE	= arm11_tlb_flushI_SE,
	.cf_tlb_flushD		= arm11_tlb_flushD,
	.cf_tlb_flushD_SE	= arm11_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= arm11x6_icache_sync_all,	/* 415045 */
	.cf_icache_sync_range	= arm11x6_icache_sync_range,	/* 371367 */

	.cf_dcache_wbinv_all	= arm11x6_dcache_wbinv_all,	/* 415045 */
	.cf_dcache_wbinv_range	= armv6_dcache_wbinv_range,
	.cf_dcache_inv_range	= armv6_dcache_inv_range,
	.cf_dcache_wb_range	= armv6_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm11x6_idcache_wbinv_all,	/* 415045 */
	.cf_idcache_wbinv_range = arm11x6_idcache_wbinv_range,	/* 371367 */

	/* Other functions */

	.cf_flush_prefetchbuf	= arm11x6_flush_prefetchbuf,
	.cf_drain_writebuf	= arm11_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= arm11x6_sleep,		/* no ref. */

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm11_context_switch,

	.cf_setup		= arm11x6_setup

};
#endif /* CPU_ARM1176 */


#ifdef CPU_ARM11MPCORE
struct cpu_functions arm11mpcore_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm11_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm11_tlb_flushID,
	.cf_tlb_flushID_SE	= arm11_tlb_flushID_SE,
	.cf_tlb_flushI		= arm11_tlb_flushI,
	.cf_tlb_flushI_SE	= arm11_tlb_flushI_SE,
	.cf_tlb_flushD		= arm11_tlb_flushD,
	.cf_tlb_flushD_SE	= arm11_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv6_icache_sync_all,
	.cf_icache_sync_range	= armv5_icache_sync_range,

	.cf_dcache_wbinv_all	= armv6_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= armv5_dcache_wbinv_range,
	.cf_dcache_inv_range	= armv5_dcache_inv_range,
	.cf_dcache_wb_range	= armv5_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= armv6_idcache_wbinv_all,
	.cf_idcache_wbinv_range = armv5_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= arm11_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= arm11_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm11_context_switch,

	.cf_setup		= arm11mpcore_setup

};
#endif /* CPU_ARM11MPCORE */

#ifdef CPU_SA110
struct cpu_functions sa110_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= sa1_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= sa1_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= sa1_cache_syncI,
	.cf_icache_sync_range	= sa1_cache_syncI_rng,

	.cf_dcache_wbinv_all	= sa1_cache_purgeD,
	.cf_dcache_wbinv_range	= sa1_cache_purgeD_rng,
/*XXX*/	.cf_dcache_inv_range	= sa1_cache_purgeD_rng,
	.cf_dcache_wb_range	= sa1_cache_cleanD_rng,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= sa1_cache_purgeID,
	.cf_idcache_wbinv_range	= sa1_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= sa110_context_switch,

	.cf_setup		= sa110_setup
};
#endif	/* CPU_SA110 */

#if defined(CPU_SA1100) || defined(CPU_SA1110)
struct cpu_functions sa11x0_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= sa1_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= sa1_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= sa1_cache_syncI,
	.cf_icache_sync_range	= sa1_cache_syncI_rng,

	.cf_dcache_wbinv_all	= sa1_cache_purgeD,
	.cf_dcache_wbinv_range	= sa1_cache_purgeD_rng,
/*XXX*/	.cf_dcache_inv_range	= sa1_cache_purgeD_rng,
	.cf_dcache_wb_range	= sa1_cache_cleanD_rng,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= sa1_cache_purgeID,
	.cf_idcache_wbinv_range	= sa1_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf	= sa11x0_drain_readbuf,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= sa11x0_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= sa11x0_context_switch,

	.cf_setup		= sa11x0_setup
};
#endif	/* CPU_SA1100 || CPU_SA1110 */

#if defined(CPU_FA526)
struct cpu_functions fa526_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= fa526_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= fa526_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= fa526_tlb_flushI_SE,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= fa526_icache_sync_all,
	.cf_icache_sync_range	= fa526_icache_sync_range,

	.cf_dcache_wbinv_all	= fa526_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= fa526_dcache_wbinv_range,
	.cf_dcache_inv_range	= fa526_dcache_inv_range,
	.cf_dcache_wb_range	= fa526_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= fa526_idcache_wbinv_all,
	.cf_idcache_wbinv_range	= fa526_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= fa526_flush_prefetchbuf,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= fa526_flush_brnchtgt_E,

	.cf_sleep		= fa526_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= fa526_context_switch,

	.cf_setup		= fa526_setup
};
#endif	/* CPU_FA526 */

#ifdef CPU_IXP12X0
struct cpu_functions ixp12x0_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= sa1_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= sa1_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= sa1_cache_syncI,
	.cf_icache_sync_range	= sa1_cache_syncI_rng,

	.cf_dcache_wbinv_all	= sa1_cache_purgeD,
	.cf_dcache_wbinv_range	= sa1_cache_purgeD_rng,
/*XXX*/	.cf_dcache_inv_range	= sa1_cache_purgeD_rng,
	.cf_dcache_wb_range	= sa1_cache_cleanD_rng,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= sa1_cache_purgeID,
	.cf_idcache_wbinv_range	= sa1_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf	= ixp12x0_drain_readbuf,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= ixp12x0_context_switch,

	.cf_setup		= ixp12x0_setup
};
#endif	/* CPU_IXP12X0 */

#if defined(CPU_XSCALE)
struct cpu_functions xscale_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= xscale_cpwait,

	/* MMU functions */

	.cf_control		= xscale_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= xscale_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= xscale_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= xscale_cache_syncI,
	.cf_icache_sync_range	= xscale_cache_syncI_rng,

	.cf_dcache_wbinv_all	= xscale_cache_purgeD,
	.cf_dcache_wbinv_range	= xscale_cache_purgeD_rng,
	.cf_dcache_inv_range	= xscale_cache_flushD_rng,
	.cf_dcache_wb_range	= xscale_cache_cleanD_rng,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= xscale_cache_purgeID,
	.cf_idcache_wbinv_range = xscale_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= xscale_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= xscale_context_switch,

	.cf_setup		= xscale_setup
};
#endif /* CPU_XSCALE */

#if defined(CPU_ARMV7)
struct cpu_functions armv7_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= armv7_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv7up_tlb_flushID,
	.cf_tlb_flushID_SE	= armv7up_tlb_flushID_SE,
	.cf_tlb_flushI		= armv7up_tlb_flushI,
	.cf_tlb_flushI_SE	= armv7up_tlb_flushI_SE,
	.cf_tlb_flushD		= armv7up_tlb_flushD,
	.cf_tlb_flushD_SE	= armv7up_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv7_icache_sync_all,
	.cf_dcache_wbinv_all	= armv7_dcache_wbinv_all,

	.cf_dcache_inv_range	= armv7_dcache_inv_range,
	.cf_dcache_wb_range	= armv7_dcache_wb_range,
	.cf_dcache_wbinv_range	= armv7_dcache_wbinv_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_icache_sync_range	= armv7_icache_sync_range,
	.cf_idcache_wbinv_range = armv7_idcache_wbinv_range,


	.cf_idcache_wbinv_all	= armv7_idcache_wbinv_all,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv7_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= armv7_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= armv7_context_switch,

	.cf_setup		= armv7_setup

};
#endif /* CPU_ARMV7 */

#ifdef CPU_PJ4B
struct cpu_functions pj4bv7_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= armv7_drain_writebuf,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= armv7_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv7up_tlb_flushID,
	.cf_tlb_flushID_SE	= armv7up_tlb_flushID_SE,
	.cf_tlb_flushI		= armv7up_tlb_flushID,
	.cf_tlb_flushI_SE	= armv7up_tlb_flushID_SE,
	.cf_tlb_flushD		= armv7up_tlb_flushID,
	.cf_tlb_flushD_SE	= armv7up_tlb_flushID_SE,

	/* Cache operations (see also pj4bv7_setup) */
	.cf_icache_sync_all	= armv7_idcache_wbinv_all,
	.cf_icache_sync_range	= armv7_icache_sync_range,

	.cf_dcache_wbinv_all	= armv7_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= armv7_dcache_wbinv_range,
	.cf_dcache_inv_range	= armv7_dcache_inv_range,
	.cf_dcache_wb_range	= armv7_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= armv7_idcache_wbinv_all,
	.cf_idcache_wbinv_range	= armv7_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv7_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= pj4b_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= armv7_context_switch,

	.cf_setup		= pj4bv7_setup
};
#endif /* CPU_PJ4B */

#ifdef CPU_SHEEVA
struct cpu_functions sheeva_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= armv5_ec_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= arm10_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= arm10_tlb_flushI_SE,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv5_ec_icache_sync_all,
	.cf_icache_sync_range	= armv5_ec_icache_sync_range,

	.cf_dcache_wbinv_all	= armv5_ec_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= sheeva_dcache_wbinv_range,
	.cf_dcache_inv_range	= sheeva_dcache_inv_range,
	.cf_dcache_wb_range	= sheeva_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= armv5_ec_idcache_wbinv_all,
	.cf_idcache_wbinv_range = sheeva_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)sheeva_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm10_context_switch,

	.cf_setup		= sheeva_setup
};
#endif /* CPU_SHEEVA */


/*
 * Global constants also used by locore.s
 */

struct cpu_functions cpufuncs;
u_int cputype;

#if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) || \
    defined(CPU_ARM9E) || defined(CPU_ARM10) || defined(CPU_FA526) || \
    defined(CPU_SHEEVA) || \
    defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
    defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425) || \
    defined(CPU_ARMV6) || defined(CPU_ARMV7)
static void get_cachetype_cp15(void);

/* Additional cache information local to this file.  Log2 of some of the
   above numbers.  */
static int	arm_dcache_log2_nsets;
static int	arm_dcache_log2_assoc;
static int	arm_dcache_log2_linesize;

#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
static inline u_int
get_cachesize_cp15(int cssr)
{
#if defined(CPU_ARMV7)
	__asm volatile(".arch\tarmv7a");

	armreg_csselr_write(cssr);
	arm_isb();			 /* sync to the new cssr */

#else
	__asm volatile("mcr p15, 1, %0, c0, c0, 2" :: "r" (cssr) : "memory");
#endif
	return armreg_ccsidr_read();
}
#endif

#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
static void
get_cacheinfo_clidr(struct arm_cache_info *info, u_int level, u_int clidr)
{
	u_int csid;

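	/*
	 * `clidr' here is the 3-bit CLIDR Ctype field for this level:
	 * 0 = no cache, 1 = I-cache only, 2 = D-cache only,
	 * 3 = separate I- and D-caches, 4 = unified; so bit 1 or 2 set
	 * means there is data-side geometry to read out.
	 */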
	if (clidr & 6) {
		csid = get_cachesize_cp15(level << 1); /* select dcache values */
		info->dcache_sets = CPU_CSID_NUMSETS(csid) + 1;
		info->dcache_ways = CPU_CSID_ASSOC(csid) + 1;
		info->dcache_line_size = 1U << (CPU_CSID_LEN(csid) + 4);
		info->dcache_way_size =
		    info->dcache_line_size * info->dcache_sets;
		info->dcache_size = info->dcache_way_size * info->dcache_ways;

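		/*
		 * Worked example, assuming a 32KB 4-way D-cache with
		 * 64-byte lines: CCSIDR reads back LineSize = 2
		 * (1 << (2 + 4) = 64 bytes), Associativity = 3 (4 ways)
		 * and NumSets = 127 (128 sets), giving dcache_way_size
		 * = 64 * 128 = 8KB and dcache_size = 8KB * 4 = 32KB.
		 */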
		if (level == 0) {
			arm_dcache_log2_assoc = CPU_CSID_ASSOC(csid) + 1;
			arm_dcache_log2_linesize = CPU_CSID_LEN(csid) + 4;
			arm_dcache_log2_nsets =
			    31 - __builtin_clz(info->dcache_sets*2-1);
		}
	}

	info->cache_unified = (clidr == 4);

	if (level > 0) {
		info->dcache_type = CACHE_TYPE_PIPT;
		info->icache_type = CACHE_TYPE_PIPT;
	}

	if (info->cache_unified) {
		info->icache_ways = info->dcache_ways;
		info->icache_line_size = info->dcache_line_size;
		info->icache_way_size = info->dcache_way_size;
		info->icache_size = info->dcache_size;
	} else {
		csid = get_cachesize_cp15((level << 1)|CPU_CSSR_InD); /* select icache values */
		info->icache_sets = CPU_CSID_NUMSETS(csid) + 1;
		info->icache_ways = CPU_CSID_ASSOC(csid) + 1;
		info->icache_line_size = 1U << (CPU_CSID_LEN(csid) + 4);
		info->icache_way_size = info->icache_line_size * info->icache_sets;
		info->icache_size = info->icache_way_size * info->icache_ways;
	}
	if (level == 0
	    && info->dcache_way_size <= PAGE_SIZE
	    && info->icache_way_size <= PAGE_SIZE) {
		arm_cache_prefer_mask = 0;
	}
}
#endif /* (ARM_MMU_V6 + ARM_MMU_V7) > 0 */

static void
get_cachetype_cp15(void)
{
	u_int ctype, isize, dsize;
	u_int multiplier;

	ctype = armreg_ctr_read();

	/*
	 * ...and thus spake the ARM ARM:
	 *
	 * If an <opcode2> value corresponding to an unimplemented or
	 * reserved ID register is encountered, the System Control
	 * processor returns the value of the main ID register.
	 */
	if (ctype == cpu_idnum())
		goto out;

#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
	if (CPU_CT_FORMAT(ctype) == 4) {
		u_int clidr = armreg_clidr_read();

		if (CPU_CT4_L1IPOLICY(ctype) == CPU_CT4_L1_PIPT) {
			arm_pcache.icache_type = CACHE_TYPE_PIPT;
		} else {
			arm_pcache.icache_type = CACHE_TYPE_VIPT;
			arm_cache_prefer_mask = PAGE_SIZE;
		}
#ifdef CPU_CORTEX
		if (CPU_ID_CORTEX_P(cpu_idnum())) {
			arm_pcache.dcache_type = CACHE_TYPE_PIPT;
		} else
#endif
		{
			arm_pcache.dcache_type = CACHE_TYPE_VIPT;
		}
		arm_pcache.cache_type = CPU_CT_CTYPE_WB14;

		get_cacheinfo_clidr(&arm_pcache, 0, clidr & 7);
		arm_dcache_align = arm_pcache.dcache_line_size;
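		/* Advance to the level 2 Ctype field and probe any L2. */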
		clidr >>= 3;
		if (clidr & 7) {
			get_cacheinfo_clidr(&arm_scache, 1, clidr & 7);
			if (arm_scache.dcache_line_size < arm_dcache_align)
				arm_dcache_align = arm_scache.dcache_line_size;
		}
		/*
		 * The pmap cleans an entire way for an exec page so
		 * we don't care that it's VIPT anymore.
		 */
		if (arm_pcache.dcache_type == CACHE_TYPE_PIPT) {
			arm_cache_prefer_mask = 0;
		}
		goto out;
	}
#endif /* ARM_MMU_V6 + ARM_MMU_V7 > 0 */

	if ((ctype & CPU_CT_S) == 0)
		arm_pcache.cache_unified = 1;

	/*
	 * If you want to know how this code works, go read the ARM ARM.
	 */

	arm_pcache.cache_type = CPU_CT_CTYPE(ctype);

	if (arm_pcache.cache_unified == 0) {
		isize = CPU_CT_ISIZE(ctype);
		multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
		arm_pcache.icache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
		if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
			if (isize & CPU_CT_xSIZE_M)
				arm_pcache.icache_line_size = 0; /* not present */
			else
				arm_pcache.icache_ways = 1;
		} else {
			arm_pcache.icache_ways = multiplier <<
			    (CPU_CT_xSIZE_ASSOC(isize) - 1);
#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
			arm_pcache.icache_type = CACHE_TYPE_VIPT;
			if (CPU_CT_xSIZE_P & isize)
				arm_cache_prefer_mask |=
				    __BIT(9 + CPU_CT_xSIZE_SIZE(isize)
					  - CPU_CT_xSIZE_ASSOC(isize))
				    - PAGE_SIZE;
#endif
		}
		arm_pcache.icache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
		arm_pcache.icache_way_size =
		    __BIT(9 + CPU_CT_xSIZE_SIZE(isize) - CPU_CT_xSIZE_ASSOC(isize));
	}

	dsize = CPU_CT_DSIZE(ctype);
	multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
	arm_pcache.dcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
	if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
		if (dsize & CPU_CT_xSIZE_M)
			arm_pcache.dcache_line_size = 0; /* not present */
		else
			arm_pcache.dcache_ways = 1;
	} else {
		arm_pcache.dcache_ways = multiplier <<
		    (CPU_CT_xSIZE_ASSOC(dsize) - 1);
#if (ARM_MMU_V6) > 0
		arm_pcache.dcache_type = CACHE_TYPE_VIPT;
		if ((CPU_CT_xSIZE_P & dsize)
		    && CPU_ID_ARM11_P(curcpu()->ci_arm_cpuid)) {
			arm_cache_prefer_mask |=
			    __BIT(9 + CPU_CT_xSIZE_SIZE(dsize)
				  - CPU_CT_xSIZE_ASSOC(dsize)) - PAGE_SIZE;
		}
#endif
	}
	arm_pcache.dcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
	arm_pcache.dcache_way_size =
	    __BIT(9 + CPU_CT_xSIZE_SIZE(dsize) - CPU_CT_xSIZE_ASSOC(dsize));

	arm_dcache_align = arm_pcache.dcache_line_size;

	arm_dcache_log2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
	arm_dcache_log2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
	arm_dcache_log2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
	    CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);

 out:
	KASSERTMSG(arm_dcache_align <= CACHE_LINE_SIZE,
	    "arm_dcache_align=%u CACHE_LINE_SIZE=%u",
	    arm_dcache_align, CACHE_LINE_SIZE);
	arm_dcache_align_mask = arm_dcache_align - 1;
}
#endif /* ARM7TDMI || ARM8 || ARM9 || ARM9E || ARM10 || FA526 || SHEEVA || XSCALE || ARMV6 || ARMV7 */

#if defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_SA110) || \
    defined(CPU_SA1100) || defined(CPU_SA1110) || defined(CPU_IXP12X0)
/* Cache information for CPUs without cache type registers. */
struct cachetab {
	uint32_t ct_cpuid;
	int	ct_pcache_type;
	int	ct_pcache_unified;
	int	ct_pdcache_size;
	int	ct_pdcache_line_size;
	int	ct_pdcache_ways;
	int	ct_picache_size;
	int	ct_picache_line_size;
	int	ct_picache_ways;
};

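/*
 * Sizes in the table below are in bytes; "ls" is the line size in
 * bytes and "wy" the number of ways.
 */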
struct cachetab cachetab[] = {
    /* cpuid,           cache type,       u,  dsiz, ls, wy,  isiz, ls, wy */
    { CPU_ID_ARM2,      0,                1,     0,  0,  0,     0,  0,  0 },
    { CPU_ID_ARM250,    0,                1,     0,  0,  0,     0,  0,  0 },
    { CPU_ID_ARM3,      CPU_CT_CTYPE_WT,  1,  4096, 16, 64,     0,  0,  0 },
    { CPU_ID_ARM610,	CPU_CT_CTYPE_WT,  1,  4096, 16, 64,     0,  0,  0 },
    { CPU_ID_ARM710,    CPU_CT_CTYPE_WT,  1,  8192, 32,  4,     0,  0,  0 },
    { CPU_ID_ARM7500,   CPU_CT_CTYPE_WT,  1,  4096, 16,  4,     0,  0,  0 },
    { CPU_ID_ARM710A,   CPU_CT_CTYPE_WT,  1,  8192, 16,  4,     0,  0,  0 },
    { CPU_ID_ARM7500FE, CPU_CT_CTYPE_WT,  1,  4096, 16,  4,     0,  0,  0 },
    /* XXX is this type right for SA-1? */
    { CPU_ID_SA110,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 },
    { CPU_ID_SA1100,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
    { CPU_ID_SA1110,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
    { CPU_ID_IXP1200,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 }, /* XXX */
    { 0, 0, 0, 0, 0, 0, 0, 0}
};

static void get_cachetype_table(void);

static void
get_cachetype_table(void)
{
	int i;
	uint32_t cpuid = cpu_idnum();

	for (i = 0; cachetab[i].ct_cpuid != 0; i++) {
		if (cachetab[i].ct_cpuid == (cpuid & CPU_ID_CPU_MASK)) {
			arm_pcache.cache_type = cachetab[i].ct_pcache_type;
			arm_pcache.cache_unified = cachetab[i].ct_pcache_unified;
			arm_pcache.dcache_size = cachetab[i].ct_pdcache_size;
			arm_pcache.dcache_line_size =
			    cachetab[i].ct_pdcache_line_size;
			arm_pcache.dcache_ways = cachetab[i].ct_pdcache_ways;
			if (arm_pcache.dcache_ways) {
				arm_pcache.dcache_way_size =
				    arm_pcache.dcache_size
				    / arm_pcache.dcache_ways;
			}
			arm_pcache.icache_size = cachetab[i].ct_picache_size;
			arm_pcache.icache_line_size =
			    cachetab[i].ct_picache_line_size;
			arm_pcache.icache_ways = cachetab[i].ct_picache_ways;
			if (arm_pcache.icache_ways) {
				arm_pcache.icache_way_size =
				    arm_pcache.icache_size
				    / arm_pcache.icache_ways;
			}
		}
	}

	arm_dcache_align = arm_pcache.dcache_line_size;
	arm_dcache_align_mask = arm_dcache_align - 1;
}

#endif /* ARM6 || ARM7 || SA110 || SA1100 || SA1110 || IXP12X0 */


#if defined(CPU_CORTEX) || defined(CPU_PJ4B)
static inline void
set_cpufuncs_mpfixup(void)
{
#ifdef MULTIPROCESSOR
	/* If MP extensions are present, patch in MP TLB ops */
	const uint32_t mpidr = armreg_mpidr_read();
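	/*
	 * MPIDR_MP set means the multiprocessing extensions are
	 * implemented; MPIDR_U set means a uniprocessor part.  The
	 * broadcast TLB ops are only wanted when MP is set and U is
	 * clear.
	 */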
	if ((mpidr & (MPIDR_MP|MPIDR_U)) == MPIDR_MP) {
		cpufuncs.cf_tlb_flushID = armv7mp_tlb_flushID;
		cpufuncs.cf_tlb_flushID_SE = armv7mp_tlb_flushID_SE;
		cpufuncs.cf_tlb_flushI = armv7mp_tlb_flushI;
		cpufuncs.cf_tlb_flushI_SE = armv7mp_tlb_flushI_SE;
		cpufuncs.cf_tlb_flushD = armv7mp_tlb_flushD;
		cpufuncs.cf_tlb_flushD_SE = armv7mp_tlb_flushD_SE;
	}
#endif
}
#endif

/*
 * Cannot panic here as we may not have a console yet ...
 */

int
set_cpufuncs(void)
{
	if (cputype == 0) {
		cputype = cpufunc_id();
		cputype &= CPU_ID_CPU_MASK;
	}

	/*
	 * NOTE: cpu_do_powersave defaults to off.  If we encounter a
	 * CPU type where we want to use it by default, then we set it.
	 */
#ifdef CPU_ARM6
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    (cputype & 0x00000f00) == 0x00000600) {
		cpufuncs = arm6_cpufuncs;
		get_cachetype_table();
		pmap_pte_init_generic();
		return 0;
	}
#endif	/* CPU_ARM6 */
#ifdef CPU_ARM7
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    CPU_ID_IS7(cputype) &&
	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V3) {
		cpufuncs = arm7_cpufuncs;
		get_cachetype_table();
		pmap_pte_init_generic();
		return 0;
	}
#endif	/* CPU_ARM7 */
#ifdef CPU_ARM7TDMI
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    CPU_ID_IS7(cputype) &&
	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V4T) {
		cpufuncs = arm7tdmi_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		return 0;
	}
#endif
#ifdef CPU_ARM8
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    (cputype & 0x0000f000) == 0x00008000) {
		cpufuncs = arm8_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_arm8();
		return 0;
	}
#endif	/* CPU_ARM8 */
#ifdef CPU_ARM9
	if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
	     (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
	    (cputype & 0x0000f000) == 0x00009000) {
		cpufuncs = arm9_cpufuncs;
		get_cachetype_cp15();
		arm9_dcache_sets_inc = 1U << arm_dcache_log2_linesize;
		arm9_dcache_sets_max =
		    (1U << (arm_dcache_log2_linesize + arm_dcache_log2_nsets)) -
		    arm9_dcache_sets_inc;
		arm9_dcache_index_inc = 1U << (32 - arm_dcache_log2_assoc);
		arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
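		/*
		 * These globals parameterise the set/way loops in the
		 * ARM9 cache helpers: the set index advances through
		 * the cache in line-sized steps, while the way index
		 * is kept in the top log2(associativity) bits of the
		 * set/way operand, hence the 1U << (32 - log2_assoc)
		 * increment.
		 */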
#ifdef	ARM9_CACHE_WRITE_THROUGH
		pmap_pte_init_arm9();
#else
		pmap_pte_init_generic();
#endif
		return 0;
	}
#endif /* CPU_ARM9 */
#if defined(CPU_ARM9E) || defined(CPU_ARM10)
	if (cputype == CPU_ID_ARM926EJS ||
	    cputype == CPU_ID_ARM1026EJS) {
		cpufuncs = armv5_ec_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		return 0;
	}
#endif /* CPU_ARM9E || CPU_ARM10 */
#if defined(CPU_SHEEVA)
	if (cputype == CPU_ID_MV88SV131 ||
	    cputype == CPU_ID_MV88FR571_VD) {
		cpufuncs = sheeva_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		cpu_do_powersave = 1;			/* Enable powersave */
		return 0;
	}
#endif /* CPU_SHEEVA */
#ifdef CPU_ARM10
	if (/* cputype == CPU_ID_ARM1020T || */
	    cputype == CPU_ID_ARM1020E) {
		/*
		 * Select write-through caching (this isn't really an
		 * option on ARM1020T).
		 */
		cpufuncs = arm10_cpufuncs;
		get_cachetype_cp15();
		armv5_dcache_sets_inc = 1U << arm_dcache_log2_linesize;
		armv5_dcache_sets_max =
		    (1U << (arm_dcache_log2_linesize + arm_dcache_log2_nsets)) -
		    armv5_dcache_sets_inc;
		armv5_dcache_index_inc = 1U << (32 - arm_dcache_log2_assoc);
		armv5_dcache_index_max = 0U - armv5_dcache_index_inc;
		pmap_pte_init_generic();
		return 0;
	}
#endif /* CPU_ARM10 */


#if defined(CPU_ARM11MPCORE)
	if (cputype == CPU_ID_ARM11MPCORE) {
		cpufuncs = arm11mpcore_cpufuncs;
#if defined(CPU_ARMV7) || defined(CPU_PRE_ARMV6)
		cpu_armv6_p = true;
#endif
		get_cachetype_cp15();
		armv5_dcache_sets_inc = 1U << arm_dcache_log2_linesize;
		armv5_dcache_sets_max = (1U << (arm_dcache_log2_linesize +
			arm_dcache_log2_nsets)) - armv5_dcache_sets_inc;
		armv5_dcache_index_inc = 1U << (32 - arm_dcache_log2_assoc);
		armv5_dcache_index_max = 0U - armv5_dcache_index_inc;
		cpu_do_powersave = 1;			/* Enable powersave */
		pmap_pte_init_arm11mpcore();
		if (arm_cache_prefer_mask)
			uvmexp.ncolors = (arm_cache_prefer_mask >> PGSHIFT) + 1;

		return 0;

	}
#endif	/* CPU_ARM11MPCORE */

#if defined(CPU_ARM11)
	if (cputype == CPU_ID_ARM1136JS ||
	    cputype == CPU_ID_ARM1136JSR1 ||
	    cputype == CPU_ID_ARM1176JZS) {
		cpufuncs = arm11_cpufuncs;
#if defined(CPU_ARM1136)
		if (cputype == CPU_ID_ARM1136JS ||
		    cputype == CPU_ID_ARM1136JSR1) {
			cpufuncs = arm1136_cpufuncs;
			if (cputype == CPU_ID_ARM1136JS)
				cpufuncs.cf_sleep = arm1136_sleep_rev0;
		}
#endif
#if defined(CPU_ARM1176)
		if (cputype == CPU_ID_ARM1176JZS) {
			cpufuncs = arm1176_cpufuncs;
		}
#endif
#if defined(CPU_ARMV7) || defined(CPU_PRE_ARMV6)
		cpu_armv6_p = true;
#endif
		cpu_do_powersave = 1;			/* Enable powersave */
		get_cachetype_cp15();
#ifdef ARM11_CACHE_WRITE_THROUGH
		pmap_pte_init_arm11();
#else
		pmap_pte_init_armv6();
#endif
		if (arm_cache_prefer_mask)
			uvmexp.ncolors = (arm_cache_prefer_mask >> PGSHIFT) + 1;

		/*
		 * Start and reset the PMC Cycle Counter.
		 */
		armreg_pmcrv6_write(ARM11_PMCCTL_E | ARM11_PMCCTL_P | ARM11_PMCCTL_C);
		return 0;
	}
#endif /* CPU_ARM11 */
#ifdef CPU_SA110
	if (cputype == CPU_ID_SA110) {
		cpufuncs = sa110_cpufuncs;
		get_cachetype_table();
		pmap_pte_init_sa1();
		return 0;
	}
#endif	/* CPU_SA110 */
#ifdef CPU_SA1100
	if (cputype == CPU_ID_SA1100) {
		cpufuncs = sa11x0_cpufuncs;
		get_cachetype_table();
		pmap_pte_init_sa1();

		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;

		return 0;
	}
#endif	/* CPU_SA1100 */
#ifdef CPU_SA1110
	if (cputype == CPU_ID_SA1110) {
		cpufuncs = sa11x0_cpufuncs;
		get_cachetype_table();
		pmap_pte_init_sa1();

		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;

		return 0;
	}
#endif	/* CPU_SA1110 */
#ifdef CPU_FA526
	if (cputype == CPU_ID_FA526) {
		cpufuncs = fa526_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_generic();

		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;

		return 0;
	}
#endif	/* CPU_FA526 */
#ifdef CPU_IXP12X0
	if (cputype == CPU_ID_IXP1200) {
		cpufuncs = ixp12x0_cpufuncs;
		get_cachetype_table();
		pmap_pte_init_sa1();
		return 0;
	}
#endif  /* CPU_IXP12X0 */
#ifdef CPU_XSCALE_80200
	if (cputype == CPU_ID_80200) {
		int rev = cpufunc_id() & CPU_ID_REVISION_MASK;

		i80200_icu_init();

		/*
		 * Reset the Performance Monitoring Unit to a
		 * pristine state:
		 *	- CCNT, PMN0, PMN1 reset to 0
		 *	- overflow indications cleared
		 *	- all counters disabled
		 */
		__asm volatile("mcr p14, 0, %0, c0, c0, 0"
			:
			: "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
			       PMNC_CC_IF));

#if defined(XSCALE_CCLKCFG)
		/*
		 * Crank CCLKCFG to maximum legal value.
		 */
		__asm volatile ("mcr p14, 0, %0, c6, c0, 0"
			:
			: "r" (XSCALE_CCLKCFG));
#endif

		/*
		 * XXX Disable ECC in the Bus Controller Unit; we
		 * don't really support it, yet.  Clear any pending
		 * error indications.
		 */
		__asm volatile("mcr p13, 0, %0, c0, c1, 0"
			:
			: "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));

		cpufuncs = xscale_cpufuncs;

		/*
		 * i80200 errata: Step-A0 and A1 have a bug where
		 * D$ dirty bits are not cleared on "invalidate by
		 * address".
		 *
		 * Workaround: Clean cache line before invalidating.
		 */
		if (rev == 0 || rev == 1)
			cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng;

		get_cachetype_cp15();
		pmap_pte_init_xscale();
1893 		return 0;
1894 	}
1895 #endif /* CPU_XSCALE_80200 */
1896 #ifdef CPU_XSCALE_80321
1897 	if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
1898 	    cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0 ||
1899 	    cputype == CPU_ID_80219_400 || cputype == CPU_ID_80219_600) {
1900 		i80321_icu_init();
1901 
1902 		/*
1903 		 * Reset the Performance Monitoring Unit to a
1904 		 * pristine state:
1905 		 *	- CCNT, PMN0, PMN1 reset to 0
1906 		 *	- overflow indications cleared
1907 		 *	- all counters disabled
1908 		 */
1909 		__asm volatile("mcr p14, 0, %0, c0, c0, 0"
1910 			:
1911 			: "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
1912 			       PMNC_CC_IF));
1913 
1914 		cpufuncs = xscale_cpufuncs;
1915 
1916 		get_cachetype_cp15();
1917 		pmap_pte_init_xscale();
1918 		return 0;
1919 	}
1920 #endif /* CPU_XSCALE_80321 */
1921 #ifdef __CPU_XSCALE_PXA2XX
1922 	/* Ignore the core revision when matching PXA2xx CPUs */
1923 	if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X ||
1924 	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
1925 	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {
1926 
1927 		cpufuncs = xscale_cpufuncs;
1928 
1929 		get_cachetype_cp15();
1930 		pmap_pte_init_xscale();
1931 
1932 		/* Use powersave on this CPU. */
1933 		cpu_do_powersave = 1;
1934 
1935 		return 0;
1936 	}
1937 #endif /* __CPU_XSCALE_PXA2XX */
1938 #ifdef CPU_XSCALE_IXP425
1939 	if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
1940 	    cputype == CPU_ID_IXP425_266) {
1941 		ixp425_icu_init();
1942 
1943 		cpufuncs = xscale_cpufuncs;
1944 
1945 		get_cachetype_cp15();
1946 		pmap_pte_init_xscale();
1947 
1948 		return 0;
1949 	}
1950 #endif /* CPU_XSCALE_IXP425 */
1951 #if defined(CPU_CORTEX)
1952 	if (CPU_ID_CORTEX_P(cputype)) {
1953 		cpufuncs = armv7_cpufuncs;
1954 		set_cpufuncs_mpfixup();
1955 		cpu_do_powersave = 1;			/* Enable powersave */
1956 #if defined(CPU_ARMV6) || defined(CPU_PRE_ARMV6)
1957 		cpu_armv7_p = true;
1958 #endif
1959 		get_cachetype_cp15();
1960 		pmap_pte_init_armv7();
1961 		if (arm_cache_prefer_mask)
1962 			uvmexp.ncolors = (arm_cache_prefer_mask >> PGSHIFT) + 1;
1963 
1964 		/*
1965 		 * Start and reset the PMC Cycle Counter.
1966 		 */
1967 		armreg_pmcr_write(ARM11_PMCCTL_E | ARM11_PMCCTL_P | ARM11_PMCCTL_C);
1968 		armreg_pmcntenset_write(CORTEX_CNTENS_C);
1969 		return 0;
1970 	}
1971 #endif /* CPU_CORTEX */
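
#if 0
/*
 * Illustrative sketch (not compiled): once the cycle counter has been
 * enabled as above, it can be read back through the architectural
 * ARMv7 PMCCNTR encoding; the helper name here is hypothetical.
 */
static inline uint32_t
read_cycle_counter(void)
{
	uint32_t ccnt;

	/* MRC p15, 0, <Rt>, c9, c13, 0 reads PMCCNTR on ARMv7 */
	__asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (ccnt));
	return ccnt;
}
#endif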
1972 
1973 #if defined(CPU_PJ4B)
1974 	if ((cputype == CPU_ID_MV88SV581X_V6 ||
1975 	    cputype == CPU_ID_MV88SV581X_V7 ||
1976 	    cputype == CPU_ID_MV88SV584X_V7 ||
1977 	    cputype == CPU_ID_ARM_88SV581X_V6 ||
1978 	    cputype == CPU_ID_ARM_88SV581X_V7) &&
1979 	    (armreg_pfr0_read() & ARM_PFR0_THUMBEE_MASK)) {
1980 		cpufuncs = pj4bv7_cpufuncs;
1981 		set_cpufuncs_mpfixup();
1982 #if defined(CPU_ARMV6) || defined(CPU_PRE_ARMV6)
1983 		cpu_armv7_p = true;
1984 #endif
1985 		get_cachetype_cp15();
1986 		pmap_pte_init_armv7();
1987 		return 0;
1988 	}
1989 #endif /* CPU_PJ4B */
1990 
1991 	/*
1992 	 * Bzzzz. And the answer was ...
1993 	 */
1994 	panic("No support for this CPU type (%08x) in kernel", cputype);
1995 	return ARCHITECTURE_NOT_PRESENT;
1996 }
1997 
1998 /*
1999  * Fixup routines for data and prefetch aborts.
2000  *
2001  * Several compile time symbols are used
2002  * Several compile-time symbols are used:
2003  *
2004  * DEBUG_FAULT_CORRECTION - Print debugging information during the
2005  * correction of registers after a fault.
2006  * ARM6_LATE_ABORT - ARM6 supports both early and late aborts;
2007  * when defined, late aborts are used.
2008 
2009 
2010 /*
2011  * Null abort fixup routine.
2012  * For use when no fixup is required.
2013  */
2014 int
2015 cpufunc_null_fixup(void *arg)
2016 {
2017 	return(ABORT_FIXUP_OK);
2018 }
2019 
2020 
2021 #if defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI)
2022 
2023 #ifdef DEBUG_FAULT_CORRECTION
2024 #define DFC_PRINTF(x)		printf x
2025 #define DFC_DISASSEMBLE(x)	disassemble(x)
2026 #else
2027 #define DFC_PRINTF(x)		/* nothing */
2028 #define DFC_DISASSEMBLE(x)	/* nothing */
2029 #endif
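
/*
 * Note on usage: DFC_PRINTF takes a doubly-parenthesised argument so
 * that a variable-length printf argument list can pass through a
 * single macro parameter, e.g. DFC_PRINTF(("r%d=%08x\n", base,
 * registers[base])); expands to printf("r%d=%08x\n", base,
 * registers[base]); when DEBUG_FAULT_CORRECTION is defined and to
 * nothing otherwise.
 */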
2030 
2031 /*
2032  * "Early" data abort fixup.
2033  *
2034  * For ARM2, ARM2as, ARM3 and ARM6 (in early-abort mode).  Also used
2035  * indirectly by ARM6 (in late-abort mode) and ARM7[TDMI].
2036  *
2037  * In early aborts, we may have to fix up LDM, STM, LDC and STC.
2038  */
2039 int
2040 early_abort_fixup(void *arg)
2041 {
2042 	trapframe_t *frame = arg;
2043 	u_int fault_pc;
2044 	u_int fault_instruction;
2045 	int saved_lr = 0;
2046 
2047 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
2048 
2049 		/* OK, an abort in SVC mode */
2050 
2051 		/*
2052 		 * Copy the SVC r14 into the usr r14 - the usr r14 is garbage
2053 		 * as the fault happened in SVC mode, but we need it in the
2054 		 * usr slot so we can treat the registers as an array of ints
2055 		 * during fixing.
2056 		 * NOTE: the PC fills the r15 slot, but writeback to r15 is
2057 		 * not allowed anyway.
2058 		 * Doing it like this is more efficient than trapping this
2059 		 * case in all possible locations in the following fixup code.
2060 		 */
2061 
2062 		saved_lr = frame->tf_usr_lr;
2063 		frame->tf_usr_lr = frame->tf_svc_lr;
2064 
2065 		/*
2066 		 * Note the trapframe does not have the SVC r13 so a fault
2067 		 * from an instruction with writeback to r13 in SVC mode is
2068 		 * not allowed. This should not happen as the kstack is
2069 		 * always valid.
2070 		 */
2071 	}
2072 
2073 	/* Get the faulting PC and fetch the offending instruction */
2074 
2075 	fault_pc = frame->tf_pc;
2076 	fault_instruction = *((volatile unsigned int *)fault_pc);
2077 
2078 	/* Decode the fault instruction and fix the registers as needed */
2079 
2080 	if ((fault_instruction & 0x0e000000) == 0x08000000) {
2081 		int base;
2082 		int loop;
2083 		int count;
2084 		int *registers = &frame->tf_r0;
2085 
2086 		DFC_PRINTF(("LDM/STM\n"));
2087 		DFC_DISASSEMBLE(fault_pc);
2088 		if (fault_instruction & (1 << 21)) {
2089 			DFC_PRINTF(("This instruction must be corrected\n"));
2090 			base = (fault_instruction >> 16) & 0x0f;
2091 			if (base == 15)
2092 				return ABORT_FIXUP_FAILED;
2093 			/* Count registers transferred */
2094 			count = 0;
2095 			for (loop = 0; loop < 16; ++loop) {
2096 				if (fault_instruction & (1<<loop))
2097 					++count;
2098 			}
2099 			DFC_PRINTF(("%d registers used\n", count));
2100 			DFC_PRINTF(("Corrected r%d by %d bytes ",
2101 				       base, count * 4));
2102 			if (fault_instruction & (1 << 23)) {
2103 				DFC_PRINTF(("down\n"));
2104 				registers[base] -= count * 4;
2105 			} else {
2106 				DFC_PRINTF(("up\n"));
2107 				registers[base] += count * 4;
2108 			}
2109 		}
2110 	} else if ((fault_instruction & 0x0e000000) == 0x0c000000) {
2111 		int base;
2112 		int offset;
2113 		int *registers = &frame->tf_r0;
2114 
2115 		/* REGISTER CORRECTION IS REQUIRED FOR THESE INSTRUCTIONS */
2116 
2117 		DFC_DISASSEMBLE(fault_pc);
2118 
2119 		/* Only need to fix registers if write back is turned on */
2120 
2121 		if ((fault_instruction & (1 << 21)) != 0) {
2122 			base = (fault_instruction >> 16) & 0x0f;
2123 			if (base == 13 &&
2124 			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
2125 				return ABORT_FIXUP_FAILED;
2126 			if (base == 15)
2127 				return ABORT_FIXUP_FAILED;
2128 
2129 			offset = (fault_instruction & 0xff) << 2;
2130 			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
2131 			if ((fault_instruction & (1 << 23)) != 0)
2132 				offset = -offset;
2133 			registers[base] += offset;
2134 			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
2135 		}
2136 	}
2137 
2138 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
2139 
2140 		/* OK, an abort in SVC mode */
2141 
2142 		/*
2143 		 * Copy the (possibly fixed-up) usr r14 back into the SVC r14
2144 		 * and restore the original usr r14; the usr slot was borrowed
2145 		 * above so the registers could be treated as an array of ints
2146 		 * during fixing.
2147 		 * NOTE: the PC fills the r15 slot, but writeback to r15 is
2148 		 * not allowed anyway.
2149 		 * Doing it like this is more efficient than trapping this
2150 		 * case in all possible locations in the prior fixup code.
2151 		 */
2152 
2153 		frame->tf_svc_lr = frame->tf_usr_lr;
2154 		frame->tf_usr_lr = saved_lr;
2155 
2156 		/*
2157 		 * Note the trapframe does not have the SVC r13 so a fault
2158 		 * from an instruction with writeback to r13 in SVC mode is
2159 		 * not allowed. This should not happen as the kstack is
2160 		 * always valid.
2161 		 */
2162 	}
2163 
2164 	return(ABORT_FIXUP_OK);
2165 }
2166 #endif	/* CPU_ARM6/7 */
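
#if 0
/*
 * Illustrative sketch (not compiled): the core of the LDM/STM
 * writeback fixup above, assuming the usual A32 block-transfer
 * encoding where bit 23 (U) selects an upward transfer, bit 21 (W)
 * writeback and bits 15..0 the register list.  The fixup undoes the
 * base update, so an upward (U=1) transfer is corrected by
 * subtracting.
 */
static int
ldm_stm_base_correction(u_int insn)
{
	int count = 0;

	for (int bit = 0; bit < 16; bit++)
		if (insn & (1u << bit))
			count++;	/* registers transferred */
	return (insn & (1 << 23)) ? -4 * count : 4 * count;
}
#endif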
2167 
2168 
2169 #if (defined(CPU_ARM6) && defined(ARM6_LATE_ABORT)) || defined(CPU_ARM7) || \
2170 	defined(CPU_ARM7TDMI)
2171 /*
2172  * "Late" (base updated) data abort fixup
2173  *
2174  * For ARM6 (in late-abort mode) and ARM7.
2175  *
2176  * In this model, all data-transfer instructions need fixing up.  We defer
2177  * LDM, STM, LDC and STC fixup to the early-abort handler.
2178  */
2179 int
2180 late_abort_fixup(void *arg)
2181 {
2182 	trapframe_t *frame = arg;
2183 	u_int fault_pc;
2184 	u_int fault_instruction;
2185 	int saved_lr = 0;
2186 
2187 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
2188 
2189 		/* OK, an abort in SVC mode */
2190 
2191 		/*
2192 		 * Copy the SVC r14 into the usr r14 - the usr r14 is garbage
2193 		 * as the fault happened in SVC mode, but we need it in the
2194 		 * usr slot so we can treat the registers as an array of ints
2195 		 * during fixing.
2196 		 * NOTE: the PC fills the r15 slot, but writeback to r15 is
2197 		 * not allowed anyway.
2198 		 * Doing it like this is more efficient than trapping this
2199 		 * case in all possible locations in the following fixup code.
2200 		 */
2201 
2202 		saved_lr = frame->tf_usr_lr;
2203 		frame->tf_usr_lr = frame->tf_svc_lr;
2204 
2205 		/*
2206 		 * Note the trapframe does not have the SVC r13 so a fault
2207 		 * from an instruction with writeback to r13 in SVC mode is
2208 		 * not allowed. This should not happen as the kstack is
2209 		 * always valid.
2210 		 */
2211 	}
2212 
2213 	/* Get the faulting PC and fetch the offending instruction */
2214 
2215 	fault_pc = frame->tf_pc;
2216 	fault_instruction = *((volatile unsigned int *)fault_pc);
2217 
2218 	/* Decode the fault instruction and fix the registers as needed */
2219 
2220 	/* Was it a swap instruction? */
2221 
2222 	if ((fault_instruction & 0x0fb00ff0) == 0x01000090) {
2223 		DFC_DISASSEMBLE(fault_pc);
2224 	} else if ((fault_instruction & 0x0c000000) == 0x04000000) {
2225 
2226 		/* Was it an ldr/str instruction? */
2227 		/* This is for late abort only */
2228 
2229 		int base;
2230 		int offset;
2231 		int *registers = &frame->tf_r0;
2232 
2233 		DFC_DISASSEMBLE(fault_pc);
2234 
2235 		/* This is for late abort only */
2236 
2237 		if ((fault_instruction & (1 << 24)) == 0
2238 		    || (fault_instruction & (1 << 21)) != 0) {
2239 			/* base was updated: post-indexed, or writeback set */
2240 
2241 			base = (fault_instruction >> 16) & 0x0f;
2242 			if (base == 13 &&
2243 			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
2244 				return ABORT_FIXUP_FAILED;
2245 			if (base == 15)
2246 				return ABORT_FIXUP_FAILED;
2247 			DFC_PRINTF(("late abt fix: r%d=%08x : ",
2248 				       base, registers[base]));
2249 			if ((fault_instruction & (1 << 25)) == 0) {
2250 				/* Immediate offset - easy */
2251 
2252 				offset = fault_instruction & 0xfff;
2253 				if ((fault_instruction & (1 << 23)))
2254 					offset = -offset;
2255 				registers[base] += offset;
2256 				DFC_PRINTF(("imm=%08x ", offset));
2257 			} else {
2258 				/* offset is a shifted register */
2259 				int shift;
2260 
2261 				offset = fault_instruction & 0x0f;
2262 				if (offset == base)
2263 					return ABORT_FIXUP_FAILED;
2264 
2265 				/*
2266 				 * Register offset - harder: we have to
2267 				 * cope with shifts!
2268 				 */
2269 				offset = registers[offset];
2270 
2271 				if ((fault_instruction & (1 << 4)) == 0)
2272 					/* shift with amount */
2273 					shift = (fault_instruction >> 7) & 0x1f;
2274 				else {
2275 					/* shift with register */
2276 					if ((fault_instruction & (1 << 7)) != 0)
2277 						/* undefined for now so bail out */
2278 						return ABORT_FIXUP_FAILED;
2279 					shift = ((fault_instruction >> 8) & 0xf);
2280 					if (base == shift)
2281 						return ABORT_FIXUP_FAILED;
2282 					DFC_PRINTF(("shift reg=%d ", shift));
2283 					shift = registers[shift];
2284 				}
2285 				DFC_PRINTF(("shift=%08x ", shift));
2286 				switch (((fault_instruction >> 5) & 0x3)) {
2287 				case 0 : /* Logical left */
2288 					offset = (int)(((u_int)offset) << shift);
2289 					break;
2290 				case 1 : /* Logical Right */
2291 					if (shift == 0) shift = 32;
2292 					offset = (int)(((u_int)offset) >> shift);
2293 					break;
2294 				case 2 : /* Arithmetic Right */
2295 					if (shift == 0) shift = 32;
2296 					offset = (int)(((int)offset) >> shift);
2297 					break;
2298 				case 3 : /* Rotate right (ror or rrx) */
2299 					return ABORT_FIXUP_FAILED;
2300 					break;
2301 				}
2302 
2303 				DFC_PRINTF(("abt: fixed LDR/STR with "
2304 					       "register offset\n"));
2305 				if ((fault_instruction & (1 << 23)))
2306 					offset = -offset;
2307 				DFC_PRINTF(("offset=%08x ", offset));
2308 				registers[base] += offset;
2309 			}
2310 			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
2311 		}
2312 	}
2313 
2314 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
2315 
2316 		/* OK, an abort in SVC mode */
2317 
2318 		/*
2319 		 * Copy the (possibly fixed-up) usr r14 back into the SVC r14
2320 		 * and restore the original usr r14; the usr slot was borrowed
2321 		 * above so the registers could be treated as an array of ints
2322 		 * during fixing.
2323 		 * NOTE: the PC fills the r15 slot, but writeback to r15 is
2324 		 * not allowed anyway.
2325 		 * Doing it like this is more efficient than trapping this
2326 		 * case in all possible locations in the prior fixup code.
2327 		 */
2328 
2329 		frame->tf_svc_lr = frame->tf_usr_lr;
2330 		frame->tf_usr_lr = saved_lr;
2331 
2332 		/*
2333 		 * Note the trapframe does not have the SVC r13 so a fault
2334 		 * from an instruction with writeback to r13 in SVC mode is
2335 		 * not allowed. This should not happen as the kstack is
2336 		 * always valid.
2337 		 */
2338 	}
2339 
2340 	/*
2341 	 * Now let the early-abort fixup routine have a go, in case it
2342 	 * was an LDM, STM, LDC or STC that faulted.
2343 	 */
2344 
2345 	return early_abort_fixup(arg);
2346 }
2347 #endif	/* CPU_ARM6(LATE)/7/7TDMI */
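
/*
 * Note on the single-transfer decode above, following the usual A32
 * LDR/STR encoding: bit 25 (I) selects register over immediate
 * offset, bit 24 (P) pre- over post-indexing, bit 23 (U) add over
 * subtract, bit 21 (W) writeback; bits 11..7, 6..5 and 4 hold the
 * shift amount, shift type (LSL/LSR/ASR/ROR) and the
 * shift-by-register flag.  ROR/RRX offsets are refused with
 * ABORT_FIXUP_FAILED rather than decoded.
 */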
2348 
2349 /*
2350  * CPU Setup code
2351  */
2352 
2353 #if defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) || \
2354 	defined(CPU_ARM8) || defined (CPU_ARM9) || defined (CPU_ARM9E) || \
2355 	defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
2356 	defined(CPU_FA526) || \
2357 	defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
2358 	defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425) || \
2359 	defined(CPU_ARM10) || defined(CPU_SHEEVA) || \
2360 	defined(CPU_ARMV6) || defined(CPU_ARMV7)
2361 
2362 #define IGN	0
2363 #define OR	1
2364 #define BIC	2
2365 
2366 struct cpu_option {
2367 	const char *co_name;
2368 	int	co_falseop;
2369 	int	co_trueop;
2370 	int	co_value;
2371 };
2372 
2373 static u_int parse_cpu_options(char *, struct cpu_option *, u_int);
2374 
2375 static u_int __noasan
2376 parse_cpu_options(char *args, struct cpu_option *optlist, u_int cpuctrl)
2377 {
2378 	int integer;
2379 
2380 	if (args == NULL)
2381 		return(cpuctrl);
2382 
2383 	while (optlist->co_name) {
2384 		if (get_bootconf_option(args, optlist->co_name,
2385 		    BOOTOPT_TYPE_BOOLEAN, &integer)) {
2386 			if (integer) {
2387 				if (optlist->co_trueop == OR)
2388 					cpuctrl |= optlist->co_value;
2389 				else if (optlist->co_trueop == BIC)
2390 					cpuctrl &= ~optlist->co_value;
2391 			} else {
2392 				if (optlist->co_falseop == OR)
2393 					cpuctrl |= optlist->co_value;
2394 				else if (optlist->co_falseop == BIC)
2395 					cpuctrl &= ~optlist->co_value;
2396 			}
2397 		}
2398 		++optlist;
2399 	}
2400 	return(cpuctrl);
2401 }
2402 #endif /* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 || ... || CPU_ARMV6 || CPU_ARMV7 (the full list is in the matching #if) */
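
/*
 * Illustrative walk through parse_cpu_options(): with the boot
 * argument "cpu.nocache=1", the "cpu.nocache" entry in one of the
 * tables below matches with a true value, its co_trueop of BIC
 * applies, and the cache-enable bits are cleared from cpuctrl;
 * "cpu.nocache=0" would instead take the OR co_falseop and set them.
 */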
2403 
2404 #if defined (CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) \
2405 	|| defined(CPU_ARM8)
2406 struct cpu_option arm678_options[] = {
2407 #ifdef COMPAT_12
2408 	{ "nocache",		IGN, BIC, CPU_CONTROL_IDC_ENABLE },
2409 	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
2410 #endif	/* COMPAT_12 */
2411 	{ "cpu.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
2412 	{ "cpu.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
2413 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2414 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2415 	{ NULL,			IGN, IGN, 0 }
2416 };
2417 
2418 #endif	/* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 */
2419 
2420 #ifdef CPU_ARM6
2421 struct cpu_option arm6_options[] = {
2422 	{ "arm6.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
2423 	{ "arm6.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
2424 	{ "arm6.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2425 	{ "arm6.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2426 	{ NULL,			IGN, IGN, 0 }
2427 };
2428 
2429 void
2430 arm6_setup(char *args)
2431 {
2432 
2433 	/* Set up default control register bits */
2434 	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2435 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2436 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
2437 #if 0
2438 	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2439 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2440 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
2441 		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE
2442 		 | CPU_CONTROL_AFLT_ENABLE;
2443 #endif
2444 
2445 #ifdef ARM6_LATE_ABORT
2446 	cpuctrl |= CPU_CONTROL_LABT_ENABLE;
2447 #endif	/* ARM6_LATE_ABORT */
2448 
2449 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2450 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2451 #endif
2452 
2453 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
2454 	cpuctrl = parse_cpu_options(args, arm6_options, cpuctrl);
2455 
2456 #ifdef __ARMEB__
2457 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2458 #endif
2459 
2460 	/* Clear out the cache */
2461 	cpu_idcache_wbinv_all();
2462 
2463 	/* Set the control register */
2464 	cpu_control(0xffffffff, cpuctrl);
2465 }
2466 #endif	/* CPU_ARM6 */
2467 
2468 #ifdef CPU_ARM7
2469 struct cpu_option arm7_options[] = {
2470 	{ "arm7.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
2471 	{ "arm7.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
2472 	{ "arm7.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2473 	{ "arm7.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2474 #ifdef COMPAT_12
2475 	{ "fpaclk2",		BIC, OR,  CPU_CONTROL_CPCLK },
2476 #endif	/* COMPAT_12 */
2477 	{ "arm700.fpaclk",	BIC, OR,  CPU_CONTROL_CPCLK },
2478 	{ NULL,			IGN, IGN, 0 }
2479 };
2480 
2481 void
2482 arm7_setup(char *args)
2483 {
2484 
2485 	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2486 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2487 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
2488 #if 0
2489 	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2490 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2491 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
2492 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_LABT_ENABLE
2493 		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE
2494 		 | CPU_CONTROL_AFLT_ENABLE;
2495 #endif
2496 
2497 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2498 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2499 #endif
2500 
2501 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
2502 	cpuctrl = parse_cpu_options(args, arm7_options, cpuctrl);
2503 
2504 #ifdef __ARMEB__
2505 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2506 #endif
2507 
2508 	/* Clear out the cache */
2509 	cpu_idcache_wbinv_all();
2510 
2511 	/* Set the control register */
2512 	cpu_control(0xffffffff, cpuctrl);
2513 }
2514 #endif	/* CPU_ARM7 */
2515 
2516 #ifdef CPU_ARM7TDMI
2517 struct cpu_option arm7tdmi_options[] = {
2518 	{ "arm7.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
2519 	{ "arm7.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
2520 	{ "arm7.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2521 	{ "arm7.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2522 #ifdef COMPAT_12
2523 	{ "fpaclk2",		BIC, OR,  CPU_CONTROL_CPCLK },
2524 #endif	/* COMPAT_12 */
2525 	{ "arm700.fpaclk",	BIC, OR,  CPU_CONTROL_CPCLK },
2526 	{ NULL,			IGN, IGN, 0 }
2527 };
2528 
2529 void
2530 arm7tdmi_setup(char *args)
2531 {
2532 	int cpuctrl;
2533 
2534 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2535 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2536 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
2537 
2538 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
2539 	cpuctrl = parse_cpu_options(args, arm7tdmi_options, cpuctrl);
2540 
2541 #ifdef __ARMEB__
2542 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2543 #endif
2544 
2545 	/* Clear out the cache */
2546 	cpu_idcache_wbinv_all();
2547 
2548 	/* Set the control register */
2549 	cpu_control(0xffffffff, cpuctrl);
2550 }
2551 #endif	/* CPU_ARM7TDMI */
2552 
2553 #ifdef CPU_ARM8
2554 struct cpu_option arm8_options[] = {
2555 	{ "arm8.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
2556 	{ "arm8.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
2557 	{ "arm8.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2558 	{ "arm8.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2559 #ifdef COMPAT_12
2560 	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2561 #endif	/* COMPAT_12 */
2562 	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2563 	{ "arm8.branchpredict",	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2564 	{ NULL,			IGN, IGN, 0 }
2565 };
2566 
2567 void
2568 arm8_setup(char *args)
2569 {
2570 	int integer;
2571 	int clocktest;
2572 	int setclock = 0;
2573 
2574 	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2575 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2576 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
2577 #if 0
2578 	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2579 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2580 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
2581 		 | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_ROM_ENABLE
2582 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE;
2583 #endif
2584 
2585 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2586 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2587 #endif
2588 
2589 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
2590 	cpuctrl = parse_cpu_options(args, arm8_options, cpuctrl);
2591 
2592 #ifdef __ARMEB__
2593 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2594 #endif
2595 
2596 	/* Get clock configuration */
2597 	clocktest = arm8_clock_config(0, 0) & 0x0f;
2598 
2599 	/* Special ARM8 clock and test configuration */
2600 	if (get_bootconf_option(args, "arm8.clock.reset", BOOTOPT_TYPE_BOOLEAN, &integer)) {
2601 		clocktest = 0;
2602 		setclock = 1;
2603 	}
2604 	if (get_bootconf_option(args, "arm8.clock.dynamic", BOOTOPT_TYPE_BOOLEAN, &integer)) {
2605 		if (integer)
2606 			clocktest |= 0x01;
2607 		else
2608 			clocktest &= ~(0x01);
2609 		setclock = 1;
2610 	}
2611 	if (get_bootconf_option(args, "arm8.clock.sync", BOOTOPT_TYPE_BOOLEAN, &integer)) {
2612 		if (integer)
2613 			clocktest |= 0x02;
2614 		else
2615 			clocktest &= ~(0x02);
2616 		setclock = 1;
2617 	}
2618 	if (get_bootconf_option(args, "arm8.clock.fast", BOOTOPT_TYPE_BININT, &integer)) {
2619 		clocktest = (clocktest & ~0x0c) | ((integer & 3) << 2);	/* fast field is bits 2-3 */
2620 		setclock = 1;
2621 	}
2622 	if (get_bootconf_option(args, "arm8.test", BOOTOPT_TYPE_BININT, &integer)) {
2623 		clocktest |= (integer & 7) << 5;
2624 		setclock = 1;
2625 	}
2626 
2627 	/* Clear out the cache */
2628 	cpu_idcache_wbinv_all();
2629 
2630 	/* Set the control register */
2631 	cpu_control(0xffffffff, cpuctrl);
2632 
2633 	/* Set the clock/test register */
2634 	if (setclock)
2635 		arm8_clock_config(0x7f, clocktest);
2636 }
2637 #endif	/* CPU_ARM8 */
2638 
2639 #ifdef CPU_ARM9
2640 struct cpu_option arm9_options[] = {
2641 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2642 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2643 	{ "arm9.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2644 	{ "arm9.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2645 	{ "arm9.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2646 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2647 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2648 	{ "arm9.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2649 	{ NULL,			IGN, IGN, 0 }
2650 };
2651 
2652 void
2653 arm9_setup(char *args)
2654 {
2655 
2656 	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2657 	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2658 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2659 	    | CPU_CONTROL_WBUF_ENABLE;
2660 	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2661 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2662 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2663 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2664 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2665 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
2666 		 | CPU_CONTROL_ROUNDROBIN;
2667 
2668 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2669 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2670 #endif
2671 
2672 	cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);
2673 
2674 #ifdef __ARMEB__
2675 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2676 #endif
2677 
2678 #ifndef ARM_HAS_VBAR
2679 	if (vector_page == ARM_VECTORS_HIGH)
2680 		cpuctrl |= CPU_CONTROL_VECRELOC;
2681 #endif
2682 
2683 	/* Clear out the cache */
2684 	cpu_idcache_wbinv_all();
2685 
2686 	/* Set the control register */
2687 	cpu_control(cpuctrlmask, cpuctrl);
2688 
2689 }
2690 #endif	/* CPU_ARM9 */
2691 
2692 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
2693 struct cpu_option arm10_options[] = {
2694 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2695 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2696 	{ "arm10.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2697 	{ "arm10.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2698 	{ "arm10.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2699 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2700 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2701 	{ "arm10.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2702 	{ NULL,			IGN, IGN, 0 }
2703 };
2704 
2705 void
2706 arm10_setup(char *args)
2707 {
2708 
2709 	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2710 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2711 	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
2712 #if 0
2713 	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2714 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2715 	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2716 	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2717 	    | CPU_CONTROL_BPRD_ENABLE
2718 	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
2719 #endif
2720 
2721 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2722 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2723 #endif
2724 
2725 	cpuctrl = parse_cpu_options(args, arm10_options, cpuctrl);
2726 
2727 #ifdef __ARMEB__
2728 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2729 #endif
2730 
2731 #ifndef ARM_HAS_VBAR
2732 	if (vector_page == ARM_VECTORS_HIGH)
2733 		cpuctrl |= CPU_CONTROL_VECRELOC;
2734 #endif
2735 
2736 	/* Clear out the cache */
2737 	cpu_idcache_wbinv_all();
2738 
2739 	/* Now really make sure they are clean.  */
2740 	__asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
2741 
2742 	/* Set the control register */
2743 	cpu_control(0xffffffff, cpuctrl);
2744 
2745 	/* And again. */
2746 	cpu_idcache_wbinv_all();
2747 }
2748 #endif	/* CPU_ARM9E || CPU_ARM10 */
2749 
2750 #if defined(CPU_ARM11)
2751 struct cpu_option arm11_options[] = {
2752 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2753 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2754 	{ "arm11.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2755 	{ "arm11.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2756 	{ "arm11.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2757 	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2758 	{ "arm11.branchpredict", BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2759 	{ NULL,			IGN, IGN, 0 }
2760 };
2761 
2762 void
2763 arm11_setup(char *args)
2764 {
2765 
2766 	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2767 #ifdef ARM_MMU_EXTENDED
2768 	    | CPU_CONTROL_XP_ENABLE
2769 #endif
2770 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2771 	    /* | CPU_CONTROL_BPRD_ENABLE */;
2772 	int cpuctrlmask = cpuctrl
2773 	    | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BPRD_ENABLE
2774 	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2775 	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
2776 
2777 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2778 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2779 #endif
2780 
2781 	cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);
2782 
2783 #ifdef __ARMEB__
2784 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2785 #endif
2786 
2787 #ifndef ARM_HAS_VBAR
2788 	if (vector_page == ARM_VECTORS_HIGH)
2789 		cpuctrl |= CPU_CONTROL_VECRELOC;
2790 #endif
2791 
2792 	/* Clear out the cache */
2793 	cpu_idcache_wbinv_all();
2794 
2795 	/* Now really make sure they are clean.  */
2796 	__asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
2797 
2798 	/* Allow detection code to find the VFP if it's fitted.  */
2799 	armreg_cpacr_write(0x0fffffff);
2800 
2801 	/* Set the control register */
2802 	cpu_control(cpuctrlmask, cpuctrl);
2803 
2804 	/* And again. */
2805 	cpu_idcache_wbinv_all();
2806 }
2807 #endif	/* CPU_ARM11 */
2808 
2809 #if defined(CPU_ARM11MPCORE)
2810 
2811 void
2812 arm11mpcore_setup(char *args)
2813 {
2814 
2815 	int cpuctrl = CPU_CONTROL_IC_ENABLE
2816 	    | CPU_CONTROL_DC_ENABLE
2817 #ifdef ARM_MMU_EXTENDED
2818 	    | CPU_CONTROL_XP_ENABLE
2819 #endif
2820 	    | CPU_CONTROL_BPRD_ENABLE ;
2821 	int cpuctrlmask = cpuctrl
2822 	    | CPU_CONTROL_AFLT_ENABLE
2823 	    | CPU_CONTROL_VECRELOC;
2824 
2825 #ifdef	ARM11MPCORE_MMU_COMPAT
2826 	/* XXX: S and R? */
2827 #endif
2828 
2829 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2830 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2831 #endif
2832 
2833 	cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);
2834 
2835 #ifndef ARM_HAS_VBAR
2836 	if (vector_page == ARM_VECTORS_HIGH)
2837 		cpuctrl |= CPU_CONTROL_VECRELOC;
2838 #endif
2839 
2840 	/* Clear out the cache */
2841 	cpu_idcache_wbinv_all();
2842 
2843 	/* Now really make sure they are clean.  */
2844 	__asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
2845 
2846 	/* Allow detection code to find the VFP if it's fitted.  */
2847 	armreg_cpacr_write(0x0fffffff);
2848 
2849 	/* Set the control register */
2850 	cpu_control(cpuctrlmask, cpuctrl);
2851 
2852 	/* And again. */
2853 	cpu_idcache_wbinv_all();
2854 }
2855 #endif	/* CPU_ARM11MPCORE */
2856 
2857 #ifdef CPU_PJ4B
2858 void
2859 pj4bv7_setup(char *args)
2860 {
2861 	int cpuctrl;
2862 
2863 	pj4b_config();
2864 
2865 	cpuctrl = CPU_CONTROL_MMU_ENABLE;
2866 #ifdef ARM32_DISABLE_ALIGNMENT_FAULTS
2867 	cpuctrl |= CPU_CONTROL_UNAL_ENABLE;
2868 #else
2869 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2870 #endif
2871 	cpuctrl |= CPU_CONTROL_DC_ENABLE;
2872 	cpuctrl |= CPU_CONTROL_IC_ENABLE;
2873 	cpuctrl |= (0xf << 3);		/* SCTLR bits 6:3 are should-be-one */
2874 	cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
2875 	cpuctrl |= (0x5 << 16);		/* SCTLR bits 18 and 16 are should-be-one */
2876 	cpuctrl |= CPU_CONTROL_XP_ENABLE;
2877 
2878 #ifndef ARM_HAS_VBAR
2879 	if (vector_page == ARM_VECTORS_HIGH)
2880 		cpuctrl |= CPU_CONTROL_VECRELOC;
2881 #endif
2882 
2883 #ifdef L2CACHE_ENABLE
2884 	/* Setup L2 cache */
2885 	arm_scache.cache_type = CPU_CT_CTYPE_WT;
2886 	arm_scache.cache_unified = 1;
2887 	arm_scache.dcache_type = arm_scache.icache_type = CACHE_TYPE_PIPT;
2888 	arm_scache.dcache_size = arm_scache.icache_size = ARMADAXP_L2_SIZE;
2889 	arm_scache.dcache_ways = arm_scache.icache_ways = ARMADAXP_L2_WAYS;
2890 	arm_scache.dcache_way_size = arm_scache.icache_way_size =
2891 	    ARMADAXP_L2_WAY_SIZE;
2892 	arm_scache.dcache_line_size = arm_scache.icache_line_size =
2893 	    ARMADAXP_L2_LINE_SIZE;
2894 	arm_scache.dcache_sets = arm_scache.icache_sets =
2895 	    ARMADAXP_L2_SETS;
2896 
2897 	cpufuncs.cf_sdcache_wbinv_range	= armadaxp_sdcache_wbinv_range;
2898 	cpufuncs.cf_sdcache_inv_range	= armadaxp_sdcache_inv_range;
2899 	cpufuncs.cf_sdcache_wb_range	= armadaxp_sdcache_wb_range;
2900 #endif
2901 
2902 #ifdef AURORA_IO_CACHE_COHERENCY
2903 	/* use AMBA and I/O Coherency Fabric to maintain cache */
2904 	cpufuncs.cf_dcache_wbinv_range	= pj4b_dcache_cfu_wbinv_range;
2905 	cpufuncs.cf_dcache_inv_range	= pj4b_dcache_cfu_inv_range;
2906 	cpufuncs.cf_dcache_wb_range	= pj4b_dcache_cfu_wb_range;
2907 
2908 	cpufuncs.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop;
2909 	cpufuncs.cf_sdcache_inv_range	= (void *)cpufunc_nullop;
2910 	cpufuncs.cf_sdcache_wb_range	= (void *)cpufunc_nullop;
2911 #endif
2912 
2913 	/* Clear out the cache */
2914 	cpu_idcache_wbinv_all();
2915 
2916 	/* Set the control register */
2917 	cpu_control(0xffffffff, cpuctrl);
2918 
2919 	/* And again. */
2920 	cpu_idcache_wbinv_all();
2921 #ifdef L2CACHE_ENABLE
2922 	armadaxp_sdcache_wbinv_all();
2923 #endif
2924 }
2925 #endif /* CPU_PJ4B */
2926 
2927 #if defined(CPU_ARMV7)
2928 struct cpu_option armv7_options[] = {
2929     { "cpu.cache",      BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2930     { "cpu.nocache",    OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2931     { "armv7.cache",    BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2932     { "armv7.icache",   BIC, OR,  CPU_CONTROL_IC_ENABLE },
2933     { "armv7.dcache",   BIC, OR,  CPU_CONTROL_DC_ENABLE },
2934     { NULL, 		IGN, IGN, 0}
2935 };
2936 
2937 void
2938 armv7_setup(char *args)
2939 {
2940 	int cpuctrl =
2941 	    CPU_CONTROL_MMU_ENABLE |
2942 	    CPU_CONTROL_IC_ENABLE |
2943 	    CPU_CONTROL_DC_ENABLE |
2944 	    CPU_CONTROL_BPRD_ENABLE |
2945 	    CPU_CONTROL_UNAL_ENABLE |
2946 	    0;
2947 #ifdef __ARMEB__
2948 	cpuctrl |= CPU_CONTROL_EX_BEND;
2949 #endif
2950 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2951 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2952 #endif
2953 #ifdef ARM_MMU_EXTENDED
2954 	cpuctrl |= CPU_CONTROL_XP_ENABLE;
2955 #endif
2956 
2957 	int cpuctrlmask = cpuctrl |
2958 	    CPU_CONTROL_EX_BEND |
2959 	    CPU_CONTROL_AFLT_ENABLE |
2960 	    CPU_CONTROL_TR_ENABLE |
2961 	    CPU_CONTROL_VECRELOC |
2962 	    CPU_CONTROL_XP_ENABLE |
2963 	    0;
2964 
2965 	cpuctrl = parse_cpu_options(args, armv7_options, cpuctrl);
2966 
2967 #ifndef ARM_HAS_VBAR
2968 	if (vector_page == ARM_VECTORS_HIGH)
2969 		cpuctrl |= CPU_CONTROL_VECRELOC;
2970 #endif
2971 
2972 #ifdef __HAVE_GENERIC_START
2973 	const u_int lcputype = cpufunc_id();
2974 	int actlr_set = 0;
2975 	int actlr_clr = 0;
2976 
2977 	if (CPU_ID_CORTEX_A5_P(lcputype)) {
2978 		/*
2979 		 * Disable exclusive L1/L2 cache control
2980 		 * Enable SMP mode
2981 		 * Enable Cache and TLB maintenance broadcast
2982 		 */
2983 		actlr_clr = CORTEXA5_ACTLR_EXCL;
2984 		actlr_set = CORTEXA5_ACTLR_SMP | CORTEXA5_ACTLR_FW;
2985 	} else if (CPU_ID_CORTEX_A7_P(lcputype)) {
2986 #ifdef MULTIPROCESSOR
2987 		actlr_set |= CORTEXA7_ACTLR_SMP;
2988 #endif
2989 	} else if (CPU_ID_CORTEX_A8_P(lcputype)) {
2990 		actlr_set = CORTEXA8_ACTLR_L2EN;
2991 		actlr_clr = CORTEXA8_ACTLR_L1ALIAS;
2992 	} else if (CPU_ID_CORTEX_A9_P(lcputype)) {
2993 		actlr_set =
2994 		    CORTEXA9_AUXCTL_FW |
2995 		    CORTEXA9_AUXCTL_L2PE |	/* Not in FreeBSD */
2996 		    CORTEXA9_AUXCTL_SMP |
2997 		    0;
2998 	} else if (CPU_ID_CORTEX_A15_P(lcputype)) {
2999 		actlr_set =
3000 		    CORTEXA15_ACTLR_SMP |
3001 		    CORTEXA15_ACTLR_SDEH |
3002 		    0;
3003 #if 0
3004 	} else if (CPU_ID_CORTEX_A12_P(lcputype) ||
3005 	    CPU_ID_CORTEX_A17_P(lcputype)) {
3006 		actlr_set =
3007 		    CORTEXA17_ACTLR_SMP;
3008 #endif
3009 	} else if (CPU_ID_CORTEX_A53_P(lcputype)) {
3010 	} else if (CPU_ID_CORTEX_A57_P(lcputype)) {
3011 	} else if (CPU_ID_CORTEX_A72_P(lcputype)) {
3012 	}
3013 
3014 	uint32_t actlr = armreg_auxctl_read();
3015 	actlr &= ~actlr_clr;
3016 	actlr |= actlr_set;
3017 
3018 	armreg_auxctl_write(actlr);
3019 
3020 	/* Set the control register - does dsb; isb */
3021 	cpu_control(cpuctrlmask, cpuctrl);
3022 
3023 	/* does tlb and branch predictor flush, and dsb; isb */
3024 	cpu_tlb_flushID();
3025 #else
3026 	/* Set the control register - does dsb; isb */
3027 	cpu_control(cpuctrlmask, cpuctrl);
3028 #endif
3029 
3030 }
3031 #endif /* CPU_ARMV7 */
3032 
3033 
3034 #if defined(CPU_ARM1136) || defined(CPU_ARM1176)
3035 void
3036 arm11x6_setup(char *args)
3037 {
3038 	int cpuctrl, cpuctrl_wax;
3039 	uint32_t auxctrl;
3040 	uint32_t sbz = 0;
3041 	uint32_t cpuid;
3042 
3043 	cpuid = cpu_idnum();
3044 
3045 	cpuctrl =
3046 		CPU_CONTROL_MMU_ENABLE  |
3047 		CPU_CONTROL_DC_ENABLE   |
3048 		CPU_CONTROL_WBUF_ENABLE |
3049 		CPU_CONTROL_32BP_ENABLE |
3050 		CPU_CONTROL_32BD_ENABLE |
3051 		CPU_CONTROL_LABT_ENABLE |
3052 		CPU_CONTROL_UNAL_ENABLE |
3053 #ifdef ARM_MMU_EXTENDED
3054 		CPU_CONTROL_XP_ENABLE   |
3055 #else
3056 		CPU_CONTROL_SYST_ENABLE |
3057 #endif
3058 		CPU_CONTROL_IC_ENABLE;
3059 
3060 	/*
3061 	 * "write as existing" bits: these keep their current value;
3062 	 * the inverse of this word is the cpu_control() mask below
3063 	 */
3064 	cpuctrl_wax =
3065 		(3 << 30) |
3066 		(1 << 29) |
3067 		(1 << 28) |
3068 		(3 << 26) |
3069 		(3 << 19) |
3070 		(1 << 17);
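	/*
	 * Note: cpu_control(~cpuctrl_wax, cpuctrl) below leaves every
	 * bit in cpuctrl_wax untouched, so bits 31:30, 29, 28, 27:26,
	 * 20:19 and 17 keep whatever value reset or firmware left in
	 * them.
	 */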
3071 
3072 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3073 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3074 #endif
3075 
3076 	cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);
3077 
3078 #ifdef __ARMEB__
3079 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3080 #endif
3081 
3082 #ifndef ARM_HAS_VBAR
3083 	if (vector_page == ARM_VECTORS_HIGH)
3084 		cpuctrl |= CPU_CONTROL_VECRELOC;
3085 #endif
3086 
3087 	auxctrl = armreg_auxctl_read();
3088 	/*
3089 	 * This enables the workaround for ARM1136 r0pX erratum 364296
3090 	 * (possible cache data corruption with
3091 	 * hit-under-miss enabled). It sets the undocumented bit 31 in
3092 	 * the auxiliary control register and the FI bit in the control
3093 	 * register, thus disabling hit-under-miss without putting the
3094 	 * processor into full low interrupt latency mode. ARM11MPCore
3095 	 * is not affected.
3096 	 */
3097 	if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1136JS) { /* ARM1136JSr0pX */
3098 		cpuctrl |= CPU_CONTROL_FI_ENABLE;
3099 		auxctrl |= ARM1136_AUXCTL_PFI;
3100 	}
3101 
3102 	/*
3103 	 * This enables the workaround for the following ARM1176 r0pX
3104 	 * errata.
3105 	 *
3106 	 * 394601: In low interrupt latency configuration, interrupted clean
3107 	 * and invalidate operation may not clean dirty data.
3108 	 *
3109 	 * 716151: Clean Data Cache line by MVA can corrupt subsequent
3110 	 * stores to the same cache line.
3111 	 *
3112 	 * 714068: Prefetch Instruction Cache Line or Invalidate Instruction
3113 	 * Cache Line by MVA can cause deadlock.
3114 	 */
3115 	if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */
3116 		/* 394601 and 716151 */
3117 		cpuctrl |= CPU_CONTROL_FI_ENABLE;
3118 		auxctrl |= ARM1176_AUXCTL_FIO;
3119 
3120 		/* 714068 */
3121 		auxctrl |= ARM1176_AUXCTL_PHD;
3122 	}
3123 
3124 	/* Clear out the cache */
3125 	cpu_idcache_wbinv_all();
3126 
3127 	/* Now really make sure they are clean.  */
3128 	__asm volatile ("mcr\tp15, 0, %0, c7, c7, 0" : : "r"(sbz));
3129 
3130 	/* Allow detection code to find the VFP if it's fitted.  */
3131 	armreg_cpacr_write(0x0fffffff);
3132 
3133 	/* Set the control register */
3134 	cpu_control(~cpuctrl_wax, cpuctrl);
3135 
3136 	/* Update auxctlr */
3137 	armreg_auxctl_write(auxctrl);
3138 
3139 	/* And again. */
3140 	cpu_idcache_wbinv_all();
3141 }
3142 #endif	/* CPU_ARM1136 || CPU_ARM1176 */
3143 
3144 #ifdef CPU_SA110
3145 struct cpu_option sa110_options[] = {
3146 #ifdef COMPAT_12
3147 	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3148 	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
3149 #endif	/* COMPAT_12 */
3150 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3151 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3152 	{ "sa110.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3153 	{ "sa110.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
3154 	{ "sa110.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
3155 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3156 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
3157 	{ "sa110.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3158 	{ NULL,			IGN, IGN, 0 }
3159 };
3160 
3161 void
3162 sa110_setup(char *args)
3163 {
3164 	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3165 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3166 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3167 		 | CPU_CONTROL_WBUF_ENABLE;
3168 #if 0
3169 	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3170 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3171 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3172 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
3173 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
3174 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
3175 		 | CPU_CONTROL_CPCLK;
3176 #endif
3177 
3178 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3179 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3180 #endif
3181 
3182 	cpuctrl = parse_cpu_options(args, sa110_options, cpuctrl);
3183 
3184 #ifdef __ARMEB__
3185 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3186 #endif
3187 
3188 #ifndef ARM_HAS_VBAR
3189 	if (vector_page == ARM_VECTORS_HIGH)
3190 		cpuctrl |= CPU_CONTROL_VECRELOC;
3191 #endif
3192 
3193 	/* Clear out the cache */
3194 	cpu_idcache_wbinv_all();
3195 
3196 	/* Set the control register */
3197 #if 0
3198 	cpu_control(cpuctrlmask, cpuctrl);
3199 #endif
3200 	cpu_control(0xffffffff, cpuctrl);
3201 
3202 	/*
3203 	 * Enable clock switching.  Note that this MCR neither reads nor
3204 	 * writes r0; r0 is only there to make the asm valid.
3205 	 */
3206 	__asm volatile ("mcr p15, 0, r0, c15, c1, 2");
3207 }
3208 #endif	/* CPU_SA110 */
3209 
3210 #if defined(CPU_SA1100) || defined(CPU_SA1110)
3211 struct cpu_option sa11x0_options[] = {
3212 #ifdef COMPAT_12
3213 	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3214 	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
3215 #endif	/* COMPAT_12 */
3216 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3217 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3218 	{ "sa11x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3219 	{ "sa11x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
3220 	{ "sa11x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
3221 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3222 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
3223 	{ "sa11x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3224 	{ NULL,			IGN, IGN, 0 }
3225 };
3226 
3227 void
3228 sa11x0_setup(char *args)
3229 {
3230 
3231 	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3232 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3233 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3234 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
3235 #if 0
3236 	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3237 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3238 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3239 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
3240 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
3241 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
3242 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
3243 #endif
3244 
3245 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3246 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3247 #endif
3248 
3249 	cpuctrl = parse_cpu_options(args, sa11x0_options, cpuctrl);
3250 
3251 #ifdef __ARMEB__
3252 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3253 #endif
3254 
3255 #ifndef ARM_HAS_VBAR
3256 	if (vector_page == ARM_VECTORS_HIGH)
3257 		cpuctrl |= CPU_CONTROL_VECRELOC;
3258 #endif
3259 
3260 	/* Clear out the cache */
3261 	cpu_idcache_wbinv_all();
3262 
3263 	/* Set the control register */
3264 	cpu_control(0xffffffff, cpuctrl);
3265 }
3266 #endif	/* CPU_SA1100 || CPU_SA1110 */
3267 
3268 #if defined(CPU_FA526)
3269 struct cpu_option fa526_options[] = {
3270 #ifdef COMPAT_12
3271 	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3272 	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
3273 #endif	/* COMPAT_12 */
3274 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3275 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3276 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3277 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
3278 	{ NULL,			IGN, IGN, 0 }
3279 };
3280 
3281 void
3282 fa526_setup(char *args)
3283 {
3284 
3285 	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3286 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3287 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3288 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
3289 #if 0
3290 	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3291 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3292 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3293 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
3294 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
3295 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
3296 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
3297 #endif
3298 
3299 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3300 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3301 #endif
3302 
3303 	cpuctrl = parse_cpu_options(args, fa526_options, cpuctrl);
3304 
3305 #ifdef __ARMEB__
3306 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3307 #endif
3308 
3309 #ifndef ARM_HAS_VBAR
3310 	if (vector_page == ARM_VECTORS_HIGH)
3311 		cpuctrl |= CPU_CONTROL_VECRELOC;
3312 #endif
3313 
3314 	/* Clear out the cache */
3315 	cpu_idcache_wbinv_all();
3316 
3317 	/* Set the control register */
3318 	cpu_control(0xffffffff, cpuctrl);
3319 }
3320 #endif	/* CPU_FA526 */
3321 
3322 #if defined(CPU_IXP12X0)
3323 struct cpu_option ixp12x0_options[] = {
3324 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3325 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3326 	{ "ixp12x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3327 	{ "ixp12x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
3328 	{ "ixp12x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
3329 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3330 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
3331 	{ "ixp12x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3332 	{ NULL,			IGN, IGN, 0 }
3333 };
3334 
3335 void
3336 ixp12x0_setup(char *args)
3337 {
3338 
3339 	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE
3340 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_SYST_ENABLE
3341 		 | CPU_CONTROL_IC_ENABLE;
3342 
3343 	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_AFLT_ENABLE
3344 		 | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE
3345 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_SYST_ENABLE
3346 		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_IC_ENABLE
3347 		 | CPU_CONTROL_VECRELOC;
3348 
3349 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3350 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3351 #endif
3352 
3353 	cpuctrl = parse_cpu_options(args, ixp12x0_options, cpuctrl);
3354 
3355 #ifdef __ARMEB__
3356 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3357 #endif
3358 
3359 #ifndef ARM_HAS_VBAR
3360 	if (vector_page == ARM_VECTORS_HIGH)
3361 		cpuctrl |= CPU_CONTROL_VECRELOC;
3362 #endif
3363 
3364 	/* Clear out the cache */
3365 	cpu_idcache_wbinv_all();
3366 
3367 	/* Set the control register */
3368 	/* cpu_control(0xffffffff, cpuctrl); */
3369 	cpu_control(cpuctrlmask, cpuctrl);
3370 }
3371 #endif /* CPU_IXP12X0 */
3372 
3373 #if defined(CPU_XSCALE)
3374 struct cpu_option xscale_options[] = {
3375 #ifdef COMPAT_12
3376 	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
3377 	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3378 #endif	/* COMPAT_12 */
3379 	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
3380 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3381 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3382 	{ "xscale.branchpredict", BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
3383 	{ "xscale.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3384 	{ "xscale.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
3385 	{ "xscale.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
3386 	{ NULL,			IGN, IGN, 0 }
3387 };
3388 
3389 void
3390 xscale_setup(char *args)
3391 {
3392 	uint32_t auxctl;
3393 
3394 	/*
3395 	 * The XScale Write Buffer is always enabled.  Our option
3396 	 * is to enable/disable coalescing.  Note that bits 6:3
3397 	 * must always be enabled.
3398 	 */
3399 
3400 	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3401 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3402 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3403 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
3404 		 | CPU_CONTROL_BPRD_ENABLE;
3405 #if 0
3406 	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3407 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3408 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3409 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
3410 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
3411 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
3412 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
3413 #endif
3414 
3415 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3416 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3417 #endif
3418 
3419 	cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl);
3420 
3421 #ifdef __ARMEB__
3422 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3423 #endif
3424 
3425 #ifndef ARM_HAS_VBAR
3426 	if (vector_page == ARM_VECTORS_HIGH)
3427 		cpuctrl |= CPU_CONTROL_VECRELOC;
3428 #endif
3429 
3430 	/* Clear out the cache */
3431 	cpu_idcache_wbinv_all();
3432 
3433 	/*
3434 	 * Set the control register.  Note that bits 6:3 must always
3435 	 * be set to 1.
3436 	 */
3437 #if 0
3438 	cpu_control(cpuctrlmask, cpuctrl);
3439 #endif
3440 	cpu_control(0xffffffff, cpuctrl);
3441 
3442 	/* Make sure write coalescing is turned on */
3443 	auxctl = armreg_auxctl_read();
3444 #ifdef XSCALE_NO_COALESCE_WRITES
3445 	auxctl |= XSCALE_AUXCTL_K;
3446 #else
3447 	auxctl &= ~XSCALE_AUXCTL_K;
3448 #endif
3449 	armreg_auxctl_write(auxctl);
3450 }
3451 #endif	/* CPU_XSCALE */
3452 
3453 #if defined(CPU_SHEEVA)
3454 struct cpu_option sheeva_options[] = {
3455 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3456 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3457 	{ "sheeva.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3458 	{ "sheeva.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
3459 	{ "sheeva.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
3460 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3461 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
3462 	{ "sheeva.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3463 	{ NULL,			IGN, IGN, 0 }
3464 };
3465 
3466 void
3467 sheeva_setup(char *args)
3468 {
3469 	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
3470 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3471 	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
3472 #if 0
3473 	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
3474 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3475 	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
3476 	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
3477 	    | CPU_CONTROL_BPRD_ENABLE
3478 	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
3479 #endif
3480 
3481 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3482 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3483 #endif
3484 
3485 	cpuctrl = parse_cpu_options(args, sheeva_options, cpuctrl);
3486 
3487 	/* Enable DCache Streaming Switch and Write Allocate */
3488 	uint32_t sheeva_ext = armreg_sheeva_xctrl_read();
3489 
3490 	sheeva_ext |= FC_DCACHE_STREAM_EN | FC_WR_ALLOC_EN;
3491 #ifdef SHEEVA_L2_CACHE
3492 	sheeva_ext |= FC_L2CACHE_EN;
3493 	sheeva_ext &= ~FC_L2_PREF_DIS;
3494 #endif
3495 
3496 	armreg_sheeva_xctrl_write(sheeva_ext);
3497 
3498 #ifdef SHEEVA_L2_CACHE
3499 #ifndef SHEEVA_L2_CACHE_WT
3500 	arm_scache.cache_type = CPU_CT_CTYPE_WB2;
3501 #elif CPU_CT_CTYPE_WT != 0
3502 	arm_scache.cache_type = CPU_CT_CTYPE_WT;
3503 #endif
3504 	arm_scache.cache_unified = 1;
3505 	arm_scache.dcache_type = arm_scache.icache_type = CACHE_TYPE_PIPT;
3506 	arm_scache.dcache_size = arm_scache.icache_size = 256*1024;
3507 	arm_scache.dcache_ways = arm_scache.icache_ways = 4;
3508 	arm_scache.dcache_way_size = arm_scache.icache_way_size =
3509 	    arm_scache.dcache_size / arm_scache.dcache_ways;
3510 	arm_scache.dcache_line_size = arm_scache.icache_line_size = 32;
3511 	arm_scache.dcache_sets = arm_scache.icache_sets =
3512 	    arm_scache.dcache_way_size / arm_scache.dcache_line_size;
3513 
3514 	cpufuncs.cf_sdcache_wb_range = sheeva_sdcache_wb_range;
3515 	cpufuncs.cf_sdcache_inv_range = sheeva_sdcache_inv_range;
3516 	cpufuncs.cf_sdcache_wbinv_range = sheeva_sdcache_wbinv_range;
3517 #endif /* SHEEVA_L2_CACHE */
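
	/*
	 * Note: the L2 geometry above works out to a way size of
	 * 256KiB / 4 ways = 64KiB and 64KiB / 32-byte lines = 2048
	 * sets per way.
	 */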
3518 
3519 #ifdef __ARMEB__
3520 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3521 #endif
3522 
3523 #ifndef ARM_HAS_VBAR
3524 	if (vector_page == ARM_VECTORS_HIGH)
3525 		cpuctrl |= CPU_CONTROL_VECRELOC;
3526 #endif
3527 
3528 	/* Clear out the cache */
3529 	cpu_idcache_wbinv_all();
3530 
3531 	/* Now really make sure they are clean.  */
3532 	__asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
3533 
3534 	/* Set the control register */
3535 	cpu_control(0xffffffff, cpuctrl);
3536 
3537 	/* And again. */
3538 	cpu_idcache_wbinv_all();
3539 #ifdef SHEEVA_L2_CACHE
3540 	sheeva_sdcache_wbinv_all();
3541 #endif
3542 }
3543 #endif	/* CPU_SHEEVA */
3544 
3545 bool
3546 cpu_gtmr_exists_p(void)
3547 {
3548 	return armreg_pfr1_read() & ARM_PFR1_GTIMER_MASK;
3549 }
3550 
3551 u_int
3552 cpu_clusterid(void)
3553 {
3554 	return __SHIFTOUT(armreg_mpidr_read(), MPIDR_AFF1);
3555 }
3556 
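/*
 * Note on the function below: ANDing a TTBR value with -L1_TABLE_SIZE
 * (~(L1_TABLE_SIZE - 1); the L1 table is 16KiB-aligned on arm32)
 * strips the low attribute bits so the translation-table base can be
 * compared against cpu_ttb directly.
 */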
3557 bool
3558 cpu_earlydevice_va_p(void)
3559 {
3560 	const bool mmu_enabled_p =
3561 	    armreg_sctlr_read() & CPU_CONTROL_MMU_ENABLE;
3562 
3563 	if (!mmu_enabled_p)
3564 		return false;
3565 
3566 	/* Don't access cpu_ttb unless the mmu is enabled */
3567 	const bool cpul1pt_p =
3568 	    ((armreg_ttbr_read() & -L1_TABLE_SIZE) == cpu_ttb) ||
3569 	    ((armreg_ttbr1_read() & -L1_TABLE_SIZE) == cpu_ttb);
3570 
3571 	return cpul1pt_p;
3572 }
3573