/*	$NetBSD: cpufunc.c,v 1.130 2013/11/12 17:31:55 matt Exp $	*/

/*
 * arm7tdmi support code Copyright (c) 2001 John Fremlin
 * arm8 support code Copyright (c) 1997 ARM Limited
 * arm8 support code Copyright (c) 1997 Causality Limited
 * arm9 support code Copyright (C) 2001 ARM Ltd
 * arm11 support code Copyright (c) 2007 Microsoft
 * cortexa8 support code Copyright (c) 2008 3am Software Foundry
 * cortexa8 improvements Copyright (c) Goeran Weinholt
 * Copyright (c) 1997 Mark Brinicombe.
 * Copyright (c) 1997 Causality Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Causality Limited.
 * 4. The name of Causality Limited may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpufuncs.c
 *
 * C functions for supporting CPU / MMU / TLB specific operations.
 *
 * Created	: 30/01/97
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cpufunc.c,v 1.130 2013/11/12 17:31:55 matt Exp $");

#include "opt_compat_netbsd.h"
#include "opt_cpuoptions.h"
#include "opt_perfctrs.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/pmc.h>
#include <sys/systm.h>
#include <machine/cpu.h>
#include <machine/bootconfig.h>
#include <arch/arm/arm/disassem.h>

#include <uvm/uvm.h>

#include <arm/cpuconf.h>
#include <arm/cpufunc.h>
#include <arm/locore.h>

#ifdef CPU_XSCALE_80200
#include <arm/xscale/i80200reg.h>
#include <arm/xscale/i80200var.h>
#endif

#ifdef CPU_XSCALE_80321
#include <arm/xscale/i80321reg.h>
#include <arm/xscale/i80321var.h>
#endif

#ifdef CPU_XSCALE_IXP425
#include <arm/xscale/ixp425reg.h>
#include <arm/xscale/ixp425var.h>
#endif

#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321)
#include <arm/xscale/xscalereg.h>
#endif

#if defined(PERFCTRS)
struct arm_pmc_funcs *arm_pmc;
#endif

#if defined(CPU_ARMV7) && (defined(CPU_ARMV6) || defined(CPU_PRE_ARMV6))
bool cpu_armv7_p;
#endif

/* PRIMARY CACHE VARIABLES */
#if (ARM_MMU_V6 + ARM_MMU_V7) != 0
u_int	arm_cache_prefer_mask;
#endif
struct	arm_cache_info arm_pcache;
struct	arm_cache_info arm_scache;

u_int	arm_dcache_align;
u_int	arm_dcache_align_mask;

/* 1 == use cpu_sleep(), 0 == don't */
int cpu_do_powersave;

#ifdef CPU_ARM2
struct cpu_functions arm2_cpufuncs = {
	/* CPU functions */

	.cf_id			= arm2_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= (void *)cpufunc_nullop,

	/* TLB functions */

	.cf_tlb_flushID		= cpufunc_nullop,
	.cf_tlb_flushID_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushI		= cpufunc_nullop,
	.cf_tlb_flushI_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushD		= cpufunc_nullop,
	.cf_tlb_flushD_SE	= (void *)cpufunc_nullop,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *) cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm3_cache_flush,
	.cf_dcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_dcache_inv_range	= (void *)cpufunc_nullop,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= cpufunc_nullop,
	.cf_idcache_wbinv_range	= (void *)cpufunc_nullop,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= early_abort_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_setup		= (void *)cpufunc_nullop

};
#endif	/* CPU_ARM2 */
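
/*
 * A note on how these tables are consumed (illustrative; this assumes
 * the usual NetBSD/arm wrapper macros in <arm/cpufunc.h>): kernel code
 * does not index the tables directly, but calls cpu_*() macros that
 * expand to members of the global cpufuncs, which set_cpufuncs() below
 * points at the table matching the probed CPU, e.g.
 *
 *	cpu_tlb_flushID();		// cpufuncs.cf_tlb_flushID()
 *	cpu_idcache_wbinv_all();	// cpufuncs.cf_idcache_wbinv_all()
 */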

#ifdef CPU_ARM250
struct cpu_functions arm250_cpufuncs = {
	/* CPU functions */

	.cf_id			= arm250_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= (void *)cpufunc_nullop,

	/* TLB functions */

	.cf_tlb_flushID		= cpufunc_nullop,
	.cf_tlb_flushID_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushI		= cpufunc_nullop,
	.cf_tlb_flushI_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushD		= cpufunc_nullop,
	.cf_tlb_flushD_SE	= (void *)cpufunc_nullop,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *) cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm3_cache_flush,
	.cf_dcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_dcache_inv_range	= (void *)cpufunc_nullop,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= cpufunc_nullop,
	.cf_idcache_wbinv_range	= (void *)cpufunc_nullop,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= early_abort_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_setup		= (void *)cpufunc_nullop

};
#endif	/* CPU_ARM250 */

#ifdef CPU_ARM3
struct cpu_functions arm3_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= arm3_control,

	/* TLB functions */

	.cf_tlb_flushID		= cpufunc_nullop,
	.cf_tlb_flushID_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushI		= cpufunc_nullop,
	.cf_tlb_flushI_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushD		= cpufunc_nullop,
	.cf_tlb_flushD_SE	= (void *)cpufunc_nullop,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *) cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm3_cache_flush,
	.cf_dcache_wbinv_range	= (void *)arm3_cache_flush,
	.cf_dcache_inv_range	= (void *)arm3_cache_flush,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm3_cache_flush,
	.cf_idcache_wbinv_range	= (void *)arm3_cache_flush,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= early_abort_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_setup		= (void *)cpufunc_nullop

};
#endif	/* CPU_ARM3 */

#ifdef CPU_ARM6
struct cpu_functions arm6_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm67_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm67_tlb_flush,
	.cf_tlb_flushID_SE	= arm67_tlb_purge,
	.cf_tlb_flushI		= arm67_tlb_flush,
	.cf_tlb_flushI_SE	= arm67_tlb_purge,
	.cf_tlb_flushD		= arm67_tlb_flush,
	.cf_tlb_flushD_SE	= arm67_tlb_purge,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *) cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm67_cache_flush,
	.cf_dcache_wbinv_range	= (void *)arm67_cache_flush,
	.cf_dcache_inv_range	= (void *)arm67_cache_flush,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm67_cache_flush,
	.cf_idcache_wbinv_range	= (void *)arm67_cache_flush,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

#ifdef ARM6_LATE_ABORT
	.cf_dataabt_fixup	= late_abort_fixup,
#else
	.cf_dataabt_fixup	= early_abort_fixup,
#endif
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm67_context_switch,

	.cf_setup		= arm6_setup

};
#endif	/* CPU_ARM6 */

#ifdef CPU_ARM7
struct cpu_functions arm7_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm67_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm67_tlb_flush,
	.cf_tlb_flushID_SE	= arm67_tlb_purge,
	.cf_tlb_flushI		= arm67_tlb_flush,
	.cf_tlb_flushI_SE	= arm67_tlb_purge,
	.cf_tlb_flushD		= arm67_tlb_flush,
	.cf_tlb_flushD_SE	= arm67_tlb_purge,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm67_cache_flush,
	.cf_dcache_wbinv_range	= (void *)arm67_cache_flush,
	.cf_dcache_inv_range	= (void *)arm67_cache_flush,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm67_cache_flush,
	.cf_idcache_wbinv_range	= (void *)arm67_cache_flush,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= late_abort_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm67_context_switch,

	.cf_setup		= arm7_setup

};
#endif	/* CPU_ARM7 */

#ifdef CPU_ARM7TDMI
struct cpu_functions arm7tdmi_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm7tdmi_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm7tdmi_tlb_flushID,
	.cf_tlb_flushID_SE	= arm7tdmi_tlb_flushID_SE,
	.cf_tlb_flushI		= arm7tdmi_tlb_flushID,
	.cf_tlb_flushI_SE	= arm7tdmi_tlb_flushID_SE,
	.cf_tlb_flushD		= arm7tdmi_tlb_flushID,
	.cf_tlb_flushD_SE	= arm7tdmi_tlb_flushID_SE,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm7tdmi_cache_flushID,
	.cf_dcache_wbinv_range	= (void *)arm7tdmi_cache_flushID,
	.cf_dcache_inv_range	= (void *)arm7tdmi_cache_flushID,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm7tdmi_cache_flushID,
	.cf_idcache_wbinv_range	= (void *)arm7tdmi_cache_flushID,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= late_abort_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm7tdmi_context_switch,

	.cf_setup		= arm7tdmi_setup

};
#endif	/* CPU_ARM7TDMI */

#ifdef CPU_ARM8
struct cpu_functions arm8_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm8_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm8_tlb_flushID,
	.cf_tlb_flushID_SE	= arm8_tlb_flushID_SE,
	.cf_tlb_flushI		= arm8_tlb_flushID,
	.cf_tlb_flushI_SE	= arm8_tlb_flushID_SE,
	.cf_tlb_flushD		= arm8_tlb_flushID,
	.cf_tlb_flushD_SE	= arm8_tlb_flushID_SE,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm8_cache_purgeID,
	.cf_dcache_wbinv_range	= (void *)arm8_cache_purgeID,
/*XXX*/	.cf_dcache_inv_range	= (void *)arm8_cache_purgeID,
	.cf_dcache_wb_range	= (void *)arm8_cache_cleanID,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm8_cache_purgeID,
	.cf_idcache_wbinv_range = (void *)arm8_cache_purgeID,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm8_context_switch,

	.cf_setup		= arm8_setup
};
#endif	/* CPU_ARM8 */

#ifdef CPU_ARM9
struct cpu_functions arm9_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm9_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= arm9_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= arm9_icache_sync_all,
	.cf_icache_sync_range	= arm9_icache_sync_range,

	.cf_dcache_wbinv_all	= arm9_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= arm9_dcache_wbinv_range,
/*XXX*/	.cf_dcache_inv_range	= arm9_dcache_wbinv_range,
	.cf_dcache_wb_range	= arm9_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm9_idcache_wbinv_all,
	.cf_idcache_wbinv_range = arm9_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm9_context_switch,

	.cf_setup		= arm9_setup

};
#endif /* CPU_ARM9 */

#if defined(CPU_ARM9E) || defined(CPU_ARM10)
struct cpu_functions armv5_ec_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= armv5_ec_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= arm10_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= arm10_tlb_flushI_SE,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv5_ec_icache_sync_all,
	.cf_icache_sync_range	= armv5_ec_icache_sync_range,

	.cf_dcache_wbinv_all	= armv5_ec_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= armv5_ec_dcache_wbinv_range,
/*XXX*/	.cf_dcache_inv_range	= armv5_ec_dcache_wbinv_range,
	.cf_dcache_wb_range	= armv5_ec_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= armv5_ec_idcache_wbinv_all,
	.cf_idcache_wbinv_range = armv5_ec_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm10_context_switch,

	.cf_setup		= arm10_setup

};
#endif /* CPU_ARM9E || CPU_ARM10 */

#ifdef CPU_ARM10
struct cpu_functions arm10_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= armv5_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= arm10_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= arm10_tlb_flushI_SE,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv5_icache_sync_all,
	.cf_icache_sync_range	= armv5_icache_sync_range,

	.cf_dcache_wbinv_all	= armv5_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= armv5_dcache_wbinv_range,
/*XXX*/	.cf_dcache_inv_range	= armv5_dcache_wbinv_range,
	.cf_dcache_wb_range	= armv5_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= armv5_idcache_wbinv_all,
	.cf_idcache_wbinv_range = armv5_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm10_context_switch,

	.cf_setup		= arm10_setup

};
#endif /* CPU_ARM10 */

#ifdef CPU_ARM11
struct cpu_functions arm11_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm11_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm11_tlb_flushID,
	.cf_tlb_flushID_SE	= arm11_tlb_flushID_SE,
	.cf_tlb_flushI		= arm11_tlb_flushI,
	.cf_tlb_flushI_SE	= arm11_tlb_flushI_SE,
	.cf_tlb_flushD		= arm11_tlb_flushD,
	.cf_tlb_flushD_SE	= arm11_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv6_icache_sync_all,
	.cf_icache_sync_range	= armv6_icache_sync_range,

	.cf_dcache_wbinv_all	= armv6_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= armv6_dcache_wbinv_range,
	.cf_dcache_inv_range	= armv6_dcache_inv_range,
	.cf_dcache_wb_range	= armv6_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= armv6_idcache_wbinv_all,
	.cf_idcache_wbinv_range = armv6_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= arm11_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= arm11_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm11_context_switch,

	.cf_setup		= arm11_setup

};
#endif /* CPU_ARM11 */

#ifdef CPU_ARM1136
struct cpu_functions arm1136_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm11x6_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm11_tlb_flushID,
	.cf_tlb_flushID_SE	= arm11_tlb_flushID_SE,
	.cf_tlb_flushI		= arm11_tlb_flushI,
	.cf_tlb_flushI_SE	= arm11_tlb_flushI_SE,
	.cf_tlb_flushD		= arm11_tlb_flushD,
	.cf_tlb_flushD_SE	= arm11_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= arm11x6_icache_sync_all,	/* 411920 */
	.cf_icache_sync_range	= arm11x6_icache_sync_range,	/* 371025 */

	.cf_dcache_wbinv_all	= arm11x6_dcache_wbinv_all,	/* 411920 */
	.cf_dcache_wbinv_range	= armv6_dcache_wbinv_range,
	.cf_dcache_inv_range	= armv6_dcache_inv_range,
	.cf_dcache_wb_range	= armv6_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm11x6_idcache_wbinv_all,	/* 411920 */
	.cf_idcache_wbinv_range = arm11x6_idcache_wbinv_range,	/* 371025 */

	/* Other functions */

	.cf_flush_prefetchbuf	= arm11x6_flush_prefetchbuf,
	.cf_drain_writebuf	= arm11_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= arm11_sleep,	/* arm1136_sleep_rev0 */

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm11_context_switch,

	.cf_setup		= arm11x6_setup

};
#endif /* CPU_ARM1136 */

#ifdef CPU_ARM1176
struct cpu_functions arm1176_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm11x6_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm11_tlb_flushID,
	.cf_tlb_flushID_SE	= arm11_tlb_flushID_SE,
	.cf_tlb_flushI		= arm11_tlb_flushI,
	.cf_tlb_flushI_SE	= arm11_tlb_flushI_SE,
	.cf_tlb_flushD		= arm11_tlb_flushD,
	.cf_tlb_flushD_SE	= arm11_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= arm11x6_icache_sync_all,	/* 415045 */
	.cf_icache_sync_range	= arm11x6_icache_sync_range,	/* 371367 */

	.cf_dcache_wbinv_all	= arm11x6_dcache_wbinv_all,	/* 415045 */
	.cf_dcache_wbinv_range	= armv6_dcache_wbinv_range,
	.cf_dcache_inv_range	= armv6_dcache_inv_range,
	.cf_dcache_wb_range	= armv6_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm11x6_idcache_wbinv_all,	/* 415045 */
	.cf_idcache_wbinv_range = arm11x6_idcache_wbinv_range,	/* 371367 */

	/* Other functions */

	.cf_flush_prefetchbuf	= arm11x6_flush_prefetchbuf,
	.cf_drain_writebuf	= arm11_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= arm11x6_sleep,		/* no ref. */

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm11_context_switch,

	.cf_setup		= arm11x6_setup

};
#endif /* CPU_ARM1176 */


#ifdef CPU_ARM11MPCORE
struct cpu_functions arm11mpcore_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm11_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm11_tlb_flushID,
	.cf_tlb_flushID_SE	= arm11_tlb_flushID_SE,
	.cf_tlb_flushI		= arm11_tlb_flushI,
	.cf_tlb_flushI_SE	= arm11_tlb_flushI_SE,
	.cf_tlb_flushD		= arm11_tlb_flushD,
	.cf_tlb_flushD_SE	= arm11_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv6_icache_sync_all,
	.cf_icache_sync_range	= armv5_icache_sync_range,

	.cf_dcache_wbinv_all	= armv6_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= armv5_dcache_wbinv_range,
	.cf_dcache_inv_range	= armv5_dcache_inv_range,
	.cf_dcache_wb_range	= armv5_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= armv6_idcache_wbinv_all,
	.cf_idcache_wbinv_range = armv5_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= arm11_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= arm11_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm11_context_switch,

	.cf_setup		= arm11mpcore_setup

};
#endif /* CPU_ARM11MPCORE */

#ifdef CPU_SA110
struct cpu_functions sa110_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= sa1_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= sa1_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= sa1_cache_syncI,
	.cf_icache_sync_range	= sa1_cache_syncI_rng,

	.cf_dcache_wbinv_all	= sa1_cache_purgeD,
	.cf_dcache_wbinv_range	= sa1_cache_purgeD_rng,
/*XXX*/	.cf_dcache_inv_range	= sa1_cache_purgeD_rng,
	.cf_dcache_wb_range	= sa1_cache_cleanD_rng,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= sa1_cache_purgeID,
	.cf_idcache_wbinv_range	= sa1_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= sa110_context_switch,

	.cf_setup		= sa110_setup
};
#endif	/* CPU_SA110 */

#if defined(CPU_SA1100) || defined(CPU_SA1110)
struct cpu_functions sa11x0_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= sa1_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= sa1_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= sa1_cache_syncI,
	.cf_icache_sync_range	= sa1_cache_syncI_rng,

	.cf_dcache_wbinv_all	= sa1_cache_purgeD,
	.cf_dcache_wbinv_range	= sa1_cache_purgeD_rng,
/*XXX*/	.cf_dcache_inv_range	= sa1_cache_purgeD_rng,
	.cf_dcache_wb_range	= sa1_cache_cleanD_rng,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= sa1_cache_purgeID,
	.cf_idcache_wbinv_range	= sa1_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf	= sa11x0_drain_readbuf,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= sa11x0_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= sa11x0_context_switch,

	.cf_setup		= sa11x0_setup
};
#endif	/* CPU_SA1100 || CPU_SA1110 */

#if defined(CPU_FA526)
struct cpu_functions fa526_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= fa526_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= fa526_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= fa526_tlb_flushI_SE,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= fa526_icache_sync_all,
	.cf_icache_sync_range	= fa526_icache_sync_range,

	.cf_dcache_wbinv_all	= fa526_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= fa526_dcache_wbinv_range,
	.cf_dcache_inv_range	= fa526_dcache_inv_range,
	.cf_dcache_wb_range	= fa526_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= fa526_idcache_wbinv_all,
	.cf_idcache_wbinv_range	= fa526_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= fa526_flush_prefetchbuf,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= fa526_flush_brnchtgt_E,

	.cf_sleep		= fa526_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= fa526_context_switch,

	.cf_setup		= fa526_setup
};
#endif	/* CPU_FA526 */

#ifdef CPU_IXP12X0
struct cpu_functions ixp12x0_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= sa1_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= sa1_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= sa1_cache_syncI,
	.cf_icache_sync_range	= sa1_cache_syncI_rng,

	.cf_dcache_wbinv_all	= sa1_cache_purgeD,
	.cf_dcache_wbinv_range	= sa1_cache_purgeD_rng,
/*XXX*/	.cf_dcache_inv_range	= sa1_cache_purgeD_rng,
	.cf_dcache_wb_range	= sa1_cache_cleanD_rng,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= sa1_cache_purgeID,
	.cf_idcache_wbinv_range	= sa1_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf	= ixp12x0_drain_readbuf,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= ixp12x0_context_switch,

	.cf_setup		= ixp12x0_setup
};
#endif	/* CPU_IXP12X0 */

#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
    defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425)
struct cpu_functions xscale_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= xscale_cpwait,

	/* MMU functions */

	.cf_control		= xscale_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= xscale_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= xscale_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= xscale_cache_syncI,
	.cf_icache_sync_range	= xscale_cache_syncI_rng,

	.cf_dcache_wbinv_all	= xscale_cache_purgeD,
	.cf_dcache_wbinv_range	= xscale_cache_purgeD_rng,
	.cf_dcache_inv_range	= xscale_cache_flushD_rng,
	.cf_dcache_wb_range	= xscale_cache_cleanD_rng,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= xscale_cache_purgeID,
	.cf_idcache_wbinv_range = xscale_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= xscale_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= xscale_context_switch,

	.cf_setup		= xscale_setup
};
#endif
/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || __CPU_XSCALE_PXA2XX || CPU_XSCALE_IXP425 */

#if defined(CPU_CORTEX)
struct cpu_functions cortex_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= armv7_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm11_tlb_flushID,
	.cf_tlb_flushID_SE	= armv7_tlb_flushID_SE,
	.cf_tlb_flushI		= arm11_tlb_flushI,
	.cf_tlb_flushI_SE	= arm11_tlb_flushI_SE,
	.cf_tlb_flushD		= arm11_tlb_flushD,
	.cf_tlb_flushD_SE	= arm11_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv7_icache_sync_all,
	.cf_dcache_wbinv_all	= armv7_dcache_wbinv_all,

	.cf_dcache_inv_range	= armv7_dcache_inv_range,
	.cf_dcache_wb_range	= armv7_dcache_wb_range,
	.cf_dcache_wbinv_range	= armv7_dcache_wbinv_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_icache_sync_range	= armv7_icache_sync_range,
	.cf_idcache_wbinv_range = armv7_idcache_wbinv_range,

	.cf_idcache_wbinv_all	= armv7_idcache_wbinv_all,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv7_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= armv7_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= armv7_context_switch,

	.cf_setup		= armv7_setup

};
#endif /* CPU_CORTEX */

#ifdef CPU_PJ4B
struct cpu_functions pj4bv7_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= pj4b_drain_writebuf,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= pj4b_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= pj4b_tlb_flushID,
	.cf_tlb_flushID_SE	= pj4b_tlb_flushID_SE,
	.cf_tlb_flushI		= pj4b_tlb_flushID,
	.cf_tlb_flushI_SE	= pj4b_tlb_flushID_SE,
	.cf_tlb_flushD		= pj4b_tlb_flushID,
	.cf_tlb_flushD_SE	= pj4b_tlb_flushID_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv7_idcache_wbinv_all,
	.cf_icache_sync_range	= pj4b_icache_sync_range,

	.cf_dcache_wbinv_all	= armv7_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= pj4b_dcache_wbinv_range,
	.cf_dcache_inv_range	= pj4b_dcache_inv_range,
	.cf_dcache_wb_range	= pj4b_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= armv7_idcache_wbinv_all,
	.cf_idcache_wbinv_range	= pj4b_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= pj4b_drain_readbuf,
	.cf_drain_writebuf	= pj4b_drain_writebuf,
	.cf_flush_brnchtgt_C	= pj4b_flush_brnchtgt_all,
	.cf_flush_brnchtgt_E	= pj4b_flush_brnchtgt_va,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= pj4b_context_switch,

	.cf_setup		= pj4bv7_setup
};
#endif /* CPU_PJ4B */

#ifdef CPU_SHEEVA
struct cpu_functions sheeva_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= armv5_ec_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= arm10_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= arm10_tlb_flushI_SE,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv5_ec_icache_sync_all,
	.cf_icache_sync_range	= armv5_ec_icache_sync_range,

	.cf_dcache_wbinv_all	= armv5_ec_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= sheeva_dcache_wbinv_range,
	.cf_dcache_inv_range	= sheeva_dcache_inv_range,
	.cf_dcache_wb_range	= sheeva_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= armv5_ec_idcache_wbinv_all,
	.cf_idcache_wbinv_range = sheeva_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)sheeva_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm10_context_switch,

	.cf_setup		= sheeva_setup
};
#endif /* CPU_SHEEVA */


/*
 * Global constants also used by locore.s
 */

struct cpu_functions cpufuncs;
u_int cputype;

#if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) || \
    defined(CPU_ARM9E) || defined(CPU_ARM10) || defined(CPU_ARM11) || \
    defined(CPU_FA526) || \
    defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
    defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425) || \
    defined(CPU_CORTEX) || defined(CPU_PJ4B) || defined(CPU_SHEEVA)
static void get_cachetype_cp15(void);

/* Additional cache information local to this file.  Log2 of some of the
   above numbers.  */
static int	arm_dcache_log2_nsets;
static int	arm_dcache_log2_assoc;
static int	arm_dcache_log2_linesize;

#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
static inline u_int
get_cachesize_cp15(int cssr)
{
	u_int csid;

#if ((CPU_CORTEX) > 0) || defined(CPU_PJ4B)
	__asm volatile(".arch\tarmv7a");
	__asm volatile("mcr p15, 2, %0, c0, c0, 0" :: "r" (cssr));
	__asm volatile("isb");	/* sync to the new cssr */
#else
	__asm volatile("mcr p15, 1, %0, c0, c0, 2" :: "r" (cssr));
#endif
	__asm volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (csid));
	return csid;
}
#endif
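
/*
 * Illustrative use of the accessor above (a sketch, not called anywhere
 * in this file): decode the L1 D-cache geometry from the returned CSID
 * value.  CSSELR takes (level << 1); OR in CPU_CSSR_InD to select the
 * I-cache side instead.
 *
 *	u_int csid = get_cachesize_cp15(0);		// L1 D-cache
 *	u_int ways = CPU_CSID_ASSOC(csid) + 1;
 *	u_int sets = CPU_CSID_NUMSETS(csid) + 1;
 *	u_int line = 1U << (CPU_CSID_LEN(csid) + 4);	// bytes
 *	u_int size = line * ways * sets;		// total bytes
 */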

#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
static void
get_cacheinfo_clidr(struct arm_cache_info *info, u_int level, u_int clidr)
{
	u_int csid;
	u_int nsets;

	if (clidr & 6) {
		csid = get_cachesize_cp15(level << 1); /* select dcache values for this level */
		nsets = CPU_CSID_NUMSETS(csid) + 1;
		info->dcache_ways = CPU_CSID_ASSOC(csid) + 1;
		info->dcache_line_size = 1U << (CPU_CSID_LEN(csid) + 4);
		info->dcache_size = info->dcache_line_size * info->dcache_ways * nsets;

		if (level == 0) {
			arm_dcache_log2_assoc = CPU_CSID_ASSOC(csid) + 1;
			arm_dcache_log2_linesize = CPU_CSID_LEN(csid) + 4;
			arm_dcache_log2_nsets = 31 - __builtin_clz(nsets);
		}
	}

	info->cache_unified = (clidr == 4);

	if (clidr & 1) {
		csid = get_cachesize_cp15((level << 1)|CPU_CSSR_InD); /* select icache values for this level */
		nsets = CPU_CSID_NUMSETS(csid) + 1;
		info->icache_ways = CPU_CSID_ASSOC(csid) + 1;
		info->icache_line_size = 1U << (CPU_CSID_LEN(csid) + 4);
		info->icache_size = info->icache_line_size * info->icache_ways * nsets;
	} else {
		info->icache_ways = info->dcache_ways;
		info->icache_line_size = info->dcache_line_size;
		info->icache_size = info->dcache_size;
	}
}
#endif /* (ARM_MMU_V6 + ARM_MMU_V7) > 0 */
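
/*
 * Worked example for get_cacheinfo_clidr() (hypothetical values): a
 * 32 KB, 4-way L1 D-cache with 64-byte lines reports CCSIDR fields
 * LEN=2, ASSOC=3, NUMSETS=127, so the code above computes
 *
 *	line_size = 1 << (2 + 4)  = 64
 *	ways      = 3 + 1         = 4
 *	nsets     = 127 + 1       = 128
 *	size      = 64 * 4 * 128  = 32768
 */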

static void
get_cachetype_cp15(void)
{
	u_int ctype, isize, dsize;
	u_int multiplier;

	__asm volatile("mrc p15, 0, %0, c0, c0, 1"
		: "=r" (ctype));

	/*
	 * ...and thus spake the ARM ARM:
	 *
	 * If an <opcode2> value corresponding to an unimplemented or
	 * reserved ID register is encountered, the System Control
	 * processor returns the value of the main ID register.
	 */
	if (ctype == cpu_id())
		goto out;

#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
	if (CPU_CT_FORMAT(ctype) == 4) {
		u_int clidr = armreg_clidr_read();

		if (CPU_CT4_L1IPOLICY(ctype) != CPU_CT4_L1_PIPT) {
			arm_cache_prefer_mask = PAGE_SIZE;
		}
		arm_pcache.cache_type = CPU_CT_CTYPE_WB14;

		get_cacheinfo_clidr(&arm_pcache, 0, clidr & 7);
		arm_dcache_align = arm_pcache.dcache_line_size;
		clidr >>= 3;
		if (clidr & 7) {
			get_cacheinfo_clidr(&arm_scache, 1, clidr & 7);
			if (arm_scache.dcache_line_size < arm_dcache_align)
				arm_dcache_align = arm_scache.dcache_line_size;
		}
		goto out;
	}
#endif /* ARM_MMU_V6 + ARM_MMU_V7 > 0 */

	if ((ctype & CPU_CT_S) == 0)
		arm_pcache.cache_unified = 1;

	/*
	 * If you want to know how this code works, go read the ARM ARM.
	 */

	arm_pcache.cache_type = CPU_CT_CTYPE(ctype);

	if (arm_pcache.cache_unified == 0) {
		isize = CPU_CT_ISIZE(ctype);
		multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
		arm_pcache.icache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
		if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
			if (isize & CPU_CT_xSIZE_M)
				arm_pcache.icache_line_size = 0; /* not present */
			else
				arm_pcache.icache_ways = 1;
		} else {
			arm_pcache.icache_ways = multiplier <<
			    (CPU_CT_xSIZE_ASSOC(isize) - 1);
#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
			if (CPU_CT_xSIZE_P & isize)
				arm_cache_prefer_mask |=
				    __BIT(9 + CPU_CT_xSIZE_SIZE(isize)
					  - CPU_CT_xSIZE_ASSOC(isize))
				    - PAGE_SIZE;
#endif
		}
		arm_pcache.icache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
	}

	dsize = CPU_CT_DSIZE(ctype);
	multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
	arm_pcache.dcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
	if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
		if (dsize & CPU_CT_xSIZE_M)
			arm_pcache.dcache_line_size = 0; /* not present */
		else
			arm_pcache.dcache_ways = 1;
	} else {
		arm_pcache.dcache_ways = multiplier <<
		    (CPU_CT_xSIZE_ASSOC(dsize) - 1);
#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
		if (CPU_CT_xSIZE_P & dsize)
			arm_cache_prefer_mask |=
			    __BIT(9 + CPU_CT_xSIZE_SIZE(dsize)
				  - CPU_CT_xSIZE_ASSOC(dsize)) - PAGE_SIZE;
#endif
	}
	arm_pcache.dcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);

	arm_dcache_align = arm_pcache.dcache_line_size;

	arm_dcache_log2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
	arm_dcache_log2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
	arm_dcache_log2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
	    CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);

 out:
	arm_dcache_align_mask = arm_dcache_align - 1;
}
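
/*
 * Worked example for the pre-v6 path above (hypothetical part): a
 * D-cache field with SIZE=5, ASSOC=3, LEN=2 and the M bit clear gives
 * multiplier 2, hence
 *
 *	dcache_line_size = 1 << (2 + 3) = 32 bytes
 *	dcache_ways      = 2 << (3 - 1) = 8
 *	dcache_size      = 2 << (5 + 8) = 16384 bytes
 */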
#endif /* ARM7TDMI || ARM8 || ARM9 || ARM9E || ARM10 || ARM11 || FA526 || XSCALE || CORTEX || PJ4B || SHEEVA */

#if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3) || \
    defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_SA110) || \
    defined(CPU_SA1100) || defined(CPU_SA1110) || defined(CPU_IXP12X0)
/* Cache information for CPUs without cache type registers. */
struct cachetab {
	uint32_t ct_cpuid;
	int	ct_pcache_type;
	int	ct_pcache_unified;
	int	ct_pdcache_size;
	int	ct_pdcache_line_size;
	int	ct_pdcache_ways;
	int	ct_picache_size;
	int	ct_picache_line_size;
	int	ct_picache_ways;
};

struct cachetab cachetab[] = {
    /* cpuid,           cache type,       u,  dsiz, ls, wy,  isiz, ls, wy */
    { CPU_ID_ARM2,      0,                1,     0,  0,  0,     0,  0,  0 },
    { CPU_ID_ARM250,    0,                1,     0,  0,  0,     0,  0,  0 },
    { CPU_ID_ARM3,      CPU_CT_CTYPE_WT,  1,  4096, 16, 64,     0,  0,  0 },
    { CPU_ID_ARM610,	CPU_CT_CTYPE_WT,  1,  4096, 16, 64,     0,  0,  0 },
    { CPU_ID_ARM710,    CPU_CT_CTYPE_WT,  1,  8192, 32,  4,     0,  0,  0 },
    { CPU_ID_ARM7500,   CPU_CT_CTYPE_WT,  1,  4096, 16,  4,     0,  0,  0 },
    { CPU_ID_ARM710A,   CPU_CT_CTYPE_WT,  1,  8192, 16,  4,     0,  0,  0 },
    { CPU_ID_ARM7500FE, CPU_CT_CTYPE_WT,  1,  4096, 16,  4,     0,  0,  0 },
    /* XXX is this type right for SA-1? */
    { CPU_ID_SA110,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 },
    { CPU_ID_SA1100,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
    { CPU_ID_SA1110,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
    { CPU_ID_IXP1200,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 }, /* XXX */
    { 0, 0, 0, 0, 0, 0, 0, 0}
};

static void get_cachetype_table(void);

static void
get_cachetype_table(void)
{
	int i;
	uint32_t cpuid = cpu_id();

	for (i = 0; cachetab[i].ct_cpuid != 0; i++) {
		if (cachetab[i].ct_cpuid == (cpuid & CPU_ID_CPU_MASK)) {
			arm_pcache.cache_type = cachetab[i].ct_pcache_type;
			arm_pcache.cache_unified = cachetab[i].ct_pcache_unified;
			arm_pcache.dcache_size = cachetab[i].ct_pdcache_size;
			arm_pcache.dcache_line_size =
			    cachetab[i].ct_pdcache_line_size;
			arm_pcache.dcache_ways = cachetab[i].ct_pdcache_ways;
			arm_pcache.icache_size = cachetab[i].ct_picache_size;
			arm_pcache.icache_line_size =
			    cachetab[i].ct_picache_line_size;
			arm_pcache.icache_ways = cachetab[i].ct_picache_ways;
		}
	}

	arm_dcache_align = arm_pcache.dcache_line_size;
	arm_dcache_align_mask = arm_dcache_align - 1;
}

#endif /* ARM2 || ARM250 || ARM3 || ARM6 || ARM7 || SA110 || SA1100 || SA1110 || IXP12X0 */

/*
 * Cannot panic here as we may not have a console yet ...
 */

int
set_cpufuncs(void)
{
	if (cputype == 0) {
		cputype = cpufunc_id();
		cputype &= CPU_ID_CPU_MASK;
	}
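
	/*
	 * For reference while reading the ID checks below (ARMv4+ layout;
	 * the pre-ARM7 parts matched by full ID value differ): the main
	 * ID register carries the implementer in bits [31:24], the
	 * primary part number in [15:4] and the revision in [3:0], which
	 * is why the tests mask with CPU_ID_IMPLEMENTOR_MASK and compare
	 * part nibbles such as (cputype & 0x0000f000).
	 */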

	/*
	 * NOTE: cpu_do_powersave defaults to off.  If we encounter a
	 * CPU type where we want to use it by default, then we set it.
	 */
#ifdef CPU_ARM2
	if (cputype == CPU_ID_ARM2) {
		cpufuncs = arm2_cpufuncs;
		get_cachetype_table();
		return 0;
	}
#endif /* CPU_ARM2 */
#ifdef CPU_ARM250
	if (cputype == CPU_ID_ARM250) {
		cpufuncs = arm250_cpufuncs;
		get_cachetype_table();
		return 0;
	}
#endif
#ifdef CPU_ARM3
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    (cputype & 0x00000f00) == 0x00000300) {
		cpufuncs = arm3_cpufuncs;
		get_cachetype_table();
		return 0;
	}
#endif	/* CPU_ARM3 */
#ifdef CPU_ARM6
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    (cputype & 0x00000f00) == 0x00000600) {
		cpufuncs = arm6_cpufuncs;
		get_cachetype_table();
		pmap_pte_init_generic();
		return 0;
	}
#endif	/* CPU_ARM6 */
#ifdef CPU_ARM7
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    CPU_ID_IS7(cputype) &&
	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V3) {
		cpufuncs = arm7_cpufuncs;
		get_cachetype_table();
		pmap_pte_init_generic();
		return 0;
	}
#endif	/* CPU_ARM7 */
#ifdef CPU_ARM7TDMI
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    CPU_ID_IS7(cputype) &&
	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V4T) {
		cpufuncs = arm7tdmi_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		return 0;
	}
#endif
#ifdef CPU_ARM8
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    (cputype & 0x0000f000) == 0x00008000) {
		cpufuncs = arm8_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_arm8();
		return 0;
	}
#endif	/* CPU_ARM8 */
#ifdef CPU_ARM9
	if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
	     (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
	    (cputype & 0x0000f000) == 0x00009000) {
		cpufuncs = arm9_cpufuncs;
		get_cachetype_cp15();
		arm9_dcache_sets_inc = 1U << arm_dcache_log2_linesize;
		arm9_dcache_sets_max =
		    (1U << (arm_dcache_log2_linesize + arm_dcache_log2_nsets)) -
		    arm9_dcache_sets_inc;
		arm9_dcache_index_inc = 1U << (32 - arm_dcache_log2_assoc);
		arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
1791 #ifdef	ARM9_CACHE_WRITE_THROUGH
1792 		pmap_pte_init_arm9();
1793 #else
1794 		pmap_pte_init_generic();
1795 #endif
1796 		return 0;
1797 	}
1798 #endif /* CPU_ARM9 */
1799 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
1800 	if (cputype == CPU_ID_ARM926EJS ||
1801 	    cputype == CPU_ID_ARM1026EJS) {
1802 		cpufuncs = armv5_ec_cpufuncs;
1803 		get_cachetype_cp15();
1804 		pmap_pte_init_generic();
1805 		return 0;
1806 	}
1807 #endif /* CPU_ARM9E || CPU_ARM10 */
1808 #if defined(CPU_SHEEVA)
1809 	if (cputype == CPU_ID_MV88SV131 ||
1810 	    cputype == CPU_ID_MV88FR571_VD) {
1811 		cpufuncs = sheeva_cpufuncs;
1812 		get_cachetype_cp15();
1813 		pmap_pte_init_generic();
1814 		cpu_do_powersave = 1;			/* Enable powersave */
1815 		return 0;
1816 	}
1817 #endif /* CPU_SHEEVA */
1818 #ifdef CPU_ARM10
1819 	if (/* cputype == CPU_ID_ARM1020T || */
1820 	    cputype == CPU_ID_ARM1020E) {
1821 		/*
1822 		 * Select write-through caching (this isn't really an
1823 		 * option on ARM1020T).
1824 		 */
1825 		cpufuncs = arm10_cpufuncs;
1826 		get_cachetype_cp15();
1827 		armv5_dcache_sets_inc = 1U << arm_dcache_log2_linesize;
1828 		armv5_dcache_sets_max =
1829 		    (1U << (arm_dcache_log2_linesize + arm_dcache_log2_nsets)) -
1830 		    armv5_dcache_sets_inc;
1831 		armv5_dcache_index_inc = 1U << (32 - arm_dcache_log2_assoc);
1832 		armv5_dcache_index_max = 0U - armv5_dcache_index_inc;
1833 		pmap_pte_init_generic();
1834 		return 0;
1835 	}
1836 #endif /* CPU_ARM10 */
1837 
1838 
1839 #if defined(CPU_ARM11MPCORE)
1840 	if (cputype == CPU_ID_ARM11MPCORE) {
1841 		cpufuncs = arm11mpcore_cpufuncs;
1842 		get_cachetype_cp15();
1843 		armv5_dcache_sets_inc = 1U << arm_dcache_log2_linesize;
1844 		armv5_dcache_sets_max = (1U << (arm_dcache_log2_linesize +
1845 			arm_dcache_log2_nsets)) - armv5_dcache_sets_inc;
1846 		armv5_dcache_index_inc = 1U << (32 - arm_dcache_log2_assoc);
1847 		armv5_dcache_index_max = 0U - armv5_dcache_index_inc;
1848 		cpu_do_powersave = 1;			/* Enable powersave */
1849 		pmap_pte_init_arm11mpcore();
1850 		if (arm_cache_prefer_mask)
1851 			uvmexp.ncolors = (arm_cache_prefer_mask >> PGSHIFT) + 1;
1852 
1853 		return 0;
1854 
1855 	}
1856 #endif	/* CPU_ARM11MPCORE */
1857 
1858 #if defined(CPU_ARM11)
1859 	if (cputype == CPU_ID_ARM1136JS ||
1860 	    cputype == CPU_ID_ARM1136JSR1 ||
1861 	    cputype == CPU_ID_ARM1176JZS) {
1862 		cpufuncs = arm11_cpufuncs;
1863 #if defined(CPU_ARM1136)
1864 		if (cputype == CPU_ID_ARM1136JS ||
1865 		    cputype == CPU_ID_ARM1136JSR1) {
1866 			cpufuncs = arm1136_cpufuncs;
1867 			if (cputype == CPU_ID_ARM1136JS)
1868 				cpufuncs.cf_sleep = arm1136_sleep_rev0;
1869 		}
1870 #endif
1871 #if defined(CPU_ARM1176)
1872 		if (cputype == CPU_ID_ARM1176JZS) {
1873 			cpufuncs = arm1176_cpufuncs;
1874 		}
1875 #endif
1876 		cpu_do_powersave = 1;			/* Enable powersave */
1877 		get_cachetype_cp15();
1878 #ifdef ARM11_CACHE_WRITE_THROUGH
1879 		pmap_pte_init_arm11();
1880 #else
1881 		pmap_pte_init_generic();
1882 #endif
1883 		if (arm_cache_prefer_mask)
1884 			uvmexp.ncolors = (arm_cache_prefer_mask >> PGSHIFT) + 1;
1885 
1886 		/*
1887 		 * Start and reset the PMC Cycle Counter.
1888 		 */
1889 		armreg_pmcrv6_write(ARM11_PMCCTL_E | ARM11_PMCCTL_P | ARM11_PMCCTL_C);
1890 		return 0;
1891 	}
1892 #endif /* CPU_ARM11 */
1893 #ifdef CPU_SA110
1894 	if (cputype == CPU_ID_SA110) {
1895 		cpufuncs = sa110_cpufuncs;
1896 		get_cachetype_table();
1897 		pmap_pte_init_sa1();
1898 		return 0;
1899 	}
1900 #endif	/* CPU_SA110 */
1901 #ifdef CPU_SA1100
1902 	if (cputype == CPU_ID_SA1100) {
1903 		cpufuncs = sa11x0_cpufuncs;
1904 		get_cachetype_table();
1905 		pmap_pte_init_sa1();
1906 
1907 		/* Use powersave on this CPU. */
1908 		cpu_do_powersave = 1;
1909 
1910 		return 0;
1911 	}
1912 #endif	/* CPU_SA1100 */
1913 #ifdef CPU_SA1110
1914 	if (cputype == CPU_ID_SA1110) {
1915 		cpufuncs = sa11x0_cpufuncs;
1916 		get_cachetype_table();
1917 		pmap_pte_init_sa1();
1918 
1919 		/* Use powersave on this CPU. */
1920 		cpu_do_powersave = 1;
1921 
1922 		return 0;
1923 	}
1924 #endif	/* CPU_SA1110 */
1925 #ifdef CPU_FA526
1926 	if (cputype == CPU_ID_FA526) {
1927 		cpufuncs = fa526_cpufuncs;
1928 		get_cachetype_cp15();
1929 		pmap_pte_init_generic();
1930 
1931 		/* Use powersave on this CPU. */
1932 		cpu_do_powersave = 1;
1933 
1934 		return 0;
1935 	}
1936 #endif	/* CPU_FA526 */
1937 #ifdef CPU_IXP12X0
1938 	if (cputype == CPU_ID_IXP1200) {
1939 		cpufuncs = ixp12x0_cpufuncs;
1940 		get_cachetype_table();
1941 		pmap_pte_init_sa1();
1942 		return 0;
1943 	}
1944 #endif  /* CPU_IXP12X0 */
1945 #ifdef CPU_XSCALE_80200
1946 	if (cputype == CPU_ID_80200) {
1947 		int rev = cpufunc_id() & CPU_ID_REVISION_MASK;
1948 
1949 		i80200_icu_init();
1950 
1951 		/*
1952 		 * Reset the Performance Monitoring Unit to a
1953 		 * pristine state:
1954 		 *	- CCNT, PMN0, PMN1 reset to 0
1955 		 *	- overflow indications cleared
1956 		 *	- all counters disabled
1957 		 */
1958 		__asm volatile("mcr p14, 0, %0, c0, c0, 0"
1959 			:
1960 			: "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
1961 			       PMNC_CC_IF));
1962 
1963 #if defined(XSCALE_CCLKCFG)
1964 		/*
1965 		 * Crank CCLKCFG to maximum legal value.
1966 		 */
1967 		__asm volatile ("mcr p14, 0, %0, c6, c0, 0"
1968 			:
1969 			: "r" (XSCALE_CCLKCFG));
1970 #endif
1971 
1972 		/*
1973 		 * XXX Disable ECC in the Bus Controller Unit; we
1974 		 * don't really support it, yet.  Clear any pending
1975 		 * error indications.
1976 		 */
1977 		__asm volatile("mcr p13, 0, %0, c0, c1, 0"
1978 			:
1979 			: "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));
1980 
1981 		cpufuncs = xscale_cpufuncs;
1982 #if defined(PERFCTRS)
1983 		xscale_pmu_init();
1984 #endif
1985 
1986 		/*
1987 		 * i80200 errata: Step-A0 and A1 have a bug where
1988 		 * D$ dirty bits are not cleared on "invalidate by
1989 		 * address".
1990 		 *
1991 		 * Workaround: Clean cache line before invalidating.
1992 		 */
1993 		if (rev == 0 || rev == 1)
1994 			cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng;
1995 
1996 		get_cachetype_cp15();
1997 		pmap_pte_init_xscale();
1998 		return 0;
1999 	}
2000 #endif /* CPU_XSCALE_80200 */
2001 #ifdef CPU_XSCALE_80321
2002 	if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
2003 	    cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0 ||
2004 	    cputype == CPU_ID_80219_400 || cputype == CPU_ID_80219_600) {
2005 		i80321_icu_init();
2006 
2007 		/*
2008 		 * Reset the Performance Monitoring Unit to a
2009 		 * pristine state:
2010 		 *	- CCNT, PMN0, PMN1 reset to 0
2011 		 *	- overflow indications cleared
2012 		 *	- all counters disabled
2013 		 */
2014 		__asm volatile("mcr p14, 0, %0, c0, c0, 0"
2015 			:
2016 			: "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
2017 			       PMNC_CC_IF));
2018 
2019 		cpufuncs = xscale_cpufuncs;
2020 #if defined(PERFCTRS)
2021 		xscale_pmu_init();
2022 #endif
2023 
2024 		get_cachetype_cp15();
2025 		pmap_pte_init_xscale();
2026 		return 0;
2027 	}
2028 #endif /* CPU_XSCALE_80321 */
2029 #ifdef __CPU_XSCALE_PXA2XX
2030 	/* ignore core revision to test PXA2xx CPUs */
2031 	if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X ||
2032 	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
2033 	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {
2034 
2035 		cpufuncs = xscale_cpufuncs;
2036 #if defined(PERFCTRS)
2037 		xscale_pmu_init();
2038 #endif
2039 
2040 		get_cachetype_cp15();
2041 		pmap_pte_init_xscale();
2042 
2043 		/* Use powersave on this CPU. */
2044 		cpu_do_powersave = 1;
2045 
2046 		return 0;
2047 	}
2048 #endif /* __CPU_XSCALE_PXA2XX */
2049 #ifdef CPU_XSCALE_IXP425
2050 	if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
2051 	    cputype == CPU_ID_IXP425_266) {
2052 		ixp425_icu_init();
2053 
2054 		cpufuncs = xscale_cpufuncs;
2055 #if defined(PERFCTRS)
2056 		xscale_pmu_init();
2057 #endif
2058 
2059 		get_cachetype_cp15();
2060 		pmap_pte_init_xscale();
2061 
2062 		return 0;
2063 	}
2064 #endif /* CPU_XSCALE_IXP425 */
2065 #if defined(CPU_CORTEX)
2066 	if (CPU_ID_CORTEX_P(cputype)) {
2067 		cpufuncs = cortex_cpufuncs;
2068 		cpu_do_powersave = 1;			/* Enable powersave */
2069 #if defined(CPU_ARMV6) || defined(CPU_PRE_ARMV6)
2070 		cpu_armv7_p = true;
2071 #endif
2072 		get_cachetype_cp15();
2073 		pmap_pte_init_armv7();
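		/*
		 * On a VIPT cache arm_cache_prefer_mask holds the VA bits
		 * that index the cache beyond the page offset; e.g. a
		 * 16KB way with 4KB pages gives a mask of 0x3000 and
		 * hence 4 page colors.
		 */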
2074 		if (arm_cache_prefer_mask)
2075 			uvmexp.ncolors = (arm_cache_prefer_mask >> PGSHIFT) + 1;
2076 		/*
2077 		 * Start and reset the PMC Cycle Counter.
2078 		 */
2079 		armreg_pmcr_write(ARM11_PMCCTL_E | ARM11_PMCCTL_P | ARM11_PMCCTL_C);
2080 		armreg_pmcntenset_write(CORTEX_CNTENS_C);
2081 		return 0;
2082 	}
2083 #endif /* CPU_CORTEX */
2084 
2085 #if defined(CPU_PJ4B)
2086 	if ((cputype == CPU_ID_MV88SV581X_V6 ||
2087 	    cputype == CPU_ID_MV88SV581X_V7 ||
2088 	    cputype == CPU_ID_MV88SV584X_V7 ||
2089 	    cputype == CPU_ID_ARM_88SV581X_V6 ||
2090 	    cputype == CPU_ID_ARM_88SV581X_V7) &&
2091 	    (armreg_pfr0_read() & ARM_PFR0_THUMBEE_MASK)) {
2092 			cpufuncs = pj4bv7_cpufuncs;
2093 #if defined(CPU_ARMV6) || defined(CPU_PRE_ARMV6)
2094 			cpu_armv7_p = true;
2095 #endif
2096 			get_cachetype_cp15();
2097 			pmap_pte_init_armv7();
2098 			return 0;
2099 	}
2100 #endif /* CPU_PJ4B */
2101 
2102 	/*
2103 	 * Bzzzz. And the answer was ...
2104 	 */
2105 	panic("No support for this CPU type (%08x) in kernel", cputype);
2106 	return(ARCHITECTURE_NOT_PRESENT);
2107 }
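
/*
 * Illustrative sketch (example only, not compiled): how the
 * arm9_dcache_* increments computed in set_cpufuncs() above drive a
 * clean-and-invalidate-by-set/way loop.  The set counter steps by
 * the cache line size in the low bits and the way counter by
 * 1 << (32 - log2(associativity)) in the top bits, so both advance
 * with plain adds and stop at the precomputed maxima.  The MCR is
 * the ARMv5 "clean+invalidate D-cache line by set/way" operation;
 * treat this as pseudocode for the assembly routine, not a drop-in
 * replacement.
 */
#ifdef notdef
static void
arm9_dcache_wbinv_all_sketch(void)
{
	u_int way, set;

	for (way = 0; ; way += arm9_dcache_index_inc) {
		for (set = 0; ; set += arm9_dcache_sets_inc) {
			__asm volatile("mcr p15, 0, %0, c7, c14, 2"
			    :: "r" (way | set));
			if (set == arm9_dcache_sets_max)
				break;
		}
		if (way == arm9_dcache_index_max)
			break;
	}
}
#endif /* notdef */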
2108 
2109 #ifdef CPU_ARM2
2110 u_int arm2_id(void)
2111 {
2112 
2113 	return CPU_ID_ARM2;
2114 }
2115 #endif /* CPU_ARM2 */
2116 
2117 #ifdef CPU_ARM250
2118 u_int arm250_id(void)
2119 {
2120 
2121 	return CPU_ID_ARM250;
2122 }
2123 #endif /* CPU_ARM250 */
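
/*
 * Illustrative sketch (example only): the shape of the CPU-ID tests
 * in set_cpufuncs() above.  For instance an ARM610 reports an ID of
 * 0x41560610 (see arm/armreg.h): implementor 0x41 ('A', ARM Ltd) in
 * bits 31:24 and family nibble 0x6 in bits 11:8.  The helper below
 * merely restates the ARM6 test; it is not used by the driver.
 */
#ifdef notdef
static bool
cpu_is_arm6_family_sketch(u_int id)
{
	return (id & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    (id & 0x00000f00) == 0x00000600;
}
#endif /* notdef */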
2124 
2125 /*
2126  * Fixup routines for data and prefetch aborts.
2127  *
2128  * Several compile time symbols are used
2129  *
2130  * DEBUG_FAULT_CORRECTION - Print debugging information during the
2131  * correction of registers after a fault.
2132  * ARM6_LATE_ABORT - ARM6 supports both early and late aborts
2133  * when defined should use late aborts
2134  */
2135 
2136 
2137 /*
2138  * Null abort fixup routine.
2139  * For use when no fixup is required.
2140  */
2141 int
2142 cpufunc_null_fixup(void *arg)
2143 {
2144 	return(ABORT_FIXUP_OK);
2145 }
2146 
2147 
2148 #if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3) || \
2149     defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI)
2150 
2151 #ifdef DEBUG_FAULT_CORRECTION
2152 #define DFC_PRINTF(x)		printf x
2153 #define DFC_DISASSEMBLE(x)	disassemble(x)
2154 #else
2155 #define DFC_PRINTF(x)		/* nothing */
2156 #define DFC_DISASSEMBLE(x)	/* nothing */
2157 #endif
2158 
2159 /*
2160  * "Early" data abort fixup.
2161  *
2162  * For ARM2, ARM2as, ARM3 and ARM6 (in early-abort mode).  Also used
2163  * indirectly by ARM6 (in late-abort mode) and ARM7[TDMI].
2164  *
2165  * In early aborts, we may have to fix up LDM, STM, LDC and STC.
2166  */
2167 int
2168 early_abort_fixup(void *arg)
2169 {
2170 	trapframe_t *frame = arg;
2171 	u_int fault_pc;
2172 	u_int fault_instruction;
2173 	int saved_lr = 0;
2174 
2175 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
2176 
2177 		/* Ok an abort in SVC mode */
2178 
2179 		/*
2180 		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
2181 		 * as the fault happened in svc mode but we need it in the
2182 		 * usr slot so we can treat the registers as an array of ints
2183 		 * during fixing.
2184 		 * NOTE: r15 (the PC) has a slot in this layout too, but
2185 		 * writeback to r15 is not allowed anyway.
2186 		 * Doing it like this is more efficient than trapping this
2187 		 * case in all possible locations in the following fixup code.
2188 		 */
2189 
2190 		saved_lr = frame->tf_usr_lr;
2191 		frame->tf_usr_lr = frame->tf_svc_lr;
2192 
2193 		/*
2194 		 * Note the trapframe does not have the SVC r13 so a fault
2195 		 * from an instruction with writeback to r13 in SVC mode is
2196 		 * not allowed. This should not happen as the kstack is
2197 		 * always valid.
2198 		 */
2199 	}
2200 
2201 	/* Get the fault PC from the frame and fetch the faulting instruction */
2202 
2203 	fault_pc = frame->tf_pc;
2204 	fault_instruction = *((volatile unsigned int *)fault_pc);
2205 
2206 	/* Decode the fault instruction and fix the registers as needed */
2207 
2208 	if ((fault_instruction & 0x0e000000) == 0x08000000) {
2209 		int base;
2210 		int loop;
2211 		int count;
2212 		int *registers = &frame->tf_r0;
2213 
2214 		DFC_PRINTF(("LDM/STM\n"));
2215 		DFC_DISASSEMBLE(fault_pc);
2216 		if (fault_instruction & (1 << 21)) {
2217 			DFC_PRINTF(("This instruction must be corrected\n"));
2218 			base = (fault_instruction >> 16) & 0x0f;
2219 			if (base == 15)
2220 				return ABORT_FIXUP_FAILED;
2221 			/* Count registers transferred */
2222 			count = 0;
2223 			for (loop = 0; loop < 16; ++loop) {
2224 				if (fault_instruction & (1<<loop))
2225 					++count;
2226 			}
2227 			DFC_PRINTF(("%d registers used\n", count));
2228 			DFC_PRINTF(("Corrected r%d by %d bytes ",
2229 				       base, count * 4));
2230 			if (fault_instruction & (1 << 23)) {
2231 				DFC_PRINTF(("down\n"));
2232 				registers[base] -= count * 4;
2233 			} else {
2234 				DFC_PRINTF(("up\n"));
2235 				registers[base] += count * 4;
2236 			}
2237 		}
2238 	} else if ((fault_instruction & 0x0e000000) == 0x0c000000) {
2239 		int base;
2240 		int offset;
2241 		int *registers = &frame->tf_r0;
2242 
2243 		/* REGISTER CORRECTION IS REQUIRED FOR THESE INSTRUCTIONS */
2244 
2245 		DFC_DISASSEMBLE(fault_pc);
2246 
2247 		/* Only need to fix registers if write back is turned on */
2248 
2249 		if ((fault_instruction & (1 << 21)) != 0) {
2250 			base = (fault_instruction >> 16) & 0x0f;
2251 			if (base == 13 &&
2252 			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
2253 				return ABORT_FIXUP_FAILED;
2254 			if (base == 15)
2255 				return ABORT_FIXUP_FAILED;
2256 
2257 			offset = (fault_instruction & 0xff) << 2;
2258 			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
2259 			if ((fault_instruction & (1 << 23)) != 0)
2260 				offset = -offset;
2261 			registers[base] += offset;
2262 			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
2263 		}
2264 	} else if ((fault_instruction & 0x0e000000) == 0x0c000000)
2265 		return ABORT_FIXUP_FAILED;	/* XXX same test as the branch above; unreachable */
2266 
2267 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
2268 
2269 		/* Ok an abort in SVC mode */
2270 
2271 		/*
2272 		 * Undo the entry fixup: the usr r14 slot has been holding
2273 		 * the SVC r14 so the fixup code could treat the registers
2274 		 * as an array of ints.  Copy the (possibly corrected) SVC
2275 		 * r14 back to its own slot and restore the saved usr r14.
2276 		 * NOTE: r15 (the PC) has a slot in this layout too, but
2277 		 * writeback to r15 is not allowed anyway.
2278 		 * Doing it like this is more efficient than trapping this
2279 		 * case in all possible locations in the prior fixup code.
2280 		 */
2281 
2282 		frame->tf_svc_lr = frame->tf_usr_lr;
2283 		frame->tf_usr_lr = saved_lr;
2284 
2285 		/*
2286 		 * Note the trapframe does not have the SVC r13 so a fault
2287 		 * from an instruction with writeback to r13 in SVC mode is
2288 		 * not allowed. This should not happen as the kstack is
2289 		 * always valid.
2290 		 */
2291 	}
2292 
2293 	return(ABORT_FIXUP_OK);
2294 }
2295 #endif	/* CPU_ARM2/250/3/6/7 */
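
/*
 * Illustrative sketch (example only): the base-register correction
 * early_abort_fixup() applies for LDM/STM with writeback.  The
 * transfer length is four bytes per bit set in the instruction's
 * low 16 bits (the register list); the U bit (bit 23) tells whether
 * the aborted access incremented or decremented the base, and the
 * correction goes the opposite way.
 */
#ifdef notdef
static int
ldm_base_correction_sketch(u_int insn)
{
	int bit, count = 0;

	/* one register transferred per bit set in insn[15:0] */
	for (bit = 0; bit < 16; bit++)
		if (insn & (1 << bit))
			count++;

	/* U=1: base went up, so correct down; U=0: the reverse */
	return (insn & (1 << 23)) ? -(count * 4) : (count * 4);
}
#endif /* notdef */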
2296 
2297 
2298 #if (defined(CPU_ARM6) && defined(ARM6_LATE_ABORT)) || defined(CPU_ARM7) || \
2299 	defined(CPU_ARM7TDMI)
2300 /*
2301  * "Late" (base updated) data abort fixup
2302  *
2303  * For ARM6 (in late-abort mode) and ARM7.
2304  *
2305  * In this model, all data-transfer instructions need fixing up.  We defer
2306  * LDM, STM, LDC and STC fixup to the early-abort handler.
2307  */
2308 int
2309 late_abort_fixup(void *arg)
2310 {
2311 	trapframe_t *frame = arg;
2312 	u_int fault_pc;
2313 	u_int fault_instruction;
2314 	int saved_lr = 0;
2315 
2316 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
2317 
2318 		/* Ok an abort in SVC mode */
2319 
2320 		/*
2321 		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
2322 		 * as the fault happened in svc mode but we need it in the
2323 		 * usr slot so we can treat the registers as an array of ints
2324 		 * during fixing.
2325 		 * NOTE: r15 (the PC) has a slot in this layout too, but
2326 		 * writeback to r15 is not allowed anyway.
2327 		 * Doing it like this is more efficient than trapping this
2328 		 * case in all possible locations in the following fixup code.
2329 		 */
2330 
2331 		saved_lr = frame->tf_usr_lr;
2332 		frame->tf_usr_lr = frame->tf_svc_lr;
2333 
2334 		/*
2335 		 * Note the trapframe does not have the SVC r13 so a fault
2336 		 * from an instruction with writeback to r13 in SVC mode is
2337 		 * not allowed. This should not happen as the kstack is
2338 		 * always valid.
2339 		 */
2340 	}
2341 
2342 	/* Get the fault PC from the frame and fetch the faulting instruction */
2343 
2344 	fault_pc = frame->tf_pc;
2345 	fault_instruction = *((volatile unsigned int *)fault_pc);
2346 
2347 	/* Decode the fault instruction and fix the registers as needed */
2348 
2349 	/* Was it a swap instruction? */
2350 
2351 	if ((fault_instruction & 0x0fb00ff0) == 0x01000090) {
2352 		DFC_DISASSEMBLE(fault_pc);
2353 	} else if ((fault_instruction & 0x0c000000) == 0x04000000) {
2354 
2355 		/* Was it an ldr/str instruction? */
2356 		/* This is for late abort only */
2357 
2358 		int base;
2359 		int offset;
2360 		int *registers = &frame->tf_r0;
2361 
2362 		DFC_DISASSEMBLE(fault_pc);
2363 
2364 		/* This is for late abort only */
2365 
2366 		if ((fault_instruction & (1 << 24)) == 0
2367 		    || (fault_instruction & (1 << 21)) != 0) {
2368 			/* post-indexed, or pre-indexed with writeback: base was updated */
2369 
2370 			base = (fault_instruction >> 16) & 0x0f;
2371 			if (base == 13 &&
2372 			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
2373 				return ABORT_FIXUP_FAILED;
2374 			if (base == 15)
2375 				return ABORT_FIXUP_FAILED;
2376 			DFC_PRINTF(("late abt fix: r%d=%08x : ",
2377 				       base, registers[base]));
2378 			if ((fault_instruction & (1 << 25)) == 0) {
2379 				/* Immediate offset - easy */
2380 
2381 				offset = fault_instruction & 0xfff;
2382 				if ((fault_instruction & (1 << 23)))
2383 					offset = -offset;
2384 				registers[base] += offset;
2385 				DFC_PRINTF(("imm=%08x ", offset));
2386 			} else {
2387 				/* offset is a shifted register */
2388 				int shift;
2389 
2390 				offset = fault_instruction & 0x0f;
2391 				if (offset == base)
2392 					return ABORT_FIXUP_FAILED;
2393 
2394 				/*
2395 				 * Register offset - harder: we have to
2396 				 * cope with shifts!
2397 				 */
2398 				offset = registers[offset];
2399 
2400 				if ((fault_instruction & (1 << 4)) == 0)
2401 					/* shift with amount */
2402 					shift = (fault_instruction >> 7) & 0x1f;
2403 				else {
2404 					/* shift with register */
2405 					if ((fault_instruction & (1 << 7)) != 0)
2406 						/* undefined for now so bail out */
2407 						return ABORT_FIXUP_FAILED;
2408 					shift = ((fault_instruction >> 8) & 0xf);
2409 					if (base == shift)
2410 						return ABORT_FIXUP_FAILED;
2411 					DFC_PRINTF(("shift reg=%d ", shift));
2412 					shift = registers[shift];
2413 				}
2414 				DFC_PRINTF(("shift=%08x ", shift));
2415 				switch (((fault_instruction >> 5) & 0x3)) {
2416 				case 0 : /* Logical left */
2417 					offset = (int)(((u_int)offset) << shift);
2418 					break;
2419 				case 1 : /* Logical Right; shift 0 encodes #32 */
2420 					offset = (shift == 0) ? 0 :
2421 					    (int)(((u_int)offset) >> shift);
2422 					break;
2423 				case 2 : /* Arithmetic Right; 0 encodes #32 (sign fill) */
2424 					if (shift == 0) shift = 31; /* C forbids >>32 */
2425 					offset = (int)(((int)offset) >> shift);
2426 					break;
2427 				case 3 : /* Rotate right (ror or rrx) */
2428 					return ABORT_FIXUP_FAILED;
2429 					break;
2430 				}
2431 
2432 				DFC_PRINTF(("abt: fixed LDR/STR with "
2433 					       "register offset\n"));
2434 				if ((fault_instruction & (1 << 23)))
2435 					offset = -offset;
2436 				DFC_PRINTF(("offset=%08x ", offset));
2437 				registers[base] += offset;
2438 			}
2439 			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
2440 		}
2441 	}
2442 
2443 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
2444 
2445 		/* Ok an abort in SVC mode */
2446 
2447 		/*
2448 		 * Undo the entry fixup: the usr r14 slot has been holding
2449 		 * the SVC r14 so the fixup code could treat the registers
2450 		 * as an array of ints.  Copy the (possibly corrected) SVC
2451 		 * r14 back to its own slot and restore the saved usr r14.
2452 		 * NOTE: r15 (the PC) has a slot in this layout too, but
2453 		 * writeback to r15 is not allowed anyway.
2454 		 * Doing it like this is more efficient than trapping this
2455 		 * case in all possible locations in the prior fixup code.
2456 		 */
2457 
2458 		frame->tf_svc_lr = frame->tf_usr_lr;
2459 		frame->tf_usr_lr = saved_lr;
2460 
2461 		/*
2462 		 * Note the trapframe does not have the SVC r13 so a fault
2463 		 * from an instruction with writeback to r13 in SVC mode is
2464 		 * not allowed. This should not happen as the kstack is
2465 		 * always valid.
2466 		 */
2467 	}
2468 
2469 	/*
2470 	 * Now let the early-abort fixup routine have a go, in case it
2471 	 * was an LDM, STM, LDC or STC that faulted.
2472 	 */
2473 
2474 	return early_abort_fixup(arg);
2475 }
2476 #endif	/* CPU_ARM6(LATE)/7/7TDMI */
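
/*
 * Worked example (illustrative only): a faulting post-indexed store
 * such as "str r1, [r0], r2, lsl #2" assembles to 0xe6801102 (hand-
 * assembled for this example; check against the ARM ARM).  In
 * late_abort_fixup() bits 27:26 = 01 mark a single data transfer,
 * P (bit 24) is clear so the base was updated, I (bit 25) selects a
 * register offset, and bits 11:7 give the immediate shift amount 2
 * applied to Rm = r2.  Since U (bit 23) is set the offset had been
 * added, so the fixup subtracts r2 << 2 from r0 to undo the
 * writeback.
 */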
2477 
2478 /*
2479  * CPU Setup code
2480  */
2481 
2482 #if defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) || \
2483 	defined(CPU_ARM8) || defined (CPU_ARM9) || defined (CPU_ARM9E) || \
2484 	defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
2485 	defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
2486 	defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425) || \
2487 	defined(CPU_ARM10) || defined(CPU_ARM11) || \
2488 	defined(CPU_FA526) || defined(CPU_CORTEX) || defined(CPU_SHEEVA)
2489 
2490 #define IGN	0
2491 #define OR	1
2492 #define BIC	2
2493 
2494 struct cpu_option {
2495 	const char *co_name;
2496 	int	co_falseop;
2497 	int	co_trueop;
2498 	int	co_value;
2499 };
2500 
2501 static u_int parse_cpu_options(char *, struct cpu_option *, u_int);
2502 
2503 static u_int
2504 parse_cpu_options(char *args, struct cpu_option *optlist, u_int cpuctrl)
2505 {
2506 	int integer;
2507 
2508 	if (args == NULL)
2509 		return(cpuctrl);
2510 
2511 	while (optlist->co_name) {
2512 		if (get_bootconf_option(args, optlist->co_name,
2513 		    BOOTOPT_TYPE_BOOLEAN, &integer)) {
2514 			if (integer) {
2515 				if (optlist->co_trueop == OR)
2516 					cpuctrl |= optlist->co_value;
2517 				else if (optlist->co_trueop == BIC)
2518 					cpuctrl &= ~optlist->co_value;
2519 			} else {
2520 				if (optlist->co_falseop == OR)
2521 					cpuctrl |= optlist->co_value;
2522 				else if (optlist->co_falseop == BIC)
2523 					cpuctrl &= ~optlist->co_value;
2524 			}
2525 		}
2526 		++optlist;
2527 	}
2528 	return(cpuctrl);
2529 }
2530 #endif /* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 || CPU_SA110 */
2531 
2532 #if defined (CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) \
2533 	|| defined(CPU_ARM8)
2534 struct cpu_option arm678_options[] = {
2535 #ifdef COMPAT_12
2536 	{ "nocache",		IGN, BIC, CPU_CONTROL_IDC_ENABLE },
2537 	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
2538 #endif	/* COMPAT_12 */
2539 	{ "cpu.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
2540 	{ "cpu.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
2541 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2542 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2543 	{ NULL,			IGN, IGN, 0 }
2544 };
2545 
2546 #endif	/* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 */
2547 
2548 #ifdef CPU_ARM6
2549 struct cpu_option arm6_options[] = {
2550 	{ "arm6.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
2551 	{ "arm6.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
2552 	{ "arm6.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2553 	{ "arm6.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2554 	{ NULL,			IGN, IGN, 0 }
2555 };
2556 
2557 void
2558 arm6_setup(char *args)
2559 {
2560 	int cpuctrl, cpuctrlmask;
2561 
2562 	/* Set up default control registers bits */
2563 	/* Set up default control register bits */
2564 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2565 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
2566 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2567 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2568 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
2569 		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE
2570 		 | CPU_CONTROL_AFLT_ENABLE;
2571 
2572 #ifdef ARM6_LATE_ABORT
2573 	cpuctrl |= CPU_CONTROL_LABT_ENABLE;
2574 #endif	/* ARM6_LATE_ABORT */
2575 
2576 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2577 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2578 #endif
2579 
2580 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
2581 	cpuctrl = parse_cpu_options(args, arm6_options, cpuctrl);
2582 
2583 #ifdef __ARMEB__
2584 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2585 #endif
2586 
2587 	/* Clear out the cache */
2588 	cpu_idcache_wbinv_all();
2589 
2590 	/* Set the control register */
2591 	curcpu()->ci_ctrl = cpuctrl;
2592 	cpu_control(0xffffffff, cpuctrl);
2593 }
2594 #endif	/* CPU_ARM6 */
2595 
2596 #ifdef CPU_ARM7
2597 struct cpu_option arm7_options[] = {
2598 	{ "arm7.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
2599 	{ "arm7.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
2600 	{ "arm7.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2601 	{ "arm7.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2602 #ifdef COMPAT_12
2603 	{ "fpaclk2",		BIC, OR,  CPU_CONTROL_CPCLK },
2604 #endif	/* COMPAT_12 */
2605 	{ "arm700.fpaclk",	BIC, OR,  CPU_CONTROL_CPCLK },
2606 	{ NULL,			IGN, IGN, 0 }
2607 };
2608 
2609 void
2610 arm7_setup(char *args)
2611 {
2612 	int cpuctrl, cpuctrlmask;
2613 
2614 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2615 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2616 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
2617 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2618 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2619 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
2620 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_LABT_ENABLE
2621 		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE
2622 		 | CPU_CONTROL_AFLT_ENABLE;
2623 
2624 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2625 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2626 #endif
2627 
2628 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
2629 	cpuctrl = parse_cpu_options(args, arm7_options, cpuctrl);
2630 
2631 #ifdef __ARMEB__
2632 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2633 #endif
2634 
2635 	/* Clear out the cache */
2636 	cpu_idcache_wbinv_all();
2637 
2638 	/* Set the control register */
2639 	curcpu()->ci_ctrl = cpuctrl;
2640 	cpu_control(0xffffffff, cpuctrl);
2641 }
2642 #endif	/* CPU_ARM7 */
2643 
2644 #ifdef CPU_ARM7TDMI
2645 struct cpu_option arm7tdmi_options[] = {
2646 	{ "arm7.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
2647 	{ "arm7.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
2648 	{ "arm7.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2649 	{ "arm7.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2650 #ifdef COMPAT_12
2651 	{ "fpaclk2",		BIC, OR,  CPU_CONTROL_CPCLK },
2652 #endif	/* COMPAT_12 */
2653 	{ "arm700.fpaclk",	BIC, OR,  CPU_CONTROL_CPCLK },
2654 	{ NULL,			IGN, IGN, 0 }
2655 };
2656 
2657 void
2658 arm7tdmi_setup(char *args)
2659 {
2660 	int cpuctrl;
2661 
2662 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2663 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2664 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
2665 
2666 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
2667 	cpuctrl = parse_cpu_options(args, arm7tdmi_options, cpuctrl);
2668 
2669 #ifdef __ARMEB__
2670 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2671 #endif
2672 
2673 	/* Clear out the cache */
2674 	cpu_idcache_wbinv_all();
2675 
2676 	/* Set the control register */
2677 	curcpu()->ci_ctrl = cpuctrl;
2678 	cpu_control(0xffffffff, cpuctrl);
2679 }
2680 #endif	/* CPU_ARM7TDMI */
2681 
2682 #ifdef CPU_ARM8
2683 struct cpu_option arm8_options[] = {
2684 	{ "arm8.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
2685 	{ "arm8.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
2686 	{ "arm8.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2687 	{ "arm8.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2688 #ifdef COMPAT_12
2689 	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2690 #endif	/* COMPAT_12 */
2691 	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2692 	{ "arm8.branchpredict",	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2693 	{ NULL,			IGN, IGN, 0 }
2694 };
2695 
2696 void
2697 arm8_setup(char *args)
2698 {
2699 	int integer;
2700 	int cpuctrl, cpuctrlmask;
2701 	int clocktest;
2702 	int setclock = 0;
2703 
2704 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2705 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2706 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
2707 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2708 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2709 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
2710 		 | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_ROM_ENABLE
2711 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE;
2712 
2713 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2714 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2715 #endif
2716 
2717 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
2718 	cpuctrl = parse_cpu_options(args, arm8_options, cpuctrl);
2719 
2720 #ifdef __ARMEB__
2721 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2722 #endif
2723 
2724 	/* Get clock configuration */
2725 	clocktest = arm8_clock_config(0, 0) & 0x0f;
2726 
2727 	/* Special ARM8 clock and test configuration */
2728 	if (get_bootconf_option(args, "arm8.clock.reset", BOOTOPT_TYPE_BOOLEAN, &integer)) {
2729 		clocktest = 0;
2730 		setclock = 1;
2731 	}
2732 	if (get_bootconf_option(args, "arm8.clock.dynamic", BOOTOPT_TYPE_BOOLEAN, &integer)) {
2733 		if (integer)
2734 			clocktest |= 0x01;
2735 		else
2736 			clocktest &= ~(0x01);
2737 		setclock = 1;
2738 	}
2739 	if (get_bootconf_option(args, "arm8.clock.sync", BOOTOPT_TYPE_BOOLEAN, &integer)) {
2740 		if (integer)
2741 			clocktest |= 0x02;
2742 		else
2743 			clocktest &= ~(0x02);
2744 		setclock = 1;
2745 	}
2746 	if (get_bootconf_option(args, "arm8.clock.fast", BOOTOPT_TYPE_BININT, &integer)) {
2747 		clocktest = (clocktest & ~0x0c) | (integer & 3) << 2;
2748 		setclock = 1;
2749 	}
2750 	if (get_bootconf_option(args, "arm8.test", BOOTOPT_TYPE_BININT, &integer)) {
2751 		clocktest |= (integer & 7) << 5;
2752 		setclock = 1;
2753 	}
2754 
2755 	/* Clear out the cache */
2756 	cpu_idcache_wbinv_all();
2757 
2758 	/* Set the control register */
2759 	curcpu()->ci_ctrl = cpuctrl;
2760 	cpu_control(0xffffffff, cpuctrl);
2761 
2762 	/* Set the clock/test register */
2763 	if (setclock)
2764 		arm8_clock_config(0x7f, clocktest);
2765 }
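
/*
 * Worked example (illustrative only): booting with
 * "arm8.clock.reset=1 arm8.clock.dynamic=1 arm8.clock.sync=1
 * arm8.test=2" first zeroes clocktest, then sets bits 0 and 1 and
 * (2 & 7) << 5, so the function above ends by calling
 * arm8_clock_config(0x7f, 0x43).
 */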
2766 #endif	/* CPU_ARM8 */
2767 
2768 #ifdef CPU_ARM9
2769 struct cpu_option arm9_options[] = {
2770 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2771 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2772 	{ "arm9.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2773 	{ "arm9.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2774 	{ "arm9.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2775 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2776 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2777 	{ "arm9.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2778 	{ NULL,			IGN, IGN, 0 }
2779 };
2780 
2781 void
2782 arm9_setup(char *args)
2783 {
2784 	int cpuctrl, cpuctrlmask;
2785 
2786 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2787 	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2788 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2789 	    | CPU_CONTROL_WBUF_ENABLE;
2790 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2791 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2792 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2793 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2794 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2795 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
2796 		 | CPU_CONTROL_ROUNDROBIN;
2797 
2798 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2799 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2800 #endif
2801 
2802 	cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);
2803 
2804 #ifdef __ARMEB__
2805 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2806 #endif
2807 
2808 #ifndef ARM_HAS_VBAR
2809 	if (vector_page == ARM_VECTORS_HIGH)
2810 		cpuctrl |= CPU_CONTROL_VECRELOC;
2811 #endif
2812 
2813 	/* Clear out the cache */
2814 	cpu_idcache_wbinv_all();
2815 
2816 	/* Set the control register */
2817 	curcpu()->ci_ctrl = cpuctrl;
2818 	cpu_control(cpuctrlmask, cpuctrl);
2819 
2820 }
2821 #endif	/* CPU_ARM9 */
2822 
2823 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
2824 struct cpu_option arm10_options[] = {
2825 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2826 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2827 	{ "arm10.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2828 	{ "arm10.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2829 	{ "arm10.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2830 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2831 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2832 	{ "arm10.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2833 	{ NULL,			IGN, IGN, 0 }
2834 };
2835 
2836 void
2837 arm10_setup(char *args)
2838 {
2839 	int cpuctrl;
2840 
2841 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2842 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2843 	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
2844 #if 0
2845 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2846 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2847 	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2848 	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2849 	    | CPU_CONTROL_BPRD_ENABLE
2850 	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
2851 #endif
2852 
2853 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2854 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2855 #endif
2856 
2857 	cpuctrl = parse_cpu_options(args, arm10_options, cpuctrl);
2858 
2859 #ifdef __ARMEB__
2860 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2861 #endif
2862 
2863 #ifndef ARM_HAS_VBAR
2864 	if (vector_page == ARM_VECTORS_HIGH)
2865 		cpuctrl |= CPU_CONTROL_VECRELOC;
2866 #endif
2867 
2868 	/* Clear out the cache */
2869 	cpu_idcache_wbinv_all();
2870 
2871 	/* Now really make sure they are clean.  */
2872 	__asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
2873 
2874 	/* Set the control register */
2875 	curcpu()->ci_ctrl = cpuctrl;
2876 	cpu_control(0xffffffff, cpuctrl);
2877 
2878 	/* And again. */
2879 	cpu_idcache_wbinv_all();
2880 }
2881 #endif	/* CPU_ARM9E || CPU_ARM10 */
2882 
2883 #if defined(CPU_ARM11)
2884 struct cpu_option arm11_options[] = {
2885 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2886 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2887 	{ "arm11.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2888 	{ "arm11.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2889 	{ "arm11.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2890 	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2891 	{ "arm11.branchpredict", BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2892 	{ NULL,			IGN, IGN, 0 }
2893 };
2894 
2895 void
2896 arm11_setup(char *args)
2897 {
2898 	int cpuctrl, cpuctrlmask;
2899 
2900 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2901 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2902 	    /* | CPU_CONTROL_BPRD_ENABLE */;
2903 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2904 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2905 	    | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BPRD_ENABLE
2906 	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2907 	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
2908 
2909 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2910 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2911 #endif
2912 
2913 	cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);
2914 
2915 #ifdef __ARMEB__
2916 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2917 #endif
2918 
2919 #ifndef ARM_HAS_VBAR
2920 	if (vector_page == ARM_VECTORS_HIGH)
2921 		cpuctrl |= CPU_CONTROL_VECRELOC;
2922 #endif
2923 
2924 	/* Clear out the cache */
2925 	cpu_idcache_wbinv_all();
2926 
2927 	/* Now really make sure they are clean.  */
2928 	__asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
2929 
2930 	/* Allow detection code to find the VFP if it's fitted.  */
2931 	__asm volatile ("mcr\tp15, 0, %0, c1, c0, 2" : : "r" (0x0fffffff));
2932 
2933 	/* Set the control register */
2934 	curcpu()->ci_ctrl = cpuctrl;
2935 	cpu_control(cpuctrlmask, cpuctrl);
2936 
2937 	/* And again. */
2938 	cpu_idcache_wbinv_all();
2939 }
2940 #endif	/* CPU_ARM11 */
2941 
2942 #if defined(CPU_ARM11MPCORE)
2943 
2944 void
2945 arm11mpcore_setup(char *args)
2946 {
2947 	int cpuctrl, cpuctrlmask;
2948 
2949 	cpuctrl = CPU_CONTROL_IC_ENABLE
2950 	    | CPU_CONTROL_DC_ENABLE
2951 	    | CPU_CONTROL_BPRD_ENABLE ;
2952 	cpuctrlmask = CPU_CONTROL_IC_ENABLE
2953 	    | CPU_CONTROL_DC_ENABLE
2954 	    | CPU_CONTROL_BPRD_ENABLE
2955 	    | CPU_CONTROL_AFLT_ENABLE
2956 	    | CPU_CONTROL_VECRELOC;
2957 
2958 #ifdef	ARM11MPCORE_MMU_COMPAT
2959 	/* XXX: S and R? */
2960 #endif
2961 
2962 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2963 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2964 #endif
2965 
2966 	cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);
2967 
2968 #ifndef ARM_HAS_VBAR
2969 	if (vector_page == ARM_VECTORS_HIGH)
2970 		cpuctrl |= CPU_CONTROL_VECRELOC;
2971 #endif
2972 
2973 	/* Clear out the cache */
2974 	cpu_idcache_wbinv_all();
2975 
2976 	/* Now really make sure they are clean.  */
2977 	__asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
2978 
2979 	/* Allow detection code to find the VFP if it's fitted.  */
2980 	__asm volatile ("mcr\tp15, 0, %0, c1, c0, 2" : : "r" (0x0fffffff));
2981 
2982 	/* Set the control register */
2983 	curcpu()->ci_ctrl = cpu_control(cpuctrlmask, cpuctrl);
2984 
2985 	/* And again. */
2986 	cpu_idcache_wbinv_all();
2987 }
2988 #endif	/* CPU_ARM11MPCORE */
2989 
2990 #ifdef CPU_PJ4B
2991 void
2992 pj4bv7_setup(char *args)
2993 {
2994 	int cpuctrl;
2995 
2996 	pj4b_config();
2997 
2998 	cpuctrl = CPU_CONTROL_MMU_ENABLE;
2999 #ifdef ARM32_DISABLE_ALIGNMENT_FAULTS
3000 	cpuctrl |= CPU_CONTROL_UNAL_ENABLE;
3001 #else
3002 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3003 #endif
3004 	cpuctrl |= CPU_CONTROL_DC_ENABLE;
3005 	cpuctrl |= CPU_CONTROL_IC_ENABLE;
3006 	cpuctrl |= (0xf << 3);
3007 	cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
3008 	cpuctrl |= (0x5 << 16) | (1 << 22);
3009 	cpuctrl |= CPU_CONTROL_XP_ENABLE;
3010 
3011 #ifndef ARM_HAS_VBAR
3012 	if (vector_page == ARM_VECTORS_HIGH)
3013 		cpuctrl |= CPU_CONTROL_VECRELOC;
3014 #endif
3015 
3016 	/* Clear out the cache */
3017 	cpu_idcache_wbinv_all();
3018 
3019 	/* Set the control register */
3020 	cpu_control(0xffffffff, cpuctrl);
3021 
3022 	/* And again. */
3023 	cpu_idcache_wbinv_all();
3024 
3025 	curcpu()->ci_ctrl = cpuctrl;
3026 }
3027 #endif /* CPU_PJ4B */
3028 
3029 #if defined(CPU_CORTEX)
3030 struct cpu_option armv7_options[] = {
3031 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3032 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3033 	{ "armv7.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3034 	{ "armv7.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
3035 	{ "armv7.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
3036 	{ NULL,			IGN, IGN, 0 }
3037 };
3038 
3039 void
3040 armv7_setup(char *args)
3041 {
3042 	int cpuctrl, cpuctrlmask;
3043 
3044 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_IC_ENABLE
3045 	    | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_BPRD_ENABLE ;
3046 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
3047 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3048 	    | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BPRD_ENABLE
3049 	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
3050 	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
3051 
3052 #ifdef ARM32_DISABLE_ALIGNMENT_FAULTS
3053 	cpuctrl |= CPU_CONTROL_UNAL_ENABLE;
3054 #else
3055 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3056 #endif
3057 
3058 	cpuctrl = parse_cpu_options(args, armv7_options, cpuctrl);
3059 
3060 #ifndef ARM_HAS_VBAR
3061 	if (vector_page == ARM_VECTORS_HIGH)
3062 		cpuctrl |= CPU_CONTROL_VECRELOC;
3063 #endif
3064 
3065 	/* Clear out the cache */
3066 	cpu_idcache_wbinv_all();
3067 
3068 	/* Set the control register */
3069 	curcpu()->ci_ctrl = cpuctrl;
3070 	cpu_control(0xffffffff, cpuctrl);
3071 }
3072 #endif /* CPU_CORTEX */
3073 
3074 
3075 #if defined(CPU_ARM1136) || defined(CPU_ARM1176)
3076 void
3077 arm11x6_setup(char *args)
3078 {
3079 	int cpuctrl, cpuctrl_wax;
3080 	uint32_t auxctrl, auxctrl_wax;
3081 	uint32_t tmp, tmp2;
3082 	uint32_t sbz=0;
3083 	uint32_t cpuid;
3084 
3085 	cpuid = cpu_id();
3086 
3087 	cpuctrl =
3088 		CPU_CONTROL_MMU_ENABLE  |
3089 		CPU_CONTROL_DC_ENABLE   |
3090 		CPU_CONTROL_WBUF_ENABLE |
3091 		CPU_CONTROL_32BP_ENABLE |
3092 		CPU_CONTROL_32BD_ENABLE |
3093 		CPU_CONTROL_LABT_ENABLE |
3094 		CPU_CONTROL_SYST_ENABLE |
3095 		CPU_CONTROL_UNAL_ENABLE |
3096 		CPU_CONTROL_IC_ENABLE;
3097 
3098 	/*
3099 	 * "write as existing" bits: leave these unchanged;
3100 	 * the inverse of this is the mask given to cpu_control().
3101 	 */
3102 	cpuctrl_wax =
3103 		(3 << 30) |
3104 		(1 << 29) |
3105 		(1 << 28) |
3106 		(3 << 26) |
3107 		(3 << 19) |
3108 		(1 << 17);
3109 
3110 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3111 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3112 #endif
3113 
3114 	cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);
3115 
3116 #ifdef __ARMEB__
3117 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3118 #endif
3119 
3120 #ifndef ARM_HAS_VBAR
3121 	if (vector_page == ARM_VECTORS_HIGH)
3122 		cpuctrl |= CPU_CONTROL_VECRELOC;
3123 #endif
3124 
3125 	auxctrl = 0;
3126 	auxctrl_wax = ~0;
3127 	/*
3128 	 * This option enables the workaround for ARM1136 r0pX erratum
3129 	 * 364296 (possible cache data corruption with
3130 	 * hit-under-miss enabled). It sets the undocumented bit 31 in
3131 	 * the auxiliary control register and the FI bit in the control
3132 	 * register, thus disabling hit-under-miss without putting the
3133 	 * processor into full low interrupt latency mode. ARM11MPCore
3134 	 * is not affected.
3135 	 */
3136 	if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1136JS) { /* ARM1136JSr0pX */
3137 		cpuctrl |= CPU_CONTROL_FI_ENABLE;
3138 		auxctrl = ARM1136_AUXCTL_PFI;
3139 		auxctrl_wax = ~ARM1136_AUXCTL_PFI;
3140 	}
3141 
3142 	/*
3143 	 * Enable the erratum workaround for ARM1176JZ-S r0 parts
3144 	 */
3145 	if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */
3146 		auxctrl = ARM1176_AUXCTL_PHD;
3147 		auxctrl_wax = ~ARM1176_AUXCTL_PHD;
3148 	}
3149 
3150 	/* Clear out the cache */
3151 	cpu_idcache_wbinv_all();
3152 
3153 	/* Now really make sure they are clean.  */
3154 	__asm volatile ("mcr\tp15, 0, %0, c7, c7, 0" : : "r"(sbz));
3155 
3156 	/* Allow detection code to find the VFP if it's fitted.  */
3157 	__asm volatile ("mcr\tp15, 0, %0, c1, c0, 2" : : "r" (0x0fffffff));
3158 
3159 	/* Set the control register */
3160 	curcpu()->ci_ctrl = cpuctrl;
3161 	cpu_control(~cpuctrl_wax, cpuctrl);
3162 
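	/*
	 * Read-modify-write the auxiliary control register: keep the
	 * "write as existing" bits, OR in the new ones, and write the
	 * register back (mcrne) only if the value actually changed.
	 */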
3163 	__asm volatile ("mrc	p15, 0, %0, c1, c0, 1\n\t"
3164 			"and	%1, %0, %2\n\t"
3165 			"orr	%1, %1, %3\n\t"
3166 			"teq	%0, %1\n\t"
3167 			"mcrne	p15, 0, %1, c1, c0, 1\n\t"
3168 			: "=r"(tmp), "=r"(tmp2) :
3169 			  "r"(auxctrl_wax), "r"(auxctrl));
3170 
3171 	/* And again. */
3172 	cpu_idcache_wbinv_all();
3173 }
3174 #endif	/* CPU_ARM1136 || CPU_ARM1176 */
3175 
3176 #ifdef CPU_SA110
3177 struct cpu_option sa110_options[] = {
3178 #ifdef COMPAT_12
3179 	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3180 	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
3181 #endif	/* COMPAT_12 */
3182 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3183 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3184 	{ "sa110.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3185 	{ "sa110.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
3186 	{ "sa110.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
3187 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3188 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
3189 	{ "sa110.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3190 	{ NULL,			IGN, IGN, 0 }
3191 };
3192 
3193 void
3194 sa110_setup(char *args)
3195 {
3196 	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3197 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3198 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3199 		 | CPU_CONTROL_WBUF_ENABLE;
3200 #ifdef notyet
3201 	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3202 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3203 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3204 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
3205 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
3206 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
3207 		 | CPU_CONTROL_CPCLK;
3208 #endif
3209 
3210 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3211 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3212 #endif
3213 
3214 	cpuctrl = parse_cpu_options(args, sa110_options, cpuctrl);
3215 
3216 #ifdef __ARMEB__
3217 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3218 #endif
3219 
3220 #ifndef ARM_HAS_VBAR
3221 	if (vector_page == ARM_VECTORS_HIGH)
3222 		cpuctrl |= CPU_CONTROL_VECRELOC;
3223 #endif
3224 
3225 	/* Clear out the cache */
3226 	cpu_idcache_wbinv_all();
3227 
3228 	/* Set the control register */
3229 	curcpu()->ci_ctrl = cpuctrl;
3230 #ifdef notyet
3231 	cpu_control(cpuctrlmask, cpuctrl);
3232 #endif
3233 	cpu_control(0xffffffff, cpuctrl);
3234 
3235 	/*
3236 	 * Enable clock switching.  Note that this neither reads nor
3237 	 * writes r0; r0 is only there to make the asm valid.
3238 	 */
3239 	__asm ("mcr 15, 0, r0, c15, c1, 2");
3240 }
3241 #endif	/* CPU_SA110 */
3242 
3243 #if defined(CPU_SA1100) || defined(CPU_SA1110)
3244 struct cpu_option sa11x0_options[] = {
3245 #ifdef COMPAT_12
3246 	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3247 	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
3248 #endif	/* COMPAT_12 */
3249 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3250 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3251 	{ "sa11x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3252 	{ "sa11x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
3253 	{ "sa11x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
3254 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3255 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
3256 	{ "sa11x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3257 	{ NULL,			IGN, IGN, 0 }
3258 };
3259 
3260 void
3261 sa11x0_setup(char *args)
3262 {
3263 	int cpuctrl, cpuctrlmask;
3264 
3265 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3266 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3267 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3268 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
3269 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3270 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3271 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3272 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
3273 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
3274 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
3275 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
3276 
3277 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3278 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3279 #endif
3280 
3281 	cpuctrl = parse_cpu_options(args, sa11x0_options, cpuctrl);
3282 
3283 #ifdef __ARMEB__
3284 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3285 #endif
3286 
3287 #ifndef ARM_HAS_VBAR
3288 	if (vector_page == ARM_VECTORS_HIGH)
3289 		cpuctrl |= CPU_CONTROL_VECRELOC;
3290 #endif
3291 
3292 	/* Clear out the cache */
3293 	cpu_idcache_wbinv_all();
3294 
3295 	/* Set the control register */
3296 	curcpu()->ci_ctrl = cpuctrl;
3297 	cpu_control(0xffffffff, cpuctrl);
3298 }
3299 #endif	/* CPU_SA1100 || CPU_SA1110 */
3300 
3301 #if defined(CPU_FA526)
3302 struct cpu_option fa526_options[] = {
3303 #ifdef COMPAT_12
3304 	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3305 	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
3306 #endif	/* COMPAT_12 */
3307 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3308 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3309 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3310 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
3311 	{ NULL,			IGN, IGN, 0 }
3312 };
3313 
3314 void
3315 fa526_setup(char *args)
3316 {
3317 	int cpuctrl, cpuctrlmask;
3318 
3319 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3320 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3321 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3322 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
3323 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3324 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3325 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3326 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
3327 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
3328 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
3329 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
3330 
3331 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3332 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3333 #endif
3334 
3335 	cpuctrl = parse_cpu_options(args, fa526_options, cpuctrl);
3336 
3337 #ifdef __ARMEB__
3338 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3339 #endif
3340 
3341 #ifndef ARM_HAS_VBAR
3342 	if (vector_page == ARM_VECTORS_HIGH)
3343 		cpuctrl |= CPU_CONTROL_VECRELOC;
3344 #endif
3345 
3346 	/* Clear out the cache */
3347 	cpu_idcache_wbinv_all();
3348 
3349 	/* Set the control register */
3350 	curcpu()->ci_ctrl = cpuctrl;
3351 	cpu_control(0xffffffff, cpuctrl);
3352 }
3353 #endif	/* CPU_FA526 */
3354 
3355 #if defined(CPU_IXP12X0)
3356 struct cpu_option ixp12x0_options[] = {
3357 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3358 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3359 	{ "ixp12x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3360 	{ "ixp12x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
3361 	{ "ixp12x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
3362 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3363 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
3364 	{ "ixp12x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3365 	{ NULL,			IGN, IGN, 0 }
3366 };
3367 
3368 void
3369 ixp12x0_setup(char *args)
3370 {
3371 	int cpuctrl, cpuctrlmask;
3372 
3373 
3374 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE
3375 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_SYST_ENABLE
3376 		 | CPU_CONTROL_IC_ENABLE;
3377 
3378 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_AFLT_ENABLE
3379 		 | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE
3380 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_SYST_ENABLE
3381 		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_IC_ENABLE
3382 		 | CPU_CONTROL_VECRELOC;
3383 
3384 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3385 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3386 #endif
3387 
3388 	cpuctrl = parse_cpu_options(args, ixp12x0_options, cpuctrl);
3389 
3390 #ifdef __ARMEB__
3391 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3392 #endif
3393 
3394 #ifndef ARM_HAS_VBAR
3395 	if (vector_page == ARM_VECTORS_HIGH)
3396 		cpuctrl |= CPU_CONTROL_VECRELOC;
3397 #endif
3398 
3399 	/* Clear out the cache */
3400 	cpu_idcache_wbinv_all();
3401 
3402 	/* Set the control register */
3403 	curcpu()->ci_ctrl = cpuctrl;
3404 	/* cpu_control(0xffffffff, cpuctrl); */
3405 	cpu_control(cpuctrlmask, cpuctrl);
3406 }
3407 #endif /* CPU_IXP12X0 */
3408 
3409 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
3410     defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425) || defined(CPU_CORTEX)
3411 struct cpu_option xscale_options[] = {
3412 #ifdef COMPAT_12
3413 	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
3414 	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3415 #endif	/* COMPAT_12 */
3416 	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
3417 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3418 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3419 	{ "xscale.branchpredict", BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
3420 	{ "xscale.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3421 	{ "xscale.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
3422 	{ "xscale.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
3423 	{ NULL,			IGN, IGN, 0 }
3424 };
3425 
3426 void
3427 xscale_setup(char *args)
3428 {
3429 	uint32_t auxctl;
3430 	int cpuctrl, cpuctrlmask;
3431 
3432 	/*
3433 	 * The XScale Write Buffer is always enabled.  Our option
3434 	 * is to enable/disable coalescing.  Note that bits 6:3
3435 	 * must always be enabled.
3436 	 */
3437 
3438 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3439 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3440 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3441 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
3442 		 | CPU_CONTROL_BPRD_ENABLE;
3443 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3444 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3445 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3446 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
3447 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
3448 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
3449 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
3450 
3451 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3452 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3453 #endif
3454 
3455 	cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl);
3456 
3457 #ifdef __ARMEB__
3458 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3459 #endif
3460 
3461 #ifndef ARM_HAS_VBAR
3462 	if (vector_page == ARM_VECTORS_HIGH)
3463 		cpuctrl |= CPU_CONTROL_VECRELOC;
3464 #endif
3465 
3466 	/* Clear out the cache */
3467 	cpu_idcache_wbinv_all();
3468 
3469 	/*
3470 	 * Set the control register.  Note that bits 6:3 must always
3471 	 * be set to 1.
3472 	 */
3473 	curcpu()->ci_ctrl = cpuctrl;
3474 /*	cpu_control(cpuctrlmask, cpuctrl);*/
3475 	cpu_control(0xffffffff, cpuctrl);
3476 
3477 	/* Make sure write coalescing is turned on */
3478 	__asm volatile("mrc p15, 0, %0, c1, c0, 1"
3479 		: "=r" (auxctl));
3480 #ifdef XSCALE_NO_COALESCE_WRITES
3481 	auxctl |= XSCALE_AUXCTL_K;
3482 #else
3483 	auxctl &= ~XSCALE_AUXCTL_K;
3484 #endif
3485 	__asm volatile("mcr p15, 0, %0, c1, c0, 1"
3486 		: : "r" (auxctl));
3487 }
3488 #endif	/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || __CPU_XSCALE_PXA2XX || CPU_XSCALE_IXP425 */
3489 
3490 #if defined(CPU_SHEEVA)
3491 struct cpu_option sheeva_options[] = {
3492 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3493 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3494 	{ "sheeva.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3495 	{ "sheeva.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
3496 	{ "sheeva.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
3497 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3498 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
3499 	{ "sheeva.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3500 	{ NULL,			IGN, IGN, 0 }
3501 };
3502 
3503 void
3504 sheeva_setup(char *args)
3505 {
3506 	int cpuctrl, cpuctrlmask;
3507 	uint32_t sheeva_ext;
3508 
3509 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
3510 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3511 	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
3512 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
3513 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3514 	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
3515 	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
3516 	    | CPU_CONTROL_BPRD_ENABLE
3517 	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
3518 
3519 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3520 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3521 #endif
3522 
3523 	cpuctrl = parse_cpu_options(args, sheeva_options, cpuctrl);
3524 
3525 	/* Enable DCache Streaming Switch and Write Allocate */
3526 	__asm volatile("mrc p15, 1, %0, c15, c1, 0"
3527 	    : "=r" (sheeva_ext));
3528 
3529 	sheeva_ext |= FC_DCACHE_STREAM_EN | FC_WR_ALLOC_EN;
3530 
3531 	__asm volatile("mcr p15, 1, %0, c15, c1, 0"
3532 	    :: "r" (sheeva_ext));
3533 
3534 	/*
3535 	 * Sheeva has L2 Cache.  Enable/Disable it here.
3536 	 * Really not supported yet...
3537 	 */
3538 
3539 #ifdef __ARMEB__
3540 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3541 #endif
3542 
3543 #ifndef ARM_HAS_VBAR
3544 	if (vector_page == ARM_VECTORS_HIGH)
3545 		cpuctrl |= CPU_CONTROL_VECRELOC;
3546 #endif
3547 
3548 	/* Clear out the cache */
3549 	cpu_idcache_wbinv_all();
3550 
3551 	/* Now really make sure they are clean.  */
3552 	__asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
3553 
3554 	/* Set the control register */
3555 	curcpu()->ci_ctrl = cpuctrl;
3556 	cpu_control(0xffffffff, cpuctrl);
3557 
3558 	/* And again. */
3559 	cpu_idcache_wbinv_all();
3560 }
3561 #endif	/* CPU_SHEEVA */
3562