/*	$NetBSD: cpufunc.c,v 1.123 2013/06/18 15:27:05 matt Exp $	*/

/*
 * arm7tdmi support code Copyright (c) 2001 John Fremlin
 * arm8 support code Copyright (c) 1997 ARM Limited
 * arm8 support code Copyright (c) 1997 Causality Limited
 * arm9 support code Copyright (C) 2001 ARM Ltd
 * arm11 support code Copyright (c) 2007 Microsoft
 * cortexa8 support code Copyright (c) 2008 3am Software Foundry
 * cortexa8 improvements Copyright (c) Goeran Weinholt
 * Copyright (c) 1997 Mark Brinicombe.
 * Copyright (c) 1997 Causality Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Causality Limited.
 * 4. The name of Causality Limited may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpufuncs.c
 *
 * C functions for supporting CPU / MMU / TLB specific operations.
 *
 * Created	: 30/01/97
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cpufunc.c,v 1.123 2013/06/18 15:27:05 matt Exp $");

#include "opt_compat_netbsd.h"
#include "opt_cpuoptions.h"
#include "opt_perfctrs.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/pmc.h>
#include <sys/systm.h>
#include <machine/cpu.h>
#include <machine/bootconfig.h>
#include <arch/arm/arm/disassem.h>

#include <uvm/uvm.h>

#include <arm/cpuconf.h>
#include <arm/cpufunc.h>

#ifdef CPU_XSCALE_80200
#include <arm/xscale/i80200reg.h>
#include <arm/xscale/i80200var.h>
#endif

#ifdef CPU_XSCALE_80321
#include <arm/xscale/i80321reg.h>
#include <arm/xscale/i80321var.h>
#endif

#ifdef CPU_XSCALE_IXP425
#include <arm/xscale/ixp425reg.h>
#include <arm/xscale/ixp425var.h>
#endif

#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321)
#include <arm/xscale/xscalereg.h>
#endif

#if defined(PERFCTRS)
struct arm_pmc_funcs *arm_pmc;
#endif

/* PRIMARY CACHE VARIABLES */
#if (ARM_MMU_V6 + ARM_MMU_V7) != 0
u_int	arm_cache_prefer_mask;
#endif
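
/*
 * On a VIPT cache whose way size exceeds the page size, distinct virtual
 * mappings of one physical page can land in different cache sets;
 * arm_cache_prefer_mask records the offending VA bits.  A sketch of how
 * it is consumed (see set_cpufuncs() below for the real sites):
 *
 *	if (arm_cache_prefer_mask)
 *		uvmexp.ncolors = (arm_cache_prefer_mask >> PGSHIFT) + 1;
 */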
struct	arm_cache_info arm_pcache;
struct	arm_cache_info arm_scache;

u_int	arm_dcache_align;
u_int	arm_dcache_align_mask;
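
/*
 * Illustrative sketch (assumed usage, not part of the original file):
 * range cache operations work on whole lines, so callers typically
 * round a [va, va+len) range out with these variables, e.g.
 *
 *	len += va & arm_dcache_align_mask;
 *	va &= ~arm_dcache_align_mask;
 */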

/* 1 == use cpu_sleep(), 0 == don't */
int cpu_do_powersave;
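
/*
 * Sketch of the intended use (the real idle loop lives in MD code and
 * may differ in detail):
 *
 *	if (cpu_do_powersave)
 *		cpu_sleep(0);		-- dispatches to cpufuncs.cf_sleep
 */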

#ifdef CPU_ARM2
struct cpu_functions arm2_cpufuncs = {
	/* CPU functions */

	.cf_id			= arm2_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= (void *)cpufunc_nullop,

	/* TLB functions */

	.cf_tlb_flushID		= cpufunc_nullop,
	.cf_tlb_flushID_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushI		= cpufunc_nullop,
	.cf_tlb_flushI_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushD		= cpufunc_nullop,
	.cf_tlb_flushD_SE	= (void *)cpufunc_nullop,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm3_cache_flush,
	.cf_dcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_dcache_inv_range	= (void *)cpufunc_nullop,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= cpufunc_nullop,
	.cf_idcache_wbinv_range	= (void *)cpufunc_nullop,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= early_abort_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_setup		= (void *)cpufunc_nullop

};
#endif	/* CPU_ARM2 */

#ifdef CPU_ARM250
struct cpu_functions arm250_cpufuncs = {
	/* CPU functions */

	.cf_id			= arm250_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= (void *)cpufunc_nullop,

	/* TLB functions */

	.cf_tlb_flushID		= cpufunc_nullop,
	.cf_tlb_flushID_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushI		= cpufunc_nullop,
	.cf_tlb_flushI_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushD		= cpufunc_nullop,
	.cf_tlb_flushD_SE	= (void *)cpufunc_nullop,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm3_cache_flush,
	.cf_dcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_dcache_inv_range	= (void *)cpufunc_nullop,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= cpufunc_nullop,
	.cf_idcache_wbinv_range	= (void *)cpufunc_nullop,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= early_abort_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_setup		= (void *)cpufunc_nullop

};
#endif	/* CPU_ARM250 */

#ifdef CPU_ARM3
struct cpu_functions arm3_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= arm3_control,

	/* TLB functions */

	.cf_tlb_flushID		= cpufunc_nullop,
	.cf_tlb_flushID_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushI		= cpufunc_nullop,
	.cf_tlb_flushI_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushD		= cpufunc_nullop,
	.cf_tlb_flushD_SE	= (void *)cpufunc_nullop,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm3_cache_flush,
	.cf_dcache_wbinv_range	= (void *)arm3_cache_flush,
	.cf_dcache_inv_range	= (void *)arm3_cache_flush,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm3_cache_flush,
	.cf_idcache_wbinv_range	= (void *)arm3_cache_flush,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= early_abort_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_setup		= (void *)cpufunc_nullop

};
#endif	/* CPU_ARM3 */

#ifdef CPU_ARM6
struct cpu_functions arm6_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm67_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm67_tlb_flush,
	.cf_tlb_flushID_SE	= arm67_tlb_purge,
	.cf_tlb_flushI		= arm67_tlb_flush,
	.cf_tlb_flushI_SE	= arm67_tlb_purge,
	.cf_tlb_flushD		= arm67_tlb_flush,
	.cf_tlb_flushD_SE	= arm67_tlb_purge,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm67_cache_flush,
	.cf_dcache_wbinv_range	= (void *)arm67_cache_flush,
	.cf_dcache_inv_range	= (void *)arm67_cache_flush,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm67_cache_flush,
	.cf_idcache_wbinv_range	= (void *)arm67_cache_flush,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

#ifdef ARM6_LATE_ABORT
	.cf_dataabt_fixup	= late_abort_fixup,
#else
	.cf_dataabt_fixup	= early_abort_fixup,
#endif
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm67_context_switch,

	.cf_setup		= arm6_setup

};
#endif	/* CPU_ARM6 */

#ifdef CPU_ARM7
struct cpu_functions arm7_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm67_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm67_tlb_flush,
	.cf_tlb_flushID_SE	= arm67_tlb_purge,
	.cf_tlb_flushI		= arm67_tlb_flush,
	.cf_tlb_flushI_SE	= arm67_tlb_purge,
	.cf_tlb_flushD		= arm67_tlb_flush,
	.cf_tlb_flushD_SE	= arm67_tlb_purge,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm67_cache_flush,
	.cf_dcache_wbinv_range	= (void *)arm67_cache_flush,
	.cf_dcache_inv_range	= (void *)arm67_cache_flush,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm67_cache_flush,
	.cf_idcache_wbinv_range	= (void *)arm67_cache_flush,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= late_abort_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm67_context_switch,

	.cf_setup		= arm7_setup

};
#endif	/* CPU_ARM7 */

#ifdef CPU_ARM7TDMI
struct cpu_functions arm7tdmi_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm7tdmi_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm7tdmi_tlb_flushID,
	.cf_tlb_flushID_SE	= arm7tdmi_tlb_flushID_SE,
	.cf_tlb_flushI		= arm7tdmi_tlb_flushID,
	.cf_tlb_flushI_SE	= arm7tdmi_tlb_flushID_SE,
	.cf_tlb_flushD		= arm7tdmi_tlb_flushID,
	.cf_tlb_flushD_SE	= arm7tdmi_tlb_flushID_SE,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm7tdmi_cache_flushID,
	.cf_dcache_wbinv_range	= (void *)arm7tdmi_cache_flushID,
	.cf_dcache_inv_range	= (void *)arm7tdmi_cache_flushID,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm7tdmi_cache_flushID,
	.cf_idcache_wbinv_range	= (void *)arm7tdmi_cache_flushID,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= late_abort_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm7tdmi_context_switch,

	.cf_setup		= arm7tdmi_setup

};
#endif	/* CPU_ARM7TDMI */

#ifdef CPU_ARM8
struct cpu_functions arm8_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm8_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm8_tlb_flushID,
	.cf_tlb_flushID_SE	= arm8_tlb_flushID_SE,
	.cf_tlb_flushI		= arm8_tlb_flushID,
	.cf_tlb_flushI_SE	= arm8_tlb_flushID_SE,
	.cf_tlb_flushD		= arm8_tlb_flushID,
	.cf_tlb_flushD_SE	= arm8_tlb_flushID_SE,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm8_cache_purgeID,
	.cf_dcache_wbinv_range	= (void *)arm8_cache_purgeID,
/*XXX*/	.cf_dcache_inv_range	= (void *)arm8_cache_purgeID,
	.cf_dcache_wb_range	= (void *)arm8_cache_cleanID,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm8_cache_purgeID,
	.cf_idcache_wbinv_range = (void *)arm8_cache_purgeID,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm8_context_switch,

	.cf_setup		= arm8_setup
};
#endif	/* CPU_ARM8 */

#ifdef CPU_ARM9
struct cpu_functions arm9_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm9_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= arm9_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= arm9_icache_sync_all,
	.cf_icache_sync_range	= arm9_icache_sync_range,

	.cf_dcache_wbinv_all	= arm9_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= arm9_dcache_wbinv_range,
/*XXX*/	.cf_dcache_inv_range	= arm9_dcache_wbinv_range,
	.cf_dcache_wb_range	= arm9_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm9_idcache_wbinv_all,
	.cf_idcache_wbinv_range = arm9_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm9_context_switch,

	.cf_setup		= arm9_setup

};
#endif /* CPU_ARM9 */

#if defined(CPU_ARM9E) || defined(CPU_ARM10)
struct cpu_functions armv5_ec_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= armv5_ec_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= arm10_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= arm10_tlb_flushI_SE,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv5_ec_icache_sync_all,
	.cf_icache_sync_range	= armv5_ec_icache_sync_range,

	.cf_dcache_wbinv_all	= armv5_ec_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= armv5_ec_dcache_wbinv_range,
/*XXX*/	.cf_dcache_inv_range	= armv5_ec_dcache_wbinv_range,
	.cf_dcache_wb_range	= armv5_ec_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= armv5_ec_idcache_wbinv_all,
	.cf_idcache_wbinv_range = armv5_ec_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm10_context_switch,

	.cf_setup		= arm10_setup

};
#endif /* CPU_ARM9E || CPU_ARM10 */

#ifdef CPU_ARM10
struct cpu_functions arm10_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= armv5_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= arm10_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= arm10_tlb_flushI_SE,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv5_icache_sync_all,
	.cf_icache_sync_range	= armv5_icache_sync_range,

	.cf_dcache_wbinv_all	= armv5_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= armv5_dcache_wbinv_range,
/*XXX*/	.cf_dcache_inv_range	= armv5_dcache_wbinv_range,
	.cf_dcache_wb_range	= armv5_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= armv5_idcache_wbinv_all,
	.cf_idcache_wbinv_range = armv5_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm10_context_switch,

	.cf_setup		= arm10_setup

};
#endif /* CPU_ARM10 */

#ifdef CPU_ARM11
struct cpu_functions arm11_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm11_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm11_tlb_flushID,
	.cf_tlb_flushID_SE	= arm11_tlb_flushID_SE,
	.cf_tlb_flushI		= arm11_tlb_flushI,
	.cf_tlb_flushI_SE	= arm11_tlb_flushI_SE,
	.cf_tlb_flushD		= arm11_tlb_flushD,
	.cf_tlb_flushD_SE	= arm11_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv6_icache_sync_all,
	.cf_icache_sync_range	= armv6_icache_sync_range,

	.cf_dcache_wbinv_all	= armv6_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= armv6_dcache_wbinv_range,
	.cf_dcache_inv_range	= armv6_dcache_inv_range,
	.cf_dcache_wb_range	= armv6_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= armv6_idcache_wbinv_all,
	.cf_idcache_wbinv_range = armv6_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= arm11_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= arm11_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm11_context_switch,

	.cf_setup		= arm11_setup

};
#endif /* CPU_ARM11 */

#ifdef CPU_ARM1136
struct cpu_functions arm1136_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm11x6_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm11_tlb_flushID,
	.cf_tlb_flushID_SE	= arm11_tlb_flushID_SE,
	.cf_tlb_flushI		= arm11_tlb_flushI,
	.cf_tlb_flushI_SE	= arm11_tlb_flushI_SE,
	.cf_tlb_flushD		= arm11_tlb_flushD,
	.cf_tlb_flushD_SE	= arm11_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= arm11x6_icache_sync_all,	/* 411920 */
	.cf_icache_sync_range	= arm11x6_icache_sync_range,	/* 371025 */

	.cf_dcache_wbinv_all	= arm11x6_dcache_wbinv_all,	/* 411920 */
	.cf_dcache_wbinv_range	= armv6_dcache_wbinv_range,
	.cf_dcache_inv_range	= armv6_dcache_inv_range,
	.cf_dcache_wb_range	= armv6_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm11x6_idcache_wbinv_all,	/* 411920 */
	.cf_idcache_wbinv_range = arm11x6_idcache_wbinv_range,	/* 371025 */

	/* Other functions */

	.cf_flush_prefetchbuf	= arm11x6_flush_prefetchbuf,
	.cf_drain_writebuf	= arm11_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= arm11_sleep,	/* arm1136_sleep_rev0 */

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm11_context_switch,

	.cf_setup		= arm11x6_setup

};
#endif /* CPU_ARM1136 */

#ifdef CPU_ARM1176
struct cpu_functions arm1176_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm11x6_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm11_tlb_flushID,
	.cf_tlb_flushID_SE	= arm11_tlb_flushID_SE,
	.cf_tlb_flushI		= arm11_tlb_flushI,
	.cf_tlb_flushI_SE	= arm11_tlb_flushI_SE,
	.cf_tlb_flushD		= arm11_tlb_flushD,
	.cf_tlb_flushD_SE	= arm11_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= arm11x6_icache_sync_all,	/* 415045 */
	.cf_icache_sync_range	= arm11x6_icache_sync_range,	/* 371367 */

	.cf_dcache_wbinv_all	= arm11x6_dcache_wbinv_all,	/* 415045 */
	.cf_dcache_wbinv_range	= armv6_dcache_wbinv_range,
	.cf_dcache_inv_range	= armv6_dcache_inv_range,
	.cf_dcache_wb_range	= armv6_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm11x6_idcache_wbinv_all,	/* 415045 */
	.cf_idcache_wbinv_range = arm11x6_idcache_wbinv_range,	/* 371367 */

	/* Other functions */

	.cf_flush_prefetchbuf	= arm11x6_flush_prefetchbuf,
	.cf_drain_writebuf	= arm11_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= arm11x6_sleep,		/* no ref. */

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm11_context_switch,

	.cf_setup		= arm11x6_setup

};
#endif /* CPU_ARM1176 */

#ifdef CPU_ARM11MPCORE
struct cpu_functions arm11mpcore_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm11_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm11_tlb_flushID,
	.cf_tlb_flushID_SE	= arm11_tlb_flushID_SE,
	.cf_tlb_flushI		= arm11_tlb_flushI,
	.cf_tlb_flushI_SE	= arm11_tlb_flushI_SE,
	.cf_tlb_flushD		= arm11_tlb_flushD,
	.cf_tlb_flushD_SE	= arm11_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv6_icache_sync_all,
	.cf_icache_sync_range	= armv5_icache_sync_range,

	.cf_dcache_wbinv_all	= armv6_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= armv5_dcache_wbinv_range,
	.cf_dcache_inv_range	= armv5_dcache_inv_range,
	.cf_dcache_wb_range	= armv5_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= armv6_idcache_wbinv_all,
	.cf_idcache_wbinv_range = armv5_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= arm11_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= arm11_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm11_context_switch,

	.cf_setup		= arm11mpcore_setup

};
#endif /* CPU_ARM11MPCORE */

#ifdef CPU_SA110
struct cpu_functions sa110_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= sa1_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= sa1_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= sa1_cache_syncI,
	.cf_icache_sync_range	= sa1_cache_syncI_rng,

	.cf_dcache_wbinv_all	= sa1_cache_purgeD,
	.cf_dcache_wbinv_range	= sa1_cache_purgeD_rng,
/*XXX*/	.cf_dcache_inv_range	= sa1_cache_purgeD_rng,
	.cf_dcache_wb_range	= sa1_cache_cleanD_rng,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= sa1_cache_purgeID,
	.cf_idcache_wbinv_range	= sa1_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= sa110_context_switch,

	.cf_setup		= sa110_setup
};
#endif	/* CPU_SA110 */

#if defined(CPU_SA1100) || defined(CPU_SA1110)
struct cpu_functions sa11x0_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= sa1_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= sa1_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= sa1_cache_syncI,
	.cf_icache_sync_range	= sa1_cache_syncI_rng,

	.cf_dcache_wbinv_all	= sa1_cache_purgeD,
	.cf_dcache_wbinv_range	= sa1_cache_purgeD_rng,
/*XXX*/	.cf_dcache_inv_range	= sa1_cache_purgeD_rng,
	.cf_dcache_wb_range	= sa1_cache_cleanD_rng,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= sa1_cache_purgeID,
	.cf_idcache_wbinv_range	= sa1_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf	= sa11x0_drain_readbuf,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= sa11x0_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= sa11x0_context_switch,

	.cf_setup		= sa11x0_setup
};
#endif	/* CPU_SA1100 || CPU_SA1110 */

#if defined(CPU_FA526)
struct cpu_functions fa526_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= fa526_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= fa526_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= fa526_tlb_flushI_SE,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= fa526_icache_sync_all,
	.cf_icache_sync_range	= fa526_icache_sync_range,

	.cf_dcache_wbinv_all	= fa526_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= fa526_dcache_wbinv_range,
	.cf_dcache_inv_range	= fa526_dcache_inv_range,
	.cf_dcache_wb_range	= fa526_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= fa526_idcache_wbinv_all,
	.cf_idcache_wbinv_range	= fa526_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= fa526_flush_prefetchbuf,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= fa526_flush_brnchtgt_E,

	.cf_sleep		= fa526_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= fa526_context_switch,

	.cf_setup		= fa526_setup
};
#endif	/* CPU_FA526 */

#ifdef CPU_IXP12X0
struct cpu_functions ixp12x0_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= sa1_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= sa1_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= sa1_cache_syncI,
	.cf_icache_sync_range	= sa1_cache_syncI_rng,

	.cf_dcache_wbinv_all	= sa1_cache_purgeD,
	.cf_dcache_wbinv_range	= sa1_cache_purgeD_rng,
/*XXX*/	.cf_dcache_inv_range	= sa1_cache_purgeD_rng,
	.cf_dcache_wb_range	= sa1_cache_cleanD_rng,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= sa1_cache_purgeID,
	.cf_idcache_wbinv_range	= sa1_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf	= ixp12x0_drain_readbuf,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= ixp12x0_context_switch,

	.cf_setup		= ixp12x0_setup
};
#endif	/* CPU_IXP12X0 */

#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
    defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425)
struct cpu_functions xscale_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= xscale_cpwait,

	/* MMU functions */

	.cf_control		= xscale_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= xscale_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= xscale_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= xscale_cache_syncI,
	.cf_icache_sync_range	= xscale_cache_syncI_rng,

	.cf_dcache_wbinv_all	= xscale_cache_purgeD,
	.cf_dcache_wbinv_range	= xscale_cache_purgeD_rng,
	.cf_dcache_inv_range	= xscale_cache_flushD_rng,
	.cf_dcache_wb_range	= xscale_cache_cleanD_rng,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= xscale_cache_purgeID,
	.cf_idcache_wbinv_range = xscale_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= xscale_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= xscale_context_switch,

	.cf_setup		= xscale_setup
};
#endif
/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || __CPU_XSCALE_PXA2XX || CPU_XSCALE_IXP425 */

#if defined(CPU_CORTEX)
struct cpu_functions cortex_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= armv7_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm11_tlb_flushID,
	.cf_tlb_flushID_SE	= armv7_tlb_flushID_SE,
	.cf_tlb_flushI		= arm11_tlb_flushI,
	.cf_tlb_flushI_SE	= arm11_tlb_flushI_SE,
	.cf_tlb_flushD		= arm11_tlb_flushD,
	.cf_tlb_flushD_SE	= arm11_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv7_icache_sync_all,
	.cf_icache_sync_range	= armv7_icache_sync_range,

	.cf_dcache_wbinv_all	= armv7_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= armv7_dcache_wbinv_range,
	.cf_dcache_inv_range	= armv7_dcache_inv_range,
	.cf_dcache_wb_range	= armv7_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= armv7_idcache_wbinv_all,
	.cf_idcache_wbinv_range = armv7_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv7_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= armv7_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= armv7_context_switch,

	.cf_setup		= armv7_setup

};
#endif /* CPU_CORTEX */

#ifdef CPU_PJ4B
struct cpu_functions pj4bv7_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= pj4b_drain_writebuf,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= pj4b_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= pj4b_tlb_flushID,
	.cf_tlb_flushID_SE	= pj4b_tlb_flushID_SE,
	.cf_tlb_flushI		= pj4b_tlb_flushID,
	.cf_tlb_flushI_SE	= pj4b_tlb_flushID_SE,
	.cf_tlb_flushD		= pj4b_tlb_flushID,
	.cf_tlb_flushD_SE	= pj4b_tlb_flushID_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv7_idcache_wbinv_all,
	.cf_icache_sync_range	= pj4b_icache_sync_range,

	.cf_dcache_wbinv_all	= armv7_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= pj4b_dcache_wbinv_range,
	.cf_dcache_inv_range	= pj4b_dcache_inv_range,
	.cf_dcache_wb_range	= pj4b_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= armv7_idcache_wbinv_all,
	.cf_idcache_wbinv_range	= pj4b_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= pj4b_drain_readbuf,
	.cf_drain_writebuf	= pj4b_drain_writebuf,
	.cf_flush_brnchtgt_C	= pj4b_flush_brnchtgt_all,
	.cf_flush_brnchtgt_E	= pj4b_flush_brnchtgt_va,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= pj4b_context_switch,

	.cf_setup		= pj4bv7_setup
};
#endif /* CPU_PJ4B */

#ifdef CPU_SHEEVA
struct cpu_functions sheeva_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= armv5_ec_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= arm10_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= arm10_tlb_flushI_SE,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv5_ec_icache_sync_all,
	.cf_icache_sync_range	= armv5_ec_icache_sync_range,

	.cf_dcache_wbinv_all	= armv5_ec_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= sheeva_dcache_wbinv_range,
	.cf_dcache_inv_range	= sheeva_dcache_inv_range,
	.cf_dcache_wb_range	= sheeva_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= armv5_ec_idcache_wbinv_all,
	.cf_idcache_wbinv_range = sheeva_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)sheeva_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm10_context_switch,

	.cf_setup		= sheeva_setup
};
#endif /* CPU_SHEEVA */

/*
 * Global constants also used by locore.s
 */

struct cpu_functions cpufuncs;
u_int cputype;
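
/*
 * The cpu_*() wrappers used throughout the kernel are thin macros over
 * this table; a minimal sketch of the pattern (the authoritative
 * definitions live in <arm/cpufunc.h>):
 *
 *	#define cpu_idcache_wbinv_all()	cpufuncs.cf_idcache_wbinv_all()
 *	#define cpu_setttb(t)		cpufuncs.cf_setttb(t)
 */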

#if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) || \
    defined(CPU_ARM9E) || defined(CPU_ARM10) || defined(CPU_ARM11) || \
    defined(CPU_FA526) || \
    defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
    defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425) || \
    defined(CPU_CORTEX) || defined(CPU_PJ4B) || defined(CPU_SHEEVA)
static void get_cachetype_cp15(void);

/* Additional cache information local to this file.  Log2 of some of the
   above numbers.  */
static int	arm_dcache_log2_nsets;
static int	arm_dcache_log2_assoc;
static int	arm_dcache_log2_linesize;

#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
static inline u_int
get_cachesize_cp15(int cssr)
{
	u_int csid;

#if ((CPU_CORTEX) > 0) || defined(CPU_PJ4B)
	__asm volatile(".arch\tarmv7a");
	__asm volatile("mcr p15, 2, %0, c0, c0, 0" :: "r" (cssr));
	__asm volatile("isb");	/* sync to the new cssr */
#else
	__asm volatile("mcr p15, 1, %0, c0, c0, 2" :: "r" (cssr));
#endif
	__asm volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (csid));
	return csid;
}
#endif
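
/*
 * Example (illustrative only): fetching the L1 data cache geometry
 * through the cache-size-selection/cache-size-ID pair wrapped above:
 *
 *	u_int csid = get_cachesize_cp15(0);	-- level 1, data/unified
 *	u_int ways = CPU_CSID_ASSOC(csid) + 1;
 *	u_int line = 1U << (CPU_CSID_LEN(csid) + 4);	-- bytes
 *
 * get_cacheinfo_clidr() below does exactly this per cache level.
 */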

#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
static void
get_cacheinfo_clidr(struct arm_cache_info *info, u_int level, u_int clidr)
{
	u_int csid;
	u_int nsets;

	if (clidr & 6) {
		csid = get_cachesize_cp15(level << 1); /* select dcache values for this level */
		nsets = CPU_CSID_NUMSETS(csid) + 1;
		info->dcache_ways = CPU_CSID_ASSOC(csid) + 1;
		info->dcache_line_size = 1U << (CPU_CSID_LEN(csid) + 4);
		info->dcache_size = info->dcache_line_size * info->dcache_ways * nsets;

		if (level == 0) {
			arm_dcache_log2_assoc = CPU_CSID_ASSOC(csid) + 1;
			arm_dcache_log2_linesize = CPU_CSID_LEN(csid) + 4;
			arm_dcache_log2_nsets = 31 - __builtin_clz(nsets);
		}
	}

	info->cache_unified = (clidr == 4);

	if (clidr & 1) {
		csid = get_cachesize_cp15((level << 1)|CPU_CSSR_InD); /* select icache values for this level */
		nsets = CPU_CSID_NUMSETS(csid) + 1;
		info->icache_ways = CPU_CSID_ASSOC(csid) + 1;
		info->icache_line_size = 1U << (CPU_CSID_LEN(csid) + 4);
		info->icache_size = info->icache_line_size * info->icache_ways * nsets;
	} else {
		info->icache_ways = info->dcache_ways;
		info->icache_line_size = info->dcache_line_size;
		info->icache_size = info->dcache_size;
	}
}
#endif /* (ARM_MMU_V6 + ARM_MMU_V7) > 0 */
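
/*
 * For reference (sketch): CLIDR packs one 3-bit cache-type field per
 * level (1 = icache only, 2 = dcache only, 3 = split, 4 = unified), so
 * a full walk would look roughly like
 *
 *	for (level = 0; (clidr & 7) != 0; level++, clidr >>= 3)
 *		get_cacheinfo_clidr(&info[level], level, clidr & 7);
 *
 * get_cachetype_cp15() below only examines the first two levels
 * (arm_pcache and arm_scache).
 */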

static void
get_cachetype_cp15(void)
{
	u_int ctype, isize, dsize;
	u_int multiplier;

	__asm volatile("mrc p15, 0, %0, c0, c0, 1"
		: "=r" (ctype));

	/*
	 * ...and thus spake the ARM ARM:
	 *
	 * If an <opcode2> value corresponding to an unimplemented or
	 * reserved ID register is encountered, the System Control
	 * processor returns the value of the main ID register.
	 */
	if (ctype == cpu_id())
		goto out;

#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
	if (CPU_CT_FORMAT(ctype) == 4) {
		u_int clidr = armreg_clidr_read();

		if (CPU_CT4_L1IPOLICY(ctype) != CPU_CT4_L1_PIPT) {
			arm_cache_prefer_mask = PAGE_SIZE;
		}
		arm_pcache.cache_type = CPU_CT_CTYPE_WB14;

		get_cacheinfo_clidr(&arm_pcache, 0, clidr & 7);
		arm_dcache_align = arm_pcache.dcache_line_size;
		clidr >>= 3;
		if (clidr & 7) {
			get_cacheinfo_clidr(&arm_scache, 1, clidr & 7);
			if (arm_scache.dcache_line_size < arm_dcache_align)
				arm_dcache_align = arm_scache.dcache_line_size;
		}
		goto out;
	}
#endif /* ARM_MMU_V6 + ARM_MMU_V7 > 0 */

	if ((ctype & CPU_CT_S) == 0)
		arm_pcache.cache_unified = 1;

	/*
	 * If you want to know how this code works, go read the ARM ARM.
	 */
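
	/*
	 * A worked example (hypothetical ctype value) of the decode below:
	 * a dsize field with M=0, LEN=2, ASSOC=5, SIZE=5 yields
	 *
	 *	line = 1U << (2 + 3) = 32 bytes
	 *	ways = 2 << (5 - 1)  = 32
	 *	size = 2 << (5 + 8)  = 16384 bytes (16KB)
	 *
	 * i.e. the SA-110 style 16KB, 32-way, 32-byte-line data cache
	 * listed in cachetab[] further down.
	 */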

	arm_pcache.cache_type = CPU_CT_CTYPE(ctype);

	if (arm_pcache.cache_unified == 0) {
		isize = CPU_CT_ISIZE(ctype);
		multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
		arm_pcache.icache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
		if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
			if (isize & CPU_CT_xSIZE_M)
				arm_pcache.icache_line_size = 0; /* not present */
			else
				arm_pcache.icache_ways = 1;
		} else {
			arm_pcache.icache_ways = multiplier <<
			    (CPU_CT_xSIZE_ASSOC(isize) - 1);
#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
			if (CPU_CT_xSIZE_P & isize)
				arm_cache_prefer_mask |=
				    __BIT(9 + CPU_CT_xSIZE_SIZE(isize)
					  - CPU_CT_xSIZE_ASSOC(isize))
				    - PAGE_SIZE;
#endif
		}
		arm_pcache.icache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
	}

	dsize = CPU_CT_DSIZE(ctype);
	multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
	arm_pcache.dcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
	if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
		if (dsize & CPU_CT_xSIZE_M)
			arm_pcache.dcache_line_size = 0; /* not present */
		else
			arm_pcache.dcache_ways = 1;
	} else {
		arm_pcache.dcache_ways = multiplier <<
		    (CPU_CT_xSIZE_ASSOC(dsize) - 1);
#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
		if (CPU_CT_xSIZE_P & dsize)
			arm_cache_prefer_mask |=
			    __BIT(9 + CPU_CT_xSIZE_SIZE(dsize)
				  - CPU_CT_xSIZE_ASSOC(dsize)) - PAGE_SIZE;
#endif
	}
	arm_pcache.dcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);

	arm_dcache_align = arm_pcache.dcache_line_size;

	arm_dcache_log2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
	arm_dcache_log2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
	arm_dcache_log2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
	    CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);

 out:
	arm_dcache_align_mask = arm_dcache_align - 1;
}
#endif /* ARM7TDMI || ARM8 || ARM9 || ARM9E || ARM10 || ARM11 || FA526 || XSCALE || CORTEX || PJ4B || SHEEVA */

#if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3) || \
    defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_SA110) || \
    defined(CPU_SA1100) || defined(CPU_SA1110) || defined(CPU_IXP12X0)
/* Cache information for CPUs without cache type registers. */
struct cachetab {
	uint32_t ct_cpuid;
	int	ct_pcache_type;
	int	ct_pcache_unified;
	int	ct_pdcache_size;
	int	ct_pdcache_line_size;
	int	ct_pdcache_ways;
	int	ct_picache_size;
	int	ct_picache_line_size;
	int	ct_picache_ways;
};

struct cachetab cachetab[] = {
    /* cpuid,           cache type,       u,  dsiz, ls, wy,  isiz, ls, wy */
    { CPU_ID_ARM2,      0,                1,     0,  0,  0,     0,  0,  0 },
    { CPU_ID_ARM250,    0,                1,     0,  0,  0,     0,  0,  0 },
    { CPU_ID_ARM3,      CPU_CT_CTYPE_WT,  1,  4096, 16, 64,     0,  0,  0 },
    { CPU_ID_ARM610,	CPU_CT_CTYPE_WT,  1,  4096, 16, 64,     0,  0,  0 },
    { CPU_ID_ARM710,    CPU_CT_CTYPE_WT,  1,  8192, 32,  4,     0,  0,  0 },
    { CPU_ID_ARM7500,   CPU_CT_CTYPE_WT,  1,  4096, 16,  4,     0,  0,  0 },
    { CPU_ID_ARM710A,   CPU_CT_CTYPE_WT,  1,  8192, 16,  4,     0,  0,  0 },
    { CPU_ID_ARM7500FE, CPU_CT_CTYPE_WT,  1,  4096, 16,  4,     0,  0,  0 },
    /* XXX is this type right for SA-1? */
    { CPU_ID_SA110,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 },
    { CPU_ID_SA1100,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
    { CPU_ID_SA1110,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
    { CPU_ID_IXP1200,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 }, /* XXX */
    { 0, 0, 0, 0, 0, 0, 0, 0}
};

static void get_cachetype_table(void);

static void
get_cachetype_table(void)
{
	int i;
	uint32_t cpuid = cpu_id();

	for (i = 0; cachetab[i].ct_cpuid != 0; i++) {
		if (cachetab[i].ct_cpuid == (cpuid & CPU_ID_CPU_MASK)) {
			arm_pcache.cache_type = cachetab[i].ct_pcache_type;
			arm_pcache.cache_unified = cachetab[i].ct_pcache_unified;
			arm_pcache.dcache_size = cachetab[i].ct_pdcache_size;
			arm_pcache.dcache_line_size =
			    cachetab[i].ct_pdcache_line_size;
			arm_pcache.dcache_ways = cachetab[i].ct_pdcache_ways;
			arm_pcache.icache_size = cachetab[i].ct_picache_size;
			arm_pcache.icache_line_size =
			    cachetab[i].ct_picache_line_size;
			arm_pcache.icache_ways = cachetab[i].ct_picache_ways;
		}
	}

	arm_dcache_align = arm_pcache.dcache_line_size;
	arm_dcache_align_mask = arm_dcache_align - 1;
}

#endif /* ARM2 || ARM250 || ARM3 || ARM6 || ARM7 || SA110 || SA1100 || SA1110 || IXP12X0 */

/*
 * Cannot panic here as we may not have a console yet ...
 */
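
/*
 * Sketch of the expected boot-time call (the exact site is in each
 * port's initarm()-style MD startup code, which may differ):
 *
 *	if (set_cpufuncs() != 0)
 *		... spin: unrecognized CPU and no console to panic on ...
 */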

int
set_cpufuncs(void)
{
	if (cputype == 0) {
		cputype = cpufunc_id();
		cputype &= CPU_ID_CPU_MASK;
	}

	/*
	 * NOTE: cpu_do_powersave defaults to off.  If we encounter a
	 * CPU type where we want to use it by default, then we set it.
	 */
#ifdef CPU_ARM2
	if (cputype == CPU_ID_ARM2) {
		cpufuncs = arm2_cpufuncs;
		get_cachetype_table();
		return 0;
	}
#endif /* CPU_ARM2 */
#ifdef CPU_ARM250
	if (cputype == CPU_ID_ARM250) {
		cpufuncs = arm250_cpufuncs;
		get_cachetype_table();
		return 0;
	}
#endif
#ifdef CPU_ARM3
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    (cputype & 0x00000f00) == 0x00000300) {
		cpufuncs = arm3_cpufuncs;
		get_cachetype_table();
		return 0;
	}
#endif	/* CPU_ARM3 */
#ifdef CPU_ARM6
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    (cputype & 0x00000f00) == 0x00000600) {
		cpufuncs = arm6_cpufuncs;
		get_cachetype_table();
		pmap_pte_init_generic();
		return 0;
	}
#endif	/* CPU_ARM6 */
#ifdef CPU_ARM7
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    CPU_ID_IS7(cputype) &&
	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V3) {
		cpufuncs = arm7_cpufuncs;
		get_cachetype_table();
		pmap_pte_init_generic();
		return 0;
	}
#endif	/* CPU_ARM7 */
#ifdef CPU_ARM7TDMI
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    CPU_ID_IS7(cputype) &&
	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V4T) {
		cpufuncs = arm7tdmi_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		return 0;
	}
#endif
#ifdef CPU_ARM8
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    (cputype & 0x0000f000) == 0x00008000) {
		cpufuncs = arm8_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_arm8();
		return 0;
	}
#endif	/* CPU_ARM8 */
#ifdef CPU_ARM9
	if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
	     (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
	    (cputype & 0x0000f000) == 0x00009000) {
		cpufuncs = arm9_cpufuncs;
		get_cachetype_cp15();
		arm9_dcache_sets_inc = 1U << arm_dcache_log2_linesize;
		arm9_dcache_sets_max =
		    (1U << (arm_dcache_log2_linesize + arm_dcache_log2_nsets)) -
		    arm9_dcache_sets_inc;
		arm9_dcache_index_inc = 1U << (32 - arm_dcache_log2_assoc);
		arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
1786 #ifdef	ARM9_CACHE_WRITE_THROUGH
1787 		pmap_pte_init_arm9();
1788 #else
1789 		pmap_pte_init_generic();
1790 #endif
1791 		return 0;
1792 	}
1793 #endif /* CPU_ARM9 */
1794 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
1795 	if (cputype == CPU_ID_ARM926EJS ||
1796 	    cputype == CPU_ID_ARM1026EJS) {
1797 		cpufuncs = armv5_ec_cpufuncs;
1798 		get_cachetype_cp15();
1799 		pmap_pte_init_generic();
1800 		return 0;
1801 	}
1802 #endif /* CPU_ARM9E || CPU_ARM10 */
1803 #if defined(CPU_SHEEVA)
1804 	if (cputype == CPU_ID_MV88SV131 ||
1805 	    cputype == CPU_ID_MV88FR571_VD) {
1806 		cpufuncs = sheeva_cpufuncs;
1807 		get_cachetype_cp15();
1808 		pmap_pte_init_generic();
1809 		cpu_do_powersave = 1;			/* Enable powersave */
1810 		return 0;
1811 	}
1812 #endif /* CPU_SHEEVA */
1813 #ifdef CPU_ARM10
1814 	if (/* cputype == CPU_ID_ARM1020T || */
1815 	    cputype == CPU_ID_ARM1020E) {
1816 		/*
1817 		 * Select write-through caching (this isn't really an
1818 		 * option on ARM1020T).
1819 		 */
1820 		cpufuncs = arm10_cpufuncs;
1821 		get_cachetype_cp15();
1822 		armv5_dcache_sets_inc = 1U << arm_dcache_log2_linesize;
1823 		armv5_dcache_sets_max =
1824 		    (1U << (arm_dcache_log2_linesize + arm_dcache_log2_nsets)) -
1825 		    armv5_dcache_sets_inc;
1826 		armv5_dcache_index_inc = 1U << (32 - arm_dcache_log2_assoc);
1827 		armv5_dcache_index_max = 0U - armv5_dcache_index_inc;
1828 		pmap_pte_init_generic();
1829 		return 0;
1830 	}
1831 #endif /* CPU_ARM10 */
1832 
1833 
1834 #if defined(CPU_ARM11MPCORE)
1835 	if (cputype == CPU_ID_ARM11MPCORE) {
1836 		cpufuncs = arm11mpcore_cpufuncs;
1837 		get_cachetype_cp15();
1838 		armv5_dcache_sets_inc = 1U << arm_dcache_log2_linesize;
1839 		armv5_dcache_sets_max = (1U << (arm_dcache_log2_linesize +
1840 			arm_dcache_log2_nsets)) - armv5_dcache_sets_inc;
1841 		armv5_dcache_index_inc = 1U << (32 - arm_dcache_log2_assoc);
1842 		armv5_dcache_index_max = 0U - armv5_dcache_index_inc;
1843 		cpu_do_powersave = 1;			/* Enable powersave */
1844 		pmap_pte_init_arm11mpcore();
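		/*
		 * If the cache can alias, tell UVM how many page
		 * colours it needs, derived from the cache-preference
		 * mask.
		 */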
1845 		if (arm_cache_prefer_mask)
1846 			uvmexp.ncolors = (arm_cache_prefer_mask >> PGSHIFT) + 1;
1847 
1848 		return 0;
1849 
1850 	}
1851 #endif	/* CPU_ARM11MPCORE */
1852 
1853 #if defined(CPU_ARM11)
1854 	if (cputype == CPU_ID_ARM1136JS ||
1855 	    cputype == CPU_ID_ARM1136JSR1 ||
1856 	    cputype == CPU_ID_ARM1176JZS) {
1857 		cpufuncs = arm11_cpufuncs;
1858 #if defined(CPU_ARM1136)
1859 		if (cputype == CPU_ID_ARM1136JS ||
1860 		    cputype == CPU_ID_ARM1136JSR1) {
1861 			cpufuncs = arm1136_cpufuncs;
1862 			if (cputype == CPU_ID_ARM1136JS)
1863 				cpufuncs.cf_sleep = arm1136_sleep_rev0;
1864 		}
1865 #endif
1866 #if defined(CPU_ARM1176)
1867 		if (cputype == CPU_ID_ARM1176JZS) {
1868 			cpufuncs = arm1176_cpufuncs;
1869 		}
1870 #endif
1871 		cpu_do_powersave = 1;			/* Enable powersave */
1872 		get_cachetype_cp15();
1873 #ifdef ARM11_CACHE_WRITE_THROUGH
1874 		pmap_pte_init_arm11();
1875 #else
1876 		pmap_pte_init_generic();
1877 #endif
1878 		if (arm_cache_prefer_mask)
1879 			uvmexp.ncolors = (arm_cache_prefer_mask >> PGSHIFT) + 1;
1880 
1881 		/*
1882 		 * Start and reset the PMC Cycle Counter.
1883 		 */
1884 		armreg_pmcrv6_write(ARM11_PMCCTL_E | ARM11_PMCCTL_P | ARM11_PMCCTL_C);
1885 		return 0;
1886 	}
1887 #endif /* CPU_ARM11 */
1888 #ifdef CPU_SA110
1889 	if (cputype == CPU_ID_SA110) {
1890 		cpufuncs = sa110_cpufuncs;
1891 		get_cachetype_table();
1892 		pmap_pte_init_sa1();
1893 		return 0;
1894 	}
1895 #endif	/* CPU_SA110 */
1896 #ifdef CPU_SA1100
1897 	if (cputype == CPU_ID_SA1100) {
1898 		cpufuncs = sa11x0_cpufuncs;
1899 		get_cachetype_table();
1900 		pmap_pte_init_sa1();
1901 
1902 		/* Use powersave on this CPU. */
1903 		cpu_do_powersave = 1;
1904 
1905 		return 0;
1906 	}
1907 #endif	/* CPU_SA1100 */
1908 #ifdef CPU_SA1110
1909 	if (cputype == CPU_ID_SA1110) {
1910 		cpufuncs = sa11x0_cpufuncs;
1911 		get_cachetype_table();
1912 		pmap_pte_init_sa1();
1913 
1914 		/* Use powersave on this CPU. */
1915 		cpu_do_powersave = 1;
1916 
1917 		return 0;
1918 	}
1919 #endif	/* CPU_SA1110 */
1920 #ifdef CPU_FA526
1921 	if (cputype == CPU_ID_FA526) {
1922 		cpufuncs = fa526_cpufuncs;
1923 		get_cachetype_cp15();
1924 		pmap_pte_init_generic();
1925 
1926 		/* Use powersave on this CPU. */
1927 		cpu_do_powersave = 1;
1928 
1929 		return 0;
1930 	}
1931 #endif	/* CPU_FA526 */
1932 #ifdef CPU_IXP12X0
1933 	if (cputype == CPU_ID_IXP1200) {
1934 		cpufuncs = ixp12x0_cpufuncs;
1935 		get_cachetype_table();
1936 		pmap_pte_init_sa1();
1937 		return 0;
1938 	}
1939 #endif  /* CPU_IXP12X0 */
1940 #ifdef CPU_XSCALE_80200
1941 	if (cputype == CPU_ID_80200) {
1942 		int rev = cpufunc_id() & CPU_ID_REVISION_MASK;
1943 
1944 		i80200_icu_init();
1945 
1946 		/*
1947 		 * Reset the Performance Monitoring Unit to a
1948 		 * pristine state:
1949 		 *	- CCNT, PMN0, PMN1 reset to 0
1950 		 *	- overflow indications cleared
1951 		 *	- all counters disabled
1952 		 */
1953 		__asm volatile("mcr p14, 0, %0, c0, c0, 0"
1954 			:
1955 			: "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
1956 			       PMNC_CC_IF));
1957 
1958 #if defined(XSCALE_CCLKCFG)
1959 		/*
1960 		 * Crank CCLKCFG to maximum legal value.
1961 		 */
1962 		__asm volatile ("mcr p14, 0, %0, c6, c0, 0"
1963 			:
1964 			: "r" (XSCALE_CCLKCFG));
1965 #endif
1966 
1967 		/*
1968 		 * XXX Disable ECC in the Bus Controller Unit; we
1969 		 * don't really support it, yet.  Clear any pending
1970 		 * error indications.
1971 		 */
1972 		__asm volatile("mcr p13, 0, %0, c0, c1, 0"
1973 			:
1974 			: "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));
1975 
1976 		cpufuncs = xscale_cpufuncs;
1977 #if defined(PERFCTRS)
1978 		xscale_pmu_init();
1979 #endif
1980 
1981 		/*
1982 		 * i80200 errata: Step-A0 and A1 have a bug where
1983 		 * D$ dirty bits are not cleared on "invalidate by
1984 		 * address".
1985 		 *
1986 		 * Workaround: Clean cache line before invalidating.
1987 		 */
1988 		if (rev == 0 || rev == 1)
1989 			cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng;
1990 
1991 		get_cachetype_cp15();
1992 		pmap_pte_init_xscale();
1993 		return 0;
1994 	}
1995 #endif /* CPU_XSCALE_80200 */
1996 #ifdef CPU_XSCALE_80321
1997 	if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
1998 	    cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0 ||
1999 	    cputype == CPU_ID_80219_400 || cputype == CPU_ID_80219_600) {
2000 		i80321_icu_init();
2001 
2002 		/*
2003 		 * Reset the Performance Monitoring Unit to a
2004 		 * pristine state:
2005 		 *	- CCNT, PMN0, PMN1 reset to 0
2006 		 *	- overflow indications cleared
2007 		 *	- all counters disabled
2008 		 */
2009 		__asm volatile("mcr p14, 0, %0, c0, c0, 0"
2010 			:
2011 			: "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
2012 			       PMNC_CC_IF));
2013 
2014 		cpufuncs = xscale_cpufuncs;
2015 #if defined(PERFCTRS)
2016 		xscale_pmu_init();
2017 #endif
2018 
2019 		get_cachetype_cp15();
2020 		pmap_pte_init_xscale();
2021 		return 0;
2022 	}
2023 #endif /* CPU_XSCALE_80321 */
2024 #ifdef __CPU_XSCALE_PXA2XX
2025 	/* Ignore the core revision when matching PXA2xx CPUs */
2026 	if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X ||
2027 	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
2028 	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {
2029 
2030 		cpufuncs = xscale_cpufuncs;
2031 #if defined(PERFCTRS)
2032 		xscale_pmu_init();
2033 #endif
2034 
2035 		get_cachetype_cp15();
2036 		pmap_pte_init_xscale();
2037 
2038 		/* Use powersave on this CPU. */
2039 		cpu_do_powersave = 1;
2040 
2041 		return 0;
2042 	}
2043 #endif /* __CPU_XSCALE_PXA2XX */
2044 #ifdef CPU_XSCALE_IXP425
2045 	if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
2046 	    cputype == CPU_ID_IXP425_266) {
2047 		ixp425_icu_init();
2048 
2049 		cpufuncs = xscale_cpufuncs;
2050 #if defined(PERFCTRS)
2051 		xscale_pmu_init();
2052 #endif
2053 
2054 		get_cachetype_cp15();
2055 		pmap_pte_init_xscale();
2056 
2057 		return 0;
2058 	}
2059 #endif /* CPU_XSCALE_IXP425 */
2060 #if defined(CPU_CORTEX)
2061 	if (CPU_ID_CORTEX_P(cputype)) {
2062 		cpufuncs = cortex_cpufuncs;
2063 		cpu_do_powersave = 1;			/* Enable powersave */
2064 		get_cachetype_cp15();
2065 		pmap_pte_init_armv7();
2066 		if (arm_cache_prefer_mask)
2067 			uvmexp.ncolors = (arm_cache_prefer_mask >> PGSHIFT) + 1;
2068 		/*
2069 		 * Start and reset the PMC Cycle Counter.
2070 		 */
2071 		armreg_pmcr_write(ARM11_PMCCTL_E | ARM11_PMCCTL_P | ARM11_PMCCTL_C);
2072 		armreg_pmcntenset_write(CORTEX_CNTENS_C);
2073 		return 0;
2074 	}
2075 #endif /* CPU_CORTEX */
2076 
2077 #if defined(CPU_PJ4B)
2078 	if ((cputype == CPU_ID_MV88SV581X_V6 ||
2079 	    cputype == CPU_ID_MV88SV581X_V7 ||
2080 	    cputype == CPU_ID_MV88SV584X_V7 ||
2081 	    cputype == CPU_ID_ARM_88SV581X_V6 ||
2082 	    cputype == CPU_ID_ARM_88SV581X_V7) &&
2083 	    (armreg_pfr0_read() & ARM_PFR0_THUMBEE_MASK)) {
2084 		cpufuncs = pj4bv7_cpufuncs;
2085 		get_cachetype_cp15();
2086 		pmap_pte_init_armv7();
2087 		return 0;
2088 	}
2089 #endif /* CPU_PJ4B */
2090 
2091 	/*
2092 	 * Bzzzz. And the answer was ...
2093 	 */
2094 	panic("No support for this CPU type (%08x) in kernel", cputype);
2095 	return(ARCHITECTURE_NOT_PRESENT);
2096 }
2097 
2098 #ifdef CPU_ARM2
2099 u_int arm2_id(void)
2100 {
2101 
2102 	return CPU_ID_ARM2;
2103 }
2104 #endif /* CPU_ARM2 */
2105 
2106 #ifdef CPU_ARM250
2107 u_int arm250_id(void)
2108 {
2109 
2110 	return CPU_ID_ARM250;
2111 }
2112 #endif /* CPU_ARM250 */
2113 
2114 /*
2115  * Fixup routines for data and prefetch aborts.
2116  *
2117  * Several compile time symbols are used
2118  * Several compile-time symbols are used:
2119  * DEBUG_FAULT_CORRECTION - Print debugging information during the
2120  * correction of registers after a fault.
2121  * ARM6_LATE_ABORT - ARM6 supports both early and late aborts;
2122  * when defined, late aborts are used.
2123  */
2124 
2125 
2126 /*
2127  * Null abort fixup routine.
2128  * For use when no fixup is required.
2129  */
2130 int
2131 cpufunc_null_fixup(void *arg)
2132 {
2133 	return(ABORT_FIXUP_OK);
2134 }
2135 
2136 
2137 #if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3) || \
2138     defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI)
2139 
2140 #ifdef DEBUG_FAULT_CORRECTION
2141 #define DFC_PRINTF(x)		printf x
2142 #define DFC_DISASSEMBLE(x)	disassemble(x)
2143 #else
2144 #define DFC_PRINTF(x)		/* nothing */
2145 #define DFC_DISASSEMBLE(x)	/* nothing */
2146 #endif
2147 
2148 /*
2149  * "Early" data abort fixup.
2150  *
2151  * For ARM2, ARM2as, ARM3 and ARM6 (in early-abort mode).  Also used
2152  * indirectly by ARM6 (in late-abort mode) and ARM7[TDMI].
2153  *
2154  * In early aborts, we may have to fix up LDM, STM, LDC and STC.
2155  */
2156 int
2157 early_abort_fixup(void *arg)
2158 {
2159 	trapframe_t *frame = arg;
2160 	u_int fault_pc;
2161 	u_int fault_instruction;
2162 	int saved_lr = 0;
2163 
2164 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
2165 
2166 		/* Ok an abort in SVC mode */
2167 
2168 		/*
2169 		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
2170 		 * as the fault happened in svc mode but we need it in the
2171 		 * usr slot so we can treat the registers as an array of ints
2172 		 * during fixing.
2173 		 * NOTE: r15 (the PC) occupies the corresponding slot,
2174 		 * but writeback to r15 is not allowed anyway.
2175 		 * Doing it like this is more efficient than trapping this
2176 		 * case in all possible locations in the following fixup code.
2177 		 */
2178 
2179 		saved_lr = frame->tf_usr_lr;
2180 		frame->tf_usr_lr = frame->tf_svc_lr;
2181 
2182 		/*
2183 		 * Note the trapframe does not have the SVC r13 so a fault
2184 		 * from an instruction with writeback to r13 in SVC mode is
2185 		 * not allowed. This should not happen as the kstack is
2186 		 * always valid.
2187 		 */
2188 	}
2189 
2190 	/* Get fault address and status from the CPU */
2191 
2192 	fault_pc = frame->tf_pc;
2193 	fault_instruction = *((volatile unsigned int *)fault_pc);
2194 
2195 	/* Decode the fault instruction and fix the registers as needed */
2196 
2197 	if ((fault_instruction & 0x0e000000) == 0x08000000) {
2198 		int base;
2199 		int loop;
2200 		int count;
2201 		int *registers = &frame->tf_r0;
2202 
2203 		DFC_PRINTF(("LDM/STM\n"));
2204 		DFC_DISASSEMBLE(fault_pc);
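		/*
		 * LDM/STM encoding: bits 0-15 are the register list,
		 * bits 16-19 select the base register, bit 21 is W
		 * (base writeback) and bit 23 is U (add/subtract the
		 * transfer size).
		 */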
2205 		if (fault_instruction & (1 << 21)) {
2206 			DFC_PRINTF(("This instruction must be corrected\n"));
2207 			base = (fault_instruction >> 16) & 0x0f;
2208 			if (base == 15)
2209 				return ABORT_FIXUP_FAILED;
2210 			/* Count registers transferred */
2211 			count = 0;
2212 			for (loop = 0; loop < 16; ++loop) {
2213 				if (fault_instruction & (1<<loop))
2214 					++count;
2215 			}
2216 			DFC_PRINTF(("%d registers used\n", count));
2217 			DFC_PRINTF(("Corrected r%d by %d bytes ",
2218 				       base, count * 4));
2219 			if (fault_instruction & (1 << 23)) {
2220 				DFC_PRINTF(("down\n"));
2221 				registers[base] -= count * 4;
2222 			} else {
2223 				DFC_PRINTF(("up\n"));
2224 				registers[base] += count * 4;
2225 			}
2226 		}
2227 	} else if ((fault_instruction & 0x0e000000) == 0x0c000000) {
2228 		int base;
2229 		int offset;
2230 		int *registers = &frame->tf_r0;
2231 
2232 		/* REGISTER CORRECTION IS REQUIRED FOR THESE INSTRUCTIONS */
2233 
2234 		DFC_DISASSEMBLE(fault_pc);
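		/*
		 * LDC/STC encoding: bits 16-19 select the base register,
		 * bit 21 is W (writeback), bit 23 is U (add/subtract)
		 * and bits 0-7 hold the offset in words, hence the
		 * "<< 2" below.
		 */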
2235 
2236 		/* Only need to fix registers if write back is turned on */
2237 
2238 		if ((fault_instruction & (1 << 21)) != 0) {
2239 			base = (fault_instruction >> 16) & 0x0f;
2240 			if (base == 13 &&
2241 			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
2242 				return ABORT_FIXUP_FAILED;
2243 			if (base == 15)
2244 				return ABORT_FIXUP_FAILED;
2245 
2246 			offset = (fault_instruction & 0xff) << 2;
2247 			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
2248 			if ((fault_instruction & (1 << 23)) != 0)
2249 				offset = -offset;
2250 			registers[base] += offset;
2251 			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
2252 		}
2253 	} else if ((fault_instruction & 0x0e000000) == 0x0c000000)
2254 		return ABORT_FIXUP_FAILED;	/* XXX unreachable: same test as above */
2255 
2256 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
2257 
2258 		/* Ok an abort in SVC mode */
2259 
2260 		/*
2261 		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
2262 		 * as the fault happened in svc mode but we need it in the
2263 		 * usr slot so we can treat the registers as an array of ints
2264 		 * during fixing.
2265 		 * NOTE: r15 (the PC) occupies the corresponding slot,
2266 		 * but writeback to r15 is not allowed anyway.
2267 		 * Doing it like this is more efficient than trapping this
2268 		 * case in all possible locations in the prior fixup code.
2269 		 */
2270 
2271 		frame->tf_svc_lr = frame->tf_usr_lr;
2272 		frame->tf_usr_lr = saved_lr;
2273 
2274 		/*
2275 		 * Note the trapframe does not have the SVC r13 so a fault
2276 		 * from an instruction with writeback to r13 in SVC mode is
2277 		 * not allowed. This should not happen as the kstack is
2278 		 * always valid.
2279 		 */
2280 	}
2281 
2282 	return(ABORT_FIXUP_OK);
2283 }
2284 #endif	/* CPU_ARM2/250/3/6/7 */
2285 
2286 
2287 #if (defined(CPU_ARM6) && defined(ARM6_LATE_ABORT)) || defined(CPU_ARM7) || \
2288 	defined(CPU_ARM7TDMI)
2289 /*
2290  * "Late" (base updated) data abort fixup
2291  *
2292  * For ARM6 (in late-abort mode) and ARM7.
2293  *
2294  * In this model, all data-transfer instructions need fixing up.  We defer
2295  * LDM, STM, LDC and STC fixup to the early-abort handler.
2296  */
2297 int
2298 late_abort_fixup(void *arg)
2299 {
2300 	trapframe_t *frame = arg;
2301 	u_int fault_pc;
2302 	u_int fault_instruction;
2303 	int saved_lr = 0;
2304 
2305 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
2306 
2307 		/* Ok an abort in SVC mode */
2308 
2309 		/*
2310 		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
2311 		 * as the fault happened in svc mode but we need it in the
2312 		 * usr slot so we can treat the registers as an array of ints
2313 		 * during fixing.
2314 		 * NOTE: r15 (the PC) occupies the corresponding slot,
2315 		 * but writeback to r15 is not allowed anyway.
2316 		 * Doing it like this is more efficient than trapping this
2317 		 * case in all possible locations in the following fixup code.
2318 		 */
2319 
2320 		saved_lr = frame->tf_usr_lr;
2321 		frame->tf_usr_lr = frame->tf_svc_lr;
2322 
2323 		/*
2324 		 * Note the trapframe does not have the SVC r13 so a fault
2325 		 * from an instruction with writeback to r13 in SVC mode is
2326 		 * not allowed. This should not happen as the kstack is
2327 		 * always valid.
2328 		 */
2329 	}
2330 
2331 	/* Get fault address and status from the CPU */
2332 
2333 	fault_pc = frame->tf_pc;
2334 	fault_instruction = *((volatile unsigned int *)fault_pc);
2335 
2336 	/* Decode the fault instruction and fix the registers as needed */
2337 
2338 	/* Was it a swap instruction? */
2339 
2340 	if ((fault_instruction & 0x0fb00ff0) == 0x01000090) {
2341 		DFC_DISASSEMBLE(fault_pc);
2342 	} else if ((fault_instruction & 0x0c000000) == 0x04000000) {
2343 
2344 		/* Was it an ldr/str instruction? */
2345 		/* This is for late abort only */
2346 
2347 		int base;
2348 		int offset;
2349 		int *registers = &frame->tf_r0;
2350 
2351 		DFC_DISASSEMBLE(fault_pc);
2352 
2353 		/* This is for late abort only */
2354 
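		/*
		 * Single data transfer encoding: bit 25 selects a
		 * register (1) or immediate (0) offset, bit 24 is P
		 * (pre/post-index), bit 23 is U (add/subtract), bit 21
		 * is W (writeback) and bits 16-19 are the base register.
		 */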
2355 		if ((fault_instruction & (1 << 24)) == 0
2356 		    || (fault_instruction & (1 << 21)) != 0) {
2357 			/* post-indexed, or pre-indexed with writeback */
2358 
2359 			base = (fault_instruction >> 16) & 0x0f;
2360 			if (base == 13 &&
2361 			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
2362 				return ABORT_FIXUP_FAILED;
2363 			if (base == 15)
2364 				return ABORT_FIXUP_FAILED;
2365 			DFC_PRINTF(("late abt fix: r%d=%08x : ",
2366 				       base, registers[base]));
2367 			if ((fault_instruction & (1 << 25)) == 0) {
2368 				/* Immediate offset - easy */
2369 
2370 				offset = fault_instruction & 0xfff;
2371 				if ((fault_instruction & (1 << 23)))
2372 					offset = -offset;
2373 				registers[base] += offset;
2374 				DFC_PRINTF(("imm=%08x ", offset));
2375 			} else {
2376 				/* offset is a shifted register */
2377 				int shift;
2378 
2379 				offset = fault_instruction & 0x0f;
2380 				if (offset == base)
2381 					return ABORT_FIXUP_FAILED;
2382 
2383 				/*
2384 				 * Register offset - harder: we have
2385 				 * to cope with shifts!
2386 				 */
2387 				offset = registers[offset];
2388 
2389 				if ((fault_instruction & (1 << 4)) == 0)
2390 					/* shift with amount */
2391 					shift = (fault_instruction >> 7) & 0x1f;
2392 				else {
2393 					/* shift with register */
2394 					if ((fault_instruction & (1 << 7)) != 0)
2395 						/* undefined for now so bail out */
2396 						return ABORT_FIXUP_FAILED;
2397 					shift = ((fault_instruction >> 8) & 0xf);
2398 					if (base == shift)
2399 						return ABORT_FIXUP_FAILED;
2400 					DFC_PRINTF(("shift reg=%d ", shift));
2401 					shift = registers[shift];
2402 				}
2403 				DFC_PRINTF(("shift=%08x ", shift));
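				/*
				 * For immediate shifts an encoded
				 * amount of 0 means 32 for LSR and ASR.
				 */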
2404 				switch (((fault_instruction >> 5) & 0x3)) {
2405 				case 0 : /* Logical left */
2406 					offset = (int)(((u_int)offset) << shift);
2407 					break;
2408 				case 1 : /* Logical Right */
2409 					if (shift == 0) shift = 32;
2410 					offset = (int)(((u_int)offset) >> shift);
2411 					break;
2412 				case 2 : /* Arithmetic Right */
2413 					if (shift == 0) shift = 32;
2414 					offset = (int)(((int)offset) >> shift);
2415 					break;
2416 				case 3 : /* Rotate right (ror or rrx) */
2417 					return ABORT_FIXUP_FAILED;
2418 					break;
2419 				}
2420 
2421 				DFC_PRINTF(("abt: fixed LDR/STR with "
2422 					       "register offset\n"));
2423 				if ((fault_instruction & (1 << 23)))
2424 					offset = -offset;
2425 				DFC_PRINTF(("offset=%08x ", offset));
2426 				registers[base] += offset;
2427 			}
2428 			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
2429 		}
2430 	}
2431 
2432 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
2433 
2434 		/* Ok an abort in SVC mode */
2435 
2436 		/*
2437 		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
2438 		 * as the fault happened in svc mode but we need it in the
2439 		 * usr slot so we can treat the registers as an array of ints
2440 		 * during fixing.
2441 		 * NOTE: r15 (the PC) occupies the corresponding slot,
2442 		 * but writeback to r15 is not allowed anyway.
2443 		 * Doing it like this is more efficient than trapping this
2444 		 * case in all possible locations in the prior fixup code.
2445 		 */
2446 
2447 		frame->tf_svc_lr = frame->tf_usr_lr;
2448 		frame->tf_usr_lr = saved_lr;
2449 
2450 		/*
2451 		 * Note the trapframe does not have the SVC r13 so a fault
2452 		 * from an instruction with writeback to r13 in SVC mode is
2453 		 * not allowed. This should not happen as the kstack is
2454 		 * always valid.
2455 		 */
2456 	}
2457 
2458 	/*
2459 	 * Now let the early-abort fixup routine have a go, in case it
2460 	 * was an LDM, STM, LDC or STC that faulted.
2461 	 */
2462 
2463 	return early_abort_fixup(arg);
2464 }
2465 #endif	/* CPU_ARM6(LATE)/7/7TDMI */
2466 
2467 /*
2468  * CPU Setup code
2469  */
2470 
2471 #if defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) || \
2472 	defined(CPU_ARM8) || defined (CPU_ARM9) || defined (CPU_ARM9E) || \
2473 	defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
2474 	defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
2475 	defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425) || \
2476 	defined(CPU_ARM10) || defined(CPU_ARM11) || \
2477 	defined(CPU_FA526) || defined(CPU_CORTEX) || defined(CPU_SHEEVA)
2478 
2479 #define IGN	0
2480 #define OR	1
2481 #define BIC	2
2482 
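/*
 * Each entry maps a boot option onto an operation on the proposed
 * CPU control word: co_falseop is applied when the option is given
 * a false value and co_trueop when it is given a true one.  IGN
 * leaves the word untouched, OR sets co_value in it and BIC clears
 * co_value from it.
 */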
2483 struct cpu_option {
2484 	const char *co_name;
2485 	int	co_falseop;
2486 	int	co_trueop;
2487 	int	co_value;
2488 };
2489 
2490 static u_int parse_cpu_options(char *, struct cpu_option *, u_int);
2491 
2492 static u_int
2493 parse_cpu_options(char *args, struct cpu_option *optlist, u_int cpuctrl)
2494 {
2495 	int integer;
2496 
2497 	if (args == NULL)
2498 		return(cpuctrl);
2499 
2500 	while (optlist->co_name) {
2501 		if (get_bootconf_option(args, optlist->co_name,
2502 		    BOOTOPT_TYPE_BOOLEAN, &integer)) {
2503 			if (integer) {
2504 				if (optlist->co_trueop == OR)
2505 					cpuctrl |= optlist->co_value;
2506 				else if (optlist->co_trueop == BIC)
2507 					cpuctrl &= ~optlist->co_value;
2508 			} else {
2509 				if (optlist->co_falseop == OR)
2510 					cpuctrl |= optlist->co_value;
2511 				else if (optlist->co_falseop == BIC)
2512 					cpuctrl &= ~optlist->co_value;
2513 			}
2514 		}
2515 		++optlist;
2516 	}
2517 	return(cpuctrl);
2518 }
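
/*
 * Example: a setup routine folds the boot arguments into its proposed
 * control word with
 *
 *	cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);
 *
 * so that booting with something like "arm9.icache=0" matches the
 * { "arm9.icache", BIC, OR, CPU_CONTROL_IC_ENABLE } entry with a
 * false value and clears CPU_CONTROL_IC_ENABLE, while "arm9.icache=1"
 * applies the OR operation and sets it.
 */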
2519 #endif /* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 || CPU_SA110 */
2520 
2521 #if defined (CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) \
2522 	|| defined(CPU_ARM8)
2523 struct cpu_option arm678_options[] = {
2524 #ifdef COMPAT_12
2525 	{ "nocache",		IGN, BIC, CPU_CONTROL_IDC_ENABLE },
2526 	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
2527 #endif	/* COMPAT_12 */
2528 	{ "cpu.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
2529 	{ "cpu.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
2530 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2531 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2532 	{ NULL,			IGN, IGN, 0 }
2533 };
2534 
2535 #endif	/* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 */
2536 
2537 #ifdef CPU_ARM6
2538 struct cpu_option arm6_options[] = {
2539 	{ "arm6.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
2540 	{ "arm6.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
2541 	{ "arm6.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2542 	{ "arm6.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2543 	{ NULL,			IGN, IGN, 0 }
2544 };
2545 
2546 void
2547 arm6_setup(char *args)
2548 {
2549 	int cpuctrl, cpuctrlmask;
2550 
2551 	/* Set up default control registers bits */
2552 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2553 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2554 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
2555 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2556 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2557 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
2558 		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE
2559 		 | CPU_CONTROL_AFLT_ENABLE;
2560 
2561 #ifdef ARM6_LATE_ABORT
2562 	cpuctrl |= CPU_CONTROL_LABT_ENABLE;
2563 #endif	/* ARM6_LATE_ABORT */
2564 
2565 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2566 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2567 #endif
2568 
2569 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
2570 	cpuctrl = parse_cpu_options(args, arm6_options, cpuctrl);
2571 
2572 #ifdef __ARMEB__
2573 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2574 #endif
2575 
2576 	/* Clear out the cache */
2577 	cpu_idcache_wbinv_all();
2578 
2579 	/* Set the control register */
2580 	curcpu()->ci_ctrl = cpuctrl;
2581 	cpu_control(0xffffffff, cpuctrl);
2582 }
2583 #endif	/* CPU_ARM6 */
2584 
2585 #ifdef CPU_ARM7
2586 struct cpu_option arm7_options[] = {
2587 	{ "arm7.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
2588 	{ "arm7.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
2589 	{ "arm7.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2590 	{ "arm7.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2591 #ifdef COMPAT_12
2592 	{ "fpaclk2",		BIC, OR,  CPU_CONTROL_CPCLK },
2593 #endif	/* COMPAT_12 */
2594 	{ "arm700.fpaclk",	BIC, OR,  CPU_CONTROL_CPCLK },
2595 	{ NULL,			IGN, IGN, 0 }
2596 };
2597 
2598 void
2599 arm7_setup(char *args)
2600 {
2601 	int cpuctrl, cpuctrlmask;
2602 
2603 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2604 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2605 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
2606 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2607 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2608 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
2609 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_LABT_ENABLE
2610 		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE
2611 		 | CPU_CONTROL_AFLT_ENABLE;
2612 
2613 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2614 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2615 #endif
2616 
2617 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
2618 	cpuctrl = parse_cpu_options(args, arm7_options, cpuctrl);
2619 
2620 #ifdef __ARMEB__
2621 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2622 #endif
2623 
2624 	/* Clear out the cache */
2625 	cpu_idcache_wbinv_all();
2626 
2627 	/* Set the control register */
2628 	curcpu()->ci_ctrl = cpuctrl;
2629 	cpu_control(0xffffffff, cpuctrl);
2630 }
2631 #endif	/* CPU_ARM7 */
2632 
2633 #ifdef CPU_ARM7TDMI
2634 struct cpu_option arm7tdmi_options[] = {
2635 	{ "arm7.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
2636 	{ "arm7.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
2637 	{ "arm7.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2638 	{ "arm7.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2639 #ifdef COMPAT_12
2640 	{ "fpaclk2",		BIC, OR,  CPU_CONTROL_CPCLK },
2641 #endif	/* COMPAT_12 */
2642 	{ "arm700.fpaclk",	BIC, OR,  CPU_CONTROL_CPCLK },
2643 	{ NULL,			IGN, IGN, 0 }
2644 };
2645 
2646 void
2647 arm7tdmi_setup(char *args)
2648 {
2649 	int cpuctrl;
2650 
2651 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2652 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2653 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
2654 
2655 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
2656 	cpuctrl = parse_cpu_options(args, arm7tdmi_options, cpuctrl);
2657 
2658 #ifdef __ARMEB__
2659 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2660 #endif
2661 
2662 	/* Clear out the cache */
2663 	cpu_idcache_wbinv_all();
2664 
2665 	/* Set the control register */
2666 	curcpu()->ci_ctrl = cpuctrl;
2667 	cpu_control(0xffffffff, cpuctrl);
2668 }
2669 #endif	/* CPU_ARM7TDMI */
2670 
2671 #ifdef CPU_ARM8
2672 struct cpu_option arm8_options[] = {
2673 	{ "arm8.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
2674 	{ "arm8.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
2675 	{ "arm8.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2676 	{ "arm8.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2677 #ifdef COMPAT_12
2678 	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2679 #endif	/* COMPAT_12 */
2680 	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2681 	{ "arm8.branchpredict",	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2682 	{ NULL,			IGN, IGN, 0 }
2683 };
2684 
2685 void
2686 arm8_setup(char *args)
2687 {
2688 	int integer;
2689 	int cpuctrl, cpuctrlmask;
2690 	int clocktest;
2691 	int setclock = 0;
2692 
2693 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2694 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2695 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
2696 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2697 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2698 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
2699 		 | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_ROM_ENABLE
2700 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE;
2701 
2702 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2703 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2704 #endif
2705 
2706 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
2707 	cpuctrl = parse_cpu_options(args, arm8_options, cpuctrl);
2708 
2709 #ifdef __ARMEB__
2710 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2711 #endif
2712 
2713 	/* Get clock configuration */
2714 	clocktest = arm8_clock_config(0, 0) & 0x0f;
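	/*
	 * In the clock/test value below, bit 0 selects dynamic clock
	 * switching, bit 1 synchronous mode, bits 2-3 the fast clock
	 * source and bits 5-7 the test bits.
	 */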
2715 
2716 	/* Special ARM8 clock and test configuration */
2717 	if (get_bootconf_option(args, "arm8.clock.reset", BOOTOPT_TYPE_BOOLEAN, &integer)) {
2718 		clocktest = 0;
2719 		setclock = 1;
2720 	}
2721 	if (get_bootconf_option(args, "arm8.clock.dynamic", BOOTOPT_TYPE_BOOLEAN, &integer)) {
2722 		if (integer)
2723 			clocktest |= 0x01;
2724 		else
2725 			clocktest &= ~(0x01);
2726 		setclock = 1;
2727 	}
2728 	if (get_bootconf_option(args, "arm8.clock.sync", BOOTOPT_TYPE_BOOLEAN, &integer)) {
2729 		if (integer)
2730 			clocktest |= 0x02;
2731 		else
2732 			clocktest &= ~(0x02);
2733 		setclock = 1;
2734 	}
2735 	if (get_bootconf_option(args, "arm8.clock.fast", BOOTOPT_TYPE_BININT, &integer)) {
2736 		clocktest = (clocktest & ~0x0c) | (integer & 3) << 2;	/* fast clock field is bits 2-3 */
2737 		setclock = 1;
2738 	}
2739 	if (get_bootconf_option(args, "arm8.test", BOOTOPT_TYPE_BININT, &integer)) {
2740 		clocktest |= (integer & 7) << 5;
2741 		setclock = 1;
2742 	}
2743 
2744 	/* Clear out the cache */
2745 	cpu_idcache_wbinv_all();
2746 
2747 	/* Set the control register */
2748 	curcpu()->ci_ctrl = cpuctrl;
2749 	cpu_control(0xffffffff, cpuctrl);
2750 
2751 	/* Set the clock/test register */
2752 	if (setclock)
2753 		arm8_clock_config(0x7f, clocktest);
2754 }
2755 #endif	/* CPU_ARM8 */
2756 
2757 #ifdef CPU_ARM9
2758 struct cpu_option arm9_options[] = {
2759 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2760 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2761 	{ "arm9.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2762 	{ "arm9.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2763 	{ "arm9.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2764 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2765 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2766 	{ "arm9.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2767 	{ NULL,			IGN, IGN, 0 }
2768 };
2769 
2770 void
2771 arm9_setup(char *args)
2772 {
2773 	int cpuctrl, cpuctrlmask;
2774 
2775 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2776 	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2777 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2778 	    | CPU_CONTROL_WBUF_ENABLE;
2779 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2780 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2781 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2782 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2783 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2784 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
2785 		 | CPU_CONTROL_ROUNDROBIN;
2786 
2787 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2788 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2789 #endif
2790 
2791 	cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);
2792 
2793 #ifdef __ARMEB__
2794 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2795 #endif
2796 
2797 #ifndef ARM_HAS_VBAR
2798 	if (vector_page == ARM_VECTORS_HIGH)
2799 		cpuctrl |= CPU_CONTROL_VECRELOC;
2800 #endif
2801 
2802 	/* Clear out the cache */
2803 	cpu_idcache_wbinv_all();
2804 
2805 	/* Set the control register */
2806 	curcpu()->ci_ctrl = cpuctrl;
2807 	cpu_control(cpuctrlmask, cpuctrl);
2808 
2809 }
2810 #endif	/* CPU_ARM9 */
2811 
2812 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
2813 struct cpu_option arm10_options[] = {
2814 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2815 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2816 	{ "arm10.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2817 	{ "arm10.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2818 	{ "arm10.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2819 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2820 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2821 	{ "arm10.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2822 	{ NULL,			IGN, IGN, 0 }
2823 };
2824 
2825 void
2826 arm10_setup(char *args)
2827 {
2828 	int cpuctrl, cpuctrlmask;
2829 
2830 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2831 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2832 	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
2833 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2834 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2835 	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2836 	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2837 	    | CPU_CONTROL_BPRD_ENABLE
2838 	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
2839 
2840 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2841 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2842 #endif
2843 
2844 	cpuctrl = parse_cpu_options(args, arm10_options, cpuctrl);
2845 
2846 #ifdef __ARMEB__
2847 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2848 #endif
2849 
2850 #ifndef ARM_HAS_VBAR
2851 	if (vector_page == ARM_VECTORS_HIGH)
2852 		cpuctrl |= CPU_CONTROL_VECRELOC;
2853 #endif
2854 
2855 	/* Clear out the cache */
2856 	cpu_idcache_wbinv_all();
2857 
2858 	/* Now really make sure they are clean.  */
2859 	__asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
2860 
2861 	/* Set the control register */
2862 	curcpu()->ci_ctrl = cpuctrl;
2863 	cpu_control(0xffffffff, cpuctrl);
2864 
2865 	/* And again. */
2866 	cpu_idcache_wbinv_all();
2867 }
2868 #endif	/* CPU_ARM9E || CPU_ARM10 */
2869 
2870 #if defined(CPU_ARM11)
2871 struct cpu_option arm11_options[] = {
2872 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2873 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2874 	{ "arm11.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2875 	{ "arm11.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2876 	{ "arm11.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2877 	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2878 	{ "arm11.branchpredict", BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2879 	{ NULL,			IGN, IGN, 0 }
2880 };
2881 
2882 void
2883 arm11_setup(char *args)
2884 {
2885 	int cpuctrl, cpuctrlmask;
2886 
2887 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2888 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2889 	    /* | CPU_CONTROL_BPRD_ENABLE */;
2890 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2891 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2892 	    | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BPRD_ENABLE
2893 	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2894 	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
2895 
2896 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2897 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2898 #endif
2899 
2900 	cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);
2901 
2902 #ifdef __ARMEB__
2903 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2904 #endif
2905 
2906 #ifndef ARM_HAS_VBAR
2907 	if (vector_page == ARM_VECTORS_HIGH)
2908 		cpuctrl |= CPU_CONTROL_VECRELOC;
2909 #endif
2910 
2911 	/* Clear out the cache */
2912 	cpu_idcache_wbinv_all();
2913 
2914 	/* Now really make sure they are clean.  */
2915 	__asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
2916 
2917 	/* Allow detection code to find the VFP if it's fitted.  */
2918 	__asm volatile ("mcr\tp15, 0, %0, c1, c0, 2" : : "r" (0x0fffffff));
2919 
2920 	/* Set the control register */
2921 	curcpu()->ci_ctrl = cpuctrl;
2922 	cpu_control(0xffffffff, cpuctrl);
2923 
2924 	/* And again. */
2925 	cpu_idcache_wbinv_all();
2926 }
2927 #endif	/* CPU_ARM11 */
2928 
2929 #if defined(CPU_ARM11MPCORE)
2930 
2931 void
2932 arm11mpcore_setup(char *args)
2933 {
2934 	int cpuctrl, cpuctrlmask;
2935 
2936 	cpuctrl = CPU_CONTROL_IC_ENABLE
2937 	    | CPU_CONTROL_DC_ENABLE
2938 	    | CPU_CONTROL_BPRD_ENABLE ;
2939 	cpuctrlmask = CPU_CONTROL_IC_ENABLE
2940 	    | CPU_CONTROL_DC_ENABLE
2941 	    | CPU_CONTROL_BPRD_ENABLE
2942 	    | CPU_CONTROL_AFLT_ENABLE
2943 	    | CPU_CONTROL_VECRELOC;
2944 
2945 #ifdef	ARM11MPCORE_MMU_COMPAT
2946 	/* XXX: S and R? */
2947 #endif
2948 
2949 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2950 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2951 #endif
2952 
2953 	cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);
2954 
2955 #ifndef ARM_HAS_VBAR
2956 	if (vector_page == ARM_VECTORS_HIGH)
2957 		cpuctrl |= CPU_CONTROL_VECRELOC;
2958 #endif
2959 
2960 	/* Clear out the cache */
2961 	cpu_idcache_wbinv_all();
2962 
2963 	/* Now really make sure they are clean.  */
2964 	__asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
2965 
2966 	/* Allow detection code to find the VFP if it's fitted.  */
2967 	__asm volatile ("mcr\tp15, 0, %0, c1, c0, 2" : : "r" (0x0fffffff));
2968 
2969 	/* Set the control register */
2970 	curcpu()->ci_ctrl = cpu_control(cpuctrlmask, cpuctrl);
2971 
2972 	/* And again. */
2973 	cpu_idcache_wbinv_all();
2974 }
2975 #endif	/* CPU_ARM11MPCORE */
2976 
2977 #ifdef CPU_PJ4B
2978 void
2979 pj4bv7_setup(char *args)
2980 {
2981 	int cpuctrl;
2982 
2983 	pj4b_config();
2984 
2985 	cpuctrl = CPU_CONTROL_MMU_ENABLE;
2986 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2987 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2988 #endif
2989 	cpuctrl |= CPU_CONTROL_DC_ENABLE;
2990 	cpuctrl |= CPU_CONTROL_IC_ENABLE;
2991 	cpuctrl |= (0xf << 3);
2992 	cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
2993 	cpuctrl |= (0x5 << 16) | (1 << 22);
2994 	cpuctrl |= CPU_CONTROL_XP_ENABLE;
2995 
2996 #ifndef ARM_HAS_VBAR
2997 	if (vector_page == ARM_VECTORS_HIGH)
2998 		cpuctrl |= CPU_CONTROL_VECRELOC;
2999 #endif
3000 
3001 	/* Clear out the cache */
3002 	cpu_idcache_wbinv_all();
3003 
3004 	/* Set the control register */
3005 	cpu_control(0xffffffff, cpuctrl);
3006 
3007 	/* And again. */
3008 	cpu_idcache_wbinv_all();
3009 
3010 	curcpu()->ci_ctrl = cpuctrl;
3011 }
3012 #endif /* CPU_PJ4B */
3013 
3014 #if defined(CPU_CORTEX)
3015 struct cpu_option armv7_options[] = {
3016 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3017 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3018 	{ "armv7.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3019 	{ "armv7.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
3020 	{ "armv7.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
3021 	{ NULL,			IGN, IGN, 0 }
3022 };
3023 
3024 void
3025 armv7_setup(char *args)
3026 {
3027 	int cpuctrl, cpuctrlmask;
3028 
3029 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_IC_ENABLE
3030 	    | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_BPRD_ENABLE ;
3031 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
3032 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3033 	    | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BPRD_ENABLE
3034 	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
3035 	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
3036 
3037 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3038 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3039 #endif
3040 
3041 	cpuctrl = parse_cpu_options(args, armv7_options, cpuctrl);
3042 
3043 #ifdef __ARMEB__
3044 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3045 #endif
3046 
3047 #ifndef ARM_HAS_VBAR
3048 	if (vector_page == ARM_VECTORS_HIGH)
3049 		cpuctrl |= CPU_CONTROL_VECRELOC;
3050 #endif
3051 
3052 	/* Clear out the cache */
3053 	cpu_idcache_wbinv_all();
3054 
3055 	/* Set the control register */
3056 	curcpu()->ci_ctrl = cpuctrl;
3057 	cpu_control(0xffffffff, cpuctrl);
3058 }
3059 #endif /* CPU_CORTEX */
3060 
3061 
3062 #if defined(CPU_ARM1136) || defined(CPU_ARM1176)
3063 void
3064 arm11x6_setup(char *args)
3065 {
3066 	int cpuctrl, cpuctrl_wax;
3067 	uint32_t auxctrl, auxctrl_wax;
3068 	uint32_t tmp, tmp2;
3069 	uint32_t sbz = 0;
3070 	uint32_t cpuid;
3071 
3072 	cpuid = cpu_id();
3073 
3074 	cpuctrl =
3075 		CPU_CONTROL_MMU_ENABLE  |
3076 		CPU_CONTROL_DC_ENABLE   |
3077 		CPU_CONTROL_WBUF_ENABLE |
3078 		CPU_CONTROL_32BP_ENABLE |
3079 		CPU_CONTROL_32BD_ENABLE |
3080 		CPU_CONTROL_LABT_ENABLE |
3081 		CPU_CONTROL_SYST_ENABLE |
3082 		CPU_CONTROL_IC_ENABLE;
3083 
3084 	/*
3085 	 * "write as existing" bits
3086 	 * inverse of this is mask
3087 	 */
3088 	cpuctrl_wax =
3089 		(3 << 30) |
3090 		(1 << 29) |
3091 		(1 << 28) |
3092 		(3 << 26) |
3093 		(3 << 19) |
3094 		(1 << 17);
3095 
3096 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3097 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3098 #endif
3099 
3100 	cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);
3101 
3102 #ifdef __ARMEB__
3103 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3104 #endif
3105 
3106 #ifndef ARM_HAS_VBAR
3107 	if (vector_page == ARM_VECTORS_HIGH)
3108 		cpuctrl |= CPU_CONTROL_VECRELOC;
3109 #endif
3110 
3111 	auxctrl = 0;
3112 	auxctrl_wax = ~0;
3113 	/*
3114 	 * This option enables the workaround for the 364296 ARM1136
3115 	 * r0pX errata (possible cache data corruption with
3116 	 * hit-under-miss enabled). It sets the undocumented bit 31 in
3117 	 * the auxiliary control register and the FI bit in the control
3118 	 * register, thus disabling hit-under-miss without putting the
3119 	 * processor into full low interrupt latency mode. ARM11MPCore
3120 	 * is not affected.
3121 	 */
3122 	if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1136JS) { /* ARM1136JSr0pX */
3123 		cpuctrl |= CPU_CONTROL_FI_ENABLE;
3124 		auxctrl = ARM1136_AUXCTL_PFI;
3125 		auxctrl_wax = ~ARM1136_AUXCTL_PFI;
3126 	}
3127 
3128 	/*
3129 	 * Enable a workaround for an ARM1176 r0 erratum.
3130 	 */
3131 	if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */
3132 		auxctrl = ARM1176_AUXCTL_PHD;
3133 		auxctrl_wax = ~ARM1176_AUXCTL_PHD;
3134 	}
3135 
3136 	/* Clear out the cache */
3137 	cpu_idcache_wbinv_all();
3138 
3139 	/* Now really make sure they are clean.  */
3140 	__asm volatile ("mcr\tp15, 0, %0, c7, c7, 0" : : "r"(sbz));
3141 
3142 	/* Allow detection code to find the VFP if it's fitted.  */
3143 	__asm volatile ("mcr\tp15, 0, %0, c1, c0, 2" : : "r" (0x0fffffff));
3144 
3145 	/* Set the control register */
3146 	curcpu()->ci_ctrl = cpuctrl;
3147 	cpu_control(~cpuctrl_wax, cpuctrl);
3148 
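
	/*
	 * Read-modify-write the auxiliary control register: keep the
	 * "write as existing" bits, fold in the new ones, and write
	 * the register back only if its value actually changed.
	 */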
3149 	__asm volatile ("mrc	p15, 0, %0, c1, c0, 1\n\t"
3150 			"and	%1, %0, %2\n\t"
3151 			"orr	%1, %1, %3\n\t"
3152 			"teq	%0, %1\n\t"
3153 			"mcrne	p15, 0, %1, c1, c0, 1\n\t"
3154 			: "=r"(tmp), "=r"(tmp2) :
3155 			  "r"(auxctrl_wax), "r"(auxctrl));
3156 
3157 	/* And again. */
3158 	cpu_idcache_wbinv_all();
3159 }
3160 #endif	/* CPU_ARM1136 || CPU_ARM1176 */
3161 
3162 #ifdef CPU_SA110
3163 struct cpu_option sa110_options[] = {
3164 #ifdef COMPAT_12
3165 	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3166 	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
3167 #endif	/* COMPAT_12 */
3168 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3169 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3170 	{ "sa110.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3171 	{ "sa110.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
3172 	{ "sa110.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
3173 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3174 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
3175 	{ "sa110.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3176 	{ NULL,			IGN, IGN, 0 }
3177 };
3178 
3179 void
3180 sa110_setup(char *args)
3181 {
3182 	int cpuctrl, cpuctrlmask;
3183 
3184 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3185 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3186 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3187 		 | CPU_CONTROL_WBUF_ENABLE;
3188 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3189 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3190 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3191 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
3192 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
3193 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
3194 		 | CPU_CONTROL_CPCLK;
3195 
3196 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3197 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3198 #endif
3199 
3200 	cpuctrl = parse_cpu_options(args, sa110_options, cpuctrl);
3201 
3202 #ifdef __ARMEB__
3203 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3204 #endif
3205 
3206 #ifndef ARM_HAS_VBAR
3207 	if (vector_page == ARM_VECTORS_HIGH)
3208 		cpuctrl |= CPU_CONTROL_VECRELOC;
3209 #endif
3210 
3211 	/* Clear out the cache */
3212 	cpu_idcache_wbinv_all();
3213 
3214 	/* Set the control register */
3215 	curcpu()->ci_ctrl = cpuctrl;
3216 /*	cpu_control(cpuctrlmask, cpuctrl);*/
3217 	cpu_control(0xffffffff, cpuctrl);
3218 
3219 	/*
3220 	 * Enable clock switching.  Note that this doesn't read or write
3221 	 * r0; r0 is just there to make the asm valid.
3222 	 */
3223 	__asm ("mcr 15, 0, r0, c15, c1, 2");
3224 }
3225 #endif	/* CPU_SA110 */
3226 
3227 #if defined(CPU_SA1100) || defined(CPU_SA1110)
3228 struct cpu_option sa11x0_options[] = {
3229 #ifdef COMPAT_12
3230 	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3231 	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
3232 #endif	/* COMPAT_12 */
3233 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3234 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3235 	{ "sa11x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3236 	{ "sa11x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
3237 	{ "sa11x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
3238 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3239 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
3240 	{ "sa11x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3241 	{ NULL,			IGN, IGN, 0 }
3242 };
3243 
3244 void
3245 sa11x0_setup(char *args)
3246 {
3247 	int cpuctrl, cpuctrlmask;
3248 
3249 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3250 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3251 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3252 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
3253 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3254 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3255 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3256 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
3257 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
3258 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
3259 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
3260 
3261 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3262 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3263 #endif
3264 
3265 	cpuctrl = parse_cpu_options(args, sa11x0_options, cpuctrl);
3266 
3267 #ifdef __ARMEB__
3268 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3269 #endif
3270 
3271 #ifndef ARM_HAS_VBAR
3272 	if (vector_page == ARM_VECTORS_HIGH)
3273 		cpuctrl |= CPU_CONTROL_VECRELOC;
3274 #endif
3275 
3276 	/* Clear out the cache */
3277 	cpu_idcache_wbinv_all();
3278 
3279 	/* Set the control register */
3280 	curcpu()->ci_ctrl = cpuctrl;
3281 	cpu_control(0xffffffff, cpuctrl);
3282 }
3283 #endif	/* CPU_SA1100 || CPU_SA1110 */
3284 
3285 #if defined(CPU_FA526)
3286 struct cpu_option fa526_options[] = {
3287 #ifdef COMPAT_12
3288 	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3289 	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
3290 #endif	/* COMPAT_12 */
3291 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3292 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3293 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3294 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
3295 	{ NULL,			IGN, IGN, 0 }
3296 };
3297 
3298 void
3299 fa526_setup(char *args)
3300 {
3301 	int cpuctrl, cpuctrlmask;
3302 
3303 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3304 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3305 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3306 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
3307 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3308 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3309 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3310 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
3311 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
3312 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
3313 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
3314 
3315 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3316 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3317 #endif
3318 
3319 	cpuctrl = parse_cpu_options(args, fa526_options, cpuctrl);
3320 
3321 #ifdef __ARMEB__
3322 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3323 #endif
3324 
3325 #ifndef ARM_HAS_VBAR
3326 	if (vector_page == ARM_VECTORS_HIGH)
3327 		cpuctrl |= CPU_CONTROL_VECRELOC;
3328 #endif
3329 
3330 	/* Clear out the cache */
3331 	cpu_idcache_wbinv_all();
3332 
3333 	/* Set the control register */
3334 	curcpu()->ci_ctrl = cpuctrl;
3335 	cpu_control(0xffffffff, cpuctrl);
3336 }
3337 #endif	/* CPU_FA526 */
3338 
3339 #if defined(CPU_IXP12X0)
3340 struct cpu_option ixp12x0_options[] = {
3341 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3342 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3343 	{ "ixp12x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3344 	{ "ixp12x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
3345 	{ "ixp12x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
3346 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3347 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
3348 	{ "ixp12x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3349 	{ NULL,			IGN, IGN, 0 }
3350 };
3351 
3352 void
3353 ixp12x0_setup(char *args)
3354 {
3355 	int cpuctrl, cpuctrlmask;
3356 
3357 
3358 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE
3359 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_SYST_ENABLE
3360 		 | CPU_CONTROL_IC_ENABLE;
3361 
3362 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_AFLT_ENABLE
3363 		 | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE
3364 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_SYST_ENABLE
3365 		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_IC_ENABLE
3366 		 | CPU_CONTROL_VECRELOC;
3367 
3368 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3369 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3370 #endif
3371 
3372 	cpuctrl = parse_cpu_options(args, ixp12x0_options, cpuctrl);
3373 
3374 #ifdef __ARMEB__
3375 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3376 #endif
3377 
3378 #ifndef ARM_HAS_VBAR
3379 	if (vector_page == ARM_VECTORS_HIGH)
3380 		cpuctrl |= CPU_CONTROL_VECRELOC;
3381 #endif
3382 
3383 	/* Clear out the cache */
3384 	cpu_idcache_wbinv_all();
3385 
3386 	/* Set the control register */
3387 	curcpu()->ci_ctrl = cpuctrl;
3388 	/* cpu_control(0xffffffff, cpuctrl); */
3389 	cpu_control(cpuctrlmask, cpuctrl);
3390 }
3391 #endif /* CPU_IXP12X0 */
3392 
3393 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
3394     defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425) || defined(CPU_CORTEX)
3395 struct cpu_option xscale_options[] = {
3396 #ifdef COMPAT_12
3397 	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
3398 	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3399 #endif	/* COMPAT_12 */
3400 	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
3401 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3402 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3403 	{ "xscale.branchpredict", BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
3404 	{ "xscale.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3405 	{ "xscale.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
3406 	{ "xscale.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
3407 	{ NULL,			IGN, IGN, 0 }
3408 };
3409 
3410 void
3411 xscale_setup(char *args)
3412 {
3413 	uint32_t auxctl;
3414 	int cpuctrl, cpuctrlmask;
3415 
3416 	/*
3417 	 * The XScale Write Buffer is always enabled.  Our option
3418 	 * is to enable/disable coalescing.  Note that bits 6:3
3419 	 * must always be enabled.
3420 	 */
3421 
3422 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3423 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3424 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3425 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
3426 		 | CPU_CONTROL_BPRD_ENABLE;
3427 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3428 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3429 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3430 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
3431 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
3432 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
3433 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
3434 
3435 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3436 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3437 #endif
3438 
3439 	cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl);
3440 
3441 #ifdef __ARMEB__
3442 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3443 #endif
3444 
3445 #ifndef ARM_HAS_VBAR
3446 	if (vector_page == ARM_VECTORS_HIGH)
3447 		cpuctrl |= CPU_CONTROL_VECRELOC;
3448 #endif
3449 
3450 	/* Clear out the cache */
3451 	cpu_idcache_wbinv_all();
3452 
3453 	/*
3454 	 * Set the control register.  Note that bits 6:3 must always
3455 	 * be set to 1.
3456 	 */
3457 	curcpu()->ci_ctrl = cpuctrl;
3458 /*	cpu_control(cpuctrlmask, cpuctrl);*/
3459 	cpu_control(0xffffffff, cpuctrl);
3460 
3461 	/* Make sure write coalescing is turned on */
3462 	__asm volatile("mrc p15, 0, %0, c1, c0, 1"
3463 		: "=r" (auxctl));
3464 #ifdef XSCALE_NO_COALESCE_WRITES
3465 	auxctl |= XSCALE_AUXCTL_K;
3466 #else
3467 	auxctl &= ~XSCALE_AUXCTL_K;
3468 #endif
3469 	__asm volatile("mcr p15, 0, %0, c1, c0, 1"
3470 		: : "r" (auxctl));
3471 }
3472 #endif	/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || __CPU_XSCALE_PXA2XX || CPU_XSCALE_IXP425 */
3473 
3474 #if defined(CPU_SHEEVA)
3475 struct cpu_option sheeva_options[] = {
3476 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3477 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3478 	{ "sheeva.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3479 	{ "sheeva.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
3480 	{ "sheeva.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
3481 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3482 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
3483 	{ "sheeva.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3484 	{ NULL,			IGN, IGN, 0 }
3485 };
3486 
3487 void
3488 sheeva_setup(char *args)
3489 {
3490 	int cpuctrl, cpuctrlmask;
3491 	uint32_t sheeva_ext;
3492 
3493 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
3494 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3495 	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
3496 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
3497 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3498 	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
3499 	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
3500 	    | CPU_CONTROL_BPRD_ENABLE
3501 	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
3502 
3503 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3504 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3505 #endif
3506 
3507 	cpuctrl = parse_cpu_options(args, sheeva_options, cpuctrl);
3508 
3509 	/* Enable DCache Streaming Switch and Write Allocate */
3510 	__asm volatile("mrc p15, 1, %0, c15, c1, 0"
3511 	    : "=r" (sheeva_ext));
3512 
3513 	sheeva_ext |= FC_DCACHE_STREAM_EN | FC_WR_ALLOC_EN;
3514 
3515 	__asm volatile("mcr p15, 1, %0, c15, c1, 0"
3516 	    :: "r" (sheeva_ext));
3517 
3518 	/*
3519 	 * Sheeva has an L2 cache.  Enabling/disabling it would go here;
3520 	 * it is not really supported yet...
3521 	 */
3522 
3523 #ifdef __ARMEB__
3524 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3525 #endif
3526 
3527 #ifndef ARM_HAS_VBAR
3528 	if (vector_page == ARM_VECTORS_HIGH)
3529 		cpuctrl |= CPU_CONTROL_VECRELOC;
3530 #endif
3531 
3532 	/* Clear out the cache */
3533 	cpu_idcache_wbinv_all();
3534 
3535 	/* Now really make sure they are clean.  */
3536 	__asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
3537 
3538 	/* Set the control register */
3539 	curcpu()->ci_ctrl = cpuctrl;
3540 	cpu_control(0xffffffff, cpuctrl);
3541 
3542 	/* And again. */
3543 	cpu_idcache_wbinv_all();
3544 }
3545 #endif	/* CPU_SHEEVA */
3546