/*	$NetBSD: cpufunc.c,v 1.101 2010/10/02 05:37:58 kiyohara Exp $	*/

/*
 * arm7tdmi support code Copyright (c) 2001 John Fremlin
 * arm8 support code Copyright (c) 1997 ARM Limited
 * arm8 support code Copyright (c) 1997 Causality Limited
 * arm9 support code Copyright (C) 2001 ARM Ltd
 * arm11 support code Copyright (c) 2007 Microsoft
 * cortexa8 support code Copyright (c) 2008 3am Software Foundry
 * cortexa8 improvements Copyright (c) Goeran Weinholt
 * Copyright (c) 1997 Mark Brinicombe.
 * Copyright (c) 1997 Causality Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Causality Limited.
 * 4. The name of Causality Limited may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpufuncs.c
 *
 * C functions for supporting CPU / MMU / TLB specific operations.
 *
 * Created	: 30/01/97
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cpufunc.c,v 1.101 2010/10/02 05:37:58 kiyohara Exp $");

#include "opt_compat_netbsd.h"
#include "opt_cpuoptions.h"
#include "opt_perfctrs.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/pmc.h>
#include <sys/systm.h>
#include <machine/cpu.h>
#include <machine/bootconfig.h>
#include <arch/arm/arm/disassem.h>

#include <uvm/uvm.h>

#include <arm/cpuconf.h>
#include <arm/cpufunc.h>

#ifdef CPU_XSCALE_80200
#include <arm/xscale/i80200reg.h>
#include <arm/xscale/i80200var.h>
#endif

#ifdef CPU_XSCALE_80321
#include <arm/xscale/i80321reg.h>
#include <arm/xscale/i80321var.h>
#endif

#ifdef CPU_XSCALE_IXP425
#include <arm/xscale/ixp425reg.h>
#include <arm/xscale/ixp425var.h>
#endif

#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321)
#include <arm/xscale/xscalereg.h>
#endif

#if defined(PERFCTRS)
struct arm_pmc_funcs *arm_pmc;
#endif

/* PRIMARY CACHE VARIABLES */
int	arm_picache_size;
int	arm_picache_line_size;
int	arm_picache_ways;

int	arm_pdcache_size;	/* and unified */
int	arm_pdcache_line_size;
int	arm_pdcache_ways;
#if (ARM_MMU_V6 + ARM_MMU_V7) != 0
int	arm_cache_prefer_mask;
#endif


int	arm_pcache_type;
int	arm_pcache_unified;

int	arm_dcache_align;
int	arm_dcache_align_mask;

/* 1 == use cpu_sleep(), 0 == don't */
int cpu_do_powersave;
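/*
 * Illustrative sketch (not verbatim kernel code): the idle path is
 * assumed to consult this flag and enter the CPU-specific sleep hook,
 * roughly as follows, with cpu_sleep() dispatching through the
 * cf_sleep member of the cpufuncs tables defined below.
 *
 *	void
 *	cpu_idle(void)
 *	{
 *		if (cpu_do_powersave)
 *			cpu_sleep(0);	// e.g. arm11_sleep on ARM11
 *	}
 */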

#ifdef CPU_ARM2
struct cpu_functions arm2_cpufuncs = {
	/* CPU functions */

	.cf_id			= arm2_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= (void *)cpufunc_nullop,

	/* TLB functions */

	.cf_tlb_flushID		= cpufunc_nullop,
	.cf_tlb_flushID_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushI		= cpufunc_nullop,
	.cf_tlb_flushI_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushD		= cpufunc_nullop,
	.cf_tlb_flushD_SE	= (void *)cpufunc_nullop,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *) cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm3_cache_flush,
	.cf_dcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_dcache_inv_range	= (void *)cpufunc_nullop,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= cpufunc_nullop,
	.cf_idcache_wbinv_range	= (void *)cpufunc_nullop,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= early_abort_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_setup		= (void *)cpufunc_nullop

};
#endif	/* CPU_ARM2 */

#ifdef CPU_ARM250
struct cpu_functions arm250_cpufuncs = {
	/* CPU functions */

	.cf_id			= arm250_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= (void *)cpufunc_nullop,

	/* TLB functions */

	.cf_tlb_flushID		= cpufunc_nullop,
	.cf_tlb_flushID_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushI		= cpufunc_nullop,
	.cf_tlb_flushI_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushD		= cpufunc_nullop,
	.cf_tlb_flushD_SE	= (void *)cpufunc_nullop,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *) cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm3_cache_flush,
	.cf_dcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_dcache_inv_range	= (void *)cpufunc_nullop,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= cpufunc_nullop,
	.cf_idcache_wbinv_range	= (void *)cpufunc_nullop,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= early_abort_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_setup		= (void *)cpufunc_nullop

};
#endif	/* CPU_ARM250 */

#ifdef CPU_ARM3
struct cpu_functions arm3_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= arm3_control,

	/* TLB functions */

	.cf_tlb_flushID		= cpufunc_nullop,
	.cf_tlb_flushID_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushI		= cpufunc_nullop,
	.cf_tlb_flushI_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushD		= cpufunc_nullop,
	.cf_tlb_flushD_SE	= (void *)cpufunc_nullop,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *) cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm3_cache_flush,
	.cf_dcache_wbinv_range	= (void *)arm3_cache_flush,
	.cf_dcache_inv_range	= (void *)arm3_cache_flush,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm3_cache_flush,
	.cf_idcache_wbinv_range	= (void *)arm3_cache_flush,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= early_abort_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_setup		= (void *)cpufunc_nullop

};
#endif	/* CPU_ARM3 */

#ifdef CPU_ARM6
struct cpu_functions arm6_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm67_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm67_tlb_flush,
	.cf_tlb_flushID_SE	= arm67_tlb_purge,
	.cf_tlb_flushI		= arm67_tlb_flush,
	.cf_tlb_flushI_SE	= arm67_tlb_purge,
	.cf_tlb_flushD		= arm67_tlb_flush,
	.cf_tlb_flushD_SE	= arm67_tlb_purge,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *) cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm67_cache_flush,
	.cf_dcache_wbinv_range	= (void *)arm67_cache_flush,
	.cf_dcache_inv_range	= (void *)arm67_cache_flush,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm67_cache_flush,
	.cf_idcache_wbinv_range	= (void *)arm67_cache_flush,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

#ifdef ARM6_LATE_ABORT
	.cf_dataabt_fixup	= late_abort_fixup,
#else
	.cf_dataabt_fixup	= early_abort_fixup,
#endif
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm67_context_switch,

	.cf_setup		= arm6_setup

};
#endif	/* CPU_ARM6 */

#ifdef CPU_ARM7
struct cpu_functions arm7_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm67_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm67_tlb_flush,
	.cf_tlb_flushID_SE	= arm67_tlb_purge,
	.cf_tlb_flushI		= arm67_tlb_flush,
	.cf_tlb_flushI_SE	= arm67_tlb_purge,
	.cf_tlb_flushD		= arm67_tlb_flush,
	.cf_tlb_flushD_SE	= arm67_tlb_purge,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm67_cache_flush,
	.cf_dcache_wbinv_range	= (void *)arm67_cache_flush,
	.cf_dcache_inv_range	= (void *)arm67_cache_flush,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm67_cache_flush,
	.cf_idcache_wbinv_range	= (void *)arm67_cache_flush,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= late_abort_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm67_context_switch,

	.cf_setup		= arm7_setup

};
#endif	/* CPU_ARM7 */

#ifdef CPU_ARM7TDMI
struct cpu_functions arm7tdmi_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm7tdmi_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm7tdmi_tlb_flushID,
	.cf_tlb_flushID_SE	= arm7tdmi_tlb_flushID_SE,
	.cf_tlb_flushI		= arm7tdmi_tlb_flushID,
	.cf_tlb_flushI_SE	= arm7tdmi_tlb_flushID_SE,
	.cf_tlb_flushD		= arm7tdmi_tlb_flushID,
	.cf_tlb_flushD_SE	= arm7tdmi_tlb_flushID_SE,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm7tdmi_cache_flushID,
	.cf_dcache_wbinv_range	= (void *)arm7tdmi_cache_flushID,
	.cf_dcache_inv_range	= (void *)arm7tdmi_cache_flushID,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm7tdmi_cache_flushID,
	.cf_idcache_wbinv_range	= (void *)arm7tdmi_cache_flushID,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= late_abort_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm7tdmi_context_switch,

	.cf_setup		= arm7tdmi_setup

};
#endif	/* CPU_ARM7TDMI */

#ifdef CPU_ARM8
struct cpu_functions arm8_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm8_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm8_tlb_flushID,
	.cf_tlb_flushID_SE	= arm8_tlb_flushID_SE,
	.cf_tlb_flushI		= arm8_tlb_flushID,
	.cf_tlb_flushI_SE	= arm8_tlb_flushID_SE,
	.cf_tlb_flushD		= arm8_tlb_flushID,
	.cf_tlb_flushD_SE	= arm8_tlb_flushID_SE,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm8_cache_purgeID,
	.cf_dcache_wbinv_range	= (void *)arm8_cache_purgeID,
/*XXX*/	.cf_dcache_inv_range	= (void *)arm8_cache_purgeID,
	.cf_dcache_wb_range	= (void *)arm8_cache_cleanID,

	.cf_idcache_wbinv_all	= arm8_cache_purgeID,
	.cf_idcache_wbinv_range = (void *)arm8_cache_purgeID,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm8_context_switch,

	.cf_setup		= arm8_setup
};
#endif	/* CPU_ARM8 */

#ifdef CPU_ARM9
struct cpu_functions arm9_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm9_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= arm9_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= arm9_icache_sync_all,
	.cf_icache_sync_range	= arm9_icache_sync_range,

	.cf_dcache_wbinv_all	= arm9_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= arm9_dcache_wbinv_range,
/*XXX*/	.cf_dcache_inv_range	= arm9_dcache_wbinv_range,
	.cf_dcache_wb_range	= arm9_dcache_wb_range,

	.cf_idcache_wbinv_all	= arm9_idcache_wbinv_all,
	.cf_idcache_wbinv_range = arm9_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm9_context_switch,

	.cf_setup		= arm9_setup

};
#endif /* CPU_ARM9 */

#if defined(CPU_ARM9E) || defined(CPU_ARM10)
struct cpu_functions armv5_ec_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= armv5_ec_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= arm10_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= arm10_tlb_flushI_SE,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv5_ec_icache_sync_all,
	.cf_icache_sync_range	= armv5_ec_icache_sync_range,

	.cf_dcache_wbinv_all	= armv5_ec_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= armv5_ec_dcache_wbinv_range,
/*XXX*/	.cf_dcache_inv_range	= armv5_ec_dcache_wbinv_range,
	.cf_dcache_wb_range	= armv5_ec_dcache_wb_range,

	.cf_idcache_wbinv_all	= armv5_ec_idcache_wbinv_all,
	.cf_idcache_wbinv_range = armv5_ec_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm10_context_switch,

	.cf_setup		= arm10_setup

};
#endif /* CPU_ARM9E || CPU_ARM10 */

#ifdef CPU_ARM10
struct cpu_functions arm10_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= armv5_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= arm10_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= arm10_tlb_flushI_SE,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv5_icache_sync_all,
	.cf_icache_sync_range	= armv5_icache_sync_range,

	.cf_dcache_wbinv_all	= armv5_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= armv5_dcache_wbinv_range,
/*XXX*/	.cf_dcache_inv_range	= armv5_dcache_wbinv_range,
	.cf_dcache_wb_range	= armv5_dcache_wb_range,

	.cf_idcache_wbinv_all	= armv5_idcache_wbinv_all,
	.cf_idcache_wbinv_range = armv5_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm10_context_switch,

	.cf_setup		= arm10_setup

};
#endif /* CPU_ARM10 */

#ifdef CPU_ARM11
struct cpu_functions arm11_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm11_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm11_tlb_flushID,
	.cf_tlb_flushID_SE	= arm11_tlb_flushID_SE,
	.cf_tlb_flushI		= arm11_tlb_flushI,
	.cf_tlb_flushI_SE	= arm11_tlb_flushI_SE,
	.cf_tlb_flushD		= arm11_tlb_flushD,
	.cf_tlb_flushD_SE	= arm11_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv6_icache_sync_all,
	.cf_icache_sync_range	= armv6_icache_sync_range,

	.cf_dcache_wbinv_all	= armv6_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= armv6_dcache_wbinv_range,
	.cf_dcache_inv_range	= armv6_dcache_inv_range,
	.cf_dcache_wb_range	= armv6_dcache_wb_range,

	.cf_idcache_wbinv_all	= armv6_idcache_wbinv_all,
	.cf_idcache_wbinv_range = armv6_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= arm11_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= arm11_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm11_context_switch,

	.cf_setup		= arm11_setup

};
#endif /* CPU_ARM11 */

#ifdef CPU_ARM1136
struct cpu_functions arm1136_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm1136_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm11_tlb_flushID,
	.cf_tlb_flushID_SE	= arm11_tlb_flushID_SE,
	.cf_tlb_flushI		= arm11_tlb_flushI,
	.cf_tlb_flushI_SE	= arm11_tlb_flushI_SE,
	.cf_tlb_flushD		= arm11_tlb_flushD,
	.cf_tlb_flushD_SE	= arm11_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= arm1136_icache_sync_all,	/* 411920 */
	.cf_icache_sync_range	= arm1136_icache_sync_range,	/* 371025 */

	.cf_dcache_wbinv_all	= arm1136_dcache_wbinv_all,	/* 411920 */
	.cf_dcache_wbinv_range	= armv6_dcache_wbinv_range,
	.cf_dcache_inv_range	= armv6_dcache_inv_range,
	.cf_dcache_wb_range	= armv6_dcache_wb_range,

	.cf_idcache_wbinv_all	= arm1136_idcache_wbinv_all,	/* 411920 */
	.cf_idcache_wbinv_range = arm1136_idcache_wbinv_range,	/* 371025 */

	/* Other functions */

	.cf_flush_prefetchbuf	= arm1136_flush_prefetchbuf,
	.cf_drain_writebuf	= arm11_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= arm11_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm11_context_switch,

	.cf_setup		= arm1136_setup

};
#endif /* CPU_ARM1136 */

#ifdef CPU_SA110
struct cpu_functions sa110_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= sa1_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= sa1_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= sa1_cache_syncI,
	.cf_icache_sync_range	= sa1_cache_syncI_rng,

	.cf_dcache_wbinv_all	= sa1_cache_purgeD,
	.cf_dcache_wbinv_range	= sa1_cache_purgeD_rng,
/*XXX*/	.cf_dcache_inv_range	= sa1_cache_purgeD_rng,
	.cf_dcache_wb_range	= sa1_cache_cleanD_rng,

	.cf_idcache_wbinv_all	= sa1_cache_purgeID,
	.cf_idcache_wbinv_range	= sa1_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= sa110_context_switch,

	.cf_setup		= sa110_setup
};
#endif	/* CPU_SA110 */

#if defined(CPU_SA1100) || defined(CPU_SA1110)
struct cpu_functions sa11x0_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= sa1_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= sa1_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= sa1_cache_syncI,
	.cf_icache_sync_range	= sa1_cache_syncI_rng,

	.cf_dcache_wbinv_all	= sa1_cache_purgeD,
	.cf_dcache_wbinv_range	= sa1_cache_purgeD_rng,
/*XXX*/	.cf_dcache_inv_range	= sa1_cache_purgeD_rng,
	.cf_dcache_wb_range	= sa1_cache_cleanD_rng,

	.cf_idcache_wbinv_all	= sa1_cache_purgeID,
	.cf_idcache_wbinv_range	= sa1_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf	= sa11x0_drain_readbuf,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= sa11x0_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= sa11x0_context_switch,

	.cf_setup		= sa11x0_setup
};
#endif	/* CPU_SA1100 || CPU_SA1110 */

#if defined(CPU_FA526)
struct cpu_functions fa526_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= fa526_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= fa526_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= fa526_tlb_flushI_SE,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= fa526_icache_sync_all,
	.cf_icache_sync_range	= fa526_icache_sync_range,

	.cf_dcache_wbinv_all	= fa526_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= fa526_dcache_wbinv_range,
	.cf_dcache_inv_range	= fa526_dcache_inv_range,
	.cf_dcache_wb_range	= fa526_dcache_wb_range,

	.cf_idcache_wbinv_all	= fa526_idcache_wbinv_all,
	.cf_idcache_wbinv_range	= fa526_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= fa526_flush_prefetchbuf,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= fa526_flush_brnchtgt_E,

	.cf_sleep		= fa526_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= fa526_context_switch,

	.cf_setup		= fa526_setup
};
#endif	/* CPU_FA526 */

#ifdef CPU_IXP12X0
struct cpu_functions ixp12x0_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= sa1_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= sa1_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= sa1_cache_syncI,
	.cf_icache_sync_range	= sa1_cache_syncI_rng,

	.cf_dcache_wbinv_all	= sa1_cache_purgeD,
	.cf_dcache_wbinv_range	= sa1_cache_purgeD_rng,
/*XXX*/	.cf_dcache_inv_range	= sa1_cache_purgeD_rng,
	.cf_dcache_wb_range	= sa1_cache_cleanD_rng,

	.cf_idcache_wbinv_all	= sa1_cache_purgeID,
	.cf_idcache_wbinv_range	= sa1_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf	= ixp12x0_drain_readbuf,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= ixp12x0_context_switch,

	.cf_setup		= ixp12x0_setup
};
#endif	/* CPU_IXP12X0 */

#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
    defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425)
struct cpu_functions xscale_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= xscale_cpwait,

	/* MMU functions */

	.cf_control		= xscale_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= xscale_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= xscale_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= xscale_cache_syncI,
	.cf_icache_sync_range	= xscale_cache_syncI_rng,

	.cf_dcache_wbinv_all	= xscale_cache_purgeD,
	.cf_dcache_wbinv_range	= xscale_cache_purgeD_rng,
	.cf_dcache_inv_range	= xscale_cache_flushD_rng,
	.cf_dcache_wb_range	= xscale_cache_cleanD_rng,

	.cf_idcache_wbinv_all	= xscale_cache_purgeID,
	.cf_idcache_wbinv_range = xscale_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= xscale_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= xscale_context_switch,

	.cf_setup		= xscale_setup
};
#endif /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || __CPU_XSCALE_PXA2XX || CPU_XSCALE_IXP425 */

#if defined(CPU_CORTEX)
struct cpu_functions cortex_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= armv7_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm11_tlb_flushID,
	.cf_tlb_flushID_SE	= armv7_tlb_flushID_SE,
	.cf_tlb_flushI		= arm11_tlb_flushI,
	.cf_tlb_flushI_SE	= arm11_tlb_flushI_SE,
	.cf_tlb_flushD		= arm11_tlb_flushD,
	.cf_tlb_flushD_SE	= arm11_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv7_icache_sync_all,
	.cf_dcache_wbinv_all	= armv7_dcache_wbinv_all,

	.cf_dcache_inv_range	= armv7_dcache_inv_range,
	.cf_dcache_wb_range	= armv7_dcache_wb_range,
	.cf_dcache_wbinv_range	= armv7_dcache_wbinv_range,

	.cf_icache_sync_range	= armv7_icache_sync_range,
	.cf_idcache_wbinv_range = armv7_idcache_wbinv_range,

	.cf_idcache_wbinv_all	= armv7_idcache_wbinv_all,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= arm11_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= armv7_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= armv7_context_switch,

	.cf_setup		= armv7_setup

};
#endif /* CPU_CORTEX */

#ifdef CPU_SHEEVA
struct cpu_functions sheeva_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= armv5_ec_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= arm10_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= arm10_tlb_flushI_SE,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv5_ec_icache_sync_all,
	.cf_icache_sync_range	= armv5_ec_icache_sync_range,

	.cf_dcache_wbinv_all	= armv5_ec_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= sheeva_dcache_wbinv_range,
	.cf_dcache_inv_range	= sheeva_dcache_inv_range,
	.cf_dcache_wb_range	= sheeva_dcache_wb_range,

	.cf_idcache_wbinv_all	= armv5_ec_idcache_wbinv_all,
	.cf_idcache_wbinv_range = sheeva_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm10_context_switch,

	.cf_setup		= sheeva_setup
};
#endif /* CPU_SHEEVA */


/*
 * Global constants also used by locore.s
 */

struct cpu_functions cpufuncs;
u_int cputype;
u_int cpu_reset_needs_v4_MMU_disable;	/* flag used in locore.s */
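/*
 * How the indirection above is consumed: callers do not touch the
 * cf_* members directly.  cpufunc.h wraps each member in a macro,
 * along these lines (an illustrative sketch; see cpufunc.h for the
 * authoritative list):
 *
 *	#define cpu_id()		cpufuncs.cf_id()
 *	#define cpu_tlb_flushID()	cpufuncs.cf_tlb_flushID()
 *	#define cpu_idcache_wbinv_all()	cpufuncs.cf_idcache_wbinv_all()
 *
 * set_cpufuncs() below selects the per-CPU table once at boot, and the
 * rest of the kernel stays CPU-neutral.
 */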

#if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) || \
    defined(CPU_ARM9E) || defined(CPU_ARM10) || defined(CPU_ARM11) || \
    defined(CPU_FA526) || \
    defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
    defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425) || \
    defined(CPU_CORTEX) || defined(CPU_SHEEVA)
static void get_cachetype_cp15(void);

/* Additional cache information local to this file.  Log2 of some of the
   above numbers.  */
static int	arm_dcache_l2_nsets;
static int	arm_dcache_l2_assoc;
static int	arm_dcache_l2_linesize;
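/*
 * Example of the log2 encoding: a 32-byte L2 cache line is stored as
 * arm_dcache_l2_linesize == 5, so the byte size is recovered with
 * (1U << arm_dcache_l2_linesize), exactly as the set/way loop
 * increments computed in set_cpufuncs() do.
 */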

#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
static inline u_int
get_cachesize_cp15(int cssr)
{
	u_int csid;

#if (CPU_CORTEX) > 0
	__asm volatile(".arch\tarmv7a");
	__asm volatile("mcr p15, 2, %0, c0, c0, 0" :: "r" (cssr));
	__asm volatile("isb");	/* sync to the new cssr */
#else
	__asm volatile("mcr p15, 1, %0, c0, c0, 2" :: "r" (cssr));
#endif
	__asm volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (csid));
	return csid;
}
#endif
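/*
 * Note on the coprocessor accesses above, stated for the Cortex
 * (ARMv7) path as an assumption from the architected register names:
 * "mcr p15, 2, ..., c0, c0, 0" writes CSSELR (Cache Size Selection
 * Register) and "mrc p15, 1, ..., c0, c0, 0" reads back CCSIDR
 * (Cache Size ID Register), whose fields are then unpacked by the
 * CPU_CSID_*() macros in get_cachetype_cp15() below.
 */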

static void
get_cachetype_cp15(void)
{
	u_int ctype, isize, dsize;
	u_int multiplier;

	__asm volatile("mrc p15, 0, %0, c0, c0, 1"
		: "=r" (ctype));

	/*
	 * ...and thus spake the ARM ARM:
	 *
	 * If an <opcode2> value corresponding to an unimplemented or
	 * reserved ID register is encountered, the System Control
	 * processor returns the value of the main ID register.
	 */
	if (ctype == cpu_id())
		goto out;

#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
	if (CPU_CT_FORMAT(ctype) == 4) {
		u_int csid0, csid1, csid2;

		isize = 1U << (CPU_CT4_ILINE(ctype) + 2);
		dsize = 1U << (CPU_CT4_DLINE(ctype) + 2);

		csid0 = get_cachesize_cp15(CPU_CSSR_L1); /* select L1 dcache values */
		arm_pdcache_ways = CPU_CSID_ASSOC(csid0) + 1;
		arm_pdcache_line_size = dsize;
		arm_pdcache_size = arm_pdcache_line_size * arm_pdcache_ways;
		arm_pdcache_size *= (CPU_CSID_NUMSETS(csid0) + 1);
		arm_cache_prefer_mask = PAGE_SIZE;

		arm_dcache_align = arm_pdcache_line_size;

		csid1 = get_cachesize_cp15(CPU_CSSR_L1|CPU_CSSR_InD); /* select L1 icache values */
		arm_picache_ways = CPU_CSID_ASSOC(csid1) + 1;
		arm_picache_line_size = isize;
		arm_picache_size = arm_picache_line_size * arm_picache_ways;
		arm_picache_size *= (CPU_CSID_NUMSETS(csid1) + 1);
		arm_cache_prefer_mask = PAGE_SIZE;

		arm_dcache_align = arm_pdcache_line_size;

		csid2 = get_cachesize_cp15(CPU_CSSR_L2); /* select L2 cache values */
		arm_dcache_l2_assoc = CPU_CSID_ASSOC(csid2) + 1;
		arm_dcache_l2_linesize = 1 << (CPU_CSID_LEN(csid2) + 2);
		arm_dcache_l2_nsets = CPU_CSID_NUMSETS(csid2) + 1;
		arm_pcache_type = CPU_CT_CTYPE_WB14;
		goto out;
	}
#endif /* ARM_MMU_V6 + ARM_MMU_V7 > 0 */

	if ((ctype & CPU_CT_S) == 0)
		arm_pcache_unified = 1;

	/*
	 * If you want to know how this code works, go read the ARM ARM.
	 */
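	/*
	 * A worked example of the decode below: a 16KB, 4-way,
	 * 32-byte-line cache with the M bit clear gives multiplier = 2
	 * and
	 *
	 *	line size = 1U << (LEN + 3)	=> LEN = 2
	 *	ways	  = 2 << (ASSOC - 1)	=> ASSOC = 2
	 *	size	  = 2 << (SIZE + 8)	=> SIZE = 5  (2 << 13 == 16384)
	 *
	 * where LEN, ASSOC and SIZE are the CPU_CT_xSIZE_*() fields.
	 */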

	arm_pcache_type = CPU_CT_CTYPE(ctype);

	if (arm_pcache_unified == 0) {
		isize = CPU_CT_ISIZE(ctype);
		multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
		arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
		if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
			if (isize & CPU_CT_xSIZE_M)
				arm_picache_line_size = 0; /* not present */
			else
				arm_picache_ways = 1;
		} else {
			arm_picache_ways = multiplier <<
			    (CPU_CT_xSIZE_ASSOC(isize) - 1);
#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
			if (CPU_CT_xSIZE_P & isize)
				arm_cache_prefer_mask |=
				    __BIT(9 + CPU_CT_xSIZE_SIZE(isize)
					  - CPU_CT_xSIZE_ASSOC(isize))
				    - PAGE_SIZE;
#endif
		}
		arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
	}

	dsize = CPU_CT_DSIZE(ctype);
	multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
	arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
	if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
		if (dsize & CPU_CT_xSIZE_M)
			arm_pdcache_line_size = 0; /* not present */
		else
			arm_pdcache_ways = 1;
	} else {
		arm_pdcache_ways = multiplier <<
		    (CPU_CT_xSIZE_ASSOC(dsize) - 1);
#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
		if (CPU_CT_xSIZE_P & dsize)
			arm_cache_prefer_mask |=
			    __BIT(9 + CPU_CT_xSIZE_SIZE(dsize)
				  - CPU_CT_xSIZE_ASSOC(dsize)) - PAGE_SIZE;
#endif
	}
	arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);

	arm_dcache_align = arm_pdcache_line_size;

	arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
	arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
	arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
	    CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);

 out:
	arm_dcache_align_mask = arm_dcache_align - 1;
}
#endif /* ARM7TDMI || ARM8 || ARM9 || ARM9E || ARM10 || ARM11 || FA526 || XSCALE || CORTEX || SHEEVA */

#if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3) || \
    defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_SA110) || \
    defined(CPU_SA1100) || defined(CPU_SA1110) || defined(CPU_IXP12X0)
/* Cache information for CPUs without cache type registers. */
struct cachetab {
	u_int32_t ct_cpuid;
	int	ct_pcache_type;
	int	ct_pcache_unified;
	int	ct_pdcache_size;
	int	ct_pdcache_line_size;
	int	ct_pdcache_ways;
	int	ct_picache_size;
	int	ct_picache_line_size;
	int	ct_picache_ways;
};

struct cachetab cachetab[] = {
    /* cpuid,           cache type,       u,  dsiz, ls, wy,  isiz, ls, wy */
    { CPU_ID_ARM2,      0,                1,     0,  0,  0,     0,  0,  0 },
    { CPU_ID_ARM250,    0,                1,     0,  0,  0,     0,  0,  0 },
    { CPU_ID_ARM3,      CPU_CT_CTYPE_WT,  1,  4096, 16, 64,     0,  0,  0 },
    { CPU_ID_ARM610,	CPU_CT_CTYPE_WT,  1,  4096, 16, 64,     0,  0,  0 },
    { CPU_ID_ARM710,    CPU_CT_CTYPE_WT,  1,  8192, 32,  4,     0,  0,  0 },
    { CPU_ID_ARM7500,   CPU_CT_CTYPE_WT,  1,  4096, 16,  4,     0,  0,  0 },
    { CPU_ID_ARM710A,   CPU_CT_CTYPE_WT,  1,  8192, 16,  4,     0,  0,  0 },
    { CPU_ID_ARM7500FE, CPU_CT_CTYPE_WT,  1,  4096, 16,  4,     0,  0,  0 },
    /* XXX is this type right for SA-1? */
    { CPU_ID_SA110,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 },
    { CPU_ID_SA1100,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
    { CPU_ID_SA1110,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
    { CPU_ID_IXP1200,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 }, /* XXX */
    { 0, 0, 0, 0, 0, 0, 0, 0}
};

static void get_cachetype_table(void);

static void
get_cachetype_table(void)
{
	int i;
	u_int32_t cpuid = cpu_id();

	for (i = 0; cachetab[i].ct_cpuid != 0; i++) {
		if (cachetab[i].ct_cpuid == (cpuid & CPU_ID_CPU_MASK)) {
			arm_pcache_type = cachetab[i].ct_pcache_type;
			arm_pcache_unified = cachetab[i].ct_pcache_unified;
			arm_pdcache_size = cachetab[i].ct_pdcache_size;
			arm_pdcache_line_size =
			    cachetab[i].ct_pdcache_line_size;
			arm_pdcache_ways = cachetab[i].ct_pdcache_ways;
			arm_picache_size = cachetab[i].ct_picache_size;
			arm_picache_line_size =
			    cachetab[i].ct_picache_line_size;
			arm_picache_ways = cachetab[i].ct_picache_ways;
		}
	}
	arm_dcache_align = arm_pdcache_line_size;

	arm_dcache_align_mask = arm_dcache_align - 1;
}

#endif /* ARM2 || ARM250 || ARM3 || ARM6 || ARM7 || SA110 || SA1100 || SA1110 || IXP12X0 */

/*
 * Cannot panic here as we may not have a console yet ...
 */

int
set_cpufuncs(void)
{
	if (cputype == 0) {
		cputype = cpufunc_id();
		cputype &= CPU_ID_CPU_MASK;
	}

	/*
	 * NOTE: cpu_do_powersave defaults to off.  If we encounter a
	 * CPU type where we want to use it by default, then we set it.
	 */
#ifdef CPU_ARM2
	if (cputype == CPU_ID_ARM2) {
		cpufuncs = arm2_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;
		get_cachetype_table();
		return 0;
	}
#endif /* CPU_ARM2 */
#ifdef CPU_ARM250
	if (cputype == CPU_ID_ARM250) {
		cpufuncs = arm250_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;
		get_cachetype_table();
		return 0;
	}
#endif
#ifdef CPU_ARM3
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    (cputype & 0x00000f00) == 0x00000300) {
		cpufuncs = arm3_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;
		get_cachetype_table();
		return 0;
	}
#endif	/* CPU_ARM3 */
#ifdef CPU_ARM6
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    (cputype & 0x00000f00) == 0x00000600) {
		cpufuncs = arm6_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;
		get_cachetype_table();
		pmap_pte_init_generic();
		return 0;
	}
#endif	/* CPU_ARM6 */
#ifdef CPU_ARM7
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    CPU_ID_IS7(cputype) &&
	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V3) {
		cpufuncs = arm7_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;
		get_cachetype_table();
		pmap_pte_init_generic();
		return 0;
	}
#endif	/* CPU_ARM7 */
#ifdef CPU_ARM7TDMI
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    CPU_ID_IS7(cputype) &&
	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V4T) {
		cpufuncs = arm7tdmi_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		return 0;
	}
#endif
#ifdef CPU_ARM8
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    (cputype & 0x0000f000) == 0x00008000) {
		cpufuncs = arm8_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;	/* XXX correct? */
		get_cachetype_cp15();
		pmap_pte_init_arm8();
		return 0;
	}
#endif	/* CPU_ARM8 */
#ifdef CPU_ARM9
	if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
	     (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
	    (cputype & 0x0000f000) == 0x00009000) {
		cpufuncs = arm9_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
		get_cachetype_cp15();
		arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
		arm9_dcache_sets_max =
		    (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) -
		    arm9_dcache_sets_inc;
		arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
		arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
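		/*
		 * These values drive the set/way walk in the arm9
		 * cache routines (a summary, assuming the usual
		 * ARMv4/v5 set/way operand format): the set index
		 * advances in the low bits, one cache line at a time,
		 * while the way index lives in the top bits of the MCR
		 * operand, which is why the index increment is
		 * 1U << (32 - arm_dcache_l2_assoc).
		 */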
1518 #ifdef	ARM9_CACHE_WRITE_THROUGH
1519 		pmap_pte_init_arm9();
1520 #else
1521 		pmap_pte_init_generic();
1522 #endif
1523 		return 0;
1524 	}
1525 #endif /* CPU_ARM9 */
1526 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
1527 	if (cputype == CPU_ID_ARM926EJS ||
1528 	    cputype == CPU_ID_ARM1026EJS) {
1529 		cpufuncs = armv5_ec_cpufuncs;
1530 		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
1531 		get_cachetype_cp15();
1532 		pmap_pte_init_generic();
1533 		return 0;
1534 	}
1535 #endif /* CPU_ARM9E || CPU_ARM10 */
1536 #if defined(CPU_SHEEVA)
1537 	if (cputype == CPU_ID_MV88SV131 ||
1538 	    cputype == CPU_ID_MV88FR571_VD) {
1539 		cpufuncs = sheeva_cpufuncs;
1540 		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
1541 		get_cachetype_cp15();
1542 		pmap_pte_init_generic();
1543 		return 0;
1544 	}
1545 #endif /* CPU_SHEEVA */
1546 #ifdef CPU_ARM10
1547 	if (/* cputype == CPU_ID_ARM1020T || */
1548 	    cputype == CPU_ID_ARM1020E) {
1549 		/*
1550 		 * Select write-through cacheing (this isn't really an
1551 		 * option on ARM1020T).
1552 		 */
1553 		cpufuncs = arm10_cpufuncs;
1554 		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
1555 		get_cachetype_cp15();
1556 		armv5_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
1557 		armv5_dcache_sets_max =
1558 		    (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) -
1559 		    armv5_dcache_sets_inc;
1560 		armv5_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
1561 		armv5_dcache_index_max = 0U - armv5_dcache_index_inc;
1562 		pmap_pte_init_generic();
1563 		return 0;
1564 	}
1565 #endif /* CPU_ARM10 */
1566 #if defined(CPU_ARM11)
1567 	if (cputype == CPU_ID_ARM1136JS ||
1568 	    cputype == CPU_ID_ARM1136JSR1 ||
1569 	    cputype == CPU_ID_ARM1176JS) {
1570 		cpufuncs = arm11_cpufuncs;
1571 #if defined(CPU_ARM1136)
1572 		if (cputype != CPU_ID_ARM1176JS) {
1573 			cpufuncs = arm1136_cpufuncs;
1574 			if (cputype == CPU_ID_ARM1136JS)
1575 				cpufuncs.cf_sleep = arm1136_sleep_rev0;
1576 		}
1577 #endif
1578 		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
1579 		cpu_do_powersave = 1;			/* Enable powersave */
1580 		get_cachetype_cp15();
1581 #ifdef ARM11_CACHE_WRITE_THROUGH
1582 		pmap_pte_init_arm11();
1583 #else
1584 		pmap_pte_init_generic();
1585 #endif
1586 		if (arm_cache_prefer_mask)
1587 			uvmexp.ncolors = (arm_cache_prefer_mask >> PGSHIFT) + 1;
1588 
1589 		return 0;
1590 	}
1591 #endif /* CPU_ARM11 */
1592 #ifdef CPU_SA110
1593 	if (cputype == CPU_ID_SA110) {
1594 		cpufuncs = sa110_cpufuncs;
1595 		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it */
1596 		get_cachetype_table();
1597 		pmap_pte_init_sa1();
1598 		return 0;
1599 	}
1600 #endif	/* CPU_SA110 */
1601 #ifdef CPU_SA1100
1602 	if (cputype == CPU_ID_SA1100) {
1603 		cpufuncs = sa11x0_cpufuncs;
1604 		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it	*/
1605 		get_cachetype_table();
1606 		pmap_pte_init_sa1();
1607 
1608 		/* Use powersave on this CPU. */
1609 		cpu_do_powersave = 1;
1610 
1611 		return 0;
1612 	}
1613 #endif	/* CPU_SA1100 */
1614 #ifdef CPU_SA1110
1615 	if (cputype == CPU_ID_SA1110) {
1616 		cpufuncs = sa11x0_cpufuncs;
1617 		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it	*/
1618 		get_cachetype_table();
1619 		pmap_pte_init_sa1();
1620 
1621 		/* Use powersave on this CPU. */
1622 		cpu_do_powersave = 1;
1623 
1624 		return 0;
1625 	}
1626 #endif	/* CPU_SA1110 */
1627 #ifdef CPU_FA526
1628 	if (cputype == CPU_ID_FA526) {
1629 		cpufuncs = fa526_cpufuncs;
1630 		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it	*/
1631 		get_cachetype_cp15();
1632 		pmap_pte_init_generic();
1633 
1634 		/* Use powersave on this CPU. */
1635 		cpu_do_powersave = 1;
1636 
1637 		return 0;
1638 	}
1639 #endif	/* CPU_FA526 */
1640 #ifdef CPU_IXP12X0
1641 	if (cputype == CPU_ID_IXP1200) {
1642 		cpufuncs = ixp12x0_cpufuncs;
1643 		cpu_reset_needs_v4_MMU_disable = 1;
1644 		get_cachetype_table();
1645 		pmap_pte_init_sa1();
1646 		return 0;
1647 	}
1648 #endif  /* CPU_IXP12X0 */
1649 #ifdef CPU_XSCALE_80200
1650 	if (cputype == CPU_ID_80200) {
1651 		int rev = cpufunc_id() & CPU_ID_REVISION_MASK;
1652 
1653 		i80200_icu_init();
1654 
1655 		/*
1656 		 * Reset the Performance Monitoring Unit to a
1657 		 * pristine state:
1658 		 *	- CCNT, PMN0, PMN1 reset to 0
1659 		 *	- overflow indications cleared
1660 		 *	- all counters disabled
1661 		 */
1662 		__asm volatile("mcr p14, 0, %0, c0, c0, 0"
1663 			:
1664 			: "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
1665 			       PMNC_CC_IF));
1666 
1667 #if defined(XSCALE_CCLKCFG)
1668 		/*
1669 		 * Crank CCLKCFG to maximum legal value.
1670 		 */
1671 		__asm volatile ("mcr p14, 0, %0, c6, c0, 0"
1672 			:
1673 			: "r" (XSCALE_CCLKCFG));
1674 #endif
1675 
1676 		/*
1677 		 * XXX Disable ECC in the Bus Controller Unit; we
1678 		 * don't really support it, yet.  Clear any pending
1679 		 * error indications.
1680 		 */
1681 		__asm volatile("mcr p13, 0, %0, c0, c1, 0"
1682 			:
1683 			: "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));
1684 
1685 		cpufuncs = xscale_cpufuncs;
1686 #if defined(PERFCTRS)
1687 		xscale_pmu_init();
1688 #endif
1689 
1690 		/*
1691 		 * i80200 errata: Step-A0 and A1 have a bug where
1692 		 * D$ dirty bits are not cleared on "invalidate by
1693 		 * address".
1694 		 *
1695 		 * Workaround: Clean cache line before invalidating.
1696 		 */
1697 		if (rev == 0 || rev == 1)
1698 			cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng;
1699 
1700 		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
1701 		get_cachetype_cp15();
1702 		pmap_pte_init_xscale();
1703 		return 0;
1704 	}
1705 #endif /* CPU_XSCALE_80200 */
1706 #ifdef CPU_XSCALE_80321
1707 	if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
1708 	    cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0 ||
1709 	    cputype == CPU_ID_80219_400 || cputype == CPU_ID_80219_600) {
1710 		i80321_icu_init();
1711 
1712 		/*
1713 		 * Reset the Performance Monitoring Unit to a
1714 		 * pristine state:
1715 		 *	- CCNT, PMN0, PMN1 reset to 0
1716 		 *	- overflow indications cleared
1717 		 *	- all counters disabled
1718 		 */
1719 		__asm volatile("mcr p14, 0, %0, c0, c0, 0"
1720 			:
1721 			: "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
1722 			       PMNC_CC_IF));
1723 
1724 		cpufuncs = xscale_cpufuncs;
1725 #if defined(PERFCTRS)
1726 		xscale_pmu_init();
1727 #endif
1728 
1729 		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
1730 		get_cachetype_cp15();
1731 		pmap_pte_init_xscale();
1732 		return 0;
1733 	}
1734 #endif /* CPU_XSCALE_80321 */
1735 #ifdef __CPU_XSCALE_PXA2XX
1736 	/* ignore core revision to test PXA2xx CPUs */
1737 	if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X ||
1738 	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
1739 	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {
1740 
1741 		cpufuncs = xscale_cpufuncs;
1742 #if defined(PERFCTRS)
1743 		xscale_pmu_init();
1744 #endif
1745 
1746 		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
1747 		get_cachetype_cp15();
1748 		pmap_pte_init_xscale();
1749 
1750 		/* Use powersave on this CPU. */
1751 		cpu_do_powersave = 1;
1752 
1753 		return 0;
1754 	}
1755 #endif /* __CPU_XSCALE_PXA2XX */
1756 #ifdef CPU_XSCALE_IXP425
1757 	if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
1758 	    cputype == CPU_ID_IXP425_266) {
1759 		ixp425_icu_init();
1760 
1761 		cpufuncs = xscale_cpufuncs;
1762 #if defined(PERFCTRS)
1763 		xscale_pmu_init();
1764 #endif
1765 
1766 		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
1767 		get_cachetype_cp15();
1768 		pmap_pte_init_xscale();
1769 
1770 		return 0;
1771 	}
1772 #endif /* CPU_XSCALE_IXP425 */
1773 #if defined(CPU_CORTEX)
1774 	if (cputype == CPU_ID_CORTEXA8R1 ||
1775 	    cputype == CPU_ID_CORTEXA8R2 ||
1776 	    cputype == CPU_ID_CORTEXA8R3 ||
1777 	    cputype == CPU_ID_CORTEXA9R1) {
1778 		cpufuncs = cortex_cpufuncs;
1779 		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
1780 		cpu_do_powersave = 1;			/* Enable powersave */
1781 		get_cachetype_cp15();
1782 		pmap_pte_init_armv7();
1783 		if (arm_cache_prefer_mask)
1784 			uvmexp.ncolors = (arm_cache_prefer_mask >> PGSHIFT) + 1;
1785 
1786 		return 0;
1787 	}
1788 #endif /* CPU_CORTEX */
1789 	/*
1790 	 * Bzzzz. And the answer was ...
1791 	 */
1792 	panic("No support for this CPU type (%08x) in kernel", cputype);
1793 	return(ARCHITECTURE_NOT_PRESENT);
1794 }
1795 
1796 #ifdef CPU_ARM2
1797 u_int arm2_id(void)
1798 {
1799 
1800 	return CPU_ID_ARM2;
1801 }
1802 #endif /* CPU_ARM2 */
1803 
1804 #ifdef CPU_ARM250
1805 u_int arm250_id(void)
1806 {
1807 
1808 	return CPU_ID_ARM250;
1809 }
1810 #endif /* CPU_ARM250 */
1811 
1812 /*
1813  * Fixup routines for data and prefetch aborts.
1814  *
1815  * Several compile time symbols are used
1816  *
1817  * DEBUG_FAULT_CORRECTION - Print debugging information during the
1818  * correction of registers after a fault.
1819  * ARM6_LATE_ABORT - ARM6 supports both early and late aborts
1820  * when defined should use late aborts
1821  */
1822 
1823 
1824 /*
1825  * Null abort fixup routine.
1826  * For use when no fixup is required.
1827  */
1828 int
1829 cpufunc_null_fixup(void *arg)
1830 {
1831 	return(ABORT_FIXUP_OK);
1832 }
1833 
1834 
1835 #if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3) || \
1836     defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI)
1837 
1838 #ifdef DEBUG_FAULT_CORRECTION
1839 #define DFC_PRINTF(x)		printf x
1840 #define DFC_DISASSEMBLE(x)	disassemble(x)
1841 #else
1842 #define DFC_PRINTF(x)		/* nothing */
1843 #define DFC_DISASSEMBLE(x)	/* nothing */
1844 #endif
1845 
1846 /*
1847  * "Early" data abort fixup.
1848  *
1849  * For ARM2, ARM2as, ARM3 and ARM6 (in early-abort mode).  Also used
1850  * indirectly by ARM6 (in late-abort mode) and ARM7[TDMI].
1851  *
1852  * In early aborts, we may have to fix up LDM, STM, LDC and STC.
1853  */
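/*
 * Worked example (illustrative only): an "ldmia r3!, {r0-r2}" that
 * aborts has already advanced its base register r3 by 12 bytes
 * (3 registers x 4).  The code below counts the set bits in the
 * register list and, since the U bit (1 << 23) is set, subtracts
 * 12 from r3 again to restore it.
 */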
1854 int
1855 early_abort_fixup(void *arg)
1856 {
1857 	trapframe_t *frame = arg;
1858 	u_int fault_pc;
1859 	u_int fault_instruction;
1860 	int saved_lr = 0;
1861 
1862 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1863 
1864 		/* Ok an abort in SVC mode */
1865 
1866 		/*
1867 		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1868 		 * as the fault happened in svc mode but we need it in the
1869 		 * usr slot so we can treat the registers as an array of ints
1870 		 * during fixing.
1871 		 * NOTE: r15 (the PC) is in its usual slot, but writeback
1872 		 * to r15 is not allowed, so the fixup never touches it.
1873 		 * Doing it like this is more efficient than trapping this
1874 		 * case in all possible locations in the following fixup code.
1875 		 */
1876 
1877 		saved_lr = frame->tf_usr_lr;
1878 		frame->tf_usr_lr = frame->tf_svc_lr;
1879 
1880 		/*
1881 		 * Note the trapframe does not have the SVC r13 so a fault
1882 		 * from an instruction with writeback to r13 in SVC mode is
1883 		 * not allowed. This should not happen as the kstack is
1884 		 * always valid.
1885 		 */
1886 	}
1887 
1888 	/* Get fault address and status from the CPU */
1889 	/* Get the fault PC and fetch the faulting instruction */
1890 	fault_pc = frame->tf_pc;
1891 	fault_instruction = *((volatile unsigned int *)fault_pc);
1892 
1893 	/* Decode the fault instruction and fix the registers as needed */
1894 
1895 	if ((fault_instruction & 0x0e000000) == 0x08000000) {
1896 		int base;
1897 		int loop;
1898 		int count;
1899 		int *registers = &frame->tf_r0;
1900 
1901 		DFC_PRINTF(("LDM/STM\n"));
1902 		DFC_DISASSEMBLE(fault_pc);
1903 		if (fault_instruction & (1 << 21)) {
1904 			DFC_PRINTF(("This instruction must be corrected\n"));
1905 			base = (fault_instruction >> 16) & 0x0f;
1906 			if (base == 15)
1907 				return ABORT_FIXUP_FAILED;
1908 			/* Count registers transferred */
1909 			count = 0;
1910 			for (loop = 0; loop < 16; ++loop) {
1911 				if (fault_instruction & (1<<loop))
1912 					++count;
1913 			}
1914 			DFC_PRINTF(("%d registers used\n", count));
1915 			DFC_PRINTF(("Corrected r%d by %d bytes ",
1916 				       base, count * 4));
1917 			if (fault_instruction & (1 << 23)) {
1918 				DFC_PRINTF(("down\n"));
1919 				registers[base] -= count * 4;
1920 			} else {
1921 				DFC_PRINTF(("up\n"));
1922 				registers[base] += count * 4;
1923 			}
1924 		}
1925 	} else if ((fault_instruction & 0x0e000000) == 0x0c000000) {
1926 		int base;
1927 		int offset;
1928 		int *registers = &frame->tf_r0;
1929 
1930 		/* REGISTER CORRECTION IS REQUIRED FOR THESE INSTRUCTIONS */
1931 
1932 		DFC_DISASSEMBLE(fault_pc);
1933 
1934 		/* Only need to fix registers if write back is turned on */
1935 
1936 		if ((fault_instruction & (1 << 21)) != 0) {
1937 			base = (fault_instruction >> 16) & 0x0f;
1938 			if (base == 13 &&
1939 			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
1940 				return ABORT_FIXUP_FAILED;
1941 			if (base == 15)
1942 				return ABORT_FIXUP_FAILED;
1943 
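			/* LDC/STC: 8-bit word-count immediate, scaled to a byte offset */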
1944 			offset = (fault_instruction & 0xff) << 2;
1945 			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
1946 			if ((fault_instruction & (1 << 23)) != 0)
1947 				offset = -offset;
1948 			registers[base] += offset;
1949 			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
1950 		}
1951 	} else if ((fault_instruction & 0x0e000000) == 0x0c000000)
1952 		return ABORT_FIXUP_FAILED;	/* XXX unreachable: duplicates the LDC/STC test above */
1953 
1954 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1955 
1956 		/* Ok an abort in SVC mode */
1957 
1958 		/*
1959 		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1960 		 * as the fault happened in svc mode but we need it in the
1961 		 * usr slot so we can treat the registers as an array of ints
1962 		 * during fixing.
1963 		 * NOTE: r15 (the PC) is in its usual slot, but writeback
1964 		 * to r15 is not allowed, so the fixup never touches it.
1965 		 * Doing it like this is more efficient than trapping this
1966 		 * case in all possible locations in the prior fixup code.
1967 		 */
1968 
1969 		frame->tf_svc_lr = frame->tf_usr_lr;
1970 		frame->tf_usr_lr = saved_lr;
1971 
1972 		/*
1973 		 * Note the trapframe does not have the SVC r13 so a fault
1974 		 * from an instruction with writeback to r13 in SVC mode is
1975 		 * not allowed. This should not happen as the kstack is
1976 		 * always valid.
1977 		 */
1978 	}
1979 
1980 	return(ABORT_FIXUP_OK);
1981 }
1982 #endif	/* CPU_ARM2/250/3/6/7 */
1983 
1984 
1985 #if (defined(CPU_ARM6) && defined(ARM6_LATE_ABORT)) || defined(CPU_ARM7) || \
1986 	defined(CPU_ARM7TDMI)
1987 /*
1988  * "Late" (base updated) data abort fixup
1989  *
1990  * For ARM6 (in late-abort mode) and ARM7.
1991  *
1992  * In this model, all data-transfer instructions need fixing up.  We defer
1993  * LDM, STM, LDC and STC fixup to the early-abort handler.
1994  */
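/*
 * Worked example (illustrative only): a post-indexed "ldr r0, [r1], #8"
 * that aborts has already advanced r1 by 8.  The code below decodes the
 * offset (immediate or shifted register), negates it when the U bit
 * (1 << 23) is set, and adds the result back into r1, restoring it.
 */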
1995 int
1996 late_abort_fixup(void *arg)
1997 {
1998 	trapframe_t *frame = arg;
1999 	u_int fault_pc;
2000 	u_int fault_instruction;
2001 	int saved_lr = 0;
2002 
2003 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
2004 
2005 		/* Ok an abort in SVC mode */
2006 
2007 		/*
2008 		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
2009 		 * as the fault happened in svc mode but we need it in the
2010 		 * usr slot so we can treat the registers as an array of ints
2011 		 * during fixing.
2012 		 * NOTE: r15 (the PC) is in its usual slot, but writeback
2013 		 * to r15 is not allowed, so the fixup never touches it.
2014 		 * Doing it like this is more efficient than trapping this
2015 		 * case in all possible locations in the following fixup code.
2016 		 */
2017 
2018 		saved_lr = frame->tf_usr_lr;
2019 		frame->tf_usr_lr = frame->tf_svc_lr;
2020 
2021 		/*
2022 		 * Note the trapframe does not have the SVC r13 so a fault
2023 		 * from an instruction with writeback to r13 in SVC mode is
2024 		 * not allowed. This should not happen as the kstack is
2025 		 * always valid.
2026 		 */
2027 	}
2028 
2029 	/* Get fault address and status from the CPU */
2030 	/* Get the fault PC and fetch the faulting instruction */
2031 	fault_pc = frame->tf_pc;
2032 	fault_instruction = *((volatile unsigned int *)fault_pc);
2033 
2034 	/* Decode the fault instruction and fix the registers as needed */
2035 
2036 	/* Was it a swap instruction? */
2037 
2038 	if ((fault_instruction & 0x0fb00ff0) == 0x01000090) {
2039 		DFC_DISASSEMBLE(fault_pc);
2040 	} else if ((fault_instruction & 0x0c000000) == 0x04000000) {
2041 
2042 		/* Was it an ldr/str instruction? */
2043 		/* This is for late abort only */
2044 
2045 		int base;
2046 		int offset;
2047 		int *registers = &frame->tf_r0;
2048 
2049 		DFC_DISASSEMBLE(fault_pc);
2050 
2053 		if ((fault_instruction & (1 << 24)) == 0
2054 		    || (fault_instruction & (1 << 21)) != 0) {
2055 			/* post-indexed, or pre-indexed with writeback: base was updated */
2056 
2057 			base = (fault_instruction >> 16) & 0x0f;
2058 			if (base == 13 &&
2059 			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
2060 				return ABORT_FIXUP_FAILED;
2061 			if (base == 15)
2062 				return ABORT_FIXUP_FAILED;
2063 			DFC_PRINTF(("late abt fix: r%d=%08x : ",
2064 				       base, registers[base]));
2065 			if ((fault_instruction & (1 << 25)) == 0) {
2066 				/* Immediate offset - easy */
2067 
2068 				offset = fault_instruction & 0xfff;
2069 				if ((fault_instruction & (1 << 23)))
2070 					offset = -offset;
2071 				registers[base] += offset;
2072 				DFC_PRINTF(("imm=%08x ", offset));
2073 			} else {
2074 				/* offset is a shifted register */
2075 				int shift;
2076 
2077 				offset = fault_instruction & 0x0f;
2078 				if (offset == base)
2079 					return ABORT_FIXUP_FAILED;
2080 
2081 				/*
2082 				 * Register offset - harder: we have to
2083 				 * cope with shifts!
2084 				 */
2085 				offset = registers[offset];
2086 
2087 				if ((fault_instruction & (1 << 4)) == 0)
2088 					/* shift with amount */
2089 					shift = (fault_instruction >> 7) & 0x1f;
2090 				else {
2091 					/* shift with register */
2092 					if ((fault_instruction & (1 << 7)) != 0)
2093 						/* undefined for now so bail out */
2094 						return ABORT_FIXUP_FAILED;
2095 					shift = ((fault_instruction >> 8) & 0xf);
2096 					if (base == shift)
2097 						return ABORT_FIXUP_FAILED;
2098 					DFC_PRINTF(("shift reg=%d ", shift));
2099 					shift = registers[shift] & 0xff;	/* ARM uses only the low byte */
2100 				}
2101 				DFC_PRINTF(("shift=%08x ", shift));
2102 				switch (((fault_instruction >> 5) & 0x3)) {
2103 				case 0 : /* Logical left */
2104 					offset = (shift >= 32) ? 0 : (int)(((u_int)offset) << shift);	/* shifts >= 32 would be C UB */
2105 					break;
2106 				case 1 : /* Logical Right */
2107 					if (shift == 0) shift = 32;
2108 					offset = (shift >= 32) ? 0 : (int)(((u_int)offset) >> shift);
2109 					break;
2110 				case 2 : /* Arithmetic Right */
2111 					if (shift == 0) shift = 32;
2112 					offset = (shift >= 32) ? ((offset < 0) ? -1 : 0) : (int)(((int)offset) >> shift);
2113 					break;
2114 				case 3 : /* Rotate right (ror or rrx) */
2115 					return ABORT_FIXUP_FAILED;
2116 					break;
2117 				}
2118 
2119 				DFC_PRINTF(("abt: fixed LDR/STR with "
2120 					       "register offset\n"));
2121 				if ((fault_instruction & (1 << 23)))
2122 					offset = -offset;
2123 				DFC_PRINTF(("offset=%08x ", offset));
2124 				registers[base] += offset;
2125 			}
2126 			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
2127 		}
2128 	}
2129 
2130 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
2131 
2132 		/* Ok an abort in SVC mode */
2133 
2134 		/*
2135 		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
2136 		 * as the fault happened in svc mode but we need it in the
2137 		 * usr slot so we can treat the registers as an array of ints
2138 		 * during fixing.
2139 		 * NOTE: r15 (the PC) is in its usual slot, but writeback
2140 		 * to r15 is not allowed, so the fixup never touches it.
2141 		 * Doing it like this is more efficient than trapping this
2142 		 * case in all possible locations in the prior fixup code.
2143 		 */
2144 
2145 		frame->tf_svc_lr = frame->tf_usr_lr;
2146 		frame->tf_usr_lr = saved_lr;
2147 
2148 		/*
2149 		 * Note the trapframe does not have the SVC r13 so a fault
2150 		 * from an instruction with writeback to r13 in SVC mode is
2151 		 * not allowed. This should not happen as the kstack is
2152 		 * always valid.
2153 		 */
2154 	}
2155 
2156 	/*
2157 	 * Now let the early-abort fixup routine have a go, in case it
2158 	 * was an LDM, STM, LDC or STC that faulted.
2159 	 */
2160 
2161 	return early_abort_fixup(arg);
2162 }
2163 #endif	/* CPU_ARM6(LATE)/7/7TDMI */
2164 
2165 /*
2166  * CPU Setup code
2167  */
2168 
2169 #if defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) || \
2170 	defined(CPU_ARM8) || defined (CPU_ARM9) || defined (CPU_ARM9E) || \
2171 	defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
2172 	defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
2173 	defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425) || \
2174 	defined(CPU_ARM10) || defined(CPU_ARM11) || defined(CPU_ARM1136) || \
2175 	defined(CPU_FA526) || defined(CPU_CORTEX) || defined(CPU_SHEEVA)
2176 
2177 #define IGN	0
2178 #define OR	1
2179 #define BIC	2
2180 
2181 struct cpu_option {
2182 	const char *co_name;
2183 	int	co_falseop;
2184 	int	co_trueop;
2185 	int	co_value;
2186 };
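/*
 * The option tables below map boot arguments onto control-register
 * bits: a true value applies co_trueop, a false value co_falseop,
 * where OR sets co_value and BIC clears it.  For example, booting
 * with "cpu.nocache" true hits that entry's BIC and clears the
 * cache-enable bits; with it false, the OR turns them back on.
 */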
2187 
2188 static u_int parse_cpu_options(char *, struct cpu_option *, u_int);
2189 
2190 static u_int
2191 parse_cpu_options(char *args, struct cpu_option *optlist, u_int cpuctrl)
2192 {
2193 	int integer;
2194 
2195 	if (args == NULL)
2196 		return(cpuctrl);
2197 
2198 	while (optlist->co_name) {
2199 		if (get_bootconf_option(args, optlist->co_name,
2200 		    BOOTOPT_TYPE_BOOLEAN, &integer)) {
2201 			if (integer) {
2202 				if (optlist->co_trueop == OR)
2203 					cpuctrl |= optlist->co_value;
2204 				else if (optlist->co_trueop == BIC)
2205 					cpuctrl &= ~optlist->co_value;
2206 			} else {
2207 				if (optlist->co_falseop == OR)
2208 					cpuctrl |= optlist->co_value;
2209 				else if (optlist->co_falseop == BIC)
2210 					cpuctrl &= ~optlist->co_value;
2211 			}
2212 		}
2213 		++optlist;
2214 	}
2215 	return(cpuctrl);
2216 }
2217 #endif /* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 || ... || CPU_CORTEX || CPU_SHEEVA */
2218 
2219 #if defined (CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) \
2220 	|| defined(CPU_ARM8)
2221 struct cpu_option arm678_options[] = {
2222 #ifdef COMPAT_12
2223 	{ "nocache",		IGN, BIC, CPU_CONTROL_IDC_ENABLE },
2224 	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
2225 #endif	/* COMPAT_12 */
2226 	{ "cpu.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
2227 	{ "cpu.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
2228 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2229 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2230 	{ NULL,			IGN, IGN, 0 }
2231 };
2232 
2233 #endif	/* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 */
2234 
2235 #ifdef CPU_ARM6
2236 struct cpu_option arm6_options[] = {
2237 	{ "arm6.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
2238 	{ "arm6.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
2239 	{ "arm6.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2240 	{ "arm6.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2241 	{ NULL,			IGN, IGN, 0 }
2242 };
2243 
2244 void
2245 arm6_setup(char *args)
2246 {
2247 	int cpuctrl, cpuctrlmask;
2248 
2249 	/* Set up default control registers bits */
2250 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2251 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2252 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
2253 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2254 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2255 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
2256 		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE
2257 		 | CPU_CONTROL_AFLT_ENABLE;
2258 
2259 #ifdef ARM6_LATE_ABORT
2260 	cpuctrl |= CPU_CONTROL_LABT_ENABLE;
2261 #endif	/* ARM6_LATE_ABORT */
2262 
2263 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2264 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2265 #endif
2266 
2267 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
2268 	cpuctrl = parse_cpu_options(args, arm6_options, cpuctrl);
2269 
2270 #ifdef __ARMEB__
2271 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2272 #endif
2273 
2274 	/* Clear out the cache */
2275 	cpu_idcache_wbinv_all();
2276 
2277 	/* Set the control register */
2278 	curcpu()->ci_ctrl = cpuctrl;
2279 	cpu_control(0xffffffff, cpuctrl);
2280 }
2281 #endif	/* CPU_ARM6 */
2282 
2283 #ifdef CPU_ARM7
2284 struct cpu_option arm7_options[] = {
2285 	{ "arm7.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
2286 	{ "arm7.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
2287 	{ "arm7.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2288 	{ "arm7.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2289 #ifdef COMPAT_12
2290 	{ "fpaclk2",		BIC, OR,  CPU_CONTROL_CPCLK },
2291 #endif	/* COMPAT_12 */
2292 	{ "arm700.fpaclk",	BIC, OR,  CPU_CONTROL_CPCLK },
2293 	{ NULL,			IGN, IGN, 0 }
2294 };
2295 
2296 void
2297 arm7_setup(char *args)
2298 {
2299 	int cpuctrl, cpuctrlmask;
2300 
2301 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2302 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2303 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
2304 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2305 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2306 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
2307 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_LABT_ENABLE
2308 		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE
2309 		 | CPU_CONTROL_AFLT_ENABLE;
2310 
2311 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2312 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2313 #endif
2314 
2315 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
2316 	cpuctrl = parse_cpu_options(args, arm7_options, cpuctrl);
2317 
2318 #ifdef __ARMEB__
2319 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2320 #endif
2321 
2322 	/* Clear out the cache */
2323 	cpu_idcache_wbinv_all();
2324 
2325 	/* Set the control register */
2326 	curcpu()->ci_ctrl = cpuctrl;
2327 	cpu_control(0xffffffff, cpuctrl);
2328 }
2329 #endif	/* CPU_ARM7 */
2330 
2331 #ifdef CPU_ARM7TDMI
2332 struct cpu_option arm7tdmi_options[] = {
2333 	{ "arm7.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
2334 	{ "arm7.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
2335 	{ "arm7.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2336 	{ "arm7.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2337 #ifdef COMPAT_12
2338 	{ "fpaclk2",		BIC, OR,  CPU_CONTROL_CPCLK },
2339 #endif	/* COMPAT_12 */
2340 	{ "arm700.fpaclk",	BIC, OR,  CPU_CONTROL_CPCLK },
2341 	{ NULL,			IGN, IGN, 0 }
2342 };
2343 
2344 void
2345 arm7tdmi_setup(char *args)
2346 {
2347 	int cpuctrl;
2348 
2349 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2350 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2351 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
2352 
2353 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
2354 	cpuctrl = parse_cpu_options(args, arm7tdmi_options, cpuctrl);
2355 
2356 #ifdef __ARMEB__
2357 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2358 #endif
2359 
2360 	/* Clear out the cache */
2361 	cpu_idcache_wbinv_all();
2362 
2363 	/* Set the control register */
2364 	curcpu()->ci_ctrl = cpuctrl;
2365 	cpu_control(0xffffffff, cpuctrl);
2366 }
2367 #endif	/* CPU_ARM7TDMI */
2368 
2369 #ifdef CPU_ARM8
2370 struct cpu_option arm8_options[] = {
2371 	{ "arm8.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
2372 	{ "arm8.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
2373 	{ "arm8.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2374 	{ "arm8.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2375 #ifdef COMPAT_12
2376 	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2377 #endif	/* COMPAT_12 */
2378 	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2379 	{ "arm8.branchpredict",	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2380 	{ NULL,			IGN, IGN, 0 }
2381 };
2382 
2383 void
2384 arm8_setup(char *args)
2385 {
2386 	int integer;
2387 	int cpuctrl, cpuctrlmask;
2388 	int clocktest;
2389 	int setclock = 0;
2390 
2391 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2392 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2393 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
2394 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2395 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2396 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
2397 		 | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_ROM_ENABLE
2398 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE;
2399 
2400 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2401 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2402 #endif
2403 
2404 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
2405 	cpuctrl = parse_cpu_options(args, arm8_options, cpuctrl);
2406 
2407 #ifdef __ARMEB__
2408 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2409 #endif
2410 
2411 	/* Get clock configuration */
2412 	clocktest = arm8_clock_config(0, 0) & 0x0f;
2413 
2414 	/* Special ARM8 clock and test configuration */
2415 	if (get_bootconf_option(args, "arm8.clock.reset", BOOTOPT_TYPE_BOOLEAN, &integer)) {
2416 		clocktest = 0;
2417 		setclock = 1;
2418 	}
2419 	if (get_bootconf_option(args, "arm8.clock.dynamic", BOOTOPT_TYPE_BOOLEAN, &integer)) {
2420 		if (integer)
2421 			clocktest |= 0x01;
2422 		else
2423 			clocktest &= ~(0x01);
2424 		setclock = 1;
2425 	}
2426 	if (get_bootconf_option(args, "arm8.clock.sync", BOOTOPT_TYPE_BOOLEAN, &integer)) {
2427 		if (integer)
2428 			clocktest |= 0x02;
2429 		else
2430 			clocktest &= ~(0x02);
2431 		setclock = 1;
2432 	}
2433 	if (get_bootconf_option(args, "arm8.clock.fast", BOOTOPT_TYPE_BININT, &integer)) {
2434 		clocktest = (clocktest & ~0x0c) | (integer & 3) << 2;
2435 		setclock = 1;
2436 	}
2437 	if (get_bootconf_option(args, "arm8.test", BOOTOPT_TYPE_BININT, &integer)) {
2438 		clocktest |= (integer & 7) << 5;
2439 		setclock = 1;
2440 	}
2441 
2442 	/* Clear out the cache */
2443 	cpu_idcache_wbinv_all();
2444 
2445 	/* Set the control register */
2446 	curcpu()->ci_ctrl = cpuctrl;
2447 	cpu_control(0xffffffff, cpuctrl);
2448 
2449 	/* Set the clock/test register */
2450 	if (setclock)
2451 		arm8_clock_config(0x7f, clocktest);
2452 }
2453 #endif	/* CPU_ARM8 */
2454 
2455 #ifdef CPU_ARM9
2456 struct cpu_option arm9_options[] = {
2457 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2458 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2459 	{ "arm9.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2460 	{ "arm9.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2461 	{ "arm9.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2462 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2463 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2464 	{ "arm9.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2465 	{ NULL,			IGN, IGN, 0 }
2466 };
2467 
2468 void
2469 arm9_setup(char *args)
2470 {
2471 	int cpuctrl, cpuctrlmask;
2472 
2473 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2474 	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2475 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2476 	    | CPU_CONTROL_WBUF_ENABLE;
2477 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2478 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2479 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2480 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2481 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2482 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
2483 		 | CPU_CONTROL_ROUNDROBIN;
2484 
2485 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2486 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2487 #endif
2488 
2489 	cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);
2490 
2491 #ifdef __ARMEB__
2492 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2493 #endif
2494 
2495 	if (vector_page == ARM_VECTORS_HIGH)
2496 		cpuctrl |= CPU_CONTROL_VECRELOC;
2497 
2498 	/* Clear out the cache */
2499 	cpu_idcache_wbinv_all();
2500 
2501 	/* Set the control register */
2502 	curcpu()->ci_ctrl = cpuctrl;
2503 	cpu_control(cpuctrlmask, cpuctrl);
2505 }
2506 #endif	/* CPU_ARM9 */
2507 
2508 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
2509 struct cpu_option arm10_options[] = {
2510 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2511 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2512 	{ "arm10.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2513 	{ "arm10.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2514 	{ "arm10.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2515 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2516 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2517 	{ "arm10.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2518 	{ NULL,			IGN, IGN, 0 }
2519 };
2520 
2521 void
2522 arm10_setup(char *args)
2523 {
2524 	int cpuctrl, cpuctrlmask;
2525 
2526 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2527 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2528 	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
2529 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2530 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2531 	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2532 	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2533 	    | CPU_CONTROL_BPRD_ENABLE
2534 	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
2535 
2536 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2537 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2538 #endif
2539 
2540 	cpuctrl = parse_cpu_options(args, arm10_options, cpuctrl);
2541 
2542 #ifdef __ARMEB__
2543 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2544 #endif
2545 
2546 	if (vector_page == ARM_VECTORS_HIGH)
2547 		cpuctrl |= CPU_CONTROL_VECRELOC;
2548 
2549 	/* Clear out the cache */
2550 	cpu_idcache_wbinv_all();
2551 
2552 	/* Now really make sure they are clean.  */
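	/* (mcr c7, c7, 0 invalidates both the I and D caches in one go) */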
2553 	__asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
2554 
2555 	/* Set the control register */
2556 	curcpu()->ci_ctrl = cpuctrl;
2557 	cpu_control(0xffffffff, cpuctrl);
2558 
2559 	/* And again. */
2560 	cpu_idcache_wbinv_all();
2561 }
2562 #endif	/* CPU_ARM9E || CPU_ARM10 */
2563 
2564 #if defined(CPU_ARM11)
2565 struct cpu_option arm11_options[] = {
2566 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2567 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2568 	{ "arm11.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2569 	{ "arm11.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2570 	{ "arm11.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2571 	{ NULL,			IGN, IGN, 0 }
2572 };
2573 
2574 void
2575 arm11_setup(char *args)
2576 {
2577 	int cpuctrl, cpuctrlmask;
2578 
2579 #if defined(PROCESS_ID_IS_CURCPU)
2580 	/* set curcpu() */
2581 	__asm("mcr\tp15, 0, %0, c13, c0, 4" : : "r"(&cpu_info_store));
2582 #elif defined(PROCESS_ID_IS_CURLWP)
2583 	/* set curlwp() */
2584 	__asm("mcr\tp15, 0, %0, c13, c0, 4" : : "r"(&lwp0));
2585 #endif
2586 
2587 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2588 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2589 	    /* | CPU_CONTROL_BPRD_ENABLE */;
2590 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2591 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2592 	    | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BPRD_ENABLE
2593 	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2594 	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
2595 
2596 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2597 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2598 #endif
2599 
2600 	cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);
2601 
2602 #ifdef __ARMEB__
2603 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2604 #endif
2605 
2606 	if (vector_page == ARM_VECTORS_HIGH)
2607 		cpuctrl |= CPU_CONTROL_VECRELOC;
2608 
2609 	/* Clear out the cache */
2610 	cpu_idcache_wbinv_all();
2611 
2612 	/* Now really make sure they are clean.  */
2613 	__asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
2614 
2615 	/* Allow detection code to find the VFP if it's fitted.  */
2616 	__asm volatile ("mcr\tp15, 0, %0, c1, c0, 2" : : "r" (0x0fffffff));
2617 
2618 	/* Set the control register */
2619 	curcpu()->ci_ctrl = cpuctrl;
2620 	cpu_control(0xffffffff, cpuctrl);
2621 
2622 	/* And again. */
2623 	cpu_idcache_wbinv_all();
2624 }
2625 #endif	/* CPU_ARM11 */
2626 
2627 #if defined(CPU_CORTEX)
2628 struct cpu_option armv7_options[] = {
2629 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2630 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2631 	{ "armv7.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2632 	{ "armv7.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2633 	{ "armv7.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2634 	{ NULL,			IGN, IGN, 0 }
2635 };
2636 
2637 void
2638 armv7_setup(char *args)
2640 {
2641 	int cpuctrl, cpuctrlmask;
2642 
2643 #if defined(PROCESS_ID_IS_CURCPU)
2644 	/* set curcpu() */
2645 	__asm("mcr\tp15, 0, %0, c13, c0, 4" : : "r"(&cpu_info_store));
2646 #elif defined(PROCESS_ID_IS_CURLWP)
2647 	/* set curlwp() */
2648 	__asm("mcr\tp15, 0, %0, c13, c0, 4" : : "r"(&lwp0));
2649 #endif
2650 
2651 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_IC_ENABLE
2652 	    | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_BPRD_ENABLE ;
2653 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2654 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2655 	    | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BPRD_ENABLE
2656 	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2657 	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
2658 
2659 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2660 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2661 #endif
2662 
2663 	cpuctrl = parse_cpu_options(args, armv7_options, cpuctrl);
2664 
2665 #ifdef __ARMEB__
2666 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2667 #endif
2668 
2669 	if (vector_page == ARM_VECTORS_HIGH)
2670 		cpuctrl |= CPU_CONTROL_VECRELOC;
2671 
2672 	/* Clear out the cache */
2673 	cpu_idcache_wbinv_all();
2674 
2675 	/* Set the control register */
2676 	curcpu()->ci_ctrl = cpuctrl;
2677 	cpu_control(0xffffffff, cpuctrl);
2678 }
2679 
2680 /* Clean the data cache to the level of coherency. Slow. */
2681 void
2682 armv7_dcache_wbinv_all(void)
2683 {
2684 	u_int clidr, loc, level;
2685 
2686 	/* Cache Level ID Register */
2687 	__asm volatile("mrc\tp15, 1, %0, c0, c0, 1" : "=r" (clidr));
2688 
2689 	loc = (clidr >> 24) & 7; /* Level of Coherency */
2690 
2691 	for (level = 0; level <= loc; level++) {
2692 		u_int ctype, csid;
2693 		int line_size, ways, nsets, wayshift, setshift;
2694 
2695 		ctype = (clidr >> (level * 3)) & 7;
2696 		/* We're supposed to stop when ctype == 0, but we
2697 		 * trust that loc isn't larger than necessary. */
2698 		if (ctype < 2) continue; /* no cache / only icache */
2699 
2700 		csid = get_cachesize_cp15(level << 1);
2701 		line_size = CPU_CSID_LEN(csid);
2702 		ways = CPU_CSID_ASSOC(csid);
2703 		nsets = (csid >> 13) & 0x7fff;
2704 
2705 		wayshift = __builtin_clz(ways); /* leading zeros */
2706 		setshift = line_size + 4;
2707 
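		/*
		 * Set/way operand layout for the clean-by-set/way op
		 * below: the way index is left-justified into the top
		 * bits (wayshift is the number of leading zeros in the
		 * way count), the set index sits above the cache-line
		 * offset (setshift = log2 of the line size in bytes),
		 * and bits [3:1] hold the cache level.
		 */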
2708 		for (; nsets >= 0; nsets--) {
2709 			int way;
2710 
2711 			for (way = ways; way >= 0; way--) {
2712 				/* Clean by set/way */
2713 				const u_int sw = (way << wayshift)
2714 				    | (nsets << setshift)
2715 				    | (level << 1);
2716 
2717 				__asm volatile("mcr\tp15, 0, %0, c7, c10, 2"
2718 				    :: "r"(sw));
2719 			}
2720 		}
2721 	}
2722 
2723 	__asm volatile("dsb");
2724 	__asm volatile("isb");
2725 }
2726 #endif /* CPU_CORTEX */
2727 
2730 #if defined(CPU_ARM1136)
2731 void
2732 arm1136_setup(char *args)
2733 {
2734 	int cpuctrl, cpuctrl_wax;
2735 	uint32_t auxctrl, auxctrl_wax;
2736 	uint32_t tmp, tmp2;
2737 	uint32_t sbz=0;
2738 	uint32_t cpuid;
2739 
2740 #if defined(PROCESS_ID_IS_CURCPU)
2741 	/* set curcpu() */
2742 	__asm("mcr\tp15, 0, %0, c13, c0, 4" : : "r"(&cpu_info_store));
2743 #elif defined(PROCESS_ID_IS_CURLWP)
2744 	/* set curlwp() */
2745 	__asm("mcr\tp15, 0, %0, c13, c0, 4" : : "r"(&lwp0));
2746 #endif
2747 
2748 	cpuid = cpu_id();
2749 
2750 	cpuctrl =
2751 		CPU_CONTROL_MMU_ENABLE  |
2752 		CPU_CONTROL_DC_ENABLE   |
2753 		CPU_CONTROL_WBUF_ENABLE |
2754 		CPU_CONTROL_32BP_ENABLE |
2755 		CPU_CONTROL_32BD_ENABLE |
2756 		CPU_CONTROL_LABT_ENABLE |
2757 		CPU_CONTROL_SYST_ENABLE |
2758 		CPU_CONTROL_IC_ENABLE;
2759 
2760 	/*
2761 	 * "write as existing" bits:
2762 	 * the inverse of this is the mask passed to cpu_control()
2763 	 */
2764 	cpuctrl_wax =
2765 		(3 << 30) |
2766 		(1 << 29) |
2767 		(1 << 28) |
2768 		(3 << 26) |
2769 		(3 << 19) |
2770 		(1 << 17);
2771 
2772 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2773 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2774 #endif
2775 
2776 	cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);
2777 
2778 #ifdef __ARMEB__
2779 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2780 #endif
2781 
2782 	if (vector_page == ARM_VECTORS_HIGH)
2783 		cpuctrl |= CPU_CONTROL_VECRELOC;
2784 
2785 	auxctrl = 0;
2786 	auxctrl_wax = ~0;
2787 	/* This option enables the workaround for the 364296 ARM1136
2788 	 * r0pX errata (possible cache data corruption with
2789 	 * hit-under-miss enabled). It sets the undocumented bit 31 in
2790 	 * the auxiliary control register and the FI bit in the control
2791 	 * register, thus disabling hit-under-miss without putting the
2792 	 * processor into full low interrupt latency mode. ARM11MPCore
2793 	 * is not affected.
2794 	 */
2795 	if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1136JS) { /* ARM1136JSr0pX */
2796 		cpuctrl |= CPU_CONTROL_FI_ENABLE;
2797 		auxctrl = ARM11R0_AUXCTL_PFI;
2798 		auxctrl_wax = ~ARM11R0_AUXCTL_PFI;
2799 	}
2800 
2801 	/* Clear out the cache */
2802 	cpu_idcache_wbinv_all();
2803 
2804 	/* Now really make sure they are clean.  */
2805 	__asm volatile ("mcr\tp15, 0, %0, c7, c7, 0" : : "r"(sbz));
2806 
2807 	/* Allow detection code to find the VFP if it's fitted.  */
2808 	__asm volatile ("mcr\tp15, 0, %0, c1, c0, 2" : : "r" (0x0fffffff));
2809 
2810 	/* Set the control register */
2811 	curcpu()->ci_ctrl = cpuctrl;
2812 	cpu_control(~cpuctrl_wax, cpuctrl);
2813 
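	/*
	 * Read-modify-write the auxiliary control register: the bic
	 * keeps the "write as existing" bits, the orr folds in the
	 * requested bits, and the mcrne writes the register back only
	 * if the value actually changed.
	 */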
2814 	__asm volatile ("mrc	p15, 0, %0, c1, c0, 1\n\t"
2815 			"bic	%1, %0, %2\n\t"
2816 			"orr	%1, %1, %3\n\t"
2817 			"teq	%0, %1\n\t"
2818 			"mcrne	p15, 0, %1, c1, c0, 1\n\t"
2819 			: "=r"(tmp), "=r"(tmp2) :
2820 			  "r"(~auxctrl_wax), "r"(auxctrl));
2821 
2822 	/* And again. */
2823 	cpu_idcache_wbinv_all();
2824 }
2825 #endif	/* CPU_ARM1136 */
2826 
2827 #ifdef CPU_SA110
2828 struct cpu_option sa110_options[] = {
2829 #ifdef COMPAT_12
2830 	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2831 	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
2832 #endif	/* COMPAT_12 */
2833 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2834 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2835 	{ "sa110.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2836 	{ "sa110.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2837 	{ "sa110.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2838 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2839 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2840 	{ "sa110.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2841 	{ NULL,			IGN, IGN, 0 }
2842 };
2843 
2844 void
2845 sa110_setup(char *args)
2846 {
2847 	int cpuctrl, cpuctrlmask;
2848 
2849 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2850 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2851 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2852 		 | CPU_CONTROL_WBUF_ENABLE;
2853 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2854 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2855 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2856 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2857 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2858 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
2859 		 | CPU_CONTROL_CPCLK;
2860 
2861 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2862 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2863 #endif
2864 
2865 	cpuctrl = parse_cpu_options(args, sa110_options, cpuctrl);
2866 
2867 #ifdef __ARMEB__
2868 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2869 #endif
2870 
2871 	if (vector_page == ARM_VECTORS_HIGH)
2872 		cpuctrl |= CPU_CONTROL_VECRELOC;
2873 
2874 	/* Clear out the cache */
2875 	cpu_idcache_wbinv_all();
2876 
2877 	/* Set the control register */
2878 	curcpu()->ci_ctrl = cpuctrl;
2879 /*	cpu_control(cpuctrlmask, cpuctrl);*/
2880 	cpu_control(0xffffffff, cpuctrl);
2881 
2882 	/*
2883 	 * Enable clock switching.  Note that this doesn't read or write r0;
2884 	 * r0 is named only to make the asm valid.
2885 	 */
2886 	__asm ("mcr 15, 0, r0, c15, c1, 2");
2887 }
2888 #endif	/* CPU_SA110 */
2889 
2890 #if defined(CPU_SA1100) || defined(CPU_SA1110)
2891 struct cpu_option sa11x0_options[] = {
2892 #ifdef COMPAT_12
2893 	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2894 	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
2895 #endif	/* COMPAT_12 */
2896 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2897 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2898 	{ "sa11x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2899 	{ "sa11x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2900 	{ "sa11x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2901 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2902 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2903 	{ "sa11x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2904 	{ NULL,			IGN, IGN, 0 }
2905 };
2906 
2907 void
2908 sa11x0_setup(char *args)
2909 {
2910 	int cpuctrl, cpuctrlmask;
2911 
2912 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2913 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2914 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2915 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
2916 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2917 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2918 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2919 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2920 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2921 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
2922 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
2923 
2924 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2925 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2926 #endif
2927 
2928 	cpuctrl = parse_cpu_options(args, sa11x0_options, cpuctrl);
2929 
2930 #ifdef __ARMEB__
2931 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2932 #endif
2933 
2934 	if (vector_page == ARM_VECTORS_HIGH)
2935 		cpuctrl |= CPU_CONTROL_VECRELOC;
2936 
2937 	/* Clear out the cache */
2938 	cpu_idcache_wbinv_all();
2939 
2940 	/* Set the control register */
2941 	curcpu()->ci_ctrl = cpuctrl;
2942 	cpu_control(0xffffffff, cpuctrl);
2943 }
2944 #endif	/* CPU_SA1100 || CPU_SA1110 */
2945 
2946 #if defined(CPU_FA526)
2947 struct cpu_option fa526_options[] = {
2948 #ifdef COMPAT_12
2949 	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2950 	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
2951 #endif	/* COMPAT_12 */
2952 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2953 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2954 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2955 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2956 	{ NULL,			IGN, IGN, 0 }
2957 };
2958 
2959 void
2960 fa526_setup(char *args)
2961 {
2962 	int cpuctrl, cpuctrlmask;
2963 
2964 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2965 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2966 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2967 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
2968 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2969 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2970 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2971 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2972 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2973 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
2974 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
2975 
2976 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2977 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2978 #endif
2979 
2980 	cpuctrl = parse_cpu_options(args, fa526_options, cpuctrl);
2981 
2982 #ifdef __ARMEB__
2983 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2984 #endif
2985 
2986 	if (vector_page == ARM_VECTORS_HIGH)
2987 		cpuctrl |= CPU_CONTROL_VECRELOC;
2988 
2989 	/* Clear out the cache */
2990 	cpu_idcache_wbinv_all();
2991 
2992 	/* Set the control register */
2993 	curcpu()->ci_ctrl = cpuctrl;
2994 	cpu_control(0xffffffff, cpuctrl);
2995 }
2996 #endif	/* CPU_FA526 */
2997 
2998 #if defined(CPU_IXP12X0)
2999 struct cpu_option ixp12x0_options[] = {
3000 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3001 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3002 	{ "ixp12x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3003 	{ "ixp12x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
3004 	{ "ixp12x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
3005 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3006 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
3007 	{ "ixp12x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3008 	{ NULL,			IGN, IGN, 0 }
3009 };
3010 
3011 void
3012 ixp12x0_setup(char *args)
3013 {
3014 	int cpuctrl, cpuctrlmask;
3015 
3017 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE
3018 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_SYST_ENABLE
3019 		 | CPU_CONTROL_IC_ENABLE;
3020 
3021 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_AFLT_ENABLE
3022 		 | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE
3023 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_SYST_ENABLE
3024 		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_IC_ENABLE
3025 		 | CPU_CONTROL_VECRELOC;
3026 
3027 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3028 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3029 #endif
3030 
3031 	cpuctrl = parse_cpu_options(args, ixp12x0_options, cpuctrl);
3032 
3033 #ifdef __ARMEB__
3034 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3035 #endif
3036 
3037 	if (vector_page == ARM_VECTORS_HIGH)
3038 		cpuctrl |= CPU_CONTROL_VECRELOC;
3039 
3040 	/* Clear out the cache */
3041 	cpu_idcache_wbinv_all();
3042 
3043 	/* Set the control register */
3044 	curcpu()->ci_ctrl = cpuctrl;
3045 	/* cpu_control(0xffffffff, cpuctrl); */
3046 	cpu_control(cpuctrlmask, cpuctrl);
3047 }
3048 #endif /* CPU_IXP12X0 */
3049 
3050 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
3051     defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425) || defined(CPU_CORTEX)
3052 struct cpu_option xscale_options[] = {
3053 #ifdef COMPAT_12
3054 	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
3055 	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3056 #endif	/* COMPAT_12 */
3057 	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
3058 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3059 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3060 	{ "xscale.branchpredict", BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
3061 	{ "xscale.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3062 	{ "xscale.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
3063 	{ "xscale.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
3064 	{ NULL,			IGN, IGN, 0 }
3065 };
3066 
3067 void
3068 xscale_setup(char *args)
3069 {
3070 	uint32_t auxctl;
3071 	int cpuctrl, cpuctrlmask;
3072 
3073 	/*
3074 	 * The XScale Write Buffer is always enabled.  Our option
3075 	 * is to enable/disable coalescing.  Note that bits 6:3
3076 	 * must always be enabled.
3077 	 */
3078 
3079 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3080 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3081 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3082 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
3083 		 | CPU_CONTROL_BPRD_ENABLE;
3084 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3085 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3086 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3087 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
3088 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
3089 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
3090 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
3091 
3092 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3093 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3094 #endif
3095 
3096 	cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl);
3097 
3098 #ifdef __ARMEB__
3099 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3100 #endif
3101 
3102 	if (vector_page == ARM_VECTORS_HIGH)
3103 		cpuctrl |= CPU_CONTROL_VECRELOC;
3104 
3105 	/* Clear out the cache */
3106 	cpu_idcache_wbinv_all();
3107 
3108 	/*
3109 	 * Set the control register.  Note that bits 6:3 must always
3110 	 * be set to 1.
3111 	 */
3112 	curcpu()->ci_ctrl = cpuctrl;
3113 /*	cpu_control(cpuctrlmask, cpuctrl);*/
3114 	cpu_control(0xffffffff, cpuctrl);
3115 
3116 	/* Make sure write coalescing is turned on */
3117 	__asm volatile("mrc p15, 0, %0, c1, c0, 1"
3118 		: "=r" (auxctl));
3119 #ifdef XSCALE_NO_COALESCE_WRITES
3120 	auxctl |= XSCALE_AUXCTL_K;
3121 #else
3122 	auxctl &= ~XSCALE_AUXCTL_K;
3123 #endif
3124 	__asm volatile("mcr p15, 0, %0, c1, c0, 1"
3125 		: : "r" (auxctl));
3126 }
3127 #endif	/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || __CPU_XSCALE_PXA2XX || CPU_XSCALE_IXP425 || CPU_CORTEX */
3128 
3129 #if defined(CPU_SHEEVA)
3130 struct cpu_option sheeva_options[] = {
3131 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3132 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3133 	{ "sheeva.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3134 	{ "sheeva.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
3135 	{ "sheeva.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
3136 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3137 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
3138 	{ "sheeva.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3139 	{ NULL,			IGN, IGN, 0 }
3140 };
3141 
3142 void
3143 sheeva_setup(char *args)
3144 {
3145 	int cpuctrl, cpuctrlmask;
3146 
3147 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
3148 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3149 	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
3150 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
3151 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3152 	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
3153 	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
3154 	    | CPU_CONTROL_BPRD_ENABLE
3155 	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
3156 
3157 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3158 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3159 #endif
3160 
3161 	cpuctrl = parse_cpu_options(args, sheeva_options, cpuctrl);
3162 
3163 	/*
3164 	 * Sheeva has an L2 cache; it would be enabled/disabled here,
3165 	 * but that is not supported yet...
3166 	 */
3167 
3168 #ifdef __ARMEB__
3169 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3170 #endif
3171 
3172 	if (vector_page == ARM_VECTORS_HIGH)
3173 		cpuctrl |= CPU_CONTROL_VECRELOC;
3174 
3175 	/* Clear out the cache */
3176 	cpu_idcache_wbinv_all();
3177 
3178 	/* Now really make sure they are clean.  */
3179 	__asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
3180 
3181 	/* Set the control register */
3182 	curcpu()->ci_ctrl = cpuctrl;
3183 	cpu_control(0xffffffff, cpuctrl);
3184 
3185 	/* And again. */
3186 	cpu_idcache_wbinv_all();
3187 }
3188 #endif	/* CPU_SHEEVA */
3189