/*	$NetBSD: cpufunc.c,v 1.147 2014/04/18 23:50:59 christos Exp $	*/

/*
 * arm7tdmi support code Copyright (c) 2001 John Fremlin
 * arm8 support code Copyright (c) 1997 ARM Limited
 * arm8 support code Copyright (c) 1997 Causality Limited
 * arm9 support code Copyright (C) 2001 ARM Ltd
 * arm11 support code Copyright (c) 2007 Microsoft
 * cortexa8 support code Copyright (c) 2008 3am Software Foundry
 * cortexa8 improvements Copyright (c) Goeran Weinholt
 * Copyright (c) 1997 Mark Brinicombe.
 * Copyright (c) 1997 Causality Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Causality Limited.
 * 4. The name of Causality Limited may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpufuncs.c
 *
 * C functions for supporting CPU / MMU / TLB specific operations.
 *
 * Created	: 30/01/97
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cpufunc.c,v 1.147 2014/04/18 23:50:59 christos Exp $");

#include "opt_compat_netbsd.h"
#include "opt_cpuoptions.h"
#include "opt_perfctrs.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/pmc.h>
#include <sys/systm.h>
#include <machine/cpu.h>
#include <machine/bootconfig.h>
#include <arch/arm/arm/disassem.h>

#include <uvm/uvm.h>

#include <arm/cpufunc_proto.h>
#include <arm/cpuconf.h>
#include <arm/locore.h>

#ifdef CPU_XSCALE_80200
#include <arm/xscale/i80200reg.h>
#include <arm/xscale/i80200var.h>
#endif

#ifdef CPU_XSCALE_80321
#include <arm/xscale/i80321reg.h>
#include <arm/xscale/i80321var.h>
#endif

#ifdef CPU_XSCALE_IXP425
#include <arm/xscale/ixp425reg.h>
#include <arm/xscale/ixp425var.h>
#endif

#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321)
#include <arm/xscale/xscalereg.h>
#endif

#if defined(PERFCTRS)
struct arm_pmc_funcs *arm_pmc;
#endif

#if defined(CPU_ARMV7) && (defined(CPU_ARMV6) || defined(CPU_PRE_ARMV6))
bool cpu_armv7_p;
#endif

#if defined(CPU_ARMV6) && (defined(CPU_ARMV7) || defined(CPU_PRE_ARMV6))
bool cpu_armv6_p;
#endif


/* PRIMARY CACHE VARIABLES */
#if (ARM_MMU_V6 + ARM_MMU_V7) != 0
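/*
 * Mask of the virtual-address bits (above the page offset) that index a
 * way of a VIPT cache.  The pmap consults it when choosing page colours
 * to avoid cache aliasing; it is cleared below once the caches are known
 * to be PIPT or small enough that one way fits in a single page.
 */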
u_int	arm_cache_prefer_mask;
#endif
struct	arm_cache_info arm_pcache;
struct	arm_cache_info arm_scache;

u_int	arm_dcache_align;
u_int	arm_dcache_align_mask;

/* 1 == use cpu_sleep(), 0 == don't */
int cpu_do_powersave;

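/*
 * One cpu_functions table per supported core family.  Each table routes
 * the machine-independent CPU/MMU/TLB/cache hooks to core-specific
 * handlers; cpufunc_nullop stands in for operations a core does not
 * need, with (void *) casts papering over the mismatched prototypes.
 */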
#ifdef CPU_ARM2
struct cpu_functions arm2_cpufuncs = {
	/* CPU functions */

	.cf_id			= arm2_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= (void *)cpufunc_nullop,

	/* TLB functions */

	.cf_tlb_flushID		= cpufunc_nullop,
	.cf_tlb_flushID_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushI		= cpufunc_nullop,
	.cf_tlb_flushI_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushD		= cpufunc_nullop,
	.cf_tlb_flushD_SE	= (void *)cpufunc_nullop,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm3_cache_flush,
	.cf_dcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_dcache_inv_range	= (void *)cpufunc_nullop,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= cpufunc_nullop,
	.cf_idcache_wbinv_range	= (void *)cpufunc_nullop,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= early_abort_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_setup		= (void *)cpufunc_nullop

};
#endif	/* CPU_ARM2 */

#ifdef CPU_ARM250
struct cpu_functions arm250_cpufuncs = {
	/* CPU functions */

	.cf_id			= arm250_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= (void *)cpufunc_nullop,

	/* TLB functions */

	.cf_tlb_flushID		= cpufunc_nullop,
	.cf_tlb_flushID_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushI		= cpufunc_nullop,
	.cf_tlb_flushI_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushD		= cpufunc_nullop,
	.cf_tlb_flushD_SE	= (void *)cpufunc_nullop,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm3_cache_flush,
	.cf_dcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_dcache_inv_range	= (void *)cpufunc_nullop,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= cpufunc_nullop,
	.cf_idcache_wbinv_range	= (void *)cpufunc_nullop,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= early_abort_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_setup		= (void *)cpufunc_nullop

};
#endif	/* CPU_ARM250 */

#ifdef CPU_ARM3
struct cpu_functions arm3_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= arm3_control,

	/* TLB functions */

	.cf_tlb_flushID		= cpufunc_nullop,
	.cf_tlb_flushID_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushI		= cpufunc_nullop,
	.cf_tlb_flushI_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushD		= cpufunc_nullop,
	.cf_tlb_flushD_SE	= (void *)cpufunc_nullop,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm3_cache_flush,
	.cf_dcache_wbinv_range	= (void *)arm3_cache_flush,
	.cf_dcache_inv_range	= (void *)arm3_cache_flush,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm3_cache_flush,
	.cf_idcache_wbinv_range	= (void *)arm3_cache_flush,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= early_abort_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_setup		= (void *)cpufunc_nullop

};
#endif	/* CPU_ARM3 */

#ifdef CPU_ARM6
struct cpu_functions arm6_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm67_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm67_tlb_flush,
	.cf_tlb_flushID_SE	= arm67_tlb_purge,
	.cf_tlb_flushI		= arm67_tlb_flush,
	.cf_tlb_flushI_SE	= arm67_tlb_purge,
	.cf_tlb_flushD		= arm67_tlb_flush,
	.cf_tlb_flushD_SE	= arm67_tlb_purge,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm67_cache_flush,
	.cf_dcache_wbinv_range	= (void *)arm67_cache_flush,
	.cf_dcache_inv_range	= (void *)arm67_cache_flush,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm67_cache_flush,
	.cf_idcache_wbinv_range	= (void *)arm67_cache_flush,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

#ifdef ARM6_LATE_ABORT
	.cf_dataabt_fixup	= late_abort_fixup,
#else
	.cf_dataabt_fixup	= early_abort_fixup,
#endif
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm67_context_switch,

	.cf_setup		= arm6_setup

};
#endif	/* CPU_ARM6 */

#ifdef CPU_ARM7
struct cpu_functions arm7_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm67_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm67_tlb_flush,
	.cf_tlb_flushID_SE	= arm67_tlb_purge,
	.cf_tlb_flushI		= arm67_tlb_flush,
	.cf_tlb_flushI_SE	= arm67_tlb_purge,
	.cf_tlb_flushD		= arm67_tlb_flush,
	.cf_tlb_flushD_SE	= arm67_tlb_purge,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm67_cache_flush,
	.cf_dcache_wbinv_range	= (void *)arm67_cache_flush,
	.cf_dcache_inv_range	= (void *)arm67_cache_flush,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm67_cache_flush,
	.cf_idcache_wbinv_range	= (void *)arm67_cache_flush,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= late_abort_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm67_context_switch,

	.cf_setup		= arm7_setup

};
#endif	/* CPU_ARM7 */

#ifdef CPU_ARM7TDMI
struct cpu_functions arm7tdmi_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm7tdmi_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm7tdmi_tlb_flushID,
	.cf_tlb_flushID_SE	= arm7tdmi_tlb_flushID_SE,
	.cf_tlb_flushI		= arm7tdmi_tlb_flushID,
	.cf_tlb_flushI_SE	= arm7tdmi_tlb_flushID_SE,
	.cf_tlb_flushD		= arm7tdmi_tlb_flushID,
	.cf_tlb_flushD_SE	= arm7tdmi_tlb_flushID_SE,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm7tdmi_cache_flushID,
	.cf_dcache_wbinv_range	= (void *)arm7tdmi_cache_flushID,
	.cf_dcache_inv_range	= (void *)arm7tdmi_cache_flushID,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm7tdmi_cache_flushID,
	.cf_idcache_wbinv_range	= (void *)arm7tdmi_cache_flushID,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= late_abort_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm7tdmi_context_switch,

	.cf_setup		= arm7tdmi_setup

};
#endif	/* CPU_ARM7TDMI */

#ifdef CPU_ARM8
struct cpu_functions arm8_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm8_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm8_tlb_flushID,
	.cf_tlb_flushID_SE	= arm8_tlb_flushID_SE,
	.cf_tlb_flushI		= arm8_tlb_flushID,
	.cf_tlb_flushI_SE	= arm8_tlb_flushID_SE,
	.cf_tlb_flushD		= arm8_tlb_flushID,
	.cf_tlb_flushD_SE	= arm8_tlb_flushID_SE,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm8_cache_purgeID,
	.cf_dcache_wbinv_range	= (void *)arm8_cache_purgeID,
/*XXX*/	.cf_dcache_inv_range	= (void *)arm8_cache_purgeID,
	.cf_dcache_wb_range	= (void *)arm8_cache_cleanID,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm8_cache_purgeID,
	.cf_idcache_wbinv_range = (void *)arm8_cache_purgeID,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm8_context_switch,

	.cf_setup		= arm8_setup
};
#endif	/* CPU_ARM8 */

#ifdef CPU_ARM9
struct cpu_functions arm9_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm9_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= arm9_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= arm9_icache_sync_all,
	.cf_icache_sync_range	= arm9_icache_sync_range,

	.cf_dcache_wbinv_all	= arm9_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= arm9_dcache_wbinv_range,
/*XXX*/	.cf_dcache_inv_range	= arm9_dcache_wbinv_range,
	.cf_dcache_wb_range	= arm9_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm9_idcache_wbinv_all,
	.cf_idcache_wbinv_range = arm9_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm9_context_switch,

	.cf_setup		= arm9_setup

};
#endif /* CPU_ARM9 */

#if defined(CPU_ARM9E) || defined(CPU_ARM10)
struct cpu_functions armv5_ec_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= armv5_ec_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= arm10_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= arm10_tlb_flushI_SE,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv5_ec_icache_sync_all,
	.cf_icache_sync_range	= armv5_ec_icache_sync_range,

	.cf_dcache_wbinv_all	= armv5_ec_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= armv5_ec_dcache_wbinv_range,
/*XXX*/	.cf_dcache_inv_range	= armv5_ec_dcache_wbinv_range,
	.cf_dcache_wb_range	= armv5_ec_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= armv5_ec_idcache_wbinv_all,
	.cf_idcache_wbinv_range = armv5_ec_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm10_context_switch,

	.cf_setup		= arm10_setup

};
#endif /* CPU_ARM9E || CPU_ARM10 */

#ifdef CPU_ARM10
struct cpu_functions arm10_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= armv5_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= arm10_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= arm10_tlb_flushI_SE,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv5_icache_sync_all,
	.cf_icache_sync_range	= armv5_icache_sync_range,

	.cf_dcache_wbinv_all	= armv5_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= armv5_dcache_wbinv_range,
/*XXX*/	.cf_dcache_inv_range	= armv5_dcache_wbinv_range,
	.cf_dcache_wb_range	= armv5_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= armv5_idcache_wbinv_all,
	.cf_idcache_wbinv_range = armv5_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm10_context_switch,

	.cf_setup		= arm10_setup

};
#endif /* CPU_ARM10 */

#ifdef CPU_ARM11
struct cpu_functions arm11_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm11_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm11_tlb_flushID,
	.cf_tlb_flushID_SE	= arm11_tlb_flushID_SE,
	.cf_tlb_flushI		= arm11_tlb_flushI,
	.cf_tlb_flushI_SE	= arm11_tlb_flushI_SE,
	.cf_tlb_flushD		= arm11_tlb_flushD,
	.cf_tlb_flushD_SE	= arm11_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv6_icache_sync_all,
	.cf_icache_sync_range	= armv6_icache_sync_range,

	.cf_dcache_wbinv_all	= armv6_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= armv6_dcache_wbinv_range,
	.cf_dcache_inv_range	= armv6_dcache_inv_range,
	.cf_dcache_wb_range	= armv6_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= armv6_idcache_wbinv_all,
	.cf_idcache_wbinv_range = armv6_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= arm11_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= arm11_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm11_context_switch,

	.cf_setup		= arm11_setup

};
#endif /* CPU_ARM11 */

#ifdef CPU_ARM1136
struct cpu_functions arm1136_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm11_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm11_tlb_flushID,
	.cf_tlb_flushID_SE	= arm11_tlb_flushID_SE,
	.cf_tlb_flushI		= arm11_tlb_flushI,
	.cf_tlb_flushI_SE	= arm11_tlb_flushI_SE,
	.cf_tlb_flushD		= arm11_tlb_flushD,
	.cf_tlb_flushD_SE	= arm11_tlb_flushD_SE,

	/* Cache operations */
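	/* (the numbers in the comments below are ARM1136/ARM1176 erratum
	 * numbers worked around by the arm11x6 routines) */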
	.cf_icache_sync_all	= arm11x6_icache_sync_all,	/* 411920 */
	.cf_icache_sync_range	= arm11x6_icache_sync_range,	/* 371025 */

	.cf_dcache_wbinv_all	= arm11x6_dcache_wbinv_all,	/* 411920 */
	.cf_dcache_wbinv_range	= armv6_dcache_wbinv_range,
	.cf_dcache_inv_range	= armv6_dcache_inv_range,
	.cf_dcache_wb_range	= armv6_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm11x6_idcache_wbinv_all,	/* 411920 */
	.cf_idcache_wbinv_range = arm11x6_idcache_wbinv_range,	/* 371025 */

	/* Other functions */

	.cf_flush_prefetchbuf	= arm11x6_flush_prefetchbuf,
	.cf_drain_writebuf	= arm11_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= arm11_sleep,	/* arm1136_sleep_rev0 */

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm11_context_switch,

	.cf_setup		= arm11x6_setup

};
#endif /* CPU_ARM1136 */

#ifdef CPU_ARM1176
struct cpu_functions arm1176_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm11_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm11_tlb_flushID,
	.cf_tlb_flushID_SE	= arm11_tlb_flushID_SE,
	.cf_tlb_flushI		= arm11_tlb_flushI,
	.cf_tlb_flushI_SE	= arm11_tlb_flushI_SE,
	.cf_tlb_flushD		= arm11_tlb_flushD,
	.cf_tlb_flushD_SE	= arm11_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= arm11x6_icache_sync_all,	/* 415045 */
	.cf_icache_sync_range	= arm11x6_icache_sync_range,	/* 371367 */

	.cf_dcache_wbinv_all	= arm11x6_dcache_wbinv_all,	/* 415045 */
	.cf_dcache_wbinv_range	= armv6_dcache_wbinv_range,
	.cf_dcache_inv_range	= armv6_dcache_inv_range,
	.cf_dcache_wb_range	= armv6_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm11x6_idcache_wbinv_all,	/* 415045 */
	.cf_idcache_wbinv_range = arm11x6_idcache_wbinv_range,	/* 371367 */

	/* Other functions */

	.cf_flush_prefetchbuf	= arm11x6_flush_prefetchbuf,
	.cf_drain_writebuf	= arm11_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= arm11x6_sleep,		/* no ref. */

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm11_context_switch,

	.cf_setup		= arm11x6_setup

};
#endif /* CPU_ARM1176 */

#ifdef CPU_ARM11MPCORE
struct cpu_functions arm11mpcore_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm11_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm11_tlb_flushID,
	.cf_tlb_flushID_SE	= arm11_tlb_flushID_SE,
	.cf_tlb_flushI		= arm11_tlb_flushI,
	.cf_tlb_flushI_SE	= arm11_tlb_flushI_SE,
	.cf_tlb_flushD		= arm11_tlb_flushD,
	.cf_tlb_flushD_SE	= arm11_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv6_icache_sync_all,
	.cf_icache_sync_range	= armv5_icache_sync_range,

	.cf_dcache_wbinv_all	= armv6_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= armv5_dcache_wbinv_range,
	.cf_dcache_inv_range	= armv5_dcache_inv_range,
	.cf_dcache_wb_range	= armv5_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= armv6_idcache_wbinv_all,
	.cf_idcache_wbinv_range = armv5_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= arm11_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= arm11_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm11_context_switch,

	.cf_setup		= arm11mpcore_setup

};
#endif /* CPU_ARM11MPCORE */

#ifdef CPU_SA110
struct cpu_functions sa110_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= sa1_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= sa1_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= sa1_cache_syncI,
	.cf_icache_sync_range	= sa1_cache_syncI_rng,

	.cf_dcache_wbinv_all	= sa1_cache_purgeD,
	.cf_dcache_wbinv_range	= sa1_cache_purgeD_rng,
/*XXX*/	.cf_dcache_inv_range	= sa1_cache_purgeD_rng,
	.cf_dcache_wb_range	= sa1_cache_cleanD_rng,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= sa1_cache_purgeID,
	.cf_idcache_wbinv_range	= sa1_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= sa110_context_switch,

	.cf_setup		= sa110_setup
};
#endif	/* CPU_SA110 */

#if defined(CPU_SA1100) || defined(CPU_SA1110)
struct cpu_functions sa11x0_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= sa1_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= sa1_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= sa1_cache_syncI,
	.cf_icache_sync_range	= sa1_cache_syncI_rng,

	.cf_dcache_wbinv_all	= sa1_cache_purgeD,
	.cf_dcache_wbinv_range	= sa1_cache_purgeD_rng,
/*XXX*/	.cf_dcache_inv_range	= sa1_cache_purgeD_rng,
	.cf_dcache_wb_range	= sa1_cache_cleanD_rng,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= sa1_cache_purgeID,
	.cf_idcache_wbinv_range	= sa1_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf	= sa11x0_drain_readbuf,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= sa11x0_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= sa11x0_context_switch,

	.cf_setup		= sa11x0_setup
};
#endif	/* CPU_SA1100 || CPU_SA1110 */

#if defined(CPU_FA526)
struct cpu_functions fa526_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= fa526_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= fa526_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= fa526_tlb_flushI_SE,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= fa526_icache_sync_all,
	.cf_icache_sync_range	= fa526_icache_sync_range,

	.cf_dcache_wbinv_all	= fa526_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= fa526_dcache_wbinv_range,
	.cf_dcache_inv_range	= fa526_dcache_inv_range,
	.cf_dcache_wb_range	= fa526_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= fa526_idcache_wbinv_all,
	.cf_idcache_wbinv_range	= fa526_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= fa526_flush_prefetchbuf,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= fa526_flush_brnchtgt_E,

	.cf_sleep		= fa526_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= fa526_context_switch,

	.cf_setup		= fa526_setup
};
#endif	/* CPU_FA526 */

#ifdef CPU_IXP12X0
struct cpu_functions ixp12x0_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= sa1_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= sa1_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= sa1_cache_syncI,
	.cf_icache_sync_range	= sa1_cache_syncI_rng,

	.cf_dcache_wbinv_all	= sa1_cache_purgeD,
	.cf_dcache_wbinv_range	= sa1_cache_purgeD_rng,
/*XXX*/	.cf_dcache_inv_range	= sa1_cache_purgeD_rng,
	.cf_dcache_wb_range	= sa1_cache_cleanD_rng,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= sa1_cache_purgeID,
	.cf_idcache_wbinv_range	= sa1_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf	= ixp12x0_drain_readbuf,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= ixp12x0_context_switch,

	.cf_setup		= ixp12x0_setup
};
#endif	/* CPU_IXP12X0 */

#if defined(CPU_XSCALE)
struct cpu_functions xscale_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= xscale_cpwait,

	/* MMU functions */

	.cf_control		= xscale_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= xscale_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= xscale_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= xscale_cache_syncI,
	.cf_icache_sync_range	= xscale_cache_syncI_rng,

	.cf_dcache_wbinv_all	= xscale_cache_purgeD,
	.cf_dcache_wbinv_range	= xscale_cache_purgeD_rng,
	.cf_dcache_inv_range	= xscale_cache_flushD_rng,
	.cf_dcache_wb_range	= xscale_cache_cleanD_rng,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= xscale_cache_purgeID,
	.cf_idcache_wbinv_range = xscale_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= xscale_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= xscale_context_switch,

	.cf_setup		= xscale_setup
};
#endif /* CPU_XSCALE */

#if defined(CPU_ARMV7)
struct cpu_functions armv7_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= armv7_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv7_tlb_flushID,
	.cf_tlb_flushID_SE	= armv7_tlb_flushID_SE,
	.cf_tlb_flushI		= armv7_tlb_flushI,
	.cf_tlb_flushI_SE	= armv7_tlb_flushI_SE,
	.cf_tlb_flushD		= armv7_tlb_flushD,
	.cf_tlb_flushD_SE	= armv7_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv7_icache_sync_all,
	.cf_icache_sync_range	= armv7_icache_sync_range,

	.cf_dcache_wbinv_all	= armv7_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= armv7_dcache_wbinv_range,
	.cf_dcache_inv_range	= armv7_dcache_inv_range,
	.cf_dcache_wb_range	= armv7_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= armv7_idcache_wbinv_all,
	.cf_idcache_wbinv_range = armv7_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv7_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= armv7_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= armv7_context_switch,

	.cf_setup		= armv7_setup

};
#endif /* CPU_ARMV7 */

#ifdef CPU_PJ4B
struct cpu_functions pj4bv7_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= pj4b_drain_writebuf,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= pj4b_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= pj4b_tlb_flushID,
	.cf_tlb_flushID_SE	= pj4b_tlb_flushID_SE,
	.cf_tlb_flushI		= pj4b_tlb_flushID,
	.cf_tlb_flushI_SE	= pj4b_tlb_flushID_SE,
	.cf_tlb_flushD		= pj4b_tlb_flushID,
	.cf_tlb_flushD_SE	= pj4b_tlb_flushID_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv7_idcache_wbinv_all,
	.cf_icache_sync_range	= pj4b_icache_sync_range,

	.cf_dcache_wbinv_all	= armv7_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= pj4b_dcache_wbinv_range,
	.cf_dcache_inv_range	= pj4b_dcache_inv_range,
	.cf_dcache_wb_range	= pj4b_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= armv7_idcache_wbinv_all,
	.cf_idcache_wbinv_range	= pj4b_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= pj4b_drain_readbuf,
	.cf_drain_writebuf	= pj4b_drain_writebuf,
	.cf_flush_brnchtgt_C	= pj4b_flush_brnchtgt_all,
	.cf_flush_brnchtgt_E	= pj4b_flush_brnchtgt_va,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= pj4b_context_switch,

	.cf_setup		= pj4bv7_setup
};
#endif /* CPU_PJ4B */

#ifdef CPU_SHEEVA
struct cpu_functions sheeva_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= armv5_ec_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= arm10_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= arm10_tlb_flushI_SE,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv5_ec_icache_sync_all,
	.cf_icache_sync_range	= armv5_ec_icache_sync_range,

	.cf_dcache_wbinv_all	= armv5_ec_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= sheeva_dcache_wbinv_range,
	.cf_dcache_inv_range	= sheeva_dcache_inv_range,
	.cf_dcache_wb_range	= sheeva_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= armv5_ec_idcache_wbinv_all,
	.cf_idcache_wbinv_range = sheeva_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)sheeva_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm10_context_switch,

	.cf_setup		= sheeva_setup
};
#endif /* CPU_SHEEVA */

/*
 * Global constants also used by locore.s
 */

struct cpu_functions cpufuncs;
u_int cputype;

#if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) || \
    defined(CPU_ARM9E) || defined(CPU_ARM10) || defined(CPU_FA526) || \
    defined(CPU_SHEEVA) || \
    defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
    defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425) || \
    defined(CPU_ARMV6) || defined(CPU_ARMV7)
static void get_cachetype_cp15(void);

/* Additional cache information local to this file.  Log2 of some of the
   above numbers.  */
static int	arm_dcache_log2_nsets;
static int	arm_dcache_log2_assoc;
static int	arm_dcache_log2_linesize;

#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
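/*
 * Select a cache level (and I vs. D/unified side) in the cache size
 * selection register, then read back the matching cache size ID register
 * (CSSELR/CCSIDR on ARMv7; the pre-v7 build uses a different CP15 opcode
 * encoding for the write).
 */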
static inline u_int
get_cachesize_cp15(int cssr)
{
	u_int csid;

#if defined(CPU_ARMV7)
	__asm volatile(".arch\tarmv7a");
	__asm volatile("mcr p15, 2, %0, c0, c0, 0" :: "r" (cssr));
	__asm volatile("isb");	/* sync to the new cssr */
#else
	__asm volatile("mcr p15, 1, %0, c0, c0, 2" :: "r" (cssr));
#endif
	__asm volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (csid));
	return csid;
}
#endif

#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
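/*
 * Decode one level of the cache hierarchy.  "clidr" is the 3-bit cache
 * type field for this level from the cache level ID register (1 = I
 * only, 2 = D only, 3 = split I+D, 4 = unified); the set/way/line
 * geometry comes from the cache size ID register selected above.
 */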
static void
get_cacheinfo_clidr(struct arm_cache_info *info, u_int level, u_int clidr)
{
	u_int csid;

	if (clidr & 6) {
		csid = get_cachesize_cp15(level << 1); /* select dcache values */
		info->dcache_sets = CPU_CSID_NUMSETS(csid) + 1;
		info->dcache_ways = CPU_CSID_ASSOC(csid) + 1;
		info->dcache_line_size = 1U << (CPU_CSID_LEN(csid) + 4);
		info->dcache_way_size =
		    info->dcache_line_size * info->dcache_sets;
		info->dcache_size = info->dcache_way_size * info->dcache_ways;

		if (level == 0) {
			arm_dcache_log2_assoc = CPU_CSID_ASSOC(csid) + 1;
			arm_dcache_log2_linesize = CPU_CSID_LEN(csid) + 4;
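			/* ceil(log2(nsets)), via 31 - clz(2n - 1) */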
			arm_dcache_log2_nsets =
			    31 - __builtin_clz(info->dcache_sets*2-1);
		}
	}

	info->cache_unified = (clidr == 4);

	if (level > 0) {
		info->dcache_type = CACHE_TYPE_PIPT;
		info->icache_type = CACHE_TYPE_PIPT;
	}

	if (info->cache_unified) {
		info->icache_ways = info->dcache_ways;
		info->icache_line_size = info->dcache_line_size;
		info->icache_way_size = info->dcache_way_size;
		info->icache_size = info->dcache_size;
	} else {
		csid = get_cachesize_cp15((level << 1)|CPU_CSSR_InD); /* select icache values */
		info->icache_sets = CPU_CSID_NUMSETS(csid) + 1;
		info->icache_ways = CPU_CSID_ASSOC(csid) + 1;
		info->icache_line_size = 1U << (CPU_CSID_LEN(csid) + 4);
		info->icache_way_size = info->icache_line_size * info->icache_sets;
		info->icache_size = info->icache_way_size * info->icache_ways;
	}
	if (level == 0
	    && info->dcache_way_size <= PAGE_SIZE
	    && info->icache_way_size <= PAGE_SIZE) {
		arm_cache_prefer_mask = 0;
	}
}
#endif /* (ARM_MMU_V6 + ARM_MMU_V7) > 0 */

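/*
 * Probe the primary (and, where present, secondary) cache geometry from
 * the CP15 cache type register.  ARMv6/v7-style cores report format 4
 * and are decoded through CLIDR/CCSIDR above; older cores encode the
 * geometry directly in the I/D size fields.
 */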
static void
get_cachetype_cp15(void)
{
	u_int ctype, isize, dsize;
	u_int multiplier;

	__asm volatile("mrc p15, 0, %0, c0, c0, 1"
		: "=r" (ctype));

	/*
	 * ...and thus spake the ARM ARM:
	 *
	 * If an <opcode2> value corresponding to an unimplemented or
	 * reserved ID register is encountered, the System Control
	 * processor returns the value of the main ID register.
	 */
	if (ctype == cpu_id())
		goto out;

#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
	if (CPU_CT_FORMAT(ctype) == 4) {
		u_int clidr = armreg_clidr_read();

		if (CPU_CT4_L1IPOLICY(ctype) == CPU_CT4_L1_PIPT) {
			arm_pcache.icache_type = CACHE_TYPE_PIPT;
		} else {
			arm_pcache.icache_type = CACHE_TYPE_VIPT;
			arm_cache_prefer_mask = PAGE_SIZE;
		}
#ifdef CPU_CORTEX
		if (CPU_ID_CORTEX_P(cpu_id())) {
			arm_pcache.dcache_type = CACHE_TYPE_PIPT;
		} else
#endif
		{
			arm_pcache.dcache_type = CACHE_TYPE_VIPT;
		}
		arm_pcache.cache_type = CPU_CT_CTYPE_WB14;

		get_cacheinfo_clidr(&arm_pcache, 0, clidr & 7);
		arm_dcache_align = arm_pcache.dcache_line_size;
		clidr >>= 3;
		if (clidr & 7) {
			get_cacheinfo_clidr(&arm_scache, 1, clidr & 7);
			if (arm_scache.dcache_line_size < arm_dcache_align)
				arm_dcache_align = arm_scache.dcache_line_size;
		}
		/*
		 * The pmap cleans an entire way for an exec page so
		 * we don't care that it's VIPT anymore.
		 */
		if (arm_pcache.dcache_type == CACHE_TYPE_PIPT) {
			arm_cache_prefer_mask = 0;
		}
		goto out;
	}
#endif /* ARM_MMU_V6 + ARM_MMU_V7 > 0 */

	if ((ctype & CPU_CT_S) == 0)
		arm_pcache.cache_unified = 1;

	/*
	 * If you want to know how this code works, go read the ARM ARM.
	 */

	arm_pcache.cache_type = CPU_CT_CTYPE(ctype);

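	/*
	 * Pre-v7 cache type register: each I/D size field encodes the
	 * line length (LEN), associativity (ASSOC) and total size (SIZE),
	 * with the M bit selecting a multiplier of 3 rather than 2 for
	 * both the size and the number of ways.
	 */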
	if (arm_pcache.cache_unified == 0) {
		isize = CPU_CT_ISIZE(ctype);
		multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
		arm_pcache.icache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
		if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
			if (isize & CPU_CT_xSIZE_M)
				arm_pcache.icache_line_size = 0; /* not present */
			else
				arm_pcache.icache_ways = 1;
		} else {
			arm_pcache.icache_ways = multiplier <<
			    (CPU_CT_xSIZE_ASSOC(isize) - 1);
#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
			arm_pcache.icache_type = CACHE_TYPE_VIPT;
			if (CPU_CT_xSIZE_P & isize)
				arm_cache_prefer_mask |=
				    __BIT(9 + CPU_CT_xSIZE_SIZE(isize)
					  - CPU_CT_xSIZE_ASSOC(isize))
				    - PAGE_SIZE;
#endif
		}
		arm_pcache.icache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
		arm_pcache.icache_way_size =
		    __BIT(9 + CPU_CT_xSIZE_SIZE(isize) - CPU_CT_xSIZE_ASSOC(isize));
	}

	dsize = CPU_CT_DSIZE(ctype);
	multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
	arm_pcache.dcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
	if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
		if (dsize & CPU_CT_xSIZE_M)
			arm_pcache.dcache_line_size = 0; /* not present */
		else
			arm_pcache.dcache_ways = 1;
	} else {
		arm_pcache.dcache_ways = multiplier <<
		    (CPU_CT_xSIZE_ASSOC(dsize) - 1);
#if (ARM_MMU_V6) > 0
		arm_pcache.dcache_type = CACHE_TYPE_VIPT;
		if ((CPU_CT_xSIZE_P & dsize)
		    && CPU_ID_ARM11_P(curcpu()->ci_arm_cpuid)) {
			arm_cache_prefer_mask |=
			    __BIT(9 + CPU_CT_xSIZE_SIZE(dsize)
				  - CPU_CT_xSIZE_ASSOC(dsize)) - PAGE_SIZE;
		}
#endif
	}
	arm_pcache.dcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
	arm_pcache.dcache_way_size =
	    __BIT(9 + CPU_CT_xSIZE_SIZE(dsize) - CPU_CT_xSIZE_ASSOC(dsize));

	arm_dcache_align = arm_pcache.dcache_line_size;

	arm_dcache_log2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
	arm_dcache_log2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
	arm_dcache_log2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
	    CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);

 out:
	KASSERTMSG(arm_dcache_align <= CACHE_LINE_SIZE,
	    "arm_dcache_align=%u CACHE_LINE_SIZE=%u",
	    arm_dcache_align, CACHE_LINE_SIZE);
	arm_dcache_align_mask = arm_dcache_align - 1;
}
#endif /* ARM7TDMI || ARM8 || ARM9 || ARM9E || ARM10 || FA526 || SHEEVA || XSCALE || ARMV6 || ARMV7 */

#if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3) || \
    defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_SA110) || \
    defined(CPU_SA1100) || defined(CPU_SA1110) || defined(CPU_IXP12X0)
/* Cache information for CPUs without cache type registers. */
struct cachetab {
	uint32_t ct_cpuid;
	int	ct_pcache_type;
	int	ct_pcache_unified;
	int	ct_pdcache_size;
	int	ct_pdcache_line_size;
	int	ct_pdcache_ways;
	int	ct_picache_size;
	int	ct_picache_line_size;
	int	ct_picache_ways;
};

struct cachetab cachetab[] = {
    /* cpuid,           cache type,       u,  dsiz, ls, wy,  isiz, ls, wy */
    { CPU_ID_ARM2,      0,                1,     0,  0,  0,     0,  0,  0 },
    { CPU_ID_ARM250,    0,                1,     0,  0,  0,     0,  0,  0 },
    { CPU_ID_ARM3,      CPU_CT_CTYPE_WT,  1,  4096, 16, 64,     0,  0,  0 },
    { CPU_ID_ARM610,	CPU_CT_CTYPE_WT,  1,  4096, 16, 64,     0,  0,  0 },
    { CPU_ID_ARM710,    CPU_CT_CTYPE_WT,  1,  8192, 32,  4,     0,  0,  0 },
    { CPU_ID_ARM7500,   CPU_CT_CTYPE_WT,  1,  4096, 16,  4,     0,  0,  0 },
    { CPU_ID_ARM710A,   CPU_CT_CTYPE_WT,  1,  8192, 16,  4,     0,  0,  0 },
    { CPU_ID_ARM7500FE, CPU_CT_CTYPE_WT,  1,  4096, 16,  4,     0,  0,  0 },
    /* XXX is this type right for SA-1? */
    { CPU_ID_SA110,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 },
    { CPU_ID_SA1100,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
    { CPU_ID_SA1110,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
    { CPU_ID_IXP1200,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 }, /* XXX */
    { 0, 0, 0, 0, 0, 0, 0, 0}
};

static void get_cachetype_table(void);

static void
get_cachetype_table(void)
{
	int i;
	uint32_t cpuid = cpu_id();

	for (i = 0; cachetab[i].ct_cpuid != 0; i++) {
		if (cachetab[i].ct_cpuid == (cpuid & CPU_ID_CPU_MASK)) {
			arm_pcache.cache_type = cachetab[i].ct_pcache_type;
			arm_pcache.cache_unified = cachetab[i].ct_pcache_unified;
			arm_pcache.dcache_size = cachetab[i].ct_pdcache_size;
			arm_pcache.dcache_line_size =
			    cachetab[i].ct_pdcache_line_size;
			arm_pcache.dcache_ways = cachetab[i].ct_pdcache_ways;
			if (arm_pcache.dcache_ways) {
				/* way size = total size / ways */
				arm_pcache.dcache_way_size =
				    arm_pcache.dcache_size
				    / arm_pcache.dcache_ways;
			}
			arm_pcache.icache_size = cachetab[i].ct_picache_size;
			arm_pcache.icache_line_size =
			    cachetab[i].ct_picache_line_size;
			arm_pcache.icache_ways = cachetab[i].ct_picache_ways;
			if (arm_pcache.icache_ways) {
				arm_pcache.icache_way_size =
				    arm_pcache.icache_size
				    / arm_pcache.icache_ways;
			}
		}
	}

	arm_dcache_align = arm_pcache.dcache_line_size;
	arm_dcache_align_mask = arm_dcache_align - 1;
}

#endif /* ARM2 || ARM250 || ARM3 || ARM6 || ARM7 || SA110 || SA1100 || SA1110 || IXP12X0 */

/*
 * Cannot panic here as we may not have a console yet ...
 */

int
set_cpufuncs(void)
{
	if (cputype == 0) {
		cputype = cpufunc_id();
		cputype &= CPU_ID_CPU_MASK;
	}

	/*
	 * NOTE: cpu_do_powersave defaults to off.  If we encounter a
	 * CPU type where we want to use it by default, then we set it.
	 */
#ifdef CPU_ARM2
	if (cputype == CPU_ID_ARM2) {
		cpufuncs = arm2_cpufuncs;
		get_cachetype_table();
		return 0;
	}
#endif /* CPU_ARM2 */
#ifdef CPU_ARM250
	if (cputype == CPU_ID_ARM250) {
		cpufuncs = arm250_cpufuncs;
		get_cachetype_table();
		return 0;
	}
#endif
#ifdef CPU_ARM3
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    (cputype & 0x00000f00) == 0x00000300) {
		cpufuncs = arm3_cpufuncs;
		get_cachetype_table();
		return 0;
	}
#endif	/* CPU_ARM3 */
#ifdef CPU_ARM6
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    (cputype & 0x00000f00) == 0x00000600) {
		cpufuncs = arm6_cpufuncs;
		get_cachetype_table();
		pmap_pte_init_generic();
		return 0;
	}
#endif	/* CPU_ARM6 */
#ifdef CPU_ARM7
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    CPU_ID_IS7(cputype) &&
	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V3) {
		cpufuncs = arm7_cpufuncs;
		get_cachetype_table();
		pmap_pte_init_generic();
		return 0;
	}
#endif	/* CPU_ARM7 */
#ifdef CPU_ARM7TDMI
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    CPU_ID_IS7(cputype) &&
	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V4T) {
		cpufuncs = arm7tdmi_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		return 0;
	}
#endif
#ifdef CPU_ARM8
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    (cputype & 0x0000f000) == 0x00008000) {
		cpufuncs = arm8_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_arm8();
		return 0;
	}
#endif	/* CPU_ARM8 */
#ifdef CPU_ARM9
	if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
	     (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
	    (cputype & 0x0000f000) == 0x00009000) {
		cpufuncs = arm9_cpufuncs;
		get_cachetype_cp15();
1841 		arm9_dcache_sets_inc = 1U << arm_dcache_log2_linesize;
1842 		arm9_dcache_sets_max =
1843 		    (1U << (arm_dcache_log2_linesize + arm_dcache_log2_nsets)) -
1844 		    arm9_dcache_sets_inc;
1845 		arm9_dcache_index_inc = 1U << (32 - arm_dcache_log2_assoc);
1846 		arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
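		/*
		 * Illustrative numbers, assuming an 8 KB, 4-way cache
		 * with 32-byte lines (64 sets): sets_inc = 1 << 5 = 0x20,
		 * sets_max = (1 << 11) - 0x20 = 0x7e0, and index_inc
		 * steps the way index in the top bits (1U << 30).
		 */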
1847 #ifdef	ARM9_CACHE_WRITE_THROUGH
1848 		pmap_pte_init_arm9();
1849 #else
1850 		pmap_pte_init_generic();
1851 #endif
1852 		return 0;
1853 	}
1854 #endif /* CPU_ARM9 */
1855 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
1856 	if (cputype == CPU_ID_ARM926EJS ||
1857 	    cputype == CPU_ID_ARM1026EJS) {
1858 		cpufuncs = armv5_ec_cpufuncs;
1859 		get_cachetype_cp15();
1860 		pmap_pte_init_generic();
1861 		return 0;
1862 	}
1863 #endif /* CPU_ARM9E || CPU_ARM10 */
1864 #if defined(CPU_SHEEVA)
1865 	if (cputype == CPU_ID_MV88SV131 ||
1866 	    cputype == CPU_ID_MV88FR571_VD) {
1867 		cpufuncs = sheeva_cpufuncs;
1868 		get_cachetype_cp15();
1869 		pmap_pte_init_generic();
1870 		cpu_do_powersave = 1;			/* Enable powersave */
1871 		return 0;
1872 	}
1873 #endif /* CPU_SHEEVA */
1874 #ifdef CPU_ARM10
1875 	if (/* cputype == CPU_ID_ARM1020T || */
1876 	    cputype == CPU_ID_ARM1020E) {
1877 		/*
1878 		 * Select write-through caching (this isn't really an
1879 		 * option on ARM1020T).
1880 		 */
1881 		cpufuncs = arm10_cpufuncs;
1882 		get_cachetype_cp15();
1883 		armv5_dcache_sets_inc = 1U << arm_dcache_log2_linesize;
1884 		armv5_dcache_sets_max =
1885 		    (1U << (arm_dcache_log2_linesize + arm_dcache_log2_nsets)) -
1886 		    armv5_dcache_sets_inc;
1887 		armv5_dcache_index_inc = 1U << (32 - arm_dcache_log2_assoc);
1888 		armv5_dcache_index_max = 0U - armv5_dcache_index_inc;
1889 		pmap_pte_init_generic();
1890 		return 0;
1891 	}
1892 #endif /* CPU_ARM10 */
1893 
1894 
1895 #if defined(CPU_ARM11MPCORE)
1896 	if (cputype == CPU_ID_ARM11MPCORE) {
1897 		cpufuncs = arm11mpcore_cpufuncs;
1898 #if defined(CPU_ARMV7) || defined(CPU_PRE_ARMV6)
1899 		cpu_armv6_p = true;
1900 #endif
1901 		get_cachetype_cp15();
1902 		armv5_dcache_sets_inc = 1U << arm_dcache_log2_linesize;
1903 		armv5_dcache_sets_max = (1U << (arm_dcache_log2_linesize +
1904 			arm_dcache_log2_nsets)) - armv5_dcache_sets_inc;
1905 		armv5_dcache_index_inc = 1U << (32 - arm_dcache_log2_assoc);
1906 		armv5_dcache_index_max = 0U - armv5_dcache_index_inc;
1907 		cpu_do_powersave = 1;			/* Enable powersave */
1908 		pmap_pte_init_arm11mpcore();
1909 		if (arm_cache_prefer_mask)
1910 			uvmexp.ncolors = (arm_cache_prefer_mask >> PGSHIFT) + 1;
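		/*
		 * e.g. a VIPT way size spanning four 4 KB pages gives
		 * arm_cache_prefer_mask 0x3000, hence (0x3000 >> 12) + 1
		 * = 4 page colours.
		 */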
1911 
1912 		return 0;
1913 
1914 	}
1915 #endif	/* CPU_ARM11MPCORE */
1916 
1917 #if defined(CPU_ARM11)
1918 	if (cputype == CPU_ID_ARM1136JS ||
1919 	    cputype == CPU_ID_ARM1136JSR1 ||
1920 	    cputype == CPU_ID_ARM1176JZS) {
1921 		cpufuncs = arm11_cpufuncs;
1922 #if defined(CPU_ARM1136)
1923 		if (cputype == CPU_ID_ARM1136JS ||
1924 		    cputype == CPU_ID_ARM1136JSR1) {
1925 			cpufuncs = arm1136_cpufuncs;
1926 			if (cputype == CPU_ID_ARM1136JS)
1927 				cpufuncs.cf_sleep = arm1136_sleep_rev0;
1928 		}
1929 #endif
1930 #if defined(CPU_ARM1176)
1931 		if (cputype == CPU_ID_ARM1176JZS) {
1932 			cpufuncs = arm1176_cpufuncs;
1933 		}
1934 #endif
1935 #if defined(CPU_ARMV7) || defined(CPU_PRE_ARMV6)
1936 		cpu_armv6_p = true;
1937 #endif
1938 		cpu_do_powersave = 1;			/* Enable powersave */
1939 		get_cachetype_cp15();
1940 #ifdef ARM11_CACHE_WRITE_THROUGH
1941 		pmap_pte_init_arm11();
1942 #else
1943 		pmap_pte_init_generic();
1944 #endif
1945 		if (arm_cache_prefer_mask)
1946 			uvmexp.ncolors = (arm_cache_prefer_mask >> PGSHIFT) + 1;
1947 
1948 		/*
1949 		 * Start and reset the PMC Cycle Counter.
1950 		 */
1951 		armreg_pmcrv6_write(ARM11_PMCCTL_E | ARM11_PMCCTL_P | ARM11_PMCCTL_C);
1952 		return 0;
1953 	}
1954 #endif /* CPU_ARM11 */
1955 #ifdef CPU_SA110
1956 	if (cputype == CPU_ID_SA110) {
1957 		cpufuncs = sa110_cpufuncs;
1958 		get_cachetype_table();
1959 		pmap_pte_init_sa1();
1960 		return 0;
1961 	}
1962 #endif	/* CPU_SA110 */
1963 #ifdef CPU_SA1100
1964 	if (cputype == CPU_ID_SA1100) {
1965 		cpufuncs = sa11x0_cpufuncs;
1966 		get_cachetype_table();
1967 		pmap_pte_init_sa1();
1968 
1969 		/* Use powersave on this CPU. */
1970 		cpu_do_powersave = 1;
1971 
1972 		return 0;
1973 	}
1974 #endif	/* CPU_SA1100 */
1975 #ifdef CPU_SA1110
1976 	if (cputype == CPU_ID_SA1110) {
1977 		cpufuncs = sa11x0_cpufuncs;
1978 		get_cachetype_table();
1979 		pmap_pte_init_sa1();
1980 
1981 		/* Use powersave on this CPU. */
1982 		cpu_do_powersave = 1;
1983 
1984 		return 0;
1985 	}
1986 #endif	/* CPU_SA1110 */
1987 #ifdef CPU_FA526
1988 	if (cputype == CPU_ID_FA526) {
1989 		cpufuncs = fa526_cpufuncs;
1990 		get_cachetype_cp15();
1991 		pmap_pte_init_generic();
1992 
1993 		/* Use powersave on this CPU. */
1994 		cpu_do_powersave = 1;
1995 
1996 		return 0;
1997 	}
1998 #endif	/* CPU_FA526 */
1999 #ifdef CPU_IXP12X0
2000 	if (cputype == CPU_ID_IXP1200) {
2001 		cpufuncs = ixp12x0_cpufuncs;
2002 		get_cachetype_table();
2003 		pmap_pte_init_sa1();
2004 		return 0;
2005 	}
2006 #endif  /* CPU_IXP12X0 */
2007 #ifdef CPU_XSCALE_80200
2008 	if (cputype == CPU_ID_80200) {
2009 		int rev = cpufunc_id() & CPU_ID_REVISION_MASK;
2010 
2011 		i80200_icu_init();
2012 
2013 		/*
2014 		 * Reset the Performance Monitoring Unit to a
2015 		 * pristine state:
2016 		 *	- CCNT, PMN0, PMN1 reset to 0
2017 		 *	- overflow indications cleared
2018 		 *	- all counters disabled
2019 		 */
2020 		__asm volatile("mcr p14, 0, %0, c0, c0, 0"
2021 			:
2022 			: "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
2023 			       PMNC_CC_IF));
2024 
2025 #if defined(XSCALE_CCLKCFG)
2026 		/*
2027 		 * Crank CCLKCFG to maximum legal value.
2028 		 */
2029 		__asm volatile ("mcr p14, 0, %0, c6, c0, 0"
2030 			:
2031 			: "r" (XSCALE_CCLKCFG));
2032 #endif
2033 
2034 		/*
2035 		 * XXX Disable ECC in the Bus Controller Unit; we
2036 		 * don't really support it, yet.  Clear any pending
2037 		 * error indications.
2038 		 */
2039 		__asm volatile("mcr p13, 0, %0, c0, c1, 0"
2040 			:
2041 			: "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));
2042 
2043 		cpufuncs = xscale_cpufuncs;
2044 #if defined(PERFCTRS)
2045 		xscale_pmu_init();
2046 #endif
2047 
2048 		/*
2049 		 * i80200 errata: Step-A0 and A1 have a bug where
2050 		 * D$ dirty bits are not cleared on "invalidate by
2051 		 * address".
2052 		 *
2053 		 * Workaround: Clean cache line before invalidating.
2054 		 */
2055 		if (rev == 0 || rev == 1)
2056 			cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng;
2057 
2058 		get_cachetype_cp15();
2059 		pmap_pte_init_xscale();
2060 		return 0;
2061 	}
2062 #endif /* CPU_XSCALE_80200 */
2063 #ifdef CPU_XSCALE_80321
2064 	if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
2065 	    cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0 ||
2066 	    cputype == CPU_ID_80219_400 || cputype == CPU_ID_80219_600) {
2067 		i80321_icu_init();
2068 
2069 		/*
2070 		 * Reset the Performance Monitoring Unit to a
2071 		 * pristine state:
2072 		 *	- CCNT, PMN0, PMN1 reset to 0
2073 		 *	- overflow indications cleared
2074 		 *	- all counters disabled
2075 		 */
2076 		__asm volatile("mcr p14, 0, %0, c0, c0, 0"
2077 			:
2078 			: "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
2079 			       PMNC_CC_IF));
2080 
2081 		cpufuncs = xscale_cpufuncs;
2082 #if defined(PERFCTRS)
2083 		xscale_pmu_init();
2084 #endif
2085 
2086 		get_cachetype_cp15();
2087 		pmap_pte_init_xscale();
2088 		return 0;
2089 	}
2090 #endif /* CPU_XSCALE_80321 */
2091 #ifdef __CPU_XSCALE_PXA2XX
2092 	/* ignore core revision to test PXA2xx CPUs */
2093 	if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X ||
2094 	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
2095 	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {
2096 
2097 		cpufuncs = xscale_cpufuncs;
2098 #if defined(PERFCTRS)
2099 		xscale_pmu_init();
2100 #endif
2101 
2102 		get_cachetype_cp15();
2103 		pmap_pte_init_xscale();
2104 
2105 		/* Use powersave on this CPU. */
2106 		cpu_do_powersave = 1;
2107 
2108 		return 0;
2109 	}
2110 #endif /* __CPU_XSCALE_PXA2XX */
2111 #ifdef CPU_XSCALE_IXP425
2112 	if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
2113 	    cputype == CPU_ID_IXP425_266) {
2114 		ixp425_icu_init();
2115 
2116 		cpufuncs = xscale_cpufuncs;
2117 #if defined(PERFCTRS)
2118 		xscale_pmu_init();
2119 #endif
2120 
2121 		get_cachetype_cp15();
2122 		pmap_pte_init_xscale();
2123 
2124 		return 0;
2125 	}
2126 #endif /* CPU_XSCALE_IXP425 */
2127 #if defined(CPU_CORTEX)
2128 	if (CPU_ID_CORTEX_P(cputype)) {
2129 		cpufuncs = armv7_cpufuncs;
2130 		cpu_do_powersave = 1;			/* Enable powersave */
2131 #if defined(CPU_ARMV6) || defined(CPU_PRE_ARMV6)
2132 		cpu_armv7_p = true;
2133 #endif
2134 		get_cachetype_cp15();
2135 		pmap_pte_init_armv7();
2136 		if (arm_cache_prefer_mask)
2137 			uvmexp.ncolors = (arm_cache_prefer_mask >> PGSHIFT) + 1;
2138 		/*
2139 		 * Start and reset the PMC Cycle Counter.
2140 		 */
2141 		armreg_pmcr_write(ARM11_PMCCTL_E | ARM11_PMCCTL_P | ARM11_PMCCTL_C);
2142 		armreg_pmcntenset_write(CORTEX_CNTENS_C);
2143 		return 0;
2144 	}
2145 #endif /* CPU_CORTEX */
2146 
2147 #if defined(CPU_PJ4B)
2148 	if ((cputype == CPU_ID_MV88SV581X_V6 ||
2149 	    cputype == CPU_ID_MV88SV581X_V7 ||
2150 	    cputype == CPU_ID_MV88SV584X_V7 ||
2151 	    cputype == CPU_ID_ARM_88SV581X_V6 ||
2152 	    cputype == CPU_ID_ARM_88SV581X_V7) &&
2153 	    (armreg_pfr0_read() & ARM_PFR0_THUMBEE_MASK)) {
2154 			cpufuncs = pj4bv7_cpufuncs;
2155 #if defined(CPU_ARMV6) || defined(CPU_PRE_ARMV6)
2156 			cpu_armv7_p = true;
2157 #endif
2158 			get_cachetype_cp15();
2159 			pmap_pte_init_armv7();
2160 			return 0;
2161 	}
2162 #endif /* CPU_PJ4B */
2163 
2164 	/*
2165 	 * Bzzzz. And the answer was ...
2166 	 */
2167 	panic("No support for this CPU type (%08x) in kernel", cputype);
2168 	return(ARCHITECTURE_NOT_PRESENT);
2169 }
2170 
2171 #ifdef CPU_ARM2
2172 u_int arm2_id(void)
2173 {
2174 
2175 	return CPU_ID_ARM2;
2176 }
2177 #endif /* CPU_ARM2 */
2178 
2179 #ifdef CPU_ARM250
2180 u_int arm250_id(void)
2181 {
2182 
2183 	return CPU_ID_ARM250;
2184 }
2185 #endif /* CPU_ARM250 */
2186 
2187 /*
2188  * Fixup routines for data and prefetch aborts.
2189  *
2190  * Several compile-time symbols are used:
2191  *
2192  * DEBUG_FAULT_CORRECTION - Print debugging information during the
2193  * correction of registers after a fault.
2194  * ARM6_LATE_ABORT - ARM6 supports both early and late aborts;
2195  * when defined, late aborts are used.
2196  */
2197 
2198 
2199 /*
2200  * Null abort fixup routine.
2201  * For use when no fixup is required.
2202  */
2203 int
2204 cpufunc_null_fixup(void *arg)
2205 {
2206 	return(ABORT_FIXUP_OK);
2207 }
2208 
2209 
2210 #if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3) || \
2211     defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI)
2212 
2213 #ifdef DEBUG_FAULT_CORRECTION
2214 #define DFC_PRINTF(x)		printf x
2215 #define DFC_DISASSEMBLE(x)	disassemble(x)
2216 #else
2217 #define DFC_PRINTF(x)		/* nothing */
2218 #define DFC_DISASSEMBLE(x)	/* nothing */
2219 #endif
2220 
2221 /*
2222  * "Early" data abort fixup.
2223  *
2224  * For ARM2, ARM2as, ARM3 and ARM6 (in early-abort mode).  Also used
2225  * indirectly by ARM6 (in late-abort mode) and ARM7[TDMI].
2226  *
2227  * In early aborts, we may have to fix up LDM, STM, LDC and STC.
2228  */
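
/*
 * Illustrative example: suppose "ldmia r4!, {r0-r3}" aborts.  The
 * writeback (bit 21) has already advanced r4 by 4 registers * 4 bytes
 * = 16, so the code below counts the registers in the transfer list
 * and winds r4 back down by 16 before the instruction is retried.
 */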
2229 int
2230 early_abort_fixup(void *arg)
2231 {
2232 	trapframe_t *frame = arg;
2233 	u_int fault_pc;
2234 	u_int fault_instruction;
2235 	int saved_lr = 0;
2236 
2237 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
2238 
2239 		/* Ok an abort in SVC mode */
2240 
2241 		/*
2242 		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
2243 		 * as the fault happened in svc mode but we need it in the
2244 		 * usr slot so we can treat the registers as an array of ints
2245 		 * during fixing.
2246 		 * NOTE: The PC is in the r15 position here, but writeback
2247 		 * to r15 is not allowed.
2248 		 * Doing it like this is more efficient than trapping this
2249 		 * case in all possible locations in the following fixup code.
2250 		 */
2251 
2252 		saved_lr = frame->tf_usr_lr;
2253 		frame->tf_usr_lr = frame->tf_svc_lr;
2254 
2255 		/*
2256 		 * Note the trapframe does not have the SVC r13 so a fault
2257 		 * from an instruction with writeback to r13 in SVC mode is
2258 		 * not allowed. This should not happen as the kstack is
2259 		 * always valid.
2260 		 */
2261 	}
2262 
2263 	/* Get fault address and status from the CPU */
2264 
2265 	fault_pc = frame->tf_pc;
2266 	fault_instruction = *((volatile unsigned int *)fault_pc);
2267 
2268 	/* Decode the fault instruction and fix the registers as needed */
2269 
2270 	if ((fault_instruction & 0x0e000000) == 0x08000000) {
2271 		int base;
2272 		int loop;
2273 		int count;
2274 		int *registers = &frame->tf_r0;
2275 
2276 		DFC_PRINTF(("LDM/STM\n"));
2277 		DFC_DISASSEMBLE(fault_pc);
2278 		if (fault_instruction & (1 << 21)) {
2279 			DFC_PRINTF(("This instruction must be corrected\n"));
2280 			base = (fault_instruction >> 16) & 0x0f;
2281 			if (base == 15)
2282 				return ABORT_FIXUP_FAILED;
2283 			/* Count registers transferred */
2284 			count = 0;
2285 			for (loop = 0; loop < 16; ++loop) {
2286 				if (fault_instruction & (1<<loop))
2287 					++count;
2288 			}
2289 			DFC_PRINTF(("%d registers used\n", count));
2290 			DFC_PRINTF(("Corrected r%d by %d bytes ",
2291 				       base, count * 4));
2292 			if (fault_instruction & (1 << 23)) {
2293 				DFC_PRINTF(("down\n"));
2294 				registers[base] -= count * 4;
2295 			} else {
2296 				DFC_PRINTF(("up\n"));
2297 				registers[base] += count * 4;
2298 			}
2299 		}
2300 	} else if ((fault_instruction & 0x0e000000) == 0x0c000000) {
2301 		int base;
2302 		int offset;
2303 		int *registers = &frame->tf_r0;
2304 
2305 		/* REGISTER CORRECTION IS REQUIRED FOR THESE INSTRUCTIONS */
2306 
2307 		DFC_DISASSEMBLE(fault_pc);
2308 
2309 		/* Only need to fix registers if write back is turned on */
2310 
2311 		if ((fault_instruction & (1 << 21)) != 0) {
2312 			base = (fault_instruction >> 16) & 0x0f;
2313 			if (base == 13 &&
2314 			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
2315 				return ABORT_FIXUP_FAILED;
2316 			if (base == 15)
2317 				return ABORT_FIXUP_FAILED;
2318 
2319 			offset = (fault_instruction & 0xff) << 2;
2320 			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
2321 			if ((fault_instruction & (1 << 23)) != 0)
2322 				offset = -offset;
2323 			registers[base] += offset;
2324 			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
2325 		}
2326 	}
2328 
2329 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
2330 
2331 		/* Ok an abort in SVC mode */
2332 
2333 		/*
2334 		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
2335 		 * as the fault happened in svc mode but we need it in the
2336 		 * usr slot so we can treat the registers as an array of ints
2337 		 * during fixing.
2338 		 * NOTE: The PC is in the r15 position here, but writeback
2339 		 * to r15 is not allowed.
2340 		 * Doing it like this is more efficient than trapping this
2341 		 * case in all possible locations in the prior fixup code.
2342 		 */
2343 
2344 		frame->tf_svc_lr = frame->tf_usr_lr;
2345 		frame->tf_usr_lr = saved_lr;
2346 
2347 		/*
2348 		 * Note the trapframe does not have the SVC r13 so a fault
2349 		 * from an instruction with writeback to r13 in SVC mode is
2350 		 * not allowed. This should not happen as the kstack is
2351 		 * always valid.
2352 		 */
2353 	}
2354 
2355 	return(ABORT_FIXUP_OK);
2356 }
2357 #endif	/* CPU_ARM2/250/3/6/7 */
2358 
2359 
2360 #if (defined(CPU_ARM6) && defined(ARM6_LATE_ABORT)) || defined(CPU_ARM7) || \
2361 	defined(CPU_ARM7TDMI)
2362 /*
2363  * "Late" (base updated) data abort fixup
2364  *
2365  * For ARM6 (in late-abort mode) and ARM7[TDMI].
2366  *
2367  * In this model, all data-transfer instructions need fixing up.  We defer
2368  * LDM, STM, LDC and STC fixup to the early-abort handler.
2369  */
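
/*
 * Illustrative example: a post-indexed "ldr r0, [r1], #8" that aborts
 * leaves r1 already advanced by 8 (U bit set), so the code below
 * decodes the offset, negates it for the "add" case and winds r1 back
 * before the instruction is retried.
 */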
2370 int
2371 late_abort_fixup(void *arg)
2372 {
2373 	trapframe_t *frame = arg;
2374 	u_int fault_pc;
2375 	u_int fault_instruction;
2376 	int saved_lr = 0;
2377 
2378 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
2379 
2380 		/* Ok an abort in SVC mode */
2381 
2382 		/*
2383 		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
2384 		 * as the fault happened in svc mode but we need it in the
2385 		 * usr slot so we can treat the registers as an array of ints
2386 		 * during fixing.
2387 		 * NOTE: The PC is in the r15 position here, but writeback
2388 		 * to r15 is not allowed.
2389 		 * Doing it like this is more efficient than trapping this
2390 		 * case in all possible locations in the following fixup code.
2391 		 */
2392 
2393 		saved_lr = frame->tf_usr_lr;
2394 		frame->tf_usr_lr = frame->tf_svc_lr;
2395 
2396 		/*
2397 		 * Note the trapframe does not have the SVC r13 so a fault
2398 		 * from an instruction with writeback to r13 in SVC mode is
2399 		 * not allowed. This should not happen as the kstack is
2400 		 * always valid.
2401 		 */
2402 	}
2403 
2404 	/* Get fault address and status from the CPU */
2405 
2406 	fault_pc = frame->tf_pc;
2407 	fault_instruction = *((volatile unsigned int *)fault_pc);
2408 
2409 	/* Decode the fault instruction and fix the registers as needed */
2410 
2411 	/* Was it a swap instruction? */
2412 
2413 	if ((fault_instruction & 0x0fb00ff0) == 0x01000090) {
2414 		DFC_DISASSEMBLE(fault_pc);
2415 	} else if ((fault_instruction & 0x0c000000) == 0x04000000) {
2416 
2417 		/* Was it an ldr/str instruction? */
2418 		/* This is for late abort only */
2419 
2420 		int base;
2421 		int offset;
2422 		int *registers = &frame->tf_r0;
2423 
2424 		DFC_DISASSEMBLE(fault_pc);
2425 
2426 		/* This is for late abort only */
2427 
2428 		if ((fault_instruction & (1 << 24)) == 0
2429 		    || (fault_instruction & (1 << 21)) != 0) {
2430 			/* post-indexed ldr/str, or pre-indexed with writeback */
2431 
2432 			base = (fault_instruction >> 16) & 0x0f;
2433 			if (base == 13 &&
2434 			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
2435 				return ABORT_FIXUP_FAILED;
2436 			if (base == 15)
2437 				return ABORT_FIXUP_FAILED;
2438 			DFC_PRINTF(("late abt fix: r%d=%08x : ",
2439 				       base, registers[base]));
2440 			if ((fault_instruction & (1 << 25)) == 0) {
2441 				/* Immediate offset - easy */
2442 
2443 				offset = fault_instruction & 0xfff;
2444 				if ((fault_instruction & (1 << 23)))
2445 					offset = -offset;
2446 				registers[base] += offset;
2447 				DFC_PRINTF(("imm=%08x ", offset));
2448 			} else {
2449 				/* offset is a shifted register */
2450 				int shift;
2451 
2452 				offset = fault_instruction & 0x0f;
2453 				if (offset == base)
2454 					return ABORT_FIXUP_FAILED;
2455 
2456 				/*
2457 				 * Register offset - harder: we have to
2458 				 * cope with shifts!
2459 				 */
2460 				offset = registers[offset];
2461 
2462 				if ((fault_instruction & (1 << 4)) == 0)
2463 					/* shift with amount */
2464 					shift = (fault_instruction >> 7) & 0x1f;
2465 				else {
2466 					/* shift with register */
2467 					if ((fault_instruction & (1 << 7)) != 0)
2468 						/* undefined for now so bail out */
2469 						return ABORT_FIXUP_FAILED;
2470 					shift = ((fault_instruction >> 8) & 0xf);
2471 					if (base == shift)
2472 						return ABORT_FIXUP_FAILED;
2473 					DFC_PRINTF(("shift reg=%d ", shift));
2474 					shift = registers[shift];
2475 				}
2476 				DFC_PRINTF(("shift=%08x ", shift));
2477 				switch (((fault_instruction >> 5) & 0x3)) {
2478 				case 0 : /* Logical left */
2479 					offset = (int)(((u_int)offset) << shift);
2480 					break;
2481 				case 1 : /* Logical Right */
2482 					if (shift == 0) shift = 32;
2483 					offset = (int)(((u_int)offset) >> shift);
2484 					break;
2485 				case 2 : /* Arithmetic Right */
2486 					if (shift == 0) shift = 32;
2487 					offset = (int)(((int)offset) >> shift);
2488 					break;
2489 				case 3 : /* Rotate right (ror or rrx) */
2490 					return ABORT_FIXUP_FAILED;
2491 					break;
2492 				}
2493 
2494 				DFC_PRINTF(("abt: fixed LDR/STR with "
2495 					       "register offset\n"));
2496 				if ((fault_instruction & (1 << 23)))
2497 					offset = -offset;
2498 				DFC_PRINTF(("offset=%08x ", offset));
2499 				registers[base] += offset;
2500 			}
2501 			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
2502 		}
2503 	}
2504 
2505 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
2506 
2507 		/* Ok an abort in SVC mode */
2508 
2509 		/*
2510 		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
2511 		 * as the fault happened in svc mode but we need it in the
2512 		 * usr slot so we can treat the registers as an array of ints
2513 		 * during fixing.
2514 		 * NOTE: The PC is in the r15 position here, but writeback
2515 		 * to r15 is not allowed.
2516 		 * Doing it like this is more efficient than trapping this
2517 		 * case in all possible locations in the prior fixup code.
2518 		 */
2519 
2520 		frame->tf_svc_lr = frame->tf_usr_lr;
2521 		frame->tf_usr_lr = saved_lr;
2522 
2523 		/*
2524 		 * Note the trapframe does not have the SVC r13 so a fault
2525 		 * from an instruction with writeback to r13 in SVC mode is
2526 		 * not allowed. This should not happen as the kstack is
2527 		 * always valid.
2528 		 */
2529 	}
2530 
2531 	/*
2532 	 * Now let the early-abort fixup routine have a go, in case it
2533 	 * was an LDM, STM, LDC or STC that faulted.
2534 	 */
2535 
2536 	return early_abort_fixup(arg);
2537 }
2538 #endif	/* CPU_ARM6(LATE)/7/7TDMI */
2539 
2540 /*
2541  * CPU Setup code
2542  */
2543 
2544 #if defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) || \
2545 	defined(CPU_ARM8) || defined (CPU_ARM9) || defined (CPU_ARM9E) || \
2546 	defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
2547 	defined(CPU_FA526) || \
2548 	defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
2549 	defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425) || \
2550 	defined(CPU_ARM10) || defined(CPU_SHEEVA) || \
2551 	defined(CPU_ARMV6) || defined(CPU_ARMV7)
2552 
2553 #define IGN	0
2554 #define OR	1
2555 #define BIC	2
2556 
2557 struct cpu_option {
2558 	const char *co_name;
2559 	int	co_falseop;
2560 	int	co_trueop;
2561 	int	co_value;
2562 };
2563 
2564 static u_int parse_cpu_options(char *, struct cpu_option *, u_int);
2565 
2566 static u_int
2567 parse_cpu_options(char *args, struct cpu_option *optlist, u_int cpuctrl)
2568 {
2569 	int integer;
2570 
2571 	if (args == NULL)
2572 		return(cpuctrl);
2573 
2574 	while (optlist->co_name) {
2575 		if (get_bootconf_option(args, optlist->co_name,
2576 		    BOOTOPT_TYPE_BOOLEAN, &integer)) {
2577 			if (integer) {
2578 				if (optlist->co_trueop == OR)
2579 					cpuctrl |= optlist->co_value;
2580 				else if (optlist->co_trueop == BIC)
2581 					cpuctrl &= ~optlist->co_value;
2582 			} else {
2583 				if (optlist->co_falseop == OR)
2584 					cpuctrl |= optlist->co_value;
2585 				else if (optlist->co_falseop == BIC)
2586 					cpuctrl &= ~optlist->co_value;
2587 			}
2588 		}
2589 		++optlist;
2590 	}
2591 	return(cpuctrl);
2592 }
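
#if 0
	/*
	 * Usage sketch (illustrative, not compiled): with the
	 * arm678_options table below, a boot argument "cpu.nocache=1"
	 * selects the true-op BIC and clears CPU_CONTROL_IDC_ENABLE,
	 * while "cpu.nocache=0" applies the false-op OR and sets it.
	 */
	u_int ctrl = CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
	ctrl = parse_cpu_options(boot_args, arm678_options, ctrl);
	/* ctrl now has CPU_CONTROL_IDC_ENABLE cleared. */
#endif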
2593 #endif /* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 || CPU_SA110 || ... || CPU_ARMV6 || CPU_ARMV7 */
2594 
2595 #if defined (CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) \
2596 	|| defined(CPU_ARM8)
2597 struct cpu_option arm678_options[] = {
2598 #ifdef COMPAT_12
2599 	{ "nocache",		IGN, BIC, CPU_CONTROL_IDC_ENABLE },
2600 	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
2601 #endif	/* COMPAT_12 */
2602 	{ "cpu.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
2603 	{ "cpu.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
2604 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2605 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2606 	{ NULL,			IGN, IGN, 0 }
2607 };
2608 
2609 #endif	/* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 */
2610 
2611 #ifdef CPU_ARM6
2612 struct cpu_option arm6_options[] = {
2613 	{ "arm6.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
2614 	{ "arm6.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
2615 	{ "arm6.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2616 	{ "arm6.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2617 	{ NULL,			IGN, IGN, 0 }
2618 };
2619 
2620 void
2621 arm6_setup(char *args)
2622 {
2623 
2624 	/* Set up default control registers bits */
2625 	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2626 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2627 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
2628 #if 0
2629 	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2630 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2631 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
2632 		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE
2633 		 | CPU_CONTROL_AFLT_ENABLE;
2634 #endif
2635 
2636 #ifdef ARM6_LATE_ABORT
2637 	cpuctrl |= CPU_CONTROL_LABT_ENABLE;
2638 #endif	/* ARM6_LATE_ABORT */
2639 
2640 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2641 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2642 #endif
2643 
2644 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
2645 	cpuctrl = parse_cpu_options(args, arm6_options, cpuctrl);
2646 
2647 #ifdef __ARMEB__
2648 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2649 #endif
2650 
2651 	/* Clear out the cache */
2652 	cpu_idcache_wbinv_all();
2653 
2654 	/* Set the control register */
2655 	curcpu()->ci_ctrl = cpuctrl;
2656 	cpu_control(0xffffffff, cpuctrl);
2657 }
2658 #endif	/* CPU_ARM6 */
2659 
2660 #ifdef CPU_ARM7
2661 struct cpu_option arm7_options[] = {
2662 	{ "arm7.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
2663 	{ "arm7.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
2664 	{ "arm7.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2665 	{ "arm7.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2666 #ifdef COMPAT_12
2667 	{ "fpaclk2",		BIC, OR,  CPU_CONTROL_CPCLK },
2668 #endif	/* COMPAT_12 */
2669 	{ "arm700.fpaclk",	BIC, OR,  CPU_CONTROL_CPCLK },
2670 	{ NULL,			IGN, IGN, 0 }
2671 };
2672 
2673 void
2674 arm7_setup(char *args)
2675 {
2676 
2677 	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2678 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2679 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
2680 #if 0
2681 	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2682 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2683 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
2684 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_LABT_ENABLE
2685 		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE
2686 		 | CPU_CONTROL_AFLT_ENABLE;
2687 #endif
2688 
2689 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2690 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2691 #endif
2692 
2693 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
2694 	cpuctrl = parse_cpu_options(args, arm7_options, cpuctrl);
2695 
2696 #ifdef __ARMEB__
2697 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2698 #endif
2699 
2700 	/* Clear out the cache */
2701 	cpu_idcache_wbinv_all();
2702 
2703 	/* Set the control register */
2704 	curcpu()->ci_ctrl = cpuctrl;
2705 	cpu_control(0xffffffff, cpuctrl);
2706 }
2707 #endif	/* CPU_ARM7 */
2708 
2709 #ifdef CPU_ARM7TDMI
2710 struct cpu_option arm7tdmi_options[] = {
2711 	{ "arm7.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
2712 	{ "arm7.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
2713 	{ "arm7.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2714 	{ "arm7.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2715 #ifdef COMPAT_12
2716 	{ "fpaclk2",		BIC, OR,  CPU_CONTROL_CPCLK },
2717 #endif	/* COMPAT_12 */
2718 	{ "arm700.fpaclk",	BIC, OR,  CPU_CONTROL_CPCLK },
2719 	{ NULL,			IGN, IGN, 0 }
2720 };
2721 
2722 void
2723 arm7tdmi_setup(char *args)
2724 {
2725 	int cpuctrl;
2726 
2727 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2728 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2729 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
2730 
2731 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
2732 	cpuctrl = parse_cpu_options(args, arm7tdmi_options, cpuctrl);
2733 
2734 #ifdef __ARMEB__
2735 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2736 #endif
2737 
2738 	/* Clear out the cache */
2739 	cpu_idcache_wbinv_all();
2740 
2741 	/* Set the control register */
2742 	curcpu()->ci_ctrl = cpuctrl;
2743 	cpu_control(0xffffffff, cpuctrl);
2744 }
2745 #endif	/* CPU_ARM7TDMI */
2746 
2747 #ifdef CPU_ARM8
2748 struct cpu_option arm8_options[] = {
2749 	{ "arm8.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
2750 	{ "arm8.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
2751 	{ "arm8.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2752 	{ "arm8.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2753 #ifdef COMPAT_12
2754 	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2755 #endif	/* COMPAT_12 */
2756 	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2757 	{ "arm8.branchpredict",	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2758 	{ NULL,			IGN, IGN, 0 }
2759 };
2760 
2761 void
2762 arm8_setup(char *args)
2763 {
2764 	int integer;
2765 	int clocktest;
2766 	int setclock = 0;
2767 
2768 	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2769 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2770 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
2771 #if 0
2772 	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2773 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2774 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
2775 		 | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_ROM_ENABLE
2776 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE;
2777 #endif
2778 
2779 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2780 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2781 #endif
2782 
2783 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
2784 	cpuctrl = parse_cpu_options(args, arm8_options, cpuctrl);
2785 
2786 #ifdef __ARMEB__
2787 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2788 #endif
2789 
2790 	/* Get clock configuration */
2791 	clocktest = arm8_clock_config(0, 0) & 0x0f;
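
	/*
	 * Assumed clock/test register layout, as used below: bit 0
	 * dynamic clock switching, bit 1 synchronous mode, bits 3:2
	 * fast clock scale, bits 7:5 test bits.
	 */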
2792 
2793 	/* Special ARM8 clock and test configuration */
2794 	if (get_bootconf_option(args, "arm8.clock.reset", BOOTOPT_TYPE_BOOLEAN, &integer)) {
2795 		clocktest = 0;
2796 		setclock = 1;
2797 	}
2798 	if (get_bootconf_option(args, "arm8.clock.dynamic", BOOTOPT_TYPE_BOOLEAN, &integer)) {
2799 		if (integer)
2800 			clocktest |= 0x01;
2801 		else
2802 			clocktest &= ~(0x01);
2803 		setclock = 1;
2804 	}
2805 	if (get_bootconf_option(args, "arm8.clock.sync", BOOTOPT_TYPE_BOOLEAN, &integer)) {
2806 		if (integer)
2807 			clocktest |= 0x02;
2808 		else
2809 			clocktest &= ~(0x02);
2810 		setclock = 1;
2811 	}
2812 	if (get_bootconf_option(args, "arm8.clock.fast", BOOTOPT_TYPE_BININT, &integer)) {
2813 		clocktest = (clocktest & ~0x0c) | (integer & 3) << 2;
2814 		setclock = 1;
2815 	}
2816 	if (get_bootconf_option(args, "arm8.test", BOOTOPT_TYPE_BININT, &integer)) {
2817 		clocktest |= (integer & 7) << 5;
2818 		setclock = 1;
2819 	}
2820 
2821 	/* Clear out the cache */
2822 	cpu_idcache_wbinv_all();
2823 
2824 	/* Set the control register */
2825 	curcpu()->ci_ctrl = cpuctrl;
2826 	cpu_control(0xffffffff, cpuctrl);
2827 
2828 	/* Set the clock/test register */
2829 	if (setclock)
2830 		arm8_clock_config(0x7f, clocktest);
2831 }
2832 #endif	/* CPU_ARM8 */
2833 
2834 #ifdef CPU_ARM9
2835 struct cpu_option arm9_options[] = {
2836 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2837 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2838 	{ "arm9.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2839 	{ "arm9.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2840 	{ "arm9.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2841 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2842 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2843 	{ "arm9.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2844 	{ NULL,			IGN, IGN, 0 }
2845 };
2846 
2847 void
2848 arm9_setup(char *args)
2849 {
2850 
2851 	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2852 	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2853 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2854 	    | CPU_CONTROL_WBUF_ENABLE;
2855 	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2856 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2857 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2858 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2859 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2860 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
2861 		 | CPU_CONTROL_ROUNDROBIN;
2862 
2863 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2864 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2865 #endif
2866 
2867 	cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);
2868 
2869 #ifdef __ARMEB__
2870 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2871 #endif
2872 
2873 #ifndef ARM_HAS_VBAR
2874 	if (vector_page == ARM_VECTORS_HIGH)
2875 		cpuctrl |= CPU_CONTROL_VECRELOC;
2876 #endif
2877 
2878 	/* Clear out the cache */
2879 	cpu_idcache_wbinv_all();
2880 
2881 	/* Set the control register */
2882 	curcpu()->ci_ctrl = cpuctrl;
2883 	cpu_control(cpuctrlmask, cpuctrl);
2884 
2885 }
2886 #endif	/* CPU_ARM9 */
2887 
2888 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
2889 struct cpu_option arm10_options[] = {
2890 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2891 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2892 	{ "arm10.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2893 	{ "arm10.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2894 	{ "arm10.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2895 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2896 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2897 	{ "arm10.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2898 	{ NULL,			IGN, IGN, 0 }
2899 };
2900 
2901 void
2902 arm10_setup(char *args)
2903 {
2904 
2905 	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2906 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2907 	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
2908 #if 0
2909 	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2910 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2911 	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2912 	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2913 	    | CPU_CONTROL_BPRD_ENABLE
2914 	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
2915 #endif
2916 
2917 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2918 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2919 #endif
2920 
2921 	cpuctrl = parse_cpu_options(args, arm10_options, cpuctrl);
2922 
2923 #ifdef __ARMEB__
2924 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2925 #endif
2926 
2927 #ifndef ARM_HAS_VBAR
2928 	if (vector_page == ARM_VECTORS_HIGH)
2929 		cpuctrl |= CPU_CONTROL_VECRELOC;
2930 #endif
2931 
2932 	/* Clear out the cache */
2933 	cpu_idcache_wbinv_all();
2934 
2935 	/* Now really make sure they are clean.  */
2936 	__asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
2937 
2938 	/* Set the control register */
2939 	curcpu()->ci_ctrl = cpuctrl;
2940 	cpu_control(0xffffffff, cpuctrl);
2941 
2942 	/* And again. */
2943 	cpu_idcache_wbinv_all();
2944 }
2945 #endif	/* CPU_ARM9E || CPU_ARM10 */
2946 
2947 #if defined(CPU_ARM11)
2948 struct cpu_option arm11_options[] = {
2949 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2950 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2951 	{ "arm11.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2952 	{ "arm11.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2953 	{ "arm11.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2954 	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2955 	{ "arm11.branchpredict", BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2956 	{ NULL,			IGN, IGN, 0 }
2957 };
2958 
2959 void
2960 arm11_setup(char *args)
2961 {
2962 
2963 	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2964 #ifdef ARM_MMU_EXTENDED
2965 	    | CPU_CONTROL_XP_ENABLE
2966 #endif
2967 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2968 	    /* | CPU_CONTROL_BPRD_ENABLE */;
2969 	int cpuctrlmask = cpuctrl
2970 	    | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BPRD_ENABLE
2971 	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2972 	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
2973 
2974 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2975 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2976 #endif
2977 
2978 	cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);
2979 
2980 #ifdef __ARMEB__
2981 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2982 #endif
2983 
2984 #ifndef ARM_HAS_VBAR
2985 	if (vector_page == ARM_VECTORS_HIGH)
2986 		cpuctrl |= CPU_CONTROL_VECRELOC;
2987 #endif
2988 
2989 	/* Clear out the cache */
2990 	cpu_idcache_wbinv_all();
2991 
2992 	/* Now really make sure they are clean.  */
2993 	__asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
2994 
2995 	/* Allow detection code to find the VFP if it's fitted.  */
2996 	__asm volatile ("mcr\tp15, 0, %0, c1, c0, 2" : : "r" (0x0fffffff));
2997 
2998 	/* Set the control register */
2999 	curcpu()->ci_ctrl = cpuctrl;
3000 	cpu_control(cpuctrlmask, cpuctrl);
3001 
3002 	/* And again. */
3003 	cpu_idcache_wbinv_all();
3004 }
3005 #endif	/* CPU_ARM11 */
3006 
3007 #if defined(CPU_ARM11MPCORE)
3008 
3009 void
3010 arm11mpcore_setup(char *args)
3011 {
3012 
3013 	int cpuctrl = CPU_CONTROL_IC_ENABLE
3014 	    | CPU_CONTROL_DC_ENABLE
3015 #ifdef ARM_MMU_EXTENDED
3016 	    | CPU_CONTROL_XP_ENABLE
3017 #endif
3018 	    | CPU_CONTROL_BPRD_ENABLE;
3019 	int cpuctrlmask = cpuctrl
3020 	    | CPU_CONTROL_AFLT_ENABLE
3021 	    | CPU_CONTROL_VECRELOC;
3022 
3023 #ifdef	ARM11MPCORE_MMU_COMPAT
3024 	/* XXX: S and R? */
3025 #endif
3026 
3027 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3028 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3029 #endif
3030 
3031 	cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);
3032 
3033 #ifndef ARM_HAS_VBAR
3034 	if (vector_page == ARM_VECTORS_HIGH)
3035 		cpuctrl |= CPU_CONTROL_VECRELOC;
3036 #endif
3037 
3038 	/* Clear out the cache */
3039 	cpu_idcache_wbinv_all();
3040 
3041 	/* Now really make sure they are clean.  */
3042 	__asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
3043 
3044 	/* Allow detection code to find the VFP if it's fitted.  */
3045 	__asm volatile ("mcr\tp15, 0, %0, c1, c0, 2" : : "r" (0x0fffffff));
3046 
3047 	/* Set the control register */
3048 	curcpu()->ci_ctrl = cpu_control(cpuctrlmask, cpuctrl);
3049 
3050 	/* And again. */
3051 	cpu_idcache_wbinv_all();
3052 }
3053 #endif	/* CPU_ARM11MPCORE */
3054 
3055 #ifdef CPU_PJ4B
3056 void
3057 pj4bv7_setup(char *args)
3058 {
3059 	int cpuctrl;
3060 
3061 	pj4b_config();
3062 
3063 	cpuctrl = CPU_CONTROL_MMU_ENABLE;
3064 #ifdef ARM32_DISABLE_ALIGNMENT_FAULTS
3065 	cpuctrl |= CPU_CONTROL_UNAL_ENABLE;
3066 #else
3067 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3068 #endif
3069 	cpuctrl |= CPU_CONTROL_DC_ENABLE;
3070 	cpuctrl |= CPU_CONTROL_IC_ENABLE;
3071 	cpuctrl |= (0xf << 3);
3072 	cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
3073 	cpuctrl |= (0x5 << 16) | (1 << 22);
3074 	cpuctrl |= CPU_CONTROL_XP_ENABLE;
3075 
3076 #ifndef ARM_HAS_VBAR
3077 	if (vector_page == ARM_VECTORS_HIGH)
3078 		cpuctrl |= CPU_CONTROL_VECRELOC;
3079 #endif
3080 
3081 	/* Clear out the cache */
3082 	cpu_idcache_wbinv_all();
3083 
3084 	/* Set the control register */
3085 	cpu_control(0xffffffff, cpuctrl);
3086 
3087 	/* And again. */
3088 	cpu_idcache_wbinv_all();
3089 
3090 	curcpu()->ci_ctrl = cpuctrl;
3091 }
3092 #endif /* CPU_PJ4B */
3093 
3094 #if defined(CPU_ARMV7)
3095 struct cpu_option armv7_options[] = {
3096     { "cpu.cache",      BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3097     { "cpu.nocache",    OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3098     { "armv7.cache",    BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3099     { "armv7.icache",   BIC, OR,  CPU_CONTROL_IC_ENABLE },
3100     { "armv7.dcache",   BIC, OR,  CPU_CONTROL_DC_ENABLE },
3101     { NULL,             IGN, IGN, 0 }
3102 };
3103 
3104 void
3105 armv7_setup(char *args)
3106 {
3107 
3108 	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_IC_ENABLE
3109 	    | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_BPRD_ENABLE
3110 #ifdef __ARMEB__
3111 	    | CPU_CONTROL_EX_BEND
3112 #endif
3113 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3114 	    | CPU_CONTROL_AFLT_ENABLE
3115 #endif
3116 	    | CPU_CONTROL_UNAL_ENABLE;
3117 
3118 	int cpuctrlmask = cpuctrl | CPU_CONTROL_AFLT_ENABLE;
3119 
3120 
3121 	cpuctrl = parse_cpu_options(args, armv7_options, cpuctrl);
3122 
3123 #ifndef ARM_HAS_VBAR
3124 	if (vector_page == ARM_VECTORS_HIGH)
3125 		cpuctrl |= CPU_CONTROL_VECRELOC;
3126 #endif
3127 
3128 	/* Clear out the cache */
3129 	cpu_idcache_wbinv_all();
3130 
3131 	/* Set the control register */
3132 	curcpu()->ci_ctrl = cpuctrl;
3133 	cpu_control(cpuctrlmask, cpuctrl);
3134 }
3135 #endif /* CPU_ARMV7 */
3136 
3137 
3138 #if defined(CPU_ARM1136) || defined(CPU_ARM1176)
3139 void
3140 arm11x6_setup(char *args)
3141 {
3142 	int cpuctrl, cpuctrl_wax;
3143 	uint32_t auxctrl;
3144 	uint32_t sbz = 0;
3145 	uint32_t cpuid;
3146 
3147 	cpuid = cpu_id();
3148 
3149 	cpuctrl =
3150 		CPU_CONTROL_MMU_ENABLE  |
3151 		CPU_CONTROL_DC_ENABLE   |
3152 		CPU_CONTROL_WBUF_ENABLE |
3153 		CPU_CONTROL_32BP_ENABLE |
3154 		CPU_CONTROL_32BD_ENABLE |
3155 		CPU_CONTROL_LABT_ENABLE |
3156 		CPU_CONTROL_SYST_ENABLE |
3157 		CPU_CONTROL_UNAL_ENABLE |
3158 #ifdef ARM_MMU_EXTENDED
3159 		CPU_CONTROL_XP_ENABLE   |
3160 #endif
3161 		CPU_CONTROL_IC_ENABLE;
3162 
3163 	/*
3164 	 * "write as existing" bits
3165 	 * inverse of this is mask
3166 	 */
3167 	cpuctrl_wax =
3168 		(3 << 30) |
3169 		(1 << 29) |
3170 		(1 << 28) |
3171 		(3 << 26) |
3172 		(3 << 19) |
3173 		(1 << 17);
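
	/*
	 * Illustrative: cpu_control(~cpuctrl_wax, cpuctrl) below only
	 * modifies bits outside this mask, so e.g. bits 31:30 keep
	 * whatever value earlier boot stages left in them.
	 */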
3174 
3175 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3176 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3177 #endif
3178 
3179 	cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);
3180 
3181 #ifdef __ARMEB__
3182 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3183 #endif
3184 
3185 #ifndef ARM_HAS_VBAR
3186 	if (vector_page == ARM_VECTORS_HIGH)
3187 		cpuctrl |= CPU_CONTROL_VECRELOC;
3188 #endif
3189 
3190 	auxctrl = armreg_auxctl_read();
3191 	/*
3192 	 * This enables the workaround for the 364296 ARM1136
3193 	 * r0pX errata (possible cache data corruption with
3194 	 * hit-under-miss enabled). It sets the undocumented bit 31 in
3195 	 * the auxiliary control register and the FI bit in the control
3196 	 * register, thus disabling hit-under-miss without putting the
3197 	 * processor into full low interrupt latency mode. ARM11MPCore
3198 	 * is not affected.
3199 	 */
3200 	if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1136JS) { /* ARM1136JSr0pX */
3201 		cpuctrl |= CPU_CONTROL_FI_ENABLE;
3202 		auxctrl |= ARM1136_AUXCTL_PFI;
3203 	}
3204 
3205 	/*
3206 	 * Enable the ARM1176 erratum workaround
3207 	 */
3208 	if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */
3209 		auxctrl |= ARM1176_AUXCTL_PHD;
3210 	}
3211 
3212 	/* Clear out the cache */
3213 	cpu_idcache_wbinv_all();
3214 
3215 	/* Now really make sure they are clean.  */
3216 	__asm volatile ("mcr\tp15, 0, %0, c7, c7, 0" : : "r"(sbz));
3217 
3218 	/* Allow detection code to find the VFP if it's fitted.  */
3219 	__asm volatile ("mcr\tp15, 0, %0, c1, c0, 2" : : "r" (0x0fffffff));
3220 
3221 	/* Set the control register */
3222 	curcpu()->ci_ctrl = cpuctrl;
3223 	cpu_control(~cpuctrl_wax, cpuctrl);
3224 
3225 	/* Update auxctlr */
3226 	armreg_auxctl_write(auxctrl);
3227 
3228 	/* And again. */
3229 	cpu_idcache_wbinv_all();
3230 }
3231 #endif	/* CPU_ARM1136 || CPU_ARM1176 */
3232 
3233 #ifdef CPU_SA110
3234 struct cpu_option sa110_options[] = {
3235 #ifdef COMPAT_12
3236 	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3237 	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
3238 #endif	/* COMPAT_12 */
3239 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3240 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3241 	{ "sa110.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3242 	{ "sa110.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
3243 	{ "sa110.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
3244 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3245 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
3246 	{ "sa110.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3247 	{ NULL,			IGN, IGN, 0 }
3248 };
3249 
3250 void
3251 sa110_setup(char *args)
3252 {
3253 	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3254 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3255 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3256 		 | CPU_CONTROL_WBUF_ENABLE;
3257 #if 0
3258 	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3259 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3260 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3261 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
3262 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
3263 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
3264 		 | CPU_CONTROL_CPCLK;
3265 #endif
3266 
3267 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3268 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3269 #endif
3270 
3271 	cpuctrl = parse_cpu_options(args, sa110_options, cpuctrl);
3272 
3273 #ifdef __ARMEB__
3274 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3275 #endif
3276 
3277 #ifndef ARM_HAS_VBAR
3278 	if (vector_page == ARM_VECTORS_HIGH)
3279 		cpuctrl |= CPU_CONTROL_VECRELOC;
3280 #endif
3281 
3282 	/* Clear out the cache */
3283 	cpu_idcache_wbinv_all();
3284 
3285 	/* Set the control register */
3286 	curcpu()->ci_ctrl = cpuctrl;
3287 #if 0
3288 	cpu_control(cpuctrlmask, cpuctrl);
3289 #endif
3290 	cpu_control(0xffffffff, cpuctrl);
3291 
3292 	/*
3293 	 * Enable clock switching.  Note that this doesn't read or write
3294 	 * r0; r0 is just there to make the asm valid.
3295 	 */
3296 	__asm volatile ("mcr p15, 0, r0, c15, c1, 2");
3297 }
3298 #endif	/* CPU_SA110 */
3299 
3300 #if defined(CPU_SA1100) || defined(CPU_SA1110)
3301 struct cpu_option sa11x0_options[] = {
3302 #ifdef COMPAT_12
3303 	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3304 	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
3305 #endif	/* COMPAT_12 */
3306 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3307 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3308 	{ "sa11x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3309 	{ "sa11x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
3310 	{ "sa11x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
3311 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3312 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
3313 	{ "sa11x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3314 	{ NULL,			IGN, IGN, 0 }
3315 };
3316 
3317 void
3318 sa11x0_setup(char *args)
3319 {
3320 
3321 	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3322 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3323 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3324 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
3325 #if 0
3326 	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3327 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3328 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3329 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
3330 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
3331 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
3332 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
3333 #endif
3334 
3335 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3336 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3337 #endif
3338 
3339 	cpuctrl = parse_cpu_options(args, sa11x0_options, cpuctrl);
3340 
3341 #ifdef __ARMEB__
3342 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3343 #endif
3344 
3345 #ifndef ARM_HAS_VBAR
3346 	if (vector_page == ARM_VECTORS_HIGH)
3347 		cpuctrl |= CPU_CONTROL_VECRELOC;
3348 #endif
3349 
3350 	/* Clear out the cache */
3351 	cpu_idcache_wbinv_all();
3352 
3353 	/* Set the control register */
3354 	curcpu()->ci_ctrl = cpuctrl;
3355 	cpu_control(0xffffffff, cpuctrl);
3356 }
3357 #endif	/* CPU_SA1100 || CPU_SA1110 */
3358 
3359 #if defined(CPU_FA526)
3360 struct cpu_option fa526_options[] = {
3361 #ifdef COMPAT_12
3362 	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3363 	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
3364 #endif	/* COMPAT_12 */
3365 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3366 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3367 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3368 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
3369 	{ NULL,			IGN, IGN, 0 }
3370 };
3371 
3372 void
3373 fa526_setup(char *args)
3374 {
3375 
3376 	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3377 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3378 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3379 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
3380 #if 0
3381 	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3382 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3383 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3384 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
3385 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
3386 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
3387 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
3388 #endif
3389 
3390 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3391 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3392 #endif
3393 
3394 	cpuctrl = parse_cpu_options(args, fa526_options, cpuctrl);
3395 
3396 #ifdef __ARMEB__
3397 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3398 #endif
3399 
3400 #ifndef ARM_HAS_VBAR
3401 	if (vector_page == ARM_VECTORS_HIGH)
3402 		cpuctrl |= CPU_CONTROL_VECRELOC;
3403 #endif
3404 
3405 	/* Clear out the cache */
3406 	cpu_idcache_wbinv_all();
3407 
3408 	/* Set the control register */
3409 	curcpu()->ci_ctrl = cpuctrl;
3410 	cpu_control(0xffffffff, cpuctrl);
3411 }
3412 #endif	/* CPU_FA526 */
3413 
3414 #if defined(CPU_IXP12X0)
3415 struct cpu_option ixp12x0_options[] = {
3416 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3417 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3418 	{ "ixp12x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3419 	{ "ixp12x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
3420 	{ "ixp12x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
3421 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3422 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
3423 	{ "ixp12x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3424 	{ NULL,			IGN, IGN, 0 }
3425 };
3426 
3427 void
3428 ixp12x0_setup(char *args)
3429 {
3430 
3431 	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE
3432 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_SYST_ENABLE
3433 		 | CPU_CONTROL_IC_ENABLE;
3434 
3435 	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_AFLT_ENABLE
3436 		 | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE
3437 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_SYST_ENABLE
3438 		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_IC_ENABLE
3439 		 | CPU_CONTROL_VECRELOC;
3440 
3441 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3442 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3443 #endif
3444 
3445 	cpuctrl = parse_cpu_options(args, ixp12x0_options, cpuctrl);
3446 
3447 #ifdef __ARMEB__
3448 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3449 #endif
3450 
3451 #ifndef ARM_HAS_VBAR
3452 	if (vector_page == ARM_VECTORS_HIGH)
3453 		cpuctrl |= CPU_CONTROL_VECRELOC;
3454 #endif
3455 
3456 	/* Clear out the cache */
3457 	cpu_idcache_wbinv_all();
3458 
3459 	/* Set the control register */
3460 	curcpu()->ci_ctrl = cpuctrl;
3461 	/* cpu_control(0xffffffff, cpuctrl); */
3462 	cpu_control(cpuctrlmask, cpuctrl);
3463 }
3464 #endif /* CPU_IXP12X0 */
3465 
3466 #if defined(CPU_XSCALE)
3467 struct cpu_option xscale_options[] = {
3468 #ifdef COMPAT_12
3469 	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
3470 	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3471 #endif	/* COMPAT_12 */
3472 	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
3473 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3474 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3475 	{ "xscale.branchpredict", BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
3476 	{ "xscale.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3477 	{ "xscale.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
3478 	{ "xscale.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
3479 	{ NULL,			IGN, IGN, 0 }
3480 };
3481 
3482 void
3483 xscale_setup(char *args)
3484 {
3485 	uint32_t auxctl;
3486 
3487 	/*
3488 	 * The XScale Write Buffer is always enabled.  Our option
3489 	 * is to enable/disable coalescing.  Note that bits 6:3
3490 	 * must always be enabled.
3491 	 */
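
	/*
	 * Bits 6:3 are the W, P, D and L bits; CPU_CONTROL_WBUF_ENABLE,
	 * CPU_CONTROL_32BP_ENABLE, CPU_CONTROL_32BD_ENABLE and
	 * CPU_CONTROL_LABT_ENABLE below keep them all set.
	 */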
3492 
3493 	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3494 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3495 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3496 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
3497 		 | CPU_CONTROL_BPRD_ENABLE;
3498 #if 0
3499 	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3500 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3501 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3502 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
3503 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
3504 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
3505 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
3506 #endif
3507 
3508 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3509 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3510 #endif
3511 
3512 	cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl);
3513 
3514 #ifdef __ARMEB__
3515 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3516 #endif
3517 
3518 #ifndef ARM_HAS_VBAR
3519 	if (vector_page == ARM_VECTORS_HIGH)
3520 		cpuctrl |= CPU_CONTROL_VECRELOC;
3521 #endif
3522 
3523 	/* Clear out the cache */
3524 	cpu_idcache_wbinv_all();
3525 
3526 	/*
3527 	 * Set the control register.  Note that bits 6:3 must always
3528 	 * be set to 1.
3529 	 */
3530 	curcpu()->ci_ctrl = cpuctrl;
3531 #if 0
3532 	cpu_control(cpuctrlmask, cpuctrl);
3533 #endif
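	/*
	 * An all-ones mask (rather than the #if 0'd cpuctrlmask above)
	 * rewrites every control-register bit, which also forces the
	 * must-be-one bits 6:3 to 1.
	 */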
3534 	cpu_control(0xffffffff, cpuctrl);
3535 
3536 	/*
	 * Configure write coalescing.  CP15 c1, c0, 1 is the XScale
	 * auxiliary control register; setting its K bit disables
	 * coalescing in the write buffer.
	 */
3537 	__asm volatile("mrc p15, 0, %0, c1, c0, 1"
3538 		: "=r" (auxctl));
3539 #ifdef XSCALE_NO_COALESCE_WRITES
3540 	auxctl |= XSCALE_AUXCTL_K;
3541 #else
3542 	auxctl &= ~XSCALE_AUXCTL_K;
3543 #endif
3544 	__asm volatile("mcr p15, 0, %0, c1, c0, 1"
3545 		: : "r" (auxctl));
3546 }
3547 #endif	/* CPU_XSCALE */
3548 
3549 #if defined(CPU_SHEEVA)
3550 struct cpu_option sheeva_options[] = {
3551 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3552 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3553 	{ "sheeva.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3554 	{ "sheeva.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
3555 	{ "sheeva.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
3556 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3557 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
3558 	{ "sheeva.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3559 	{ NULL,			IGN, IGN, 0 }
3560 };
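/*
 * Interpreted by parse_cpu_options() under the same false-action /
 * true-action / mask convention as xscale_options above.
 */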
3561 
3562 void
3563 sheeva_setup(char *args)
3564 {
3565 	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
3566 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3567 	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
3568 #if 0
3569 	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
3570 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3571 	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
3572 	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
3573 	    | CPU_CONTROL_BPRD_ENABLE
3574 	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
3575 #endif
3576 
3577 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3578 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3579 #endif
3580 
3581 	cpuctrl = parse_cpu_options(args, sheeva_options, cpuctrl);
3582 
3583 	/* Enable DCache Streaming Switch and Write Allocate */
3584 	uint32_t sheeva_ext = armreg_sheeva_xctrl_read();
3585 
3586 	sheeva_ext |= FC_DCACHE_STREAM_EN | FC_WR_ALLOC_EN;
3587 #ifdef SHEEVA_L2_CACHE
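	/*
	 * Turn the L2 on; FC_L2_PREF_DIS is a disable flag, so
	 * clearing it leaves the L2 prefetcher enabled.
	 */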
3588 	sheeva_ext |= FC_L2CACHE_EN;
3589 	sheeva_ext &= ~FC_L2_PREF_DIS;
3590 #endif
3591 
3592 	armreg_sheeva_xctrl_write(sheeva_ext);
3593 
3594 #ifdef SHEEVA_L2_CACHE
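	/*
	 * Describe the L2 as write-back by default.  The #elif guard
	 * skips the write-through assignment when CPU_CT_CTYPE_WT
	 * expands to 0, since arm_scache starts out zeroed anyway.
	 */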
3595 #ifndef SHEEVA_L2_CACHE_WT
3596 	arm_scache.cache_type = CPU_CT_CTYPE_WB2;
3597 #elif CPU_CT_CTYPE_WT != 0
3598 	arm_scache.cache_type = CPU_CT_CTYPE_WT;
3599 #endif
3600 	arm_scache.cache_unified = 1;
3601 	arm_scache.dcache_type = arm_scache.icache_type = CACHE_TYPE_PIPT;
3602 	arm_scache.dcache_size = arm_scache.icache_size = 256*1024;
3603 	arm_scache.dcache_ways = arm_scache.icache_ways = 4;
3604 	arm_scache.dcache_way_size = arm_scache.icache_way_size =
3605 	    arm_scache.dcache_size / arm_scache.dcache_ways;
3606 	arm_scache.dcache_line_size = arm_scache.icache_line_size = 32;
3607 	arm_scache.dcache_sets = arm_scache.icache_sets =
3608 	    arm_scache.dcache_way_size / arm_scache.dcache_line_size;
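	/*
	 * With the values above: 256 KiB / 4 ways = 64 KiB per way,
	 * and 64 KiB / 32-byte lines = 2048 sets.
	 */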
3609 
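	/* Route secondary-cache maintenance to the Sheeva routines. */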
3610 	cpufuncs.cf_sdcache_wb_range = sheeva_sdcache_wb_range;
3611 	cpufuncs.cf_sdcache_inv_range = sheeva_sdcache_inv_range;
3612 	cpufuncs.cf_sdcache_wbinv_range = sheeva_sdcache_wbinv_range;
3613 #endif /* SHEEVA_L2_CACHE */
3614 
3615 #ifdef __ARMEB__
3616 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3617 #endif
3618 
3619 #ifndef ARM_HAS_VBAR
3620 	if (vector_page == ARM_VECTORS_HIGH)
3621 		cpuctrl |= CPU_CONTROL_VECRELOC;
3622 #endif
3623 
3624 	/* Clear out the cache */
3625 	cpu_idcache_wbinv_all();
3626 
3627 	/* Now really make sure they are gone: invalidate both caches. */
3628 	__asm volatile ("mcr\tp15, 0, %0, c7, c7, 0" : : "r" (0));
3629 
3630 	/* Set the control register */
3631 	curcpu()->ci_ctrl = cpuctrl;
3632 	cpu_control(0xffffffff, cpuctrl);
3633 
3634 	/* And wb/inv the caches again, now that the new settings are live. */
3635 	cpu_idcache_wbinv_all();
3636 #ifdef SHEEVA_L2_CACHE
3637 	sheeva_sdcache_wbinv_all();
3638 #endif
3639 }
3640 #endif	/* CPU_SHEEVA */
3641