/*	$NetBSD: cpufunc.c,v 1.161 2016/05/30 17:18:38 dholland Exp $	*/

/*
 * arm7tdmi support code Copyright (c) 2001 John Fremlin
 * arm8 support code Copyright (c) 1997 ARM Limited
 * arm8 support code Copyright (c) 1997 Causality Limited
 * arm9 support code Copyright (C) 2001 ARM Ltd
 * arm11 support code Copyright (c) 2007 Microsoft
 * cortexa8 support code Copyright (c) 2008 3am Software Foundry
 * cortexa8 improvements Copyright (c) Goeran Weinholt
 * Copyright (c) 1997 Mark Brinicombe.
 * Copyright (c) 1997 Causality Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Causality Limited.
 * 4. The name of Causality Limited may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpufuncs.c
 *
 * C functions for supporting CPU / MMU / TLB specific operations.
 *
 * Created	: 30/01/97
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cpufunc.c,v 1.161 2016/05/30 17:18:38 dholland Exp $");

#include "opt_compat_netbsd.h"
#include "opt_cpuoptions.h"
#include "opt_perfctrs.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/pmc.h>
#include <sys/systm.h>
#include <machine/cpu.h>
#include <machine/bootconfig.h>
#include <arch/arm/arm/disassem.h>

#include <uvm/uvm.h>

#include <arm/cpufunc_proto.h>
#include <arm/cpuconf.h>
#include <arm/locore.h>

#ifdef CPU_XSCALE_80200
#include <arm/xscale/i80200reg.h>
#include <arm/xscale/i80200var.h>
#endif

#ifdef CPU_XSCALE_80321
#include <arm/xscale/i80321reg.h>
#include <arm/xscale/i80321var.h>
#endif

#ifdef CPU_XSCALE_IXP425
#include <arm/xscale/ixp425reg.h>
#include <arm/xscale/ixp425var.h>
#endif

#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321)
#include <arm/xscale/xscalereg.h>
#endif

#if defined(CPU_PJ4B)
#include "opt_cputypes.h"
#include "opt_mvsoc.h"
#include <machine/bus_defs.h>
#if defined(ARMADAXP)
#include <arm/marvell/armadaxpreg.h>
#include <arm/marvell/armadaxpvar.h>
#endif
#endif

#if defined(PERFCTRS)
struct arm_pmc_funcs *arm_pmc;
#endif

#if defined(CPU_ARMV7) && (defined(CPU_ARMV6) || defined(CPU_PRE_ARMV6))
bool cpu_armv7_p;
#endif

#if defined(CPU_ARMV6) && (defined(CPU_ARMV7) || defined(CPU_PRE_ARMV6))
bool cpu_armv6_p;
#endif
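
/*
 * cpu_armv7_p and cpu_armv6_p above exist only when the kernel is
 * built for more than one architecture level, so run-time code can
 * ask which kind of CPU it actually found; single-level kernels
 * resolve the same question at compile time and omit the variables.
 */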


/* PRIMARY CACHE VARIABLES */
#if (ARM_MMU_V6 + ARM_MMU_V7) != 0
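/*
 * For VIPT caches this ends up holding the page-colour mask used to
 * avoid virtual aliases; get_cachetype_cp15() clears it again if the
 * cache turns out to be PIPT or each way fits in a single page.
 */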
u_int	arm_cache_prefer_mask;
#endif
struct	arm_cache_info arm_pcache;
struct	arm_cache_info arm_scache;

u_int	arm_dcache_align;
u_int	arm_dcache_align_mask;
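
/*
 * Callers that clean or invalidate by virtual address range normally
 * round the range out to whole cache lines with the values above,
 * e.g. (sketch only):
 *
 *	len += va & arm_dcache_align_mask;
 *	va &= ~arm_dcache_align_mask;
 *	len = (len + arm_dcache_align_mask) & ~arm_dcache_align_mask;
 */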

/* 1 == use cpu_sleep(), 0 == don't */
int cpu_do_powersave;
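
/*
 * One cpu_functions table follows for each supported CPU model.
 * Slots that are no-ops on a given core point at cpufunc_nullop; the
 * (void *) casts paper over the differing prototypes of the slots
 * being filled.
 */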

#ifdef CPU_ARM2
struct cpu_functions arm2_cpufuncs = {
	/* CPU functions */

	.cf_id			= arm2_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= (void *)cpufunc_nullop,

	/* TLB functions */

	.cf_tlb_flushID		= cpufunc_nullop,
	.cf_tlb_flushID_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushI		= cpufunc_nullop,
	.cf_tlb_flushI_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushD		= cpufunc_nullop,
	.cf_tlb_flushD_SE	= (void *)cpufunc_nullop,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm3_cache_flush,
	.cf_dcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_dcache_inv_range	= (void *)cpufunc_nullop,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= cpufunc_nullop,
	.cf_idcache_wbinv_range	= (void *)cpufunc_nullop,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= early_abort_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_setup		= (void *)cpufunc_nullop

};
#endif	/* CPU_ARM2 */

#ifdef CPU_ARM250
struct cpu_functions arm250_cpufuncs = {
	/* CPU functions */

	.cf_id			= arm250_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= (void *)cpufunc_nullop,

	/* TLB functions */

	.cf_tlb_flushID		= cpufunc_nullop,
	.cf_tlb_flushID_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushI		= cpufunc_nullop,
	.cf_tlb_flushI_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushD		= cpufunc_nullop,
	.cf_tlb_flushD_SE	= (void *)cpufunc_nullop,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm3_cache_flush,
	.cf_dcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_dcache_inv_range	= (void *)cpufunc_nullop,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= cpufunc_nullop,
	.cf_idcache_wbinv_range	= (void *)cpufunc_nullop,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= early_abort_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_setup		= (void *)cpufunc_nullop

};
#endif	/* CPU_ARM250 */

#ifdef CPU_ARM3
struct cpu_functions arm3_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= arm3_control,

	/* TLB functions */

	.cf_tlb_flushID		= cpufunc_nullop,
	.cf_tlb_flushID_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushI		= cpufunc_nullop,
	.cf_tlb_flushI_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushD		= cpufunc_nullop,
	.cf_tlb_flushD_SE	= (void *)cpufunc_nullop,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm3_cache_flush,
	.cf_dcache_wbinv_range	= (void *)arm3_cache_flush,
	.cf_dcache_inv_range	= (void *)arm3_cache_flush,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm3_cache_flush,
	.cf_idcache_wbinv_range	= (void *)arm3_cache_flush,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= early_abort_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_setup		= (void *)cpufunc_nullop

};
#endif	/* CPU_ARM3 */

#ifdef CPU_ARM6
struct cpu_functions arm6_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm67_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm67_tlb_flush,
	.cf_tlb_flushID_SE	= arm67_tlb_purge,
	.cf_tlb_flushI		= arm67_tlb_flush,
	.cf_tlb_flushI_SE	= arm67_tlb_purge,
	.cf_tlb_flushD		= arm67_tlb_flush,
	.cf_tlb_flushD_SE	= arm67_tlb_purge,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm67_cache_flush,
	.cf_dcache_wbinv_range	= (void *)arm67_cache_flush,
	.cf_dcache_inv_range	= (void *)arm67_cache_flush,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm67_cache_flush,
	.cf_idcache_wbinv_range	= (void *)arm67_cache_flush,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

#ifdef ARM6_LATE_ABORT
	.cf_dataabt_fixup	= late_abort_fixup,
#else
	.cf_dataabt_fixup	= early_abort_fixup,
#endif
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm67_context_switch,

	.cf_setup		= arm6_setup

};
#endif	/* CPU_ARM6 */

#ifdef CPU_ARM7
struct cpu_functions arm7_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm67_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm67_tlb_flush,
	.cf_tlb_flushID_SE	= arm67_tlb_purge,
	.cf_tlb_flushI		= arm67_tlb_flush,
	.cf_tlb_flushI_SE	= arm67_tlb_purge,
	.cf_tlb_flushD		= arm67_tlb_flush,
	.cf_tlb_flushD_SE	= arm67_tlb_purge,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm67_cache_flush,
	.cf_dcache_wbinv_range	= (void *)arm67_cache_flush,
	.cf_dcache_inv_range	= (void *)arm67_cache_flush,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm67_cache_flush,
	.cf_idcache_wbinv_range	= (void *)arm67_cache_flush,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= late_abort_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm67_context_switch,

	.cf_setup		= arm7_setup

};
#endif	/* CPU_ARM7 */

#ifdef CPU_ARM7TDMI
struct cpu_functions arm7tdmi_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm7tdmi_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm7tdmi_tlb_flushID,
	.cf_tlb_flushID_SE	= arm7tdmi_tlb_flushID_SE,
	.cf_tlb_flushI		= arm7tdmi_tlb_flushID,
	.cf_tlb_flushI_SE	= arm7tdmi_tlb_flushID_SE,
	.cf_tlb_flushD		= arm7tdmi_tlb_flushID,
	.cf_tlb_flushD_SE	= arm7tdmi_tlb_flushID_SE,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm7tdmi_cache_flushID,
	.cf_dcache_wbinv_range	= (void *)arm7tdmi_cache_flushID,
	.cf_dcache_inv_range	= (void *)arm7tdmi_cache_flushID,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm7tdmi_cache_flushID,
	.cf_idcache_wbinv_range	= (void *)arm7tdmi_cache_flushID,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= late_abort_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm7tdmi_context_switch,

	.cf_setup		= arm7tdmi_setup

};
#endif	/* CPU_ARM7TDMI */

#ifdef CPU_ARM8
struct cpu_functions arm8_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm8_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm8_tlb_flushID,
	.cf_tlb_flushID_SE	= arm8_tlb_flushID_SE,
	.cf_tlb_flushI		= arm8_tlb_flushID,
	.cf_tlb_flushI_SE	= arm8_tlb_flushID_SE,
	.cf_tlb_flushD		= arm8_tlb_flushID,
	.cf_tlb_flushD_SE	= arm8_tlb_flushID_SE,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm8_cache_purgeID,
	.cf_dcache_wbinv_range	= (void *)arm8_cache_purgeID,
/*XXX*/	.cf_dcache_inv_range	= (void *)arm8_cache_purgeID,
	.cf_dcache_wb_range	= (void *)arm8_cache_cleanID,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm8_cache_purgeID,
	.cf_idcache_wbinv_range = (void *)arm8_cache_purgeID,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm8_context_switch,

	.cf_setup		= arm8_setup
};
#endif	/* CPU_ARM8 */

#ifdef CPU_ARM9
struct cpu_functions arm9_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm9_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= arm9_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= arm9_icache_sync_all,
	.cf_icache_sync_range	= arm9_icache_sync_range,

	.cf_dcache_wbinv_all	= arm9_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= arm9_dcache_wbinv_range,
/*XXX*/	.cf_dcache_inv_range	= arm9_dcache_wbinv_range,
	.cf_dcache_wb_range	= arm9_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm9_idcache_wbinv_all,
	.cf_idcache_wbinv_range = arm9_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm9_context_switch,

	.cf_setup		= arm9_setup

};
#endif /* CPU_ARM9 */

#if defined(CPU_ARM9E) || defined(CPU_ARM10)
struct cpu_functions armv5_ec_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= armv5_ec_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= arm10_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= arm10_tlb_flushI_SE,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv5_ec_icache_sync_all,
	.cf_icache_sync_range	= armv5_ec_icache_sync_range,

	.cf_dcache_wbinv_all	= armv5_ec_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= armv5_ec_dcache_wbinv_range,
/*XXX*/	.cf_dcache_inv_range	= armv5_ec_dcache_wbinv_range,
	.cf_dcache_wb_range	= armv5_ec_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= armv5_ec_idcache_wbinv_all,
	.cf_idcache_wbinv_range = armv5_ec_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm10_context_switch,

	.cf_setup		= arm10_setup

};
#endif /* CPU_ARM9E || CPU_ARM10 */

#ifdef CPU_ARM10
struct cpu_functions arm10_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= armv5_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= arm10_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= arm10_tlb_flushI_SE,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv5_icache_sync_all,
	.cf_icache_sync_range	= armv5_icache_sync_range,

	.cf_dcache_wbinv_all	= armv5_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= armv5_dcache_wbinv_range,
/*XXX*/	.cf_dcache_inv_range	= armv5_dcache_wbinv_range,
	.cf_dcache_wb_range	= armv5_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= armv5_idcache_wbinv_all,
	.cf_idcache_wbinv_range = armv5_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm10_context_switch,

	.cf_setup		= arm10_setup

};
#endif /* CPU_ARM10 */

#ifdef CPU_ARM11
struct cpu_functions arm11_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm11_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm11_tlb_flushID,
	.cf_tlb_flushID_SE	= arm11_tlb_flushID_SE,
	.cf_tlb_flushI		= arm11_tlb_flushI,
	.cf_tlb_flushI_SE	= arm11_tlb_flushI_SE,
	.cf_tlb_flushD		= arm11_tlb_flushD,
	.cf_tlb_flushD_SE	= arm11_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv6_icache_sync_all,
	.cf_icache_sync_range	= armv6_icache_sync_range,

	.cf_dcache_wbinv_all	= armv6_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= armv6_dcache_wbinv_range,
	.cf_dcache_inv_range	= armv6_dcache_inv_range,
	.cf_dcache_wb_range	= armv6_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= armv6_idcache_wbinv_all,
	.cf_idcache_wbinv_range = armv6_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= arm11_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= arm11_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm11_context_switch,

	.cf_setup		= arm11_setup

};
#endif /* CPU_ARM11 */

#ifdef CPU_ARM1136
struct cpu_functions arm1136_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm11_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm11_tlb_flushID,
	.cf_tlb_flushID_SE	= arm11_tlb_flushID_SE,
	.cf_tlb_flushI		= arm11_tlb_flushI,
	.cf_tlb_flushI_SE	= arm11_tlb_flushI_SE,
	.cf_tlb_flushD		= arm11_tlb_flushD,
	.cf_tlb_flushD_SE	= arm11_tlb_flushD_SE,

	/* Cache operations */
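	/*
	 * The numbers in the comments below and in the arm1176 table
	 * appear to be ARM core erratum IDs that the chosen routines
	 * work around.
	 */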

	.cf_icache_sync_all	= arm11x6_icache_sync_all,	/* 411920 */
	.cf_icache_sync_range	= arm11x6_icache_sync_range,	/* 371025 */

	.cf_dcache_wbinv_all	= arm11x6_dcache_wbinv_all,	/* 411920 */
	.cf_dcache_wbinv_range	= armv6_dcache_wbinv_range,
	.cf_dcache_inv_range	= armv6_dcache_inv_range,
	.cf_dcache_wb_range	= armv6_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm11x6_idcache_wbinv_all,	/* 411920 */
	.cf_idcache_wbinv_range = arm11x6_idcache_wbinv_range,	/* 371025 */

	/* Other functions */

	.cf_flush_prefetchbuf	= arm11x6_flush_prefetchbuf,
	.cf_drain_writebuf	= arm11_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= arm11_sleep,	/* arm1136_sleep_rev0 */

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm11_context_switch,

	.cf_setup		= arm11x6_setup

};
#endif /* CPU_ARM1136 */

#ifdef CPU_ARM1176
struct cpu_functions arm1176_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm11_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm11_tlb_flushID,
	.cf_tlb_flushID_SE	= arm11_tlb_flushID_SE,
	.cf_tlb_flushI		= arm11_tlb_flushI,
	.cf_tlb_flushI_SE	= arm11_tlb_flushI_SE,
	.cf_tlb_flushD		= arm11_tlb_flushD,
	.cf_tlb_flushD_SE	= arm11_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= arm11x6_icache_sync_all,	/* 415045 */
	.cf_icache_sync_range	= arm11x6_icache_sync_range,	/* 371367 */

	.cf_dcache_wbinv_all	= arm11x6_dcache_wbinv_all,	/* 415045 */
	.cf_dcache_wbinv_range	= armv6_dcache_wbinv_range,
	.cf_dcache_inv_range	= armv6_dcache_inv_range,
	.cf_dcache_wb_range	= armv6_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm11x6_idcache_wbinv_all,	/* 415045 */
	.cf_idcache_wbinv_range = arm11x6_idcache_wbinv_range,	/* 371367 */

	/* Other functions */

	.cf_flush_prefetchbuf	= arm11x6_flush_prefetchbuf,
	.cf_drain_writebuf	= arm11_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= arm11x6_sleep,		/* no ref. */

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm11_context_switch,

	.cf_setup		= arm11x6_setup

};
#endif /* CPU_ARM1176 */


#ifdef CPU_ARM11MPCORE
struct cpu_functions arm11mpcore_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm11_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm11_tlb_flushID,
	.cf_tlb_flushID_SE	= arm11_tlb_flushID_SE,
	.cf_tlb_flushI		= arm11_tlb_flushI,
	.cf_tlb_flushI_SE	= arm11_tlb_flushI_SE,
	.cf_tlb_flushD		= arm11_tlb_flushD,
	.cf_tlb_flushD_SE	= arm11_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv6_icache_sync_all,
	.cf_icache_sync_range	= armv5_icache_sync_range,

	.cf_dcache_wbinv_all	= armv6_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= armv5_dcache_wbinv_range,
	.cf_dcache_inv_range	= armv5_dcache_inv_range,
	.cf_dcache_wb_range	= armv5_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= armv6_idcache_wbinv_all,
	.cf_idcache_wbinv_range = armv5_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= arm11_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= arm11_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm11_context_switch,

	.cf_setup		= arm11mpcore_setup

};
#endif /* CPU_ARM11MPCORE */

#ifdef CPU_SA110
struct cpu_functions sa110_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= sa1_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= sa1_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= sa1_cache_syncI,
	.cf_icache_sync_range	= sa1_cache_syncI_rng,

	.cf_dcache_wbinv_all	= sa1_cache_purgeD,
	.cf_dcache_wbinv_range	= sa1_cache_purgeD_rng,
/*XXX*/	.cf_dcache_inv_range	= sa1_cache_purgeD_rng,
	.cf_dcache_wb_range	= sa1_cache_cleanD_rng,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= sa1_cache_purgeID,
	.cf_idcache_wbinv_range	= sa1_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= sa110_context_switch,

	.cf_setup		= sa110_setup
};
#endif	/* CPU_SA110 */

#if defined(CPU_SA1100) || defined(CPU_SA1110)
struct cpu_functions sa11x0_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= sa1_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= sa1_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= sa1_cache_syncI,
	.cf_icache_sync_range	= sa1_cache_syncI_rng,

	.cf_dcache_wbinv_all	= sa1_cache_purgeD,
	.cf_dcache_wbinv_range	= sa1_cache_purgeD_rng,
/*XXX*/	.cf_dcache_inv_range	= sa1_cache_purgeD_rng,
	.cf_dcache_wb_range	= sa1_cache_cleanD_rng,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= sa1_cache_purgeID,
	.cf_idcache_wbinv_range	= sa1_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf	= sa11x0_drain_readbuf,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= sa11x0_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= sa11x0_context_switch,

	.cf_setup		= sa11x0_setup
};
#endif	/* CPU_SA1100 || CPU_SA1110 */

#if defined(CPU_FA526)
struct cpu_functions fa526_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= fa526_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= fa526_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= fa526_tlb_flushI_SE,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= fa526_icache_sync_all,
	.cf_icache_sync_range	= fa526_icache_sync_range,

	.cf_dcache_wbinv_all	= fa526_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= fa526_dcache_wbinv_range,
	.cf_dcache_inv_range	= fa526_dcache_inv_range,
	.cf_dcache_wb_range	= fa526_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= fa526_idcache_wbinv_all,
	.cf_idcache_wbinv_range	= fa526_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= fa526_flush_prefetchbuf,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= fa526_flush_brnchtgt_E,

	.cf_sleep		= fa526_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= fa526_context_switch,

	.cf_setup		= fa526_setup
};
#endif	/* CPU_FA526 */

#ifdef CPU_IXP12X0
struct cpu_functions ixp12x0_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= sa1_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= sa1_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= sa1_cache_syncI,
	.cf_icache_sync_range	= sa1_cache_syncI_rng,

	.cf_dcache_wbinv_all	= sa1_cache_purgeD,
	.cf_dcache_wbinv_range	= sa1_cache_purgeD_rng,
/*XXX*/	.cf_dcache_inv_range	= sa1_cache_purgeD_rng,
	.cf_dcache_wb_range	= sa1_cache_cleanD_rng,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= sa1_cache_purgeID,
	.cf_idcache_wbinv_range	= sa1_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf	= ixp12x0_drain_readbuf,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= ixp12x0_context_switch,

	.cf_setup		= ixp12x0_setup
};
#endif	/* CPU_IXP12X0 */

#if defined(CPU_XSCALE)
struct cpu_functions xscale_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= xscale_cpwait,

	/* MMU functions */

	.cf_control		= xscale_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= xscale_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= xscale_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= xscale_cache_syncI,
	.cf_icache_sync_range	= xscale_cache_syncI_rng,

	.cf_dcache_wbinv_all	= xscale_cache_purgeD,
	.cf_dcache_wbinv_range	= xscale_cache_purgeD_rng,
	.cf_dcache_inv_range	= xscale_cache_flushD_rng,
	.cf_dcache_wb_range	= xscale_cache_cleanD_rng,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= xscale_cache_purgeID,
	.cf_idcache_wbinv_range = xscale_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= xscale_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= xscale_context_switch,

	.cf_setup		= xscale_setup
};
#endif /* CPU_XSCALE */

#if defined(CPU_ARMV7)
struct cpu_functions armv7_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= armv7_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv7_tlb_flushID,
	.cf_tlb_flushID_SE	= armv7_tlb_flushID_SE,
	.cf_tlb_flushI		= armv7_tlb_flushI,
	.cf_tlb_flushI_SE	= armv7_tlb_flushI_SE,
	.cf_tlb_flushD		= armv7_tlb_flushD,
	.cf_tlb_flushD_SE	= armv7_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv7_icache_sync_all,
	.cf_dcache_wbinv_all	= armv7_dcache_wbinv_all,

	.cf_dcache_inv_range	= armv7_dcache_inv_range,
	.cf_dcache_wb_range	= armv7_dcache_wb_range,
	.cf_dcache_wbinv_range	= armv7_dcache_wbinv_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_icache_sync_range	= armv7_icache_sync_range,
	.cf_idcache_wbinv_range = armv7_idcache_wbinv_range,

	.cf_idcache_wbinv_all	= armv7_idcache_wbinv_all,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv7_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= armv7_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= armv7_context_switch,

	.cf_setup		= armv7_setup

};
#endif /* CPU_ARMV7 */

#ifdef CPU_PJ4B
struct cpu_functions pj4bv7_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= armv7_drain_writebuf,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= armv7_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv7_tlb_flushID,
	.cf_tlb_flushID_SE	= armv7_tlb_flushID_SE,
	.cf_tlb_flushI		= armv7_tlb_flushID,
	.cf_tlb_flushI_SE	= armv7_tlb_flushID_SE,
	.cf_tlb_flushD		= armv7_tlb_flushID,
	.cf_tlb_flushD_SE	= armv7_tlb_flushID_SE,

	/* Cache operations (see also pj4bv7_setup) */
	.cf_icache_sync_all	= armv7_idcache_wbinv_all,
	.cf_icache_sync_range	= armv7_icache_sync_range,

	.cf_dcache_wbinv_all	= armv7_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= armv7_dcache_wbinv_range,
	.cf_dcache_inv_range	= armv7_dcache_inv_range,
	.cf_dcache_wb_range	= armv7_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= armv7_idcache_wbinv_all,
	.cf_idcache_wbinv_range	= armv7_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv7_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= pj4b_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= armv7_context_switch,

	.cf_setup		= pj4bv7_setup
};
#endif /* CPU_PJ4B */

#ifdef CPU_SHEEVA
struct cpu_functions sheeva_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= armv5_ec_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= arm10_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= arm10_tlb_flushI_SE,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv5_ec_icache_sync_all,
	.cf_icache_sync_range	= armv5_ec_icache_sync_range,

	.cf_dcache_wbinv_all	= armv5_ec_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= sheeva_dcache_wbinv_range,
	.cf_dcache_inv_range	= sheeva_dcache_inv_range,
	.cf_dcache_wb_range	= sheeva_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= armv5_ec_idcache_wbinv_all,
	.cf_idcache_wbinv_range = sheeva_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)sheeva_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm10_context_switch,

	.cf_setup		= sheeva_setup
};
#endif /* CPU_SHEEVA */

/*
 * Global variables also used by locore.s
 */

struct cpu_functions cpufuncs;
u_int cputype;

#if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) || \
    defined(CPU_ARM9E) || defined(CPU_ARM10) || defined(CPU_FA526) || \
    defined(CPU_SHEEVA) || \
    defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
    defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425) || \
    defined(CPU_ARMV6) || defined(CPU_ARMV7)
static void get_cachetype_cp15(void);

/* Additional cache information local to this file.  Log2 of some of the
   above numbers.  */
static int	arm_dcache_log2_nsets;
static int	arm_dcache_log2_assoc;
static int	arm_dcache_log2_linesize;
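
/*
 * These satisfy size = nsets * assoc * linesize; for example a 16KiB
 * 4-way cache with 32-byte lines gives log2 values of 7 (nsets),
 * 2 (assoc) and 5 (linesize).  set_cpufuncs() turns them into the
 * set/way loop parameters for the ARM9 and ARM10 cache routines.
 */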

#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
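/*
 * Read the cache size ID register (CCSIDR) for the cache selected by
 * cssr: write the selector to CSSELR, synchronize so the selection
 * takes effect, then read CCSIDR back.
 */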
static inline u_int
get_cachesize_cp15(int cssr)
{
#if defined(CPU_ARMV7)
	__asm volatile(".arch\tarmv7a");

	armreg_csselr_write(cssr);
	arm_isb();			 /* sync to the new cssr */

#else
	__asm volatile("mcr p15, 1, %0, c0, c0, 2" :: "r" (cssr) : "memory");
#endif
	return armreg_ccsidr_read();
}
#endif

#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
static void
get_cacheinfo_clidr(struct arm_cache_info *info, u_int level, u_int clidr)
{
	u_int csid;

	if (clidr & 6) {
		csid = get_cachesize_cp15(level << 1); /* select dcache values */
		info->dcache_sets = CPU_CSID_NUMSETS(csid) + 1;
		info->dcache_ways = CPU_CSID_ASSOC(csid) + 1;
		info->dcache_line_size = 1U << (CPU_CSID_LEN(csid) + 4);
		info->dcache_way_size =
		    info->dcache_line_size * info->dcache_sets;
		info->dcache_size = info->dcache_way_size * info->dcache_ways;
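
		/*
		 * Worked example (hypothetical values): a 32KiB 4-way
		 * D-cache with 32-byte lines reports NUMSETS=255,
		 * ASSOC=3 and LEN=1, which decodes above to 256 sets,
		 * 4 ways, 32-byte lines, an 8KiB way and a 32KiB
		 * total.
		 */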

		if (level == 0) {
			arm_dcache_log2_assoc = CPU_CSID_ASSOC(csid) + 1;
			arm_dcache_log2_linesize = CPU_CSID_LEN(csid) + 4;
			arm_dcache_log2_nsets =
			    31 - __builtin_clz(info->dcache_sets*2-1);
		}
	}

	info->cache_unified = (clidr == 4);

	if (level > 0) {
		info->dcache_type = CACHE_TYPE_PIPT;
		info->icache_type = CACHE_TYPE_PIPT;
	}

	if (info->cache_unified) {
		info->icache_ways = info->dcache_ways;
		info->icache_line_size = info->dcache_line_size;
		info->icache_way_size = info->dcache_way_size;
		info->icache_size = info->dcache_size;
	} else {
		csid = get_cachesize_cp15((level << 1)|CPU_CSSR_InD); /* select icache values */
		info->icache_sets = CPU_CSID_NUMSETS(csid) + 1;
		info->icache_ways = CPU_CSID_ASSOC(csid) + 1;
		info->icache_line_size = 1U << (CPU_CSID_LEN(csid) + 4);
		info->icache_way_size = info->icache_line_size * info->icache_sets;
		info->icache_size = info->icache_way_size * info->icache_ways;
	}
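
	/*
	 * If every way of both L1 caches fits in a single page, a
	 * virtual index can never differ from the physical one, so no
	 * page colouring is needed and the prefer mask is cleared.
	 * (The 8KiB way in the example above would keep it set with
	 * 4KiB pages.)
	 */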
	if (level == 0
	    && info->dcache_way_size <= PAGE_SIZE
	    && info->icache_way_size <= PAGE_SIZE) {
		arm_cache_prefer_mask = 0;
	}
}
#endif /* (ARM_MMU_V6 + ARM_MMU_V7) > 0 */

static void
get_cachetype_cp15(void)
{
	u_int ctype, isize, dsize;
	u_int multiplier;

	ctype = armreg_ctr_read();

	/*
	 * ...and thus spake the ARM ARM:
	 *
	 * If an <opcode2> value corresponding to an unimplemented or
	 * reserved ID register is encountered, the System Control
	 * processor returns the value of the main ID register.
	 */
	if (ctype == cpu_idnum())
		goto out;

#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
	if (CPU_CT_FORMAT(ctype) == 4) {
		u_int clidr = armreg_clidr_read();

		if (CPU_CT4_L1IPOLICY(ctype) == CPU_CT4_L1_PIPT) {
			arm_pcache.icache_type = CACHE_TYPE_PIPT;
		} else {
			arm_pcache.icache_type = CACHE_TYPE_VIPT;
			arm_cache_prefer_mask = PAGE_SIZE;
		}
#ifdef CPU_CORTEX
		if (CPU_ID_CORTEX_P(cpu_idnum())) {
			arm_pcache.dcache_type = CACHE_TYPE_PIPT;
		} else
#endif
		{
			arm_pcache.dcache_type = CACHE_TYPE_VIPT;
		}
		arm_pcache.cache_type = CPU_CT_CTYPE_WB14;

		get_cacheinfo_clidr(&arm_pcache, 0, clidr & 7);
		arm_dcache_align = arm_pcache.dcache_line_size;
		clidr >>= 3;
		if (clidr & 7) {
			get_cacheinfo_clidr(&arm_scache, 1, clidr & 7);
			if (arm_scache.dcache_line_size < arm_dcache_align)
				arm_dcache_align = arm_scache.dcache_line_size;
		}
		/*
		 * The pmap cleans an entire way for an exec page so
		 * we don't care that it's VIPT anymore.
		 */
		if (arm_pcache.dcache_type == CACHE_TYPE_PIPT) {
			arm_cache_prefer_mask = 0;
		}
		goto out;
	}
#endif /* ARM_MMU_V6 + ARM_MMU_V7 > 0 */

	if ((ctype & CPU_CT_S) == 0)
		arm_pcache.cache_unified = 1;

	/*
	 * If you want to know how this code works, go read the ARM ARM.
	 */
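
	/*
	 * Sketch of the decode below with hypothetical field values:
	 * SIZE=5, ASSOC=2, LEN=2 and M=0 give multiplier 2, hence a
	 * 2 << (5+8) = 16KiB cache with 2 << (2-1) = 4 ways,
	 * 1 << (2+3) = 32-byte lines and a 1 << (9+5-2) = 4KiB way.
	 */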

	arm_pcache.cache_type = CPU_CT_CTYPE(ctype);

	if (arm_pcache.cache_unified == 0) {
		isize = CPU_CT_ISIZE(ctype);
		multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
		arm_pcache.icache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
		if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
			if (isize & CPU_CT_xSIZE_M)
				arm_pcache.icache_line_size = 0; /* not present */
			else
				arm_pcache.icache_ways = 1;
		} else {
			arm_pcache.icache_ways = multiplier <<
			    (CPU_CT_xSIZE_ASSOC(isize) - 1);
#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
			arm_pcache.icache_type = CACHE_TYPE_VIPT;
			if (CPU_CT_xSIZE_P & isize)
				arm_cache_prefer_mask |=
				    __BIT(9 + CPU_CT_xSIZE_SIZE(isize)
					  - CPU_CT_xSIZE_ASSOC(isize))
				    - PAGE_SIZE;
#endif
		}
		arm_pcache.icache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
		arm_pcache.icache_way_size =
		    __BIT(9 + CPU_CT_xSIZE_SIZE(isize) - CPU_CT_xSIZE_ASSOC(isize));
	}

	dsize = CPU_CT_DSIZE(ctype);
	multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
	arm_pcache.dcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
	if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
		if (dsize & CPU_CT_xSIZE_M)
			arm_pcache.dcache_line_size = 0; /* not present */
		else
			arm_pcache.dcache_ways = 1;
	} else {
		arm_pcache.dcache_ways = multiplier <<
		    (CPU_CT_xSIZE_ASSOC(dsize) - 1);
#if (ARM_MMU_V6) > 0
		arm_pcache.dcache_type = CACHE_TYPE_VIPT;
		if ((CPU_CT_xSIZE_P & dsize)
		    && CPU_ID_ARM11_P(curcpu()->ci_arm_cpuid)) {
			arm_cache_prefer_mask |=
			    __BIT(9 + CPU_CT_xSIZE_SIZE(dsize)
				  - CPU_CT_xSIZE_ASSOC(dsize)) - PAGE_SIZE;
		}
#endif
	}
	arm_pcache.dcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
	arm_pcache.dcache_way_size =
	    __BIT(9 + CPU_CT_xSIZE_SIZE(dsize) - CPU_CT_xSIZE_ASSOC(dsize));

	arm_dcache_align = arm_pcache.dcache_line_size;

	arm_dcache_log2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
	arm_dcache_log2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
	arm_dcache_log2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
	    CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);

 out:
	KASSERTMSG(arm_dcache_align <= CACHE_LINE_SIZE,
	    "arm_dcache_align=%u CACHE_LINE_SIZE=%u",
	    arm_dcache_align, CACHE_LINE_SIZE);
	arm_dcache_align_mask = arm_dcache_align - 1;
}
#endif /* ARM7TDMI || ARM8 || ARM9 || ARM9E || ARM10 || FA526 || SHEEVA || XSCALE || ARMV6 || ARMV7 */

#if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3) || \
    defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_SA110) || \
    defined(CPU_SA1100) || defined(CPU_SA1110) || defined(CPU_IXP12X0)
/* Cache information for CPUs without cache type registers. */
struct cachetab {
	uint32_t ct_cpuid;
	int	ct_pcache_type;
	int	ct_pcache_unified;
	int	ct_pdcache_size;
	int	ct_pdcache_line_size;
	int	ct_pdcache_ways;
	int	ct_picache_size;
	int	ct_picache_line_size;
	int	ct_picache_ways;
};
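
/*
 * Sizes are in bytes; a zero entry means the cache is absent.  In
 * the header below, "u" is the unified flag, "ls" the line size and
 * "wy" the number of ways.
 */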

struct cachetab cachetab[] = {
    /* cpuid,           cache type,       u,  dsiz, ls, wy,  isiz, ls, wy */
    { CPU_ID_ARM2,      0,                1,     0,  0,  0,     0,  0,  0 },
    { CPU_ID_ARM250,    0,                1,     0,  0,  0,     0,  0,  0 },
    { CPU_ID_ARM3,      CPU_CT_CTYPE_WT,  1,  4096, 16, 64,     0,  0,  0 },
    { CPU_ID_ARM610,	CPU_CT_CTYPE_WT,  1,  4096, 16, 64,     0,  0,  0 },
    { CPU_ID_ARM710,    CPU_CT_CTYPE_WT,  1,  8192, 32,  4,     0,  0,  0 },
    { CPU_ID_ARM7500,   CPU_CT_CTYPE_WT,  1,  4096, 16,  4,     0,  0,  0 },
    { CPU_ID_ARM710A,   CPU_CT_CTYPE_WT,  1,  8192, 16,  4,     0,  0,  0 },
    { CPU_ID_ARM7500FE, CPU_CT_CTYPE_WT,  1,  4096, 16,  4,     0,  0,  0 },
    /* XXX is this type right for SA-1? */
    { CPU_ID_SA110,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 },
    { CPU_ID_SA1100,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
    { CPU_ID_SA1110,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
    { CPU_ID_IXP1200,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 }, /* XXX */
    { 0, 0, 0, 0, 0, 0, 0, 0}
};

static void get_cachetype_table(void);

static void
get_cachetype_table(void)
{
	int i;
	uint32_t cpuid = cpu_idnum();

	for (i = 0; cachetab[i].ct_cpuid != 0; i++) {
		if (cachetab[i].ct_cpuid == (cpuid & CPU_ID_CPU_MASK)) {
			arm_pcache.cache_type = cachetab[i].ct_pcache_type;
			arm_pcache.cache_unified = cachetab[i].ct_pcache_unified;
			arm_pcache.dcache_size = cachetab[i].ct_pdcache_size;
			arm_pcache.dcache_line_size =
			    cachetab[i].ct_pdcache_line_size;
			arm_pcache.dcache_ways = cachetab[i].ct_pdcache_ways;
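			/*
			 * The way size is the total size divided by
			 * the associativity; these static tables
			 * carry no set counts to derive it from.
			 */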
			if (arm_pcache.dcache_ways) {
				arm_pcache.dcache_way_size =
				    arm_pcache.dcache_size
				    / arm_pcache.dcache_ways;
			}
			arm_pcache.icache_size = cachetab[i].ct_picache_size;
			arm_pcache.icache_line_size =
			    cachetab[i].ct_picache_line_size;
			arm_pcache.icache_ways = cachetab[i].ct_picache_ways;
			if (arm_pcache.icache_ways) {
				arm_pcache.icache_way_size =
				    arm_pcache.icache_size
				    / arm_pcache.icache_ways;
			}
		}
	}

	arm_dcache_align = arm_pcache.dcache_line_size;
	arm_dcache_align_mask = arm_dcache_align - 1;
}

#endif /* ARM2 || ARM250 || ARM3 || ARM6 || ARM7 || SA110 || SA1100 || SA1110 || IXP12X0 */

/*
 * Cannot panic here as we may not have a console yet ...
 */
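
/*
 * Match the CPU ID against each model compiled into the kernel,
 * install that model's function table, probe its cache geometry and
 * pick the matching pmap PTE initialization.  Returns 0 on a match.
 */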

int
set_cpufuncs(void)
{
	if (cputype == 0) {
		cputype = cpufunc_id();
		cputype &= CPU_ID_CPU_MASK;
	}

	/*
	 * NOTE: cpu_do_powersave defaults to off.  If we encounter a
	 * CPU type where we want to use it by default, then we set it.
	 */
#ifdef CPU_ARM2
	if (cputype == CPU_ID_ARM2) {
		cpufuncs = arm2_cpufuncs;
		get_cachetype_table();
		return 0;
	}
#endif /* CPU_ARM2 */
#ifdef CPU_ARM250
	if (cputype == CPU_ID_ARM250) {
		cpufuncs = arm250_cpufuncs;
		get_cachetype_table();
		return 0;
	}
#endif
#ifdef CPU_ARM3
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    (cputype & 0x00000f00) == 0x00000300) {
		cpufuncs = arm3_cpufuncs;
		get_cachetype_table();
		return 0;
	}
#endif	/* CPU_ARM3 */
#ifdef CPU_ARM6
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    (cputype & 0x00000f00) == 0x00000600) {
		cpufuncs = arm6_cpufuncs;
		get_cachetype_table();
		pmap_pte_init_generic();
		return 0;
	}
#endif	/* CPU_ARM6 */
#ifdef CPU_ARM7
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    CPU_ID_IS7(cputype) &&
	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V3) {
		cpufuncs = arm7_cpufuncs;
		get_cachetype_table();
		pmap_pte_init_generic();
		return 0;
	}
#endif	/* CPU_ARM7 */
#ifdef CPU_ARM7TDMI
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    CPU_ID_IS7(cputype) &&
	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V4T) {
		cpufuncs = arm7tdmi_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		return 0;
	}
#endif
#ifdef CPU_ARM8
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    (cputype & 0x0000f000) == 0x00008000) {
		cpufuncs = arm8_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_arm8();
		return 0;
	}
#endif	/* CPU_ARM8 */
#ifdef CPU_ARM9
	if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
	     (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
	    (cputype & 0x0000f000) == 0x00009000) {
		cpufuncs = arm9_cpufuncs;
		get_cachetype_cp15();
		arm9_dcache_sets_inc = 1U << arm_dcache_log2_linesize;
		arm9_dcache_sets_max =
		    (1U << (arm_dcache_log2_linesize + arm_dcache_log2_nsets)) -
		    arm9_dcache_sets_inc;
		arm9_dcache_index_inc = 1U << (32 - arm_dcache_log2_assoc);
		arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
1854 #ifdef	ARM9_CACHE_WRITE_THROUGH
1855 		pmap_pte_init_arm9();
1856 #else
1857 		pmap_pte_init_generic();
1858 #endif
1859 		return 0;
1860 	}
1861 #endif /* CPU_ARM9 */
1862 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
1863 	if (cputype == CPU_ID_ARM926EJS ||
1864 	    cputype == CPU_ID_ARM1026EJS) {
1865 		cpufuncs = armv5_ec_cpufuncs;
1866 		get_cachetype_cp15();
1867 		pmap_pte_init_generic();
1868 		return 0;
1869 	}
1870 #endif /* CPU_ARM9E || CPU_ARM10 */
1871 #if defined(CPU_SHEEVA)
1872 	if (cputype == CPU_ID_MV88SV131 ||
1873 	    cputype == CPU_ID_MV88FR571_VD) {
1874 		cpufuncs = sheeva_cpufuncs;
1875 		get_cachetype_cp15();
1876 		pmap_pte_init_generic();
1877 		cpu_do_powersave = 1;			/* Enable powersave */
1878 		return 0;
1879 	}
1880 #endif /* CPU_SHEEVA */
1881 #ifdef CPU_ARM10
1882 	if (/* cputype == CPU_ID_ARM1020T || */
1883 	    cputype == CPU_ID_ARM1020E) {
1884 		/*
1885 		 * Select write-through caching (this isn't really an
1886 		 * option on ARM1020T).
1887 		 */
1888 		cpufuncs = arm10_cpufuncs;
1889 		get_cachetype_cp15();
1890 		armv5_dcache_sets_inc = 1U << arm_dcache_log2_linesize;
1891 		armv5_dcache_sets_max =
1892 		    (1U << (arm_dcache_log2_linesize + arm_dcache_log2_nsets)) -
1893 		    armv5_dcache_sets_inc;
1894 		armv5_dcache_index_inc = 1U << (32 - arm_dcache_log2_assoc);
1895 		armv5_dcache_index_max = 0U - armv5_dcache_index_inc;
1896 		pmap_pte_init_generic();
1897 		return 0;
1898 	}
1899 #endif /* CPU_ARM10 */
1900 
1901 
1902 #if defined(CPU_ARM11MPCORE)
1903 	if (cputype == CPU_ID_ARM11MPCORE) {
1904 		cpufuncs = arm11mpcore_cpufuncs;
1905 #if defined(CPU_ARMV7) || defined(CPU_PRE_ARMV6)
1906 		cpu_armv6_p = true;
1907 #endif
1908 		get_cachetype_cp15();
1909 		armv5_dcache_sets_inc = 1U << arm_dcache_log2_linesize;
1910 		armv5_dcache_sets_max = (1U << (arm_dcache_log2_linesize +
1911 			arm_dcache_log2_nsets)) - armv5_dcache_sets_inc;
1912 		armv5_dcache_index_inc = 1U << (32 - arm_dcache_log2_assoc);
1913 		armv5_dcache_index_max = 0U - armv5_dcache_index_inc;
1914 		cpu_do_powersave = 1;			/* Enable powersave */
1915 		pmap_pte_init_arm11mpcore();
1916 		if (arm_cache_prefer_mask)
1917 			uvmexp.ncolors = (arm_cache_prefer_mask >> PGSHIFT) + 1;
1918 
1919 		return 0;
1920 
1921 	}
1922 #endif	/* CPU_ARM11MPCORE */
1923 
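/*
 * Illustrative note (hypothetical values): the uvmexp.ncolors
 * computation above, repeated in the arm11 and cortex paths below,
 * derives the number of page colours from the cache-alias mask.
 * With arm_cache_prefer_mask == 0x7000 and PGSHIFT == 12:
 *
 *	ncolors = (0x7000 >> 12) + 1 = 8
 *
 * so UVM keeps eight page colours apart to avoid virtual-cache
 * aliasing.
 */
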
1924 #if defined(CPU_ARM11)
1925 	if (cputype == CPU_ID_ARM1136JS ||
1926 	    cputype == CPU_ID_ARM1136JSR1 ||
1927 	    cputype == CPU_ID_ARM1176JZS) {
1928 		cpufuncs = arm11_cpufuncs;
1929 #if defined(CPU_ARM1136)
1930 		if (cputype == CPU_ID_ARM1136JS ||
1931 		    cputype == CPU_ID_ARM1136JSR1) {
1932 			cpufuncs = arm1136_cpufuncs;
1933 			if (cputype == CPU_ID_ARM1136JS)
1934 				cpufuncs.cf_sleep = arm1136_sleep_rev0;
1935 		}
1936 #endif
1937 #if defined(CPU_ARM1176)
1938 		if (cputype == CPU_ID_ARM1176JZS) {
1939 			cpufuncs = arm1176_cpufuncs;
1940 		}
1941 #endif
1942 #if defined(CPU_ARMV7) || defined(CPU_PRE_ARMV6)
1943 		cpu_armv6_p = true;
1944 #endif
1945 		cpu_do_powersave = 1;			/* Enable powersave */
1946 		get_cachetype_cp15();
1947 #ifdef ARM11_CACHE_WRITE_THROUGH
1948 		pmap_pte_init_arm11();
1949 #else
1950 		pmap_pte_init_generic();
1951 #endif
1952 		if (arm_cache_prefer_mask)
1953 			uvmexp.ncolors = (arm_cache_prefer_mask >> PGSHIFT) + 1;
1954 
1955 		/*
1956 		 * Start and reset the PMC Cycle Counter.
1957 		 */
1958 		armreg_pmcrv6_write(ARM11_PMCCTL_E | ARM11_PMCCTL_P | ARM11_PMCCTL_C);
1959 		return 0;
1960 	}
1961 #endif /* CPU_ARM11 */
1962 #ifdef CPU_SA110
1963 	if (cputype == CPU_ID_SA110) {
1964 		cpufuncs = sa110_cpufuncs;
1965 		get_cachetype_table();
1966 		pmap_pte_init_sa1();
1967 		return 0;
1968 	}
1969 #endif	/* CPU_SA110 */
1970 #ifdef CPU_SA1100
1971 	if (cputype == CPU_ID_SA1100) {
1972 		cpufuncs = sa11x0_cpufuncs;
1973 		get_cachetype_table();
1974 		pmap_pte_init_sa1();
1975 
1976 		/* Use powersave on this CPU. */
1977 		cpu_do_powersave = 1;
1978 
1979 		return 0;
1980 	}
1981 #endif	/* CPU_SA1100 */
1982 #ifdef CPU_SA1110
1983 	if (cputype == CPU_ID_SA1110) {
1984 		cpufuncs = sa11x0_cpufuncs;
1985 		get_cachetype_table();
1986 		pmap_pte_init_sa1();
1987 
1988 		/* Use powersave on this CPU. */
1989 		cpu_do_powersave = 1;
1990 
1991 		return 0;
1992 	}
1993 #endif	/* CPU_SA1110 */
1994 #ifdef CPU_FA526
1995 	if (cputype == CPU_ID_FA526) {
1996 		cpufuncs = fa526_cpufuncs;
1997 		get_cachetype_cp15();
1998 		pmap_pte_init_generic();
1999 
2000 		/* Use powersave on this CPU. */
2001 		cpu_do_powersave = 1;
2002 
2003 		return 0;
2004 	}
2005 #endif	/* CPU_FA526 */
2006 #ifdef CPU_IXP12X0
2007 	if (cputype == CPU_ID_IXP1200) {
2008 		cpufuncs = ixp12x0_cpufuncs;
2009 		get_cachetype_table();
2010 		pmap_pte_init_sa1();
2011 		return 0;
2012 	}
2013 #endif  /* CPU_IXP12X0 */
2014 #ifdef CPU_XSCALE_80200
2015 	if (cputype == CPU_ID_80200) {
2016 		int rev = cpufunc_id() & CPU_ID_REVISION_MASK;
2017 
2018 		i80200_icu_init();
2019 
2020 		/*
2021 		 * Reset the Performance Monitoring Unit to a
2022 		 * pristine state:
2023 		 *	- CCNT, PMN0, PMN1 reset to 0
2024 		 *	- overflow indications cleared
2025 		 *	- all counters disabled
2026 		 */
2027 		__asm volatile("mcr p14, 0, %0, c0, c0, 0"
2028 			:
2029 			: "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
2030 			       PMNC_CC_IF));
2031 
2032 #if defined(XSCALE_CCLKCFG)
2033 		/*
2034 		 * Crank CCLKCFG to maximum legal value.
2035 		 */
2036 		__asm volatile ("mcr p14, 0, %0, c6, c0, 0"
2037 			:
2038 			: "r" (XSCALE_CCLKCFG));
2039 #endif
2040 
2041 		/*
2042 		 * XXX Disable ECC in the Bus Controller Unit; we
2043 		 * don't really support it, yet.  Clear any pending
2044 		 * error indications.
2045 		 */
2046 		__asm volatile("mcr p13, 0, %0, c0, c1, 0"
2047 			:
2048 			: "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));
2049 
2050 		cpufuncs = xscale_cpufuncs;
2051 #if defined(PERFCTRS)
2052 		xscale_pmu_init();
2053 #endif
2054 
2055 		/*
2056 		 * i80200 errata: Step-A0 and A1 have a bug where
2057 		 * D$ dirty bits are not cleared on "invalidate by
2058 		 * address".
2059 		 *
2060 		 * Workaround: Clean cache line before invalidating.
2061 		 */
2062 		if (rev == 0 || rev == 1)
2063 			cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng;
2064 
2065 		get_cachetype_cp15();
2066 		pmap_pte_init_xscale();
2067 		return 0;
2068 	}
2069 #endif /* CPU_XSCALE_80200 */
2070 #ifdef CPU_XSCALE_80321
2071 	if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
2072 	    cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0 ||
2073 	    cputype == CPU_ID_80219_400 || cputype == CPU_ID_80219_600) {
2074 		i80321_icu_init();
2075 
2076 		/*
2077 		 * Reset the Performance Monitoring Unit to a
2078 		 * pristine state:
2079 		 *	- CCNT, PMN0, PMN1 reset to 0
2080 		 *	- overflow indications cleared
2081 		 *	- all counters disabled
2082 		 */
2083 		__asm volatile("mcr p14, 0, %0, c0, c0, 0"
2084 			:
2085 			: "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
2086 			       PMNC_CC_IF));
2087 
2088 		cpufuncs = xscale_cpufuncs;
2089 #if defined(PERFCTRS)
2090 		xscale_pmu_init();
2091 #endif
2092 
2093 		get_cachetype_cp15();
2094 		pmap_pte_init_xscale();
2095 		return 0;
2096 	}
2097 #endif /* CPU_XSCALE_80321 */
2098 #ifdef __CPU_XSCALE_PXA2XX
2099 	/* ignore core revision to test PXA2xx CPUs */
2100 	if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X ||
2101 	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
2102 	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {
2103 
2104 		cpufuncs = xscale_cpufuncs;
2105 #if defined(PERFCTRS)
2106 		xscale_pmu_init();
2107 #endif
2108 
2109 		get_cachetype_cp15();
2110 		pmap_pte_init_xscale();
2111 
2112 		/* Use powersave on this CPU. */
2113 		cpu_do_powersave = 1;
2114 
2115 		return 0;
2116 	}
2117 #endif /* __CPU_XSCALE_PXA2XX */
2118 #ifdef CPU_XSCALE_IXP425
2119 	if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
2120 	    cputype == CPU_ID_IXP425_266) {
2121 		ixp425_icu_init();
2122 
2123 		cpufuncs = xscale_cpufuncs;
2124 #if defined(PERFCTRS)
2125 		xscale_pmu_init();
2126 #endif
2127 
2128 		get_cachetype_cp15();
2129 		pmap_pte_init_xscale();
2130 
2131 		return 0;
2132 	}
2133 #endif /* CPU_XSCALE_IXP425 */
2134 #if defined(CPU_CORTEX)
2135 	if (CPU_ID_CORTEX_P(cputype)) {
2136 		cpufuncs = armv7_cpufuncs;
2137 		cpu_do_powersave = 1;			/* Enable powersave */
2138 #if defined(CPU_ARMV6) || defined(CPU_PRE_ARMV6)
2139 		cpu_armv7_p = true;
2140 #endif
2141 		get_cachetype_cp15();
2142 		pmap_pte_init_armv7();
2143 		if (arm_cache_prefer_mask)
2144 			uvmexp.ncolors = (arm_cache_prefer_mask >> PGSHIFT) + 1;
2145 		/*
2146 		 * Start and reset the PMC Cycle Counter.
2147 		 */
2148 		armreg_pmcr_write(ARM11_PMCCTL_E | ARM11_PMCCTL_P | ARM11_PMCCTL_C);
2149 		armreg_pmcntenset_write(CORTEX_CNTENS_C);
2150 		return 0;
2151 	}
2152 #endif /* CPU_CORTEX */
2153 
2154 #if defined(CPU_PJ4B)
2155 	if ((cputype == CPU_ID_MV88SV581X_V6 ||
2156 	    cputype == CPU_ID_MV88SV581X_V7 ||
2157 	    cputype == CPU_ID_MV88SV584X_V7 ||
2158 	    cputype == CPU_ID_ARM_88SV581X_V6 ||
2159 	    cputype == CPU_ID_ARM_88SV581X_V7) &&
2160 	    (armreg_pfr0_read() & ARM_PFR0_THUMBEE_MASK)) {
2161 			cpufuncs = pj4bv7_cpufuncs;
2162 #if defined(CPU_ARMV6) || defined(CPU_PRE_ARMV6)
2163 			cpu_armv7_p = true;
2164 #endif
2165 			get_cachetype_cp15();
2166 			pmap_pte_init_armv7();
2167 			return 0;
2168 	}
2169 #endif /* CPU_PJ4B */
2170 
2171 	/*
2172 	 * Bzzzz. And the answer was ...
2173 	 */
2174 	panic("No support for this CPU type (%08x) in kernel", cputype);
2175 	return(ARCHITECTURE_NOT_PRESENT);
2176 }
2177 
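/*
 * A minimal sketch of how the switch chosen above is consumed: the
 * rest of the kernel calls wrappers that indirect through cpufuncs
 * (names as used elsewhere in this file), e.g.:
 *
 *	cpu_idcache_wbinv_all();	// -> cpufuncs.cf_idcache_wbinv_all()
 *	cpu_control(mask, bits);	// -> cpufuncs.cf_control(mask, bits)
 *
 * so no further run-time cputype checks are needed once
 * set_cpufuncs() has returned.
 */
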
2178 #ifdef CPU_ARM2
2179 u_int arm2_id(void)
2180 {
2181 
2182 	return CPU_ID_ARM2;
2183 }
2184 #endif /* CPU_ARM2 */
2185 
2186 #ifdef CPU_ARM250
2187 u_int arm250_id(void)
2188 {
2189 
2190 	return CPU_ID_ARM250;
2191 }
2192 #endif /* CPU_ARM250 */
2193 
2194 /*
2195  * Fixup routines for data and prefetch aborts.
2196  *
2197  * Several compile-time symbols are used:
2198  *
2199  * DEBUG_FAULT_CORRECTION - Print debugging information during the
2200  * correction of registers after a fault.
2201  * ARM6_LATE_ABORT - The ARM6 supports both early and late aborts;
2202  * when this is defined, late aborts are used.
2203  */
2204 
2205 
2206 /*
2207  * Null abort fixup routine.
2208  * For use when no fixup is required.
2209  */
2210 int
2211 cpufunc_null_fixup(void *arg)
2212 {
2213 	return(ABORT_FIXUP_OK);
2214 }
2215 
2216 
2217 #if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3) || \
2218     defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI)
2219 
2220 #ifdef DEBUG_FAULT_CORRECTION
2221 #define DFC_PRINTF(x)		printf x
2222 #define DFC_DISASSEMBLE(x)	disassemble(x)
2223 #else
2224 #define DFC_PRINTF(x)		/* nothing */
2225 #define DFC_DISASSEMBLE(x)	/* nothing */
2226 #endif
2227 
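/*
 * Usage note: DFC_PRINTF takes a doubly-parenthesised argument list so
 * the whole call can compile away when DEBUG_FAULT_CORRECTION is off,
 * e.g. (as used below):
 *
 *	DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
 */
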
2228 /*
2229  * "Early" data abort fixup.
2230  *
2231  * For ARM2, ARM2as, ARM3 and ARM6 (in early-abort mode).  Also used
2232  * indirectly by ARM6 (in late-abort mode) and ARM7[TDMI].
2233  *
2234  * In early aborts, we may have to fix up LDM, STM, LDC and STC.
2235  */
2236 int
2237 early_abort_fixup(void *arg)
2238 {
2239 	trapframe_t *frame = arg;
2240 	u_int fault_pc;
2241 	u_int fault_instruction;
2242 	int saved_lr = 0;
2243 
2244 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
2245 
2246 		/* Ok an abort in SVC mode */
2247 
2248 		/*
2249 		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
2250 		 * as the fault happened in svc mode but we need it in the
2251 		 * usr slot so we can treat the registers as an array of ints
2252 		 * during fixing.
2253 		 * NOTE: the PC is in its usual slot, but writeback to r15
2254 		 * is not allowed in any case.
2255 		 * Doing it like this is more efficient than trapping this
2256 		 * case in all possible locations in the following fixup code.
2257 		 */
2258 
2259 		saved_lr = frame->tf_usr_lr;
2260 		frame->tf_usr_lr = frame->tf_svc_lr;
2261 
2262 		/*
2263 		 * Note the trapframe does not have the SVC r13 so a fault
2264 		 * from an instruction with writeback to r13 in SVC mode is
2265 		 * not allowed. This should not happen as the kstack is
2266 		 * always valid.
2267 		 */
2268 	}
2269 
2270 	/* Get fault address and status from the CPU */
2271 
2272 	fault_pc = frame->tf_pc;
2273 	fault_instruction = *((volatile unsigned int *)fault_pc);
2274 
2275 	/* Decode the fault instruction and fix the registers as needed */
2276 
2277 	if ((fault_instruction & 0x0e000000) == 0x08000000) {
2278 		int base;
2279 		int loop;
2280 		int count;
2281 		int *registers = &frame->tf_r0;
2282 
2283 		DFC_PRINTF(("LDM/STM\n"));
2284 		DFC_DISASSEMBLE(fault_pc);
2285 		if (fault_instruction & (1 << 21)) {
2286 			DFC_PRINTF(("This instruction must be corrected\n"));
2287 			base = (fault_instruction >> 16) & 0x0f;
2288 			if (base == 15)
2289 				return ABORT_FIXUP_FAILED;
2290 			/* Count registers transferred */
2291 			count = 0;
2292 			for (loop = 0; loop < 16; ++loop) {
2293 				if (fault_instruction & (1<<loop))
2294 					++count;
2295 			}
2296 			DFC_PRINTF(("%d registers used\n", count));
2297 			DFC_PRINTF(("Corrected r%d by %d bytes ",
2298 				       base, count * 4));
2299 			if (fault_instruction & (1 << 23)) {
2300 				DFC_PRINTF(("down\n"));
2301 				registers[base] -= count * 4;
2302 			} else {
2303 				DFC_PRINTF(("up\n"));
2304 				registers[base] += count * 4;
2305 			}
2306 		}
2307 	} else if ((fault_instruction & 0x0e000000) == 0x0c000000) {
2308 		int base;
2309 		int offset;
2310 		int *registers = &frame->tf_r0;
2311 
2312 		/* REGISTER CORRECTION IS REQUIRED FOR THESE INSTRUCTIONS */
2313 
2314 		DFC_DISASSEMBLE(fault_pc);
2315 
2316 		/* Only need to fix registers if write back is turned on */
2317 
2318 		if ((fault_instruction & (1 << 21)) != 0) {
2319 			base = (fault_instruction >> 16) & 0x0f;
2320 			if (base == 13 &&
2321 			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
2322 				return ABORT_FIXUP_FAILED;
2323 			if (base == 15)
2324 				return ABORT_FIXUP_FAILED;
2325 
2326 			offset = (fault_instruction & 0xff) << 2;
2327 			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
2328 			if ((fault_instruction & (1 << 23)) != 0)
2329 				offset = -offset;
2330 			registers[base] += offset;
2331 			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
2332 		}
2333 	}
2334 
2335 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
2336 
2337 		/* Ok an abort in SVC mode */
2338 
2339 		/*
2340 		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
2341 		 * as the fault happened in svc mode but we need it in the
2342 		 * usr slot so we can treat the registers as an array of ints
2343 		 * during fixing.
2344 		 * NOTE: the PC is in its usual slot, but writeback to r15
2345 		 * is not allowed in any case.
2346 		 * Doing it like this is more efficient than trapping this
2347 		 * case in all possible locations in the prior fixup code.
2348 		 */
2349 
2350 		frame->tf_svc_lr = frame->tf_usr_lr;
2351 		frame->tf_usr_lr = saved_lr;
2352 
2353 		/*
2354 		 * Note the trapframe does not have the SVC r13 so a fault
2355 		 * from an instruction with writeback to r13 in SVC mode is
2356 		 * not allowed. This should not happen as the kstack is
2357 		 * always valid.
2358 		 */
2359 	}
2360 
2361 	return(ABORT_FIXUP_OK);
2362 }
2363 #endif	/* CPU_ARM2/250/3/6/7 */
2364 
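/*
 * Worked example (illustrative only) of the LDM/STM correction above.
 * If "ldmia r4!, {r0-r3}" aborts: bit 21 (writeback) is set, base is
 * 4, four registers are in the list and bit 23 (U) is set, so the CPU
 * had already stepped r4 up by 4 * 4 bytes.  The fixup takes the
 * "down" branch:
 *
 *	registers[4] -= 4 * 4;		// undo the increment
 *
 * restoring r4 to its value before the faulting instruction.
 */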
2365 
2366 #if (defined(CPU_ARM6) && defined(ARM6_LATE_ABORT)) || defined(CPU_ARM7) || \
2367 	defined(CPU_ARM7TDMI)
2368 /*
2369  * "Late" (base updated) data abort fixup
2370  *
2371  * For ARM6 (in late-abort mode) and ARM7.
2372  *
2373  * In this model, all data-transfer instructions need fixing up.  We defer
2374  * LDM, STM, LDC and STC fixup to the early-abort handler.
2375  */
2376 int
2377 late_abort_fixup(void *arg)
2378 {
2379 	trapframe_t *frame = arg;
2380 	u_int fault_pc;
2381 	u_int fault_instruction;
2382 	int saved_lr = 0;
2383 
2384 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
2385 
2386 		/* Ok an abort in SVC mode */
2387 
2388 		/*
2389 		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
2390 		 * as the fault happened in svc mode but we need it in the
2391 		 * usr slot so we can treat the registers as an array of ints
2392 		 * during fixing.
2393 		 * NOTE: the PC is in its usual slot, but writeback to r15
2394 		 * is not allowed in any case.
2395 		 * Doing it like this is more efficient than trapping this
2396 		 * case in all possible locations in the following fixup code.
2397 		 */
2398 
2399 		saved_lr = frame->tf_usr_lr;
2400 		frame->tf_usr_lr = frame->tf_svc_lr;
2401 
2402 		/*
2403 		 * Note the trapframe does not have the SVC r13 so a fault
2404 		 * from an instruction with writeback to r13 in SVC mode is
2405 		 * not allowed. This should not happen as the kstack is
2406 		 * always valid.
2407 		 */
2408 	}
2409 
2410 	/* Get fault address and status from the CPU */
2411 
2412 	fault_pc = frame->tf_pc;
2413 	fault_instruction = *((volatile unsigned int *)fault_pc);
2414 
2415 	/* Decode the fault instruction and fix the registers as needed */
2416 
2417 	/* Was it a swap instruction? */
2418 
2419 	if ((fault_instruction & 0x0fb00ff0) == 0x01000090) {
2420 		DFC_DISASSEMBLE(fault_pc);
2421 	} else if ((fault_instruction & 0x0c000000) == 0x04000000) {
2422 
2423 		/* Was it an ldr/str instruction? */
2424 		/* This is for late abort only */
2425 
2426 		int base;
2427 		int offset;
2428 		int *registers = &frame->tf_r0;
2429 
2430 		DFC_DISASSEMBLE(fault_pc);
2431 
2432 		/* This is for late abort only */
2433 
2434 		if ((fault_instruction & (1 << 24)) == 0
2435 		    || (fault_instruction & (1 << 21)) != 0) {
2436 			/* post-indexed, or pre-indexed with writeback */
2437 
2438 			base = (fault_instruction >> 16) & 0x0f;
2439 			if (base == 13 &&
2440 			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
2441 				return ABORT_FIXUP_FAILED;
2442 			if (base == 15)
2443 				return ABORT_FIXUP_FAILED;
2444 			DFC_PRINTF(("late abt fix: r%d=%08x : ",
2445 				       base, registers[base]));
2446 			if ((fault_instruction & (1 << 25)) == 0) {
2447 				/* Immediate offset - easy */
2448 
2449 				offset = fault_instruction & 0xfff;
2450 				if ((fault_instruction & (1 << 23)))
2451 					offset = -offset;
2452 				registers[base] += offset;
2453 				DFC_PRINTF(("imm=%08x ", offset));
2454 			} else {
2455 				/* offset is a shifted register */
2456 				int shift;
2457 
2458 				offset = fault_instruction & 0x0f;
2459 				if (offset == base)
2460 					return ABORT_FIXUP_FAILED;
2461 
2462 				/*
2463 				 * Register offset - harder: we have
2464 				 * to cope with shifts!
2465 				 */
2466 				offset = registers[offset];
2467 
2468 				if ((fault_instruction & (1 << 4)) == 0)
2469 					/* shift with amount */
2470 					shift = (fault_instruction >> 7) & 0x1f;
2471 				else {
2472 					/* shift with register */
2473 					if ((fault_instruction & (1 << 7)) != 0)
2474 						/* undefined for now so bail out */
2475 						return ABORT_FIXUP_FAILED;
2476 					shift = ((fault_instruction >> 8) & 0xf);
2477 					if (base == shift)
2478 						return ABORT_FIXUP_FAILED;
2479 					DFC_PRINTF(("shift reg=%d ", shift));
2480 					shift = registers[shift];
2481 				}
2482 				DFC_PRINTF(("shift=%08x ", shift));
2483 				switch (((fault_instruction >> 5) & 0x3)) {
2484 				case 0 : /* Logical left */
2485 					offset = (int)(((u_int)offset) << shift);
2486 					break;
2487 				case 1 : /* Logical Right */
2488 					if (shift == 0) shift = 32;
2489 					offset = (int)(((u_int)offset) >> shift);
2490 					break;
2491 				case 2 : /* Arithmetic Right */
2492 					if (shift == 0) shift = 32;
2493 					offset = (int)(((int)offset) >> shift);
2494 					break;
2495 				case 3 : /* Rotate right (ror or rrx) */
2496 					return ABORT_FIXUP_FAILED;
2497 					break;
2498 				}
2499 
2500 				DFC_PRINTF(("abt: fixed LDR/STR with "
2501 					       "register offset\n"));
2502 				if ((fault_instruction & (1 << 23)))
2503 					offset = -offset;
2504 				DFC_PRINTF(("offset=%08x ", offset));
2505 				registers[base] += offset;
2506 			}
2507 			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
2508 		}
2509 	}
2510 
2511 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
2512 
2513 		/* Ok an abort in SVC mode */
2514 
2515 		/*
2516 		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
2517 		 * as the fault happened in svc mode but we need it in the
2518 		 * usr slot so we can treat the registers as an array of ints
2519 		 * during fixing.
2520 		 * NOTE: the PC is in its usual slot, but writeback to r15
2521 		 * is not allowed in any case.
2522 		 * Doing it like this is more efficient than trapping this
2523 		 * case in all possible locations in the prior fixup code.
2524 		 */
2525 
2526 		frame->tf_svc_lr = frame->tf_usr_lr;
2527 		frame->tf_usr_lr = saved_lr;
2528 
2529 		/*
2530 		 * Note the trapframe does not have the SVC r13 so a fault
2531 		 * from an instruction with writeback to r13 in SVC mode is
2532 		 * not allowed. This should not happen as the kstack is
2533 		 * always valid.
2534 		 */
2535 	}
2536 
2537 	/*
2538 	 * Now let the early-abort fixup routine have a go, in case it
2539 	 * was an LDM, STM, LDC or STC that faulted.
2540 	 */
2541 
2542 	return early_abort_fixup(arg);
2543 }
2544 #endif	/* CPU_ARM6(LATE)/7/7TDMI */
2545 
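/*
 * Worked example (illustrative only) of the register-offset path in
 * late_abort_fixup() above.  For "ldr r0, [r1], r2, lsl #2"
 * (post-indexed, U set) with r2 == 0x10, the decode yields shift
 * type 0 (logical left) and shift amount 2:
 *
 *	offset = 0x10 << 2;		// 0x40
 *	offset = -offset;		// U set: base was incremented
 *	registers[1] += offset;		// step r1 back down by 0x40
 *
 * With U clear the offset stays positive and the decremented base is
 * stepped back up instead.
 */
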
2546 /*
2547  * CPU Setup code
2548  */
2549 
2550 #if defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) || \
2551 	defined(CPU_ARM8) || defined (CPU_ARM9) || defined (CPU_ARM9E) || \
2552 	defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
2553 	defined(CPU_FA526) || \
2554 	defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
2555 	defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425) || \
2556 	defined(CPU_ARM10) || defined(CPU_SHEEVA) || \
2557 	defined(CPU_ARMV6) || defined(CPU_ARMV7)
2558 
2559 #define IGN	0
2560 #define OR	1
2561 #define BIC	2
2562 
2563 struct cpu_option {
2564 	const char *co_name;
2565 	int	co_falseop;
2566 	int	co_trueop;
2567 	int	co_value;
2568 };
2569 
2570 static u_int parse_cpu_options(char *, struct cpu_option *, u_int);
2571 
2572 static u_int
2573 parse_cpu_options(char *args, struct cpu_option *optlist, u_int cpuctrl)
2574 {
2575 	int integer;
2576 
2577 	if (args == NULL)
2578 		return(cpuctrl);
2579 
2580 	while (optlist->co_name) {
2581 		if (get_bootconf_option(args, optlist->co_name,
2582 		    BOOTOPT_TYPE_BOOLEAN, &integer)) {
2583 			if (integer) {
2584 				if (optlist->co_trueop == OR)
2585 					cpuctrl |= optlist->co_value;
2586 				else if (optlist->co_trueop == BIC)
2587 					cpuctrl &= ~optlist->co_value;
2588 			} else {
2589 				if (optlist->co_falseop == OR)
2590 					cpuctrl |= optlist->co_value;
2591 				else if (optlist->co_falseop == BIC)
2592 					cpuctrl &= ~optlist->co_value;
2593 			}
2594 		}
2595 		++optlist;
2596 	}
2597 	return(cpuctrl);
2598 }
2599 #endif /* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 || CPU_SA110 */
2600 
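/*
 * Illustrative sketch of how the parser above is driven by the boot
 * arguments.  With args containing "cpu.nocache=1" and the
 * arm678_options table below, the matching entry is
 * { "cpu.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE }; the boolean
 * parses true, so the co_trueop == BIC branch applies:
 *
 *	cpuctrl &= ~CPU_CONTROL_IDC_ENABLE;	// caches off
 *
 * whereas "cpu.nocache=0" would take the co_falseop == OR branch and
 * set the bit instead.
 */
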
2601 #if defined (CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) \
2602 	|| defined(CPU_ARM8)
2603 struct cpu_option arm678_options[] = {
2604 #ifdef COMPAT_12
2605 	{ "nocache",		IGN, BIC, CPU_CONTROL_IDC_ENABLE },
2606 	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
2607 #endif	/* COMPAT_12 */
2608 	{ "cpu.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
2609 	{ "cpu.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
2610 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2611 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2612 	{ NULL,			IGN, IGN, 0 }
2613 };
2614 
2615 #endif	/* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 */
2616 
2617 #ifdef CPU_ARM6
2618 struct cpu_option arm6_options[] = {
2619 	{ "arm6.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
2620 	{ "arm6.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
2621 	{ "arm6.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2622 	{ "arm6.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2623 	{ NULL,			IGN, IGN, 0 }
2624 };
2625 
2626 void
2627 arm6_setup(char *args)
2628 {
2629 
2630 	/* Set up default control registers bits */
2631 	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2632 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2633 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
2634 #if 0
2635 	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2636 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2637 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
2638 		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE
2639 		 | CPU_CONTROL_AFLT_ENABLE;
2640 #endif
2641 
2642 #ifdef ARM6_LATE_ABORT
2643 	cpuctrl |= CPU_CONTROL_LABT_ENABLE;
2644 #endif	/* ARM6_LATE_ABORT */
2645 
2646 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2647 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2648 #endif
2649 
2650 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
2651 	cpuctrl = parse_cpu_options(args, arm6_options, cpuctrl);
2652 
2653 #ifdef __ARMEB__
2654 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2655 #endif
2656 
2657 	/* Clear out the cache */
2658 	cpu_idcache_wbinv_all();
2659 
2660 	/* Set the control register */
2661 	curcpu()->ci_ctrl = cpuctrl;
2662 	cpu_control(0xffffffff, cpuctrl);
2663 }
2664 #endif	/* CPU_ARM6 */
2665 
2666 #ifdef CPU_ARM7
2667 struct cpu_option arm7_options[] = {
2668 	{ "arm7.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
2669 	{ "arm7.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
2670 	{ "arm7.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2671 	{ "arm7.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2672 #ifdef COMPAT_12
2673 	{ "fpaclk2",		BIC, OR,  CPU_CONTROL_CPCLK },
2674 #endif	/* COMPAT_12 */
2675 	{ "arm700.fpaclk",	BIC, OR,  CPU_CONTROL_CPCLK },
2676 	{ NULL,			IGN, IGN, 0 }
2677 };
2678 
2679 void
2680 arm7_setup(char *args)
2681 {
2682 
2683 	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2684 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2685 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
2686 #if 0
2687 	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2688 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2689 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
2690 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_LABT_ENABLE
2691 		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE
2692 		 | CPU_CONTROL_AFLT_ENABLE;
2693 #endif
2694 
2695 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2696 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2697 #endif
2698 
2699 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
2700 	cpuctrl = parse_cpu_options(args, arm7_options, cpuctrl);
2701 
2702 #ifdef __ARMEB__
2703 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2704 #endif
2705 
2706 	/* Clear out the cache */
2707 	cpu_idcache_wbinv_all();
2708 
2709 	/* Set the control register */
2710 	curcpu()->ci_ctrl = cpuctrl;
2711 	cpu_control(0xffffffff, cpuctrl);
2712 }
2713 #endif	/* CPU_ARM7 */
2714 
2715 #ifdef CPU_ARM7TDMI
2716 struct cpu_option arm7tdmi_options[] = {
2717 	{ "arm7.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
2718 	{ "arm7.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
2719 	{ "arm7.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2720 	{ "arm7.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2721 #ifdef COMPAT_12
2722 	{ "fpaclk2",		BIC, OR,  CPU_CONTROL_CPCLK },
2723 #endif	/* COMPAT_12 */
2724 	{ "arm700.fpaclk",	BIC, OR,  CPU_CONTROL_CPCLK },
2725 	{ NULL,			IGN, IGN, 0 }
2726 };
2727 
2728 void
2729 arm7tdmi_setup(char *args)
2730 {
2731 	int cpuctrl;
2732 
2733 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2734 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2735 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
2736 
2737 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
2738 	cpuctrl = parse_cpu_options(args, arm7tdmi_options, cpuctrl);
2739 
2740 #ifdef __ARMEB__
2741 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2742 #endif
2743 
2744 	/* Clear out the cache */
2745 	cpu_idcache_wbinv_all();
2746 
2747 	/* Set the control register */
2748 	curcpu()->ci_ctrl = cpuctrl;
2749 	cpu_control(0xffffffff, cpuctrl);
2750 }
2751 #endif	/* CPU_ARM7TDMI */
2752 
2753 #ifdef CPU_ARM8
2754 struct cpu_option arm8_options[] = {
2755 	{ "arm8.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
2756 	{ "arm8.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
2757 	{ "arm8.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2758 	{ "arm8.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2759 #ifdef COMPAT_12
2760 	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2761 #endif	/* COMPAT_12 */
2762 	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2763 	{ "arm8.branchpredict",	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2764 	{ NULL,			IGN, IGN, 0 }
2765 };
2766 
2767 void
2768 arm8_setup(char *args)
2769 {
2770 	int integer;
2771 	int clocktest;
2772 	int setclock = 0;
2773 
2774 	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2775 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2776 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
2777 #if 0
2778 	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2779 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2780 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
2781 		 | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_ROM_ENABLE
2782 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE;
2783 #endif
2784 
2785 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2786 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2787 #endif
2788 
2789 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
2790 	cpuctrl = parse_cpu_options(args, arm8_options, cpuctrl);
2791 
2792 #ifdef __ARMEB__
2793 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2794 #endif
2795 
2796 	/* Get clock configuration */
2797 	clocktest = arm8_clock_config(0, 0) & 0x0f;
2798 
2799 	/* Special ARM8 clock and test configuration */
2800 	if (get_bootconf_option(args, "arm8.clock.reset", BOOTOPT_TYPE_BOOLEAN, &integer)) {
2801 		clocktest = 0;
2802 		setclock = 1;
2803 	}
2804 	if (get_bootconf_option(args, "arm8.clock.dynamic", BOOTOPT_TYPE_BOOLEAN, &integer)) {
2805 		if (integer)
2806 			clocktest |= 0x01;
2807 		else
2808 			clocktest &= ~(0x01);
2809 		setclock = 1;
2810 	}
2811 	if (get_bootconf_option(args, "arm8.clock.sync", BOOTOPT_TYPE_BOOLEAN, &integer)) {
2812 		if (integer)
2813 			clocktest |= 0x02;
2814 		else
2815 			clocktest &= ~(0x02);
2816 		setclock = 1;
2817 	}
2818 	if (get_bootconf_option(args, "arm8.clock.fast", BOOTOPT_TYPE_BININT, &integer)) {
2819 		clocktest = (clocktest & ~0x0c) | (integer & 3) << 2;
2820 		setclock = 1;
2821 	}
2822 	if (get_bootconf_option(args, "arm8.test", BOOTOPT_TYPE_BININT, &integer)) {
2823 		clocktest |= (integer & 7) << 5;
2824 		setclock = 1;
2825 	}
2826 
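	/*
	 * Illustrative note on the layout assumed by the code above:
	 * bit 0 selects dynamic clock switching, bit 1 synchronous
	 * mode, bits 2-3 the fast clock select and bits 5-7 the test
	 * bits.  Booting with "arm8.clock.fast=2" therefore yields:
	 *
	 *	clocktest = (clocktest & ~0x0c) | (2 & 3) << 2;
	 */
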
2827 	/* Clear out the cache */
2828 	cpu_idcache_wbinv_all();
2829 
2830 	/* Set the control register */
2831 	curcpu()->ci_ctrl = cpuctrl;
2832 	cpu_control(0xffffffff, cpuctrl);
2833 
2834 	/* Set the clock/test register */
2835 	if (setclock)
2836 		arm8_clock_config(0x7f, clocktest);
2837 }
2838 #endif	/* CPU_ARM8 */
2839 
2840 #ifdef CPU_ARM9
2841 struct cpu_option arm9_options[] = {
2842 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2843 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2844 	{ "arm9.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2845 	{ "arm9.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2846 	{ "arm9.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2847 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2848 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2849 	{ "arm9.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2850 	{ NULL,			IGN, IGN, 0 }
2851 };
2852 
2853 void
2854 arm9_setup(char *args)
2855 {
2856 
2857 	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2858 	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2859 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2860 	    | CPU_CONTROL_WBUF_ENABLE;
2861 	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2862 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2863 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2864 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2865 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2866 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
2867 		 | CPU_CONTROL_ROUNDROBIN;
2868 
2869 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2870 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2871 #endif
2872 
2873 	cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);
2874 
2875 #ifdef __ARMEB__
2876 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2877 #endif
2878 
2879 #ifndef ARM_HAS_VBAR
2880 	if (vector_page == ARM_VECTORS_HIGH)
2881 		cpuctrl |= CPU_CONTROL_VECRELOC;
2882 #endif
2883 
2884 	/* Clear out the cache */
2885 	cpu_idcache_wbinv_all();
2886 
2887 	/* Set the control register */
2888 	curcpu()->ci_ctrl = cpuctrl;
2889 	cpu_control(cpuctrlmask, cpuctrl);
2890 
2891 }
2892 #endif	/* CPU_ARM9 */
2893 
2894 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
2895 struct cpu_option arm10_options[] = {
2896 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2897 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2898 	{ "arm10.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2899 	{ "arm10.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2900 	{ "arm10.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2901 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2902 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2903 	{ "arm10.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2904 	{ NULL,			IGN, IGN, 0 }
2905 };
2906 
2907 void
2908 arm10_setup(char *args)
2909 {
2910 
2911 	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2912 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2913 	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
2914 #if 0
2915 	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2916 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2917 	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2918 	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2919 	    | CPU_CONTROL_BPRD_ENABLE
2920 	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
2921 #endif
2922 
2923 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2924 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2925 #endif
2926 
2927 	cpuctrl = parse_cpu_options(args, arm10_options, cpuctrl);
2928 
2929 #ifdef __ARMEB__
2930 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2931 #endif
2932 
2933 #ifndef ARM_HAS_VBAR
2934 	if (vector_page == ARM_VECTORS_HIGH)
2935 		cpuctrl |= CPU_CONTROL_VECRELOC;
2936 #endif
2937 
2938 	/* Clear out the cache */
2939 	cpu_idcache_wbinv_all();
2940 
2941 	/* Now really make sure they are clean.  */
2942 	__asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
2943 
2944 	/* Set the control register */
2945 	curcpu()->ci_ctrl = cpuctrl;
2946 	cpu_control(0xffffffff, cpuctrl);
2947 
2948 	/* And again. */
2949 	cpu_idcache_wbinv_all();
2950 }
2951 #endif	/* CPU_ARM9E || CPU_ARM10 */
2952 
2953 #if defined(CPU_ARM11)
2954 struct cpu_option arm11_options[] = {
2955 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2956 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2957 	{ "arm11.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2958 	{ "arm11.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2959 	{ "arm11.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2960 	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2961 	{ "arm11.branchpredict", BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2962 	{ NULL,			IGN, IGN, 0 }
2963 };
2964 
2965 void
2966 arm11_setup(char *args)
2967 {
2968 
2969 	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2970 #ifdef ARM_MMU_EXTENDED
2971 	    | CPU_CONTROL_XP_ENABLE
2972 #endif
2973 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2974 	    /* | CPU_CONTROL_BPRD_ENABLE */;
2975 	int cpuctrlmask = cpuctrl
2976 	    | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BPRD_ENABLE
2977 	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2978 	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
2979 
2980 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2981 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2982 #endif
2983 
2984 	cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);
2985 
2986 #ifdef __ARMEB__
2987 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2988 #endif
2989 
2990 #ifndef ARM_HAS_VBAR
2991 	if (vector_page == ARM_VECTORS_HIGH)
2992 		cpuctrl |= CPU_CONTROL_VECRELOC;
2993 #endif
2994 
2995 	/* Clear out the cache */
2996 	cpu_idcache_wbinv_all();
2997 
2998 	/* Now really make sure they are clean.  */
2999 	__asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
3000 
3001 	/* Allow detection code to find the VFP if it's fitted.  */
3002 	armreg_cpacr_write(0x0fffffff);
3003 
3004 	/* Set the control register */
3005 	curcpu()->ci_ctrl = cpuctrl;
3006 	cpu_control(cpuctrlmask, cpuctrl);
3007 
3008 	/* And again. */
3009 	cpu_idcache_wbinv_all();
3010 }
3011 #endif	/* CPU_ARM11 */
3012 
3013 #if defined(CPU_ARM11MPCORE)
3014 
3015 void
3016 arm11mpcore_setup(char *args)
3017 {
3018 
3019 	int cpuctrl = CPU_CONTROL_IC_ENABLE
3020 	    | CPU_CONTROL_DC_ENABLE
3021 #ifdef ARM_MMU_EXTENDED
3022 	    | CPU_CONTROL_XP_ENABLE
3023 #endif
3024 	    | CPU_CONTROL_BPRD_ENABLE ;
3025 	int cpuctrlmask = cpuctrl
3026 	    | CPU_CONTROL_AFLT_ENABLE
3027 	    | CPU_CONTROL_VECRELOC;
3028 
3029 #ifdef	ARM11MPCORE_MMU_COMPAT
3030 	/* XXX: S and R? */
3031 #endif
3032 
3033 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3034 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3035 #endif
3036 
3037 	cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);
3038 
3039 #ifndef ARM_HAS_VBAR
3040 	if (vector_page == ARM_VECTORS_HIGH)
3041 		cpuctrl |= CPU_CONTROL_VECRELOC;
3042 #endif
3043 
3044 	/* Clear out the cache */
3045 	cpu_idcache_wbinv_all();
3046 
3047 	/* Now really make sure they are clean.  */
3048 	__asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
3049 
3050 	/* Allow detection code to find the VFP if it's fitted.  */
3051 	armreg_cpacr_write(0x0fffffff);
3052 
3053 	/* Set the control register */
3054 	curcpu()->ci_ctrl = cpu_control(cpuctrlmask, cpuctrl);
3055 
3056 	/* And again. */
3057 	cpu_idcache_wbinv_all();
3058 }
3059 #endif	/* CPU_ARM11MPCORE */
3060 
3061 #ifdef CPU_PJ4B
3062 void
3063 pj4bv7_setup(char *args)
3064 {
3065 	int cpuctrl;
3066 
3067 	pj4b_config();
3068 
3069 	cpuctrl = CPU_CONTROL_MMU_ENABLE;
3070 #ifdef ARM32_DISABLE_ALIGNMENT_FAULTS
3071 	cpuctrl |= CPU_CONTROL_UNAL_ENABLE;
3072 #else
3073 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3074 #endif
3075 	cpuctrl |= CPU_CONTROL_DC_ENABLE;
3076 	cpuctrl |= CPU_CONTROL_IC_ENABLE;
3077 	cpuctrl |= (0xf << 3);
3078 	cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
3079 	cpuctrl |= (0x5 << 16) | (1 << 22);
3080 	cpuctrl |= CPU_CONTROL_XP_ENABLE;
3081 
3082 #ifndef ARM_HAS_VBAR
3083 	if (vector_page == ARM_VECTORS_HIGH)
3084 		cpuctrl |= CPU_CONTROL_VECRELOC;
3085 #endif
3086 
3087 #ifdef L2CACHE_ENABLE
3088 	/* Setup L2 cache */
3089 	arm_scache.cache_type = CPU_CT_CTYPE_WT;
3090 	arm_scache.cache_unified = 1;
3091 	arm_scache.dcache_type = arm_scache.icache_type = CACHE_TYPE_PIPT;
3092 	arm_scache.dcache_size = arm_scache.icache_size = ARMADAXP_L2_SIZE;
3093 	arm_scache.dcache_ways = arm_scache.icache_ways = ARMADAXP_L2_WAYS;
3094 	arm_scache.dcache_way_size = arm_scache.icache_way_size =
3095 	    ARMADAXP_L2_WAY_SIZE;
3096 	arm_scache.dcache_line_size = arm_scache.icache_line_size =
3097 	    ARMADAXP_L2_LINE_SIZE;
3098 	arm_scache.dcache_sets = arm_scache.icache_sets =
3099 	    ARMADAXP_L2_SETS;
3100 
3101 	cpufuncs.cf_sdcache_wbinv_range	= armadaxp_sdcache_wbinv_range;
3102 	cpufuncs.cf_sdcache_inv_range	= armadaxp_sdcache_inv_range;
3103 	cpufuncs.cf_sdcache_wb_range	= armadaxp_sdcache_wb_range;
3104 #endif
3105 
3106 #ifdef AURORA_IO_CACHE_COHERENCY
3107 	/* use AMBA and I/O Coherency Fabric to maintain cache */
3108 	cpufuncs.cf_dcache_wbinv_range	= pj4b_dcache_cfu_wbinv_range;
3109 	cpufuncs.cf_dcache_inv_range	= pj4b_dcache_cfu_inv_range;
3110 	cpufuncs.cf_dcache_wb_range	= pj4b_dcache_cfu_wb_range;
3111 
3112 	cpufuncs.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop;
3113 	cpufuncs.cf_sdcache_inv_range	= (void *)cpufunc_nullop;
3114 	cpufuncs.cf_sdcache_wb_range	= (void *)cpufunc_nullop;
3115 #endif
3116 
3117 	/* Clear out the cache */
3118 	cpu_idcache_wbinv_all();
3119 
3120 	/* Set the control register */
3121 	cpu_control(0xffffffff, cpuctrl);
3122 
3123 	/* And again. */
3124 	cpu_idcache_wbinv_all();
3125 #ifdef L2CACHE_ENABLE
3126 	armadaxp_sdcache_wbinv_all();
3127 #endif
3128 
3129 	curcpu()->ci_ctrl = cpuctrl;
3130 }
3131 #endif /* CPU_PJ4B */
3132 
3133 #if defined(CPU_ARMV7)
3134 struct cpu_option armv7_options[] = {
3135     { "cpu.cache",      BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3136     { "cpu.nocache",    OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3137     { "armv7.cache",    BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3138     { "armv7.icache",   BIC, OR,  CPU_CONTROL_IC_ENABLE },
3139     { "armv7.dcache",   BIC, OR,  CPU_CONTROL_DC_ENABLE },
3140     { NULL,             IGN, IGN, 0 }
3141 };
3142 
3143 void
3144 armv7_setup(char *args)
3145 {
3146 
3147 	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_IC_ENABLE
3148 	    | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_BPRD_ENABLE
3149 #ifdef __ARMEB__
3150 	    | CPU_CONTROL_EX_BEND
3151 #endif
3152 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3153 	    | CPU_CONTROL_AFLT_ENABLE
3154 #endif
3155 	    | CPU_CONTROL_UNAL_ENABLE;
3156 
3157 	int cpuctrlmask = cpuctrl | CPU_CONTROL_AFLT_ENABLE;
3158 
3159 
3160 	cpuctrl = parse_cpu_options(args, armv7_options, cpuctrl);
3161 
3162 #ifndef ARM_HAS_VBAR
3163 	if (vector_page == ARM_VECTORS_HIGH)
3164 		cpuctrl |= CPU_CONTROL_VECRELOC;
3165 #endif
3166 
3167 	/* Clear out the cache */
3168 	cpu_idcache_wbinv_all();
3169 
3170 	/* Set the control register */
3171 	curcpu()->ci_ctrl = cpuctrl;
3172 	cpu_control(cpuctrlmask, cpuctrl);
3173 }
3174 #endif /* CPU_ARMV7 */
3175 
3176 
3177 #if defined(CPU_ARM1136) || defined(CPU_ARM1176)
3178 void
3179 arm11x6_setup(char *args)
3180 {
3181 	int cpuctrl, cpuctrl_wax;
3182 	uint32_t auxctrl;
3183 	uint32_t sbz = 0;
3184 	uint32_t cpuid;
3185 
3186 	cpuid = cpu_idnum();
3187 
3188 	cpuctrl =
3189 		CPU_CONTROL_MMU_ENABLE  |
3190 		CPU_CONTROL_DC_ENABLE   |
3191 		CPU_CONTROL_WBUF_ENABLE |
3192 		CPU_CONTROL_32BP_ENABLE |
3193 		CPU_CONTROL_32BD_ENABLE |
3194 		CPU_CONTROL_LABT_ENABLE |
3195 		CPU_CONTROL_UNAL_ENABLE |
3196 #ifdef ARM_MMU_EXTENDED
3197 		CPU_CONTROL_XP_ENABLE   |
3198 #else
3199 		CPU_CONTROL_SYST_ENABLE |
3200 #endif
3201 		CPU_CONTROL_IC_ENABLE;
3202 
3203 	/*
3204 	 * "write as existing" bits -- their inverse is the mask
3205 	 * passed to cpu_control() below.
3206 	 */
3207 	cpuctrl_wax =
3208 		(3 << 30) |
3209 		(1 << 29) |
3210 		(1 << 28) |
3211 		(3 << 26) |
3212 		(3 << 19) |
3213 		(1 << 17);
3214 
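	/*
	 * Illustrative note: the inverse of cpuctrl_wax becomes the
	 * change mask, so the cpu_control() call below,
	 *
	 *	cpu_control(~cpuctrl_wax, cpuctrl);
	 *
	 * rewrites bits such as 31:30 with whatever values they
	 * already hold.
	 */
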
3215 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3216 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3217 #endif
3218 
3219 	cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);
3220 
3221 #ifdef __ARMEB__
3222 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3223 #endif
3224 
3225 #ifndef ARM_HAS_VBAR
3226 	if (vector_page == ARM_VECTORS_HIGH)
3227 		cpuctrl |= CPU_CONTROL_VECRELOC;
3228 #endif
3229 
3230 	auxctrl = armreg_auxctl_read();
3231 	/*
3232 	 * This option enables the workaround for ARM1136 r0pX
3233 	 * erratum 364296 (possible cache data corruption with
3234 	 * hit-under-miss enabled). It sets the undocumented bit 31 in
3235 	 * the auxiliary control register and the FI bit in the control
3236 	 * register, thus disabling hit-under-miss without putting the
3237 	 * processor into full low interrupt latency mode. ARM11MPCore
3238 	 * is not affected.
3239 	 */
3240 	if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1136JS) { /* ARM1136JSr0pX */
3241 		cpuctrl |= CPU_CONTROL_FI_ENABLE;
3242 		auxctrl |= ARM1136_AUXCTL_PFI;
3243 	}
3244 
3245 	/*
3246 	 * This enables the workaround for the following ARM1176 r0pX
3247 	 * errata.
3248 	 *
3249 	 * 394601: In low interrupt latency configuration, interrupted clean
3250 	 * and invalidate operation may not clean dirty data.
3251 	 *
3252 	 * 716151: Clean Data Cache line by MVA can corrupt subsequent
3253 	 * stores to the same cache line.
3254 	 *
3255 	 * 714068: Prefetch Instruction Cache Line or Invalidate Instruction
3256 	 * Cache Line by MVA can cause deadlock.
3257 	 */
3258 	if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */
3259 		/* 394601 and 716151 */
3260 		cpuctrl |= CPU_CONTROL_FI_ENABLE;
3261 		auxctrl |= ARM1176_AUXCTL_FIO;
3262 
3263 		/* 714068 */
3264 		auxctrl |= ARM1176_AUXCTL_PHD;
3265 	}
3266 
3267 	/* Clear out the cache */
3268 	cpu_idcache_wbinv_all();
3269 
3270 	/* Now really make sure they are clean.  */
3271 	__asm volatile ("mcr\tp15, 0, %0, c7, c7, 0" : : "r"(sbz));
3272 
3273 	/* Allow detection code to find the VFP if it's fitted.  */
3274 	armreg_cpacr_write(0x0fffffff);
3275 
3276 	/* Set the control register */
3277 	curcpu()->ci_ctrl = cpuctrl;
3278 	cpu_control(~cpuctrl_wax, cpuctrl);
3279 
3280 	/* Update auxctlr */
3281 	armreg_auxctl_write(auxctrl);
3282 
3283 	/* And again. */
3284 	cpu_idcache_wbinv_all();
3285 }
3286 #endif	/* CPU_ARM1136 || CPU_ARM1176 */
3287 
3288 #ifdef CPU_SA110
3289 struct cpu_option sa110_options[] = {
3290 #ifdef COMPAT_12
3291 	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3292 	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
3293 #endif	/* COMPAT_12 */
3294 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3295 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3296 	{ "sa110.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3297 	{ "sa110.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
3298 	{ "sa110.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
3299 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3300 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
3301 	{ "sa110.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3302 	{ NULL,			IGN, IGN, 0 }
3303 };
3304 
3305 void
3306 sa110_setup(char *args)
3307 {
3308 	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3309 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3310 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3311 		 | CPU_CONTROL_WBUF_ENABLE;
3312 #if 0
3313 	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3314 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3315 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3316 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
3317 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
3318 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
3319 		 | CPU_CONTROL_CPCLK;
3320 #endif
3321 
3322 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3323 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3324 #endif
3325 
3326 	cpuctrl = parse_cpu_options(args, sa110_options, cpuctrl);
3327 
3328 #ifdef __ARMEB__
3329 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3330 #endif
3331 
3332 #ifndef ARM_HAS_VBAR
3333 	if (vector_page == ARM_VECTORS_HIGH)
3334 		cpuctrl |= CPU_CONTROL_VECRELOC;
3335 #endif
3336 
3337 	/* Clear out the cache */
3338 	cpu_idcache_wbinv_all();
3339 
3340 	/* Set the control register */
3341 	curcpu()->ci_ctrl = cpuctrl;
3342 #if 0
3343 	cpu_control(cpuctrlmask, cpuctrl);
3344 #endif
3345 	cpu_control(0xffffffff, cpuctrl);
3346 
3347 	/*
3348 	 * Enable clock switching.  Note that this doesn't read or write
3349 	 * r0; r0 is there just to make the asm valid.
3350 	 */
3351 	__asm volatile ("mcr p15, 0, r0, c15, c1, 2");
3352 }
3353 #endif	/* CPU_SA110 */
3354 
3355 #if defined(CPU_SA1100) || defined(CPU_SA1110)
3356 struct cpu_option sa11x0_options[] = {
3357 #ifdef COMPAT_12
3358 	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3359 	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
3360 #endif	/* COMPAT_12 */
3361 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3362 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3363 	{ "sa11x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3364 	{ "sa11x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
3365 	{ "sa11x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
3366 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3367 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
3368 	{ "sa11x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3369 	{ NULL,			IGN, IGN, 0 }
3370 };
3371 
3372 void
3373 sa11x0_setup(char *args)
3374 {
3375 
3376 	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3377 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3378 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3379 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
3380 #if 0
3381 	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3382 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3383 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3384 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
3385 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
3386 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
3387 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
3388 #endif
3389 
3390 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3391 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3392 #endif
3393 
3394 	cpuctrl = parse_cpu_options(args, sa11x0_options, cpuctrl);
3395 
3396 #ifdef __ARMEB__
3397 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3398 #endif
3399 
3400 #ifndef ARM_HAS_VBAR
3401 	if (vector_page == ARM_VECTORS_HIGH)
3402 		cpuctrl |= CPU_CONTROL_VECRELOC;
3403 #endif
3404 
3405 	/* Clear out the cache */
3406 	cpu_idcache_wbinv_all();
3407 
3408 	/* Set the control register */
3409 	curcpu()->ci_ctrl = cpuctrl;
3410 	cpu_control(0xffffffff, cpuctrl);
3411 }
3412 #endif	/* CPU_SA1100 || CPU_SA1110 */
3413 
3414 #if defined(CPU_FA526)
3415 struct cpu_option fa526_options[] = {
3416 #ifdef COMPAT_12
3417 	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3418 	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
3419 #endif	/* COMPAT_12 */
3420 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3421 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3422 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3423 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
3424 	{ NULL,			IGN, IGN, 0 }
3425 };
3426 
3427 void
3428 fa526_setup(char *args)
3429 {
3430 
3431 	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3432 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3433 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3434 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
3435 #if 0
3436 	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3437 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3438 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3439 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
3440 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
3441 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
3442 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
3443 #endif
3444 
3445 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3446 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3447 #endif
3448 
3449 	cpuctrl = parse_cpu_options(args, fa526_options, cpuctrl);
3450 
3451 #ifdef __ARMEB__
3452 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3453 #endif
3454 
3455 #ifndef ARM_HAS_VBAR
3456 	if (vector_page == ARM_VECTORS_HIGH)
3457 		cpuctrl |= CPU_CONTROL_VECRELOC;
3458 #endif
3459 
3460 	/* Clear out the cache */
3461 	cpu_idcache_wbinv_all();
3462 
3463 	/* Set the control register */
3464 	curcpu()->ci_ctrl = cpuctrl;
3465 	cpu_control(0xffffffff, cpuctrl);
3466 }
3467 #endif	/* CPU_FA526 */
3468 
3469 #if defined(CPU_IXP12X0)
3470 struct cpu_option ixp12x0_options[] = {
3471 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3472 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3473 	{ "ixp12x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3474 	{ "ixp12x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
3475 	{ "ixp12x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
3476 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3477 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
3478 	{ "ixp12x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3479 	{ NULL,			IGN, IGN, 0 }
3480 };
3481 
3482 void
3483 ixp12x0_setup(char *args)
3484 {
3485 
3486 	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE
3487 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_SYST_ENABLE
3488 		 | CPU_CONTROL_IC_ENABLE;
3489 
3490 	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_AFLT_ENABLE
3491 		 | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE
3492 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_SYST_ENABLE
3493 		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_IC_ENABLE
3494 		 | CPU_CONTROL_VECRELOC;
3495 
3496 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3497 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3498 #endif
3499 
3500 	cpuctrl = parse_cpu_options(args, ixp12x0_options, cpuctrl);
3501 
3502 #ifdef __ARMEB__
3503 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3504 #endif
3505 
3506 #ifndef ARM_HAS_VBAR
3507 	if (vector_page == ARM_VECTORS_HIGH)
3508 		cpuctrl |= CPU_CONTROL_VECRELOC;
3509 #endif
3510 
3511 	/* Clear out the cache */
3512 	cpu_idcache_wbinv_all();
3513 
3514 	/* Set the control register */
3515 	curcpu()->ci_ctrl = cpuctrl;
3516 	/* cpu_control(0xffffffff, cpuctrl); */
3517 	cpu_control(cpuctrlmask, cpuctrl);
3518 }
3519 #endif /* CPU_IXP12X0 */
3520 
3521 #if defined(CPU_XSCALE)
3522 struct cpu_option xscale_options[] = {
3523 #ifdef COMPAT_12
3524 	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
3525 	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3526 #endif	/* COMPAT_12 */
3527 	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
3528 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3529 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3530 	{ "xscale.branchpredict", BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
3531 	{ "xscale.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3532 	{ "xscale.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
3533 	{ "xscale.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
3534 	{ NULL,			IGN, IGN, 0 }
3535 };
3536 
3537 void
3538 xscale_setup(char *args)
3539 {
3540 	uint32_t auxctl;
3541 
3542 	/*
3543 	 * The XScale Write Buffer is always enabled.  Our option
3544 	 * is to enable/disable coalescing.  Note that bits 6:3
3545 	 * must always be enabled.
3546 	 */
3547 
3548 	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3549 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3550 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3551 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
3552 		 | CPU_CONTROL_BPRD_ENABLE;
3553 #if 0
3554 	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3555 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3556 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3557 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
3558 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
3559 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
3560 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
3561 #endif
3562 
3563 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3564 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3565 #endif
3566 
3567 	cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl);
3568 
3569 #ifdef __ARMEB__
3570 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3571 #endif
3572 
3573 #ifndef ARM_HAS_VBAR
3574 	if (vector_page == ARM_VECTORS_HIGH)
3575 		cpuctrl |= CPU_CONTROL_VECRELOC;
3576 #endif
3577 
3578 	/* Clear out the cache */
3579 	cpu_idcache_wbinv_all();
3580 
3581 	/*
3582 	 * Set the control register.  Note that bits 6:3 must always
3583 	 * be set to 1.
3584 	 */
3585 	curcpu()->ci_ctrl = cpuctrl;
3586 #if 0
3587 	cpu_control(cpuctrlmask, cpuctrl);
3588 #endif
3589 	cpu_control(0xffffffff, cpuctrl);
3590 
3591 	/* Configure write coalescing (setting the K bit disables it) */
3592 	auxctl = armreg_auxctl_read();
3593 #ifdef XSCALE_NO_COALESCE_WRITES
3594 	auxctl |= XSCALE_AUXCTL_K;
3595 #else
3596 	auxctl &= ~XSCALE_AUXCTL_K;
3597 #endif
3598 	armreg_auxctl_write(auxctl);
3599 }
3600 #endif	/* CPU_XSCALE */
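/*
 * Editor's note (illustrative): the coalescing tweak above flips the
 * "K" bit in the XScale auxiliary control register, which disables
 * write-buffer coalescing when set.  The armreg_auxctl_* accessors are
 * commonly thin wrappers around CP15 c1/c0/1; a sketch, assuming that
 * encoding (the function names below are made up for the sketch):
 */
#if 0	/* sketch only */
static inline uint32_t
auxctl_read(void)
{
	uint32_t val;

	__asm volatile("mrc\tp15, 0, %0, c1, c0, 1" : "=r"(val));
	return val;
}

static inline void
auxctl_write(uint32_t val)
{
	__asm volatile("mcr\tp15, 0, %0, c1, c0, 1" : : "r"(val));
}
#endif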
3601 
3602 #if defined(CPU_SHEEVA)
3603 struct cpu_option sheeva_options[] = {
3604 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3605 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3606 	{ "sheeva.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3607 	{ "sheeva.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
3608 	{ "sheeva.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
3609 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3610 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
3611 	{ "sheeva.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3612 	{ NULL,			IGN, IGN, 0 }
3613 };
3614 
3615 void
3616 sheeva_setup(char *args)
3617 {
3618 	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
3619 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3620 	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
3621 #if 0
3622 	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
3623 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3624 	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
3625 	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
3626 	    | CPU_CONTROL_BPRD_ENABLE
3627 	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
3628 #endif
3629 
3630 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3631 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3632 #endif
3633 
3634 	cpuctrl = parse_cpu_options(args, sheeva_options, cpuctrl);
3635 
3636 	/* Enable DCache Streaming Switch and Write Allocate */
3637 	uint32_t sheeva_ext = armreg_sheeva_xctrl_read();
3638 
3639 	sheeva_ext |= FC_DCACHE_STREAM_EN | FC_WR_ALLOC_EN;
3640 #ifdef SHEEVA_L2_CACHE
3641 	sheeva_ext |= FC_L2CACHE_EN;
3642 	sheeva_ext &= ~FC_L2_PREF_DIS;
3643 #endif
3644 
3645 	armreg_sheeva_xctrl_write(sheeva_ext);
3646 
3647 #ifdef SHEEVA_L2_CACHE
3648 #ifndef SHEEVA_L2_CACHE_WT
3649 	arm_scache.cache_type = CPU_CT_CTYPE_WB2;
3650 #elif CPU_CT_CTYPE_WT != 0
3651 	arm_scache.cache_type = CPU_CT_CTYPE_WT;
3652 #endif
3653 	arm_scache.cache_unified = 1;
3654 	arm_scache.dcache_type = arm_scache.icache_type = CACHE_TYPE_PIPT;
3655 	arm_scache.dcache_size = arm_scache.icache_size = 256*1024;
3656 	arm_scache.dcache_ways = arm_scache.icache_ways = 4;
3657 	arm_scache.dcache_way_size = arm_scache.icache_way_size =
3658 	    arm_scache.dcache_size / arm_scache.dcache_ways;
3659 	arm_scache.dcache_line_size = arm_scache.icache_line_size = 32;
3660 	arm_scache.dcache_sets = arm_scache.icache_sets =
3661 	    arm_scache.dcache_way_size / arm_scache.dcache_line_size;
3662 
3663 	cpufuncs.cf_sdcache_wb_range = sheeva_sdcache_wb_range;
3664 	cpufuncs.cf_sdcache_inv_range = sheeva_sdcache_inv_range;
3665 	cpufuncs.cf_sdcache_wbinv_range = sheeva_sdcache_wbinv_range;
3666 #endif /* SHEEVA_L2_CACHE */
3667 
3668 #ifdef __ARMEB__
3669 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3670 #endif
3671 
3672 #ifndef ARM_HAS_VBAR
3673 	if (vector_page == ARM_VECTORS_HIGH)
3674 		cpuctrl |= CPU_CONTROL_VECRELOC;
3675 #endif
3676 
3677 	/* Clear out the cache */
3678 	cpu_idcache_wbinv_all();
3679 
3680 	/* Now really make sure they are gone: c7, c7, 0 invalidates I+D.  */
3681 	__asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
3682 
3683 	/* Set the control register */
3684 	curcpu()->ci_ctrl = cpuctrl;
3685 	cpu_control(0xffffffff, cpuctrl);
3686 
3687 	/* And again. */
3688 	cpu_idcache_wbinv_all();
3689 #ifdef SHEEVA_L2_CACHE
3690 	sheeva_sdcache_wbinv_all();
3691 #endif
3692 }
3693 #endif	/* CPU_SHEEVA */
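/*
 * Editor's note (illustrative): the Sheeva L2 geometry filled in by
 * sheeva_setup() follows directly from the constants it uses: 256 KiB
 * total over 4 ways gives a 64 KiB way; 64 KiB of 32-byte lines gives
 * 2048 sets.  A sketch of that arithmetic as compile-time checks; the
 * macro names are made up for the sketch:
 */
#if 0	/* sketch only */
#define	SHEEVA_L2_SIZE	(256 * 1024)
#define	SHEEVA_L2_WAYS	4
#define	SHEEVA_L2_LINE	32

CTASSERT(SHEEVA_L2_SIZE / SHEEVA_L2_WAYS == 64 * 1024);		   /* way size */
CTASSERT((SHEEVA_L2_SIZE / SHEEVA_L2_WAYS) / SHEEVA_L2_LINE == 2048); /* sets */
#endif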
3694