xref: /netbsd-src/sys/arch/arm/arm/cpufunc.c (revision c38e7cc395b1472a774ff828e46123de44c628e9)
1 /*	$NetBSD: cpufunc.c,v 1.168 2018/04/01 04:35:03 ryo Exp $	*/
2 
3 /*
4  * arm7tdmi support code Copyright (c) 2001 John Fremlin
5  * arm8 support code Copyright (c) 1997 ARM Limited
6  * arm8 support code Copyright (c) 1997 Causality Limited
7  * arm9 support code Copyright (C) 2001 ARM Ltd
8  * arm11 support code Copyright (c) 2007 Microsoft
9  * cortexa8 support code Copyright (c) 2008 3am Software Foundry
10  * cortexa8 improvements Copyright (c) Goeran Weinholt
11  * Copyright (c) 1997 Mark Brinicombe.
12  * Copyright (c) 1997 Causality Limited
13  * All rights reserved.
14  *
15  * Redistribution and use in source and binary forms, with or without
16  * modification, are permitted provided that the following conditions
17  * are met:
18  * 1. Redistributions of source code must retain the above copyright
19  *    notice, this list of conditions and the following disclaimer.
20  * 2. Redistributions in binary form must reproduce the above copyright
21  *    notice, this list of conditions and the following disclaimer in the
22  *    documentation and/or other materials provided with the distribution.
23  * 3. All advertising materials mentioning features or use of this software
24  *    must display the following acknowledgement:
25  *	This product includes software developed by Causality Limited.
26  * 4. The name of Causality Limited may not be used to endorse or promote
27  *    products derived from this software without specific prior written
28  *    permission.
29  *
30  * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
31  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
32  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
33  * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
34  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
35  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
36  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
37  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
38  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
39  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
40  * SUCH DAMAGE.
41  *
42  * RiscBSD kernel project
43  *
44  * cpufuncs.c
45  *
46  * C functions for supporting CPU / MMU / TLB specific operations.
47  *
48  * Created	: 30/01/97
49  */
50 
51 #include <sys/cdefs.h>
52 __KERNEL_RCSID(0, "$NetBSD: cpufunc.c,v 1.168 2018/04/01 04:35:03 ryo Exp $");
53 
54 #include "opt_compat_netbsd.h"
55 #include "opt_cpuoptions.h"
56 #include "opt_perfctrs.h"
57 
58 #include <sys/types.h>
59 #include <sys/param.h>
60 #include <sys/pmc.h>
61 #include <sys/systm.h>
62 #include <machine/cpu.h>
63 #include <machine/bootconfig.h>
64 #include <arch/arm/arm/disassem.h>
65 
66 #include <uvm/uvm.h>
67 
68 #include <arm/cpufunc_proto.h>
69 #include <arm/cpuconf.h>
70 #include <arm/locore.h>
71 
72 #ifdef CPU_XSCALE_80200
73 #include <arm/xscale/i80200reg.h>
74 #include <arm/xscale/i80200var.h>
75 #endif
76 
77 #ifdef CPU_XSCALE_80321
78 #include <arm/xscale/i80321reg.h>
79 #include <arm/xscale/i80321var.h>
80 #endif
81 
82 #ifdef CPU_XSCALE_IXP425
83 #include <arm/xscale/ixp425reg.h>
84 #include <arm/xscale/ixp425var.h>
85 #endif
86 
87 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321)
88 #include <arm/xscale/xscalereg.h>
89 #endif
90 
91 #if defined(CPU_PJ4B)
92 #include "opt_cputypes.h"
93 #include "opt_mvsoc.h"
94 #include <machine/bus_defs.h>
95 #if defined(ARMADAXP)
96 #include <arm/marvell/armadaxpreg.h>
97 #include <arm/marvell/armadaxpvar.h>
98 #endif
99 #endif
100 
#if defined(PERFCTRS)
/* Performance-monitor counter operations, registered by the CPU driver. */
struct arm_pmc_funcs *arm_pmc;
#endif

/*
 * Run-time architecture-level flags.  Each is only compiled in when the
 * kernel supports more than one architecture level, so a single-arch
 * kernel pays no cost for them.
 */
#if defined(CPU_ARMV7) && (defined(CPU_ARMV6) || defined(CPU_PRE_ARMV6))
bool cpu_armv7_p;		/* true if the running CPU is ARMv7 */
#endif

#if defined(CPU_ARMV6) && (defined(CPU_ARMV7) || defined(CPU_PRE_ARMV6))
bool cpu_armv6_p;		/* true if the running CPU is ARMv6 */
#endif


/* PRIMARY CACHE VARIABLES */
#if (ARM_MMU_V6 + ARM_MMU_V7) != 0
/* Page-coloring mask for VIPT caches (only meaningful on v6/v7 MMUs). */
u_int	arm_cache_prefer_mask;
#endif
struct	arm_cache_info arm_pcache;	/* primary (L1) cache geometry */
struct	arm_cache_info arm_scache;	/* secondary cache geometry -- presumably L2; confirm vs. users */

u_int	arm_dcache_align;		/* D-cache line size, in bytes */
u_int	arm_dcache_align_mask;		/* arm_dcache_align - 1, for alignment math */

/* 1 == use cpu_sleep(), 0 == don't */
int cpu_do_powersave;
127 #ifdef CPU_ARM2
/*
 * Dispatch table for the ARM2 core.
 *
 * Nearly every slot is cpufunc_nullop: this table provides no MMU, TLB
 * or I-cache handling at all.  The (void *) casts adapt the
 * argument-less nullop to slots whose prototypes take arguments.
 * NOTE(review): cf_dcache_wbinv_all is wired to arm3_cache_flush even
 * on ARM2 -- confirm this is intentional rather than copied from the
 * ARM3 table.
 */
struct cpu_functions arm2_cpufuncs = {
	/* CPU functions */

	.cf_id			= arm2_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= (void *)cpufunc_nullop,

	/* TLB functions */

	.cf_tlb_flushID		= cpufunc_nullop,
	.cf_tlb_flushID_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushI		= cpufunc_nullop,
	.cf_tlb_flushI_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushD		= cpufunc_nullop,
	.cf_tlb_flushD_SE	= (void *)cpufunc_nullop,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *) cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm3_cache_flush,
	.cf_dcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_dcache_inv_range	= (void *)cpufunc_nullop,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= cpufunc_nullop,
	.cf_idcache_wbinv_range	= (void *)cpufunc_nullop,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= early_abort_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_setup		= (void *)cpufunc_nullop

};
181 #endif	/* CPU_ARM2 */
182 
183 #ifdef CPU_ARM250
/*
 * Dispatch table for the ARM250 core.
 *
 * Identical to the ARM2 table except for the ID routine (arm250_id):
 * no MMU, TLB or I-cache operations -- everything is a nullop, with
 * (void *) casts adapting the nullop to slots that take arguments.
 */
struct cpu_functions arm250_cpufuncs = {
	/* CPU functions */

	.cf_id			= arm250_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= (void *)cpufunc_nullop,

	/* TLB functions */

	.cf_tlb_flushID		= cpufunc_nullop,
	.cf_tlb_flushID_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushI		= cpufunc_nullop,
	.cf_tlb_flushI_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushD		= cpufunc_nullop,
	.cf_tlb_flushD_SE	= (void *)cpufunc_nullop,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *) cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm3_cache_flush,
	.cf_dcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_dcache_inv_range	= (void *)cpufunc_nullop,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= cpufunc_nullop,
	.cf_idcache_wbinv_range	= (void *)cpufunc_nullop,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= early_abort_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_setup		= (void *)cpufunc_nullop

};
237 #endif	/* CPU_ARM250 */
238 
239 #ifdef CPU_ARM3
/*
 * Dispatch table for the ARM3 core.
 *
 * Still no MMU/TLB support (those slots stay nullops), but the control
 * register is real (arm3_control) and there is a genuine cache flush
 * (arm3_cache_flush).  The flush routine operates on the whole cache,
 * so all ranged wbinv/inv slots simply reuse it; there is no clean-only
 * operation, so cf_dcache_wb_range is a no-op.
 */
struct cpu_functions arm3_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= arm3_control,

	/* TLB functions */

	.cf_tlb_flushID		= cpufunc_nullop,
	.cf_tlb_flushID_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushI		= cpufunc_nullop,
	.cf_tlb_flushI_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushD		= cpufunc_nullop,
	.cf_tlb_flushD_SE	= (void *)cpufunc_nullop,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *) cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm3_cache_flush,
	.cf_dcache_wbinv_range	= (void *)arm3_cache_flush,
	.cf_dcache_inv_range	= (void *)arm3_cache_flush,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm3_cache_flush,
	.cf_idcache_wbinv_range	= (void *)arm3_cache_flush,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= early_abort_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_setup		= (void *)cpufunc_nullop

};
293 #endif	/* CPU_ARM3 */
294 
295 #ifdef CPU_ARM6
/*
 * Dispatch table for the ARM6 core.
 *
 * First table here with full MMU hooks: CP15 control/domain/fault
 * accessors plus the shared arm67 TTB, TLB and cache routines.  The
 * TLB provides no separate I/D operations, so all six flush slots map
 * onto the same arm67 flush/purge pair; likewise the unified cache
 * flush backs every wbinv/inv slot.  The data-abort fixup is chosen at
 * compile time via the ARM6_LATE_ABORT option.
 */
struct cpu_functions arm6_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm67_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm67_tlb_flush,
	.cf_tlb_flushID_SE	= arm67_tlb_purge,
	.cf_tlb_flushI		= arm67_tlb_flush,
	.cf_tlb_flushI_SE	= arm67_tlb_purge,
	.cf_tlb_flushD		= arm67_tlb_flush,
	.cf_tlb_flushD_SE	= arm67_tlb_purge,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *) cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm67_cache_flush,
	.cf_dcache_wbinv_range	= (void *)arm67_cache_flush,
	.cf_dcache_inv_range	= (void *)arm67_cache_flush,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm67_cache_flush,
	.cf_idcache_wbinv_range	= (void *)arm67_cache_flush,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

#ifdef ARM6_LATE_ABORT
	.cf_dataabt_fixup	= late_abort_fixup,
#else
	.cf_dataabt_fixup	= early_abort_fixup,
#endif
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm67_context_switch,

	.cf_setup		= arm6_setup

};
359 #endif	/* CPU_ARM6 */
360 
361 #ifdef CPU_ARM7
/*
 * Dispatch table for the ARM7 core.
 *
 * Shares the arm67 TTB/TLB/cache routines and CP15 accessors with the
 * ARM6 table; the differences are the unconditional late-abort data
 * fixup and the arm7-specific setup routine.
 */
struct cpu_functions arm7_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm67_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm67_tlb_flush,
	.cf_tlb_flushID_SE	= arm67_tlb_purge,
	.cf_tlb_flushI		= arm67_tlb_flush,
	.cf_tlb_flushI_SE	= arm67_tlb_purge,
	.cf_tlb_flushD		= arm67_tlb_flush,
	.cf_tlb_flushD_SE	= arm67_tlb_purge,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm67_cache_flush,
	.cf_dcache_wbinv_range	= (void *)arm67_cache_flush,
	.cf_dcache_inv_range	= (void *)arm67_cache_flush,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm67_cache_flush,
	.cf_idcache_wbinv_range	= (void *)arm67_cache_flush,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= late_abort_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm67_context_switch,

	.cf_setup		= arm7_setup

};
421 #endif	/* CPU_ARM7 */
422 
423 #ifdef CPU_ARM7TDMI
/*
 * Dispatch table for the ARM7TDMI core.
 *
 * Uses its own TTB, TLB and cache-flush routines.  Only combined I+D
 * operations exist: the single flushID pair backs all six TLB slots and
 * the single cache_flushID routine backs every cache wbinv/inv slot.
 * Data aborts use the late abort model.
 */
struct cpu_functions arm7tdmi_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm7tdmi_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm7tdmi_tlb_flushID,
	.cf_tlb_flushID_SE	= arm7tdmi_tlb_flushID_SE,
	.cf_tlb_flushI		= arm7tdmi_tlb_flushID,
	.cf_tlb_flushI_SE	= arm7tdmi_tlb_flushID_SE,
	.cf_tlb_flushD		= arm7tdmi_tlb_flushID,
	.cf_tlb_flushD_SE	= arm7tdmi_tlb_flushID_SE,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm7tdmi_cache_flushID,
	.cf_dcache_wbinv_range	= (void *)arm7tdmi_cache_flushID,
	.cf_dcache_inv_range	= (void *)arm7tdmi_cache_flushID,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm7tdmi_cache_flushID,
	.cf_idcache_wbinv_range	= (void *)arm7tdmi_cache_flushID,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= late_abort_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm7tdmi_context_switch,

	.cf_setup		= arm7tdmi_setup

};
483 #endif	/* CPU_ARM7TDMI */
484 
485 #ifdef CPU_ARM8
/*
 * Dispatch table for the ARM8 core.
 *
 * Cache maintenance uses the arm8 "purge" (write-back + invalidate) and
 * "clean" (write-back only) routines.  There is no invalidate-only
 * D-cache range operation, hence the XXX slot falling back to the full
 * purge routine.  Both abort fixups are null.
 */
struct cpu_functions arm8_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm8_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm8_tlb_flushID,
	.cf_tlb_flushID_SE	= arm8_tlb_flushID_SE,
	.cf_tlb_flushI		= arm8_tlb_flushID,
	.cf_tlb_flushI_SE	= arm8_tlb_flushID_SE,
	.cf_tlb_flushD		= arm8_tlb_flushID,
	.cf_tlb_flushD_SE	= arm8_tlb_flushID_SE,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm8_cache_purgeID,
	.cf_dcache_wbinv_range	= (void *)arm8_cache_purgeID,
/*XXX: inv done as wb+inv */	.cf_dcache_inv_range	= (void *)arm8_cache_purgeID,
	.cf_dcache_wb_range	= (void *)arm8_cache_cleanID,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm8_cache_purgeID,
	.cf_idcache_wbinv_range = (void *)arm8_cache_purgeID,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm8_context_switch,

	.cf_setup		= arm8_setup
};
544 #endif	/* CPU_ARM8 */
545 
546 #ifdef CPU_ARM9
/*
 * Dispatch table for the ARM9 core.
 *
 * TLB maintenance is mostly the generic armv4 routines (with an
 * arm9-specific single-entry ID flush); cache maintenance is fully
 * arm9-specific, with separate I and D routines.  cf_dcache_inv_range
 * (XXX) falls back to write-back + invalidate.
 */
struct cpu_functions arm9_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm9_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= arm9_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= arm9_icache_sync_all,
	.cf_icache_sync_range	= arm9_icache_sync_range,

	.cf_dcache_wbinv_all	= arm9_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= arm9_dcache_wbinv_range,
/*XXX: inv done as wb+inv */	.cf_dcache_inv_range	= arm9_dcache_wbinv_range,
	.cf_dcache_wb_range	= arm9_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm9_idcache_wbinv_all,
	.cf_idcache_wbinv_range = arm9_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm9_context_switch,

	.cf_setup		= arm9_setup

};
606 #endif /* CPU_ARM9 */
607 
608 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
/*
 * Dispatch table shared by ARM9E and ARM10 ("EC" cache variant).
 *
 * armv4 TLB routines with arm10 single-entry flushes, armv5_ec cache
 * routines, and the arm10 context-switch/setup code.  As with the other
 * pre-v6 tables, cf_dcache_inv_range (XXX) falls back to wb+inv.
 */
struct cpu_functions armv5_ec_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= armv5_ec_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= arm10_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= arm10_tlb_flushI_SE,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv5_ec_icache_sync_all,
	.cf_icache_sync_range	= armv5_ec_icache_sync_range,

	.cf_dcache_wbinv_all	= armv5_ec_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= armv5_ec_dcache_wbinv_range,
/*XXX: inv done as wb+inv */	.cf_dcache_inv_range	= armv5_ec_dcache_wbinv_range,
	.cf_dcache_wb_range	= armv5_ec_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= armv5_ec_idcache_wbinv_all,
	.cf_idcache_wbinv_range = armv5_ec_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm10_context_switch,

	.cf_setup		= arm10_setup

};
668 #endif /* CPU_ARM9E || CPU_ARM10 */
669 
670 #ifdef CPU_ARM10
/*
 * Dispatch table for the ARM10 core (generic ARMv5 cache flavour).
 *
 * Differs from armv5_ec_cpufuncs only in using the generic armv5
 * setttb and cache routines instead of the "EC" variants; the TLB,
 * context-switch and setup entries are the same.  cf_dcache_inv_range
 * (XXX) again falls back to wb+inv.
 */
struct cpu_functions arm10_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= armv5_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= arm10_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= arm10_tlb_flushI_SE,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv5_icache_sync_all,
	.cf_icache_sync_range	= armv5_icache_sync_range,

	.cf_dcache_wbinv_all	= armv5_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= armv5_dcache_wbinv_range,
/*XXX: inv done as wb+inv */	.cf_dcache_inv_range	= armv5_dcache_wbinv_range,
	.cf_dcache_wb_range	= armv5_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= armv5_idcache_wbinv_all,
	.cf_idcache_wbinv_range = armv5_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm10_context_switch,

	.cf_setup		= arm10_setup

};
730 #endif /* CPU_ARM10 */
731 
732 #ifdef CPU_ARM11
/*
 * Dispatch table for generic ARM11 cores.
 *
 * Full arm11 TLB routines (separate ID/I/D, whole and single-entry),
 * generic armv6 cache routines -- including, for the first time in this
 * file, a true invalidate-only D-cache range op -- plus a real sleep
 * hook (arm11_sleep) and write-buffer drain.
 */
struct cpu_functions arm11_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm11_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm11_tlb_flushID,
	.cf_tlb_flushID_SE	= arm11_tlb_flushID_SE,
	.cf_tlb_flushI		= arm11_tlb_flushI,
	.cf_tlb_flushI_SE	= arm11_tlb_flushI_SE,
	.cf_tlb_flushD		= arm11_tlb_flushD,
	.cf_tlb_flushD_SE	= arm11_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv6_icache_sync_all,
	.cf_icache_sync_range	= armv6_icache_sync_range,

	.cf_dcache_wbinv_all	= armv6_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= armv6_dcache_wbinv_range,
	.cf_dcache_inv_range	= armv6_dcache_inv_range,
	.cf_dcache_wb_range	= armv6_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= armv6_idcache_wbinv_all,
	.cf_idcache_wbinv_range = armv6_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= arm11_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= arm11_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm11_context_switch,

	.cf_setup		= arm11_setup

};
792 #endif /* CPU_ARM11 */
793 
794 #ifdef CPU_ARM1136
/*
 * Dispatch table for the ARM1136.
 *
 * Based on the generic ARM11 table, but the I-cache and I+D-cache
 * operations (and the prefetch-buffer flush) use the arm11x6 variants.
 * The numeric comments on those slots appear to be the ARM erratum IDs
 * (411920, 371025) that the arm11x6 routines work around -- see the
 * arm11x6 implementation for details.
 */
struct cpu_functions arm1136_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm11_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm11_tlb_flushID,
	.cf_tlb_flushID_SE	= arm11_tlb_flushID_SE,
	.cf_tlb_flushI		= arm11_tlb_flushI,
	.cf_tlb_flushI_SE	= arm11_tlb_flushI_SE,
	.cf_tlb_flushD		= arm11_tlb_flushD,
	.cf_tlb_flushD_SE	= arm11_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= arm11x6_icache_sync_all,	/* 411920 */
	.cf_icache_sync_range	= arm11x6_icache_sync_range,	/* 371025 */

	.cf_dcache_wbinv_all	= arm11x6_dcache_wbinv_all,	/* 411920 */
	.cf_dcache_wbinv_range	= armv6_dcache_wbinv_range,
	.cf_dcache_inv_range	= armv6_dcache_inv_range,
	.cf_dcache_wb_range	= armv6_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm11x6_idcache_wbinv_all,	/* 411920 */
	.cf_idcache_wbinv_range = arm11x6_idcache_wbinv_range,	/* 371025 */

	/* Other functions */

	.cf_flush_prefetchbuf	= arm11x6_flush_prefetchbuf,
	.cf_drain_writebuf	= arm11_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= arm11_sleep,	/* arm1136_sleep_rev0 */

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm11_context_switch,

	.cf_setup		= arm11x6_setup

};
854 #endif /* CPU_ARM1136 */
855 
856 #ifdef CPU_ARM1176
/*
 * Dispatch table for the ARM1176.
 *
 * Same structure as the ARM1136 table; the numeric comments on the
 * cache slots appear to be the erratum IDs relevant to this core
 * (415045, 371367).  Unlike ARM1136 it uses the arm11x6 sleep routine.
 */
struct cpu_functions arm1176_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm11_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm11_tlb_flushID,
	.cf_tlb_flushID_SE	= arm11_tlb_flushID_SE,
	.cf_tlb_flushI		= arm11_tlb_flushI,
	.cf_tlb_flushI_SE	= arm11_tlb_flushI_SE,
	.cf_tlb_flushD		= arm11_tlb_flushD,
	.cf_tlb_flushD_SE	= arm11_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= arm11x6_icache_sync_all,	/* 415045 */
	.cf_icache_sync_range	= arm11x6_icache_sync_range,	/* 371367 */

	.cf_dcache_wbinv_all	= arm11x6_dcache_wbinv_all,	/* 415045 */
	.cf_dcache_wbinv_range	= armv6_dcache_wbinv_range,
	.cf_dcache_inv_range	= armv6_dcache_inv_range,
	.cf_dcache_wb_range	= armv6_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm11x6_idcache_wbinv_all,	/* 415045 */
	.cf_idcache_wbinv_range = arm11x6_idcache_wbinv_range,	/* 371367 */

	/* Other functions */

	.cf_flush_prefetchbuf	= arm11x6_flush_prefetchbuf,
	.cf_drain_writebuf	= arm11_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= arm11x6_sleep,		/* no ref. */

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm11_context_switch,

	.cf_setup		= arm11x6_setup

};
916 #endif /* CPU_ARM1176 */
917 
918 
919 #ifdef CPU_ARM11MPCORE
/*
 * Dispatch table for the ARM11 MPCore.
 *
 * arm11 TLB routines throughout; whole-cache operations use the armv6
 * routines while the ranged cache operations use the armv5 routines --
 * NOTE(review): this armv6/armv5 mix is deliberate per this table, but
 * the rationale is not visible here; see the routine implementations.
 */
struct cpu_functions arm11mpcore_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm11_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm11_tlb_flushID,
	.cf_tlb_flushID_SE	= arm11_tlb_flushID_SE,
	.cf_tlb_flushI		= arm11_tlb_flushI,
	.cf_tlb_flushI_SE	= arm11_tlb_flushI_SE,
	.cf_tlb_flushD		= arm11_tlb_flushD,
	.cf_tlb_flushD_SE	= arm11_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv6_icache_sync_all,
	.cf_icache_sync_range	= armv5_icache_sync_range,

	.cf_dcache_wbinv_all	= armv6_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= armv5_dcache_wbinv_range,
	.cf_dcache_inv_range	= armv5_dcache_inv_range,
	.cf_dcache_wb_range	= armv5_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= armv6_idcache_wbinv_all,
	.cf_idcache_wbinv_range = armv5_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= arm11_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= arm11_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm11_context_switch,

	.cf_setup		= arm11mpcore_setup

};
979 #endif /* CPU_ARM11MPCORE */
980 
981 #ifdef CPU_SA110
/*
 * Dispatch table for the StrongARM SA-110.
 *
 * armv4 TLB routines with SA-1 single-entry ID flush; all cache
 * maintenance via the SA-1 purge (wb+inv), clean (wb) and sync
 * routines.  cf_dcache_inv_range (XXX) falls back to the purge.
 * No sleep support -- cf_sleep is a nullop (contrast sa11x0).
 */
struct cpu_functions sa110_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= sa1_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= sa1_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= sa1_cache_syncI,
	.cf_icache_sync_range	= sa1_cache_syncI_rng,

	.cf_dcache_wbinv_all	= sa1_cache_purgeD,
	.cf_dcache_wbinv_range	= sa1_cache_purgeD_rng,
/*XXX: inv done as wb+inv */	.cf_dcache_inv_range	= sa1_cache_purgeD_rng,
	.cf_dcache_wb_range	= sa1_cache_cleanD_rng,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= sa1_cache_purgeID,
	.cf_idcache_wbinv_range	= sa1_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= sa110_context_switch,

	.cf_setup		= sa110_setup
};
1040 #endif	/* CPU_SA110 */
1041 
#if defined(CPU_SA1100) || defined(CPU_SA1110)
/*
 * Function vector shared by the StrongARM SA-1100 and SA-1110.
 * Installed by set_cpufuncs() for CPU_ID_SA1100/CPU_ID_SA1110;
 * those paths also enable cpu_do_powersave.
 */
struct cpu_functions sa11x0_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= sa1_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= sa1_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= sa1_cache_syncI,
	.cf_icache_sync_range	= sa1_cache_syncI_rng,

	.cf_dcache_wbinv_all	= sa1_cache_purgeD,
	.cf_dcache_wbinv_range	= sa1_cache_purgeD_rng,
/*XXX*/	.cf_dcache_inv_range	= sa1_cache_purgeD_rng,
	.cf_dcache_wb_range	= sa1_cache_cleanD_rng,

	/* No outer (secondary) cache on this CPU: all sdcache ops are no-ops. */
	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= sa1_cache_purgeID,
	.cf_idcache_wbinv_range	= sa1_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf	= sa11x0_drain_readbuf,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= sa11x0_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= sa11x0_context_switch,

	.cf_setup		= sa11x0_setup
};
#endif	/* CPU_SA1100 || CPU_SA1110 */
1102 
#if defined(CPU_FA526)
/*
 * Function vector for the Faraday FA526 core.  Installed by
 * set_cpufuncs() when cputype == CPU_ID_FA526 (which also enables
 * cpu_do_powersave).
 */
struct cpu_functions fa526_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= fa526_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= fa526_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= fa526_tlb_flushI_SE,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= fa526_icache_sync_all,
	.cf_icache_sync_range	= fa526_icache_sync_range,

	.cf_dcache_wbinv_all	= fa526_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= fa526_dcache_wbinv_range,
	.cf_dcache_inv_range	= fa526_dcache_inv_range,
	.cf_dcache_wb_range	= fa526_dcache_wb_range,

	/* No outer (secondary) cache on this CPU: all sdcache ops are no-ops. */
	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= fa526_idcache_wbinv_all,
	.cf_idcache_wbinv_range	= fa526_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= fa526_flush_prefetchbuf,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= fa526_flush_brnchtgt_E,

	.cf_sleep		= fa526_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= fa526_context_switch,

	.cf_setup		= fa526_setup
};
#endif	/* CPU_FA526 */
1163 
#ifdef CPU_IXP12X0
/*
 * Function vector for the Intel IXP12x0 network processor.  Installed
 * by set_cpufuncs() when cputype == CPU_ID_IXP1200.  The core is
 * SA-1-like, so most entries reuse the sa1_/armv4_ routines.
 */
struct cpu_functions ixp12x0_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= sa1_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= sa1_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= sa1_cache_syncI,
	.cf_icache_sync_range	= sa1_cache_syncI_rng,

	.cf_dcache_wbinv_all	= sa1_cache_purgeD,
	.cf_dcache_wbinv_range	= sa1_cache_purgeD_rng,
/*XXX*/	.cf_dcache_inv_range	= sa1_cache_purgeD_rng,
	.cf_dcache_wb_range	= sa1_cache_cleanD_rng,

	/* No outer (secondary) cache on this CPU: all sdcache ops are no-ops. */
	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= sa1_cache_purgeID,
	.cf_idcache_wbinv_range	= sa1_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf	= ixp12x0_drain_readbuf,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= ixp12x0_context_switch,

	.cf_setup		= ixp12x0_setup
};
#endif	/* CPU_IXP12X0 */
1224 
#if defined(CPU_XSCALE)
/*
 * Function vector for Intel XScale cores (installed by set_cpufuncs()
 * for the 80200 and other XScale variants).  Unlike most tables here,
 * cf_cpwait is a real operation (xscale_cpwait) because XScale needs
 * an explicit wait for CP15 side effects to complete.
 */
struct cpu_functions xscale_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= xscale_cpwait,

	/* MMU functions */

	.cf_control		= xscale_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= xscale_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= xscale_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= xscale_cache_syncI,
	.cf_icache_sync_range	= xscale_cache_syncI_rng,

	.cf_dcache_wbinv_all	= xscale_cache_purgeD,
	.cf_dcache_wbinv_range	= xscale_cache_purgeD_rng,
	.cf_dcache_inv_range	= xscale_cache_flushD_rng,
	.cf_dcache_wb_range	= xscale_cache_cleanD_rng,

	/* No outer (secondary) cache on this CPU: all sdcache ops are no-ops. */
	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= xscale_cache_purgeID,
	.cf_idcache_wbinv_range = xscale_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= xscale_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= xscale_context_switch,

	.cf_setup		= xscale_setup
};
#endif /* CPU_XSCALE */
1285 
#if defined(CPU_ARMV7)
/*
 * Generic ARMv7 function vector.  The armv7up_* TLB ops are the
 * uniprocessor (non-broadcast) variants; on MULTIPROCESSOR kernels
 * set_cpufuncs_mpfixup() later patches the active cpufuncs copy with
 * the armv7mp_* equivalents when the MPIDR reports MP extensions.
 */
struct cpu_functions armv7_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= armv7_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv7up_tlb_flushID,
	.cf_tlb_flushID_SE	= armv7up_tlb_flushID_SE,
	.cf_tlb_flushI		= armv7up_tlb_flushI,
	.cf_tlb_flushI_SE	= armv7up_tlb_flushI_SE,
	.cf_tlb_flushD		= armv7up_tlb_flushD,
	.cf_tlb_flushD_SE	= armv7up_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv7_icache_sync_all,
	.cf_dcache_wbinv_all	= armv7_dcache_wbinv_all,

	.cf_dcache_inv_range	= armv7_dcache_inv_range,
	.cf_dcache_wb_range	= armv7_dcache_wb_range,
	.cf_dcache_wbinv_range	= armv7_dcache_wbinv_range,

	/* Outer-cache ops are no-ops here; SoC code may override them. */
	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_icache_sync_range	= armv7_icache_sync_range,
	.cf_idcache_wbinv_range = armv7_idcache_wbinv_range,


	.cf_idcache_wbinv_all	= armv7_idcache_wbinv_all,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv7_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= armv7_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= armv7_context_switch,

	.cf_setup		= armv7_setup

};
#endif /* CPU_ARMV7 */
1349 
#ifdef CPU_PJ4B
/*
 * Function vector for the Marvell PJ4B (ARMv7-compatible) core.
 * Note that cf_cpwait is armv7_drain_writebuf here, and the I-only
 * and D-only TLB flush slots are deliberately wired to the combined
 * ID flush routines.
 */
struct cpu_functions pj4bv7_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= armv7_drain_writebuf,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= armv7_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv7up_tlb_flushID,
	.cf_tlb_flushID_SE	= armv7up_tlb_flushID_SE,
	.cf_tlb_flushI		= armv7up_tlb_flushID,
	.cf_tlb_flushI_SE	= armv7up_tlb_flushID_SE,
	.cf_tlb_flushD		= armv7up_tlb_flushID,
	.cf_tlb_flushD_SE	= armv7up_tlb_flushID_SE,

	/* Cache operations (see also pj4bv7_setup) */
	.cf_icache_sync_all	= armv7_idcache_wbinv_all,
	.cf_icache_sync_range	= armv7_icache_sync_range,

	.cf_dcache_wbinv_all	= armv7_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= armv7_dcache_wbinv_range,
	.cf_dcache_inv_range	= armv7_dcache_inv_range,
	.cf_dcache_wb_range	= armv7_dcache_wb_range,

	/* Outer-cache ops are no-ops here; SoC code may override them. */
	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= armv7_idcache_wbinv_all,
	.cf_idcache_wbinv_range	= armv7_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv7_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= pj4b_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= armv7_context_switch,

	.cf_setup		= pj4bv7_setup
};
#endif /* CPU_PJ4B */
1409 
#ifdef CPU_SHEEVA
/*
 * Function vector for Marvell Sheeva (ARMv5TE-class) cores.  Installed
 * by set_cpufuncs() for CPU_ID_MV88SV131 / CPU_ID_MV88FR571_VD, which
 * also enables cpu_do_powersave.  Mixes generic armv5_ec/armv4 ops
 * with Sheeva-specific d-cache range routines.
 */
struct cpu_functions sheeva_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= armv5_ec_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= arm10_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= arm10_tlb_flushI_SE,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv5_ec_icache_sync_all,
	.cf_icache_sync_range	= armv5_ec_icache_sync_range,

	.cf_dcache_wbinv_all	= armv5_ec_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= sheeva_dcache_wbinv_range,
	.cf_dcache_inv_range	= sheeva_dcache_inv_range,
	.cf_dcache_wb_range	= sheeva_dcache_wb_range,

	/* Outer-cache ops are no-ops here; SoC code may override them. */
	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= armv5_ec_idcache_wbinv_all,
	.cf_idcache_wbinv_range = sheeva_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)sheeva_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm10_context_switch,

	.cf_setup		= sheeva_setup
};
#endif /* CPU_SHEEVA */
1470 
1471 
1472 /*
1473  * Global constants also used by locore.s
1474  */
1475 
1476 struct cpu_functions cpufuncs;
1477 u_int cputype;
1478 
#if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) || \
    defined(CPU_ARM9E) || defined(CPU_ARM10) || defined(CPU_FA526) || \
    defined(CPU_SHEEVA) || \
    defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
    defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425) || \
    defined(CPU_ARMV6) || defined(CPU_ARMV7)
static void get_cachetype_cp15(void);

/* Additional cache information local to this file.  Log2 of some of the
   above numbers.  */
static int	arm_dcache_log2_nsets;		/* log2(# of d-cache sets) */
static int	arm_dcache_log2_assoc;		/* log2(d-cache associativity) */
static int	arm_dcache_log2_linesize;	/* log2(d-cache line size) */
1492 
#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
/*
 * Select a cache level/type via the Cache Size Selection Register
 * (CSSELR, written with 'cssr') and return the matching Cache Size ID
 * register (CCSIDR) value describing that cache's geometry.
 */
static inline u_int
get_cachesize_cp15(int cssr)
{
#if defined(CPU_ARMV7)
	__asm volatile(".arch\tarmv7a");

	armreg_csselr_write(cssr);
	arm_isb();			 /* sync to the new cssr */

#else
	/* Pre-v7 builds: write CSSELR directly (p15, 1, c0, c0, 2). */
	__asm volatile("mcr p15, 1, %0, c0, c0, 2" :: "r" (cssr) : "memory");
#endif
	return armreg_ccsidr_read();
}
#endif
1509 
#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
/*
 * Decode one cache level into *info.  'clidr' is the 3-bit cache-type
 * field for this level extracted from the CLIDR register; 'level' is
 * 0-based.  For level 0 this also records the log2 d-cache geometry
 * used by the cache routines, and clears arm_cache_prefer_mask when
 * both ways fit within a page (no coloring needed).
 */
static void
get_cacheinfo_clidr(struct arm_cache_info *info, u_int level, u_int clidr)
{
	u_int csid;

	/* Type values with bit 1 or 2 set describe a data/unified cache. */
	if (clidr & 6) {
		csid = get_cachesize_cp15(level << 1); /* select dcache values */
		info->dcache_sets = CPU_CSID_NUMSETS(csid) + 1;
		info->dcache_ways = CPU_CSID_ASSOC(csid) + 1;
		info->dcache_line_size = 1U << (CPU_CSID_LEN(csid) + 4);
		/* way size = line size * number of sets */
		info->dcache_way_size =
		    info->dcache_line_size * info->dcache_sets;
		info->dcache_size = info->dcache_way_size * info->dcache_ways;

		if (level == 0) {
			arm_dcache_log2_assoc = CPU_CSID_ASSOC(csid) + 1;
			arm_dcache_log2_linesize = CPU_CSID_LEN(csid) + 4;
			/* round set count up to a power of two, then log2 */
			arm_dcache_log2_nsets =
			    31 - __builtin_clz(info->dcache_sets*2-1);
		}
	}

	/* CLIDR cache-type 4 == unified cache. */
	info->cache_unified = (clidr == 4);

	/* Non-L1 caches are treated as physically indexed/tagged. */
	if (level > 0) {
		info->dcache_type = CACHE_TYPE_PIPT;
		info->icache_type = CACHE_TYPE_PIPT;
	}

	if (info->cache_unified) {
		/* Unified: i-cache geometry mirrors the d-cache values. */
		info->icache_ways = info->dcache_ways;
		info->icache_line_size = info->dcache_line_size;
		info->icache_way_size = info->dcache_way_size;
		info->icache_size = info->dcache_size;
	} else {
		csid = get_cachesize_cp15((level << 1)|CPU_CSSR_InD); /* select icache values */
		info->icache_sets = CPU_CSID_NUMSETS(csid) + 1;
		info->icache_ways = CPU_CSID_ASSOC(csid) + 1;
		info->icache_line_size = 1U << (CPU_CSID_LEN(csid) + 4);
		info->icache_way_size = info->icache_line_size * info->icache_sets;
		info->icache_size = info->icache_way_size * info->icache_ways;
	}
	/* L1 ways no larger than a page: no cache coloring required. */
	if (level == 0
	    && info->dcache_way_size <= PAGE_SIZE
	    && info->icache_way_size <= PAGE_SIZE) {
		arm_cache_prefer_mask = 0;
	}
}
#endif /* (ARM_MMU_V6 + ARM_MMU_V7) > 0 */
1560 
/*
 * Probe the cache geometry from the CP15 cache type register (CTR)
 * and fill in arm_pcache (plus arm_scache on ARMv6/v7 format-4 CTRs)
 * and the arm_dcache_* globals used by the cache maintenance code.
 */
static void
get_cachetype_cp15(void)
{
	u_int ctype, isize, dsize;
	u_int multiplier;

	ctype = armreg_ctr_read();

	/*
	 * ...and thus spake the ARM ARM:
	 *
	 * If an <opcode2> value corresponding to an unimplemented or
	 * reserved ID register is encountered, the System Control
	 * processor returns the value of the main ID register.
	 */
	if (ctype == cpu_idnum())
		goto out;

#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
	/* CTR format 4 (ARMv7): geometry comes from CLIDR/CCSIDR. */
	if (CPU_CT_FORMAT(ctype) == 4) {
		u_int clidr = armreg_clidr_read();

		if (CPU_CT4_L1IPOLICY(ctype) == CPU_CT4_L1_PIPT) {
			arm_pcache.icache_type = CACHE_TYPE_PIPT;
		} else {
			/* VIPT i-cache: request page coloring. */
			arm_pcache.icache_type = CACHE_TYPE_VIPT;
			arm_cache_prefer_mask = PAGE_SIZE;
		}
#ifdef CPU_CORTEX
		if (CPU_ID_CORTEX_P(cpu_idnum())) {
			arm_pcache.dcache_type = CACHE_TYPE_PIPT;
		} else
#endif
		{
			arm_pcache.dcache_type = CACHE_TYPE_VIPT;
		}
		arm_pcache.cache_type = CPU_CT_CTYPE_WB14;

		/* L1 (level 0) type is in the low 3 bits of CLIDR. */
		get_cacheinfo_clidr(&arm_pcache, 0, clidr & 7);
		arm_dcache_align = arm_pcache.dcache_line_size;
		clidr >>= 3;
		/* L2 (level 1), if the next CLIDR field is non-zero. */
		if (clidr & 7) {
			get_cacheinfo_clidr(&arm_scache, 1, clidr & 7);
			if (arm_scache.dcache_line_size < arm_dcache_align)
				arm_dcache_align = arm_scache.dcache_line_size;
		}
		/*
		 * The pmap cleans an entire way for an exec page so
		 * we don't care that it's VIPT anymore.
		 */
		if (arm_pcache.dcache_type == CACHE_TYPE_PIPT) {
			arm_cache_prefer_mask = 0;
		}
		goto out;
	}
#endif /* ARM_MMU_V6 + ARM_MMU_V7 > 0 */

	/* CTR S bit clear: a single unified cache. */
	if ((ctype & CPU_CT_S) == 0)
		arm_pcache.cache_unified = 1;

	/*
	 * If you want to know how this code works, go read the ARM ARM.
	 */

	arm_pcache.cache_type = CPU_CT_CTYPE(ctype);

	/* Separate i-cache geometry, only when caches are not unified. */
	if (arm_pcache.cache_unified == 0) {
		isize = CPU_CT_ISIZE(ctype);
		multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
		arm_pcache.icache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
		if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
			if (isize & CPU_CT_xSIZE_M)
				arm_pcache.icache_line_size = 0; /* not present */
			else
				arm_pcache.icache_ways = 1;
		} else {
			arm_pcache.icache_ways = multiplier <<
			    (CPU_CT_xSIZE_ASSOC(isize) - 1);
#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
			arm_pcache.icache_type = CACHE_TYPE_VIPT;
			if (CPU_CT_xSIZE_P & isize)
				arm_cache_prefer_mask |=
				    __BIT(9 + CPU_CT_xSIZE_SIZE(isize)
					  - CPU_CT_xSIZE_ASSOC(isize))
				    - PAGE_SIZE;
#endif
		}
		arm_pcache.icache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
		arm_pcache.icache_way_size =
		    __BIT(9 + CPU_CT_xSIZE_SIZE(isize) - CPU_CT_xSIZE_ASSOC(isize));
	}

	dsize = CPU_CT_DSIZE(ctype);
	multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
	arm_pcache.dcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
	if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
		if (dsize & CPU_CT_xSIZE_M)
			arm_pcache.dcache_line_size = 0; /* not present */
		else
			arm_pcache.dcache_ways = 1;
	} else {
		arm_pcache.dcache_ways = multiplier <<
		    (CPU_CT_xSIZE_ASSOC(dsize) - 1);
#if (ARM_MMU_V6) > 0
		arm_pcache.dcache_type = CACHE_TYPE_VIPT;
		if ((CPU_CT_xSIZE_P & dsize)
		    && CPU_ID_ARM11_P(curcpu()->ci_arm_cpuid)) {
			arm_cache_prefer_mask |=
			    __BIT(9 + CPU_CT_xSIZE_SIZE(dsize)
				  - CPU_CT_xSIZE_ASSOC(dsize)) - PAGE_SIZE;
		}
#endif
	}
	arm_pcache.dcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
	arm_pcache.dcache_way_size =
	    __BIT(9 + CPU_CT_xSIZE_SIZE(dsize) - CPU_CT_xSIZE_ASSOC(dsize));

	arm_dcache_align = arm_pcache.dcache_line_size;

	/* Log2 geometry used by the set/way-walking cache routines. */
	arm_dcache_log2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
	arm_dcache_log2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
	arm_dcache_log2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
	    CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);

 out:
	KASSERTMSG(arm_dcache_align <= CACHE_LINE_SIZE,
	    "arm_dcache_align=%u CACHE_LINE_SIZE=%u",
	    arm_dcache_align, CACHE_LINE_SIZE);
	arm_dcache_align_mask = arm_dcache_align - 1;
}
1691 #endif /* ARM7TDMI || ARM8 || ARM9 || XSCALE */
1692 
#if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3) || \
    defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_SA110) || \
    defined(CPU_SA1100) || defined(CPU_SA1110) || defined(CPU_IXP12X0)
/* Cache information for CPUs without cache type registers. */
struct cachetab {
	uint32_t ct_cpuid;		/* CPU_ID_* value this row matches */
	int	ct_pcache_type;		/* CPU_CT_CTYPE_* (write policy) */
	int	ct_pcache_unified;	/* non-zero: unified I/D cache */
	int	ct_pdcache_size;	/* d-cache size in bytes */
	int	ct_pdcache_line_size;	/* d-cache line size in bytes */
	int	ct_pdcache_ways;	/* d-cache associativity */
	int	ct_picache_size;	/* i-cache size in bytes */
	int	ct_picache_line_size;	/* i-cache line size in bytes */
	int	ct_picache_ways;	/* i-cache associativity */
};

/* Terminated by the all-zero row (ct_cpuid == 0). */
struct cachetab cachetab[] = {
    /* cpuid,           cache type,       u,  dsiz, ls, wy,  isiz, ls, wy */
    { CPU_ID_ARM2,      0,                1,     0,  0,  0,     0,  0,  0 },
    { CPU_ID_ARM250,    0,                1,     0,  0,  0,     0,  0,  0 },
    { CPU_ID_ARM3,      CPU_CT_CTYPE_WT,  1,  4096, 16, 64,     0,  0,  0 },
    { CPU_ID_ARM610,	CPU_CT_CTYPE_WT,  1,  4096, 16, 64,     0,  0,  0 },
    { CPU_ID_ARM710,    CPU_CT_CTYPE_WT,  1,  8192, 32,  4,     0,  0,  0 },
    { CPU_ID_ARM7500,   CPU_CT_CTYPE_WT,  1,  4096, 16,  4,     0,  0,  0 },
    { CPU_ID_ARM710A,   CPU_CT_CTYPE_WT,  1,  8192, 16,  4,     0,  0,  0 },
    { CPU_ID_ARM7500FE, CPU_CT_CTYPE_WT,  1,  4096, 16,  4,     0,  0,  0 },
    /* XXX is this type right for SA-1? */
    { CPU_ID_SA110,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 },
    { CPU_ID_SA1100,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
    { CPU_ID_SA1110,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
    { CPU_ID_IXP1200,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 }, /* XXX */
    { 0, 0, 0, 0, 0, 0, 0, 0}
};
1726 
1727 static void get_cachetype_table(void);
1728 
1729 static void
1730 get_cachetype_table(void)
1731 {
1732 	int i;
1733 	uint32_t cpuid = cpu_idnum();
1734 
1735 	for (i = 0; cachetab[i].ct_cpuid != 0; i++) {
1736 		if (cachetab[i].ct_cpuid == (cpuid & CPU_ID_CPU_MASK)) {
1737 			arm_pcache.cache_type = cachetab[i].ct_pcache_type;
1738 			arm_pcache.cache_unified = cachetab[i].ct_pcache_unified;
1739 			arm_pcache.dcache_size = cachetab[i].ct_pdcache_size;
1740 			arm_pcache.dcache_line_size =
1741 			    cachetab[i].ct_pdcache_line_size;
1742 			arm_pcache.dcache_ways = cachetab[i].ct_pdcache_ways;
1743 			if (arm_pcache.dcache_ways) {
1744 				arm_pcache.dcache_way_size =
1745 				    arm_pcache.dcache_line_size
1746 				    / arm_pcache.dcache_ways;
1747 			}
1748 			arm_pcache.icache_size = cachetab[i].ct_picache_size;
1749 			arm_pcache.icache_line_size =
1750 			    cachetab[i].ct_picache_line_size;
1751 			arm_pcache.icache_ways = cachetab[i].ct_picache_ways;
1752 			if (arm_pcache.icache_ways) {
1753 				arm_pcache.icache_way_size =
1754 				    arm_pcache.icache_line_size
1755 				    / arm_pcache.icache_ways;
1756 			}
1757 		}
1758 	}
1759 
1760 	arm_dcache_align = arm_pcache.dcache_line_size;
1761 	arm_dcache_align_mask = arm_dcache_align - 1;
1762 }
1763 
1764 #endif /* ARM2 || ARM250 || ARM3 || ARM6 || ARM7 || SA110 || SA1100 || SA1111 || IXP12X0 */
1765 
1766 
#if defined(CPU_CORTEX) || defined(CPU_PJ4B)
/*
 * On MULTIPROCESSOR kernels, patch the active cpufuncs vector with the
 * ARMv7 MP-extension TLB operations when the MPIDR register reports
 * the MP bit set and the U (uniprocessor) bit clear.
 */
static inline void
set_cpufuncs_mpfixup(void)
{
#ifdef MULTIPROCESSOR
	const uint32_t id = armreg_mpidr_read();

	if ((id & (MPIDR_MP|MPIDR_U)) != MPIDR_MP)
		return;

	/* MP extensions usable: install the armv7mp_* TLB ops. */
	cpufuncs.cf_tlb_flushID = armv7mp_tlb_flushID;
	cpufuncs.cf_tlb_flushID_SE = armv7mp_tlb_flushID_SE;
	cpufuncs.cf_tlb_flushI = armv7mp_tlb_flushI;
	cpufuncs.cf_tlb_flushI_SE = armv7mp_tlb_flushI_SE;
	cpufuncs.cf_tlb_flushD = armv7mp_tlb_flushD;
	cpufuncs.cf_tlb_flushD_SE = armv7mp_tlb_flushD_SE;
#endif
}
#endif
1785 
1786 /*
1787  * Cannot panic here as we may not have a console yet ...
1788  */
1789 
1790 int
1791 set_cpufuncs(void)
1792 {
1793 	if (cputype == 0) {
1794 		cputype = cpufunc_id();
1795 		cputype &= CPU_ID_CPU_MASK;
1796 	}
1797 
1798 	/*
1799 	 * NOTE: cpu_do_powersave defaults to off.  If we encounter a
1800 	 * CPU type where we want to use it by default, then we set it.
1801 	 */
1802 #ifdef CPU_ARM2
1803 	if (cputype == CPU_ID_ARM2) {
1804 		cpufuncs = arm2_cpufuncs;
1805 		get_cachetype_table();
1806 		return 0;
1807 	}
1808 #endif /* CPU_ARM2 */
1809 #ifdef CPU_ARM250
1810 	if (cputype == CPU_ID_ARM250) {
1811 		cpufuncs = arm250_cpufuncs;
1812 		get_cachetype_table();
1813 		return 0;
1814 	}
1815 #endif
1816 #ifdef CPU_ARM3
1817 	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
1818 	    (cputype & 0x00000f00) == 0x00000300) {
1819 		cpufuncs = arm3_cpufuncs;
1820 		get_cachetype_table();
1821 		return 0;
1822 	}
1823 #endif	/* CPU_ARM3 */
1824 #ifdef CPU_ARM6
1825 	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
1826 	    (cputype & 0x00000f00) == 0x00000600) {
1827 		cpufuncs = arm6_cpufuncs;
1828 		get_cachetype_table();
1829 		pmap_pte_init_generic();
1830 		return 0;
1831 	}
1832 #endif	/* CPU_ARM6 */
1833 #ifdef CPU_ARM7
1834 	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
1835 	    CPU_ID_IS7(cputype) &&
1836 	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V3) {
1837 		cpufuncs = arm7_cpufuncs;
1838 		get_cachetype_table();
1839 		pmap_pte_init_generic();
1840 		return 0;
1841 	}
1842 #endif	/* CPU_ARM7 */
1843 #ifdef CPU_ARM7TDMI
1844 	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
1845 	    CPU_ID_IS7(cputype) &&
1846 	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V4T) {
1847 		cpufuncs = arm7tdmi_cpufuncs;
1848 		get_cachetype_cp15();
1849 		pmap_pte_init_generic();
1850 		return 0;
1851 	}
1852 #endif
1853 #ifdef CPU_ARM8
1854 	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
1855 	    (cputype & 0x0000f000) == 0x00008000) {
1856 		cpufuncs = arm8_cpufuncs;
1857 		get_cachetype_cp15();
1858 		pmap_pte_init_arm8();
1859 		return 0;
1860 	}
1861 #endif	/* CPU_ARM8 */
1862 #ifdef CPU_ARM9
1863 	if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
1864 	     (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
1865 	    (cputype & 0x0000f000) == 0x00009000) {
1866 		cpufuncs = arm9_cpufuncs;
1867 		get_cachetype_cp15();
1868 		arm9_dcache_sets_inc = 1U << arm_dcache_log2_linesize;
1869 		arm9_dcache_sets_max =
1870 		    (1U << (arm_dcache_log2_linesize + arm_dcache_log2_nsets)) -
1871 		    arm9_dcache_sets_inc;
1872 		arm9_dcache_index_inc = 1U << (32 - arm_dcache_log2_assoc);
1873 		arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
1874 #ifdef	ARM9_CACHE_WRITE_THROUGH
1875 		pmap_pte_init_arm9();
1876 #else
1877 		pmap_pte_init_generic();
1878 #endif
1879 		return 0;
1880 	}
1881 #endif /* CPU_ARM9 */
1882 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
1883 	if (cputype == CPU_ID_ARM926EJS ||
1884 	    cputype == CPU_ID_ARM1026EJS) {
1885 		cpufuncs = armv5_ec_cpufuncs;
1886 		get_cachetype_cp15();
1887 		pmap_pte_init_generic();
1888 		return 0;
1889 	}
1890 #endif /* CPU_ARM9E || CPU_ARM10 */
1891 #if defined(CPU_SHEEVA)
1892 	if (cputype == CPU_ID_MV88SV131 ||
1893 	    cputype == CPU_ID_MV88FR571_VD) {
1894 		cpufuncs = sheeva_cpufuncs;
1895 		get_cachetype_cp15();
1896 		pmap_pte_init_generic();
1897 		cpu_do_powersave = 1;			/* Enable powersave */
1898 		return 0;
1899 	}
1900 #endif /* CPU_SHEEVA */
1901 #ifdef CPU_ARM10
1902 	if (/* cputype == CPU_ID_ARM1020T || */
1903 	    cputype == CPU_ID_ARM1020E) {
1904 		/*
1905 		 * Select write-through cacheing (this isn't really an
1906 		 * option on ARM1020T).
1907 		 */
1908 		cpufuncs = arm10_cpufuncs;
1909 		get_cachetype_cp15();
1910 		armv5_dcache_sets_inc = 1U << arm_dcache_log2_linesize;
1911 		armv5_dcache_sets_max =
1912 		    (1U << (arm_dcache_log2_linesize + arm_dcache_log2_nsets)) -
1913 		    armv5_dcache_sets_inc;
1914 		armv5_dcache_index_inc = 1U << (32 - arm_dcache_log2_assoc);
1915 		armv5_dcache_index_max = 0U - armv5_dcache_index_inc;
1916 		pmap_pte_init_generic();
1917 		return 0;
1918 	}
1919 #endif /* CPU_ARM10 */
1920 
1921 
1922 #if defined(CPU_ARM11MPCORE)
1923 	if (cputype == CPU_ID_ARM11MPCORE) {
1924 		cpufuncs = arm11mpcore_cpufuncs;
1925 #if defined(CPU_ARMV7) || defined(CPU_PRE_ARMV6)
1926 		cpu_armv6_p = true;
1927 #endif
1928 		get_cachetype_cp15();
1929 		armv5_dcache_sets_inc = 1U << arm_dcache_log2_linesize;
1930 		armv5_dcache_sets_max = (1U << (arm_dcache_log2_linesize +
1931 			arm_dcache_log2_nsets)) - armv5_dcache_sets_inc;
1932 		armv5_dcache_index_inc = 1U << (32 - arm_dcache_log2_assoc);
1933 		armv5_dcache_index_max = 0U - armv5_dcache_index_inc;
1934 		cpu_do_powersave = 1;			/* Enable powersave */
1935 		pmap_pte_init_arm11mpcore();
1936 		if (arm_cache_prefer_mask)
1937 			uvmexp.ncolors = (arm_cache_prefer_mask >> PGSHIFT) + 1;
1938 
1939 		return 0;
1940 
1941 	}
1942 #endif	/* CPU_ARM11MPCORE */
1943 
1944 #if defined(CPU_ARM11)
1945 	if (cputype == CPU_ID_ARM1136JS ||
1946 	    cputype == CPU_ID_ARM1136JSR1 ||
1947 	    cputype == CPU_ID_ARM1176JZS) {
1948 		cpufuncs = arm11_cpufuncs;
1949 #if defined(CPU_ARM1136)
1950 		if (cputype == CPU_ID_ARM1136JS ||
1951 		    cputype == CPU_ID_ARM1136JSR1) {
1952 			cpufuncs = arm1136_cpufuncs;
1953 			if (cputype == CPU_ID_ARM1136JS)
1954 				cpufuncs.cf_sleep = arm1136_sleep_rev0;
1955 		}
1956 #endif
1957 #if defined(CPU_ARM1176)
1958 		if (cputype == CPU_ID_ARM1176JZS) {
1959 			cpufuncs = arm1176_cpufuncs;
1960 		}
1961 #endif
1962 #if defined(CPU_ARMV7) || defined(CPU_PRE_ARMV6)
1963 		cpu_armv6_p = true;
1964 #endif
1965 		cpu_do_powersave = 1;			/* Enable powersave */
1966 		get_cachetype_cp15();
1967 #ifdef ARM11_CACHE_WRITE_THROUGH
1968 		pmap_pte_init_arm11();
1969 #else
1970 		pmap_pte_init_generic();
1971 #endif
1972 		if (arm_cache_prefer_mask)
1973 			uvmexp.ncolors = (arm_cache_prefer_mask >> PGSHIFT) + 1;
1974 
1975 		/*
1976 		 * Start and reset the PMC Cycle Counter.
1977 		 */
1978 		armreg_pmcrv6_write(ARM11_PMCCTL_E | ARM11_PMCCTL_P | ARM11_PMCCTL_C);
1979 		return 0;
1980 	}
1981 #endif /* CPU_ARM11 */
1982 #ifdef CPU_SA110
1983 	if (cputype == CPU_ID_SA110) {
1984 		cpufuncs = sa110_cpufuncs;
1985 		get_cachetype_table();
1986 		pmap_pte_init_sa1();
1987 		return 0;
1988 	}
1989 #endif	/* CPU_SA110 */
1990 #ifdef CPU_SA1100
1991 	if (cputype == CPU_ID_SA1100) {
1992 		cpufuncs = sa11x0_cpufuncs;
1993 		get_cachetype_table();
1994 		pmap_pte_init_sa1();
1995 
1996 		/* Use powersave on this CPU. */
1997 		cpu_do_powersave = 1;
1998 
1999 		return 0;
2000 	}
2001 #endif	/* CPU_SA1100 */
2002 #ifdef CPU_SA1110
2003 	if (cputype == CPU_ID_SA1110) {
2004 		cpufuncs = sa11x0_cpufuncs;
2005 		get_cachetype_table();
2006 		pmap_pte_init_sa1();
2007 
2008 		/* Use powersave on this CPU. */
2009 		cpu_do_powersave = 1;
2010 
2011 		return 0;
2012 	}
2013 #endif	/* CPU_SA1110 */
2014 #ifdef CPU_FA526
2015 	if (cputype == CPU_ID_FA526) {
2016 		cpufuncs = fa526_cpufuncs;
2017 		get_cachetype_cp15();
2018 		pmap_pte_init_generic();
2019 
2020 		/* Use powersave on this CPU. */
2021 		cpu_do_powersave = 1;
2022 
2023 		return 0;
2024 	}
2025 #endif	/* CPU_FA526 */
2026 #ifdef CPU_IXP12X0
2027 	if (cputype == CPU_ID_IXP1200) {
2028 		cpufuncs = ixp12x0_cpufuncs;
2029 		get_cachetype_table();
2030 		pmap_pte_init_sa1();
2031 		return 0;
2032 	}
2033 #endif  /* CPU_IXP12X0 */
2034 #ifdef CPU_XSCALE_80200
2035 	if (cputype == CPU_ID_80200) {
2036 		int rev = cpufunc_id() & CPU_ID_REVISION_MASK;
2037 
2038 		i80200_icu_init();
2039 
2040 		/*
2041 		 * Reset the Performance Monitoring Unit to a
2042 		 * pristine state:
2043 		 *	- CCNT, PMN0, PMN1 reset to 0
2044 		 *	- overflow indications cleared
2045 		 *	- all counters disabled
2046 		 */
2047 		__asm volatile("mcr p14, 0, %0, c0, c0, 0"
2048 			:
2049 			: "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
2050 			       PMNC_CC_IF));
2051 
2052 #if defined(XSCALE_CCLKCFG)
2053 		/*
2054 		 * Crank CCLKCFG to maximum legal value.
2055 		 */
2056 		__asm volatile ("mcr p14, 0, %0, c6, c0, 0"
2057 			:
2058 			: "r" (XSCALE_CCLKCFG));
2059 #endif
2060 
2061 		/*
2062 		 * XXX Disable ECC in the Bus Controller Unit; we
2063 		 * don't really support it, yet.  Clear any pending
2064 		 * error indications.
2065 		 */
2066 		__asm volatile("mcr p13, 0, %0, c0, c1, 0"
2067 			:
2068 			: "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));
2069 
2070 		cpufuncs = xscale_cpufuncs;
2071 #if defined(PERFCTRS)
2072 		xscale_pmu_init();
2073 #endif
2074 
2075 		/*
2076 		 * i80200 errata: Step-A0 and A1 have a bug where
2077 		 * D$ dirty bits are not cleared on "invalidate by
2078 		 * address".
2079 		 *
2080 		 * Workaround: Clean cache line before invalidating.
2081 		 */
2082 		if (rev == 0 || rev == 1)
2083 			cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng;
2084 
2085 		get_cachetype_cp15();
2086 		pmap_pte_init_xscale();
2087 		return 0;
2088 	}
2089 #endif /* CPU_XSCALE_80200 */
2090 #ifdef CPU_XSCALE_80321
2091 	if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
2092 	    cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0 ||
2093 	    cputype == CPU_ID_80219_400 || cputype == CPU_ID_80219_600) {
2094 		i80321_icu_init();
2095 
2096 		/*
2097 		 * Reset the Performance Monitoring Unit to a
2098 		 * pristine state:
2099 		 *	- CCNT, PMN0, PMN1 reset to 0
2100 		 *	- overflow indications cleared
2101 		 *	- all counters disabled
2102 		 */
2103 		__asm volatile("mcr p14, 0, %0, c0, c0, 0"
2104 			:
2105 			: "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
2106 			       PMNC_CC_IF));
2107 
2108 		cpufuncs = xscale_cpufuncs;
2109 #if defined(PERFCTRS)
2110 		xscale_pmu_init();
2111 #endif
2112 
2113 		get_cachetype_cp15();
2114 		pmap_pte_init_xscale();
2115 		return 0;
2116 	}
2117 #endif /* CPU_XSCALE_80321 */
2118 #ifdef __CPU_XSCALE_PXA2XX
2119 	/* ignore core revision to test PXA2xx CPUs */
2120 	if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X ||
2121 	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
2122 	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {
2123 
2124 		cpufuncs = xscale_cpufuncs;
2125 #if defined(PERFCTRS)
2126 		xscale_pmu_init();
2127 #endif
2128 
2129 		get_cachetype_cp15();
2130 		pmap_pte_init_xscale();
2131 
2132 		/* Use powersave on this CPU. */
2133 		cpu_do_powersave = 1;
2134 
2135 		return 0;
2136 	}
2137 #endif /* __CPU_XSCALE_PXA2XX */
2138 #ifdef CPU_XSCALE_IXP425
2139 	if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
2140 	    cputype == CPU_ID_IXP425_266) {
2141 		ixp425_icu_init();
2142 
2143 		cpufuncs = xscale_cpufuncs;
2144 #if defined(PERFCTRS)
2145 		xscale_pmu_init();
2146 #endif
2147 
2148 		get_cachetype_cp15();
2149 		pmap_pte_init_xscale();
2150 
2151 		return 0;
2152 	}
2153 #endif /* CPU_XSCALE_IXP425 */
2154 #if defined(CPU_CORTEX)
2155 	if (CPU_ID_CORTEX_P(cputype)) {
2156 		cpufuncs = armv7_cpufuncs;
2157 		set_cpufuncs_mpfixup();
2158 		cpu_do_powersave = 1;			/* Enable powersave */
2159 #if defined(CPU_ARMV6) || defined(CPU_PRE_ARMV6)
2160 		cpu_armv7_p = true;
2161 #endif
2162 		get_cachetype_cp15();
2163 		pmap_pte_init_armv7();
2164 		if (arm_cache_prefer_mask)
2165 			uvmexp.ncolors = (arm_cache_prefer_mask >> PGSHIFT) + 1;
2166 		/*
2167 		 * Start and reset the PMC Cycle Counter.
2168 		 */
2169 		armreg_pmcr_write(ARM11_PMCCTL_E | ARM11_PMCCTL_P | ARM11_PMCCTL_C);
2170 		armreg_pmcntenset_write(CORTEX_CNTENS_C);
2171 		return 0;
2172 	}
2173 #endif /* CPU_CORTEX */
2174 
2175 #if defined(CPU_PJ4B)
2176 	if ((cputype == CPU_ID_MV88SV581X_V6 ||
2177 	    cputype == CPU_ID_MV88SV581X_V7 ||
2178 	    cputype == CPU_ID_MV88SV584X_V7 ||
2179 	    cputype == CPU_ID_ARM_88SV581X_V6 ||
2180 	    cputype == CPU_ID_ARM_88SV581X_V7) &&
2181 	    (armreg_pfr0_read() & ARM_PFR0_THUMBEE_MASK)) {
2182 		cpufuncs = pj4bv7_cpufuncs;
2183 		set_cpufuncs_mpfixup();
2184 #if defined(CPU_ARMV6) || defined(CPU_PRE_ARMV6)
2185 		cpu_armv7_p = true;
2186 #endif
2187 		get_cachetype_cp15();
2188 		pmap_pte_init_armv7();
2189 		return 0;
2190 	}
2191 #endif /* CPU_PJ4B */
2192 
2193 	/*
2194 	 * Bzzzz. And the answer was ...
2195 	 */
2196 	panic("No support for this CPU type (%08x) in kernel", cputype);
2197 	return ARCHITECTURE_NOT_PRESENT;
2198 }
2199 
2200 #ifdef CPU_ARM2
2201 u_int arm2_id(void)
2202 {
2203 
2204 	return CPU_ID_ARM2;
2205 }
2206 #endif /* CPU_ARM2 */
2207 
2208 #ifdef CPU_ARM250
2209 u_int arm250_id(void)
2210 {
2211 
2212 	return CPU_ID_ARM250;
2213 }
2214 #endif /* CPU_ARM250 */
2215 
/*
 * Fixup routines for data and prefetch aborts.
 *
 * Several compile-time symbols affect these routines:
 *
 * DEBUG_FAULT_CORRECTION - Print debugging information while
 * correcting registers after a fault.
 * ARM6_LATE_ABORT - The ARM6 supports both early and late aborts;
 * when this symbol is defined, late aborts are used.
 */
2226 
2227 
2228 /*
2229  * Null abort fixup routine.
2230  * For use when no fixup is required.
2231  */
2232 int
2233 cpufunc_null_fixup(void *arg)
2234 {
2235 	return(ABORT_FIXUP_OK);
2236 }
2237 
2238 
2239 #if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3) || \
2240     defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI)
2241 
2242 #ifdef DEBUG_FAULT_CORRECTION
2243 #define DFC_PRINTF(x)		printf x
2244 #define DFC_DISASSEMBLE(x)	disassemble(x)
2245 #else
2246 #define DFC_PRINTF(x)		/* nothing */
2247 #define DFC_DISASSEMBLE(x)	/* nothing */
2248 #endif
2249 
2250 /*
2251  * "Early" data abort fixup.
2252  *
2253  * For ARM2, ARM2as, ARM3 and ARM6 (in early-abort mode).  Also used
2254  * indirectly by ARM6 (in late-abort mode) and ARM7[TDMI].
2255  *
2256  * In early aborts, we may have to fix up LDM, STM, LDC and STC.
2257  */
int
early_abort_fixup(void *arg)
{
	trapframe_t *frame = arg;	/* trapframe of the aborted context */
	u_int fault_pc;			/* address of the faulting instruction */
	u_int fault_instruction;	/* the faulting instruction itself */
	int saved_lr = 0;

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
		 * as the fault happened in svc mode but we need it in the
		 * usr slot so we can treat the registers as an array of ints
		 * during fixing.
		 * NOTE: This PC is in the position but writeback is not
		 * allowed on r15.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the following fixup code.
		 */

		saved_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = frame->tf_svc_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed. This should not happen as the kstack is
		 * always valid.
		 */
	}

	/* Get fault address and status from the CPU */

	fault_pc = frame->tf_pc;
	fault_instruction = *((volatile unsigned int *)fault_pc);

	/* Decode the fault instruction and fix the registers as needed */

	/* Block data transfer (LDM/STM)? */
	if ((fault_instruction & 0x0e000000) == 0x08000000) {
		int base;
		int loop;
		int count;
		int *registers = &frame->tf_r0;

		DFC_PRINTF(("LDM/STM\n"));
		DFC_DISASSEMBLE(fault_pc);
		if (fault_instruction & (1 << 21)) {	/* W bit: base writeback */
			DFC_PRINTF(("This instruction must be corrected\n"));
			base = (fault_instruction >> 16) & 0x0f;
			if (base == 15)
				return ABORT_FIXUP_FAILED;
			/* Count registers transferred */
			count = 0;
			for (loop = 0; loop < 16; ++loop) {
				if (fault_instruction & (1<<loop))
					++count;
			}
			DFC_PRINTF(("%d registers used\n", count));
			DFC_PRINTF(("Corrected r%d by %d bytes ",
				       base, count * 4));
			/* U bit set: base was incremented, so undo by subtracting */
			if (fault_instruction & (1 << 23)) {
				DFC_PRINTF(("down\n"));
				registers[base] -= count * 4;
			} else {
				DFC_PRINTF(("up\n"));
				registers[base] += count * 4;
			}
		}
	} else if ((fault_instruction & 0x0e000000) == 0x0c000000) {
		/* Coprocessor data transfer (LDC/STC): 8-bit word offset */
		int base;
		int offset;
		int *registers = &frame->tf_r0;

		/* REGISTER CORRECTION IS REQUIRED FOR THESE INSTRUCTIONS */

		DFC_DISASSEMBLE(fault_pc);

		/* Only need to fix registers if write back is turned on */

		if ((fault_instruction & (1 << 21)) != 0) {
			base = (fault_instruction >> 16) & 0x0f;
			if (base == 13 &&
			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
				return ABORT_FIXUP_FAILED;
			if (base == 15)
				return ABORT_FIXUP_FAILED;

			offset = (fault_instruction & 0xff) << 2;
			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
			if ((fault_instruction & (1 << 23)) != 0)
				offset = -offset;
			registers[base] += offset;
			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
		}
	}

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
		 * as the fault happened in svc mode but we need it in the
		 * usr slot so we can treat the registers as an array of ints
		 * during fixing.
		 * NOTE: This PC is in the position but writeback is not
		 * allowed on r15.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the prior fixup code.
		 */

		frame->tf_svc_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = saved_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed. This should not happen as the kstack is
		 * always valid.
		 */
	}

	return(ABORT_FIXUP_OK);
}
2385 #endif	/* CPU_ARM2/250/3/6/7 */
2386 
2387 
2388 #if (defined(CPU_ARM6) && defined(ARM6_LATE_ABORT)) || defined(CPU_ARM7) || \
2389 	defined(CPU_ARM7TDMI)
2390 /*
2391  * "Late" (base updated) data abort fixup
2392  *
2393  * For ARM6 (in late-abort mode) and ARM7.
2394  *
2395  * In this model, all data-transfer instructions need fixing up.  We defer
2396  * LDM, STM, LDC and STC fixup to the early-abort handler.
2397  */
int
late_abort_fixup(void *arg)
{
	trapframe_t *frame = arg;	/* trapframe of the aborted context */
	u_int fault_pc;			/* address of the faulting instruction */
	u_int fault_instruction;	/* the faulting instruction itself */
	int saved_lr = 0;

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
		 * as the fault happened in svc mode but we need it in the
		 * usr slot so we can treat the registers as an array of ints
		 * during fixing.
		 * NOTE: This PC is in the position but writeback is not
		 * allowed on r15.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the following fixup code.
		 */

		saved_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = frame->tf_svc_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed. This should not happen as the kstack is
		 * always valid.
		 */
	}

	/* Get fault address and status from the CPU */

	fault_pc = frame->tf_pc;
	fault_instruction = *((volatile unsigned int *)fault_pc);

	/* Decode the fault instruction and fix the registers as needed */

	/* Was it a swap instruction ? */

	if ((fault_instruction & 0x0fb00ff0) == 0x01000090) {
		/* SWP does no base writeback - nothing to correct. */
		DFC_DISASSEMBLE(fault_pc);
	} else if ((fault_instruction & 0x0c000000) == 0x04000000) {

		/* Was it a ldr/str instruction */
		/* This is for late abort only */

		int base;
		int offset;
		int *registers = &frame->tf_r0;

		DFC_DISASSEMBLE(fault_pc);

		/* This is for late abort only */

		if ((fault_instruction & (1 << 24)) == 0
		    || (fault_instruction & (1 << 21)) != 0) {
			/*
			 * The base was updated: post-indexed (P bit clear)
			 * or pre-indexed with writeback (W bit set).
			 */

			base = (fault_instruction >> 16) & 0x0f;
			if (base == 13 &&
			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
				return ABORT_FIXUP_FAILED;
			if (base == 15)
				return ABORT_FIXUP_FAILED;
			DFC_PRINTF(("late abt fix: r%d=%08x : ",
				       base, registers[base]));
			if ((fault_instruction & (1 << 25)) == 0) {
				/* Immediate offset - easy */

				offset = fault_instruction & 0xfff;
				if ((fault_instruction & (1 << 23)))	/* U bit */
					offset = -offset;
				registers[base] += offset;
				DFC_PRINTF(("imm=%08x ", offset));
			} else {
				/* offset is a shifted register */
				int shift;

				offset = fault_instruction & 0x0f;
				if (offset == base)
					return ABORT_FIXUP_FAILED;

				/*
				 * Register offset - hard we have to
				 * cope with shifts !
				 */
				offset = registers[offset];

				if ((fault_instruction & (1 << 4)) == 0)
					/* shift with amount */
					shift = (fault_instruction >> 7) & 0x1f;
				else {
					/* shift with register */
					if ((fault_instruction & (1 << 7)) != 0)
						/* undefined for now so bail out */
						return ABORT_FIXUP_FAILED;
					shift = ((fault_instruction >> 8) & 0xf);
					if (base == shift)
						return ABORT_FIXUP_FAILED;
					DFC_PRINTF(("shift reg=%d ", shift));
					shift = registers[shift];
				}
				DFC_PRINTF(("shift=%08x ", shift));
				/* Shift type is encoded in bits 5-6. */
				switch (((fault_instruction >> 5) & 0x3)) {
				case 0 : /* Logical left */
					offset = (int)(((u_int)offset) << shift);
					break;
				case 1 : /* Logical Right */
					if (shift == 0) shift = 32;
					offset = (int)(((u_int)offset) >> shift);
					break;
				case 2 : /* Arithmetic Right */
					if (shift == 0) shift = 32;
					offset = (int)(((int)offset) >> shift);
					break;
				case 3 : /* Rotate right (rol or rxx) */
					return ABORT_FIXUP_FAILED;
					break;
				}

				DFC_PRINTF(("abt: fixed LDR/STR with "
					       "register offset\n"));
				if ((fault_instruction & (1 << 23)))	/* U bit */
					offset = -offset;
				DFC_PRINTF(("offset=%08x ", offset));
				registers[base] += offset;
			}
			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
		}
	}

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
		 * as the fault happened in svc mode but we need it in the
		 * usr slot so we can treat the registers as an array of ints
		 * during fixing.
		 * NOTE: This PC is in the position but writeback is not
		 * allowed on r15.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the prior fixup code.
		 */

		frame->tf_svc_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = saved_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed. This should not happen as the kstack is
		 * always valid.
		 */
	}

	/*
	 * Now let the early-abort fixup routine have a go, in case it
	 * was an LDM, STM, LDC or STC that faulted.
	 */

	return early_abort_fixup(arg);
}
2566 #endif	/* CPU_ARM6(LATE)/7/7TDMI */
2567 
2568 /*
2569  * CPU Setup code
2570  */
2571 
2572 #if defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) || \
2573 	defined(CPU_ARM8) || defined (CPU_ARM9) || defined (CPU_ARM9E) || \
2574 	defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
2575 	defined(CPU_FA526) || \
2576 	defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
2577 	defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425) || \
2578 	defined(CPU_ARM10) || defined(CPU_SHEEVA) || \
2579 	defined(CPU_ARMV6) || defined(CPU_ARMV7)
2580 
#define IGN	0	/* ignore: leave cpuctrl untouched */
#define OR	1	/* OR co_value into cpuctrl */
#define BIC	2	/* clear co_value bits from cpuctrl */

/*
 * One boot-argument option recognised by parse_cpu_options().  The
 * option is looked up as a boolean; co_trueop is applied to cpuctrl
 * when it is true, co_falseop when it is false.
 */
struct cpu_option {
	const char *co_name;	/* boot-argument name */
	int	co_falseop;	/* IGN/OR/BIC when option is false */
	int	co_trueop;	/* IGN/OR/BIC when option is true */
	int	co_value;	/* control-register bits affected */
};

static u_int parse_cpu_options(char *, struct cpu_option *, u_int);
2593 
2594 static u_int
2595 parse_cpu_options(char *args, struct cpu_option *optlist, u_int cpuctrl)
2596 {
2597 	int integer;
2598 
2599 	if (args == NULL)
2600 		return(cpuctrl);
2601 
2602 	while (optlist->co_name) {
2603 		if (get_bootconf_option(args, optlist->co_name,
2604 		    BOOTOPT_TYPE_BOOLEAN, &integer)) {
2605 			if (integer) {
2606 				if (optlist->co_trueop == OR)
2607 					cpuctrl |= optlist->co_value;
2608 				else if (optlist->co_trueop == BIC)
2609 					cpuctrl &= ~optlist->co_value;
2610 			} else {
2611 				if (optlist->co_falseop == OR)
2612 					cpuctrl |= optlist->co_value;
2613 				else if (optlist->co_falseop == BIC)
2614 					cpuctrl &= ~optlist->co_value;
2615 			}
2616 		}
2617 		++optlist;
2618 	}
2619 	return(cpuctrl);
2620 }
2621 #endif /* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 || CPU_SA110 */
2622 
2623 #if defined (CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) \
2624 	|| defined(CPU_ARM8)
/*
 * Boot-argument options shared by ARM6, ARM7 and ARM8: control the
 * combined I/D cache and the write buffer.
 */
struct cpu_option arm678_options[] = {
#ifdef COMPAT_12
	{ "nocache",		IGN, BIC, CPU_CONTROL_IDC_ENABLE },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "cpu.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }	/* terminator */
};
2636 
2637 #endif	/* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 */
2638 
2639 #ifdef CPU_ARM6
/* ARM6-specific cache/write-buffer boot-argument options. */
struct cpu_option arm6_options[] = {
	{ "arm6.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm6.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm6.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm6.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }	/* terminator */
};
2647 
/*
 * arm6_setup:
 *
 *	Initialise the CPU control register for an ARM6 core.  "args" is
 *	the boot-argument string; arm678_options/arm6_options entries may
 *	override the cache and write-buffer defaults.
 */
void
arm6_setup(char *args)
{

	/* Set up default control registers bits */
	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
#if 0
	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE
		 | CPU_CONTROL_AFLT_ENABLE;
#endif

#ifdef ARM6_LATE_ABORT
	cpuctrl |= CPU_CONTROL_LABT_ENABLE;
#endif	/* ARM6_LATE_ABORT */

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	/* Apply boot-argument overrides. */
	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
	cpuctrl = parse_cpu_options(args, arm6_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	curcpu()->ci_ctrl = cpuctrl;
	cpu_control(0xffffffff, cpuctrl);	/* all-ones mask: write every bit */
}
2686 #endif	/* CPU_ARM6 */
2687 
2688 #ifdef CPU_ARM7
/* ARM7-specific cache/write-buffer/FPA-clock boot-argument options. */
struct cpu_option arm7_options[] = {
	{ "arm7.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm7.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm7.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm7.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
#ifdef COMPAT_12
	{ "fpaclk2",		BIC, OR,  CPU_CONTROL_CPCLK },
#endif	/* COMPAT_12 */
	{ "arm700.fpaclk",	BIC, OR,  CPU_CONTROL_CPCLK },
	{ NULL,			IGN, IGN, 0 }	/* terminator */
};
2700 
/*
 * arm7_setup:
 *
 *	Initialise the CPU control register for an ARM7 core from the
 *	boot-argument string "args" (arm678_options/arm7_options).
 */
void
arm7_setup(char *args)
{

	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
#if 0
	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
		 | CPU_CONTROL_CPCLK | CPU_CONTROL_LABT_ENABLE
		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE
		 | CPU_CONTROL_AFLT_ENABLE;
#endif

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	/* Apply boot-argument overrides. */
	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
	cpuctrl = parse_cpu_options(args, arm7_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	curcpu()->ci_ctrl = cpuctrl;
	cpu_control(0xffffffff, cpuctrl);	/* all-ones mask: write every bit */
}
2735 #endif	/* CPU_ARM7 */
2736 
2737 #ifdef CPU_ARM7TDMI
/* ARM7TDMI boot-argument options (shares the arm7.* option names). */
struct cpu_option arm7tdmi_options[] = {
	{ "arm7.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm7.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm7.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm7.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
#ifdef COMPAT_12
	{ "fpaclk2",		BIC, OR,  CPU_CONTROL_CPCLK },
#endif	/* COMPAT_12 */
	{ "arm700.fpaclk",	BIC, OR,  CPU_CONTROL_CPCLK },
	{ NULL,			IGN, IGN, 0 }	/* terminator */
};
2749 
/*
 * arm7tdmi_setup:
 *
 *	Initialise the CPU control register for an ARM7TDMI core from
 *	the boot-argument string "args".
 */
void
arm7tdmi_setup(char *args)
{
	int cpuctrl;

	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;

	/* Apply boot-argument overrides. */
	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
	cpuctrl = parse_cpu_options(args, arm7tdmi_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	curcpu()->ci_ctrl = cpuctrl;
	cpu_control(0xffffffff, cpuctrl);	/* all-ones mask: write every bit */
}
2773 #endif	/* CPU_ARM7TDMI */
2774 
2775 #ifdef CPU_ARM8
/* ARM8 cache/write-buffer/branch-prediction boot-argument options. */
struct cpu_option arm8_options[] = {
	{ "arm8.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm8.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm8.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm8.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
#ifdef COMPAT_12
	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "arm8.branchpredict",	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ NULL,			IGN, IGN, 0 }	/* terminator */
};
2788 
/*
 * arm8_setup:
 *
 *	Initialise the control register and the clock/test configuration
 *	of an ARM8 core from the boot-argument string "args".
 */
void
arm8_setup(char *args)
{
	int integer;		/* scratch for boot-option values */
	int clocktest;		/* accumulated clock/test register value */
	int setclock = 0;	/* write clock/test register at the end? */

	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
#if 0
	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
		 | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE;
#endif

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	/* Apply boot-argument overrides. */
	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
	cpuctrl = parse_cpu_options(args, arm8_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Get clock configuration */
	clocktest = arm8_clock_config(0, 0) & 0x0f;

	/* Special ARM8 clock and test configuration */
	if (get_bootconf_option(args, "arm8.clock.reset", BOOTOPT_TYPE_BOOLEAN, &integer)) {
		clocktest = 0;
		setclock = 1;
	}
	if (get_bootconf_option(args, "arm8.clock.dynamic", BOOTOPT_TYPE_BOOLEAN, &integer)) {
		if (integer)
			clocktest |= 0x01;
		else
			clocktest &= ~(0x01);
		setclock = 1;
	}
	if (get_bootconf_option(args, "arm8.clock.sync", BOOTOPT_TYPE_BOOLEAN, &integer)) {
		if (integer)
			clocktest |= 0x02;
		else
			clocktest &= ~(0x02);
		setclock = 1;
	}
	if (get_bootconf_option(args, "arm8.clock.fast", BOOTOPT_TYPE_BININT, &integer)) {
		/*
		 * NOTE(review): the mask clears bits 6-7 (~0xc0) but the
		 * new value is deposited in bits 2-3 ((integer & 3) << 2);
		 * these look inconsistent - verify against the ARM8
		 * clock/test register layout before relying on this option.
		 */
		clocktest = (clocktest & ~0xc0) | (integer & 3) << 2;
		setclock = 1;
	}
	if (get_bootconf_option(args, "arm8.test", BOOTOPT_TYPE_BININT, &integer)) {
		clocktest |= (integer & 7) << 5;
		setclock = 1;
	}

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	curcpu()->ci_ctrl = cpuctrl;
	cpu_control(0xffffffff, cpuctrl);

	/* Set the clock/test register */
	if (setclock)
		arm8_clock_config(0x7f, clocktest);
}
2860 #endif	/* CPU_ARM8 */
2861 
2862 #ifdef CPU_ARM9
/* ARM9 boot-argument options: separate I-cache, D-cache and write buffer. */
struct cpu_option arm9_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm9.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm9.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "arm9.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "arm9.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }	/* terminator */
};
2874 
/*
 * arm9_setup:
 *
 *	Initialise the CPU control register for an ARM9 core from the
 *	boot-argument string "args" (arm9_options).
 */
void
arm9_setup(char *args)
{

	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE;
	/* Bits this routine is allowed to modify in the control register. */
	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
		 | CPU_CONTROL_ROUNDROBIN;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	/* Apply boot-argument overrides. */
	cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

#ifndef ARM_HAS_VBAR
	/* Vector page relocated to the high address? */
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	curcpu()->ci_ctrl = cpuctrl;
	cpu_control(cpuctrlmask, cpuctrl);

}
2914 #endif	/* CPU_ARM9 */
2915 
2916 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
/* ARM9E/ARM10 boot-argument options: I-cache, D-cache and write buffer. */
struct cpu_option arm10_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm10.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm10.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "arm10.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "arm10.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }	/* terminator */
};
2928 
/*
 * arm10_setup:
 *
 *	Initialise the CPU control register for an ARM9E/ARM10 core from
 *	the boot-argument string "args" (arm10_options).
 */
void
arm10_setup(char *args)
{

	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
#if 0
	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
	    | CPU_CONTROL_BPRD_ENABLE
	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
#endif

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	/* Apply boot-argument overrides. */
	cpuctrl = parse_cpu_options(args, arm10_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

#ifndef ARM_HAS_VBAR
	/* Vector page relocated to the high address? */
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Now really make sure they are clean.  */
	__asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );

	/* Set the control register */
	curcpu()->ci_ctrl = cpuctrl;
	cpu_control(0xffffffff, cpuctrl);	/* all-ones mask: write every bit */

	/* And again. */
	cpu_idcache_wbinv_all();
}
2973 #endif	/* CPU_ARM9E || CPU_ARM10 */
2974 
2975 #if defined(CPU_ARM11)
/* ARM11 boot-argument options: caches and branch prediction. */
struct cpu_option arm11_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm11.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm11.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "arm11.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "arm11.branchpredict", BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ NULL,			IGN, IGN, 0 }	/* terminator */
};
2986 
/*
 * arm11_setup:
 *
 *	Initialise the CPU control register for an ARM11 core from the
 *	boot-argument string "args" (arm11_options), and enable coprocessor
 *	access so VFP detection can run later.
 */
void
arm11_setup(char *args)
{

	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
#ifdef ARM_MMU_EXTENDED
	    | CPU_CONTROL_XP_ENABLE
#endif
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    /* | CPU_CONTROL_BPRD_ENABLE */;
	/* Bits this routine is allowed to modify in the control register. */
	int cpuctrlmask = cpuctrl
	    | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BPRD_ENABLE
	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	/* Apply boot-argument overrides. */
	cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

#ifndef ARM_HAS_VBAR
	/* Vector page relocated to the high address? */
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Now really make sure they are clean.  */
	__asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );

	/* Allow detection code to find the VFP if it's fitted.  */
	armreg_cpacr_write(0x0fffffff);

	/* Set the control register */
	curcpu()->ci_ctrl = cpuctrl;
	cpu_control(cpuctrlmask, cpuctrl);

	/* And again. */
	cpu_idcache_wbinv_all();
}
3033 #endif	/* CPU_ARM11 */
3034 
3035 #if defined(CPU_ARM11MPCORE)
3036 
/*
 * arm11mpcore_setup:
 *
 *	Per-CPU initialization for ARM11 MPCore: compose the SCTLR
 *	value, apply boot-argument overrides (shared arm11_options
 *	table), clean/invalidate the caches, and program the control
 *	register.  Note ci_ctrl records the value cpu_control()
 *	returns rather than the requested bits.
 */
void
arm11mpcore_setup(char *args)
{

	int cpuctrl = CPU_CONTROL_IC_ENABLE
	    | CPU_CONTROL_DC_ENABLE
#ifdef ARM_MMU_EXTENDED
	    | CPU_CONTROL_XP_ENABLE
#endif
	    | CPU_CONTROL_BPRD_ENABLE ;
	/* Bits cpu_control() is permitted to change. */
	int cpuctrlmask = cpuctrl
	    | CPU_CONTROL_AFLT_ENABLE
	    | CPU_CONTROL_VECRELOC;

#ifdef	ARM11MPCORE_MMU_COMPAT
	/* XXX: S and R? */
#endif

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);

#ifndef ARM_HAS_VBAR
	/* Relocate exception vectors if the vector page is high. */
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Now really make sure they are clean.  */
	__asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );

	/* Allow detection code to find the VFP if it's fitted.  */
	armreg_cpacr_write(0x0fffffff);

	/* Set the control register */
	curcpu()->ci_ctrl = cpu_control(cpuctrlmask, cpuctrl);

	/* And again. */
	cpu_idcache_wbinv_all();
}
3081 #endif	/* CPU_ARM11MPCORE */
3082 
3083 #ifdef CPU_PJ4B
/*
 * pj4bv7_setup:
 *
 *	Per-CPU initialization for Marvell PJ4B (ARMv7) cores: run the
 *	PJ4B-specific configuration, compose the SCTLR value, optionally
 *	hook up the Armada XP L2 cache or the I/O coherency fabric cache
 *	routines, then flush the caches and program the control register.
 */
void
pj4bv7_setup(char *args)
{
	int cpuctrl;

	pj4b_config();

	cpuctrl = CPU_CONTROL_MMU_ENABLE;
#ifdef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_UNAL_ENABLE;
#else
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif
	cpuctrl |= CPU_CONTROL_DC_ENABLE;
	cpuctrl |= CPU_CONTROL_IC_ENABLE;
	/* bits 6:3 -- NOTE(review): presumably legacy should-be-one bits
	 * in SCTLR; confirm against the PJ4B TRM. */
	cpuctrl |= (0xf << 3);
	cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
	/* bits 18 and 16 -- NOTE(review): presumably SCTLR should-be-one
	 * bits; confirm against the ARMv7-A ARM. */
	cpuctrl |= (0x5 << 16);
	cpuctrl |= CPU_CONTROL_XP_ENABLE;

#ifndef ARM_HAS_VBAR
	/* Relocate exception vectors if the vector page is high. */
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;
#endif

#ifdef L2CACHE_ENABLE
	/* Setup L2 cache */
	arm_scache.cache_type = CPU_CT_CTYPE_WT;
	arm_scache.cache_unified = 1;
	arm_scache.dcache_type = arm_scache.icache_type = CACHE_TYPE_PIPT;
	arm_scache.dcache_size = arm_scache.icache_size = ARMADAXP_L2_SIZE;
	arm_scache.dcache_ways = arm_scache.icache_ways = ARMADAXP_L2_WAYS;
	arm_scache.dcache_way_size = arm_scache.icache_way_size =
	    ARMADAXP_L2_WAY_SIZE;
	arm_scache.dcache_line_size = arm_scache.icache_line_size =
	    ARMADAXP_L2_LINE_SIZE;
	arm_scache.dcache_sets = arm_scache.icache_sets =
	    ARMADAXP_L2_SETS;

	cpufuncs.cf_sdcache_wbinv_range	= armadaxp_sdcache_wbinv_range;
	cpufuncs.cf_sdcache_inv_range	= armadaxp_sdcache_inv_range;
	cpufuncs.cf_sdcache_wb_range	= armadaxp_sdcache_wb_range;
#endif

#ifdef AURORA_IO_CACHE_COHERENCY
	/* use AMBA and I/O Coherency Fabric to maintain cache */
	cpufuncs.cf_dcache_wbinv_range	= pj4b_dcache_cfu_wbinv_range;
	cpufuncs.cf_dcache_inv_range	= pj4b_dcache_cfu_inv_range;
	cpufuncs.cf_dcache_wb_range	= pj4b_dcache_cfu_wb_range;

	/* Secondary cache maintenance becomes a no-op in this mode. */
	cpufuncs.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop;
	cpufuncs.cf_sdcache_inv_range	= (void *)cpufunc_nullop;
	cpufuncs.cf_sdcache_wb_range	= (void *)cpufunc_nullop;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	cpu_control(0xffffffff, cpuctrl);

	/* And again. */
	cpu_idcache_wbinv_all();
#ifdef L2CACHE_ENABLE
	armadaxp_sdcache_wbinv_all();
#endif

	curcpu()->ci_ctrl = cpuctrl;
}
3153 #endif /* CPU_PJ4B */
3154 
3155 #if defined(CPU_ARMV7)
3156 struct cpu_option armv7_options[] = {
3157     { "cpu.cache",      BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3158     { "cpu.nocache",    OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3159     { "armv7.cache",    BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3160     { "armv7.icache",   BIC, OR,  CPU_CONTROL_IC_ENABLE },
3161     { "armv7.dcache",   BIC, OR,  CPU_CONTROL_DC_ENABLE },
3162 	{ NULL, 			IGN, IGN, 0}
3163 };
3164 
3165 void
3166 armv7_setup(char *args)
3167 {
3168 
3169 	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_IC_ENABLE
3170 	    | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_BPRD_ENABLE
3171 #ifdef __ARMEB__
3172 	    | CPU_CONTROL_EX_BEND
3173 #endif
3174 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3175 	    | CPU_CONTROL_AFLT_ENABLE;
3176 #endif
3177 	    | CPU_CONTROL_UNAL_ENABLE;
3178 
3179 	int cpuctrlmask = cpuctrl | CPU_CONTROL_AFLT_ENABLE;
3180 
3181 
3182 	cpuctrl = parse_cpu_options(args, armv7_options, cpuctrl);
3183 
3184 #ifndef ARM_HAS_VBAR
3185 	if (vector_page == ARM_VECTORS_HIGH)
3186 		cpuctrl |= CPU_CONTROL_VECRELOC;
3187 #endif
3188 
3189 	/* Clear out the cache */
3190 	cpu_idcache_wbinv_all();
3191 
3192 	/* Set the control register */
3193 	curcpu()->ci_ctrl = cpuctrl;
3194 	cpu_control(cpuctrlmask, cpuctrl);
3195 }
3196 #endif /* CPU_ARMV7 */
3197 
3198 
3199 #if defined(CPU_ARM1136) || defined(CPU_ARM1176)
3200 void
3201 arm11x6_setup(char *args)
3202 {
3203 	int cpuctrl, cpuctrl_wax;
3204 	uint32_t auxctrl;
3205 	uint32_t sbz=0;
3206 	uint32_t cpuid;
3207 
3208 	cpuid = cpu_idnum();
3209 
3210 	cpuctrl =
3211 		CPU_CONTROL_MMU_ENABLE  |
3212 		CPU_CONTROL_DC_ENABLE   |
3213 		CPU_CONTROL_WBUF_ENABLE |
3214 		CPU_CONTROL_32BP_ENABLE |
3215 		CPU_CONTROL_32BD_ENABLE |
3216 		CPU_CONTROL_LABT_ENABLE |
3217 		CPU_CONTROL_UNAL_ENABLE |
3218 #ifdef ARM_MMU_EXTENDED
3219 		CPU_CONTROL_XP_ENABLE   |
3220 #else
3221 		CPU_CONTROL_SYST_ENABLE |
3222 #endif
3223 		CPU_CONTROL_IC_ENABLE;
3224 
3225 	/*
3226 	 * "write as existing" bits
3227 	 * inverse of this is mask
3228 	 */
3229 	cpuctrl_wax =
3230 		(3 << 30) |
3231 		(1 << 29) |
3232 		(1 << 28) |
3233 		(3 << 26) |
3234 		(3 << 19) |
3235 		(1 << 17);
3236 
3237 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3238 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3239 #endif
3240 
3241 	cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);
3242 
3243 #ifdef __ARMEB__
3244 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3245 #endif
3246 
3247 #ifndef ARM_HAS_VBAR
3248 	if (vector_page == ARM_VECTORS_HIGH)
3249 		cpuctrl |= CPU_CONTROL_VECRELOC;
3250 #endif
3251 
3252 	auxctrl = armreg_auxctl_read();
3253 	/*
3254 	 * This options enables the workaround for the 364296 ARM1136
3255 	 * r0pX errata (possible cache data corruption with
3256 	 * hit-under-miss enabled). It sets the undocumented bit 31 in
3257 	 * the auxiliary control register and the FI bit in the control
3258 	 * register, thus disabling hit-under-miss without putting the
3259 	 * processor into full low interrupt latency mode. ARM11MPCore
3260 	 * is not affected.
3261 	 */
3262 	if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1136JS) { /* ARM1136JSr0pX */
3263 		cpuctrl |= CPU_CONTROL_FI_ENABLE;
3264 		auxctrl |= ARM1136_AUXCTL_PFI;
3265 	}
3266 
3267 	/*
3268 	 * This enables the workaround for the following ARM1176 r0pX
3269 	 * errata.
3270 	 *
3271 	 * 394601: In low interrupt latency configuration, interrupted clean
3272 	 * and invalidate operation may not clean dirty data.
3273 	 *
3274 	 * 716151: Clean Data Cache line by MVA can corrupt subsequent
3275 	 * stores to the same cache line.
3276 	 *
3277 	 * 714068: Prefetch Instruction Cache Line or Invalidate Instruction
3278 	 * Cache Line by MVA can cause deadlock.
3279 	 */
3280 	if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */
3281 		/* 394601 and 716151 */
3282 		cpuctrl |= CPU_CONTROL_FI_ENABLE;
3283 		auxctrl |= ARM1176_AUXCTL_FIO;
3284 
3285 		/* 714068 */
3286 		auxctrl |= ARM1176_AUXCTL_PHD;
3287 	}
3288 
3289 	/* Clear out the cache */
3290 	cpu_idcache_wbinv_all();
3291 
3292 	/* Now really make sure they are clean.  */
3293 	__asm volatile ("mcr\tp15, 0, %0, c7, c7, 0" : : "r"(sbz));
3294 
3295 	/* Allow detection code to find the VFP if it's fitted.  */
3296 	armreg_cpacr_write(0x0fffffff);
3297 
3298 	/* Set the control register */
3299 	curcpu()->ci_ctrl = cpuctrl;
3300 	cpu_control(~cpuctrl_wax, cpuctrl);
3301 
3302 	/* Update auxctlr */
3303 	armreg_auxctl_write(auxctrl);
3304 
3305 	/* And again. */
3306 	cpu_idcache_wbinv_all();
3307 }
3308 #endif	/* CPU_ARM1136 || CPU_ARM1176 */
3309 
3310 #ifdef CPU_SA110
/*
 * Boot-argument option table for the SA-110; consumed by
 * parse_cpu_options() in sa110_setup().  The COMPAT_12 entries accept
 * the pre-NetBSD-1.3 option spellings.
 */
struct cpu_option sa110_options[] = {
#ifdef COMPAT_12
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa110.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa110.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "sa110.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "sa110.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
3326 
/*
 * sa110_setup:
 *
 *	Per-CPU initialization for the StrongARM SA-110: compose the
 *	control register value, apply boot-argument overrides, flush
 *	the caches, program the control register, and enable clock
 *	switching.
 */
void
sa110_setup(char *args)
{
	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE;
#if 0
	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
		 | CPU_CONTROL_CPCLK;
#endif

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, sa110_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

#ifndef ARM_HAS_VBAR
	/* Relocate exception vectors if the vector page is high. */
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	curcpu()->ci_ctrl = cpuctrl;
#if 0
	cpu_control(cpuctrlmask, cpuctrl);
#endif
	cpu_control(0xffffffff, cpuctrl);

	/*
	 * enable clockswitching, note that this doesn't read or write to r0,
	 * r0 is just to make it valid asm
	 */
	__asm volatile ("mcr p15, 0, r0, c15, c1, 2");
}
3375 #endif	/* CPU_SA110 */
3376 
3377 #if defined(CPU_SA1100) || defined(CPU_SA1110)
/*
 * Boot-argument option table for SA-1100/SA-1110; consumed by
 * parse_cpu_options() in sa11x0_setup().  The COMPAT_12 entries accept
 * the pre-NetBSD-1.3 option spellings.
 */
struct cpu_option sa11x0_options[] = {
#ifdef COMPAT_12
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa11x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa11x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "sa11x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "sa11x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
3393 
/*
 * sa11x0_setup:
 *
 *	Per-CPU initialization for StrongARM SA-1100/SA-1110: compose
 *	the control register value, apply boot-argument overrides,
 *	flush the caches, and program the control register.
 */
void
sa11x0_setup(char *args)
{

	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
#if 0
	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
#endif

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, sa11x0_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

#ifndef ARM_HAS_VBAR
	/* Relocate exception vectors if the vector page is high. */
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	curcpu()->ci_ctrl = cpuctrl;
	cpu_control(0xffffffff, cpuctrl);
}
3434 #endif	/* CPU_SA1100 || CPU_SA1110 */
3435 
3436 #if defined(CPU_FA526)
/*
 * Boot-argument option table for the Faraday FA526; consumed by
 * parse_cpu_options() in fa526_setup().  The COMPAT_12 entries accept
 * the pre-NetBSD-1.3 option spellings.
 */
struct cpu_option fa526_options[] = {
#ifdef COMPAT_12
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
3448 
/*
 * fa526_setup:
 *
 *	Per-CPU initialization for the Faraday FA526: compose the
 *	control register value, apply boot-argument overrides, flush
 *	the caches, and program the control register.
 */
void
fa526_setup(char *args)
{

	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
#if 0
	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
#endif

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, fa526_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

#ifndef ARM_HAS_VBAR
	/* Relocate exception vectors if the vector page is high. */
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	curcpu()->ci_ctrl = cpuctrl;
	cpu_control(0xffffffff, cpuctrl);
}
3489 #endif	/* CPU_FA526 */
3490 
3491 #if defined(CPU_IXP12X0)
3492 struct cpu_option ixp12x0_options[] = {
3493 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3494 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3495 	{ "ixp12x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3496 	{ "ixp12x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
3497 	{ "ixp12x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
3498 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3499 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
3500 	{ "ixp12x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3501 	{ NULL,			IGN, IGN, 0 }
3502 };
3503 
/*
 * ixp12x0_setup:
 *
 *	Per-CPU initialization for the Intel IXP12x0: compose the
 *	control register value, apply boot-argument overrides, flush
 *	the caches, and program the control register (only the bits
 *	in cpuctrlmask are modified).
 */
void
ixp12x0_setup(char *args)
{

	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE;

	/* Bits cpu_control() is permitted to change. */
	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_IC_ENABLE
		 | CPU_CONTROL_VECRELOC;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, ixp12x0_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

#ifndef ARM_HAS_VBAR
	/* Relocate exception vectors if the vector page is high. */
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	curcpu()->ci_ctrl = cpuctrl;
	/* cpu_control(0xffffffff, cpuctrl); */
	cpu_control(cpuctrlmask, cpuctrl);
}
3541 #endif /* CPU_IXP12X0 */
3542 
3543 #if defined(CPU_XSCALE)
3544 struct cpu_option xscale_options[] = {
3545 #ifdef COMPAT_12
3546 	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
3547 	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3548 #endif	/* COMPAT_12 */
3549 	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
3550 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3551 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3552 	{ "xscale.branchpredict", BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
3553 	{ "xscale.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3554 	{ "xscale.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
3555 	{ "xscale.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
3556 	{ NULL,			IGN, IGN, 0 }
3557 };
3558 
/*
 * xscale_setup:
 *
 *	Per-CPU initialization for Intel XScale cores: compose the
 *	control register value, apply boot-argument overrides, flush
 *	the caches, program the control register, and configure write
 *	coalescing in the auxiliary control register.
 */
void
xscale_setup(char *args)
{
	uint32_t auxctl;

	/*
	 * The XScale Write Buffer is always enabled.  Our option
	 * is to enable/disable coalescing.  Note that bits 6:3
	 * must always be enabled.
	 */

	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
		 | CPU_CONTROL_BPRD_ENABLE;
#if 0
	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
#endif

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

#ifndef ARM_HAS_VBAR
	/* Relocate exception vectors if the vector page is high. */
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/*
	 * Set the control register.  Note that bits 6:3 must always
	 * be set to 1.
	 */
	curcpu()->ci_ctrl = cpuctrl;
#if 0
	cpu_control(cpuctrlmask, cpuctrl);
#endif
	cpu_control(0xffffffff, cpuctrl);

	/* Make sure write coalescing is turned on */
	auxctl = armreg_auxctl_read();
#ifdef XSCALE_NO_COALESCE_WRITES
	auxctl |= XSCALE_AUXCTL_K;
#else
	auxctl &= ~XSCALE_AUXCTL_K;
#endif
	armreg_auxctl_write(auxctl);
}
3622 #endif	/* CPU_XSCALE */
3623 
3624 #if defined(CPU_SHEEVA)
/*
 * Boot-argument option table for Marvell Sheeva cores; consumed by
 * parse_cpu_options() in sheeva_setup().
 */
struct cpu_option sheeva_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sheeva.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sheeva.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "sheeva.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "sheeva.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
3636 
/*
 * sheeva_setup:
 *
 *	Per-CPU initialization for Marvell Sheeva cores: compose the
 *	control register value, apply boot-argument overrides, enable
 *	the Sheeva extended-control features (DCache streaming, write
 *	allocate, optionally L2), describe the L2 cache and hook up its
 *	maintenance routines, then flush the caches and program the
 *	control register.
 */
void
sheeva_setup(char *args)
{
	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
#if 0
	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
	    | CPU_CONTROL_BPRD_ENABLE
	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
#endif

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, sheeva_options, cpuctrl);

	/* Enable DCache Streaming Switch and Write Allocate */
	uint32_t sheeva_ext = armreg_sheeva_xctrl_read();

	sheeva_ext |= FC_DCACHE_STREAM_EN | FC_WR_ALLOC_EN;
#ifdef SHEEVA_L2_CACHE
	sheeva_ext |= FC_L2CACHE_EN;
	sheeva_ext &= ~FC_L2_PREF_DIS;
#endif

	armreg_sheeva_xctrl_write(sheeva_ext);

#ifdef SHEEVA_L2_CACHE
	/* Describe the 256KiB 4-way PIPT L2 for the generic cache code. */
#ifndef SHEEVA_L2_CACHE_WT
	arm_scache.cache_type = CPU_CT_CTYPE_WB2;
#elif CPU_CT_CTYPE_WT != 0
	arm_scache.cache_type = CPU_CT_CTYPE_WT;
#endif
	arm_scache.cache_unified = 1;
	arm_scache.dcache_type = arm_scache.icache_type = CACHE_TYPE_PIPT;
	arm_scache.dcache_size = arm_scache.icache_size = 256*1024;
	arm_scache.dcache_ways = arm_scache.icache_ways = 4;
	arm_scache.dcache_way_size = arm_scache.icache_way_size =
	    arm_scache.dcache_size / arm_scache.dcache_ways;
	arm_scache.dcache_line_size = arm_scache.icache_line_size = 32;
	arm_scache.dcache_sets = arm_scache.icache_sets =
	    arm_scache.dcache_way_size / arm_scache.dcache_line_size;

	cpufuncs.cf_sdcache_wb_range = sheeva_sdcache_wb_range;
	cpufuncs.cf_sdcache_inv_range = sheeva_sdcache_inv_range;
	cpufuncs.cf_sdcache_wbinv_range = sheeva_sdcache_wbinv_range;
#endif /* SHEEVA_L2_CACHE */

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

#ifndef ARM_HAS_VBAR
	/* Relocate exception vectors if the vector page is high. */
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Now really make sure they are clean.  */
	__asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );

	/* Set the control register */
	curcpu()->ci_ctrl = cpuctrl;
	cpu_control(0xffffffff, cpuctrl);

	/* And again. */
	cpu_idcache_wbinv_all();
#ifdef SHEEVA_L2_CACHE
	sheeva_sdcache_wbinv_all();
#endif
}
3715 #endif	/* CPU_SHEEVA */
3716 
3717 
3718 bool
3719 cpu_gtmr_exists_p(void)
3720 {
3721 	return armreg_pfr1_read() & ARM_PFR1_GTIMER_MASK;
3722 }
3723 
3724 u_int
3725 cpu_clusterid(void)
3726 {
3727 	return __SHIFTOUT(armreg_mpidr_read(), MPIDR_AFF1);
3728 }
3729 
3730 bool
3731 cpu_earlydevice_va_p(void)
3732 {
3733 	return armreg_sctlr_read() & CPU_CONTROL_MMU_ENABLE;
3734 }
3735