xref: /netbsd-src/sys/arch/arm/arm/cpufunc.c (revision bdc22b2e01993381dcefeff2bc9b56ca75a4235c)
1 /*	$NetBSD: cpufunc.c,v 1.170 2018/07/12 12:48:50 jakllsch Exp $	*/
2 
3 /*
4  * arm7tdmi support code Copyright (c) 2001 John Fremlin
5  * arm8 support code Copyright (c) 1997 ARM Limited
6  * arm8 support code Copyright (c) 1997 Causality Limited
7  * arm9 support code Copyright (C) 2001 ARM Ltd
8  * arm11 support code Copyright (c) 2007 Microsoft
9  * cortexa8 support code Copyright (c) 2008 3am Software Foundry
10  * cortexa8 improvements Copyright (c) Goeran Weinholt
11  * Copyright (c) 1997 Mark Brinicombe.
12  * Copyright (c) 1997 Causality Limited
13  * All rights reserved.
14  *
15  * Redistribution and use in source and binary forms, with or without
16  * modification, are permitted provided that the following conditions
17  * are met:
18  * 1. Redistributions of source code must retain the above copyright
19  *    notice, this list of conditions and the following disclaimer.
20  * 2. Redistributions in binary form must reproduce the above copyright
21  *    notice, this list of conditions and the following disclaimer in the
22  *    documentation and/or other materials provided with the distribution.
23  * 3. All advertising materials mentioning features or use of this software
24  *    must display the following acknowledgement:
25  *	This product includes software developed by Causality Limited.
26  * 4. The name of Causality Limited may not be used to endorse or promote
27  *    products derived from this software without specific prior written
28  *    permission.
29  *
30  * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
31  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
32  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
33  * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
34  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
35  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
36  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
37  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
38  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
39  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
40  * SUCH DAMAGE.
41  *
42  * RiscBSD kernel project
43  *
44  * cpufuncs.c
45  *
46  * C functions for supporting CPU / MMU / TLB specific operations.
47  *
48  * Created	: 30/01/97
49  */
50 
51 #include <sys/cdefs.h>
52 __KERNEL_RCSID(0, "$NetBSD: cpufunc.c,v 1.170 2018/07/12 12:48:50 jakllsch Exp $");
53 
54 #include "opt_compat_netbsd.h"
55 #include "opt_cpuoptions.h"
56 
57 #include <sys/types.h>
58 #include <sys/param.h>
59 #include <sys/systm.h>
60 #include <machine/cpu.h>
61 #include <machine/bootconfig.h>
62 #include <arch/arm/arm/disassem.h>
63 
64 #include <uvm/uvm.h>
65 
66 #include <arm/cpufunc_proto.h>
67 #include <arm/cpuconf.h>
68 #include <arm/locore.h>
69 
70 #ifdef CPU_XSCALE_80200
71 #include <arm/xscale/i80200reg.h>
72 #include <arm/xscale/i80200var.h>
73 #endif
74 
75 #ifdef CPU_XSCALE_80321
76 #include <arm/xscale/i80321reg.h>
77 #include <arm/xscale/i80321var.h>
78 #endif
79 
80 #ifdef CPU_XSCALE_IXP425
81 #include <arm/xscale/ixp425reg.h>
82 #include <arm/xscale/ixp425var.h>
83 #endif
84 
85 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321)
86 #include <arm/xscale/xscalereg.h>
87 #endif
88 
89 #if defined(CPU_PJ4B)
90 #include "opt_cputypes.h"
91 #include "opt_mvsoc.h"
92 #include <machine/bus_defs.h>
93 #if defined(ARMADAXP)
94 #include <arm/marvell/armadaxpreg.h>
95 #include <arm/marvell/armadaxpvar.h>
96 #endif
97 #endif
98 
/*
 * Run-time architecture-level flags.  Each flag exists only when the
 * kernel is configured for more than one architecture level; in a
 * single-level kernel the answer is known at compile time and the
 * variable (and its tests) can be elided entirely.
 */
#if defined(CPU_ARMV7) && (defined(CPU_ARMV6) || defined(CPU_PRE_ARMV6))
bool cpu_armv7_p;
#endif

#if defined(CPU_ARMV6) && (defined(CPU_ARMV7) || defined(CPU_PRE_ARMV6))
bool cpu_armv6_p;
#endif


/* PRIMARY CACHE VARIABLES */
#if (ARM_MMU_V6 + ARM_MMU_V7) != 0
/* VIPT cache colour mask; only meaningful on v6/v7 MMUs. */
u_int	arm_cache_prefer_mask;
#endif
struct	arm_cache_info arm_pcache;	/* primary (L1) cache geometry */
struct	arm_cache_info arm_scache;	/* secondary (L2) cache geometry */

/* D-cache line size and the matching alignment mask (line size - 1). */
u_int	arm_dcache_align;
u_int	arm_dcache_align_mask;

/* 1 == use cpu_sleep(), 0 == don't */
int cpu_do_powersave;
120 
121 #ifdef CPU_ARM2
/*
 * ARM2 dispatch table.  The ARM2 predates the system-control
 * coprocessor, so nearly every slot is a no-op; the (void *) casts
 * adapt cpufunc_nullop to slots whose prototypes take arguments
 * (see cpufunc_proto.h -- confirm slot signatures there).
 */
struct cpu_functions arm2_cpufuncs = {
	/* CPU functions */

	.cf_id			= arm2_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= (void *)cpufunc_nullop,

	/* TLB functions */

	.cf_tlb_flushID		= cpufunc_nullop,
	.cf_tlb_flushID_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushI		= cpufunc_nullop,
	.cf_tlb_flushI_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushD		= cpufunc_nullop,
	.cf_tlb_flushD_SE	= (void *)cpufunc_nullop,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *) cpufunc_nullop,

	/* NOTE(review): arm3_cache_flush on an ARM2 table -- presumably
	 * harmless on a cacheless core, but confirm against arm3_cache_flush. */
	.cf_dcache_wbinv_all	= arm3_cache_flush,
	.cf_dcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_dcache_inv_range	= (void *)cpufunc_nullop,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= cpufunc_nullop,
	.cf_idcache_wbinv_range	= (void *)cpufunc_nullop,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= early_abort_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_setup		= (void *)cpufunc_nullop

};
175 #endif	/* CPU_ARM2 */
176 
177 #ifdef CPU_ARM250
/*
 * ARM250 dispatch table.  Identical to the ARM2 table except for the
 * ID routine (arm250_id): mostly no-ops, with (void *) casts adapting
 * cpufunc_nullop to argument-taking slots.
 */
struct cpu_functions arm250_cpufuncs = {
	/* CPU functions */

	.cf_id			= arm250_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= (void *)cpufunc_nullop,

	/* TLB functions */

	.cf_tlb_flushID		= cpufunc_nullop,
	.cf_tlb_flushID_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushI		= cpufunc_nullop,
	.cf_tlb_flushI_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushD		= cpufunc_nullop,
	.cf_tlb_flushD_SE	= (void *)cpufunc_nullop,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *) cpufunc_nullop,

	/* NOTE(review): arm3 flush routine, as in the ARM2 table -- confirm. */
	.cf_dcache_wbinv_all	= arm3_cache_flush,
	.cf_dcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_dcache_inv_range	= (void *)cpufunc_nullop,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= cpufunc_nullop,
	.cf_idcache_wbinv_range	= (void *)cpufunc_nullop,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= early_abort_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_setup		= (void *)cpufunc_nullop

};
231 #endif	/* CPU_ARM250 */
232 
233 #ifdef CPU_ARM3
/*
 * ARM3 dispatch table.  First core in this file with a real cache:
 * arm3_control / arm3_cache_flush do the work, while the cache has no
 * finer-grained maintenance, so every range operation is a cast of the
 * whole-cache flush (or a no-op for write-back, since the cache is
 * presumably write-through -- confirm against arm3 docs).
 */
struct cpu_functions arm3_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= arm3_control,

	/* TLB functions */

	.cf_tlb_flushID		= cpufunc_nullop,
	.cf_tlb_flushID_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushI		= cpufunc_nullop,
	.cf_tlb_flushI_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushD		= cpufunc_nullop,
	.cf_tlb_flushD_SE	= (void *)cpufunc_nullop,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *) cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm3_cache_flush,
	.cf_dcache_wbinv_range	= (void *)arm3_cache_flush,	/* range -> full flush */
	.cf_dcache_inv_range	= (void *)arm3_cache_flush,	/* range -> full flush */
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm3_cache_flush,
	.cf_idcache_wbinv_range	= (void *)arm3_cache_flush,	/* range -> full flush */

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= early_abort_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_setup		= (void *)cpufunc_nullop

};
287 #endif	/* CPU_ARM3 */
288 
289 #ifdef CPU_ARM6
/*
 * ARM6 dispatch table.  Uses the shared arm67_* MMU/TLB/cache helpers.
 * The data-abort fixup is selectable at build time: ARM6_LATE_ABORT
 * picks the late-abort model, otherwise the early-abort model is used.
 */
struct cpu_functions arm6_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm67_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm67_tlb_flush,
	.cf_tlb_flushID_SE	= arm67_tlb_purge,
	.cf_tlb_flushI		= arm67_tlb_flush,
	.cf_tlb_flushI_SE	= arm67_tlb_purge,
	.cf_tlb_flushD		= arm67_tlb_flush,
	.cf_tlb_flushD_SE	= arm67_tlb_purge,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *) cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm67_cache_flush,
	.cf_dcache_wbinv_range	= (void *)arm67_cache_flush,	/* range -> full flush */
	.cf_dcache_inv_range	= (void *)arm67_cache_flush,	/* range -> full flush */
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm67_cache_flush,
	.cf_idcache_wbinv_range	= (void *)arm67_cache_flush,	/* range -> full flush */

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

#ifdef ARM6_LATE_ABORT
	.cf_dataabt_fixup	= late_abort_fixup,
#else
	.cf_dataabt_fixup	= early_abort_fixup,
#endif
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm67_context_switch,

	.cf_setup		= arm6_setup

};
353 #endif	/* CPU_ARM6 */
354 
355 #ifdef CPU_ARM7
/*
 * ARM7 dispatch table.  Shares the arm67_* helpers with ARM6; differs
 * in always using the late-abort fixup and in its own arm7_setup.
 */
struct cpu_functions arm7_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm67_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm67_tlb_flush,
	.cf_tlb_flushID_SE	= arm67_tlb_purge,
	.cf_tlb_flushI		= arm67_tlb_flush,
	.cf_tlb_flushI_SE	= arm67_tlb_purge,
	.cf_tlb_flushD		= arm67_tlb_flush,
	.cf_tlb_flushD_SE	= arm67_tlb_purge,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm67_cache_flush,
	.cf_dcache_wbinv_range	= (void *)arm67_cache_flush,	/* range -> full flush */
	.cf_dcache_inv_range	= (void *)arm67_cache_flush,	/* range -> full flush */
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm67_cache_flush,
	.cf_idcache_wbinv_range	= (void *)arm67_cache_flush,	/* range -> full flush */

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= late_abort_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm67_context_switch,

	.cf_setup		= arm7_setup

};
415 #endif	/* CPU_ARM7 */
416 
417 #ifdef CPU_ARM7TDMI
/*
 * ARM7TDMI dispatch table.  The TLB has no separate I/D flush, so all
 * six TLB slots funnel into the combined arm7tdmi_tlb_flushID pair;
 * likewise all cache maintenance uses the combined flushID routine.
 */
struct cpu_functions arm7tdmi_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm7tdmi_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm7tdmi_tlb_flushID,
	.cf_tlb_flushID_SE	= arm7tdmi_tlb_flushID_SE,
	.cf_tlb_flushI		= arm7tdmi_tlb_flushID,
	.cf_tlb_flushI_SE	= arm7tdmi_tlb_flushID_SE,
	.cf_tlb_flushD		= arm7tdmi_tlb_flushID,
	.cf_tlb_flushD_SE	= arm7tdmi_tlb_flushID_SE,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm7tdmi_cache_flushID,
	.cf_dcache_wbinv_range	= (void *)arm7tdmi_cache_flushID,	/* range -> full flush */
	.cf_dcache_inv_range	= (void *)arm7tdmi_cache_flushID,	/* range -> full flush */
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm7tdmi_cache_flushID,
	.cf_idcache_wbinv_range	= (void *)arm7tdmi_cache_flushID,	/* range -> full flush */

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= late_abort_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm7tdmi_context_switch,

	.cf_setup		= arm7tdmi_setup

};
477 #endif	/* CPU_ARM7TDMI */
478 
479 #ifdef CPU_ARM8
/*
 * ARM8 dispatch table.  Cache maintenance distinguishes clean
 * (write-back) from purge (write-back + invalidate); there is no pure
 * invalidate, so the inv_range slot falls back to a full purge (the
 * XXX below).  Aborts need no fixup on this core.
 */
struct cpu_functions arm8_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm8_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm8_tlb_flushID,
	.cf_tlb_flushID_SE	= arm8_tlb_flushID_SE,
	.cf_tlb_flushI		= arm8_tlb_flushID,
	.cf_tlb_flushI_SE	= arm8_tlb_flushID_SE,
	.cf_tlb_flushD		= arm8_tlb_flushID,
	.cf_tlb_flushD_SE	= arm8_tlb_flushID_SE,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm8_cache_purgeID,
	.cf_dcache_wbinv_range	= (void *)arm8_cache_purgeID,
/*XXX*/	.cf_dcache_inv_range	= (void *)arm8_cache_purgeID,	/* no pure invalidate: wb+inv instead */
	.cf_dcache_wb_range	= (void *)arm8_cache_cleanID,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm8_cache_purgeID,
	.cf_idcache_wbinv_range = (void *)arm8_cache_purgeID,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm8_context_switch,

	.cf_setup		= arm8_setup
};
538 #endif	/* CPU_ARM8 */
539 
540 #ifdef CPU_ARM9
/*
 * ARM9 dispatch table.  Mixes generic ARMv4 TLB helpers with
 * ARM9-specific cache routines.  Note the XXX: dcache_inv_range is
 * backed by the write-back+invalidate routine because a pure
 * invalidate is not wired up here.
 */
struct cpu_functions arm9_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm9_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= arm9_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,	/* no single-entry I flush */
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= arm9_icache_sync_all,
	.cf_icache_sync_range	= arm9_icache_sync_range,

	.cf_dcache_wbinv_all	= arm9_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= arm9_dcache_wbinv_range,
/*XXX*/	.cf_dcache_inv_range	= arm9_dcache_wbinv_range,	/* pure invalidate not used: wb+inv */
	.cf_dcache_wb_range	= arm9_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm9_idcache_wbinv_all,
	.cf_idcache_wbinv_range = arm9_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm9_context_switch,

	.cf_setup		= arm9_setup

};
600 #endif /* CPU_ARM9 */
601 
602 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
/*
 * ARM9E / ARM10 ("v5 extended cache") dispatch table.  ARMv4 TLB
 * helpers plus the armv5_ec_* cache routines; context switch and setup
 * are shared with the plain ARM10 table below.  Same XXX fallback:
 * inv_range is a wb+inv.
 */
struct cpu_functions armv5_ec_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= armv5_ec_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= arm10_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= arm10_tlb_flushI_SE,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv5_ec_icache_sync_all,
	.cf_icache_sync_range	= armv5_ec_icache_sync_range,

	.cf_dcache_wbinv_all	= armv5_ec_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= armv5_ec_dcache_wbinv_range,
/*XXX*/	.cf_dcache_inv_range	= armv5_ec_dcache_wbinv_range,	/* pure invalidate not used: wb+inv */
	.cf_dcache_wb_range	= armv5_ec_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= armv5_ec_idcache_wbinv_all,
	.cf_idcache_wbinv_range = armv5_ec_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm10_context_switch,

	.cf_setup		= arm10_setup

};
662 #endif /* CPU_ARM9E || CPU_ARM10 */
663 
664 #ifdef CPU_ARM10
/*
 * ARM10 dispatch table.  Like armv5_ec_cpufuncs but with the generic
 * armv5_* cache/TTB routines instead of the extended-cache variants.
 */
struct cpu_functions arm10_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= armv5_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= arm10_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= arm10_tlb_flushI_SE,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv5_icache_sync_all,
	.cf_icache_sync_range	= armv5_icache_sync_range,

	.cf_dcache_wbinv_all	= armv5_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= armv5_dcache_wbinv_range,
/*XXX*/	.cf_dcache_inv_range	= armv5_dcache_wbinv_range,	/* pure invalidate not used: wb+inv */
	.cf_dcache_wb_range	= armv5_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= armv5_idcache_wbinv_all,
	.cf_idcache_wbinv_range = armv5_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm10_context_switch,

	.cf_setup		= arm10_setup

};
724 #endif /* CPU_ARM10 */
725 
726 #ifdef CPU_ARM11
/*
 * Generic ARM11 dispatch table.  Full per-type TLB operations and
 * armv6 cache routines, including a true dcache_inv_range (no XXX
 * fallback needed from here on).  First table with a real sleep hook.
 */
struct cpu_functions arm11_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm11_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm11_tlb_flushID,
	.cf_tlb_flushID_SE	= arm11_tlb_flushID_SE,
	.cf_tlb_flushI		= arm11_tlb_flushI,
	.cf_tlb_flushI_SE	= arm11_tlb_flushI_SE,
	.cf_tlb_flushD		= arm11_tlb_flushD,
	.cf_tlb_flushD_SE	= arm11_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv6_icache_sync_all,
	.cf_icache_sync_range	= armv6_icache_sync_range,

	.cf_dcache_wbinv_all	= armv6_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= armv6_dcache_wbinv_range,
	.cf_dcache_inv_range	= armv6_dcache_inv_range,
	.cf_dcache_wb_range	= armv6_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= armv6_idcache_wbinv_all,
	.cf_idcache_wbinv_range = armv6_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= arm11_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= arm11_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm11_context_switch,

	.cf_setup		= arm11_setup

};
786 #endif /* CPU_ARM11 */
787 
788 #ifdef CPU_ARM1136
/*
 * ARM1136 dispatch table.  Deviates from the generic ARM11 table by
 * using the arm11x6_* cache routines; the numeric comments below
 * (411920, 371025) appear to be ARM erratum IDs that those routines
 * work around -- TODO confirm against the ARM1136 errata notice.
 */
struct cpu_functions arm1136_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm11_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm11_tlb_flushID,
	.cf_tlb_flushID_SE	= arm11_tlb_flushID_SE,
	.cf_tlb_flushI		= arm11_tlb_flushI,
	.cf_tlb_flushI_SE	= arm11_tlb_flushI_SE,
	.cf_tlb_flushD		= arm11_tlb_flushD,
	.cf_tlb_flushD_SE	= arm11_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= arm11x6_icache_sync_all,	/* 411920 */
	.cf_icache_sync_range	= arm11x6_icache_sync_range,	/* 371025 */

	.cf_dcache_wbinv_all	= arm11x6_dcache_wbinv_all,	/* 411920 */
	.cf_dcache_wbinv_range	= armv6_dcache_wbinv_range,
	.cf_dcache_inv_range	= armv6_dcache_inv_range,
	.cf_dcache_wb_range	= armv6_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm11x6_idcache_wbinv_all,	/* 411920 */
	.cf_idcache_wbinv_range = arm11x6_idcache_wbinv_range,	/* 371025 */

	/* Other functions */

	.cf_flush_prefetchbuf	= arm11x6_flush_prefetchbuf,
	.cf_drain_writebuf	= arm11_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= arm11_sleep,	/* arm1136_sleep_rev0 */

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm11_context_switch,

	.cf_setup		= arm11x6_setup

};
848 #endif /* CPU_ARM1136 */
849 
850 #ifdef CPU_ARM1176
/*
 * ARM1176 dispatch table.  Same shape as the ARM1136 table; the
 * numeric comments (415045, 371367) again look like ARM erratum IDs
 * motivating the arm11x6_* routines -- TODO confirm.  Uses the
 * arm11x6 sleep routine rather than the generic arm11 one.
 */
struct cpu_functions arm1176_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm11_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm11_tlb_flushID,
	.cf_tlb_flushID_SE	= arm11_tlb_flushID_SE,
	.cf_tlb_flushI		= arm11_tlb_flushI,
	.cf_tlb_flushI_SE	= arm11_tlb_flushI_SE,
	.cf_tlb_flushD		= arm11_tlb_flushD,
	.cf_tlb_flushD_SE	= arm11_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= arm11x6_icache_sync_all,	/* 415045 */
	.cf_icache_sync_range	= arm11x6_icache_sync_range,	/* 371367 */

	.cf_dcache_wbinv_all	= arm11x6_dcache_wbinv_all,	/* 415045 */
	.cf_dcache_wbinv_range	= armv6_dcache_wbinv_range,
	.cf_dcache_inv_range	= armv6_dcache_inv_range,
	.cf_dcache_wb_range	= armv6_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm11x6_idcache_wbinv_all,	/* 415045 */
	.cf_idcache_wbinv_range = arm11x6_idcache_wbinv_range,	/* 371367 */

	/* Other functions */

	.cf_flush_prefetchbuf	= arm11x6_flush_prefetchbuf,
	.cf_drain_writebuf	= arm11_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= arm11x6_sleep,		/* no ref. */

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm11_context_switch,

	.cf_setup		= arm11x6_setup

};
910 #endif /* CPU_ARM1176 */
911 
912 
913 #ifdef CPU_ARM11MPCORE
/*
 * ARM11 MPCore dispatch table.  Whole-cache operations use the armv6
 * routines while the range operations use the armv5 ones -- a
 * deliberate-looking mix, presumably MPCore-specific; confirm before
 * "fixing" the asymmetry.
 */
struct cpu_functions arm11mpcore_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm11_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm11_tlb_flushID,
	.cf_tlb_flushID_SE	= arm11_tlb_flushID_SE,
	.cf_tlb_flushI		= arm11_tlb_flushI,
	.cf_tlb_flushI_SE	= arm11_tlb_flushI_SE,
	.cf_tlb_flushD		= arm11_tlb_flushD,
	.cf_tlb_flushD_SE	= arm11_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv6_icache_sync_all,
	.cf_icache_sync_range	= armv5_icache_sync_range,

	.cf_dcache_wbinv_all	= armv6_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= armv5_dcache_wbinv_range,
	.cf_dcache_inv_range	= armv5_dcache_inv_range,
	.cf_dcache_wb_range	= armv5_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= armv6_idcache_wbinv_all,
	.cf_idcache_wbinv_range = armv5_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= arm11_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= arm11_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm11_context_switch,

	.cf_setup		= arm11mpcore_setup

};
973 #endif /* CPU_ARM11MPCORE */
974 
975 #ifdef CPU_SA110
/*
 * StrongARM SA-110 dispatch table.  ARMv4 TLB helpers plus the shared
 * sa1_* cache routines.  As with the other pre-v6 cores there is no
 * pure invalidate: dcache_inv_range falls back to purge (the XXX).
 */
struct cpu_functions sa110_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= sa1_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= sa1_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,	/* no single-entry I flush */
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= sa1_cache_syncI,
	.cf_icache_sync_range	= sa1_cache_syncI_rng,

	.cf_dcache_wbinv_all	= sa1_cache_purgeD,
	.cf_dcache_wbinv_range	= sa1_cache_purgeD_rng,
/*XXX*/	.cf_dcache_inv_range	= sa1_cache_purgeD_rng,	/* pure invalidate not used: purge */
	.cf_dcache_wb_range	= sa1_cache_cleanD_rng,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= sa1_cache_purgeID,
	.cf_idcache_wbinv_range	= sa1_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= sa110_context_switch,

	.cf_setup		= sa110_setup
};
1034 #endif	/* CPU_SA110 */
1035 
#if defined(CPU_SA1100) || defined(CPU_SA1110)
/*
 * Dispatch table shared by SA-1100/SA-1110: same SA-1 cache and ARMv4
 * TLB routines as the SA-110, with SA-11x0 specific read-buffer drain,
 * sleep, context-switch and setup handlers.
 */
struct cpu_functions sa11x0_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= sa1_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= sa1_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= sa1_cache_syncI,
	.cf_icache_sync_range	= sa1_cache_syncI_rng,

	.cf_dcache_wbinv_all	= sa1_cache_purgeD,
	.cf_dcache_wbinv_range	= sa1_cache_purgeD_rng,
	/* XXX: purge (writeback+invalidate) stands in for invalidate-only */
	.cf_dcache_inv_range	= sa1_cache_purgeD_rng,
	.cf_dcache_wb_range	= sa1_cache_cleanD_rng,

	/* No outer/secondary cache: all sdcache ops are no-ops */
	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= sa1_cache_purgeID,
	.cf_idcache_wbinv_range	= sa1_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf	= sa11x0_drain_readbuf,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= sa11x0_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= sa11x0_context_switch,

	.cf_setup		= sa11x0_setup
};
#endif	/* CPU_SA1100 || CPU_SA1110 */
1096 
#if defined(CPU_FA526)
/*
 * Dispatch table for the Faraday FA526: ARMv4 TLB flush-all ops with
 * FA526-specific single-entry TLB, cache, prefetch and branch-target
 * maintenance routines.
 */
struct cpu_functions fa526_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= fa526_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= fa526_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= fa526_tlb_flushI_SE,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= fa526_icache_sync_all,
	.cf_icache_sync_range	= fa526_icache_sync_range,

	.cf_dcache_wbinv_all	= fa526_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= fa526_dcache_wbinv_range,
	.cf_dcache_inv_range	= fa526_dcache_inv_range,
	.cf_dcache_wb_range	= fa526_dcache_wb_range,

	/* No outer/secondary cache: all sdcache ops are no-ops */
	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= fa526_idcache_wbinv_all,
	.cf_idcache_wbinv_range	= fa526_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= fa526_flush_prefetchbuf,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= fa526_flush_brnchtgt_E,

	.cf_sleep		= fa526_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= fa526_context_switch,

	.cf_setup		= fa526_setup
};
#endif	/* CPU_FA526 */
1157 
#ifdef CPU_IXP12X0
/*
 * Dispatch table for the Intel IXP12x0 network processor: reuses the
 * SA-1 cache/TTB routines with IXP12x0-specific read-buffer drain,
 * context-switch and setup handlers.
 */
struct cpu_functions ixp12x0_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= sa1_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= sa1_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= sa1_cache_syncI,
	.cf_icache_sync_range	= sa1_cache_syncI_rng,

	.cf_dcache_wbinv_all	= sa1_cache_purgeD,
	.cf_dcache_wbinv_range	= sa1_cache_purgeD_rng,
	/* XXX: purge (writeback+invalidate) stands in for invalidate-only */
	.cf_dcache_inv_range	= sa1_cache_purgeD_rng,
	.cf_dcache_wb_range	= sa1_cache_cleanD_rng,

	/* No outer/secondary cache: all sdcache ops are no-ops */
	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= sa1_cache_purgeID,
	.cf_idcache_wbinv_range	= sa1_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf	= ixp12x0_drain_readbuf,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= ixp12x0_context_switch,

	.cf_setup		= ixp12x0_setup
};
#endif	/* CPU_IXP12X0 */
1218 
#if defined(CPU_XSCALE)
/*
 * Dispatch table for Intel XScale cores: XScale-specific coprocessor
 * wait, control, TTB, cache and sleep routines over the generic ARMv4
 * TLB flush-all operations.
 */
struct cpu_functions xscale_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= xscale_cpwait,

	/* MMU functions */

	.cf_control		= xscale_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= xscale_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= xscale_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= xscale_cache_syncI,
	.cf_icache_sync_range	= xscale_cache_syncI_rng,

	.cf_dcache_wbinv_all	= xscale_cache_purgeD,
	.cf_dcache_wbinv_range	= xscale_cache_purgeD_rng,
	.cf_dcache_inv_range	= xscale_cache_flushD_rng,
	.cf_dcache_wb_range	= xscale_cache_cleanD_rng,

	/* No outer/secondary cache: all sdcache ops are no-ops */
	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= xscale_cache_purgeID,
	.cf_idcache_wbinv_range = xscale_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= xscale_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= xscale_context_switch,

	.cf_setup		= xscale_setup
};
#endif /* CPU_XSCALE */
1279 
#if defined(CPU_ARMV7)
/*
 * Generic ARMv7 dispatch table.  The uniprocessor (armv7up_*) TLB ops
 * are installed here; set_cpufuncs_mpfixup() later swaps in the
 * broadcasting armv7mp_* variants when the MP extensions are present.
 */
struct cpu_functions armv7_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= armv7_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv7up_tlb_flushID,
	.cf_tlb_flushID_SE	= armv7up_tlb_flushID_SE,
	.cf_tlb_flushI		= armv7up_tlb_flushI,
	.cf_tlb_flushI_SE	= armv7up_tlb_flushI_SE,
	.cf_tlb_flushD		= armv7up_tlb_flushD,
	.cf_tlb_flushD_SE	= armv7up_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv7_icache_sync_all,
	.cf_dcache_wbinv_all	= armv7_dcache_wbinv_all,

	.cf_dcache_inv_range	= armv7_dcache_inv_range,
	.cf_dcache_wb_range	= armv7_dcache_wb_range,
	.cf_dcache_wbinv_range	= armv7_dcache_wbinv_range,

	/* No outer/secondary cache handled here: sdcache ops are no-ops */
	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_icache_sync_range	= armv7_icache_sync_range,
	.cf_idcache_wbinv_range = armv7_idcache_wbinv_range,


	.cf_idcache_wbinv_all	= armv7_idcache_wbinv_all,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv7_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= armv7_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= armv7_context_switch,

	.cf_setup		= armv7_setup

};
#endif /* CPU_ARMV7 */
1343 
#ifdef CPU_PJ4B
/*
 * Dispatch table for the Marvell PJ4B (ARMv7-compatible) core.  Note
 * the I- and D-only TLB slots deliberately point at the combined
 * armv7up_tlb_flushID* routines, and cf_cpwait drains the write buffer.
 */
struct cpu_functions pj4bv7_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= armv7_drain_writebuf,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= armv7_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv7up_tlb_flushID,
	.cf_tlb_flushID_SE	= armv7up_tlb_flushID_SE,
	.cf_tlb_flushI		= armv7up_tlb_flushID,
	.cf_tlb_flushI_SE	= armv7up_tlb_flushID_SE,
	.cf_tlb_flushD		= armv7up_tlb_flushID,
	.cf_tlb_flushD_SE	= armv7up_tlb_flushID_SE,

	/* Cache operations (see also pj4bv7_setup) */
	.cf_icache_sync_all	= armv7_idcache_wbinv_all,
	.cf_icache_sync_range	= armv7_icache_sync_range,

	.cf_dcache_wbinv_all	= armv7_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= armv7_dcache_wbinv_range,
	.cf_dcache_inv_range	= armv7_dcache_inv_range,
	.cf_dcache_wb_range	= armv7_dcache_wb_range,

	/* No outer/secondary cache handled here: sdcache ops are no-ops */
	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= armv7_idcache_wbinv_all,
	.cf_idcache_wbinv_range	= armv7_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv7_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= pj4b_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= armv7_context_switch,

	.cf_setup		= pj4bv7_setup
};
#endif /* CPU_PJ4B */
1403 
#ifdef CPU_SHEEVA
/*
 * Dispatch table for Marvell Sheeva (ARMv5TE "Feroceon" family) cores:
 * armv5_ec cache/TTB routines with Sheeva-specific ranged dcache ops
 * and sleep handler, over ARMv4/ARM10 TLB maintenance.
 */
struct cpu_functions sheeva_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= armv5_ec_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= arm10_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= arm10_tlb_flushI_SE,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv5_ec_icache_sync_all,
	.cf_icache_sync_range	= armv5_ec_icache_sync_range,

	.cf_dcache_wbinv_all	= armv5_ec_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= sheeva_dcache_wbinv_range,
	.cf_dcache_inv_range	= sheeva_dcache_inv_range,
	.cf_dcache_wb_range	= sheeva_dcache_wb_range,

	/* No outer/secondary cache handled here: sdcache ops are no-ops */
	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= armv5_ec_idcache_wbinv_all,
	.cf_idcache_wbinv_range = sheeva_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)sheeva_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm10_context_switch,

	.cf_setup		= sheeva_setup
};
#endif /* CPU_SHEEVA */
1464 
1465 
1466 /*
1467  * Global constants also used by locore.s
1468  */
1469 
1470 struct cpu_functions cpufuncs;
1471 u_int cputype;
1472 
#if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) || \
    defined(CPU_ARM9E) || defined(CPU_ARM10) || defined(CPU_FA526) || \
    defined(CPU_SHEEVA) || \
    defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
    defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425) || \
    defined(CPU_ARMV6) || defined(CPU_ARMV7)
static void get_cachetype_cp15(void);

/*
 * Additional cache information local to this file: log2 forms of the
 * primary dcache geometry, filled in by get_cachetype_cp15() and
 * consumed by set_cpufuncs() when seeding the set/index flush loop
 * parameters (e.g. arm9_dcache_*, armv5_dcache_*).
 */
static int	arm_dcache_log2_nsets;
static int	arm_dcache_log2_assoc;
static int	arm_dcache_log2_linesize;
1486 
#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
/*
 * Return the Cache Size ID register (CCSIDR) contents for the cache
 * selected by "cssr" (a CSSELR value: level in bits [3:1], the InD bit
 * selecting instruction vs. data/unified).
 */
static inline u_int
get_cachesize_cp15(int cssr)
{
#if defined(CPU_ARMV7)
	__asm volatile(".arch\tarmv7a");

	armreg_csselr_write(cssr);
	arm_isb();			 /* sync to the new cssr */

#else
	/* Write CSSELR (cache size selection) directly via CP15. */
	__asm volatile("mcr p15, 1, %0, c0, c0, 2" :: "r" (cssr) : "memory");
#endif
	return armreg_ccsidr_read();
}
#endif
1503 
#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
/*
 * Fill *info with the geometry of one cache level, read via CCSIDR.
 *
 * "clidr" is the 3-bit cache-type field for this level extracted from
 * the CLIDR (1 = icache only, 2 = dcache only, 3 = separate I+D,
 * 4 = unified).  "level" is 0-based; level 0 additionally seeds the
 * file-local arm_dcache_log2_* values and may clear
 * arm_cache_prefer_mask when no aliasing is possible.
 */
static void
get_cacheinfo_clidr(struct arm_cache_info *info, u_int level, u_int clidr)
{
	u_int csid;

	/* Bits 1/2 set => a data or unified cache exists at this level. */
	if (clidr & 6) {
		csid = get_cachesize_cp15(level << 1); /* select dcache values */
		info->dcache_sets = CPU_CSID_NUMSETS(csid) + 1;
		info->dcache_ways = CPU_CSID_ASSOC(csid) + 1;
		info->dcache_line_size = 1U << (CPU_CSID_LEN(csid) + 4);
		info->dcache_way_size =
		    info->dcache_line_size * info->dcache_sets;
		info->dcache_size = info->dcache_way_size * info->dcache_ways;

		if (level == 0) {
			/*
			 * NOTE(review): this stores the way *count*
			 * (ASSOC+1), not its log2, unlike the other two
			 * fields below -- confirm whether any consumer
			 * on the v7 path relies on it.
			 */
			arm_dcache_log2_assoc = CPU_CSID_ASSOC(csid) + 1;
			arm_dcache_log2_linesize = CPU_CSID_LEN(csid) + 4;
			/* ceil(log2(dcache_sets)) */
			arm_dcache_log2_nsets =
			    31 - __builtin_clz(info->dcache_sets*2-1);
		}
	}

	info->cache_unified = (clidr == 4);

	/* Levels below L1 are physically indexed and tagged. */
	if (level > 0) {
		info->dcache_type = CACHE_TYPE_PIPT;
		info->icache_type = CACHE_TYPE_PIPT;
	}

	if (info->cache_unified) {
		/* Single cache: mirror dcache geometry into the icache info. */
		info->icache_ways = info->dcache_ways;
		info->icache_line_size = info->dcache_line_size;
		info->icache_way_size = info->dcache_way_size;
		info->icache_size = info->dcache_size;
	} else {
		csid = get_cachesize_cp15((level << 1)|CPU_CSSR_InD); /* select icache values */
		info->icache_sets = CPU_CSID_NUMSETS(csid) + 1;
		info->icache_ways = CPU_CSID_ASSOC(csid) + 1;
		info->icache_line_size = 1U << (CPU_CSID_LEN(csid) + 4);
		info->icache_way_size = info->icache_line_size * info->icache_sets;
		info->icache_size = info->icache_way_size * info->icache_ways;
	}
	/*
	 * If each L1 way fits in one page, virtual indexing cannot
	 * alias, so no page colouring is required.
	 */
	if (level == 0
	    && info->dcache_way_size <= PAGE_SIZE
	    && info->icache_way_size <= PAGE_SIZE) {
		arm_cache_prefer_mask = 0;
	}
}
#endif /* (ARM_MMU_V6 + ARM_MMU_V7) > 0 */
1554 
/*
 * Probe the primary (and, on ARMv7, secondary) cache geometry from the
 * CP15 cache type register (CTR), recording the results in arm_pcache
 * (and arm_scache) and the arm_dcache_align/log2 globals.  Falls
 * through to "out" untouched if the CPU has no CTR (reads back the
 * main ID register instead).
 */
static void
get_cachetype_cp15(void)
{
	u_int ctype, isize, dsize;
	u_int multiplier;

	ctype = armreg_ctr_read();

	/*
	 * ...and thus spake the ARM ARM:
	 *
	 * If an <opcode2> value corresponding to an unimplemented or
	 * reserved ID register is encountered, the System Control
	 * processor returns the value of the main ID register.
	 */
	if (ctype == cpu_idnum())
		goto out;

#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
	/* CTR format 4 (ARMv7): geometry comes from CLIDR/CCSIDR instead. */
	if (CPU_CT_FORMAT(ctype) == 4) {
		u_int clidr = armreg_clidr_read();

		if (CPU_CT4_L1IPOLICY(ctype) == CPU_CT4_L1_PIPT) {
			arm_pcache.icache_type = CACHE_TYPE_PIPT;
		} else {
			arm_pcache.icache_type = CACHE_TYPE_VIPT;
			arm_cache_prefer_mask = PAGE_SIZE;
		}
#ifdef CPU_CORTEX
		/* Cortex cores get PIPT dcache; others are treated as VIPT. */
		if (CPU_ID_CORTEX_P(cpu_idnum())) {
			arm_pcache.dcache_type = CACHE_TYPE_PIPT;
		} else
#endif
		{
			arm_pcache.dcache_type = CACHE_TYPE_VIPT;
		}
		arm_pcache.cache_type = CPU_CT_CTYPE_WB14;

		/* L1 first, then L2 if the next CLIDR field says it exists. */
		get_cacheinfo_clidr(&arm_pcache, 0, clidr & 7);
		arm_dcache_align = arm_pcache.dcache_line_size;
		clidr >>= 3;
		if (clidr & 7) {
			get_cacheinfo_clidr(&arm_scache, 1, clidr & 7);
			/* arm_dcache_align tracks the smallest line size. */
			if (arm_scache.dcache_line_size < arm_dcache_align)
				arm_dcache_align = arm_scache.dcache_line_size;
		}
		/*
		 * The pmap cleans an entire way for an exec page so
		 * we don't care that it's VIPT anymore.
		 */
		if (arm_pcache.dcache_type == CACHE_TYPE_PIPT) {
			arm_cache_prefer_mask = 0;
		}
		goto out;
	}
#endif /* ARM_MMU_V6 + ARM_MMU_V7 > 0 */

	/* Pre-v7 CTR: S bit clear means a single unified cache. */
	if ((ctype & CPU_CT_S) == 0)
		arm_pcache.cache_unified = 1;

	/*
	 * If you want to know how this code works, go read the ARM ARM.
	 */

	arm_pcache.cache_type = CPU_CT_CTYPE(ctype);

	/* Separate icache: decode its size/associativity/line-length fields. */
	if (arm_pcache.cache_unified == 0) {
		isize = CPU_CT_ISIZE(ctype);
		multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
		arm_pcache.icache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
		if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
			if (isize & CPU_CT_xSIZE_M)
				arm_pcache.icache_line_size = 0; /* not present */
			else
				arm_pcache.icache_ways = 1;
		} else {
			arm_pcache.icache_ways = multiplier <<
			    (CPU_CT_xSIZE_ASSOC(isize) - 1);
#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
			arm_pcache.icache_type = CACHE_TYPE_VIPT;
			/* P bit set: way size exceeds a page, colouring needed. */
			if (CPU_CT_xSIZE_P & isize)
				arm_cache_prefer_mask |=
				    __BIT(9 + CPU_CT_xSIZE_SIZE(isize)
					  - CPU_CT_xSIZE_ASSOC(isize))
				    - PAGE_SIZE;
#endif
		}
		arm_pcache.icache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
		arm_pcache.icache_way_size =
		    __BIT(9 + CPU_CT_xSIZE_SIZE(isize) - CPU_CT_xSIZE_ASSOC(isize));
	}

	/* Dcache (or the unified cache) fields. */
	dsize = CPU_CT_DSIZE(ctype);
	multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
	arm_pcache.dcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
	if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
		if (dsize & CPU_CT_xSIZE_M)
			arm_pcache.dcache_line_size = 0; /* not present */
		else
			arm_pcache.dcache_ways = 1;
	} else {
		arm_pcache.dcache_ways = multiplier <<
		    (CPU_CT_xSIZE_ASSOC(dsize) - 1);
#if (ARM_MMU_V6) > 0
		arm_pcache.dcache_type = CACHE_TYPE_VIPT;
		if ((CPU_CT_xSIZE_P & dsize)
		    && CPU_ID_ARM11_P(curcpu()->ci_arm_cpuid)) {
			arm_cache_prefer_mask |=
			    __BIT(9 + CPU_CT_xSIZE_SIZE(dsize)
				  - CPU_CT_xSIZE_ASSOC(dsize)) - PAGE_SIZE;
		}
#endif
	}
	arm_pcache.dcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
	arm_pcache.dcache_way_size =
	    __BIT(9 + CPU_CT_xSIZE_SIZE(dsize) - CPU_CT_xSIZE_ASSOC(dsize));

	arm_dcache_align = arm_pcache.dcache_line_size;

	/* Log2 geometry consumed by the set/index cache flush loops. */
	arm_dcache_log2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
	arm_dcache_log2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
	arm_dcache_log2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
	    CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);

 out:
	KASSERTMSG(arm_dcache_align <= CACHE_LINE_SIZE,
	    "arm_dcache_align=%u CACHE_LINE_SIZE=%u",
	    arm_dcache_align, CACHE_LINE_SIZE);
	arm_dcache_align_mask = arm_dcache_align - 1;
}
1685 #endif /* ARM7TDMI || ARM8 || ARM9 || XSCALE */
1686 
#if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3) || \
    defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_SA110) || \
    defined(CPU_SA1100) || defined(CPU_SA1110) || defined(CPU_IXP12X0)
/*
 * Cache information for CPUs without cache type registers; one entry
 * per known CPU id, consumed by get_cachetype_table().
 */
struct cachetab {
	uint32_t ct_cpuid;		/* CPU id (masked with CPU_ID_CPU_MASK) */
	int	ct_pcache_type;		/* CPU_CT_CTYPE_* write policy */
	int	ct_pcache_unified;	/* nonzero: single unified cache */
	int	ct_pdcache_size;	/* dcache total size, bytes */
	int	ct_pdcache_line_size;	/* dcache line size, bytes */
	int	ct_pdcache_ways;	/* dcache associativity */
	int	ct_picache_size;	/* icache total size, bytes */
	int	ct_picache_line_size;	/* icache line size, bytes */
	int	ct_picache_ways;	/* icache associativity */
};
1702 
/* Table is terminated by the all-zero sentinel entry (ct_cpuid == 0). */
struct cachetab cachetab[] = {
    /* cpuid,           cache type,       u,  dsiz, ls, wy,  isiz, ls, wy */
    { CPU_ID_ARM2,      0,                1,     0,  0,  0,     0,  0,  0 },
    { CPU_ID_ARM250,    0,                1,     0,  0,  0,     0,  0,  0 },
    { CPU_ID_ARM3,      CPU_CT_CTYPE_WT,  1,  4096, 16, 64,     0,  0,  0 },
    { CPU_ID_ARM610,	CPU_CT_CTYPE_WT,  1,  4096, 16, 64,     0,  0,  0 },
    { CPU_ID_ARM710,    CPU_CT_CTYPE_WT,  1,  8192, 32,  4,     0,  0,  0 },
    { CPU_ID_ARM7500,   CPU_CT_CTYPE_WT,  1,  4096, 16,  4,     0,  0,  0 },
    { CPU_ID_ARM710A,   CPU_CT_CTYPE_WT,  1,  8192, 16,  4,     0,  0,  0 },
    { CPU_ID_ARM7500FE, CPU_CT_CTYPE_WT,  1,  4096, 16,  4,     0,  0,  0 },
    /* XXX is this type right for SA-1? */
    { CPU_ID_SA110,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 },
    { CPU_ID_SA1100,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
    { CPU_ID_SA1110,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
    { CPU_ID_IXP1200,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 }, /* XXX */
    { 0, 0, 0, 0, 0, 0, 0, 0}
};
1720 
1721 static void get_cachetype_table(void);
1722 
1723 static void
1724 get_cachetype_table(void)
1725 {
1726 	int i;
1727 	uint32_t cpuid = cpu_idnum();
1728 
1729 	for (i = 0; cachetab[i].ct_cpuid != 0; i++) {
1730 		if (cachetab[i].ct_cpuid == (cpuid & CPU_ID_CPU_MASK)) {
1731 			arm_pcache.cache_type = cachetab[i].ct_pcache_type;
1732 			arm_pcache.cache_unified = cachetab[i].ct_pcache_unified;
1733 			arm_pcache.dcache_size = cachetab[i].ct_pdcache_size;
1734 			arm_pcache.dcache_line_size =
1735 			    cachetab[i].ct_pdcache_line_size;
1736 			arm_pcache.dcache_ways = cachetab[i].ct_pdcache_ways;
1737 			if (arm_pcache.dcache_ways) {
1738 				arm_pcache.dcache_way_size =
1739 				    arm_pcache.dcache_line_size
1740 				    / arm_pcache.dcache_ways;
1741 			}
1742 			arm_pcache.icache_size = cachetab[i].ct_picache_size;
1743 			arm_pcache.icache_line_size =
1744 			    cachetab[i].ct_picache_line_size;
1745 			arm_pcache.icache_ways = cachetab[i].ct_picache_ways;
1746 			if (arm_pcache.icache_ways) {
1747 				arm_pcache.icache_way_size =
1748 				    arm_pcache.icache_line_size
1749 				    / arm_pcache.icache_ways;
1750 			}
1751 		}
1752 	}
1753 
1754 	arm_dcache_align = arm_pcache.dcache_line_size;
1755 	arm_dcache_align_mask = arm_dcache_align - 1;
1756 }
1757 
1758 #endif /* ARM2 || ARM250 || ARM3 || ARM6 || ARM7 || SA110 || SA1100 || SA1111 || IXP12X0 */
1759 
1760 
#if defined(CPU_CORTEX) || defined(CPU_PJ4B)
/*
 * On a MULTIPROCESSOR kernel, replace the uniprocessor TLB ops
 * installed in cpufuncs with the broadcasting armv7mp_* variants when
 * the MPIDR indicates the MP extensions are implemented and this is
 * not a uniprocessor-only part (U bit clear).
 */
static inline void
set_cpufuncs_mpfixup(void)
{
#ifdef MULTIPROCESSOR
	/* If MP extensions are present, patch in MP TLB ops */
	const uint32_t mpidr = armreg_mpidr_read();
	if ((mpidr & (MPIDR_MP|MPIDR_U)) == MPIDR_MP) {
		cpufuncs.cf_tlb_flushID = armv7mp_tlb_flushID;
		cpufuncs.cf_tlb_flushID_SE = armv7mp_tlb_flushID_SE;
		cpufuncs.cf_tlb_flushI = armv7mp_tlb_flushI;
		cpufuncs.cf_tlb_flushI_SE = armv7mp_tlb_flushI_SE;
		cpufuncs.cf_tlb_flushD = armv7mp_tlb_flushD;
		cpufuncs.cf_tlb_flushD_SE = armv7mp_tlb_flushD_SE;
	}
#endif
}
#endif
1779 
1780 /*
1781  * Cannot panic here as we may not have a console yet ...
1782  */
1783 
1784 int
1785 set_cpufuncs(void)
1786 {
1787 	if (cputype == 0) {
1788 		cputype = cpufunc_id();
1789 		cputype &= CPU_ID_CPU_MASK;
1790 	}
1791 
1792 	/*
1793 	 * NOTE: cpu_do_powersave defaults to off.  If we encounter a
1794 	 * CPU type where we want to use it by default, then we set it.
1795 	 */
1796 #ifdef CPU_ARM2
1797 	if (cputype == CPU_ID_ARM2) {
1798 		cpufuncs = arm2_cpufuncs;
1799 		get_cachetype_table();
1800 		return 0;
1801 	}
1802 #endif /* CPU_ARM2 */
1803 #ifdef CPU_ARM250
1804 	if (cputype == CPU_ID_ARM250) {
1805 		cpufuncs = arm250_cpufuncs;
1806 		get_cachetype_table();
1807 		return 0;
1808 	}
1809 #endif
1810 #ifdef CPU_ARM3
1811 	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
1812 	    (cputype & 0x00000f00) == 0x00000300) {
1813 		cpufuncs = arm3_cpufuncs;
1814 		get_cachetype_table();
1815 		return 0;
1816 	}
1817 #endif	/* CPU_ARM3 */
1818 #ifdef CPU_ARM6
1819 	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
1820 	    (cputype & 0x00000f00) == 0x00000600) {
1821 		cpufuncs = arm6_cpufuncs;
1822 		get_cachetype_table();
1823 		pmap_pte_init_generic();
1824 		return 0;
1825 	}
1826 #endif	/* CPU_ARM6 */
1827 #ifdef CPU_ARM7
1828 	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
1829 	    CPU_ID_IS7(cputype) &&
1830 	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V3) {
1831 		cpufuncs = arm7_cpufuncs;
1832 		get_cachetype_table();
1833 		pmap_pte_init_generic();
1834 		return 0;
1835 	}
1836 #endif	/* CPU_ARM7 */
1837 #ifdef CPU_ARM7TDMI
1838 	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
1839 	    CPU_ID_IS7(cputype) &&
1840 	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V4T) {
1841 		cpufuncs = arm7tdmi_cpufuncs;
1842 		get_cachetype_cp15();
1843 		pmap_pte_init_generic();
1844 		return 0;
1845 	}
1846 #endif
1847 #ifdef CPU_ARM8
1848 	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
1849 	    (cputype & 0x0000f000) == 0x00008000) {
1850 		cpufuncs = arm8_cpufuncs;
1851 		get_cachetype_cp15();
1852 		pmap_pte_init_arm8();
1853 		return 0;
1854 	}
1855 #endif	/* CPU_ARM8 */
1856 #ifdef CPU_ARM9
1857 	if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
1858 	     (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
1859 	    (cputype & 0x0000f000) == 0x00009000) {
1860 		cpufuncs = arm9_cpufuncs;
1861 		get_cachetype_cp15();
1862 		arm9_dcache_sets_inc = 1U << arm_dcache_log2_linesize;
1863 		arm9_dcache_sets_max =
1864 		    (1U << (arm_dcache_log2_linesize + arm_dcache_log2_nsets)) -
1865 		    arm9_dcache_sets_inc;
1866 		arm9_dcache_index_inc = 1U << (32 - arm_dcache_log2_assoc);
1867 		arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
1868 #ifdef	ARM9_CACHE_WRITE_THROUGH
1869 		pmap_pte_init_arm9();
1870 #else
1871 		pmap_pte_init_generic();
1872 #endif
1873 		return 0;
1874 	}
1875 #endif /* CPU_ARM9 */
1876 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
1877 	if (cputype == CPU_ID_ARM926EJS ||
1878 	    cputype == CPU_ID_ARM1026EJS) {
1879 		cpufuncs = armv5_ec_cpufuncs;
1880 		get_cachetype_cp15();
1881 		pmap_pte_init_generic();
1882 		return 0;
1883 	}
1884 #endif /* CPU_ARM9E || CPU_ARM10 */
1885 #if defined(CPU_SHEEVA)
1886 	if (cputype == CPU_ID_MV88SV131 ||
1887 	    cputype == CPU_ID_MV88FR571_VD) {
1888 		cpufuncs = sheeva_cpufuncs;
1889 		get_cachetype_cp15();
1890 		pmap_pte_init_generic();
1891 		cpu_do_powersave = 1;			/* Enable powersave */
1892 		return 0;
1893 	}
1894 #endif /* CPU_SHEEVA */
1895 #ifdef CPU_ARM10
1896 	if (/* cputype == CPU_ID_ARM1020T || */
1897 	    cputype == CPU_ID_ARM1020E) {
1898 		/*
1899 		 * Select write-through cacheing (this isn't really an
1900 		 * option on ARM1020T).
1901 		 */
1902 		cpufuncs = arm10_cpufuncs;
1903 		get_cachetype_cp15();
1904 		armv5_dcache_sets_inc = 1U << arm_dcache_log2_linesize;
1905 		armv5_dcache_sets_max =
1906 		    (1U << (arm_dcache_log2_linesize + arm_dcache_log2_nsets)) -
1907 		    armv5_dcache_sets_inc;
1908 		armv5_dcache_index_inc = 1U << (32 - arm_dcache_log2_assoc);
1909 		armv5_dcache_index_max = 0U - armv5_dcache_index_inc;
1910 		pmap_pte_init_generic();
1911 		return 0;
1912 	}
1913 #endif /* CPU_ARM10 */
1914 
1915 
1916 #if defined(CPU_ARM11MPCORE)
1917 	if (cputype == CPU_ID_ARM11MPCORE) {
1918 		cpufuncs = arm11mpcore_cpufuncs;
1919 #if defined(CPU_ARMV7) || defined(CPU_PRE_ARMV6)
1920 		cpu_armv6_p = true;
1921 #endif
1922 		get_cachetype_cp15();
1923 		armv5_dcache_sets_inc = 1U << arm_dcache_log2_linesize;
1924 		armv5_dcache_sets_max = (1U << (arm_dcache_log2_linesize +
1925 			arm_dcache_log2_nsets)) - armv5_dcache_sets_inc;
1926 		armv5_dcache_index_inc = 1U << (32 - arm_dcache_log2_assoc);
1927 		armv5_dcache_index_max = 0U - armv5_dcache_index_inc;
1928 		cpu_do_powersave = 1;			/* Enable powersave */
1929 		pmap_pte_init_arm11mpcore();
1930 		if (arm_cache_prefer_mask)
1931 			uvmexp.ncolors = (arm_cache_prefer_mask >> PGSHIFT) + 1;
1932 
1933 		return 0;
1934 
1935 	}
1936 #endif	/* CPU_ARM11MPCORE */
1937 
1938 #if defined(CPU_ARM11)
1939 	if (cputype == CPU_ID_ARM1136JS ||
1940 	    cputype == CPU_ID_ARM1136JSR1 ||
1941 	    cputype == CPU_ID_ARM1176JZS) {
1942 		cpufuncs = arm11_cpufuncs;
1943 #if defined(CPU_ARM1136)
1944 		if (cputype == CPU_ID_ARM1136JS ||
1945 		    cputype == CPU_ID_ARM1136JSR1) {
1946 			cpufuncs = arm1136_cpufuncs;
1947 			if (cputype == CPU_ID_ARM1136JS)
1948 				cpufuncs.cf_sleep = arm1136_sleep_rev0;
1949 		}
1950 #endif
1951 #if defined(CPU_ARM1176)
1952 		if (cputype == CPU_ID_ARM1176JZS) {
1953 			cpufuncs = arm1176_cpufuncs;
1954 		}
1955 #endif
1956 #if defined(CPU_ARMV7) || defined(CPU_PRE_ARMV6)
1957 		cpu_armv6_p = true;
1958 #endif
1959 		cpu_do_powersave = 1;			/* Enable powersave */
1960 		get_cachetype_cp15();
1961 #ifdef ARM11_CACHE_WRITE_THROUGH
1962 		pmap_pte_init_arm11();
1963 #else
1964 		pmap_pte_init_generic();
1965 #endif
1966 		if (arm_cache_prefer_mask)
1967 			uvmexp.ncolors = (arm_cache_prefer_mask >> PGSHIFT) + 1;
1968 
1969 		/*
1970 		 * Start and reset the PMC Cycle Counter.
1971 		 */
1972 		armreg_pmcrv6_write(ARM11_PMCCTL_E | ARM11_PMCCTL_P | ARM11_PMCCTL_C);
1973 		return 0;
1974 	}
1975 #endif /* CPU_ARM11 */
1976 #ifdef CPU_SA110
1977 	if (cputype == CPU_ID_SA110) {
1978 		cpufuncs = sa110_cpufuncs;
1979 		get_cachetype_table();
1980 		pmap_pte_init_sa1();
1981 		return 0;
1982 	}
1983 #endif	/* CPU_SA110 */
1984 #ifdef CPU_SA1100
1985 	if (cputype == CPU_ID_SA1100) {
1986 		cpufuncs = sa11x0_cpufuncs;
1987 		get_cachetype_table();
1988 		pmap_pte_init_sa1();
1989 
1990 		/* Use powersave on this CPU. */
1991 		cpu_do_powersave = 1;
1992 
1993 		return 0;
1994 	}
1995 #endif	/* CPU_SA1100 */
1996 #ifdef CPU_SA1110
1997 	if (cputype == CPU_ID_SA1110) {
1998 		cpufuncs = sa11x0_cpufuncs;
1999 		get_cachetype_table();
2000 		pmap_pte_init_sa1();
2001 
2002 		/* Use powersave on this CPU. */
2003 		cpu_do_powersave = 1;
2004 
2005 		return 0;
2006 	}
2007 #endif	/* CPU_SA1110 */
2008 #ifdef CPU_FA526
2009 	if (cputype == CPU_ID_FA526) {
2010 		cpufuncs = fa526_cpufuncs;
2011 		get_cachetype_cp15();
2012 		pmap_pte_init_generic();
2013 
2014 		/* Use powersave on this CPU. */
2015 		cpu_do_powersave = 1;
2016 
2017 		return 0;
2018 	}
2019 #endif	/* CPU_FA526 */
2020 #ifdef CPU_IXP12X0
2021 	if (cputype == CPU_ID_IXP1200) {
2022 		cpufuncs = ixp12x0_cpufuncs;
2023 		get_cachetype_table();
2024 		pmap_pte_init_sa1();
2025 		return 0;
2026 	}
2027 #endif  /* CPU_IXP12X0 */
2028 #ifdef CPU_XSCALE_80200
2029 	if (cputype == CPU_ID_80200) {
2030 		int rev = cpufunc_id() & CPU_ID_REVISION_MASK;
2031 
2032 		i80200_icu_init();
2033 
2034 		/*
2035 		 * Reset the Performance Monitoring Unit to a
2036 		 * pristine state:
2037 		 *	- CCNT, PMN0, PMN1 reset to 0
2038 		 *	- overflow indications cleared
2039 		 *	- all counters disabled
2040 		 */
2041 		__asm volatile("mcr p14, 0, %0, c0, c0, 0"
2042 			:
2043 			: "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
2044 			       PMNC_CC_IF));
2045 
2046 #if defined(XSCALE_CCLKCFG)
2047 		/*
2048 		 * Crank CCLKCFG to maximum legal value.
2049 		 */
2050 		__asm volatile ("mcr p14, 0, %0, c6, c0, 0"
2051 			:
2052 			: "r" (XSCALE_CCLKCFG));
2053 #endif
2054 
2055 		/*
2056 		 * XXX Disable ECC in the Bus Controller Unit; we
2057 		 * don't really support it, yet.  Clear any pending
2058 		 * error indications.
2059 		 */
2060 		__asm volatile("mcr p13, 0, %0, c0, c1, 0"
2061 			:
2062 			: "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));
2063 
2064 		cpufuncs = xscale_cpufuncs;
2065 
2066 		/*
2067 		 * i80200 errata: Step-A0 and A1 have a bug where
2068 		 * D$ dirty bits are not cleared on "invalidate by
2069 		 * address".
2070 		 *
2071 		 * Workaround: Clean cache line before invalidating.
2072 		 */
2073 		if (rev == 0 || rev == 1)
2074 			cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng;
2075 
2076 		get_cachetype_cp15();
2077 		pmap_pte_init_xscale();
2078 		return 0;
2079 	}
2080 #endif /* CPU_XSCALE_80200 */
2081 #ifdef CPU_XSCALE_80321
2082 	if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
2083 	    cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0 ||
2084 	    cputype == CPU_ID_80219_400 || cputype == CPU_ID_80219_600) {
2085 		i80321_icu_init();
2086 
2087 		/*
2088 		 * Reset the Performance Monitoring Unit to a
2089 		 * pristine state:
2090 		 *	- CCNT, PMN0, PMN1 reset to 0
2091 		 *	- overflow indications cleared
2092 		 *	- all counters disabled
2093 		 */
2094 		__asm volatile("mcr p14, 0, %0, c0, c0, 0"
2095 			:
2096 			: "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
2097 			       PMNC_CC_IF));
2098 
2099 		cpufuncs = xscale_cpufuncs;
2100 
2101 		get_cachetype_cp15();
2102 		pmap_pte_init_xscale();
2103 		return 0;
2104 	}
2105 #endif /* CPU_XSCALE_80321 */
2106 #ifdef __CPU_XSCALE_PXA2XX
2107 	/* ignore core revision to test PXA2xx CPUs */
2108 	if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X ||
2109 	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
2110 	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {
2111 
2112 		cpufuncs = xscale_cpufuncs;
2113 
2114 		get_cachetype_cp15();
2115 		pmap_pte_init_xscale();
2116 
2117 		/* Use powersave on this CPU. */
2118 		cpu_do_powersave = 1;
2119 
2120 		return 0;
2121 	}
2122 #endif /* __CPU_XSCALE_PXA2XX */
2123 #ifdef CPU_XSCALE_IXP425
2124 	if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
2125 	    cputype == CPU_ID_IXP425_266) {
2126 		ixp425_icu_init();
2127 
2128 		cpufuncs = xscale_cpufuncs;
2129 
2130 		get_cachetype_cp15();
2131 		pmap_pte_init_xscale();
2132 
2133 		return 0;
2134 	}
2135 #endif /* CPU_XSCALE_IXP425 */
2136 #if defined(CPU_CORTEX)
2137 	if (CPU_ID_CORTEX_P(cputype)) {
2138 		cpufuncs = armv7_cpufuncs;
2139 		set_cpufuncs_mpfixup();
2140 		cpu_do_powersave = 1;			/* Enable powersave */
2141 #if defined(CPU_ARMV6) || defined(CPU_PRE_ARMV6)
2142 		cpu_armv7_p = true;
2143 #endif
2144 		get_cachetype_cp15();
2145 		pmap_pte_init_armv7();
2146 		if (arm_cache_prefer_mask)
2147 			uvmexp.ncolors = (arm_cache_prefer_mask >> PGSHIFT) + 1;
2148 		/*
2149 		 * Start and reset the PMC Cycle Counter.
2150 		 */
2151 		armreg_pmcr_write(ARM11_PMCCTL_E | ARM11_PMCCTL_P | ARM11_PMCCTL_C);
2152 		armreg_pmcntenset_write(CORTEX_CNTENS_C);
2153 		return 0;
2154 	}
2155 #endif /* CPU_CORTEX */
2156 
2157 #if defined(CPU_PJ4B)
2158 	if ((cputype == CPU_ID_MV88SV581X_V6 ||
2159 	    cputype == CPU_ID_MV88SV581X_V7 ||
2160 	    cputype == CPU_ID_MV88SV584X_V7 ||
2161 	    cputype == CPU_ID_ARM_88SV581X_V6 ||
2162 	    cputype == CPU_ID_ARM_88SV581X_V7) &&
2163 	    (armreg_pfr0_read() & ARM_PFR0_THUMBEE_MASK)) {
2164 		cpufuncs = pj4bv7_cpufuncs;
2165 		set_cpufuncs_mpfixup();
2166 #if defined(CPU_ARMV6) || defined(CPU_PRE_ARMV6)
2167 		cpu_armv7_p = true;
2168 #endif
2169 		get_cachetype_cp15();
2170 		pmap_pte_init_armv7();
2171 		return 0;
2172 	}
2173 #endif /* CPU_PJ4B */
2174 
2175 	/*
2176 	 * Bzzzz. And the answer was ...
2177 	 */
2178 	panic("No support for this CPU type (%08x) in kernel", cputype);
2179 	return ARCHITECTURE_NOT_PRESENT;
2180 }
2181 
2182 #ifdef CPU_ARM2
2183 u_int arm2_id(void)
2184 {
2185 
2186 	return CPU_ID_ARM2;
2187 }
2188 #endif /* CPU_ARM2 */
2189 
2190 #ifdef CPU_ARM250
2191 u_int arm250_id(void)
2192 {
2193 
2194 	return CPU_ID_ARM250;
2195 }
2196 #endif /* CPU_ARM250 */
2197 
2198 /*
2199  * Fixup routines for data and prefetch aborts.
2200  *
2201  * Several compile time symbols are used
2202  *
2203  * DEBUG_FAULT_CORRECTION - Print debugging information during the
2204  * correction of registers after a fault.
2205  * ARM6_LATE_ABORT - ARM6 supports both early and late aborts
2206  * when defined should use late aborts
2207  */
2208 
2209 
2210 /*
2211  * Null abort fixup routine.
2212  * For use when no fixup is required.
2213  */
2214 int
2215 cpufunc_null_fixup(void *arg)
2216 {
2217 	return(ABORT_FIXUP_OK);
2218 }
2219 
2220 
2221 #if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3) || \
2222     defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI)
2223 
/*
 * Debug helpers for the abort-fixup code: when DEBUG_FAULT_CORRECTION
 * is defined they print progress and disassemble the faulting
 * instruction; otherwise they compile away to nothing.
 */
#ifdef DEBUG_FAULT_CORRECTION
#define DFC_PRINTF(x)		printf x
#define DFC_DISASSEMBLE(x)	disassemble(x)
#else
#define DFC_PRINTF(x)		/* nothing */
#define DFC_DISASSEMBLE(x)	/* nothing */
#endif
2231 
2232 /*
2233  * "Early" data abort fixup.
2234  *
2235  * For ARM2, ARM2as, ARM3 and ARM6 (in early-abort mode).  Also used
2236  * indirectly by ARM6 (in late-abort mode) and ARM7[TDMI].
2237  *
2238  * In early aborts, we may have to fix up LDM, STM, LDC and STC.
2239  */
/*
 * early_abort_fixup:
 *
 *	arg is really a trapframe_t *.  If the aborted instruction was an
 *	LDM/STM (block transfer) or LDC/STC (coprocessor transfer) with
 *	base-register writeback, undo the writeback so the instruction can
 *	be restarted once the fault has been serviced.
 *
 *	Returns ABORT_FIXUP_OK on success, or ABORT_FIXUP_FAILED when the
 *	instruction cannot be corrected (writeback to r15, or to r13 while
 *	in SVC mode).
 */
int
early_abort_fixup(void *arg)
{
	trapframe_t *frame = arg;
	u_int fault_pc;
	u_int fault_instruction;
	int saved_lr = 0;

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
		 * as the fault happened in svc mode but we need it in the
		 * usr slot so we can treat the registers as an array of ints
		 * during fixing.
		 * NOTE: This PC is in the position but writeback is not
		 * allowed on r15.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the following fixup code.
		 */

		saved_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = frame->tf_svc_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed. This should not happen as the kstack is
		 * always valid.
		 */
	}

	/* Get fault address and status from the CPU */

	fault_pc = frame->tf_pc;
	fault_instruction = *((volatile unsigned int *)fault_pc);

	/* Decode the fault instruction and fix the registers as needed */

	/* Block data transfer (LDM/STM): opcode bits 27:25 == 100. */
	if ((fault_instruction & 0x0e000000) == 0x08000000) {
		int base;
		int loop;
		int count;
		int *registers = &frame->tf_r0;

		DFC_PRINTF(("LDM/STM\n"));
		DFC_DISASSEMBLE(fault_pc);
		/* Bit 21 is the W (writeback) bit. */
		if (fault_instruction & (1 << 21)) {
			DFC_PRINTF(("This instruction must be corrected\n"));
			base = (fault_instruction >> 16) & 0x0f;
			if (base == 15)
				return ABORT_FIXUP_FAILED;
			/* Count registers transferred */
			count = 0;
			for (loop = 0; loop < 16; ++loop) {
				if (fault_instruction & (1<<loop))
					++count;
			}
			DFC_PRINTF(("%d registers used\n", count));
			DFC_PRINTF(("Corrected r%d by %d bytes ",
				       base, count * 4));
			/* Bit 23 is the U (up/down) bit. */
			if (fault_instruction & (1 << 23)) {
				DFC_PRINTF(("down\n"));
				registers[base] -= count * 4;
			} else {
				DFC_PRINTF(("up\n"));
				registers[base] += count * 4;
			}
		}
	/* Coprocessor data transfer (LDC/STC): opcode bits 27:25 == 110. */
	} else if ((fault_instruction & 0x0e000000) == 0x0c000000) {
		int base;
		int offset;
		int *registers = &frame->tf_r0;

		/* REGISTER CORRECTION IS REQUIRED FOR THESE INSTRUCTIONS */

		DFC_DISASSEMBLE(fault_pc);

		/* Only need to fix registers if write back is turned on */

		if ((fault_instruction & (1 << 21)) != 0) {
			base = (fault_instruction >> 16) & 0x0f;
			if (base == 13 &&
			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
				return ABORT_FIXUP_FAILED;
			if (base == 15)
				return ABORT_FIXUP_FAILED;

			/* 8-bit immediate, scaled by 4 (word offset). */
			offset = (fault_instruction & 0xff) << 2;
			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
			if ((fault_instruction & (1 << 23)) != 0)
				offset = -offset;
			registers[base] += offset;
			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
		}
	}

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
		 * as the fault happened in svc mode but we need it in the
		 * usr slot so we can treat the registers as an array of ints
		 * during fixing.
		 * NOTE: This PC is in the position but writeback is not
		 * allowed on r15.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the prior fixup code.
		 */

		frame->tf_svc_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = saved_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed. This should not happen as the kstack is
		 * always valid.
		 */
	}

	return(ABORT_FIXUP_OK);
}
2367 #endif	/* CPU_ARM2/250/3/6/7 */
2368 
2369 
2370 #if (defined(CPU_ARM6) && defined(ARM6_LATE_ABORT)) || defined(CPU_ARM7) || \
2371 	defined(CPU_ARM7TDMI)
2372 /*
2373  * "Late" (base updated) data abort fixup
2374  *
2375  * For ARM6 (in late-abort mode) and ARM7.
2376  *
2377  * In this model, all data-transfer instructions need fixing up.  We defer
2378  * LDM, STM, LDC and STC fixup to the early-abort handler.
2379  */
/*
 * late_abort_fixup:
 *
 *	arg is really a trapframe_t *.  In the late-abort model the base
 *	register of an aborted single data transfer (LDR/STR) has already
 *	been updated; decode the addressing mode and restore the base so
 *	the instruction can be restarted.  LDM/STM/LDC/STC fixup is then
 *	delegated to early_abort_fixup().
 *
 *	Returns ABORT_FIXUP_OK on success, or ABORT_FIXUP_FAILED when the
 *	instruction cannot be corrected.
 */
int
late_abort_fixup(void *arg)
{
	trapframe_t *frame = arg;
	u_int fault_pc;
	u_int fault_instruction;
	int saved_lr = 0;

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
		 * as the fault happened in svc mode but we need it in the
		 * usr slot so we can treat the registers as an array of ints
		 * during fixing.
		 * NOTE: This PC is in the position but writeback is not
		 * allowed on r15.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the following fixup code.
		 */

		saved_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = frame->tf_svc_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed. This should not happen as the kstack is
		 * always valid.
		 */
	}

	/* Get fault address and status from the CPU */

	fault_pc = frame->tf_pc;
	fault_instruction = *((volatile unsigned int *)fault_pc);

	/* Decode the fault instruction and fix the registers as needed */

	/* Was it a swap (SWP) instruction?  No base update to undo. */

	if ((fault_instruction & 0x0fb00ff0) == 0x01000090) {
		DFC_DISASSEMBLE(fault_pc);
	} else if ((fault_instruction & 0x0c000000) == 0x04000000) {

		/* Was it a ldr/str instruction? */
		/* This is for late abort only */

		int base;
		int offset;
		int *registers = &frame->tf_r0;

		DFC_DISASSEMBLE(fault_pc);

		/* This is for late abort only */

		if ((fault_instruction & (1 << 24)) == 0
		    || (fault_instruction & (1 << 21)) != 0) {
			/*
			 * Post-indexed (P bit clear) or pre-indexed with
			 * writeback (W bit set): the base register was
			 * updated and must be restored.
			 */

			base = (fault_instruction >> 16) & 0x0f;
			if (base == 13 &&
			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
				return ABORT_FIXUP_FAILED;
			if (base == 15)
				return ABORT_FIXUP_FAILED;
			DFC_PRINTF(("late abt fix: r%d=%08x : ",
				       base, registers[base]));
			if ((fault_instruction & (1 << 25)) == 0) {
				/* Immediate offset - easy */

				offset = fault_instruction & 0xfff;
				if ((fault_instruction & (1 << 23)))
					offset = -offset;
				registers[base] += offset;
				DFC_PRINTF(("imm=%08x ", offset));
			} else {
				/* offset is a shifted register */
				int shift;

				offset = fault_instruction & 0x0f;
				if (offset == base)
					return ABORT_FIXUP_FAILED;

				/*
				 * Register offset - hard we have to
				 * cope with shifts !
				 */
				offset = registers[offset];

				if ((fault_instruction & (1 << 4)) == 0)
					/* shift with amount */
					shift = (fault_instruction >> 7) & 0x1f;
				else {
					/* shift with register */
					if ((fault_instruction & (1 << 7)) != 0)
						/* undefined for now so bail out */
						return ABORT_FIXUP_FAILED;
					shift = ((fault_instruction >> 8) & 0xf);
					if (base == shift)
						return ABORT_FIXUP_FAILED;
					DFC_PRINTF(("shift reg=%d ", shift));
					shift = registers[shift];
				}
				DFC_PRINTF(("shift=%08x ", shift));
				/* Shift type is encoded in bits 6:5. */
				switch (((fault_instruction >> 5) & 0x3)) {
				case 0 : /* Logical left */
					offset = (int)(((u_int)offset) << shift);
					break;
				case 1 : /* Logical Right */
					if (shift == 0) shift = 32;
					offset = (int)(((u_int)offset) >> shift);
					break;
				case 2 : /* Arithmetic Right */
					if (shift == 0) shift = 32;
					offset = (int)(((int)offset) >> shift);
					break;
				case 3 : /* Rotate right (rol or rxx) */
					return ABORT_FIXUP_FAILED;
					break;
				}

				DFC_PRINTF(("abt: fixed LDR/STR with "
					       "register offset\n"));
				if ((fault_instruction & (1 << 23)))
					offset = -offset;
				DFC_PRINTF(("offset=%08x ", offset));
				registers[base] += offset;
			}
			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
		}
	}

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
		 * as the fault happened in svc mode but we need it in the
		 * usr slot so we can treat the registers as an array of ints
		 * during fixing.
		 * NOTE: This PC is in the position but writeback is not
		 * allowed on r15.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the prior fixup code.
		 */

		frame->tf_svc_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = saved_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed. This should not happen as the kstack is
		 * always valid.
		 */
	}

	/*
	 * Now let the early-abort fixup routine have a go, in case it
	 * was an LDM, STM, LDC or STC that faulted.
	 */

	return early_abort_fixup(arg);
}
2548 #endif	/* CPU_ARM6(LATE)/7/7TDMI */
2549 
2550 /*
2551  * CPU Setup code
2552  */
2553 
2554 #if defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) || \
2555 	defined(CPU_ARM8) || defined (CPU_ARM9) || defined (CPU_ARM9E) || \
2556 	defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
2557 	defined(CPU_FA526) || \
2558 	defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
2559 	defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425) || \
2560 	defined(CPU_ARM10) || defined(CPU_SHEEVA) || \
2561 	defined(CPU_ARMV6) || defined(CPU_ARMV7)
2562 
/*
 * How a boolean boot option affects the CPU control register value
 * in parse_cpu_options().
 */
#define IGN	0	/* leave cpuctrl alone */
#define OR	1	/* OR co_value into cpuctrl */
#define BIC	2	/* clear co_value bits from cpuctrl */

/* One boot-option name and its effect on the control register. */
struct cpu_option {
	const char *co_name;	/* boot option name to look up */
	int	co_falseop;	/* op applied when the option is false */
	int	co_trueop;	/* op applied when the option is true */
	int	co_value;	/* control register bit(s) affected */
};

static u_int parse_cpu_options(char *, struct cpu_option *, u_int);
2575 
2576 static u_int
2577 parse_cpu_options(char *args, struct cpu_option *optlist, u_int cpuctrl)
2578 {
2579 	int integer;
2580 
2581 	if (args == NULL)
2582 		return(cpuctrl);
2583 
2584 	while (optlist->co_name) {
2585 		if (get_bootconf_option(args, optlist->co_name,
2586 		    BOOTOPT_TYPE_BOOLEAN, &integer)) {
2587 			if (integer) {
2588 				if (optlist->co_trueop == OR)
2589 					cpuctrl |= optlist->co_value;
2590 				else if (optlist->co_trueop == BIC)
2591 					cpuctrl &= ~optlist->co_value;
2592 			} else {
2593 				if (optlist->co_falseop == OR)
2594 					cpuctrl |= optlist->co_value;
2595 				else if (optlist->co_falseop == BIC)
2596 					cpuctrl &= ~optlist->co_value;
2597 			}
2598 		}
2599 		++optlist;
2600 	}
2601 	return(cpuctrl);
2602 }
2603 #endif /* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 || CPU_SA110 */
2604 
2605 #if defined (CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) \
2606 	|| defined(CPU_ARM8)
/*
 * Boot options shared by the ARM6/ARM7[TDMI]/ARM8 setup routines:
 * combined I/D cache and write buffer control.
 */
struct cpu_option arm678_options[] = {
#ifdef COMPAT_12
	{ "nocache",		IGN, BIC, CPU_CONTROL_IDC_ENABLE },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "cpu.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
2618 
2619 #endif	/* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 */
2620 
2621 #ifdef CPU_ARM6
/* ARM6-specific boot options (cache and write buffer control). */
struct cpu_option arm6_options[] = {
	{ "arm6.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm6.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm6.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm6.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
2629 
/*
 * arm6_setup:
 *	Build the ARM6 control register value from defaults and boot
 *	options, flush the caches, and install it (also cached in
 *	curcpu()->ci_ctrl).
 */
void
arm6_setup(char *args)
{

	/* Set up default control registers bits */
	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
#if 0
	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE
		 | CPU_CONTROL_AFLT_ENABLE;
#endif

#ifdef ARM6_LATE_ABORT
	/* Use the late-abort model (see late_abort_fixup). */
	cpuctrl |= CPU_CONTROL_LABT_ENABLE;
#endif	/* ARM6_LATE_ABORT */

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
	cpuctrl = parse_cpu_options(args, arm6_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	curcpu()->ci_ctrl = cpuctrl;
	cpu_control(0xffffffff, cpuctrl);
}
2668 #endif	/* CPU_ARM6 */
2669 
2670 #ifdef CPU_ARM7
/* ARM7-specific boot options (cache, write buffer, FPA clock). */
struct cpu_option arm7_options[] = {
	{ "arm7.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm7.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm7.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm7.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
#ifdef COMPAT_12
	{ "fpaclk2",		BIC, OR,  CPU_CONTROL_CPCLK },
#endif	/* COMPAT_12 */
	{ "arm700.fpaclk",	BIC, OR,  CPU_CONTROL_CPCLK },
	{ NULL,			IGN, IGN, 0 }
};
2682 
/*
 * arm7_setup:
 *	Build the ARM7 control register value from defaults and boot
 *	options, flush the caches, and install it (also cached in
 *	curcpu()->ci_ctrl).
 */
void
arm7_setup(char *args)
{

	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
#if 0
	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
		 | CPU_CONTROL_CPCLK | CPU_CONTROL_LABT_ENABLE
		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE
		 | CPU_CONTROL_AFLT_ENABLE;
#endif

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
	cpuctrl = parse_cpu_options(args, arm7_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	curcpu()->ci_ctrl = cpuctrl;
	cpu_control(0xffffffff, cpuctrl);
}
2717 #endif	/* CPU_ARM7 */
2718 
2719 #ifdef CPU_ARM7TDMI
/* ARM7TDMI boot options (shares the arm7.* / arm700.* names). */
struct cpu_option arm7tdmi_options[] = {
	{ "arm7.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm7.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm7.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm7.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
#ifdef COMPAT_12
	{ "fpaclk2",		BIC, OR,  CPU_CONTROL_CPCLK },
#endif	/* COMPAT_12 */
	{ "arm700.fpaclk",	BIC, OR,  CPU_CONTROL_CPCLK },
	{ NULL,			IGN, IGN, 0 }
};
2731 
/*
 * arm7tdmi_setup:
 *	Build the ARM7TDMI control register value from defaults and boot
 *	options, flush the caches, and install it (also cached in
 *	curcpu()->ci_ctrl).
 */
void
arm7tdmi_setup(char *args)
{
	int cpuctrl;

	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;

	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
	cpuctrl = parse_cpu_options(args, arm7tdmi_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	curcpu()->ci_ctrl = cpuctrl;
	cpu_control(0xffffffff, cpuctrl);
}
2755 #endif	/* CPU_ARM7TDMI */
2756 
2757 #ifdef CPU_ARM8
/* ARM8-specific boot options (cache, write buffer, branch prediction). */
struct cpu_option arm8_options[] = {
	{ "arm8.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm8.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm8.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm8.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
#ifdef COMPAT_12
	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "arm8.branchpredict",	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
2770 
/*
 * arm8_setup:
 *	Build the ARM8 control register value from defaults and boot
 *	options, handle the ARM8-specific clock/test configuration
 *	options, flush the caches, and install the results.
 */
void
arm8_setup(char *args)
{
	int integer;
	int clocktest;
	int setclock = 0;	/* nonzero if any arm8.clock.*/arm8.test option was given */

	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
#if 0
	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
		 | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE;
#endif

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
	cpuctrl = parse_cpu_options(args, arm8_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Get clock configuration */
	clocktest = arm8_clock_config(0, 0) & 0x0f;

	/* Special ARM8 clock and test configuration */
	if (get_bootconf_option(args, "arm8.clock.reset", BOOTOPT_TYPE_BOOLEAN, &integer)) {
		clocktest = 0;
		setclock = 1;
	}
	if (get_bootconf_option(args, "arm8.clock.dynamic", BOOTOPT_TYPE_BOOLEAN, &integer)) {
		if (integer)
			clocktest |= 0x01;
		else
			clocktest &= ~(0x01);
		setclock = 1;
	}
	if (get_bootconf_option(args, "arm8.clock.sync", BOOTOPT_TYPE_BOOLEAN, &integer)) {
		if (integer)
			clocktest |= 0x02;
		else
			clocktest &= ~(0x02);
		setclock = 1;
	}
	if (get_bootconf_option(args, "arm8.clock.fast", BOOTOPT_TYPE_BININT, &integer)) {
		/*
		 * NOTE(review): the mask clears bits 7:6 (0xc0) while the
		 * value is inserted at bits 3:2 ((integer & 3) << 2) --
		 * these look inconsistent; confirm against the ARM8
		 * clock/test register layout before changing anything.
		 */
		clocktest = (clocktest & ~0xc0) | (integer & 3) << 2;
		setclock = 1;
	}
	if (get_bootconf_option(args, "arm8.test", BOOTOPT_TYPE_BININT, &integer)) {
		clocktest |= (integer & 7) << 5;
		setclock = 1;
	}

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	curcpu()->ci_ctrl = cpuctrl;
	cpu_control(0xffffffff, cpuctrl);

	/* Set the clock/test register */
	if (setclock)
		arm8_clock_config(0x7f, clocktest);
}
2842 #endif	/* CPU_ARM8 */
2843 
2844 #ifdef CPU_ARM9
/* ARM9 boot options (separate I/D caches, write buffer). */
struct cpu_option arm9_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm9.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm9.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "arm9.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "arm9.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
2856 
/*
 * arm9_setup:
 *	Build the ARM9 control register value from defaults and boot
 *	options, flush the caches, and install it via cpu_control()
 *	(masked, unlike the earlier CPUs' 0xffffffff writes).
 */
void
arm9_setup(char *args)
{

	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE;
	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
		 | CPU_CONTROL_ROUNDROBIN;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

#ifndef ARM_HAS_VBAR
	/* Relocate the vector page to high memory if configured. */
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	curcpu()->ci_ctrl = cpuctrl;
	cpu_control(cpuctrlmask, cpuctrl);

}
2896 #endif	/* CPU_ARM9 */
2897 
2898 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
/* ARM9E/ARM10 boot options (separate I/D caches, write buffer). */
struct cpu_option arm10_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm10.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm10.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "arm10.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "arm10.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
2910 
/*
 * arm10_setup:
 *	Build the ARM9E/ARM10 control register value from defaults and
 *	boot options, flush the caches (before and after the control
 *	register write), and install it.
 */
void
arm10_setup(char *args)
{

	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
#if 0
	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
	    | CPU_CONTROL_BPRD_ENABLE
	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
#endif

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, arm10_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

#ifndef ARM_HAS_VBAR
	/* Relocate the vector page to high memory if configured. */
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Now really make sure they are clean.  */
	__asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );

	/* Set the control register */
	curcpu()->ci_ctrl = cpuctrl;
	cpu_control(0xffffffff, cpuctrl);

	/* And again. */
	cpu_idcache_wbinv_all();
}
2955 #endif	/* CPU_ARM9E || CPU_ARM10 */
2956 
2957 #if defined(CPU_ARM11)
/* ARM11 boot options (caches and branch prediction). */
struct cpu_option arm11_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm11.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm11.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "arm11.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "arm11.branchpredict", BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
2968 
/*
 * arm11_setup:
 *	Build the ARM11 control register value from defaults and boot
 *	options, flush the caches, enable coprocessor access so VFP
 *	detection can run, and install the control register (masked).
 */
void
arm11_setup(char *args)
{

	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
#ifdef ARM_MMU_EXTENDED
	    | CPU_CONTROL_XP_ENABLE
#endif
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    /* | CPU_CONTROL_BPRD_ENABLE */;
	int cpuctrlmask = cpuctrl
	    | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BPRD_ENABLE
	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

#ifndef ARM_HAS_VBAR
	/* Relocate the vector page to high memory if configured. */
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Now really make sure they are clean.  */
	__asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );

	/* Allow detection code to find the VFP if it's fitted.  */
	armreg_cpacr_write(0x0fffffff);

	/* Set the control register */
	curcpu()->ci_ctrl = cpuctrl;
	cpu_control(cpuctrlmask, cpuctrl);

	/* And again. */
	cpu_idcache_wbinv_all();
}
3015 #endif	/* CPU_ARM11 */
3016 
3017 #if defined(CPU_ARM11MPCORE)
3018 
/*
 * arm11mpcore_setup:
 *
 *	Control register setup for ARM11 MPCore.  Reuses the
 *	arm11_options table for boot arguments.  Note the MMU enable
 *	bit is not in cpuctrlmask, so its current state is preserved
 *	by cpu_control().
 */
void
arm11mpcore_setup(char *args)
{

	int cpuctrl = CPU_CONTROL_IC_ENABLE
	    | CPU_CONTROL_DC_ENABLE
#ifdef ARM_MMU_EXTENDED
	    /* Use the extended (XP) page-table format. */
	    | CPU_CONTROL_XP_ENABLE
#endif
	    | CPU_CONTROL_BPRD_ENABLE ;
	/* Bits cpu_control() may modify; all others are preserved. */
	int cpuctrlmask = cpuctrl
	    | CPU_CONTROL_AFLT_ENABLE
	    | CPU_CONTROL_VECRELOC;

#ifdef	ARM11MPCORE_MMU_COMPAT
	/* XXX: S and R? */
#endif

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);

#ifndef ARM_HAS_VBAR
	/* Relocate the exception vectors to the high page if used. */
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Now really make sure they are clean.  */
	__asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );

	/* Allow detection code to find the VFP if it's fitted.  */
	armreg_cpacr_write(0x0fffffff);

	/*
	 * Set the control register; unlike the other setup routines
	 * this records the value returned by cpu_control().
	 */
	curcpu()->ci_ctrl = cpu_control(cpuctrlmask, cpuctrl);

	/* And again. */
	cpu_idcache_wbinv_all();
}
3063 #endif	/* CPU_ARM11MPCORE */
3064 
3065 #ifdef CPU_PJ4B
/*
 * pj4bv7_setup:
 *
 *	Control register setup for Marvell PJ4B (ARMv7) cores.  Also
 *	configures the Armada XP L2 cache description and cache
 *	maintenance hooks when L2CACHE_ENABLE is defined, and switches
 *	the dcache maintenance routines to the I/O coherency fabric
 *	variants when AURORA_IO_CACHE_COHERENCY is defined.
 */
void
pj4bv7_setup(char *args)
{
	int cpuctrl;

	pj4b_config();

	cpuctrl = CPU_CONTROL_MMU_ENABLE;
#ifdef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_UNAL_ENABLE;
#else
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif
	cpuctrl |= CPU_CONTROL_DC_ENABLE;
	cpuctrl |= CPU_CONTROL_IC_ENABLE;
	cpuctrl |= (0xf << 3);	/* bits 3..6 -- presumably v7 SBO (should-be-one) bits; TODO confirm */
	cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
	cpuctrl |= (0x5 << 16);	/* bits 16 and 18 -- presumably SBO bits; TODO confirm */
	cpuctrl |= CPU_CONTROL_XP_ENABLE;

#ifndef ARM_HAS_VBAR
	/* Relocate the exception vectors to the high page if used. */
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;
#endif

#ifdef L2CACHE_ENABLE
	/* Setup L2 cache */
	arm_scache.cache_type = CPU_CT_CTYPE_WT;
	arm_scache.cache_unified = 1;
	arm_scache.dcache_type = arm_scache.icache_type = CACHE_TYPE_PIPT;
	arm_scache.dcache_size = arm_scache.icache_size = ARMADAXP_L2_SIZE;
	arm_scache.dcache_ways = arm_scache.icache_ways = ARMADAXP_L2_WAYS;
	arm_scache.dcache_way_size = arm_scache.icache_way_size =
	    ARMADAXP_L2_WAY_SIZE;
	arm_scache.dcache_line_size = arm_scache.icache_line_size =
	    ARMADAXP_L2_LINE_SIZE;
	arm_scache.dcache_sets = arm_scache.icache_sets =
	    ARMADAXP_L2_SETS;

	cpufuncs.cf_sdcache_wbinv_range	= armadaxp_sdcache_wbinv_range;
	cpufuncs.cf_sdcache_inv_range	= armadaxp_sdcache_inv_range;
	cpufuncs.cf_sdcache_wb_range	= armadaxp_sdcache_wb_range;
#endif

#ifdef AURORA_IO_CACHE_COHERENCY
	/* use AMBA and I/O Coherency Fabric to maintain cache */
	cpufuncs.cf_dcache_wbinv_range	= pj4b_dcache_cfu_wbinv_range;
	cpufuncs.cf_dcache_inv_range	= pj4b_dcache_cfu_inv_range;
	cpufuncs.cf_dcache_wb_range	= pj4b_dcache_cfu_wb_range;

	/* secondary-cache maintenance is not needed in this mode */
	cpufuncs.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop;
	cpufuncs.cf_sdcache_inv_range	= (void *)cpufunc_nullop;
	cpufuncs.cf_sdcache_wb_range	= (void *)cpufunc_nullop;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	cpu_control(0xffffffff, cpuctrl);

	/* And again. */
	cpu_idcache_wbinv_all();
#ifdef L2CACHE_ENABLE
	armadaxp_sdcache_wbinv_all();
#endif

	curcpu()->ci_ctrl = cpuctrl;
}
3135 #endif /* CPU_PJ4B */
3136 
3137 #if defined(CPU_ARMV7)
3138 struct cpu_option armv7_options[] = {
3139     { "cpu.cache",      BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3140     { "cpu.nocache",    OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3141     { "armv7.cache",    BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3142     { "armv7.icache",   BIC, OR,  CPU_CONTROL_IC_ENABLE },
3143     { "armv7.dcache",   BIC, OR,  CPU_CONTROL_DC_ENABLE },
3144 	{ NULL, 			IGN, IGN, 0}
3145 };
3146 
3147 void
3148 armv7_setup(char *args)
3149 {
3150 
3151 	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_IC_ENABLE
3152 	    | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_BPRD_ENABLE
3153 #ifdef __ARMEB__
3154 	    | CPU_CONTROL_EX_BEND
3155 #endif
3156 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3157 	    | CPU_CONTROL_AFLT_ENABLE;
3158 #endif
3159 	    | CPU_CONTROL_UNAL_ENABLE;
3160 
3161 	int cpuctrlmask = cpuctrl | CPU_CONTROL_AFLT_ENABLE;
3162 
3163 
3164 	cpuctrl = parse_cpu_options(args, armv7_options, cpuctrl);
3165 
3166 #ifndef ARM_HAS_VBAR
3167 	if (vector_page == ARM_VECTORS_HIGH)
3168 		cpuctrl |= CPU_CONTROL_VECRELOC;
3169 #endif
3170 
3171 	/* Clear out the cache */
3172 	cpu_idcache_wbinv_all();
3173 
3174 	/* Set the control register */
3175 	curcpu()->ci_ctrl = cpuctrl;
3176 	cpu_control(cpuctrlmask, cpuctrl);
3177 }
3178 #endif /* CPU_ARMV7 */
3179 
3180 
3181 #if defined(CPU_ARM1136) || defined(CPU_ARM1176)
/*
 * arm11x6_setup:
 *
 *	Control register setup for ARM1136 and ARM1176 cores.  Builds
 *	the CP15 control value, applies "cpu.*"/"arm11.*" boot options,
 *	enables the errata workarounds described inline (via the FI
 *	control bit and the auxiliary control register), and installs
 *	everything with cache writeback/invalidation around the switch.
 */
void
arm11x6_setup(char *args)
{
	int cpuctrl, cpuctrl_wax;
	uint32_t auxctrl;
	uint32_t sbz=0;		/* "should be zero" operand for the MCR below */
	uint32_t cpuid;

	cpuid = cpu_idnum();

	cpuctrl =
		CPU_CONTROL_MMU_ENABLE  |
		CPU_CONTROL_DC_ENABLE   |
		CPU_CONTROL_WBUF_ENABLE |
		CPU_CONTROL_32BP_ENABLE |
		CPU_CONTROL_32BD_ENABLE |
		CPU_CONTROL_LABT_ENABLE |
		CPU_CONTROL_UNAL_ENABLE |
#ifdef ARM_MMU_EXTENDED
		CPU_CONTROL_XP_ENABLE   |
#else
		CPU_CONTROL_SYST_ENABLE |
#endif
		CPU_CONTROL_IC_ENABLE;

	/*
	 * "write as existing" bits
	 * inverse of this is mask
	 */
	cpuctrl_wax =
		(3 << 30) |
		(1 << 29) |
		(1 << 28) |
		(3 << 26) |
		(3 << 19) |
		(1 << 17);

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

#ifndef ARM_HAS_VBAR
	/* Relocate the exception vectors to the high page if used. */
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;
#endif

	auxctrl = armreg_auxctl_read();
	/*
	 * This options enables the workaround for the 364296 ARM1136
	 * r0pX errata (possible cache data corruption with
	 * hit-under-miss enabled). It sets the undocumented bit 31 in
	 * the auxiliary control register and the FI bit in the control
	 * register, thus disabling hit-under-miss without putting the
	 * processor into full low interrupt latency mode. ARM11MPCore
	 * is not affected.
	 */
	if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1136JS) { /* ARM1136JSr0pX */
		cpuctrl |= CPU_CONTROL_FI_ENABLE;
		auxctrl |= ARM1136_AUXCTL_PFI;
	}

	/*
	 * This enables the workaround for the following ARM1176 r0pX
	 * errata.
	 *
	 * 394601: In low interrupt latency configuration, interrupted clean
	 * and invalidate operation may not clean dirty data.
	 *
	 * 716151: Clean Data Cache line by MVA can corrupt subsequent
	 * stores to the same cache line.
	 *
	 * 714068: Prefetch Instruction Cache Line or Invalidate Instruction
	 * Cache Line by MVA can cause deadlock.
	 */
	if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */
		/* 394601 and 716151 */
		cpuctrl |= CPU_CONTROL_FI_ENABLE;
		auxctrl |= ARM1176_AUXCTL_FIO;

		/* 714068 */
		auxctrl |= ARM1176_AUXCTL_PHD;
	}

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Now really make sure they are clean.  */
	__asm volatile ("mcr\tp15, 0, %0, c7, c7, 0" : : "r"(sbz));

	/* Allow detection code to find the VFP if it's fitted.  */
	armreg_cpacr_write(0x0fffffff);

	/* Set the control register */
	curcpu()->ci_ctrl = cpuctrl;
	cpu_control(~cpuctrl_wax, cpuctrl);

	/* Update auxctlr */
	armreg_auxctl_write(auxctrl);

	/* And again. */
	cpu_idcache_wbinv_all();
}
3290 #endif	/* CPU_ARM1136 || CPU_ARM1176 */
3291 
3292 #ifdef CPU_SA110
/*
 * Boot-argument options recognized by sa110_setup(); each entry sets
 * or clears cache / write-buffer enable bits via parse_cpu_options().
 * The COMPAT_12 entries keep the old un-prefixed option names working.
 */
struct cpu_option sa110_options[] = {
#ifdef COMPAT_12
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa110.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa110.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "sa110.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "sa110.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
3308 
/*
 * sa110_setup:
 *
 *	Control register setup for StrongARM SA-110.  Applies
 *	"cpu.*"/"sa110.*" boot options, installs the control register
 *	with all bits writable, then enables clock switching.
 */
void
sa110_setup(char *args)
{
	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE;
#if 0
	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
		 | CPU_CONTROL_CPCLK;
#endif

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, sa110_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

#ifndef ARM_HAS_VBAR
	/* Relocate the exception vectors to the high page if used. */
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	curcpu()->ci_ctrl = cpuctrl;
#if 0
	cpu_control(cpuctrlmask, cpuctrl);
#endif
	cpu_control(0xffffffff, cpuctrl);

	/*
	 * enable clockswitching, note that this doesn't read or write to r0,
	 * r0 is just to make it valid asm
	 */
	__asm volatile ("mcr p15, 0, r0, c15, c1, 2");
}
3357 #endif	/* CPU_SA110 */
3358 
3359 #if defined(CPU_SA1100) || defined(CPU_SA1110)
/*
 * Boot-argument options recognized by sa11x0_setup(); each entry sets
 * or clears cache / write-buffer enable bits via parse_cpu_options().
 * The COMPAT_12 entries keep the old un-prefixed option names working.
 */
struct cpu_option sa11x0_options[] = {
#ifdef COMPAT_12
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa11x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa11x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "sa11x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "sa11x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
3375 
/*
 * sa11x0_setup:
 *
 *	Control register setup for StrongARM SA-1100/SA-1110.  Applies
 *	"cpu.*"/"sa11x0.*" boot options and installs the control
 *	register with all bits writable.
 */
void
sa11x0_setup(char *args)
{

	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
#if 0
	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
#endif

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, sa11x0_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

#ifndef ARM_HAS_VBAR
	/* Relocate the exception vectors to the high page if used. */
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	curcpu()->ci_ctrl = cpuctrl;
	cpu_control(0xffffffff, cpuctrl);
}
3416 #endif	/* CPU_SA1100 || CPU_SA1110 */
3417 
3418 #if defined(CPU_FA526)
/*
 * Boot-argument options recognized by fa526_setup(); only the generic
 * "cpu.*" names (plus COMPAT_12 aliases) are supported here -- there
 * are no fa526-prefixed entries.
 */
struct cpu_option fa526_options[] = {
#ifdef COMPAT_12
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
3430 
/*
 * fa526_setup:
 *
 *	Control register setup for Faraday FA526 cores.  Applies
 *	"cpu.*" boot options and installs the control register with
 *	all bits writable.
 */
void
fa526_setup(char *args)
{

	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
#if 0
	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
#endif

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, fa526_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

#ifndef ARM_HAS_VBAR
	/* Relocate the exception vectors to the high page if used. */
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	curcpu()->ci_ctrl = cpuctrl;
	cpu_control(0xffffffff, cpuctrl);
}
3471 #endif	/* CPU_FA526 */
3472 
3473 #if defined(CPU_IXP12X0)
/*
 * Boot-argument options recognized by ixp12x0_setup(); each entry
 * sets or clears cache / write-buffer enable bits via
 * parse_cpu_options().
 */
struct cpu_option ixp12x0_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "ixp12x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "ixp12x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "ixp12x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "ixp12x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
3485 
/*
 * ixp12x0_setup:
 *
 *	Control register setup for Intel IXP12x0 cores.  Applies
 *	"cpu.*"/"ixp12x0.*" boot options.  Unlike most of its
 *	siblings, this routine restricts cpu_control() to an explicit
 *	mask rather than writing all bits.
 */
void
ixp12x0_setup(char *args)
{

	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE;

	/* Bits cpu_control() may modify; all others are preserved. */
	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_IC_ENABLE
		 | CPU_CONTROL_VECRELOC;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, ixp12x0_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

#ifndef ARM_HAS_VBAR
	/* Relocate the exception vectors to the high page if used. */
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	curcpu()->ci_ctrl = cpuctrl;
	/* cpu_control(0xffffffff, cpuctrl); */
	cpu_control(cpuctrlmask, cpuctrl);
}
3523 #endif /* CPU_IXP12X0 */
3524 
3525 #if defined(CPU_XSCALE)
/*
 * Boot-argument options recognized by xscale_setup(); each entry sets
 * or clears cache / branch-prediction enable bits via
 * parse_cpu_options().  COMPAT_12 keeps the old un-prefixed names.
 */
struct cpu_option xscale_options[] = {
#ifdef COMPAT_12
	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
#endif	/* COMPAT_12 */
	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "xscale.branchpredict", BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "xscale.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "xscale.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "xscale.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
3540 
/*
 * xscale_setup:
 *
 *	Control register setup for Intel XScale cores.  Applies
 *	"cpu.*"/"xscale.*" boot options, installs the control
 *	register, then configures write coalescing through the
 *	auxiliary control register (the K bit).
 */
void
xscale_setup(char *args)
{
	uint32_t auxctl;

	/*
	 * The XScale Write Buffer is always enabled.  Our option
	 * is to enable/disable coalescing.  Note that bits 6:3
	 * must always be enabled.
	 */

	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
		 | CPU_CONTROL_BPRD_ENABLE;
#if 0
	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
#endif

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

#ifndef ARM_HAS_VBAR
	/* Relocate the exception vectors to the high page if used. */
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/*
	 * Set the control register.  Note that bits 6:3 must always
	 * be set to 1.
	 */
	curcpu()->ci_ctrl = cpuctrl;
#if 0
	cpu_control(cpuctrlmask, cpuctrl);
#endif
	cpu_control(0xffffffff, cpuctrl);

	/* Make sure write coalescing is turned on */
	auxctl = armreg_auxctl_read();
#ifdef XSCALE_NO_COALESCE_WRITES
	auxctl |= XSCALE_AUXCTL_K;
#else
	auxctl &= ~XSCALE_AUXCTL_K;
#endif
	armreg_auxctl_write(auxctl);
}
3604 #endif	/* CPU_XSCALE */
3605 
3606 #if defined(CPU_SHEEVA)
/*
 * Boot-argument options recognized by sheeva_setup(); each entry sets
 * or clears cache / write-buffer enable bits via parse_cpu_options().
 */
struct cpu_option sheeva_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sheeva.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sheeva.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "sheeva.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "sheeva.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
3618 
/*
 * sheeva_setup:
 *
 *	Control register setup for Marvell Sheeva cores.  Applies
 *	"cpu.*"/"sheeva.*" boot options, enables dcache streaming and
 *	write-allocate through the Sheeva extra-control register,
 *	optionally configures and enables the L2 cache
 *	(SHEEVA_L2_CACHE), and installs the control register with
 *	cache writeback/invalidation around the switch.
 */
void
sheeva_setup(char *args)
{
	int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
#if 0
	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
	    | CPU_CONTROL_BPRD_ENABLE
	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
#endif

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, sheeva_options, cpuctrl);

	/* Enable DCache Streaming Switch and Write Allocate */
	uint32_t sheeva_ext = armreg_sheeva_xctrl_read();

	sheeva_ext |= FC_DCACHE_STREAM_EN | FC_WR_ALLOC_EN;
#ifdef SHEEVA_L2_CACHE
	sheeva_ext |= FC_L2CACHE_EN;
	sheeva_ext &= ~FC_L2_PREF_DIS;
#endif

	armreg_sheeva_xctrl_write(sheeva_ext);

#ifdef SHEEVA_L2_CACHE
	/* Describe the 256KB 4-way L2 so the cache code can manage it. */
#ifndef SHEEVA_L2_CACHE_WT
	arm_scache.cache_type = CPU_CT_CTYPE_WB2;
#elif CPU_CT_CTYPE_WT != 0
	arm_scache.cache_type = CPU_CT_CTYPE_WT;
#endif
	arm_scache.cache_unified = 1;
	arm_scache.dcache_type = arm_scache.icache_type = CACHE_TYPE_PIPT;
	arm_scache.dcache_size = arm_scache.icache_size = 256*1024;
	arm_scache.dcache_ways = arm_scache.icache_ways = 4;
	arm_scache.dcache_way_size = arm_scache.icache_way_size =
	    arm_scache.dcache_size / arm_scache.dcache_ways;
	arm_scache.dcache_line_size = arm_scache.icache_line_size = 32;
	arm_scache.dcache_sets = arm_scache.icache_sets =
	    arm_scache.dcache_way_size / arm_scache.dcache_line_size;

	cpufuncs.cf_sdcache_wb_range = sheeva_sdcache_wb_range;
	cpufuncs.cf_sdcache_inv_range = sheeva_sdcache_inv_range;
	cpufuncs.cf_sdcache_wbinv_range = sheeva_sdcache_wbinv_range;
#endif /* SHEEVA_L2_CACHE */

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

#ifndef ARM_HAS_VBAR
	/* Relocate the exception vectors to the high page if used. */
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Now really make sure they are clean.  */
	__asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );

	/* Set the control register */
	curcpu()->ci_ctrl = cpuctrl;
	cpu_control(0xffffffff, cpuctrl);

	/* And again. */
	cpu_idcache_wbinv_all();
#ifdef SHEEVA_L2_CACHE
	sheeva_sdcache_wbinv_all();
#endif
}
3697 #endif	/* CPU_SHEEVA */
3698 
3699 
3700 bool
3701 cpu_gtmr_exists_p(void)
3702 {
3703 	return armreg_pfr1_read() & ARM_PFR1_GTIMER_MASK;
3704 }
3705 
3706 u_int
3707 cpu_clusterid(void)
3708 {
3709 	return __SHIFTOUT(armreg_mpidr_read(), MPIDR_AFF1);
3710 }
3711 
3712 bool
3713 cpu_earlydevice_va_p(void)
3714 {
3715 	return armreg_sctlr_read() & CPU_CONTROL_MMU_ENABLE;
3716 }
3717