/*	$NetBSD: cpufunc.c,v 1.103 2011/03/10 08:06:27 bsh Exp $	*/

/*
 * arm7tdmi support code Copyright (c) 2001 John Fremlin
 * arm8 support code Copyright (c) 1997 ARM Limited
 * arm8 support code Copyright (c) 1997 Causality Limited
 * arm9 support code Copyright (C) 2001 ARM Ltd
 * arm11 support code Copyright (c) 2007 Microsoft
 * cortexa8 support code Copyright (c) 2008 3am Software Foundry
 * cortexa8 improvements Copyright (c) Goeran Weinholt
 * Copyright (c) 1997 Mark Brinicombe.
 * Copyright (c) 1997 Causality Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Causality Limited.
 * 4. The name of Causality Limited may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpufunc.c
 *
 * C functions for supporting CPU / MMU / TLB specific operations.
 *
 * Created	: 30/01/97
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cpufunc.c,v 1.103 2011/03/10 08:06:27 bsh Exp $");

#include "opt_compat_netbsd.h"
#include "opt_cpuoptions.h"
#include "opt_perfctrs.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/pmc.h>
#include <sys/systm.h>
#include <machine/cpu.h>
#include <machine/bootconfig.h>
#include <arch/arm/arm/disassem.h>

#include <uvm/uvm.h>

#include <arm/cpuconf.h>
#include <arm/cpufunc.h>

#ifdef CPU_XSCALE_80200
#include <arm/xscale/i80200reg.h>
#include <arm/xscale/i80200var.h>
#endif

#ifdef CPU_XSCALE_80321
#include <arm/xscale/i80321reg.h>
#include <arm/xscale/i80321var.h>
#endif

#ifdef CPU_XSCALE_IXP425
#include <arm/xscale/ixp425reg.h>
#include <arm/xscale/ixp425var.h>
#endif

#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321)
#include <arm/xscale/xscalereg.h>
#endif

#if defined(PERFCTRS)
struct arm_pmc_funcs *arm_pmc;
#endif

/* PRIMARY CACHE VARIABLES */
int	arm_picache_size;
int	arm_picache_line_size;
int	arm_picache_ways;

int	arm_pdcache_size;	/* and unified */
int	arm_pdcache_line_size;
int	arm_pdcache_ways;
#if (ARM_MMU_V6 + ARM_MMU_V7) != 0
int	arm_cache_prefer_mask;
#endif

int	arm_pcache_type;
int	arm_pcache_unified;

int	arm_dcache_align;
int	arm_dcache_align_mask;

/* 1 == use cpu_sleep(), 0 == don't */
int cpu_do_powersave;

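/*
 * Each supported core family gets its own cpu_functions vector below;
 * set_cpufuncs() picks one at boot and copies it into the global
 * "cpufuncs".  The rest of the kernel reaches these operations through
 * wrapper macros in <arm/cpufunc.h>, roughly of this shape (a sketch of
 * the idiom, not a verbatim copy of the header):
 *
 *	#define	cpu_tlb_flushID()	cpufuncs.cf_tlb_flushID()
 *	#define	cpu_setttb(t)		cpufuncs.cf_setttb(t)
 *
 * so a single kernel image can carry support for several CPU families
 * and bind the right implementation at run time.
 */
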
#ifdef CPU_ARM2
struct cpu_functions arm2_cpufuncs = {
	/* CPU functions */

	.cf_id			= arm2_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= (void *)cpufunc_nullop,

	/* TLB functions */

	.cf_tlb_flushID		= cpufunc_nullop,
	.cf_tlb_flushID_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushI		= cpufunc_nullop,
	.cf_tlb_flushI_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushD		= cpufunc_nullop,
	.cf_tlb_flushD_SE	= (void *)cpufunc_nullop,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm3_cache_flush,
	.cf_dcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_dcache_inv_range	= (void *)cpufunc_nullop,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= cpufunc_nullop,
	.cf_idcache_wbinv_range	= (void *)cpufunc_nullop,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= early_abort_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_setup		= (void *)cpufunc_nullop

};
#endif	/* CPU_ARM2 */

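/*
 * A note on the (void *) casts above (the same pattern recurs in all of
 * the tables below): cpufunc_nullop takes no arguments, so storing it in
 * slots whose prototypes take an address/length pair only compiles via
 * the cast.  A null operation never looks at its arguments, and on the
 * ARM calling convention the extra register arguments are simply
 * ignored, but the casts do defeat compile-time type checking of these
 * entries.
 */
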
#ifdef CPU_ARM250
struct cpu_functions arm250_cpufuncs = {
	/* CPU functions */

	.cf_id			= arm250_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= (void *)cpufunc_nullop,

	/* TLB functions */

	.cf_tlb_flushID		= cpufunc_nullop,
	.cf_tlb_flushID_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushI		= cpufunc_nullop,
	.cf_tlb_flushI_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushD		= cpufunc_nullop,
	.cf_tlb_flushD_SE	= (void *)cpufunc_nullop,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm3_cache_flush,
	.cf_dcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_dcache_inv_range	= (void *)cpufunc_nullop,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= cpufunc_nullop,
	.cf_idcache_wbinv_range	= (void *)cpufunc_nullop,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= early_abort_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_setup		= (void *)cpufunc_nullop

};
#endif	/* CPU_ARM250 */

#ifdef CPU_ARM3
struct cpu_functions arm3_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= arm3_control,

	/* TLB functions */

	.cf_tlb_flushID		= cpufunc_nullop,
	.cf_tlb_flushID_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushI		= cpufunc_nullop,
	.cf_tlb_flushI_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushD		= cpufunc_nullop,
	.cf_tlb_flushD_SE	= (void *)cpufunc_nullop,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm3_cache_flush,
	.cf_dcache_wbinv_range	= (void *)arm3_cache_flush,
	.cf_dcache_inv_range	= (void *)arm3_cache_flush,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm3_cache_flush,
	.cf_idcache_wbinv_range	= (void *)arm3_cache_flush,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= early_abort_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_setup		= (void *)cpufunc_nullop

};
#endif	/* CPU_ARM3 */

#ifdef CPU_ARM6
struct cpu_functions arm6_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm67_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm67_tlb_flush,
	.cf_tlb_flushID_SE	= arm67_tlb_purge,
	.cf_tlb_flushI		= arm67_tlb_flush,
	.cf_tlb_flushI_SE	= arm67_tlb_purge,
	.cf_tlb_flushD		= arm67_tlb_flush,
	.cf_tlb_flushD_SE	= arm67_tlb_purge,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm67_cache_flush,
	.cf_dcache_wbinv_range	= (void *)arm67_cache_flush,
	.cf_dcache_inv_range	= (void *)arm67_cache_flush,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm67_cache_flush,
	.cf_idcache_wbinv_range	= (void *)arm67_cache_flush,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

#ifdef ARM6_LATE_ABORT
	.cf_dataabt_fixup	= late_abort_fixup,
#else
	.cf_dataabt_fixup	= early_abort_fixup,
#endif
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm67_context_switch,

	.cf_setup		= arm6_setup

};
#endif	/* CPU_ARM6 */

#ifdef CPU_ARM7
struct cpu_functions arm7_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm67_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm67_tlb_flush,
	.cf_tlb_flushID_SE	= arm67_tlb_purge,
	.cf_tlb_flushI		= arm67_tlb_flush,
	.cf_tlb_flushI_SE	= arm67_tlb_purge,
	.cf_tlb_flushD		= arm67_tlb_flush,
	.cf_tlb_flushD_SE	= arm67_tlb_purge,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm67_cache_flush,
	.cf_dcache_wbinv_range	= (void *)arm67_cache_flush,
	.cf_dcache_inv_range	= (void *)arm67_cache_flush,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm67_cache_flush,
	.cf_idcache_wbinv_range	= (void *)arm67_cache_flush,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= late_abort_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm67_context_switch,

	.cf_setup		= arm7_setup

};
#endif	/* CPU_ARM7 */

#ifdef CPU_ARM7TDMI
struct cpu_functions arm7tdmi_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm7tdmi_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm7tdmi_tlb_flushID,
	.cf_tlb_flushID_SE	= arm7tdmi_tlb_flushID_SE,
	.cf_tlb_flushI		= arm7tdmi_tlb_flushID,
	.cf_tlb_flushI_SE	= arm7tdmi_tlb_flushID_SE,
	.cf_tlb_flushD		= arm7tdmi_tlb_flushID,
	.cf_tlb_flushD_SE	= arm7tdmi_tlb_flushID_SE,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm7tdmi_cache_flushID,
	.cf_dcache_wbinv_range	= (void *)arm7tdmi_cache_flushID,
	.cf_dcache_inv_range	= (void *)arm7tdmi_cache_flushID,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm7tdmi_cache_flushID,
	.cf_idcache_wbinv_range	= (void *)arm7tdmi_cache_flushID,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= late_abort_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm7tdmi_context_switch,

	.cf_setup		= arm7tdmi_setup

};
#endif	/* CPU_ARM7TDMI */

#ifdef CPU_ARM8
struct cpu_functions arm8_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm8_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm8_tlb_flushID,
	.cf_tlb_flushID_SE	= arm8_tlb_flushID_SE,
	.cf_tlb_flushI		= arm8_tlb_flushID,
	.cf_tlb_flushI_SE	= arm8_tlb_flushID_SE,
	.cf_tlb_flushD		= arm8_tlb_flushID,
	.cf_tlb_flushD_SE	= arm8_tlb_flushID_SE,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm8_cache_purgeID,
	.cf_dcache_wbinv_range	= (void *)arm8_cache_purgeID,
/*XXX*/	.cf_dcache_inv_range	= (void *)arm8_cache_purgeID,
	.cf_dcache_wb_range	= (void *)arm8_cache_cleanID,

	.cf_idcache_wbinv_all	= arm8_cache_purgeID,
	.cf_idcache_wbinv_range = (void *)arm8_cache_purgeID,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm8_context_switch,

	.cf_setup		= arm8_setup
};
#endif	/* CPU_ARM8 */

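/*
 * The XXX-tagged cf_dcache_inv_range entries above and below deserve a
 * note: on cores for which this file has no dedicated invalidate-only
 * range primitive, the slot points at a write-back-and-invalidate
 * (purge) routine instead.  That is the conservative substitute: it
 * costs an extra write-back, but can never discard dirty data sharing a
 * cache line with the target range.
 */
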
#ifdef CPU_ARM9
struct cpu_functions arm9_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm9_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= arm9_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= arm9_icache_sync_all,
	.cf_icache_sync_range	= arm9_icache_sync_range,

	.cf_dcache_wbinv_all	= arm9_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= arm9_dcache_wbinv_range,
/*XXX*/	.cf_dcache_inv_range	= arm9_dcache_wbinv_range,
	.cf_dcache_wb_range	= arm9_dcache_wb_range,

	.cf_idcache_wbinv_all	= arm9_idcache_wbinv_all,
	.cf_idcache_wbinv_range = arm9_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm9_context_switch,

	.cf_setup		= arm9_setup

};
#endif /* CPU_ARM9 */

#if defined(CPU_ARM9E) || defined(CPU_ARM10)
struct cpu_functions armv5_ec_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= armv5_ec_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= arm10_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= arm10_tlb_flushI_SE,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv5_ec_icache_sync_all,
	.cf_icache_sync_range	= armv5_ec_icache_sync_range,

	.cf_dcache_wbinv_all	= armv5_ec_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= armv5_ec_dcache_wbinv_range,
/*XXX*/	.cf_dcache_inv_range	= armv5_ec_dcache_wbinv_range,
	.cf_dcache_wb_range	= armv5_ec_dcache_wb_range,

	.cf_idcache_wbinv_all	= armv5_ec_idcache_wbinv_all,
	.cf_idcache_wbinv_range = armv5_ec_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm10_context_switch,

	.cf_setup		= arm10_setup

};
#endif /* CPU_ARM9E || CPU_ARM10 */

#ifdef CPU_ARM10
struct cpu_functions arm10_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= armv5_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= arm10_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= arm10_tlb_flushI_SE,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv5_icache_sync_all,
	.cf_icache_sync_range	= armv5_icache_sync_range,

	.cf_dcache_wbinv_all	= armv5_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= armv5_dcache_wbinv_range,
/*XXX*/	.cf_dcache_inv_range	= armv5_dcache_wbinv_range,
	.cf_dcache_wb_range	= armv5_dcache_wb_range,

	.cf_idcache_wbinv_all	= armv5_idcache_wbinv_all,
	.cf_idcache_wbinv_range = armv5_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm10_context_switch,

	.cf_setup		= arm10_setup

};
#endif /* CPU_ARM10 */

#ifdef CPU_ARM11
struct cpu_functions arm11_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm11_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm11_tlb_flushID,
	.cf_tlb_flushID_SE	= arm11_tlb_flushID_SE,
	.cf_tlb_flushI		= arm11_tlb_flushI,
	.cf_tlb_flushI_SE	= arm11_tlb_flushI_SE,
	.cf_tlb_flushD		= arm11_tlb_flushD,
	.cf_tlb_flushD_SE	= arm11_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv6_icache_sync_all,
	.cf_icache_sync_range	= armv6_icache_sync_range,

	.cf_dcache_wbinv_all	= armv6_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= armv6_dcache_wbinv_range,
	.cf_dcache_inv_range	= armv6_dcache_inv_range,
	.cf_dcache_wb_range	= armv6_dcache_wb_range,

	.cf_idcache_wbinv_all	= armv6_idcache_wbinv_all,
	.cf_idcache_wbinv_range = armv6_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= arm11_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= arm11_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm11_context_switch,

	.cf_setup		= arm11_setup

};
#endif /* CPU_ARM11 */

#ifdef CPU_ARM1136
struct cpu_functions arm1136_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm1136_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm11_tlb_flushID,
	.cf_tlb_flushID_SE	= arm11_tlb_flushID_SE,
	.cf_tlb_flushI		= arm11_tlb_flushI,
	.cf_tlb_flushI_SE	= arm11_tlb_flushI_SE,
	.cf_tlb_flushD		= arm11_tlb_flushD,
	.cf_tlb_flushD_SE	= arm11_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= arm1136_icache_sync_all,	/* 411920 */
	.cf_icache_sync_range	= arm1136_icache_sync_range,	/* 371025 */

	.cf_dcache_wbinv_all	= arm1136_dcache_wbinv_all,	/* 411920 */
	.cf_dcache_wbinv_range	= armv6_dcache_wbinv_range,
	.cf_dcache_inv_range	= armv6_dcache_inv_range,
	.cf_dcache_wb_range	= armv6_dcache_wb_range,

	.cf_idcache_wbinv_all	= arm1136_idcache_wbinv_all,	/* 411920 */
	.cf_idcache_wbinv_range = arm1136_idcache_wbinv_range,	/* 371025 */

	/* Other functions */

	.cf_flush_prefetchbuf	= arm1136_flush_prefetchbuf,
	.cf_drain_writebuf	= arm11_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= arm11_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm11_context_switch,

	.cf_setup		= arm1136_setup

};
#endif /* CPU_ARM1136 */

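/*
 * The numeric comments in the table above (411920, 371025) appear to be
 * ARM1136 erratum numbers: the arm1136_* routines exist to work around
 * those errata (411920, for instance, concerns "invalidate entire
 * instruction cache" failing to invalidate some lines), which is why
 * only the affected armv6 entry points are overridden there.
 */
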
#ifdef CPU_ARM11MPCORE
struct cpu_functions arm11mpcore_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm11_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm11_tlb_flushID,
	.cf_tlb_flushID_SE	= arm11_tlb_flushID_SE,
	.cf_tlb_flushI		= arm11_tlb_flushI,
	.cf_tlb_flushI_SE	= arm11_tlb_flushI_SE,
	.cf_tlb_flushD		= arm11_tlb_flushD,
	.cf_tlb_flushD_SE	= arm11_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv6_icache_sync_all,
	.cf_icache_sync_range	= armv5_icache_sync_range,

	.cf_dcache_wbinv_all	= armv6_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= armv5_dcache_wbinv_range,
	.cf_dcache_inv_range	= armv5_dcache_inv_range,
	.cf_dcache_wb_range	= armv5_dcache_wb_range,

	.cf_idcache_wbinv_all	= armv6_idcache_wbinv_all,
	.cf_idcache_wbinv_range = armv5_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= arm11_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= arm11_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm11_context_switch,

	.cf_setup		= arm11mpcore_setup

};
#endif /* CPU_ARM11MPCORE */

#ifdef CPU_SA110
struct cpu_functions sa110_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= sa1_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= sa1_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= sa1_cache_syncI,
	.cf_icache_sync_range	= sa1_cache_syncI_rng,

	.cf_dcache_wbinv_all	= sa1_cache_purgeD,
	.cf_dcache_wbinv_range	= sa1_cache_purgeD_rng,
/*XXX*/	.cf_dcache_inv_range	= sa1_cache_purgeD_rng,
	.cf_dcache_wb_range	= sa1_cache_cleanD_rng,

	.cf_idcache_wbinv_all	= sa1_cache_purgeID,
	.cf_idcache_wbinv_range	= sa1_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= sa110_context_switch,

	.cf_setup		= sa110_setup
};
#endif	/* CPU_SA110 */

#if defined(CPU_SA1100) || defined(CPU_SA1110)
struct cpu_functions sa11x0_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= sa1_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= sa1_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= sa1_cache_syncI,
	.cf_icache_sync_range	= sa1_cache_syncI_rng,

	.cf_dcache_wbinv_all	= sa1_cache_purgeD,
	.cf_dcache_wbinv_range	= sa1_cache_purgeD_rng,
/*XXX*/	.cf_dcache_inv_range	= sa1_cache_purgeD_rng,
	.cf_dcache_wb_range	= sa1_cache_cleanD_rng,

	.cf_idcache_wbinv_all	= sa1_cache_purgeID,
	.cf_idcache_wbinv_range	= sa1_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf	= sa11x0_drain_readbuf,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= sa11x0_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= sa11x0_context_switch,

	.cf_setup		= sa11x0_setup
};
#endif	/* CPU_SA1100 || CPU_SA1110 */

#if defined(CPU_FA526)
struct cpu_functions fa526_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= fa526_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= fa526_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= fa526_tlb_flushI_SE,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= fa526_icache_sync_all,
	.cf_icache_sync_range	= fa526_icache_sync_range,

	.cf_dcache_wbinv_all	= fa526_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= fa526_dcache_wbinv_range,
	.cf_dcache_inv_range	= fa526_dcache_inv_range,
	.cf_dcache_wb_range	= fa526_dcache_wb_range,

	.cf_idcache_wbinv_all	= fa526_idcache_wbinv_all,
	.cf_idcache_wbinv_range	= fa526_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= fa526_flush_prefetchbuf,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= fa526_flush_brnchtgt_E,

	.cf_sleep		= fa526_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= fa526_context_switch,

	.cf_setup		= fa526_setup
};
#endif	/* CPU_FA526 */

#ifdef CPU_IXP12X0
struct cpu_functions ixp12x0_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= sa1_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= sa1_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= sa1_cache_syncI,
	.cf_icache_sync_range	= sa1_cache_syncI_rng,

	.cf_dcache_wbinv_all	= sa1_cache_purgeD,
	.cf_dcache_wbinv_range	= sa1_cache_purgeD_rng,
/*XXX*/	.cf_dcache_inv_range	= sa1_cache_purgeD_rng,
	.cf_dcache_wb_range	= sa1_cache_cleanD_rng,

	.cf_idcache_wbinv_all	= sa1_cache_purgeID,
	.cf_idcache_wbinv_range	= sa1_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf	= ixp12x0_drain_readbuf,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= ixp12x0_context_switch,

	.cf_setup		= ixp12x0_setup
};
#endif	/* CPU_IXP12X0 */

#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
    defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425)
struct cpu_functions xscale_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= xscale_cpwait,

	/* MMU functions */

	.cf_control		= xscale_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= xscale_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= xscale_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= xscale_cache_syncI,
	.cf_icache_sync_range	= xscale_cache_syncI_rng,

	.cf_dcache_wbinv_all	= xscale_cache_purgeD,
	.cf_dcache_wbinv_range	= xscale_cache_purgeD_rng,
	.cf_dcache_inv_range	= xscale_cache_flushD_rng,
	.cf_dcache_wb_range	= xscale_cache_cleanD_rng,

	.cf_idcache_wbinv_all	= xscale_cache_purgeID,
	.cf_idcache_wbinv_range = xscale_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= xscale_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= xscale_context_switch,

	.cf_setup		= xscale_setup
};
#endif /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || __CPU_XSCALE_PXA2XX || CPU_XSCALE_IXP425 */

#if defined(CPU_CORTEX)
struct cpu_functions cortex_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= armv7_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm11_tlb_flushID,
	.cf_tlb_flushID_SE	= armv7_tlb_flushID_SE,
	.cf_tlb_flushI		= arm11_tlb_flushI,
	.cf_tlb_flushI_SE	= arm11_tlb_flushI_SE,
	.cf_tlb_flushD		= arm11_tlb_flushD,
	.cf_tlb_flushD_SE	= arm11_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv7_icache_sync_all,
	.cf_icache_sync_range	= armv7_icache_sync_range,

	.cf_dcache_wbinv_all	= armv7_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= armv7_dcache_wbinv_range,
	.cf_dcache_inv_range	= armv7_dcache_inv_range,
	.cf_dcache_wb_range	= armv7_dcache_wb_range,

	.cf_idcache_wbinv_all	= armv7_idcache_wbinv_all,
	.cf_idcache_wbinv_range = armv7_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= arm11_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= armv7_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= armv7_context_switch,

	.cf_setup		= armv7_setup

};
#endif /* CPU_CORTEX */

#ifdef CPU_SHEEVA
struct cpu_functions sheeva_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= armv5_ec_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= arm10_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= arm10_tlb_flushI_SE,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv5_ec_icache_sync_all,
	.cf_icache_sync_range	= armv5_ec_icache_sync_range,

	.cf_dcache_wbinv_all	= armv5_ec_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= sheeva_dcache_wbinv_range,
	.cf_dcache_inv_range	= sheeva_dcache_inv_range,
	.cf_dcache_wb_range	= sheeva_dcache_wb_range,

	.cf_idcache_wbinv_all	= armv5_ec_idcache_wbinv_all,
	.cf_idcache_wbinv_range = sheeva_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm10_context_switch,

	.cf_setup		= sheeva_setup
};
#endif /* CPU_SHEEVA */

/*
 * Global constants also used by locore.s
 */

struct cpu_functions cpufuncs;
u_int cputype;
u_int cpu_reset_needs_v4_MMU_disable;	/* flag used in locore.s */

#if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) || \
    defined(CPU_ARM9E) || defined(CPU_ARM10) || defined(CPU_ARM11) || \
    defined(CPU_FA526) || \
    defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
    defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425) || \
    defined(CPU_CORTEX) || defined(CPU_SHEEVA)
static void get_cachetype_cp15(void);

/* Additional cache information local to this file.  Log2 of some of the
   above numbers.  */
static int	arm_dcache_l2_nsets;
static int	arm_dcache_l2_assoc;
static int	arm_dcache_l2_linesize;

#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
static inline u_int
get_cachesize_cp15(int cssr)
{
	u_int csid;

#if (CPU_CORTEX) > 0
	__asm volatile(".arch\tarmv7a");
	__asm volatile("mcr p15, 2, %0, c0, c0, 0" :: "r" (cssr));
	__asm volatile("isb");	/* sync to the new cssr */
#else
	__asm volatile("mcr p15, 1, %0, c0, c0, 2" :: "r" (cssr));
#endif
	__asm volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (csid));
	return csid;
}
#endif

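/*
 * A worked example of the cache-size arithmetic done in
 * get_cachetype_cp15() below, assuming a Cortex-A8-style L1 D-cache
 * (illustrative numbers, not a guaranteed configuration): the CSID
 * register reports associativity - 1 = 3 and number-of-sets - 1 = 127,
 * and the cache type register gives a 64-byte line, so
 *
 *	size = line size * ways * sets = 64 * 4 * 128 = 32768 (32 KB)
 */
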
static void
get_cachetype_cp15(void)
{
	u_int ctype, isize, dsize;
	u_int multiplier;

	__asm volatile("mrc p15, 0, %0, c0, c0, 1"
		: "=r" (ctype));

	/*
	 * ...and thus spake the ARM ARM:
	 *
	 * If an <opcode2> value corresponding to an unimplemented or
	 * reserved ID register is encountered, the System Control
	 * processor returns the value of the main ID register.
	 */
	if (ctype == cpu_id())
		goto out;

#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
	if (CPU_CT_FORMAT(ctype) == 4) {
		u_int csid0, csid1, csid2;

		isize = 1U << (CPU_CT4_ILINE(ctype) + 2);
		dsize = 1U << (CPU_CT4_DLINE(ctype) + 2);

		csid0 = get_cachesize_cp15(CPU_CSSR_L1); /* select L1 dcache values */
		arm_pdcache_ways = CPU_CSID_ASSOC(csid0) + 1;
		arm_pdcache_line_size = dsize;
		arm_pdcache_size = arm_pdcache_line_size * arm_pdcache_ways;
		arm_pdcache_size *= (CPU_CSID_NUMSETS(csid0) + 1);
		arm_cache_prefer_mask = PAGE_SIZE;

		arm_dcache_align = arm_pdcache_line_size;

		csid1 = get_cachesize_cp15(CPU_CSSR_L1|CPU_CSSR_InD); /* select L1 icache values */
		arm_picache_ways = CPU_CSID_ASSOC(csid1) + 1;
		arm_picache_line_size = isize;
		arm_picache_size = arm_picache_line_size * arm_picache_ways;
		arm_picache_size *= (CPU_CSID_NUMSETS(csid1) + 1);
		arm_cache_prefer_mask = PAGE_SIZE;

		arm_dcache_align = arm_pdcache_line_size;

		csid2 = get_cachesize_cp15(CPU_CSSR_L2); /* select L2 cache values */
		arm_dcache_l2_assoc = CPU_CSID_ASSOC(csid2) + 1;
		arm_dcache_l2_linesize = 1 << (CPU_CSID_LEN(csid2) + 2);
		arm_dcache_l2_nsets = CPU_CSID_NUMSETS(csid2) + 1;
		arm_pcache_type = CPU_CT_CTYPE_WB14;
		goto out;
	}
#endif /* ARM_MMU_V6 + ARM_MMU_V7 > 0 */

	if ((ctype & CPU_CT_S) == 0)
		arm_pcache_unified = 1;

	/*
	 * If you want to know how this code works, go read the ARM ARM.
	 */

	arm_pcache_type = CPU_CT_CTYPE(ctype);

	if (arm_pcache_unified == 0) {
		isize = CPU_CT_ISIZE(ctype);
		multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
		arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
		if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
			if (isize & CPU_CT_xSIZE_M)
				arm_picache_line_size = 0; /* not present */
			else
				arm_picache_ways = 1;
		} else {
			arm_picache_ways = multiplier <<
			    (CPU_CT_xSIZE_ASSOC(isize) - 1);
#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
			if (CPU_CT_xSIZE_P & isize)
				arm_cache_prefer_mask |=
				    __BIT(9 + CPU_CT_xSIZE_SIZE(isize)
					  - CPU_CT_xSIZE_ASSOC(isize))
				    - PAGE_SIZE;
#endif
		}
		arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
	}

	dsize = CPU_CT_DSIZE(ctype);
	multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
	arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
	if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
		if (dsize & CPU_CT_xSIZE_M)
			arm_pdcache_line_size = 0; /* not present */
		else
			arm_pdcache_ways = 1;
	} else {
		arm_pdcache_ways = multiplier <<
		    (CPU_CT_xSIZE_ASSOC(dsize) - 1);
#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
		if (CPU_CT_xSIZE_P & dsize)
			arm_cache_prefer_mask |=
			    __BIT(9 + CPU_CT_xSIZE_SIZE(dsize)
				  - CPU_CT_xSIZE_ASSOC(dsize)) - PAGE_SIZE;
#endif
	}
	arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);

	arm_dcache_align = arm_pdcache_line_size;

	arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
	arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
	arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
	    CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);

 out:
	arm_dcache_align_mask = arm_dcache_align - 1;
}
#endif /* ARM7TDMI || ARM8 || ARM9 || ARM9E || ARM10 || ARM11 || FA526 || XSCALE || CORTEX || SHEEVA */

#if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3) || \
    defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_SA110) || \
    defined(CPU_SA1100) || defined(CPU_SA1110) || defined(CPU_IXP12X0)
/* Cache information for CPUs without cache type registers. */
struct cachetab {
	u_int32_t ct_cpuid;
	int	ct_pcache_type;
	int	ct_pcache_unified;
	int	ct_pdcache_size;
	int	ct_pdcache_line_size;
	int	ct_pdcache_ways;
	int	ct_picache_size;
	int	ct_picache_line_size;
	int	ct_picache_ways;
};

struct cachetab cachetab[] = {
    /* cpuid,           cache type,       u,  dsiz, ls, wy,  isiz, ls, wy */
    { CPU_ID_ARM2,      0,                1,     0,  0,  0,     0,  0,  0 },
    { CPU_ID_ARM250,    0,                1,     0,  0,  0,     0,  0,  0 },
    { CPU_ID_ARM3,      CPU_CT_CTYPE_WT,  1,  4096, 16, 64,     0,  0,  0 },
    { CPU_ID_ARM610,	CPU_CT_CTYPE_WT,  1,  4096, 16, 64,     0,  0,  0 },
    { CPU_ID_ARM710,    CPU_CT_CTYPE_WT,  1,  8192, 32,  4,     0,  0,  0 },
    { CPU_ID_ARM7500,   CPU_CT_CTYPE_WT,  1,  4096, 16,  4,     0,  0,  0 },
    { CPU_ID_ARM710A,   CPU_CT_CTYPE_WT,  1,  8192, 16,  4,     0,  0,  0 },
    { CPU_ID_ARM7500FE, CPU_CT_CTYPE_WT,  1,  4096, 16,  4,     0,  0,  0 },
    /* XXX is this type right for SA-1? */
    { CPU_ID_SA110,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 },
    { CPU_ID_SA1100,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
    { CPU_ID_SA1110,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
    { CPU_ID_IXP1200,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 }, /* XXX */
    { 0, 0, 0, 0, 0, 0, 0, 0 }
};

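/*
 * get_cachetype_table() below scans this table up to the all-zeros
 * sentinel row and copies a matching entry into the arm_p{i,d}cache_*
 * globals.  Reading the SA-110 row as an example: 16 KB data and
 * instruction caches, 32-byte lines, 32 ways, hence
 * 16384 / (32 * 32) = 16 sets per cache.
 */
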
static void get_cachetype_table(void);

static void
get_cachetype_table(void)
{
	int i;
	u_int32_t cpuid = cpu_id();

	for (i = 0; cachetab[i].ct_cpuid != 0; i++) {
		if (cachetab[i].ct_cpuid == (cpuid & CPU_ID_CPU_MASK)) {
			arm_pcache_type = cachetab[i].ct_pcache_type;
			arm_pcache_unified = cachetab[i].ct_pcache_unified;
			arm_pdcache_size = cachetab[i].ct_pdcache_size;
			arm_pdcache_line_size =
			    cachetab[i].ct_pdcache_line_size;
			arm_pdcache_ways = cachetab[i].ct_pdcache_ways;
			arm_picache_size = cachetab[i].ct_picache_size;
			arm_picache_line_size =
			    cachetab[i].ct_picache_line_size;
			arm_picache_ways = cachetab[i].ct_picache_ways;
		}
	}
	arm_dcache_align = arm_pdcache_line_size;

	arm_dcache_align_mask = arm_dcache_align - 1;
}

#endif /* ARM2 || ARM250 || ARM3 || ARM6 || ARM7 || SA110 || SA1100 || SA1110 || IXP12X0 */

/*
 * Cannot panic here as we may not have a console yet ...
 */

int
set_cpufuncs(void)
{
	if (cputype == 0) {
		cputype = cpufunc_id();
		cputype &= CPU_ID_CPU_MASK;
	}
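
	/*
	 * From here on cputype holds the main ID register with the
	 * revision field masked off (CPU_ID_CPU_MASK), so the tests
	 * below match CPU variants rather than individual steppings;
	 * code that cares about the stepping (e.g. the i80200 errata
	 * handling) re-reads cpufunc_id() itself.
	 */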

	/*
	 * NOTE: cpu_do_powersave defaults to off.  If we encounter a
	 * CPU type where we want to use it by default, then we set it.
	 */
#ifdef CPU_ARM2
	if (cputype == CPU_ID_ARM2) {
		cpufuncs = arm2_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;
		get_cachetype_table();
		return 0;
	}
#endif /* CPU_ARM2 */
#ifdef CPU_ARM250
	if (cputype == CPU_ID_ARM250) {
		cpufuncs = arm250_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;
		get_cachetype_table();
		return 0;
	}
#endif /* CPU_ARM250 */
#ifdef CPU_ARM3
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    (cputype & 0x00000f00) == 0x00000300) {
		cpufuncs = arm3_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;
		get_cachetype_table();
		return 0;
	}
#endif	/* CPU_ARM3 */
#ifdef CPU_ARM6
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    (cputype & 0x00000f00) == 0x00000600) {
		cpufuncs = arm6_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;
		get_cachetype_table();
		pmap_pte_init_generic();
		return 0;
	}
#endif	/* CPU_ARM6 */
#ifdef CPU_ARM7
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    CPU_ID_IS7(cputype) &&
	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V3) {
		cpufuncs = arm7_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;
		get_cachetype_table();
		pmap_pte_init_generic();
		return 0;
	}
#endif	/* CPU_ARM7 */
#ifdef CPU_ARM7TDMI
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    CPU_ID_IS7(cputype) &&
	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V4T) {
		cpufuncs = arm7tdmi_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		return 0;
	}
#endif	/* CPU_ARM7TDMI */
#ifdef CPU_ARM8
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    (cputype & 0x0000f000) == 0x00008000) {
		cpufuncs = arm8_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;	/* XXX correct? */
		get_cachetype_cp15();
		pmap_pte_init_arm8();
		return 0;
	}
#endif	/* CPU_ARM8 */
#ifdef CPU_ARM9
	if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
	     (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
	    (cputype & 0x0000f000) == 0x00009000) {
		cpufuncs = arm9_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
		get_cachetype_cp15();
		arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
		arm9_dcache_sets_max =
		    (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) -
		    arm9_dcache_sets_inc;
		arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
		arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
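		/*
		 * These four values parameterise the arm9 set/way cache
		 * loops.  The "clean/invalidate by set/way" MCR operand
		 * keeps the way index in the uppermost bits and the set
		 * index starting at bit log2(line size), so the set
		 * stride is 1 << linesize-log2, the way stride is
		 * 1 << (32 - assoc-log2), and index_max is the last way
		 * value before the field wraps to zero.
		 */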
1576 #ifdef	ARM9_CACHE_WRITE_THROUGH
1577 		pmap_pte_init_arm9();
1578 #else
1579 		pmap_pte_init_generic();
1580 #endif
1581 		return 0;
1582 	}
1583 #endif /* CPU_ARM9 */
1584 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
1585 	if (cputype == CPU_ID_ARM926EJS ||
1586 	    cputype == CPU_ID_ARM1026EJS) {
1587 		cpufuncs = armv5_ec_cpufuncs;
1588 		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
1589 		get_cachetype_cp15();
1590 		pmap_pte_init_generic();
1591 		return 0;
1592 	}
1593 #endif /* CPU_ARM9E || CPU_ARM10 */
1594 #if defined(CPU_SHEEVA)
1595 	if (cputype == CPU_ID_MV88SV131 ||
1596 	    cputype == CPU_ID_MV88FR571_VD) {
1597 		cpufuncs = sheeva_cpufuncs;
1598 		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
1599 		get_cachetype_cp15();
1600 		pmap_pte_init_generic();
1601 		return 0;
1602 	}
1603 #endif /* CPU_SHEEVA */
1604 #ifdef CPU_ARM10
1605 	if (/* cputype == CPU_ID_ARM1020T || */
1606 	    cputype == CPU_ID_ARM1020E) {
1607 		/*
1608 		 * Select write-through cacheing (this isn't really an
1609 		 * option on ARM1020T).
1610 		 */
1611 		cpufuncs = arm10_cpufuncs;
1612 		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
1613 		get_cachetype_cp15();
1614 		armv5_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
1615 		armv5_dcache_sets_max =
1616 		    (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) -
1617 		    armv5_dcache_sets_inc;
1618 		armv5_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
1619 		armv5_dcache_index_max = 0U - armv5_dcache_index_inc;
1620 		pmap_pte_init_generic();
1621 		return 0;
1622 	}
1623 #endif /* CPU_ARM10 */
1624 
1625 
1626 #if defined(CPU_ARM11MPCORE)
1627 	if (cputype == CPU_ID_ARM11MPCORE) {
1628 		cpufuncs = arm11mpcore_cpufuncs;
1629 		get_cachetype_cp15();
1630 		armv5_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
1631 		armv5_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
1632 			arm_dcache_l2_nsets)) - armv5_dcache_sets_inc;
1633 		armv5_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
1634 		armv5_dcache_index_max = 0U - armv5_dcache_index_inc;
1635 		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
1636 		cpu_do_powersave = 1;			/* Enable powersave */
1637 		pmap_pte_init_arm11mpcore();
1638 		if (arm_cache_prefer_mask)
1639 			uvmexp.ncolors = (arm_cache_prefer_mask >> PGSHIFT) + 1;
1640 
1641 		return 0;
1642 
1643 	}
1644 #endif	/* CPU_ARM11MPCORE */
1645 
1646 #if defined(CPU_ARM11)
1647 	if (cputype == CPU_ID_ARM1136JS ||
1648 	    cputype == CPU_ID_ARM1136JSR1 ||
1649 	    cputype == CPU_ID_ARM1176JS) {
1650 		cpufuncs = arm11_cpufuncs;
1651 #if defined(CPU_ARM1136)
1652 		if (cputype != CPU_ID_ARM1176JS) {
1653 			cpufuncs = arm1136_cpufuncs;
1654 			if (cputype == CPU_ID_ARM1136JS)
1655 				cpufuncs.cf_sleep = arm1136_sleep_rev0;
1656 		}
1657 #endif
1658 		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
1659 		cpu_do_powersave = 1;			/* Enable powersave */
1660 		get_cachetype_cp15();
1661 #ifdef ARM11_CACHE_WRITE_THROUGH
1662 		pmap_pte_init_arm11();
1663 #else
1664 		pmap_pte_init_generic();
1665 #endif
1666 		if (arm_cache_prefer_mask)
1667 			uvmexp.ncolors = (arm_cache_prefer_mask >> PGSHIFT) + 1;
1668 
1669 		return 0;
1670 	}
1671 #endif /* CPU_ARM11 */
1672 #ifdef CPU_SA110
1673 	if (cputype == CPU_ID_SA110) {
1674 		cpufuncs = sa110_cpufuncs;
1675 		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it */
1676 		get_cachetype_table();
1677 		pmap_pte_init_sa1();
1678 		return 0;
1679 	}
1680 #endif	/* CPU_SA110 */
1681 #ifdef CPU_SA1100
1682 	if (cputype == CPU_ID_SA1100) {
1683 		cpufuncs = sa11x0_cpufuncs;
1684 		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it	*/
1685 		get_cachetype_table();
1686 		pmap_pte_init_sa1();
1687 
1688 		/* Use powersave on this CPU. */
1689 		cpu_do_powersave = 1;
1690 
1691 		return 0;
1692 	}
1693 #endif	/* CPU_SA1100 */
1694 #ifdef CPU_SA1110
1695 	if (cputype == CPU_ID_SA1110) {
1696 		cpufuncs = sa11x0_cpufuncs;
1697 		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it	*/
1698 		get_cachetype_table();
1699 		pmap_pte_init_sa1();
1700 
1701 		/* Use powersave on this CPU. */
1702 		cpu_do_powersave = 1;
1703 
1704 		return 0;
1705 	}
1706 #endif	/* CPU_SA1110 */
1707 #ifdef CPU_FA526
1708 	if (cputype == CPU_ID_FA526) {
1709 		cpufuncs = fa526_cpufuncs;
1710 		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it	*/
1711 		get_cachetype_cp15();
1712 		pmap_pte_init_generic();
1713 
1714 		/* Use powersave on this CPU. */
1715 		cpu_do_powersave = 1;
1716 
1717 		return 0;
1718 	}
1719 #endif	/* CPU_FA526 */
1720 #ifdef CPU_IXP12X0
1721 	if (cputype == CPU_ID_IXP1200) {
1722 		cpufuncs = ixp12x0_cpufuncs;
1723 		cpu_reset_needs_v4_MMU_disable = 1;
1724 		get_cachetype_table();
1725 		pmap_pte_init_sa1();
1726 		return 0;
1727 	}
1728 #endif  /* CPU_IXP12X0 */
1729 #ifdef CPU_XSCALE_80200
1730 	if (cputype == CPU_ID_80200) {
1731 		int rev = cpufunc_id() & CPU_ID_REVISION_MASK;
1732 
1733 		i80200_icu_init();
1734 
1735 		/*
1736 		 * Reset the Performance Monitoring Unit to a
1737 		 * pristine state:
1738 		 *	- CCNT, PMN0, PMN1 reset to 0
1739 		 *	- overflow indications cleared
1740 		 *	- all counters disabled
1741 		 */
1742 		__asm volatile("mcr p14, 0, %0, c0, c0, 0"
1743 			:
1744 			: "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
1745 			       PMNC_CC_IF));
1746 
1747 #if defined(XSCALE_CCLKCFG)
1748 		/*
1749 		 * Crank CCLKCFG to maximum legal value.
1750 		 */
1751 		__asm volatile ("mcr p14, 0, %0, c6, c0, 0"
1752 			:
1753 			: "r" (XSCALE_CCLKCFG));
1754 #endif
1755 
1756 		/*
1757 		 * XXX Disable ECC in the Bus Controller Unit; we
1758 		 * don't really support it, yet.  Clear any pending
1759 		 * error indications.
1760 		 */
1761 		__asm volatile("mcr p13, 0, %0, c0, c1, 0"
1762 			:
1763 			: "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));
1764 
1765 		cpufuncs = xscale_cpufuncs;
1766 #if defined(PERFCTRS)
1767 		xscale_pmu_init();
1768 #endif
1769 
1770 		/*
1771 		 * i80200 errata: Step-A0 and A1 have a bug where
1772 		 * D$ dirty bits are not cleared on "invalidate by
1773 		 * address".
1774 		 *
1775 		 * Workaround: Clean cache line before invalidating.
1776 		 */
1777 		if (rev == 0 || rev == 1)
1778 			cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng;
1779 
1780 		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
1781 		get_cachetype_cp15();
1782 		pmap_pte_init_xscale();
1783 		return 0;
1784 	}
1785 #endif /* CPU_XSCALE_80200 */
1786 #ifdef CPU_XSCALE_80321
1787 	if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
1788 	    cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0 ||
1789 	    cputype == CPU_ID_80219_400 || cputype == CPU_ID_80219_600) {
1790 		i80321_icu_init();
1791 
1792 		/*
1793 		 * Reset the Performance Monitoring Unit to a
1794 		 * pristine state:
1795 		 *	- CCNT, PMN0, PMN1 reset to 0
1796 		 *	- overflow indications cleared
1797 		 *	- all counters disabled
1798 		 */
1799 		__asm volatile("mcr p14, 0, %0, c0, c0, 0"
1800 			:
1801 			: "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
1802 			       PMNC_CC_IF));
1803 
1804 		cpufuncs = xscale_cpufuncs;
1805 #if defined(PERFCTRS)
1806 		xscale_pmu_init();
1807 #endif
1808 
1809 		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
1810 		get_cachetype_cp15();
1811 		pmap_pte_init_xscale();
1812 		return 0;
1813 	}
1814 #endif /* CPU_XSCALE_80321 */
1815 #ifdef __CPU_XSCALE_PXA2XX
	/* ignore the core revision when matching PXA2xx CPUs */
1817 	if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X ||
1818 	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
1819 	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {
1820 
1821 		cpufuncs = xscale_cpufuncs;
1822 #if defined(PERFCTRS)
1823 		xscale_pmu_init();
1824 #endif
1825 
1826 		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
1827 		get_cachetype_cp15();
1828 		pmap_pte_init_xscale();
1829 
1830 		/* Use powersave on this CPU. */
1831 		cpu_do_powersave = 1;
1832 
1833 		return 0;
1834 	}
1835 #endif /* __CPU_XSCALE_PXA2XX */
1836 #ifdef CPU_XSCALE_IXP425
1837 	if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
1838 	    cputype == CPU_ID_IXP425_266) {
1839 		ixp425_icu_init();
1840 
1841 		cpufuncs = xscale_cpufuncs;
1842 #if defined(PERFCTRS)
1843 		xscale_pmu_init();
1844 #endif
1845 
1846 		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
1847 		get_cachetype_cp15();
1848 		pmap_pte_init_xscale();
1849 
1850 		return 0;
1851 	}
1852 #endif /* CPU_XSCALE_IXP425 */
1853 #if defined(CPU_CORTEX)
1854 	if (cputype == CPU_ID_CORTEXA8R1 ||
1855 	    cputype == CPU_ID_CORTEXA8R2 ||
1856 	    cputype == CPU_ID_CORTEXA8R3 ||
1857 	    cputype == CPU_ID_CORTEXA9R1) {
1858 		cpufuncs = cortex_cpufuncs;
1859 		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
1860 		cpu_do_powersave = 1;			/* Enable powersave */
1861 		get_cachetype_cp15();
1862 		pmap_pte_init_armv7();
1863 		if (arm_cache_prefer_mask)
1864 			uvmexp.ncolors = (arm_cache_prefer_mask >> PGSHIFT) + 1;
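		/*
		 * Illustrative arithmetic (not from the original source):
		 * a 16KiB way with 4KiB pages yields a prefer mask of
		 * 0x3000, so the expression above gives
		 * (0x3000 >> PGSHIFT) + 1 = 4 page colors.
		 */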
1865 
1866 		return 0;
1867 	}
1868 #endif /* CPU_CORTEX */
1869 	/*
1870 	 * Bzzzz. And the answer was ...
1871 	 */
1872 	panic("No support for this CPU type (%08x) in kernel", cputype);
1873 	return(ARCHITECTURE_NOT_PRESENT);
1874 }
1875 
1876 #ifdef CPU_ARM2
1877 u_int arm2_id(void)
1878 {
1879 
1880 	return CPU_ID_ARM2;
1881 }
1882 #endif /* CPU_ARM2 */
1883 
1884 #ifdef CPU_ARM250
1885 u_int arm250_id(void)
1886 {
1887 
1888 	return CPU_ID_ARM250;
1889 }
1890 #endif /* CPU_ARM250 */
1891 
/*
 * Fixup routines for data and prefetch aborts.
 *
 * Several compile-time symbols are used:
 *
 * DEBUG_FAULT_CORRECTION - Print debugging information during the
 * correction of registers after a fault.
 * ARM6_LATE_ABORT - ARM6 supports both early and late aborts;
 * when this is defined, late aborts are used.
 */
1902 
1903 
1904 /*
1905  * Null abort fixup routine.
1906  * For use when no fixup is required.
1907  */
1908 int
1909 cpufunc_null_fixup(void *arg)
1910 {
1911 	return(ABORT_FIXUP_OK);
1912 }
1913 
1914 
1915 #if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3) || \
1916     defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI)
1917 
1918 #ifdef DEBUG_FAULT_CORRECTION
1919 #define DFC_PRINTF(x)		printf x
1920 #define DFC_DISASSEMBLE(x)	disassemble(x)
1921 #else
1922 #define DFC_PRINTF(x)		/* nothing */
1923 #define DFC_DISASSEMBLE(x)	/* nothing */
1924 #endif
1925 
1926 /*
1927  * "Early" data abort fixup.
1928  *
1929  * For ARM2, ARM2as, ARM3 and ARM6 (in early-abort mode).  Also used
1930  * indirectly by ARM6 (in late-abort mode) and ARM7[TDMI].
1931  *
1932  * In early aborts, we may have to fix up LDM, STM, LDC and STC.
1933  */
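/*
 * Worked example (illustrative, not from the original sources):
 * suppose "stmia r4!, {r0-r3}" aborts.  Bit 21 (writeback) is set,
 * the register list names four registers and bit 23 (U, "up") is
 * set, so the CPU has already advanced r4 by 4 * 4 = 16 bytes;
 * the code below therefore subtracts count * 4 to restore r4.
 */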
1934 int
1935 early_abort_fixup(void *arg)
1936 {
1937 	trapframe_t *frame = arg;
1938 	u_int fault_pc;
1939 	u_int fault_instruction;
1940 	int saved_lr = 0;
1941 
1942 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1943 
1944 		/* Ok an abort in SVC mode */
1945 
1946 		/*
1947 		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1948 		 * as the fault happened in svc mode but we need it in the
1949 		 * usr slot so we can treat the registers as an array of ints
1950 		 * during fixing.
		 * NOTE: The PC occupies its usual slot in that array,
		 * but writeback to r15 is not allowed.
1953 		 * Doing it like this is more efficient than trapping this
1954 		 * case in all possible locations in the following fixup code.
1955 		 */
1956 
1957 		saved_lr = frame->tf_usr_lr;
1958 		frame->tf_usr_lr = frame->tf_svc_lr;
1959 
1960 		/*
1961 		 * Note the trapframe does not have the SVC r13 so a fault
1962 		 * from an instruction with writeback to r13 in SVC mode is
1963 		 * not allowed. This should not happen as the kstack is
1964 		 * always valid.
1965 		 */
1966 	}
1967 
1968 	/* Get fault address and status from the CPU */
1969 
1970 	fault_pc = frame->tf_pc;
1971 	fault_instruction = *((volatile unsigned int *)fault_pc);
1972 
1973 	/* Decode the fault instruction and fix the registers as needed */
1974 
1975 	if ((fault_instruction & 0x0e000000) == 0x08000000) {
1976 		int base;
1977 		int loop;
1978 		int count;
1979 		int *registers = &frame->tf_r0;
1980 
1981 		DFC_PRINTF(("LDM/STM\n"));
1982 		DFC_DISASSEMBLE(fault_pc);
1983 		if (fault_instruction & (1 << 21)) {
1984 			DFC_PRINTF(("This instruction must be corrected\n"));
1985 			base = (fault_instruction >> 16) & 0x0f;
1986 			if (base == 15)
1987 				return ABORT_FIXUP_FAILED;
1988 			/* Count registers transferred */
1989 			count = 0;
1990 			for (loop = 0; loop < 16; ++loop) {
1991 				if (fault_instruction & (1<<loop))
1992 					++count;
1993 			}
1994 			DFC_PRINTF(("%d registers used\n", count));
1995 			DFC_PRINTF(("Corrected r%d by %d bytes ",
1996 				       base, count * 4));
1997 			if (fault_instruction & (1 << 23)) {
1998 				DFC_PRINTF(("down\n"));
1999 				registers[base] -= count * 4;
2000 			} else {
2001 				DFC_PRINTF(("up\n"));
2002 				registers[base] += count * 4;
2003 			}
2004 		}
2005 	} else if ((fault_instruction & 0x0e000000) == 0x0c000000) {
2006 		int base;
2007 		int offset;
2008 		int *registers = &frame->tf_r0;
2009 
2010 		/* REGISTER CORRECTION IS REQUIRED FOR THESE INSTRUCTIONS */
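		/*
		 * Example (illustrative): an aborting "stc p1, c2, [r3, #16]!"
		 * has writeback (bit 21) set and an 8-bit word offset of 4,
		 * so the decode below computes offset = 4 << 2 = 16 and,
		 * since the U bit (bit 23) is set, adds -16 to back r3 out.
		 */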
2011 
2012 		DFC_DISASSEMBLE(fault_pc);
2013 
2014 		/* Only need to fix registers if write back is turned on */
2015 
2016 		if ((fault_instruction & (1 << 21)) != 0) {
2017 			base = (fault_instruction >> 16) & 0x0f;
2018 			if (base == 13 &&
2019 			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
2020 				return ABORT_FIXUP_FAILED;
2021 			if (base == 15)
2022 				return ABORT_FIXUP_FAILED;
2023 
2024 			offset = (fault_instruction & 0xff) << 2;
2025 			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
2026 			if ((fault_instruction & (1 << 23)) != 0)
2027 				offset = -offset;
2028 			registers[base] += offset;
2029 			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
2030 		}
	} else if ((fault_instruction & 0x0e000000) == 0x0c000000)
		/* XXX unreachable: same test as the branch above */
		return ABORT_FIXUP_FAILED;
2033 
2034 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
2035 
2036 		/* Ok an abort in SVC mode */
2037 
2038 		/*
2039 		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
2040 		 * as the fault happened in svc mode but we need it in the
2041 		 * usr slot so we can treat the registers as an array of ints
2042 		 * during fixing.
		 * NOTE: The PC occupies its usual slot in that array,
		 * but writeback to r15 is not allowed.
2045 		 * Doing it like this is more efficient than trapping this
2046 		 * case in all possible locations in the prior fixup code.
2047 		 */
2048 
2049 		frame->tf_svc_lr = frame->tf_usr_lr;
2050 		frame->tf_usr_lr = saved_lr;
2051 
2052 		/*
2053 		 * Note the trapframe does not have the SVC r13 so a fault
2054 		 * from an instruction with writeback to r13 in SVC mode is
2055 		 * not allowed. This should not happen as the kstack is
2056 		 * always valid.
2057 		 */
2058 	}
2059 
2060 	return(ABORT_FIXUP_OK);
2061 }
2062 #endif	/* CPU_ARM2/250/3/6/7 */
2063 
2064 
2065 #if (defined(CPU_ARM6) && defined(ARM6_LATE_ABORT)) || defined(CPU_ARM7) || \
2066 	defined(CPU_ARM7TDMI)
2067 /*
2068  * "Late" (base updated) data abort fixup
2069  *
2070  * For ARM6 (in late-abort mode) and ARM7.
2071  *
2072  * In this model, all data-transfer instructions need fixing up.  We defer
2073  * LDM, STM, LDC and STC fixup to the early-abort handler.
2074  */
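/*
 * Worked example (illustrative): if "ldr r0, [r1], #8" aborts, the
 * base r1 has already been bumped by 8 under the late-abort model,
 * so the immediate-offset path below negates the offset (the U bit,
 * bit 23, is set) and adds -8 to recover the original r1.
 */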
2075 int
2076 late_abort_fixup(void *arg)
2077 {
2078 	trapframe_t *frame = arg;
2079 	u_int fault_pc;
2080 	u_int fault_instruction;
2081 	int saved_lr = 0;
2082 
2083 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
2084 
2085 		/* Ok an abort in SVC mode */
2086 
2087 		/*
2088 		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
2089 		 * as the fault happened in svc mode but we need it in the
2090 		 * usr slot so we can treat the registers as an array of ints
2091 		 * during fixing.
		 * NOTE: The PC occupies its usual slot in that array,
		 * but writeback to r15 is not allowed.
2094 		 * Doing it like this is more efficient than trapping this
2095 		 * case in all possible locations in the following fixup code.
2096 		 */
2097 
2098 		saved_lr = frame->tf_usr_lr;
2099 		frame->tf_usr_lr = frame->tf_svc_lr;
2100 
2101 		/*
2102 		 * Note the trapframe does not have the SVC r13 so a fault
2103 		 * from an instruction with writeback to r13 in SVC mode is
2104 		 * not allowed. This should not happen as the kstack is
2105 		 * always valid.
2106 		 */
2107 	}
2108 
2109 	/* Get fault address and status from the CPU */
2110 
2111 	fault_pc = frame->tf_pc;
2112 	fault_instruction = *((volatile unsigned int *)fault_pc);
2113 
2114 	/* Decode the fault instruction and fix the registers as needed */
2115 
	/* Was it a swap instruction? */
2117 
2118 	if ((fault_instruction & 0x0fb00ff0) == 0x01000090) {
2119 		DFC_DISASSEMBLE(fault_pc);
2120 	} else if ((fault_instruction & 0x0c000000) == 0x04000000) {
2121 
		/* Was it an ldr/str instruction? */
		/* This is for late abort only */
2124 
2125 		int base;
2126 		int offset;
2127 		int *registers = &frame->tf_r0;
2128 
2129 		DFC_DISASSEMBLE(fault_pc);
2130 
2133 		if ((fault_instruction & (1 << 24)) == 0
2134 		    || (fault_instruction & (1 << 21)) != 0) {
2135 			/* postindexed ldr/str with no writeback */
2136 
2137 			base = (fault_instruction >> 16) & 0x0f;
2138 			if (base == 13 &&
2139 			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
2140 				return ABORT_FIXUP_FAILED;
2141 			if (base == 15)
2142 				return ABORT_FIXUP_FAILED;
2143 			DFC_PRINTF(("late abt fix: r%d=%08x : ",
2144 				       base, registers[base]));
2145 			if ((fault_instruction & (1 << 25)) == 0) {
2146 				/* Immediate offset - easy */
2147 
2148 				offset = fault_instruction & 0xfff;
2149 				if ((fault_instruction & (1 << 23)))
2150 					offset = -offset;
2151 				registers[base] += offset;
2152 				DFC_PRINTF(("imm=%08x ", offset));
2153 			} else {
2154 				/* offset is a shifted register */
2155 				int shift;
2156 
2157 				offset = fault_instruction & 0x0f;
2158 				if (offset == base)
2159 					return ABORT_FIXUP_FAILED;
2160 
				/*
				 * Register offset - harder: we have
				 * to cope with shifts!
				 */
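				/*
				 * e.g. (illustrative) "ldr r0, [r1], r2, lsl #2":
				 * the offset is r2 shifted left by two, handled
				 * by the logical-left case below.
				 */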
2165 				offset = registers[offset];
2166 
2167 				if ((fault_instruction & (1 << 4)) == 0)
2168 					/* shift with amount */
2169 					shift = (fault_instruction >> 7) & 0x1f;
2170 				else {
2171 					/* shift with register */
2172 					if ((fault_instruction & (1 << 7)) != 0)
2173 						/* undefined for now so bail out */
2174 						return ABORT_FIXUP_FAILED;
2175 					shift = ((fault_instruction >> 8) & 0xf);
2176 					if (base == shift)
2177 						return ABORT_FIXUP_FAILED;
2178 					DFC_PRINTF(("shift reg=%d ", shift));
2179 					shift = registers[shift];
2180 				}
2181 				DFC_PRINTF(("shift=%08x ", shift));
2182 				switch (((fault_instruction >> 5) & 0x3)) {
2183 				case 0 : /* Logical left */
2184 					offset = (int)(((u_int)offset) << shift);
2185 					break;
				case 1 : /* Logical Right */
					if (shift == 0)	/* LSR #0 means LSR #32 */
						offset = 0;
					else
						offset = (int)(((u_int)offset) >> shift);
					break;
				case 2 : /* Arithmetic Right */
					if (shift == 0)	/* ASR #0 means ASR #32 */
						shift = 31;
					offset = (int)(((int)offset) >> shift);
					break;
				case 3 : /* Rotate right (ror or rrx) */
2195 					return ABORT_FIXUP_FAILED;
2196 					break;
2197 				}
2198 
2199 				DFC_PRINTF(("abt: fixed LDR/STR with "
2200 					       "register offset\n"));
2201 				if ((fault_instruction & (1 << 23)))
2202 					offset = -offset;
2203 				DFC_PRINTF(("offset=%08x ", offset));
2204 				registers[base] += offset;
2205 			}
2206 			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
2207 		}
2208 	}
2209 
2210 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
2211 
2212 		/* Ok an abort in SVC mode */
2213 
2214 		/*
2215 		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
2216 		 * as the fault happened in svc mode but we need it in the
2217 		 * usr slot so we can treat the registers as an array of ints
2218 		 * during fixing.
		 * NOTE: The PC occupies its usual slot in that array,
		 * but writeback to r15 is not allowed.
2221 		 * Doing it like this is more efficient than trapping this
2222 		 * case in all possible locations in the prior fixup code.
2223 		 */
2224 
2225 		frame->tf_svc_lr = frame->tf_usr_lr;
2226 		frame->tf_usr_lr = saved_lr;
2227 
2228 		/*
2229 		 * Note the trapframe does not have the SVC r13 so a fault
2230 		 * from an instruction with writeback to r13 in SVC mode is
2231 		 * not allowed. This should not happen as the kstack is
2232 		 * always valid.
2233 		 */
2234 	}
2235 
2236 	/*
2237 	 * Now let the early-abort fixup routine have a go, in case it
2238 	 * was an LDM, STM, LDC or STC that faulted.
2239 	 */
2240 
2241 	return early_abort_fixup(arg);
2242 }
2243 #endif	/* CPU_ARM6(LATE)/7/7TDMI */
2244 
2245 /*
2246  * CPU Setup code
2247  */
2248 
2249 #if defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) || \
2250 	defined(CPU_ARM8) || defined (CPU_ARM9) || defined (CPU_ARM9E) || \
2251 	defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
2252 	defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
2253 	defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425) || \
2254 	defined(CPU_ARM10) || defined(CPU_ARM11) || defined(CPU_ARM1136) || \
2255 	defined(CPU_FA526) || defined(CPU_CORTEX) || defined(CPU_SHEEVA)
2256 
2257 #define IGN	0
2258 #define OR	1
2259 #define BIC	2
2260 
2261 struct cpu_option {
2262 	const char *co_name;
2263 	int	co_falseop;
2264 	int	co_trueop;
2265 	int	co_value;
2266 };
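
/*
 * For example (illustrative), an entry such as
 *	{ "cpu.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE }
 * means: "cpu.nocache=1" BICs (clears) the cache-enable bits out of
 * the control word, while "cpu.nocache=0" ORs (sets) them back in.
 */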
2267 
2268 static u_int parse_cpu_options(char *, struct cpu_option *, u_int);
2269 
2270 static u_int
2271 parse_cpu_options(char *args, struct cpu_option *optlist, u_int cpuctrl)
2272 {
2273 	int integer;
2274 
2275 	if (args == NULL)
2276 		return(cpuctrl);
2277 
2278 	while (optlist->co_name) {
2279 		if (get_bootconf_option(args, optlist->co_name,
2280 		    BOOTOPT_TYPE_BOOLEAN, &integer)) {
2281 			if (integer) {
2282 				if (optlist->co_trueop == OR)
2283 					cpuctrl |= optlist->co_value;
2284 				else if (optlist->co_trueop == BIC)
2285 					cpuctrl &= ~optlist->co_value;
2286 			} else {
2287 				if (optlist->co_falseop == OR)
2288 					cpuctrl |= optlist->co_value;
2289 				else if (optlist->co_falseop == BIC)
2290 					cpuctrl &= ~optlist->co_value;
2291 			}
2292 		}
2293 		++optlist;
2294 	}
2295 	return(cpuctrl);
2296 }
#endif /* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 || ... || CPU_CORTEX || CPU_SHEEVA */
2298 
2299 #if defined (CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) \
2300 	|| defined(CPU_ARM8)
2301 struct cpu_option arm678_options[] = {
2302 #ifdef COMPAT_12
2303 	{ "nocache",		IGN, BIC, CPU_CONTROL_IDC_ENABLE },
2304 	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
2305 #endif	/* COMPAT_12 */
2306 	{ "cpu.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
2307 	{ "cpu.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
2308 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2309 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2310 	{ NULL,			IGN, IGN, 0 }
2311 };
2312 
2313 #endif	/* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 */
2314 
2315 #ifdef CPU_ARM6
2316 struct cpu_option arm6_options[] = {
2317 	{ "arm6.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
2318 	{ "arm6.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
2319 	{ "arm6.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2320 	{ "arm6.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2321 	{ NULL,			IGN, IGN, 0 }
2322 };
2323 
2324 void
2325 arm6_setup(char *args)
2326 {
2327 	int cpuctrl, cpuctrlmask;
2328 
2329 	/* Set up default control registers bits */
2330 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2331 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2332 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
2333 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2334 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2335 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
2336 		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE
2337 		 | CPU_CONTROL_AFLT_ENABLE;
2338 
2339 #ifdef ARM6_LATE_ABORT
2340 	cpuctrl |= CPU_CONTROL_LABT_ENABLE;
2341 #endif	/* ARM6_LATE_ABORT */
2342 
2343 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2344 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2345 #endif
2346 
2347 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
2348 	cpuctrl = parse_cpu_options(args, arm6_options, cpuctrl);
2349 
2350 #ifdef __ARMEB__
2351 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2352 #endif
2353 
2354 	/* Clear out the cache */
2355 	cpu_idcache_wbinv_all();
2356 
2357 	/* Set the control register */
2358 	curcpu()->ci_ctrl = cpuctrl;
2359 	cpu_control(0xffffffff, cpuctrl);
2360 }
2361 #endif	/* CPU_ARM6 */
2362 
2363 #ifdef CPU_ARM7
2364 struct cpu_option arm7_options[] = {
2365 	{ "arm7.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
2366 	{ "arm7.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
2367 	{ "arm7.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2368 	{ "arm7.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2369 #ifdef COMPAT_12
2370 	{ "fpaclk2",		BIC, OR,  CPU_CONTROL_CPCLK },
2371 #endif	/* COMPAT_12 */
2372 	{ "arm700.fpaclk",	BIC, OR,  CPU_CONTROL_CPCLK },
2373 	{ NULL,			IGN, IGN, 0 }
2374 };
2375 
2376 void
2377 arm7_setup(char *args)
2378 {
2379 	int cpuctrl, cpuctrlmask;
2380 
2381 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2382 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2383 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
2384 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2385 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2386 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
2387 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_LABT_ENABLE
2388 		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE
2389 		 | CPU_CONTROL_AFLT_ENABLE;
2390 
2391 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2392 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2393 #endif
2394 
2395 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
2396 	cpuctrl = parse_cpu_options(args, arm7_options, cpuctrl);
2397 
2398 #ifdef __ARMEB__
2399 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2400 #endif
2401 
2402 	/* Clear out the cache */
2403 	cpu_idcache_wbinv_all();
2404 
2405 	/* Set the control register */
2406 	curcpu()->ci_ctrl = cpuctrl;
2407 	cpu_control(0xffffffff, cpuctrl);
2408 }
2409 #endif	/* CPU_ARM7 */
2410 
2411 #ifdef CPU_ARM7TDMI
2412 struct cpu_option arm7tdmi_options[] = {
2413 	{ "arm7.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
2414 	{ "arm7.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
2415 	{ "arm7.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2416 	{ "arm7.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2417 #ifdef COMPAT_12
2418 	{ "fpaclk2",		BIC, OR,  CPU_CONTROL_CPCLK },
2419 #endif	/* COMPAT_12 */
2420 	{ "arm700.fpaclk",	BIC, OR,  CPU_CONTROL_CPCLK },
2421 	{ NULL,			IGN, IGN, 0 }
2422 };
2423 
2424 void
2425 arm7tdmi_setup(char *args)
2426 {
2427 	int cpuctrl;
2428 
2429 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2430 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2431 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
2432 
2433 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
2434 	cpuctrl = parse_cpu_options(args, arm7tdmi_options, cpuctrl);
2435 
2436 #ifdef __ARMEB__
2437 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2438 #endif
2439 
2440 	/* Clear out the cache */
2441 	cpu_idcache_wbinv_all();
2442 
2443 	/* Set the control register */
2444 	curcpu()->ci_ctrl = cpuctrl;
2445 	cpu_control(0xffffffff, cpuctrl);
2446 }
2447 #endif	/* CPU_ARM7TDMI */
2448 
2449 #ifdef CPU_ARM8
2450 struct cpu_option arm8_options[] = {
2451 	{ "arm8.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
2452 	{ "arm8.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
2453 	{ "arm8.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2454 	{ "arm8.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2455 #ifdef COMPAT_12
2456 	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2457 #endif	/* COMPAT_12 */
2458 	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2459 	{ "arm8.branchpredict",	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2460 	{ NULL,			IGN, IGN, 0 }
2461 };
2462 
2463 void
2464 arm8_setup(char *args)
2465 {
2466 	int integer;
2467 	int cpuctrl, cpuctrlmask;
2468 	int clocktest;
2469 	int setclock = 0;
2470 
2471 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2472 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2473 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
2474 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2475 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2476 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
2477 		 | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_ROM_ENABLE
2478 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE;
2479 
2480 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2481 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2482 #endif
2483 
2484 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
2485 	cpuctrl = parse_cpu_options(args, arm8_options, cpuctrl);
2486 
2487 #ifdef __ARMEB__
2488 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2489 #endif
2490 
2491 	/* Get clock configuration */
2492 	clocktest = arm8_clock_config(0, 0) & 0x0f;
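	/*
	 * Layout of the clock/test value as used below (inferred from
	 * this code, not from a datasheet): bit 0 selects dynamic
	 * clocking, bit 1 synchronous mode, bits 3:2 the fast-clock
	 * select and bits 7:5 the test bits.
	 */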
2493 
2494 	/* Special ARM8 clock and test configuration */
2495 	if (get_bootconf_option(args, "arm8.clock.reset", BOOTOPT_TYPE_BOOLEAN, &integer)) {
2496 		clocktest = 0;
2497 		setclock = 1;
2498 	}
2499 	if (get_bootconf_option(args, "arm8.clock.dynamic", BOOTOPT_TYPE_BOOLEAN, &integer)) {
2500 		if (integer)
2501 			clocktest |= 0x01;
2502 		else
2503 			clocktest &= ~(0x01);
2504 		setclock = 1;
2505 	}
2506 	if (get_bootconf_option(args, "arm8.clock.sync", BOOTOPT_TYPE_BOOLEAN, &integer)) {
2507 		if (integer)
2508 			clocktest |= 0x02;
2509 		else
2510 			clocktest &= ~(0x02);
2511 		setclock = 1;
2512 	}
2513 	if (get_bootconf_option(args, "arm8.clock.fast", BOOTOPT_TYPE_BININT, &integer)) {
		clocktest = (clocktest & ~0x0c) | (integer & 3) << 2;
2515 		setclock = 1;
2516 	}
2517 	if (get_bootconf_option(args, "arm8.test", BOOTOPT_TYPE_BININT, &integer)) {
2518 		clocktest |= (integer & 7) << 5;
2519 		setclock = 1;
2520 	}
2521 
2522 	/* Clear out the cache */
2523 	cpu_idcache_wbinv_all();
2524 
2525 	/* Set the control register */
2526 	curcpu()->ci_ctrl = cpuctrl;
2527 	cpu_control(0xffffffff, cpuctrl);
2528 
2529 	/* Set the clock/test register */
2530 	if (setclock)
2531 		arm8_clock_config(0x7f, clocktest);
2532 }
2533 #endif	/* CPU_ARM8 */
2534 
2535 #ifdef CPU_ARM9
2536 struct cpu_option arm9_options[] = {
2537 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2538 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2539 	{ "arm9.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2540 	{ "arm9.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2541 	{ "arm9.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2542 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2543 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2544 	{ "arm9.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2545 	{ NULL,			IGN, IGN, 0 }
2546 };
2547 
2548 void
2549 arm9_setup(char *args)
2550 {
2551 	int cpuctrl, cpuctrlmask;
2552 
2553 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2554 	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2555 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2556 	    | CPU_CONTROL_WBUF_ENABLE;
2557 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2558 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2559 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2560 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2561 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2562 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
2563 		 | CPU_CONTROL_ROUNDROBIN;
2564 
2565 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2566 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2567 #endif
2568 
2569 	cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);
2570 
2571 #ifdef __ARMEB__
2572 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2573 #endif
2574 
2575 	if (vector_page == ARM_VECTORS_HIGH)
2576 		cpuctrl |= CPU_CONTROL_VECRELOC;
2577 
2578 	/* Clear out the cache */
2579 	cpu_idcache_wbinv_all();
2580 
2581 	/* Set the control register */
2582 	curcpu()->ci_ctrl = cpuctrl;
2583 	cpu_control(cpuctrlmask, cpuctrl);
2585 }
2586 #endif	/* CPU_ARM9 */
2587 
2588 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
2589 struct cpu_option arm10_options[] = {
2590 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2591 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2592 	{ "arm10.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2593 	{ "arm10.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2594 	{ "arm10.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2595 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2596 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2597 	{ "arm10.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2598 	{ NULL,			IGN, IGN, 0 }
2599 };
2600 
2601 void
2602 arm10_setup(char *args)
2603 {
2604 	int cpuctrl, cpuctrlmask;
2605 
2606 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2607 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2608 	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
2609 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2610 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2611 	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2612 	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2613 	    | CPU_CONTROL_BPRD_ENABLE
2614 	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
2615 
2616 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2617 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2618 #endif
2619 
2620 	cpuctrl = parse_cpu_options(args, arm10_options, cpuctrl);
2621 
2622 #ifdef __ARMEB__
2623 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2624 #endif
2625 
2626 	if (vector_page == ARM_VECTORS_HIGH)
2627 		cpuctrl |= CPU_CONTROL_VECRELOC;
2628 
2629 	/* Clear out the cache */
2630 	cpu_idcache_wbinv_all();
2631 
2632 	/* Now really make sure they are clean.  */
2633 	__asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
2634 
2635 	/* Set the control register */
2636 	curcpu()->ci_ctrl = cpuctrl;
2637 	cpu_control(0xffffffff, cpuctrl);
2638 
2639 	/* And again. */
2640 	cpu_idcache_wbinv_all();
2641 }
2642 #endif	/* CPU_ARM9E || CPU_ARM10 */
2643 
2644 #if defined(CPU_ARM11)
2645 struct cpu_option arm11_options[] = {
2646 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2647 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2648 	{ "arm11.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2649 	{ "arm11.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2650 	{ "arm11.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2651 	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2652 	{ "arm11.branchpredict", BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2653 	{ NULL,			IGN, IGN, 0 }
2654 };
2655 
2656 void
2657 arm11_setup(char *args)
2658 {
2659 	int cpuctrl, cpuctrlmask;
2660 
2661 #if defined(PROCESS_ID_IS_CURCPU)
2662 	/* set curcpu() */
2663 	__asm("mcr\tp15, 0, %0, c13, c0, 4" : : "r"(&cpu_info_store));
2664 #elif defined(PROCESS_ID_IS_CURLWP)
2665 	/* set curlwp() */
2666 	__asm("mcr\tp15, 0, %0, c13, c0, 4" : : "r"(&lwp0));
2667 #endif
2668 
2669 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2670 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2671 	    /* | CPU_CONTROL_BPRD_ENABLE */;
2672 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2673 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2674 	    | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BPRD_ENABLE
2675 	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2676 	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
2677 
2678 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2679 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2680 #endif
2681 
2682 	cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);
2683 
2684 #ifdef __ARMEB__
2685 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2686 #endif
2687 
2688 	if (vector_page == ARM_VECTORS_HIGH)
2689 		cpuctrl |= CPU_CONTROL_VECRELOC;
2690 
2691 	/* Clear out the cache */
2692 	cpu_idcache_wbinv_all();
2693 
2694 	/* Now really make sure they are clean.  */
2695 	__asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
2696 
2697 	/* Allow detection code to find the VFP if it's fitted.  */
2698 	__asm volatile ("mcr\tp15, 0, %0, c1, c0, 2" : : "r" (0x0fffffff));
2699 
2700 	/* Set the control register */
2701 	curcpu()->ci_ctrl = cpuctrl;
2702 	cpu_control(0xffffffff, cpuctrl);
2703 
2704 	/* And again. */
2705 	cpu_idcache_wbinv_all();
2706 }
2707 #endif	/* CPU_ARM11 */
2708 
2709 #if defined(CPU_ARM11MPCORE)
2710 
2711 void
2712 arm11mpcore_setup(char *args)
2713 {
2714 	int cpuctrl, cpuctrlmask;
2715 
2716 #if defined(PROCESS_ID_IS_CURCPU)
2717 	/* set curcpu() */
2718 	__asm("mcr\tp15, 0, %0, c13, c0, 4" : : "r"(&cpu_info_store));
2719 #elif defined(PROCESS_ID_IS_CURLWP)
2720 	/* set curlwp() */
2721 	__asm("mcr\tp15, 0, %0, c13, c0, 4" : : "r"(&lwp0));
2722 #endif
2723 
2724 	cpuctrl = CPU_CONTROL_IC_ENABLE
2725 	    | CPU_CONTROL_DC_ENABLE
2726 	    | CPU_CONTROL_BPRD_ENABLE ;
2727 	cpuctrlmask = CPU_CONTROL_IC_ENABLE
2728 	    | CPU_CONTROL_DC_ENABLE
2729 	    | CPU_CONTROL_BPRD_ENABLE
2730 	    | CPU_CONTROL_AFLT_ENABLE
2731 	    | CPU_CONTROL_VECRELOC;
2732 
2733 #ifdef	ARM11MPCORE_MMU_COMPAT
2734 	/* XXX: S and R? */
2735 #endif
2736 
2737 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2738 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2739 #endif
2740 
2741 	cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);
2742 
2743 	if (vector_page == ARM_VECTORS_HIGH)
2744 		cpuctrl |= CPU_CONTROL_VECRELOC;
2745 
2746 	/* Clear out the cache */
2747 	cpu_idcache_wbinv_all();
2748 
2749 	/* Now really make sure they are clean.  */
2750 	__asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
2751 
2752 	/* Allow detection code to find the VFP if it's fitted.  */
2753 	__asm volatile ("mcr\tp15, 0, %0, c1, c0, 2" : : "r" (0x0fffffff));
2754 
2755 	/* Set the control register */
2756 	curcpu()->ci_ctrl = cpu_control(cpuctrlmask, cpuctrl);
2757 
2758 	/* And again. */
2759 	cpu_idcache_wbinv_all();
2760 }
2761 #endif	/* CPU_ARM11MPCORE */
2762 
2763 
2764 #if defined(CPU_CORTEX)
struct cpu_option armv7_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "armv7.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "armv7.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "armv7.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
2773 
2774 void
armv7_setup(char *args)
2777 {
2778 	int cpuctrl, cpuctrlmask;
2779 
2780 #if defined(PROCESS_ID_IS_CURCPU)
2781 	/* set curcpu() */
2782 	__asm("mcr\tp15, 0, %0, c13, c0, 4" : : "r"(&cpu_info_store));
2783 #elif defined(PROCESS_ID_IS_CURLWP)
2784 	/* set curlwp() */
2785 	__asm("mcr\tp15, 0, %0, c13, c0, 4" : : "r"(&lwp0));
2786 #endif
2787 
2788 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_IC_ENABLE
2789 	    | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_BPRD_ENABLE ;
2790 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2791 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2792 	    | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BPRD_ENABLE
2793 	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2794 	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
2795 
2796 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2797 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2798 #endif
2799 
2800 	cpuctrl = parse_cpu_options(args, armv7_options, cpuctrl);
2801 
2802 #ifdef __ARMEB__
2803 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2804 #endif
2805 
2806 	if (vector_page == ARM_VECTORS_HIGH)
2807 		cpuctrl |= CPU_CONTROL_VECRELOC;
2808 
2809 	/* Clear out the cache */
2810 	cpu_idcache_wbinv_all();
2811 
2812 	/* Set the control register */
2813 	curcpu()->ci_ctrl = cpuctrl;
2814 	cpu_control(0xffffffff, cpuctrl);
2815 }
2816 
2817 /* Clean the data cache to the level of coherency. Slow. */
2818 void
armv7_dcache_wbinv_all(void)
2820 {
2821 	u_int clidr, loc, level;
2822 
2823 	/* Cache Level ID Register */
2824 	__asm volatile("mrc\tp15, 1, %0, c0, c0, 1" : "=r" (clidr));
2825 
2826 	loc = (clidr >> 24) & 7; /* Level of Coherency */
2827 
2828 	for (level = 0; level <= loc; level++) {
2829 		u_int ctype, csid;
2830 		int line_size, ways, nsets, wayshift, setshift;
2831 
2832 		ctype = (clidr >> (level * 3)) & 7;
		/* We're supposed to stop when ctype == 0, but we
		 * trust that loc isn't larger than necessary. */
2835 		if (ctype < 2) continue; /* no cache / only icache */
2836 
2837 		csid = get_cachesize_cp15(level << 1);
2838 		line_size = CPU_CSID_LEN(csid);
2839 		ways = CPU_CSID_ASSOC(csid);
2840 		nsets = (csid >> 13) & 0x7fff;
2841 
2842 		wayshift = __builtin_clz(ways); /* leading zeros */
2843 		setshift = line_size + 4;
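
		/*
		 * Illustrative numbers: a 4-way cache with 64-byte lines
		 * reports assoc - 1 = 3 and a line-size field of 2, so
		 * wayshift = clz(3) = 30 (way index in bits 31:30) and
		 * setshift = 2 + 4 = 6 (set index above the line offset),
		 * matching the set/way operand built below.
		 */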
2844 
2845 		for (; nsets >= 0; nsets--) {
2846 			int way;
2847 
2848 			for (way = ways; way >= 0; way--) {
2849 				/* Clean by set/way */
2850 				const u_int sw = (way << wayshift)
2851 				    | (nsets << setshift)
2852 				    | (level << 1);
2853 
2854 				__asm volatile("mcr\tp15, 0, %0, c7, c10, 2"
2855 				    :: "r"(sw));
2856 			}
2857 		}
2858 	}
2859 
2860 	__asm volatile("dsb");
2861 	__asm volatile("isb");
2862 }
2863 #endif /* CPU_CORTEX */
2864 
2865 
2867 #if defined(CPU_ARM1136)
2868 void
2869 arm1136_setup(char *args)
2870 {
2871 	int cpuctrl, cpuctrl_wax;
2872 	uint32_t auxctrl, auxctrl_wax;
2873 	uint32_t tmp, tmp2;
	uint32_t sbz = 0;
2875 	uint32_t cpuid;
2876 
2877 #if defined(PROCESS_ID_IS_CURCPU)
2878 	/* set curcpu() */
2879 	__asm("mcr\tp15, 0, %0, c13, c0, 4" : : "r"(&cpu_info_store));
2880 #elif defined(PROCESS_ID_IS_CURLWP)
2881 	/* set curlwp() */
2882 	__asm("mcr\tp15, 0, %0, c13, c0, 4" : : "r"(&lwp0));
2883 #endif
2884 
2885 	cpuid = cpu_id();
2886 
2887 	cpuctrl =
2888 		CPU_CONTROL_MMU_ENABLE  |
2889 		CPU_CONTROL_DC_ENABLE   |
2890 		CPU_CONTROL_WBUF_ENABLE |
2891 		CPU_CONTROL_32BP_ENABLE |
2892 		CPU_CONTROL_32BD_ENABLE |
2893 		CPU_CONTROL_LABT_ENABLE |
2894 		CPU_CONTROL_SYST_ENABLE |
2895 		CPU_CONTROL_IC_ENABLE;
2896 
	/*
	 * "write as existing" bits; the inverse of this is the mask
	 * passed to cpu_control() below.
	 */
2901 	cpuctrl_wax =
2902 		(3 << 30) |
2903 		(1 << 29) |
2904 		(1 << 28) |
2905 		(3 << 26) |
2906 		(3 << 19) |
2907 		(1 << 17);
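
	/*
	 * Example: bit 17 is in cpuctrl_wax, so the cpu_control(~cpuctrl_wax,
	 * cpuctrl) call below leaves the CPU's current setting of that bit
	 * alone; only bits outside the "wax" set are rewritten.
	 */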
2908 
2909 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2910 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2911 #endif
2912 
2913 	cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);
2914 
2915 #ifdef __ARMEB__
2916 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2917 #endif
2918 
2919 	if (vector_page == ARM_VECTORS_HIGH)
2920 		cpuctrl |= CPU_CONTROL_VECRELOC;
2921 
2922 	auxctrl = 0;
2923 	auxctrl_wax = ~0;
	/* This option enables the workaround for the 364296 ARM1136
2925 	 * r0pX errata (possible cache data corruption with
2926 	 * hit-under-miss enabled). It sets the undocumented bit 31 in
2927 	 * the auxiliary control register and the FI bit in the control
2928 	 * register, thus disabling hit-under-miss without putting the
2929 	 * processor into full low interrupt latency mode. ARM11MPCore
2930 	 * is not affected.
2931 	 */
2932 	if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1136JS) { /* ARM1136JSr0pX */
2933 		cpuctrl |= CPU_CONTROL_FI_ENABLE;
2934 		auxctrl = ARM11R0_AUXCTL_PFI;
2935 		auxctrl_wax = ~ARM11R0_AUXCTL_PFI;
2936 	}
2937 
2938 	/* Clear out the cache */
2939 	cpu_idcache_wbinv_all();
2940 
2941 	/* Now really make sure they are clean.  */
2942 	__asm volatile ("mcr\tp15, 0, %0, c7, c7, 0" : : "r"(sbz));
2943 
2944 	/* Allow detection code to find the VFP if it's fitted.  */
2945 	__asm volatile ("mcr\tp15, 0, %0, c1, c0, 2" : : "r" (0x0fffffff));
2946 
2947 	/* Set the control register */
2948 	curcpu()->ci_ctrl = cpuctrl;
2949 	cpu_control(~cpuctrl_wax, cpuctrl);
2950 
	/* Read-modify-write the auxiliary control register. */
	__asm volatile ("mrc	p15, 0, %0, c1, c0, 1\n\t"
			"bic	%1, %0, %2\n\t"
			"orr	%1, %1, %3\n\t"
			"teq	%0, %1\n\t"
			"mcrne	p15, 0, %1, c1, c0, 1\n\t"
			: "=r"(tmp), "=r"(tmp2) :
			  "r"(~auxctrl_wax), "r"(auxctrl));
2958 
2959 	/* And again. */
2960 	cpu_idcache_wbinv_all();
2961 }
2962 #endif	/* CPU_ARM1136 */
2963 
2964 #ifdef CPU_SA110
2965 struct cpu_option sa110_options[] = {
2966 #ifdef COMPAT_12
2967 	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2968 	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
2969 #endif	/* COMPAT_12 */
2970 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2971 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2972 	{ "sa110.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2973 	{ "sa110.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2974 	{ "sa110.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2975 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2976 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2977 	{ "sa110.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2978 	{ NULL,			IGN, IGN, 0 }
2979 };
2980 
2981 void
2982 sa110_setup(char *args)
2983 {
2984 	int cpuctrl, cpuctrlmask;
2985 
2986 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2987 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2988 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2989 		 | CPU_CONTROL_WBUF_ENABLE;
2990 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2991 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2992 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2993 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2994 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2995 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
2996 		 | CPU_CONTROL_CPCLK;
2997 
2998 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2999 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3000 #endif
3001 
3002 	cpuctrl = parse_cpu_options(args, sa110_options, cpuctrl);
3003 
3004 #ifdef __ARMEB__
3005 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3006 #endif
3007 
3008 	if (vector_page == ARM_VECTORS_HIGH)
3009 		cpuctrl |= CPU_CONTROL_VECRELOC;
3010 
3011 	/* Clear out the cache */
3012 	cpu_idcache_wbinv_all();
3013 
3014 	/* Set the control register */
3015 	curcpu()->ci_ctrl = cpuctrl;
3016 /*	cpu_control(cpuctrlmask, cpuctrl);*/
3017 	cpu_control(0xffffffff, cpuctrl);
3018 
	/*
	 * Enable clock switching.  Note that this doesn't read or write
	 * r0; r0 is just there to make the asm valid.
	 */
	__asm ("mcr p15, 0, r0, c15, c1, 2");
3024 }
3025 #endif	/* CPU_SA110 */
3026 
3027 #if defined(CPU_SA1100) || defined(CPU_SA1110)
3028 struct cpu_option sa11x0_options[] = {
3029 #ifdef COMPAT_12
3030 	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3031 	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
3032 #endif	/* COMPAT_12 */
3033 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3034 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3035 	{ "sa11x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3036 	{ "sa11x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
3037 	{ "sa11x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
3038 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3039 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
3040 	{ "sa11x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3041 	{ NULL,			IGN, IGN, 0 }
3042 };
3043 
3044 void
3045 sa11x0_setup(char *args)
3046 {
3047 	int cpuctrl, cpuctrlmask;
3048 
3049 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3050 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3051 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3052 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
3053 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3054 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3055 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3056 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
3057 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
3058 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
3059 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
3060 
3061 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3062 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3063 #endif
3064 
3065 	cpuctrl = parse_cpu_options(args, sa11x0_options, cpuctrl);
3066 
3067 #ifdef __ARMEB__
3068 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3069 #endif
3070 
3071 	if (vector_page == ARM_VECTORS_HIGH)
3072 		cpuctrl |= CPU_CONTROL_VECRELOC;
3073 
3074 	/* Clear out the cache */
3075 	cpu_idcache_wbinv_all();
3076 
3077 	/* Set the control register */
3078 	curcpu()->ci_ctrl = cpuctrl;
3079 	cpu_control(0xffffffff, cpuctrl);
3080 }
3081 #endif	/* CPU_SA1100 || CPU_SA1110 */
3082 
3083 #if defined(CPU_FA526)
3084 struct cpu_option fa526_options[] = {
3085 #ifdef COMPAT_12
3086 	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3087 	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
3088 #endif	/* COMPAT_12 */
3089 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3090 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3091 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3092 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
3093 	{ NULL,			IGN, IGN, 0 }
3094 };
3095 
3096 void
3097 fa526_setup(char *args)
3098 {
3099 	int cpuctrl, cpuctrlmask;
3100 
3101 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3102 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3103 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3104 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
3105 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3106 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3107 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3108 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
3109 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
3110 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
3111 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
3112 
3113 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3114 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3115 #endif
3116 
3117 	cpuctrl = parse_cpu_options(args, fa526_options, cpuctrl);
3118 
3119 #ifdef __ARMEB__
3120 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3121 #endif
3122 
3123 	if (vector_page == ARM_VECTORS_HIGH)
3124 		cpuctrl |= CPU_CONTROL_VECRELOC;
3125 
3126 	/* Clear out the cache */
3127 	cpu_idcache_wbinv_all();
3128 
3129 	/* Set the control register */
3130 	curcpu()->ci_ctrl = cpuctrl;
3131 	cpu_control(0xffffffff, cpuctrl);
3132 }
3133 #endif	/* CPU_FA526 */
3134 
3135 #if defined(CPU_IXP12X0)
3136 struct cpu_option ixp12x0_options[] = {
3137 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3138 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3139 	{ "ixp12x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3140 	{ "ixp12x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
3141 	{ "ixp12x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
3142 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3143 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
3144 	{ "ixp12x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3145 	{ NULL,			IGN, IGN, 0 }
3146 };
3147 
3148 void
3149 ixp12x0_setup(char *args)
3150 {
3151 	int cpuctrl, cpuctrlmask;
3152 
3154 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE
3155 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_SYST_ENABLE
3156 		 | CPU_CONTROL_IC_ENABLE;
3157 
3158 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_AFLT_ENABLE
3159 		 | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE
3160 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_SYST_ENABLE
3161 		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_IC_ENABLE
3162 		 | CPU_CONTROL_VECRELOC;
3163 
3164 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3165 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3166 #endif
3167 
3168 	cpuctrl = parse_cpu_options(args, ixp12x0_options, cpuctrl);
3169 
3170 #ifdef __ARMEB__
3171 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3172 #endif
3173 
3174 	if (vector_page == ARM_VECTORS_HIGH)
3175 		cpuctrl |= CPU_CONTROL_VECRELOC;
3176 
3177 	/* Clear out the cache */
3178 	cpu_idcache_wbinv_all();
3179 
3180 	/* Set the control register */
3181 	curcpu()->ci_ctrl = cpuctrl;
3182 	/* cpu_control(0xffffffff, cpuctrl); */
3183 	cpu_control(cpuctrlmask, cpuctrl);
3184 }
3185 #endif /* CPU_IXP12X0 */
3186 
3187 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
3188     defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425) || defined(CPU_CORTEX)
3189 struct cpu_option xscale_options[] = {
3190 #ifdef COMPAT_12
3191 	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
3192 	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3193 #endif	/* COMPAT_12 */
3194 	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
3195 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3196 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3197 	{ "xscale.branchpredict", BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
3198 	{ "xscale.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3199 	{ "xscale.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
3200 	{ "xscale.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
3201 	{ NULL,			IGN, IGN, 0 }
3202 };
3203 
3204 void
3205 xscale_setup(char *args)
3206 {
3207 	uint32_t auxctl;
3208 	int cpuctrl, cpuctrlmask;
3209 
3210 	/*
3211 	 * The XScale Write Buffer is always enabled.  Our option
3212 	 * is to enable/disable coalescing.  Note that bits 6:3
3213 	 * must always be enabled.
3214 	 */
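
	/*
	 * (In the default cpuctrl below, WBUF_ENABLE (bit 3) together
	 * with the 32BP, 32BD and LABT enables supplies those
	 * always-set bits 6:3.)
	 */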
3215 
3216 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3217 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3218 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3219 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
3220 		 | CPU_CONTROL_BPRD_ENABLE;
3221 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3222 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3223 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3224 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
3225 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
3226 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
3227 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
3228 
3229 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3230 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3231 #endif
3232 
3233 	cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl);
3234 
3235 #ifdef __ARMEB__
3236 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3237 #endif
3238 
3239 	if (vector_page == ARM_VECTORS_HIGH)
3240 		cpuctrl |= CPU_CONTROL_VECRELOC;
3241 
3242 	/* Clear out the cache */
3243 	cpu_idcache_wbinv_all();
3244 
3245 	/*
3246 	 * Set the control register.  Note that bits 6:3 must always
3247 	 * be set to 1.
3248 	 */
3249 	curcpu()->ci_ctrl = cpuctrl;
3250 /*	cpu_control(cpuctrlmask, cpuctrl);*/
3251 	cpu_control(0xffffffff, cpuctrl);
3252 
3253 	/* Make sure write coalescing is turned on */
3254 	__asm volatile("mrc p15, 0, %0, c1, c0, 1"
3255 		: "=r" (auxctl));
3256 #ifdef XSCALE_NO_COALESCE_WRITES
3257 	auxctl |= XSCALE_AUXCTL_K;
3258 #else
3259 	auxctl &= ~XSCALE_AUXCTL_K;
3260 #endif
3261 	__asm volatile("mcr p15, 0, %0, c1, c0, 1"
3262 		: : "r" (auxctl));
3263 }
#endif	/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || __CPU_XSCALE_PXA2XX || CPU_XSCALE_IXP425 || CPU_CORTEX */
3265 
3266 #if defined(CPU_SHEEVA)
3267 struct cpu_option sheeva_options[] = {
3268 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3269 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3270 	{ "sheeva.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3271 	{ "sheeva.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
3272 	{ "sheeva.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
3273 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3274 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
3275 	{ "sheeva.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
3276 	{ NULL,			IGN, IGN, 0 }
3277 };
3278 
3279 void
3280 sheeva_setup(char *args)
3281 {
3282 	int cpuctrl, cpuctrlmask;
3283 
3284 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
3285 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3286 	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
3287 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
3288 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3289 	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
3290 	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
3291 	    | CPU_CONTROL_BPRD_ENABLE
3292 	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
3293 
3294 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3295 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3296 #endif
3297 
3298 	cpuctrl = parse_cpu_options(args, sheeva_options, cpuctrl);
3299 
	/*
	 * Sheeva has an L2 cache.  Enable/disable it here.
	 * Not really supported yet...
	 */
3304 
3305 #ifdef __ARMEB__
3306 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3307 #endif
3308 
3309 	if (vector_page == ARM_VECTORS_HIGH)
3310 		cpuctrl |= CPU_CONTROL_VECRELOC;
3311 
3312 	/* Clear out the cache */
3313 	cpu_idcache_wbinv_all();
3314 
3315 	/* Now really make sure they are clean.  */
3316 	__asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
3317 
3318 	/* Set the control register */
3319 	curcpu()->ci_ctrl = cpuctrl;
3320 	cpu_control(0xffffffff, cpuctrl);
3321 
3322 	/* And again. */
3323 	cpu_idcache_wbinv_all();
3324 }
3325 #endif	/* CPU_SHEEVA */
3326