/*	$NetBSD: cpufunc.c,v 1.94 2009/12/27 05:14:56 uebayasi Exp $	*/

/*
 * arm7tdmi support code Copyright (c) 2001 John Fremlin
 * arm8 support code Copyright (c) 1997 ARM Limited
 * arm8 support code Copyright (c) 1997 Causality Limited
 * arm9 support code Copyright (C) 2001 ARM Ltd
 * arm11 support code Copyright (c) 2007 Microsoft
 * cortexa8 support code Copyright (c) 2008 3am Software Foundry
 * Copyright (c) 1997 Mark Brinicombe.
 * Copyright (c) 1997 Causality Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Causality Limited.
 * 4. The name of Causality Limited may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpufuncs.c
 *
 * C functions for supporting CPU / MMU / TLB specific operations.
 *
 * Created      : 30/01/97
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cpufunc.c,v 1.94 2009/12/27 05:14:56 uebayasi Exp $");

#include "opt_compat_netbsd.h"
#include "opt_cpuoptions.h"
#include "opt_perfctrs.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/pmc.h>
#include <sys/systm.h>
#include <machine/cpu.h>
#include <machine/bootconfig.h>
#include <arch/arm/arm/disassem.h>

#include <uvm/uvm.h>

#include <arm/cpuconf.h>
#include <arm/cpufunc.h>

#ifdef CPU_XSCALE_80200
#include <arm/xscale/i80200reg.h>
#include <arm/xscale/i80200var.h>
#endif

#ifdef CPU_XSCALE_80321
#include <arm/xscale/i80321reg.h>
#include <arm/xscale/i80321var.h>
#endif

#ifdef CPU_XSCALE_IXP425
#include <arm/xscale/ixp425reg.h>
#include <arm/xscale/ixp425var.h>
#endif

#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321)
#include <arm/xscale/xscalereg.h>
#endif

#if defined(PERFCTRS)
struct arm_pmc_funcs *arm_pmc;
#endif

/* PRIMARY CACHE VARIABLES */
int	arm_picache_size;
int	arm_picache_line_size;
int	arm_picache_ways;

int	arm_pdcache_size;	/* and unified */
int	arm_pdcache_line_size;
int	arm_pdcache_ways;
#if (ARM_MMU_V6) != 0
int	arm_cache_prefer_mask;
#endif

int	arm_pcache_type;
int	arm_pcache_unified;

int	arm_dcache_align;
int	arm_dcache_align_mask;

/* 1 == use cpu_sleep(), 0 == don't */
int cpu_do_powersave;
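/*
 * Illustrative sketch only (kept out of the build with #if 0): how
 * machine-dependent idle code might consult cpu_do_powersave before
 * entering a sleep state.  The idle-loop shape and function name are
 * assumptions for illustration; only cpu_do_powersave and the
 * cpu_sleep() dispatch from <arm/cpufunc.h> come from this code base.
 */
#if 0
static void
example_idle(void)
{

	if (cpu_do_powersave)
		cpu_sleep(0);		/* dispatches to cpufuncs.cf_sleep */
}
#endif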

#ifdef CPU_ARM2
struct cpu_functions arm2_cpufuncs = {
	/* CPU functions */

	.cf_id			= arm2_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= (void *)cpufunc_nullop,

	/* TLB functions */

	.cf_tlb_flushID		= cpufunc_nullop,
	.cf_tlb_flushID_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushI		= cpufunc_nullop,
	.cf_tlb_flushI_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushD		= cpufunc_nullop,
	.cf_tlb_flushD_SE	= (void *)cpufunc_nullop,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm3_cache_flush,
	.cf_dcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_dcache_inv_range	= (void *)cpufunc_nullop,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= cpufunc_nullop,
	.cf_idcache_wbinv_range	= (void *)cpufunc_nullop,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= early_abort_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_setup		= (void *)cpufunc_nullop

};
#endif	/* CPU_ARM2 */

#ifdef CPU_ARM250
struct cpu_functions arm250_cpufuncs = {
	/* CPU functions */

	.cf_id			= arm250_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= (void *)cpufunc_nullop,

	/* TLB functions */

	.cf_tlb_flushID		= cpufunc_nullop,
	.cf_tlb_flushID_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushI		= cpufunc_nullop,
	.cf_tlb_flushI_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushD		= cpufunc_nullop,
	.cf_tlb_flushD_SE	= (void *)cpufunc_nullop,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm3_cache_flush,
	.cf_dcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_dcache_inv_range	= (void *)cpufunc_nullop,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= cpufunc_nullop,
	.cf_idcache_wbinv_range	= (void *)cpufunc_nullop,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= early_abort_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_setup		= (void *)cpufunc_nullop

};
#endif	/* CPU_ARM250 */

#ifdef CPU_ARM3
struct cpu_functions arm3_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= arm3_control,

	/* TLB functions */

	.cf_tlb_flushID		= cpufunc_nullop,
	.cf_tlb_flushID_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushI		= cpufunc_nullop,
	.cf_tlb_flushI_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushD		= cpufunc_nullop,
	.cf_tlb_flushD_SE	= (void *)cpufunc_nullop,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm3_cache_flush,
	.cf_dcache_wbinv_range	= (void *)arm3_cache_flush,
	.cf_dcache_inv_range	= (void *)arm3_cache_flush,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm3_cache_flush,
	.cf_idcache_wbinv_range	= (void *)arm3_cache_flush,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= early_abort_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_setup		= (void *)cpufunc_nullop

};
#endif	/* CPU_ARM3 */

#ifdef CPU_ARM6
struct cpu_functions arm6_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm67_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm67_tlb_flush,
	.cf_tlb_flushID_SE	= arm67_tlb_purge,
	.cf_tlb_flushI		= arm67_tlb_flush,
	.cf_tlb_flushI_SE	= arm67_tlb_purge,
	.cf_tlb_flushD		= arm67_tlb_flush,
	.cf_tlb_flushD_SE	= arm67_tlb_purge,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm67_cache_flush,
	.cf_dcache_wbinv_range	= (void *)arm67_cache_flush,
	.cf_dcache_inv_range	= (void *)arm67_cache_flush,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm67_cache_flush,
	.cf_idcache_wbinv_range	= (void *)arm67_cache_flush,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

#ifdef ARM6_LATE_ABORT
	.cf_dataabt_fixup	= late_abort_fixup,
#else
	.cf_dataabt_fixup	= early_abort_fixup,
#endif
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm67_context_switch,

	.cf_setup		= arm6_setup

};
#endif	/* CPU_ARM6 */

#ifdef CPU_ARM7
struct cpu_functions arm7_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm67_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm67_tlb_flush,
	.cf_tlb_flushID_SE	= arm67_tlb_purge,
	.cf_tlb_flushI		= arm67_tlb_flush,
	.cf_tlb_flushI_SE	= arm67_tlb_purge,
	.cf_tlb_flushD		= arm67_tlb_flush,
	.cf_tlb_flushD_SE	= arm67_tlb_purge,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm67_cache_flush,
	.cf_dcache_wbinv_range	= (void *)arm67_cache_flush,
	.cf_dcache_inv_range	= (void *)arm67_cache_flush,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm67_cache_flush,
	.cf_idcache_wbinv_range	= (void *)arm67_cache_flush,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= late_abort_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm67_context_switch,

	.cf_setup		= arm7_setup

};
#endif	/* CPU_ARM7 */

#ifdef CPU_ARM7TDMI
struct cpu_functions arm7tdmi_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm7tdmi_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm7tdmi_tlb_flushID,
	.cf_tlb_flushID_SE	= arm7tdmi_tlb_flushID_SE,
	.cf_tlb_flushI		= arm7tdmi_tlb_flushID,
	.cf_tlb_flushI_SE	= arm7tdmi_tlb_flushID_SE,
	.cf_tlb_flushD		= arm7tdmi_tlb_flushID,
	.cf_tlb_flushD_SE	= arm7tdmi_tlb_flushID_SE,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm7tdmi_cache_flushID,
	.cf_dcache_wbinv_range	= (void *)arm7tdmi_cache_flushID,
	.cf_dcache_inv_range	= (void *)arm7tdmi_cache_flushID,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm7tdmi_cache_flushID,
	.cf_idcache_wbinv_range	= (void *)arm7tdmi_cache_flushID,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= late_abort_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm7tdmi_context_switch,

	.cf_setup		= arm7tdmi_setup

};
#endif	/* CPU_ARM7TDMI */

#ifdef CPU_ARM8
struct cpu_functions arm8_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm8_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm8_tlb_flushID,
	.cf_tlb_flushID_SE	= arm8_tlb_flushID_SE,
	.cf_tlb_flushI		= arm8_tlb_flushID,
	.cf_tlb_flushI_SE	= arm8_tlb_flushID_SE,
	.cf_tlb_flushD		= arm8_tlb_flushID,
	.cf_tlb_flushD_SE	= arm8_tlb_flushID_SE,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm8_cache_purgeID,
	.cf_dcache_wbinv_range	= (void *)arm8_cache_purgeID,
/*XXX*/	.cf_dcache_inv_range	= (void *)arm8_cache_purgeID,
	.cf_dcache_wb_range	= (void *)arm8_cache_cleanID,

	.cf_idcache_wbinv_all	= arm8_cache_purgeID,
	.cf_idcache_wbinv_range = (void *)arm8_cache_purgeID,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm8_context_switch,

	.cf_setup		= arm8_setup
};
#endif	/* CPU_ARM8 */

#ifdef CPU_ARM9
struct cpu_functions arm9_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm9_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= arm9_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= arm9_icache_sync_all,
	.cf_icache_sync_range	= arm9_icache_sync_range,

	.cf_dcache_wbinv_all	= arm9_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= arm9_dcache_wbinv_range,
/*XXX*/	.cf_dcache_inv_range	= arm9_dcache_wbinv_range,
	.cf_dcache_wb_range	= arm9_dcache_wb_range,

	.cf_idcache_wbinv_all	= arm9_idcache_wbinv_all,
	.cf_idcache_wbinv_range = arm9_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm9_context_switch,

	.cf_setup		= arm9_setup

};
#endif /* CPU_ARM9 */

#if defined(CPU_ARM9E) || defined(CPU_ARM10)
struct cpu_functions armv5_ec_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= armv5_ec_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= arm10_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= arm10_tlb_flushI_SE,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv5_ec_icache_sync_all,
	.cf_icache_sync_range	= armv5_ec_icache_sync_range,

	.cf_dcache_wbinv_all	= armv5_ec_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= armv5_ec_dcache_wbinv_range,
/*XXX*/	.cf_dcache_inv_range	= armv5_ec_dcache_wbinv_range,
	.cf_dcache_wb_range	= armv5_ec_dcache_wb_range,

	.cf_idcache_wbinv_all	= armv5_ec_idcache_wbinv_all,
	.cf_idcache_wbinv_range = armv5_ec_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm10_context_switch,

	.cf_setup		= arm10_setup

};
#endif /* CPU_ARM9E || CPU_ARM10 */

#ifdef CPU_ARM10
struct cpu_functions arm10_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= armv5_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= arm10_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= arm10_tlb_flushI_SE,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv5_icache_sync_all,
	.cf_icache_sync_range	= armv5_icache_sync_range,

	.cf_dcache_wbinv_all	= armv5_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= armv5_dcache_wbinv_range,
/*XXX*/	.cf_dcache_inv_range	= armv5_dcache_wbinv_range,
	.cf_dcache_wb_range	= armv5_dcache_wb_range,

	.cf_idcache_wbinv_all	= armv5_idcache_wbinv_all,
	.cf_idcache_wbinv_range = armv5_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm10_context_switch,

	.cf_setup		= arm10_setup

};
#endif /* CPU_ARM10 */

#ifdef CPU_ARM11
struct cpu_functions arm11_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm11_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm11_tlb_flushID,
	.cf_tlb_flushID_SE	= arm11_tlb_flushID_SE,
	.cf_tlb_flushI		= arm11_tlb_flushI,
	.cf_tlb_flushI_SE	= arm11_tlb_flushI_SE,
	.cf_tlb_flushD		= arm11_tlb_flushD,
	.cf_tlb_flushD_SE	= arm11_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv6_icache_sync_all,
	.cf_icache_sync_range	= armv6_icache_sync_range,

	.cf_dcache_wbinv_all	= armv6_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= armv6_dcache_wbinv_range,
	.cf_dcache_inv_range	= armv6_dcache_inv_range,
	.cf_dcache_wb_range	= armv6_dcache_wb_range,

	.cf_idcache_wbinv_all	= armv6_idcache_wbinv_all,
	.cf_idcache_wbinv_range = armv6_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= arm11_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= arm11_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm11_context_switch,

	.cf_setup		= arm11_setup

};
#endif /* CPU_ARM11 */

#ifdef CPU_ARM1136
struct cpu_functions arm1136_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm1136_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm11_tlb_flushID,
	.cf_tlb_flushID_SE	= arm11_tlb_flushID_SE,
	.cf_tlb_flushI		= arm11_tlb_flushI,
	.cf_tlb_flushI_SE	= arm11_tlb_flushI_SE,
	.cf_tlb_flushD		= arm11_tlb_flushD,
	.cf_tlb_flushD_SE	= arm11_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= arm1136_icache_sync_all,	/* 411920 */
	.cf_icache_sync_range	= arm1136_icache_sync_range,	/* 371025 */

	.cf_dcache_wbinv_all	= arm1136_dcache_wbinv_all,	/* 411920 */
	.cf_dcache_wbinv_range	= armv6_dcache_wbinv_range,
	.cf_dcache_inv_range	= armv6_dcache_inv_range,
	.cf_dcache_wb_range	= armv6_dcache_wb_range,

	.cf_idcache_wbinv_all	= arm1136_idcache_wbinv_all,	/* 411920 */
	.cf_idcache_wbinv_range = arm1136_idcache_wbinv_range,	/* 371025 */

	/* Other functions */

	.cf_flush_prefetchbuf	= arm1136_flush_prefetchbuf,
	.cf_drain_writebuf	= arm11_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= arm11_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm11_context_switch,

	.cf_setup		= arm1136_setup

};
#endif /* CPU_ARM1136 */

#ifdef CPU_SA110
struct cpu_functions sa110_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= sa1_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= sa1_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= sa1_cache_syncI,
	.cf_icache_sync_range	= sa1_cache_syncI_rng,

	.cf_dcache_wbinv_all	= sa1_cache_purgeD,
	.cf_dcache_wbinv_range	= sa1_cache_purgeD_rng,
/*XXX*/	.cf_dcache_inv_range	= sa1_cache_purgeD_rng,
	.cf_dcache_wb_range	= sa1_cache_cleanD_rng,

	.cf_idcache_wbinv_all	= sa1_cache_purgeID,
	.cf_idcache_wbinv_range	= sa1_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= sa110_context_switch,

	.cf_setup		= sa110_setup
};
#endif	/* CPU_SA110 */

#if defined(CPU_SA1100) || defined(CPU_SA1110)
struct cpu_functions sa11x0_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= sa1_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= sa1_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= sa1_cache_syncI,
	.cf_icache_sync_range	= sa1_cache_syncI_rng,

	.cf_dcache_wbinv_all	= sa1_cache_purgeD,
	.cf_dcache_wbinv_range	= sa1_cache_purgeD_rng,
/*XXX*/	.cf_dcache_inv_range	= sa1_cache_purgeD_rng,
	.cf_dcache_wb_range	= sa1_cache_cleanD_rng,

	.cf_idcache_wbinv_all	= sa1_cache_purgeID,
	.cf_idcache_wbinv_range	= sa1_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf	= sa11x0_drain_readbuf,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= sa11x0_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= sa11x0_context_switch,

	.cf_setup		= sa11x0_setup
};
#endif	/* CPU_SA1100 || CPU_SA1110 */

#if defined(CPU_FA526)
struct cpu_functions fa526_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= fa526_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= fa526_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= fa526_tlb_flushI_SE,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= fa526_icache_sync_all,
	.cf_icache_sync_range	= fa526_icache_sync_range,

	.cf_dcache_wbinv_all	= fa526_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= fa526_dcache_wbinv_range,
	.cf_dcache_inv_range	= fa526_dcache_inv_range,
	.cf_dcache_wb_range	= fa526_dcache_wb_range,

	.cf_idcache_wbinv_all	= fa526_idcache_wbinv_all,
	.cf_idcache_wbinv_range	= fa526_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= fa526_flush_prefetchbuf,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= fa526_flush_brnchtgt_E,

	.cf_sleep		= fa526_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= fa526_context_switch,

	.cf_setup		= fa526_setup
};
#endif	/* CPU_FA526 */

#ifdef CPU_IXP12X0
struct cpu_functions ixp12x0_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= sa1_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= sa1_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= sa1_cache_syncI,
	.cf_icache_sync_range	= sa1_cache_syncI_rng,

	.cf_dcache_wbinv_all	= sa1_cache_purgeD,
	.cf_dcache_wbinv_range	= sa1_cache_purgeD_rng,
/*XXX*/	.cf_dcache_inv_range	= sa1_cache_purgeD_rng,
	.cf_dcache_wb_range	= sa1_cache_cleanD_rng,

	.cf_idcache_wbinv_all	= sa1_cache_purgeID,
	.cf_idcache_wbinv_range	= sa1_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf	= ixp12x0_drain_readbuf,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= ixp12x0_context_switch,

	.cf_setup		= ixp12x0_setup
};
#endif	/* CPU_IXP12X0 */

#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
    defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425)
struct cpu_functions xscale_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= xscale_cpwait,

	/* MMU functions */

	.cf_control		= xscale_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= xscale_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= xscale_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= xscale_cache_syncI,
	.cf_icache_sync_range	= xscale_cache_syncI_rng,

	.cf_dcache_wbinv_all	= xscale_cache_purgeD,
	.cf_dcache_wbinv_range	= xscale_cache_purgeD_rng,
	.cf_dcache_inv_range	= xscale_cache_flushD_rng,
	.cf_dcache_wb_range	= xscale_cache_cleanD_rng,

	.cf_idcache_wbinv_all	= xscale_cache_purgeID,
	.cf_idcache_wbinv_range = xscale_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= xscale_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= xscale_context_switch,

	.cf_setup		= xscale_setup
};
#endif
/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || __CPU_XSCALE_PXA2XX || CPU_XSCALE_IXP425 */

/*
 * Global constants also used by locore.s
 */

struct cpu_functions cpufuncs;
u_int cputype;
u_int cpu_reset_needs_v4_MMU_disable;	/* flag used in locore.s */
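
/*
 * Illustrative sketch only (kept out of the build with #if 0): callers
 * do not normally reference the cpufuncs table directly; <arm/cpufunc.h>
 * wraps its members in cpu_*() macros that dispatch through it, in
 * roughly this pattern.  See that header for the authoritative list.
 */
#if 0
#define	cpu_tlb_flushID()	cpufuncs.cf_tlb_flushID()
#define	cpu_idcache_wbinv_all()	cpufuncs.cf_idcache_wbinv_all()
#endif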

#if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) || \
    defined(CPU_ARM9E) || defined(CPU_ARM10) || defined(CPU_ARM11) || \
    defined(CPU_FA526) || \
    defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
    defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425)
static void get_cachetype_cp15(void);

/* Additional cache information local to this file.  Log2 of some of the
   above numbers.  */
static int	arm_dcache_l2_nsets;
static int	arm_dcache_l2_assoc;
static int	arm_dcache_l2_linesize;

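/*
 * Illustrative sketch only (kept out of the build with #if 0): with the
 * legacy (pre-ARMv6) cache type register encoding, all three values
 * above are log2 quantities, so byte sizes can be recovered by
 * shifting.  The helper name is hypothetical; the same arithmetic
 * appears inline in set_cpufuncs() for the arm9/armv5 dcache setup.
 */
#if 0
static u_int
example_l2_dcache_size(void)
{

	/* nsets * ways * linesize, with each factor stored as a log2 */
	return 1U << (arm_dcache_l2_nsets + arm_dcache_l2_assoc +
	    arm_dcache_l2_linesize);
}
#endif
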
static void
get_cachetype_cp15(void)
{
	u_int ctype, isize, dsize;
	u_int multiplier;

	__asm volatile("mrc p15, 0, %0, c0, c0, 1"
		: "=r" (ctype));

	/*
	 * ...and thus spake the ARM ARM:
	 *
	 * If an <opcode2> value corresponding to an unimplemented or
	 * reserved ID register is encountered, the System Control
	 * processor returns the value of the main ID register.
	 */
	if (ctype == cpu_id())
		goto out;

#if (ARM_MMU_V6) > 0
	if (CPU_CT_FORMAT(ctype) == 4) {
		u_int csid1, csid2;
		isize = 1U << (CPU_CT4_ILINE(ctype) + 2);
		dsize = 1U << (CPU_CT4_DLINE(ctype) + 2);

		__asm volatile("mcr p15, 1, %0, c0, c0, 2"
		    :: "r" (CPU_CSSR_L1));	/* select L1 cache values */
		__asm volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (csid1));
		arm_pdcache_ways = CPU_CSID_ASSOC(csid1) + 1;
		arm_pdcache_line_size = dsize << CPU_CSID_LEN(csid1);
		arm_pdcache_size = arm_pdcache_line_size * arm_pdcache_ways;
		arm_pdcache_size *= CPU_CSID_NUMSETS(csid1);
		arm_cache_prefer_mask = PAGE_SIZE;

		arm_dcache_align = arm_pdcache_line_size;

		__asm volatile("mcr p15, 1, %0, c0, c0, 2"
		    :: "r" (CPU_CSSR_L2));	/* select L2 cache values */
		__asm volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (csid2));
		arm_dcache_l2_assoc = CPU_CSID_ASSOC(csid2) + 1;
		arm_dcache_l2_linesize = dsize << CPU_CSID_LEN(csid2);
		arm_dcache_l2_nsets = CPU_CSID_NUMSETS(csid2) + 1;
		arm_pcache_type = CPU_CT_CTYPE_WB14;
		goto out;
	}
#endif /* ARM_MMU_V6 > 0 */

	if ((ctype & CPU_CT_S) == 0)
		arm_pcache_unified = 1;

	/*
	 * If you want to know how this code works, go read the ARM ARM.
	 */

	arm_pcache_type = CPU_CT_CTYPE(ctype);

	if (arm_pcache_unified == 0) {
		isize = CPU_CT_ISIZE(ctype);
		multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
		arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
		if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
			if (isize & CPU_CT_xSIZE_M)
				arm_picache_line_size = 0; /* not present */
			else
				arm_picache_ways = 1;
		} else {
			arm_picache_ways = multiplier <<
			    (CPU_CT_xSIZE_ASSOC(isize) - 1);
#if (ARM_MMU_V6) > 0
			if (CPU_CT_xSIZE_P & isize)
				arm_cache_prefer_mask |=
				    __BIT(9 + CPU_CT_xSIZE_SIZE(isize)
					  - CPU_CT_xSIZE_ASSOC(isize))
				    - PAGE_SIZE;
#endif
		}
		arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
	}

	dsize = CPU_CT_DSIZE(ctype);
	multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
	arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
	if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
		if (dsize & CPU_CT_xSIZE_M)
			arm_pdcache_line_size = 0; /* not present */
		else
			arm_pdcache_ways = 1;
	} else {
		arm_pdcache_ways = multiplier <<
		    (CPU_CT_xSIZE_ASSOC(dsize) - 1);
#if (ARM_MMU_V6) > 0
		if (CPU_CT_xSIZE_P & dsize)
			arm_cache_prefer_mask |=
			    __BIT(9 + CPU_CT_xSIZE_SIZE(dsize)
				  - CPU_CT_xSIZE_ASSOC(dsize)) - PAGE_SIZE;
#endif
	}
	arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);

	arm_dcache_align = arm_pdcache_line_size;

	arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
	arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
	arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
	    CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);

 out:
	arm_dcache_align_mask = arm_dcache_align - 1;
}
#endif /* ARM7TDMI || ARM8 || ARM9 || ARM9E || ARM10 || ARM11 || FA526 || XSCALE */
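
/*
 * Illustrative sketch only (kept out of the build with #if 0): a worked
 * decode of a pre-ARMv6 cache type register value using the same
 * CPU_CT_* accessors as above.  The function is hypothetical; real
 * values come from the mrc in get_cachetype_cp15().
 */
#if 0
static void
example_decode_dcache(u_int ctype)
{
	u_int dsize = CPU_CT_DSIZE(ctype);
	u_int m = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;

	/* length, associativity and size fields are all log2-encoded */
	printf("dcache: %u bytes/line, %u ways, %u bytes\n",
	    1U << (CPU_CT_xSIZE_LEN(dsize) + 3),
	    m << (CPU_CT_xSIZE_ASSOC(dsize) - 1),
	    m << (CPU_CT_xSIZE_SIZE(dsize) + 8));
}
#endif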

#if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3) || \
    defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_SA110) || \
    defined(CPU_SA1100) || defined(CPU_SA1110) || defined(CPU_IXP12X0)
/* Cache information for CPUs without cache type registers. */
struct cachetab {
	u_int32_t ct_cpuid;
	int	ct_pcache_type;
	int	ct_pcache_unified;
	int	ct_pdcache_size;
	int	ct_pdcache_line_size;
	int	ct_pdcache_ways;
	int	ct_picache_size;
	int	ct_picache_line_size;
	int	ct_picache_ways;
};

struct cachetab cachetab[] = {
    /* cpuid,           cache type,       u,  dsiz, ls, wy,  isiz, ls, wy */
    { CPU_ID_ARM2,      0,                1,     0,  0,  0,     0,  0,  0 },
    { CPU_ID_ARM250,    0,                1,     0,  0,  0,     0,  0,  0 },
    { CPU_ID_ARM3,      CPU_CT_CTYPE_WT,  1,  4096, 16, 64,     0,  0,  0 },
    { CPU_ID_ARM610,	CPU_CT_CTYPE_WT,  1,  4096, 16, 64,     0,  0,  0 },
    { CPU_ID_ARM710,    CPU_CT_CTYPE_WT,  1,  8192, 32,  4,     0,  0,  0 },
    { CPU_ID_ARM7500,   CPU_CT_CTYPE_WT,  1,  4096, 16,  4,     0,  0,  0 },
    { CPU_ID_ARM710A,   CPU_CT_CTYPE_WT,  1,  8192, 16,  4,     0,  0,  0 },
    { CPU_ID_ARM7500FE, CPU_CT_CTYPE_WT,  1,  4096, 16,  4,     0,  0,  0 },
    /* XXX is this type right for SA-1? */
    { CPU_ID_SA110,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 },
    { CPU_ID_SA1100,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
    { CPU_ID_SA1110,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
    { CPU_ID_IXP1200,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 }, /* XXX */
    { 0, 0, 0, 0, 0, 0, 0, 0 }
};

static void get_cachetype_table(void);

static void
get_cachetype_table(void)
{
	int i;
	u_int32_t cpuid = cpu_id();

	for (i = 0; cachetab[i].ct_cpuid != 0; i++) {
		if (cachetab[i].ct_cpuid == (cpuid & CPU_ID_CPU_MASK)) {
			arm_pcache_type = cachetab[i].ct_pcache_type;
			arm_pcache_unified = cachetab[i].ct_pcache_unified;
			arm_pdcache_size = cachetab[i].ct_pdcache_size;
			arm_pdcache_line_size =
			    cachetab[i].ct_pdcache_line_size;
			arm_pdcache_ways = cachetab[i].ct_pdcache_ways;
			arm_picache_size = cachetab[i].ct_picache_size;
			arm_picache_line_size =
			    cachetab[i].ct_picache_line_size;
			arm_picache_ways = cachetab[i].ct_picache_ways;
		}
	}
	arm_dcache_align = arm_pdcache_line_size;

	arm_dcache_align_mask = arm_dcache_align - 1;
}

#endif /* ARM2 || ARM250 || ARM3 || ARM6 || ARM7 || SA110 || SA1100 || SA1110 || IXP12X0 */

/*
 * Cannot panic here as we may not have a console yet ...
 */

int
set_cpufuncs(void)
{
	if (cputype == 0) {
		cputype = cpufunc_id();
		cputype &= CPU_ID_CPU_MASK;
	}

	/*
	 * NOTE: cpu_do_powersave defaults to off.  If we encounter a
	 * CPU type where we want to use it by default, then we set it.
	 */
#ifdef CPU_ARM2
	if (cputype == CPU_ID_ARM2) {
		cpufuncs = arm2_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;
		get_cachetype_table();
		return 0;
	}
#endif /* CPU_ARM2 */
#ifdef CPU_ARM250
	if (cputype == CPU_ID_ARM250) {
		cpufuncs = arm250_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;
		get_cachetype_table();
		return 0;
	}
#endif
#ifdef CPU_ARM3
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    (cputype & 0x00000f00) == 0x00000300) {
		cpufuncs = arm3_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;
		get_cachetype_table();
		return 0;
	}
#endif	/* CPU_ARM3 */
#ifdef CPU_ARM6
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    (cputype & 0x00000f00) == 0x00000600) {
		cpufuncs = arm6_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;
		get_cachetype_table();
		pmap_pte_init_generic();
		return 0;
	}
#endif	/* CPU_ARM6 */
#ifdef CPU_ARM7
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    CPU_ID_IS7(cputype) &&
	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V3) {
		cpufuncs = arm7_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;
		get_cachetype_table();
		pmap_pte_init_generic();
		return 0;
	}
#endif	/* CPU_ARM7 */
#ifdef CPU_ARM7TDMI
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    CPU_ID_IS7(cputype) &&
	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V4T) {
		cpufuncs = arm7tdmi_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		return 0;
	}
#endif
#ifdef CPU_ARM8
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    (cputype & 0x0000f000) == 0x00008000) {
		cpufuncs = arm8_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;	/* XXX correct? */
		get_cachetype_cp15();
		pmap_pte_init_arm8();
		return 0;
	}
#endif	/* CPU_ARM8 */
#ifdef CPU_ARM9
	if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
	     (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
	    (cputype & 0x0000f000) == 0x00009000) {
		cpufuncs = arm9_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
		get_cachetype_cp15();
		arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
		arm9_dcache_sets_max =
		    (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) -
		    arm9_dcache_sets_inc;
		arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
		arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
#ifdef	ARM9_CACHE_WRITE_THROUGH
		pmap_pte_init_arm9();
#else
		pmap_pte_init_generic();
#endif
		return 0;
	}
#endif /* CPU_ARM9 */
#if defined(CPU_ARM9E) || defined(CPU_ARM10)
	if (cputype == CPU_ID_ARM926EJS ||
	    cputype == CPU_ID_ARM1026EJS) {
		cpufuncs = armv5_ec_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
		get_cachetype_cp15();
		pmap_pte_init_generic();
		return 0;
	}
#endif /* CPU_ARM9E || CPU_ARM10 */
#ifdef CPU_ARM10
	if (/* cputype == CPU_ID_ARM1020T || */
	    cputype == CPU_ID_ARM1020E) {
		/*
		 * Select write-through caching (this isn't really an
		 * option on ARM1020T).
		 */
		cpufuncs = arm10_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
		get_cachetype_cp15();
		armv5_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
		armv5_dcache_sets_max =
		    (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) -
		    armv5_dcache_sets_inc;
		armv5_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
		armv5_dcache_index_max = 0U - armv5_dcache_index_inc;
		pmap_pte_init_generic();
		return 0;
	}
#endif /* CPU_ARM10 */
#if defined(CPU_ARM11)
	if (cputype == CPU_ID_ARM1136JS ||
	    cputype == CPU_ID_ARM1136JSR1 ||
	    cputype == CPU_ID_ARM1176JS ||
	    cputype == CPU_ID_CORTEXA8R1 ||
	    cputype == CPU_ID_CORTEXA8R2) {
		cpufuncs = arm11_cpufuncs;
#if defined(CPU_ARM1136)
		if (cputype != CPU_ID_ARM1176JS) {
			cpufuncs = arm1136_cpufuncs;
			if (cputype == CPU_ID_ARM1136JS)
				cpufuncs.cf_sleep = arm1136_sleep_rev0;
		}
#endif
		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
		cpu_do_powersave = 1;			/* Enable powersave */
		get_cachetype_cp15();
#ifdef ARM11_CACHE_WRITE_THROUGH
		pmap_pte_init_arm11();
#else
		pmap_pte_init_generic();
#endif
		if (arm_cache_prefer_mask)
			uvmexp.ncolors = (arm_cache_prefer_mask >> PGSHIFT) + 1;

		return 0;
	}
#endif /* CPU_ARM11 */
#ifdef CPU_SA110
	if (cputype == CPU_ID_SA110) {
		cpufuncs = sa110_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it */
		get_cachetype_table();
		pmap_pte_init_sa1();
		return 0;
	}
#endif	/* CPU_SA110 */
#ifdef CPU_SA1100
	if (cputype == CPU_ID_SA1100) {
		cpufuncs = sa11x0_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it	*/
		get_cachetype_table();
		pmap_pte_init_sa1();

		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;

		return 0;
	}
#endif	/* CPU_SA1100 */
#ifdef CPU_SA1110
	if (cputype == CPU_ID_SA1110) {
		cpufuncs = sa11x0_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it	*/
		get_cachetype_table();
		pmap_pte_init_sa1();

		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;

		return 0;
	}
#endif	/* CPU_SA1110 */
#ifdef CPU_FA526
	if (cputype == CPU_ID_FA526) {
		cpufuncs = fa526_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it	*/
		get_cachetype_cp15();
		pmap_pte_init_generic();

		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;

		return 0;
	}
#endif	/* CPU_FA526 */
#ifdef CPU_IXP12X0
	if (cputype == CPU_ID_IXP1200) {
		cpufuncs = ixp12x0_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;
		get_cachetype_table();
		pmap_pte_init_sa1();
		return 0;
	}
#endif	/* CPU_IXP12X0 */
#ifdef CPU_XSCALE_80200
	if (cputype == CPU_ID_80200) {
		int rev = cpufunc_id() & CPU_ID_REVISION_MASK;

		i80200_icu_init();

		/*
		 * Reset the Performance Monitoring Unit to a
		 * pristine state:
		 *	- CCNT, PMN0, PMN1 reset to 0
		 *	- overflow indications cleared
		 *	- all counters disabled
		 */
		__asm volatile("mcr p14, 0, %0, c0, c0, 0"
			:
			: "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
			       PMNC_CC_IF));

#if defined(XSCALE_CCLKCFG)
		/*
		 * Crank CCLKCFG to maximum legal value.
		 */
		__asm volatile ("mcr p14, 0, %0, c6, c0, 0"
			:
			: "r" (XSCALE_CCLKCFG));
#endif

		/*
		 * XXX Disable ECC in the Bus Controller Unit; we
		 * don't really support it, yet.  Clear any pending
		 * error indications.
		 */
		__asm volatile("mcr p13, 0, %0, c0, c1, 0"
			:
			: "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));

		cpufuncs = xscale_cpufuncs;
#if defined(PERFCTRS)
		xscale_pmu_init();
#endif

		/*
		 * i80200 errata: Step-A0 and A1 have a bug where
		 * D$ dirty bits are not cleared on "invalidate by
		 * address".
		 *
		 * Workaround: Clean cache line before invalidating.
		 */
		if (rev == 0 || rev == 1)
			cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng;

		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();
		return 0;
	}
#endif /* CPU_XSCALE_80200 */
#ifdef CPU_XSCALE_80321
	if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
	    cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0 ||
	    cputype == CPU_ID_80219_400 || cputype == CPU_ID_80219_600) {
		i80321_icu_init();

		/*
		 * Reset the Performance Monitoring Unit to a
		 * pristine state:
		 *	- CCNT, PMN0, PMN1 reset to 0
		 *	- overflow indications cleared
		 *	- all counters disabled
		 */
		__asm volatile("mcr p14, 0, %0, c0, c0, 0"
			:
			: "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
			       PMNC_CC_IF));

		cpufuncs = xscale_cpufuncs;
#if defined(PERFCTRS)
		xscale_pmu_init();
#endif

		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();
		return 0;
	}
#endif /* CPU_XSCALE_80321 */
#ifdef __CPU_XSCALE_PXA2XX
	/* ignore core revision to test PXA2xx CPUs */
	if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X ||
	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {

		cpufuncs = xscale_cpufuncs;
#if defined(PERFCTRS)
		xscale_pmu_init();
#endif

		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();

		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;

		return 0;
	}
#endif /* __CPU_XSCALE_PXA2XX */
#ifdef CPU_XSCALE_IXP425
	if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
	    cputype == CPU_ID_IXP425_266) {
		ixp425_icu_init();

		cpufuncs = xscale_cpufuncs;
#if defined(PERFCTRS)
		xscale_pmu_init();
#endif

		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();

		return 0;
	}
#endif /* CPU_XSCALE_IXP425 */
	/*
	 * Bzzzz. And the answer was ...
	 */
	panic("No support for this CPU type (%08x) in kernel", cputype);
	return(ARCHITECTURE_NOT_PRESENT);
}
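
/*
 * Illustrative sketch only (kept out of the build with #if 0): each
 * port's early startup code is expected to call set_cpufuncs() before
 * anything dispatches through the cpufuncs table.  The function shape
 * below is an assumption for illustration.
 */
#if 0
static void
example_cpu_bootstrap(void)
{

	set_cpufuncs();		/* panics if the CPU is not recognized */
	/* ... from here on the cpu_*() wrappers are safe to use ... */
}
#endif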

#ifdef CPU_ARM2
u_int arm2_id(void)
{

	return CPU_ID_ARM2;
}
#endif /* CPU_ARM2 */

#ifdef CPU_ARM250
u_int arm250_id(void)
{

	return CPU_ID_ARM250;
}
#endif /* CPU_ARM250 */

/*
 * Fixup routines for data and prefetch aborts.
 *
 * Several compile time symbols are used:
 *
 * DEBUG_FAULT_CORRECTION - Print debugging information during the
 * correction of registers after a fault.
 * ARM6_LATE_ABORT - ARM6 supports both early and late aborts;
 * when defined, late aborts are used.
 */
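
/*
 * Illustrative sketch only (kept out of the build with #if 0): the
 * trap path invokes these hooks through the cpufuncs table, roughly as
 * below.  The handler name is hypothetical; the real call sites live
 * in the fault-handling code, not in this file.
 */
#if 0
static void
example_data_abort(trapframe_t *frame)
{

	if (cpufuncs.cf_dataabt_fixup(frame) == ABORT_FIXUP_FAILED)
		panic("data abort fixup failed");
	/* ... normal fault processing continues ... */
}
#endif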

/*
 * Null abort fixup routine.
 * For use when no fixup is required.
 */
int
cpufunc_null_fixup(void *arg)
{
	return(ABORT_FIXUP_OK);
}


#if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3) || \
    defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI)

#ifdef DEBUG_FAULT_CORRECTION
#define DFC_PRINTF(x)		printf x
#define DFC_DISASSEMBLE(x)	disassemble(x)
#else
#define DFC_PRINTF(x)		/* nothing */
#define DFC_DISASSEMBLE(x)	/* nothing */
#endif

/*
 * "Early" data abort fixup.
 *
 * For ARM2, ARM2as, ARM3 and ARM6 (in early-abort mode).  Also used
 * indirectly by ARM6 (in late-abort mode) and ARM7[TDMI].
 *
 * In early aborts, we may have to fix up LDM, STM, LDC and STC.
 */
int
early_abort_fixup(void *arg)
{
	trapframe_t *frame = arg;
	u_int fault_pc;
	u_int fault_instruction;
	int saved_lr = 0;

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
		 * as the fault happened in svc mode but we need it in the
		 * usr slot so we can treat the registers as an array of ints
		 * during fixing.
		 * NOTE: the PC is also in this array position, but
		 * writeback to r15 is not allowed.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the following fixup code.
		 */

		saved_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = frame->tf_svc_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed. This should not happen as the kstack is
		 * always valid.
		 */
	}

	/* Get fault address and status from the CPU */

	fault_pc = frame->tf_pc;
	fault_instruction = *((volatile unsigned int *)fault_pc);

	/* Decode the fault instruction and fix the registers as needed */

	if ((fault_instruction & 0x0e000000) == 0x08000000) {
		int base;
		int loop;
		int count;
		int *registers = &frame->tf_r0;

		DFC_PRINTF(("LDM/STM\n"));
		DFC_DISASSEMBLE(fault_pc);
		if (fault_instruction & (1 << 21)) {
			DFC_PRINTF(("This instruction must be corrected\n"));
			base = (fault_instruction >> 16) & 0x0f;
			if (base == 15)
				return ABORT_FIXUP_FAILED;
			/* Count registers transferred */
			count = 0;
			for (loop = 0; loop < 16; ++loop) {
				if (fault_instruction & (1<<loop))
					++count;
			}
			DFC_PRINTF(("%d registers used\n", count));
			DFC_PRINTF(("Corrected r%d by %d bytes ",
				       base, count * 4));
			if (fault_instruction & (1 << 23)) {
				DFC_PRINTF(("down\n"));
				registers[base] -= count * 4;
			} else {
				DFC_PRINTF(("up\n"));
				registers[base] += count * 4;
			}
		}
	} else if ((fault_instruction & 0x0e000000) == 0x0c000000) {
		int base;
		int offset;
		int *registers = &frame->tf_r0;

		/* REGISTER CORRECTION IS REQUIRED FOR THESE INSTRUCTIONS */

		DFC_DISASSEMBLE(fault_pc);

		/* Only need to fix registers if write back is turned on */

		if ((fault_instruction & (1 << 21)) != 0) {
			base = (fault_instruction >> 16) & 0x0f;
			if (base == 13 &&
			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
				return ABORT_FIXUP_FAILED;
			if (base == 15)
				return ABORT_FIXUP_FAILED;

			offset = (fault_instruction & 0xff) << 2;
			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
			if ((fault_instruction & (1 << 23)) != 0)
				offset = -offset;
			registers[base] += offset;
			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
		}
	}
	} else if ((fault_instruction & 0x0e000000) == 0x0c000000)
		/*
		 * XXX Unreachable: this condition duplicates the one in
		 * the branch above, so this return is never taken.
		 */
		return ABORT_FIXUP_FAILED;

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
		 * as the fault happened in svc mode but we need it in the
		 * usr slot so we can treat the registers as an array of ints
		 * during fixing.
		 * NOTE: the PC is also in this array position, but
		 * writeback to r15 is not allowed.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the prior fixup code.
		 */

		frame->tf_svc_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = saved_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed. This should not happen as the kstack is
		 * always valid.
		 */
	}

	return(ABORT_FIXUP_OK);
}
#endif	/* CPU_ARM2/250/3/6/7/7TDMI */


#if (defined(CPU_ARM6) && defined(ARM6_LATE_ABORT)) || defined(CPU_ARM7) || \
	defined(CPU_ARM7TDMI)
/*
 * "Late" (base updated) data abort fixup
 *
1822  * For ARM6 (in late-abort mode) and ARM7.
1823  *
1824  * In this model, all data-transfer instructions need fixing up.  We defer
1825  * LDM, STM, LDC and STC fixup to the early-abort handler.
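 *
 * For example, if "ldr r0, [r1, #4]!" aborts, the base register r1
 * has already been updated and must be wound back before the access
 * is retried.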
1826  */
1827 int
1828 late_abort_fixup(void *arg)
1829 {
1830 	trapframe_t *frame = arg;
1831 	u_int fault_pc;
1832 	u_int fault_instruction;
1833 	int saved_lr = 0;
1834 
1835 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1836 
1837 		/* Ok an abort in SVC mode */
1838 
1839 		/*
1840 		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1841 		 * as the fault happened in svc mode but we need it in the
1842 		 * usr slot so we can treat the registers as an array of ints
1843 		 * during fixing.
		 * NOTE: the PC is already in its slot, and writeback to
		 * r15 is not allowed anyway.
1846 		 * Doing it like this is more efficient than trapping this
1847 		 * case in all possible locations in the following fixup code.
1848 		 */
1849 
1850 		saved_lr = frame->tf_usr_lr;
1851 		frame->tf_usr_lr = frame->tf_svc_lr;
1852 
1853 		/*
1854 		 * Note the trapframe does not have the SVC r13 so a fault
1855 		 * from an instruction with writeback to r13 in SVC mode is
1856 		 * not allowed. This should not happen as the kstack is
1857 		 * always valid.
1858 		 */
1859 	}
1860 
	/* Get the faulting PC and fetch the faulting instruction */
1862 
1863 	fault_pc = frame->tf_pc;
1864 	fault_instruction = *((volatile unsigned int *)fault_pc);
1865 
1866 	/* Decode the fault instruction and fix the registers as needed */
1867 
	/* Was it a swap (SWP/SWPB) instruction? */
1869 
1870 	if ((fault_instruction & 0x0fb00ff0) == 0x01000090) {
1871 		DFC_DISASSEMBLE(fault_pc);
1872 	} else if ((fault_instruction & 0x0c000000) == 0x04000000) {
1873 
		/* Was it an ldr/str instruction? */
		/* This is for late abort only */
1876 
1877 		int base;
1878 		int offset;
1879 		int *registers = &frame->tf_r0;
1880 
1881 		DFC_DISASSEMBLE(fault_pc);
1884 
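		/*
		 * Single data transfer encoding: bit 24 is P (pre/post
		 * index), bit 21 is W (writeback), bit 23 is U (up/down)
		 * and bit 25 selects an immediate (0) or register (1)
		 * offset.
		 */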
1885 		if ((fault_instruction & (1 << 24)) == 0
1886 		    || (fault_instruction & (1 << 21)) != 0) {
			/* post-indexed, or pre-indexed with writeback */
1888 
1889 			base = (fault_instruction >> 16) & 0x0f;
1890 			if (base == 13 &&
1891 			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
1892 				return ABORT_FIXUP_FAILED;
1893 			if (base == 15)
1894 				return ABORT_FIXUP_FAILED;
1895 			DFC_PRINTF(("late abt fix: r%d=%08x : ",
1896 				       base, registers[base]));
1897 			if ((fault_instruction & (1 << 25)) == 0) {
1898 				/* Immediate offset - easy */
1899 
1900 				offset = fault_instruction & 0xfff;
1901 				if ((fault_instruction & (1 << 23)))
1902 					offset = -offset;
1903 				registers[base] += offset;
1904 				DFC_PRINTF(("imm=%08x ", offset));
1905 			} else {
1906 				/* offset is a shifted register */
1907 				int shift;
1908 
1909 				offset = fault_instruction & 0x0f;
1910 				if (offset == base)
1911 					return ABORT_FIXUP_FAILED;
1912 
				/*
				 * Register offset - harder, as we have
				 * to cope with shifts!
				 */
1917 				offset = registers[offset];
1918 
1919 				if ((fault_instruction & (1 << 4)) == 0)
1920 					/* shift with amount */
1921 					shift = (fault_instruction >> 7) & 0x1f;
1922 				else {
1923 					/* shift with register */
1924 					if ((fault_instruction & (1 << 7)) != 0)
1925 						/* undefined for now so bail out */
1926 						return ABORT_FIXUP_FAILED;
1927 					shift = ((fault_instruction >> 8) & 0xf);
1928 					if (base == shift)
1929 						return ABORT_FIXUP_FAILED;
1930 					DFC_PRINTF(("shift reg=%d ", shift));
1931 					shift = registers[shift];
1932 				}
1933 				DFC_PRINTF(("shift=%08x ", shift));
1934 				switch (((fault_instruction >> 5) & 0x3)) {
1935 				case 0 : /* Logical left */
1936 					offset = (int)(((u_int)offset) << shift);
1937 					break;
				case 1 : /* Logical Right */
					/* LSR #0 encodes LSR #32 */
					if (shift == 0) shift = 32;
					/* avoid an undefined shift by >= 32 */
					offset = (shift >= 32) ? 0 :
					    (int)(((u_int)offset) >> shift);
					break;
				case 2 : /* Arithmetic Right */
					/* ASR #0 encodes ASR #32 */
					if (shift == 0) shift = 32;
					/* shifts of >= 32 simply sign-fill */
					offset = (int)(((int)offset) >>
					    (shift >= 32 ? 31 : shift));
					break;
				case 3 : /* Rotate right (ror or rrx) */
1947 					return ABORT_FIXUP_FAILED;
1948 					break;
1949 				}
1950 
1951 				DFC_PRINTF(("abt: fixed LDR/STR with "
1952 					       "register offset\n"));
1953 				if ((fault_instruction & (1 << 23)))
1954 					offset = -offset;
1955 				DFC_PRINTF(("offset=%08x ", offset));
1956 				registers[base] += offset;
1957 			}
1958 			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
1959 		}
1960 	}
1961 
1962 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1963 
1964 		/* Ok an abort in SVC mode */
1965 
1966 		/*
1967 		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1968 		 * as the fault happened in svc mode but we need it in the
1969 		 * usr slot so we can treat the registers as an array of ints
1970 		 * during fixing.
		 * NOTE: the PC is already in its slot, and writeback to
		 * r15 is not allowed anyway.
1973 		 * Doing it like this is more efficient than trapping this
1974 		 * case in all possible locations in the prior fixup code.
1975 		 */
1976 
1977 		frame->tf_svc_lr = frame->tf_usr_lr;
1978 		frame->tf_usr_lr = saved_lr;
1979 
1980 		/*
1981 		 * Note the trapframe does not have the SVC r13 so a fault
1982 		 * from an instruction with writeback to r13 in SVC mode is
1983 		 * not allowed. This should not happen as the kstack is
1984 		 * always valid.
1985 		 */
1986 	}
1987 
1988 	/*
1989 	 * Now let the early-abort fixup routine have a go, in case it
1990 	 * was an LDM, STM, LDC or STC that faulted.
1991 	 */
1992 
1993 	return early_abort_fixup(arg);
1994 }
1995 #endif	/* CPU_ARM6(LATE)/7/7TDMI */
1996 
1997 /*
1998  * CPU Setup code
1999  */
2000 
2001 #if defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) || \
2002 	defined(CPU_ARM8) || defined (CPU_ARM9) || defined (CPU_ARM9E) || \
2003 	defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
2004 	defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
2005 	defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425) || \
2006 	defined(CPU_ARM10) || defined(CPU_ARM11) || defined(CPU_ARM1136) || \
2007 	defined(CPU_FA526)
2008 
2009 #define IGN	0
2010 #define OR	1
2011 #define BIC	2
2012 
2013 struct cpu_option {
2014 	const char *co_name;
2015 	int	co_falseop;
2016 	int	co_trueop;
2017 	int	co_value;
2018 };
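
/*
 * Each entry maps a boot argument name to an action on the CPU control
 * register value: co_falseop is applied when the option is given as
 * "<name>=0" and co_trueop when it is "<name>=1"; OR sets co_value,
 * BIC clears it and IGN leaves it untouched.
 */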
2019 
2020 static u_int parse_cpu_options(char *, struct cpu_option *, u_int);
2021 
2022 static u_int
2023 parse_cpu_options(char *args, struct cpu_option *optlist, u_int cpuctrl)
2024 {
2025 	int integer;
2026 
2027 	if (args == NULL)
2028 		return(cpuctrl);
2029 
2030 	while (optlist->co_name) {
2031 		if (get_bootconf_option(args, optlist->co_name,
2032 		    BOOTOPT_TYPE_BOOLEAN, &integer)) {
2033 			if (integer) {
2034 				if (optlist->co_trueop == OR)
2035 					cpuctrl |= optlist->co_value;
2036 				else if (optlist->co_trueop == BIC)
2037 					cpuctrl &= ~optlist->co_value;
2038 			} else {
2039 				if (optlist->co_falseop == OR)
2040 					cpuctrl |= optlist->co_value;
2041 				else if (optlist->co_falseop == BIC)
2042 					cpuctrl &= ~optlist->co_value;
2043 			}
2044 		}
2045 		++optlist;
2046 	}
2047 	return(cpuctrl);
2048 }
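
/*
 * For example, booting with "cpu.nocache=1" matches the "cpu.nocache"
 * entry in a CPU's option table and applies its true-op (BIC), which
 * clears the cache enable bits before the control register is written.
 */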
#endif /* CPU_ARM6/7/7TDMI/8/9/9E/10/11/1136, CPU_SA110/1100/1110, CPU_XSCALE_*, CPU_FA526 */
2050 
2051 #if defined (CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) \
2052 	|| defined(CPU_ARM8)
2053 struct cpu_option arm678_options[] = {
2054 #ifdef COMPAT_12
2055 	{ "nocache",		IGN, BIC, CPU_CONTROL_IDC_ENABLE },
2056 	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
2057 #endif	/* COMPAT_12 */
2058 	{ "cpu.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
2059 	{ "cpu.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
2060 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2061 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2062 	{ NULL,			IGN, IGN, 0 }
2063 };
2064 
2065 #endif	/* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 */
2066 
2067 #ifdef CPU_ARM6
2068 struct cpu_option arm6_options[] = {
2069 	{ "arm6.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
2070 	{ "arm6.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
2071 	{ "arm6.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2072 	{ "arm6.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2073 	{ NULL,			IGN, IGN, 0 }
2074 };
2075 
2076 void
2077 arm6_setup(char *args)
2078 {
2079 	int cpuctrl, cpuctrlmask;
2080 
2081 	/* Set up default control registers bits */
2082 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2083 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2084 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
2085 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2086 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2087 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
2088 		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE
2089 		 | CPU_CONTROL_AFLT_ENABLE;
2090 
2091 #ifdef ARM6_LATE_ABORT
2092 	cpuctrl |= CPU_CONTROL_LABT_ENABLE;
2093 #endif	/* ARM6_LATE_ABORT */
2094 
2095 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2096 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2097 #endif
2098 
2099 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
2100 	cpuctrl = parse_cpu_options(args, arm6_options, cpuctrl);
2101 
2102 #ifdef __ARMEB__
2103 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2104 #endif
2105 
2106 	/* Clear out the cache */
2107 	cpu_idcache_wbinv_all();
2108 
2109 	/* Set the control register */
2110 	curcpu()->ci_ctrl = cpuctrl;
2111 	cpu_control(0xffffffff, cpuctrl);
2112 }
2113 #endif	/* CPU_ARM6 */
2114 
2115 #ifdef CPU_ARM7
2116 struct cpu_option arm7_options[] = {
2117 	{ "arm7.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
2118 	{ "arm7.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
2119 	{ "arm7.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2120 	{ "arm7.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2121 #ifdef COMPAT_12
2122 	{ "fpaclk2",		BIC, OR,  CPU_CONTROL_CPCLK },
2123 #endif	/* COMPAT_12 */
2124 	{ "arm700.fpaclk",	BIC, OR,  CPU_CONTROL_CPCLK },
2125 	{ NULL,			IGN, IGN, 0 }
2126 };
2127 
2128 void
2129 arm7_setup(char *args)
2130 {
2131 	int cpuctrl, cpuctrlmask;
2132 
2133 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2134 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2135 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
2136 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2137 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2138 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
2139 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_LABT_ENABLE
2140 		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE
2141 		 | CPU_CONTROL_AFLT_ENABLE;
2142 
2143 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2144 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2145 #endif
2146 
2147 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
2148 	cpuctrl = parse_cpu_options(args, arm7_options, cpuctrl);
2149 
2150 #ifdef __ARMEB__
2151 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2152 #endif
2153 
2154 	/* Clear out the cache */
2155 	cpu_idcache_wbinv_all();
2156 
2157 	/* Set the control register */
2158 	curcpu()->ci_ctrl = cpuctrl;
2159 	cpu_control(0xffffffff, cpuctrl);
2160 }
2161 #endif	/* CPU_ARM7 */
2162 
2163 #ifdef CPU_ARM7TDMI
2164 struct cpu_option arm7tdmi_options[] = {
2165 	{ "arm7.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
2166 	{ "arm7.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
2167 	{ "arm7.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2168 	{ "arm7.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2169 #ifdef COMPAT_12
2170 	{ "fpaclk2",		BIC, OR,  CPU_CONTROL_CPCLK },
2171 #endif	/* COMPAT_12 */
2172 	{ "arm700.fpaclk",	BIC, OR,  CPU_CONTROL_CPCLK },
2173 	{ NULL,			IGN, IGN, 0 }
2174 };
2175 
2176 void
2177 arm7tdmi_setup(char *args)
2178 {
2179 	int cpuctrl;
2180 
2181 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2182 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2183 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
2184 
2185 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
2186 	cpuctrl = parse_cpu_options(args, arm7tdmi_options, cpuctrl);
2187 
2188 #ifdef __ARMEB__
2189 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2190 #endif
2191 
2192 	/* Clear out the cache */
2193 	cpu_idcache_wbinv_all();
2194 
2195 	/* Set the control register */
2196 	curcpu()->ci_ctrl = cpuctrl;
2197 	cpu_control(0xffffffff, cpuctrl);
2198 }
2199 #endif	/* CPU_ARM7TDMI */
2200 
2201 #ifdef CPU_ARM8
2202 struct cpu_option arm8_options[] = {
2203 	{ "arm8.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
2204 	{ "arm8.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
2205 	{ "arm8.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2206 	{ "arm8.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2207 #ifdef COMPAT_12
2208 	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2209 #endif	/* COMPAT_12 */
2210 	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2211 	{ "arm8.branchpredict",	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2212 	{ NULL,			IGN, IGN, 0 }
2213 };
2214 
2215 void
2216 arm8_setup(char *args)
2217 {
2218 	int integer;
2219 	int cpuctrl, cpuctrlmask;
2220 	int clocktest;
2221 	int setclock = 0;
2222 
2223 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2224 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2225 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
2226 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2227 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2228 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
2229 		 | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_ROM_ENABLE
2230 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE;
2231 
2232 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2233 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2234 #endif
2235 
2236 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
2237 	cpuctrl = parse_cpu_options(args, arm8_options, cpuctrl);
2238 
2239 #ifdef __ARMEB__
2240 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2241 #endif
2242 
2243 	/* Get clock configuration */
2244 	clocktest = arm8_clock_config(0, 0) & 0x0f;
2245 
2246 	/* Special ARM8 clock and test configuration */
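	/*
	 * Layout used below: bit 0 selects dynamic clocking, bit 1
	 * synchronous mode, bits 3:2 the fast clock divisor and bits
	 * 7:5 the test bits.
	 */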
2247 	if (get_bootconf_option(args, "arm8.clock.reset", BOOTOPT_TYPE_BOOLEAN, &integer)) {
2248 		clocktest = 0;
2249 		setclock = 1;
2250 	}
2251 	if (get_bootconf_option(args, "arm8.clock.dynamic", BOOTOPT_TYPE_BOOLEAN, &integer)) {
2252 		if (integer)
2253 			clocktest |= 0x01;
2254 		else
2255 			clocktest &= ~(0x01);
2256 		setclock = 1;
2257 	}
2258 	if (get_bootconf_option(args, "arm8.clock.sync", BOOTOPT_TYPE_BOOLEAN, &integer)) {
2259 		if (integer)
2260 			clocktest |= 0x02;
2261 		else
2262 			clocktest &= ~(0x02);
2263 		setclock = 1;
2264 	}
2265 	if (get_bootconf_option(args, "arm8.clock.fast", BOOTOPT_TYPE_BININT, &integer)) {
		clocktest = (clocktest & ~0x0c) | (integer & 3) << 2;
2267 		setclock = 1;
2268 	}
2269 	if (get_bootconf_option(args, "arm8.test", BOOTOPT_TYPE_BININT, &integer)) {
2270 		clocktest |= (integer & 7) << 5;
2271 		setclock = 1;
2272 	}
2273 
2274 	/* Clear out the cache */
2275 	cpu_idcache_wbinv_all();
2276 
2277 	/* Set the control register */
2278 	curcpu()->ci_ctrl = cpuctrl;
2279 	cpu_control(0xffffffff, cpuctrl);
2280 
2281 	/* Set the clock/test register */
2282 	if (setclock)
2283 		arm8_clock_config(0x7f, clocktest);
2284 }
2285 #endif	/* CPU_ARM8 */
2286 
2287 #ifdef CPU_ARM9
2288 struct cpu_option arm9_options[] = {
2289 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2290 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm9.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2292 	{ "arm9.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2293 	{ "arm9.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2294 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2295 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2296 	{ "arm9.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2297 	{ NULL,			IGN, IGN, 0 }
2298 };
2299 
2300 void
2301 arm9_setup(char *args)
2302 {
2303 	int cpuctrl, cpuctrlmask;
2304 
2305 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2306 	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2307 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2308 	    | CPU_CONTROL_WBUF_ENABLE;
2309 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2310 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2311 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2312 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2313 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2314 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
2315 		 | CPU_CONTROL_ROUNDROBIN;
2316 
2317 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2318 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2319 #endif
2320 
2321 	cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);
2322 
2323 #ifdef __ARMEB__
2324 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2325 #endif
2326 
2327 	if (vector_page == ARM_VECTORS_HIGH)
2328 		cpuctrl |= CPU_CONTROL_VECRELOC;
2329 
2330 	/* Clear out the cache */
2331 	cpu_idcache_wbinv_all();
2332 
2333 	/* Set the control register */
2334 	curcpu()->ci_ctrl = cpuctrl;
2335 	cpu_control(cpuctrlmask, cpuctrl);
}
2338 #endif	/* CPU_ARM9 */
2339 
2340 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
2341 struct cpu_option arm10_options[] = {
2342 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2343 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2344 	{ "arm10.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2345 	{ "arm10.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2346 	{ "arm10.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2347 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2348 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2349 	{ "arm10.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2350 	{ NULL,			IGN, IGN, 0 }
2351 };
2352 
2353 void
2354 arm10_setup(char *args)
2355 {
2356 	int cpuctrl, cpuctrlmask;
2357 
2358 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2359 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2360 	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
2361 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2362 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2363 	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2364 	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2365 	    | CPU_CONTROL_BPRD_ENABLE
2366 	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
2367 
2368 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2369 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2370 #endif
2371 
2372 	cpuctrl = parse_cpu_options(args, arm10_options, cpuctrl);
2373 
2374 #ifdef __ARMEB__
2375 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2376 #endif
2377 
2378 	if (vector_page == ARM_VECTORS_HIGH)
2379 		cpuctrl |= CPU_CONTROL_VECRELOC;
2380 
2381 	/* Clear out the cache */
2382 	cpu_idcache_wbinv_all();
2383 
2384 	/* Now really make sure they are clean.  */
2385 	__asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
2386 
2387 	/* Set the control register */
2388 	curcpu()->ci_ctrl = cpuctrl;
2389 	cpu_control(0xffffffff, cpuctrl);
2390 
2391 	/* And again. */
2392 	cpu_idcache_wbinv_all();
2393 }
2394 #endif	/* CPU_ARM9E || CPU_ARM10 */
2395 
2396 #if defined(CPU_ARM11)
2397 struct cpu_option arm11_options[] = {
2398 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2399 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2400 	{ "arm11.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2401 	{ "arm11.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2402 	{ "arm11.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2403 	{ NULL,			IGN, IGN, 0 }
2404 };
2405 
2406 void
2407 arm11_setup(char *args)
2408 {
2409 	int cpuctrl, cpuctrlmask;
2410 
2411 #if defined(PROCESS_ID_IS_CURCPU)
	/* set curcpu() */
	__asm("mcr\tp15, 0, %0, c13, c0, 4" : : "r"(&cpu_info_store));
#elif defined(PROCESS_ID_IS_CURLWP)
	/* set curlwp() */
	__asm("mcr\tp15, 0, %0, c13, c0, 4" : : "r"(&lwp0));
2417 #endif
2418 
2419 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2420 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2421 	    /* | CPU_CONTROL_BPRD_ENABLE */;
2422 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2423 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2424 	    | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BPRD_ENABLE
2425 	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2426 	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
2427 
2428 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2429 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2430 #endif
2431 
2432 	cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);
2433 
2434 #ifdef __ARMEB__
2435 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2436 #endif
2437 
2438 	if (vector_page == ARM_VECTORS_HIGH)
2439 		cpuctrl |= CPU_CONTROL_VECRELOC;
2440 
2441 	/* Clear out the cache */
2442 	cpu_idcache_wbinv_all();
2443 
2444 	/* Now really make sure they are clean.  */
2445 	__asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
2446 
2447 	/* Allow detection code to find the VFP if it's fitted.  */
2448 	__asm volatile ("mcr\tp15, 0, %0, c1, c0, 2" : : "r" (0x0fffffff));
2449 
2450 	/* Set the control register */
2451 	curcpu()->ci_ctrl = cpuctrl;
2452 	cpu_control(0xffffffff, cpuctrl);
2453 
2454 	/* And again. */
2455 	cpu_idcache_wbinv_all();
2456 }
2457 #endif	/* CPU_ARM11 */
2458 
2459 #if defined(CPU_ARM1136)
2460 void
2461 arm1136_setup(char *args)
2462 {
2463 	int cpuctrl, cpuctrl_wax;
2464 	uint32_t auxctrl, auxctrl_wax;
2465 	uint32_t tmp, tmp2;
	uint32_t sbz = 0;
2467 	uint32_t cpuid;
2468 
2469 #if defined(PROCESS_ID_IS_CURCPU)
	/* set curcpu() */
	__asm("mcr\tp15, 0, %0, c13, c0, 4" : : "r"(&cpu_info_store));
#elif defined(PROCESS_ID_IS_CURLWP)
	/* set curlwp() */
	__asm("mcr\tp15, 0, %0, c13, c0, 4" : : "r"(&lwp0));
2475 #endif
2476 
2477 	cpuid = cpu_id();
2478 
2479 	cpuctrl =
2480 		CPU_CONTROL_MMU_ENABLE  |
2481 		CPU_CONTROL_DC_ENABLE   |
2482 		CPU_CONTROL_WBUF_ENABLE |
2483 		CPU_CONTROL_32BP_ENABLE |
2484 		CPU_CONTROL_32BD_ENABLE |
2485 		CPU_CONTROL_LABT_ENABLE |
2486 		CPU_CONTROL_SYST_ENABLE |
2487 		CPU_CONTROL_IC_ENABLE;
2488 
	/*
	 * "Write as existing" bits: these control register bits keep
	 * their current values; the inverse of cpuctrl_wax is the mask
	 * of bits actually written below.
	 */
2493 	cpuctrl_wax =
2494 		(3 << 30) |
2495 		(1 << 29) |
2496 		(1 << 28) |
2497 		(3 << 26) |
2498 		(3 << 19) |
2499 		(1 << 17);
2500 
2501 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2502 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2503 #endif
2504 
2505 	cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);
2506 
2507 #ifdef __ARMEB__
2508 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2509 #endif
2510 
2511 	if (vector_page == ARM_VECTORS_HIGH)
2512 		cpuctrl |= CPU_CONTROL_VECRELOC;
2513 
2514 	auxctrl = 0;
2515 	auxctrl_wax = ~0;
	/*
	 * This option enables the workaround for ARM1136 r0pX erratum
	 * 364296 (possible cache data corruption with hit-under-miss
	 * enabled). It sets the undocumented bit 31 in the auxiliary
	 * control register and the FI bit in the control register, thus
	 * disabling hit-under-miss without putting the processor into
	 * full low interrupt latency mode. ARM11MPCore is not affected.
	 */
2524 	if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1136JS) { /* ARM1136JSr0pX */
2525 		cpuctrl |= CPU_CONTROL_FI_ENABLE;
2526 		auxctrl = ARM11R0_AUXCTL_PFI;
2527 		auxctrl_wax = ~ARM11R0_AUXCTL_PFI;
2528 	}
2529 
2530 	/* Clear out the cache */
2531 	cpu_idcache_wbinv_all();
2532 
2533 	/* Now really make sure they are clean.  */
2534 	__asm volatile ("mcr\tp15, 0, %0, c7, c7, 0" : : "r"(sbz));
2535 
2536 	/* Allow detection code to find the VFP if it's fitted.  */
2537 	__asm volatile ("mcr\tp15, 0, %0, c1, c0, 2" : : "r" (0x0fffffff));
2538 
2539 	/* Set the control register */
2540 	curcpu()->ci_ctrl = cpuctrl;
2541 	cpu_control(~cpuctrl_wax, cpuctrl);
2542 
	/*
	 * Read-modify-write the auxiliary control register: preserve the
	 * "write as existing" bits, merge in the desired bits and write
	 * the result back only if it actually changed.
	 */
	__asm volatile ("mrc	p15, 0, %0, c1, c0, 1\n\t"
			"bic	%1, %0, %2\n\t"
			"eor	%1, %1, %3\n\t"
			"teq	%0, %1\n\t"
			"mcrne	p15, 0, %1, c1, c0, 1\n\t"
			: "=r"(tmp), "=r"(tmp2) :
			  "r"(~auxctrl_wax), "r"(auxctrl));
2550 
2551 	/* And again. */
2552 	cpu_idcache_wbinv_all();
2553 }
2554 #endif	/* CPU_ARM1136 */
2555 
2556 #ifdef CPU_SA110
2557 struct cpu_option sa110_options[] = {
2558 #ifdef COMPAT_12
2559 	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2560 	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
2561 #endif	/* COMPAT_12 */
2562 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2563 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2564 	{ "sa110.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2565 	{ "sa110.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2566 	{ "sa110.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2567 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2568 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2569 	{ "sa110.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2570 	{ NULL,			IGN, IGN, 0 }
2571 };
2572 
2573 void
2574 sa110_setup(char *args)
2575 {
2576 	int cpuctrl, cpuctrlmask;
2577 
2578 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2579 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2580 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2581 		 | CPU_CONTROL_WBUF_ENABLE;
2582 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2583 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2584 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2585 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2586 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2587 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
2588 		 | CPU_CONTROL_CPCLK;
2589 
2590 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2591 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2592 #endif
2593 
2594 	cpuctrl = parse_cpu_options(args, sa110_options, cpuctrl);
2595 
2596 #ifdef __ARMEB__
2597 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2598 #endif
2599 
2600 	if (vector_page == ARM_VECTORS_HIGH)
2601 		cpuctrl |= CPU_CONTROL_VECRELOC;
2602 
2603 	/* Clear out the cache */
2604 	cpu_idcache_wbinv_all();
2605 
2606 	/* Set the control register */
2607 	curcpu()->ci_ctrl = cpuctrl;
2608 /*	cpu_control(cpuctrlmask, cpuctrl);*/
2609 	cpu_control(0xffffffff, cpuctrl);
2610 
	/*
	 * Enable clock switching.  Note that this doesn't read or write
	 * r0; r0 is only named to make the asm valid.
	 */
2615 	__asm ("mcr 15, 0, r0, c15, c1, 2");
2616 }
2617 #endif	/* CPU_SA110 */
2618 
2619 #if defined(CPU_SA1100) || defined(CPU_SA1110)
2620 struct cpu_option sa11x0_options[] = {
2621 #ifdef COMPAT_12
2622 	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2623 	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
2624 #endif	/* COMPAT_12 */
2625 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2626 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2627 	{ "sa11x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2628 	{ "sa11x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2629 	{ "sa11x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2630 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2631 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2632 	{ "sa11x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2633 	{ NULL,			IGN, IGN, 0 }
2634 };
2635 
2636 void
2637 sa11x0_setup(char *args)
2638 {
2639 	int cpuctrl, cpuctrlmask;
2640 
2641 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2642 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2643 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2644 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
2645 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2646 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2647 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2648 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2649 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2650 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
2651 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
2652 
2653 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2654 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2655 #endif
2656 
2657 	cpuctrl = parse_cpu_options(args, sa11x0_options, cpuctrl);
2658 
2659 #ifdef __ARMEB__
2660 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2661 #endif
2662 
2663 	if (vector_page == ARM_VECTORS_HIGH)
2664 		cpuctrl |= CPU_CONTROL_VECRELOC;
2665 
2666 	/* Clear out the cache */
2667 	cpu_idcache_wbinv_all();
2668 
2669 	/* Set the control register */
2670 	curcpu()->ci_ctrl = cpuctrl;
2671 	cpu_control(0xffffffff, cpuctrl);
2672 }
2673 #endif	/* CPU_SA1100 || CPU_SA1110 */
2674 
2675 #if defined(CPU_FA526)
2676 struct cpu_option fa526_options[] = {
2677 #ifdef COMPAT_12
2678 	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2679 	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
2680 #endif	/* COMPAT_12 */
2681 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2682 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2683 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2684 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2685 	{ NULL,			IGN, IGN, 0 }
2686 };
2687 
2688 void
2689 fa526_setup(char *args)
2690 {
2691 	int cpuctrl, cpuctrlmask;
2692 
2693 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2694 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2695 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2696 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
2697 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2698 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2699 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2700 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2701 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2702 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
2703 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
2704 
2705 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2706 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2707 #endif
2708 
2709 	cpuctrl = parse_cpu_options(args, fa526_options, cpuctrl);
2710 
2711 #ifdef __ARMEB__
2712 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2713 #endif
2714 
2715 	if (vector_page == ARM_VECTORS_HIGH)
2716 		cpuctrl |= CPU_CONTROL_VECRELOC;
2717 
2718 	/* Clear out the cache */
2719 	cpu_idcache_wbinv_all();
2720 
2721 	/* Set the control register */
2722 	curcpu()->ci_ctrl = cpuctrl;
2723 	cpu_control(0xffffffff, cpuctrl);
2724 }
2725 #endif	/* CPU_FA526 */
2726 
2727 #if defined(CPU_IXP12X0)
2728 struct cpu_option ixp12x0_options[] = {
2729 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2730 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2731 	{ "ixp12x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2732 	{ "ixp12x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2733 	{ "ixp12x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2734 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2735 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2736 	{ "ixp12x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2737 	{ NULL,			IGN, IGN, 0 }
2738 };
2739 
2740 void
2741 ixp12x0_setup(char *args)
2742 {
2743 	int cpuctrl, cpuctrlmask;
2744 
2746 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE
2747 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_SYST_ENABLE
2748 		 | CPU_CONTROL_IC_ENABLE;
2749 
2750 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_AFLT_ENABLE
2751 		 | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE
2752 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_SYST_ENABLE
2753 		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_IC_ENABLE
2754 		 | CPU_CONTROL_VECRELOC;
2755 
2756 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2757 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2758 #endif
2759 
2760 	cpuctrl = parse_cpu_options(args, ixp12x0_options, cpuctrl);
2761 
2762 #ifdef __ARMEB__
2763 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2764 #endif
2765 
2766 	if (vector_page == ARM_VECTORS_HIGH)
2767 		cpuctrl |= CPU_CONTROL_VECRELOC;
2768 
2769 	/* Clear out the cache */
2770 	cpu_idcache_wbinv_all();
2771 
2772 	/* Set the control register */
2773 	curcpu()->ci_ctrl = cpuctrl;
2774 	/* cpu_control(0xffffffff, cpuctrl); */
2775 	cpu_control(cpuctrlmask, cpuctrl);
2776 }
2777 #endif /* CPU_IXP12X0 */
2778 
2779 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
2780     defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425)
2781 struct cpu_option xscale_options[] = {
2782 #ifdef COMPAT_12
2783 	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2784 	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2785 #endif	/* COMPAT_12 */
2786 	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2787 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2788 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2789 	{ "xscale.branchpredict", BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2790 	{ "xscale.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2791 	{ "xscale.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2792 	{ "xscale.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2793 	{ NULL,			IGN, IGN, 0 }
2794 };
2795 
2796 void
2797 xscale_setup(char *args)
2798 {
2799 	uint32_t auxctl;
2800 	int cpuctrl, cpuctrlmask;
2801 
2802 	/*
2803 	 * The XScale Write Buffer is always enabled.  Our option
2804 	 * is to enable/disable coalescing.  Note that bits 6:3
2805 	 * must always be enabled.
2806 	 */
2807 
2808 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2809 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2810 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2811 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
2812 		 | CPU_CONTROL_BPRD_ENABLE;
2813 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2814 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2815 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2816 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2817 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2818 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
2819 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
2820 
2821 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2822 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2823 #endif
2824 
2825 	cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl);
2826 
2827 #ifdef __ARMEB__
2828 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2829 #endif
2830 
2831 	if (vector_page == ARM_VECTORS_HIGH)
2832 		cpuctrl |= CPU_CONTROL_VECRELOC;
2833 
2834 	/* Clear out the cache */
2835 	cpu_idcache_wbinv_all();
2836 
2837 	/*
2838 	 * Set the control register.  Note that bits 6:3 must always
2839 	 * be set to 1.
2840 	 */
2841 	curcpu()->ci_ctrl = cpuctrl;
2842 /*	cpu_control(cpuctrlmask, cpuctrl);*/
2843 	cpu_control(0xffffffff, cpuctrl);
2844 
2845 	/* Make sure write coalescing is turned on */
2846 	__asm volatile("mrc p15, 0, %0, c1, c0, 1"
2847 		: "=r" (auxctl));
2848 #ifdef XSCALE_NO_COALESCE_WRITES
2849 	auxctl |= XSCALE_AUXCTL_K;
2850 #else
2851 	auxctl &= ~XSCALE_AUXCTL_K;
2852 #endif
2853 	__asm volatile("mcr p15, 0, %0, c1, c0, 1"
2854 		: : "r" (auxctl));
2855 }
2856 #endif	/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || __CPU_XSCALE_PXA2XX || CPU_XSCALE_IXP425 */
2857