1 /*	$NetBSD: cpufunc.c,v 1.93 2009/03/15 22:23:16 cegger Exp $	*/
2 
3 /*
4  * arm7tdmi support code Copyright (c) 2001 John Fremlin
5  * arm8 support code Copyright (c) 1997 ARM Limited
6  * arm8 support code Copyright (c) 1997 Causality Limited
7  * arm9 support code Copyright (C) 2001 ARM Ltd
8  * arm11 support code Copyright (c) 2007 Microsoft
9  * cortexa8 support code Copyright (c) 2008 3am Software Foundry
10  * Copyright (c) 1997 Mark Brinicombe.
11  * Copyright (c) 1997 Causality Limited
12  * All rights reserved.
13  *
14  * Redistribution and use in source and binary forms, with or without
15  * modification, are permitted provided that the following conditions
16  * are met:
17  * 1. Redistributions of source code must retain the above copyright
18  *    notice, this list of conditions and the following disclaimer.
19  * 2. Redistributions in binary form must reproduce the above copyright
20  *    notice, this list of conditions and the following disclaimer in the
21  *    documentation and/or other materials provided with the distribution.
22  * 3. All advertising materials mentioning features or use of this software
23  *    must display the following acknowledgement:
24  *	This product includes software developed by Causality Limited.
25  * 4. The name of Causality Limited may not be used to endorse or promote
26  *    products derived from this software without specific prior written
27  *    permission.
28  *
29  * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
30  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
31  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
32  * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
33  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
34  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
35  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
36  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
37  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
38  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39  * SUCH DAMAGE.
40  *
41  * RiscBSD kernel project
42  *
43  * cpufuncs.c
44  *
45  * C functions for supporting CPU / MMU / TLB specific operations.
46  *
47  * Created      : 30/01/97
48  */
49 
50 #include <sys/cdefs.h>
51 __KERNEL_RCSID(0, "$NetBSD: cpufunc.c,v 1.93 2009/03/15 22:23:16 cegger Exp $");
52 
53 #include "opt_compat_netbsd.h"
54 #include "opt_cpuoptions.h"
55 #include "opt_perfctrs.h"
56 
57 #include <sys/types.h>
58 #include <sys/param.h>
59 #include <sys/pmc.h>
60 #include <sys/systm.h>
61 #include <machine/cpu.h>
62 #include <machine/bootconfig.h>
63 #include <arch/arm/arm/disassem.h>
64 
65 #include <uvm/uvm.h>
66 
67 #include <arm/cpuconf.h>
68 #include <arm/cpufunc.h>
69 
70 #ifdef CPU_XSCALE_80200
71 #include <arm/xscale/i80200reg.h>
72 #include <arm/xscale/i80200var.h>
73 #endif
74 
75 #ifdef CPU_XSCALE_80321
76 #include <arm/xscale/i80321reg.h>
77 #include <arm/xscale/i80321var.h>
78 #endif
79 
80 #ifdef CPU_XSCALE_IXP425
81 #include <arm/xscale/ixp425reg.h>
82 #include <arm/xscale/ixp425var.h>
83 #endif
84 
85 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321)
86 #include <arm/xscale/xscalereg.h>
87 #endif
88 
89 #if defined(PERFCTRS)
90 struct arm_pmc_funcs *arm_pmc;
91 #endif
92 
93 /* PRIMARY CACHE VARIABLES */
94 int	arm_picache_size;
95 int	arm_picache_line_size;
96 int	arm_picache_ways;
97 
98 int	arm_pdcache_size;	/* and unified */
99 int	arm_pdcache_line_size;
100 int	arm_pdcache_ways;
101 #if (ARM_MMU_V6) != 0
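/*
 * Mask of virtual-address bits that can cause VIPT cache aliasing on
 * ARMv6; filled in from the cache type register by get_cachetype_cp15()
 * and used to size uvmexp.ncolors in set_cpufuncs().
 */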
102 int	arm_cache_prefer_mask;
103 #endif
104 
105 
106 int	arm_pcache_type;
107 int	arm_pcache_unified;
108 
109 int	arm_dcache_align;
110 int	arm_dcache_align_mask;
111 
112 /* 1 == use cpu_sleep(), 0 == don't */
113 int cpu_do_powersave;
114 
115 #ifdef CPU_ARM2
116 struct cpu_functions arm2_cpufuncs = {
117 	/* CPU functions */
118 
119 	.cf_id			= arm2_id,
120 	.cf_cpwait		= cpufunc_nullop,
121 
122 	/* MMU functions */
123 
124 	.cf_control		= (void *)cpufunc_nullop,
125 
126 	/* TLB functions */
127 
128 	.cf_tlb_flushID		= cpufunc_nullop,
129 	.cf_tlb_flushID_SE	= (void *)cpufunc_nullop,
130 	.cf_tlb_flushI		= cpufunc_nullop,
131 	.cf_tlb_flushI_SE	= (void *)cpufunc_nullop,
132 	.cf_tlb_flushD		= cpufunc_nullop,
133 	.cf_tlb_flushD_SE	= (void *)cpufunc_nullop,
134 
135 	/* Cache operations */
136 
137 	.cf_icache_sync_all	= cpufunc_nullop,
138 	.cf_icache_sync_range	= (void *) cpufunc_nullop,
139 
140 	.cf_dcache_wbinv_all	= arm3_cache_flush,
141 	.cf_dcache_wbinv_range	= (void *)cpufunc_nullop,
142 	.cf_dcache_inv_range	= (void *)cpufunc_nullop,
143 	.cf_dcache_wb_range	= (void *)cpufunc_nullop,
144 
145 	.cf_idcache_wbinv_all	= cpufunc_nullop,
146 	.cf_idcache_wbinv_range	= (void *)cpufunc_nullop,
147 
148 	/* Other functions */
149 
150 	.cf_flush_prefetchbuf	= cpufunc_nullop,
151 	.cf_drain_writebuf	= cpufunc_nullop,
152 	.cf_flush_brnchtgt_C	= cpufunc_nullop,
153 	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,
154 
155 	.cf_sleep		= (void *)cpufunc_nullop,
156 
157 	/* Soft functions */
158 
159 	.cf_dataabt_fixup	= early_abort_fixup,
160 	.cf_prefetchabt_fixup	= cpufunc_null_fixup,
161 
162 	.cf_setup		= (void *)cpufunc_nullop
163 
164 };
165 #endif	/* CPU_ARM2 */
166 
167 #ifdef CPU_ARM250
168 struct cpu_functions arm250_cpufuncs = {
169 	/* CPU functions */
170 
171 	.cf_id			= arm250_id,
172 	.cf_cpwait		= cpufunc_nullop,
173 
174 	/* MMU functions */
175 
176 	.cf_control		= (void *)cpufunc_nullop,
177 
178 	/* TLB functions */
179 
180 	.cf_tlb_flushID		= cpufunc_nullop,
181 	.cf_tlb_flushID_SE	= (void *)cpufunc_nullop,
182 	.cf_tlb_flushI		= cpufunc_nullop,
183 	.cf_tlb_flushI_SE	= (void *)cpufunc_nullop,
184 	.cf_tlb_flushD		= cpufunc_nullop,
185 	.cf_tlb_flushD_SE	= (void *)cpufunc_nullop,
186 
187 	/* Cache operations */
188 
189 	.cf_icache_sync_all	= cpufunc_nullop,
190 	.cf_icache_sync_range	= (void *) cpufunc_nullop,
191 
192 	.cf_dcache_wbinv_all	= arm3_cache_flush,
193 	.cf_dcache_wbinv_range	= (void *)cpufunc_nullop,
194 	.cf_dcache_inv_range	= (void *)cpufunc_nullop,
195 	.cf_dcache_wb_range	= (void *)cpufunc_nullop,
196 
197 	.cf_idcache_wbinv_all	= cpufunc_nullop,
198 	.cf_idcache_wbinv_range	= (void *)cpufunc_nullop,
199 
200 	/* Other functions */
201 
202 	.cf_flush_prefetchbuf	= cpufunc_nullop,
203 	.cf_drain_writebuf	= cpufunc_nullop,
204 	.cf_flush_brnchtgt_C	= cpufunc_nullop,
205 	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,
206 
207 	.cf_sleep		= (void *)cpufunc_nullop,
208 
209 	/* Soft functions */
210 
211 	.cf_dataabt_fixup	= early_abort_fixup,
212 	.cf_prefetchabt_fixup	= cpufunc_null_fixup,
213 
214 	.cf_setup		= (void *)cpufunc_nullop
215 
216 };
217 #endif	/* CPU_ARM250 */
218 
219 #ifdef CPU_ARM3
220 struct cpu_functions arm3_cpufuncs = {
221 	/* CPU functions */
222 
223 	.cf_id			= cpufunc_id,
224 	.cf_cpwait		= cpufunc_nullop,
225 
226 	/* MMU functions */
227 
228 	.cf_control		= arm3_control,
229 
230 	/* TLB functions */
231 
232 	.cf_tlb_flushID		= cpufunc_nullop,
233 	.cf_tlb_flushID_SE	= (void *)cpufunc_nullop,
234 	.cf_tlb_flushI		= cpufunc_nullop,
235 	.cf_tlb_flushI_SE	= (void *)cpufunc_nullop,
236 	.cf_tlb_flushD		= cpufunc_nullop,
237 	.cf_tlb_flushD_SE	= (void *)cpufunc_nullop,
238 
239 	/* Cache operations */
240 
241 	.cf_icache_sync_all	= cpufunc_nullop,
242 	.cf_icache_sync_range	= (void *) cpufunc_nullop,
243 
244 	.cf_dcache_wbinv_all	= arm3_cache_flush,
245 	.cf_dcache_wbinv_range	= (void *)arm3_cache_flush,
246 	.cf_dcache_inv_range	= (void *)arm3_cache_flush,
247 	.cf_dcache_wb_range	= (void *)cpufunc_nullop,
248 
249 	.cf_idcache_wbinv_all	= arm3_cache_flush,
250 	.cf_idcache_wbinv_range	= (void *)arm3_cache_flush,
251 
252 	/* Other functions */
253 
254 	.cf_flush_prefetchbuf	= cpufunc_nullop,
255 	.cf_drain_writebuf	= cpufunc_nullop,
256 	.cf_flush_brnchtgt_C	= cpufunc_nullop,
257 	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,
258 
259 	.cf_sleep		= (void *)cpufunc_nullop,
260 
261 	/* Soft functions */
262 
263 	.cf_dataabt_fixup	= early_abort_fixup,
264 	.cf_prefetchabt_fixup	= cpufunc_null_fixup,
265 
266 	.cf_setup		= (void *)cpufunc_nullop
267 
268 };
269 #endif	/* CPU_ARM3 */
270 
271 #ifdef CPU_ARM6
272 struct cpu_functions arm6_cpufuncs = {
273 	/* CPU functions */
274 
275 	.cf_id			= cpufunc_id,
276 	.cf_cpwait		= cpufunc_nullop,
277 
278 	/* MMU functions */
279 
280 	.cf_control		= cpufunc_control,
281 	.cf_domains		= cpufunc_domains,
282 	.cf_setttb		= arm67_setttb,
283 	.cf_faultstatus		= cpufunc_faultstatus,
284 	.cf_faultaddress	= cpufunc_faultaddress,
285 
286 	/* TLB functions */
287 
288 	.cf_tlb_flushID		= arm67_tlb_flush,
289 	.cf_tlb_flushID_SE	= arm67_tlb_purge,
290 	.cf_tlb_flushI		= arm67_tlb_flush,
291 	.cf_tlb_flushI_SE	= arm67_tlb_purge,
292 	.cf_tlb_flushD		= arm67_tlb_flush,
293 	.cf_tlb_flushD_SE	= arm67_tlb_purge,
294 
295 	/* Cache operations */
296 
297 	.cf_icache_sync_all	= cpufunc_nullop,
298 	.cf_icache_sync_range	= (void *) cpufunc_nullop,
299 
300 	.cf_dcache_wbinv_all	= arm67_cache_flush,
301 	.cf_dcache_wbinv_range	= (void *)arm67_cache_flush,
302 	.cf_dcache_inv_range	= (void *)arm67_cache_flush,
303 	.cf_dcache_wb_range	= (void *)cpufunc_nullop,
304 
305 	.cf_idcache_wbinv_all	= arm67_cache_flush,
306 	.cf_idcache_wbinv_range	= (void *)arm67_cache_flush,
307 
308 	/* Other functions */
309 
310 	.cf_flush_prefetchbuf	= cpufunc_nullop,
311 	.cf_drain_writebuf	= cpufunc_nullop,
312 	.cf_flush_brnchtgt_C	= cpufunc_nullop,
313 	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,
314 
315 	.cf_sleep		= (void *)cpufunc_nullop,
316 
317 	/* Soft functions */
318 
319 #ifdef ARM6_LATE_ABORT
320 	.cf_dataabt_fixup	= late_abort_fixup,
321 #else
322 	.cf_dataabt_fixup	= early_abort_fixup,
323 #endif
324 	.cf_prefetchabt_fixup	= cpufunc_null_fixup,
325 
326 	.cf_context_switch	= arm67_context_switch,
327 
328 	.cf_setup		= arm6_setup
329 
330 };
331 #endif	/* CPU_ARM6 */
332 
333 #ifdef CPU_ARM7
334 struct cpu_functions arm7_cpufuncs = {
335 	/* CPU functions */
336 
337 	.cf_id			= cpufunc_id,
338 	.cf_cpwait		= cpufunc_nullop,
339 
340 	/* MMU functions */
341 
342 	.cf_control		= cpufunc_control,
343 	.cf_domains		= cpufunc_domains,
344 	.cf_setttb		= arm67_setttb,
345 	.cf_faultstatus		= cpufunc_faultstatus,
346 	.cf_faultaddress	= cpufunc_faultaddress,
347 
348 	/* TLB functions */
349 
350 	.cf_tlb_flushID		= arm67_tlb_flush,
351 	.cf_tlb_flushID_SE	= arm67_tlb_purge,
352 	.cf_tlb_flushI		= arm67_tlb_flush,
353 	.cf_tlb_flushI_SE	= arm67_tlb_purge,
354 	.cf_tlb_flushD		= arm67_tlb_flush,
355 	.cf_tlb_flushD_SE	= arm67_tlb_purge,
356 
357 	/* Cache operations */
358 
359 	.cf_icache_sync_all	= cpufunc_nullop,
360 	.cf_icache_sync_range	= (void *)cpufunc_nullop,
361 
362 	.cf_dcache_wbinv_all	= arm67_cache_flush,
363 	.cf_dcache_wbinv_range	= (void *)arm67_cache_flush,
364 	.cf_dcache_inv_range	= (void *)arm67_cache_flush,
365 	.cf_dcache_wb_range	= (void *)cpufunc_nullop,
366 
367 	.cf_idcache_wbinv_all	= arm67_cache_flush,
368 	.cf_idcache_wbinv_range	= (void *)arm67_cache_flush,
369 
370 	/* Other functions */
371 
372 	.cf_flush_prefetchbuf	= cpufunc_nullop,
373 	.cf_drain_writebuf	= cpufunc_nullop,
374 	.cf_flush_brnchtgt_C	= cpufunc_nullop,
375 	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,
376 
377 	.cf_sleep		= (void *)cpufunc_nullop,
378 
379 	/* Soft functions */
380 
381 	.cf_dataabt_fixup	= late_abort_fixup,
382 	.cf_prefetchabt_fixup	= cpufunc_null_fixup,
383 
384 	.cf_context_switch	= arm67_context_switch,
385 
386 	.cf_setup		= arm7_setup
387 
388 };
389 #endif	/* CPU_ARM7 */
390 
391 #ifdef CPU_ARM7TDMI
392 struct cpu_functions arm7tdmi_cpufuncs = {
393 	/* CPU functions */
394 
395 	.cf_id			= cpufunc_id,
396 	.cf_cpwait		= cpufunc_nullop,
397 
398 	/* MMU functions */
399 
400 	.cf_control		= cpufunc_control,
401 	.cf_domains		= cpufunc_domains,
402 	.cf_setttb		= arm7tdmi_setttb,
403 	.cf_faultstatus		= cpufunc_faultstatus,
404 	.cf_faultaddress	= cpufunc_faultaddress,
405 
406 	/* TLB functions */
407 
408 	.cf_tlb_flushID		= arm7tdmi_tlb_flushID,
409 	.cf_tlb_flushID_SE	= arm7tdmi_tlb_flushID_SE,
410 	.cf_tlb_flushI		= arm7tdmi_tlb_flushID,
411 	.cf_tlb_flushI_SE	= arm7tdmi_tlb_flushID_SE,
412 	.cf_tlb_flushD		= arm7tdmi_tlb_flushID,
413 	.cf_tlb_flushD_SE	= arm7tdmi_tlb_flushID_SE,
414 
415 	/* Cache operations */
416 
417 	.cf_icache_sync_all	= cpufunc_nullop,
418 	.cf_icache_sync_range	= (void *)cpufunc_nullop,
419 
420 	.cf_dcache_wbinv_all	= arm7tdmi_cache_flushID,
421 	.cf_dcache_wbinv_range	= (void *)arm7tdmi_cache_flushID,
422 	.cf_dcache_inv_range	= (void *)arm7tdmi_cache_flushID,
423 	.cf_dcache_wb_range	= (void *)cpufunc_nullop,
424 
425 	.cf_idcache_wbinv_all	= arm7tdmi_cache_flushID,
426 	.cf_idcache_wbinv_range	= (void *)arm7tdmi_cache_flushID,
427 
428 	/* Other functions */
429 
430 	.cf_flush_prefetchbuf	= cpufunc_nullop,
431 	.cf_drain_writebuf	= cpufunc_nullop,
432 	.cf_flush_brnchtgt_C	= cpufunc_nullop,
433 	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,
434 
435 	.cf_sleep		= (void *)cpufunc_nullop,
436 
437 	/* Soft functions */
438 
439 	.cf_dataabt_fixup	= late_abort_fixup,
440 	.cf_prefetchabt_fixup	= cpufunc_null_fixup,
441 
442 	.cf_context_switch	= arm7tdmi_context_switch,
443 
444 	.cf_setup		= arm7tdmi_setup
445 
446 };
447 #endif	/* CPU_ARM7TDMI */
448 
449 #ifdef CPU_ARM8
450 struct cpu_functions arm8_cpufuncs = {
451 	/* CPU functions */
452 
453 	.cf_id			= cpufunc_id,
454 	.cf_cpwait		= cpufunc_nullop,
455 
456 	/* MMU functions */
457 
458 	.cf_control		= cpufunc_control,
459 	.cf_domains		= cpufunc_domains,
460 	.cf_setttb		= arm8_setttb,
461 	.cf_faultstatus		= cpufunc_faultstatus,
462 	.cf_faultaddress	= cpufunc_faultaddress,
463 
464 	/* TLB functions */
465 
466 	.cf_tlb_flushID		= arm8_tlb_flushID,
467 	.cf_tlb_flushID_SE	= arm8_tlb_flushID_SE,
468 	.cf_tlb_flushI		= arm8_tlb_flushID,
469 	.cf_tlb_flushI_SE	= arm8_tlb_flushID_SE,
470 	.cf_tlb_flushD		= arm8_tlb_flushID,
471 	.cf_tlb_flushD_SE	= arm8_tlb_flushID_SE,
472 
473 	/* Cache operations */
474 
475 	.cf_icache_sync_all	= cpufunc_nullop,
476 	.cf_icache_sync_range	= (void *)cpufunc_nullop,
477 
478 	.cf_dcache_wbinv_all	= arm8_cache_purgeID,
479 	.cf_dcache_wbinv_range	= (void *)arm8_cache_purgeID,
480 /*XXX*/	.cf_dcache_inv_range	= (void *)arm8_cache_purgeID,
481 	.cf_dcache_wb_range	= (void *)arm8_cache_cleanID,
482 
483 	.cf_idcache_wbinv_all	= arm8_cache_purgeID,
484 	.cf_idcache_wbinv_range = (void *)arm8_cache_purgeID,
485 
486 	/* Other functions */
487 
488 	.cf_flush_prefetchbuf	= cpufunc_nullop,
489 	.cf_drain_writebuf	= cpufunc_nullop,
490 	.cf_flush_brnchtgt_C	= cpufunc_nullop,
491 	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,
492 
493 	.cf_sleep		= (void *)cpufunc_nullop,
494 
495 	/* Soft functions */
496 
497 	.cf_dataabt_fixup	= cpufunc_null_fixup,
498 	.cf_prefetchabt_fixup	= cpufunc_null_fixup,
499 
500 	.cf_context_switch	= arm8_context_switch,
501 
502 	.cf_setup		= arm8_setup
503 };
504 #endif	/* CPU_ARM8 */
505 
506 #ifdef CPU_ARM9
507 struct cpu_functions arm9_cpufuncs = {
508 	/* CPU functions */
509 
510 	.cf_id			= cpufunc_id,
511 	.cf_cpwait		= cpufunc_nullop,
512 
513 	/* MMU functions */
514 
515 	.cf_control		= cpufunc_control,
516 	.cf_domains		= cpufunc_domains,
517 	.cf_setttb		= arm9_setttb,
518 	.cf_faultstatus		= cpufunc_faultstatus,
519 	.cf_faultaddress	= cpufunc_faultaddress,
520 
521 	/* TLB functions */
522 
523 	.cf_tlb_flushID		= armv4_tlb_flushID,
524 	.cf_tlb_flushID_SE	= arm9_tlb_flushID_SE,
525 	.cf_tlb_flushI		= armv4_tlb_flushI,
526 	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
527 	.cf_tlb_flushD		= armv4_tlb_flushD,
528 	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,
529 
530 	/* Cache operations */
531 
532 	.cf_icache_sync_all	= arm9_icache_sync_all,
533 	.cf_icache_sync_range	= arm9_icache_sync_range,
534 
535 	.cf_dcache_wbinv_all	= arm9_dcache_wbinv_all,
536 	.cf_dcache_wbinv_range	= arm9_dcache_wbinv_range,
537 /*XXX*/	.cf_dcache_inv_range	= arm9_dcache_wbinv_range,
538 	.cf_dcache_wb_range	= arm9_dcache_wb_range,
539 
540 	.cf_idcache_wbinv_all	= arm9_idcache_wbinv_all,
541 	.cf_idcache_wbinv_range = arm9_idcache_wbinv_range,
542 
543 	/* Other functions */
544 
545 	.cf_flush_prefetchbuf	= cpufunc_nullop,
546 	.cf_drain_writebuf	= armv4_drain_writebuf,
547 	.cf_flush_brnchtgt_C	= cpufunc_nullop,
548 	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,
549 
550 	.cf_sleep		= (void *)cpufunc_nullop,
551 
552 	/* Soft functions */
553 
554 	.cf_dataabt_fixup	= cpufunc_null_fixup,
555 	.cf_prefetchabt_fixup	= cpufunc_null_fixup,
556 
557 	.cf_context_switch	= arm9_context_switch,
558 
559 	.cf_setup		= arm9_setup
560 
561 };
562 #endif /* CPU_ARM9 */
563 
564 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
565 struct cpu_functions armv5_ec_cpufuncs = {
566 	/* CPU functions */
567 
568 	.cf_id			= cpufunc_id,
569 	.cf_cpwait		= cpufunc_nullop,
570 
571 	/* MMU functions */
572 
573 	.cf_control		= cpufunc_control,
574 	.cf_domains		= cpufunc_domains,
575 	.cf_setttb		= armv5_ec_setttb,
576 	.cf_faultstatus		= cpufunc_faultstatus,
577 	.cf_faultaddress	= cpufunc_faultaddress,
578 
579 	/* TLB functions */
580 
581 	.cf_tlb_flushID		= armv4_tlb_flushID,
582 	.cf_tlb_flushID_SE	= arm10_tlb_flushID_SE,
583 	.cf_tlb_flushI		= armv4_tlb_flushI,
584 	.cf_tlb_flushI_SE	= arm10_tlb_flushI_SE,
585 	.cf_tlb_flushD		= armv4_tlb_flushD,
586 	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,
587 
588 	/* Cache operations */
589 
590 	.cf_icache_sync_all	= armv5_ec_icache_sync_all,
591 	.cf_icache_sync_range	= armv5_ec_icache_sync_range,
592 
593 	.cf_dcache_wbinv_all	= armv5_ec_dcache_wbinv_all,
594 	.cf_dcache_wbinv_range	= armv5_ec_dcache_wbinv_range,
595 /*XXX*/	.cf_dcache_inv_range	= armv5_ec_dcache_wbinv_range,
596 	.cf_dcache_wb_range	= armv5_ec_dcache_wb_range,
597 
598 	.cf_idcache_wbinv_all	= armv5_ec_idcache_wbinv_all,
599 	.cf_idcache_wbinv_range = armv5_ec_idcache_wbinv_range,
600 
601 	/* Other functions */
602 
603 	.cf_flush_prefetchbuf	= cpufunc_nullop,
604 	.cf_drain_writebuf	= armv4_drain_writebuf,
605 	.cf_flush_brnchtgt_C	= cpufunc_nullop,
606 	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,
607 
608 	.cf_sleep		= (void *)cpufunc_nullop,
609 
610 	/* Soft functions */
611 
612 	.cf_dataabt_fixup	= cpufunc_null_fixup,
613 	.cf_prefetchabt_fixup	= cpufunc_null_fixup,
614 
615 	.cf_context_switch	= arm10_context_switch,
616 
617 	.cf_setup		= arm10_setup
618 
619 };
620 #endif /* CPU_ARM9E || CPU_ARM10 */
621 
622 #ifdef CPU_ARM10
623 struct cpu_functions arm10_cpufuncs = {
624 	/* CPU functions */
625 
626 	.cf_id			= cpufunc_id,
627 	.cf_cpwait		= cpufunc_nullop,
628 
629 	/* MMU functions */
630 
631 	.cf_control		= cpufunc_control,
632 	.cf_domains		= cpufunc_domains,
633 	.cf_setttb		= armv5_setttb,
634 	.cf_faultstatus		= cpufunc_faultstatus,
635 	.cf_faultaddress	= cpufunc_faultaddress,
636 
637 	/* TLB functions */
638 
639 	.cf_tlb_flushID		= armv4_tlb_flushID,
640 	.cf_tlb_flushID_SE	= arm10_tlb_flushID_SE,
641 	.cf_tlb_flushI		= armv4_tlb_flushI,
642 	.cf_tlb_flushI_SE	= arm10_tlb_flushI_SE,
643 	.cf_tlb_flushD		= armv4_tlb_flushD,
644 	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,
645 
646 	/* Cache operations */
647 
648 	.cf_icache_sync_all	= armv5_icache_sync_all,
649 	.cf_icache_sync_range	= armv5_icache_sync_range,
650 
651 	.cf_dcache_wbinv_all	= armv5_dcache_wbinv_all,
652 	.cf_dcache_wbinv_range	= armv5_dcache_wbinv_range,
653 /*XXX*/	.cf_dcache_inv_range	= armv5_dcache_wbinv_range,
654 	.cf_dcache_wb_range	= armv5_dcache_wb_range,
655 
656 	.cf_idcache_wbinv_all	= armv5_idcache_wbinv_all,
657 	.cf_idcache_wbinv_range = armv5_idcache_wbinv_range,
658 
659 	/* Other functions */
660 
661 	.cf_flush_prefetchbuf	= cpufunc_nullop,
662 	.cf_drain_writebuf	= armv4_drain_writebuf,
663 	.cf_flush_brnchtgt_C	= cpufunc_nullop,
664 	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,
665 
666 	.cf_sleep		= (void *)cpufunc_nullop,
667 
668 	/* Soft functions */
669 
670 	.cf_dataabt_fixup	= cpufunc_null_fixup,
671 	.cf_prefetchabt_fixup	= cpufunc_null_fixup,
672 
673 	.cf_context_switch	= arm10_context_switch,
674 
675 	.cf_setup		= arm10_setup
676 
677 };
678 #endif /* CPU_ARM10 */
679 
680 #ifdef CPU_ARM11
681 struct cpu_functions arm11_cpufuncs = {
682 	/* CPU functions */
683 
684 	.cf_id			= cpufunc_id,
685 	.cf_cpwait		= cpufunc_nullop,
686 
687 	/* MMU functions */
688 
689 	.cf_control		= cpufunc_control,
690 	.cf_domains		= cpufunc_domains,
691 	.cf_setttb		= arm11_setttb,
692 	.cf_faultstatus		= cpufunc_faultstatus,
693 	.cf_faultaddress	= cpufunc_faultaddress,
694 
695 	/* TLB functions */
696 
697 	.cf_tlb_flushID		= arm11_tlb_flushID,
698 	.cf_tlb_flushID_SE	= arm11_tlb_flushID_SE,
699 	.cf_tlb_flushI		= arm11_tlb_flushI,
700 	.cf_tlb_flushI_SE	= arm11_tlb_flushI_SE,
701 	.cf_tlb_flushD		= arm11_tlb_flushD,
702 	.cf_tlb_flushD_SE	= arm11_tlb_flushD_SE,
703 
704 	/* Cache operations */
705 
706 	.cf_icache_sync_all	= armv6_icache_sync_all,
707 	.cf_icache_sync_range	= armv6_icache_sync_range,
708 
709 	.cf_dcache_wbinv_all	= armv6_dcache_wbinv_all,
710 	.cf_dcache_wbinv_range	= armv6_dcache_wbinv_range,
711 	.cf_dcache_inv_range	= armv6_dcache_inv_range,
712 	.cf_dcache_wb_range	= armv6_dcache_wb_range,
713 
714 	.cf_idcache_wbinv_all	= armv6_idcache_wbinv_all,
715 	.cf_idcache_wbinv_range = armv6_idcache_wbinv_range,
716 
717 	/* Other functions */
718 
719 	.cf_flush_prefetchbuf	= cpufunc_nullop,
720 	.cf_drain_writebuf	= arm11_drain_writebuf,
721 	.cf_flush_brnchtgt_C	= cpufunc_nullop,
722 	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,
723 
724 	.cf_sleep		= arm11_sleep,
725 
726 	/* Soft functions */
727 
728 	.cf_dataabt_fixup	= cpufunc_null_fixup,
729 	.cf_prefetchabt_fixup	= cpufunc_null_fixup,
730 
731 	.cf_context_switch	= arm11_context_switch,
732 
733 	.cf_setup		= arm11_setup
734 
735 };
736 #endif /* CPU_ARM11 */
737 
738 #ifdef CPU_ARM1136
739 struct cpu_functions arm1136_cpufuncs = {
740 	/* CPU functions */
741 
742 	.cf_id			= cpufunc_id,
743 	.cf_cpwait		= cpufunc_nullop,
744 
745 	/* MMU functions */
746 
747 	.cf_control		= cpufunc_control,
748 	.cf_domains		= cpufunc_domains,
749 	.cf_setttb		= arm1136_setttb,
750 	.cf_faultstatus		= cpufunc_faultstatus,
751 	.cf_faultaddress	= cpufunc_faultaddress,
752 
753 	/* TLB functions */
754 
755 	.cf_tlb_flushID		= arm11_tlb_flushID,
756 	.cf_tlb_flushID_SE	= arm11_tlb_flushID_SE,
757 	.cf_tlb_flushI		= arm11_tlb_flushI,
758 	.cf_tlb_flushI_SE	= arm11_tlb_flushI_SE,
759 	.cf_tlb_flushD		= arm11_tlb_flushD,
760 	.cf_tlb_flushD_SE	= arm11_tlb_flushD_SE,
761 
762 	/* Cache operations */
763 
764 	.cf_icache_sync_all	= arm1136_icache_sync_all,	/* 411920 */
765 	.cf_icache_sync_range	= arm1136_icache_sync_range,	/* 371025 */
766 
767 	.cf_dcache_wbinv_all	= arm1136_dcache_wbinv_all,	/* 411920 */
768 	.cf_dcache_wbinv_range	= armv6_dcache_wbinv_range,
769 	.cf_dcache_inv_range	= armv6_dcache_inv_range,
770 	.cf_dcache_wb_range	= armv6_dcache_wb_range,
771 
772 	.cf_idcache_wbinv_all	= arm1136_idcache_wbinv_all,	/* 411920 */
773 	.cf_idcache_wbinv_range = arm1136_idcache_wbinv_range,	/* 371025 */
774 
775 	/* Other functions */
776 
777 	.cf_flush_prefetchbuf	= arm1136_flush_prefetchbuf,
778 	.cf_drain_writebuf	= arm11_drain_writebuf,
779 	.cf_flush_brnchtgt_C	= cpufunc_nullop,
780 	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,
781 
782 	.cf_sleep		= arm11_sleep,
783 
784 	/* Soft functions */
785 
786 	.cf_dataabt_fixup	= cpufunc_null_fixup,
787 	.cf_prefetchabt_fixup	= cpufunc_null_fixup,
788 
789 	.cf_context_switch	= arm11_context_switch,
790 
791 	.cf_setup		= arm1136_setup
792 
793 };
794 #endif /* CPU_ARM1136 */
795 
796 #ifdef CPU_SA110
797 struct cpu_functions sa110_cpufuncs = {
798 	/* CPU functions */
799 
800 	.cf_id			= cpufunc_id,
801 	.cf_cpwait		= cpufunc_nullop,
802 
803 	/* MMU functions */
804 
805 	.cf_control		= cpufunc_control,
806 	.cf_domains		= cpufunc_domains,
807 	.cf_setttb		= sa1_setttb,
808 	.cf_faultstatus		= cpufunc_faultstatus,
809 	.cf_faultaddress	= cpufunc_faultaddress,
810 
811 	/* TLB functions */
812 
813 	.cf_tlb_flushID		= armv4_tlb_flushID,
814 	.cf_tlb_flushID_SE	= sa1_tlb_flushID_SE,
815 	.cf_tlb_flushI		= armv4_tlb_flushI,
816 	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
817 	.cf_tlb_flushD		= armv4_tlb_flushD,
818 	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,
819 
820 	/* Cache operations */
821 
822 	.cf_icache_sync_all	= sa1_cache_syncI,
823 	.cf_icache_sync_range	= sa1_cache_syncI_rng,
824 
825 	.cf_dcache_wbinv_all	= sa1_cache_purgeD,
826 	.cf_dcache_wbinv_range	= sa1_cache_purgeD_rng,
827 /*XXX*/	.cf_dcache_inv_range	= sa1_cache_purgeD_rng,
828 	.cf_dcache_wb_range	= sa1_cache_cleanD_rng,
829 
830 	.cf_idcache_wbinv_all	= sa1_cache_purgeID,
831 	.cf_idcache_wbinv_range	= sa1_cache_purgeID_rng,
832 
833 	/* Other functions */
834 
835 	.cf_flush_prefetchbuf	= cpufunc_nullop,
836 	.cf_drain_writebuf	= armv4_drain_writebuf,
837 	.cf_flush_brnchtgt_C	= cpufunc_nullop,
838 	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,
839 
840 	.cf_sleep		= (void *)cpufunc_nullop,
841 
842 	/* Soft functions */
843 
844 	.cf_dataabt_fixup	= cpufunc_null_fixup,
845 	.cf_prefetchabt_fixup	= cpufunc_null_fixup,
846 
847 	.cf_context_switch	= sa110_context_switch,
848 
849 	.cf_setup		= sa110_setup
850 };
851 #endif	/* CPU_SA110 */
852 
853 #if defined(CPU_SA1100) || defined(CPU_SA1110)
854 struct cpu_functions sa11x0_cpufuncs = {
855 	/* CPU functions */
856 
857 	.cf_id			= cpufunc_id,
858 	.cf_cpwait		= cpufunc_nullop,
859 
860 	/* MMU functions */
861 
862 	.cf_control		= cpufunc_control,
863 	.cf_domains		= cpufunc_domains,
864 	.cf_setttb		= sa1_setttb,
865 	.cf_faultstatus		= cpufunc_faultstatus,
866 	.cf_faultaddress	= cpufunc_faultaddress,
867 
868 	/* TLB functions */
869 
870 	.cf_tlb_flushID		= armv4_tlb_flushID,
871 	.cf_tlb_flushID_SE	= sa1_tlb_flushID_SE,
872 	.cf_tlb_flushI		= armv4_tlb_flushI,
873 	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
874 	.cf_tlb_flushD		= armv4_tlb_flushD,
875 	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,
876 
877 	/* Cache operations */
878 
879 	.cf_icache_sync_all	= sa1_cache_syncI,
880 	.cf_icache_sync_range	= sa1_cache_syncI_rng,
881 
882 	.cf_dcache_wbinv_all	= sa1_cache_purgeD,
883 	.cf_dcache_wbinv_range	= sa1_cache_purgeD_rng,
884 /*XXX*/	.cf_dcache_inv_range	= sa1_cache_purgeD_rng,
885 	.cf_dcache_wb_range	= sa1_cache_cleanD_rng,
886 
887 	.cf_idcache_wbinv_all	= sa1_cache_purgeID,
888 	.cf_idcache_wbinv_range	= sa1_cache_purgeID_rng,
889 
890 	/* Other functions */
891 
892 	.cf_flush_prefetchbuf	= sa11x0_drain_readbuf,
893 	.cf_drain_writebuf	= armv4_drain_writebuf,
894 	.cf_flush_brnchtgt_C	= cpufunc_nullop,
895 	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,
896 
897 	.cf_sleep		= sa11x0_cpu_sleep,
898 
899 	/* Soft functions */
900 
901 	.cf_dataabt_fixup	= cpufunc_null_fixup,
902 	.cf_prefetchabt_fixup	= cpufunc_null_fixup,
903 
904 	.cf_context_switch	= sa11x0_context_switch,
905 
906 	.cf_setup		= sa11x0_setup
907 };
908 #endif	/* CPU_SA1100 || CPU_SA1110 */
909 
910 #if defined(CPU_FA526)
911 struct cpu_functions fa526_cpufuncs = {
912 	/* CPU functions */
913 
914 	.cf_id			= cpufunc_id,
915 	.cf_cpwait		= cpufunc_nullop,
916 
917 	/* MMU functions */
918 
919 	.cf_control		= cpufunc_control,
920 	.cf_domains		= cpufunc_domains,
921 	.cf_setttb		= fa526_setttb,
922 	.cf_faultstatus		= cpufunc_faultstatus,
923 	.cf_faultaddress	= cpufunc_faultaddress,
924 
925 	/* TLB functions */
926 
927 	.cf_tlb_flushID		= armv4_tlb_flushID,
928 	.cf_tlb_flushID_SE	= fa526_tlb_flushID_SE,
929 	.cf_tlb_flushI		= armv4_tlb_flushI,
930 	.cf_tlb_flushI_SE	= fa526_tlb_flushI_SE,
931 	.cf_tlb_flushD		= armv4_tlb_flushD,
932 	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,
933 
934 	/* Cache operations */
935 
936 	.cf_icache_sync_all	= fa526_icache_sync_all,
937 	.cf_icache_sync_range	= fa526_icache_sync_range,
938 
939 	.cf_dcache_wbinv_all	= fa526_dcache_wbinv_all,
940 	.cf_dcache_wbinv_range	= fa526_dcache_wbinv_range,
941 	.cf_dcache_inv_range	= fa526_dcache_inv_range,
942 	.cf_dcache_wb_range	= fa526_dcache_wb_range,
943 
944 	.cf_idcache_wbinv_all	= fa526_idcache_wbinv_all,
945 	.cf_idcache_wbinv_range	= fa526_idcache_wbinv_range,
946 
947 	/* Other functions */
948 
949 	.cf_flush_prefetchbuf	= fa526_flush_prefetchbuf,
950 	.cf_drain_writebuf	= armv4_drain_writebuf,
951 	.cf_flush_brnchtgt_C	= cpufunc_nullop,
952 	.cf_flush_brnchtgt_E	= fa526_flush_brnchtgt_E,
953 
954 	.cf_sleep		= fa526_cpu_sleep,
955 
956 	/* Soft functions */
957 
958 	.cf_dataabt_fixup	= cpufunc_null_fixup,
959 	.cf_prefetchabt_fixup	= cpufunc_null_fixup,
960 
961 	.cf_context_switch	= fa526_context_switch,
962 
963 	.cf_setup		= fa526_setup
964 };
965 #endif	/* CPU_FA526 */
966 
967 #ifdef CPU_IXP12X0
968 struct cpu_functions ixp12x0_cpufuncs = {
969 	/* CPU functions */
970 
971 	.cf_id			= cpufunc_id,
972 	.cf_cpwait		= cpufunc_nullop,
973 
974 	/* MMU functions */
975 
976 	.cf_control		= cpufunc_control,
977 	.cf_domains		= cpufunc_domains,
978 	.cf_setttb		= sa1_setttb,
979 	.cf_faultstatus		= cpufunc_faultstatus,
980 	.cf_faultaddress	= cpufunc_faultaddress,
981 
982 	/* TLB functions */
983 
984 	.cf_tlb_flushID		= armv4_tlb_flushID,
985 	.cf_tlb_flushID_SE	= sa1_tlb_flushID_SE,
986 	.cf_tlb_flushI		= armv4_tlb_flushI,
987 	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
988 	.cf_tlb_flushD		= armv4_tlb_flushD,
989 	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,
990 
991 	/* Cache operations */
992 
993 	.cf_icache_sync_all	= sa1_cache_syncI,
994 	.cf_icache_sync_range	= sa1_cache_syncI_rng,
995 
996 	.cf_dcache_wbinv_all	= sa1_cache_purgeD,
997 	.cf_dcache_wbinv_range	= sa1_cache_purgeD_rng,
998 /*XXX*/	.cf_dcache_inv_range	= sa1_cache_purgeD_rng,
999 	.cf_dcache_wb_range	= sa1_cache_cleanD_rng,
1000 
1001 	.cf_idcache_wbinv_all	= sa1_cache_purgeID,
1002 	.cf_idcache_wbinv_range	= sa1_cache_purgeID_rng,
1003 
1004 	/* Other functions */
1005 
1006 	.cf_flush_prefetchbuf	= ixp12x0_drain_readbuf,
1007 	.cf_drain_writebuf	= armv4_drain_writebuf,
1008 	.cf_flush_brnchtgt_C	= cpufunc_nullop,
1009 	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,
1010 
1011 	.cf_sleep		= (void *)cpufunc_nullop,
1012 
1013 	/* Soft functions */
1014 
1015 	.cf_dataabt_fixup	= cpufunc_null_fixup,
1016 	.cf_prefetchabt_fixup	= cpufunc_null_fixup,
1017 
1018 	.cf_context_switch	= ixp12x0_context_switch,
1019 
1020 	.cf_setup		= ixp12x0_setup
1021 };
1022 #endif	/* CPU_IXP12X0 */
1023 
1024 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
1025     defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425)
1026 struct cpu_functions xscale_cpufuncs = {
1027 	/* CPU functions */
1028 
1029 	.cf_id			= cpufunc_id,
1030 	.cf_cpwait		= xscale_cpwait,
1031 
1032 	/* MMU functions */
1033 
1034 	.cf_control		= xscale_control,
1035 	.cf_domains		= cpufunc_domains,
1036 	.cf_setttb		= xscale_setttb,
1037 	.cf_faultstatus		= cpufunc_faultstatus,
1038 	.cf_faultaddress	= cpufunc_faultaddress,
1039 
1040 	/* TLB functions */
1041 
1042 	.cf_tlb_flushID		= armv4_tlb_flushID,
1043 	.cf_tlb_flushID_SE	= xscale_tlb_flushID_SE,
1044 	.cf_tlb_flushI		= armv4_tlb_flushI,
1045 	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
1046 	.cf_tlb_flushD		= armv4_tlb_flushD,
1047 	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,
1048 
1049 	/* Cache operations */
1050 
1051 	.cf_icache_sync_all	= xscale_cache_syncI,
1052 	.cf_icache_sync_range	= xscale_cache_syncI_rng,
1053 
1054 	.cf_dcache_wbinv_all	= xscale_cache_purgeD,
1055 	.cf_dcache_wbinv_range	= xscale_cache_purgeD_rng,
1056 	.cf_dcache_inv_range	= xscale_cache_flushD_rng,
1057 	.cf_dcache_wb_range	= xscale_cache_cleanD_rng,
1058 
1059 	.cf_idcache_wbinv_all	= xscale_cache_purgeID,
1060 	.cf_idcache_wbinv_range = xscale_cache_purgeID_rng,
1061 
1062 	/* Other functions */
1063 
1064 	.cf_flush_prefetchbuf	= cpufunc_nullop,
1065 	.cf_drain_writebuf	= armv4_drain_writebuf,
1066 	.cf_flush_brnchtgt_C	= cpufunc_nullop,
1067 	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,
1068 
1069 	.cf_sleep		= xscale_cpu_sleep,
1070 
1071 	/* Soft functions */
1072 
1073 	.cf_dataabt_fixup	= cpufunc_null_fixup,
1074 	.cf_prefetchabt_fixup	= cpufunc_null_fixup,
1075 
1076 	.cf_context_switch	= xscale_context_switch,
1077 
1078 	.cf_setup		= xscale_setup
1079 };
1080 #endif
1081 /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || __CPU_XSCALE_PXA2XX || CPU_XSCALE_IXP425 */
1082 
1083 /*
1084  * Global variables also used by locore.s
1085  */
1086 
1087 struct cpu_functions cpufuncs;
1088 u_int cputype;
1089 u_int cpu_reset_needs_v4_MMU_disable;	/* flag used in locore.s */
1090 
1091 #if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) || \
1092     defined(CPU_ARM9E) || defined(CPU_ARM10) || defined(CPU_ARM11) || \
1093     defined(CPU_FA526) || \
1094     defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
1095     defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425)
1096 static void get_cachetype_cp15(void);
1097 
1098 /* Additional cache information local to this file.  Log2 of some of the
1099    above numbers.  */
1100 static int	arm_dcache_l2_nsets;
1101 static int	arm_dcache_l2_assoc;
1102 static int	arm_dcache_l2_linesize;
1103 
1104 static void
1105 get_cachetype_cp15(void)
1106 {
1107 	u_int ctype, isize, dsize;
1108 	u_int multiplier;
1109 
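	/* Read the CP15 cache type register. */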
1110 	__asm volatile("mrc p15, 0, %0, c0, c0, 1"
1111 		: "=r" (ctype));
1112 
1113 	/*
1114 	 * ...and thus spake the ARM ARM:
1115 	 *
1116 	 * If an <opcode2> value corresponding to an unimplemented or
1117 	 * reserved ID register is encountered, the System Control
1118 	 * processor returns the value of the main ID register.
1119 	 */
1120 	if (ctype == cpu_id())
1121 		goto out;
1122 
1123 #if (ARM_MMU_V6) > 0
1124 	if (CPU_CT_FORMAT(ctype) == 4) {
1125 		u_int csid1, csid2;
1126 		isize = 1U << (CPU_CT4_ILINE(ctype) + 2);
1127 		dsize = 1U << (CPU_CT4_DLINE(ctype) + 2);
1128 
1129 		__asm volatile("mcr p15, 1, %0, c0, c0, 2"
1130 		    :: "r" (CPU_CSSR_L1));	/* select L1 cache values */
1131 		__asm volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (csid1));
1132 		arm_pdcache_ways = CPU_CSID_ASSOC(csid1) + 1;
1133 		arm_pdcache_line_size = dsize << CPU_CSID_LEN(csid1);
1134 		arm_pdcache_size = arm_pdcache_line_size * arm_pdcache_ways;
1135 		arm_pdcache_size *= CPU_CSID_NUMSETS(csid1);
1136 		arm_cache_prefer_mask = PAGE_SIZE;
1137 
1138 		arm_dcache_align = arm_pdcache_line_size;
1139 
1140 		__asm volatile("mcr p15, 1, %0, c0, c0, 2"
1141 		    :: "r" (CPU_CSSR_L2));	/* select L2 cache values */
1142 		__asm volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (csid2));
1143 		arm_dcache_l2_assoc = CPU_CSID_ASSOC(csid2) + 1;
1144 		arm_dcache_l2_linesize = dsize << CPU_CSID_LEN(csid2);
1145 		arm_dcache_l2_nsets = CPU_CSID_NUMSETS(csid2) + 1;
1146 		arm_pcache_type = CPU_CT_CTYPE_WB14;
1147 		goto out;
1148 	}
1149 #endif /* ARM_MMU_V6 > 0 */
1150 
1151 	if ((ctype & CPU_CT_S) == 0)
1152 		arm_pcache_unified = 1;
1153 
1154 	/*
1155 	 * If you want to know how this code works, go read the ARM ARM.
1156 	 */
1157 
1158 	arm_pcache_type = CPU_CT_CTYPE(ctype);
1159 
1160 	if (arm_pcache_unified == 0) {
1161 		isize = CPU_CT_ISIZE(ctype);
1162 		multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
1163 		arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
1164 		if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
1165 			if (isize & CPU_CT_xSIZE_M)
1166 				arm_picache_line_size = 0; /* not present */
1167 			else
1168 				arm_picache_ways = 1;
1169 		} else {
1170 			arm_picache_ways = multiplier <<
1171 			    (CPU_CT_xSIZE_ASSOC(isize) - 1);
1172 #if (ARM_MMU_V6) > 0
1173 			if (CPU_CT_xSIZE_P & isize)
1174 				arm_cache_prefer_mask |=
1175 				    __BIT(9 + CPU_CT_xSIZE_SIZE(isize)
1176 					  - CPU_CT_xSIZE_ASSOC(isize))
1177 				    - PAGE_SIZE;
1178 #endif
1179 		}
1180 		arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
1181 	}
1182 
1183 	dsize = CPU_CT_DSIZE(ctype);
1184 	multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
1185 	arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
1186 	if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
1187 		if (dsize & CPU_CT_xSIZE_M)
1188 			arm_pdcache_line_size = 0; /* not present */
1189 		else
1190 			arm_pdcache_ways = 1;
1191 	} else {
1192 		arm_pdcache_ways = multiplier <<
1193 		    (CPU_CT_xSIZE_ASSOC(dsize) - 1);
1194 #if (ARM_MMU_V6) > 0
1195 		if (CPU_CT_xSIZE_P & dsize)
1196 			arm_cache_prefer_mask |=
1197 			    __BIT(9 + CPU_CT_xSIZE_SIZE(dsize)
1198 				  - CPU_CT_xSIZE_ASSOC(dsize)) - PAGE_SIZE;
1199 #endif
1200 	}
1201 	arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
1202 
1203 	arm_dcache_align = arm_pdcache_line_size;
1204 
1205 	arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
1206 	arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
1207 	arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
1208 	    CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);
1209 
1210  out:
1211 	arm_dcache_align_mask = arm_dcache_align - 1;
1212 }
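
/*
 * Illustrative example of the decoding above (hypothetical field values,
 * not taken from a specific part): a 16KB, 32-way data cache with
 * 32-byte lines and the M bit clear gives multiplier = 2 and decodes as
 *
 *	line size = 1 << (LEN + 3)    = 32	(LEN = 2)
 *	ways      = 2 << (ASSOC - 1)  = 32	(ASSOC = 5)
 *	size      = 2 << (SIZE + 8)   = 16384	(SIZE = 5)
 */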
1213 #endif /* ARM7TDMI || ARM8 || ARM9 || ARM9E || ARM10 || ARM11 || FA526 || XSCALE */
1214 
1215 #if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3) || \
1216     defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_SA110) || \
1217     defined(CPU_SA1100) || defined(CPU_SA1110) || defined(CPU_IXP12X0)
1218 /* Cache information for CPUs without cache type registers. */
1219 struct cachetab {
1220 	u_int32_t ct_cpuid;
1221 	int	ct_pcache_type;
1222 	int	ct_pcache_unified;
1223 	int	ct_pdcache_size;
1224 	int	ct_pdcache_line_size;
1225 	int	ct_pdcache_ways;
1226 	int	ct_picache_size;
1227 	int	ct_picache_line_size;
1228 	int	ct_picache_ways;
1229 };
1230 
1231 struct cachetab cachetab[] = {
1232     /* cpuid,           cache type,       u,  dsiz, ls, wy,  isiz, ls, wy */
1233     { CPU_ID_ARM2,      0,                1,     0,  0,  0,     0,  0,  0 },
1234     { CPU_ID_ARM250,    0,                1,     0,  0,  0,     0,  0,  0 },
1235     { CPU_ID_ARM3,      CPU_CT_CTYPE_WT,  1,  4096, 16, 64,     0,  0,  0 },
1236     { CPU_ID_ARM610,	CPU_CT_CTYPE_WT,  1,  4096, 16, 64,     0,  0,  0 },
1237     { CPU_ID_ARM710,    CPU_CT_CTYPE_WT,  1,  8192, 32,  4,     0,  0,  0 },
1238     { CPU_ID_ARM7500,   CPU_CT_CTYPE_WT,  1,  4096, 16,  4,     0,  0,  0 },
1239     { CPU_ID_ARM710A,   CPU_CT_CTYPE_WT,  1,  8192, 16,  4,     0,  0,  0 },
1240     { CPU_ID_ARM7500FE, CPU_CT_CTYPE_WT,  1,  4096, 16,  4,     0,  0,  0 },
1241     /* XXX is this type right for SA-1? */
1242     { CPU_ID_SA110,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 },
1243     { CPU_ID_SA1100,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
1244     { CPU_ID_SA1110,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
1245     { CPU_ID_IXP1200,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 }, /* XXX */
1246     { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
1247 };
1248 
1249 static void get_cachetype_table(void);
1250 
1251 static void
1252 get_cachetype_table(void)
1253 {
1254 	int i;
1255 	u_int32_t cpuid = cpu_id();
1256 
1257 	for (i = 0; cachetab[i].ct_cpuid != 0; i++) {
1258 		if (cachetab[i].ct_cpuid == (cpuid & CPU_ID_CPU_MASK)) {
1259 			arm_pcache_type = cachetab[i].ct_pcache_type;
1260 			arm_pcache_unified = cachetab[i].ct_pcache_unified;
1261 			arm_pdcache_size = cachetab[i].ct_pdcache_size;
1262 			arm_pdcache_line_size =
1263 			    cachetab[i].ct_pdcache_line_size;
1264 			arm_pdcache_ways = cachetab[i].ct_pdcache_ways;
1265 			arm_picache_size = cachetab[i].ct_picache_size;
1266 			arm_picache_line_size =
1267 			    cachetab[i].ct_picache_line_size;
1268 			arm_picache_ways = cachetab[i].ct_picache_ways;
1269 		}
1270 	}
1271 	arm_dcache_align = arm_pdcache_line_size;
1272 
1273 	arm_dcache_align_mask = arm_dcache_align - 1;
1274 }
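
/*
 * A CPU id with no match in the table above leaves the cache variables
 * at their (zero) defaults; set_cpufuncs() panics for genuinely
 * unsupported CPUs, so this should not matter in practice.
 */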
1275 
1276 #endif /* ARM2 || ARM250 || ARM3 || ARM6 || ARM7 || SA110 || SA1100 || SA1110 || IXP12X0 */
1277 
1278 /*
1279  * Cannot panic here as we may not have a console yet ...
1280  */
1281 
1282 int
1283 set_cpufuncs(void)
1284 {
1285 	if (cputype == 0) {
1286 		cputype = cpufunc_id();
1287 		cputype &= CPU_ID_CPU_MASK;
1288 	}
1289 
1290 	/*
1291 	 * NOTE: cpu_do_powersave defaults to off.  If we encounter a
1292 	 * CPU type where we want to use it by default, then we set it.
1293 	 */
1294 #ifdef CPU_ARM2
1295 	if (cputype == CPU_ID_ARM2) {
1296 		cpufuncs = arm2_cpufuncs;
1297 		cpu_reset_needs_v4_MMU_disable = 0;
1298 		get_cachetype_table();
1299 		return 0;
1300 	}
1301 #endif /* CPU_ARM2 */
1302 #ifdef CPU_ARM250
1303 	if (cputype == CPU_ID_ARM250) {
1304 		cpufuncs = arm250_cpufuncs;
1305 		cpu_reset_needs_v4_MMU_disable = 0;
1306 		get_cachetype_table();
1307 		return 0;
1308 	}
1309 #endif
1310 #ifdef CPU_ARM3
1311 	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
1312 	    (cputype & 0x00000f00) == 0x00000300) {
1313 		cpufuncs = arm3_cpufuncs;
1314 		cpu_reset_needs_v4_MMU_disable = 0;
1315 		get_cachetype_table();
1316 		return 0;
1317 	}
1318 #endif	/* CPU_ARM3 */
1319 #ifdef CPU_ARM6
1320 	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
1321 	    (cputype & 0x00000f00) == 0x00000600) {
1322 		cpufuncs = arm6_cpufuncs;
1323 		cpu_reset_needs_v4_MMU_disable = 0;
1324 		get_cachetype_table();
1325 		pmap_pte_init_generic();
1326 		return 0;
1327 	}
1328 #endif	/* CPU_ARM6 */
1329 #ifdef CPU_ARM7
1330 	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
1331 	    CPU_ID_IS7(cputype) &&
1332 	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V3) {
1333 		cpufuncs = arm7_cpufuncs;
1334 		cpu_reset_needs_v4_MMU_disable = 0;
1335 		get_cachetype_table();
1336 		pmap_pte_init_generic();
1337 		return 0;
1338 	}
1339 #endif	/* CPU_ARM7 */
1340 #ifdef CPU_ARM7TDMI
1341 	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
1342 	    CPU_ID_IS7(cputype) &&
1343 	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V4T) {
1344 		cpufuncs = arm7tdmi_cpufuncs;
1345 		cpu_reset_needs_v4_MMU_disable = 0;
1346 		get_cachetype_cp15();
1347 		pmap_pte_init_generic();
1348 		return 0;
1349 	}
1350 #endif
1351 #ifdef CPU_ARM8
1352 	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
1353 	    (cputype & 0x0000f000) == 0x00008000) {
1354 		cpufuncs = arm8_cpufuncs;
1355 		cpu_reset_needs_v4_MMU_disable = 0;	/* XXX correct? */
1356 		get_cachetype_cp15();
1357 		pmap_pte_init_arm8();
1358 		return 0;
1359 	}
1360 #endif	/* CPU_ARM8 */
1361 #ifdef CPU_ARM9
1362 	if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
1363 	     (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
1364 	    (cputype & 0x0000f000) == 0x00009000) {
1365 		cpufuncs = arm9_cpufuncs;
1366 		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
1367 		get_cachetype_cp15();
1368 		arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
1369 		arm9_dcache_sets_max =
1370 		    (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) -
1371 		    arm9_dcache_sets_inc;
1372 		arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
1373 		arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
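		/*
		 * Sketch of the intent (assuming the arm9 routines use
		 * MCR set/way cache operations): sets_inc steps the set
		 * index in line-size units, while index_inc steps the way
		 * field held in the top log2(associativity) bits of the
		 * word, hence 1U << (32 - arm_dcache_l2_assoc).
		 */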
1374 #ifdef	ARM9_CACHE_WRITE_THROUGH
1375 		pmap_pte_init_arm9();
1376 #else
1377 		pmap_pte_init_generic();
1378 #endif
1379 		return 0;
1380 	}
1381 #endif /* CPU_ARM9 */
1382 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
1383 	if (cputype == CPU_ID_ARM926EJS ||
1384 	    cputype == CPU_ID_ARM1026EJS) {
1385 		cpufuncs = armv5_ec_cpufuncs;
1386 		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
1387 		get_cachetype_cp15();
1388 		pmap_pte_init_generic();
1389 		return 0;
1390 	}
1391 #endif /* CPU_ARM9E || CPU_ARM10 */
1392 #ifdef CPU_ARM10
1393 	if (/* cputype == CPU_ID_ARM1020T || */
1394 	    cputype == CPU_ID_ARM1020E) {
1395 		/*
1396 		 * Select write-through caching (this isn't really an
1397 		 * option on ARM1020T).
1398 		 */
1399 		cpufuncs = arm10_cpufuncs;
1400 		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
1401 		get_cachetype_cp15();
1402 		armv5_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
1403 		armv5_dcache_sets_max =
1404 		    (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) -
1405 		    armv5_dcache_sets_inc;
1406 		armv5_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
1407 		armv5_dcache_index_max = 0U - armv5_dcache_index_inc;
1408 		pmap_pte_init_generic();
1409 		return 0;
1410 	}
1411 #endif /* CPU_ARM10 */
1412 #if defined(CPU_ARM11)
1413 	if (cputype == CPU_ID_ARM1136JS ||
1414 	    cputype == CPU_ID_ARM1136JSR1 ||
1415 	    cputype == CPU_ID_ARM1176JS ||
1416 	    cputype == CPU_ID_CORTEXA8R1 ||
1417 	    cputype == CPU_ID_CORTEXA8R2) {
1418 		cpufuncs = arm11_cpufuncs;
1419 #if defined(CPU_ARM1136)
1420 		if (cputype != CPU_ID_ARM1176JS) {
1421 			cpufuncs = arm1136_cpufuncs;
1422 			if (cputype == CPU_ID_ARM1136JS)
1423 				cpufuncs.cf_sleep = arm1136_sleep_rev0;
1424 		}
1425 #endif
1426 		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
1427 		cpu_do_powersave = 1;			/* Enable powersave */
1428 		get_cachetype_cp15();
1429 		pmap_pte_init_generic();
1430 		if (arm_cache_prefer_mask)
1431 			uvmexp.ncolors = (arm_cache_prefer_mask >> PGSHIFT) + 1;
1432 
1433 		return 0;
1434 	}
1435 #endif /* CPU_ARM11 */
1436 #ifdef CPU_SA110
1437 	if (cputype == CPU_ID_SA110) {
1438 		cpufuncs = sa110_cpufuncs;
1439 		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it */
1440 		get_cachetype_table();
1441 		pmap_pte_init_sa1();
1442 		return 0;
1443 	}
1444 #endif	/* CPU_SA110 */
1445 #ifdef CPU_SA1100
1446 	if (cputype == CPU_ID_SA1100) {
1447 		cpufuncs = sa11x0_cpufuncs;
1448 		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it	*/
1449 		get_cachetype_table();
1450 		pmap_pte_init_sa1();
1451 
1452 		/* Use powersave on this CPU. */
1453 		cpu_do_powersave = 1;
1454 
1455 		return 0;
1456 	}
1457 #endif	/* CPU_SA1100 */
1458 #ifdef CPU_SA1110
1459 	if (cputype == CPU_ID_SA1110) {
1460 		cpufuncs = sa11x0_cpufuncs;
1461 		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it	*/
1462 		get_cachetype_table();
1463 		pmap_pte_init_sa1();
1464 
1465 		/* Use powersave on this CPU. */
1466 		cpu_do_powersave = 1;
1467 
1468 		return 0;
1469 	}
1470 #endif	/* CPU_SA1110 */
1471 #ifdef CPU_FA526
1472 	if (cputype == CPU_ID_FA526) {
1473 		cpufuncs = fa526_cpufuncs;
1474 		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it	*/
1475 		get_cachetype_cp15();
1476 		pmap_pte_init_generic();
1477 
1478 		/* Use powersave on this CPU. */
1479 		cpu_do_powersave = 1;
1480 
1481 		return 0;
1482 	}
1483 #endif	/* CPU_FA526 */
1484 #ifdef CPU_IXP12X0
1485 	if (cputype == CPU_ID_IXP1200) {
1486 		cpufuncs = ixp12x0_cpufuncs;
1487 		cpu_reset_needs_v4_MMU_disable = 1;
1488 		get_cachetype_table();
1489 		pmap_pte_init_sa1();
1490 		return 0;
1491 	}
1492 #endif	/* CPU_IXP12X0 */
1493 #ifdef CPU_XSCALE_80200
1494 	if (cputype == CPU_ID_80200) {
1495 		int rev = cpufunc_id() & CPU_ID_REVISION_MASK;
1496 
1497 		i80200_icu_init();
1498 
1499 		/*
1500 		 * Reset the Performance Monitoring Unit to a
1501 		 * pristine state:
1502 		 *	- CCNT, PMN0, PMN1 reset to 0
1503 		 *	- overflow indications cleared
1504 		 *	- all counters disabled
1505 		 */
1506 		__asm volatile("mcr p14, 0, %0, c0, c0, 0"
1507 			:
1508 			: "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
1509 			       PMNC_CC_IF));
1510 
1511 #if defined(XSCALE_CCLKCFG)
1512 		/*
1513 		 * Crank CCLKCFG to maximum legal value.
1514 		 */
1515 		__asm volatile ("mcr p14, 0, %0, c6, c0, 0"
1516 			:
1517 			: "r" (XSCALE_CCLKCFG));
1518 #endif
1519 
1520 		/*
1521 		 * XXX Disable ECC in the Bus Controller Unit; we
1522 		 * don't really support it, yet.  Clear any pending
1523 		 * error indications.
1524 		 */
1525 		__asm volatile("mcr p13, 0, %0, c0, c1, 0"
1526 			:
1527 			: "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));
1528 
1529 		cpufuncs = xscale_cpufuncs;
1530 #if defined(PERFCTRS)
1531 		xscale_pmu_init();
1532 #endif
1533 
1534 		/*
1535 		 * i80200 errata: Step-A0 and A1 have a bug where
1536 		 * D$ dirty bits are not cleared on "invalidate by
1537 		 * address".
1538 		 *
1539 		 * Workaround: Clean cache line before invalidating.
1540 		 */
1541 		if (rev == 0 || rev == 1)
1542 			cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng;
1543 
1544 		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
1545 		get_cachetype_cp15();
1546 		pmap_pte_init_xscale();
1547 		return 0;
1548 	}
1549 #endif /* CPU_XSCALE_80200 */
1550 #ifdef CPU_XSCALE_80321
1551 	if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
1552 	    cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0 ||
1553 	    cputype == CPU_ID_80219_400 || cputype == CPU_ID_80219_600) {
1554 		i80321_icu_init();
1555 
1556 		/*
1557 		 * Reset the Performance Monitoring Unit to a
1558 		 * pristine state:
1559 		 *	- CCNT, PMN0, PMN1 reset to 0
1560 		 *	- overflow indications cleared
1561 		 *	- all counters disabled
1562 		 */
1563 		__asm volatile("mcr p14, 0, %0, c0, c0, 0"
1564 			:
1565 			: "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
1566 			       PMNC_CC_IF));
1567 
1568 		cpufuncs = xscale_cpufuncs;
1569 #if defined(PERFCTRS)
1570 		xscale_pmu_init();
1571 #endif
1572 
1573 		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
1574 		get_cachetype_cp15();
1575 		pmap_pte_init_xscale();
1576 		return 0;
1577 	}
1578 #endif /* CPU_XSCALE_80321 */
1579 #ifdef __CPU_XSCALE_PXA2XX
1580 	/* ignore core revision to test PXA2xx CPUs */
1581 	if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X ||
1582 	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
1583 	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {
1584 
1585 		cpufuncs = xscale_cpufuncs;
1586 #if defined(PERFCTRS)
1587 		xscale_pmu_init();
1588 #endif
1589 
1590 		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
1591 		get_cachetype_cp15();
1592 		pmap_pte_init_xscale();
1593 
1594 		/* Use powersave on this CPU. */
1595 		cpu_do_powersave = 1;
1596 
1597 		return 0;
1598 	}
1599 #endif /* __CPU_XSCALE_PXA2XX */
1600 #ifdef CPU_XSCALE_IXP425
1601 	if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
1602             cputype == CPU_ID_IXP425_266) {
1603 		ixp425_icu_init();
1604 
1605 		cpufuncs = xscale_cpufuncs;
1606 #if defined(PERFCTRS)
1607 		xscale_pmu_init();
1608 #endif
1609 
1610 		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
1611 		get_cachetype_cp15();
1612 		pmap_pte_init_xscale();
1613 
1614 		return 0;
1615 	}
1616 #endif /* CPU_XSCALE_IXP425 */
1617 	/*
1618 	 * Bzzzz. And the answer was ...
1619 	 */
1620 	panic("No support for this CPU type (%08x) in kernel", cputype);
1621 	return(ARCHITECTURE_NOT_PRESENT);
1622 }
1623 
1624 #ifdef CPU_ARM2
1625 u_int arm2_id(void)
1626 {
1627 
1628 	return CPU_ID_ARM2;
1629 }
1630 #endif /* CPU_ARM2 */
1631 
1632 #ifdef CPU_ARM250
1633 u_int arm250_id(void)
1634 {
1635 
1636 	return CPU_ID_ARM250;
1637 }
1638 #endif /* CPU_ARM250 */
1639 
1640 /*
1641  * Fixup routines for data and prefetch aborts.
1642  *
1643  * Several compile-time symbols are used:
1644  *
1645  * DEBUG_FAULT_CORRECTION - print debugging information during the
1646  * correction of registers after a fault.
1647  * ARM6_LATE_ABORT - the ARM6 supports both early and late aborts;
1648  * when this is defined, late aborts are used.
1649  */
1650 
1651 
1652 /*
1653  * Null abort fixup routine.
1654  * For use when no fixup is required.
1655  */
1656 int
1657 cpufunc_null_fixup(void *arg)
1658 {
1659 	return(ABORT_FIXUP_OK);
1660 }
1661 
1662 
1663 #if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3) || \
1664     defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI)
1665 
1666 #ifdef DEBUG_FAULT_CORRECTION
1667 #define DFC_PRINTF(x)		printf x
1668 #define DFC_DISASSEMBLE(x)	disassemble(x)
1669 #else
1670 #define DFC_PRINTF(x)		/* nothing */
1671 #define DFC_DISASSEMBLE(x)	/* nothing */
1672 #endif
1673 
1674 /*
1675  * "Early" data abort fixup.
1676  *
1677  * For ARM2, ARM2as, ARM3 and ARM6 (in early-abort mode).  Also used
1678  * indirectly by ARM6 (in late-abort mode) and ARM7[TDMI].
1679  *
1680  * In early aborts, we may have to fix up LDM, STM, LDC and STC.
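 *
 * For example (illustrative only): if "ldmia r4!, {r0-r7}" aborts, the
 * base r4 has already been advanced by 8 * 4 = 32 bytes, and the LDM/STM
 * fixup below subtracts that amount to restore it.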
1681  */
1682 int
1683 early_abort_fixup(void *arg)
1684 {
1685 	trapframe_t *frame = arg;
1686 	u_int fault_pc;
1687 	u_int fault_instruction;
1688 	int saved_lr = 0;
1689 
1690 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1691 
1692 		/* Ok an abort in SVC mode */
1693 
1694 		/*
1695 		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1696 		 * as the fault happened in svc mode but we need it in the
1697 		 * usr slot so we can treat the registers as an array of ints
1698 		 * during fixing.
1699 		 * NOTE: the PC (r15) sits in its usual slot in that array,
1700 		 * but writeback to r15 is not allowed anyway.
1701 		 * Doing it like this is more efficient than trapping this
1702 		 * case in all possible locations in the following fixup code.
1703 		 */
1704 
1705 		saved_lr = frame->tf_usr_lr;
1706 		frame->tf_usr_lr = frame->tf_svc_lr;
1707 
1708 		/*
1709 		 * Note the trapframe does not have the SVC r13 so a fault
1710 		 * from an instruction with writeback to r13 in SVC mode is
1711 		 * not allowed. This should not happen as the kstack is
1712 		 * always valid.
1713 		 */
1714 	}
1715 
1716 	/* Get fault address and status from the CPU */
1717 
1718 	fault_pc = frame->tf_pc;
1719 	fault_instruction = *((volatile unsigned int *)fault_pc);
1720 
1721 	/* Decode the fault instruction and fix the registers as needed */
1722 
1723 	if ((fault_instruction & 0x0e000000) == 0x08000000) {
1724 		int base;
1725 		int loop;
1726 		int count;
1727 		int *registers = &frame->tf_r0;
1728 
1729 		DFC_PRINTF(("LDM/STM\n"));
1730 		DFC_DISASSEMBLE(fault_pc);
1731 		if (fault_instruction & (1 << 21)) {
1732 			DFC_PRINTF(("This instruction must be corrected\n"));
1733 			base = (fault_instruction >> 16) & 0x0f;
1734 			if (base == 15)
1735 				return ABORT_FIXUP_FAILED;
1736 			/* Count registers transferred */
1737 			count = 0;
1738 			for (loop = 0; loop < 16; ++loop) {
1739 				if (fault_instruction & (1<<loop))
1740 					++count;
1741 			}
1742 			DFC_PRINTF(("%d registers used\n", count));
1743 			DFC_PRINTF(("Corrected r%d by %d bytes ",
1744 				       base, count * 4));
1745 			if (fault_instruction & (1 << 23)) {
1746 				DFC_PRINTF(("down\n"));
1747 				registers[base] -= count * 4;
1748 			} else {
1749 				DFC_PRINTF(("up\n"));
1750 				registers[base] += count * 4;
1751 			}
1752 		}
1753 	} else if ((fault_instruction & 0x0e000000) == 0x0c000000) {
1754 		int base;
1755 		int offset;
1756 		int *registers = &frame->tf_r0;
1757 
1758 		/* REGISTER CORRECTION IS REQUIRED FOR THESE INSTRUCTIONS */
1759 
1760 		DFC_DISASSEMBLE(fault_pc);
1761 
1762 		/* Only need to fix registers if write back is turned on */
1763 
1764 		if ((fault_instruction & (1 << 21)) != 0) {
1765 			base = (fault_instruction >> 16) & 0x0f;
1766 			if (base == 13 &&
1767 			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
1768 				return ABORT_FIXUP_FAILED;
1769 			if (base == 15)
1770 				return ABORT_FIXUP_FAILED;
1771 
1772 			offset = (fault_instruction & 0xff) << 2;
1773 			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
1774 			if ((fault_instruction & (1 << 23)) != 0)
1775 				offset = -offset;
1776 			registers[base] += offset;
1777 			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
1778 		}
1779 	} else if ((fault_instruction & 0x0e000000) == 0x0c000000)
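		/* XXX unreachable: same condition as the LDC/STC branch above */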
1780 		return ABORT_FIXUP_FAILED;
1781 
1782 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1783 
1784 		/* Ok an abort in SVC mode */
1785 
1786 		/*
1787 		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1788 		 * as the fault happened in svc mode but we need it in the
1789 		 * usr slot so we can treat the registers as an array of ints
1790 		 * during fixing.
1791 		 * NOTE: the PC (r15) sits in its usual slot in that array,
1792 		 * but writeback to r15 is not allowed anyway.
1793 		 * Doing it like this is more efficient than trapping this
1794 		 * case in all possible locations in the prior fixup code.
1795 		 */
1796 
1797 		frame->tf_svc_lr = frame->tf_usr_lr;
1798 		frame->tf_usr_lr = saved_lr;
1799 
1800 		/*
1801 		 * Note the trapframe does not have the SVC r13 so a fault
1802 		 * from an instruction with writeback to r13 in SVC mode is
1803 		 * not allowed. This should not happen as the kstack is
1804 		 * always valid.
1805 		 */
1806 	}
1807 
1808 	return(ABORT_FIXUP_OK);
1809 }
1810 #endif	/* CPU_ARM2/250/3/6/7 */
1811 
1812 
1813 #if (defined(CPU_ARM6) && defined(ARM6_LATE_ABORT)) || defined(CPU_ARM7) || \
1814 	defined(CPU_ARM7TDMI)
1815 /*
1816  * "Late" (base updated) data abort fixup
1817  *
1818  * For ARM6 (in late-abort mode) and ARM7.
1819  *
1820  * In this model, all data-transfer instructions need fixing up.  We defer
1821  * LDM, STM, LDC and STC fixup to the early-abort handler.
1822  */
1823 int
1824 late_abort_fixup(void *arg)
1825 {
1826 	trapframe_t *frame = arg;
1827 	u_int fault_pc;
1828 	u_int fault_instruction;
1829 	int saved_lr = 0;
1830 
1831 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1832 
		/* The abort happened in SVC mode */

		/*
		 * Copy the SVC r14 into the usr r14 slot - the usr r14 is
		 * garbage as the fault happened in SVC mode, but we need
		 * the value in the usr slot so that we can treat the
		 * registers as an array of ints during fixing.
		 * NOTE: r15 (the PC) is already in its slot, and writeback
		 * to r15 is not allowed in any case.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the following fixup code.
		 */
1845 
1846 		saved_lr = frame->tf_usr_lr;
1847 		frame->tf_usr_lr = frame->tf_svc_lr;
1848 
1849 		/*
1850 		 * Note the trapframe does not have the SVC r13 so a fault
1851 		 * from an instruction with writeback to r13 in SVC mode is
1852 		 * not allowed. This should not happen as the kstack is
1853 		 * always valid.
1854 		 */
1855 	}
1856 
	/* Get the fault PC from the trapframe and fetch the faulting instruction */
1858 
1859 	fault_pc = frame->tf_pc;
1860 	fault_instruction = *((volatile unsigned int *)fault_pc);
1861 
1862 	/* Decode the fault instruction and fix the registers as needed */
1863 
	/* Was it a swap instruction? */
1865 
1866 	if ((fault_instruction & 0x0fb00ff0) == 0x01000090) {
1867 		DFC_DISASSEMBLE(fault_pc);
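		/* SWP/SWPB do no base writeback, so there is nothing to fix */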
1868 	} else if ((fault_instruction & 0x0c000000) == 0x04000000) {
1869 
		/* Was it an ldr/str instruction? */
1872 
1873 		int base;
1874 		int offset;
1875 		int *registers = &frame->tf_r0;
1876 
1877 		DFC_DISASSEMBLE(fault_pc);
1878 
1879 		/* This is for late abort only */
1880 
1881 		if ((fault_instruction & (1 << 24)) == 0
1882 		    || (fault_instruction & (1 << 21)) != 0) {
			/*
			 * Post-indexed addressing (P clear), or pre-indexed
			 * with writeback (W set): the base register has been
			 * updated and must be rewound.
			 */
1884 
1885 			base = (fault_instruction >> 16) & 0x0f;
1886 			if (base == 13 &&
1887 			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
1888 				return ABORT_FIXUP_FAILED;
1889 			if (base == 15)
1890 				return ABORT_FIXUP_FAILED;
1891 			DFC_PRINTF(("late abt fix: r%d=%08x : ",
1892 				       base, registers[base]));
1893 			if ((fault_instruction & (1 << 25)) == 0) {
1894 				/* Immediate offset - easy */
1895 
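				/*
				 * 12-bit unsigned immediate; the U bit (23)
				 * set means the base was incremented, so the
				 * offset is applied negated to rewind it.
				 */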
1896 				offset = fault_instruction & 0xfff;
1897 				if ((fault_instruction & (1 << 23)))
1898 					offset = -offset;
1899 				registers[base] += offset;
1900 				DFC_PRINTF(("imm=%08x ", offset));
1901 			} else {
1902 				/* offset is a shifted register */
1903 				int shift;
1904 
1905 				offset = fault_instruction & 0x0f;
1906 				if (offset == base)
1907 					return ABORT_FIXUP_FAILED;
1908 
				/*
				 * Register offset - harder: we have to
				 * cope with shifts!
				 */
1913 				offset = registers[offset];
1914 
				if ((fault_instruction & (1 << 4)) == 0) {
					/* shift with immediate amount */
					shift = (fault_instruction >> 7) & 0x1f;
					/*
					 * For LSR and ASR an immediate
					 * amount of 0 encodes a shift of 32.
					 */
					if (shift == 0 &&
					    ((fault_instruction >> 5) & 0x3) != 0)
						shift = 32;
				} else {
					/* shift with register */
					if ((fault_instruction & (1 << 7)) != 0)
						/* undefined for now so bail out */
						return ABORT_FIXUP_FAILED;
					shift = ((fault_instruction >> 8) & 0xf);
					if (base == shift)
						return ABORT_FIXUP_FAILED;
					DFC_PRINTF(("shift reg=%d ", shift));
					/* only the bottom byte of the register is used */
					shift = registers[shift] & 0xff;
				}
				DFC_PRINTF(("shift=%08x ", shift));
				switch (((fault_instruction >> 5) & 0x3)) {
				case 0 : /* Logical left */
					offset = (shift >= 32) ? 0 :
					    (int)(((u_int)offset) << shift);
					break;
				case 1 : /* Logical Right */
					offset = (shift >= 32) ? 0 :
					    (int)(((u_int)offset) >> shift);
					break;
				case 2 : /* Arithmetic Right */
					if (shift >= 32)
						shift = 31;
					offset = (int)(((int)offset) >> shift);
					break;
				case 3 : /* Rotate right (ror or rrx) */
					return ABORT_FIXUP_FAILED;
				}
1946 
1947 				DFC_PRINTF(("abt: fixed LDR/STR with "
1948 					       "register offset\n"));
1949 				if ((fault_instruction & (1 << 23)))
1950 					offset = -offset;
1951 				DFC_PRINTF(("offset=%08x ", offset));
1952 				registers[base] += offset;
1953 			}
1954 			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
1955 		}
1956 	}
1957 
1958 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1959 
		/* The abort happened in SVC mode */

		/*
		 * Undo the r14 shuffle done in the prologue: the (possibly
		 * fixed-up) value now in the usr r14 slot belongs to the
		 * SVC r14, and the original usr r14 saved earlier goes
		 * back into the usr slot.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the prior fixup code.
		 */
1972 
1973 		frame->tf_svc_lr = frame->tf_usr_lr;
1974 		frame->tf_usr_lr = saved_lr;
1975 
1976 		/*
1977 		 * Note the trapframe does not have the SVC r13 so a fault
1978 		 * from an instruction with writeback to r13 in SVC mode is
1979 		 * not allowed. This should not happen as the kstack is
1980 		 * always valid.
1981 		 */
1982 	}
1983 
1984 	/*
1985 	 * Now let the early-abort fixup routine have a go, in case it
1986 	 * was an LDM, STM, LDC or STC that faulted.
1987 	 */
1988 
1989 	return early_abort_fixup(arg);
1990 }
1991 #endif	/* CPU_ARM6(LATE)/7/7TDMI */
1992 
1993 /*
1994  * CPU Setup code
1995  */
1996 
1997 #if defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) || \
1998 	defined(CPU_ARM8) || defined (CPU_ARM9) || defined (CPU_ARM9E) || \
1999 	defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
2000 	defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
2001 	defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425) || \
2002 	defined(CPU_ARM10) || defined(CPU_ARM11) || defined(CPU_ARM1136) || \
2003 	defined(CPU_FA526)
2004 
2005 #define IGN	0
2006 #define OR	1
2007 #define BIC	2
2008 
2009 struct cpu_option {
2010 	const char *co_name;
2011 	int	co_falseop;
2012 	int	co_trueop;
2013 	int	co_value;
2014 };
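/*
 * When a listed boot option parses true, co_trueop is applied to the
 * control-register image (OR sets co_value, BIC clears it); when it
 * parses false, co_falseop is applied instead.  IGN leaves the image
 * untouched.
 */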
2015 
2016 static u_int parse_cpu_options(char *, struct cpu_option *, u_int);
2017 
2018 static u_int
2019 parse_cpu_options(char *args, struct cpu_option *optlist, u_int cpuctrl)
2020 {
2021 	int integer;
2022 
2023 	if (args == NULL)
2024 		return(cpuctrl);
2025 
2026 	while (optlist->co_name) {
2027 		if (get_bootconf_option(args, optlist->co_name,
2028 		    BOOTOPT_TYPE_BOOLEAN, &integer)) {
2029 			if (integer) {
2030 				if (optlist->co_trueop == OR)
2031 					cpuctrl |= optlist->co_value;
2032 				else if (optlist->co_trueop == BIC)
2033 					cpuctrl &= ~optlist->co_value;
2034 			} else {
2035 				if (optlist->co_falseop == OR)
2036 					cpuctrl |= optlist->co_value;
2037 				else if (optlist->co_falseop == BIC)
2038 					cpuctrl &= ~optlist->co_value;
2039 			}
2040 		}
2041 		++optlist;
2042 	}
2043 	return(cpuctrl);
2044 }
#endif /* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 || CPU_ARM9 || ... || CPU_FA526 */
2046 
2047 #if defined (CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) \
2048 	|| defined(CPU_ARM8)
2049 struct cpu_option arm678_options[] = {
2050 #ifdef COMPAT_12
2051 	{ "nocache",		IGN, BIC, CPU_CONTROL_IDC_ENABLE },
2052 	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
2053 #endif	/* COMPAT_12 */
2054 	{ "cpu.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
2055 	{ "cpu.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
2056 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2057 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2058 	{ NULL,			IGN, IGN, 0 }
2059 };
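/*
 * As an illustration (assuming the usual "name=value" boot-argument
 * syntax parsed by get_bootconf_option), booting with "cpu.nocache=1"
 * matches the entry above and BICs the IDC enable bit out of the
 * control-register image before it is written to the CPU.
 */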
2060 
2061 #endif	/* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 */
2062 
2063 #ifdef CPU_ARM6
2064 struct cpu_option arm6_options[] = {
2065 	{ "arm6.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
2066 	{ "arm6.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
2067 	{ "arm6.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2068 	{ "arm6.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2069 	{ NULL,			IGN, IGN, 0 }
2070 };
2071 
2072 void
2073 arm6_setup(char *args)
2074 {
2075 	int cpuctrl, cpuctrlmask;
2076 
2077 	/* Set up default control registers bits */
2078 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2079 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2080 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
2081 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2082 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2083 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
2084 		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE
2085 		 | CPU_CONTROL_AFLT_ENABLE;
2086 
2087 #ifdef ARM6_LATE_ABORT
2088 	cpuctrl |= CPU_CONTROL_LABT_ENABLE;
2089 #endif	/* ARM6_LATE_ABORT */
2090 
2091 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2092 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2093 #endif
2094 
2095 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
2096 	cpuctrl = parse_cpu_options(args, arm6_options, cpuctrl);
2097 
2098 #ifdef __ARMEB__
2099 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2100 #endif
2101 
2102 	/* Clear out the cache */
2103 	cpu_idcache_wbinv_all();
2104 
2105 	/* Set the control register */
2106 	curcpu()->ci_ctrl = cpuctrl;
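	/* A mask of all ones replaces the entire register with cpuctrl */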
2107 	cpu_control(0xffffffff, cpuctrl);
2108 }
2109 #endif	/* CPU_ARM6 */
2110 
2111 #ifdef CPU_ARM7
2112 struct cpu_option arm7_options[] = {
2113 	{ "arm7.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
2114 	{ "arm7.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
2115 	{ "arm7.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2116 	{ "arm7.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2117 #ifdef COMPAT_12
2118 	{ "fpaclk2",		BIC, OR,  CPU_CONTROL_CPCLK },
2119 #endif	/* COMPAT_12 */
2120 	{ "arm700.fpaclk",	BIC, OR,  CPU_CONTROL_CPCLK },
2121 	{ NULL,			IGN, IGN, 0 }
2122 };
2123 
2124 void
2125 arm7_setup(char *args)
2126 {
2127 	int cpuctrl, cpuctrlmask;
2128 
2129 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2130 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2131 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
2132 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2133 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2134 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
2135 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_LABT_ENABLE
2136 		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE
2137 		 | CPU_CONTROL_AFLT_ENABLE;
2138 
2139 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2140 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2141 #endif
2142 
2143 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
2144 	cpuctrl = parse_cpu_options(args, arm7_options, cpuctrl);
2145 
2146 #ifdef __ARMEB__
2147 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2148 #endif
2149 
2150 	/* Clear out the cache */
2151 	cpu_idcache_wbinv_all();
2152 
2153 	/* Set the control register */
2154 	curcpu()->ci_ctrl = cpuctrl;
2155 	cpu_control(0xffffffff, cpuctrl);
2156 }
2157 #endif	/* CPU_ARM7 */
2158 
2159 #ifdef CPU_ARM7TDMI
2160 struct cpu_option arm7tdmi_options[] = {
2161 	{ "arm7.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
2162 	{ "arm7.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
2163 	{ "arm7.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2164 	{ "arm7.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2165 #ifdef COMPAT_12
2166 	{ "fpaclk2",		BIC, OR,  CPU_CONTROL_CPCLK },
2167 #endif	/* COMPAT_12 */
2168 	{ "arm700.fpaclk",	BIC, OR,  CPU_CONTROL_CPCLK },
2169 	{ NULL,			IGN, IGN, 0 }
2170 };
2171 
2172 void
2173 arm7tdmi_setup(char *args)
2174 {
2175 	int cpuctrl;
2176 
2177 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2178 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2179 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
2180 
2181 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
2182 	cpuctrl = parse_cpu_options(args, arm7tdmi_options, cpuctrl);
2183 
2184 #ifdef __ARMEB__
2185 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2186 #endif
2187 
2188 	/* Clear out the cache */
2189 	cpu_idcache_wbinv_all();
2190 
2191 	/* Set the control register */
2192 	curcpu()->ci_ctrl = cpuctrl;
2193 	cpu_control(0xffffffff, cpuctrl);
2194 }
2195 #endif	/* CPU_ARM7TDMI */
2196 
2197 #ifdef CPU_ARM8
2198 struct cpu_option arm8_options[] = {
2199 	{ "arm8.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
2200 	{ "arm8.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
2201 	{ "arm8.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2202 	{ "arm8.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2203 #ifdef COMPAT_12
2204 	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2205 #endif	/* COMPAT_12 */
2206 	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2207 	{ "arm8.branchpredict",	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2208 	{ NULL,			IGN, IGN, 0 }
2209 };
2210 
2211 void
2212 arm8_setup(char *args)
2213 {
2214 	int integer;
2215 	int cpuctrl, cpuctrlmask;
2216 	int clocktest;
2217 	int setclock = 0;
2218 
2219 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2220 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2221 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
2222 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2223 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2224 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
2225 		 | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_ROM_ENABLE
2226 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE;
2227 
2228 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2229 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2230 #endif
2231 
2232 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
2233 	cpuctrl = parse_cpu_options(args, arm8_options, cpuctrl);
2234 
2235 #ifdef __ARMEB__
2236 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2237 #endif
2238 
2239 	/* Get clock configuration */
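	/* A zero mask reads the clock/test register without changing it */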
2240 	clocktest = arm8_clock_config(0, 0) & 0x0f;
2241 
2242 	/* Special ARM8 clock and test configuration */
2243 	if (get_bootconf_option(args, "arm8.clock.reset", BOOTOPT_TYPE_BOOLEAN, &integer)) {
2244 		clocktest = 0;
2245 		setclock = 1;
2246 	}
2247 	if (get_bootconf_option(args, "arm8.clock.dynamic", BOOTOPT_TYPE_BOOLEAN, &integer)) {
2248 		if (integer)
2249 			clocktest |= 0x01;
2250 		else
2251 			clocktest &= ~(0x01);
2252 		setclock = 1;
2253 	}
2254 	if (get_bootconf_option(args, "arm8.clock.sync", BOOTOPT_TYPE_BOOLEAN, &integer)) {
2255 		if (integer)
2256 			clocktest |= 0x02;
2257 		else
2258 			clocktest &= ~(0x02);
2259 		setclock = 1;
2260 	}
2261 	if (get_bootconf_option(args, "arm8.clock.fast", BOOTOPT_TYPE_BININT, &integer)) {
		clocktest = (clocktest & ~0x0c) | ((integer & 3) << 2);
2263 		setclock = 1;
2264 	}
2265 	if (get_bootconf_option(args, "arm8.test", BOOTOPT_TYPE_BININT, &integer)) {
2266 		clocktest |= (integer & 7) << 5;
2267 		setclock = 1;
2268 	}
2269 
2270 	/* Clear out the cache */
2271 	cpu_idcache_wbinv_all();
2272 
2273 	/* Set the control register */
2274 	curcpu()->ci_ctrl = cpuctrl;
2275 	cpu_control(0xffffffff, cpuctrl);
2276 
2277 	/* Set the clock/test register */
2278 	if (setclock)
2279 		arm8_clock_config(0x7f, clocktest);
2280 }
2281 #endif	/* CPU_ARM8 */
2282 
2283 #ifdef CPU_ARM9
2284 struct cpu_option arm9_options[] = {
2285 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2286 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2287 	{ "arm9.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2288 	{ "arm9.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2289 	{ "arm9.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2290 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2291 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2292 	{ "arm9.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2293 	{ NULL,			IGN, IGN, 0 }
2294 };
2295 
2296 void
2297 arm9_setup(char *args)
2298 {
2299 	int cpuctrl, cpuctrlmask;
2300 
2301 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2302 	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2303 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2304 	    | CPU_CONTROL_WBUF_ENABLE;
2305 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2306 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2307 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2308 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2309 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2310 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
2311 		 | CPU_CONTROL_ROUNDROBIN;
2312 
2313 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2314 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2315 #endif
2316 
2317 	cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);
2318 
2319 #ifdef __ARMEB__
2320 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2321 #endif
2322 
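	/* Relocate the vector page to 0xffff0000 when it is mapped high */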
2323 	if (vector_page == ARM_VECTORS_HIGH)
2324 		cpuctrl |= CPU_CONTROL_VECRELOC;
2325 
2326 	/* Clear out the cache */
2327 	cpu_idcache_wbinv_all();
2328 
2329 	/* Set the control register */
2330 	curcpu()->ci_ctrl = cpuctrl;
2331 	cpu_control(cpuctrlmask, cpuctrl);
2332 
2333 }
2334 #endif	/* CPU_ARM9 */
2335 
2336 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
2337 struct cpu_option arm10_options[] = {
2338 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2339 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2340 	{ "arm10.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2341 	{ "arm10.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2342 	{ "arm10.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2343 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2344 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2345 	{ "arm10.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2346 	{ NULL,			IGN, IGN, 0 }
2347 };
2348 
2349 void
2350 arm10_setup(char *args)
2351 {
2352 	int cpuctrl, cpuctrlmask;
2353 
2354 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2355 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2356 	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
2357 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2358 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2359 	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2360 	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2361 	    | CPU_CONTROL_BPRD_ENABLE
2362 	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
2363 
2364 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2365 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2366 #endif
2367 
2368 	cpuctrl = parse_cpu_options(args, arm10_options, cpuctrl);
2369 
2370 #ifdef __ARMEB__
2371 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2372 #endif
2373 
2374 	if (vector_page == ARM_VECTORS_HIGH)
2375 		cpuctrl |= CPU_CONTROL_VECRELOC;
2376 
2377 	/* Clear out the cache */
2378 	cpu_idcache_wbinv_all();
2379 
2380 	/* Now really make sure they are clean.  */
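	/* c7, c7, 0 invalidates both the I and D caches in one go */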
2381 	__asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
2382 
2383 	/* Set the control register */
2384 	curcpu()->ci_ctrl = cpuctrl;
2385 	cpu_control(0xffffffff, cpuctrl);
2386 
2387 	/* And again. */
2388 	cpu_idcache_wbinv_all();
2389 }
2390 #endif	/* CPU_ARM9E || CPU_ARM10 */
2391 
2392 #if defined(CPU_ARM11)
2393 struct cpu_option arm11_options[] = {
2394 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2395 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2396 	{ "arm11.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2397 	{ "arm11.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2398 	{ "arm11.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2399 	{ NULL,			IGN, IGN, 0 }
2400 };
2401 
2402 void
2403 arm11_setup(char *args)
2404 {
2405 	int cpuctrl, cpuctrlmask;
2406 
#if defined(PROCESS_ID_IS_CURCPU)
	/* set curcpu() */
	__asm("mcr\tp15, 0, %0, c13, c0, 4" : : "r"(&cpu_info_store));
#elif defined(PROCESS_ID_IS_CURLWP)
	/* set curlwp() */
	__asm("mcr\tp15, 0, %0, c13, c0, 4" : : "r"(&lwp0));
#endif
2414 
2415 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2416 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2417 	    /* | CPU_CONTROL_BPRD_ENABLE */;
2418 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2419 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2420 	    | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BPRD_ENABLE
2421 	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2422 	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
2423 
2424 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2425 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2426 #endif
2427 
2428 	cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);
2429 
2430 #ifdef __ARMEB__
2431 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2432 #endif
2433 
2434 	if (vector_page == ARM_VECTORS_HIGH)
2435 		cpuctrl |= CPU_CONTROL_VECRELOC;
2436 
2437 	/* Clear out the cache */
2438 	cpu_idcache_wbinv_all();
2439 
2440 	/* Now really make sure they are clean.  */
2441 	__asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
2442 
2443 	/* Allow detection code to find the VFP if it's fitted.  */
2444 	__asm volatile ("mcr\tp15, 0, %0, c1, c0, 2" : : "r" (0x0fffffff));
2445 
2446 	/* Set the control register */
2447 	curcpu()->ci_ctrl = cpuctrl;
2448 	cpu_control(0xffffffff, cpuctrl);
2449 
2450 	/* And again. */
2451 	cpu_idcache_wbinv_all();
2452 }
2453 #endif	/* CPU_ARM11 */
2454 
2455 #if defined(CPU_ARM1136)
2456 void
2457 arm1136_setup(char *args)
2458 {
2459 	int cpuctrl, cpuctrl_wax;
2460 	uint32_t auxctrl, auxctrl_wax;
2461 	uint32_t tmp, tmp2;
	uint32_t sbz = 0;
2463 	uint32_t cpuid;
2464 
#if defined(PROCESS_ID_IS_CURCPU)
	/* set curcpu() */
	__asm("mcr\tp15, 0, %0, c13, c0, 4" : : "r"(&cpu_info_store));
#elif defined(PROCESS_ID_IS_CURLWP)
	/* set curlwp() */
	__asm("mcr\tp15, 0, %0, c13, c0, 4" : : "r"(&lwp0));
#endif
2472 
2473 	cpuid = cpu_id();
2474 
2475 	cpuctrl =
2476 		CPU_CONTROL_MMU_ENABLE  |
2477 		CPU_CONTROL_DC_ENABLE   |
2478 		CPU_CONTROL_WBUF_ENABLE |
2479 		CPU_CONTROL_32BP_ENABLE |
2480 		CPU_CONTROL_32BD_ENABLE |
2481 		CPU_CONTROL_LABT_ENABLE |
2482 		CPU_CONTROL_SYST_ENABLE |
2483 		CPU_CONTROL_IC_ENABLE;
2484 
	/*
	 * "Write as existing" bits: the inverse of this word is the
	 * mask of bits that cpu_control() is allowed to change.
	 */
2489 	cpuctrl_wax =
2490 		(3 << 30) |
2491 		(1 << 29) |
2492 		(1 << 28) |
2493 		(3 << 26) |
2494 		(3 << 19) |
2495 		(1 << 17);
2496 
2497 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2498 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2499 #endif
2500 
2501 	cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);
2502 
2503 #ifdef __ARMEB__
2504 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2505 #endif
2506 
2507 	if (vector_page == ARM_VECTORS_HIGH)
2508 		cpuctrl |= CPU_CONTROL_VECRELOC;
2509 
2510 	auxctrl = 0;
2511 	auxctrl_wax = ~0;
	/*
	 * This option enables the workaround for ARM1136 r0pX erratum
	 * 364296 (possible cache data corruption with hit-under-miss
	 * enabled).  It sets the undocumented bit 31 in the auxiliary
	 * control register and the FI bit in the control register, thus
	 * disabling hit-under-miss without putting the processor into
	 * full low interrupt latency mode.  ARM11MPCore is not affected.
	 */
2520 	if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1136JS) { /* ARM1136JSr0pX */
2521 		cpuctrl |= CPU_CONTROL_FI_ENABLE;
2522 		auxctrl = ARM11R0_AUXCTL_PFI;
2523 		auxctrl_wax = ~ARM11R0_AUXCTL_PFI;
2524 	}
2525 
2526 	/* Clear out the cache */
2527 	cpu_idcache_wbinv_all();
2528 
2529 	/* Now really make sure they are clean.  */
2530 	__asm volatile ("mcr\tp15, 0, %0, c7, c7, 0" : : "r"(sbz));
2531 
2532 	/* Allow detection code to find the VFP if it's fitted.  */
2533 	__asm volatile ("mcr\tp15, 0, %0, c1, c0, 2" : : "r" (0x0fffffff));
2534 
2535 	/* Set the control register */
2536 	curcpu()->ci_ctrl = cpuctrl;
2537 	cpu_control(~cpuctrl_wax, cpuctrl);
2538 
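	/*
	 * Read-modify-write the auxiliary control register: clear the
	 * bits we may modify (~auxctrl_wax), fold in auxctrl, and write
	 * the result back only if the value actually changed.
	 */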
	__asm volatile ("mrc	p15, 0, %0, c1, c0, 1\n\t"
			"bic	%1, %0, %2\n\t"
			"eor	%1, %1, %3\n\t"
			"teq	%0, %1\n\t"
			"mcrne	p15, 0, %1, c1, c0, 1\n\t"
			: "=&r"(tmp), "=&r"(tmp2) :
			  "r"(~auxctrl_wax), "r"(auxctrl));
2546 
2547 	/* And again. */
2548 	cpu_idcache_wbinv_all();
2549 }
2550 #endif	/* CPU_ARM1136 */
2551 
2552 #ifdef CPU_SA110
2553 struct cpu_option sa110_options[] = {
2554 #ifdef COMPAT_12
2555 	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2556 	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
2557 #endif	/* COMPAT_12 */
2558 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2559 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2560 	{ "sa110.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2561 	{ "sa110.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2562 	{ "sa110.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2563 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2564 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2565 	{ "sa110.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2566 	{ NULL,			IGN, IGN, 0 }
2567 };
2568 
2569 void
2570 sa110_setup(char *args)
2571 {
2572 	int cpuctrl, cpuctrlmask;
2573 
2574 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2575 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2576 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2577 		 | CPU_CONTROL_WBUF_ENABLE;
2578 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2579 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2580 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2581 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2582 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2583 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
2584 		 | CPU_CONTROL_CPCLK;
2585 
2586 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2587 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2588 #endif
2589 
2590 	cpuctrl = parse_cpu_options(args, sa110_options, cpuctrl);
2591 
2592 #ifdef __ARMEB__
2593 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2594 #endif
2595 
2596 	if (vector_page == ARM_VECTORS_HIGH)
2597 		cpuctrl |= CPU_CONTROL_VECRELOC;
2598 
2599 	/* Clear out the cache */
2600 	cpu_idcache_wbinv_all();
2601 
2602 	/* Set the control register */
2603 	curcpu()->ci_ctrl = cpuctrl;
2604 /*	cpu_control(cpuctrlmask, cpuctrl);*/
2605 	cpu_control(0xffffffff, cpuctrl);
2606 
	/*
	 * Enable clock switching.  Note that this instruction neither
	 * reads nor writes r0; r0 is there only to make the asm valid.
	 */
2611 	__asm ("mcr 15, 0, r0, c15, c1, 2");
2612 }
2613 #endif	/* CPU_SA110 */
2614 
2615 #if defined(CPU_SA1100) || defined(CPU_SA1110)
2616 struct cpu_option sa11x0_options[] = {
2617 #ifdef COMPAT_12
2618 	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2619 	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
2620 #endif	/* COMPAT_12 */
2621 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2622 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2623 	{ "sa11x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2624 	{ "sa11x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2625 	{ "sa11x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2626 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2627 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2628 	{ "sa11x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2629 	{ NULL,			IGN, IGN, 0 }
2630 };
2631 
2632 void
2633 sa11x0_setup(char *args)
2634 {
2635 	int cpuctrl, cpuctrlmask;
2636 
2637 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2638 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2639 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2640 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
2641 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2642 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2643 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2644 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2645 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2646 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
2647 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
2648 
2649 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2650 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2651 #endif
2652 
2653 	cpuctrl = parse_cpu_options(args, sa11x0_options, cpuctrl);
2654 
2655 #ifdef __ARMEB__
2656 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2657 #endif
2658 
2659 	if (vector_page == ARM_VECTORS_HIGH)
2660 		cpuctrl |= CPU_CONTROL_VECRELOC;
2661 
2662 	/* Clear out the cache */
2663 	cpu_idcache_wbinv_all();
2664 
2665 	/* Set the control register */
2666 	curcpu()->ci_ctrl = cpuctrl;
2667 	cpu_control(0xffffffff, cpuctrl);
2668 }
2669 #endif	/* CPU_SA1100 || CPU_SA1110 */
2670 
2671 #if defined(CPU_FA526)
2672 struct cpu_option fa526_options[] = {
2673 #ifdef COMPAT_12
2674 	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2675 	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
2676 #endif	/* COMPAT_12 */
2677 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2678 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2679 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2680 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2681 	{ NULL,			IGN, IGN, 0 }
2682 };
2683 
2684 void
2685 fa526_setup(char *args)
2686 {
2687 	int cpuctrl, cpuctrlmask;
2688 
2689 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2690 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2691 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2692 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
2693 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2694 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2695 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2696 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2697 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2698 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
2699 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
2700 
2701 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2702 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2703 #endif
2704 
2705 	cpuctrl = parse_cpu_options(args, fa526_options, cpuctrl);
2706 
2707 #ifdef __ARMEB__
2708 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2709 #endif
2710 
2711 	if (vector_page == ARM_VECTORS_HIGH)
2712 		cpuctrl |= CPU_CONTROL_VECRELOC;
2713 
2714 	/* Clear out the cache */
2715 	cpu_idcache_wbinv_all();
2716 
2717 	/* Set the control register */
2718 	curcpu()->ci_ctrl = cpuctrl;
2719 	cpu_control(0xffffffff, cpuctrl);
2720 }
2721 #endif	/* CPU_FA526 */
2722 
2723 #if defined(CPU_IXP12X0)
2724 struct cpu_option ixp12x0_options[] = {
2725 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2726 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2727 	{ "ixp12x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2728 	{ "ixp12x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2729 	{ "ixp12x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2730 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2731 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2732 	{ "ixp12x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2733 	{ NULL,			IGN, IGN, 0 }
2734 };
2735 
2736 void
2737 ixp12x0_setup(char *args)
2738 {
	int cpuctrl, cpuctrlmask;

2742 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE
2743 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_SYST_ENABLE
2744 		 | CPU_CONTROL_IC_ENABLE;
2745 
2746 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_AFLT_ENABLE
2747 		 | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE
2748 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_SYST_ENABLE
2749 		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_IC_ENABLE
2750 		 | CPU_CONTROL_VECRELOC;
2751 
2752 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2753 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2754 #endif
2755 
2756 	cpuctrl = parse_cpu_options(args, ixp12x0_options, cpuctrl);
2757 
2758 #ifdef __ARMEB__
2759 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2760 #endif
2761 
2762 	if (vector_page == ARM_VECTORS_HIGH)
2763 		cpuctrl |= CPU_CONTROL_VECRELOC;
2764 
2765 	/* Clear out the cache */
2766 	cpu_idcache_wbinv_all();
2767 
2768 	/* Set the control register */
2769 	curcpu()->ci_ctrl = cpuctrl;
2770 	/* cpu_control(0xffffffff, cpuctrl); */
2771 	cpu_control(cpuctrlmask, cpuctrl);
2772 }
2773 #endif /* CPU_IXP12X0 */
2774 
2775 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
2776     defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425)
2777 struct cpu_option xscale_options[] = {
2778 #ifdef COMPAT_12
2779 	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2780 	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2781 #endif	/* COMPAT_12 */
2782 	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2783 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2784 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2785 	{ "xscale.branchpredict", BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2786 	{ "xscale.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2787 	{ "xscale.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2788 	{ "xscale.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2789 	{ NULL,			IGN, IGN, 0 }
2790 };
2791 
2792 void
2793 xscale_setup(char *args)
2794 {
2795 	uint32_t auxctl;
2796 	int cpuctrl, cpuctrlmask;
2797 
2798 	/*
2799 	 * The XScale Write Buffer is always enabled.  Our option
2800 	 * is to enable/disable coalescing.  Note that bits 6:3
2801 	 * must always be enabled.
2802 	 */
2803 
2804 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2805 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2806 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2807 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
2808 		 | CPU_CONTROL_BPRD_ENABLE;
2809 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2810 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2811 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2812 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2813 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2814 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
2815 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
2816 
2817 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2818 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2819 #endif
2820 
2821 	cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl);
2822 
2823 #ifdef __ARMEB__
2824 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2825 #endif
2826 
2827 	if (vector_page == ARM_VECTORS_HIGH)
2828 		cpuctrl |= CPU_CONTROL_VECRELOC;
2829 
2830 	/* Clear out the cache */
2831 	cpu_idcache_wbinv_all();
2832 
2833 	/*
2834 	 * Set the control register.  Note that bits 6:3 must always
2835 	 * be set to 1.
2836 	 */
2837 	curcpu()->ci_ctrl = cpuctrl;
2838 /*	cpu_control(cpuctrlmask, cpuctrl);*/
2839 	cpu_control(0xffffffff, cpuctrl);
2840 
	/* Configure write coalescing (the K bit disables coalescing) */
2842 	__asm volatile("mrc p15, 0, %0, c1, c0, 1"
2843 		: "=r" (auxctl));
2844 #ifdef XSCALE_NO_COALESCE_WRITES
2845 	auxctl |= XSCALE_AUXCTL_K;
2846 #else
2847 	auxctl &= ~XSCALE_AUXCTL_K;
2848 #endif
2849 	__asm volatile("mcr p15, 0, %0, c1, c0, 1"
2850 		: : "r" (auxctl));
2851 }
2852 #endif	/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || __CPU_XSCALE_PXA2XX || CPU_XSCALE_IXP425 */
2853