/*	$NetBSD: cpufunc.c,v 1.90 2008/12/12 18:13:55 matt Exp $	*/

/*
 * arm7tdmi support code Copyright (c) 2001 John Fremlin
 * arm8 support code Copyright (c) 1997 ARM Limited
 * arm8 support code Copyright (c) 1997 Causality Limited
 * arm9 support code Copyright (C) 2001 ARM Ltd
 * arm11 support code Copyright (c) 2007 Microsoft
 * cortexa8 support code Copyright (c) 2008 3am Software Foundry
 * Copyright (c) 1997 Mark Brinicombe.
 * Copyright (c) 1997 Causality Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Causality Limited.
 * 4. The name of Causality Limited may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpufunc.c
 *
 * C functions for supporting CPU / MMU / TLB specific operations.
 *
 * Created      : 30/01/97
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cpufunc.c,v 1.90 2008/12/12 18:13:55 matt Exp $");

#include "opt_compat_netbsd.h"
#include "opt_cpuoptions.h"
#include "opt_perfctrs.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/pmc.h>
#include <sys/systm.h>
#include <machine/cpu.h>
#include <machine/bootconfig.h>
#include <arch/arm/arm/disassem.h>

#include <uvm/uvm.h>

#include <arm/cpuconf.h>
#include <arm/cpufunc.h>

#ifdef CPU_XSCALE_80200
#include <arm/xscale/i80200reg.h>
#include <arm/xscale/i80200var.h>
#endif

#ifdef CPU_XSCALE_80321
#include <arm/xscale/i80321reg.h>
#include <arm/xscale/i80321var.h>
#endif

#ifdef CPU_XSCALE_IXP425
#include <arm/xscale/ixp425reg.h>
#include <arm/xscale/ixp425var.h>
#endif

#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321)
#include <arm/xscale/xscalereg.h>
#endif

#if defined(PERFCTRS)
struct arm_pmc_funcs *arm_pmc;
#endif

/* PRIMARY CACHE VARIABLES */
int	arm_picache_size;
int	arm_picache_line_size;
int	arm_picache_ways;

int	arm_pdcache_size;	/* and unified */
int	arm_pdcache_line_size;
int	arm_pdcache_ways;
#if (ARM_MMU_V6) != 0
int	arm_cache_prefer_mask;
#endif


int	arm_pcache_type;
int	arm_pcache_unified;

int	arm_dcache_align;
int	arm_dcache_align_mask;
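
/*
 * The range-based cache operations are expected to act on whole cache
 * lines; a sketch of the usual rounding done with the two variables
 * above (illustrative only, the per-CPU routines do their own
 * alignment):
 *
 *	len += va & arm_dcache_align_mask;
 *	va &= ~arm_dcache_align_mask;
 */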

/* 1 == use cpu_sleep(), 0 == don't */
int cpu_do_powersave;
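
/*
 * Sketch of the intended use, as in a port's idle loop (illustrative):
 *
 *	if (cpu_do_powersave)
 *		cpu_sleep(0);
 *
 * where cpu_sleep() indirects through the cf_sleep member of the
 * tables below.
 */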

#ifdef CPU_ARM2
struct cpu_functions arm2_cpufuncs = {
	/* CPU functions */

	.cf_id			= arm2_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= (void *)cpufunc_nullop,

	/* TLB functions */

	.cf_tlb_flushID		= cpufunc_nullop,
	.cf_tlb_flushID_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushI		= cpufunc_nullop,
	.cf_tlb_flushI_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushD		= cpufunc_nullop,
	.cf_tlb_flushD_SE	= (void *)cpufunc_nullop,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *) cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm3_cache_flush,
	.cf_dcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_dcache_inv_range	= (void *)cpufunc_nullop,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= cpufunc_nullop,
	.cf_idcache_wbinv_range	= (void *)cpufunc_nullop,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= early_abort_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_setup		= (void *)cpufunc_nullop

};
#endif	/* CPU_ARM2 */

#ifdef CPU_ARM250
struct cpu_functions arm250_cpufuncs = {
	/* CPU functions */

	.cf_id			= arm250_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= (void *)cpufunc_nullop,

	/* TLB functions */

	.cf_tlb_flushID		= cpufunc_nullop,
	.cf_tlb_flushID_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushI		= cpufunc_nullop,
	.cf_tlb_flushI_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushD		= cpufunc_nullop,
	.cf_tlb_flushD_SE	= (void *)cpufunc_nullop,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *) cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm3_cache_flush,
	.cf_dcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_dcache_inv_range	= (void *)cpufunc_nullop,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= cpufunc_nullop,
	.cf_idcache_wbinv_range	= (void *)cpufunc_nullop,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= early_abort_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_setup		= (void *)cpufunc_nullop

};
#endif	/* CPU_ARM250 */

#ifdef CPU_ARM3
struct cpu_functions arm3_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= arm3_control,

	/* TLB functions */

	.cf_tlb_flushID		= cpufunc_nullop,
	.cf_tlb_flushID_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushI		= cpufunc_nullop,
	.cf_tlb_flushI_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushD		= cpufunc_nullop,
	.cf_tlb_flushD_SE	= (void *)cpufunc_nullop,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *) cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm3_cache_flush,
	.cf_dcache_wbinv_range	= (void *)arm3_cache_flush,
	.cf_dcache_inv_range	= (void *)arm3_cache_flush,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm3_cache_flush,
	.cf_idcache_wbinv_range	= (void *)arm3_cache_flush,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= early_abort_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_setup		= (void *)cpufunc_nullop

};
#endif	/* CPU_ARM3 */

#ifdef CPU_ARM6
struct cpu_functions arm6_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm67_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm67_tlb_flush,
	.cf_tlb_flushID_SE	= arm67_tlb_purge,
	.cf_tlb_flushI		= arm67_tlb_flush,
	.cf_tlb_flushI_SE	= arm67_tlb_purge,
	.cf_tlb_flushD		= arm67_tlb_flush,
	.cf_tlb_flushD_SE	= arm67_tlb_purge,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *) cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm67_cache_flush,
	.cf_dcache_wbinv_range	= (void *)arm67_cache_flush,
	.cf_dcache_inv_range	= (void *)arm67_cache_flush,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm67_cache_flush,
	.cf_idcache_wbinv_range	= (void *)arm67_cache_flush,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

#ifdef ARM6_LATE_ABORT
	.cf_dataabt_fixup	= late_abort_fixup,
#else
	.cf_dataabt_fixup	= early_abort_fixup,
#endif
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm67_context_switch,

	.cf_setup		= arm6_setup

};
#endif	/* CPU_ARM6 */

#ifdef CPU_ARM7
struct cpu_functions arm7_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm67_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm67_tlb_flush,
	.cf_tlb_flushID_SE	= arm67_tlb_purge,
	.cf_tlb_flushI		= arm67_tlb_flush,
	.cf_tlb_flushI_SE	= arm67_tlb_purge,
	.cf_tlb_flushD		= arm67_tlb_flush,
	.cf_tlb_flushD_SE	= arm67_tlb_purge,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm67_cache_flush,
	.cf_dcache_wbinv_range	= (void *)arm67_cache_flush,
	.cf_dcache_inv_range	= (void *)arm67_cache_flush,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm67_cache_flush,
	.cf_idcache_wbinv_range	= (void *)arm67_cache_flush,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= late_abort_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm67_context_switch,

	.cf_setup		= arm7_setup

};
#endif	/* CPU_ARM7 */

#ifdef CPU_ARM7TDMI
struct cpu_functions arm7tdmi_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm7tdmi_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm7tdmi_tlb_flushID,
	.cf_tlb_flushID_SE	= arm7tdmi_tlb_flushID_SE,
	.cf_tlb_flushI		= arm7tdmi_tlb_flushID,
	.cf_tlb_flushI_SE	= arm7tdmi_tlb_flushID_SE,
	.cf_tlb_flushD		= arm7tdmi_tlb_flushID,
	.cf_tlb_flushD_SE	= arm7tdmi_tlb_flushID_SE,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm7tdmi_cache_flushID,
	.cf_dcache_wbinv_range	= (void *)arm7tdmi_cache_flushID,
	.cf_dcache_inv_range	= (void *)arm7tdmi_cache_flushID,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm7tdmi_cache_flushID,
	.cf_idcache_wbinv_range	= (void *)arm7tdmi_cache_flushID,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= late_abort_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm7tdmi_context_switch,

	.cf_setup		= arm7tdmi_setup

};
#endif	/* CPU_ARM7TDMI */

#ifdef CPU_ARM8
struct cpu_functions arm8_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm8_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm8_tlb_flushID,
	.cf_tlb_flushID_SE	= arm8_tlb_flushID_SE,
	.cf_tlb_flushI		= arm8_tlb_flushID,
	.cf_tlb_flushI_SE	= arm8_tlb_flushID_SE,
	.cf_tlb_flushD		= arm8_tlb_flushID,
	.cf_tlb_flushD_SE	= arm8_tlb_flushID_SE,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm8_cache_purgeID,
	.cf_dcache_wbinv_range	= (void *)arm8_cache_purgeID,
/*XXX*/	.cf_dcache_inv_range	= (void *)arm8_cache_purgeID,
	.cf_dcache_wb_range	= (void *)arm8_cache_cleanID,

	.cf_idcache_wbinv_all	= arm8_cache_purgeID,
	.cf_idcache_wbinv_range = (void *)arm8_cache_purgeID,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm8_context_switch,

	.cf_setup		= arm8_setup
};
#endif	/* CPU_ARM8 */
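
/*
 * A note on the XXX-marked cf_dcache_inv_range entries above and in
 * several of the tables below: where a core provides no (trusted)
 * invalidate-only range operation, the write-back-and-invalidate
 * routine is substituted.  Writing back clean data is always safe,
 * merely slower; e.g. on ARM8 a cpu_dcache_inv_range() call ends up
 * purging (cleaning and invalidating) the affected lines instead.
 */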

#ifdef CPU_ARM9
struct cpu_functions arm9_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm9_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= arm9_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= arm9_icache_sync_all,
	.cf_icache_sync_range	= arm9_icache_sync_range,

	.cf_dcache_wbinv_all	= arm9_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= arm9_dcache_wbinv_range,
/*XXX*/	.cf_dcache_inv_range	= arm9_dcache_wbinv_range,
	.cf_dcache_wb_range	= arm9_dcache_wb_range,

	.cf_idcache_wbinv_all	= arm9_idcache_wbinv_all,
	.cf_idcache_wbinv_range = arm9_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm9_context_switch,

	.cf_setup		= arm9_setup

};
#endif /* CPU_ARM9 */

#if defined(CPU_ARM9E) || defined(CPU_ARM10)
struct cpu_functions armv5_ec_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= armv5_ec_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= arm10_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= arm10_tlb_flushI_SE,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv5_ec_icache_sync_all,
	.cf_icache_sync_range	= armv5_ec_icache_sync_range,

	.cf_dcache_wbinv_all	= armv5_ec_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= armv5_ec_dcache_wbinv_range,
/*XXX*/	.cf_dcache_inv_range	= armv5_ec_dcache_wbinv_range,
	.cf_dcache_wb_range	= armv5_ec_dcache_wb_range,

	.cf_idcache_wbinv_all	= armv5_ec_idcache_wbinv_all,
	.cf_idcache_wbinv_range = armv5_ec_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm10_context_switch,

	.cf_setup		= arm10_setup

};
#endif /* CPU_ARM9E || CPU_ARM10 */

#ifdef CPU_ARM10
struct cpu_functions arm10_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= armv5_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= arm10_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= arm10_tlb_flushI_SE,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv5_icache_sync_all,
	.cf_icache_sync_range	= armv5_icache_sync_range,

	.cf_dcache_wbinv_all	= armv5_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= armv5_dcache_wbinv_range,
/*XXX*/	.cf_dcache_inv_range	= armv5_dcache_wbinv_range,
	.cf_dcache_wb_range	= armv5_dcache_wb_range,

	.cf_idcache_wbinv_all	= armv5_idcache_wbinv_all,
	.cf_idcache_wbinv_range = armv5_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm10_context_switch,

	.cf_setup		= arm10_setup

};
#endif /* CPU_ARM10 */

#ifdef CPU_ARM11
struct cpu_functions arm11_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm11_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm11_tlb_flushID,
	.cf_tlb_flushID_SE	= arm11_tlb_flushID_SE,
	.cf_tlb_flushI		= arm11_tlb_flushI,
	.cf_tlb_flushI_SE	= arm11_tlb_flushI_SE,
	.cf_tlb_flushD		= arm11_tlb_flushD,
	.cf_tlb_flushD_SE	= arm11_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv6_icache_sync_all,
	.cf_icache_sync_range	= armv6_icache_sync_range,

	.cf_dcache_wbinv_all	= armv6_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= armv6_dcache_wbinv_range,
	.cf_dcache_inv_range	= armv6_dcache_inv_range,
	.cf_dcache_wb_range	= armv6_dcache_wb_range,

	.cf_idcache_wbinv_all	= armv6_idcache_wbinv_all,
	.cf_idcache_wbinv_range = armv6_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= arm11_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= arm11_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm11_context_switch,

	.cf_setup		= arm11_setup

};
#endif /* CPU_ARM11 */

#ifdef CPU_ARM1136
struct cpu_functions arm1136_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm1136_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm11_tlb_flushID,
	.cf_tlb_flushID_SE	= arm11_tlb_flushID_SE,
	.cf_tlb_flushI		= arm11_tlb_flushI,
	.cf_tlb_flushI_SE	= arm11_tlb_flushI_SE,
	.cf_tlb_flushD		= arm11_tlb_flushD,
	.cf_tlb_flushD_SE	= arm11_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= arm1136_icache_sync_all,	/* 411920 */
	.cf_icache_sync_range	= arm1136_icache_sync_range,	/* 371025 */

	.cf_dcache_wbinv_all	= arm1136_dcache_wbinv_all,	/* 411920 */
	.cf_dcache_wbinv_range	= armv6_dcache_wbinv_range,
	.cf_dcache_inv_range	= armv6_dcache_inv_range,
	.cf_dcache_wb_range	= armv6_dcache_wb_range,

	.cf_idcache_wbinv_all	= arm1136_idcache_wbinv_all,	/* 411920 */
	.cf_idcache_wbinv_range = arm1136_idcache_wbinv_range,	/* 371025 */

	/* Other functions */

	.cf_flush_prefetchbuf	= arm1136_flush_prefetchbuf,
	.cf_drain_writebuf	= arm11_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= arm11_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm11_context_switch,

	.cf_setup		= arm1136_setup

};
#endif /* CPU_ARM1136 */

#ifdef CPU_SA110
struct cpu_functions sa110_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= sa1_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= sa1_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= sa1_cache_syncI,
	.cf_icache_sync_range	= sa1_cache_syncI_rng,

	.cf_dcache_wbinv_all	= sa1_cache_purgeD,
	.cf_dcache_wbinv_range	= sa1_cache_purgeD_rng,
/*XXX*/	.cf_dcache_inv_range	= sa1_cache_purgeD_rng,
	.cf_dcache_wb_range	= sa1_cache_cleanD_rng,

	.cf_idcache_wbinv_all	= sa1_cache_purgeID,
	.cf_idcache_wbinv_range	= sa1_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= sa110_context_switch,

	.cf_setup		= sa110_setup
};
#endif	/* CPU_SA110 */

#if defined(CPU_SA1100) || defined(CPU_SA1110)
struct cpu_functions sa11x0_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= sa1_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= sa1_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= sa1_cache_syncI,
	.cf_icache_sync_range	= sa1_cache_syncI_rng,

	.cf_dcache_wbinv_all	= sa1_cache_purgeD,
	.cf_dcache_wbinv_range	= sa1_cache_purgeD_rng,
/*XXX*/	.cf_dcache_inv_range	= sa1_cache_purgeD_rng,
	.cf_dcache_wb_range	= sa1_cache_cleanD_rng,

	.cf_idcache_wbinv_all	= sa1_cache_purgeID,
	.cf_idcache_wbinv_range	= sa1_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf	= sa11x0_drain_readbuf,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= sa11x0_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= sa11x0_context_switch,

	.cf_setup		= sa11x0_setup
};
#endif	/* CPU_SA1100 || CPU_SA1110 */

#if defined(CPU_FA526)
struct cpu_functions fa526_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= fa526_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= fa526_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= fa526_tlb_flushI_SE,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= fa526_icache_sync_all,
	.cf_icache_sync_range	= fa526_icache_sync_range,

	.cf_dcache_wbinv_all	= fa526_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= fa526_dcache_wbinv_range,
	.cf_dcache_inv_range	= fa526_dcache_inv_range,
	.cf_dcache_wb_range	= fa526_dcache_wb_range,

	.cf_idcache_wbinv_all	= fa526_idcache_wbinv_all,
	.cf_idcache_wbinv_range	= fa526_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= fa526_flush_prefetchbuf,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= fa526_flush_brnchtgt_E,

	.cf_sleep		= fa526_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= fa526_context_switch,

	.cf_setup		= fa526_setup
};
#endif	/* CPU_FA526 */

#ifdef CPU_IXP12X0
struct cpu_functions ixp12x0_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= sa1_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= sa1_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= sa1_cache_syncI,
	.cf_icache_sync_range	= sa1_cache_syncI_rng,

	.cf_dcache_wbinv_all	= sa1_cache_purgeD,
	.cf_dcache_wbinv_range	= sa1_cache_purgeD_rng,
/*XXX*/	.cf_dcache_inv_range	= sa1_cache_purgeD_rng,
	.cf_dcache_wb_range	= sa1_cache_cleanD_rng,

	.cf_idcache_wbinv_all	= sa1_cache_purgeID,
	.cf_idcache_wbinv_range	= sa1_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf	= ixp12x0_drain_readbuf,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= ixp12x0_context_switch,

	.cf_setup		= ixp12x0_setup
};
#endif	/* CPU_IXP12X0 */

#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
    defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425)
struct cpu_functions xscale_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= xscale_cpwait,

	/* MMU functions */

	.cf_control		= xscale_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= xscale_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= xscale_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= xscale_cache_syncI,
	.cf_icache_sync_range	= xscale_cache_syncI_rng,

	.cf_dcache_wbinv_all	= xscale_cache_purgeD,
	.cf_dcache_wbinv_range	= xscale_cache_purgeD_rng,
	.cf_dcache_inv_range	= xscale_cache_flushD_rng,
	.cf_dcache_wb_range	= xscale_cache_cleanD_rng,

	.cf_idcache_wbinv_all	= xscale_cache_purgeID,
	.cf_idcache_wbinv_range = xscale_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= xscale_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= xscale_context_switch,

	.cf_setup		= xscale_setup
};
#endif
/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || __CPU_XSCALE_PXA2XX || CPU_XSCALE_IXP425 */

/*
 * Global constants also used by locore.s
 */

struct cpu_functions cpufuncs;
u_int cputype;
u_int cpu_reset_needs_v4_MMU_disable;	/* flag used in locore.s */
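
/*
 * The rest of the kernel reaches the current vector table through the
 * cpu_*() wrappers in <arm/cpufunc.h>; for example (roughly),
 *
 *	cpu_tlb_flushID_SE(va);
 *
 * expands to cpufuncs.cf_tlb_flushID_SE(va), with cpufuncs filled in
 * by set_cpufuncs() below.
 */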

#if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) || \
    defined(CPU_ARM9E) || defined(CPU_ARM10) || defined(CPU_ARM11) || \
    defined(CPU_FA526) || \
    defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
    defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425)
static void get_cachetype_cp15(void);

/* Additional cache information local to this file.  Log2 of some of the
   above numbers.  */
static int	arm_dcache_l2_nsets;
static int	arm_dcache_l2_assoc;
static int	arm_dcache_l2_linesize;
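
/*
 * Worked example (hypothetical 16KB, 4-way, 32-byte-line data cache):
 * arm_dcache_l2_linesize = log2(32) = 5, arm_dcache_l2_assoc =
 * log2(4) = 2 and arm_dcache_l2_nsets = log2(16384 / (4 * 32)) = 7,
 * i.e. 128 sets.  get_cachetype_cp15() below derives these from the
 * cache type register fields.
 */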

static void
get_cachetype_cp15(void)
{
	u_int ctype, isize, dsize;
	u_int multiplier;

	__asm volatile("mrc p15, 0, %0, c0, c0, 1"
		: "=r" (ctype));

	/*
	 * ...and thus spake the ARM ARM:
	 *
	 * If an <opcode2> value corresponding to an unimplemented or
	 * reserved ID register is encountered, the System Control
	 * processor returns the value of the main ID register.
	 */
	if (ctype == cpu_id())
		goto out;

#if (ARM_MMU_V6) > 0
	if (CPU_CT_FORMAT(ctype) == 4) {
		u_int csid1, csid2;
		isize = 1U << (CPU_CT4_ILINE(ctype) + 2);
		dsize = 1U << (CPU_CT4_DLINE(ctype) + 2);

		__asm volatile("mcr p15, 1, %0, c0, c0, 2"
		    :: "r" (CPU_CSSR_L1));	/* select L1 cache values */
		__asm volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (csid1));
		arm_pdcache_ways = CPU_CSID_ASSOC(csid1) + 1;
		arm_pdcache_line_size = dsize << CPU_CSID_LEN(csid1);
		arm_pdcache_size = arm_pdcache_line_size * arm_pdcache_ways;
		arm_pdcache_size *= CPU_CSID_NUMSETS(csid1);
		arm_cache_prefer_mask = PAGE_SIZE;

		arm_dcache_align = arm_pdcache_line_size;

		__asm volatile("mcr p15, 1, %0, c0, c0, 2"
		    :: "r" (CPU_CSSR_L2));	/* select L2 cache values */
		__asm volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (csid2));
		arm_dcache_l2_assoc = CPU_CSID_ASSOC(csid2) + 1;
		arm_dcache_l2_linesize = dsize << CPU_CSID_LEN(csid2);
		arm_dcache_l2_nsets = CPU_CSID_NUMSETS(csid2) + 1;
		arm_pcache_type = CPU_CT_CTYPE_WB14;
		goto out;
	}
#endif /* ARM_MMU_V6 > 0 */

	if ((ctype & CPU_CT_S) == 0)
		arm_pcache_unified = 1;

	/*
	 * If you want to know how this code works, go read the ARM ARM.
	 */

	arm_pcache_type = CPU_CT_CTYPE(ctype);

	if (arm_pcache_unified == 0) {
		isize = CPU_CT_ISIZE(ctype);
		multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
		arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
		if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
			if (isize & CPU_CT_xSIZE_M)
				arm_picache_line_size = 0; /* not present */
			else
				arm_picache_ways = 1;
		} else {
			arm_picache_ways = multiplier <<
			    (CPU_CT_xSIZE_ASSOC(isize) - 1);
#if (ARM_MMU_V6) > 0
			if (CPU_CT_xSIZE_P & isize)
				arm_cache_prefer_mask |=
				    __BIT(9 + CPU_CT_xSIZE_SIZE(isize)
					  - CPU_CT_xSIZE_ASSOC(isize))
				    - PAGE_SIZE;
#endif
		}
		arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
	}

	dsize = CPU_CT_DSIZE(ctype);
	multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
	arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
	if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
		if (dsize & CPU_CT_xSIZE_M)
			arm_pdcache_line_size = 0; /* not present */
		else
			arm_pdcache_ways = 1;
	} else {
		arm_pdcache_ways = multiplier <<
		    (CPU_CT_xSIZE_ASSOC(dsize) - 1);
#if (ARM_MMU_V6) > 0
		if (CPU_CT_xSIZE_P & dsize)
			arm_cache_prefer_mask |=
			    __BIT(9 + CPU_CT_xSIZE_SIZE(dsize)
				  - CPU_CT_xSIZE_ASSOC(dsize)) - PAGE_SIZE;
#endif
	}
	arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);

	arm_dcache_align = arm_pdcache_line_size;

	arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
	arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
	arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
	    CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);

 out:
	arm_dcache_align_mask = arm_dcache_align - 1;
}
#endif /* ARM7TDMI || ARM8 || ARM9 || ARM9E || ARM10 || ARM11 || FA526 || XSCALE */
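
/*
 * Example of the arm_cache_prefer_mask computation above, assuming a
 * 32KB, 4-way VIPT cache with the P bit set: SIZE = 6 and ASSOC = 2,
 * so __BIT(9 + 6 - 2) = 8192 (the size of one way), and the mask
 * becomes 8192 - PAGE_SIZE = 0x1000 with 4KB pages, i.e. two page
 * colors for uvm to keep apart.
 */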

#if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3) || \
    defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_SA110) || \
    defined(CPU_SA1100) || defined(CPU_SA1110) || defined(CPU_IXP12X0)
/* Cache information for CPUs without cache type registers. */
struct cachetab {
	u_int32_t ct_cpuid;
	int	ct_pcache_type;
	int	ct_pcache_unified;
	int	ct_pdcache_size;
	int	ct_pdcache_line_size;
	int	ct_pdcache_ways;
	int	ct_picache_size;
	int	ct_picache_line_size;
	int	ct_picache_ways;
};

struct cachetab cachetab[] = {
    /* cpuid,           cache type,       u,  dsiz, ls, wy,  isiz, ls, wy */
    { CPU_ID_ARM2,      0,                1,     0,  0,  0,     0,  0,  0 },
    { CPU_ID_ARM250,    0,                1,     0,  0,  0,     0,  0,  0 },
    { CPU_ID_ARM3,      CPU_CT_CTYPE_WT,  1,  4096, 16, 64,     0,  0,  0 },
    { CPU_ID_ARM610,	CPU_CT_CTYPE_WT,  1,  4096, 16, 64,     0,  0,  0 },
    { CPU_ID_ARM710,    CPU_CT_CTYPE_WT,  1,  8192, 32,  4,     0,  0,  0 },
    { CPU_ID_ARM7500,   CPU_CT_CTYPE_WT,  1,  4096, 16,  4,     0,  0,  0 },
    { CPU_ID_ARM710A,   CPU_CT_CTYPE_WT,  1,  8192, 16,  4,     0,  0,  0 },
    { CPU_ID_ARM7500FE, CPU_CT_CTYPE_WT,  1,  4096, 16,  4,     0,  0,  0 },
    /* XXX is this type right for SA-1? */
    { CPU_ID_SA110,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 },
    { CPU_ID_SA1100,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
    { CPU_ID_SA1110,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
    { CPU_ID_IXP1200,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 }, /* XXX */
    { 0, 0, 0, 0, 0, 0, 0, 0}
};

static void get_cachetype_table(void);

static void
get_cachetype_table(void)
{
	int i;
	u_int32_t cpuid = cpu_id();

	for (i = 0; cachetab[i].ct_cpuid != 0; i++) {
		if (cachetab[i].ct_cpuid == (cpuid & CPU_ID_CPU_MASK)) {
			arm_pcache_type = cachetab[i].ct_pcache_type;
			arm_pcache_unified = cachetab[i].ct_pcache_unified;
			arm_pdcache_size = cachetab[i].ct_pdcache_size;
			arm_pdcache_line_size =
			    cachetab[i].ct_pdcache_line_size;
			arm_pdcache_ways = cachetab[i].ct_pdcache_ways;
			arm_picache_size = cachetab[i].ct_picache_size;
			arm_picache_line_size =
			    cachetab[i].ct_picache_line_size;
			arm_picache_ways = cachetab[i].ct_picache_ways;
		}
	}
	arm_dcache_align = arm_pdcache_line_size;

	arm_dcache_align_mask = arm_dcache_align - 1;
}

#endif /* ARM2 || ARM250 || ARM3 || ARM6 || ARM7 || SA110 || SA1100 || SA1110 || IXP12X0 */

/*
 * Cannot panic here as we may not have a console yet ...
 */

int
set_cpufuncs(void)
{
	if (cputype == 0) {
		cputype = cpufunc_id();
		cputype &= CPU_ID_CPU_MASK;
	}

	/*
	 * NOTE: cpu_do_powersave defaults to off.  If we encounter a
	 * CPU type where we want to use it by default, then we set it.
	 */
#ifdef CPU_ARM2
	if (cputype == CPU_ID_ARM2) {
		cpufuncs = arm2_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;
		get_cachetype_table();
		return 0;
	}
#endif /* CPU_ARM2 */
#ifdef CPU_ARM250
	if (cputype == CPU_ID_ARM250) {
		cpufuncs = arm250_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;
		get_cachetype_table();
		return 0;
	}
#endif
#ifdef CPU_ARM3
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    (cputype & 0x00000f00) == 0x00000300) {
		cpufuncs = arm3_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;
		get_cachetype_table();
		return 0;
	}
#endif	/* CPU_ARM3 */
#ifdef CPU_ARM6
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    (cputype & 0x00000f00) == 0x00000600) {
		cpufuncs = arm6_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;
		get_cachetype_table();
		pmap_pte_init_generic();
		return 0;
	}
#endif	/* CPU_ARM6 */
#ifdef CPU_ARM7
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    CPU_ID_IS7(cputype) &&
	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V3) {
		cpufuncs = arm7_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;
		get_cachetype_table();
		pmap_pte_init_generic();
		return 0;
	}
#endif	/* CPU_ARM7 */
#ifdef CPU_ARM7TDMI
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    CPU_ID_IS7(cputype) &&
	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V4T) {
		cpufuncs = arm7tdmi_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		return 0;
	}
#endif
#ifdef CPU_ARM8
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    (cputype & 0x0000f000) == 0x00008000) {
		cpufuncs = arm8_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;	/* XXX correct? */
		get_cachetype_cp15();
		pmap_pte_init_arm8();
		return 0;
	}
#endif	/* CPU_ARM8 */
#ifdef CPU_ARM9
	if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
	     (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
	    (cputype & 0x0000f000) == 0x00009000) {
		cpufuncs = arm9_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
		get_cachetype_cp15();
		arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
		arm9_dcache_sets_max =
		    (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) -
		    arm9_dcache_sets_inc;
		arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
		arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
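		/*
		 * Worked example (hypothetical 16KB, 4-way cache with
		 * 32-byte lines, so l2_linesize = 5, l2_nsets = 7 and
		 * l2_assoc = 2): sets_inc = 32, sets_max = 4064,
		 * index_inc = 1U << 30 and index_max = 0xc0000000.
		 * The arm9 set/index cache loops step the set number in
		 * the low-order bits and the way number in the top bits
		 * of the c7 MCR operand.
		 */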
#ifdef	ARM9_CACHE_WRITE_THROUGH
		pmap_pte_init_arm9();
#else
		pmap_pte_init_generic();
#endif
		return 0;
	}
#endif /* CPU_ARM9 */
#if defined(CPU_ARM9E) || defined(CPU_ARM10)
	if (cputype == CPU_ID_ARM926EJS ||
	    cputype == CPU_ID_ARM1026EJS) {
		cpufuncs = armv5_ec_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
		get_cachetype_cp15();
		pmap_pte_init_generic();
		return 0;
	}
#endif /* CPU_ARM9E || CPU_ARM10 */
#ifdef CPU_ARM10
	if (/* cputype == CPU_ID_ARM1020T || */
	    cputype == CPU_ID_ARM1020E) {
		/*
		 * Select write-through caching (this isn't really an
		 * option on ARM1020T).
		 */
		cpufuncs = arm10_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
		get_cachetype_cp15();
		armv5_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
		armv5_dcache_sets_max =
		    (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) -
		    armv5_dcache_sets_inc;
		armv5_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
		armv5_dcache_index_max = 0U - armv5_dcache_index_inc;
		pmap_pte_init_generic();
		return 0;
	}
#endif /* CPU_ARM10 */
#if defined(CPU_ARM11)
	if (cputype == CPU_ID_ARM1136JS ||
	    cputype == CPU_ID_ARM1136JSR1 ||
	    cputype == CPU_ID_ARM1176JS ||
	    cputype == CPU_ID_CORTEXA8R1 ||
	    cputype == CPU_ID_CORTEXA8R2) {
		cpufuncs = arm11_cpufuncs;
#if defined(CPU_ARM1136)
		if (cputype != CPU_ID_ARM1176JS) {
			cpufuncs = arm1136_cpufuncs;
			if (cputype == CPU_ID_ARM1136JS)
				cpufuncs.cf_sleep = arm1136_sleep_rev0;
		}
#endif
		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
		cpu_do_powersave = 1;			/* Enable powersave */
		get_cachetype_cp15();
		pmap_pte_init_generic();
		if (arm_cache_prefer_mask)
			uvmexp.ncolors = (arm_cache_prefer_mask >> PGSHIFT) + 1;

		return 0;
	}
#endif /* CPU_ARM11 */
#ifdef CPU_SA110
	if (cputype == CPU_ID_SA110) {
		cpufuncs = sa110_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it */
		get_cachetype_table();
		pmap_pte_init_sa1();
		return 0;
	}
#endif	/* CPU_SA110 */
#ifdef CPU_SA1100
	if (cputype == CPU_ID_SA1100) {
		cpufuncs = sa11x0_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it	*/
		get_cachetype_table();
		pmap_pte_init_sa1();

		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;

		return 0;
	}
#endif	/* CPU_SA1100 */
#ifdef CPU_SA1110
	if (cputype == CPU_ID_SA1110) {
		cpufuncs = sa11x0_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it	*/
		get_cachetype_table();
		pmap_pte_init_sa1();

		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;

		return 0;
	}
#endif	/* CPU_SA1110 */
#ifdef CPU_FA526
	if (cputype == CPU_ID_FA526) {
		cpufuncs = fa526_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it	*/
		get_cachetype_cp15();
		pmap_pte_init_generic();

		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;

		return 0;
	}
#endif	/* CPU_FA526 */
#ifdef CPU_IXP12X0
	if (cputype == CPU_ID_IXP1200) {
		cpufuncs = ixp12x0_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;
		get_cachetype_table();
		pmap_pte_init_sa1();
		return 0;
	}
#endif  /* CPU_IXP12X0 */
#ifdef CPU_XSCALE_80200
	if (cputype == CPU_ID_80200) {
		int rev = cpufunc_id() & CPU_ID_REVISION_MASK;

		i80200_icu_init();

		/*
		 * Reset the Performance Monitoring Unit to a
		 * pristine state:
		 *	- CCNT, PMN0, PMN1 reset to 0
		 *	- overflow indications cleared
		 *	- all counters disabled
		 */
		__asm volatile("mcr p14, 0, %0, c0, c0, 0"
			:
			: "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
			       PMNC_CC_IF));

#if defined(XSCALE_CCLKCFG)
		/*
		 * Crank CCLKCFG to maximum legal value.
		 */
		__asm volatile ("mcr p14, 0, %0, c6, c0, 0"
			:
			: "r" (XSCALE_CCLKCFG));
#endif

		/*
		 * XXX Disable ECC in the Bus Controller Unit; we
		 * don't really support it, yet.  Clear any pending
		 * error indications.
		 */
		__asm volatile("mcr p13, 0, %0, c0, c1, 0"
			:
			: "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));

		cpufuncs = xscale_cpufuncs;
#if defined(PERFCTRS)
		xscale_pmu_init();
#endif

		/*
		 * i80200 errata: Step-A0 and A1 have a bug where
		 * D$ dirty bits are not cleared on "invalidate by
		 * address".
		 *
		 * Workaround: Clean cache line before invalidating.
		 */
		if (rev == 0 || rev == 1)
			cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng;

		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();
		return 0;
	}
#endif /* CPU_XSCALE_80200 */
#ifdef CPU_XSCALE_80321
	if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
	    cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0 ||
	    cputype == CPU_ID_80219_400 || cputype == CPU_ID_80219_600) {
		i80321_icu_init();

		/*
		 * Reset the Performance Monitoring Unit to a
		 * pristine state:
		 *	- CCNT, PMN0, PMN1 reset to 0
		 *	- overflow indications cleared
		 *	- all counters disabled
		 */
		__asm volatile("mcr p14, 0, %0, c0, c0, 0"
			:
			: "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
			       PMNC_CC_IF));

		cpufuncs = xscale_cpufuncs;
#if defined(PERFCTRS)
		xscale_pmu_init();
#endif

		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();
		return 0;
	}
#endif /* CPU_XSCALE_80321 */
#ifdef __CPU_XSCALE_PXA2XX
	/* ignore core revision to test PXA2xx CPUs */
	if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X ||
	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {

		cpufuncs = xscale_cpufuncs;
#if defined(PERFCTRS)
		xscale_pmu_init();
#endif

		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();

		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;

		return 0;
	}
#endif /* __CPU_XSCALE_PXA2XX */
#ifdef CPU_XSCALE_IXP425
	if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
	    cputype == CPU_ID_IXP425_266) {
		ixp425_icu_init();

		cpufuncs = xscale_cpufuncs;
#if defined(PERFCTRS)
		xscale_pmu_init();
#endif

		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();

		return 0;
	}
#endif /* CPU_XSCALE_IXP425 */
	/*
	 * Bzzzz. And the answer was ...
	 */
	panic("No support for this CPU type (%08x) in kernel", cputype);
	return(ARCHITECTURE_NOT_PRESENT);
}
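
/*
 * set_cpufuncs() is meant to be called once from a port's early
 * bootstrap code (typically initarm(), before the console attaches),
 * e.g.
 *
 *	set_cpufuncs();
 *
 * after which the cpu_*() wrappers and cpu_reset_needs_v4_MMU_disable
 * are valid.  (Illustrative; each port's bootstrap differs.)
 */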

#ifdef CPU_ARM2
u_int
arm2_id(void)
{

	return CPU_ID_ARM2;
}
#endif /* CPU_ARM2 */

#ifdef CPU_ARM250
u_int
arm250_id(void)
{

	return CPU_ID_ARM250;
}
#endif /* CPU_ARM250 */

/*
 * Fixup routines for data and prefetch aborts.
 *
 * Several compile-time symbols are used:
 *
 * DEBUG_FAULT_CORRECTION - Print debugging information during the
 * correction of registers after a fault.
 * ARM6_LATE_ABORT - ARM6 supports both early and late aborts;
 * when defined, late aborts are used.
 */


/*
 * Null abort fixup routine.
 * For use when no fixup is required.
 */
int
cpufunc_null_fixup(void *arg)
{
	return(ABORT_FIXUP_OK);
}


#if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3) || \
    defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI)

#ifdef DEBUG_FAULT_CORRECTION
#define DFC_PRINTF(x)		printf x
#define DFC_DISASSEMBLE(x)	disassemble(x)
#else
#define DFC_PRINTF(x)		/* nothing */
#define DFC_DISASSEMBLE(x)	/* nothing */
#endif

/*
 * "Early" data abort fixup.
 *
 * For ARM2, ARM2as, ARM3 and ARM6 (in early-abort mode).  Also used
 * indirectly by ARM6 (in late-abort mode) and ARM7[TDMI].
 *
 * In early aborts, we may have to fix up LDM, STM, LDC and STC.
 */
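/*
 * For example (illustrative): if "ldmia r4!, {r0-r3}" aborts, the base
 * update has already happened on these cores, so r4 has moved up by
 * 4 registers * 4 bytes = 16.  The LDM/STM case below spots the W bit,
 * counts the registers in the list and moves r4 back down by 16 so the
 * instruction can be restarted.
 */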
int
early_abort_fixup(void *arg)
{
	trapframe_t *frame = arg;
	u_int fault_pc;
	u_int fault_instruction;
	int saved_lr = 0;

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* OK, an abort in SVC mode */

		/*
		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
		 * as the fault happened in svc mode but we need it in the
		 * usr slot so we can treat the registers as an array of ints
		 * during fixing.
		 * NOTE: r15 (the PC) is in this array, but writeback to r15
		 * is not allowed.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the following fixup code.
		 */

		saved_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = frame->tf_svc_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed. This should not happen as the kstack is
		 * always valid.
		 */
	}

	/* Get fault address and status from the CPU */

	fault_pc = frame->tf_pc;
	fault_instruction = *((volatile unsigned int *)fault_pc);

	/* Decode the fault instruction and fix the registers as needed */

	if ((fault_instruction & 0x0e000000) == 0x08000000) {
		int base;
		int loop;
		int count;
		int *registers = &frame->tf_r0;

		DFC_PRINTF(("LDM/STM\n"));
		DFC_DISASSEMBLE(fault_pc);
		if (fault_instruction & (1 << 21)) {
			DFC_PRINTF(("This instruction must be corrected\n"));
			base = (fault_instruction >> 16) & 0x0f;
			if (base == 15)
				return ABORT_FIXUP_FAILED;
			/* Count registers transferred */
			count = 0;
			for (loop = 0; loop < 16; ++loop) {
				if (fault_instruction & (1<<loop))
					++count;
			}
			DFC_PRINTF(("%d registers used\n", count));
			DFC_PRINTF(("Corrected r%d by %d bytes ",
				       base, count * 4));
			if (fault_instruction & (1 << 23)) {
				DFC_PRINTF(("down\n"));
				registers[base] -= count * 4;
			} else {
				DFC_PRINTF(("up\n"));
				registers[base] += count * 4;
			}
		}
	} else if ((fault_instruction & 0x0e000000) == 0x0c000000) {
		int base;
		int offset;
		int *registers = &frame->tf_r0;

		/* REGISTER CORRECTION IS REQUIRED FOR THESE INSTRUCTIONS */

		DFC_DISASSEMBLE(fault_pc);

		/* Only need to fix registers if write back is turned on */

		if ((fault_instruction & (1 << 21)) != 0) {
			base = (fault_instruction >> 16) & 0x0f;
			if (base == 13 &&
			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
				return ABORT_FIXUP_FAILED;
			if (base == 15)
				return ABORT_FIXUP_FAILED;

			offset = (fault_instruction & 0xff) << 2;
			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
			if ((fault_instruction & (1 << 23)) != 0)
				offset = -offset;
			registers[base] += offset;
			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
		}
	} else if ((fault_instruction & 0x0e000000) == 0x0c000000)
		/* XXX unreachable: same condition as the branch above. */
		return ABORT_FIXUP_FAILED;

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* OK, an abort in SVC mode */

		/*
		 * Copy the (possibly corrected) usr r14 back into the SVC
		 * r14 slot and restore the usr r14 we stashed on entry, so
		 * that the registers could be treated as an array of ints
		 * during fixing.
		 * NOTE: r15 (the PC) is in this array, but writeback to r15
		 * is not allowed.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the prior fixup code.
		 */

		frame->tf_svc_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = saved_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed. This should not happen as the kstack is
		 * always valid.
		 */
	}

	return(ABORT_FIXUP_OK);
}
#endif	/* CPU_ARM2/250/3/6/7 */
1813 
1814 
1815 #if (defined(CPU_ARM6) && defined(ARM6_LATE_ABORT)) || defined(CPU_ARM7) || \
1816 	defined(CPU_ARM7TDMI)
1817 /*
1818  * "Late" (base updated) data abort fixup
1819  *
1820  * For ARM6 (in late-abort mode), ARM7 and ARM7TDMI.
1821  *
1822  * In this model, all data-transfer instructions need fixing up.  We defer
1823  * LDM, STM, LDC and STC fixup to the early-abort handler.
1824  */
1825 int
1826 late_abort_fixup(void *arg)
1828 {
1829 	trapframe_t *frame = arg;
1830 	u_int fault_pc;
1831 	u_int fault_instruction;
1832 	int saved_lr = 0;
1833 
1834 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1835 
1836 		/* Ok an abort in SVC mode */
1837 
1838 		/*
1839 		 * Copy the SVC r14 into the usr r14 slot - the usr r14 is
1840 		 * garbage as the fault happened in SVC mode, but we need it
1841 		 * in the usr slot so we can treat the registers as an array
1842 		 * of ints during fixing.
1843 		 * NOTE: the PC (r15) is already in its slot, and writeback
1844 		 * to r15 is not allowed, so it needs no such treatment.
1845 		 * Doing it like this is more efficient than trapping this
1846 		 * case in all possible locations in the following fixup code.
1847 		 */
1848 
1849 		saved_lr = frame->tf_usr_lr;
1850 		frame->tf_usr_lr = frame->tf_svc_lr;
1851 
1852 		/*
1853 		 * Note the trapframe does not have the SVC r13 so a fault
1854 		 * from an instruction with writeback to r13 in SVC mode is
1855 		 * not allowed. This should not happen as the kstack is
1856 		 * always valid.
1857 		 */
1858 	}
1859 
1860 	/* Get fault address and status from the CPU */
1861 
1862 	fault_pc = frame->tf_pc;
1863 	fault_instruction = *((volatile unsigned int *)fault_pc);
1864 
1865 	/* Decode the fault instruction and fix the registers as needed */
1866 
1867 	/* Was is a swap instruction ? */
1868 	/* Was it a swap instruction? */
1869 	if ((fault_instruction & 0x0fb00ff0) == 0x01000090) {
1870 		DFC_DISASSEMBLE(fault_pc);
1871 	} else if ((fault_instruction & 0x0c000000) == 0x04000000) {
1872 
1873 		/* Was it an ldr/str instruction? */
1874 		/* This is for late abort only */
1875 
1876 		int base;
1877 		int offset;
1878 		int *registers = &frame->tf_r0;
1879 
1880 		DFC_DISASSEMBLE(fault_pc);
1881 
1884 		if ((fault_instruction & (1 << 24)) == 0
1885 		    || (fault_instruction & (1 << 21)) != 0) {
1886 			/* base updated: post-indexed, or writeback bit set */
1887 
1888 			base = (fault_instruction >> 16) & 0x0f;
1889 			if (base == 13 &&
1890 			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
1891 				return ABORT_FIXUP_FAILED;
1892 			if (base == 15)
1893 				return ABORT_FIXUP_FAILED;
1894 			DFC_PRINTF(("late abt fix: r%d=%08x : ",
1895 				       base, registers[base]));
1896 			if ((fault_instruction & (1 << 25)) == 0) {
1897 				/* Immediate offset - easy */
1898 
1899 				offset = fault_instruction & 0xfff;
1900 				if ((fault_instruction & (1 << 23)))
1901 					offset = -offset;
1902 				registers[base] += offset;
1903 				DFC_PRINTF(("imm=%08x ", offset));
1904 			} else {
1905 				/* offset is a shifted register */
1906 				int shift;
1907 
1908 				offset = fault_instruction & 0x0f;
1909 				if (offset == base)
1910 					return ABORT_FIXUP_FAILED;
1911 
1912 				/*
1913 				 * Register offset - harder, as we have
1914 				 * to cope with shifts!
1915 				 */
1916 				offset = registers[offset];
1917 
1918 				if ((fault_instruction & (1 << 4)) == 0)
1919 					/* shift with amount */
1920 					shift = (fault_instruction >> 7) & 0x1f;
1921 				else {
1922 					/* shift with register */
1923 					if ((fault_instruction & (1 << 7)) != 0)
1924 						/* undefined for now so bail out */
1925 						return ABORT_FIXUP_FAILED;
1926 					shift = ((fault_instruction >> 8) & 0xf);
1927 					if (base == shift)
1928 						return ABORT_FIXUP_FAILED;
1929 					DFC_PRINTF(("shift reg=%d ", shift));
1930 					shift = registers[shift];
1931 				}
1932 				DFC_PRINTF(("shift=%08x ", shift));
1933 				switch (((fault_instruction >> 5) & 0x3)) {
1934 				case 0 : /* Logical left */
1935 					offset = (int)(((u_int)offset) << shift);
1936 					break;
1937 				case 1 : /* Logical Right; 0 encodes LSR #32 */
1938 					if (shift == 0) shift = 32;
1939 					offset = (shift >= 32) ? 0 : (int)(((u_int)offset) >> shift);
1940 					break;
1941 				case 2 : /* Arithmetic Right; 0 encodes ASR #32 */
1942 					if (shift == 0) shift = 32;
1943 					offset = (int)(((int)offset) >> ((shift >= 32) ? 31 : shift));
1944 					break;
1945 				case 3 : /* Rotate right (ror or rrx) */
1946 					return ABORT_FIXUP_FAILED;
1947 					break;
1948 				}
1949 
1950 				DFC_PRINTF(("abt: fixed LDR/STR with "
1951 					       "register offset\n"));
1952 				if ((fault_instruction & (1 << 23)))
1953 					offset = -offset;
1954 				DFC_PRINTF(("offset=%08x ", offset));
1955 				registers[base] += offset;
1956 			}
1957 			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
1958 		}
1959 	}
1960 
1961 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1962 
1963 		/* Ok an abort in SVC mode */
1964 
1965 		/*
1966 		 * Undo the r14 shuffling done on entry: the usr slot holds
1967 		 * the SVC r14 (moved there so the registers could be treated
1968 		 * as an array of ints during the fixup), so copy it back to
1969 		 * the SVC slot and restore the saved usr r14.
1970 		 * NOTE: the PC (r15) is already in its slot, and writeback
1971 		 * to r15 is not allowed, so it needs no such treatment.
1972 		 * Doing it like this is more efficient than trapping this
1973 		 * case in all possible locations in the prior fixup code.
1974 		 */
1975 
1976 		frame->tf_svc_lr = frame->tf_usr_lr;
1977 		frame->tf_usr_lr = saved_lr;
1978 
1979 		/*
1980 		 * Note the trapframe does not have the SVC r13 so a fault
1981 		 * from an instruction with writeback to r13 in SVC mode is
1982 		 * not allowed. This should not happen as the kstack is
1983 		 * always valid.
1984 		 */
1985 	}
1986 
1987 	/*
1988 	 * Now let the early-abort fixup routine have a go, in case it
1989 	 * was an LDM, STM, LDC or STC that faulted.
1990 	 */
1991 
1992 	return early_abort_fixup(arg);
1993 }
1994 #endif	/* CPU_ARM6(LATE)/7/7TDMI */
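
/*
 * Illustrative sketch (not compiled in): the shifted-register offset
 * that late_abort_fixup() has to reconstruct.  The shift type is in
 * bits 6:5 and the immediate shift amount in bits 11:7; an amount of
 * zero encodes a shift of 32 for LSR and ASR.
 */
#if 0
static int
shifted_offset(u_int insn, int rm)
{
	int amount = (insn >> 7) & 0x1f;

	switch ((insn >> 5) & 0x3) {
	case 0:	/* LSL */
		return (int)((u_int)rm << amount);
	case 1:	/* LSR; amount 0 encodes 32, which yields 0 */
		return amount ? (int)((u_int)rm >> amount) : 0;
	case 2:	/* ASR; amount 0 encodes 32, which sign-fills */
		return rm >> (amount ? amount : 31);
	default: /* ROR/RRX; the fixup above refuses these */
		return 0;
	}
}
#endif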
1995 
1996 /*
1997  * CPU Setup code
1998  */
1999 
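/*
 * All of the setup routines below funnel their result through
 * cpu_control(clear, set), which (assuming the usual cpufunc_control
 * behaviour) reads the CP15 control register, clears the bits named in
 * its first argument, sets those in its second, and writes the result
 * back only if it changed.  A mask of 0xffffffff therefore replaces
 * the whole register, while a narrower mask (as in arm9_setup() or
 * ixp12x0_setup()) leaves unrelated bits untouched.
 */
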
2000 #if defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) || \
2001 	defined(CPU_ARM8) || defined (CPU_ARM9) || defined (CPU_ARM9E) || \
2002 	defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
2003 	defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
2004 	defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425) || \
2005 	defined(CPU_ARM10) || defined(CPU_ARM11) || defined(CPU_ARM1136) || \
2006 	defined(CPU_FA526)
2007 
2008 #define IGN	0
2009 #define OR	1
2010 #define BIC	2
2011 
2012 struct cpu_option {
2013 	const char *co_name;
2014 	int	co_falseop;
2015 	int	co_trueop;
2016 	int	co_value;
2017 };
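
/*
 * Each entry maps a boot option name onto control-register bits:
 * co_trueop says what to do with co_value when the option is true (OR
 * it in, or BIC it out), co_falseop what to do when it is false, and
 * IGN leaves the bits alone.  For example
 * { "cpu.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE } enables the cache
 * for "cpu.nocache=0" and disables it for "cpu.nocache=1".
 */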
2018 
2019 static u_int parse_cpu_options(char *, struct cpu_option *, u_int);
2020 
2021 static u_int
2022 parse_cpu_options(char *args, struct cpu_option *optlist, u_int cpuctrl)
2026 {
2027 	int integer;
2028 
2029 	if (args == NULL)
2030 		return(cpuctrl);
2031 
2032 	while (optlist->co_name) {
2033 		if (get_bootconf_option(args, optlist->co_name,
2034 		    BOOTOPT_TYPE_BOOLEAN, &integer)) {
2035 			if (integer) {
2036 				if (optlist->co_trueop == OR)
2037 					cpuctrl |= optlist->co_value;
2038 				else if (optlist->co_trueop == BIC)
2039 					cpuctrl &= ~optlist->co_value;
2040 			} else {
2041 				if (optlist->co_falseop == OR)
2042 					cpuctrl |= optlist->co_value;
2043 				else if (optlist->co_falseop == BIC)
2044 					cpuctrl &= ~optlist->co_value;
2045 			}
2046 		}
2047 		++optlist;
2048 	}
2049 	return(cpuctrl);
2050 }
2051 #endif /* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 || ... || CPU_FA526 */
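
/*
 * Usage sketch (not compiled in), assuming the usual name=value boot
 * argument syntax understood by get_bootconf_option(); the option
 * names refer to the arm678_options table below.
 */
#if 0
static u_int
cpu_options_example(void)
{
	u_int ctrl = CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;

	/* "cpu.nocache=1" BICs the cache enable out of ctrl and
	 * "cpu.writebuf=0" BICs the write buffer enable. */
	return parse_cpu_options("cpu.nocache=1 cpu.writebuf=0",
	    arm678_options, ctrl);
}
#endif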
2052 
2053 #if defined (CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) \
2054 	|| defined(CPU_ARM8)
2055 struct cpu_option arm678_options[] = {
2056 #ifdef COMPAT_12
2057 	{ "nocache",		IGN, BIC, CPU_CONTROL_IDC_ENABLE },
2058 	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
2059 #endif	/* COMPAT_12 */
2060 	{ "cpu.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
2061 	{ "cpu.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
2062 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2063 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2064 	{ NULL,			IGN, IGN, 0 }
2065 };
2066 
2067 #endif	/* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 */
2068 
2069 #ifdef CPU_ARM6
2070 struct cpu_option arm6_options[] = {
2071 	{ "arm6.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
2072 	{ "arm6.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
2073 	{ "arm6.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2074 	{ "arm6.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2075 	{ NULL,			IGN, IGN, 0 }
2076 };
2077 
2078 void
2079 arm6_setup(args)
2080 arm6_setup(char *args)
2082 {
2083 
2084 	/* Set up default control registers bits */
2085 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2086 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2087 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
2088 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2089 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2090 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
2091 		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE
2092 		 | CPU_CONTROL_AFLT_ENABLE;
2093 
2094 #ifdef ARM6_LATE_ABORT
2095 	cpuctrl |= CPU_CONTROL_LABT_ENABLE;
2096 #endif	/* ARM6_LATE_ABORT */
2097 
2098 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2099 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2100 #endif
2101 
2102 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
2103 	cpuctrl = parse_cpu_options(args, arm6_options, cpuctrl);
2104 
2105 #ifdef __ARMEB__
2106 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2107 #endif
2108 
2109 	/* Clear out the cache */
2110 	cpu_idcache_wbinv_all();
2111 
2112 	/* Set the control register */
2113 	curcpu()->ci_ctrl = cpuctrl;
2114 	cpu_control(0xffffffff, cpuctrl);
2115 }
2116 #endif	/* CPU_ARM6 */
2117 
2118 #ifdef CPU_ARM7
2119 struct cpu_option arm7_options[] = {
2120 	{ "arm7.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
2121 	{ "arm7.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
2122 	{ "arm7.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2123 	{ "arm7.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2124 #ifdef COMPAT_12
2125 	{ "fpaclk2",		BIC, OR,  CPU_CONTROL_CPCLK },
2126 #endif	/* COMPAT_12 */
2127 	{ "arm700.fpaclk",	BIC, OR,  CPU_CONTROL_CPCLK },
2128 	{ NULL,			IGN, IGN, 0 }
2129 };
2130 
2131 void
2132 arm7_setup(char *args)
2134 {
2135 	int cpuctrl, cpuctrlmask;
2136 
2137 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2138 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2139 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
2140 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2141 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2142 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
2143 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_LABT_ENABLE
2144 		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE
2145 		 | CPU_CONTROL_AFLT_ENABLE;
2146 
2147 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2148 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2149 #endif
2150 
2151 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
2152 	cpuctrl = parse_cpu_options(args, arm7_options, cpuctrl);
2153 
2154 #ifdef __ARMEB__
2155 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2156 #endif
2157 
2158 	/* Clear out the cache */
2159 	cpu_idcache_wbinv_all();
2160 
2161 	/* Set the control register */
2162 	curcpu()->ci_ctrl = cpuctrl;
2163 	cpu_control(0xffffffff, cpuctrl);
2164 }
2165 #endif	/* CPU_ARM7 */
2166 
2167 #ifdef CPU_ARM7TDMI
2168 struct cpu_option arm7tdmi_options[] = {
2169 	{ "arm7.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
2170 	{ "arm7.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
2171 	{ "arm7.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2172 	{ "arm7.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2173 #ifdef COMPAT_12
2174 	{ "fpaclk2",		BIC, OR,  CPU_CONTROL_CPCLK },
2175 #endif	/* COMPAT_12 */
2176 	{ "arm700.fpaclk",	BIC, OR,  CPU_CONTROL_CPCLK },
2177 	{ NULL,			IGN, IGN, 0 }
2178 };
2179 
2180 void
2181 arm7tdmi_setup(char *args)
2183 {
2184 	int cpuctrl;
2185 
2186 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2187 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2188 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
2189 
2190 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
2191 	cpuctrl = parse_cpu_options(args, arm7tdmi_options, cpuctrl);
2192 
2193 #ifdef __ARMEB__
2194 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2195 #endif
2196 
2197 	/* Clear out the cache */
2198 	cpu_idcache_wbinv_all();
2199 
2200 	/* Set the control register */
2201 	curcpu()->ci_ctrl = cpuctrl;
2202 	cpu_control(0xffffffff, cpuctrl);
2203 }
2204 #endif	/* CPU_ARM7TDMI */
2205 
2206 #ifdef CPU_ARM8
2207 struct cpu_option arm8_options[] = {
2208 	{ "arm8.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
2209 	{ "arm8.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
2210 	{ "arm8.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2211 	{ "arm8.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2212 #ifdef COMPAT_12
2213 	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2214 #endif	/* COMPAT_12 */
2215 	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2216 	{ "arm8.branchpredict",	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2217 	{ NULL,			IGN, IGN, 0 }
2218 };
2219 
2220 void
2221 arm8_setup(char *args)
2223 {
2224 	int integer;
2225 	int cpuctrl, cpuctrlmask;
2226 	int clocktest;
2227 	int setclock = 0;
2228 
2229 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2230 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2231 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
2232 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2233 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2234 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
2235 		 | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_ROM_ENABLE
2236 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE;
2237 
2238 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2239 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2240 #endif
2241 
2242 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
2243 	cpuctrl = parse_cpu_options(args, arm8_options, cpuctrl);
2244 
2245 #ifdef __ARMEB__
2246 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2247 #endif
2248 
2249 	/* Get clock configuration */
2250 	clocktest = arm8_clock_config(0, 0) & 0x0f;
2251 
2252 	/* Special ARM8 clock and test configuration */
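	/*
	 * The value is packed as the masks and shifts below imply: bit 0
	 * selects dynamic clock switching, bit 1 synchronous mode, with
	 * a two-bit fast-clock field and a three-bit test field above
	 * them.
	 */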
2253 	if (get_bootconf_option(args, "arm8.clock.reset", BOOTOPT_TYPE_BOOLEAN, &integer)) {
2254 		clocktest = 0;
2255 		setclock = 1;
2256 	}
2257 	if (get_bootconf_option(args, "arm8.clock.dynamic", BOOTOPT_TYPE_BOOLEAN, &integer)) {
2258 		if (integer)
2259 			clocktest |= 0x01;
2260 		else
2261 			clocktest &= ~(0x01);
2262 		setclock = 1;
2263 	}
2264 	if (get_bootconf_option(args, "arm8.clock.sync", BOOTOPT_TYPE_BOOLEAN, &integer)) {
2265 		if (integer)
2266 			clocktest |= 0x02;
2267 		else
2268 			clocktest &= ~(0x02);
2269 		setclock = 1;
2270 	}
2271 	if (get_bootconf_option(args, "arm8.clock.fast", BOOTOPT_TYPE_BININT, &integer)) {
2272 		clocktest = (clocktest & ~0x0c) | (integer & 3) << 2;
2273 		setclock = 1;
2274 	}
2275 	if (get_bootconf_option(args, "arm8.test", BOOTOPT_TYPE_BININT, &integer)) {
2276 		clocktest |= (integer & 7) << 5;
2277 		setclock = 1;
2278 	}
2279 
2280 	/* Clear out the cache */
2281 	cpu_idcache_wbinv_all();
2282 
2283 	/* Set the control register */
2284 	curcpu()->ci_ctrl = cpuctrl;
2285 	cpu_control(0xffffffff, cpuctrl);
2286 
2287 	/* Set the clock/test register */
2288 	if (setclock)
2289 		arm8_clock_config(0x7f, clocktest);
2290 }
2291 #endif	/* CPU_ARM8 */
2292 
2293 #ifdef CPU_ARM9
2294 struct cpu_option arm9_options[] = {
2295 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2296 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2297 	{ "arm9.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2298 	{ "arm9.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2299 	{ "arm9.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2300 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2301 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2302 	{ "arm9.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2303 	{ NULL,			IGN, IGN, 0 }
2304 };
2305 
2306 void
2307 arm9_setup(char *args)
2309 {
2310 	int cpuctrl, cpuctrlmask;
2311 
2312 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2313 	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2314 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2315 	    | CPU_CONTROL_WBUF_ENABLE;
2316 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2317 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2318 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2319 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2320 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2321 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
2322 		 | CPU_CONTROL_ROUNDROBIN;
2323 
2324 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2325 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2326 #endif
2327 
2328 	cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);
2329 
2330 #ifdef __ARMEB__
2331 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2332 #endif
2333 
2334 	if (vector_page == ARM_VECTORS_HIGH)
2335 		cpuctrl |= CPU_CONTROL_VECRELOC;
2336 
2337 	/* Clear out the cache */
2338 	cpu_idcache_wbinv_all();
2339 
2340 	/* Set the control register */
2341 	curcpu()->ci_ctrl = cpuctrl;
2342 	cpu_control(cpuctrlmask, cpuctrl);
2344 }
2345 #endif	/* CPU_ARM9 */
2346 
2347 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
2348 struct cpu_option arm10_options[] = {
2349 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2350 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2351 	{ "arm10.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2352 	{ "arm10.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2353 	{ "arm10.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2354 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2355 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2356 	{ "arm10.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2357 	{ NULL,			IGN, IGN, 0 }
2358 };
2359 
2360 void
2361 arm10_setup(char *args)
2363 {
2364 	int cpuctrl, cpuctrlmask;
2365 
2366 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2367 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2368 	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
2369 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2370 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2371 	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2372 	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2373 	    | CPU_CONTROL_BPRD_ENABLE
2374 	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
2375 
2376 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2377 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2378 #endif
2379 
2380 	cpuctrl = parse_cpu_options(args, arm10_options, cpuctrl);
2381 
2382 #ifdef __ARMEB__
2383 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2384 #endif
2385 
2386 	if (vector_page == ARM_VECTORS_HIGH)
2387 		cpuctrl |= CPU_CONTROL_VECRELOC;
2388 
2389 	/* Clear out the cache */
2390 	cpu_idcache_wbinv_all();
2391 
2392 	/* Now really make sure they are clean.  */
2393 	__asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
2394 
2395 	/* Set the control register */
2396 	curcpu()->ci_ctrl = cpuctrl;
2397 	cpu_control(0xffffffff, cpuctrl);
2398 
2399 	/* And again. */
2400 	cpu_idcache_wbinv_all();
2401 }
2402 #endif	/* CPU_ARM9E || CPU_ARM10 */
2403 
2404 #if defined(CPU_ARM11)
2405 struct cpu_option arm11_options[] = {
2406 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2407 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2408 	{ "arm11.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2409 	{ "arm11.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2410 	{ "arm11.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2411 	{ NULL,			IGN, IGN, 0 }
2412 };
2413 
2414 void
2415 arm11_setup(char *args)
2417 {
2418 	int cpuctrl, cpuctrlmask;
2419 
2420 #if defined(PROCESS_ID_IS_CURCPU)
2421 	/* set curcpu() */
2422 	__asm("mcr\tp15, 0, %0, c13, c0, 4" : : "r"(&cpu_info_store));
2423 #elif defined(PROCESS_ID_IS_CURLWP)
2424 	/* set curlwp() */
2425 	__asm("mcr\tp15, 0, %0, c13, c0, 4" : : "r"(&lwp0));
2426 #endif
2427 
2428 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2429 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2430 	    /* | CPU_CONTROL_BPRD_ENABLE */;
2431 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2432 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2433 	    | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BPRD_ENABLE
2434 	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2435 	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
2436 
2437 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2438 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2439 #endif
2440 
2441 	cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);
2442 
2443 #ifdef __ARMEB__
2444 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2445 #endif
2446 
2447 	if (vector_page == ARM_VECTORS_HIGH)
2448 		cpuctrl |= CPU_CONTROL_VECRELOC;
2449 
2450 	/* Clear out the cache */
2451 	cpu_idcache_wbinv_all();
2452 
2453 	/* Now really make sure they are clean.  */
2454 	__asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
2455 
2456 	/* Allow detection code to find the VFP if it's fitted.  */
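	/* (0x0fffffff sets both access bits for coprocessors 0-13 in CPACR.) */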
2457 	__asm volatile ("mcr\tp15, 0, %0, c1, c0, 2" : : "r" (0x0fffffff));
2458 
2459 	/* Set the control register */
2460 	curcpu()->ci_ctrl = cpuctrl;
2461 	cpu_control(0xffffffff, cpuctrl);
2462 
2463 	/* And again. */
2464 	cpu_idcache_wbinv_all();
2465 }
2466 #endif	/* CPU_ARM11 */
2467 
2468 #if defined(CPU_ARM1136)
2469 void
2470 arm1136_setup(char *args)
2471 {
2472 	int cpuctrl, cpuctrl_wax;
2473 	uint32_t auxctrl, auxctrl_wax;
2474 	uint32_t tmp, tmp2;
2475 	uint32_t sbz = 0;
2476 	uint32_t cpuid;
2477 
2478 #if defined(PROCESS_ID_IS_CURCPU)
2479 	/* set curcpu() */
2480 	__asm("mcr\tp15, 0, %0, c13, c0, 4" : : "r"(&cpu_info_store));
2481 #elif defined(PROCESS_ID_IS_CURLWP)
2482 	/* set curlwp() */
2483 	__asm("mcr\tp15, 0, %0, c13, c0, 4" : : "r"(&lwp0));
2484 #endif
2485 
2486 	cpuid = cpu_id();
2487 
2488 	cpuctrl =
2489 		CPU_CONTROL_MMU_ENABLE  |
2490 		CPU_CONTROL_DC_ENABLE   |
2491 		CPU_CONTROL_WBUF_ENABLE |
2492 		CPU_CONTROL_32BP_ENABLE |
2493 		CPU_CONTROL_32BD_ENABLE |
2494 		CPU_CONTROL_LABT_ENABLE |
2495 		CPU_CONTROL_SYST_ENABLE |
2496 		CPU_CONTROL_IC_ENABLE;
2497 
2498 	/*
2499 	 * "write as existing" bits: preserved from the current value;
2500 	 * the inverse is the mask passed to cpu_control() below
2501 	 */
2502 	cpuctrl_wax =
2503 		(3 << 30) |
2504 		(1 << 29) |
2505 		(1 << 28) |
2506 		(3 << 26) |
2507 		(3 << 19) |
2508 		(1 << 17);
2509 
2510 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2511 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2512 #endif
2513 
2514 	cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);
2515 
2516 #ifdef __ARMEB__
2517 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2518 #endif
2519 
2520 	if (vector_page == ARM_VECTORS_HIGH)
2521 		cpuctrl |= CPU_CONTROL_VECRELOC;
2522 
2523 	auxctrl = 0;
2524 	auxctrl_wax = ~0;
2525 	/* This option enables the workaround for the 364296 ARM1136
2526 	 * r0pX errata (possible cache data corruption with
2527 	 * hit-under-miss enabled). It sets the undocumented bit 31 in
2528 	 * the auxiliary control register and the FI bit in the control
2529 	 * register, thus disabling hit-under-miss without putting the
2530 	 * processor into full low interrupt latency mode. ARM11MPCore
2531 	 * is not affected.
2532 	 */
2533 	if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1136JS) { /* ARM1136JSr0pX */
2534 		cpuctrl |= CPU_CONTROL_FI_ENABLE;
2535 		auxctrl = ARM11R0_AUXCTL_PFI;
2536 		auxctrl_wax = ~ARM11R0_AUXCTL_PFI;
2537 	}
2538 
2539 	/* Clear out the cache */
2540 	cpu_idcache_wbinv_all();
2541 
2542 	/* Now really make sure they are clean.  */
2543 	__asm volatile ("mcr\tp15, 0, %0, c7, c7, 0" : : "r"(sbz));
2544 
2545 	/* Allow detection code to find the VFP if it's fitted.  */
2546 	__asm volatile ("mcr\tp15, 0, %0, c1, c0, 2" : : "r" (0x0fffffff));
2547 
2548 	/* Set the control register */
2549 	curcpu()->ci_ctrl = cpuctrl;
2550 	cpu_control(~cpuctrl_wax, cpuctrl);
2551 
2552 	__asm volatile ("mrc	p15, 0, %0, c1, c0, 1\n\t"
2553 			"bic	%1, %0, %2\n\t"
2554 			"orr	%1, %1, %3\n\t"
2555 			"teq	%0, %1\n\t"
2556 			"mcrne	p15, 0, %1, c1, c0, 1\n\t"
2557 			: "=r"(tmp), "=r"(tmp2) :
2558 			  "r"(~auxctrl_wax), "r"(auxctrl));
2559 
2560 	/* And again. */
2561 	cpu_idcache_wbinv_all();
2562 }
2563 #endif	/* CPU_ARM1136 */
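
/*
 * Sketch (not compiled in) of the read-modify-write the inline asm in
 * arm1136_setup() performs on the auxiliary control register: keep the
 * "write as existing" bits, OR in the new ones, and skip the write
 * when nothing changes.  read_auxctl()/write_auxctl() are hypothetical
 * stand-ins for the mrc/mcr pair.
 */
#if 0
static void
auxctl_rmw(uint32_t auxctrl, uint32_t auxctrl_wax)
{
	uint32_t old = read_auxctl();	/* mrc p15, 0, ..., c1, c0, 1 */
	uint32_t want = (old & auxctrl_wax) | auxctrl;

	if (want != old)
		write_auxctl(want);	/* mcr p15, 0, ..., c1, c0, 1 */
}
#endif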
2564 
2565 #ifdef CPU_SA110
2566 struct cpu_option sa110_options[] = {
2567 #ifdef COMPAT_12
2568 	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2569 	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
2570 #endif	/* COMPAT_12 */
2571 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2572 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2573 	{ "sa110.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2574 	{ "sa110.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2575 	{ "sa110.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2576 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2577 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2578 	{ "sa110.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2579 	{ NULL,			IGN, IGN, 0 }
2580 };
2581 
2582 void
2583 sa110_setup(char *args)
2585 {
2586 	int cpuctrl, cpuctrlmask;
2587 
2588 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2589 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2590 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2591 		 | CPU_CONTROL_WBUF_ENABLE;
2592 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2593 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2594 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2595 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2596 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2597 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
2598 		 | CPU_CONTROL_CPCLK;
2599 
2600 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2601 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2602 #endif
2603 
2604 	cpuctrl = parse_cpu_options(args, sa110_options, cpuctrl);
2605 
2606 #ifdef __ARMEB__
2607 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2608 #endif
2609 
2610 	if (vector_page == ARM_VECTORS_HIGH)
2611 		cpuctrl |= CPU_CONTROL_VECRELOC;
2612 
2613 	/* Clear out the cache */
2614 	cpu_idcache_wbinv_all();
2615 
2616 	/* Set the control register */
2617 	curcpu()->ci_ctrl = cpuctrl;
2618 /*	cpu_control(cpuctrlmask, cpuctrl);*/
2619 	cpu_control(0xffffffff, cpuctrl);
2620 
2621 	/*
2622 	 * Enable clock switching.  Note that this neither reads nor
2623 	 * writes r0; the register is named only to make the asm valid.
2624 	 */
2625 	__asm ("mcr 15, 0, r0, c15, c1, 2");
2626 }
2627 #endif	/* CPU_SA110 */
2628 
2629 #if defined(CPU_SA1100) || defined(CPU_SA1110)
2630 struct cpu_option sa11x0_options[] = {
2631 #ifdef COMPAT_12
2632 	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2633 	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
2634 #endif	/* COMPAT_12 */
2635 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2636 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2637 	{ "sa11x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2638 	{ "sa11x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2639 	{ "sa11x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2640 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2641 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2642 	{ "sa11x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2643 	{ NULL,			IGN, IGN, 0 }
2644 };
2645 
2646 void
2647 sa11x0_setup(char *args)
2649 {
2650 	int cpuctrl, cpuctrlmask;
2651 
2652 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2653 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2654 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2655 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
2656 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2657 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2658 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2659 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2660 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2661 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
2662 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
2663 
2664 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2665 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2666 #endif
2667 
2668 	cpuctrl = parse_cpu_options(args, sa11x0_options, cpuctrl);
2669 
2670 #ifdef __ARMEB__
2671 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2672 #endif
2673 
2674 	if (vector_page == ARM_VECTORS_HIGH)
2675 		cpuctrl |= CPU_CONTROL_VECRELOC;
2676 
2677 	/* Clear out the cache */
2678 	cpu_idcache_wbinv_all();
2679 
2680 	/* Set the control register */
2681 	curcpu()->ci_ctrl = cpuctrl;
2682 	cpu_control(0xffffffff, cpuctrl);
2683 }
2684 #endif	/* CPU_SA1100 || CPU_SA1110 */
2685 
2686 #if defined(CPU_FA526)
2687 struct cpu_option fa526_options[] = {
2688 #ifdef COMPAT_12
2689 	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2690 	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
2691 #endif	/* COMPAT_12 */
2692 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2693 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2694 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2695 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2696 	{ NULL,			IGN, IGN, 0 }
2697 };
2698 
2699 void
2700 fa526_setup(char *args)
2701 {
2702 	int cpuctrl, cpuctrlmask;
2703 
2704 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2705 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2706 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2707 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
2708 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2709 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2710 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2711 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2712 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2713 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
2714 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
2715 
2716 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2717 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2718 #endif
2719 
2720 	cpuctrl = parse_cpu_options(args, fa526_options, cpuctrl);
2721 
2722 #ifdef __ARMEB__
2723 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2724 #endif
2725 
2726 	if (vector_page == ARM_VECTORS_HIGH)
2727 		cpuctrl |= CPU_CONTROL_VECRELOC;
2728 
2729 	/* Clear out the cache */
2730 	cpu_idcache_wbinv_all();
2731 
2732 	/* Set the control register */
2733 	curcpu()->ci_ctrl = cpuctrl;
2734 	cpu_control(0xffffffff, cpuctrl);
2735 }
2736 #endif	/* CPU_FA526 */
2737 
2738 #if defined(CPU_IXP12X0)
2739 struct cpu_option ixp12x0_options[] = {
2740 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2741 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2742 	{ "ixp12x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2743 	{ "ixp12x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2744 	{ "ixp12x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2745 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2746 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2747 	{ "ixp12x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2748 	{ NULL,			IGN, IGN, 0 }
2749 };
2750 
2751 void
2752 ixp12x0_setup(char *args)
2754 {
2755 	int cpuctrl, cpuctrlmask;
2756 
2758 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE
2759 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_SYST_ENABLE
2760 		 | CPU_CONTROL_IC_ENABLE;
2761 
2762 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_AFLT_ENABLE
2763 		 | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE
2764 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_SYST_ENABLE
2765 		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_IC_ENABLE
2766 		 | CPU_CONTROL_VECRELOC;
2767 
2768 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2769 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2770 #endif
2771 
2772 	cpuctrl = parse_cpu_options(args, ixp12x0_options, cpuctrl);
2773 
2774 #ifdef __ARMEB__
2775 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2776 #endif
2777 
2778 	if (vector_page == ARM_VECTORS_HIGH)
2779 		cpuctrl |= CPU_CONTROL_VECRELOC;
2780 
2781 	/* Clear out the cache */
2782 	cpu_idcache_wbinv_all();
2783 
2784 	/* Set the control register */
2785 	curcpu()->ci_ctrl = cpuctrl;
2786 	/* cpu_control(0xffffffff, cpuctrl); */
2787 	cpu_control(cpuctrlmask, cpuctrl);
2788 }
2789 #endif /* CPU_IXP12X0 */
2790 
2791 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
2792     defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425)
2793 struct cpu_option xscale_options[] = {
2794 #ifdef COMPAT_12
2795 	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2796 	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2797 #endif	/* COMPAT_12 */
2798 	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2799 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2800 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2801 	{ "xscale.branchpredict", BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2802 	{ "xscale.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2803 	{ "xscale.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2804 	{ "xscale.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2805 	{ NULL,			IGN, IGN, 0 }
2806 };
2807 
2808 void
2809 xscale_setup(char *args)
2811 {
2812 	uint32_t auxctl;
2813 	int cpuctrl, cpuctrlmask;
2814 
2815 	/*
2816 	 * The XScale Write Buffer is always enabled.  Our option
2817 	 * is to enable/disable coalescing.  Note that bits 6:3
2818 	 * must always be enabled.
2819 	 */
2820 
2821 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2822 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2823 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2824 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
2825 		 | CPU_CONTROL_BPRD_ENABLE;
2826 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2827 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2828 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2829 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2830 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2831 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
2832 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
2833 
2834 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2835 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2836 #endif
2837 
2838 	cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl);
2839 
2840 #ifdef __ARMEB__
2841 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2842 #endif
2843 
2844 	if (vector_page == ARM_VECTORS_HIGH)
2845 		cpuctrl |= CPU_CONTROL_VECRELOC;
2846 
2847 	/* Clear out the cache */
2848 	cpu_idcache_wbinv_all();
2849 
2850 	/*
2851 	 * Set the control register.  Note that bits 6:3 must always
2852 	 * be set to 1.
2853 	 */
2854 	curcpu()->ci_ctrl = cpuctrl;
2855 /*	cpu_control(cpuctrlmask, cpuctrl);*/
2856 	cpu_control(0xffffffff, cpuctrl);
2857 
2858 	/* Configure write coalescing (disabled if XSCALE_NO_COALESCE_WRITES) */
2859 	__asm volatile("mrc p15, 0, %0, c1, c0, 1"
2860 		: "=r" (auxctl));
2861 #ifdef XSCALE_NO_COALESCE_WRITES
2862 	auxctl |= XSCALE_AUXCTL_K;
2863 #else
2864 	auxctl &= ~XSCALE_AUXCTL_K;
2865 #endif
2866 	__asm volatile("mcr p15, 0, %0, c1, c0, 1"
2867 		: : "r" (auxctl));
2868 }
2869 #endif	/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || __CPU_XSCALE_PXA2XX || CPU_XSCALE_IXP425 */
2870