xref: /netbsd-src/sys/arch/arm/arm/cpufunc.c (revision 8b0f9554ff8762542c4defc4f70e1eb76fb508fa)
1 /*	$NetBSD: cpufunc.c,v 1.81 2007/04/15 20:29:21 matt Exp $	*/
2 
3 /*
4  * arm7tdmi support code Copyright (c) 2001 John Fremlin
5  * arm8 support code Copyright (c) 1997 ARM Limited
6  * arm8 support code Copyright (c) 1997 Causality Limited
7  * arm9 support code Copyright (C) 2001 ARM Ltd
8  * Copyright (c) 1997 Mark Brinicombe.
9  * Copyright (c) 1997 Causality Limited
10  * All rights reserved.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. All advertising materials mentioning features or use of this software
21  *    must display the following acknowledgement:
22  *	This product includes software developed by Causality Limited.
23  * 4. The name of Causality Limited may not be used to endorse or promote
24  *    products derived from this software without specific prior written
25  *    permission.
26  *
27  * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
28  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
29  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
30  * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
31  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
32  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
33  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37  * SUCH DAMAGE.
38  *
39  * RiscBSD kernel project
40  *
41  * cpufunc.c
42  *
43  * C functions for supporting CPU / MMU / TLB specific operations.
44  *
45  * Created      : 30/01/97
46  */
47 
48 #include <sys/cdefs.h>
49 __KERNEL_RCSID(0, "$NetBSD: cpufunc.c,v 1.81 2007/04/15 20:29:21 matt Exp $");
50 
51 #include "opt_compat_netbsd.h"
52 #include "opt_cpuoptions.h"
53 #include "opt_perfctrs.h"
54 
55 #include <sys/types.h>
56 #include <sys/param.h>
57 #include <sys/pmc.h>
58 #include <sys/systm.h>
59 #include <machine/cpu.h>
60 #include <machine/bootconfig.h>
61 #include <arch/arm/arm/disassem.h>
62 
63 #include <uvm/uvm.h>
64 
65 #include <arm/cpuconf.h>
66 #include <arm/cpufunc.h>
67 
68 #ifdef CPU_XSCALE_80200
69 #include <arm/xscale/i80200reg.h>
70 #include <arm/xscale/i80200var.h>
71 #endif
72 
73 #ifdef CPU_XSCALE_80321
74 #include <arm/xscale/i80321reg.h>
75 #include <arm/xscale/i80321var.h>
76 #endif
77 
78 #ifdef CPU_XSCALE_IXP425
79 #include <arm/xscale/ixp425reg.h>
80 #include <arm/xscale/ixp425var.h>
81 #endif
82 
83 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321)
84 #include <arm/xscale/xscalereg.h>
85 #endif
86 
87 #if defined(PERFCTRS)
88 struct arm_pmc_funcs *arm_pmc;
89 #endif
90 
91 /* PRIMARY CACHE VARIABLES */
92 int	arm_picache_size;
93 int	arm_picache_line_size;
94 int	arm_picache_ways;
95 
96 int	arm_pdcache_size;	/* and unified */
97 int	arm_pdcache_line_size;
98 int	arm_pdcache_ways;
99 
100 int	arm_pcache_type;
101 int	arm_pcache_unified;
102 
103 int	arm_dcache_align;
104 int	arm_dcache_align_mask;
105 
106 /* 1 == use cpu_sleep(), 0 == don't */
107 int cpu_do_powersave;
108 
109 #ifdef CPU_ARM2
110 struct cpu_functions arm2_cpufuncs = {
111 	/* CPU functions */
112 
113 	.cf_id			= arm2_id,
114 	.cf_cpwait		= cpufunc_nullop,
115 
116 	/* MMU functions */
117 
118 	.cf_control		= (void *)cpufunc_nullop,
119 
120 	/* TLB functions */
121 
122 	.cf_tlb_flushID		= cpufunc_nullop,
123 	.cf_tlb_flushID_SE	= (void *)cpufunc_nullop,
124 	.cf_tlb_flushI		= cpufunc_nullop,
125 	.cf_tlb_flushI_SE	= (void *)cpufunc_nullop,
126 	.cf_tlb_flushD		= cpufunc_nullop,
127 	.cf_tlb_flushD_SE	= (void *)cpufunc_nullop,
128 
129 	/* Cache operations */
130 
131 	.cf_icache_sync_all	= cpufunc_nullop,
132 	.cf_icache_sync_range	= (void *) cpufunc_nullop,
133 
134 	.cf_dcache_wbinv_all	= arm3_cache_flush,
135 	.cf_dcache_wbinv_range	= (void *)cpufunc_nullop,
136 	.cf_dcache_inv_range	= (void *)cpufunc_nullop,
137 	.cf_dcache_wb_range	= (void *)cpufunc_nullop,
138 
139 	.cf_idcache_wbinv_all	= cpufunc_nullop,
140 	.cf_idcache_wbinv_range	= (void *)cpufunc_nullop,
141 
142 	/* Other functions */
143 
144 	.cf_flush_prefetchbuf	= cpufunc_nullop,
145 	.cf_drain_writebuf	= cpufunc_nullop,
146 	.cf_flush_brnchtgt_C	= cpufunc_nullop,
147 	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,
148 
149 	.cf_sleep		= (void *)cpufunc_nullop,
150 
151 	/* Soft functions */
152 
153 	.cf_dataabt_fixup	= early_abort_fixup,
154 	.cf_prefetchabt_fixup	= cpufunc_null_fixup,
155 
156 	.cf_setup		= (void *)cpufunc_nullop
157 
158 };
159 #endif	/* CPU_ARM2 */
160 
161 #ifdef CPU_ARM250
162 struct cpu_functions arm250_cpufuncs = {
163 	/* CPU functions */
164 
165 	.cf_id			= arm250_id,
166 	.cf_cpwait		= cpufunc_nullop,
167 
168 	/* MMU functions */
169 
170 	.cf_control		= (void *)cpufunc_nullop,
171 
172 	/* TLB functions */
173 
174 	.cf_tlb_flushID		= cpufunc_nullop,
175 	.cf_tlb_flushID_SE	= (void *)cpufunc_nullop,
176 	.cf_tlb_flushI		= cpufunc_nullop,
177 	.cf_tlb_flushI_SE	= (void *)cpufunc_nullop,
178 	.cf_tlb_flushD		= cpufunc_nullop,
179 	.cf_tlb_flushD_SE	= (void *)cpufunc_nullop,
180 
181 	/* Cache operations */
182 
183 	.cf_icache_sync_all	= cpufunc_nullop,
184 	.cf_icache_sync_range	= (void *) cpufunc_nullop,
185 
186 	.cf_dcache_wbinv_all	= arm3_cache_flush,
187 	.cf_dcache_wbinv_range	= (void *)cpufunc_nullop,
188 	.cf_dcache_inv_range	= (void *)cpufunc_nullop,
189 	.cf_dcache_wb_range	= (void *)cpufunc_nullop,
190 
191 	.cf_idcache_wbinv_all	= cpufunc_nullop,
192 	.cf_idcache_wbinv_range	= (void *)cpufunc_nullop,
193 
194 	/* Other functions */
195 
196 	.cf_flush_prefetchbuf	= cpufunc_nullop,
197 	.cf_drain_writebuf	= cpufunc_nullop,
198 	.cf_flush_brnchtgt_C	= cpufunc_nullop,
199 	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,
200 
201 	.cf_sleep		= (void *)cpufunc_nullop,
202 
203 	/* Soft functions */
204 
205 	.cf_dataabt_fixup	= early_abort_fixup,
206 	.cf_prefetchabt_fixup	= cpufunc_null_fixup,
207 
208 	.cf_setup		= (void *)cpufunc_nullop
209 
210 };
211 #endif	/* CPU_ARM250 */
212 
213 #ifdef CPU_ARM3
214 struct cpu_functions arm3_cpufuncs = {
215 	/* CPU functions */
216 
217 	.cf_id			= cpufunc_id,
218 	.cf_cpwait		= cpufunc_nullop,
219 
220 	/* MMU functions */
221 
222 	.cf_control		= arm3_control,
223 
224 	/* TLB functions */
225 
226 	.cf_tlb_flushID		= cpufunc_nullop,
227 	.cf_tlb_flushID_SE	= (void *)cpufunc_nullop,
228 	.cf_tlb_flushI		= cpufunc_nullop,
229 	.cf_tlb_flushI_SE	= (void *)cpufunc_nullop,
230 	.cf_tlb_flushD		= cpufunc_nullop,
231 	.cf_tlb_flushD_SE	= (void *)cpufunc_nullop,
232 
233 	/* Cache operations */
234 
235 	.cf_icache_sync_all	= cpufunc_nullop,
236 	.cf_icache_sync_range	= (void *) cpufunc_nullop,
237 
238 	.cf_dcache_wbinv_all	= arm3_cache_flush,
239 	.cf_dcache_wbinv_range	= (void *)arm3_cache_flush,
240 	.cf_dcache_inv_range	= (void *)arm3_cache_flush,
241 	.cf_dcache_wb_range	= (void *)cpufunc_nullop,
242 
243 	.cf_idcache_wbinv_all	= arm3_cache_flush,
244 	.cf_idcache_wbinv_range	= (void *)arm3_cache_flush,
245 
246 	/* Other functions */
247 
248 	.cf_flush_prefetchbuf	= cpufunc_nullop,
249 	.cf_drain_writebuf	= cpufunc_nullop,
250 	.cf_flush_brnchtgt_C	= cpufunc_nullop,
251 	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,
252 
253 	.cf_sleep		= (void *)cpufunc_nullop,
254 
255 	/* Soft functions */
256 
257 	.cf_dataabt_fixup	= early_abort_fixup,
258 	.cf_prefetchabt_fixup	= cpufunc_null_fixup,
259 
260 	.cf_setup		= (void *)cpufunc_nullop
261 
262 };
263 #endif	/* CPU_ARM3 */
264 
265 #ifdef CPU_ARM6
266 struct cpu_functions arm6_cpufuncs = {
267 	/* CPU functions */
268 
269 	.cf_id			= cpufunc_id,
270 	.cf_cpwait		= cpufunc_nullop,
271 
272 	/* MMU functions */
273 
274 	.cf_control		= cpufunc_control,
275 	.cf_domains		= cpufunc_domains,
276 	.cf_setttb		= arm67_setttb,
277 	.cf_faultstatus		= cpufunc_faultstatus,
278 	.cf_faultaddress	= cpufunc_faultaddress,
279 
280 	/* TLB functions */
281 
282 	.cf_tlb_flushID		= arm67_tlb_flush,
283 	.cf_tlb_flushID_SE	= arm67_tlb_purge,
284 	.cf_tlb_flushI		= arm67_tlb_flush,
285 	.cf_tlb_flushI_SE	= arm67_tlb_purge,
286 	.cf_tlb_flushD		= arm67_tlb_flush,
287 	.cf_tlb_flushD_SE	= arm67_tlb_purge,
288 
289 	/* Cache operations */
290 
291 	.cf_icache_sync_all	= cpufunc_nullop,
292 	.cf_icache_sync_range	= (void *) cpufunc_nullop,
293 
294 	.cf_dcache_wbinv_all	= arm67_cache_flush,
295 	.cf_dcache_wbinv_range	= (void *)arm67_cache_flush,
296 	.cf_dcache_inv_range	= (void *)arm67_cache_flush,
297 	.cf_dcache_wb_range	= (void *)cpufunc_nullop,
298 
299 	.cf_idcache_wbinv_all	= arm67_cache_flush,
300 	.cf_idcache_wbinv_range	= (void *)arm67_cache_flush,
301 
302 	/* Other functions */
303 
304 	.cf_flush_prefetchbuf	= cpufunc_nullop,
305 	.cf_drain_writebuf	= cpufunc_nullop,
306 	.cf_flush_brnchtgt_C	= cpufunc_nullop,
307 	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,
308 
309 	.cf_sleep		= (void *)cpufunc_nullop,
310 
311 	/* Soft functions */
312 
313 #ifdef ARM6_LATE_ABORT
314 	.cf_dataabt_fixup	= late_abort_fixup,
315 #else
316 	.cf_dataabt_fixup	= early_abort_fixup,
317 #endif
318 	.cf_prefetchabt_fixup	= cpufunc_null_fixup,
319 
320 	.cf_context_switch	= arm67_context_switch,
321 
322 	.cf_setup		= arm6_setup
323 
324 };
325 #endif	/* CPU_ARM6 */
326 
327 #ifdef CPU_ARM7
328 struct cpu_functions arm7_cpufuncs = {
329 	/* CPU functions */
330 
331 	.cf_id			= cpufunc_id,
332 	.cf_cpwait		= cpufunc_nullop,
333 
334 	/* MMU functions */
335 
336 	.cf_control		= cpufunc_control,
337 	.cf_domains		= cpufunc_domains,
338 	.cf_setttb		= arm67_setttb,
339 	.cf_faultstatus		= cpufunc_faultstatus,
340 	.cf_faultaddress	= cpufunc_faultaddress,
341 
342 	/* TLB functions */
343 
344 	.cf_tlb_flushID		= arm67_tlb_flush,
345 	.cf_tlb_flushID_SE	= arm67_tlb_purge,
346 	.cf_tlb_flushI		= arm67_tlb_flush,
347 	.cf_tlb_flushI_SE	= arm67_tlb_purge,
348 	.cf_tlb_flushD		= arm67_tlb_flush,
349 	.cf_tlb_flushD_SE	= arm67_tlb_purge,
350 
351 	/* Cache operations */
352 
353 	.cf_icache_sync_all	= cpufunc_nullop,
354 	.cf_icache_sync_range	= (void *)cpufunc_nullop,
355 
356 	.cf_dcache_wbinv_all	= arm67_cache_flush,
357 	.cf_dcache_wbinv_range	= (void *)arm67_cache_flush,
358 	.cf_dcache_inv_range	= (void *)arm67_cache_flush,
359 	.cf_dcache_wb_range	= (void *)cpufunc_nullop,
360 
361 	.cf_idcache_wbinv_all	= arm67_cache_flush,
362 	.cf_idcache_wbinv_range	= (void *)arm67_cache_flush,
363 
364 	/* Other functions */
365 
366 	.cf_flush_prefetchbuf	= cpufunc_nullop,
367 	.cf_drain_writebuf	= cpufunc_nullop,
368 	.cf_flush_brnchtgt_C	= cpufunc_nullop,
369 	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,
370 
371 	.cf_sleep		= (void *)cpufunc_nullop,
372 
373 	/* Soft functions */
374 
375 	.cf_dataabt_fixup	= late_abort_fixup,
376 	.cf_prefetchabt_fixup	= cpufunc_null_fixup,
377 
378 	.cf_context_switch	= arm67_context_switch,
379 
380 	.cf_setup		= arm7_setup
381 
382 };
383 #endif	/* CPU_ARM7 */
384 
385 #ifdef CPU_ARM7TDMI
386 struct cpu_functions arm7tdmi_cpufuncs = {
387 	/* CPU functions */
388 
389 	.cf_id			= cpufunc_id,
390 	.cf_cpwait		= cpufunc_nullop,
391 
392 	/* MMU functions */
393 
394 	.cf_control		= cpufunc_control,
395 	.cf_domains		= cpufunc_domains,
396 	.cf_setttb		= arm7tdmi_setttb,
397 	.cf_faultstatus		= cpufunc_faultstatus,
398 	.cf_faultaddress	= cpufunc_faultaddress,
399 
400 	/* TLB functions */
401 
402 	.cf_tlb_flushID		= arm7tdmi_tlb_flushID,
403 	.cf_tlb_flushID_SE	= arm7tdmi_tlb_flushID_SE,
404 	.cf_tlb_flushI		= arm7tdmi_tlb_flushID,
405 	.cf_tlb_flushI_SE	= arm7tdmi_tlb_flushID_SE,
406 	.cf_tlb_flushD		= arm7tdmi_tlb_flushID,
407 	.cf_tlb_flushD_SE	= arm7tdmi_tlb_flushID_SE,
408 
409 	/* Cache operations */
410 
411 	.cf_icache_sync_all	= cpufunc_nullop,
412 	.cf_icache_sync_range	= (void *)cpufunc_nullop,
413 
414 	.cf_dcache_wbinv_all	= arm7tdmi_cache_flushID,
415 	.cf_dcache_wbinv_range	= (void *)arm7tdmi_cache_flushID,
416 	.cf_dcache_inv_range	= (void *)arm7tdmi_cache_flushID,
417 	.cf_dcache_wb_range	= (void *)cpufunc_nullop,
418 
419 	.cf_idcache_wbinv_all	= arm7tdmi_cache_flushID,
420 	.cf_idcache_wbinv_range	= (void *)arm7tdmi_cache_flushID,
421 
422 	/* Other functions */
423 
424 	.cf_flush_prefetchbuf	= cpufunc_nullop,
425 	.cf_drain_writebuf	= cpufunc_nullop,
426 	.cf_flush_brnchtgt_C	= cpufunc_nullop,
427 	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,
428 
429 	.cf_sleep		= (void *)cpufunc_nullop,
430 
431 	/* Soft functions */
432 
433 	.cf_dataabt_fixup	= late_abort_fixup,
434 	.cf_prefetchabt_fixup	= cpufunc_null_fixup,
435 
436 	.cf_context_switch	= arm7tdmi_context_switch,
437 
438 	.cf_setup		= arm7tdmi_setup
439 
440 };
441 #endif	/* CPU_ARM7TDMI */
442 
443 #ifdef CPU_ARM8
444 struct cpu_functions arm8_cpufuncs = {
445 	/* CPU functions */
446 
447 	.cf_id			= cpufunc_id,
448 	.cf_cpwait		= cpufunc_nullop,
449 
450 	/* MMU functions */
451 
452 	.cf_control		= cpufunc_control,
453 	.cf_domains		= cpufunc_domains,
454 	.cf_setttb		= arm8_setttb,
455 	.cf_faultstatus		= cpufunc_faultstatus,
456 	.cf_faultaddress	= cpufunc_faultaddress,
457 
458 	/* TLB functions */
459 
460 	.cf_tlb_flushID		= arm8_tlb_flushID,
461 	.cf_tlb_flushID_SE	= arm8_tlb_flushID_SE,
462 	.cf_tlb_flushI		= arm8_tlb_flushID,
463 	.cf_tlb_flushI_SE	= arm8_tlb_flushID_SE,
464 	.cf_tlb_flushD		= arm8_tlb_flushID,
465 	.cf_tlb_flushD_SE	= arm8_tlb_flushID_SE,
466 
467 	/* Cache operations */
468 
469 	.cf_icache_sync_all	= cpufunc_nullop,
470 	.cf_icache_sync_range	= (void *)cpufunc_nullop,
471 
472 	.cf_dcache_wbinv_all	= arm8_cache_purgeID,
473 	.cf_dcache_wbinv_range	= (void *)arm8_cache_purgeID,
474 /*XXX*/	.cf_dcache_inv_range	= (void *)arm8_cache_purgeID,
475 	.cf_dcache_wb_range	= (void *)arm8_cache_cleanID,
476 
477 	.cf_idcache_wbinv_all	= arm8_cache_purgeID,
478 	.cf_idcache_wbinv_range = (void *)arm8_cache_purgeID,
479 
480 	/* Other functions */
481 
482 	.cf_flush_prefetchbuf	= cpufunc_nullop,
483 	.cf_drain_writebuf	= cpufunc_nullop,
484 	.cf_flush_brnchtgt_C	= cpufunc_nullop,
485 	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,
486 
487 	.cf_sleep		= (void *)cpufunc_nullop,
488 
489 	/* Soft functions */
490 
491 	.cf_dataabt_fixup	= cpufunc_null_fixup,
492 	.cf_prefetchabt_fixup	= cpufunc_null_fixup,
493 
494 	.cf_context_switch	= arm8_context_switch,
495 
496 	.cf_setup		= arm8_setup
497 };
498 #endif	/* CPU_ARM8 */
499 
500 #ifdef CPU_ARM9
501 struct cpu_functions arm9_cpufuncs = {
502 	/* CPU functions */
503 
504 	.cf_id			= cpufunc_id,
505 	.cf_cpwait		= cpufunc_nullop,
506 
507 	/* MMU functions */
508 
509 	.cf_control		= cpufunc_control,
510 	.cf_domains		= cpufunc_domains,
511 	.cf_setttb		= arm9_setttb,
512 	.cf_faultstatus		= cpufunc_faultstatus,
513 	.cf_faultaddress	= cpufunc_faultaddress,
514 
515 	/* TLB functions */
516 
517 	.cf_tlb_flushID		= armv4_tlb_flushID,
518 	.cf_tlb_flushID_SE	= arm9_tlb_flushID_SE,
519 	.cf_tlb_flushI		= armv4_tlb_flushI,
520 	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
521 	.cf_tlb_flushD		= armv4_tlb_flushD,
522 	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,
523 
524 	/* Cache operations */
525 
526 	.cf_icache_sync_all	= arm9_icache_sync_all,
527 	.cf_icache_sync_range	= arm9_icache_sync_range,
528 
529 	.cf_dcache_wbinv_all	= arm9_dcache_wbinv_all,
530 	.cf_dcache_wbinv_range	= arm9_dcache_wbinv_range,
531 /*XXX*/	.cf_dcache_inv_range	= arm9_dcache_wbinv_range,
532 	.cf_dcache_wb_range	= arm9_dcache_wb_range,
533 
534 	.cf_idcache_wbinv_all	= arm9_idcache_wbinv_all,
535 	.cf_idcache_wbinv_range = arm9_idcache_wbinv_range,
536 
537 	/* Other functions */
538 
539 	.cf_flush_prefetchbuf	= cpufunc_nullop,
540 	.cf_drain_writebuf	= armv4_drain_writebuf,
541 	.cf_flush_brnchtgt_C	= cpufunc_nullop,
542 	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,
543 
544 	.cf_sleep		= (void *)cpufunc_nullop,
545 
546 	/* Soft functions */
547 
548 	.cf_dataabt_fixup	= cpufunc_null_fixup,
549 	.cf_prefetchabt_fixup	= cpufunc_null_fixup,
550 
551 	.cf_context_switch	= arm9_context_switch,
552 
553 	.cf_setup		= arm9_setup
554 
555 };
556 #endif /* CPU_ARM9 */
557 
558 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
559 struct cpu_functions armv5_ec_cpufuncs = {
560 	/* CPU functions */
561 
562 	.cf_id			= cpufunc_id,
563 	.cf_cpwait		= cpufunc_nullop,
564 
565 	/* MMU functions */
566 
567 	.cf_control		= cpufunc_control,
568 	.cf_domains		= cpufunc_domains,
569 	.cf_setttb		= armv5_ec_setttb,
570 	.cf_faultstatus		= cpufunc_faultstatus,
571 	.cf_faultaddress	= cpufunc_faultaddress,
572 
573 	/* TLB functions */
574 
575 	.cf_tlb_flushID		= armv4_tlb_flushID,
576 	.cf_tlb_flushID_SE	= arm10_tlb_flushID_SE,
577 	.cf_tlb_flushI		= armv4_tlb_flushI,
578 	.cf_tlb_flushI_SE	= arm10_tlb_flushI_SE,
579 	.cf_tlb_flushD		= armv4_tlb_flushD,
580 	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,
581 
582 	/* Cache operations */
583 
584 	.cf_icache_sync_all	= armv5_ec_icache_sync_all,
585 	.cf_icache_sync_range	= armv5_ec_icache_sync_range,
586 
587 	.cf_dcache_wbinv_all	= armv5_ec_dcache_wbinv_all,
588 	.cf_dcache_wbinv_range	= armv5_ec_dcache_wbinv_range,
589 /*XXX*/	.cf_dcache_inv_range	= armv5_ec_dcache_wbinv_range,
590 	.cf_dcache_wb_range	= armv5_ec_dcache_wb_range,
591 
592 	.cf_idcache_wbinv_all	= armv5_ec_idcache_wbinv_all,
593 	.cf_idcache_wbinv_range = armv5_ec_idcache_wbinv_range,
594 
595 	/* Other functions */
596 
597 	.cf_flush_prefetchbuf	= cpufunc_nullop,
598 	.cf_drain_writebuf	= armv4_drain_writebuf,
599 	.cf_flush_brnchtgt_C	= cpufunc_nullop,
600 	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,
601 
602 	.cf_sleep		= (void *)cpufunc_nullop,
603 
604 	/* Soft functions */
605 
606 	.cf_dataabt_fixup	= cpufunc_null_fixup,
607 	.cf_prefetchabt_fixup	= cpufunc_null_fixup,
608 
609 	.cf_context_switch	= arm10_context_switch,
610 
611 	.cf_setup		= arm10_setup
612 
613 };
614 #endif /* CPU_ARM9E || CPU_ARM10 */
615 
616 #ifdef CPU_ARM10
617 struct cpu_functions arm10_cpufuncs = {
618 	/* CPU functions */
619 
620 	.cf_id			= cpufunc_id,
621 	.cf_cpwait		= cpufunc_nullop,
622 
623 	/* MMU functions */
624 
625 	.cf_control		= cpufunc_control,
626 	.cf_domains		= cpufunc_domains,
627 	.cf_setttb		= armv5_setttb,
628 	.cf_faultstatus		= cpufunc_faultstatus,
629 	.cf_faultaddress	= cpufunc_faultaddress,
630 
631 	/* TLB functions */
632 
633 	.cf_tlb_flushID		= armv4_tlb_flushID,
634 	.cf_tlb_flushID_SE	= arm10_tlb_flushID_SE,
635 	.cf_tlb_flushI		= armv4_tlb_flushI,
636 	.cf_tlb_flushI_SE	= arm10_tlb_flushI_SE,
637 	.cf_tlb_flushD		= armv4_tlb_flushD,
638 	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,
639 
640 	/* Cache operations */
641 
642 	.cf_icache_sync_all	= armv5_icache_sync_all,
643 	.cf_icache_sync_range	= armv5_icache_sync_range,
644 
645 	.cf_dcache_wbinv_all	= armv5_dcache_wbinv_all,
646 	.cf_dcache_wbinv_range	= armv5_dcache_wbinv_range,
647 /*XXX*/	.cf_dcache_inv_range	= armv5_dcache_wbinv_range,
648 	.cf_dcache_wb_range	= armv5_dcache_wb_range,
649 
650 	.cf_idcache_wbinv_all	= armv5_idcache_wbinv_all,
651 	.cf_idcache_wbinv_range = armv5_idcache_wbinv_range,
652 
653 	/* Other functions */
654 
655 	.cf_flush_prefetchbuf	= cpufunc_nullop,
656 	.cf_drain_writebuf	= armv4_drain_writebuf,
657 	.cf_flush_brnchtgt_C	= cpufunc_nullop,
658 	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,
659 
660 	.cf_sleep		= (void *)cpufunc_nullop,
661 
662 	/* Soft functions */
663 
664 	.cf_dataabt_fixup	= cpufunc_null_fixup,
665 	.cf_prefetchabt_fixup	= cpufunc_null_fixup,
666 
667 	.cf_context_switch	= arm10_context_switch,
668 
669 	.cf_setup		= arm10_setup
670 
671 };
672 #endif /* CPU_ARM10 */
673 
674 #ifdef CPU_ARM11
675 struct cpu_functions arm11_cpufuncs = {
676 	/* CPU functions */
677 
678 	.cf_id			= cpufunc_id,
679 	.cf_cpwait		= cpufunc_nullop,
680 
681 	/* MMU functions */
682 
683 	.cf_control		= cpufunc_control,
684 	.cf_domains		= cpufunc_domains,
685 	.cf_setttb		= arm11_setttb,
686 	.cf_faultstatus		= cpufunc_faultstatus,
687 	.cf_faultaddress	= cpufunc_faultaddress,
688 
689 	/* TLB functions */
690 
691 	.cf_tlb_flushID		= arm11_tlb_flushID,
692 	.cf_tlb_flushID_SE	= arm11_tlb_flushID_SE,
693 	.cf_tlb_flushI		= arm11_tlb_flushI,
694 	.cf_tlb_flushI_SE	= arm11_tlb_flushI_SE,
695 	.cf_tlb_flushD		= arm11_tlb_flushD,
696 	.cf_tlb_flushD_SE	= arm11_tlb_flushD_SE,
697 
698 	/* Cache operations */
699 
700 	.cf_icache_sync_all	= armv5_icache_sync_all,
701 	.cf_icache_sync_range	= armv5_icache_sync_range,
702 
703 	.cf_dcache_wbinv_all	= armv5_dcache_wbinv_all,
704 	.cf_dcache_wbinv_range	= armv5_dcache_wbinv_range,
705 /*XXX*/	.cf_dcache_inv_range	= armv5_dcache_wbinv_range,
706 	.cf_dcache_wb_range	= armv5_dcache_wb_range,
707 
708 	.cf_idcache_wbinv_all	= armv5_idcache_wbinv_all,
709 	.cf_idcache_wbinv_range = armv5_idcache_wbinv_range,
710 
711 	/* Other functions */
712 
713 	.cf_flush_prefetchbuf	= cpufunc_nullop,
714 	.cf_drain_writebuf	= arm11_drain_writebuf,
715 	.cf_flush_brnchtgt_C	= cpufunc_nullop,
716 	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,
717 
718 	.cf_sleep		= (void *)cpufunc_nullop,
719 
720 	/* Soft functions */
721 
722 	.cf_dataabt_fixup	= cpufunc_null_fixup,
723 	.cf_prefetchabt_fixup	= cpufunc_null_fixup,
724 
725 	.cf_context_switch	= arm11_context_switch,
726 
727 	.cf_setup		= arm11_setup
728 
729 };
730 #endif /* CPU_ARM11 */
731 
732 #ifdef CPU_SA110
733 struct cpu_functions sa110_cpufuncs = {
734 	/* CPU functions */
735 
736 	.cf_id			= cpufunc_id,
737 	.cf_cpwait		= cpufunc_nullop,
738 
739 	/* MMU functions */
740 
741 	.cf_control		= cpufunc_control,
742 	.cf_domains		= cpufunc_domains,
743 	.cf_setttb		= sa1_setttb,
744 	.cf_faultstatus		= cpufunc_faultstatus,
745 	.cf_faultaddress	= cpufunc_faultaddress,
746 
747 	/* TLB functions */
748 
749 	.cf_tlb_flushID		= armv4_tlb_flushID,
750 	.cf_tlb_flushID_SE	= sa1_tlb_flushID_SE,
751 	.cf_tlb_flushI		= armv4_tlb_flushI,
752 	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
753 	.cf_tlb_flushD		= armv4_tlb_flushD,
754 	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,
755 
756 	/* Cache operations */
757 
758 	.cf_icache_sync_all	= sa1_cache_syncI,
759 	.cf_icache_sync_range	= sa1_cache_syncI_rng,
760 
761 	.cf_dcache_wbinv_all	= sa1_cache_purgeD,
762 	.cf_dcache_wbinv_range	= sa1_cache_purgeD_rng,
763 /*XXX*/	.cf_dcache_inv_range	= sa1_cache_purgeD_rng,
764 	.cf_dcache_wb_range	= sa1_cache_cleanD_rng,
765 
766 	.cf_idcache_wbinv_all	= sa1_cache_purgeID,
767 	.cf_idcache_wbinv_range	= sa1_cache_purgeID_rng,
768 
769 	/* Other functions */
770 
771 	.cf_flush_prefetchbuf	= cpufunc_nullop,
772 	.cf_drain_writebuf	= armv4_drain_writebuf,
773 	.cf_flush_brnchtgt_C	= cpufunc_nullop,
774 	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,
775 
776 	.cf_sleep		= (void *)cpufunc_nullop,
777 
778 	/* Soft functions */
779 
780 	.cf_dataabt_fixup	= cpufunc_null_fixup,
781 	.cf_prefetchabt_fixup	= cpufunc_null_fixup,
782 
783 	.cf_context_switch	= sa110_context_switch,
784 
785 	.cf_setup		= sa110_setup
786 };
787 #endif	/* CPU_SA110 */
788 
789 #if defined(CPU_SA1100) || defined(CPU_SA1110)
790 struct cpu_functions sa11x0_cpufuncs = {
791 	/* CPU functions */
792 
793 	.cf_id			= cpufunc_id,
794 	.cf_cpwait		= cpufunc_nullop,
795 
796 	/* MMU functions */
797 
798 	.cf_control		= cpufunc_control,
799 	.cf_domains		= cpufunc_domains,
800 	.cf_setttb		= sa1_setttb,
801 	.cf_faultstatus		= cpufunc_faultstatus,
802 	.cf_faultaddress	= cpufunc_faultaddress,
803 
804 	/* TLB functions */
805 
806 	.cf_tlb_flushID		= armv4_tlb_flushID,
807 	.cf_tlb_flushID_SE	= sa1_tlb_flushID_SE,
808 	.cf_tlb_flushI		= armv4_tlb_flushI,
809 	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
810 	.cf_tlb_flushD		= armv4_tlb_flushD,
811 	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,
812 
813 	/* Cache operations */
814 
815 	.cf_icache_sync_all	= sa1_cache_syncI,
816 	.cf_icache_sync_range	= sa1_cache_syncI_rng,
817 
818 	.cf_dcache_wbinv_all	= sa1_cache_purgeD,
819 	.cf_dcache_wbinv_range	= sa1_cache_purgeD_rng,
820 /*XXX*/	.cf_dcache_inv_range	= sa1_cache_purgeD_rng,
821 	.cf_dcache_wb_range	= sa1_cache_cleanD_rng,
822 
823 	.cf_idcache_wbinv_all	= sa1_cache_purgeID,
824 	.cf_idcache_wbinv_range	= sa1_cache_purgeID_rng,
825 
826 	/* Other functions */
827 
828 	.cf_flush_prefetchbuf	= sa11x0_drain_readbuf,
829 	.cf_drain_writebuf	= armv4_drain_writebuf,
830 	.cf_flush_brnchtgt_C	= cpufunc_nullop,
831 	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,
832 
833 	.cf_sleep		= sa11x0_cpu_sleep,
834 
835 	/* Soft functions */
836 
837 	.cf_dataabt_fixup	= cpufunc_null_fixup,
838 	.cf_prefetchabt_fixup	= cpufunc_null_fixup,
839 
840 	.cf_context_switch	= sa11x0_context_switch,
841 
842 	.cf_setup		= sa11x0_setup
843 };
844 #endif	/* CPU_SA1100 || CPU_SA1110 */
845 
846 #ifdef CPU_IXP12X0
847 struct cpu_functions ixp12x0_cpufuncs = {
848 	/* CPU functions */
849 
850 	.cf_id			= cpufunc_id,
851 	.cf_cpwait		= cpufunc_nullop,
852 
853 	/* MMU functions */
854 
855 	.cf_control		= cpufunc_control,
856 	.cf_domains		= cpufunc_domains,
857 	.cf_setttb		= sa1_setttb,
858 	.cf_faultstatus		= cpufunc_faultstatus,
859 	.cf_faultaddress	= cpufunc_faultaddress,
860 
861 	/* TLB functions */
862 
863 	.cf_tlb_flushID		= armv4_tlb_flushID,
864 	.cf_tlb_flushID_SE	= sa1_tlb_flushID_SE,
865 	.cf_tlb_flushI		= armv4_tlb_flushI,
866 	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
867 	.cf_tlb_flushD		= armv4_tlb_flushD,
868 	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,
869 
870 	/* Cache operations */
871 
872 	.cf_icache_sync_all	= sa1_cache_syncI,
873 	.cf_icache_sync_range	= sa1_cache_syncI_rng,
874 
875 	.cf_dcache_wbinv_all	= sa1_cache_purgeD,
876 	.cf_dcache_wbinv_range	= sa1_cache_purgeD_rng,
877 /*XXX*/	.cf_dcache_inv_range	= sa1_cache_purgeD_rng,
878 	.cf_dcache_wb_range	= sa1_cache_cleanD_rng,
879 
880 	.cf_idcache_wbinv_all	= sa1_cache_purgeID,
881 	.cf_idcache_wbinv_range	= sa1_cache_purgeID_rng,
882 
883 	/* Other functions */
884 
885 	.cf_flush_prefetchbuf	= ixp12x0_drain_readbuf,
886 	.cf_drain_writebuf	= armv4_drain_writebuf,
887 	.cf_flush_brnchtgt_C	= cpufunc_nullop,
888 	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,
889 
890 	.cf_sleep		= (void *)cpufunc_nullop,
891 
892 	/* Soft functions */
893 
894 	.cf_dataabt_fixup	= cpufunc_null_fixup,
895 	.cf_prefetchabt_fixup	= cpufunc_null_fixup,
896 
897 	.cf_context_switch	= ixp12x0_context_switch,
898 
899 	.cf_setup		= ixp12x0_setup
900 };
901 #endif	/* CPU_IXP12X0 */
902 
903 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
904     defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425)
905 struct cpu_functions xscale_cpufuncs = {
906 	/* CPU functions */
907 
908 	.cf_id			= cpufunc_id,
909 	.cf_cpwait		= xscale_cpwait,
910 
911 	/* MMU functions */
912 
913 	.cf_control		= xscale_control,
914 	.cf_domains		= cpufunc_domains,
915 	.cf_setttb		= xscale_setttb,
916 	.cf_faultstatus		= cpufunc_faultstatus,
917 	.cf_faultaddress	= cpufunc_faultaddress,
918 
919 	/* TLB functions */
920 
921 	.cf_tlb_flushID		= armv4_tlb_flushID,
922 	.cf_tlb_flushID_SE	= xscale_tlb_flushID_SE,
923 	.cf_tlb_flushI		= armv4_tlb_flushI,
924 	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
925 	.cf_tlb_flushD		= armv4_tlb_flushD,
926 	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,
927 
928 	/* Cache operations */
929 
930 	.cf_icache_sync_all	= xscale_cache_syncI,
931 	.cf_icache_sync_range	= xscale_cache_syncI_rng,
932 
933 	.cf_dcache_wbinv_all	= xscale_cache_purgeD,
934 	.cf_dcache_wbinv_range	= xscale_cache_purgeD_rng,
935 	.cf_dcache_inv_range	= xscale_cache_flushD_rng,
936 	.cf_dcache_wb_range	= xscale_cache_cleanD_rng,
937 
938 	.cf_idcache_wbinv_all	= xscale_cache_purgeID,
939 	.cf_idcache_wbinv_range = xscale_cache_purgeID_rng,
940 
941 	/* Other functions */
942 
943 	.cf_flush_prefetchbuf	= cpufunc_nullop,
944 	.cf_drain_writebuf	= armv4_drain_writebuf,
945 	.cf_flush_brnchtgt_C	= cpufunc_nullop,
946 	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,
947 
948 	.cf_sleep		= xscale_cpu_sleep,
949 
950 	/* Soft functions */
951 
952 	.cf_dataabt_fixup	= cpufunc_null_fixup,
953 	.cf_prefetchabt_fixup	= cpufunc_null_fixup,
954 
955 	.cf_context_switch	= xscale_context_switch,
956 
957 	.cf_setup		= xscale_setup
958 };
959 #endif
960 /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || __CPU_XSCALE_PXA2XX || CPU_XSCALE_IXP425 */
961 
962 /*
963  * Global variables also used by locore.s
964  */
965 
966 struct cpu_functions cpufuncs;
967 u_int cputype;
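/*
 * When set, the reset path in locore.s disables the MMU before
 * branching to the reset vector; the assignments in set_cpufuncs()
 * below turn it on for ARMv4-and-later cores (StrongARM, XScale).
 */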
968 u_int cpu_reset_needs_v4_MMU_disable;	/* flag used in locore.s */
969 
970 #if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) || \
971     defined (CPU_ARM9E) || defined (CPU_ARM10) || defined (CPU_ARM11) || \
972     defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
973     defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425)
974 static void get_cachetype_cp15 __P((void));
975 
976 /* Additional cache information local to this file.  Log2 of some of the
977    above numbers.  */
978 static int	arm_dcache_l2_nsets;
979 static int	arm_dcache_l2_assoc;
980 static int	arm_dcache_l2_linesize;
981 
982 static void
983 get_cachetype_cp15()
984 {
985 	u_int ctype, isize, dsize;
986 	u_int multiplier;
987 
988 	__asm volatile("mrc p15, 0, %0, c0, c0, 1"
989 		: "=r" (ctype));
990 
991 	/*
992 	 * ...and thus spake the ARM ARM:
993 	 *
994 	 * If an <opcode2> value corresponding to an unimplemented or
995 	 * reserved ID register is encountered, the System Control
996 	 * processor returns the value of the main ID register.
997 	 */
998 	if (ctype == cpu_id())
999 		goto out;
1000 
1001 	if ((ctype & CPU_CT_S) == 0)
1002 		arm_pcache_unified = 1;
1003 
1004 	/*
1005 	 * If you want to know how this code works, go read the ARM ARM.
1006 	 */
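	/*
	 * Worked example of the decode below, assuming the standard
	 * ARMv4 cache type register layout (SIZE in bits 8:6, ASSOC in
	 * bits 5:3, M in bit 2, LEN in bits 1:0 of each xSIZE field):
	 * an ARM920T-style 16KB, 64-way, 32-byte-line cache has SIZE=5,
	 * ASSOC=6, LEN=2, M=0, giving 2 << (5+8) = 16KB,
	 * 2 << (6-1) = 64 ways and 1U << (2+3) = 32-byte lines.
	 */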
1007 
1008 	arm_pcache_type = CPU_CT_CTYPE(ctype);
1009 
1010 	if (arm_pcache_unified == 0) {
1011 		isize = CPU_CT_ISIZE(ctype);
1012 		multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
1013 		arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
1014 		if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
1015 			if (isize & CPU_CT_xSIZE_M)
1016 				arm_picache_line_size = 0; /* not present */
1017 			else
1018 				arm_picache_ways = 1;
1019 		} else {
1020 			arm_picache_ways = multiplier <<
1021 			    (CPU_CT_xSIZE_ASSOC(isize) - 1);
1022 		}
1023 		arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
1024 	}
1025 
1026 	dsize = CPU_CT_DSIZE(ctype);
1027 	multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
1028 	arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
1029 	if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
1030 		if (dsize & CPU_CT_xSIZE_M)
1031 			arm_pdcache_line_size = 0; /* not present */
1032 		else
1033 			arm_pdcache_ways = 1;
1034 	} else {
1035 		arm_pdcache_ways = multiplier <<
1036 		    (CPU_CT_xSIZE_ASSOC(dsize) - 1);
1037 	}
1038 	arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
1039 
1040 	arm_dcache_align = arm_pdcache_line_size;
1041 
1042 	arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
1043 	arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
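	/*
	 * nsets follows from size = ways * nsets * linesize; in log2
	 * terms, (SIZE+8+m) - (ASSOC-1+m) - (LEN+3) = 6 + SIZE - ASSOC
	 * - LEN, with m = log2(multiplier) cancelling out.
	 */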
1044 	arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
1045 	    CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);
1046 
1047  out:
1048 	arm_dcache_align_mask = arm_dcache_align - 1;
1049 }
1050 #endif /* ARM7TDMI || ARM8 || ARM9 || ARM9E || ARM10 || ARM11 || XSCALE */
1051 
1052 #if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3) || \
1053     defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_SA110) || \
1054     defined(CPU_SA1100) || defined(CPU_SA1110) || defined(CPU_IXP12X0)
1055 /* Cache information for CPUs without cache type registers. */
1056 struct cachetab {
1057 	u_int32_t ct_cpuid;
1058 	int	ct_pcache_type;
1059 	int	ct_pcache_unified;
1060 	int	ct_pdcache_size;
1061 	int	ct_pdcache_line_size;
1062 	int	ct_pdcache_ways;
1063 	int	ct_picache_size;
1064 	int	ct_picache_line_size;
1065 	int	ct_picache_ways;
1066 };
1067 
1068 struct cachetab cachetab[] = {
1069     /* cpuid,           cache type,       u,  dsiz, ls, wy,  isiz, ls, wy */
1070     { CPU_ID_ARM2,      0,                1,     0,  0,  0,     0,  0,  0 },
1071     { CPU_ID_ARM250,    0,                1,     0,  0,  0,     0,  0,  0 },
1072     { CPU_ID_ARM3,      CPU_CT_CTYPE_WT,  1,  4096, 16, 64,     0,  0,  0 },
1073     { CPU_ID_ARM610,	CPU_CT_CTYPE_WT,  1,  4096, 16, 64,     0,  0,  0 },
1074     { CPU_ID_ARM710,    CPU_CT_CTYPE_WT,  1,  8192, 32,  4,     0,  0,  0 },
1075     { CPU_ID_ARM7500,   CPU_CT_CTYPE_WT,  1,  4096, 16,  4,     0,  0,  0 },
1076     { CPU_ID_ARM710A,   CPU_CT_CTYPE_WT,  1,  8192, 16,  4,     0,  0,  0 },
1077     { CPU_ID_ARM7500FE, CPU_CT_CTYPE_WT,  1,  4096, 16,  4,     0,  0,  0 },
1078     /* XXX is this type right for SA-1? */
1079     { CPU_ID_SA110,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 },
1080     { CPU_ID_SA1100,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
1081     { CPU_ID_SA1110,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
1082     { CPU_ID_IXP1200,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 }, /* XXX */
1083     { 0, 0, 0, 0, 0, 0, 0, 0}
1084 };
1085 
1086 static void get_cachetype_table __P((void));
1087 
1088 static void
1089 get_cachetype_table()
1090 {
1091 	int i;
1092 	u_int32_t cpuid = cpu_id();
1093 
1094 	for (i = 0; cachetab[i].ct_cpuid != 0; i++) {
1095 		if (cachetab[i].ct_cpuid == (cpuid & CPU_ID_CPU_MASK)) {
1096 			arm_pcache_type = cachetab[i].ct_pcache_type;
1097 			arm_pcache_unified = cachetab[i].ct_pcache_unified;
1098 			arm_pdcache_size = cachetab[i].ct_pdcache_size;
1099 			arm_pdcache_line_size =
1100 			    cachetab[i].ct_pdcache_line_size;
1101 			arm_pdcache_ways = cachetab[i].ct_pdcache_ways;
1102 			arm_picache_size = cachetab[i].ct_picache_size;
1103 			arm_picache_line_size =
1104 			    cachetab[i].ct_picache_line_size;
1105 			arm_picache_ways = cachetab[i].ct_picache_ways;
1106 		}
1107 	}
1108 	arm_dcache_align = arm_pdcache_line_size;
1109 
1110 	arm_dcache_align_mask = arm_dcache_align - 1;
1111 }
1112 
1113 #endif /* ARM2 || ARM250 || ARM3 || ARM6 || ARM7 || SA110 || SA1100 || SA1110 || IXP12X0 */
1114 
1115 /*
1116  * Cannot panic here as we may not have a console yet ...
1117  */
1118 
1119 int
1120 set_cpufuncs()
1121 {
1122 	if (cputype == 0) {
1123 		cputype = cpufunc_id();
1124 		cputype &= CPU_ID_CPU_MASK;
1125 	}
1126 
1127 	/*
1128 	 * NOTE: cpu_do_powersave defaults to off.  If we encounter a
1129 	 * CPU type where we want to use it by default, then we set it.
1130 	 */
1131 #ifdef CPU_ARM2
1132 	if (cputype == CPU_ID_ARM2) {
1133 		cpufuncs = arm2_cpufuncs;
1134 		cpu_reset_needs_v4_MMU_disable = 0;
1135 		get_cachetype_table();
1136 		return 0;
1137 	}
1138 #endif /* CPU_ARM2 */
1139 #ifdef CPU_ARM250
1140 	if (cputype == CPU_ID_ARM250) {
1141 		cpufuncs = arm250_cpufuncs;
1142 		cpu_reset_needs_v4_MMU_disable = 0;
1143 		get_cachetype_table();
1144 		return 0;
1145 	}
1146 #endif
1147 #ifdef CPU_ARM3
1148 	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
1149 	    (cputype & 0x00000f00) == 0x00000300) {
1150 		cpufuncs = arm3_cpufuncs;
1151 		cpu_reset_needs_v4_MMU_disable = 0;
1152 		get_cachetype_table();
1153 		return 0;
1154 	}
1155 #endif	/* CPU_ARM3 */
1156 #ifdef CPU_ARM6
1157 	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
1158 	    (cputype & 0x00000f00) == 0x00000600) {
1159 		cpufuncs = arm6_cpufuncs;
1160 		cpu_reset_needs_v4_MMU_disable = 0;
1161 		get_cachetype_table();
1162 		pmap_pte_init_generic();
1163 		return 0;
1164 	}
1165 #endif	/* CPU_ARM6 */
1166 #ifdef CPU_ARM7
1167 	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
1168 	    CPU_ID_IS7(cputype) &&
1169 	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V3) {
1170 		cpufuncs = arm7_cpufuncs;
1171 		cpu_reset_needs_v4_MMU_disable = 0;
1172 		get_cachetype_table();
1173 		pmap_pte_init_generic();
1174 		return 0;
1175 	}
1176 #endif	/* CPU_ARM7 */
1177 #ifdef CPU_ARM7TDMI
1178 	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
1179 	    CPU_ID_IS7(cputype) &&
1180 	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V4T) {
1181 		cpufuncs = arm7tdmi_cpufuncs;
1182 		cpu_reset_needs_v4_MMU_disable = 0;
1183 		get_cachetype_cp15();
1184 		pmap_pte_init_generic();
1185 		return 0;
1186 	}
1187 #endif
1188 #ifdef CPU_ARM8
1189 	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
1190 	    (cputype & 0x0000f000) == 0x00008000) {
1191 		cpufuncs = arm8_cpufuncs;
1192 		cpu_reset_needs_v4_MMU_disable = 0;	/* XXX correct? */
1193 		get_cachetype_cp15();
1194 		pmap_pte_init_arm8();
1195 		return 0;
1196 	}
1197 #endif	/* CPU_ARM8 */
1198 #ifdef CPU_ARM9
1199 	if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
1200 	     (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
1201 	    (cputype & 0x0000f000) == 0x00009000) {
1202 		cpufuncs = arm9_cpufuncs;
1203 		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
1204 		get_cachetype_cp15();
1205 		arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
1206 		arm9_dcache_sets_max =
1207 		    (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) -
1208 		    arm9_dcache_sets_inc;
1209 		arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
1210 		arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
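		/*
		 * These values drive the ARM9 clean/invalidate-by-index
		 * loops: the set index advances in line-sized steps while
		 * the way index occupies the top bits of the CP15 c7
		 * operand, hence the 1U << (32 - log2(assoc)) increment.
		 */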
1211 #ifdef	ARM9_CACHE_WRITE_THROUGH
1212 		pmap_pte_init_arm9();
1213 #else
1214 		pmap_pte_init_generic();
1215 #endif
1216 		return 0;
1217 	}
1218 #endif /* CPU_ARM9 */
1219 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
1220 	if (cputype == CPU_ID_ARM926EJS ||
1221 	    cputype == CPU_ID_ARM1026EJS) {
1222 		cpufuncs = armv5_ec_cpufuncs;
1223 		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
1224 		get_cachetype_cp15();
1225 		pmap_pte_init_generic();
1226 		return 0;
1227 	}
1228 #endif /* CPU_ARM9E || CPU_ARM10 */
1229 #ifdef CPU_ARM10
1230 	if (/* cputype == CPU_ID_ARM1020T || */
1231 	    cputype == CPU_ID_ARM1020E) {
1232 		/*
1233 		 * Select write-through caching (this isn't really an
1234 		 * option on ARM1020T).
1235 		 */
1236 		cpufuncs = arm10_cpufuncs;
1237 		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
1238 		get_cachetype_cp15();
1239 		armv5_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
1240 		armv5_dcache_sets_max =
1241 		    (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) -
1242 		    armv5_dcache_sets_inc;
1243 		armv5_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
1244 		armv5_dcache_index_max = 0U - armv5_dcache_index_inc;
1245 		pmap_pte_init_generic();
1246 		return 0;
1247 	}
1248 #endif /* CPU_ARM10 */
1249 #ifdef CPU_ARM11
1250 	if (cputype == CPU_ID_ARM1136JS ||
1251 	    cputype == CPU_ID_ARM1136JSR1) {
1252 		cpufuncs = arm11_cpufuncs;
1253 		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
1254 		get_cachetype_cp15();
1255 		armv5_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
1256 		armv5_dcache_sets_max =
1257 		    (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) -
1258 		    armv5_dcache_sets_inc;
1259 		armv5_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
1260 		armv5_dcache_index_max = 0U - armv5_dcache_index_inc;
1261 		pmap_pte_init_generic();
1262 		return 0;
1263 	}
1264 #endif /* CPU_ARM11 */
1265 #ifdef CPU_SA110
1266 	if (cputype == CPU_ID_SA110) {
1267 		cpufuncs = sa110_cpufuncs;
1268 		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it */
1269 		get_cachetype_table();
1270 		pmap_pte_init_sa1();
1271 		return 0;
1272 	}
1273 #endif	/* CPU_SA110 */
1274 #ifdef CPU_SA1100
1275 	if (cputype == CPU_ID_SA1100) {
1276 		cpufuncs = sa11x0_cpufuncs;
1277 		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it	*/
1278 		get_cachetype_table();
1279 		pmap_pte_init_sa1();
1280 
1281 		/* Use powersave on this CPU. */
1282 		cpu_do_powersave = 1;
1283 
1284 		return 0;
1285 	}
1286 #endif	/* CPU_SA1100 */
1287 #ifdef CPU_SA1110
1288 	if (cputype == CPU_ID_SA1110) {
1289 		cpufuncs = sa11x0_cpufuncs;
1290 		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it	*/
1291 		get_cachetype_table();
1292 		pmap_pte_init_sa1();
1293 
1294 		/* Use powersave on this CPU. */
1295 		cpu_do_powersave = 1;
1296 
1297 		return 0;
1298 	}
1299 #endif	/* CPU_SA1110 */
1300 #ifdef CPU_IXP12X0
1301 	if (cputype == CPU_ID_IXP1200) {
1302 		cpufuncs = ixp12x0_cpufuncs;
1303 		cpu_reset_needs_v4_MMU_disable = 1;
1304 		get_cachetype_table();
1305 		pmap_pte_init_sa1();
1306 		return 0;
1307 	}
1308 #endif	/* CPU_IXP12X0 */
1309 #ifdef CPU_XSCALE_80200
1310 	if (cputype == CPU_ID_80200) {
1311 		int rev = cpufunc_id() & CPU_ID_REVISION_MASK;
1312 
1313 		i80200_icu_init();
1314 
1315 		/*
1316 		 * Reset the Performance Monitoring Unit to a
1317 		 * pristine state:
1318 		 *	- CCNT, PMN0, PMN1 reset to 0
1319 		 *	- overflow indications cleared
1320 		 *	- all counters disabled
1321 		 */
1322 		__asm volatile("mcr p14, 0, %0, c0, c0, 0"
1323 			:
1324 			: "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
1325 			       PMNC_CC_IF));
1326 
1327 #if defined(XSCALE_CCLKCFG)
1328 		/*
1329 		 * Crank CCLKCFG to maximum legal value.
1330 		 */
1331 		__asm volatile ("mcr p14, 0, %0, c6, c0, 0"
1332 			:
1333 			: "r" (XSCALE_CCLKCFG));
1334 #endif
1335 
1336 		/*
1337 		 * XXX Disable ECC in the Bus Controller Unit; we
1338 		 * don't really support it, yet.  Clear any pending
1339 		 * error indications.
1340 		 */
1341 		__asm volatile("mcr p13, 0, %0, c0, c1, 0"
1342 			:
1343 			: "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));
1344 
1345 		cpufuncs = xscale_cpufuncs;
1346 #if defined(PERFCTRS)
1347 		xscale_pmu_init();
1348 #endif
1349 
1350 		/*
1351 		 * i80200 errata: Step-A0 and A1 have a bug where
1352 		 * D$ dirty bits are not cleared on "invalidate by
1353 		 * address".
1354 		 *
1355 		 * Workaround: Clean cache line before invalidating.
1356 		 */
1357 		if (rev == 0 || rev == 1)
1358 			cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng;
1359 
1360 		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
1361 		get_cachetype_cp15();
1362 		pmap_pte_init_xscale();
1363 		return 0;
1364 	}
1365 #endif /* CPU_XSCALE_80200 */
1366 #ifdef CPU_XSCALE_80321
1367 	if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
1368 	    cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0 ||
1369 	    cputype == CPU_ID_80219_400 || cputype == CPU_ID_80219_600) {
1370 		i80321_icu_init();
1371 
1372 		/*
1373 		 * Reset the Performance Monitoring Unit to a
1374 		 * pristine state:
1375 		 *	- CCNT, PMN0, PMN1 reset to 0
1376 		 *	- overflow indications cleared
1377 		 *	- all counters disabled
1378 		 */
1379 		__asm volatile("mcr p14, 0, %0, c0, c0, 0"
1380 			:
1381 			: "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
1382 			       PMNC_CC_IF));
1383 
1384 		cpufuncs = xscale_cpufuncs;
1385 #if defined(PERFCTRS)
1386 		xscale_pmu_init();
1387 #endif
1388 
1389 		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
1390 		get_cachetype_cp15();
1391 		pmap_pte_init_xscale();
1392 		return 0;
1393 	}
1394 #endif /* CPU_XSCALE_80321 */
1395 #ifdef __CPU_XSCALE_PXA2XX
1396 	/* ignore core revision to test PXA2xx CPUs */
1397 	if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X ||
1398 	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
1399 	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {
1400 
1401 		cpufuncs = xscale_cpufuncs;
1402 #if defined(PERFCTRS)
1403 		xscale_pmu_init();
1404 #endif
1405 
1406 		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
1407 		get_cachetype_cp15();
1408 		pmap_pte_init_xscale();
1409 
1410 		/* Use powersave on this CPU. */
1411 		cpu_do_powersave = 1;
1412 
1413 		return 0;
1414 	}
1415 #endif /* __CPU_XSCALE_PXA2XX */
1416 #ifdef CPU_XSCALE_IXP425
1417 	if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
1418 	    cputype == CPU_ID_IXP425_266) {
1419 		ixp425_icu_init();
1420 
1421 		cpufuncs = xscale_cpufuncs;
1422 #if defined(PERFCTRS)
1423 		xscale_pmu_init();
1424 #endif
1425 
1426 		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
1427 		get_cachetype_cp15();
1428 		pmap_pte_init_xscale();
1429 
1430 		return 0;
1431 	}
1432 #endif /* CPU_XSCALE_IXP425 */
1433 	/*
1434 	 * Bzzzz. And the answer was ...
1435 	 */
1436 	panic("No support for this CPU type (%08x) in kernel", cputype);
1437 	return(ARCHITECTURE_NOT_PRESENT);
1438 }
1439 
1440 #ifdef CPU_ARM2
1441 u_int arm2_id(void)
1442 {
1443 
1444 	return CPU_ID_ARM2;
1445 }
1446 #endif /* CPU_ARM2 */
1447 
1448 #ifdef CPU_ARM250
1449 u_int arm250_id(void)
1450 {
1451 
1452 	return CPU_ID_ARM250;
1453 }
1454 #endif /* CPU_ARM250 */
1455 
1456 /*
1457  * Fixup routines for data and prefetch aborts.
1458  *
1459  * Several compile-time symbols are used:
1460  *
1461  * DEBUG_FAULT_CORRECTION - Print debugging information during the
1462  * correction of registers after a fault.
1463  * ARM6_LATE_ABORT - ARM6 supports both early and late aborts;
1464  * when defined, late aborts are used.
1465  */
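/*
 * Example of the two abort models for "ldr r0, [r1, #4]!": an
 * early-abort CPU restores r1 itself before taking the abort, while a
 * late-abort (base-updated) CPU leaves r1 modified and relies on the
 * fixup code below to undo the update.
 */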
1466 
1467 
1468 /*
1469  * Null abort fixup routine.
1470  * For use when no fixup is required.
1471  */
1472 int
1473 cpufunc_null_fixup(arg)
1474 	void *arg;
1475 {
1476 	return(ABORT_FIXUP_OK);
1477 }
1478 
1479 
1480 #if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3) || \
1481     defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI)
1482 
1483 #ifdef DEBUG_FAULT_CORRECTION
1484 #define DFC_PRINTF(x)		printf x
1485 #define DFC_DISASSEMBLE(x)	disassemble(x)
1486 #else
1487 #define DFC_PRINTF(x)		/* nothing */
1488 #define DFC_DISASSEMBLE(x)	/* nothing */
1489 #endif
1490 
1491 /*
1492  * "Early" data abort fixup.
1493  *
1494  * For ARM2, ARM2as, ARM3 and ARM6 (in early-abort mode).  Also used
1495  * indirectly by ARM6 (in late-abort mode) and ARM7[TDMI].
1496  *
1497  * In early aborts, we may have to fix up LDM, STM, LDC and STC.
1498  */
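/*
 * A sketch of the LDM/STM correction below: for "ldmia r5!, {r0-r3}"
 * (writeback, up, 4 registers) the CPU has already advanced r5 by 16
 * bytes when the abort is taken, so count * 4 is subtracted; the
 * descending forms are corrected upwards instead.
 */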
1499 int
1500 early_abort_fixup(arg)
1501 	void *arg;
1502 {
1503 	trapframe_t *frame = arg;
1504 	u_int fault_pc;
1505 	u_int fault_instruction;
1506 	int saved_lr = 0;
1507 
1508 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1509 
1510 		/* Ok an abort in SVC mode */
1511 
1512 		/*
1513 		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1514 		 * as the fault happened in svc mode but we need it in the
1515 		 * usr slot so we can treat the registers as an array of ints
1516 		 * during fixing.
1517 		 * NOTE: r15 (the PC) keeps its slot in this array, but
1518 		 * writeback to r15 is not allowed, so it is never fixed up.
1519 		 * Doing it like this is more efficient than trapping this
1520 		 * case in all possible locations in the following fixup code.
1521 		 */
1522 
1523 		saved_lr = frame->tf_usr_lr;
1524 		frame->tf_usr_lr = frame->tf_svc_lr;
1525 
1526 		/*
1527 		 * Note the trapframe does not have the SVC r13 so a fault
1528 		 * from an instruction with writeback to r13 in SVC mode is
1529 		 * not allowed. This should not happen as the kstack is
1530 		 * always valid.
1531 		 */
1532 	}
1533 
1534 	/* Get fault address and status from the CPU */
1535 
1536 	fault_pc = frame->tf_pc;
1537 	fault_instruction = *((volatile unsigned int *)fault_pc);
1538 
1539 	/* Decode the fault instruction and fix the registers as needed */
1540 
1541 	if ((fault_instruction & 0x0e000000) == 0x08000000) {
1542 		int base;
1543 		int loop;
1544 		int count;
1545 		int *registers = &frame->tf_r0;
1546 
1547 		DFC_PRINTF(("LDM/STM\n"));
1548 		DFC_DISASSEMBLE(fault_pc);
1549 		if (fault_instruction & (1 << 21)) {
1550 			DFC_PRINTF(("This instruction must be corrected\n"));
1551 			base = (fault_instruction >> 16) & 0x0f;
1552 			if (base == 15)
1553 				return ABORT_FIXUP_FAILED;
1554 			/* Count registers transferred */
1555 			count = 0;
1556 			for (loop = 0; loop < 16; ++loop) {
1557 				if (fault_instruction & (1<<loop))
1558 					++count;
1559 			}
1560 			DFC_PRINTF(("%d registers used\n", count));
1561 			DFC_PRINTF(("Corrected r%d by %d bytes ",
1562 				       base, count * 4));
1563 			if (fault_instruction & (1 << 23)) {
1564 				DFC_PRINTF(("down\n"));
1565 				registers[base] -= count * 4;
1566 			} else {
1567 				DFC_PRINTF(("up\n"));
1568 				registers[base] += count * 4;
1569 			}
1570 		}
1571 	} else if ((fault_instruction & 0x0e000000) == 0x0c000000) {
1572 		int base;
1573 		int offset;
1574 		int *registers = &frame->tf_r0;
1575 
1576 		/* REGISTER CORRECTION IS REQUIRED FOR THESE INSTRUCTIONS */
1577 
1578 		DFC_DISASSEMBLE(fault_pc);
1579 
1580 		/* Only need to fix registers if write back is turned on */
1581 
1582 		if ((fault_instruction & (1 << 21)) != 0) {
1583 			base = (fault_instruction >> 16) & 0x0f;
1584 			if (base == 13 &&
1585 			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
1586 				return ABORT_FIXUP_FAILED;
1587 			if (base == 15)
1588 				return ABORT_FIXUP_FAILED;
1589 
1590 			offset = (fault_instruction & 0xff) << 2;
1591 			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
1592 			if ((fault_instruction & (1 << 23)) != 0)
1593 				offset = -offset;
1594 			registers[base] += offset;
1595 			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
1596 		}
1597 	} else if ((fault_instruction & 0x0e000000) == 0x0c000000)
1598 		return ABORT_FIXUP_FAILED;	/* XXX unreachable: same test as above */
1599 
1600 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1601 
1602 		/* Ok an abort in SVC mode */
1603 
1604 		/*
1605 		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1606 		 * as the fault happened in svc mode but we need it in the
1607 		 * usr slot so we can treat the registers as an array of ints
1608 		 * during fixing.
1609 		 * NOTE: r15 (the PC) keeps its slot in this array, but
1610 		 * writeback to r15 is not allowed, so it is never fixed up.
1611 		 * Doing it like this is more efficient than trapping this
1612 		 * case in all possible locations in the prior fixup code.
1613 		 */
1614 
1615 		frame->tf_svc_lr = frame->tf_usr_lr;
1616 		frame->tf_usr_lr = saved_lr;
1617 
1618 		/*
1619 		 * Note the trapframe does not have the SVC r13 so a fault
1620 		 * from an instruction with writeback to r13 in SVC mode is
1621 		 * not allowed. This should not happen as the kstack is
1622 		 * always valid.
1623 		 */
1624 	}
1625 
1626 	return(ABORT_FIXUP_OK);
1627 }
1628 #endif	/* CPU_ARM2/250/3/6/7 */
1629 
1630 
1631 #if (defined(CPU_ARM6) && defined(ARM6_LATE_ABORT)) || defined(CPU_ARM7) || \
1632 	defined(CPU_ARM7TDMI)
1633 /*
1634  * "Late" (base updated) data abort fixup
1635  *
1636  * For ARM6 (in late-abort mode) and ARM7.
1637  *
1638  * In this model, all data-transfer instructions need fixing up.  We defer
1639  * LDM, STM, LDC and STC fixup to the early-abort handler.
1640  */
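/*
 * A sketch of the simplest case handled here: a post-indexed
 * "ldr r0, [r1], #4" that aborts leaves r1 advanced by 4 under the
 * base-updated model, so the immediate leg below subtracts the offset
 * again (the U bit selects the sign, as in the early-abort handler).
 */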
1641 int
1642 late_abort_fixup(arg)
1643 	void *arg;
1644 {
1645 	trapframe_t *frame = arg;
1646 	u_int fault_pc;
1647 	u_int fault_instruction;
1648 	int saved_lr = 0;
1649 
1650 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1651 
1652 		/* Ok an abort in SVC mode */
1653 
1654 		/*
1655 		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1656 		 * as the fault happened in svc mode but we need it in the
1657 		 * usr slot so we can treat the registers as an array of ints
1658 		 * during fixing.
1659 		 * NOTE: r15 (the PC) keeps its slot in this array, but
1660 		 * writeback to r15 is not allowed, so it is never fixed up.
1661 		 * Doing it like this is more efficient than trapping this
1662 		 * case in all possible locations in the following fixup code.
1663 		 */
1664 
1665 		saved_lr = frame->tf_usr_lr;
1666 		frame->tf_usr_lr = frame->tf_svc_lr;
1667 
1668 		/*
1669 		 * Note the trapframe does not have the SVC r13 so a fault
1670 		 * from an instruction with writeback to r13 in SVC mode is
1671 		 * not allowed. This should not happen as the kstack is
1672 		 * always valid.
1673 		 */
1674 	}
1675 
1676 	/* Get fault address and status from the CPU */
1677 
1678 	fault_pc = frame->tf_pc;
1679 	fault_instruction = *((volatile unsigned int *)fault_pc);
1680 
1681 	/* Decode the fault instruction and fix the registers as needed */
1682 
1683 	/* Was it a swap instruction? */
1684 
1685 	if ((fault_instruction & 0x0fb00ff0) == 0x01000090) {
1686 		DFC_DISASSEMBLE(fault_pc);
1687 	} else if ((fault_instruction & 0x0c000000) == 0x04000000) {
1688 
1689 		/* Was it an ldr/str instruction? */
1690 		/* This is for late abort only */
1691 
1692 		int base;
1693 		int offset;
1694 		int *registers = &frame->tf_r0;
1695 
1696 		DFC_DISASSEMBLE(fault_pc);
1697 
1698 		/* This is for late abort only */
1699 
1700 		if ((fault_instruction & (1 << 24)) == 0
1701 		    || (fault_instruction & (1 << 21)) != 0) {
1702 			/* post-indexed, or pre-indexed with writeback: base was updated */
1703 
1704 			base = (fault_instruction >> 16) & 0x0f;
1705 			if (base == 13 &&
1706 			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
1707 				return ABORT_FIXUP_FAILED;
1708 			if (base == 15)
1709 				return ABORT_FIXUP_FAILED;
1710 			DFC_PRINTF(("late abt fix: r%d=%08x : ",
1711 				       base, registers[base]));
1712 			if ((fault_instruction & (1 << 25)) == 0) {
1713 				/* Immediate offset - easy */
1714 
1715 				offset = fault_instruction & 0xfff;
1716 				if ((fault_instruction & (1 << 23)))
1717 					offset = -offset;
1718 				registers[base] += offset;
1719 				DFC_PRINTF(("imm=%08x ", offset));
1720 			} else {
1721 				/* offset is a shifted register */
1722 				int shift;
1723 
1724 				offset = fault_instruction & 0x0f;
1725 				if (offset == base)
1726 					return ABORT_FIXUP_FAILED;
1727 
1728 				/*
1729 				 * Register offset - harder: we have
1730 				 * to cope with shifts!
1731 				 */
1732 				offset = registers[offset];
1733 
1734 				if ((fault_instruction & (1 << 4)) == 0)
1735 					/* shift with amount */
1736 					shift = (fault_instruction >> 7) & 0x1f;
1737 				else {
1738 					/* shift with register */
1739 					if ((fault_instruction & (1 << 7)) != 0)
1740 						/* undefined for now so bail out */
1741 						return ABORT_FIXUP_FAILED;
1742 					shift = ((fault_instruction >> 8) & 0xf);
1743 					if (base == shift)
1744 						return ABORT_FIXUP_FAILED;
1745 					DFC_PRINTF(("shift reg=%d ", shift));
1746 					shift = registers[shift];
1747 				}
1748 				DFC_PRINTF(("shift=%08x ", shift));
1749 				switch (((fault_instruction >> 5) & 0x3)) {
1750 				case 0 : /* Logical left */
1751 					offset = (int)(((u_int)offset) << shift);
1752 					break;
1753 				case 1 : /* Logical Right */
1754 					if (shift == 0) shift = 32;
1755 					offset = (int)(((u_int)offset) >> shift);
1756 					break;
1757 				case 2 : /* Arithmetic Right */
1758 					if (shift == 0) shift = 32;
1759 					offset = (int)(((int)offset) >> shift);
1760 					break;
1761 				case 3 : /* Rotate right (ror or rrx) */
1762 					return ABORT_FIXUP_FAILED;
1763 					break;
1764 				}
1765 
1766 				DFC_PRINTF(("abt: fixed LDR/STR with "
1767 					       "register offset\n"));
1768 				if ((fault_instruction & (1 << 23)))
1769 					offset = -offset;
1770 				DFC_PRINTF(("offset=%08x ", offset));
1771 				registers[base] += offset;
1772 			}
1773 			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
1774 		}
1775 	}
1776 
1777 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1778 
1779 		/* Ok an abort in SVC mode */
1780 
1781 		/*
1782 		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1783 		 * as the fault happened in svc mode but we need it in the
1784 		 * usr slot so we can treat the registers as an array of ints
1785 		 * during fixing.
1786 		 * NOTE: r15 (the PC) keeps its slot in this array, but
1787 		 * writeback to r15 is not allowed, so it is never fixed up.
1788 		 * Doing it like this is more efficient than trapping this
1789 		 * case in all possible locations in the prior fixup code.
1790 		 */
1791 
1792 		frame->tf_svc_lr = frame->tf_usr_lr;
1793 		frame->tf_usr_lr = saved_lr;
1794 
1795 		/*
1796 		 * Note the trapframe does not have the SVC r13 so a fault
1797 		 * from an instruction with writeback to r13 in SVC mode is
1798 		 * not allowed. This should not happen as the kstack is
1799 		 * always valid.
1800 		 */
1801 	}
1802 
1803 	/*
1804 	 * Now let the early-abort fixup routine have a go, in case it
1805 	 * was an LDM, STM, LDC or STC that faulted.
1806 	 */
1807 
1808 	return early_abort_fixup(arg);
1809 }
1810 #endif	/* CPU_ARM6(LATE)/7/7TDMI */
1811 
1812 /*
1813  * CPU Setup code
1814  */
1815 
1816 #if defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) || \
1817 	defined(CPU_ARM8) || defined (CPU_ARM9) || defined (CPU_ARM9E) || \
1818 	defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
1819 	defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
1820 	defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425) || \
1821 	defined(CPU_ARM10) || defined(CPU_ARM11)
1822 
1823 #define IGN	0		/* ignore the option */
1824 #define OR	1		/* OR the value into the control word */
1825 #define BIC	2		/* clear (BIC) the value from the control word */
1826 
1827 struct cpu_option {
1828 	const char *co_name;	/* boot argument name */
1829 	int	co_falseop;	/* op applied when the option is false */
1830 	int	co_trueop;	/* op applied when the option is true */
1831 	int	co_value;	/* control register bit(s) affected */
1832 };
1833 
1834 static u_int parse_cpu_options __P((char *, struct cpu_option *, u_int));
1835 
1836 static u_int
1837 parse_cpu_options(args, optlist, cpuctrl)
1838 	char *args;
1839 	struct cpu_option *optlist;
1840 	u_int cpuctrl;
1841 {
1842 	int integer;
1843 
1844 	if (args == NULL)
1845 		return(cpuctrl);
1846 
1847 	while (optlist->co_name) {
1848 		if (get_bootconf_option(args, optlist->co_name,
1849 		    BOOTOPT_TYPE_BOOLEAN, &integer)) {
1850 			if (integer) {
1851 				if (optlist->co_trueop == OR)
1852 					cpuctrl |= optlist->co_value;
1853 				else if (optlist->co_trueop == BIC)
1854 					cpuctrl &= ~optlist->co_value;
1855 			} else {
1856 				if (optlist->co_falseop == OR)
1857 					cpuctrl |= optlist->co_value;
1858 				else if (optlist->co_falseop == BIC)
1859 					cpuctrl &= ~optlist->co_value;
1860 			}
1861 		}
1862 		++optlist;
1863 	}
1864 	return(cpuctrl);
1865 }
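/*
 * Illustrative sketch of how the option tables below are used: an
 * entry such as
 *
 *	{ "cpu.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE }
 *
 * makes parse_cpu_options() BIC (clear) the cache enable bit from
 * cpuctrl when "cpu.nocache=1" appears in the boot arguments, OR it
 * back in when "cpu.nocache=0" appears, and leave cpuctrl untouched
 * when the option is absent.
 */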
1866 #endif /* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 || CPU_ARM9 || CPU_ARM9E || CPU_SA110 || CPU_SA1100 || CPU_SA1110 || CPU_XSCALE_* || CPU_ARM10 || CPU_ARM11 */
1867 
1868 #if defined (CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) \
1869 	|| defined(CPU_ARM8)
1870 struct cpu_option arm678_options[] = {
1871 #ifdef COMPAT_12
1872 	{ "nocache",		IGN, BIC, CPU_CONTROL_IDC_ENABLE },
1873 	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
1874 #endif	/* COMPAT_12 */
1875 	{ "cpu.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
1876 	{ "cpu.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
1877 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
1878 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
1879 	{ NULL,			IGN, IGN, 0 }
1880 };
1881 
1882 #endif	/* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 */
1883 
1884 #ifdef CPU_ARM6
1885 struct cpu_option arm6_options[] = {
1886 	{ "arm6.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
1887 	{ "arm6.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
1888 	{ "arm6.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
1889 	{ "arm6.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
1890 	{ NULL,			IGN, IGN, 0 }
1891 };
1892 
1893 void
1894 arm6_setup(args)
1895 	char *args;
1896 {
1897 	int cpuctrl, cpuctrlmask;
1898 
1899 	/* Set up default control register bits */
1900 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1901 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1902 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
1903 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1904 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1905 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
1906 		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE
1907 		 | CPU_CONTROL_AFLT_ENABLE;
1908 
1909 #ifdef ARM6_LATE_ABORT
1910 	cpuctrl |= CPU_CONTROL_LABT_ENABLE;
1911 #endif	/* ARM6_LATE_ABORT */
1912 
1913 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1914 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1915 #endif
1916 
1917 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1918 	cpuctrl = parse_cpu_options(args, arm6_options, cpuctrl);
1919 
1920 #ifdef __ARMEB__
1921 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1922 #endif
1923 
1924 	/* Clear out the cache */
1925 	cpu_idcache_wbinv_all();
1926 
1927 	/* Set the control register */
1928 	curcpu()->ci_ctrl = cpuctrl;
1929 	cpu_control(0xffffffff, cpuctrl);
1930 }
1931 #endif	/* CPU_ARM6 */
1932 
1933 #ifdef CPU_ARM7
1934 struct cpu_option arm7_options[] = {
1935 	{ "arm7.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
1936 	{ "arm7.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
1937 	{ "arm7.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
1938 	{ "arm7.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
1939 #ifdef COMPAT_12
1940 	{ "fpaclk2",		BIC, OR,  CPU_CONTROL_CPCLK },
1941 #endif	/* COMPAT_12 */
1942 	{ "arm700.fpaclk",	BIC, OR,  CPU_CONTROL_CPCLK },
1943 	{ NULL,			IGN, IGN, 0 }
1944 };
1945 
1946 void
1947 arm7_setup(args)
1948 	char *args;
1949 {
1950 	int cpuctrl, cpuctrlmask;
1951 
1952 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1953 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1954 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
1955 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1956 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1957 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
1958 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_LABT_ENABLE
1959 		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE
1960 		 | CPU_CONTROL_AFLT_ENABLE;
1961 
1962 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1963 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1964 #endif
1965 
1966 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1967 	cpuctrl = parse_cpu_options(args, arm7_options, cpuctrl);
1968 
1969 #ifdef __ARMEB__
1970 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1971 #endif
1972 
1973 	/* Clear out the cache */
1974 	cpu_idcache_wbinv_all();
1975 
1976 	/* Set the control register */
1977 	curcpu()->ci_ctrl = cpuctrl;
1978 	cpu_control(0xffffffff, cpuctrl);
1979 }
1980 #endif	/* CPU_ARM7 */
1981 
1982 #ifdef CPU_ARM7TDMI
1983 struct cpu_option arm7tdmi_options[] = {
1984 	{ "arm7.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
1985 	{ "arm7.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
1986 	{ "arm7.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
1987 	{ "arm7.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
1988 #ifdef COMPAT_12
1989 	{ "fpaclk2",		BIC, OR,  CPU_CONTROL_CPCLK },
1990 #endif	/* COMPAT_12 */
1991 	{ "arm700.fpaclk",	BIC, OR,  CPU_CONTROL_CPCLK },
1992 	{ NULL,			IGN, IGN, 0 }
1993 };
1994 
1995 void
1996 arm7tdmi_setup(args)
1997 	char *args;
1998 {
1999 	int cpuctrl;
2000 
2001 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2002 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2003 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
2004 
2005 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
2006 	cpuctrl = parse_cpu_options(args, arm7tdmi_options, cpuctrl);
2007 
2008 #ifdef __ARMEB__
2009 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2010 #endif
2011 
2012 	/* Clear out the cache */
2013 	cpu_idcache_wbinv_all();
2014 
2015 	/* Set the control register */
2016 	curcpu()->ci_ctrl = cpuctrl;
2017 	cpu_control(0xffffffff, cpuctrl);
2018 }
2019 #endif	/* CPU_ARM7TDMI */
2020 
2021 #ifdef CPU_ARM8
2022 struct cpu_option arm8_options[] = {
2023 	{ "arm8.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
2024 	{ "arm8.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
2025 	{ "arm8.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2026 	{ "arm8.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2027 #ifdef COMPAT_12
2028 	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2029 #endif	/* COMPAT_12 */
2030 	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2031 	{ "arm8.branchpredict",	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2032 	{ NULL,			IGN, IGN, 0 }
2033 };
2034 
2035 void
2036 arm8_setup(args)
2037 	char *args;
2038 {
2039 	int integer;
2040 	int cpuctrl, cpuctrlmask;
2041 	int clocktest;
2042 	int setclock = 0;
2043 
2044 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2045 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2046 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
2047 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2048 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2049 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
2050 		 | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_ROM_ENABLE
2051 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE;
2052 
2053 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2054 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2055 #endif
2056 
2057 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
2058 	cpuctrl = parse_cpu_options(args, arm8_options, cpuctrl);
2059 
2060 #ifdef __ARMEB__
2061 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2062 #endif
2063 
2064 	/* Get clock configuration */
2065 	clocktest = arm8_clock_config(0, 0) & 0x0f;
2066 
2067 	/* Special ARM8 clock and test configuration */
2068 	if (get_bootconf_option(args, "arm8.clock.reset", BOOTOPT_TYPE_BOOLEAN, &integer)) {
2069 		clocktest = 0;
2070 		setclock = 1;
2071 	}
2072 	if (get_bootconf_option(args, "arm8.clock.dynamic", BOOTOPT_TYPE_BOOLEAN, &integer)) {
2073 		if (integer)
2074 			clocktest |= 0x01;
2075 		else
2076 			clocktest &= ~(0x01);
2077 		setclock = 1;
2078 	}
2079 	if (get_bootconf_option(args, "arm8.clock.sync", BOOTOPT_TYPE_BOOLEAN, &integer)) {
2080 		if (integer)
2081 			clocktest |= 0x02;
2082 		else
2083 			clocktest &= ~(0x02);
2084 		setclock = 1;
2085 	}
2086 	if (get_bootconf_option(args, "arm8.clock.fast", BOOTOPT_TYPE_BININT, &integer)) {
2087 		clocktest = (clocktest & ~0x0c) | ((integer & 3) << 2);
2088 		setclock = 1;
2089 	}
2090 	if (get_bootconf_option(args, "arm8.test", BOOTOPT_TYPE_BININT, &integer)) {
2091 		clocktest |= (integer & 7) << 5;
2092 		setclock = 1;
2093 	}
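	/*
	 * Layout of the clock/test word assembled above (as implied by
	 * the shifts): bit 0 dynamic clock switching, bit 1 synchronous
	 * mode, bits 3:2 the fast clock select, bits 7:5 the test bits.
	 */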
2094 
2095 	/* Clear out the cache */
2096 	cpu_idcache_wbinv_all();
2097 
2098 	/* Set the control register */
2099 	curcpu()->ci_ctrl = cpuctrl;
2100 	cpu_control(0xffffffff, cpuctrl);
2101 
2102 	/* Set the clock/test register */
2103 	if (setclock)
2104 		arm8_clock_config(0x7f, clocktest);
2105 }
2106 #endif	/* CPU_ARM8 */
2107 
2108 #ifdef CPU_ARM9
2109 struct cpu_option arm9_options[] = {
2110 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2111 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2112 	{ "arm9.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2113 	{ "arm9.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2114 	{ "arm9.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2115 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2116 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2117 	{ "arm9.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2118 	{ NULL,			IGN, IGN, 0 }
2119 };
2120 
2121 void
2122 arm9_setup(args)
2123 	char *args;
2124 {
2125 	int cpuctrl, cpuctrlmask;
2126 
2127 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2128 	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2129 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2130 	    | CPU_CONTROL_WBUF_ENABLE;
2131 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2132 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2133 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2134 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2135 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2136 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
2137 		 | CPU_CONTROL_ROUNDROBIN;
2138 
2139 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2140 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2141 #endif
2142 
2143 	cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);
2144 
2145 #ifdef __ARMEB__
2146 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2147 #endif
2148 
2149 	if (vector_page == ARM_VECTORS_HIGH)
2150 		cpuctrl |= CPU_CONTROL_VECRELOC;
2151 
2152 	/* Clear out the cache */
2153 	cpu_idcache_wbinv_all();
2154 
2155 	/* Set the control register */
2156 	curcpu()->ci_ctrl = cpuctrl;
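	/*
	 * Unlike the 0xffffffff calls in the other setup routines, only
	 * the bits named in cpuctrlmask are modified here; any other
	 * control register bits are left as the CPU reset them.
	 */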
2157 	cpu_control(cpuctrlmask, cpuctrl);
2159 }
2160 #endif	/* CPU_ARM9 */
2161 
2162 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
2163 struct cpu_option arm10_options[] = {
2164 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2165 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2166 	{ "arm10.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2167 	{ "arm10.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2168 	{ "arm10.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2169 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2170 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2171 	{ "arm10.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2172 	{ NULL,			IGN, IGN, 0 }
2173 };
2174 
2175 void
2176 arm10_setup(args)
2177 	char *args;
2178 {
2179 	int cpuctrl, cpuctrlmask;
2180 
2181 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2182 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2183 	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
2184 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2185 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2186 	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2187 	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2188 	    | CPU_CONTROL_BPRD_ENABLE
2189 	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
2190 
2191 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2192 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2193 #endif
2194 
2195 	cpuctrl = parse_cpu_options(args, arm10_options, cpuctrl);
2196 
2197 #ifdef __ARMEB__
2198 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2199 #endif
2200 
2201 	/* Clear out the cache */
2202 	cpu_idcache_wbinv_all();
2203 
2204 	/* Invalidate the I and D caches again to make really sure.  */
2205 	__asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
2206 
2207 	/* Set the control register */
2208 	curcpu()->ci_ctrl = cpuctrl;
2209 	cpu_control(0xffffffff, cpuctrl);
2210 
2211 	/* And again. */
2212 	cpu_idcache_wbinv_all();
2213 }
2214 #endif	/* CPU_ARM9E || CPU_ARM10 */
2215 
2216 #ifdef CPU_ARM11
2217 struct cpu_option arm11_options[] = {
2218 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2219 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2220 	{ "arm11.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2221 	{ "arm11.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2222 	{ "arm11.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2223 	{ NULL,			IGN, IGN, 0 }
2224 };
2225 
2226 void
2227 arm11_setup(args)
2228 	char *args;
2229 {
2230 	int cpuctrl, cpuctrlmask;
2231 
2232 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2233 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2234 	    /* | CPU_CONTROL_BPRD_ENABLE */;
2235 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2236 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2237 	    | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BPRD_ENABLE
2238 	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2239 	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
2240 
2241 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2242 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2243 #endif
2244 
2245 	cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);
2246 
2247 #ifdef __ARMEB__
2248 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2249 #endif
2250 
2251 	/* Clear out the cache */
2252 	cpu_idcache_wbinv_all();
2253 
2254 	/* Invalidate the I and D caches again to make really sure.  */
2255 	__asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
2256 
2257 	/* Set the control register */
2258 	curcpu()->ci_ctrl = cpuctrl;
2259 	cpu_control(0xffffffff, cpuctrl);
2260 
2261 	/* And again. */
2262 	cpu_idcache_wbinv_all();
2263 }
2264 #endif	/* CPU_ARM11 */
2265 
2266 #ifdef CPU_SA110
2267 struct cpu_option sa110_options[] = {
2268 #ifdef COMPAT_12
2269 	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2270 	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
2271 #endif	/* COMPAT_12 */
2272 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2273 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2274 	{ "sa110.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2275 	{ "sa110.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2276 	{ "sa110.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2277 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2278 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2279 	{ "sa110.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2280 	{ NULL,			IGN, IGN, 0 }
2281 };
2282 
2283 void
2284 sa110_setup(args)
2285 	char *args;
2286 {
2287 	int cpuctrl, cpuctrlmask;
2288 
2289 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2290 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2291 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2292 		 | CPU_CONTROL_WBUF_ENABLE;
2293 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2294 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2295 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2296 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2297 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2298 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
2299 		 | CPU_CONTROL_CPCLK;
2300 
2301 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2302 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2303 #endif
2304 
2305 	cpuctrl = parse_cpu_options(args, sa110_options, cpuctrl);
2306 
2307 #ifdef __ARMEB__
2308 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2309 #endif
2310 
2311 	/* Clear out the cache */
2312 	cpu_idcache_wbinv_all();
2313 
2314 	/* Set the control register */
2315 	curcpu()->ci_ctrl = cpuctrl;
2316 /*	cpu_control(cpuctrlmask, cpuctrl);*/
2317 	cpu_control(0xffffffff, cpuctrl);
2318 
2319 	/*
2320 	 * Enable clock switching.  Note that this neither reads nor
2321 	 * writes r0; the register is only there to make the asm valid.
2322 	 */
2323 	__asm ("mcr 15, 0, r0, c15, c1, 2");
2324 }
2325 #endif	/* CPU_SA110 */
2326 
2327 #if defined(CPU_SA1100) || defined(CPU_SA1110)
2328 struct cpu_option sa11x0_options[] = {
2329 #ifdef COMPAT_12
2330 	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2331 	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
2332 #endif	/* COMPAT_12 */
2333 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2334 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2335 	{ "sa11x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2336 	{ "sa11x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2337 	{ "sa11x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2338 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2339 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2340 	{ "sa11x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2341 	{ NULL,			IGN, IGN, 0 }
2342 };
2343 
2344 void
2345 sa11x0_setup(args)
2346 	char *args;
2347 {
2348 	int cpuctrl, cpuctrlmask;
2349 
2350 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2351 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2352 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2353 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
2354 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2355 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2356 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2357 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2358 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2359 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
2360 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
2361 
2362 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2363 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2364 #endif
2365 
2366 	cpuctrl = parse_cpu_options(args, sa11x0_options, cpuctrl);
2367 
2368 #ifdef __ARMEB__
2369 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2370 #endif
2371 
2372 	if (vector_page == ARM_VECTORS_HIGH)
2373 		cpuctrl |= CPU_CONTROL_VECRELOC;
2374 
2375 	/* Clear out the cache */
2376 	cpu_idcache_wbinv_all();
2377 
2378 	/* Set the control register */
2379 	curcpu()->ci_ctrl = cpuctrl;
2380 	cpu_control(0xffffffff, cpuctrl);
2381 }
2382 #endif	/* CPU_SA1100 || CPU_SA1110 */
2383 
2384 #if defined(CPU_IXP12X0)
2385 struct cpu_option ixp12x0_options[] = {
2386 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2387 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2388 	{ "ixp12x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2389 	{ "ixp12x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2390 	{ "ixp12x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2391 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2392 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2393 	{ "ixp12x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2394 	{ NULL,			IGN, IGN, 0 }
2395 };
2396 
2397 void
2398 ixp12x0_setup(args)
2399 	char *args;
2400 {
2401 	int cpuctrl, cpuctrlmask;
2402 
2404 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE
2405 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_SYST_ENABLE
2406 		 | CPU_CONTROL_IC_ENABLE;
2407 
2408 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_AFLT_ENABLE
2409 		 | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE
2410 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_SYST_ENABLE
2411 		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_IC_ENABLE
2412 		 | CPU_CONTROL_VECRELOC;
2413 
2414 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2415 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2416 #endif
2417 
2418 	cpuctrl = parse_cpu_options(args, ixp12x0_options, cpuctrl);
2419 
2420 #ifdef __ARMEB__
2421 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2422 #endif
2423 
2424 	if (vector_page == ARM_VECTORS_HIGH)
2425 		cpuctrl |= CPU_CONTROL_VECRELOC;
2426 
2427 	/* Clear out the cache */
2428 	cpu_idcache_wbinv_all();
2429 
2430 	/* Set the control register */
2431 	curcpu()->ci_ctrl = cpuctrl;
2432 	/* cpu_control(0xffffffff, cpuctrl); */
2433 	cpu_control(cpuctrlmask, cpuctrl);
2434 }
2435 #endif /* CPU_IXP12X0 */
2436 
2437 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
2438     defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425)
2439 struct cpu_option xscale_options[] = {
2440 #ifdef COMPAT_12
2441 	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2442 	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2443 #endif	/* COMPAT_12 */
2444 	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2445 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2446 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2447 	{ "xscale.branchpredict", BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2448 	{ "xscale.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2449 	{ "xscale.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2450 	{ "xscale.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2451 	{ NULL,			IGN, IGN, 0 }
2452 };
2453 
2454 void
2455 xscale_setup(args)
2456 	char *args;
2457 {
2458 	uint32_t auxctl;
2459 	int cpuctrl, cpuctrlmask;
2460 
2461 	/*
2462 	 * The XScale Write Buffer is always enabled.  Our option
2463 	 * is to enable/disable coalescing.  Note that bits 6:3 of
2464 	 * the control register must always be set to 1.
2465 	 */
2466 
2467 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2468 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2469 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2470 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
2471 		 | CPU_CONTROL_BPRD_ENABLE;
2472 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2473 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2474 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2475 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2476 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2477 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
2478 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
2479 
2480 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2481 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2482 #endif
2483 
2484 	cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl);
2485 
2486 #ifdef __ARMEB__
2487 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2488 #endif
2489 
2490 	if (vector_page == ARM_VECTORS_HIGH)
2491 		cpuctrl |= CPU_CONTROL_VECRELOC;
2492 
2493 	/* Clear out the cache */
2494 	cpu_idcache_wbinv_all();
2495 
2496 	/*
2497 	 * Set the control register.  Note that bits 6:3 must always
2498 	 * be set to 1.
2499 	 */
2500 	curcpu()->ci_ctrl = cpuctrl;
2501 /*	cpu_control(cpuctrlmask, cpuctrl);*/
2502 	cpu_control(0xffffffff, cpuctrl);
2503 
2504 	/* Make sure write coalescing is turned on */
2505 	__asm volatile("mrc p15, 0, %0, c1, c0, 1"
2506 		: "=r" (auxctl));
2507 #ifdef XSCALE_NO_COALESCE_WRITES
2508 	auxctl |= XSCALE_AUXCTL_K;
2509 #else
2510 	auxctl &= ~XSCALE_AUXCTL_K;
2511 #endif
2512 	__asm volatile("mcr p15, 0, %0, c1, c0, 1"
2513 		: : "r" (auxctl));
2514 }
2515 #endif	/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || __CPU_XSCALE_PXA2XX || CPU_XSCALE_IXP425 */
2516