xref: /netbsd-src/sys/arch/arm/arm/cpufunc.c (revision 0dd5877adce57db949b16ae963e5a6831cccdfb6)
1 /*	$NetBSD: cpufunc.c,v 1.29 2002/01/30 00:37:18 thorpej Exp $	*/
2 
3 /*
4  * arm7tdmi support code Copyright (c) 2001 John Fremlin
5  * arm8 support code Copyright (c) 1997 ARM Limited
6  * arm8 support code Copyright (c) 1997 Causality Limited
7  * arm9 support code Copyright (C) 2001 ARM Ltd
8  * Copyright (c) 1997 Mark Brinicombe.
9  * Copyright (c) 1997 Causality Limited
10  * All rights reserved.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. All advertising materials mentioning features or use of this software
21  *    must display the following acknowledgement:
22  *	This product includes software developed by Causality Limited.
23  * 4. The name of Causality Limited may not be used to endorse or promote
24  *    products derived from this software without specific prior written
25  *    permission.
26  *
27  * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
28  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
29  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
30  * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
31  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
32  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
33  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37  * SUCH DAMAGE.
38  *
39  * RiscBSD kernel project
40  *
41  * cpufuncs.c
42  *
43  * C functions for supporting CPU / MMU / TLB specific operations.
44  *
45  * Created      : 30/01/97
46  */
47 
48 #include "opt_compat_netbsd.h"
49 #include "opt_cputypes.h"
50 #include "opt_cpuoptions.h"
51 #include "opt_pmap_debug.h"
52 
53 #include <sys/types.h>
54 #include <sys/param.h>
55 #include <sys/systm.h>
56 #include <machine/cpu.h>
57 #include <machine/bootconfig.h>
58 #include <arch/arm/arm/disassem.h>
59 
60 #include <arm/cpufunc.h>
61 
62 #ifdef CPU_XSCALE
63 #include <arm/xscale/i80200reg.h>
64 #include <arm/xscale/i80200var.h>
65 #endif
66 
/* PRIMARY CACHE VARIABLES */

/*
 * Geometry of the primary instruction cache, filled in by
 * get_cachetype() from the CP15 cache-type register on CPUs
 * that implement it; otherwise left at the BSS defaults (0).
 */
int	arm_picache_size;
int	arm_picache_line_size;
int	arm_picache_ways;

/* Primary data cache geometry (also used for a unified cache). */
int	arm_pdcache_size;	/* and unified */
int	arm_pdcache_line_size;
int	arm_pdcache_ways;

/* Cache type field and unified (S bit clear) flag from CP15. */
int	arm_pcache_type;
int	arm_pcache_unified;

/* D-cache line size and (line size - 1) mask for alignment tests. */
int	arm_dcache_align;
int	arm_dcache_align_mask;
81 
#ifdef CPU_ARM3
/*
 * ARM3 function vector: domain/TTB/fault entries are unimplemented
 * (NULL), all TLB operations are no-ops, and every cache maintenance
 * entry is backed by the single arm3_cache_flush primitive.
 */
struct cpu_functions arm3_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	arm3_control,			/* control		*/
	NULL,				/* domain		*/
	NULL,				/* setttb		*/
	NULL,				/* faultstatus		*/
	NULL,				/* faultaddress		*/

	/* TLB functions */

	cpufunc_nullop,			/* tlb_flushID		*/
	(void *)cpufunc_nullop,		/* tlb_flushID_SE	*/
	cpufunc_nullop,			/* tlb_flushI		*/
	(void *)cpufunc_nullop,		/* tlb_flushI_SE	*/
	cpufunc_nullop,			/* tlb_flushD		*/
	(void *)cpufunc_nullop,		/* tlb_flushD_SE	*/

	/* Cache operations */

	cpufunc_nullop,			/* icache_sync_all	*/
	(void *) cpufunc_nullop,	/* icache_sync_range	*/

	arm3_cache_flush,		/* dcache_wbinv_all	*/
	(void *)arm3_cache_flush,	/* dcache_wbinv_range	*/
	(void *)arm3_cache_flush,	/* dcache_inv_range	*/
	(void *)cpufunc_nullop,		/* dcache_wb_range	*/

	arm3_cache_flush,		/* idcache_wbinv_all	*/
	(void *)arm3_cache_flush,	/* idcache_wbinv_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	cpufunc_nullop,			/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	early_abort_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	NULL,				/* context_switch	*/

	(void *)cpufunc_nullop		/* cpu setup		*/

};
#endif	/* CPU_ARM3 */
139 
#ifdef CPU_ARM6
/*
 * ARM6 function vector: shares the arm67_* TTB/TLB/cache/context
 * routines with the ARM7 vector below.  The data-abort fixup model
 * is selected at compile time via ARM6_LATE_ABORT.
 */
struct cpu_functions arm6_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	arm67_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	arm67_tlb_flush,		/* tlb_flushID		*/
	arm67_tlb_purge,		/* tlb_flushID_SE	*/
	arm67_tlb_flush,		/* tlb_flushI		*/
	arm67_tlb_purge,		/* tlb_flushI_SE	*/
	arm67_tlb_flush,		/* tlb_flushD		*/
	arm67_tlb_purge,		/* tlb_flushD_SE	*/

	/* Cache operations */

	cpufunc_nullop,			/* icache_sync_all	*/
	(void *) cpufunc_nullop,	/* icache_sync_range	*/

	arm67_cache_flush,		/* dcache_wbinv_all	*/
	(void *)arm67_cache_flush,	/* dcache_wbinv_range	*/
	(void *)arm67_cache_flush,	/* dcache_inv_range	*/
	(void *)cpufunc_nullop,		/* dcache_wb_range	*/

	arm67_cache_flush,		/* idcache_wbinv_all	*/
	(void *)arm67_cache_flush,	/* idcache_wbinv_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	cpufunc_nullop,			/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

#ifdef ARM6_LATE_ABORT
	late_abort_fixup,		/* dataabt_fixup	*/
#else
	early_abort_fixup,		/* dataabt_fixup	*/
#endif
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm67_context_switch,		/* context_switch	*/

	arm6_setup			/* cpu setup		*/

};
#endif	/* CPU_ARM6 */
201 
#ifdef CPU_ARM7
/*
 * ARM7 function vector: same arm67_* routines as the ARM6 vector,
 * but data aborts always go through late_abort_fixup and CPU setup
 * is arm7_setup.
 */
struct cpu_functions arm7_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	arm67_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	arm67_tlb_flush,		/* tlb_flushID		*/
	arm67_tlb_purge,		/* tlb_flushID_SE	*/
	arm67_tlb_flush,		/* tlb_flushI		*/
	arm67_tlb_purge,		/* tlb_flushI_SE	*/
	arm67_tlb_flush,		/* tlb_flushD		*/
	arm67_tlb_purge,		/* tlb_flushD_SE	*/

	/* Cache operations */

	cpufunc_nullop,			/* icache_sync_all	*/
	(void *)cpufunc_nullop,		/* icache_sync_range	*/

	arm67_cache_flush,		/* dcache_wbinv_all	*/
	(void *)arm67_cache_flush,	/* dcache_wbinv_range	*/
	(void *)arm67_cache_flush,	/* dcache_inv_range	*/
	(void *)cpufunc_nullop,		/* dcache_wb_range	*/

	arm67_cache_flush,		/* idcache_wbinv_all	*/
	(void *)arm67_cache_flush,	/* idcache_wbinv_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	cpufunc_nullop,			/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	late_abort_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm67_context_switch,		/* context_switch	*/

	arm7_setup			/* cpu setup		*/

};
#endif	/* CPU_ARM7 */
259 
#ifdef CPU_ARM7TDMI
/*
 * ARM7TDMI function vector: has its own TTB/TLB/cache/context
 * routines; the single flushID primitives back all of the TLB and
 * cache entry points.  Uses the late data-abort model.
 */
struct cpu_functions arm7tdmi_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	arm7tdmi_setttb,		/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	arm7tdmi_tlb_flushID,		/* tlb_flushID		*/
	arm7tdmi_tlb_flushID_SE,	/* tlb_flushID_SE	*/
	arm7tdmi_tlb_flushID,		/* tlb_flushI		*/
	arm7tdmi_tlb_flushID_SE,	/* tlb_flushI_SE	*/
	arm7tdmi_tlb_flushID,		/* tlb_flushD		*/
	arm7tdmi_tlb_flushID_SE,	/* tlb_flushD_SE	*/

	/* Cache operations */

	cpufunc_nullop,			/* icache_sync_all	*/
	(void *)cpufunc_nullop,		/* icache_sync_range	*/

	arm7tdmi_cache_flushID,		/* dcache_wbinv_all	*/
	(void *)arm7tdmi_cache_flushID,	/* dcache_wbinv_range	*/
	(void *)arm7tdmi_cache_flushID,	/* dcache_inv_range	*/
	(void *)cpufunc_nullop,		/* dcache_wb_range	*/

	arm7tdmi_cache_flushID,		/* idcache_wbinv_all	*/
	(void *)arm7tdmi_cache_flushID,	/* idcache_wbinv_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	cpufunc_nullop,			/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	late_abort_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm7tdmi_context_switch,	/* context_switch	*/

	arm7tdmi_setup			/* cpu setup		*/

};
#endif	/* CPU_ARM7TDMI */
317 
#ifdef CPU_ARM8
/*
 * ARM8 function vector: cache maintenance uses the purgeID/cleanID
 * primitives; note that dcache_inv_range is approximated with a
 * purge (write-back + invalidate), flagged XXX below.
 */
struct cpu_functions arm8_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	arm8_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	arm8_tlb_flushID,		/* tlb_flushID		*/
	arm8_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	arm8_tlb_flushID,		/* tlb_flushI		*/
	arm8_tlb_flushID_SE,		/* tlb_flushI_SE	*/
	arm8_tlb_flushID,		/* tlb_flushD		*/
	arm8_tlb_flushID_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	cpufunc_nullop,			/* icache_sync_all	*/
	(void *)cpufunc_nullop,		/* icache_sync_range	*/

	arm8_cache_purgeID,		/* dcache_wbinv_all	*/
	(void *)arm8_cache_purgeID,	/* dcache_wbinv_range	*/
/*XXX*/	(void *)arm8_cache_purgeID,	/* dcache_inv_range	*/
	(void *)arm8_cache_cleanID,	/* dcache_wb_range	*/

	arm8_cache_purgeID,		/* idcache_wbinv_all	*/
	(void *)arm8_cache_purgeID,	/* idcache_wbinv_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	cpufunc_nullop,			/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm8_context_switch,		/* context_switch	*/

	arm8_setup			/* cpu setup		*/
};
#endif	/* CPU_ARM8 */
374 
#ifdef CPU_ARM9
/*
 * ARM9 (ARM920T) function vector: ARMv4 common TLB primitives plus
 * ARM9-specific cache sync/flush routines.  set_cpufuncs() selects
 * write-through page-table cacheing for this CPU, so dcache_wb_range
 * can be a no-op.
 */
struct cpu_functions arm9_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	arm9_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm9_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	arm9_cache_syncI,		/* icache_sync_all	*/
	arm9_cache_syncI_rng,		/* icache_sync_range	*/

		/* ...cache in write-through mode... */
	arm9_cache_flushD,		/* dcache_wbinv_all	*/
	arm9_cache_flushD_rng,		/* dcache_wbinv_range	*/
	arm9_cache_flushD_rng,		/* dcache_inv_range	*/
	(void *)cpufunc_nullop,		/* dcache_wb_range	*/

	arm9_cache_flushID,		/* idcache_wbinv_all	*/
	arm9_cache_flushID_rng,		/* idcache_wbinv_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm9_context_switch,		/* context_switch	*/

	arm9_setup			/* cpu setup		*/

};
#endif /* CPU_ARM9 */
433 
#ifdef CPU_SA110
/*
 * SA-110/SA-11x0 (StrongARM) function vector: StrongARM cache
 * routines with ARMv4 common TLB primitives.  dcache_inv_range is
 * approximated with a purge (write-back + invalidate), flagged XXX.
 * set_cpufuncs() substitutes sa11x0_cpu_sleep for the SA-1100/1110.
 */
struct cpu_functions sa110_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	sa110_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	sa110_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	sa110_cache_syncI,		/* icache_sync_all	*/
	sa110_cache_syncI_rng,		/* icache_sync_range	*/

	sa110_cache_purgeD,		/* dcache_wbinv_all	*/
	sa110_cache_purgeD_rng,		/* dcache_wbinv_range	*/
/*XXX*/	sa110_cache_purgeD_rng,		/* dcache_inv_range	*/
	sa110_cache_cleanD_rng,		/* dcache_wb_range	*/

	sa110_cache_purgeID,		/* idcache_wbinv_all	*/
	sa110_cache_purgeID_rng,	/* idcache_wbinv_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	sa110_context_switch,		/* context_switch	*/

	sa110_setup			/* cpu setup		*/
};
#endif	/* CPU_SA110 */
490 
#ifdef CPU_XSCALE
/*
 * XScale (i80200) function vector: provides a real coprocessor-wait
 * (xscale_cpwait), a real CPU sleep routine, and a full set of
 * range-based cache operations.  set_cpufuncs() overrides
 * cf_dcache_inv_range on early steppings (see errata note there).
 */
struct cpu_functions xscale_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	xscale_cpwait,			/* cpwait		*/

	/* MMU functions */

	xscale_control,			/* control		*/
	cpufunc_domains,		/* domain		*/
	xscale_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	xscale_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	xscale_cache_syncI,		/* icache_sync_all	*/
	xscale_cache_syncI_rng,		/* icache_sync_range	*/

	xscale_cache_purgeD,		/* dcache_wbinv_all	*/
	xscale_cache_purgeD_rng,	/* dcache_wbinv_range	*/
	xscale_cache_flushD_rng,	/* dcache_inv_range	*/
	xscale_cache_cleanD_rng,	/* dcache_wb_range	*/

	xscale_cache_purgeID,		/* idcache_wbinv_all	*/
	xscale_cache_purgeID_rng,	/* idcache_wbinv_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	xscale_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	xscale_context_switch,		/* context_switch	*/

	xscale_setup			/* cpu setup		*/
};
#endif /* CPU_XSCALE */
547 
/*
 * Global constants also used by locore.s
 */

struct cpu_functions cpufuncs;	/* active vector, installed by set_cpufuncs() */
u_int cputype;			/* CPU id register, masked with CPU_ID_CPU_MASK */
u_int cpu_reset_needs_v4_MMU_disable;	/* flag used in locore.s */
555 
#if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) || \
    defined(CPU_SA110) || defined(CPU_XSCALE)
static void get_cachetype __P((void));

/*
 * Read the CP15 cache-type register (c0, opcode2 1) and decode it
 * into the global arm_p[id]cache_* variables.  If the CPU does not
 * implement the register the read returns the main ID register
 * instead, and we leave the cache geometry at its defaults.
 */
static void
get_cachetype()
{
	u_int ctype, isize, dsize;
	u_int multiplier;

	__asm __volatile("mrc p15, 0, %0, c0, c0, 1"
		: "=r" (ctype));

	/*
	 * ...and thus spake the ARM ARM:
	 *
	 * If an <opcode2> value corresponding to an unimplemented or
	 * reserved ID register is encountered, the System Control
	 * processor returns the value of the main ID register.
	 */
	if (ctype == cpufunc_id())
		goto out;

	/* S bit clear means a unified (combined I/D) cache. */
	if ((ctype & CPU_CT_S) == 0)
		arm_pcache_unified = 1;

	/*
	 * If you want to know how this code works, go read the ARM ARM.
	 */

	arm_pcache_type = CPU_CT_CTYPE(ctype);

	if (arm_pcache_unified == 0) {
		isize = CPU_CT_ISIZE(ctype);
		multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
		arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
		if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
			if (isize & CPU_CT_xSIZE_M)
				arm_picache_line_size = 0; /* not present */
			else
				arm_picache_ways = 1; /* direct-mapped */
		} else {
			arm_picache_ways = multiplier <<
			    (CPU_CT_xSIZE_ASSOC(isize) - 1);
		}
		arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
	}

	dsize = CPU_CT_DSIZE(ctype);
	multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
	arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
	if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
		if (dsize & CPU_CT_xSIZE_M)
			arm_pdcache_line_size = 0; /* not present */
		else
			arm_pdcache_ways = 1; /* direct-mapped, not 0 ways */
	} else {
		arm_pdcache_ways = multiplier <<
		    (CPU_CT_xSIZE_ASSOC(dsize) - 1);
	}
	arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);

	arm_dcache_align = arm_pdcache_line_size;

 out:
	arm_dcache_align_mask = arm_dcache_align - 1;
}
#endif /* ARM7TDMI || ARM8 || ARM9 || SA110 || XSCALE */
624 
/*
 * Identify the CPU from cpufunc_id(), install the matching function
 * vector into the global `cpufuncs', note whether cpu_reset() must
 * perform the ARMv4 MMU-disable sequence, and probe cache geometry
 * where get_cachetype() is available.  Returns 0 on success or
 * ARCHITECTURE_NOT_PRESENT for an unrecognized CPU.
 *
 * Cannot panic here as we may not have a console yet ...
 */

int
set_cpufuncs()
{
	cputype = cpufunc_id();
	cputype &= CPU_ID_CPU_MASK;


#ifdef CPU_ARM3
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    (cputype & 0x00000f00) == 0x00000300) {
		cpufuncs = arm3_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;
		/* XXX Cache info? */
		arm_dcache_align_mask = -1;
		return 0;
	}
#endif	/* CPU_ARM3 */
#ifdef CPU_ARM6
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    (cputype & 0x00000f00) == 0x00000600) {
		cpufuncs = arm6_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;
		/* XXX Cache info? */
		arm_dcache_align_mask = -1;
		return 0;
	}
#endif	/* CPU_ARM6 */
#ifdef CPU_ARM7
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    CPU_ID_IS7(cputype) &&
	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V3) {
		cpufuncs = arm7_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;
		/* XXX Cache info? */
		arm_dcache_align_mask = -1;
		return 0;
	}
#endif	/* CPU_ARM7 */
#ifdef CPU_ARM7TDMI
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    CPU_ID_IS7(cputype) &&
	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V4T) {
		cpufuncs = arm7tdmi_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;
		get_cachetype();
		return 0;
	}
#endif
#ifdef CPU_ARM8
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    (cputype & 0x0000f000) == 0x00008000) {
		cpufuncs = arm8_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;	/* XXX correct? */
		get_cachetype();
		return 0;
	}
#endif	/* CPU_ARM8 */
#ifdef CPU_ARM9
	if (cputype == CPU_ID_ARM920T) {
		pte_cache_mode = PT_C;	/* Select write-through cacheing. */
		cpufuncs = arm9_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
		get_cachetype();
		return 0;
	}
#endif /* CPU_ARM9 */
#ifdef CPU_SA110
	if (cputype == CPU_ID_SA110 || cputype == CPU_ID_SA1100 ||
	    cputype == CPU_ID_SA1110) {
		cpufuncs = sa110_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it */
		get_cachetype();
		/*
		 * Enable the right variant of sleeping.
		 */
		if (cputype == CPU_ID_SA1100 ||
		    cputype == CPU_ID_SA1110)
			cpufuncs.cf_sleep = sa11x0_cpu_sleep;
		return 0;
	}
#endif	/* CPU_SA110 */
#ifdef CPU_XSCALE
	if (cputype == CPU_ID_I80200) {
		/* Stepping, needed for the errata workaround below. */
		int rev = cpufunc_id() & CPU_ID_REVISION_MASK;

		i80200_intr_init();

		/*
		 * Reset the Performance Monitoring Unit to a
		 * pristine state:
		 *	- CCNT, PMN0, PMN1 reset to 0
		 *	- overflow indications cleared
		 *	- all counters disabled
		 */
		__asm __volatile("mcr p14, 0, %0, c0, c0, 0"
			:
			: "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
			       PMNC_CC_IF));

#ifdef XSCALE_CCLKCFG
		/*
		 * Crank CCLKCFG to maximum legal value.
		 */
		__asm __volatile ("mcr p14, 0, %0, c6, c0, 0"
			:
			: "r" (XSCALE_CCLKCFG));
#endif

		/*
		 * XXX Disable ECC in the Bus Controller Unit; we
		 * don't really support it, yet.  Clear any pending
		 * error indications.
		 */
		__asm __volatile("mcr p13, 0, %0, c0, c1, 0"
			:
			: "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));

		pte_cache_mode = PT_C;	/* Select write-through cacheing. */
		cpufuncs = xscale_cpufuncs;

		/*
		 * i80200 errata: Step-A0 and A1 have a bug where
		 * D$ dirty bits are not cleared on "invalidate by
		 * address".
		 *
		 * Workaround: Clean cache line before invalidating.
		 */
		if (rev == 0 || rev == 1)
			cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng;

		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype();
		return 0;
	}
#endif /* CPU_XSCALE */
	/*
	 * Bzzzz. And the answer was ...
	 */
/*	panic("No support for this CPU type (%08x) in kernel", cputype);*/
	return(ARCHITECTURE_NOT_PRESENT);
}
770 
771 /*
772  * Fixup routines for data and prefetch aborts.
773  *
774  * Several compile time symbols are used
775  *
776  * DEBUG_FAULT_CORRECTION - Print debugging information during the
777  * correction of registers after a fault.
778  * ARM6_LATE_ABORT - ARM6 supports both early and late aborts
779  * when defined should use late aborts
780  */
781 
782 #if defined(DEBUG_FAULT_CORRECTION) && !defined(PMAP_DEBUG)
783 #error PMAP_DEBUG must be defined to use DEBUG_FAULT_CORRECTION
784 #endif
785 
786 /*
787  * Null abort fixup routine.
788  * For use when no fixup is required.
789  */
790 int
791 cpufunc_null_fixup(arg)
792 	void *arg;
793 {
794 	return(ABORT_FIXUP_OK);
795 }
796 
797 #if defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI)
798 #ifdef DEBUG_FAULT_CORRECTION
799 extern int pmap_debug_level;
800 #endif
801 #endif
802 
#if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3) || \
    defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI)
/*
 * "Early" data abort fixup.
 *
 * For ARM2, ARM2as, ARM3 and ARM6 (in early-abort mode).  Also used
 * indirectly by ARM6 (in late-abort mode) and ARM7[TDMI].
 *
 * In early aborts, we may have to fix up LDM, STM, LDC and STC.
 *
 * `arg' is the trapframe of the aborted instruction; returns
 * ABORT_FIXUP_OK or ABORT_FIXUP_FAILED.
 */
int
early_abort_fixup(arg)
	void *arg;
{
	trapframe_t *frame = arg;
	u_int fault_pc;		/* PC of the faulting instruction */
	u_int fault_instruction;
	int saved_lr = 0;

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
		 * as the fault happened in svc mode but we need it in the
		 * usr slot so we can treat the registers as an array of ints
		 * during fixing.
		 * NOTE: This PC is in the position but writeback is not
		 * allowed on r15.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the following fixup code.
		 */

		saved_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = frame->tf_svc_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed. This should not happen as the kstack is
		 * always valid.
		 */
	}

	/* Get fault address and status from the CPU */

	fault_pc = frame->tf_pc;
	fault_instruction = *((volatile unsigned int *)fault_pc);

	/* Decode the fault instruction and fix the registers as needed */

	/* Bits 27:25 == 100 selects LDM/STM (block data transfer). */
	if ((fault_instruction & 0x0e000000) == 0x08000000) {
		int base;
		int loop;
		int count;
		int *registers = &frame->tf_r0;

#ifdef DEBUG_FAULT_CORRECTION
		if (pmap_debug_level >= 0) {
			printf("LDM/STM\n");
			disassemble(fault_pc);
		}
#endif	/* DEBUG_FAULT_CORRECTION */
		/* Bit 21 is the W (writeback) bit: only then fix the base. */
		if (fault_instruction & (1 << 21)) {
#ifdef DEBUG_FAULT_CORRECTION
			if (pmap_debug_level >= 0)
				printf("This instruction must be corrected\n");
#endif	/* DEBUG_FAULT_CORRECTION */
			base = (fault_instruction >> 16) & 0x0f;
			if (base == 15)
				return ABORT_FIXUP_FAILED;
			/* Count registers transferred */
			count = 0;
			for (loop = 0; loop < 16; ++loop) {
				if (fault_instruction & (1<<loop))
					++count;
			}
#ifdef DEBUG_FAULT_CORRECTION
			if (pmap_debug_level >= 0) {
				printf("%d registers used\n", count);
				printf("Corrected r%d by %d bytes ", base, count * 4);
			}
#endif	/* DEBUG_FAULT_CORRECTION */
			/* Bit 23 selects transfer direction; undo writeback. */
			if (fault_instruction & (1 << 23)) {
#ifdef DEBUG_FAULT_CORRECTION
				if (pmap_debug_level >= 0)
					printf("down\n");
#endif	/* DEBUG_FAULT_CORRECTION */
				registers[base] -= count * 4;
			} else {
#ifdef DEBUG_FAULT_CORRECTION
				if (pmap_debug_level >= 0)
					printf("up\n");
#endif	/* DEBUG_FAULT_CORRECTION */
				registers[base] += count * 4;
			}
		}
	/* Bits 27:25 == 110 selects LDC/STC (coprocessor transfer). */
	} else if ((fault_instruction & 0x0e000000) == 0x0c000000) {
		int base;
		int offset;
		int *registers = &frame->tf_r0;

/* REGISTER CORRECTION IS REQUIRED FOR THESE INSTRUCTIONS */

#ifdef DEBUG_FAULT_CORRECTION
		if (pmap_debug_level >= 0)
			disassemble(fault_pc);
#endif	/* DEBUG_FAULT_CORRECTION */

/* Only need to fix registers if write back is turned on */

		if ((fault_instruction & (1 << 21)) != 0) {
			base = (fault_instruction >> 16) & 0x0f;
			if (base == 13 && (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
				return ABORT_FIXUP_FAILED;
			if (base == 15)
				return ABORT_FIXUP_FAILED;

			/* 8-bit word offset, scaled to bytes. */
			offset = (fault_instruction & 0xff) << 2;
#ifdef DEBUG_FAULT_CORRECTION
			if (pmap_debug_level >= 0)
				printf("r%d=%08x\n", base, registers[base]);
#endif	/* DEBUG_FAULT_CORRECTION */
			if ((fault_instruction & (1 << 23)) != 0)
				offset = -offset;
			registers[base] += offset;
#ifdef DEBUG_FAULT_CORRECTION
			if (pmap_debug_level >= 0)
				printf("r%d=%08x\n", base, registers[base]);
#endif	/* DEBUG_FAULT_CORRECTION */
		}
	/*
	 * NOTE(review): this condition duplicates the LDC/STC test
	 * handled above, so this branch is unreachable — confirm
	 * whether a different opcode mask was intended here.
	 */
	} else if ((fault_instruction & 0x0e000000) == 0x0c000000)
		return ABORT_FIXUP_FAILED;

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
		 * as the fault happened in svc mode but we need it in the
		 * usr slot so we can treat the registers as an array of ints
		 * during fixing.
		 * NOTE: This PC is in the position but writeback is not
		 * allowed on r15.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the prior fixup code.
		 */

		frame->tf_svc_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = saved_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed. This should not happen as the kstack is
		 * always valid.
		 */
	}

	return(ABORT_FIXUP_OK);
}
#endif	/* CPU_ARM2/250/3/6/7 */
967 
968 #if (defined(CPU_ARM6) && defined(ARM6_LATE_ABORT)) || defined(CPU_ARM7) || \
969 	defined(CPU_ARM7TDMI)
970 /*
971  * "Late" (base updated) data abort fixup
972  *
973  * For ARM6 (in late-abort mode) and ARM7.
974  *
975  * In this model, all data-transfer instructions need fixing up.  We defer
976  * LDM, STM, LDC and STC fixup to the early-abort handler.
977  */
978 int
979 late_abort_fixup(arg)
980 	void *arg;
981 {
982 	trapframe_t *frame = arg;
983 	u_int fault_pc;
984 	u_int fault_instruction;
985 	int saved_lr = 0;
986 
987 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
988 
989 		/* Ok an abort in SVC mode */
990 
991 		/*
992 		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
993 		 * as the fault happened in svc mode but we need it in the
994 		 * usr slot so we can treat the registers as an array of ints
995 		 * during fixing.
996 		 * NOTE: This PC is in the position but writeback is not
997 		 * allowed on r15.
998 		 * Doing it like this is more efficient than trapping this
999 		 * case in all possible locations in the following fixup code.
1000 		 */
1001 
1002 		saved_lr = frame->tf_usr_lr;
1003 		frame->tf_usr_lr = frame->tf_svc_lr;
1004 
1005 		/*
1006 		 * Note the trapframe does not have the SVC r13 so a fault
1007 		 * from an instruction with writeback to r13 in SVC mode is
1008 		 * not allowed. This should not happen as the kstack is
1009 		 * always valid.
1010 		 */
1011 	}
1012 
1013 	/* Get fault address and status from the CPU */
1014 
1015 	fault_pc = frame->tf_pc;
1016 	fault_instruction = *((volatile unsigned int *)fault_pc);
1017 
1018 	/* Decode the fault instruction and fix the registers as needed */
1019 
1020 	/* Was is a swap instruction ? */
1021 
1022 	if ((fault_instruction & 0x0fb00ff0) == 0x01000090) {
1023 #ifdef DEBUG_FAULT_CORRECTION
1024 		if (pmap_debug_level >= 0)
1025 			disassemble(fault_pc);
1026 #endif	/* DEBUG_FAULT_CORRECTION */
1027 	} else if ((fault_instruction & 0x0c000000) == 0x04000000) {
1028 
1029 		/* Was is a ldr/str instruction */
1030 		/* This is for late abort only */
1031 
1032 		int base;
1033 		int offset;
1034 		int *registers = &frame->tf_r0;
1035 
1036 #ifdef DEBUG_FAULT_CORRECTION
1037 		if (pmap_debug_level >= 0)
1038 			disassemble(fault_pc);
1039 #endif	/* DEBUG_FAULT_CORRECTION */
1040 
1041 		/* This is for late abort only */
1042 
1043 		if ((fault_instruction & (1 << 24)) == 0
1044 		    || (fault_instruction & (1 << 21)) != 0) {
1045 			base = (fault_instruction >> 16) & 0x0f;
1046 			if (base == 13 && (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
1047 				return ABORT_FIXUP_FAILED;
1048 			if (base == 15)
1049 				return ABORT_FIXUP_FAILED;
1050 #ifdef DEBUG_FAULT_CORRECTION
1051 			if (pmap_debug_level >=0)
1052 				printf("late abt fix: r%d=%08x ", base, registers[base]);
1053 #endif	/* DEBUG_FAULT_CORRECTION */
1054 			if ((fault_instruction & (1 << 25)) == 0) {
1055 				/* Immediate offset - easy */
1056 				offset = fault_instruction & 0xfff;
1057 				if ((fault_instruction & (1 << 23)))
1058 					offset = -offset;
1059 				registers[base] += offset;
1060 #ifdef DEBUG_FAULT_CORRECTION
1061 				if (pmap_debug_level >=0)
1062 					printf("imm=%08x ", offset);
1063 #endif	/* DEBUG_FAULT_CORRECTION */
1064 			} else {
1065 				int shift;
1066 
1067 				offset = fault_instruction & 0x0f;
1068 				if (offset == base)
1069 					return ABORT_FIXUP_FAILED;
1070 
1071 /* Register offset - hard we have to cope with shifts ! */
1072 				offset = registers[offset];
1073 
1074 				if ((fault_instruction & (1 << 4)) == 0)
1075 					shift = (fault_instruction >> 7) & 0x1f;
1076 				else {
1077 					if ((fault_instruction & (1 << 7)) != 0)
1078 						return ABORT_FIXUP_FAILED;
1079 					shift = ((fault_instruction >> 8) & 0xf);
1080 					if (base == shift)
1081 						return ABORT_FIXUP_FAILED;
1082 #ifdef DEBUG_FAULT_CORRECTION
1083 					if (pmap_debug_level >=0)
1084 						printf("shift reg=%d ", shift);
1085 #endif	/* DEBUG_FAULT_CORRECTION */
1086 					shift = registers[shift];
1087 				}
1088 #ifdef DEBUG_FAULT_CORRECTION
1089 				if (pmap_debug_level >=0)
1090 					printf("shift=%08x ", shift);
1091 #endif	/* DEBUG_FAULT_CORRECTION */
1092 				switch (((fault_instruction >> 5) & 0x3)) {
1093 				case 0 : /* Logical left */
1094 					offset = (int)(((u_int)offset) << shift);
1095 					break;
1096 				case 1 : /* Logical Right */
1097 					if (shift == 0) shift = 32;
1098 					offset = (int)(((u_int)offset) >> shift);
1099 					break;
1100 				case 2 : /* Arithmetic Right */
1101 					if (shift == 0) shift = 32;
1102 					offset = (int)(((int)offset) >> shift);
1103 					break;
1104 				case 3 : /* Rotate right */
1105 					return ABORT_FIXUP_FAILED;
1106 				}
1107 
1108 #ifdef DEBUG_FAULT_CORRECTION
1109 				if (pmap_debug_level >=0)
1110 					printf("abt: fixed LDR/STR with register offset\n");
1111 #endif	/* DEBUG_FAULT_CORRECTION */
1112 				if ((fault_instruction & (1 << 23)))
1113 					offset = -offset;
1114 #ifdef DEBUG_FAULT_CORRECTION
1115 				if (pmap_debug_level >=0)
1116 					printf("offset=%08x ", offset);
1117 #endif	/* DEBUG_FAULT_CORRECTION */
1118 				registers[base] += offset;
1119 			}
1120 #ifdef DEBUG_FAULT_CORRECTION
1121 			if (pmap_debug_level >=0)
1122 				printf("r%d=%08x\n", base, registers[base]);
1123 #endif	/* DEBUG_FAULT_CORRECTION */
1124 		}
1125 	}
1126 
1127 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1128 
1129 		/* Ok an abort in SVC mode */
1130 
1131 		/*
1132 		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1133 		 * as the fault happened in svc mode but we need it in the
1134 		 * usr slot so we can treat the registers as an array of ints
1135 		 * during fixing.
1136 		 * NOTE: This PC is in the position but writeback is not
1137 		 * allowed on r15.
1138 		 * Doing it like this is more efficient than trapping this
1139 		 * case in all possible locations in the prior fixup code.
1140 		 */
1141 
1142 		frame->tf_svc_lr = frame->tf_usr_lr;
1143 		frame->tf_usr_lr = saved_lr;
1144 
1145 		/*
1146 		 * Note the trapframe does not have the SVC r13 so a fault
1147 		 * from an instruction with writeback to r13 in SVC mode is
1148 		 * not allowed. This should not happen as the kstack is
1149 		 * always valid.
1150 		 */
1151 	}
1152 
1153 	/*
1154 	 * Now let the early-abort fixup routine have a go, in case it
1155 	 * was an LDM, STM, LDC or STC that faulted.
1156 	 */
1157 
1158 	return early_abort_fixup(arg);
1159 }
1160 #endif	/* CPU_ARM6(LATE)/7/7TDMI */
1161 
1162 /*
1163  * CPU Setup code
1164  */
1165 
1166 #if defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) || \
1167 	defined(CPU_ARM8) || defined (CPU_ARM9) || defined(CPU_SA110) || \
1168 	defined(CPU_XSCALE)
/*
 * Cached copy of the CPU control register value; computed by the
 * per-CPU *_setup() routines below and pushed to the hardware with
 * cpu_control().
 */
int cpuctrl;

/* Operations a boot option may apply to the control-register value. */
#define IGN	0	/* ignore: leave the bit(s) alone */
#define OR	1	/* set the bit(s) */
#define BIC	2	/* clear the bit(s) */

struct cpu_option {
	char	*co_name;	/* bootconf option name */
	int	co_falseop;	/* op applied when the option is false */
	int	co_trueop;	/* op applied when the option is true */
	int	co_value;	/* control register bit(s) affected */
};

static u_int parse_cpu_options __P((char *, struct cpu_option *, u_int));
1183 
1184 static u_int
1185 parse_cpu_options(args, optlist, cpuctrl)
1186 	char *args;
1187 	struct cpu_option *optlist;
1188 	u_int cpuctrl;
1189 {
1190 	int integer;
1191 
1192 	while (optlist->co_name) {
1193 		if (get_bootconf_option(args, optlist->co_name,
1194 		    BOOTOPT_TYPE_BOOLEAN, &integer)) {
1195 			if (integer) {
1196 				if (optlist->co_trueop == OR)
1197 					cpuctrl |= optlist->co_value;
1198 				else if (optlist->co_trueop == BIC)
1199 					cpuctrl &= ~optlist->co_value;
1200 			} else {
1201 				if (optlist->co_falseop == OR)
1202 					cpuctrl |= optlist->co_value;
1203 				else if (optlist->co_falseop == BIC)
1204 					cpuctrl &= ~optlist->co_value;
1205 			}
1206 		}
1207 		++optlist;
1208 	}
1209 	return(cpuctrl);
1210 }
1211 #endif /* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 || CPU_SA110 */
1212 
1213 #if defined (CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) \
1214 	|| defined(CPU_ARM8)
/*
 * Boot options shared by the ARM6, ARM7, ARM7TDMI and ARM8 setup
 * routines: combined I/D cache and write buffer enables.
 */
struct cpu_option arm678_options[] = {
#ifdef COMPAT_12
	/* Old (pre-1.3) option names, kept for compatibility. */
	{ "nocache",		IGN, BIC, CPU_CONTROL_IDC_ENABLE },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "cpu.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1226 
1227 #endif	/* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 */
1228 
1229 #ifdef CPU_ARM6
/* ARM6-specific boot options: unified cache and write buffer control. */
struct cpu_option arm6_options[] = {
	{ "arm6.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm6.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm6.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm6.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1237 
1238 void
1239 arm6_setup(args)
1240 	char *args;
1241 {
1242 	int cpuctrlmask;
1243 
1244 	/* Set up default control registers bits */
1245 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1246 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1247 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
1248 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1249 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1250 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
1251 		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE
1252 		 | CPU_CONTROL_AFLT_ENABLE;
1253 
1254 #ifdef ARM6_LATE_ABORT
1255 	cpuctrl |= CPU_CONTROL_LABT_ENABLE;
1256 #endif	/* ARM6_LATE_ABORT */
1257 
1258 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1259 	cpuctrl = parse_cpu_options(args, arm6_options, cpuctrl);
1260 
1261 	/* Clear out the cache */
1262 	cpu_idcache_wbinv_all();
1263 
1264 	/* Set the control register */
1265 	cpu_control(0xffffffff, cpuctrl);
1266 }
1267 #endif	/* CPU_ARM6 */
1268 
1269 #ifdef CPU_ARM7
/* ARM7-specific boot options: cache, write buffer and FPA clock control. */
struct cpu_option arm7_options[] = {
	{ "arm7.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm7.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm7.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm7.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
#ifdef COMPAT_12
	/* Old (pre-1.3) option name, kept for compatibility. */
	{ "fpaclk2",		BIC, OR,  CPU_CONTROL_CPCLK },
#endif	/* COMPAT_12 */
	{ "arm700.fpaclk",	BIC, OR,  CPU_CONTROL_CPCLK },
	{ NULL,			IGN, IGN, 0 }
};
1281 
1282 void
1283 arm7_setup(args)
1284 	char *args;
1285 {
1286 	int cpuctrlmask;
1287 
1288 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1289 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1290 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
1291 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1292 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1293 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
1294 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_LABT_ENABLE
1295 		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE
1296 		 | CPU_CONTROL_AFLT_ENABLE;
1297 
1298 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1299 	cpuctrl = parse_cpu_options(args, arm7_options, cpuctrl);
1300 
1301 	/* Clear out the cache */
1302 	cpu_idcache_wbinv_all();
1303 
1304 	/* Set the control register */
1305 	cpu_control(0xffffffff, cpuctrl);
1306 }
1307 #endif	/* CPU_ARM7 */
1308 
1309 #ifdef CPU_ARM7TDMI
/*
 * ARM7TDMI boot options.
 * NOTE(review): these reuse the "arm7."/"arm700." option names —
 * presumably intentional since the 7TDMI is an ARM7 variant; confirm.
 */
struct cpu_option arm7tdmi_options[] = {
	{ "arm7.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm7.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm7.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm7.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
#ifdef COMPAT_12
	/* Old (pre-1.3) option name, kept for compatibility. */
	{ "fpaclk2",		BIC, OR,  CPU_CONTROL_CPCLK },
#endif	/* COMPAT_12 */
	{ "arm700.fpaclk",	BIC, OR,  CPU_CONTROL_CPCLK },
	{ NULL,			IGN, IGN, 0 }
};
1321 
1322 void
1323 arm7tdmi_setup(args)
1324 	char *args;
1325 {
1326 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1327 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1328 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
1329 
1330 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1331 	cpuctrl = parse_cpu_options(args, arm7tdmi_options, cpuctrl);
1332 
1333 	/* Clear out the cache */
1334 	cpu_idcache_wbinv_all();
1335 
1336 	/* Set the control register */
1337 	cpu_control(0xffffffff, cpuctrl);
1338 }
1339 #endif	/* CPU_ARM7TDMI */
1340 
1341 #ifdef CPU_ARM8
/* ARM8-specific boot options: cache, write buffer and branch prediction. */
struct cpu_option arm8_options[] = {
	{ "arm8.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm8.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm8.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm8.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
#ifdef COMPAT_12
	/* Old (pre-1.3) option name, kept for compatibility. */
	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "arm8.branchpredict",	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1354 
1355 void
1356 arm8_setup(args)
1357 	char *args;
1358 {
1359 	int integer;
1360 	int cpuctrlmask;
1361 	int clocktest;
1362 	int setclock = 0;
1363 
1364 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1365 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1366 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
1367 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1368 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1369 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
1370 		 | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_ROM_ENABLE
1371 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE;
1372 
1373 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1374 	cpuctrl = parse_cpu_options(args, arm8_options, cpuctrl);
1375 
1376 	/* Get clock configuration */
1377 	clocktest = arm8_clock_config(0, 0) & 0x0f;
1378 
1379 	/* Special ARM8 clock and test configuration */
1380 	if (get_bootconf_option(args, "arm8.clock.reset", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1381 		clocktest = 0;
1382 		setclock = 1;
1383 	}
1384 	if (get_bootconf_option(args, "arm8.clock.dynamic", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1385 		if (integer)
1386 			clocktest |= 0x01;
1387 		else
1388 			clocktest &= ~(0x01);
1389 		setclock = 1;
1390 	}
1391 	if (get_bootconf_option(args, "arm8.clock.sync", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1392 		if (integer)
1393 			clocktest |= 0x02;
1394 		else
1395 			clocktest &= ~(0x02);
1396 		setclock = 1;
1397 	}
1398 	if (get_bootconf_option(args, "arm8.clock.fast", BOOTOPT_TYPE_BININT, &integer)) {
1399 		clocktest = (clocktest & ~0xc0) | (integer & 3) << 2;
1400 		setclock = 1;
1401 	}
1402 	if (get_bootconf_option(args, "arm8.test", BOOTOPT_TYPE_BININT, &integer)) {
1403 		clocktest |= (integer & 7) << 5;
1404 		setclock = 1;
1405 	}
1406 
1407 	/* Clear out the cache */
1408 	cpu_idcache_wbinv_all();
1409 
1410 	/* Set the control register */
1411 	cpu_control(0xffffffff, cpuctrl);
1412 
1413 	/* Set the clock/test register */
1414 	if (setclock)
1415 		arm8_clock_config(0x7f, clocktest);
1416 }
1417 #endif	/* CPU_ARM8 */
1418 
1419 #ifdef CPU_ARM9
/* ARM9-specific boot options: separate I/D caches and write buffer. */
struct cpu_option arm9_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm9.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm9.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "arm9.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "arm9.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1431 
1432 void
1433 arm9_setup(args)
1434 	char *args;
1435 {
1436 	int cpuctrlmask;
1437 
1438 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1439 	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1440 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1441 	    | CPU_CONTROL_WBUF_ENABLE;
1442 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1443 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1444 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1445 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1446 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1447 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1448 		 | CPU_CONTROL_CPCLK;
1449 
1450 	cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);
1451 
1452 	/* Clear out the cache */
1453 	cpu_idcache_wbinv_all();
1454 
1455 	/* Set the control register */
1456 	cpu_control(0xffffffff, cpuctrl);
1457 
1458 }
1459 #endif	/* CPU_ARM9 */
1460 
1461 #ifdef CPU_SA110
/* SA-110 boot options: separate I/D caches and write buffer. */
struct cpu_option sa110_options[] = {
#ifdef COMPAT_12
	/* Old (pre-1.3) option names, kept for compatibility. */
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa110.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa110.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "sa110.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "sa110.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1477 
/*
 * Compute the SA-110 control register value from the defaults and
 * the boot arguments, write it to the hardware, and enable clock
 * switching.
 */
void
sa110_setup(args)
	char *args;
{
	int cpuctrlmask;

	/*
	 * Default control register bits: MMU on, 32-bit program and
	 * data spaces, "S" (system) protection, separate I and D
	 * caches and write buffer enabled.
	 */
	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE;
	/*
	 * Full set of bits this routine knows about; only referenced
	 * by the commented-out cpu_control() call below.
	 */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
		 | CPU_CONTROL_CPCLK;

	/* Let boot arguments override the defaults. */
	cpuctrl = parse_cpu_options(args, sa110_options, cpuctrl);

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
/*	cpu_control(cpuctrlmask, cpuctrl);*/
	cpu_control(0xffffffff, cpuctrl);

	/*
	 * enable clockswitching, note that this doesn't read or write to r0,
	 * r0 is just to make it valid asm
	 */
	__asm ("mcr 15, 0, r0, c15, c1, 2");
}
1511 #endif	/* CPU_SA110 */
1512 
1513 #ifdef CPU_XSCALE
/* XScale boot options: branch prediction and separate I/D caches. */
struct cpu_option xscale_options[] = {
#ifdef COMPAT_12
	/* Old (pre-1.3) option names, kept for compatibility. */
	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
#endif	/* COMPAT_12 */
	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "xscale.branchpredict", BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "xscale.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "xscale.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "xscale.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1528 
/*
 * Compute the XScale control register value from the defaults and
 * the boot arguments, then write it to the hardware.
 */
void
xscale_setup(args)
	char *args;
{
	int cpuctrlmask;

	/*
	 * The XScale Write Buffer is always enabled.  Our option
	 * is to enable/disable coalescing.  Note that bits 6:3
	 * must always be enabled.
	 */

	/*
	 * Defaults: MMU on, 32-bit program/data spaces, "S" (system)
	 * protection, separate I and D caches, write buffer and late
	 * abort model enabled.
	 */
	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
	/*
	 * Full set of bits this routine knows about; only referenced
	 * by the commented-out cpu_control() call below.
	 */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
		 | CPU_CONTROL_CPCLK;

	/* Let boot arguments override the defaults. */
	cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl);

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/*
	 * Set the control register.  Note that bits 6:3 must always
	 * be set to 1.
	 */
/*	cpu_control(cpuctrlmask, cpuctrl);*/
	cpu_control(0xffffffff, cpuctrl);

#if 0
	/*
	 * XXX FIXME
	 * Disable write buffer coalescing, PT ECC, and set
	 * the mini-cache to write-back/read-allocate.
	 */
	__asm ("mcr p15, 0, %0, c1, c0, 1" :: "r" (0));
#endif
}
1574 #endif	/* CPU_XSCALE */
1575