xref: /netbsd-src/sys/arch/arm/arm/cpufunc.c (revision 5aa16f7d610ab7ce136d1a2041ae6ce5655dca85)
1 /*	$NetBSD: cpufunc.c,v 1.40 2002/04/09 21:00:42 thorpej Exp $	*/
2 
3 /*
4  * arm7tdmi support code Copyright (c) 2001 John Fremlin
5  * arm8 support code Copyright (c) 1997 ARM Limited
6  * arm8 support code Copyright (c) 1997 Causality Limited
7  * arm9 support code Copyright (C) 2001 ARM Ltd
8  * Copyright (c) 1997 Mark Brinicombe.
9  * Copyright (c) 1997 Causality Limited
10  * All rights reserved.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. All advertising materials mentioning features or use of this software
21  *    must display the following acknowledgement:
22  *	This product includes software developed by Causality Limited.
23  * 4. The name of Causality Limited may not be used to endorse or promote
24  *    products derived from this software without specific prior written
25  *    permission.
26  *
27  * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
28  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
29  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
30  * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
31  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
32  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
33  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37  * SUCH DAMAGE.
38  *
39  * RiscBSD kernel project
40  *
41  * cpufuncs.c
42  *
43  * C functions for supporting CPU / MMU / TLB specific operations.
44  *
45  * Created      : 30/01/97
46  */
47 
48 #include "opt_compat_netbsd.h"
49 #include "opt_cputypes.h"
50 #include "opt_cpuoptions.h"
51 
52 #include <sys/types.h>
53 #include <sys/param.h>
54 #include <sys/systm.h>
55 #include <machine/cpu.h>
56 #include <machine/bootconfig.h>
57 #include <arch/arm/arm/disassem.h>
58 
59 #include <uvm/uvm.h>
60 
61 #include <arm/cpufunc.h>
62 
63 #ifdef CPU_XSCALE_80200
64 #include <arm/xscale/i80200reg.h>
65 #include <arm/xscale/i80200var.h>
66 #endif
67 
68 #ifdef CPU_XSCALE_80321
69 #include <arm/xscale/i80321reg.h>
70 #include <arm/xscale/i80321var.h>
71 #endif
72 
73 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321)
74 #include <arm/xscale/xscalereg.h>
75 #endif
76 
77 /* PRIMARY CACHE VARIABLES */
78 int	arm_picache_size;
79 int	arm_picache_line_size;
80 int	arm_picache_ways;
81 
82 int	arm_pdcache_size;	/* and unified */
83 int	arm_pdcache_line_size;
84 int	arm_pdcache_ways;
85 
86 int	arm_pcache_type;
87 int	arm_pcache_unified;
88 
89 int	arm_dcache_align;
90 int	arm_dcache_align_mask;
91 
92 #ifdef CPU_ARM3
/*
 * Function vector for the ARM3.  setttb/domain/faultstatus/faultaddress
 * are NULL and every TLB hook is a no-op; the only real operations here
 * are arm3_control and the single unified-cache flush (arm3_cache_flush),
 * which backs all of the cache-maintenance slots.  Data aborts go through
 * early_abort_fixup; there is no dedicated context_switch hook.
 */
93 struct cpu_functions arm3_cpufuncs = {
94 	/* CPU functions */
95 
96 	cpufunc_id,			/* id			*/
97 	cpufunc_nullop,			/* cpwait		*/
98 
99 	/* MMU functions */
100 
101 	arm3_control,			/* control		*/
102 	NULL,				/* domain		*/
103 	NULL,				/* setttb		*/
104 	NULL,				/* faultstatus		*/
105 	NULL,				/* faultaddress		*/
106 
107 	/* TLB functions */
108 
109 	cpufunc_nullop,			/* tlb_flushID		*/
110 	(void *)cpufunc_nullop,		/* tlb_flushID_SE	*/
111 	cpufunc_nullop,			/* tlb_flushI		*/
112 	(void *)cpufunc_nullop,		/* tlb_flushI_SE	*/
113 	cpufunc_nullop,			/* tlb_flushD		*/
114 	(void *)cpufunc_nullop,		/* tlb_flushD_SE	*/
115 
116 	/* Cache operations */
117 
118 	cpufunc_nullop,			/* icache_sync_all	*/
119 	(void *) cpufunc_nullop,	/* icache_sync_range	*/
120 
121 	arm3_cache_flush,		/* dcache_wbinv_all	*/
122 	(void *)arm3_cache_flush,	/* dcache_wbinv_range	*/
123 	(void *)arm3_cache_flush,	/* dcache_inv_range	*/
124 	(void *)cpufunc_nullop,		/* dcache_wb_range	*/
125 
126 	arm3_cache_flush,		/* idcache_wbinv_all	*/
127 	(void *)arm3_cache_flush,	/* idcache_wbinv_range	*/
128 
129 	/* Other functions */
130 
131 	cpufunc_nullop,			/* flush_prefetchbuf	*/
132 	cpufunc_nullop,			/* drain_writebuf	*/
133 	cpufunc_nullop,			/* flush_brnchtgt_C	*/
134 	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/
135 
136 	(void *)cpufunc_nullop,		/* sleep		*/
137 
138 	/* Soft functions */
139 
140 	early_abort_fixup,		/* dataabt_fixup	*/
141 	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
142 
143 	NULL,				/* context_switch	*/
144 
145 	(void *)cpufunc_nullop		/* cpu setup		*/
146 
147 };
148 #endif	/* CPU_ARM3 */
149 
150 #ifdef CPU_ARM6
/*
 * Function vector for the ARM6.  Uses the shared ARM6/ARM7 helpers
 * (arm67_*) for TTB, TLB flush/purge, unified cache flush and context
 * switch.  The data-abort fixup is selected at compile time: late-abort
 * mode when ARM6_LATE_ABORT is defined, early-abort mode otherwise.
 */
151 struct cpu_functions arm6_cpufuncs = {
152 	/* CPU functions */
153 
154 	cpufunc_id,			/* id			*/
155 	cpufunc_nullop,			/* cpwait		*/
156 
157 	/* MMU functions */
158 
159 	cpufunc_control,		/* control		*/
160 	cpufunc_domains,		/* domain		*/
161 	arm67_setttb,			/* setttb		*/
162 	cpufunc_faultstatus,		/* faultstatus		*/
163 	cpufunc_faultaddress,		/* faultaddress		*/
164 
165 	/* TLB functions */
166 
167 	arm67_tlb_flush,		/* tlb_flushID		*/
168 	arm67_tlb_purge,		/* tlb_flushID_SE	*/
169 	arm67_tlb_flush,		/* tlb_flushI		*/
170 	arm67_tlb_purge,		/* tlb_flushI_SE	*/
171 	arm67_tlb_flush,		/* tlb_flushD		*/
172 	arm67_tlb_purge,		/* tlb_flushD_SE	*/
173 
174 	/* Cache operations */
175 
176 	cpufunc_nullop,			/* icache_sync_all	*/
177 	(void *) cpufunc_nullop,	/* icache_sync_range	*/
178 
179 	arm67_cache_flush,		/* dcache_wbinv_all	*/
180 	(void *)arm67_cache_flush,	/* dcache_wbinv_range	*/
181 	(void *)arm67_cache_flush,	/* dcache_inv_range	*/
182 	(void *)cpufunc_nullop,		/* dcache_wb_range	*/
183 
184 	arm67_cache_flush,		/* idcache_wbinv_all	*/
185 	(void *)arm67_cache_flush,	/* idcache_wbinv_range	*/
186 
187 	/* Other functions */
188 
189 	cpufunc_nullop,			/* flush_prefetchbuf	*/
190 	cpufunc_nullop,			/* drain_writebuf	*/
191 	cpufunc_nullop,			/* flush_brnchtgt_C	*/
192 	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/
193 
194 	(void *)cpufunc_nullop,		/* sleep		*/
195 
196 	/* Soft functions */
197 
198 #ifdef ARM6_LATE_ABORT
199 	late_abort_fixup,		/* dataabt_fixup	*/
200 #else
201 	early_abort_fixup,		/* dataabt_fixup	*/
202 #endif
203 	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
204 
205 	arm67_context_switch,		/* context_switch	*/
206 
207 	arm6_setup			/* cpu setup		*/
208 
209 };
210 #endif	/* CPU_ARM6 */
211 
212 #ifdef CPU_ARM7
/*
 * Function vector for the ARM7.  Shares the arm67_* TTB/TLB/cache/
 * context-switch helpers with the ARM6 vector above; differs in always
 * using the late-abort data fixup and in its own arm7_setup routine.
 */
213 struct cpu_functions arm7_cpufuncs = {
214 	/* CPU functions */
215 
216 	cpufunc_id,			/* id			*/
217 	cpufunc_nullop,			/* cpwait		*/
218 
219 	/* MMU functions */
220 
221 	cpufunc_control,		/* control		*/
222 	cpufunc_domains,		/* domain		*/
223 	arm67_setttb,			/* setttb		*/
224 	cpufunc_faultstatus,		/* faultstatus		*/
225 	cpufunc_faultaddress,		/* faultaddress		*/
226 
227 	/* TLB functions */
228 
229 	arm67_tlb_flush,		/* tlb_flushID		*/
230 	arm67_tlb_purge,		/* tlb_flushID_SE	*/
231 	arm67_tlb_flush,		/* tlb_flushI		*/
232 	arm67_tlb_purge,		/* tlb_flushI_SE	*/
233 	arm67_tlb_flush,		/* tlb_flushD		*/
234 	arm67_tlb_purge,		/* tlb_flushD_SE	*/
235 
236 	/* Cache operations */
237 
238 	cpufunc_nullop,			/* icache_sync_all	*/
239 	(void *)cpufunc_nullop,		/* icache_sync_range	*/
240 
241 	arm67_cache_flush,		/* dcache_wbinv_all	*/
242 	(void *)arm67_cache_flush,	/* dcache_wbinv_range	*/
243 	(void *)arm67_cache_flush,	/* dcache_inv_range	*/
244 	(void *)cpufunc_nullop,		/* dcache_wb_range	*/
245 
246 	arm67_cache_flush,		/* idcache_wbinv_all	*/
247 	(void *)arm67_cache_flush,	/* idcache_wbinv_range	*/
248 
249 	/* Other functions */
250 
251 	cpufunc_nullop,			/* flush_prefetchbuf	*/
252 	cpufunc_nullop,			/* drain_writebuf	*/
253 	cpufunc_nullop,			/* flush_brnchtgt_C	*/
254 	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/
255 
256 	(void *)cpufunc_nullop,		/* sleep		*/
257 
258 	/* Soft functions */
259 
260 	late_abort_fixup,		/* dataabt_fixup	*/
261 	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
262 
263 	arm67_context_switch,		/* context_switch	*/
264 
265 	arm7_setup			/* cpu setup		*/
266 
267 };
268 #endif	/* CPU_ARM7 */
269 
270 #ifdef CPU_ARM7TDMI
/*
 * Function vector for the ARM7TDMI-based cores.  Uses its own
 * arm7tdmi_* TTB/TLB/cache/context-switch routines (flushID flavor
 * serves all TLB and cache slots) and the late-abort data fixup.
 */
271 struct cpu_functions arm7tdmi_cpufuncs = {
272 	/* CPU functions */
273 
274 	cpufunc_id,			/* id			*/
275 	cpufunc_nullop,			/* cpwait		*/
276 
277 	/* MMU functions */
278 
279 	cpufunc_control,		/* control		*/
280 	cpufunc_domains,		/* domain		*/
281 	arm7tdmi_setttb,		/* setttb		*/
282 	cpufunc_faultstatus,		/* faultstatus		*/
283 	cpufunc_faultaddress,		/* faultaddress		*/
284 
285 	/* TLB functions */
286 
287 	arm7tdmi_tlb_flushID,		/* tlb_flushID		*/
288 	arm7tdmi_tlb_flushID_SE,	/* tlb_flushID_SE	*/
289 	arm7tdmi_tlb_flushID,		/* tlb_flushI		*/
290 	arm7tdmi_tlb_flushID_SE,	/* tlb_flushI_SE	*/
291 	arm7tdmi_tlb_flushID,		/* tlb_flushD		*/
292 	arm7tdmi_tlb_flushID_SE,	/* tlb_flushD_SE	*/
293 
294 	/* Cache operations */
295 
296 	cpufunc_nullop,			/* icache_sync_all	*/
297 	(void *)cpufunc_nullop,		/* icache_sync_range	*/
298 
299 	arm7tdmi_cache_flushID,		/* dcache_wbinv_all	*/
300 	(void *)arm7tdmi_cache_flushID,	/* dcache_wbinv_range	*/
301 	(void *)arm7tdmi_cache_flushID,	/* dcache_inv_range	*/
302 	(void *)cpufunc_nullop,		/* dcache_wb_range	*/
303 
304 	arm7tdmi_cache_flushID,		/* idcache_wbinv_all	*/
305 	(void *)arm7tdmi_cache_flushID,	/* idcache_wbinv_range	*/
306 
307 	/* Other functions */
308 
309 	cpufunc_nullop,			/* flush_prefetchbuf	*/
310 	cpufunc_nullop,			/* drain_writebuf	*/
311 	cpufunc_nullop,			/* flush_brnchtgt_C	*/
312 	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/
313 
314 	(void *)cpufunc_nullop,		/* sleep		*/
315 
316 	/* Soft functions */
317 
318 	late_abort_fixup,		/* dataabt_fixup	*/
319 	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
320 
321 	arm7tdmi_context_switch,	/* context_switch	*/
322 
323 	arm7tdmi_setup			/* cpu setup		*/
324 
325 };
326 #endif	/* CPU_ARM7TDMI */
327 
328 #ifdef CPU_ARM8
/*
 * Function vector for the ARM8.  Has a real clean (cleanID) vs purge
 * (purgeID) distinction for the cache ops; the XXX on dcache_inv_range
 * notes that a full purge (write back + invalidate) is used where a
 * pure invalidate would suffice.  Both abort fixups are no-ops.
 */
329 struct cpu_functions arm8_cpufuncs = {
330 	/* CPU functions */
331 
332 	cpufunc_id,			/* id			*/
333 	cpufunc_nullop,			/* cpwait		*/
334 
335 	/* MMU functions */
336 
337 	cpufunc_control,		/* control		*/
338 	cpufunc_domains,		/* domain		*/
339 	arm8_setttb,			/* setttb		*/
340 	cpufunc_faultstatus,		/* faultstatus		*/
341 	cpufunc_faultaddress,		/* faultaddress		*/
342 
343 	/* TLB functions */
344 
345 	arm8_tlb_flushID,		/* tlb_flushID		*/
346 	arm8_tlb_flushID_SE,		/* tlb_flushID_SE	*/
347 	arm8_tlb_flushID,		/* tlb_flushI		*/
348 	arm8_tlb_flushID_SE,		/* tlb_flushI_SE	*/
349 	arm8_tlb_flushID,		/* tlb_flushD		*/
350 	arm8_tlb_flushID_SE,		/* tlb_flushD_SE	*/
351 
352 	/* Cache operations */
353 
354 	cpufunc_nullop,			/* icache_sync_all	*/
355 	(void *)cpufunc_nullop,		/* icache_sync_range	*/
356 
357 	arm8_cache_purgeID,		/* dcache_wbinv_all	*/
358 	(void *)arm8_cache_purgeID,	/* dcache_wbinv_range	*/
359 /*XXX*/	(void *)arm8_cache_purgeID,	/* dcache_inv_range	*/
360 	(void *)arm8_cache_cleanID,	/* dcache_wb_range	*/
361 
362 	arm8_cache_purgeID,		/* idcache_wbinv_all	*/
363 	(void *)arm8_cache_purgeID,	/* idcache_wbinv_range	*/
364 
365 	/* Other functions */
366 
367 	cpufunc_nullop,			/* flush_prefetchbuf	*/
368 	cpufunc_nullop,			/* drain_writebuf	*/
369 	cpufunc_nullop,			/* flush_brnchtgt_C	*/
370 	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/
371 
372 	(void *)cpufunc_nullop,		/* sleep		*/
373 
374 	/* Soft functions */
375 
376 	cpufunc_null_fixup,		/* dataabt_fixup	*/
377 	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
378 
379 	arm8_context_switch,		/* context_switch	*/
380 
381 	arm8_setup			/* cpu setup		*/
382 };
383 #endif	/* CPU_ARM8 */
384 
385 #ifdef CPU_ARM9
/*
 * Function vector for the ARM9 (ARM920T).  Mixes generic ARMv4 TLB
 * helpers (armv4_*) with ARM9-specific cache/sync routines.  The cache
 * hooks assume the D-cache runs in write-through mode (see comment
 * below), so dcache_wb_range is a no-op and wbinv == flush.
 */
386 struct cpu_functions arm9_cpufuncs = {
387 	/* CPU functions */
388 
389 	cpufunc_id,			/* id			*/
390 	cpufunc_nullop,			/* cpwait		*/
391 
392 	/* MMU functions */
393 
394 	cpufunc_control,		/* control		*/
395 	cpufunc_domains,		/* domain		*/
396 	arm9_setttb,			/* setttb		*/
397 	cpufunc_faultstatus,		/* faultstatus		*/
398 	cpufunc_faultaddress,		/* faultaddress		*/
399 
400 	/* TLB functions */
401 
402 	armv4_tlb_flushID,		/* tlb_flushID		*/
403 	arm9_tlb_flushID_SE,		/* tlb_flushID_SE	*/
404 	armv4_tlb_flushI,		/* tlb_flushI		*/
405 	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
406 	armv4_tlb_flushD,		/* tlb_flushD		*/
407 	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/
408 
409 	/* Cache operations */
410 
411 	arm9_cache_syncI,		/* icache_sync_all	*/
412 	arm9_cache_syncI_rng,		/* icache_sync_range	*/
413 
414 		/* ...cache in write-though mode... */
415 	arm9_cache_flushD,		/* dcache_wbinv_all	*/
416 	arm9_cache_flushD_rng,		/* dcache_wbinv_range	*/
417 	arm9_cache_flushD_rng,		/* dcache_inv_range	*/
418 	(void *)cpufunc_nullop,		/* dcache_wb_range	*/
419 
420 	arm9_cache_flushID,		/* idcache_wbinv_all	*/
421 	arm9_cache_flushID_rng,		/* idcache_wbinv_range	*/
422 
423 	/* Other functions */
424 
425 	cpufunc_nullop,			/* flush_prefetchbuf	*/
426 	armv4_drain_writebuf,		/* drain_writebuf	*/
427 	cpufunc_nullop,			/* flush_brnchtgt_C	*/
428 	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/
429 
430 	(void *)cpufunc_nullop,		/* sleep		*/
431 
432 	/* Soft functions */
433 
434 	cpufunc_null_fixup,		/* dataabt_fixup	*/
435 	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
436 
437 	arm9_context_switch,		/* context_switch	*/
438 
439 	arm9_setup			/* cpu setup		*/
440 
441 };
442 #endif /* CPU_ARM9 */
443 
444 #ifdef CPU_SA110
/*
 * Function vector for StrongARM (SA-110; also installed for SA-1100/
 * SA-1110 by set_cpufuncs, which then overrides cf_sleep).  ARMv4 TLB
 * helpers plus SA-specific write-back cache clean/purge routines.  The
 * XXX on dcache_inv_range notes a purge is used instead of a pure
 * invalidate.
 */
445 struct cpu_functions sa110_cpufuncs = {
446 	/* CPU functions */
447 
448 	cpufunc_id,			/* id			*/
449 	cpufunc_nullop,			/* cpwait		*/
450 
451 	/* MMU functions */
452 
453 	cpufunc_control,		/* control		*/
454 	cpufunc_domains,		/* domain		*/
455 	sa110_setttb,			/* setttb		*/
456 	cpufunc_faultstatus,		/* faultstatus		*/
457 	cpufunc_faultaddress,		/* faultaddress		*/
458 
459 	/* TLB functions */
460 
461 	armv4_tlb_flushID,		/* tlb_flushID		*/
462 	sa110_tlb_flushID_SE,		/* tlb_flushID_SE	*/
463 	armv4_tlb_flushI,		/* tlb_flushI		*/
464 	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
465 	armv4_tlb_flushD,		/* tlb_flushD		*/
466 	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/
467 
468 	/* Cache operations */
469 
470 	sa110_cache_syncI,		/* icache_sync_all	*/
471 	sa110_cache_syncI_rng,		/* icache_sync_range	*/
472 
473 	sa110_cache_purgeD,		/* dcache_wbinv_all	*/
474 	sa110_cache_purgeD_rng,		/* dcache_wbinv_range	*/
475 /*XXX*/	sa110_cache_purgeD_rng,		/* dcache_inv_range	*/
476 	sa110_cache_cleanD_rng,		/* dcache_wb_range	*/
477 
478 	sa110_cache_purgeID,		/* idcache_wbinv_all	*/
479 	sa110_cache_purgeID_rng,	/* idcache_wbinv_range	*/
480 
481 	/* Other functions */
482 
483 	cpufunc_nullop,			/* flush_prefetchbuf	*/
484 	armv4_drain_writebuf,		/* drain_writebuf	*/
485 	cpufunc_nullop,			/* flush_brnchtgt_C	*/
486 	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/
487 
488 	(void *)cpufunc_nullop,		/* sleep		*/
489 
490 	/* Soft functions */
491 
492 	cpufunc_null_fixup,		/* dataabt_fixup	*/
493 	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
494 
495 	sa110_context_switch,		/* context_switch	*/
496 
497 	sa110_setup			/* cpu setup		*/
498 };
499 #endif	/* CPU_SA110 */
500 
501 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321)
/*
 * Shared function vector for XScale cores (i80200 and i80321).  Unlike
 * the older vectors, cpwait and sleep are real operations here
 * (xscale_cpwait drains the coprocessor pipeline; xscale_cpu_sleep
 * enters low-power mode).  set_cpufuncs may patch cf_dcache_inv_range
 * afterwards for early i80200 steppings (see errata note there).
 */
502 struct cpu_functions xscale_cpufuncs = {
503 	/* CPU functions */
504 
505 	cpufunc_id,			/* id			*/
506 	xscale_cpwait,			/* cpwait		*/
507 
508 	/* MMU functions */
509 
510 	xscale_control,			/* control		*/
511 	cpufunc_domains,		/* domain		*/
512 	xscale_setttb,			/* setttb		*/
513 	cpufunc_faultstatus,		/* faultstatus		*/
514 	cpufunc_faultaddress,		/* faultaddress		*/
515 
516 	/* TLB functions */
517 
518 	armv4_tlb_flushID,		/* tlb_flushID		*/
519 	xscale_tlb_flushID_SE,		/* tlb_flushID_SE	*/
520 	armv4_tlb_flushI,		/* tlb_flushI		*/
521 	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
522 	armv4_tlb_flushD,		/* tlb_flushD		*/
523 	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/
524 
525 	/* Cache operations */
526 
527 	xscale_cache_syncI,		/* icache_sync_all	*/
528 	xscale_cache_syncI_rng,		/* icache_sync_range	*/
529 
530 	xscale_cache_purgeD,		/* dcache_wbinv_all	*/
531 	xscale_cache_purgeD_rng,	/* dcache_wbinv_range	*/
532 	xscale_cache_flushD_rng,	/* dcache_inv_range	*/
533 	xscale_cache_cleanD_rng,	/* dcache_wb_range	*/
534 
535 	xscale_cache_purgeID,		/* idcache_wbinv_all	*/
536 	xscale_cache_purgeID_rng,	/* idcache_wbinv_range	*/
537 
538 	/* Other functions */
539 
540 	cpufunc_nullop,			/* flush_prefetchbuf	*/
541 	armv4_drain_writebuf,		/* drain_writebuf	*/
542 	cpufunc_nullop,			/* flush_brnchtgt_C	*/
543 	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/
544 
545 	xscale_cpu_sleep,		/* sleep		*/
546 
547 	/* Soft functions */
548 
549 	cpufunc_null_fixup,		/* dataabt_fixup	*/
550 	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
551 
552 	xscale_context_switch,		/* context_switch	*/
553 
554 	xscale_setup			/* cpu setup		*/
555 };
556 #endif /* CPU_XSCALE_80200 || CPU_XSCALE_80321 */
557 
558 /*
559  * Global constants also used by locore.s
560  */
561 
562 struct cpu_functions cpufuncs;
563 u_int cputype;
564 u_int cpu_reset_needs_v4_MMU_disable;	/* flag used in locore.s */
565 
#if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) || \
    defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321)
static void get_cachetype_cp15 __P((void));

/*
 * Decode the CP15 cache type register into the global primary-cache
 * description variables (arm_picache_*, arm_pdcache_*, arm_pcache_*,
 * arm_dcache_align/_mask).  Used by CPUs new enough to implement the
 * register (ARM7TDMI and later); older parts use get_cachetype_table().
 *
 * Fix vs. previous revision: in the direct-mapped D-cache case
 * (associativity field == 0, M bit clear) the way count was left at 0;
 * a direct-mapped cache has exactly 1 way, matching the I-cache path.
 */
static void
get_cachetype_cp15()
{
	u_int ctype, isize, dsize;
	u_int multiplier;

	/* Read the cache type register (CP15 c0, opcode2 = 1). */
	__asm __volatile("mrc p15, 0, %0, c0, c0, 1"
		: "=r" (ctype));

	/*
	 * ...and thus spake the ARM ARM:
	 *
	 * If an <opcode2> value corresponding to an unimplemented or
	 * reserved ID register is encountered, the System Control
	 * processor returns the value of the main ID register.
	 *
	 * In that case we learn nothing; leave the globals at their
	 * defaults and just derive the alignment mask.
	 */
	if (ctype == cpufunc_id())
		goto out;

	if ((ctype & CPU_CT_S) == 0)
		arm_pcache_unified = 1;

	/*
	 * If you want to know how this code works, go read the ARM ARM.
	 */

	arm_pcache_type = CPU_CT_CTYPE(ctype);

	/* Separate I-cache: decode its size/line/ways fields. */
	if (arm_pcache_unified == 0) {
		isize = CPU_CT_ISIZE(ctype);
		multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
		arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
		if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
			if (isize & CPU_CT_xSIZE_M)
				arm_picache_line_size = 0; /* not present */
			else
				arm_picache_ways = 1; /* direct-mapped */
		} else {
			arm_picache_ways = multiplier <<
			    (CPU_CT_xSIZE_ASSOC(isize) - 1);
		}
		arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
	}

	/* D-cache (or unified cache): same decoding. */
	dsize = CPU_CT_DSIZE(ctype);
	multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
	arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
	if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
		if (dsize & CPU_CT_xSIZE_M)
			arm_pdcache_line_size = 0; /* not present */
		else
			arm_pdcache_ways = 1; /* direct-mapped (was 0) */
	} else {
		arm_pdcache_ways = multiplier <<
		    (CPU_CT_xSIZE_ASSOC(dsize) - 1);
	}
	arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);

	arm_dcache_align = arm_pdcache_line_size;

 out:
	arm_dcache_align_mask = arm_dcache_align - 1;
}
#endif /* ARM7TDMI || ARM8 || ARM9 || XSCALE */
634 
635 #if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3) || \
636     defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_SA110)
637 /* Cache information for CPUs without cache type registers. */
638 struct cachetab {
639 	u_int32_t ct_cpuid;		/* CPU id (masked with CPU_ID_CPU_MASK) */
640 	int	ct_pcache_type;		/* write-through/write-back type */
641 	int	ct_pcache_unified;	/* nonzero if I and D are unified */
642 	int	ct_pdcache_size;	/* D (or unified) cache: total bytes */
643 	int	ct_pdcache_line_size;	/*   line size in bytes */
644 	int	ct_pdcache_ways;	/*   associativity */
645 	int	ct_picache_size;	/* I cache: total bytes (0 if unified) */
646 	int	ct_picache_line_size;	/*   line size in bytes */
647 	int	ct_picache_ways;	/*   associativity */
648 };
649 
/* Table is terminated by the all-zero entry (ct_cpuid == 0). */
650 struct cachetab cachetab[] = {
651     /* cpuid,           cache type,       u,  dsiz, ls, wy,  isiz, ls, wy */
652     { CPU_ID_ARM2,      0,                1,     0,  0,  0,     0,  0,  0 },
653     { CPU_ID_ARM250,    0,                1,     0,  0,  0,     0,  0,  0 },
654     { CPU_ID_ARM3,      CPU_CT_CTYPE_WT,  1,  4096, 16, 64,     0,  0,  0 },
655     { CPU_ID_ARM610,	CPU_CT_CTYPE_WT,  1,  4096, 16, 64,     0,  0,  0 },
656     { CPU_ID_ARM710,    CPU_CT_CTYPE_WT,  1,  8192, 32,  4,     0,  0,  0 },
657     { CPU_ID_ARM7500,   CPU_CT_CTYPE_WT,  1,  4096, 16,  4,     0,  0,  0 },
658     { CPU_ID_ARM710A,   CPU_CT_CTYPE_WT,  1,  8192, 16,  4,     0,  0,  0 },
659     { CPU_ID_ARM7500FE, CPU_CT_CTYPE_WT,  1,  4096, 16,  4,     0,  0,  0 },
660     /* XXX is this type right for SA-1? */
661     { CPU_ID_SA110,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 },
662     { CPU_ID_SA1100,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
663     { CPU_ID_SA1110,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
664     { 0, 0, 0, 0, 0, 0, 0, 0}
665 };
666 
667 static void get_cachetype_table __P((void));
668 
669 static void
670 get_cachetype_table()
671 {
672 	int i;
673 	u_int32_t cpuid = cpufunc_id();
674 
675 	for (i = 0; cachetab[i].ct_cpuid != 0; i++) {
676 		if (cachetab[i].ct_cpuid == (cpuid & CPU_ID_CPU_MASK)) {
677 			arm_pcache_type = cachetab[i].ct_pcache_type;
678 			arm_pcache_unified = cachetab[i].ct_pcache_unified;
679 			arm_pdcache_size = cachetab[i].ct_pdcache_size;
680 			arm_pdcache_line_size =
681 			    cachetab[i].ct_pdcache_line_size;
682 			arm_pdcache_ways = cachetab[i].ct_pdcache_ways;
683 			arm_picache_size = cachetab[i].ct_picache_size;
684 			arm_picache_line_size =
685 			    cachetab[i].ct_picache_line_size;
686 			arm_picache_ways = cachetab[i].ct_picache_ways;
687 		}
688 	}
689 	arm_dcache_align = arm_pdcache_line_size;
690 
691 	arm_dcache_align_mask = arm_dcache_align - 1;
692 }
693 
694 #endif /* ARM2 || ARM250 || ARM3 || ARM6 || ARM7 || SA110 */
695 
696 /*
697  * Cannot panic here as we may not have a console yet ...
698  */
699 
/*
 * Identify the CPU from its main ID register and install the matching
 * function vector into the global `cpufuncs', set
 * cpu_reset_needs_v4_MMU_disable for locore, probe the cache geometry,
 * and initialize the pmap PTE protos for this CPU class.  Returns 0 on
 * success; panics (and nominally returns ARCHITECTURE_NOT_PRESENT) if
 * no compiled-in CPU support matches.  Note `cputype' keeps only the
 * CPU_ID_CPU_MASK bits for use by the rest of the kernel.
 */
700 int
701 set_cpufuncs()
702 {
703 	cputype = cpufunc_id();
704 	cputype &= CPU_ID_CPU_MASK;
705 
706 
707 #ifdef CPU_ARM3
708 	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
709 	    (cputype & 0x00000f00) == 0x00000300) {
710 		cpufuncs = arm3_cpufuncs;
711 		cpu_reset_needs_v4_MMU_disable = 0;
712 		get_cachetype_table();
713 		return 0;
714 	}
715 #endif	/* CPU_ARM3 */
716 #ifdef CPU_ARM6
717 	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
718 	    (cputype & 0x00000f00) == 0x00000600) {
719 		cpufuncs = arm6_cpufuncs;
720 		cpu_reset_needs_v4_MMU_disable = 0;
721 		get_cachetype_table();
722 		pmap_pte_init_generic();
723 		return 0;
724 	}
725 #endif	/* CPU_ARM6 */
726 #ifdef CPU_ARM7
727 	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
728 	    CPU_ID_IS7(cputype) &&
729 	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V3) {
730 		cpufuncs = arm7_cpufuncs;
731 		cpu_reset_needs_v4_MMU_disable = 0;
732 		get_cachetype_table();
733 		pmap_pte_init_generic();
734 		return 0;
735 	}
736 #endif	/* CPU_ARM7 */
737 #ifdef CPU_ARM7TDMI
738 	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
739 	    CPU_ID_IS7(cputype) &&
740 	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V4T) {
741 		cpufuncs = arm7tdmi_cpufuncs;
742 		cpu_reset_needs_v4_MMU_disable = 0;
743 		get_cachetype_cp15();
744 		pmap_pte_init_generic();
745 		return 0;
746 	}
747 #endif
748 #ifdef CPU_ARM8
749 	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
750 	    (cputype & 0x0000f000) == 0x00008000) {
751 		cpufuncs = arm8_cpufuncs;
752 		cpu_reset_needs_v4_MMU_disable = 0;	/* XXX correct? */
753 		get_cachetype_cp15();
754 		pmap_pte_init_generic();
755 		return 0;
756 	}
757 #endif	/* CPU_ARM8 */
758 #ifdef CPU_ARM9
759 	if (cputype == CPU_ID_ARM920T) {
760 		cpufuncs = arm9_cpufuncs;
761 		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
762 		get_cachetype_cp15();
763 		pmap_pte_init_arm9();
764 		return 0;
765 	}
766 #endif /* CPU_ARM9 */
767 #ifdef CPU_SA110
768 	if (cputype == CPU_ID_SA110 || cputype == CPU_ID_SA1100 ||
769 	    cputype == CPU_ID_SA1110) {
770 		cpufuncs = sa110_cpufuncs;
771 		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it */
772 		get_cachetype_table();
773 		pmap_pte_init_generic();
774 		/*
775 		 * Enable the right variant of sleeping.
776 		 */
777 		if (cputype == CPU_ID_SA1100 ||
778 		    cputype == CPU_ID_SA1110)
779 			cpufuncs.cf_sleep = sa11x0_cpu_sleep;
780 		return 0;
781 	}
782 #endif	/* CPU_SA110 */
783 #ifdef CPU_XSCALE_80200
784 	if (cputype == CPU_ID_80200) {
			/* Stepping, needed for the A0/A1 errata check below. */
785 		int rev = cpufunc_id() & CPU_ID_REVISION_MASK;
786 
787 		i80200_icu_init();
788 
789 		/*
790 		 * Reset the Performance Monitoring Unit to a
791 		 * pristine state:
792 		 *	- CCNT, PMN0, PMN1 reset to 0
793 		 *	- overflow indications cleared
794 		 *	- all counters disabled
795 		 */
796 		__asm __volatile("mcr p14, 0, %0, c0, c0, 0"
797 			:
798 			: "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
799 			       PMNC_CC_IF));
800 
801 #if defined(XSCALE_CCLKCFG)
802 		/*
803 		 * Crank CCLKCFG to maximum legal value.
804 		 */
805 		__asm __volatile ("mcr p14, 0, %0, c6, c0, 0"
806 			:
807 			: "r" (XSCALE_CCLKCFG));
808 #endif
809 
810 		/*
811 		 * XXX Disable ECC in the Bus Controller Unit; we
812 		 * don't really support it, yet.  Clear any pending
813 		 * error indications.
814 		 */
815 		__asm __volatile("mcr p13, 0, %0, c0, c1, 0"
816 			:
817 			: "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));
818 
819 		cpufuncs = xscale_cpufuncs;
820 
821 		/*
822 		 * i80200 errata: Step-A0 and A1 have a bug where
823 		 * D$ dirty bits are not cleared on "invalidate by
824 		 * address".
825 		 *
826 		 * Workaround: Clean cache line before invalidating.
827 		 */
828 		if (rev == 0 || rev == 1)
829 			cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng;
830 
831 		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
832 		get_cachetype_cp15();
833 		pmap_pte_init_i80200();
834 		return 0;
835 	}
836 #endif /* CPU_XSCALE_80200 */
837 #ifdef CPU_XSCALE_80321
838 	if (cputype == CPU_ID_80321) {
839 		i80321_icu_init();
840 
841 		/*
842 		 * Reset the Performance Monitoring Unit to a
843 		 * pristine state:
844 		 *	- CCNT, PMN0, PMN1 reset to 0
845 		 *	- overflow indications cleared
846 		 *	- all counters disabled
847 		 */
848 		__asm __volatile("mcr p14, 0, %0, c0, c0, 0"
849 			:
850 			: "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
851 			       PMNC_CC_IF));
852 
853 		cpufuncs = xscale_cpufuncs;
854 
855 		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
856 		get_cachetype_cp15();
857 		pmap_pte_init_xscale();
858 		return 0;
859 	}
860 #endif /* CPU_XSCALE_80321 */
861 	/*
862 	 * Bzzzz. And the answer was ...
863 	 */
864 	panic("No support for this CPU type (%08x) in kernel", cputype);
865 	return(ARCHITECTURE_NOT_PRESENT);
866 }
867 
868 /*
869  * Fixup routines for data and prefetch aborts.
870  *
871  * Several compile time symbols are used
872  *
873  * DEBUG_FAULT_CORRECTION - Print debugging information during the
874  * correction of registers after a fault.
875  * ARM6_LATE_ABORT - ARM6 supports both early and late aborts
876  * when defined should use late aborts
877  */
878 
879 
880 /*
881  * Null abort fixup routine.
882  * For use when no fixup is required.
883  */
884 int
885 cpufunc_null_fixup(arg)
886 	void *arg;
887 {
888 	return(ABORT_FIXUP_OK);
889 }
890 
891 
892 #if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3) || \
893     defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI)
894 
895 #ifdef DEBUG_FAULT_CORRECTION
896 #define DFC_PRINTF(x)		printf x
897 #define DFC_DISASSEMBLE(x)	disassemble(x)
898 #else
899 #define DFC_PRINTF(x)		/* nothing */
900 #define DFC_DISASSEMBLE(x)	/* nothing */
901 #endif
902 
903 /*
904  * "Early" data abort fixup.
905  *
906  * For ARM2, ARM2as, ARM3 and ARM6 (in early-abort mode).  Also used
907  * indirectly by ARM6 (in late-abort mode) and ARM7[TDMI].
908  *
909  * In early aborts, we may have to fix up LDM, STM, LDC and STC.
910  */
911 int
912 early_abort_fixup(arg)
913 	void *arg;
914 {
915 	trapframe_t *frame = arg;
916 	u_int fault_pc;
917 	u_int fault_instruction;
918 	int saved_lr = 0;
919 
920 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
921 
922 		/* Ok an abort in SVC mode */
923 
924 		/*
925 		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
926 		 * as the fault happened in svc mode but we need it in the
927 		 * usr slot so we can treat the registers as an array of ints
928 		 * during fixing.
929 		 * NOTE: This PC is in the position but writeback is not
930 		 * allowed on r15.
931 		 * Doing it like this is more efficient than trapping this
932 		 * case in all possible locations in the following fixup code.
933 		 */
934 
935 		saved_lr = frame->tf_usr_lr;
936 		frame->tf_usr_lr = frame->tf_svc_lr;
937 
938 		/*
939 		 * Note the trapframe does not have the SVC r13 so a fault
940 		 * from an instruction with writeback to r13 in SVC mode is
941 		 * not allowed. This should not happen as the kstack is
942 		 * always valid.
943 		 */
944 	}
945 
946 	/* Get fault address and status from the CPU */
947 
948 	fault_pc = frame->tf_pc;
949 	fault_instruction = *((volatile unsigned int *)fault_pc);
950 
951 	/* Decode the fault instruction and fix the registers as needed */
952 
		/* LDM/STM class (bits 27:25 == 100): undo base writeback. */
953 	if ((fault_instruction & 0x0e000000) == 0x08000000) {
954 		int base;
955 		int loop;
956 		int count;
957 		int *registers = &frame->tf_r0;
958 
959 		DFC_PRINTF(("LDM/STM\n"));
960 		DFC_DISASSEMBLE(fault_pc);
			/* Bit 21 = W (writeback): only then was the base modified. */
961 		if (fault_instruction & (1 << 21)) {
962 			DFC_PRINTF(("This instruction must be corrected\n"));
963 			base = (fault_instruction >> 16) & 0x0f;
964 			if (base == 15)
965 				return ABORT_FIXUP_FAILED;
966 			/* Count registers transferred */
967 			count = 0;
968 			for (loop = 0; loop < 16; ++loop) {
969 				if (fault_instruction & (1<<loop))
970 					++count;
971 			}
972 			DFC_PRINTF(("%d registers used\n", count));
973 			DFC_PRINTF(("Corrected r%d by %d bytes ",
974 				       base, count * 4));
			/* Bit 23 = U (up/down): direction the base moved. */
975 			if (fault_instruction & (1 << 23)) {
976 				DFC_PRINTF(("down\n"));
977 				registers[base] -= count * 4;
978 			} else {
979 				DFC_PRINTF(("up\n"));
980 				registers[base] += count * 4;
981 			}
982 		}
		/* LDC/STC class (bits 27:25 == 110): undo base writeback. */
983 	} else if ((fault_instruction & 0x0e000000) == 0x0c000000) {
984 		int base;
985 		int offset;
986 		int *registers = &frame->tf_r0;
987 	
988 		/* REGISTER CORRECTION IS REQUIRED FOR THESE INSTRUCTIONS */
989 
990 		DFC_DISASSEMBLE(fault_pc);
991 
992 		/* Only need to fix registers if write back is turned on */
993 
994 		if ((fault_instruction & (1 << 21)) != 0) {
995 			base = (fault_instruction >> 16) & 0x0f;
996 			if (base == 13 &&
997 			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
998 				return ABORT_FIXUP_FAILED;
999 			if (base == 15)
1000 				return ABORT_FIXUP_FAILED;
1001 
			/* 8-bit word offset, scaled to bytes. */
1002 			offset = (fault_instruction & 0xff) << 2;
1003 			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
1004 			if ((fault_instruction & (1 << 23)) != 0)
1005 				offset = -offset;
1006 			registers[base] += offset;
1007 			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
1008 		}
		/*
		 * NOTE(review): this condition is byte-identical to the
		 * `else if' above, so this arm is unreachable dead code.
		 * It presumably intended to reject some other instruction
		 * class -- confirm against the original RiscBSD sources
		 * before changing the mask.
		 */
1009 	} else if ((fault_instruction & 0x0e000000) == 0x0c000000)
1010 		return ABORT_FIXUP_FAILED;
1011 
1012 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1013 
1014 		/* Ok an abort in SVC mode */
1015 
1016 		/*
1017 		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1018 		 * as the fault happened in svc mode but we need it in the
1019 		 * usr slot so we can treat the registers as an array of ints
1020 		 * during fixing.
1021 		 * NOTE: This PC is in the position but writeback is not
1022 		 * allowed on r15.
1023 		 * Doing it like this is more efficient than trapping this
1024 		 * case in all possible locations in the prior fixup code.
1025 		 */
1026 
1027 		frame->tf_svc_lr = frame->tf_usr_lr;
1028 		frame->tf_usr_lr = saved_lr;
1029 
1030 		/*
1031 		 * Note the trapframe does not have the SVC r13 so a fault
1032 		 * from an instruction with writeback to r13 in SVC mode is
1033 		 * not allowed. This should not happen as the kstack is
1034 		 * always valid.
1035 		 */
1036 	}
1037 
1038 	return(ABORT_FIXUP_OK);
1039 }
1040 #endif	/* CPU_ARM2/250/3/6/7 */
1041 
1042 
1043 #if (defined(CPU_ARM6) && defined(ARM6_LATE_ABORT)) || defined(CPU_ARM7) || \
1044 	defined(CPU_ARM7TDMI)
1045 /*
1046  * "Late" (base updated) data abort fixup
1047  *
1048  * For ARM6 (in late-abort mode) and ARM7.
1049  *
1050  * In this model, all data-transfer instructions need fixing up.  We defer
1051  * LDM, STM, LDC and STC fixup to the early-abort handler.
1052  */
int
late_abort_fixup(arg)
	void *arg;
{
	trapframe_t *frame = arg;
	u_int fault_pc;
	u_int fault_instruction;
	int saved_lr = 0;

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
		 * as the fault happened in svc mode but we need it in the
		 * usr slot so we can treat the registers as an array of ints
		 * during fixing.
		 * NOTE: This PC is in the position but writeback is not
		 * allowed on r15.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the following fixup code.
		 */

		saved_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = frame->tf_svc_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed. This should not happen as the kstack is
		 * always valid.
		 */
	}

	/* Get fault address and status from the CPU */

	fault_pc = frame->tf_pc;
	fault_instruction = *((volatile unsigned int *)fault_pc);

	/* Decode the fault instruction and fix the registers as needed */

	/* Was it a swap instruction ? */

	if ((fault_instruction & 0x0fb00ff0) == 0x01000090) {
		/* SWP/SWPB performs no base writeback, so nothing to fix. */
		DFC_DISASSEMBLE(fault_pc);
	} else if ((fault_instruction & 0x0c000000) == 0x04000000) {

		/* Was it a ldr/str (single data transfer) instruction? */
		/* This is for late abort only */

		int base;		/* base register number (Rn) */
		int offset;		/* offset applied to the base */
		int *registers = &frame->tf_r0;

		DFC_DISASSEMBLE(fault_pc);

		/* This is for late abort only */

		/*
		 * The base register needs correcting for post-indexed
		 * accesses (bit 24, P, clear) and for pre-indexed accesses
		 * with writeback (bit 21, W, set).
		 */
		if ((fault_instruction & (1 << 24)) == 0
		    || (fault_instruction & (1 << 21)) != 0) {
			/* postindexed ldr/str with no writeback */

			base = (fault_instruction >> 16) & 0x0f;
			/*
			 * Cannot fix writeback to the SVC stack pointer
			 * (r13 is not in the trapframe) or to the PC.
			 */
			if (base == 13 &&
			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
				return ABORT_FIXUP_FAILED;
			if (base == 15)
				return ABORT_FIXUP_FAILED;
			DFC_PRINTF(("late abt fix: r%d=%08x : ",
				       base, registers[base]));
			if ((fault_instruction & (1 << 25)) == 0) {
				/* Immediate offset - easy */

				offset = fault_instruction & 0xfff;
				/*
				 * NOTE(review): the sense of bit 23 (U, the
				 * up/down bit) here appears to undo the base
				 * update made by the aborted instruction;
				 * confirm against the late-abort model.
				 */
				if ((fault_instruction & (1 << 23)))
					offset = -offset;
				registers[base] += offset;
				DFC_PRINTF(("imm=%08x ", offset));
			} else {
				/* offset is a shifted register (Rm) */
				int shift;

				offset = fault_instruction & 0x0f;
				/* Rm == Rn: base value is ambiguous, bail */
				if (offset == base)
					return ABORT_FIXUP_FAILED;

				/*
				 * Register offset - hard we have to
				 * cope with shifts !
				 */
				offset = registers[offset];

				if ((fault_instruction & (1 << 4)) == 0)
					/* shift with immediate amount */
					shift = (fault_instruction >> 7) & 0x1f;
				else {
					/* shift with register */
					if ((fault_instruction & (1 << 7)) != 0)
						/* undefined for now so bail out */
						return ABORT_FIXUP_FAILED;
					shift = ((fault_instruction >> 8) & 0xf);
					if (base == shift)
						return ABORT_FIXUP_FAILED;
					DFC_PRINTF(("shift reg=%d ", shift));
					shift = registers[shift];
				}
				DFC_PRINTF(("shift=%08x ", shift));
				/*
				 * NOTE(review): the LSR/ASR #32 cases below
				 * shift a 32-bit value by 32, which is
				 * undefined behaviour in C; the hardware
				 * meaning (0 / sign fill) is not guaranteed.
				 */
				switch (((fault_instruction >> 5) & 0x3)) {
				case 0 : /* Logical left */
					offset = (int)(((u_int)offset) << shift);
					break;
				case 1 : /* Logical Right */
					if (shift == 0) shift = 32;
					offset = (int)(((u_int)offset) >> shift);
					break;
				case 2 : /* Arithmetic Right */
					if (shift == 0) shift = 32;
					offset = (int)(((int)offset) >> shift);
					break;
				case 3 : /* Rotate right (ROR/RRX) - unhandled */
					return ABORT_FIXUP_FAILED;
					break;
				}

				DFC_PRINTF(("abt: fixed LDR/STR with "
					       "register offset\n"));
				if ((fault_instruction & (1 << 23)))
					offset = -offset;
				DFC_PRINTF(("offset=%08x ", offset));
				registers[base] += offset;
			}
			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
		}
	}

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
		 * as the fault happened in svc mode but we need it in the
		 * usr slot so we can treat the registers as an array of ints
		 * during fixing.
		 * NOTE: This PC is in the position but writeback is not
		 * allowed on r15.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the prior fixup code.
		 */

		frame->tf_svc_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = saved_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed. This should not happen as the kstack is
		 * always valid.
		 */
	}

	/*
	 * Now let the early-abort fixup routine have a go, in case it
	 * was an LDM, STM, LDC or STC that faulted.
	 */

	return early_abort_fixup(arg);
}
1222 #endif	/* CPU_ARM6(LATE)/7/7TDMI */
1223 
1224 /*
1225  * CPU Setup code
1226  */
1227 
1228 #if defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) || \
1229 	defined(CPU_ARM8) || defined (CPU_ARM9) || defined(CPU_SA110) || \
1230 	defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321)
1231 
/*
 * How a parsed boolean boot option modifies the control-register value:
 * IGN leaves it alone, OR sets the option's bits, BIC clears them.
 */
#define IGN	0
#define OR	1
#define BIC	2

/* One recognised boot-argument option and its effect on the CPU control
 * register (see parse_cpu_options()). */
struct cpu_option {
	char	*co_name;	/* option name in the boot arguments */
	int	co_falseop;	/* operation applied when option is false */
	int	co_trueop;	/* operation applied when option is true */
	int	co_value;	/* control-register bit(s) affected */
};

static u_int parse_cpu_options __P((char *, struct cpu_option *, u_int));
1244 
1245 static u_int
1246 parse_cpu_options(args, optlist, cpuctrl)
1247 	char *args;
1248 	struct cpu_option *optlist;
1249 	u_int cpuctrl;
1250 {
1251 	int integer;
1252 
1253 	while (optlist->co_name) {
1254 		if (get_bootconf_option(args, optlist->co_name,
1255 		    BOOTOPT_TYPE_BOOLEAN, &integer)) {
1256 			if (integer) {
1257 				if (optlist->co_trueop == OR)
1258 					cpuctrl |= optlist->co_value;
1259 				else if (optlist->co_trueop == BIC)
1260 					cpuctrl &= ~optlist->co_value;
1261 			} else {
1262 				if (optlist->co_falseop == OR)
1263 					cpuctrl |= optlist->co_value;
1264 				else if (optlist->co_falseop == BIC)
1265 					cpuctrl &= ~optlist->co_value;
1266 			}
1267 		}
1268 		++optlist;
1269 	}
1270 	return(cpuctrl);
1271 }
1272 #endif /* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 || CPU_SA110 */
1273 
1274 #if defined (CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) \
1275 	|| defined(CPU_ARM8)
/*
 * Boot options common to the ARM6, ARM7, ARM7TDMI and ARM8 cores, all of
 * which use a combined instruction/data cache (IDC).
 */
struct cpu_option arm678_options[] = {
#ifdef COMPAT_12
	/* Old-style (NetBSD 1.2) option names, kept for compatibility. */
	{ "nocache",		IGN, BIC, CPU_CONTROL_IDC_ENABLE },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "cpu.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }	/* terminator */
};
1287 
1288 #endif	/* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 */
1289 
1290 #ifdef CPU_ARM6
/* ARM6-specific boot options (cache and write-buffer control). */
struct cpu_option arm6_options[] = {
	{ "arm6.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm6.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm6.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm6.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }	/* terminator */
};
1298 
1299 void
1300 arm6_setup(args)
1301 	char *args;
1302 {
1303 	int cpuctrl, cpuctrlmask;
1304 
1305 	/* Set up default control registers bits */
1306 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1307 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1308 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
1309 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1310 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1311 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
1312 		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE
1313 		 | CPU_CONTROL_AFLT_ENABLE;
1314 
1315 #ifdef ARM6_LATE_ABORT
1316 	cpuctrl |= CPU_CONTROL_LABT_ENABLE;
1317 #endif	/* ARM6_LATE_ABORT */
1318 
1319 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1320 	cpuctrl = parse_cpu_options(args, arm6_options, cpuctrl);
1321 
1322 	/* Clear out the cache */
1323 	cpu_idcache_wbinv_all();
1324 
1325 	/* Set the control register */
1326 	curcpu()->ci_ctrl = cpuctrl;
1327 	cpu_control(0xffffffff, cpuctrl);
1328 }
1329 #endif	/* CPU_ARM6 */
1330 
1331 #ifdef CPU_ARM7
/* ARM7-specific boot options (cache, write buffer, FPA clock divider). */
struct cpu_option arm7_options[] = {
	{ "arm7.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm7.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm7.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm7.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
#ifdef COMPAT_12
	/* Old-style (NetBSD 1.2) option name, kept for compatibility. */
	{ "fpaclk2",		BIC, OR,  CPU_CONTROL_CPCLK },
#endif	/* COMPAT_12 */
	{ "arm700.fpaclk",	BIC, OR,  CPU_CONTROL_CPCLK },
	{ NULL,			IGN, IGN, 0 }	/* terminator */
};
1343 
1344 void
1345 arm7_setup(args)
1346 	char *args;
1347 {
1348 	int cpuctrl, cpuctrlmask;
1349 
1350 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1351 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1352 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
1353 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1354 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1355 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
1356 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_LABT_ENABLE
1357 		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE
1358 		 | CPU_CONTROL_AFLT_ENABLE;
1359 
1360 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1361 	cpuctrl = parse_cpu_options(args, arm7_options, cpuctrl);
1362 
1363 	/* Clear out the cache */
1364 	cpu_idcache_wbinv_all();
1365 
1366 	/* Set the control register */
1367 	curcpu()->ci_ctrl = cpuctrl;
1368 	cpu_control(0xffffffff, cpuctrl);
1369 }
1370 #endif	/* CPU_ARM7 */
1371 
1372 #ifdef CPU_ARM7TDMI
/* ARM7TDMI-specific boot options (shares the "arm7." option names). */
struct cpu_option arm7tdmi_options[] = {
	{ "arm7.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm7.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm7.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm7.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
#ifdef COMPAT_12
	/* Old-style (NetBSD 1.2) option name, kept for compatibility. */
	{ "fpaclk2",		BIC, OR,  CPU_CONTROL_CPCLK },
#endif	/* COMPAT_12 */
	{ "arm700.fpaclk",	BIC, OR,  CPU_CONTROL_CPCLK },
	{ NULL,			IGN, IGN, 0 }	/* terminator */
};
1384 
1385 void
1386 arm7tdmi_setup(args)
1387 	char *args;
1388 {
1389 	int cpuctrl;
1390 
1391 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1392 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1393 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
1394 
1395 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1396 	cpuctrl = parse_cpu_options(args, arm7tdmi_options, cpuctrl);
1397 
1398 	/* Clear out the cache */
1399 	cpu_idcache_wbinv_all();
1400 
1401 	/* Set the control register */
1402 	curcpu()->ci_ctrl = cpuctrl;
1403 	cpu_control(0xffffffff, cpuctrl);
1404 }
1405 #endif	/* CPU_ARM7TDMI */
1406 
1407 #ifdef CPU_ARM8
/* ARM8-specific boot options (cache, write buffer, branch prediction). */
struct cpu_option arm8_options[] = {
	{ "arm8.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm8.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm8.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm8.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
#ifdef COMPAT_12
	/* Old-style (NetBSD 1.2) option name, kept for compatibility. */
	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "arm8.branchpredict",	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ NULL,			IGN, IGN, 0 }	/* terminator */
};
1420 
1421 void
1422 arm8_setup(args)
1423 	char *args;
1424 {
1425 	int integer;
1426 	int cpuctrl, cpuctrlmask;
1427 	int clocktest;
1428 	int setclock = 0;
1429 
1430 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1431 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1432 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
1433 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1434 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1435 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
1436 		 | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_ROM_ENABLE
1437 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE;
1438 
1439 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1440 	cpuctrl = parse_cpu_options(args, arm8_options, cpuctrl);
1441 
1442 	/* Get clock configuration */
1443 	clocktest = arm8_clock_config(0, 0) & 0x0f;
1444 
1445 	/* Special ARM8 clock and test configuration */
1446 	if (get_bootconf_option(args, "arm8.clock.reset", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1447 		clocktest = 0;
1448 		setclock = 1;
1449 	}
1450 	if (get_bootconf_option(args, "arm8.clock.dynamic", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1451 		if (integer)
1452 			clocktest |= 0x01;
1453 		else
1454 			clocktest &= ~(0x01);
1455 		setclock = 1;
1456 	}
1457 	if (get_bootconf_option(args, "arm8.clock.sync", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1458 		if (integer)
1459 			clocktest |= 0x02;
1460 		else
1461 			clocktest &= ~(0x02);
1462 		setclock = 1;
1463 	}
1464 	if (get_bootconf_option(args, "arm8.clock.fast", BOOTOPT_TYPE_BININT, &integer)) {
1465 		clocktest = (clocktest & ~0xc0) | (integer & 3) << 2;
1466 		setclock = 1;
1467 	}
1468 	if (get_bootconf_option(args, "arm8.test", BOOTOPT_TYPE_BININT, &integer)) {
1469 		clocktest |= (integer & 7) << 5;
1470 		setclock = 1;
1471 	}
1472 
1473 	/* Clear out the cache */
1474 	cpu_idcache_wbinv_all();
1475 
1476 	/* Set the control register */
1477 	curcpu()->ci_ctrl = cpuctrl;
1478 	cpu_control(0xffffffff, cpuctrl);
1479 
1480 	/* Set the clock/test register */
1481 	if (setclock)
1482 		arm8_clock_config(0x7f, clocktest);
1483 }
1484 #endif	/* CPU_ARM8 */
1485 
1486 #ifdef CPU_ARM9
/* ARM9-specific boot options (separate I and D caches, write buffer). */
struct cpu_option arm9_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm9.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm9.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "arm9.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "arm9.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }	/* terminator */
};
1498 
1499 void
1500 arm9_setup(args)
1501 	char *args;
1502 {
1503 	int cpuctrl, cpuctrlmask;
1504 
1505 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1506 	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1507 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1508 	    | CPU_CONTROL_WBUF_ENABLE;
1509 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1510 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1511 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1512 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1513 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1514 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1515 		 | CPU_CONTROL_CPCLK;
1516 
1517 	cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);
1518 
1519 	/* Clear out the cache */
1520 	cpu_idcache_wbinv_all();
1521 
1522 	/* Set the control register */
1523 	curcpu()->ci_ctrl = cpuctrl;
1524 	cpu_control(0xffffffff, cpuctrl);
1525 
1526 }
1527 #endif	/* CPU_ARM9 */
1528 
1529 #ifdef CPU_SA110
/* SA-110 boot options (separate I and D caches, write buffer). */
struct cpu_option sa110_options[] = {
#ifdef COMPAT_12
	/* Old-style (NetBSD 1.2) option names, kept for compatibility. */
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa110.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa110.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "sa110.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "sa110.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }	/* terminator */
};
1545 
/*
 * Initialise the SA-110 CPU control register from the defaults, modified
 * by any boot arguments, then write it to the hardware and enable clock
 * switching.
 */
void
sa110_setup(args)
	char *args;
{
	int cpuctrl, cpuctrlmask;

	/*
	 * Default control register bits: MMU on, 32-bit program and data
	 * spaces, system ("S") protection bit, separate I and D caches
	 * and the write buffer enabled.
	 */
	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE;
	/*
	 * All control bits this routine knows about; only referenced by
	 * the commented-out cpu_control() call below.
	 */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
		 | CPU_CONTROL_CPCLK;

	/* Let the boot arguments override the defaults. */
	cpuctrl = parse_cpu_options(args, sa110_options, cpuctrl);

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	curcpu()->ci_ctrl = cpuctrl;
/*	cpu_control(cpuctrlmask, cpuctrl);*/
	cpu_control(0xffffffff, cpuctrl);

	/*
	 * enable clockswitching, note that this doesn't read or write to r0,
	 * r0 is just to make it valid asm
	 */
	__asm ("mcr 15, 0, r0, c15, c1, 2");
}
1580 #endif	/* CPU_SA110 */
1581 
1582 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321)
/* XScale boot options (branch prediction, separate I and D caches). */
struct cpu_option xscale_options[] = {
#ifdef COMPAT_12
	/* Old-style (NetBSD 1.2) option names, kept for compatibility. */
	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
#endif	/* COMPAT_12 */
	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "xscale.branchpredict", BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "xscale.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "xscale.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "xscale.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ NULL,			IGN, IGN, 0 }	/* terminator */
};
1597 
/*
 * Initialise the XScale CPU control register from the defaults, modified
 * by any boot arguments, then write it to the hardware.
 */
void
xscale_setup(args)
	char *args;
{
	int cpuctrl, cpuctrlmask;

	/*
	 * The XScale Write Buffer is always enabled.  Our option
	 * is to enable/disable coalescing.  Note that bits 6:3
	 * must always be enabled.
	 */

	/*
	 * Default control register bits: MMU on, 32-bit program and data
	 * spaces, system ("S") protection bit, separate I and D caches,
	 * write buffer enabled, and late-abort model selected.
	 */
	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
	/*
	 * All control bits this routine knows about; only referenced by
	 * the commented-out cpu_control() call below.
	 */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
		 | CPU_CONTROL_CPCLK;

	/* Let the boot arguments override the defaults. */
	cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl);

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/*
	 * Set the control register.  Note that bits 6:3 must always
	 * be set to 1.
	 */
	curcpu()->ci_ctrl = cpuctrl;
/*	cpu_control(cpuctrlmask, cpuctrl);*/
	cpu_control(0xffffffff, cpuctrl);

#if 0
	/*
	 * XXX FIXME
	 * Disable write buffer coalescing, PT ECC, and set
	 * the mini-cache to write-back/read-allocate.
	 */
	__asm ("mcr p15, 0, %0, c1, c0, 1" :: "r" (0));
#endif
}
#endif	/* CPU_XSCALE_80200 || CPU_XSCALE_80321 */
1645