xref: /netbsd-src/sys/arch/arm/arm/cpufunc.c (revision 1ffa7b76c40339c17a0fb2a09fac93f287cfc046)
1 /*	$NetBSD: cpufunc.c,v 1.58 2003/04/22 00:24:48 thorpej Exp $	*/
2 
3 /*
4  * arm7tdmi support code Copyright (c) 2001 John Fremlin
5  * arm8 support code Copyright (c) 1997 ARM Limited
6  * arm8 support code Copyright (c) 1997 Causality Limited
7  * arm9 support code Copyright (C) 2001 ARM Ltd
8  * Copyright (c) 1997 Mark Brinicombe.
9  * Copyright (c) 1997 Causality Limited
10  * All rights reserved.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. All advertising materials mentioning features or use of this software
21  *    must display the following acknowledgement:
22  *	This product includes software developed by Causality Limited.
23  * 4. The name of Causality Limited may not be used to endorse or promote
24  *    products derived from this software without specific prior written
25  *    permission.
26  *
27  * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
28  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
29  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
30  * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
31  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
32  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
33  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37  * SUCH DAMAGE.
38  *
39  * RiscBSD kernel project
40  *
41  * cpufuncs.c
42  *
43  * C functions for supporting CPU / MMU / TLB specific operations.
44  *
45  * Created      : 30/01/97
46  */
47 
48 #include "opt_compat_netbsd.h"
49 #include "opt_cpuoptions.h"
50 #include "opt_perfctrs.h"
51 
52 #include <sys/types.h>
53 #include <sys/param.h>
54 #include <sys/pmc.h>
55 #include <sys/systm.h>
56 #include <machine/cpu.h>
57 #include <machine/bootconfig.h>
58 #include <arch/arm/arm/disassem.h>
59 
60 #include <uvm/uvm.h>
61 
62 #include <arm/cpuconf.h>
63 #include <arm/cpufunc.h>
64 
65 #ifdef CPU_XSCALE_80200
66 #include <arm/xscale/i80200reg.h>
67 #include <arm/xscale/i80200var.h>
68 #endif
69 
70 #ifdef CPU_XSCALE_80321
71 #include <arm/xscale/i80321reg.h>
72 #include <arm/xscale/i80321var.h>
73 #endif
74 
75 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321)
76 #include <arm/xscale/xscalereg.h>
77 #endif
78 
#if defined(PERFCTRS)
/*
 * Performance-counter backend hooks; presumably installed by
 * xscale_pmu_init() from set_cpufuncs() -- NOTE(review): verify.
 */
struct arm_pmc_funcs *arm_pmc;
#endif
82 
/* PRIMARY CACHE VARIABLES */
/* Filled in by get_cachetype_cp15() or get_cachetype_table() below. */
int	arm_picache_size;	/* I-cache total size (bytes) */
int	arm_picache_line_size;	/* I-cache line size (bytes) */
int	arm_picache_ways;	/* I-cache associativity (ways) */

int	arm_pdcache_size;	/* D-cache total size (bytes); and unified */
int	arm_pdcache_line_size;	/* D-cache line size (bytes) */
int	arm_pdcache_ways;	/* D-cache associativity (ways) */

int	arm_pcache_type;	/* CPU_CT_CTYPE_* write policy */
int	arm_pcache_unified;	/* nonzero if I and D caches are unified */

int	arm_dcache_align;	/* D-cache line size; alignment for DMA etc. */
int	arm_dcache_align_mask;	/* arm_dcache_align - 1 */

/* 1 == use cpu_sleep(), 0 == don't */
int cpu_do_powersave;
100 
#ifdef CPU_ARM3
/*
 * ARM3 function vector.  The MMU entries are NULL and the TLB entries
 * are no-ops; every cache operation funnels into arm3_cache_flush.
 */
struct cpu_functions arm3_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	arm3_control,			/* control		*/
	NULL,				/* domain		*/
	NULL,				/* setttb		*/
	NULL,				/* faultstatus		*/
	NULL,				/* faultaddress		*/

	/* TLB functions */

	cpufunc_nullop,			/* tlb_flushID		*/
	(void *)cpufunc_nullop,		/* tlb_flushID_SE	*/
	cpufunc_nullop,			/* tlb_flushI		*/
	(void *)cpufunc_nullop,		/* tlb_flushI_SE	*/
	cpufunc_nullop,			/* tlb_flushD		*/
	(void *)cpufunc_nullop,		/* tlb_flushD_SE	*/

	/* Cache operations */

	cpufunc_nullop,			/* icache_sync_all	*/
	(void *) cpufunc_nullop,	/* icache_sync_range	*/

	arm3_cache_flush,		/* dcache_wbinv_all	*/
	(void *)arm3_cache_flush,	/* dcache_wbinv_range	*/
	(void *)arm3_cache_flush,	/* dcache_inv_range	*/
	(void *)cpufunc_nullop,		/* dcache_wb_range	*/

	arm3_cache_flush,		/* idcache_wbinv_all	*/
	(void *)arm3_cache_flush,	/* idcache_wbinv_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	cpufunc_nullop,			/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	early_abort_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	NULL,				/* context_switch	*/

	(void *)cpufunc_nullop		/* cpu setup		*/

};
#endif	/* CPU_ARM3 */
158 
#ifdef CPU_ARM6
/*
 * ARM6 function vector.  Shares the arm67_* TLB/cache/context helpers
 * with the ARM7; the data-abort fixup depends on ARM6_LATE_ABORT.
 */
struct cpu_functions arm6_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	arm67_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	arm67_tlb_flush,		/* tlb_flushID		*/
	arm67_tlb_purge,		/* tlb_flushID_SE	*/
	arm67_tlb_flush,		/* tlb_flushI		*/
	arm67_tlb_purge,		/* tlb_flushI_SE	*/
	arm67_tlb_flush,		/* tlb_flushD		*/
	arm67_tlb_purge,		/* tlb_flushD_SE	*/

	/* Cache operations */

	cpufunc_nullop,			/* icache_sync_all	*/
	(void *) cpufunc_nullop,	/* icache_sync_range	*/

	arm67_cache_flush,		/* dcache_wbinv_all	*/
	(void *)arm67_cache_flush,	/* dcache_wbinv_range	*/
	(void *)arm67_cache_flush,	/* dcache_inv_range	*/
	(void *)cpufunc_nullop,		/* dcache_wb_range	*/

	arm67_cache_flush,		/* idcache_wbinv_all	*/
	(void *)arm67_cache_flush,	/* idcache_wbinv_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	cpufunc_nullop,			/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

#ifdef ARM6_LATE_ABORT
	late_abort_fixup,		/* dataabt_fixup	*/
#else
	early_abort_fixup,		/* dataabt_fixup	*/
#endif
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm67_context_switch,		/* context_switch	*/

	arm6_setup			/* cpu setup		*/

};
#endif	/* CPU_ARM6 */
220 
#ifdef CPU_ARM7
/*
 * ARM7 function vector.  Same arm67_* helpers as the ARM6, but the
 * ARM7 always uses the late-abort data-abort fixup.
 */
struct cpu_functions arm7_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	arm67_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	arm67_tlb_flush,		/* tlb_flushID		*/
	arm67_tlb_purge,		/* tlb_flushID_SE	*/
	arm67_tlb_flush,		/* tlb_flushI		*/
	arm67_tlb_purge,		/* tlb_flushI_SE	*/
	arm67_tlb_flush,		/* tlb_flushD		*/
	arm67_tlb_purge,		/* tlb_flushD_SE	*/

	/* Cache operations */

	cpufunc_nullop,			/* icache_sync_all	*/
	(void *)cpufunc_nullop,		/* icache_sync_range	*/

	arm67_cache_flush,		/* dcache_wbinv_all	*/
	(void *)arm67_cache_flush,	/* dcache_wbinv_range	*/
	(void *)arm67_cache_flush,	/* dcache_inv_range	*/
	(void *)cpufunc_nullop,		/* dcache_wb_range	*/

	arm67_cache_flush,		/* idcache_wbinv_all	*/
	(void *)arm67_cache_flush,	/* idcache_wbinv_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	cpufunc_nullop,			/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	late_abort_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm67_context_switch,		/* context_switch	*/

	arm7_setup			/* cpu setup		*/

};
#endif	/* CPU_ARM7 */
278 
#ifdef CPU_ARM7TDMI
/*
 * ARM7TDMI function vector.  Uses its own arm7tdmi_* TLB/cache/context
 * helpers; all TLB and cache range operations fall back to the flushID
 * forms.
 */
struct cpu_functions arm7tdmi_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	arm7tdmi_setttb,		/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	arm7tdmi_tlb_flushID,		/* tlb_flushID		*/
	arm7tdmi_tlb_flushID_SE,	/* tlb_flushID_SE	*/
	arm7tdmi_tlb_flushID,		/* tlb_flushI		*/
	arm7tdmi_tlb_flushID_SE,	/* tlb_flushI_SE	*/
	arm7tdmi_tlb_flushID,		/* tlb_flushD		*/
	arm7tdmi_tlb_flushID_SE,	/* tlb_flushD_SE	*/

	/* Cache operations */

	cpufunc_nullop,			/* icache_sync_all	*/
	(void *)cpufunc_nullop,		/* icache_sync_range	*/

	arm7tdmi_cache_flushID,		/* dcache_wbinv_all	*/
	(void *)arm7tdmi_cache_flushID,	/* dcache_wbinv_range	*/
	(void *)arm7tdmi_cache_flushID,	/* dcache_inv_range	*/
	(void *)cpufunc_nullop,		/* dcache_wb_range	*/

	arm7tdmi_cache_flushID,		/* idcache_wbinv_all	*/
	(void *)arm7tdmi_cache_flushID,	/* idcache_wbinv_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	cpufunc_nullop,			/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	late_abort_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm7tdmi_context_switch,	/* context_switch	*/

	arm7tdmi_setup			/* cpu setup		*/

};
#endif	/* CPU_ARM7TDMI */
336 
#ifdef CPU_ARM8
/*
 * ARM8 function vector.  First table here with distinct clean (wb)
 * and purge (wbinv) cache entry points; dcache_inv_range still falls
 * back to a full purge (see XXX below).
 */
struct cpu_functions arm8_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	arm8_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	arm8_tlb_flushID,		/* tlb_flushID		*/
	arm8_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	arm8_tlb_flushID,		/* tlb_flushI		*/
	arm8_tlb_flushID_SE,		/* tlb_flushI_SE	*/
	arm8_tlb_flushID,		/* tlb_flushD		*/
	arm8_tlb_flushID_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	cpufunc_nullop,			/* icache_sync_all	*/
	(void *)cpufunc_nullop,		/* icache_sync_range	*/

	arm8_cache_purgeID,		/* dcache_wbinv_all	*/
	(void *)arm8_cache_purgeID,	/* dcache_wbinv_range	*/
/*XXX*/	(void *)arm8_cache_purgeID,	/* dcache_inv_range	*/
	(void *)arm8_cache_cleanID,	/* dcache_wb_range	*/

	arm8_cache_purgeID,		/* idcache_wbinv_all	*/
	(void *)arm8_cache_purgeID,	/* idcache_wbinv_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	cpufunc_nullop,			/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm8_context_switch,		/* context_switch	*/

	arm8_setup			/* cpu setup		*/
};
#endif	/* CPU_ARM8 */
393 
#ifdef CPU_ARM9
/*
 * ARM9 function vector.  Uses the shared armv4_* TLB/writebuf helpers
 * plus arm9-specific cache and context-switch routines.
 */
struct cpu_functions arm9_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	arm9_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm9_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	arm9_cache_syncI,		/* icache_sync_all	*/
	arm9_cache_syncI_rng,		/* icache_sync_range	*/

		/* ...cache in write-though mode... */
	arm9_cache_flushD,		/* dcache_wbinv_all	*/
	arm9_cache_flushD_rng,		/* dcache_wbinv_range	*/
	arm9_cache_flushD_rng,		/* dcache_inv_range	*/
	(void *)cpufunc_nullop,		/* dcache_wb_range	*/

	arm9_cache_flushID,		/* idcache_wbinv_all	*/
	arm9_cache_flushID_rng,		/* idcache_wbinv_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm9_context_switch,		/* context_switch	*/

	arm9_setup			/* cpu setup		*/

};
#endif /* CPU_ARM9 */
452 
#ifdef CPU_SA110
/*
 * StrongARM SA-110 function vector: armv4_* TLB helpers with the
 * shared sa1_* cache routines.  dcache_inv_range is a full purge
 * (see XXX below).
 */
struct cpu_functions sa110_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	sa1_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	sa1_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	sa1_cache_syncI,		/* icache_sync_all	*/
	sa1_cache_syncI_rng,		/* icache_sync_range	*/

	sa1_cache_purgeD,		/* dcache_wbinv_all	*/
	sa1_cache_purgeD_rng,		/* dcache_wbinv_range	*/
/*XXX*/	sa1_cache_purgeD_rng,		/* dcache_inv_range	*/
	sa1_cache_cleanD_rng,		/* dcache_wb_range	*/

	sa1_cache_purgeID,		/* idcache_wbinv_all	*/
	sa1_cache_purgeID_rng,		/* idcache_wbinv_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	sa110_context_switch,		/* context_switch	*/

	sa110_setup			/* cpu setup		*/
};
#endif	/* CPU_SA110 */
509 
#if defined(CPU_SA1100) || defined(CPU_SA1110)
/*
 * SA-1100/SA-1110 function vector: same sa1_* cache helpers as the
 * SA-110, but with a read-buffer drain, a real cpu_sleep hook and
 * SA-11x0 specific context switch / setup.
 */
struct cpu_functions sa11x0_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	sa1_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	sa1_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	sa1_cache_syncI,		/* icache_sync_all	*/
	sa1_cache_syncI_rng,		/* icache_sync_range	*/

	sa1_cache_purgeD,		/* dcache_wbinv_all	*/
	sa1_cache_purgeD_rng,		/* dcache_wbinv_range	*/
/*XXX*/	sa1_cache_purgeD_rng,		/* dcache_inv_range	*/
	sa1_cache_cleanD_rng,		/* dcache_wb_range	*/

	sa1_cache_purgeID,		/* idcache_wbinv_all	*/
	sa1_cache_purgeID_rng,		/* idcache_wbinv_range	*/

	/* Other functions */

	sa11x0_drain_readbuf,		/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	sa11x0_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	sa11x0_context_switch,		/* context_switch	*/

	sa11x0_setup			/* cpu setup		*/
};
#endif	/* CPU_SA1100 || CPU_SA1110 */
566 
#ifdef CPU_IXP12X0
/*
 * IXP12x0 function vector: SA-1 style caches/TLB with an IXP12x0
 * read-buffer drain and its own context switch / setup.
 */
struct cpu_functions ixp12x0_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	sa1_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	sa1_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	sa1_cache_syncI,		/* icache_sync_all	*/
	sa1_cache_syncI_rng,		/* icache_sync_range	*/

	sa1_cache_purgeD,		/* dcache_wbinv_all	*/
	sa1_cache_purgeD_rng,		/* dcache_wbinv_range	*/
/*XXX*/	sa1_cache_purgeD_rng,		/* dcache_inv_range	*/
	sa1_cache_cleanD_rng,		/* dcache_wb_range	*/

	sa1_cache_purgeID,		/* idcache_wbinv_all	*/
	sa1_cache_purgeID_rng,		/* idcache_wbinv_range	*/

	/* Other functions */

	ixp12x0_drain_readbuf,		/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	ixp12x0_context_switch,		/* context_switch	*/

	ixp12x0_setup			/* cpu setup		*/
};
#endif	/* CPU_IXP12X0 */
623 
#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
    defined(CPU_XSCALE_PXA2X0)
/*
 * XScale function vector (i80200, i80321, PXA2x0).  Note cpwait is a
 * real drain (xscale_cpwait) and dcache_inv_range is a true
 * invalidate; set_cpufuncs() overrides it with a purge on early
 * i80200 steppings (see the errata workaround there).
 */
struct cpu_functions xscale_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	xscale_cpwait,			/* cpwait		*/

	/* MMU functions */

	xscale_control,			/* control		*/
	cpufunc_domains,		/* domain		*/
	xscale_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	xscale_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	xscale_cache_syncI,		/* icache_sync_all	*/
	xscale_cache_syncI_rng,		/* icache_sync_range	*/

	xscale_cache_purgeD,		/* dcache_wbinv_all	*/
	xscale_cache_purgeD_rng,	/* dcache_wbinv_range	*/
	xscale_cache_flushD_rng,	/* dcache_inv_range	*/
	xscale_cache_cleanD_rng,	/* dcache_wb_range	*/

	xscale_cache_purgeID,		/* idcache_wbinv_all	*/
	xscale_cache_purgeID_rng,	/* idcache_wbinv_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	xscale_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	xscale_context_switch,		/* context_switch	*/

	xscale_setup			/* cpu setup		*/
};
#endif /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 */
681 
/*
 * Global variables (shared with locore.s)
 */
685 
struct cpu_functions cpufuncs;	/* active vector, copied from a table above */
u_int cputype;			/* CPU id, masked with CPU_ID_CPU_MASK */
u_int cpu_reset_needs_v4_MMU_disable;	/* flag used in locore.s */
689 
#if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) || \
    defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
    defined(CPU_XSCALE_PXA2X0)
static void get_cachetype_cp15 __P((void));

/*
 * Read the CP15 cache type register (register 0, opcode2 1) and
 * decode it into the arm_p[id]cache_* globals.  Field layouts are
 * defined by the ARM ARM; the CPU_CT_* macros do the extraction.
 */
static void
get_cachetype_cp15()
{
	u_int ctype, isize, dsize;
	u_int multiplier;

	__asm __volatile("mrc p15, 0, %0, c0, c0, 1"
		: "=r" (ctype));

	/*
	 * ...and thus spake the ARM ARM:
	 *
	 * If an <opcode2> value corresponding to an unimplemented or
	 * reserved ID register is encountered, the System Control
	 * processor returns the value of the main ID register.
	 */
	if (ctype == cpufunc_id())
		goto out;

	/* S bit clear means a unified (single) cache. */
	if ((ctype & CPU_CT_S) == 0)
		arm_pcache_unified = 1;

	/*
	 * If you want to know how this code works, go read the ARM ARM.
	 */

	arm_pcache_type = CPU_CT_CTYPE(ctype);

	/* Separate I-cache geometry only exists for Harvard caches. */
	if (arm_pcache_unified == 0) {
		isize = CPU_CT_ISIZE(ctype);
		multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
		arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
		if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
			if (isize & CPU_CT_xSIZE_M)
				arm_picache_line_size = 0; /* not present */
			else
				arm_picache_ways = 1;	/* direct-mapped */
		} else {
			arm_picache_ways = multiplier <<
			    (CPU_CT_xSIZE_ASSOC(isize) - 1);
		}
		arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
	}

	dsize = CPU_CT_DSIZE(ctype);
	multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
	arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
	if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
		if (dsize & CPU_CT_xSIZE_M)
			arm_pdcache_line_size = 0; /* not present */
		else
			arm_pdcache_ways = 1;	/* direct-mapped: one way,
						 * matching the I-cache case
						 * above (was 0) */
	} else {
		arm_pdcache_ways = multiplier <<
		    (CPU_CT_xSIZE_ASSOC(dsize) - 1);
	}
	arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);

	arm_dcache_align = arm_pdcache_line_size;

 out:
	arm_dcache_align_mask = arm_dcache_align - 1;
}
#endif /* ARM7TDMI || ARM8 || ARM9 || XSCALE */
759 
#if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3) || \
    defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_SA110) || \
    defined(CPU_SA1100) || defined(CPU_SA1110) || defined(CPU_IXP12X0)
/* Cache information for CPUs without cache type registers. */
/* Consumed by get_cachetype_table() below; terminated by ct_cpuid == 0. */
struct cachetab {
	u_int32_t ct_cpuid;		/* CPU id to match (CPU_ID_CPU_MASK) */
	int	ct_pcache_type;		/* CPU_CT_CTYPE_* write policy */
	int	ct_pcache_unified;	/* nonzero: unified I/D cache */
	int	ct_pdcache_size;	/* D/unified cache size (bytes) */
	int	ct_pdcache_line_size;	/* D/unified line size (bytes) */
	int	ct_pdcache_ways;	/* D/unified associativity */
	int	ct_picache_size;	/* I-cache size (bytes), 0 if unified */
	int	ct_picache_line_size;	/* I-cache line size (bytes) */
	int	ct_picache_ways;	/* I-cache associativity */
};

struct cachetab cachetab[] = {
    /* cpuid,           cache type,       u,  dsiz, ls, wy,  isiz, ls, wy */
    { CPU_ID_ARM2,      0,                1,     0,  0,  0,     0,  0,  0 },
    { CPU_ID_ARM250,    0,                1,     0,  0,  0,     0,  0,  0 },
    { CPU_ID_ARM3,      CPU_CT_CTYPE_WT,  1,  4096, 16, 64,     0,  0,  0 },
    { CPU_ID_ARM610,	CPU_CT_CTYPE_WT,  1,  4096, 16, 64,     0,  0,  0 },
    { CPU_ID_ARM710,    CPU_CT_CTYPE_WT,  1,  8192, 32,  4,     0,  0,  0 },
    { CPU_ID_ARM7500,   CPU_CT_CTYPE_WT,  1,  4096, 16,  4,     0,  0,  0 },
    { CPU_ID_ARM710A,   CPU_CT_CTYPE_WT,  1,  8192, 16,  4,     0,  0,  0 },
    { CPU_ID_ARM7500FE, CPU_CT_CTYPE_WT,  1,  4096, 16,  4,     0,  0,  0 },
    /* XXX is this type right for SA-1? */
    { CPU_ID_SA110,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 },
    { CPU_ID_SA1100,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
    { CPU_ID_SA1110,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
    { CPU_ID_IXP1200,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 }, /* XXX */
    { 0, 0, 0, 0, 0, 0, 0, 0}
};
793 
794 static void get_cachetype_table __P((void));
795 
796 static void
797 get_cachetype_table()
798 {
799 	int i;
800 	u_int32_t cpuid = cpufunc_id();
801 
802 	for (i = 0; cachetab[i].ct_cpuid != 0; i++) {
803 		if (cachetab[i].ct_cpuid == (cpuid & CPU_ID_CPU_MASK)) {
804 			arm_pcache_type = cachetab[i].ct_pcache_type;
805 			arm_pcache_unified = cachetab[i].ct_pcache_unified;
806 			arm_pdcache_size = cachetab[i].ct_pdcache_size;
807 			arm_pdcache_line_size =
808 			    cachetab[i].ct_pdcache_line_size;
809 			arm_pdcache_ways = cachetab[i].ct_pdcache_ways;
810 			arm_picache_size = cachetab[i].ct_picache_size;
811 			arm_picache_line_size =
812 			    cachetab[i].ct_picache_line_size;
813 			arm_picache_ways = cachetab[i].ct_picache_ways;
814 		}
815 	}
816 	arm_dcache_align = arm_pdcache_line_size;
817 
818 	arm_dcache_align_mask = arm_dcache_align - 1;
819 }
820 
821 #endif /* ARM2 || ARM250 || ARM3 || ARM6 || ARM7 || SA110 || SA1100 || SA1111 || IXP12X0 */
822 
823 /*
824  * Cannot panic here as we may not have a console yet ...
825  */
826 
/*
 * Probe the CPU id and install the matching cpu_functions vector,
 * cache geometry (via get_cachetype_*()) and pmap PTE protos (via
 * pmap_pte_init_*()).  Called early in boot from locore/initarm.
 *
 * Returns 0 on success; on an unrecognized CPU it panics and
 * nominally returns ARCHITECTURE_NOT_PRESENT.
 */
int
set_cpufuncs()
{
	cputype = cpufunc_id();
	cputype &= CPU_ID_CPU_MASK;

	/*
	 * NOTE: cpu_do_powersave defaults to off.  If we encounter a
	 * CPU type where we want to use it by default, then we set it.
	 */

#ifdef CPU_ARM3
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    (cputype & 0x00000f00) == 0x00000300) {
		cpufuncs = arm3_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;
		get_cachetype_table();
		return 0;
	}
#endif	/* CPU_ARM3 */
#ifdef CPU_ARM6
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    (cputype & 0x00000f00) == 0x00000600) {
		cpufuncs = arm6_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;
		get_cachetype_table();
		pmap_pte_init_generic();
		return 0;
	}
#endif	/* CPU_ARM6 */
#ifdef CPU_ARM7
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    CPU_ID_IS7(cputype) &&
	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V3) {
		cpufuncs = arm7_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;
		get_cachetype_table();
		pmap_pte_init_generic();
		return 0;
	}
#endif	/* CPU_ARM7 */
#ifdef CPU_ARM7TDMI
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    CPU_ID_IS7(cputype) &&
	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V4T) {
		cpufuncs = arm7tdmi_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		return 0;
	}
#endif
#ifdef CPU_ARM8
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    (cputype & 0x0000f000) == 0x00008000) {
		cpufuncs = arm8_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;	/* XXX correct? */
		get_cachetype_cp15();
		pmap_pte_init_arm8();
		return 0;
	}
#endif	/* CPU_ARM8 */
#ifdef CPU_ARM9
	if (cputype == CPU_ID_ARM920T) {
		cpufuncs = arm9_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
		get_cachetype_cp15();
		pmap_pte_init_arm9();
		return 0;
	}
#endif /* CPU_ARM9 */
#ifdef CPU_SA110
	if (cputype == CPU_ID_SA110) {
		cpufuncs = sa110_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it */
		get_cachetype_table();
		pmap_pte_init_sa1();
		return 0;
	}
#endif	/* CPU_SA110 */
#ifdef CPU_SA1100
	if (cputype == CPU_ID_SA1100) {
		cpufuncs = sa11x0_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it	*/
		get_cachetype_table();
		pmap_pte_init_sa1();

		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;

		return 0;
	}
#endif	/* CPU_SA1100 */
#ifdef CPU_SA1110
	if (cputype == CPU_ID_SA1110) {
		cpufuncs = sa11x0_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it	*/
		get_cachetype_table();
		pmap_pte_init_sa1();

		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;

		return 0;
	}
#endif	/* CPU_SA1110 */
#ifdef CPU_IXP12X0
        if (cputype == CPU_ID_IXP1200) {
                cpufuncs = ixp12x0_cpufuncs;
                cpu_reset_needs_v4_MMU_disable = 1;
                get_cachetype_table();
                pmap_pte_init_sa1();
                return 0;
        }
#endif  /* CPU_IXP12X0 */
#ifdef CPU_XSCALE_80200
	if (cputype == CPU_ID_80200) {
		/* Stepping, used for the D$ errata check below. */
		int rev = cpufunc_id() & CPU_ID_REVISION_MASK;

		i80200_icu_init();

		/*
		 * Reset the Performance Monitoring Unit to a
		 * pristine state:
		 *	- CCNT, PMN0, PMN1 reset to 0
		 *	- overflow indications cleared
		 *	- all counters disabled
		 */
		__asm __volatile("mcr p14, 0, %0, c0, c0, 0"
			:
			: "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
			       PMNC_CC_IF));

#if defined(XSCALE_CCLKCFG)
		/*
		 * Crank CCLKCFG to maximum legal value.
		 */
		__asm __volatile ("mcr p14, 0, %0, c6, c0, 0"
			:
			: "r" (XSCALE_CCLKCFG));
#endif

		/*
		 * XXX Disable ECC in the Bus Controller Unit; we
		 * don't really support it, yet.  Clear any pending
		 * error indications.
		 */
		__asm __volatile("mcr p13, 0, %0, c0, c1, 0"
			:
			: "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));

		cpufuncs = xscale_cpufuncs;
#if defined(PERFCTRS)
		xscale_pmu_init();
#endif

		/*
		 * i80200 errata: Step-A0 and A1 have a bug where
		 * D$ dirty bits are not cleared on "invalidate by
		 * address".
		 *
		 * Workaround: Clean cache line before invalidating.
		 */
		if (rev == 0 || rev == 1)
			cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng;

		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();
		return 0;
	}
#endif /* CPU_XSCALE_80200 */
#ifdef CPU_XSCALE_80321
	if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
	    cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0) {
		i80321_icu_init();

		/*
		 * Reset the Performance Monitoring Unit to a
		 * pristine state:
		 *	- CCNT, PMN0, PMN1 reset to 0
		 *	- overflow indications cleared
		 *	- all counters disabled
		 */
		__asm __volatile("mcr p14, 0, %0, c0, c0, 0"
			:
			: "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
			       PMNC_CC_IF));

		cpufuncs = xscale_cpufuncs;
#if defined(PERFCTRS)
		xscale_pmu_init();
#endif

		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();
		return 0;
	}
#endif /* CPU_XSCALE_80321 */
#ifdef CPU_XSCALE_PXA2X0
	/* ignore core revision to test PXA2xx CPUs */
	if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {

		cpufuncs = xscale_cpufuncs;
#if defined(PERFCTRS)
		xscale_pmu_init();
#endif

		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();

		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;

		return 0;
	}
#endif /* CPU_XSCALE_PXA2X0 */
	/*
	 * Bzzzz. And the answer was ...
	 */
	panic("No support for this CPU type (%08x) in kernel", cputype);
	return(ARCHITECTURE_NOT_PRESENT);
}
1053 
1054 /*
1055  * Fixup routines for data and prefetch aborts.
1056  *
1057  * Several compile time symbols are used
1058  *
1059  * DEBUG_FAULT_CORRECTION - Print debugging information during the
1060  * correction of registers after a fault.
1061  * ARM6_LATE_ABORT - ARM6 supports both early and late aborts
1062  * when defined should use late aborts
1063  */
1064 
1065 
1066 /*
1067  * Null abort fixup routine.
1068  * For use when no fixup is required.
1069  */
1070 int
1071 cpufunc_null_fixup(arg)
1072 	void *arg;
1073 {
1074 	return(ABORT_FIXUP_OK);
1075 }
1076 
1077 
1078 #if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3) || \
1079     defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI)
1080 
#ifdef DEBUG_FAULT_CORRECTION
/* Verbose variants: trace and disassemble each instruction being fixed up. */
#define DFC_PRINTF(x)		printf x
#define DFC_DISASSEMBLE(x)	disassemble(x)
#else
/* Debugging disabled: both helpers compile away to nothing. */
#define DFC_PRINTF(x)		/* nothing */
#define DFC_DISASSEMBLE(x)	/* nothing */
#endif
1088 
1089 /*
1090  * "Early" data abort fixup.
1091  *
1092  * For ARM2, ARM2as, ARM3 and ARM6 (in early-abort mode).  Also used
1093  * indirectly by ARM6 (in late-abort mode) and ARM7[TDMI].
1094  *
1095  * In early aborts, we may have to fix up LDM, STM, LDC and STC.
1096  */
1097 int
1098 early_abort_fixup(arg)
1099 	void *arg;
1100 {
1101 	trapframe_t *frame = arg;
1102 	u_int fault_pc;
1103 	u_int fault_instruction;
1104 	int saved_lr = 0;
1105 
1106 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1107 
1108 		/* Ok an abort in SVC mode */
1109 
1110 		/*
1111 		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1112 		 * as the fault happened in svc mode but we need it in the
1113 		 * usr slot so we can treat the registers as an array of ints
1114 		 * during fixing.
1115 		 * NOTE: This PC is in the position but writeback is not
1116 		 * allowed on r15.
1117 		 * Doing it like this is more efficient than trapping this
1118 		 * case in all possible locations in the following fixup code.
1119 		 */
1120 
1121 		saved_lr = frame->tf_usr_lr;
1122 		frame->tf_usr_lr = frame->tf_svc_lr;
1123 
1124 		/*
1125 		 * Note the trapframe does not have the SVC r13 so a fault
1126 		 * from an instruction with writeback to r13 in SVC mode is
1127 		 * not allowed. This should not happen as the kstack is
1128 		 * always valid.
1129 		 */
1130 	}
1131 
1132 	/* Get fault address and status from the CPU */
1133 
1134 	fault_pc = frame->tf_pc;
1135 	fault_instruction = *((volatile unsigned int *)fault_pc);
1136 
1137 	/* Decode the fault instruction and fix the registers as needed */
1138 
1139 	if ((fault_instruction & 0x0e000000) == 0x08000000) {
1140 		int base;
1141 		int loop;
1142 		int count;
1143 		int *registers = &frame->tf_r0;
1144 
1145 		DFC_PRINTF(("LDM/STM\n"));
1146 		DFC_DISASSEMBLE(fault_pc);
1147 		if (fault_instruction & (1 << 21)) {
1148 			DFC_PRINTF(("This instruction must be corrected\n"));
1149 			base = (fault_instruction >> 16) & 0x0f;
1150 			if (base == 15)
1151 				return ABORT_FIXUP_FAILED;
1152 			/* Count registers transferred */
1153 			count = 0;
1154 			for (loop = 0; loop < 16; ++loop) {
1155 				if (fault_instruction & (1<<loop))
1156 					++count;
1157 			}
1158 			DFC_PRINTF(("%d registers used\n", count));
1159 			DFC_PRINTF(("Corrected r%d by %d bytes ",
1160 				       base, count * 4));
1161 			if (fault_instruction & (1 << 23)) {
1162 				DFC_PRINTF(("down\n"));
1163 				registers[base] -= count * 4;
1164 			} else {
1165 				DFC_PRINTF(("up\n"));
1166 				registers[base] += count * 4;
1167 			}
1168 		}
1169 	} else if ((fault_instruction & 0x0e000000) == 0x0c000000) {
1170 		int base;
1171 		int offset;
1172 		int *registers = &frame->tf_r0;
1173 
1174 		/* REGISTER CORRECTION IS REQUIRED FOR THESE INSTRUCTIONS */
1175 
1176 		DFC_DISASSEMBLE(fault_pc);
1177 
1178 		/* Only need to fix registers if write back is turned on */
1179 
1180 		if ((fault_instruction & (1 << 21)) != 0) {
1181 			base = (fault_instruction >> 16) & 0x0f;
1182 			if (base == 13 &&
1183 			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
1184 				return ABORT_FIXUP_FAILED;
1185 			if (base == 15)
1186 				return ABORT_FIXUP_FAILED;
1187 
1188 			offset = (fault_instruction & 0xff) << 2;
1189 			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
1190 			if ((fault_instruction & (1 << 23)) != 0)
1191 				offset = -offset;
1192 			registers[base] += offset;
1193 			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
1194 		}
1195 	} else if ((fault_instruction & 0x0e000000) == 0x0c000000)
1196 		return ABORT_FIXUP_FAILED;
1197 
1198 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1199 
1200 		/* Ok an abort in SVC mode */
1201 
1202 		/*
1203 		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1204 		 * as the fault happened in svc mode but we need it in the
1205 		 * usr slot so we can treat the registers as an array of ints
1206 		 * during fixing.
1207 		 * NOTE: This PC is in the position but writeback is not
1208 		 * allowed on r15.
1209 		 * Doing it like this is more efficient than trapping this
1210 		 * case in all possible locations in the prior fixup code.
1211 		 */
1212 
1213 		frame->tf_svc_lr = frame->tf_usr_lr;
1214 		frame->tf_usr_lr = saved_lr;
1215 
1216 		/*
1217 		 * Note the trapframe does not have the SVC r13 so a fault
1218 		 * from an instruction with writeback to r13 in SVC mode is
1219 		 * not allowed. This should not happen as the kstack is
1220 		 * always valid.
1221 		 */
1222 	}
1223 
1224 	return(ABORT_FIXUP_OK);
1225 }
1226 #endif	/* CPU_ARM2/250/3/6/7 */
1227 
1228 
1229 #if (defined(CPU_ARM6) && defined(ARM6_LATE_ABORT)) || defined(CPU_ARM7) || \
1230 	defined(CPU_ARM7TDMI)
1231 /*
1232  * "Late" (base updated) data abort fixup
1233  *
1234  * For ARM6 (in late-abort mode) and ARM7.
1235  *
1236  * In this model, all data-transfer instructions need fixing up.  We defer
1237  * LDM, STM, LDC and STC fixup to the early-abort handler.
1238  */
1239 int
1240 late_abort_fixup(arg)
1241 	void *arg;
1242 {
1243 	trapframe_t *frame = arg;
1244 	u_int fault_pc;
1245 	u_int fault_instruction;
1246 	int saved_lr = 0;
1247 
1248 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1249 
1250 		/* Ok an abort in SVC mode */
1251 
1252 		/*
1253 		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1254 		 * as the fault happened in svc mode but we need it in the
1255 		 * usr slot so we can treat the registers as an array of ints
1256 		 * during fixing.
1257 		 * NOTE: This PC is in the position but writeback is not
1258 		 * allowed on r15.
1259 		 * Doing it like this is more efficient than trapping this
1260 		 * case in all possible locations in the following fixup code.
1261 		 */
1262 
1263 		saved_lr = frame->tf_usr_lr;
1264 		frame->tf_usr_lr = frame->tf_svc_lr;
1265 
1266 		/*
1267 		 * Note the trapframe does not have the SVC r13 so a fault
1268 		 * from an instruction with writeback to r13 in SVC mode is
1269 		 * not allowed. This should not happen as the kstack is
1270 		 * always valid.
1271 		 */
1272 	}
1273 
1274 	/* Get fault address and status from the CPU */
1275 
1276 	fault_pc = frame->tf_pc;
1277 	fault_instruction = *((volatile unsigned int *)fault_pc);
1278 
1279 	/* Decode the fault instruction and fix the registers as needed */
1280 
1281 	/* Was is a swap instruction ? */
1282 
1283 	if ((fault_instruction & 0x0fb00ff0) == 0x01000090) {
1284 		DFC_DISASSEMBLE(fault_pc);
1285 	} else if ((fault_instruction & 0x0c000000) == 0x04000000) {
1286 
1287 		/* Was is a ldr/str instruction */
1288 		/* This is for late abort only */
1289 
1290 		int base;
1291 		int offset;
1292 		int *registers = &frame->tf_r0;
1293 
1294 		DFC_DISASSEMBLE(fault_pc);
1295 
1296 		/* This is for late abort only */
1297 
1298 		if ((fault_instruction & (1 << 24)) == 0
1299 		    || (fault_instruction & (1 << 21)) != 0) {
1300 			/* postindexed ldr/str with no writeback */
1301 
1302 			base = (fault_instruction >> 16) & 0x0f;
1303 			if (base == 13 &&
1304 			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
1305 				return ABORT_FIXUP_FAILED;
1306 			if (base == 15)
1307 				return ABORT_FIXUP_FAILED;
1308 			DFC_PRINTF(("late abt fix: r%d=%08x : ",
1309 				       base, registers[base]));
1310 			if ((fault_instruction & (1 << 25)) == 0) {
1311 				/* Immediate offset - easy */
1312 
1313 				offset = fault_instruction & 0xfff;
1314 				if ((fault_instruction & (1 << 23)))
1315 					offset = -offset;
1316 				registers[base] += offset;
1317 				DFC_PRINTF(("imm=%08x ", offset));
1318 			} else {
1319 				/* offset is a shifted register */
1320 				int shift;
1321 
1322 				offset = fault_instruction & 0x0f;
1323 				if (offset == base)
1324 					return ABORT_FIXUP_FAILED;
1325 
1326 				/*
1327 				 * Register offset - hard we have to
1328 				 * cope with shifts !
1329 				 */
1330 				offset = registers[offset];
1331 
1332 				if ((fault_instruction & (1 << 4)) == 0)
1333 					/* shift with amount */
1334 					shift = (fault_instruction >> 7) & 0x1f;
1335 				else {
1336 					/* shift with register */
1337 					if ((fault_instruction & (1 << 7)) != 0)
1338 						/* undefined for now so bail out */
1339 						return ABORT_FIXUP_FAILED;
1340 					shift = ((fault_instruction >> 8) & 0xf);
1341 					if (base == shift)
1342 						return ABORT_FIXUP_FAILED;
1343 					DFC_PRINTF(("shift reg=%d ", shift));
1344 					shift = registers[shift];
1345 				}
1346 				DFC_PRINTF(("shift=%08x ", shift));
1347 				switch (((fault_instruction >> 5) & 0x3)) {
1348 				case 0 : /* Logical left */
1349 					offset = (int)(((u_int)offset) << shift);
1350 					break;
1351 				case 1 : /* Logical Right */
1352 					if (shift == 0) shift = 32;
1353 					offset = (int)(((u_int)offset) >> shift);
1354 					break;
1355 				case 2 : /* Arithmetic Right */
1356 					if (shift == 0) shift = 32;
1357 					offset = (int)(((int)offset) >> shift);
1358 					break;
1359 				case 3 : /* Rotate right (rol or rxx) */
1360 					return ABORT_FIXUP_FAILED;
1361 					break;
1362 				}
1363 
1364 				DFC_PRINTF(("abt: fixed LDR/STR with "
1365 					       "register offset\n"));
1366 				if ((fault_instruction & (1 << 23)))
1367 					offset = -offset;
1368 				DFC_PRINTF(("offset=%08x ", offset));
1369 				registers[base] += offset;
1370 			}
1371 			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
1372 		}
1373 	}
1374 
1375 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1376 
1377 		/* Ok an abort in SVC mode */
1378 
1379 		/*
1380 		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1381 		 * as the fault happened in svc mode but we need it in the
1382 		 * usr slot so we can treat the registers as an array of ints
1383 		 * during fixing.
1384 		 * NOTE: This PC is in the position but writeback is not
1385 		 * allowed on r15.
1386 		 * Doing it like this is more efficient than trapping this
1387 		 * case in all possible locations in the prior fixup code.
1388 		 */
1389 
1390 		frame->tf_svc_lr = frame->tf_usr_lr;
1391 		frame->tf_usr_lr = saved_lr;
1392 
1393 		/*
1394 		 * Note the trapframe does not have the SVC r13 so a fault
1395 		 * from an instruction with writeback to r13 in SVC mode is
1396 		 * not allowed. This should not happen as the kstack is
1397 		 * always valid.
1398 		 */
1399 	}
1400 
1401 	/*
1402 	 * Now let the early-abort fixup routine have a go, in case it
1403 	 * was an LDM, STM, LDC or STC that faulted.
1404 	 */
1405 
1406 	return early_abort_fixup(arg);
1407 }
1408 #endif	/* CPU_ARM6(LATE)/7/7TDMI */
1409 
1410 /*
1411  * CPU Setup code
1412  */
1413 
1414 #if defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) || \
1415 	defined(CPU_ARM8) || defined (CPU_ARM9) || defined(CPU_SA110) || \
1416 	defined(CPU_SA1100) || defined(CPU_SA1110) || \
1417 	defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
1418 	defined(CPU_XSCALE_PXA2X0)
1419 
/* Operation applied to the control-register value when an option matches. */
#define IGN	0	/* ignore: leave the bit(s) unchanged */
#define OR	1	/* set the bit(s) */
#define BIC	2	/* clear the bit(s) */

/* One boot-configuration option and the control-register bits it drives. */
struct cpu_option {
	char	*co_name;	/* boot option name, e.g. "cpu.cache" */
	int	co_falseop;	/* op (IGN/OR/BIC) when the option is false */
	int	co_trueop;	/* op (IGN/OR/BIC) when the option is true */
	int	co_value;	/* control-register bit(s) affected */
};
1430 
1431 static u_int parse_cpu_options __P((char *, struct cpu_option *, u_int));
1432 
1433 static u_int
1434 parse_cpu_options(args, optlist, cpuctrl)
1435 	char *args;
1436 	struct cpu_option *optlist;
1437 	u_int cpuctrl;
1438 {
1439 	int integer;
1440 
1441 	if (args == NULL)
1442 		return(cpuctrl);
1443 
1444 	while (optlist->co_name) {
1445 		if (get_bootconf_option(args, optlist->co_name,
1446 		    BOOTOPT_TYPE_BOOLEAN, &integer)) {
1447 			if (integer) {
1448 				if (optlist->co_trueop == OR)
1449 					cpuctrl |= optlist->co_value;
1450 				else if (optlist->co_trueop == BIC)
1451 					cpuctrl &= ~optlist->co_value;
1452 			} else {
1453 				if (optlist->co_falseop == OR)
1454 					cpuctrl |= optlist->co_value;
1455 				else if (optlist->co_falseop == BIC)
1456 					cpuctrl &= ~optlist->co_value;
1457 			}
1458 		}
1459 		++optlist;
1460 	}
1461 	return(cpuctrl);
1462 }
1463 #endif /* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 || CPU_SA110 */
1464 
1465 #if defined (CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) \
1466 	|| defined(CPU_ARM8)
/* Boot options shared by the ARM6/ARM7/ARM7TDMI/ARM8 setup routines. */
struct cpu_option arm678_options[] = {
#ifdef COMPAT_12
	{ "nocache",		IGN, BIC, CPU_CONTROL_IDC_ENABLE },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "cpu.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1478 
1479 #endif	/* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 */
1480 
1481 #ifdef CPU_ARM6
/* ARM6-specific boot options (cache and write buffer control). */
struct cpu_option arm6_options[] = {
	{ "arm6.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm6.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm6.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm6.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1489 
1490 void
1491 arm6_setup(args)
1492 	char *args;
1493 {
1494 	int cpuctrl, cpuctrlmask;
1495 
1496 	/* Set up default control registers bits */
1497 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1498 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1499 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
1500 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1501 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1502 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
1503 		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE
1504 		 | CPU_CONTROL_AFLT_ENABLE;
1505 
1506 #ifdef ARM6_LATE_ABORT
1507 	cpuctrl |= CPU_CONTROL_LABT_ENABLE;
1508 #endif	/* ARM6_LATE_ABORT */
1509 
1510 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1511 	cpuctrl = parse_cpu_options(args, arm6_options, cpuctrl);
1512 
1513 #ifdef __ARMEB__
1514 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1515 #endif
1516 
1517 	/* Clear out the cache */
1518 	cpu_idcache_wbinv_all();
1519 
1520 	/* Set the control register */
1521 	curcpu()->ci_ctrl = cpuctrl;
1522 	cpu_control(0xffffffff, cpuctrl);
1523 }
1524 #endif	/* CPU_ARM6 */
1525 
1526 #ifdef CPU_ARM7
/* ARM7-specific boot options (cache, write buffer, FPA clock). */
struct cpu_option arm7_options[] = {
	{ "arm7.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm7.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm7.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm7.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
#ifdef COMPAT_12
	{ "fpaclk2",		BIC, OR,  CPU_CONTROL_CPCLK },
#endif	/* COMPAT_12 */
	{ "arm700.fpaclk",	BIC, OR,  CPU_CONTROL_CPCLK },
	{ NULL,			IGN, IGN, 0 }
};
1538 
1539 void
1540 arm7_setup(args)
1541 	char *args;
1542 {
1543 	int cpuctrl, cpuctrlmask;
1544 
1545 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1546 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1547 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
1548 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1549 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1550 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
1551 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_LABT_ENABLE
1552 		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE
1553 		 | CPU_CONTROL_AFLT_ENABLE;
1554 
1555 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1556 	cpuctrl = parse_cpu_options(args, arm7_options, cpuctrl);
1557 
1558 #ifdef __ARMEB__
1559 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1560 #endif
1561 
1562 	/* Clear out the cache */
1563 	cpu_idcache_wbinv_all();
1564 
1565 	/* Set the control register */
1566 	curcpu()->ci_ctrl = cpuctrl;
1567 	cpu_control(0xffffffff, cpuctrl);
1568 }
1569 #endif	/* CPU_ARM7 */
1570 
1571 #ifdef CPU_ARM7TDMI
/*
 * ARM7TDMI-specific boot options.  Note the option names deliberately
 * reuse the "arm7." prefix so existing ARM7 boot configs keep working.
 */
struct cpu_option arm7tdmi_options[] = {
	{ "arm7.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm7.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm7.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm7.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
#ifdef COMPAT_12
	{ "fpaclk2",		BIC, OR,  CPU_CONTROL_CPCLK },
#endif	/* COMPAT_12 */
	{ "arm700.fpaclk",	BIC, OR,  CPU_CONTROL_CPCLK },
	{ NULL,			IGN, IGN, 0 }
};
1583 
1584 void
1585 arm7tdmi_setup(args)
1586 	char *args;
1587 {
1588 	int cpuctrl;
1589 
1590 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1591 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1592 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
1593 
1594 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1595 	cpuctrl = parse_cpu_options(args, arm7tdmi_options, cpuctrl);
1596 
1597 #ifdef __ARMEB__
1598 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1599 #endif
1600 
1601 	/* Clear out the cache */
1602 	cpu_idcache_wbinv_all();
1603 
1604 	/* Set the control register */
1605 	curcpu()->ci_ctrl = cpuctrl;
1606 	cpu_control(0xffffffff, cpuctrl);
1607 }
1608 #endif	/* CPU_ARM7TDMI */
1609 
1610 #ifdef CPU_ARM8
/* ARM8-specific boot options (cache, write buffer, branch prediction). */
struct cpu_option arm8_options[] = {
	{ "arm8.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm8.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm8.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm8.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
#ifdef COMPAT_12
	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "arm8.branchpredict",	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1623 
1624 void
1625 arm8_setup(args)
1626 	char *args;
1627 {
1628 	int integer;
1629 	int cpuctrl, cpuctrlmask;
1630 	int clocktest;
1631 	int setclock = 0;
1632 
1633 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1634 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1635 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
1636 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1637 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1638 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
1639 		 | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_ROM_ENABLE
1640 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE;
1641 
1642 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1643 	cpuctrl = parse_cpu_options(args, arm8_options, cpuctrl);
1644 
1645 #ifdef __ARMEB__
1646 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1647 #endif
1648 
1649 	/* Get clock configuration */
1650 	clocktest = arm8_clock_config(0, 0) & 0x0f;
1651 
1652 	/* Special ARM8 clock and test configuration */
1653 	if (get_bootconf_option(args, "arm8.clock.reset", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1654 		clocktest = 0;
1655 		setclock = 1;
1656 	}
1657 	if (get_bootconf_option(args, "arm8.clock.dynamic", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1658 		if (integer)
1659 			clocktest |= 0x01;
1660 		else
1661 			clocktest &= ~(0x01);
1662 		setclock = 1;
1663 	}
1664 	if (get_bootconf_option(args, "arm8.clock.sync", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1665 		if (integer)
1666 			clocktest |= 0x02;
1667 		else
1668 			clocktest &= ~(0x02);
1669 		setclock = 1;
1670 	}
1671 	if (get_bootconf_option(args, "arm8.clock.fast", BOOTOPT_TYPE_BININT, &integer)) {
1672 		clocktest = (clocktest & ~0xc0) | (integer & 3) << 2;
1673 		setclock = 1;
1674 	}
1675 	if (get_bootconf_option(args, "arm8.test", BOOTOPT_TYPE_BININT, &integer)) {
1676 		clocktest |= (integer & 7) << 5;
1677 		setclock = 1;
1678 	}
1679 
1680 	/* Clear out the cache */
1681 	cpu_idcache_wbinv_all();
1682 
1683 	/* Set the control register */
1684 	curcpu()->ci_ctrl = cpuctrl;
1685 	cpu_control(0xffffffff, cpuctrl);
1686 
1687 	/* Set the clock/test register */
1688 	if (setclock)
1689 		arm8_clock_config(0x7f, clocktest);
1690 }
1691 #endif	/* CPU_ARM8 */
1692 
1693 #ifdef CPU_ARM9
/* ARM9-specific boot options (split I/D caches, write buffer). */
struct cpu_option arm9_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm9.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm9.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "arm9.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "arm9.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1705 
1706 void
1707 arm9_setup(args)
1708 	char *args;
1709 {
1710 	int cpuctrl, cpuctrlmask;
1711 
1712 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1713 	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1714 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1715 	    | CPU_CONTROL_WBUF_ENABLE;
1716 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1717 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1718 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1719 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1720 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1721 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1722 		 | CPU_CONTROL_CPCLK;
1723 
1724 	cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);
1725 
1726 #ifdef __ARMEB__
1727 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1728 #endif
1729 
1730 	/* Clear out the cache */
1731 	cpu_idcache_wbinv_all();
1732 
1733 	/* Set the control register */
1734 	curcpu()->ci_ctrl = cpuctrl;
1735 	cpu_control(0xffffffff, cpuctrl);
1736 
1737 }
1738 #endif	/* CPU_ARM9 */
1739 
1740 #ifdef CPU_SA110
/* SA-110 (StrongARM) boot options (split I/D caches, write buffer). */
struct cpu_option sa110_options[] = {
#ifdef COMPAT_12
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa110.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa110.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "sa110.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "sa110.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1756 
1757 void
1758 sa110_setup(args)
1759 	char *args;
1760 {
1761 	int cpuctrl, cpuctrlmask;
1762 
1763 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1764 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1765 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1766 		 | CPU_CONTROL_WBUF_ENABLE;
1767 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1768 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1769 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1770 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1771 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1772 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1773 		 | CPU_CONTROL_CPCLK;
1774 
1775 	cpuctrl = parse_cpu_options(args, sa110_options, cpuctrl);
1776 
1777 #ifdef __ARMEB__
1778 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1779 #endif
1780 
1781 	/* Clear out the cache */
1782 	cpu_idcache_wbinv_all();
1783 
1784 	/* Set the control register */
1785 	curcpu()->ci_ctrl = cpuctrl;
1786 /*	cpu_control(cpuctrlmask, cpuctrl);*/
1787 	cpu_control(0xffffffff, cpuctrl);
1788 
1789 	/*
1790 	 * enable clockswitching, note that this doesn't read or write to r0,
1791 	 * r0 is just to make it valid asm
1792 	 */
1793 	__asm ("mcr 15, 0, r0, c15, c1, 2");
1794 }
1795 #endif	/* CPU_SA110 */
1796 
1797 #if defined(CPU_SA1100) || defined(CPU_SA1110)
/* SA-1100/SA-1110 boot options (split I/D caches, write buffer). */
struct cpu_option sa11x0_options[] = {
#ifdef COMPAT_12
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa11x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa11x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "sa11x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "sa11x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1813 
1814 void
1815 sa11x0_setup(args)
1816 	char *args;
1817 {
1818 	int cpuctrl, cpuctrlmask;
1819 
1820 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1821 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1822 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1823 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
1824 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1825 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1826 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1827 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1828 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1829 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1830 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
1831 
1832 	cpuctrl = parse_cpu_options(args, sa11x0_options, cpuctrl);
1833 
1834 #ifdef __ARMEB__
1835 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1836 #endif
1837 
1838 	if (vector_page == ARM_VECTORS_HIGH)
1839 		cpuctrl |= CPU_CONTROL_VECRELOC;
1840 
1841 	/* Clear out the cache */
1842 	cpu_idcache_wbinv_all();
1843 
1844 	/* Set the control register */
1845 	cpu_control(0xffffffff, cpuctrl);
1846 }
1847 #endif	/* CPU_SA1100 || CPU_SA1110 */
1848 
1849 #if defined(CPU_IXP12X0)
/* IXP12x0 boot options (split I/D caches, write buffer). */
struct cpu_option ixp12x0_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "ixp12x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "ixp12x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "ixp12x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "ixp12x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1861 
1862 void
1863 ixp12x0_setup(args)
1864 	char *args;
1865 {
1866 	int cpuctrl, cpuctrlmask;
1867 
1868 
1869 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE
1870 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_SYST_ENABLE
1871 		 | CPU_CONTROL_IC_ENABLE;
1872 
1873 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_AFLT_ENABLE
1874 		 | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE
1875 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_SYST_ENABLE
1876 		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_IC_ENABLE
1877 		 | CPU_CONTROL_VECRELOC;
1878 
1879 	cpuctrl = parse_cpu_options(args, ixp12x0_options, cpuctrl);
1880 
1881 #ifdef __ARMEB__
1882 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1883 #endif
1884 
1885 	if (vector_page == ARM_VECTORS_HIGH)
1886 		cpuctrl |= CPU_CONTROL_VECRELOC;
1887 
1888 	/* Clear out the cache */
1889 	cpu_idcache_wbinv_all();
1890 
1891 	/* Set the control register */
1892 	curcpu()->ci_ctrl = cpuctrl;
1893 	/* cpu_control(0xffffffff, cpuctrl); */
1894 	cpu_control(cpuctrlmask, cpuctrl);
1895 }
1896 #endif /* CPU_IXP12X0 */
1897 
1898 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
1899     defined(CPU_XSCALE_PXA2X0)
/* XScale boot options (split I/D caches, branch prediction). */
struct cpu_option xscale_options[] = {
#ifdef COMPAT_12
	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
#endif	/* COMPAT_12 */
	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "xscale.branchpredict", BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "xscale.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "xscale.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "xscale.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1914 
1915 void
1916 xscale_setup(args)
1917 	char *args;
1918 {
1919 	uint32_t auxctl;
1920 	int cpuctrl, cpuctrlmask;
1921 
1922 	/*
1923 	 * The XScale Write Buffer is always enabled.  Our option
1924 	 * is to enable/disable coalescing.  Note that bits 6:3
1925 	 * must always be enabled.
1926 	 */
1927 
1928 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1929 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1930 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1931 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
1932 		 | CPU_CONTROL_BPRD_ENABLE;
1933 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1934 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1935 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1936 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1937 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1938 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1939 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
1940 
1941 	cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl);
1942 
1943 #ifdef __ARMEB__
1944 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1945 #endif
1946 
1947 	if (vector_page == ARM_VECTORS_HIGH)
1948 		cpuctrl |= CPU_CONTROL_VECRELOC;
1949 
1950 	/* Clear out the cache */
1951 	cpu_idcache_wbinv_all();
1952 
1953 	/*
1954 	 * Set the control register.  Note that bits 6:3 must always
1955 	 * be set to 1.
1956 	 */
1957 	curcpu()->ci_ctrl = cpuctrl;
1958 /*	cpu_control(cpuctrlmask, cpuctrl);*/
1959 	cpu_control(0xffffffff, cpuctrl);
1960 
1961 	/* Make sure write coalescing is turned on */
1962 	__asm __volatile("mrc p15, 0, %0, c1, c0, 1"
1963 		: "=r" (auxctl));
1964 #ifdef XSCALE_NO_COALESCE_WRITES
1965 	auxctl |= XSCALE_AUXCTL_K;
1966 #else
1967 	auxctl &= ~XSCALE_AUXCTL_K;
1968 #endif
1969 	__asm __volatile("mcr p15, 0, %0, c1, c0, 1"
1970 		: : "r" (auxctl));
1971 }
1972 #endif	/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 */
1973