xref: /netbsd-src/sys/arch/arm/arm/cpufunc.c (revision d710132b4b8ce7f7cccaaf660cb16aa16b4077a0)
1 /*	$NetBSD: cpufunc.c,v 1.60 2003/05/25 01:30:52 ichiro Exp $	*/
2 
3 /*
4  * arm7tdmi support code Copyright (c) 2001 John Fremlin
5  * arm8 support code Copyright (c) 1997 ARM Limited
6  * arm8 support code Copyright (c) 1997 Causality Limited
7  * arm9 support code Copyright (C) 2001 ARM Ltd
8  * Copyright (c) 1997 Mark Brinicombe.
9  * Copyright (c) 1997 Causality Limited
10  * All rights reserved.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. All advertising materials mentioning features or use of this software
21  *    must display the following acknowledgement:
22  *	This product includes software developed by Causality Limited.
23  * 4. The name of Causality Limited may not be used to endorse or promote
24  *    products derived from this software without specific prior written
25  *    permission.
26  *
27  * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
28  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
29  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
30  * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
31  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
32  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
33  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37  * SUCH DAMAGE.
38  *
39  * RiscBSD kernel project
40  *
41  * cpufuncs.c
42  *
43  * C functions for supporting CPU / MMU / TLB specific operations.
44  *
45  * Created      : 30/01/97
46  */
47 
48 #include "opt_compat_netbsd.h"
49 #include "opt_cpuoptions.h"
50 #include "opt_perfctrs.h"
51 
52 #include <sys/types.h>
53 #include <sys/param.h>
54 #include <sys/pmc.h>
55 #include <sys/systm.h>
56 #include <machine/cpu.h>
57 #include <machine/bootconfig.h>
58 #include <arch/arm/arm/disassem.h>
59 
60 #include <uvm/uvm.h>
61 
62 #include <arm/cpuconf.h>
63 #include <arm/cpufunc.h>
64 
65 #ifdef CPU_XSCALE_80200
66 #include <arm/xscale/i80200reg.h>
67 #include <arm/xscale/i80200var.h>
68 #endif
69 
70 #ifdef CPU_XSCALE_80321
71 #include <arm/xscale/i80321reg.h>
72 #include <arm/xscale/i80321var.h>
73 #endif
74 
75 #ifdef CPU_XSCALE_IXP425
76 #include <arm/xscale/ixp425reg.h>
77 #include <arm/xscale/ixp425var.h>
78 #endif
79 
80 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321)
81 #include <arm/xscale/xscalereg.h>
82 #endif
83 
#if defined(PERFCTRS)
/* Performance-counter ops; installed by the CPU-specific *_pmu_init(). */
struct arm_pmc_funcs *arm_pmc;
#endif

/* PRIMARY CACHE VARIABLES */
/* Instruction-cache geometry (meaningful only for split caches). */
int	arm_picache_size;
int	arm_picache_line_size;
int	arm_picache_ways;

/* Data-cache geometry; also describes a unified cache. */
int	arm_pdcache_size;	/* and unified */
int	arm_pdcache_line_size;
int	arm_pdcache_ways;

int	arm_pcache_type;	/* write-through/write-back classification */
int	arm_pcache_unified;	/* non-zero if I and D caches are unified */

/* D-cache line size and derived mask, used for cache-line alignment. */
int	arm_dcache_align;
int	arm_dcache_align_mask;

/* 1 == use cpu_sleep(), 0 == don't */
int cpu_do_powersave;
105 
106 #ifdef CPU_ARM3
/*
 * ARM3 function vector.  The MMU entries (domain/setttb/fault*) are
 * NULL and the TLB entries are no-ops; every cache operation funnels
 * into arm3_cache_flush.
 */
struct cpu_functions arm3_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	arm3_control,			/* control		*/
	NULL,				/* domain		*/
	NULL,				/* setttb		*/
	NULL,				/* faultstatus		*/
	NULL,				/* faultaddress		*/

	/* TLB functions */

	cpufunc_nullop,			/* tlb_flushID		*/
	(void *)cpufunc_nullop,		/* tlb_flushID_SE	*/
	cpufunc_nullop,			/* tlb_flushI		*/
	(void *)cpufunc_nullop,		/* tlb_flushI_SE	*/
	cpufunc_nullop,			/* tlb_flushD		*/
	(void *)cpufunc_nullop,		/* tlb_flushD_SE	*/

	/* Cache operations */

	cpufunc_nullop,			/* icache_sync_all	*/
	(void *) cpufunc_nullop,	/* icache_sync_range	*/

	arm3_cache_flush,		/* dcache_wbinv_all	*/
	(void *)arm3_cache_flush,	/* dcache_wbinv_range	*/
	(void *)arm3_cache_flush,	/* dcache_inv_range	*/
	(void *)cpufunc_nullop,		/* dcache_wb_range	*/

	arm3_cache_flush,		/* idcache_wbinv_all	*/
	(void *)arm3_cache_flush,	/* idcache_wbinv_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	cpufunc_nullop,			/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	early_abort_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	NULL,				/* context_switch	*/

	(void *)cpufunc_nullop		/* cpu setup		*/

};
162 #endif	/* CPU_ARM3 */
163 
164 #ifdef CPU_ARM6
/*
 * ARM6 function vector.  Shares the arm67_* TLB/cache/context-switch
 * primitives with the ARM7 vector below; the abort fixup is selected
 * at compile time via ARM6_LATE_ABORT.
 */
struct cpu_functions arm6_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	arm67_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	arm67_tlb_flush,		/* tlb_flushID		*/
	arm67_tlb_purge,		/* tlb_flushID_SE	*/
	arm67_tlb_flush,		/* tlb_flushI		*/
	arm67_tlb_purge,		/* tlb_flushI_SE	*/
	arm67_tlb_flush,		/* tlb_flushD		*/
	arm67_tlb_purge,		/* tlb_flushD_SE	*/

	/* Cache operations */

	cpufunc_nullop,			/* icache_sync_all	*/
	(void *) cpufunc_nullop,	/* icache_sync_range	*/

	arm67_cache_flush,		/* dcache_wbinv_all	*/
	(void *)arm67_cache_flush,	/* dcache_wbinv_range	*/
	(void *)arm67_cache_flush,	/* dcache_inv_range	*/
	(void *)cpufunc_nullop,		/* dcache_wb_range	*/

	arm67_cache_flush,		/* idcache_wbinv_all	*/
	(void *)arm67_cache_flush,	/* idcache_wbinv_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	cpufunc_nullop,			/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

#ifdef ARM6_LATE_ABORT
	late_abort_fixup,		/* dataabt_fixup	*/
#else
	early_abort_fixup,		/* dataabt_fixup	*/
#endif
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm67_context_switch,		/* context_switch	*/

	arm6_setup			/* cpu setup		*/

};
224 #endif	/* CPU_ARM6 */
225 
226 #ifdef CPU_ARM7
/*
 * ARM7 function vector.  Identical to the ARM6 vector except that it
 * always uses late_abort_fixup and arm7_setup.
 */
struct cpu_functions arm7_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	arm67_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	arm67_tlb_flush,		/* tlb_flushID		*/
	arm67_tlb_purge,		/* tlb_flushID_SE	*/
	arm67_tlb_flush,		/* tlb_flushI		*/
	arm67_tlb_purge,		/* tlb_flushI_SE	*/
	arm67_tlb_flush,		/* tlb_flushD		*/
	arm67_tlb_purge,		/* tlb_flushD_SE	*/

	/* Cache operations */

	cpufunc_nullop,			/* icache_sync_all	*/
	(void *)cpufunc_nullop,		/* icache_sync_range	*/

	arm67_cache_flush,		/* dcache_wbinv_all	*/
	(void *)arm67_cache_flush,	/* dcache_wbinv_range	*/
	(void *)arm67_cache_flush,	/* dcache_inv_range	*/
	(void *)cpufunc_nullop,		/* dcache_wb_range	*/

	arm67_cache_flush,		/* idcache_wbinv_all	*/
	(void *)arm67_cache_flush,	/* idcache_wbinv_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	cpufunc_nullop,			/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	late_abort_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm67_context_switch,		/* context_switch	*/

	arm7_setup			/* cpu setup		*/

};
282 #endif	/* CPU_ARM7 */
283 
284 #ifdef CPU_ARM7TDMI
/*
 * ARM7TDMI function vector.  TLB and cache maintenance use the
 * arm7tdmi_* flush-ID primitives for all I/D variants.
 */
struct cpu_functions arm7tdmi_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	arm7tdmi_setttb,		/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	arm7tdmi_tlb_flushID,		/* tlb_flushID		*/
	arm7tdmi_tlb_flushID_SE,	/* tlb_flushID_SE	*/
	arm7tdmi_tlb_flushID,		/* tlb_flushI		*/
	arm7tdmi_tlb_flushID_SE,	/* tlb_flushI_SE	*/
	arm7tdmi_tlb_flushID,		/* tlb_flushD		*/
	arm7tdmi_tlb_flushID_SE,	/* tlb_flushD_SE	*/

	/* Cache operations */

	cpufunc_nullop,			/* icache_sync_all	*/
	(void *)cpufunc_nullop,		/* icache_sync_range	*/

	arm7tdmi_cache_flushID,		/* dcache_wbinv_all	*/
	(void *)arm7tdmi_cache_flushID,	/* dcache_wbinv_range	*/
	(void *)arm7tdmi_cache_flushID,	/* dcache_inv_range	*/
	(void *)cpufunc_nullop,		/* dcache_wb_range	*/

	arm7tdmi_cache_flushID,		/* idcache_wbinv_all	*/
	(void *)arm7tdmi_cache_flushID,	/* idcache_wbinv_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	cpufunc_nullop,			/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	late_abort_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm7tdmi_context_switch,	/* context_switch	*/

	arm7tdmi_setup			/* cpu setup		*/

};
340 #endif	/* CPU_ARM7TDMI */
341 
342 #ifdef CPU_ARM8
/*
 * ARM8 function vector.  D-cache invalidate-by-range is aliased to
 * the purge (wb+inv) routine — see the XXX below.
 */
struct cpu_functions arm8_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	arm8_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	arm8_tlb_flushID,		/* tlb_flushID		*/
	arm8_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	arm8_tlb_flushID,		/* tlb_flushI		*/
	arm8_tlb_flushID_SE,		/* tlb_flushI_SE	*/
	arm8_tlb_flushID,		/* tlb_flushD		*/
	arm8_tlb_flushID_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	cpufunc_nullop,			/* icache_sync_all	*/
	(void *)cpufunc_nullop,		/* icache_sync_range	*/

	arm8_cache_purgeID,		/* dcache_wbinv_all	*/
	(void *)arm8_cache_purgeID,	/* dcache_wbinv_range	*/
/*XXX*/	(void *)arm8_cache_purgeID,	/* dcache_inv_range	*/
	(void *)arm8_cache_cleanID,	/* dcache_wb_range	*/

	arm8_cache_purgeID,		/* idcache_wbinv_all	*/
	(void *)arm8_cache_purgeID,	/* idcache_wbinv_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	cpufunc_nullop,			/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm8_context_switch,		/* context_switch	*/

	arm8_setup			/* cpu setup		*/
};
397 #endif	/* CPU_ARM8 */
398 
399 #ifdef CPU_ARM9
/*
 * ARM9 function vector.  Uses the generic armv4 TLB/write-buffer
 * primitives plus ARM9-specific cache routines; dcache_wb_range is a
 * no-op because the D-cache is operated in write-through mode (see
 * comment below).
 */
struct cpu_functions arm9_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	arm9_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm9_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	arm9_cache_syncI,		/* icache_sync_all	*/
	arm9_cache_syncI_rng,		/* icache_sync_range	*/

		/* ...cache in write-though mode... */
	arm9_cache_flushD,		/* dcache_wbinv_all	*/
	arm9_cache_flushD_rng,		/* dcache_wbinv_range	*/
	arm9_cache_flushD_rng,		/* dcache_inv_range	*/
	(void *)cpufunc_nullop,		/* dcache_wb_range	*/

	arm9_cache_flushID,		/* idcache_wbinv_all	*/
	arm9_cache_flushID_rng,		/* idcache_wbinv_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm9_context_switch,		/* context_switch	*/

	arm9_setup			/* cpu setup		*/

};
456 #endif /* CPU_ARM9 */
457 
458 #ifdef CPU_SA110
/*
 * SA-110 (StrongARM) function vector.  armv4 TLB primitives plus the
 * shared sa1_* cache routines; dcache_inv_range is aliased to the
 * purge routine (see XXX below).
 */
struct cpu_functions sa110_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	sa1_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	sa1_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	sa1_cache_syncI,		/* icache_sync_all	*/
	sa1_cache_syncI_rng,		/* icache_sync_range	*/

	sa1_cache_purgeD,		/* dcache_wbinv_all	*/
	sa1_cache_purgeD_rng,		/* dcache_wbinv_range	*/
/*XXX*/	sa1_cache_purgeD_rng,		/* dcache_inv_range	*/
	sa1_cache_cleanD_rng,		/* dcache_wb_range	*/

	sa1_cache_purgeID,		/* idcache_wbinv_all	*/
	sa1_cache_purgeID_rng,		/* idcache_wbinv_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	sa110_context_switch,		/* context_switch	*/

	sa110_setup			/* cpu setup		*/
};
513 #endif	/* CPU_SA110 */
514 
515 #if defined(CPU_SA1100) || defined(CPU_SA1110)
/*
 * SA-1100/SA-1110 function vector.  Same sa1_* cache routines as the
 * SA-110, but with SA-11x0-specific read-buffer drain, sleep, and
 * context-switch entries.
 */
struct cpu_functions sa11x0_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	sa1_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	sa1_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	sa1_cache_syncI,		/* icache_sync_all	*/
	sa1_cache_syncI_rng,		/* icache_sync_range	*/

	sa1_cache_purgeD,		/* dcache_wbinv_all	*/
	sa1_cache_purgeD_rng,		/* dcache_wbinv_range	*/
/*XXX*/	sa1_cache_purgeD_rng,		/* dcache_inv_range	*/
	sa1_cache_cleanD_rng,		/* dcache_wb_range	*/

	sa1_cache_purgeID,		/* idcache_wbinv_all	*/
	sa1_cache_purgeID_rng,		/* idcache_wbinv_range	*/

	/* Other functions */

	sa11x0_drain_readbuf,		/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	sa11x0_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	sa11x0_context_switch,		/* context_switch	*/

	sa11x0_setup			/* cpu setup		*/
};
570 #endif	/* CPU_SA1100 || CPU_SA1110 */
571 
572 #ifdef CPU_IXP12X0
/*
 * IXP12x0 function vector.  Reuses the sa1_* cache routines with
 * IXP12x0-specific read-buffer drain, context-switch, and setup
 * entries.
 */
struct cpu_functions ixp12x0_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	sa1_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	sa1_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	sa1_cache_syncI,		/* icache_sync_all	*/
	sa1_cache_syncI_rng,		/* icache_sync_range	*/

	sa1_cache_purgeD,		/* dcache_wbinv_all	*/
	sa1_cache_purgeD_rng,		/* dcache_wbinv_range	*/
/*XXX*/	sa1_cache_purgeD_rng,		/* dcache_inv_range	*/
	sa1_cache_cleanD_rng,		/* dcache_wb_range	*/

	sa1_cache_purgeID,		/* idcache_wbinv_all	*/
	sa1_cache_purgeID_rng,		/* idcache_wbinv_range	*/

	/* Other functions */

	ixp12x0_drain_readbuf,		/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	ixp12x0_context_switch,		/* context_switch	*/

	ixp12x0_setup			/* cpu setup		*/
};
627 #endif	/* CPU_IXP12X0 */
628 
629 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
630     defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425)
/*
 * XScale function vector, shared by the i80200, i80321, PXA2x0 and
 * IXP425 ports.  Unlike the older vectors it has a real cpwait
 * (coprocessor drain) and distinct dcache_inv_range / sleep entries.
 */
struct cpu_functions xscale_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	xscale_cpwait,			/* cpwait		*/

	/* MMU functions */

	xscale_control,			/* control		*/
	cpufunc_domains,		/* domain		*/
	xscale_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	xscale_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	xscale_cache_syncI,		/* icache_sync_all	*/
	xscale_cache_syncI_rng,		/* icache_sync_range	*/

	xscale_cache_purgeD,		/* dcache_wbinv_all	*/
	xscale_cache_purgeD_rng,	/* dcache_wbinv_range	*/
	xscale_cache_flushD_rng,	/* dcache_inv_range	*/
	xscale_cache_cleanD_rng,	/* dcache_wb_range	*/

	xscale_cache_purgeID,		/* idcache_wbinv_all	*/
	xscale_cache_purgeID_rng,	/* idcache_wbinv_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	xscale_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	xscale_context_switch,		/* context_switch	*/

	xscale_setup			/* cpu setup		*/
};
685 #endif
686 /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 */
687 
688 /*
689  * Global constants also used by locore.s
690  */
691 
struct cpu_functions cpufuncs;		/* active vector; copied in by set_cpufuncs() */
u_int cputype;				/* cpufunc_id() & CPU_ID_CPU_MASK */
u_int cpu_reset_needs_v4_MMU_disable;	/* flag used in locore.s */
695 
696 #if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) || \
697     defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
698     defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425)
699 static void get_cachetype_cp15 __P((void));
700 
701 static void
702 get_cachetype_cp15()
703 {
704 	u_int ctype, isize, dsize;
705 	u_int multiplier;
706 
707 	__asm __volatile("mrc p15, 0, %0, c0, c0, 1"
708 		: "=r" (ctype));
709 
710 	/*
711 	 * ...and thus spake the ARM ARM:
712 	 *
713 	 * If an <opcode2> value corresponding to an unimplemented or
714 	 * reserved ID register is encountered, the System Control
715 	 * processor returns the value of the main ID register.
716 	 */
717 	if (ctype == cpufunc_id())
718 		goto out;
719 
720 	if ((ctype & CPU_CT_S) == 0)
721 		arm_pcache_unified = 1;
722 
723 	/*
724 	 * If you want to know how this code works, go read the ARM ARM.
725 	 */
726 
727 	arm_pcache_type = CPU_CT_CTYPE(ctype);
728 
729 	if (arm_pcache_unified == 0) {
730 		isize = CPU_CT_ISIZE(ctype);
731 		multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
732 		arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
733 		if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
734 			if (isize & CPU_CT_xSIZE_M)
735 				arm_picache_line_size = 0; /* not present */
736 			else
737 				arm_picache_ways = 1;
738 		} else {
739 			arm_picache_ways = multiplier <<
740 			    (CPU_CT_xSIZE_ASSOC(isize) - 1);
741 		}
742 		arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
743 	}
744 
745 	dsize = CPU_CT_DSIZE(ctype);
746 	multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
747 	arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
748 	if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
749 		if (dsize & CPU_CT_xSIZE_M)
750 			arm_pdcache_line_size = 0; /* not present */
751 		else
752 			arm_pdcache_ways = 0;
753 	} else {
754 		arm_pdcache_ways = multiplier <<
755 		    (CPU_CT_xSIZE_ASSOC(dsize) - 1);
756 	}
757 	arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
758 
759 	arm_dcache_align = arm_pdcache_line_size;
760 
761  out:
762 	arm_dcache_align_mask = arm_dcache_align - 1;
763 }
764 #endif /* ARM7TDMI || ARM8 || ARM9 || XSCALE */
765 
766 #if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3) || \
767     defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_SA110) || \
768     defined(CPU_SA1100) || defined(CPU_SA1110) || defined(CPU_IXP12X0)
769 /* Cache information for CPUs without cache type registers. */
/*
 * One row per known CPU: cache classification plus D-cache and
 * I-cache geometry (size / line size / ways).
 */
struct cachetab {
	u_int32_t ct_cpuid;		/* CPU_ID_CPU_MASKed cpu id */
	int	ct_pcache_type;		/* CPU_CT_CTYPE_* value */
	int	ct_pcache_unified;	/* non-zero: unified I/D cache */
	int	ct_pdcache_size;
	int	ct_pdcache_line_size;
	int	ct_pdcache_ways;
	int	ct_picache_size;
	int	ct_picache_line_size;
	int	ct_picache_ways;
};
781 
/* Table is scanned by get_cachetype_table(); a zero ct_cpuid ends it. */
struct cachetab cachetab[] = {
    /* cpuid,           cache type,       u,  dsiz, ls, wy,  isiz, ls, wy */
    { CPU_ID_ARM2,      0,                1,     0,  0,  0,     0,  0,  0 },
    { CPU_ID_ARM250,    0,                1,     0,  0,  0,     0,  0,  0 },
    { CPU_ID_ARM3,      CPU_CT_CTYPE_WT,  1,  4096, 16, 64,     0,  0,  0 },
    { CPU_ID_ARM610,	CPU_CT_CTYPE_WT,  1,  4096, 16, 64,     0,  0,  0 },
    { CPU_ID_ARM710,    CPU_CT_CTYPE_WT,  1,  8192, 32,  4,     0,  0,  0 },
    { CPU_ID_ARM7500,   CPU_CT_CTYPE_WT,  1,  4096, 16,  4,     0,  0,  0 },
    { CPU_ID_ARM710A,   CPU_CT_CTYPE_WT,  1,  8192, 16,  4,     0,  0,  0 },
    { CPU_ID_ARM7500FE, CPU_CT_CTYPE_WT,  1,  4096, 16,  4,     0,  0,  0 },
    /* XXX is this type right for SA-1? */
    { CPU_ID_SA110,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 },
    { CPU_ID_SA1100,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
    { CPU_ID_SA1110,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
    { CPU_ID_IXP1200,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 }, /* XXX */
    { 0, 0, 0, 0, 0, 0, 0, 0}		/* terminator (ct_cpuid == 0) */
};
799 
800 static void get_cachetype_table __P((void));
801 
802 static void
803 get_cachetype_table()
804 {
805 	int i;
806 	u_int32_t cpuid = cpufunc_id();
807 
808 	for (i = 0; cachetab[i].ct_cpuid != 0; i++) {
809 		if (cachetab[i].ct_cpuid == (cpuid & CPU_ID_CPU_MASK)) {
810 			arm_pcache_type = cachetab[i].ct_pcache_type;
811 			arm_pcache_unified = cachetab[i].ct_pcache_unified;
812 			arm_pdcache_size = cachetab[i].ct_pdcache_size;
813 			arm_pdcache_line_size =
814 			    cachetab[i].ct_pdcache_line_size;
815 			arm_pdcache_ways = cachetab[i].ct_pdcache_ways;
816 			arm_picache_size = cachetab[i].ct_picache_size;
817 			arm_picache_line_size =
818 			    cachetab[i].ct_picache_line_size;
819 			arm_picache_ways = cachetab[i].ct_picache_ways;
820 		}
821 	}
822 	arm_dcache_align = arm_pdcache_line_size;
823 
824 	arm_dcache_align_mask = arm_dcache_align - 1;
825 }
826 
827 #endif /* ARM2 || ARM250 || ARM3 || ARM6 || ARM7 || SA110 || SA1100 || SA1111 || IXP12X0 */
828 
829 /*
830  * Cannot panic here as we may not have a console yet ...
831  */
832 
/*
 * Identify the running CPU, install the matching cpu_functions vector
 * into the global 'cpufuncs', probe the cache geometry, and select the
 * pmap PTE layout.  Returns 0 on success; on an unrecognized CPU it
 * panics (despite the "cannot panic" note above) and nominally returns
 * ARCHITECTURE_NOT_PRESENT.
 */
int
set_cpufuncs()
{
	cputype = cpufunc_id();
	cputype &= CPU_ID_CPU_MASK;

	/*
	 * NOTE: cpu_do_powersave defaults to off.  If we encounter a
	 * CPU type where we want to use it by default, then we set it.
	 */

#ifdef CPU_ARM3
	/* ARM Ltd part with 0x3 in the part-number nibble. */
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    (cputype & 0x00000f00) == 0x00000300) {
		cpufuncs = arm3_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;
		get_cachetype_table();
		return 0;
	}
#endif	/* CPU_ARM3 */
#ifdef CPU_ARM6
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    (cputype & 0x00000f00) == 0x00000600) {
		cpufuncs = arm6_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;
		get_cachetype_table();
		pmap_pte_init_generic();
		return 0;
	}
#endif	/* CPU_ARM6 */
#ifdef CPU_ARM7
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    CPU_ID_IS7(cputype) &&
	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V3) {
		cpufuncs = arm7_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;
		get_cachetype_table();
		pmap_pte_init_generic();
		return 0;
	}
#endif	/* CPU_ARM7 */
#ifdef CPU_ARM7TDMI
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    CPU_ID_IS7(cputype) &&
	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V4T) {
		cpufuncs = arm7tdmi_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		return 0;
	}
#endif
#ifdef CPU_ARM8
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    (cputype & 0x0000f000) == 0x00008000) {
		cpufuncs = arm8_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;	/* XXX correct? */
		get_cachetype_cp15();
		pmap_pte_init_arm8();
		return 0;
	}
#endif	/* CPU_ARM8 */
#ifdef CPU_ARM9
	if (cputype == CPU_ID_ARM920T) {
		cpufuncs = arm9_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
		get_cachetype_cp15();
		pmap_pte_init_arm9();
		return 0;
	}
#endif /* CPU_ARM9 */
#ifdef CPU_SA110
	if (cputype == CPU_ID_SA110) {
		cpufuncs = sa110_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it */
		get_cachetype_table();
		pmap_pte_init_sa1();
		return 0;
	}
#endif	/* CPU_SA110 */
#ifdef CPU_SA1100
	if (cputype == CPU_ID_SA1100) {
		cpufuncs = sa11x0_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it	*/
		get_cachetype_table();
		pmap_pte_init_sa1();

		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;

		return 0;
	}
#endif	/* CPU_SA1100 */
#ifdef CPU_SA1110
	if (cputype == CPU_ID_SA1110) {
		cpufuncs = sa11x0_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it	*/
		get_cachetype_table();
		pmap_pte_init_sa1();

		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;

		return 0;
	}
#endif	/* CPU_SA1110 */
#ifdef CPU_IXP12X0
        if (cputype == CPU_ID_IXP1200) {
                cpufuncs = ixp12x0_cpufuncs;
                cpu_reset_needs_v4_MMU_disable = 1;
                get_cachetype_table();
                pmap_pte_init_sa1();
                return 0;
        }
#endif  /* CPU_IXP12X0 */
#ifdef CPU_XSCALE_80200
	if (cputype == CPU_ID_80200) {
		int rev = cpufunc_id() & CPU_ID_REVISION_MASK;

		i80200_icu_init();

		/*
		 * Reset the Performance Monitoring Unit to a
		 * pristine state:
		 *	- CCNT, PMN0, PMN1 reset to 0
		 *	- overflow indications cleared
		 *	- all counters disabled
		 */
		__asm __volatile("mcr p14, 0, %0, c0, c0, 0"
			:
			: "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
			       PMNC_CC_IF));

#if defined(XSCALE_CCLKCFG)
		/*
		 * Crank CCLKCFG to maximum legal value.
		 */
		__asm __volatile ("mcr p14, 0, %0, c6, c0, 0"
			:
			: "r" (XSCALE_CCLKCFG));
#endif

		/*
		 * XXX Disable ECC in the Bus Controller Unit; we
		 * don't really support it, yet.  Clear any pending
		 * error indications.
		 */
		__asm __volatile("mcr p13, 0, %0, c0, c1, 0"
			:
			: "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));

		cpufuncs = xscale_cpufuncs;
#if defined(PERFCTRS)
		xscale_pmu_init();
#endif

		/*
		 * i80200 errata: Step-A0 and A1 have a bug where
		 * D$ dirty bits are not cleared on "invalidate by
		 * address".
		 *
		 * Workaround: Clean cache line before invalidating.
		 */
		if (rev == 0 || rev == 1)
			cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng;

		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();
		return 0;
	}
#endif /* CPU_XSCALE_80200 */
#ifdef CPU_XSCALE_80321
	if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
	    cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0) {
		i80321_icu_init();

		/*
		 * Reset the Performance Monitoring Unit to a
		 * pristine state:
		 *	- CCNT, PMN0, PMN1 reset to 0
		 *	- overflow indications cleared
		 *	- all counters disabled
		 */
		__asm __volatile("mcr p14, 0, %0, c0, c0, 0"
			:
			: "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
			       PMNC_CC_IF));

		cpufuncs = xscale_cpufuncs;
#if defined(PERFCTRS)
		xscale_pmu_init();
#endif

		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();
		return 0;
	}
#endif /* CPU_XSCALE_80321 */
#ifdef CPU_XSCALE_PXA2X0
	/* ignore core revision to test PXA2xx CPUs */
	if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {

		cpufuncs = xscale_cpufuncs;
#if defined(PERFCTRS)
		xscale_pmu_init();
#endif

		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();

		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;

		return 0;
	}
#endif /* CPU_XSCALE_PXA2X0 */
#ifdef CPU_XSCALE_IXP425
	if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
            cputype == CPU_ID_IXP425_266) {
		ixp425_icu_init();

		cpufuncs = xscale_cpufuncs;
#if defined(PERFCTRS)
		xscale_pmu_init();
#endif

		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();

		return 0;
	}
#endif /* CPU_XSCALE_IXP425 */
	/*
	 * Bzzzz. And the answer was ...
	 */
	panic("No support for this CPU type (%08x) in kernel", cputype);
	return(ARCHITECTURE_NOT_PRESENT);
}
1076 
1077 /*
1078  * Fixup routines for data and prefetch aborts.
1079  *
1080  * Several compile time symbols are used
1081  *
1082  * DEBUG_FAULT_CORRECTION - Print debugging information during the
1083  * correction of registers after a fault.
1084  * ARM6_LATE_ABORT - ARM6 supports both early and late aborts
1085  * when defined should use late aborts
1086  */
1087 
1088 
1089 /*
1090  * Null abort fixup routine.
1091  * For use when no fixup is required.
1092  */
1093 int
1094 cpufunc_null_fixup(arg)
1095 	void *arg;
1096 {
1097 	return(ABORT_FIXUP_OK);
1098 }
1099 
1100 
1101 #if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3) || \
1102     defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI)
1103 
/*
 * Debug helpers for the abort-fixup code below.  They expand to nothing
 * unless DEBUG_FAULT_CORRECTION is defined, in which case they print
 * progress messages and disassemble the faulting instruction.
 */
#ifdef DEBUG_FAULT_CORRECTION
#define DFC_PRINTF(x)		printf x
#define DFC_DISASSEMBLE(x)	disassemble(x)
#else
#define DFC_PRINTF(x)		/* nothing */
#define DFC_DISASSEMBLE(x)	/* nothing */
#endif
1111 
1112 /*
1113  * "Early" data abort fixup.
1114  *
1115  * For ARM2, ARM2as, ARM3 and ARM6 (in early-abort mode).  Also used
1116  * indirectly by ARM6 (in late-abort mode) and ARM7[TDMI].
1117  *
1118  * In early aborts, we may have to fix up LDM, STM, LDC and STC.
1119  */
1120 int
1121 early_abort_fixup(arg)
1122 	void *arg;
1123 {
1124 	trapframe_t *frame = arg;
1125 	u_int fault_pc;
1126 	u_int fault_instruction;
1127 	int saved_lr = 0;
1128 
1129 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1130 
1131 		/* Ok an abort in SVC mode */
1132 
1133 		/*
1134 		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1135 		 * as the fault happened in svc mode but we need it in the
1136 		 * usr slot so we can treat the registers as an array of ints
1137 		 * during fixing.
1138 		 * NOTE: This PC is in the position but writeback is not
1139 		 * allowed on r15.
1140 		 * Doing it like this is more efficient than trapping this
1141 		 * case in all possible locations in the following fixup code.
1142 		 */
1143 
1144 		saved_lr = frame->tf_usr_lr;
1145 		frame->tf_usr_lr = frame->tf_svc_lr;
1146 
1147 		/*
1148 		 * Note the trapframe does not have the SVC r13 so a fault
1149 		 * from an instruction with writeback to r13 in SVC mode is
1150 		 * not allowed. This should not happen as the kstack is
1151 		 * always valid.
1152 		 */
1153 	}
1154 
1155 	/* Get fault address and status from the CPU */
1156 
1157 	fault_pc = frame->tf_pc;
1158 	fault_instruction = *((volatile unsigned int *)fault_pc);
1159 
1160 	/* Decode the fault instruction and fix the registers as needed */
1161 
1162 	if ((fault_instruction & 0x0e000000) == 0x08000000) {
1163 		int base;
1164 		int loop;
1165 		int count;
1166 		int *registers = &frame->tf_r0;
1167 
1168 		DFC_PRINTF(("LDM/STM\n"));
1169 		DFC_DISASSEMBLE(fault_pc);
1170 		if (fault_instruction & (1 << 21)) {
1171 			DFC_PRINTF(("This instruction must be corrected\n"));
1172 			base = (fault_instruction >> 16) & 0x0f;
1173 			if (base == 15)
1174 				return ABORT_FIXUP_FAILED;
1175 			/* Count registers transferred */
1176 			count = 0;
1177 			for (loop = 0; loop < 16; ++loop) {
1178 				if (fault_instruction & (1<<loop))
1179 					++count;
1180 			}
1181 			DFC_PRINTF(("%d registers used\n", count));
1182 			DFC_PRINTF(("Corrected r%d by %d bytes ",
1183 				       base, count * 4));
1184 			if (fault_instruction & (1 << 23)) {
1185 				DFC_PRINTF(("down\n"));
1186 				registers[base] -= count * 4;
1187 			} else {
1188 				DFC_PRINTF(("up\n"));
1189 				registers[base] += count * 4;
1190 			}
1191 		}
1192 	} else if ((fault_instruction & 0x0e000000) == 0x0c000000) {
1193 		int base;
1194 		int offset;
1195 		int *registers = &frame->tf_r0;
1196 
1197 		/* REGISTER CORRECTION IS REQUIRED FOR THESE INSTRUCTIONS */
1198 
1199 		DFC_DISASSEMBLE(fault_pc);
1200 
1201 		/* Only need to fix registers if write back is turned on */
1202 
1203 		if ((fault_instruction & (1 << 21)) != 0) {
1204 			base = (fault_instruction >> 16) & 0x0f;
1205 			if (base == 13 &&
1206 			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
1207 				return ABORT_FIXUP_FAILED;
1208 			if (base == 15)
1209 				return ABORT_FIXUP_FAILED;
1210 
1211 			offset = (fault_instruction & 0xff) << 2;
1212 			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
1213 			if ((fault_instruction & (1 << 23)) != 0)
1214 				offset = -offset;
1215 			registers[base] += offset;
1216 			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
1217 		}
1218 	} else if ((fault_instruction & 0x0e000000) == 0x0c000000)
1219 		return ABORT_FIXUP_FAILED;
1220 
1221 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1222 
1223 		/* Ok an abort in SVC mode */
1224 
1225 		/*
1226 		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1227 		 * as the fault happened in svc mode but we need it in the
1228 		 * usr slot so we can treat the registers as an array of ints
1229 		 * during fixing.
1230 		 * NOTE: This PC is in the position but writeback is not
1231 		 * allowed on r15.
1232 		 * Doing it like this is more efficient than trapping this
1233 		 * case in all possible locations in the prior fixup code.
1234 		 */
1235 
1236 		frame->tf_svc_lr = frame->tf_usr_lr;
1237 		frame->tf_usr_lr = saved_lr;
1238 
1239 		/*
1240 		 * Note the trapframe does not have the SVC r13 so a fault
1241 		 * from an instruction with writeback to r13 in SVC mode is
1242 		 * not allowed. This should not happen as the kstack is
1243 		 * always valid.
1244 		 */
1245 	}
1246 
1247 	return(ABORT_FIXUP_OK);
1248 }
1249 #endif	/* CPU_ARM2/250/3/6/7 */
1250 
1251 
1252 #if (defined(CPU_ARM6) && defined(ARM6_LATE_ABORT)) || defined(CPU_ARM7) || \
1253 	defined(CPU_ARM7TDMI)
1254 /*
1255  * "Late" (base updated) data abort fixup
1256  *
1257  * For ARM6 (in late-abort mode) and ARM7.
1258  *
1259  * In this model, all data-transfer instructions need fixing up.  We defer
1260  * LDM, STM, LDC and STC fixup to the early-abort handler.
1261  */
int
late_abort_fixup(arg)
	void *arg;
{
	trapframe_t *frame = arg;
	u_int fault_pc;
	u_int fault_instruction;
	int saved_lr = 0;

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
		 * as the fault happened in svc mode but we need it in the
		 * usr slot so we can treat the registers as an array of ints
		 * during fixing.
		 * NOTE: This PC is in the position but writeback is not
		 * allowed on r15.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the following fixup code.
		 */

		saved_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = frame->tf_svc_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed. This should not happen as the kstack is
		 * always valid.
		 */
	}

	/* Get fault address and status from the CPU */

	fault_pc = frame->tf_pc;
	fault_instruction = *((volatile unsigned int *)fault_pc);

	/* Decode the fault instruction and fix the registers as needed */

	/* Was it a swap (SWP) instruction?  No writeback, nothing to fix. */

	if ((fault_instruction & 0x0fb00ff0) == 0x01000090) {
		DFC_DISASSEMBLE(fault_pc);
	} else if ((fault_instruction & 0x0c000000) == 0x04000000) {

		/* Was it a ldr/str instruction */
		/* This is for late abort only */

		int base;
		int offset;
		int *registers = &frame->tf_r0;

		DFC_DISASSEMBLE(fault_pc);

		/* This is for late abort only */

		if ((fault_instruction & (1 << 24)) == 0
		    || (fault_instruction & (1 << 21)) != 0) {
			/*
			 * Post-indexed (P clear) or writeback (W set):
			 * the base register was updated and must be
			 * restored.
			 */

			base = (fault_instruction >> 16) & 0x0f;
			if (base == 13 &&
			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
				return ABORT_FIXUP_FAILED;
			if (base == 15)
				return ABORT_FIXUP_FAILED;
			DFC_PRINTF(("late abt fix: r%d=%08x : ",
				       base, registers[base]));
			if ((fault_instruction & (1 << 25)) == 0) {
				/* Immediate offset - easy */

				offset = fault_instruction & 0xfff;
				if ((fault_instruction & (1 << 23)))
					offset = -offset;
				registers[base] += offset;
				DFC_PRINTF(("imm=%08x ", offset));
			} else {
				/* offset is a shifted register */
				int shift;

				offset = fault_instruction & 0x0f;
				if (offset == base)
					return ABORT_FIXUP_FAILED;

				/*
				 * Register offset - hard we have to
				 * cope with shifts !
				 */
				offset = registers[offset];

				if ((fault_instruction & (1 << 4)) == 0)
					/* shift with amount */
					shift = (fault_instruction >> 7) & 0x1f;
				else {
					/* shift with register */
					if ((fault_instruction & (1 << 7)) != 0)
						/* undefined for now so bail out */
						return ABORT_FIXUP_FAILED;
					shift = ((fault_instruction >> 8) & 0xf);
					if (base == shift)
						return ABORT_FIXUP_FAILED;
					DFC_PRINTF(("shift reg=%d ", shift));
					shift = registers[shift];
				}
				DFC_PRINTF(("shift=%08x ", shift));
				/*
				 * NOTE(review): for LSR/ASR an encoded
				 * shift of 0 means 32, but shifting a
				 * 32-bit value by 32 in C is undefined
				 * behavior — confirm the compiler in use
				 * produces the ARM barrel-shifter result.
				 */
				switch (((fault_instruction >> 5) & 0x3)) {
				case 0 : /* Logical left */
					offset = (int)(((u_int)offset) << shift);
					break;
				case 1 : /* Logical Right */
					if (shift == 0) shift = 32;
					offset = (int)(((u_int)offset) >> shift);
					break;
				case 2 : /* Arithmetic Right */
					if (shift == 0) shift = 32;
					offset = (int)(((int)offset) >> shift);
					break;
				case 3 : /* Rotate right (rol or rxx) */
					return ABORT_FIXUP_FAILED;
					break;
				}

				DFC_PRINTF(("abt: fixed LDR/STR with "
					       "register offset\n"));
				if ((fault_instruction & (1 << 23)))
					offset = -offset;
				DFC_PRINTF(("offset=%08x ", offset));
				registers[base] += offset;
			}
			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
		}
	}

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
		 * as the fault happened in svc mode but we need it in the
		 * usr slot so we can treat the registers as an array of ints
		 * during fixing.
		 * NOTE: This PC is in the position but writeback is not
		 * allowed on r15.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the prior fixup code.
		 */

		frame->tf_svc_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = saved_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed. This should not happen as the kstack is
		 * always valid.
		 */
	}

	/*
	 * Now let the early-abort fixup routine have a go, in case it
	 * was an LDM, STM, LDC or STC that faulted.
	 */

	return early_abort_fixup(arg);
}
1431 #endif	/* CPU_ARM6(LATE)/7/7TDMI */
1432 
1433 /*
1434  * CPU Setup code
1435  */
1436 
1437 #if defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) || \
1438 	defined(CPU_ARM8) || defined (CPU_ARM9) || defined(CPU_SA110) || \
1439 	defined(CPU_SA1100) || defined(CPU_SA1110) || \
1440 	defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
1441 	defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425)
1442 
/*
 * Operations applied by parse_cpu_options() when a boot option is
 * absent/false (co_falseop) or present/true (co_trueop).
 */
#define IGN	0	/* leave the control bits alone */
#define OR	1	/* OR co_value into the control word */
#define BIC	2	/* clear co_value from the control word */

/* One recognized boot option and the control-register bits it governs. */
struct cpu_option {
	char	*co_name;	/* boot option name, e.g. "cpu.cache" */
	int	co_falseop;	/* op when option evaluates false */
	int	co_trueop;	/* op when option evaluates true */
	int	co_value;	/* CPU_CONTROL_* bit mask affected */
};

static u_int parse_cpu_options __P((char *, struct cpu_option *, u_int));
1455 
1456 static u_int
1457 parse_cpu_options(args, optlist, cpuctrl)
1458 	char *args;
1459 	struct cpu_option *optlist;
1460 	u_int cpuctrl;
1461 {
1462 	int integer;
1463 
1464 	if (args == NULL)
1465 		return(cpuctrl);
1466 
1467 	while (optlist->co_name) {
1468 		if (get_bootconf_option(args, optlist->co_name,
1469 		    BOOTOPT_TYPE_BOOLEAN, &integer)) {
1470 			if (integer) {
1471 				if (optlist->co_trueop == OR)
1472 					cpuctrl |= optlist->co_value;
1473 				else if (optlist->co_trueop == BIC)
1474 					cpuctrl &= ~optlist->co_value;
1475 			} else {
1476 				if (optlist->co_falseop == OR)
1477 					cpuctrl |= optlist->co_value;
1478 				else if (optlist->co_falseop == BIC)
1479 					cpuctrl &= ~optlist->co_value;
1480 			}
1481 		}
1482 		++optlist;
1483 	}
1484 	return(cpuctrl);
1485 }
1486 #endif /* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 || CPU_SA110 */
1487 
1488 #if defined (CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) \
1489 	|| defined(CPU_ARM8)
/*
 * Boot options shared by the ARM6, ARM7, ARM7TDMI and ARM8 setup
 * routines.  Generic "cpu.*" spellings plus COMPAT_12 legacy names.
 */
struct cpu_option arm678_options[] = {
#ifdef COMPAT_12
	{ "nocache",		IGN, BIC, CPU_CONTROL_IDC_ENABLE },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "cpu.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1501 
1502 #endif	/* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 */
1503 
1504 #ifdef CPU_ARM6
/* ARM6-specific boot options (unified I/D cache and write buffer). */
struct cpu_option arm6_options[] = {
	{ "arm6.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm6.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm6.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm6.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1511 };
1512 
/*
 * arm6_setup:
 *
 *	Build the ARM6 control-register value from the defaults and any
 *	boot-option overrides, flush the caches, then install it.
 */
void
arm6_setup(args)
	char *args;
{
	int cpuctrl, cpuctrlmask;

	/* Set up default control registers bits */
	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
	/*
	 * Full set of bits this CPU implements.  NOTE(review): currently
	 * unused because cpu_control() below is called with an all-ones
	 * mask; kept for documentation.
	 */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE
		 | CPU_CONTROL_AFLT_ENABLE;

#ifdef ARM6_LATE_ABORT
	cpuctrl |= CPU_CONTROL_LABT_ENABLE;
#endif	/* ARM6_LATE_ABORT */

	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
	cpuctrl = parse_cpu_options(args, arm6_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Clear out the cache (must be done before reconfiguring it) */
	cpu_idcache_wbinv_all();

	/* Cache the value, then set the control register */
	curcpu()->ci_ctrl = cpuctrl;
	cpu_control(0xffffffff, cpuctrl);
}
1547 #endif	/* CPU_ARM6 */
1548 
1549 #ifdef CPU_ARM7
/* ARM7-specific boot options; "fpaclk2" is a COMPAT_12 legacy name. */
struct cpu_option arm7_options[] = {
	{ "arm7.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm7.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm7.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm7.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
#ifdef COMPAT_12
	{ "fpaclk2",		BIC, OR,  CPU_CONTROL_CPCLK },
#endif	/* COMPAT_12 */
	{ "arm700.fpaclk",	BIC, OR,  CPU_CONTROL_CPCLK },
	{ NULL,			IGN, IGN, 0 }
};
1561 
/*
 * arm7_setup:
 *
 *	Build the ARM7 control-register value from the defaults and any
 *	boot-option overrides, flush the caches, then install it.
 */
void
arm7_setup(args)
	char *args;
{
	int cpuctrl, cpuctrlmask;

	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
	/*
	 * Full set of bits this CPU implements.  NOTE(review): currently
	 * unused because cpu_control() below uses an all-ones mask.
	 */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
		 | CPU_CONTROL_CPCLK | CPU_CONTROL_LABT_ENABLE
		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE
		 | CPU_CONTROL_AFLT_ENABLE;

	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
	cpuctrl = parse_cpu_options(args, arm7_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Clear out the cache (must be done before reconfiguring it) */
	cpu_idcache_wbinv_all();

	/* Cache the value, then set the control register */
	curcpu()->ci_ctrl = cpuctrl;
	cpu_control(0xffffffff, cpuctrl);
}
1592 #endif	/* CPU_ARM7 */
1593 
1594 #ifdef CPU_ARM7TDMI
/*
 * ARM7TDMI boot options.  The "arm7." prefixes are shared with the
 * plain ARM7 table — presumably deliberate so the same option names
 * work on both cores; verify before renaming.
 */
struct cpu_option arm7tdmi_options[] = {
	{ "arm7.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm7.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm7.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm7.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
#ifdef COMPAT_12
	{ "fpaclk2",		BIC, OR,  CPU_CONTROL_CPCLK },
#endif	/* COMPAT_12 */
	{ "arm700.fpaclk",	BIC, OR,  CPU_CONTROL_CPCLK },
	{ NULL,			IGN, IGN, 0 }
};
1606 
/*
 * arm7tdmi_setup:
 *
 *	Build the ARM7TDMI control-register value from the defaults and
 *	any boot-option overrides, flush the caches, then install it.
 */
void
arm7tdmi_setup(args)
	char *args;
{
	int cpuctrl;

	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;

	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
	cpuctrl = parse_cpu_options(args, arm7tdmi_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Clear out the cache (must be done before reconfiguring it) */
	cpu_idcache_wbinv_all();

	/* Cache the value, then set the control register */
	curcpu()->ci_ctrl = cpuctrl;
	cpu_control(0xffffffff, cpuctrl);
}
1631 #endif	/* CPU_ARM7TDMI */
1632 
1633 #ifdef CPU_ARM8
/* ARM8 boot options; adds branch-prediction control to the common set. */
struct cpu_option arm8_options[] = {
	{ "arm8.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm8.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm8.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm8.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
#ifdef COMPAT_12
	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "arm8.branchpredict",	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1646 
1647 void
1648 arm8_setup(args)
1649 	char *args;
1650 {
1651 	int integer;
1652 	int cpuctrl, cpuctrlmask;
1653 	int clocktest;
1654 	int setclock = 0;
1655 
1656 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1657 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1658 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
1659 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1660 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1661 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
1662 		 | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_ROM_ENABLE
1663 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE;
1664 
1665 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1666 	cpuctrl = parse_cpu_options(args, arm8_options, cpuctrl);
1667 
1668 #ifdef __ARMEB__
1669 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1670 #endif
1671 
1672 	/* Get clock configuration */
1673 	clocktest = arm8_clock_config(0, 0) & 0x0f;
1674 
1675 	/* Special ARM8 clock and test configuration */
1676 	if (get_bootconf_option(args, "arm8.clock.reset", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1677 		clocktest = 0;
1678 		setclock = 1;
1679 	}
1680 	if (get_bootconf_option(args, "arm8.clock.dynamic", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1681 		if (integer)
1682 			clocktest |= 0x01;
1683 		else
1684 			clocktest &= ~(0x01);
1685 		setclock = 1;
1686 	}
1687 	if (get_bootconf_option(args, "arm8.clock.sync", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1688 		if (integer)
1689 			clocktest |= 0x02;
1690 		else
1691 			clocktest &= ~(0x02);
1692 		setclock = 1;
1693 	}
1694 	if (get_bootconf_option(args, "arm8.clock.fast", BOOTOPT_TYPE_BININT, &integer)) {
1695 		clocktest = (clocktest & ~0xc0) | (integer & 3) << 2;
1696 		setclock = 1;
1697 	}
1698 	if (get_bootconf_option(args, "arm8.test", BOOTOPT_TYPE_BININT, &integer)) {
1699 		clocktest |= (integer & 7) << 5;
1700 		setclock = 1;
1701 	}
1702 
1703 	/* Clear out the cache */
1704 	cpu_idcache_wbinv_all();
1705 
1706 	/* Set the control register */
1707 	curcpu()->ci_ctrl = cpuctrl;
1708 	cpu_control(0xffffffff, cpuctrl);
1709 
1710 	/* Set the clock/test register */
1711 	if (setclock)
1712 		arm8_clock_config(0x7f, clocktest);
1713 }
1714 #endif	/* CPU_ARM8 */
1715 
1716 #ifdef CPU_ARM9
/* ARM9 boot options; separate I-cache and D-cache enables. */
struct cpu_option arm9_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm9.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm9.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "arm9.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "arm9.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1728 
/*
 * arm9_setup:
 *
 *	Build the ARM9 control-register value from the defaults and any
 *	boot-option overrides, flush the caches, then install it.
 */
void
arm9_setup(args)
	char *args;
{
	int cpuctrl, cpuctrlmask;

	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE;
	/*
	 * Full implemented-bit mask.  NOTE(review): currently unused
	 * because cpu_control() below uses an all-ones mask.
	 */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
		 | CPU_CONTROL_CPCLK;

	cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Clear out the cache (must be done before reconfiguring it) */
	cpu_idcache_wbinv_all();

	/* Cache the value, then set the control register */
	curcpu()->ci_ctrl = cpuctrl;
	cpu_control(0xffffffff, cpuctrl);

}
1761 #endif	/* CPU_ARM9 */
1762 
1763 #ifdef CPU_SA110
/* StrongARM SA-110 boot options, plus COMPAT_12 legacy names. */
struct cpu_option sa110_options[] = {
#ifdef COMPAT_12
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa110.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa110.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "sa110.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "sa110.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1779 
/*
 * sa110_setup:
 *
 *	Build the SA-110 control-register value from the defaults and
 *	any boot-option overrides, flush the caches, install it, and
 *	enable CPU clock switching.
 */
void
sa110_setup(args)
	char *args;
{
	int cpuctrl, cpuctrlmask;

	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE;
	/* Full implemented-bit mask; see the commented cpu_control() below. */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
		 | CPU_CONTROL_CPCLK;

	cpuctrl = parse_cpu_options(args, sa110_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Clear out the cache (must be done before reconfiguring it) */
	cpu_idcache_wbinv_all();

	/* Cache the value, then set the control register */
	curcpu()->ci_ctrl = cpuctrl;
/*	cpu_control(cpuctrlmask, cpuctrl);*/
	cpu_control(0xffffffff, cpuctrl);

	/*
	 * enable clockswitching, note that this doesn't read or write to r0,
	 * r0 is just to make it valid asm
	 */
	__asm ("mcr 15, 0, r0, c15, c1, 2");
}
1818 #endif	/* CPU_SA110 */
1819 
1820 #if defined(CPU_SA1100) || defined(CPU_SA1110)
/* StrongARM SA-1100/SA-1110 boot options, plus COMPAT_12 legacy names. */
struct cpu_option sa11x0_options[] = {
#ifdef COMPAT_12
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa11x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa11x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "sa11x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "sa11x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1836 
1837 void
1838 sa11x0_setup(args)
1839 	char *args;
1840 {
1841 	int cpuctrl, cpuctrlmask;
1842 
1843 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1844 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1845 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1846 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
1847 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1848 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1849 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1850 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1851 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1852 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1853 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
1854 
1855 	cpuctrl = parse_cpu_options(args, sa11x0_options, cpuctrl);
1856 
1857 #ifdef __ARMEB__
1858 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1859 #endif
1860 
1861 	if (vector_page == ARM_VECTORS_HIGH)
1862 		cpuctrl |= CPU_CONTROL_VECRELOC;
1863 
1864 	/* Clear out the cache */
1865 	cpu_idcache_wbinv_all();
1866 
1867 	/* Set the control register */
1868 	cpu_control(0xffffffff, cpuctrl);
1869 }
1870 #endif	/* CPU_SA1100 || CPU_SA1110 */
1871 
1872 #if defined(CPU_IXP12X0)
/* Intel IXP12x0 boot options. */
struct cpu_option ixp12x0_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "ixp12x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "ixp12x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "ixp12x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "ixp12x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1884 
/*
 * ixp12x0_setup:
 *
 *	Build the IXP12x0 control-register value from the defaults and
 *	any boot-option overrides, flush the caches, then install it.
 *	Unlike the other setup routines here, this one writes only the
 *	bits named in cpuctrlmask (see the commented-out all-ones call).
 */
void
ixp12x0_setup(args)
	char *args;
{
	int cpuctrl, cpuctrlmask;


	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE;

	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_IC_ENABLE
		 | CPU_CONTROL_VECRELOC;

	cpuctrl = parse_cpu_options(args, ixp12x0_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	/* Clear out the cache (must be done before reconfiguring it) */
	cpu_idcache_wbinv_all();

	/* Cache the value, then set the control register */
	curcpu()->ci_ctrl = cpuctrl;
	/* cpu_control(0xffffffff, cpuctrl); */
	cpu_control(cpuctrlmask, cpuctrl);
}
1919 #endif /* CPU_IXP12X0 */
1920 
1921 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
1922     defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425)
/* XScale boot options, plus COMPAT_12 legacy names. */
struct cpu_option xscale_options[] = {
#ifdef COMPAT_12
	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
#endif	/* COMPAT_12 */
	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "xscale.branchpredict", BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "xscale.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "xscale.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "xscale.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1937 
/*
 * xscale_setup:
 *
 *	Build the XScale control-register value from the defaults and
 *	any boot-option overrides, flush the caches, install it, then
 *	program the auxiliary control register's write-coalescing bit.
 */
void
xscale_setup(args)
	char *args;
{
	uint32_t auxctl;
	int cpuctrl, cpuctrlmask;

	/*
	 * The XScale Write Buffer is always enabled.  Our option
	 * is to enable/disable coalescing.  Note that bits 6:3
	 * must always be enabled.
	 */

	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
		 | CPU_CONTROL_BPRD_ENABLE;
	/* Full implemented-bit mask; see the commented cpu_control() below. */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;

	cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	/* Clear out the cache (must be done before reconfiguring it) */
	cpu_idcache_wbinv_all();

	/*
	 * Set the control register.  Note that bits 6:3 must always
	 * be set to 1.
	 */
	curcpu()->ci_ctrl = cpuctrl;
/*	cpu_control(cpuctrlmask, cpuctrl);*/
	cpu_control(0xffffffff, cpuctrl);

	/* Make sure write coalescing is turned on (read-modify-write of
	 * the auxiliary control register, coprocessor 15 register 1) */
	__asm __volatile("mrc p15, 0, %0, c1, c0, 1"
		: "=r" (auxctl));
#ifdef XSCALE_NO_COALESCE_WRITES
	auxctl |= XSCALE_AUXCTL_K;
#else
	auxctl &= ~XSCALE_AUXCTL_K;
#endif
	__asm __volatile("mcr p15, 0, %0, c1, c0, 1"
		: : "r" (auxctl));
}
1995 #endif	/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 */
1996