1 /*	$NetBSD: cpufunc.c,v 1.67 2004/04/03 04:34:40 bsh Exp $	*/
2 
3 /*
4  * arm7tdmi support code Copyright (c) 2001 John Fremlin
5  * arm8 support code Copyright (c) 1997 ARM Limited
6  * arm8 support code Copyright (c) 1997 Causality Limited
7  * arm9 support code Copyright (C) 2001 ARM Ltd
8  * Copyright (c) 1997 Mark Brinicombe.
9  * Copyright (c) 1997 Causality Limited
10  * All rights reserved.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. All advertising materials mentioning features or use of this software
21  *    must display the following acknowledgement:
22  *	This product includes software developed by Causality Limited.
23  * 4. The name of Causality Limited may not be used to endorse or promote
24  *    products derived from this software without specific prior written
25  *    permission.
26  *
27  * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
28  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
29  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
30  * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
31  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
32  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
33  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37  * SUCH DAMAGE.
38  *
39  * RiscBSD kernel project
40  *
41  * cpufunc.c
42  *
43  * C functions for supporting CPU / MMU / TLB specific operations.
44  *
45  * Created      : 30/01/97
46  */
47 
48 #include <sys/cdefs.h>
49 __KERNEL_RCSID(0, "$NetBSD: cpufunc.c,v 1.67 2004/04/03 04:34:40 bsh Exp $");
50 
51 #include "opt_compat_netbsd.h"
52 #include "opt_cpuoptions.h"
53 #include "opt_perfctrs.h"
54 
55 #include <sys/types.h>
56 #include <sys/param.h>
57 #include <sys/pmc.h>
58 #include <sys/systm.h>
59 #include <machine/cpu.h>
60 #include <machine/bootconfig.h>
61 #include <arch/arm/arm/disassem.h>
62 
63 #include <uvm/uvm.h>
64 
65 #include <arm/cpuconf.h>
66 #include <arm/cpufunc.h>
67 
68 #ifdef CPU_XSCALE_80200
69 #include <arm/xscale/i80200reg.h>
70 #include <arm/xscale/i80200var.h>
71 #endif
72 
73 #ifdef CPU_XSCALE_80321
74 #include <arm/xscale/i80321reg.h>
75 #include <arm/xscale/i80321var.h>
76 #endif
77 
78 #ifdef CPU_XSCALE_IXP425
79 #include <arm/xscale/ixp425reg.h>
80 #include <arm/xscale/ixp425var.h>
81 #endif
82 
83 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321)
84 #include <arm/xscale/xscalereg.h>
85 #endif
86 
87 #if defined(PERFCTRS)
88 struct arm_pmc_funcs *arm_pmc;
89 #endif
90 
91 /* PRIMARY CACHE VARIABLES */
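/* (sizes and line sizes are in bytes; the _ways fields are the associativity) */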
92 int	arm_picache_size;
93 int	arm_picache_line_size;
94 int	arm_picache_ways;
95 
96 int	arm_pdcache_size;	/* and unified */
97 int	arm_pdcache_line_size;
98 int	arm_pdcache_ways;
99 
100 int	arm_pcache_type;
101 int	arm_pcache_unified;
102 
103 int	arm_dcache_align;
104 int	arm_dcache_align_mask;
105 
106 /* 1 == use cpu_sleep(), 0 == don't */
107 int cpu_do_powersave;
108 
109 #ifdef CPU_ARM3
110 struct cpu_functions arm3_cpufuncs = {
111 	/* CPU functions */
112 
113 	cpufunc_id,			/* id			*/
114 	cpufunc_nullop,			/* cpwait		*/
115 
116 	/* MMU functions */
117 
118 	arm3_control,			/* control		*/
119 	NULL,				/* domain		*/
120 	NULL,				/* setttb		*/
121 	NULL,				/* faultstatus		*/
122 	NULL,				/* faultaddress		*/
123 
124 	/* TLB functions */
125 
126 	cpufunc_nullop,			/* tlb_flushID		*/
127 	(void *)cpufunc_nullop,		/* tlb_flushID_SE	*/
128 	cpufunc_nullop,			/* tlb_flushI		*/
129 	(void *)cpufunc_nullop,		/* tlb_flushI_SE	*/
130 	cpufunc_nullop,			/* tlb_flushD		*/
131 	(void *)cpufunc_nullop,		/* tlb_flushD_SE	*/
132 
133 	/* Cache operations */
134 
135 	cpufunc_nullop,			/* icache_sync_all	*/
136 	(void *) cpufunc_nullop,	/* icache_sync_range	*/
137 
138 	arm3_cache_flush,		/* dcache_wbinv_all	*/
139 	(void *)arm3_cache_flush,	/* dcache_wbinv_range	*/
140 	(void *)arm3_cache_flush,	/* dcache_inv_range	*/
141 	(void *)cpufunc_nullop,		/* dcache_wb_range	*/
142 
143 	arm3_cache_flush,		/* idcache_wbinv_all	*/
144 	(void *)arm3_cache_flush,	/* idcache_wbinv_range	*/
145 
146 	/* Other functions */
147 
148 	cpufunc_nullop,			/* flush_prefetchbuf	*/
149 	cpufunc_nullop,			/* drain_writebuf	*/
150 	cpufunc_nullop,			/* flush_brnchtgt_C	*/
151 	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/
152 
153 	(void *)cpufunc_nullop,		/* sleep		*/
154 
155 	/* Soft functions */
156 
157 	early_abort_fixup,		/* dataabt_fixup	*/
158 	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
159 
160 	NULL,				/* context_switch	*/
161 
162 	(void *)cpufunc_nullop		/* cpu setup		*/
163 
164 };
165 #endif	/* CPU_ARM3 */
166 
167 #ifdef CPU_ARM6
168 struct cpu_functions arm6_cpufuncs = {
169 	/* CPU functions */
170 
171 	cpufunc_id,			/* id			*/
172 	cpufunc_nullop,			/* cpwait		*/
173 
174 	/* MMU functions */
175 
176 	cpufunc_control,		/* control		*/
177 	cpufunc_domains,		/* domain		*/
178 	arm67_setttb,			/* setttb		*/
179 	cpufunc_faultstatus,		/* faultstatus		*/
180 	cpufunc_faultaddress,		/* faultaddress		*/
181 
182 	/* TLB functions */
183 
184 	arm67_tlb_flush,		/* tlb_flushID		*/
185 	arm67_tlb_purge,		/* tlb_flushID_SE	*/
186 	arm67_tlb_flush,		/* tlb_flushI		*/
187 	arm67_tlb_purge,		/* tlb_flushI_SE	*/
188 	arm67_tlb_flush,		/* tlb_flushD		*/
189 	arm67_tlb_purge,		/* tlb_flushD_SE	*/
190 
191 	/* Cache operations */
192 
193 	cpufunc_nullop,			/* icache_sync_all	*/
194 	(void *) cpufunc_nullop,	/* icache_sync_range	*/
195 
196 	arm67_cache_flush,		/* dcache_wbinv_all	*/
197 	(void *)arm67_cache_flush,	/* dcache_wbinv_range	*/
198 	(void *)arm67_cache_flush,	/* dcache_inv_range	*/
199 	(void *)cpufunc_nullop,		/* dcache_wb_range	*/
200 
201 	arm67_cache_flush,		/* idcache_wbinv_all	*/
202 	(void *)arm67_cache_flush,	/* idcache_wbinv_range	*/
203 
204 	/* Other functions */
205 
206 	cpufunc_nullop,			/* flush_prefetchbuf	*/
207 	cpufunc_nullop,			/* drain_writebuf	*/
208 	cpufunc_nullop,			/* flush_brnchtgt_C	*/
209 	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/
210 
211 	(void *)cpufunc_nullop,		/* sleep		*/
212 
213 	/* Soft functions */
214 
215 #ifdef ARM6_LATE_ABORT
216 	late_abort_fixup,		/* dataabt_fixup	*/
217 #else
218 	early_abort_fixup,		/* dataabt_fixup	*/
219 #endif
220 	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
221 
222 	arm67_context_switch,		/* context_switch	*/
223 
224 	arm6_setup			/* cpu setup		*/
225 
226 };
227 #endif	/* CPU_ARM6 */
228 
229 #ifdef CPU_ARM7
230 struct cpu_functions arm7_cpufuncs = {
231 	/* CPU functions */
232 
233 	cpufunc_id,			/* id			*/
234 	cpufunc_nullop,			/* cpwait		*/
235 
236 	/* MMU functions */
237 
238 	cpufunc_control,		/* control		*/
239 	cpufunc_domains,		/* domain		*/
240 	arm67_setttb,			/* setttb		*/
241 	cpufunc_faultstatus,		/* faultstatus		*/
242 	cpufunc_faultaddress,		/* faultaddress		*/
243 
244 	/* TLB functions */
245 
246 	arm67_tlb_flush,		/* tlb_flushID		*/
247 	arm67_tlb_purge,		/* tlb_flushID_SE	*/
248 	arm67_tlb_flush,		/* tlb_flushI		*/
249 	arm67_tlb_purge,		/* tlb_flushI_SE	*/
250 	arm67_tlb_flush,		/* tlb_flushD		*/
251 	arm67_tlb_purge,		/* tlb_flushD_SE	*/
252 
253 	/* Cache operations */
254 
255 	cpufunc_nullop,			/* icache_sync_all	*/
256 	(void *)cpufunc_nullop,		/* icache_sync_range	*/
257 
258 	arm67_cache_flush,		/* dcache_wbinv_all	*/
259 	(void *)arm67_cache_flush,	/* dcache_wbinv_range	*/
260 	(void *)arm67_cache_flush,	/* dcache_inv_range	*/
261 	(void *)cpufunc_nullop,		/* dcache_wb_range	*/
262 
263 	arm67_cache_flush,		/* idcache_wbinv_all	*/
264 	(void *)arm67_cache_flush,	/* idcache_wbinv_range	*/
265 
266 	/* Other functions */
267 
268 	cpufunc_nullop,			/* flush_prefetchbuf	*/
269 	cpufunc_nullop,			/* drain_writebuf	*/
270 	cpufunc_nullop,			/* flush_brnchtgt_C	*/
271 	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/
272 
273 	(void *)cpufunc_nullop,		/* sleep		*/
274 
275 	/* Soft functions */
276 
277 	late_abort_fixup,		/* dataabt_fixup	*/
278 	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
279 
280 	arm67_context_switch,		/* context_switch	*/
281 
282 	arm7_setup			/* cpu setup		*/
283 
284 };
285 #endif	/* CPU_ARM7 */
286 
287 #ifdef CPU_ARM7TDMI
288 struct cpu_functions arm7tdmi_cpufuncs = {
289 	/* CPU functions */
290 
291 	cpufunc_id,			/* id			*/
292 	cpufunc_nullop,			/* cpwait		*/
293 
294 	/* MMU functions */
295 
296 	cpufunc_control,		/* control		*/
297 	cpufunc_domains,		/* domain		*/
298 	arm7tdmi_setttb,		/* setttb		*/
299 	cpufunc_faultstatus,		/* faultstatus		*/
300 	cpufunc_faultaddress,		/* faultaddress		*/
301 
302 	/* TLB functions */
303 
304 	arm7tdmi_tlb_flushID,		/* tlb_flushID		*/
305 	arm7tdmi_tlb_flushID_SE,	/* tlb_flushID_SE	*/
306 	arm7tdmi_tlb_flushID,		/* tlb_flushI		*/
307 	arm7tdmi_tlb_flushID_SE,	/* tlb_flushI_SE	*/
308 	arm7tdmi_tlb_flushID,		/* tlb_flushD		*/
309 	arm7tdmi_tlb_flushID_SE,	/* tlb_flushD_SE	*/
310 
311 	/* Cache operations */
312 
313 	cpufunc_nullop,			/* icache_sync_all	*/
314 	(void *)cpufunc_nullop,		/* icache_sync_range	*/
315 
316 	arm7tdmi_cache_flushID,		/* dcache_wbinv_all	*/
317 	(void *)arm7tdmi_cache_flushID,	/* dcache_wbinv_range	*/
318 	(void *)arm7tdmi_cache_flushID,	/* dcache_inv_range	*/
319 	(void *)cpufunc_nullop,		/* dcache_wb_range	*/
320 
321 	arm7tdmi_cache_flushID,		/* idcache_wbinv_all	*/
322 	(void *)arm7tdmi_cache_flushID,	/* idcache_wbinv_range	*/
323 
324 	/* Other functions */
325 
326 	cpufunc_nullop,			/* flush_prefetchbuf	*/
327 	cpufunc_nullop,			/* drain_writebuf	*/
328 	cpufunc_nullop,			/* flush_brnchtgt_C	*/
329 	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/
330 
331 	(void *)cpufunc_nullop,		/* sleep		*/
332 
333 	/* Soft functions */
334 
335 	late_abort_fixup,		/* dataabt_fixup	*/
336 	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
337 
338 	arm7tdmi_context_switch,	/* context_switch	*/
339 
340 	arm7tdmi_setup			/* cpu setup		*/
341 
342 };
343 #endif	/* CPU_ARM7TDMI */
344 
345 #ifdef CPU_ARM8
346 struct cpu_functions arm8_cpufuncs = {
347 	/* CPU functions */
348 
349 	cpufunc_id,			/* id			*/
350 	cpufunc_nullop,			/* cpwait		*/
351 
352 	/* MMU functions */
353 
354 	cpufunc_control,		/* control		*/
355 	cpufunc_domains,		/* domain		*/
356 	arm8_setttb,			/* setttb		*/
357 	cpufunc_faultstatus,		/* faultstatus		*/
358 	cpufunc_faultaddress,		/* faultaddress		*/
359 
360 	/* TLB functions */
361 
362 	arm8_tlb_flushID,		/* tlb_flushID		*/
363 	arm8_tlb_flushID_SE,		/* tlb_flushID_SE	*/
364 	arm8_tlb_flushID,		/* tlb_flushI		*/
365 	arm8_tlb_flushID_SE,		/* tlb_flushI_SE	*/
366 	arm8_tlb_flushID,		/* tlb_flushD		*/
367 	arm8_tlb_flushID_SE,		/* tlb_flushD_SE	*/
368 
369 	/* Cache operations */
370 
371 	cpufunc_nullop,			/* icache_sync_all	*/
372 	(void *)cpufunc_nullop,		/* icache_sync_range	*/
373 
374 	arm8_cache_purgeID,		/* dcache_wbinv_all	*/
375 	(void *)arm8_cache_purgeID,	/* dcache_wbinv_range	*/
376 /*XXX*/	(void *)arm8_cache_purgeID,	/* dcache_inv_range	*/
377 	(void *)arm8_cache_cleanID,	/* dcache_wb_range	*/
378 
379 	arm8_cache_purgeID,		/* idcache_wbinv_all	*/
380 	(void *)arm8_cache_purgeID,	/* idcache_wbinv_range	*/
381 
382 	/* Other functions */
383 
384 	cpufunc_nullop,			/* flush_prefetchbuf	*/
385 	cpufunc_nullop,			/* drain_writebuf	*/
386 	cpufunc_nullop,			/* flush_brnchtgt_C	*/
387 	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/
388 
389 	(void *)cpufunc_nullop,		/* sleep		*/
390 
391 	/* Soft functions */
392 
393 	cpufunc_null_fixup,		/* dataabt_fixup	*/
394 	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
395 
396 	arm8_context_switch,		/* context_switch	*/
397 
398 	arm8_setup			/* cpu setup		*/
399 };
400 #endif	/* CPU_ARM8 */
401 
402 #ifdef CPU_ARM9
403 struct cpu_functions arm9_cpufuncs = {
404 	/* CPU functions */
405 
406 	cpufunc_id,			/* id			*/
407 	cpufunc_nullop,			/* cpwait		*/
408 
409 	/* MMU functions */
410 
411 	cpufunc_control,		/* control		*/
412 	cpufunc_domains,		/* domain		*/
413 	arm9_setttb,			/* setttb		*/
414 	cpufunc_faultstatus,		/* faultstatus		*/
415 	cpufunc_faultaddress,		/* faultaddress		*/
416 
417 	/* TLB functions */
418 
419 	armv4_tlb_flushID,		/* tlb_flushID		*/
420 	arm9_tlb_flushID_SE,		/* tlb_flushID_SE	*/
421 	armv4_tlb_flushI,		/* tlb_flushI		*/
422 	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
423 	armv4_tlb_flushD,		/* tlb_flushD		*/
424 	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/
425 
426 	/* Cache operations */
427 
428 	arm9_icache_sync_all,		/* icache_sync_all	*/
429 	arm9_icache_sync_range,		/* icache_sync_range	*/
430 
431 	arm9_dcache_wbinv_all,		/* dcache_wbinv_all	*/
432 	arm9_dcache_wbinv_range,	/* dcache_wbinv_range	*/
433 /*XXX*/	arm9_dcache_wbinv_range,	/* dcache_inv_range	*/
434 	arm9_dcache_wb_range,		/* dcache_wb_range	*/
435 
436 	arm9_idcache_wbinv_all,		/* idcache_wbinv_all	*/
437 	arm9_idcache_wbinv_range,	/* idcache_wbinv_range	*/
438 
439 	/* Other functions */
440 
441 	cpufunc_nullop,			/* flush_prefetchbuf	*/
442 	armv4_drain_writebuf,		/* drain_writebuf	*/
443 	cpufunc_nullop,			/* flush_brnchtgt_C	*/
444 	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/
445 
446 	(void *)cpufunc_nullop,		/* sleep		*/
447 
448 	/* Soft functions */
449 
450 	cpufunc_null_fixup,		/* dataabt_fixup	*/
451 	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
452 
453 	arm9_context_switch,		/* context_switch	*/
454 
455 	arm9_setup			/* cpu setup		*/
456 
457 };
458 #endif /* CPU_ARM9 */
459 
460 #ifdef CPU_ARM10
461 struct cpu_functions arm10_cpufuncs = {
462 	/* CPU functions */
463 
464 	cpufunc_id,			/* id			*/
465 	cpufunc_nullop,			/* cpwait		*/
466 
467 	/* MMU functions */
468 
469 	cpufunc_control,		/* control		*/
470 	cpufunc_domains,		/* domain		*/
471 	arm10_setttb,			/* setttb		*/
472 	cpufunc_faultstatus,		/* faultstatus		*/
473 	cpufunc_faultaddress,		/* faultaddress		*/
474 
475 	/* TLB functions */
476 
477 	armv4_tlb_flushID,		/* tlb_flushID		*/
478 	arm10_tlb_flushID_SE,		/* tlb_flushID_SE	*/
479 	armv4_tlb_flushI,		/* tlb_flushI		*/
480 	arm10_tlb_flushI_SE,		/* tlb_flushI_SE	*/
481 	armv4_tlb_flushD,		/* tlb_flushD		*/
482 	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/
483 
484 	/* Cache operations */
485 
486 	arm10_icache_sync_all,		/* icache_sync_all	*/
487 	arm10_icache_sync_range,	/* icache_sync_range	*/
488 
489 	arm10_dcache_wbinv_all,		/* dcache_wbinv_all	*/
490 	arm10_dcache_wbinv_range,	/* dcache_wbinv_range	*/
491 /*XXX*/	arm10_dcache_wbinv_range,	/* dcache_inv_range	*/
492 	arm10_dcache_wb_range,		/* dcache_wb_range	*/
493 
494 	arm10_idcache_wbinv_all,	/* idcache_wbinv_all	*/
495 	arm10_idcache_wbinv_range,	/* idcache_wbinv_range	*/
496 
497 	/* Other functions */
498 
499 	cpufunc_nullop,			/* flush_prefetchbuf	*/
500 	armv4_drain_writebuf,		/* drain_writebuf	*/
501 	cpufunc_nullop,			/* flush_brnchtgt_C	*/
502 	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/
503 
504 	(void *)cpufunc_nullop,		/* sleep		*/
505 
506 	/* Soft functions */
507 
508 	cpufunc_null_fixup,		/* dataabt_fixup	*/
509 	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
510 
511 	arm10_context_switch,		/* context_switch	*/
512 
513 	arm10_setup			/* cpu setup		*/
514 
515 };
516 #endif /* CPU_ARM10 */
517 
518 #ifdef CPU_SA110
519 struct cpu_functions sa110_cpufuncs = {
520 	/* CPU functions */
521 
522 	cpufunc_id,			/* id			*/
523 	cpufunc_nullop,			/* cpwait		*/
524 
525 	/* MMU functions */
526 
527 	cpufunc_control,		/* control		*/
528 	cpufunc_domains,		/* domain		*/
529 	sa1_setttb,			/* setttb		*/
530 	cpufunc_faultstatus,		/* faultstatus		*/
531 	cpufunc_faultaddress,		/* faultaddress		*/
532 
533 	/* TLB functions */
534 
535 	armv4_tlb_flushID,		/* tlb_flushID		*/
536 	sa1_tlb_flushID_SE,		/* tlb_flushID_SE	*/
537 	armv4_tlb_flushI,		/* tlb_flushI		*/
538 	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
539 	armv4_tlb_flushD,		/* tlb_flushD		*/
540 	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/
541 
542 	/* Cache operations */
543 
544 	sa1_cache_syncI,		/* icache_sync_all	*/
545 	sa1_cache_syncI_rng,		/* icache_sync_range	*/
546 
547 	sa1_cache_purgeD,		/* dcache_wbinv_all	*/
548 	sa1_cache_purgeD_rng,		/* dcache_wbinv_range	*/
549 /*XXX*/	sa1_cache_purgeD_rng,		/* dcache_inv_range	*/
550 	sa1_cache_cleanD_rng,		/* dcache_wb_range	*/
551 
552 	sa1_cache_purgeID,		/* idcache_wbinv_all	*/
553 	sa1_cache_purgeID_rng,		/* idcache_wbinv_range	*/
554 
555 	/* Other functions */
556 
557 	cpufunc_nullop,			/* flush_prefetchbuf	*/
558 	armv4_drain_writebuf,		/* drain_writebuf	*/
559 	cpufunc_nullop,			/* flush_brnchtgt_C	*/
560 	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/
561 
562 	(void *)cpufunc_nullop,		/* sleep		*/
563 
564 	/* Soft functions */
565 
566 	cpufunc_null_fixup,		/* dataabt_fixup	*/
567 	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
568 
569 	sa110_context_switch,		/* context_switch	*/
570 
571 	sa110_setup			/* cpu setup		*/
572 };
573 #endif	/* CPU_SA110 */
574 
575 #if defined(CPU_SA1100) || defined(CPU_SA1110)
576 struct cpu_functions sa11x0_cpufuncs = {
577 	/* CPU functions */
578 
579 	cpufunc_id,			/* id			*/
580 	cpufunc_nullop,			/* cpwait		*/
581 
582 	/* MMU functions */
583 
584 	cpufunc_control,		/* control		*/
585 	cpufunc_domains,		/* domain		*/
586 	sa1_setttb,			/* setttb		*/
587 	cpufunc_faultstatus,		/* faultstatus		*/
588 	cpufunc_faultaddress,		/* faultaddress		*/
589 
590 	/* TLB functions */
591 
592 	armv4_tlb_flushID,		/* tlb_flushID		*/
593 	sa1_tlb_flushID_SE,		/* tlb_flushID_SE	*/
594 	armv4_tlb_flushI,		/* tlb_flushI		*/
595 	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
596 	armv4_tlb_flushD,		/* tlb_flushD		*/
597 	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/
598 
599 	/* Cache operations */
600 
601 	sa1_cache_syncI,		/* icache_sync_all	*/
602 	sa1_cache_syncI_rng,		/* icache_sync_range	*/
603 
604 	sa1_cache_purgeD,		/* dcache_wbinv_all	*/
605 	sa1_cache_purgeD_rng,		/* dcache_wbinv_range	*/
606 /*XXX*/	sa1_cache_purgeD_rng,		/* dcache_inv_range	*/
607 	sa1_cache_cleanD_rng,		/* dcache_wb_range	*/
608 
609 	sa1_cache_purgeID,		/* idcache_wbinv_all	*/
610 	sa1_cache_purgeID_rng,		/* idcache_wbinv_range	*/
611 
612 	/* Other functions */
613 
614 	sa11x0_drain_readbuf,		/* flush_prefetchbuf	*/
615 	armv4_drain_writebuf,		/* drain_writebuf	*/
616 	cpufunc_nullop,			/* flush_brnchtgt_C	*/
617 	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/
618 
619 	sa11x0_cpu_sleep,		/* sleep		*/
620 
621 	/* Soft functions */
622 
623 	cpufunc_null_fixup,		/* dataabt_fixup	*/
624 	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
625 
626 	sa11x0_context_switch,		/* context_switch	*/
627 
628 	sa11x0_setup			/* cpu setup		*/
629 };
630 #endif	/* CPU_SA1100 || CPU_SA1110 */
631 
632 #ifdef CPU_IXP12X0
633 struct cpu_functions ixp12x0_cpufuncs = {
634 	/* CPU functions */
635 
636 	cpufunc_id,			/* id			*/
637 	cpufunc_nullop,			/* cpwait		*/
638 
639 	/* MMU functions */
640 
641 	cpufunc_control,		/* control		*/
642 	cpufunc_domains,		/* domain		*/
643 	sa1_setttb,			/* setttb		*/
644 	cpufunc_faultstatus,		/* faultstatus		*/
645 	cpufunc_faultaddress,		/* faultaddress		*/
646 
647 	/* TLB functions */
648 
649 	armv4_tlb_flushID,		/* tlb_flushID		*/
650 	sa1_tlb_flushID_SE,		/* tlb_flushID_SE	*/
651 	armv4_tlb_flushI,		/* tlb_flushI		*/
652 	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
653 	armv4_tlb_flushD,		/* tlb_flushD		*/
654 	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/
655 
656 	/* Cache operations */
657 
658 	sa1_cache_syncI,		/* icache_sync_all	*/
659 	sa1_cache_syncI_rng,		/* icache_sync_range	*/
660 
661 	sa1_cache_purgeD,		/* dcache_wbinv_all	*/
662 	sa1_cache_purgeD_rng,		/* dcache_wbinv_range	*/
663 /*XXX*/	sa1_cache_purgeD_rng,		/* dcache_inv_range	*/
664 	sa1_cache_cleanD_rng,		/* dcache_wb_range	*/
665 
666 	sa1_cache_purgeID,		/* idcache_wbinv_all	*/
667 	sa1_cache_purgeID_rng,		/* idcache_wbinv_range	*/
668 
669 	/* Other functions */
670 
671 	ixp12x0_drain_readbuf,		/* flush_prefetchbuf	*/
672 	armv4_drain_writebuf,		/* drain_writebuf	*/
673 	cpufunc_nullop,			/* flush_brnchtgt_C	*/
674 	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/
675 
676 	(void *)cpufunc_nullop,		/* sleep		*/
677 
678 	/* Soft functions */
679 
680 	cpufunc_null_fixup,		/* dataabt_fixup	*/
681 	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
682 
683 	ixp12x0_context_switch,		/* context_switch	*/
684 
685 	ixp12x0_setup			/* cpu setup		*/
686 };
687 #endif	/* CPU_IXP12X0 */
688 
689 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
690     defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425)
691 struct cpu_functions xscale_cpufuncs = {
692 	/* CPU functions */
693 
694 	cpufunc_id,			/* id			*/
695 	xscale_cpwait,			/* cpwait		*/
696 
697 	/* MMU functions */
698 
699 	xscale_control,			/* control		*/
700 	cpufunc_domains,		/* domain		*/
701 	xscale_setttb,			/* setttb		*/
702 	cpufunc_faultstatus,		/* faultstatus		*/
703 	cpufunc_faultaddress,		/* faultaddress		*/
704 
705 	/* TLB functions */
706 
707 	armv4_tlb_flushID,		/* tlb_flushID		*/
708 	xscale_tlb_flushID_SE,		/* tlb_flushID_SE	*/
709 	armv4_tlb_flushI,		/* tlb_flushI		*/
710 	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
711 	armv4_tlb_flushD,		/* tlb_flushD		*/
712 	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/
713 
714 	/* Cache operations */
715 
716 	xscale_cache_syncI,		/* icache_sync_all	*/
717 	xscale_cache_syncI_rng,		/* icache_sync_range	*/
718 
719 	xscale_cache_purgeD,		/* dcache_wbinv_all	*/
720 	xscale_cache_purgeD_rng,	/* dcache_wbinv_range	*/
721 	xscale_cache_flushD_rng,	/* dcache_inv_range	*/
722 	xscale_cache_cleanD_rng,	/* dcache_wb_range	*/
723 
724 	xscale_cache_purgeID,		/* idcache_wbinv_all	*/
725 	xscale_cache_purgeID_rng,	/* idcache_wbinv_range	*/
726 
727 	/* Other functions */
728 
729 	cpufunc_nullop,			/* flush_prefetchbuf	*/
730 	armv4_drain_writebuf,		/* drain_writebuf	*/
731 	cpufunc_nullop,			/* flush_brnchtgt_C	*/
732 	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/
733 
734 	xscale_cpu_sleep,		/* sleep		*/
735 
736 	/* Soft functions */
737 
738 	cpufunc_null_fixup,		/* dataabt_fixup	*/
739 	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
740 
741 	xscale_context_switch,		/* context_switch	*/
742 
743 	xscale_setup			/* cpu setup		*/
744 };
745 #endif /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 */
747 
748 /*
749  * Global variables also used by locore.s
750  */
751 
752 struct cpu_functions cpufuncs;
753 u_int cputype;
754 u_int cpu_reset_needs_v4_MMU_disable;	/* flag used in locore.s */
755 
756 #if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) || \
757     defined (CPU_ARM10) || \
758     defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
759     defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425)
760 static void get_cachetype_cp15 __P((void));
761 
762 /* Additional cache information local to this file.  Log2 of some of the
763    above numbers.  */
764 static int	arm_dcache_l2_nsets;
765 static int	arm_dcache_l2_assoc;
766 static int	arm_dcache_l2_linesize;
767 
768 static void
769 get_cachetype_cp15()
770 {
771 	u_int ctype, isize, dsize;
772 	u_int multiplier;
773 
774 	__asm __volatile("mrc p15, 0, %0, c0, c0, 1"
775 		: "=r" (ctype));
776 
777 	/*
778 	 * ...and thus spake the ARM ARM:
779 	 *
780 	 * If an <opcode2> value corresponding to an unimplemented or
781 	 * reserved ID register is encountered, the System Control
782 	 * processor returns the value of the main ID register.
783 	 */
784 	if (ctype == cpufunc_id())
785 		goto out;
786 
787 	if ((ctype & CPU_CT_S) == 0)
788 		arm_pcache_unified = 1;
789 
790 	/*
791 	 * If you want to know how this code works, go read the ARM ARM.
792 	 */
793 
794 	arm_pcache_type = CPU_CT_CTYPE(ctype);
795 
796 	if (arm_pcache_unified == 0) {
797 		isize = CPU_CT_ISIZE(ctype);
798 		multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
799 		arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
800 		if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
801 			if (isize & CPU_CT_xSIZE_M)
802 				arm_picache_line_size = 0; /* not present */
803 			else
804 				arm_picache_ways = 1;
805 		} else {
806 			arm_picache_ways = multiplier <<
807 			    (CPU_CT_xSIZE_ASSOC(isize) - 1);
808 		}
809 		arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
810 	}
811 
812 	dsize = CPU_CT_DSIZE(ctype);
813 	multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
814 	arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
815 	if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
816 		if (dsize & CPU_CT_xSIZE_M)
817 			arm_pdcache_line_size = 0; /* not present */
818 		else
819 			arm_pdcache_ways = 1;
820 	} else {
821 		arm_pdcache_ways = multiplier <<
822 		    (CPU_CT_xSIZE_ASSOC(dsize) - 1);
823 	}
824 	arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
825 
826 	arm_dcache_align = arm_pdcache_line_size;
827 
828 	arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
829 	arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
830 	arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
831 	    CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);
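	/*
	 * Worked example (using the ARM920T's documented cache type
	 * register value, 0x0d172172): S is set, so the caches are
	 * separate, and dsize == isize == 0x172.  Decoding dsize:
	 * LEN = 2 gives 32-byte lines, M = 0 gives multiplier 2,
	 * ASSOC = 6 gives 64 ways and SIZE = 5 gives 16KB.  The log2
	 * values above then come out as l2_assoc = 6, l2_linesize = 5
	 * and l2_nsets = 3 (16384 / (64 * 32) = 8 sets).
	 */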
832 
833  out:
834 	arm_dcache_align_mask = arm_dcache_align - 1;
835 }
836 #endif /* ARM7TDMI || ARM8 || ARM9 || ARM10 || XSCALE */
837 
838 #if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3) || \
839     defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_SA110) || \
840     defined(CPU_SA1100) || defined(CPU_SA1110) || defined(CPU_IXP12X0)
841 /* Cache information for CPUs without cache type registers. */
842 struct cachetab {
843 	u_int32_t ct_cpuid;
844 	int	ct_pcache_type;
845 	int	ct_pcache_unified;
846 	int	ct_pdcache_size;
847 	int	ct_pdcache_line_size;
848 	int	ct_pdcache_ways;
849 	int	ct_picache_size;
850 	int	ct_picache_line_size;
851 	int	ct_picache_ways;
852 };
853 
854 struct cachetab cachetab[] = {
855     /* cpuid,           cache type,       u,  dsiz, ls, wy,  isiz, ls, wy */
856     { CPU_ID_ARM2,      0,                1,     0,  0,  0,     0,  0,  0 },
857     { CPU_ID_ARM250,    0,                1,     0,  0,  0,     0,  0,  0 },
858     { CPU_ID_ARM3,      CPU_CT_CTYPE_WT,  1,  4096, 16, 64,     0,  0,  0 },
859     { CPU_ID_ARM610,	CPU_CT_CTYPE_WT,  1,  4096, 16, 64,     0,  0,  0 },
860     { CPU_ID_ARM710,    CPU_CT_CTYPE_WT,  1,  8192, 32,  4,     0,  0,  0 },
861     { CPU_ID_ARM7500,   CPU_CT_CTYPE_WT,  1,  4096, 16,  4,     0,  0,  0 },
862     { CPU_ID_ARM710A,   CPU_CT_CTYPE_WT,  1,  8192, 16,  4,     0,  0,  0 },
863     { CPU_ID_ARM7500FE, CPU_CT_CTYPE_WT,  1,  4096, 16,  4,     0,  0,  0 },
864     /* XXX is this type right for SA-1? */
865     { CPU_ID_SA110,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 },
866     { CPU_ID_SA1100,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
867     { CPU_ID_SA1110,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
868     { CPU_ID_IXP1200,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 }, /* XXX */
869     { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
870 };
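/*
 * For example, the SA-110 entry above describes separate 16KB, 32-way
 * I- and D-caches with 32-byte lines; get_cachetype_table() copies
 * those fields into the arm_p[id]cache_* variables, so arm_dcache_align
 * ends up 32 on that CPU.
 */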
871 
872 static void get_cachetype_table __P((void));
873 
874 static void
875 get_cachetype_table()
876 {
877 	int i;
878 	u_int32_t cpuid = cpufunc_id();
879 
880 	for (i = 0; cachetab[i].ct_cpuid != 0; i++) {
881 		if (cachetab[i].ct_cpuid == (cpuid & CPU_ID_CPU_MASK)) {
882 			arm_pcache_type = cachetab[i].ct_pcache_type;
883 			arm_pcache_unified = cachetab[i].ct_pcache_unified;
884 			arm_pdcache_size = cachetab[i].ct_pdcache_size;
885 			arm_pdcache_line_size =
886 			    cachetab[i].ct_pdcache_line_size;
887 			arm_pdcache_ways = cachetab[i].ct_pdcache_ways;
888 			arm_picache_size = cachetab[i].ct_picache_size;
889 			arm_picache_line_size =
890 			    cachetab[i].ct_picache_line_size;
891 			arm_picache_ways = cachetab[i].ct_picache_ways;
892 		}
893 	}
894 	arm_dcache_align = arm_pdcache_line_size;
895 
896 	arm_dcache_align_mask = arm_dcache_align - 1;
897 }
898 
899 #endif /* ARM2 || ARM250 || ARM3 || ARM6 || ARM7 || SA110 || SA1100 || SA1110 || IXP12X0 */
900 
901 /*
902  * Cannot usefully panic here, as we may not have a console yet ...
903  */
904 
905 int
906 set_cpufuncs()
907 {
908 	cputype = cpufunc_id();
909 	cputype &= CPU_ID_CPU_MASK;
910 
911 	/*
912 	 * NOTE: cpu_do_powersave defaults to off.  If we encounter a
913 	 * CPU type where we want to use it by default, then we set it.
914 	 */
915 
916 #ifdef CPU_ARM3
917 	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
918 	    (cputype & 0x00000f00) == 0x00000300) {
919 		cpufuncs = arm3_cpufuncs;
920 		cpu_reset_needs_v4_MMU_disable = 0;
921 		get_cachetype_table();
922 		return 0;
923 	}
924 #endif	/* CPU_ARM3 */
925 #ifdef CPU_ARM6
926 	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
927 	    (cputype & 0x00000f00) == 0x00000600) {
928 		cpufuncs = arm6_cpufuncs;
929 		cpu_reset_needs_v4_MMU_disable = 0;
930 		get_cachetype_table();
931 		pmap_pte_init_generic();
932 		return 0;
933 	}
934 #endif	/* CPU_ARM6 */
935 #ifdef CPU_ARM7
936 	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
937 	    CPU_ID_IS7(cputype) &&
938 	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V3) {
939 		cpufuncs = arm7_cpufuncs;
940 		cpu_reset_needs_v4_MMU_disable = 0;
941 		get_cachetype_table();
942 		pmap_pte_init_generic();
943 		return 0;
944 	}
945 #endif	/* CPU_ARM7 */
946 #ifdef CPU_ARM7TDMI
947 	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
948 	    CPU_ID_IS7(cputype) &&
949 	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V4T) {
950 		cpufuncs = arm7tdmi_cpufuncs;
951 		cpu_reset_needs_v4_MMU_disable = 0;
952 		get_cachetype_cp15();
953 		pmap_pte_init_generic();
954 		return 0;
955 	}
956 #endif
957 #ifdef CPU_ARM8
958 	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
959 	    (cputype & 0x0000f000) == 0x00008000) {
960 		cpufuncs = arm8_cpufuncs;
961 		cpu_reset_needs_v4_MMU_disable = 0;	/* XXX correct? */
962 		get_cachetype_cp15();
963 		pmap_pte_init_arm8();
964 		return 0;
965 	}
966 #endif	/* CPU_ARM8 */
967 #ifdef CPU_ARM9
968 	if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
969 	     (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
970 	    (cputype & 0x0000f000) == 0x00009000) {
971 		cpufuncs = arm9_cpufuncs;
972 		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
973 		get_cachetype_cp15();
974 		arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
975 		arm9_dcache_sets_max =
976 		    (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) -
977 		    arm9_dcache_sets_inc;
978 		arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
979 		arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
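		/*
		 * Example, assuming ARM920T-style figures (log2 values:
		 * linesize 5, nsets 3, assoc 6): sets_inc = 32,
		 * sets_max = 256 - 32 = 224, index_inc = 1 << 26 and
		 * index_max = 0xfc000000; these are the set/way operands
		 * that the arm9 cache routines step through.
		 */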
980 #ifdef	ARM9_CACHE_WRITE_THROUGH
981 		pmap_pte_init_arm9();
982 #else
983 		pmap_pte_init_generic();
984 #endif
985 		return 0;
986 	}
987 #endif /* CPU_ARM9 */
988 #ifdef CPU_ARM10
989 	if (/* cputype == CPU_ID_ARM1020T || */
990 	    cputype == CPU_ID_ARM1020E) {
991 		/*
992 		 * Select write-through caching (this isn't really an
993 		 * option on ARM1020T).
994 		 */
995 		cpufuncs = arm10_cpufuncs;
996 		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
997 		get_cachetype_cp15();
998 		arm10_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
999 		arm10_dcache_sets_max =
1000 		    (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) -
1001 		    arm10_dcache_sets_inc;
1002 		arm10_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
1003 		arm10_dcache_index_max = 0U - arm10_dcache_index_inc;
1004 		pmap_pte_init_generic();
1005 		return 0;
1006 	}
1007 #endif /* CPU_ARM10 */
1008 #ifdef CPU_SA110
1009 	if (cputype == CPU_ID_SA110) {
1010 		cpufuncs = sa110_cpufuncs;
1011 		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it */
1012 		get_cachetype_table();
1013 		pmap_pte_init_sa1();
1014 		return 0;
1015 	}
1016 #endif	/* CPU_SA110 */
1017 #ifdef CPU_SA1100
1018 	if (cputype == CPU_ID_SA1100) {
1019 		cpufuncs = sa11x0_cpufuncs;
1020 		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it	*/
1021 		get_cachetype_table();
1022 		pmap_pte_init_sa1();
1023 
1024 		/* Use powersave on this CPU. */
1025 		cpu_do_powersave = 1;
1026 
1027 		return 0;
1028 	}
1029 #endif	/* CPU_SA1100 */
1030 #ifdef CPU_SA1110
1031 	if (cputype == CPU_ID_SA1110) {
1032 		cpufuncs = sa11x0_cpufuncs;
1033 		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it	*/
1034 		get_cachetype_table();
1035 		pmap_pte_init_sa1();
1036 
1037 		/* Use powersave on this CPU. */
1038 		cpu_do_powersave = 1;
1039 
1040 		return 0;
1041 	}
1042 #endif	/* CPU_SA1110 */
1043 #ifdef CPU_IXP12X0
1044 	if (cputype == CPU_ID_IXP1200) {
1045 		cpufuncs = ixp12x0_cpufuncs;
1046 		cpu_reset_needs_v4_MMU_disable = 1;
1047 		get_cachetype_table();
1048 		pmap_pte_init_sa1();
1049 		return 0;
1050 	}
1051 #endif	/* CPU_IXP12X0 */
1052 #ifdef CPU_XSCALE_80200
1053 	if (cputype == CPU_ID_80200) {
1054 		int rev = cpufunc_id() & CPU_ID_REVISION_MASK;
1055 
1056 		i80200_icu_init();
1057 
1058 		/*
1059 		 * Reset the Performance Monitoring Unit to a
1060 		 * pristine state:
1061 		 *	- CCNT, PMN0, PMN1 reset to 0
1062 		 *	- overflow indications cleared
1063 		 *	- all counters disabled
1064 		 */
1065 		__asm __volatile("mcr p14, 0, %0, c0, c0, 0"
1066 			:
1067 			: "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
1068 			       PMNC_CC_IF));
1069 
1070 #if defined(XSCALE_CCLKCFG)
1071 		/*
1072 		 * Crank CCLKCFG to maximum legal value.
1073 		 */
1074 		__asm __volatile ("mcr p14, 0, %0, c6, c0, 0"
1075 			:
1076 			: "r" (XSCALE_CCLKCFG));
1077 #endif
1078 
1079 		/*
1080 		 * XXX Disable ECC in the Bus Controller Unit; we
1081 		 * don't really support it, yet.  Clear any pending
1082 		 * error indications.
1083 		 */
1084 		__asm __volatile("mcr p13, 0, %0, c0, c1, 0"
1085 			:
1086 			: "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));
1087 
1088 		cpufuncs = xscale_cpufuncs;
1089 #if defined(PERFCTRS)
1090 		xscale_pmu_init();
1091 #endif
1092 
1093 		/*
1094 		 * i80200 errata: Step-A0 and A1 have a bug where
1095 		 * D$ dirty bits are not cleared on "invalidate by
1096 		 * address".
1097 		 *
1098 		 * Workaround: Clean cache line before invalidating.
1099 		 */
1100 		if (rev == 0 || rev == 1)
1101 			cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng;
1102 
1103 		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
1104 		get_cachetype_cp15();
1105 		pmap_pte_init_xscale();
1106 		return 0;
1107 	}
1108 #endif /* CPU_XSCALE_80200 */
1109 #ifdef CPU_XSCALE_80321
1110 	if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
1111 	    cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0) {
1112 		i80321_icu_init();
1113 
1114 		/*
1115 		 * Reset the Performance Monitoring Unit to a
1116 		 * pristine state:
1117 		 *	- CCNT, PMN0, PMN1 reset to 0
1118 		 *	- overflow indications cleared
1119 		 *	- all counters disabled
1120 		 */
1121 		__asm __volatile("mcr p14, 0, %0, c0, c0, 0"
1122 			:
1123 			: "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
1124 			       PMNC_CC_IF));
1125 
1126 		cpufuncs = xscale_cpufuncs;
1127 #if defined(PERFCTRS)
1128 		xscale_pmu_init();
1129 #endif
1130 
1131 		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
1132 		get_cachetype_cp15();
1133 		pmap_pte_init_xscale();
1134 		return 0;
1135 	}
1136 #endif /* CPU_XSCALE_80321 */
1137 #ifdef CPU_XSCALE_PXA2X0
1138 	/* Ignore the core revision when matching PXA2xx CPUs. */
1139 	if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
1140 	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {
1141 
1142 		cpufuncs = xscale_cpufuncs;
1143 #if defined(PERFCTRS)
1144 		xscale_pmu_init();
1145 #endif
1146 
1147 		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
1148 		get_cachetype_cp15();
1149 		pmap_pte_init_xscale();
1150 
1151 		/* Use powersave on this CPU. */
1152 		cpu_do_powersave = 1;
1153 
1154 		return 0;
1155 	}
1156 #endif /* CPU_XSCALE_PXA2X0 */
1157 #ifdef CPU_XSCALE_IXP425
1158 	if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
1159 	    cputype == CPU_ID_IXP425_266) {
1160 		ixp425_icu_init();
1161 
1162 		cpufuncs = xscale_cpufuncs;
1163 #if defined(PERFCTRS)
1164 		xscale_pmu_init();
1165 #endif
1166 
1167 		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
1168 		get_cachetype_cp15();
1169 		pmap_pte_init_xscale();
1170 
1171 		return 0;
1172 	}
1173 #endif /* CPU_XSCALE_IXP425 */
1174 	/*
1175 	 * Bzzzz. And the answer was ...
1176 	 */
1177 	panic("No support for this CPU type (%08x) in kernel", cputype);
1178 	return(ARCHITECTURE_NOT_PRESENT);
1179 }
1180 
1181 /*
1182  * Fixup routines for data and prefetch aborts.
1183  *
1184  * Several compile-time symbols are used:
1185  *
1186  * DEBUG_FAULT_CORRECTION - Print debugging information during the
1187  * correction of registers after a fault.
1188  * ARM6_LATE_ABORT - The ARM6 supports both early and late aborts;
1189  * when this symbol is defined, late aborts are used.
1190  */
1191 
1192 
1193 /*
1194  * Null abort fixup routine.
1195  * For use when no fixup is required.
1196  */
1197 int
1198 cpufunc_null_fixup(arg)
1199 	void *arg;
1200 {
1201 	return(ABORT_FIXUP_OK);
1202 }
1203 
1204 
1205 #if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3) || \
1206     defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI)
1207 
1208 #ifdef DEBUG_FAULT_CORRECTION
1209 #define DFC_PRINTF(x)		printf x
1210 #define DFC_DISASSEMBLE(x)	disassemble(x)
1211 #else
1212 #define DFC_PRINTF(x)		/* nothing */
1213 #define DFC_DISASSEMBLE(x)	/* nothing */
1214 #endif
1215 
1216 /*
1217  * "Early" data abort fixup.
1218  *
1219  * For ARM2, ARM2as, ARM3 and ARM6 (in early-abort mode).  Also used
1220  * indirectly by ARM6 (in late-abort mode) and ARM7[TDMI].
1221  *
1222  * In early aborts, we may have to fix up LDM, STM, LDC and STC.
1223  */
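/*
 * For example, if "ldmia r4!, {r0-r3}" aborts: bit 21 (writeback) is
 * set, the base is r4, four registers are in the transfer list and
 * bit 23 (U, up) is set, so the base was incremented; the code below
 * subtracts count * 4 = 16 from r4 to restore its original value.
 */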
1224 int
1225 early_abort_fixup(arg)
1226 	void *arg;
1227 {
1228 	trapframe_t *frame = arg;
1229 	u_int fault_pc;
1230 	u_int fault_instruction;
1231 	int saved_lr = 0;
1232 
1233 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1234 
1235 		/* Ok an abort in SVC mode */
1236 
1237 		/*
1238 		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1239 		 * as the fault happened in svc mode but we need it in the
1240 		 * usr slot so we can treat the registers as an array of ints
1241 		 * during fixing.
1242 		 * NOTE: the PC sits in its usual slot in that array, but
1243 		 * since writeback to r15 is not allowed it never needs fixing.
1244 		 * Doing it like this is more efficient than trapping this
1245 		 * case in all possible locations in the following fixup code.
1246 		 */
1247 
1248 		saved_lr = frame->tf_usr_lr;
1249 		frame->tf_usr_lr = frame->tf_svc_lr;
1250 
1251 		/*
1252 		 * Note the trapframe does not have the SVC r13 so a fault
1253 		 * from an instruction with writeback to r13 in SVC mode is
1254 		 * not allowed. This should not happen as the kstack is
1255 		 * always valid.
1256 		 */
1257 	}
1258 
1259 	/* Get fault address and status from the CPU */
1260 
1261 	fault_pc = frame->tf_pc;
1262 	fault_instruction = *((volatile unsigned int *)fault_pc);
1263 
1264 	/* Decode the fault instruction and fix the registers as needed */
1265 
1266 	if ((fault_instruction & 0x0e000000) == 0x08000000) {
1267 		int base;
1268 		int loop;
1269 		int count;
1270 		int *registers = &frame->tf_r0;
1271 
1272 		DFC_PRINTF(("LDM/STM\n"));
1273 		DFC_DISASSEMBLE(fault_pc);
1274 		if (fault_instruction & (1 << 21)) {
1275 			DFC_PRINTF(("This instruction must be corrected\n"));
1276 			base = (fault_instruction >> 16) & 0x0f;
1277 			if (base == 15)
1278 				return ABORT_FIXUP_FAILED;
1279 			/* Count registers transferred */
1280 			count = 0;
1281 			for (loop = 0; loop < 16; ++loop) {
1282 				if (fault_instruction & (1<<loop))
1283 					++count;
1284 			}
1285 			DFC_PRINTF(("%d registers used\n", count));
1286 			DFC_PRINTF(("Corrected r%d by %d bytes ",
1287 				       base, count * 4));
1288 			if (fault_instruction & (1 << 23)) {
1289 				DFC_PRINTF(("down\n"));
1290 				registers[base] -= count * 4;
1291 			} else {
1292 				DFC_PRINTF(("up\n"));
1293 				registers[base] += count * 4;
1294 			}
1295 		}
1296 	} else if ((fault_instruction & 0x0e000000) == 0x0c000000) {
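		/* LDC/STC (coprocessor data transfer) */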
1297 		int base;
1298 		int offset;
1299 		int *registers = &frame->tf_r0;
1300 
1301 		/* REGISTER CORRECTION IS REQUIRED FOR THESE INSTRUCTIONS */
1302 
1303 		DFC_DISASSEMBLE(fault_pc);
1304 
1305 		/* Only need to fix registers if write back is turned on */
1306 
1307 		if ((fault_instruction & (1 << 21)) != 0) {
1308 			base = (fault_instruction >> 16) & 0x0f;
1309 			if (base == 13 &&
1310 			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
1311 				return ABORT_FIXUP_FAILED;
1312 			if (base == 15)
1313 				return ABORT_FIXUP_FAILED;
1314 
1315 			offset = (fault_instruction & 0xff) << 2;
1316 			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
1317 			if ((fault_instruction & (1 << 23)) != 0)
1318 				offset = -offset;
1319 			registers[base] += offset;
1320 			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
1321 		}
1322 	} else if ((fault_instruction & 0x0e000000) == 0x0c000000)
1323 		return ABORT_FIXUP_FAILED;	/* XXX unreachable: same test as the LDC/STC case above */
1324 
1325 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1326 
1327 		/* Ok an abort in SVC mode */
1328 
1329 		/*
1330 		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1331 		 * as the fault happened in svc mode but we need it in the
1332 		 * usr slot so we can treat the registers as an array of ints
1333 		 * during fixing.
1334 		 * NOTE: the PC sits in its usual slot in that array, but
1335 		 * since writeback to r15 is not allowed it never needs fixing.
1336 		 * Doing it like this is more efficient than trapping this
1337 		 * case in all possible locations in the prior fixup code.
1338 		 */
1339 
1340 		frame->tf_svc_lr = frame->tf_usr_lr;
1341 		frame->tf_usr_lr = saved_lr;
1342 
1343 		/*
1344 		 * Note the trapframe does not have the SVC r13 so a fault
1345 		 * from an instruction with writeback to r13 in SVC mode is
1346 		 * not allowed. This should not happen as the kstack is
1347 		 * always valid.
1348 		 */
1349 	}
1350 
1351 	return(ABORT_FIXUP_OK);
1352 }
1353 #endif	/* CPU_ARM2/250/3/6/7 */
1354 
1355 
1356 #if (defined(CPU_ARM6) && defined(ARM6_LATE_ABORT)) || defined(CPU_ARM7) || \
1357 	defined(CPU_ARM7TDMI)
1358 /*
1359  * "Late" (base updated) data abort fixup
1360  *
1361  * For ARM6 (in late-abort mode) and ARM7.
1362  *
1363  * In this model, all data-transfer instructions need fixing up.  We defer
1364  * LDM, STM, LDC and STC fixup to the early-abort handler.
1365  */
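/*
 * For example, a post-indexed "ldr r0, [r1], r2, lsl #2" that aborts
 * has bit 24 (P) clear, so the base r1 has already been advanced by
 * (r2 << 2); the code below re-derives the shifted offset and, bit 23
 * (U, up) being set, subtracts it to restore r1.
 */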
1366 int
1367 late_abort_fixup(arg)
1368 	void *arg;
1369 {
1370 	trapframe_t *frame = arg;
1371 	u_int fault_pc;
1372 	u_int fault_instruction;
1373 	int saved_lr = 0;
1374 
1375 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1376 
1377 		/* Ok an abort in SVC mode */
1378 
1379 		/*
1380 		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1381 		 * as the fault happened in svc mode but we need it in the
1382 		 * usr slot so we can treat the registers as an array of ints
1383 		 * during fixing.
1384 		 * NOTE: the PC sits in its usual slot in that array, but
1385 		 * since writeback to r15 is not allowed it never needs fixing.
1386 		 * Doing it like this is more efficient than trapping this
1387 		 * case in all possible locations in the following fixup code.
1388 		 */
1389 
1390 		saved_lr = frame->tf_usr_lr;
1391 		frame->tf_usr_lr = frame->tf_svc_lr;
1392 
1393 		/*
1394 		 * Note the trapframe does not have the SVC r13 so a fault
1395 		 * from an instruction with writeback to r13 in SVC mode is
1396 		 * not allowed. This should not happen as the kstack is
1397 		 * always valid.
1398 		 */
1399 	}
1400 
1401 	/* Get fault address and status from the CPU */
1402 
1403 	fault_pc = frame->tf_pc;
1404 	fault_instruction = *((volatile unsigned int *)fault_pc);
1405 
1406 	/* Decode the fault instruction and fix the registers as needed */
1407 
1408 	/* Was it a swap instruction? */
1409 
1410 	if ((fault_instruction & 0x0fb00ff0) == 0x01000090) {
1411 		DFC_DISASSEMBLE(fault_pc);
1412 	} else if ((fault_instruction & 0x0c000000) == 0x04000000) {
1413 
1414 		/* Was it an ldr/str instruction? */
1415 		/* This is for late abort only */
1416 
1417 		int base;
1418 		int offset;
1419 		int *registers = &frame->tf_r0;
1420 
1421 		DFC_DISASSEMBLE(fault_pc);
1422 
1423 		/* This is for late abort only */
1424 
1425 		if ((fault_instruction & (1 << 24)) == 0
1426 		    || (fault_instruction & (1 << 21)) != 0) {
1427 			/* post-indexed, or pre-indexed with writeback: the base was updated */
1428 
1429 			base = (fault_instruction >> 16) & 0x0f;
1430 			if (base == 13 &&
1431 			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
1432 				return ABORT_FIXUP_FAILED;
1433 			if (base == 15)
1434 				return ABORT_FIXUP_FAILED;
1435 			DFC_PRINTF(("late abt fix: r%d=%08x : ",
1436 				       base, registers[base]));
1437 			if ((fault_instruction & (1 << 25)) == 0) {
1438 				/* Immediate offset - easy */
1439 
1440 				offset = fault_instruction & 0xfff;
1441 				if ((fault_instruction & (1 << 23)))
1442 					offset = -offset;
1443 				registers[base] += offset;
1444 				DFC_PRINTF(("imm=%08x ", offset));
1445 			} else {
1446 				/* offset is a shifted register */
1447 				int shift;
1448 
1449 				offset = fault_instruction & 0x0f;
1450 				if (offset == base)
1451 					return ABORT_FIXUP_FAILED;
1452 
1453 				/*
1454 				 * Register offset - hard we have to
1455 				 * cope with shifts !
1456 				 */
1457 				offset = registers[offset];
1458 
1459 				if ((fault_instruction & (1 << 4)) == 0)
1460 					/* shift with amount */
1461 					shift = (fault_instruction >> 7) & 0x1f;
1462 				else {
1463 					/* shift with register */
1464 					if ((fault_instruction & (1 << 7)) != 0)
1465 						/* undefined for now so bail out */
1466 						return ABORT_FIXUP_FAILED;
1467 					shift = ((fault_instruction >> 8) & 0xf);
1468 					if (base == shift)
1469 						return ABORT_FIXUP_FAILED;
1470 					DFC_PRINTF(("shift reg=%d ", shift));
1471 					shift = registers[shift];
1472 				}
1473 				DFC_PRINTF(("shift=%08x ", shift));
1474 				switch (((fault_instruction >> 5) & 0x3)) {
1475 				case 0 : /* Logical left */
1476 					offset = (int)(((u_int)offset) << shift);
1477 					break;
1478 				case 1 : /* Logical Right */
1479 					if (shift == 0) shift = 32;
1480 					offset = (int)(((u_int)offset) >> shift);
1481 					break;
1482 				case 2 : /* Arithmetic Right */
1483 					if (shift == 0) shift = 32;
1484 					offset = (int)(((int)offset) >> shift);
1485 					break;
1486 				case 3 : /* Rotate right (ror or rrx) */
1487 					return ABORT_FIXUP_FAILED;
1488 					break;
1489 				}
1490 
1491 				DFC_PRINTF(("abt: fixed LDR/STR with "
1492 					       "register offset\n"));
1493 				if ((fault_instruction & (1 << 23)))
1494 					offset = -offset;
1495 				DFC_PRINTF(("offset=%08x ", offset));
1496 				registers[base] += offset;
1497 			}
1498 			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
1499 		}
1500 	}
1501 
1502 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1503 
1504 		/* Ok an abort in SVC mode */
1505 
1506 		/*
1507 		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1508 		 * as the fault happened in svc mode but we need it in the
1509 		 * usr slot so we can treat the registers as an array of ints
1510 		 * during fixing.
1511 		 * NOTE: the PC sits in its usual slot in that array, but
1512 		 * since writeback to r15 is not allowed it never needs fixing.
1513 		 * Doing it like this is more efficient than trapping this
1514 		 * case in all possible locations in the prior fixup code.
1515 		 */
1516 
1517 		frame->tf_svc_lr = frame->tf_usr_lr;
1518 		frame->tf_usr_lr = saved_lr;
1519 
1520 		/*
1521 		 * Note the trapframe does not have the SVC r13 so a fault
1522 		 * from an instruction with writeback to r13 in SVC mode is
1523 		 * not allowed. This should not happen as the kstack is
1524 		 * always valid.
1525 		 */
1526 	}
1527 
1528 	/*
1529 	 * Now let the early-abort fixup routine have a go, in case it
1530 	 * was an LDM, STM, LDC or STC that faulted.
1531 	 */
1532 
1533 	return early_abort_fixup(arg);
1534 }
1535 #endif	/* CPU_ARM6(LATE)/7/7TDMI */
1536 
1537 /*
1538  * CPU Setup code
1539  */
1540 
1541 #if defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) || \
1542 	defined(CPU_ARM8) || defined (CPU_ARM9) || defined(CPU_SA110) || \
1543 	defined(CPU_SA1100) || defined(CPU_SA1110) || \
1544 	defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
1545 	defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425)
1546 
1547 #define IGN	0
1548 #define OR	1
1549 #define BIC	2
1550 
1551 struct cpu_option {
1552 	char	*co_name;
1553 	int	co_falseop;
1554 	int	co_trueop;
1555 	int	co_value;
1556 };
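/*
 * co_falseop selects what happens to the co_value bits when the boot
 * option is false, co_trueop when it is true: IGN leaves them alone,
 * OR sets them and BIC clears them.
 */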
1557 
1558 static u_int parse_cpu_options __P((char *, struct cpu_option *, u_int));
1559 
1560 static u_int
1561 parse_cpu_options(args, optlist, cpuctrl)
1562 	char *args;
1563 	struct cpu_option *optlist;
1564 	u_int cpuctrl;
1565 {
1566 	int integer;
1567 
1568 	if (args == NULL)
1569 		return(cpuctrl);
1570 
1571 	while (optlist->co_name) {
1572 		if (get_bootconf_option(args, optlist->co_name,
1573 		    BOOTOPT_TYPE_BOOLEAN, &integer)) {
1574 			if (integer) {
1575 				if (optlist->co_trueop == OR)
1576 					cpuctrl |= optlist->co_value;
1577 				else if (optlist->co_trueop == BIC)
1578 					cpuctrl &= ~optlist->co_value;
1579 			} else {
1580 				if (optlist->co_falseop == OR)
1581 					cpuctrl |= optlist->co_value;
1582 				else if (optlist->co_falseop == BIC)
1583 					cpuctrl &= ~optlist->co_value;
1584 			}
1585 		}
1586 		++optlist;
1587 	}
1588 	return(cpuctrl);
1589 }
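/*
 * For example, if the boot arguments set "cpu.nocache" true, the
 * matching arm678_options entry below has co_trueop == BIC, so
 * CPU_CONTROL_IDC_ENABLE is cleared from cpuctrl; with it set false,
 * the co_falseop == OR path turns the cache enable bit back on.
 */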
1590 #endif /* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 || CPU_ARM9 || CPU_SA110 || CPU_SA1100 || CPU_SA1110 || XSCALE */
1591 
1592 #if defined (CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) \
1593 	|| defined(CPU_ARM8)
1594 struct cpu_option arm678_options[] = {
1595 #ifdef COMPAT_12
1596 	{ "nocache",		IGN, BIC, CPU_CONTROL_IDC_ENABLE },
1597 	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
1598 #endif	/* COMPAT_12 */
1599 	{ "cpu.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
1600 	{ "cpu.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
1601 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
1602 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
1603 	{ NULL,			IGN, IGN, 0 }
1604 };
1605 
1606 #endif	/* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 */
1607 
1608 #ifdef CPU_ARM6
1609 struct cpu_option arm6_options[] = {
1610 	{ "arm6.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
1611 	{ "arm6.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
1612 	{ "arm6.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
1613 	{ "arm6.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
1614 	{ NULL,			IGN, IGN, 0 }
1615 };
1616 
1617 void
1618 arm6_setup(args)
1619 	char *args;
1620 {
1621 	int cpuctrl, cpuctrlmask;
1622 
1623 	/* Set up the default control register bits */
1624 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1625 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1626 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
1627 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1628 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1629 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
1630 		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE
1631 		 | CPU_CONTROL_AFLT_ENABLE;
1632 
1633 #ifdef ARM6_LATE_ABORT
1634 	cpuctrl |= CPU_CONTROL_LABT_ENABLE;
1635 #endif	/* ARM6_LATE_ABORT */
1636 
1637 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1638 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1639 #endif
1640 
1641 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1642 	cpuctrl = parse_cpu_options(args, arm6_options, cpuctrl);
1643 
1644 #ifdef __ARMEB__
1645 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1646 #endif
1647 
1648 	/* Clear out the cache */
1649 	cpu_idcache_wbinv_all();
1650 
1651 	/* Set the control register */
1652 	curcpu()->ci_ctrl = cpuctrl;
1653 	cpu_control(0xffffffff, cpuctrl);
1654 }
1655 #endif	/* CPU_ARM6 */
1656 
1657 #ifdef CPU_ARM7
1658 struct cpu_option arm7_options[] = {
1659 	{ "arm7.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
1660 	{ "arm7.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
1661 	{ "arm7.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
1662 	{ "arm7.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
1663 #ifdef COMPAT_12
1664 	{ "fpaclk2",		BIC, OR,  CPU_CONTROL_CPCLK },
1665 #endif	/* COMPAT_12 */
1666 	{ "arm700.fpaclk",	BIC, OR,  CPU_CONTROL_CPCLK },
1667 	{ NULL,			IGN, IGN, 0 }
1668 };
1669 
1670 void
1671 arm7_setup(args)
1672 	char *args;
1673 {
1674 	int cpuctrl, cpuctrlmask;
1675 
1676 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1677 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1678 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
1679 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1680 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1681 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
1682 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_LABT_ENABLE
1683 		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE
1684 		 | CPU_CONTROL_AFLT_ENABLE;
1685 
1686 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1687 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1688 #endif
1689 
1690 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1691 	cpuctrl = parse_cpu_options(args, arm7_options, cpuctrl);
1692 
1693 #ifdef __ARMEB__
1694 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1695 #endif
1696 
1697 	/* Clear out the cache */
1698 	cpu_idcache_wbinv_all();
1699 
1700 	/* Set the control register */
1701 	curcpu()->ci_ctrl = cpuctrl;
1702 	cpu_control(0xffffffff, cpuctrl);
1703 }
1704 #endif	/* CPU_ARM7 */
1705 
1706 #ifdef CPU_ARM7TDMI
1707 struct cpu_option arm7tdmi_options[] = {
1708 	{ "arm7.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
1709 	{ "arm7.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
1710 	{ "arm7.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
1711 	{ "arm7.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
1712 #ifdef COMPAT_12
1713 	{ "fpaclk2",		BIC, OR,  CPU_CONTROL_CPCLK },
1714 #endif	/* COMPAT_12 */
1715 	{ "arm700.fpaclk",	BIC, OR,  CPU_CONTROL_CPCLK },
1716 	{ NULL,			IGN, IGN, 0 }
1717 };
1718 
1719 void
1720 arm7tdmi_setup(args)
1721 	char *args;
1722 {
1723 	int cpuctrl;
1724 
1725 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1726 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1727 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
1728 
1729 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1730 	cpuctrl = parse_cpu_options(args, arm7tdmi_options, cpuctrl);
1731 
1732 #ifdef __ARMEB__
1733 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1734 #endif
1735 
1736 	/* Clear out the cache */
1737 	cpu_idcache_wbinv_all();
1738 
1739 	/* Set the control register */
1740 	curcpu()->ci_ctrl = cpuctrl;
1741 	cpu_control(0xffffffff, cpuctrl);
1742 }
1743 #endif	/* CPU_ARM7TDMI */
1744 
1745 #ifdef CPU_ARM8
1746 struct cpu_option arm8_options[] = {
1747 	{ "arm8.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
1748 	{ "arm8.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
1749 	{ "arm8.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
1750 	{ "arm8.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
1751 #ifdef COMPAT_12
1752 	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
1753 #endif	/* COMPAT_12 */
1754 	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
1755 	{ "arm8.branchpredict",	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
1756 	{ NULL,			IGN, IGN, 0 }
1757 };
1758 
1759 void
1760 arm8_setup(args)
1761 	char *args;
1762 {
1763 	int integer;
1764 	int cpuctrl, cpuctrlmask;
1765 	int clocktest;
1766 	int setclock = 0;
1767 
1768 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1769 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1770 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
1771 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1772 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1773 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
1774 		 | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_ROM_ENABLE
1775 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE;
1776 
1777 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1778 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1779 #endif
1780 
1781 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1782 	cpuctrl = parse_cpu_options(args, arm8_options, cpuctrl);
1783 
1784 #ifdef __ARMEB__
1785 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1786 #endif
1787 
1788 	/* Get clock configuration */
1789 	clocktest = arm8_clock_config(0, 0) & 0x0f;
1790 
1791 	/* Special ARM8 clock and test configuration */
1792 	if (get_bootconf_option(args, "arm8.clock.reset", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1793 		clocktest = 0;
1794 		setclock = 1;
1795 	}
1796 	if (get_bootconf_option(args, "arm8.clock.dynamic", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1797 		if (integer)
1798 			clocktest |= 0x01;
1799 		else
1800 			clocktest &= ~(0x01);
1801 		setclock = 1;
1802 	}
1803 	if (get_bootconf_option(args, "arm8.clock.sync", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1804 		if (integer)
1805 			clocktest |= 0x02;
1806 		else
1807 			clocktest &= ~(0x02);
1808 		setclock = 1;
1809 	}
1810 	if (get_bootconf_option(args, "arm8.clock.fast", BOOTOPT_TYPE_BININT, &integer)) {
1811 		clocktest = (clocktest & ~0x0c) | ((integer & 3) << 2); /* fast-clock field, bits 3:2 */
1812 		setclock = 1;
1813 	}
1814 	if (get_bootconf_option(args, "arm8.test", BOOTOPT_TYPE_BININT, &integer)) {
1815 		clocktest |= (integer & 7) << 5;
1816 		setclock = 1;
1817 	}
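	/*
	 * Layout of the clock/test word assembled above, as implied by
	 * the parsing code (assumed from this file, not taken from ARM8
	 * documentation):
	 *
	 *	bit  0    dynamic clock switching ("arm8.clock.dynamic")
	 *	bit  1    synchronous clock switching ("arm8.clock.sync")
	 *	bits 3:2  fast clock select ("arm8.clock.fast")
	 *	bits 7:5  test bits ("arm8.test")
	 *
	 * e.g. "arm8.clock.fast=2" contributes (2 & 3) << 2 == 0x08.
	 */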
1818 
1819 	/* Clear out the cache */
1820 	cpu_idcache_wbinv_all();
1821 
1822 	/* Set the control register */
1823 	curcpu()->ci_ctrl = cpuctrl;
1824 	cpu_control(0xffffffff, cpuctrl);
1825 
1826 	/* Set the clock/test register */
1827 	if (setclock)
1828 		arm8_clock_config(0x7f, clocktest);
1829 }
1830 #endif	/* CPU_ARM8 */
1831 
1832 #ifdef CPU_ARM9
1833 struct cpu_option arm9_options[] = {
1834 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1835 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1836 	{ "arm9.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1837 	{ "arm9.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
1838 	{ "arm9.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
1839 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
1840 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
1841 	{ "arm9.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
1842 	{ NULL,			IGN, IGN, 0 }
1843 };
1844 
1845 void
1846 arm9_setup(args)
1847 	char *args;
1848 {
1849 	int cpuctrl, cpuctrlmask;
1850 
1851 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1852 	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1853 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1854 	    | CPU_CONTROL_WBUF_ENABLE;
1855 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1856 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1857 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1858 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1859 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1860 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
1861 		 | CPU_CONTROL_ROUNDROBIN;
1862 
1863 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1864 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1865 #endif
1866 
1867 	cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);
1868 
1869 #ifdef __ARMEB__
1870 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1871 #endif
1872 
1873 	/* Clear out the cache */
1874 	cpu_idcache_wbinv_all();
1875 
1876 	/* Set the control register */
1877 	curcpu()->ci_ctrl = cpuctrl;
1878 	cpu_control(cpuctrlmask, cpuctrl);
1880 }
1881 #endif	/* CPU_ARM9 */
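/*
 * Unlike most of the setup routines here, which pass 0xffffffff and so
 * rewrite the whole control register, arm9_setup() above (and
 * ixp12x0_setup() below) passes cpuctrlmask, leaving bits outside the
 * mask untouched.  A minimal sketch of the read-modify-write this
 * implies, assuming the BIC-then-EOR behaviour of the assembler
 * control routine:
 */
#if 0	/* illustrative only, not compiled */
static u_int
cpu_control_sketch(u_int clear, u_int set)
{
	u_int oldctrl, newctrl;

	__asm __volatile("mrc p15, 0, %0, c1, c0, 0" : "=r" (oldctrl));
	newctrl = (oldctrl & ~clear) ^ set;	/* clear, then toggle in */
	__asm __volatile("mcr p15, 0, %0, c1, c0, 0" : : "r" (newctrl));
	return (oldctrl);
}
#endif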
1882 
1883 #ifdef CPU_ARM10
1884 struct cpu_option arm10_options[] = {
1885 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1886 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1887 	{ "arm10.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1888 	{ "arm10.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
1889 	{ "arm10.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
1890 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
1891 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
1892 	{ "arm10.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
1893 	{ NULL,			IGN, IGN, 0 }
1894 };
1895 
1896 void
1897 arm10_setup(args)
1898 	char *args;
1899 {
1900 	int cpuctrl, cpuctrlmask;
1901 
1902 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1903 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1904 	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
1905 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1906 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1907 	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1908 	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1909 	    | CPU_CONTROL_BPRD_ENABLE
1910 	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
1911 
1912 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1913 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1914 #endif
1915 
1916 	cpuctrl = parse_cpu_options(args, arm10_options, cpuctrl);
1917 
1918 #ifdef __ARMEB__
1919 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1920 #endif
1921 
1922 	/* Clear out the cache */
1923 	cpu_idcache_wbinv_all();
1924 
1925 	/* Now really make sure they are clean: invalidate both caches directly.  */
1926 	asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
1927 
1928 	/* Set the control register */
1929 	curcpu()->ci_ctrl = cpuctrl;
1930 	cpu_control(0xffffffff, cpuctrl);
1931 
1932 	/* And flush again now that the new configuration is live. */
1933 	cpu_idcache_wbinv_all();
1934 }
1935 #endif	/* CPU_ARM10 */
1936 
1937 #ifdef CPU_SA110
1938 struct cpu_option sa110_options[] = {
1939 #ifdef COMPAT_12
1940 	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1941 	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
1942 #endif	/* COMPAT_12 */
1943 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1944 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1945 	{ "sa110.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1946 	{ "sa110.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
1947 	{ "sa110.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
1948 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
1949 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
1950 	{ "sa110.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
1951 	{ NULL,			IGN, IGN, 0 }
1952 };
1953 
1954 void
1955 sa110_setup(args)
1956 	char *args;
1957 {
1958 	int cpuctrl, cpuctrlmask;
1959 
1960 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1961 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1962 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1963 		 | CPU_CONTROL_WBUF_ENABLE;
1964 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1965 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1966 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1967 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1968 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1969 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1970 		 | CPU_CONTROL_CPCLK;
1971 
1972 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1973 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1974 #endif
1975 
1976 	cpuctrl = parse_cpu_options(args, sa110_options, cpuctrl);
1977 
1978 #ifdef __ARMEB__
1979 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1980 #endif
1981 
1982 	/* Clear out the cache */
1983 	cpu_idcache_wbinv_all();
1984 
1985 	/* Set the control register */
1986 	curcpu()->ci_ctrl = cpuctrl;
1987 /*	cpu_control(cpuctrlmask, cpuctrl);*/
1988 	cpu_control(0xffffffff, cpuctrl);
1989 
1990 	/*
1991 	 * Enable clock switching.  Note that this instruction neither
1992 	 * reads nor writes r0; r0 appears only to make the asm valid.
1993 	 */
1994 	__asm ("mcr p15, 0, r0, c15, c1, 2");
1995 }
1996 #endif	/* CPU_SA110 */
1997 
1998 #if defined(CPU_SA1100) || defined(CPU_SA1110)
1999 struct cpu_option sa11x0_options[] = {
2000 #ifdef COMPAT_12
2001 	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2002 	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
2003 #endif	/* COMPAT_12 */
2004 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2005 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2006 	{ "sa11x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2007 	{ "sa11x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2008 	{ "sa11x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2009 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2010 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2011 	{ "sa11x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2012 	{ NULL,			IGN, IGN, 0 }
2013 };
2014 
2015 void
2016 sa11x0_setup(args)
2017 	char *args;
2018 {
2019 	int cpuctrl, cpuctrlmask;
2020 
2021 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2022 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2023 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2024 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
2025 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2026 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2027 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2028 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2029 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2030 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
2031 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
2032 
2033 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2034 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2035 #endif
2036 
2037 	cpuctrl = parse_cpu_options(args, sa11x0_options, cpuctrl);
2038 
2039 #ifdef __ARMEB__
2040 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2041 #endif
2042 
2043 	if (vector_page == ARM_VECTORS_HIGH)
2044 		cpuctrl |= CPU_CONTROL_VECRELOC;
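	/*
	 * ARM_VECTORS_HIGH is 0xffff0000; setting CPU_CONTROL_VECRELOC
	 * (the V bit) makes the CPU take exceptions through vectors at
	 * that address instead of address 0.
	 */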
2045 
2046 	/* Clear out the cache */
2047 	cpu_idcache_wbinv_all();
2048 
2049 	/* Set the control register */
	curcpu()->ci_ctrl = cpuctrl;	/* record it, as the other setups do */
2050 	cpu_control(0xffffffff, cpuctrl);
2051 }
2052 #endif	/* CPU_SA1100 || CPU_SA1110 */
2053 
2054 #if defined(CPU_IXP12X0)
2055 struct cpu_option ixp12x0_options[] = {
2056 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2057 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2058 	{ "ixp12x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2059 	{ "ixp12x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2060 	{ "ixp12x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2061 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2062 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2063 	{ "ixp12x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2064 	{ NULL,			IGN, IGN, 0 }
2065 };
2066 
2067 void
2068 ixp12x0_setup(args)
2069 	char *args;
2070 {
2071 	int cpuctrl, cpuctrlmask;
2072 
2074 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE
2075 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_SYST_ENABLE
2076 		 | CPU_CONTROL_IC_ENABLE;
2077 
2078 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_AFLT_ENABLE
2079 		 | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE
2080 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_SYST_ENABLE
2081 		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_IC_ENABLE
2082 		 | CPU_CONTROL_VECRELOC;
2083 
2084 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2085 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2086 #endif
2087 
2088 	cpuctrl = parse_cpu_options(args, ixp12x0_options, cpuctrl);
2089 
2090 #ifdef __ARMEB__
2091 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2092 #endif
2093 
2094 	if (vector_page == ARM_VECTORS_HIGH)
2095 		cpuctrl |= CPU_CONTROL_VECRELOC;
2096 
2097 	/* Clear out the cache */
2098 	cpu_idcache_wbinv_all();
2099 
2100 	/* Set the control register */
2101 	curcpu()->ci_ctrl = cpuctrl;
2102 	/* cpu_control(0xffffffff, cpuctrl); */
2103 	cpu_control(cpuctrlmask, cpuctrl);
2104 }
2105 #endif /* CPU_IXP12X0 */
2106 
2107 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
2108     defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425)
2109 struct cpu_option xscale_options[] = {
2110 #ifdef COMPAT_12
2111 	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2112 	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2113 #endif	/* COMPAT_12 */
2114 	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2115 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2116 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2117 	{ "xscale.branchpredict", BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2118 	{ "xscale.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2119 	{ "xscale.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2120 	{ "xscale.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2121 	{ NULL,			IGN, IGN, 0 }
2122 };
2123 
2124 void
2125 xscale_setup(args)
2126 	char *args;
2127 {
2128 	uint32_t auxctl;
2129 	int cpuctrl, cpuctrlmask;
2130 
2131 	/*
2132 	 * The XScale Write Buffer is always enabled.  The only choice
2133 	 * we have is whether or not writes are coalesced.  Note that
2134 	 * control-register bits 6:3 must always be set to 1.
2135 	 */
2136 
2137 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2138 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2139 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2140 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
2141 		 | CPU_CONTROL_BPRD_ENABLE;
2142 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2143 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2144 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2145 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2146 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2147 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
2148 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
2149 
2150 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2151 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2152 #endif
2153 
2154 	cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl);
2155 
2156 #ifdef __ARMEB__
2157 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2158 #endif
2159 
2160 	if (vector_page == ARM_VECTORS_HIGH)
2161 		cpuctrl |= CPU_CONTROL_VECRELOC;
2162 
2163 	/* Clear out the cache */
2164 	cpu_idcache_wbinv_all();
2165 
2166 	/*
2167 	 * Set the control register.  Note that bits 6:3 must always
2168 	 * be set to 1.
2169 	 */
2170 	curcpu()->ci_ctrl = cpuctrl;
2171 /*	cpu_control(cpuctrlmask, cpuctrl);*/
2172 	cpu_control(0xffffffff, cpuctrl);
2173 
2174 	/* Configure write coalescing as requested (K bit set = disabled) */
2175 	__asm __volatile("mrc p15, 0, %0, c1, c0, 1"
2176 		: "=r" (auxctl));
2177 #ifdef XSCALE_NO_COALESCE_WRITES
2178 	auxctl |= XSCALE_AUXCTL_K;
2179 #else
2180 	auxctl &= ~XSCALE_AUXCTL_K;
2181 #endif
2182 	__asm __volatile("mcr p15, 0, %0, c1, c0, 1"
2183 		: : "r" (auxctl));
2184 }
2185 #endif	/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 */
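/*
 * A minimal sketch (illustrative only) of reading the XScale auxiliary
 * control register back after xscale_setup(), to confirm that the
 * coalescing (K) bit ended up in the requested state:
 */
#if 0	/* not compiled */
static void
xscale_check_coalescing(void)
{
	uint32_t auxctl;

	__asm __volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (auxctl));
#ifdef XSCALE_NO_COALESCE_WRITES
	KASSERT(auxctl & XSCALE_AUXCTL_K);		/* coalescing off */
#else
	KASSERT((auxctl & XSCALE_AUXCTL_K) == 0);	/* coalescing on */
#endif
}
#endif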
2186