1 /*	$NetBSD: cpufunc.c,v 1.74 2005/12/11 12:16:41 christos Exp $	*/
2 
3 /*
4  * arm7tdmi support code Copyright (c) 2001 John Fremlin
5  * arm8 support code Copyright (c) 1997 ARM Limited
6  * arm8 support code Copyright (c) 1997 Causality Limited
7  * arm9 support code Copyright (C) 2001 ARM Ltd
8  * Copyright (c) 1997 Mark Brinicombe.
9  * Copyright (c) 1997 Causality Limited
10  * All rights reserved.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. All advertising materials mentioning features or use of this software
21  *    must display the following acknowledgement:
22  *	This product includes software developed by Causality Limited.
23  * 4. The name of Causality Limited may not be used to endorse or promote
24  *    products derived from this software without specific prior written
25  *    permission.
26  *
27  * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
28  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
29  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
30  * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
31  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
32  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
33  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37  * SUCH DAMAGE.
38  *
39  * RiscBSD kernel project
40  *
41  * cpufuncs.c
42  *
43  * C functions for supporting CPU / MMU / TLB specific operations.
44  *
45  * Created      : 30/01/97
46  */
47 
48 #include <sys/cdefs.h>
49 __KERNEL_RCSID(0, "$NetBSD: cpufunc.c,v 1.74 2005/12/11 12:16:41 christos Exp $");
50 
51 #include "opt_compat_netbsd.h"
52 #include "opt_cpuoptions.h"
53 #include "opt_perfctrs.h"
54 
55 #include <sys/types.h>
56 #include <sys/param.h>
57 #include <sys/pmc.h>
58 #include <sys/systm.h>
59 #include <machine/cpu.h>
60 #include <machine/bootconfig.h>
61 #include <arch/arm/arm/disassem.h>
62 
63 #include <uvm/uvm.h>
64 
65 #include <arm/cpuconf.h>
66 #include <arm/cpufunc.h>
67 
68 #ifdef CPU_XSCALE_80200
69 #include <arm/xscale/i80200reg.h>
70 #include <arm/xscale/i80200var.h>
71 #endif
72 
73 #ifdef CPU_XSCALE_80321
74 #include <arm/xscale/i80321reg.h>
75 #include <arm/xscale/i80321var.h>
76 #endif
77 
78 #ifdef CPU_XSCALE_IXP425
79 #include <arm/xscale/ixp425reg.h>
80 #include <arm/xscale/ixp425var.h>
81 #endif
82 
83 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321)
84 #include <arm/xscale/xscalereg.h>
85 #endif
86 
87 #if defined(PERFCTRS)
88 struct arm_pmc_funcs *arm_pmc;
89 #endif
90 
91 /* PRIMARY CACHE VARIABLES */
92 int	arm_picache_size;
93 int	arm_picache_line_size;
94 int	arm_picache_ways;
95 
96 int	arm_pdcache_size;	/* and unified */
97 int	arm_pdcache_line_size;
98 int	arm_pdcache_ways;
99 
100 int	arm_pcache_type;
101 int	arm_pcache_unified;
102 
103 int	arm_dcache_align;
104 int	arm_dcache_align_mask;
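/*
 * Illustrative use (not from the original file): range-based cache
 * operations typically round their bounds to cache-line boundaries
 * with something like
 *
 *	eva = (va + len + arm_dcache_align_mask) & ~arm_dcache_align_mask;
 *	va &= ~arm_dcache_align_mask;
 *
 * which is why the mask is kept alongside the line size.
 */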
105 
106 /* 1 == use cpu_sleep(), 0 == don't */
107 int cpu_do_powersave;
108 
109 #ifdef CPU_ARM3
110 struct cpu_functions arm3_cpufuncs = {
111 	/* CPU functions */
112 
113 	cpufunc_id,			/* id			*/
114 	cpufunc_nullop,			/* cpwait		*/
115 
116 	/* MMU functions */
117 
118 	arm3_control,			/* control		*/
119 	NULL,				/* domain		*/
120 	NULL,				/* setttb		*/
121 	NULL,				/* faultstatus		*/
122 	NULL,				/* faultaddress		*/
123 
124 	/* TLB functions */
125 
126 	cpufunc_nullop,			/* tlb_flushID		*/
127 	(void *)cpufunc_nullop,		/* tlb_flushID_SE	*/
128 	cpufunc_nullop,			/* tlb_flushI		*/
129 	(void *)cpufunc_nullop,		/* tlb_flushI_SE	*/
130 	cpufunc_nullop,			/* tlb_flushD		*/
131 	(void *)cpufunc_nullop,		/* tlb_flushD_SE	*/
132 
133 	/* Cache operations */
134 
135 	cpufunc_nullop,			/* icache_sync_all	*/
136 	(void *) cpufunc_nullop,	/* icache_sync_range	*/
137 
138 	arm3_cache_flush,		/* dcache_wbinv_all	*/
139 	(void *)arm3_cache_flush,	/* dcache_wbinv_range	*/
140 	(void *)arm3_cache_flush,	/* dcache_inv_range	*/
141 	(void *)cpufunc_nullop,		/* dcache_wb_range	*/
142 
143 	arm3_cache_flush,		/* idcache_wbinv_all	*/
144 	(void *)arm3_cache_flush,	/* idcache_wbinv_range	*/
145 
146 	/* Other functions */
147 
148 	cpufunc_nullop,			/* flush_prefetchbuf	*/
149 	cpufunc_nullop,			/* drain_writebuf	*/
150 	cpufunc_nullop,			/* flush_brnchtgt_C	*/
151 	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/
152 
153 	(void *)cpufunc_nullop,		/* sleep		*/
154 
155 	/* Soft functions */
156 
157 	early_abort_fixup,		/* dataabt_fixup	*/
158 	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
159 
160 	NULL,				/* context_switch	*/
161 
162 	(void *)cpufunc_nullop		/* cpu setup		*/
163 
164 };
165 #endif	/* CPU_ARM3 */
166 
167 #ifdef CPU_ARM6
168 struct cpu_functions arm6_cpufuncs = {
169 	/* CPU functions */
170 
171 	cpufunc_id,			/* id			*/
172 	cpufunc_nullop,			/* cpwait		*/
173 
174 	/* MMU functions */
175 
176 	cpufunc_control,		/* control		*/
177 	cpufunc_domains,		/* domain		*/
178 	arm67_setttb,			/* setttb		*/
179 	cpufunc_faultstatus,		/* faultstatus		*/
180 	cpufunc_faultaddress,		/* faultaddress		*/
181 
182 	/* TLB functions */
183 
184 	arm67_tlb_flush,		/* tlb_flushID		*/
185 	arm67_tlb_purge,		/* tlb_flushID_SE	*/
186 	arm67_tlb_flush,		/* tlb_flushI		*/
187 	arm67_tlb_purge,		/* tlb_flushI_SE	*/
188 	arm67_tlb_flush,		/* tlb_flushD		*/
189 	arm67_tlb_purge,		/* tlb_flushD_SE	*/
190 
191 	/* Cache operations */
192 
193 	cpufunc_nullop,			/* icache_sync_all	*/
194 	(void *) cpufunc_nullop,	/* icache_sync_range	*/
195 
196 	arm67_cache_flush,		/* dcache_wbinv_all	*/
197 	(void *)arm67_cache_flush,	/* dcache_wbinv_range	*/
198 	(void *)arm67_cache_flush,	/* dcache_inv_range	*/
199 	(void *)cpufunc_nullop,		/* dcache_wb_range	*/
200 
201 	arm67_cache_flush,		/* idcache_wbinv_all	*/
202 	(void *)arm67_cache_flush,	/* idcache_wbinv_range	*/
203 
204 	/* Other functions */
205 
206 	cpufunc_nullop,			/* flush_prefetchbuf	*/
207 	cpufunc_nullop,			/* drain_writebuf	*/
208 	cpufunc_nullop,			/* flush_brnchtgt_C	*/
209 	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/
210 
211 	(void *)cpufunc_nullop,		/* sleep		*/
212 
213 	/* Soft functions */
214 
215 #ifdef ARM6_LATE_ABORT
216 	late_abort_fixup,		/* dataabt_fixup	*/
217 #else
218 	early_abort_fixup,		/* dataabt_fixup	*/
219 #endif
220 	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
221 
222 	arm67_context_switch,		/* context_switch	*/
223 
224 	arm6_setup			/* cpu setup		*/
225 
226 };
227 #endif	/* CPU_ARM6 */
228 
229 #ifdef CPU_ARM7
230 struct cpu_functions arm7_cpufuncs = {
231 	/* CPU functions */
232 
233 	cpufunc_id,			/* id			*/
234 	cpufunc_nullop,			/* cpwait		*/
235 
236 	/* MMU functions */
237 
238 	cpufunc_control,		/* control		*/
239 	cpufunc_domains,		/* domain		*/
240 	arm67_setttb,			/* setttb		*/
241 	cpufunc_faultstatus,		/* faultstatus		*/
242 	cpufunc_faultaddress,		/* faultaddress		*/
243 
244 	/* TLB functions */
245 
246 	arm67_tlb_flush,		/* tlb_flushID		*/
247 	arm67_tlb_purge,		/* tlb_flushID_SE	*/
248 	arm67_tlb_flush,		/* tlb_flushI		*/
249 	arm67_tlb_purge,		/* tlb_flushI_SE	*/
250 	arm67_tlb_flush,		/* tlb_flushD		*/
251 	arm67_tlb_purge,		/* tlb_flushD_SE	*/
252 
253 	/* Cache operations */
254 
255 	cpufunc_nullop,			/* icache_sync_all	*/
256 	(void *)cpufunc_nullop,		/* icache_sync_range	*/
257 
258 	arm67_cache_flush,		/* dcache_wbinv_all	*/
259 	(void *)arm67_cache_flush,	/* dcache_wbinv_range	*/
260 	(void *)arm67_cache_flush,	/* dcache_inv_range	*/
261 	(void *)cpufunc_nullop,		/* dcache_wb_range	*/
262 
263 	arm67_cache_flush,		/* idcache_wbinv_all	*/
264 	(void *)arm67_cache_flush,	/* idcache_wbinv_range	*/
265 
266 	/* Other functions */
267 
268 	cpufunc_nullop,			/* flush_prefetchbuf	*/
269 	cpufunc_nullop,			/* drain_writebuf	*/
270 	cpufunc_nullop,			/* flush_brnchtgt_C	*/
271 	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/
272 
273 	(void *)cpufunc_nullop,		/* sleep		*/
274 
275 	/* Soft functions */
276 
277 	late_abort_fixup,		/* dataabt_fixup	*/
278 	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
279 
280 	arm67_context_switch,		/* context_switch	*/
281 
282 	arm7_setup			/* cpu setup		*/
283 
284 };
285 #endif	/* CPU_ARM7 */
286 
287 #ifdef CPU_ARM7TDMI
288 struct cpu_functions arm7tdmi_cpufuncs = {
289 	/* CPU functions */
290 
291 	cpufunc_id,			/* id			*/
292 	cpufunc_nullop,			/* cpwait		*/
293 
294 	/* MMU functions */
295 
296 	cpufunc_control,		/* control		*/
297 	cpufunc_domains,		/* domain		*/
298 	arm7tdmi_setttb,		/* setttb		*/
299 	cpufunc_faultstatus,		/* faultstatus		*/
300 	cpufunc_faultaddress,		/* faultaddress		*/
301 
302 	/* TLB functions */
303 
304 	arm7tdmi_tlb_flushID,		/* tlb_flushID		*/
305 	arm7tdmi_tlb_flushID_SE,	/* tlb_flushID_SE	*/
306 	arm7tdmi_tlb_flushID,		/* tlb_flushI		*/
307 	arm7tdmi_tlb_flushID_SE,	/* tlb_flushI_SE	*/
308 	arm7tdmi_tlb_flushID,		/* tlb_flushD		*/
309 	arm7tdmi_tlb_flushID_SE,	/* tlb_flushD_SE	*/
310 
311 	/* Cache operations */
312 
313 	cpufunc_nullop,			/* icache_sync_all	*/
314 	(void *)cpufunc_nullop,		/* icache_sync_range	*/
315 
316 	arm7tdmi_cache_flushID,		/* dcache_wbinv_all	*/
317 	(void *)arm7tdmi_cache_flushID,	/* dcache_wbinv_range	*/
318 	(void *)arm7tdmi_cache_flushID,	/* dcache_inv_range	*/
319 	(void *)cpufunc_nullop,		/* dcache_wb_range	*/
320 
321 	arm7tdmi_cache_flushID,		/* idcache_wbinv_all	*/
322 	(void *)arm7tdmi_cache_flushID,	/* idcache_wbinv_range	*/
323 
324 	/* Other functions */
325 
326 	cpufunc_nullop,			/* flush_prefetchbuf	*/
327 	cpufunc_nullop,			/* drain_writebuf	*/
328 	cpufunc_nullop,			/* flush_brnchtgt_C	*/
329 	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/
330 
331 	(void *)cpufunc_nullop,		/* sleep		*/
332 
333 	/* Soft functions */
334 
335 	late_abort_fixup,		/* dataabt_fixup	*/
336 	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
337 
338 	arm7tdmi_context_switch,	/* context_switch	*/
339 
340 	arm7tdmi_setup			/* cpu setup		*/
341 
342 };
343 #endif	/* CPU_ARM7TDMI */
344 
345 #ifdef CPU_ARM8
346 struct cpu_functions arm8_cpufuncs = {
347 	/* CPU functions */
348 
349 	cpufunc_id,			/* id			*/
350 	cpufunc_nullop,			/* cpwait		*/
351 
352 	/* MMU functions */
353 
354 	cpufunc_control,		/* control		*/
355 	cpufunc_domains,		/* domain		*/
356 	arm8_setttb,			/* setttb		*/
357 	cpufunc_faultstatus,		/* faultstatus		*/
358 	cpufunc_faultaddress,		/* faultaddress		*/
359 
360 	/* TLB functions */
361 
362 	arm8_tlb_flushID,		/* tlb_flushID		*/
363 	arm8_tlb_flushID_SE,		/* tlb_flushID_SE	*/
364 	arm8_tlb_flushID,		/* tlb_flushI		*/
365 	arm8_tlb_flushID_SE,		/* tlb_flushI_SE	*/
366 	arm8_tlb_flushID,		/* tlb_flushD		*/
367 	arm8_tlb_flushID_SE,		/* tlb_flushD_SE	*/
368 
369 	/* Cache operations */
370 
371 	cpufunc_nullop,			/* icache_sync_all	*/
372 	(void *)cpufunc_nullop,		/* icache_sync_range	*/
373 
374 	arm8_cache_purgeID,		/* dcache_wbinv_all	*/
375 	(void *)arm8_cache_purgeID,	/* dcache_wbinv_range	*/
376 /*XXX*/	(void *)arm8_cache_purgeID,	/* dcache_inv_range	*/
377 	(void *)arm8_cache_cleanID,	/* dcache_wb_range	*/
378 
379 	arm8_cache_purgeID,		/* idcache_wbinv_all	*/
380 	(void *)arm8_cache_purgeID,	/* idcache_wbinv_range	*/
381 
382 	/* Other functions */
383 
384 	cpufunc_nullop,			/* flush_prefetchbuf	*/
385 	cpufunc_nullop,			/* drain_writebuf	*/
386 	cpufunc_nullop,			/* flush_brnchtgt_C	*/
387 	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/
388 
389 	(void *)cpufunc_nullop,		/* sleep		*/
390 
391 	/* Soft functions */
392 
393 	cpufunc_null_fixup,		/* dataabt_fixup	*/
394 	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
395 
396 	arm8_context_switch,		/* context_switch	*/
397 
398 	arm8_setup			/* cpu setup		*/
399 };
400 #endif	/* CPU_ARM8 */
401 
402 #ifdef CPU_ARM9
403 struct cpu_functions arm9_cpufuncs = {
404 	/* CPU functions */
405 
406 	cpufunc_id,			/* id			*/
407 	cpufunc_nullop,			/* cpwait		*/
408 
409 	/* MMU functions */
410 
411 	cpufunc_control,		/* control		*/
412 	cpufunc_domains,		/* domain		*/
413 	arm9_setttb,			/* setttb		*/
414 	cpufunc_faultstatus,		/* faultstatus		*/
415 	cpufunc_faultaddress,		/* faultaddress		*/
416 
417 	/* TLB functions */
418 
419 	armv4_tlb_flushID,		/* tlb_flushID		*/
420 	arm9_tlb_flushID_SE,		/* tlb_flushID_SE	*/
421 	armv4_tlb_flushI,		/* tlb_flushI		*/
422 	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
423 	armv4_tlb_flushD,		/* tlb_flushD		*/
424 	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/
425 
426 	/* Cache operations */
427 
428 	arm9_icache_sync_all,		/* icache_sync_all	*/
429 	arm9_icache_sync_range,		/* icache_sync_range	*/
430 
431 	arm9_dcache_wbinv_all,		/* dcache_wbinv_all	*/
432 	arm9_dcache_wbinv_range,	/* dcache_wbinv_range	*/
433 /*XXX*/	arm9_dcache_wbinv_range,	/* dcache_inv_range	*/
434 	arm9_dcache_wb_range,		/* dcache_wb_range	*/
435 
436 	arm9_idcache_wbinv_all,		/* idcache_wbinv_all	*/
437 	arm9_idcache_wbinv_range,	/* idcache_wbinv_range	*/
438 
439 	/* Other functions */
440 
441 	cpufunc_nullop,			/* flush_prefetchbuf	*/
442 	armv4_drain_writebuf,		/* drain_writebuf	*/
443 	cpufunc_nullop,			/* flush_brnchtgt_C	*/
444 	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/
445 
446 	(void *)cpufunc_nullop,		/* sleep		*/
447 
448 	/* Soft functions */
449 
450 	cpufunc_null_fixup,		/* dataabt_fixup	*/
451 	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
452 
453 	arm9_context_switch,		/* context_switch	*/
454 
455 	arm9_setup			/* cpu setup		*/
456 
457 };
458 #endif /* CPU_ARM9 */
459 
460 #ifdef CPU_ARM10
461 struct cpu_functions arm10_cpufuncs = {
462 	/* CPU functions */
463 
464 	cpufunc_id,			/* id			*/
465 	cpufunc_nullop,			/* cpwait		*/
466 
467 	/* MMU functions */
468 
469 	cpufunc_control,		/* control		*/
470 	cpufunc_domains,		/* domain		*/
471 	arm10_setttb,			/* setttb		*/
472 	cpufunc_faultstatus,		/* faultstatus		*/
473 	cpufunc_faultaddress,		/* faultaddress		*/
474 
475 	/* TLB functions */
476 
477 	armv4_tlb_flushID,		/* tlb_flushID		*/
478 	arm10_tlb_flushID_SE,		/* tlb_flushID_SE	*/
479 	armv4_tlb_flushI,		/* tlb_flushI		*/
480 	arm10_tlb_flushI_SE,		/* tlb_flushI_SE	*/
481 	armv4_tlb_flushD,		/* tlb_flushD		*/
482 	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/
483 
484 	/* Cache operations */
485 
486 	armv5_icache_sync_all,		/* icache_sync_all	*/
487 	armv5_icache_sync_range,	/* icache_sync_range	*/
488 
489 	armv5_dcache_wbinv_all,		/* dcache_wbinv_all	*/
490 	armv5_dcache_wbinv_range,	/* dcache_wbinv_range	*/
491 /*XXX*/	armv5_dcache_wbinv_range,	/* dcache_inv_range	*/
492 	armv5_dcache_wb_range,		/* dcache_wb_range	*/
493 
494 	armv5_idcache_wbinv_all,	/* idcache_wbinv_all	*/
495 	armv5_idcache_wbinv_range,	/* idcache_wbinv_range	*/
496 
497 	/* Other functions */
498 
499 	cpufunc_nullop,			/* flush_prefetchbuf	*/
500 	armv4_drain_writebuf,		/* drain_writebuf	*/
501 	cpufunc_nullop,			/* flush_brnchtgt_C	*/
502 	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/
503 
504 	(void *)cpufunc_nullop,		/* sleep		*/
505 
506 	/* Soft functions */
507 
508 	cpufunc_null_fixup,		/* dataabt_fixup	*/
509 	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
510 
511 	arm10_context_switch,		/* context_switch	*/
512 
513 	arm10_setup			/* cpu setup		*/
514 
515 };
516 #endif /* CPU_ARM10 */
517 
518 #ifdef CPU_ARM11
519 struct cpu_functions arm11_cpufuncs = {
520 	/* CPU functions */
521 
522 	cpufunc_id,			/* id			*/
523 	cpufunc_nullop,			/* cpwait		*/
524 
525 	/* MMU functions */
526 
527 	cpufunc_control,		/* control		*/
528 	cpufunc_domains,		/* domain		*/
529 	arm11_setttb,			/* setttb		*/
530 	cpufunc_faultstatus,		/* faultstatus		*/
531 	cpufunc_faultaddress,		/* faultaddress		*/
532 
533 	/* TLB functions */
534 
535 	arm11_tlb_flushID,		/* tlb_flushID		*/
536 	arm11_tlb_flushID_SE,		/* tlb_flushID_SE	*/
537 	arm11_tlb_flushI,		/* tlb_flushI		*/
538 	arm11_tlb_flushI_SE,		/* tlb_flushI_SE	*/
539 	arm11_tlb_flushD,		/* tlb_flushD		*/
540 	arm11_tlb_flushD_SE,		/* tlb_flushD_SE	*/
541 
542 	/* Cache operations */
543 
544 	armv5_icache_sync_all,		/* icache_sync_all	*/
545 	armv5_icache_sync_range,	/* icache_sync_range	*/
546 
547 	armv5_dcache_wbinv_all,		/* dcache_wbinv_all	*/
548 	armv5_dcache_wbinv_range,	/* dcache_wbinv_range	*/
549 /*XXX*/	armv5_dcache_wbinv_range,	/* dcache_inv_range	*/
550 	armv5_dcache_wb_range,		/* dcache_wb_range	*/
551 
552 	armv5_idcache_wbinv_all,	/* idcache_wbinv_all	*/
553 	armv5_idcache_wbinv_range,	/* idcache_wbinv_range	*/
554 
555 	/* Other functions */
556 
557 	cpufunc_nullop,			/* flush_prefetchbuf	*/
558 	arm11_drain_writebuf,		/* drain_writebuf	*/
559 	cpufunc_nullop,			/* flush_brnchtgt_C	*/
560 	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/
561 
562 	(void *)cpufunc_nullop,		/* sleep		*/
563 
564 	/* Soft functions */
565 
566 	cpufunc_null_fixup,		/* dataabt_fixup	*/
567 	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
568 
569 	arm11_context_switch,		/* context_switch	*/
570 
571 	arm11_setup			/* cpu setup		*/
572 
573 };
574 #endif /* CPU_ARM11 */
575 
576 #ifdef CPU_SA110
577 struct cpu_functions sa110_cpufuncs = {
578 	/* CPU functions */
579 
580 	cpufunc_id,			/* id			*/
581 	cpufunc_nullop,			/* cpwait		*/
582 
583 	/* MMU functions */
584 
585 	cpufunc_control,		/* control		*/
586 	cpufunc_domains,		/* domain		*/
587 	sa1_setttb,			/* setttb		*/
588 	cpufunc_faultstatus,		/* faultstatus		*/
589 	cpufunc_faultaddress,		/* faultaddress		*/
590 
591 	/* TLB functions */
592 
593 	armv4_tlb_flushID,		/* tlb_flushID		*/
594 	sa1_tlb_flushID_SE,		/* tlb_flushID_SE	*/
595 	armv4_tlb_flushI,		/* tlb_flushI		*/
596 	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
597 	armv4_tlb_flushD,		/* tlb_flushD		*/
598 	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/
599 
600 	/* Cache operations */
601 
602 	sa1_cache_syncI,		/* icache_sync_all	*/
603 	sa1_cache_syncI_rng,		/* icache_sync_range	*/
604 
605 	sa1_cache_purgeD,		/* dcache_wbinv_all	*/
606 	sa1_cache_purgeD_rng,		/* dcache_wbinv_range	*/
607 /*XXX*/	sa1_cache_purgeD_rng,		/* dcache_inv_range	*/
608 	sa1_cache_cleanD_rng,		/* dcache_wb_range	*/
609 
610 	sa1_cache_purgeID,		/* idcache_wbinv_all	*/
611 	sa1_cache_purgeID_rng,		/* idcache_wbinv_range	*/
612 
613 	/* Other functions */
614 
615 	cpufunc_nullop,			/* flush_prefetchbuf	*/
616 	armv4_drain_writebuf,		/* drain_writebuf	*/
617 	cpufunc_nullop,			/* flush_brnchtgt_C	*/
618 	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/
619 
620 	(void *)cpufunc_nullop,		/* sleep		*/
621 
622 	/* Soft functions */
623 
624 	cpufunc_null_fixup,		/* dataabt_fixup	*/
625 	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
626 
627 	sa110_context_switch,		/* context_switch	*/
628 
629 	sa110_setup			/* cpu setup		*/
630 };
631 #endif	/* CPU_SA110 */
632 
633 #if defined(CPU_SA1100) || defined(CPU_SA1110)
634 struct cpu_functions sa11x0_cpufuncs = {
635 	/* CPU functions */
636 
637 	cpufunc_id,			/* id			*/
638 	cpufunc_nullop,			/* cpwait		*/
639 
640 	/* MMU functions */
641 
642 	cpufunc_control,		/* control		*/
643 	cpufunc_domains,		/* domain		*/
644 	sa1_setttb,			/* setttb		*/
645 	cpufunc_faultstatus,		/* faultstatus		*/
646 	cpufunc_faultaddress,		/* faultaddress		*/
647 
648 	/* TLB functions */
649 
650 	armv4_tlb_flushID,		/* tlb_flushID		*/
651 	sa1_tlb_flushID_SE,		/* tlb_flushID_SE	*/
652 	armv4_tlb_flushI,		/* tlb_flushI		*/
653 	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
654 	armv4_tlb_flushD,		/* tlb_flushD		*/
655 	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/
656 
657 	/* Cache operations */
658 
659 	sa1_cache_syncI,		/* icache_sync_all	*/
660 	sa1_cache_syncI_rng,		/* icache_sync_range	*/
661 
662 	sa1_cache_purgeD,		/* dcache_wbinv_all	*/
663 	sa1_cache_purgeD_rng,		/* dcache_wbinv_range	*/
664 /*XXX*/	sa1_cache_purgeD_rng,		/* dcache_inv_range	*/
665 	sa1_cache_cleanD_rng,		/* dcache_wb_range	*/
666 
667 	sa1_cache_purgeID,		/* idcache_wbinv_all	*/
668 	sa1_cache_purgeID_rng,		/* idcache_wbinv_range	*/
669 
670 	/* Other functions */
671 
672 	sa11x0_drain_readbuf,		/* flush_prefetchbuf	*/
673 	armv4_drain_writebuf,		/* drain_writebuf	*/
674 	cpufunc_nullop,			/* flush_brnchtgt_C	*/
675 	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/
676 
677 	sa11x0_cpu_sleep,		/* sleep		*/
678 
679 	/* Soft functions */
680 
681 	cpufunc_null_fixup,		/* dataabt_fixup	*/
682 	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
683 
684 	sa11x0_context_switch,		/* context_switch	*/
685 
686 	sa11x0_setup			/* cpu setup		*/
687 };
688 #endif	/* CPU_SA1100 || CPU_SA1110 */
689 
690 #ifdef CPU_IXP12X0
691 struct cpu_functions ixp12x0_cpufuncs = {
692 	/* CPU functions */
693 
694 	cpufunc_id,			/* id			*/
695 	cpufunc_nullop,			/* cpwait		*/
696 
697 	/* MMU functions */
698 
699 	cpufunc_control,		/* control		*/
700 	cpufunc_domains,		/* domain		*/
701 	sa1_setttb,			/* setttb		*/
702 	cpufunc_faultstatus,		/* faultstatus		*/
703 	cpufunc_faultaddress,		/* faultaddress		*/
704 
705 	/* TLB functions */
706 
707 	armv4_tlb_flushID,		/* tlb_flushID		*/
708 	sa1_tlb_flushID_SE,		/* tlb_flushID_SE	*/
709 	armv4_tlb_flushI,		/* tlb_flushI		*/
710 	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
711 	armv4_tlb_flushD,		/* tlb_flushD		*/
712 	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/
713 
714 	/* Cache operations */
715 
716 	sa1_cache_syncI,		/* icache_sync_all	*/
717 	sa1_cache_syncI_rng,		/* icache_sync_range	*/
718 
719 	sa1_cache_purgeD,		/* dcache_wbinv_all	*/
720 	sa1_cache_purgeD_rng,		/* dcache_wbinv_range	*/
721 /*XXX*/	sa1_cache_purgeD_rng,		/* dcache_inv_range	*/
722 	sa1_cache_cleanD_rng,		/* dcache_wb_range	*/
723 
724 	sa1_cache_purgeID,		/* idcache_wbinv_all	*/
725 	sa1_cache_purgeID_rng,		/* idcache_wbinv_range	*/
726 
727 	/* Other functions */
728 
729 	ixp12x0_drain_readbuf,		/* flush_prefetchbuf	*/
730 	armv4_drain_writebuf,		/* drain_writebuf	*/
731 	cpufunc_nullop,			/* flush_brnchtgt_C	*/
732 	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/
733 
734 	(void *)cpufunc_nullop,		/* sleep		*/
735 
736 	/* Soft functions */
737 
738 	cpufunc_null_fixup,		/* dataabt_fixup	*/
739 	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
740 
741 	ixp12x0_context_switch,		/* context_switch	*/
742 
743 	ixp12x0_setup			/* cpu setup		*/
744 };
745 #endif	/* CPU_IXP12X0 */
746 
747 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
748     defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425)
749 struct cpu_functions xscale_cpufuncs = {
750 	/* CPU functions */
751 
752 	cpufunc_id,			/* id			*/
753 	xscale_cpwait,			/* cpwait		*/
754 
755 	/* MMU functions */
756 
757 	xscale_control,			/* control		*/
758 	cpufunc_domains,		/* domain		*/
759 	xscale_setttb,			/* setttb		*/
760 	cpufunc_faultstatus,		/* faultstatus		*/
761 	cpufunc_faultaddress,		/* faultaddress		*/
762 
763 	/* TLB functions */
764 
765 	armv4_tlb_flushID,		/* tlb_flushID		*/
766 	xscale_tlb_flushID_SE,		/* tlb_flushID_SE	*/
767 	armv4_tlb_flushI,		/* tlb_flushI		*/
768 	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
769 	armv4_tlb_flushD,		/* tlb_flushD		*/
770 	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/
771 
772 	/* Cache operations */
773 
774 	xscale_cache_syncI,		/* icache_sync_all	*/
775 	xscale_cache_syncI_rng,		/* icache_sync_range	*/
776 
777 	xscale_cache_purgeD,		/* dcache_wbinv_all	*/
778 	xscale_cache_purgeD_rng,	/* dcache_wbinv_range	*/
779 	xscale_cache_flushD_rng,	/* dcache_inv_range	*/
780 	xscale_cache_cleanD_rng,	/* dcache_wb_range	*/
781 
782 	xscale_cache_purgeID,		/* idcache_wbinv_all	*/
783 	xscale_cache_purgeID_rng,	/* idcache_wbinv_range	*/
784 
785 	/* Other functions */
786 
787 	cpufunc_nullop,			/* flush_prefetchbuf	*/
788 	armv4_drain_writebuf,		/* drain_writebuf	*/
789 	cpufunc_nullop,			/* flush_brnchtgt_C	*/
790 	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/
791 
792 	xscale_cpu_sleep,		/* sleep		*/
793 
794 	/* Soft functions */
795 
796 	cpufunc_null_fixup,		/* dataabt_fixup	*/
797 	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
798 
799 	xscale_context_switch,		/* context_switch	*/
800 
801 	xscale_setup			/* cpu setup		*/
802 };
803 #endif
804 /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || __CPU_XSCALE_PXA2XX || CPU_XSCALE_IXP425 */
805 
806 /*
807  * Global constants also used by locore.s
808  */
809 
810 struct cpu_functions cpufuncs;
811 u_int cputype;
812 u_int cpu_reset_needs_v4_MMU_disable;	/* flag used in locore.s */
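/*
 * Explanatory note (an assumption from how the flag is used below):
 * CPUs whose reset path must first turn the MMU off (set with comments
 * like "V4 or higher", "SA needs it" and "XScale needs it" in
 * set_cpufuncs()) set cpu_reset_needs_v4_MMU_disable, and locore.s
 * tests it before vectoring through the reset code.
 */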
813 
814 #if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) || \
815     defined (CPU_ARM10) || defined (CPU_ARM11) || \
816     defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
817     defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425)
818 static void get_cachetype_cp15 __P((void));
819 
820 /* Additional cache information local to this file.  Log2 of some of the
821    above numbers.  */
822 static int	arm_dcache_l2_nsets;
823 static int	arm_dcache_l2_assoc;
824 static int	arm_dcache_l2_linesize;
825 
826 static void
827 get_cachetype_cp15()
828 {
829 	u_int ctype, isize, dsize;
830 	u_int multiplier;
831 
832 	__asm __volatile("mrc p15, 0, %0, c0, c0, 1"
833 		: "=r" (ctype));
834 
835 	/*
836 	 * ...and thus spake the ARM ARM:
837 	 *
838 	 * If an <opcode2> value corresponding to an unimplemented or
839 	 * reserved ID register is encountered, the System Control
840 	 * processor returns the value of the main ID register.
841 	 */
842 	if (ctype == cpufunc_id())
843 		goto out;
844 
845 	if ((ctype & CPU_CT_S) == 0)
846 		arm_pcache_unified = 1;
847 
848 	/*
849 	 * If you want to know how this code works, go read the ARM ARM.
850 	 */
851 
852 	arm_pcache_type = CPU_CT_CTYPE(ctype);
853 
854 	if (arm_pcache_unified == 0) {
855 		isize = CPU_CT_ISIZE(ctype);
856 		multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
857 		arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
858 		if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
859 			if (isize & CPU_CT_xSIZE_M)
860 				arm_picache_line_size = 0; /* not present */
861 			else
862 				arm_picache_ways = 1;
863 		} else {
864 			arm_picache_ways = multiplier <<
865 			    (CPU_CT_xSIZE_ASSOC(isize) - 1);
866 		}
867 		arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
868 	}
869 
870 	dsize = CPU_CT_DSIZE(ctype);
871 	multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
872 	arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
873 	if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
874 		if (dsize & CPU_CT_xSIZE_M)
875 			arm_pdcache_line_size = 0; /* not present */
876 		else
877 			arm_pdcache_ways = 1;
878 	} else {
879 		arm_pdcache_ways = multiplier <<
880 		    (CPU_CT_xSIZE_ASSOC(dsize) - 1);
881 	}
882 	arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
883 
884 	arm_dcache_align = arm_pdcache_line_size;
885 
886 	arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
887 	arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
888 	arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
889 	    CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);
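	/*
	 * Worked example (illustrative, not from the original file): a
	 * D-size field with M=0, LEN=1, ASSOC=2, SIZE=5 gives multiplier 2,
	 * a line size of 1 << (1+3) = 16 bytes, 2 << (2-1) = 4 ways and a
	 * 2 << (5+8) = 16 KB cache; the log2 values then come out as
	 * linesize = 4, assoc = 2 and nsets = 6 + 5 - 2 - 1 = 8, and indeed
	 * 2^8 sets * 4 ways * 16 bytes/line = 16 KB.
	 */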
890 
891  out:
892 	arm_dcache_align_mask = arm_dcache_align - 1;
893 }
894 #endif /* ARM7TDMI || ARM8 || ARM9 || XSCALE */
895 
896 #if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3) || \
897     defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_SA110) || \
898     defined(CPU_SA1100) || defined(CPU_SA1110) || defined(CPU_IXP12X0)
899 /* Cache information for CPUs without cache type registers. */
900 struct cachetab {
901 	u_int32_t ct_cpuid;
902 	int	ct_pcache_type;
903 	int	ct_pcache_unified;
904 	int	ct_pdcache_size;
905 	int	ct_pdcache_line_size;
906 	int	ct_pdcache_ways;
907 	int	ct_picache_size;
908 	int	ct_picache_line_size;
909 	int	ct_picache_ways;
910 };
911 
912 struct cachetab cachetab[] = {
913     /* cpuid,           cache type,       u,  dsiz, ls, wy,  isiz, ls, wy */
914     { CPU_ID_ARM2,      0,                1,     0,  0,  0,     0,  0,  0 },
915     { CPU_ID_ARM250,    0,                1,     0,  0,  0,     0,  0,  0 },
916     { CPU_ID_ARM3,      CPU_CT_CTYPE_WT,  1,  4096, 16, 64,     0,  0,  0 },
917     { CPU_ID_ARM610,	CPU_CT_CTYPE_WT,  1,  4096, 16, 64,     0,  0,  0 },
918     { CPU_ID_ARM710,    CPU_CT_CTYPE_WT,  1,  8192, 32,  4,     0,  0,  0 },
919     { CPU_ID_ARM7500,   CPU_CT_CTYPE_WT,  1,  4096, 16,  4,     0,  0,  0 },
920     { CPU_ID_ARM710A,   CPU_CT_CTYPE_WT,  1,  8192, 16,  4,     0,  0,  0 },
921     { CPU_ID_ARM7500FE, CPU_CT_CTYPE_WT,  1,  4096, 16,  4,     0,  0,  0 },
922     /* XXX is this type right for SA-1? */
923     { CPU_ID_SA110,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 },
924     { CPU_ID_SA1100,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
925     { CPU_ID_SA1110,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
926     { CPU_ID_IXP1200,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 }, /* XXX */
927     { 0, 0, 0, 0, 0, 0, 0, 0}
928 };
929 
930 static void get_cachetype_table __P((void));
931 
932 static void
933 get_cachetype_table()
934 {
935 	int i;
936 	u_int32_t cpuid = cpufunc_id();
937 
938 	for (i = 0; cachetab[i].ct_cpuid != 0; i++) {
939 		if (cachetab[i].ct_cpuid == (cpuid & CPU_ID_CPU_MASK)) {
940 			arm_pcache_type = cachetab[i].ct_pcache_type;
941 			arm_pcache_unified = cachetab[i].ct_pcache_unified;
942 			arm_pdcache_size = cachetab[i].ct_pdcache_size;
943 			arm_pdcache_line_size =
944 			    cachetab[i].ct_pdcache_line_size;
945 			arm_pdcache_ways = cachetab[i].ct_pdcache_ways;
946 			arm_picache_size = cachetab[i].ct_picache_size;
947 			arm_picache_line_size =
948 			    cachetab[i].ct_picache_line_size;
949 			arm_picache_ways = cachetab[i].ct_picache_ways;
950 		}
951 	}
952 	arm_dcache_align = arm_pdcache_line_size;
953 
954 	arm_dcache_align_mask = arm_dcache_align - 1;
955 }
956 
957 #endif /* ARM2 || ARM250 || ARM3 || ARM6 || ARM7 || SA110 || SA1100 || SA1110 || IXP12X0 */
958 
959 /*
960  * Cannot panic here as we may not have a console yet ...
961  */
962 
963 int
964 set_cpufuncs()
965 {
966 	cputype = cpufunc_id();
967 	cputype &= CPU_ID_CPU_MASK;
968 
969 	/*
970 	 * NOTE: cpu_do_powersave defaults to off.  If we encounter a
971 	 * CPU type where we want to use it by default, then we set it.
972 	 */
973 
974 #ifdef CPU_ARM3
975 	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
976 	    (cputype & 0x00000f00) == 0x00000300) {
977 		cpufuncs = arm3_cpufuncs;
978 		cpu_reset_needs_v4_MMU_disable = 0;
979 		get_cachetype_table();
980 		return 0;
981 	}
982 #endif	/* CPU_ARM3 */
983 #ifdef CPU_ARM6
984 	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
985 	    (cputype & 0x00000f00) == 0x00000600) {
986 		cpufuncs = arm6_cpufuncs;
987 		cpu_reset_needs_v4_MMU_disable = 0;
988 		get_cachetype_table();
989 		pmap_pte_init_generic();
990 		return 0;
991 	}
992 #endif	/* CPU_ARM6 */
993 #ifdef CPU_ARM7
994 	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
995 	    CPU_ID_IS7(cputype) &&
996 	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V3) {
997 		cpufuncs = arm7_cpufuncs;
998 		cpu_reset_needs_v4_MMU_disable = 0;
999 		get_cachetype_table();
1000 		pmap_pte_init_generic();
1001 		return 0;
1002 	}
1003 #endif	/* CPU_ARM7 */
1004 #ifdef CPU_ARM7TDMI
1005 	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
1006 	    CPU_ID_IS7(cputype) &&
1007 	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V4T) {
1008 		cpufuncs = arm7tdmi_cpufuncs;
1009 		cpu_reset_needs_v4_MMU_disable = 0;
1010 		get_cachetype_cp15();
1011 		pmap_pte_init_generic();
1012 		return 0;
1013 	}
1014 #endif
1015 #ifdef CPU_ARM8
1016 	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
1017 	    (cputype & 0x0000f000) == 0x00008000) {
1018 		cpufuncs = arm8_cpufuncs;
1019 		cpu_reset_needs_v4_MMU_disable = 0;	/* XXX correct? */
1020 		get_cachetype_cp15();
1021 		pmap_pte_init_arm8();
1022 		return 0;
1023 	}
1024 #endif	/* CPU_ARM8 */
1025 #ifdef CPU_ARM9
1026 	if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
1027 	     (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
1028 	    (cputype & 0x0000f000) == 0x00009000) {
1029 		cpufuncs = arm9_cpufuncs;
1030 		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
1031 		get_cachetype_cp15();
1032 		arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
1033 		arm9_dcache_sets_max =
1034 		    (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) -
1035 		    arm9_dcache_sets_inc;
1036 		arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
1037 		arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
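		/*
		 * Illustrative numbers (not from the original file): a
		 * 16 KB, 64-way D-cache with 32-byte lines (ARM920T-style
		 * geometry) yields arm_dcache_l2_linesize = 5,
		 * arm_dcache_l2_nsets = 3 and arm_dcache_l2_assoc = 6, so
		 * the set counter steps by 32 up to 224 and the index
		 * counter steps by 1 << 26, matching the set/index fields
		 * of the ARM9 clean/invalidate-by-set/index MCR operand.
		 */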
1038 #ifdef	ARM9_CACHE_WRITE_THROUGH
1039 		pmap_pte_init_arm9();
1040 #else
1041 		pmap_pte_init_generic();
1042 #endif
1043 		return 0;
1044 	}
1045 #endif /* CPU_ARM9 */
1046 #ifdef CPU_ARM10
1047 	if (/* cputype == CPU_ID_ARM1020T || */
1048 	    cputype == CPU_ID_ARM1020E ||
1049 	    cputype == CPU_ID_ARM1026EJS) {
1050 		/*
1051 		 * Select write-through caching (this isn't really an
1052 		 * option on ARM1020T).
1053 		 */
1054 		cpufuncs = arm10_cpufuncs;
1055 		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
1056 		get_cachetype_cp15();
1057 		armv5_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
1058 		armv5_dcache_sets_max =
1059 		    (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) -
1060 		    armv5_dcache_sets_inc;
1061 		armv5_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
1062 		armv5_dcache_index_max = 0U - armv5_dcache_index_inc;
1063 		pmap_pte_init_generic();
1064 		return 0;
1065 	}
1066 #endif /* CPU_ARM10 */
1067 #ifdef CPU_ARM11
1068 	if (cputype == CPU_ID_ARM1136JS ||
1069 	    cputype == CPU_ID_ARM1136JSR1) {
1070 		cpufuncs = arm11_cpufuncs;
1071 		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
1072 		get_cachetype_cp15();
1073 		armv5_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
1074 		armv5_dcache_sets_max =
1075 		    (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) -
1076 		    armv5_dcache_sets_inc;
1077 		armv5_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
1078 		armv5_dcache_index_max = 0U - armv5_dcache_index_inc;
1079 		pmap_pte_init_generic();
1080 		return 0;
1081 	}
1082 #endif /* CPU_ARM11 */
1083 #ifdef CPU_SA110
1084 	if (cputype == CPU_ID_SA110) {
1085 		cpufuncs = sa110_cpufuncs;
1086 		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it */
1087 		get_cachetype_table();
1088 		pmap_pte_init_sa1();
1089 		return 0;
1090 	}
1091 #endif	/* CPU_SA110 */
1092 #ifdef CPU_SA1100
1093 	if (cputype == CPU_ID_SA1100) {
1094 		cpufuncs = sa11x0_cpufuncs;
1095 		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it	*/
1096 		get_cachetype_table();
1097 		pmap_pte_init_sa1();
1098 
1099 		/* Use powersave on this CPU. */
1100 		cpu_do_powersave = 1;
1101 
1102 		return 0;
1103 	}
1104 #endif	/* CPU_SA1100 */
1105 #ifdef CPU_SA1110
1106 	if (cputype == CPU_ID_SA1110) {
1107 		cpufuncs = sa11x0_cpufuncs;
1108 		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it	*/
1109 		get_cachetype_table();
1110 		pmap_pte_init_sa1();
1111 
1112 		/* Use powersave on this CPU. */
1113 		cpu_do_powersave = 1;
1114 
1115 		return 0;
1116 	}
1117 #endif	/* CPU_SA1110 */
1118 #ifdef CPU_IXP12X0
1119 	if (cputype == CPU_ID_IXP1200) {
1120 		cpufuncs = ixp12x0_cpufuncs;
1121 		cpu_reset_needs_v4_MMU_disable = 1;
1122 		get_cachetype_table();
1123 		pmap_pte_init_sa1();
1124 		return 0;
1125 	}
1126 #endif	/* CPU_IXP12X0 */
1127 #ifdef CPU_XSCALE_80200
1128 	if (cputype == CPU_ID_80200) {
1129 		int rev = cpufunc_id() & CPU_ID_REVISION_MASK;
1130 
1131 		i80200_icu_init();
1132 
1133 		/*
1134 		 * Reset the Performance Monitoring Unit to a
1135 		 * pristine state:
1136 		 *	- CCNT, PMN0, PMN1 reset to 0
1137 		 *	- overflow indications cleared
1138 		 *	- all counters disabled
1139 		 */
1140 		__asm __volatile("mcr p14, 0, %0, c0, c0, 0"
1141 			:
1142 			: "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
1143 			       PMNC_CC_IF));
1144 
1145 #if defined(XSCALE_CCLKCFG)
1146 		/*
1147 		 * Crank CCLKCFG to maximum legal value.
1148 		 */
1149 		__asm __volatile ("mcr p14, 0, %0, c6, c0, 0"
1150 			:
1151 			: "r" (XSCALE_CCLKCFG));
1152 #endif
1153 
1154 		/*
1155 		 * XXX Disable ECC in the Bus Controller Unit; we
1156 		 * don't really support it, yet.  Clear any pending
1157 		 * error indications.
1158 		 */
1159 		__asm __volatile("mcr p13, 0, %0, c0, c1, 0"
1160 			:
1161 			: "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));
1162 
1163 		cpufuncs = xscale_cpufuncs;
1164 #if defined(PERFCTRS)
1165 		xscale_pmu_init();
1166 #endif
1167 
1168 		/*
1169 		 * i80200 errata: Step-A0 and A1 have a bug where
1170 		 * D$ dirty bits are not cleared on "invalidate by
1171 		 * address".
1172 		 *
1173 		 * Workaround: Clean cache line before invalidating.
1174 		 */
1175 		if (rev == 0 || rev == 1)
1176 			cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng;
1177 
1178 		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
1179 		get_cachetype_cp15();
1180 		pmap_pte_init_xscale();
1181 		return 0;
1182 	}
1183 #endif /* CPU_XSCALE_80200 */
1184 #ifdef CPU_XSCALE_80321
1185 	if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
1186 	    cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0 ||
1187 	    cputype == CPU_ID_80219_400 || cputype == CPU_ID_80219_600) {
1188 		i80321_icu_init();
1189 
1190 		/*
1191 		 * Reset the Performance Monitoring Unit to a
1192 		 * pristine state:
1193 		 *	- CCNT, PMN0, PMN1 reset to 0
1194 		 *	- overflow indications cleared
1195 		 *	- all counters disabled
1196 		 */
1197 		__asm __volatile("mcr p14, 0, %0, c0, c0, 0"
1198 			:
1199 			: "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
1200 			       PMNC_CC_IF));
1201 
1202 		cpufuncs = xscale_cpufuncs;
1203 #if defined(PERFCTRS)
1204 		xscale_pmu_init();
1205 #endif
1206 
1207 		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
1208 		get_cachetype_cp15();
1209 		pmap_pte_init_xscale();
1210 		return 0;
1211 	}
1212 #endif /* CPU_XSCALE_80321 */
1213 #ifdef __CPU_XSCALE_PXA2XX
1214 	/* ignore core revision to test PXA2xx CPUs */
1215 	if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X ||
1216 	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
1217 	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {
1218 
1219 		cpufuncs = xscale_cpufuncs;
1220 #if defined(PERFCTRS)
1221 		xscale_pmu_init();
1222 #endif
1223 
1224 		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
1225 		get_cachetype_cp15();
1226 		pmap_pte_init_xscale();
1227 
1228 		/* Use powersave on this CPU. */
1229 		cpu_do_powersave = 1;
1230 
1231 		return 0;
1232 	}
1233 #endif /* __CPU_XSCALE_PXA2XX */
1234 #ifdef CPU_XSCALE_IXP425
1235 	if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
1236 	    cputype == CPU_ID_IXP425_266) {
1237 		ixp425_icu_init();
1238 
1239 		cpufuncs = xscale_cpufuncs;
1240 #if defined(PERFCTRS)
1241 		xscale_pmu_init();
1242 #endif
1243 
1244 		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
1245 		get_cachetype_cp15();
1246 		pmap_pte_init_xscale();
1247 
1248 		return 0;
1249 	}
1250 #endif /* CPU_XSCALE_IXP425 */
1251 	/*
1252 	 * Bzzzz. And the answer was ...
1253 	 */
1254 	panic("No support for this CPU type (%08x) in kernel", cputype);
1255 	return(ARCHITECTURE_NOT_PRESENT);
1256 }
1257 
1258 /*
1259  * Fixup routines for data and prefetch aborts.
1260  *
1261  * Several compile-time symbols are used:
1262  *
1263  * DEBUG_FAULT_CORRECTION - Print debugging information during the
1264  * correction of registers after a fault.
1265  * ARM6_LATE_ABORT - The ARM6 supports both early and late aborts;
1266  * when defined, late aborts are used.
1267  */
1268 
1269 
1270 /*
1271  * Null abort fixup routine.
1272  * For use when no fixup is required.
1273  */
1274 int
1275 cpufunc_null_fixup(arg)
1276 	void *arg;
1277 {
1278 	return(ABORT_FIXUP_OK);
1279 }
1280 
1281 
1282 #if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3) || \
1283     defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI)
1284 
1285 #ifdef DEBUG_FAULT_CORRECTION
1286 #define DFC_PRINTF(x)		printf x
1287 #define DFC_DISASSEMBLE(x)	disassemble(x)
1288 #else
1289 #define DFC_PRINTF(x)		/* nothing */
1290 #define DFC_DISASSEMBLE(x)	/* nothing */
1291 #endif
1292 
1293 /*
1294  * "Early" data abort fixup.
1295  *
1296  * For ARM2, ARM2as, ARM3 and ARM6 (in early-abort mode).  Also used
1297  * indirectly by ARM6 (in late-abort mode) and ARM7[TDMI].
1298  *
1299  * In early aborts, we may have to fix up LDM, STM, LDC and STC.
1300  */
1301 int
1302 early_abort_fixup(arg)
1303 	void *arg;
1304 {
1305 	trapframe_t *frame = arg;
1306 	u_int fault_pc;
1307 	u_int fault_instruction;
1308 	int saved_lr = 0;
1309 
1310 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1311 
1312 		/* Ok an abort in SVC mode */
1313 
1314 		/*
1315 		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1316 		 * as the fault happened in svc mode but we need it in the
1317 		 * usr slot so we can treat the registers as an array of ints
1318 		 * during fixing.
1319 		 * NOTE: the PC (r15) also has a slot in that array, but
1320 		 * writeback to r15 is not allowed, so it is never modified.
1321 		 * Doing it like this is more efficient than trapping this
1322 		 * case in all possible locations in the following fixup code.
1323 		 */
1324 
1325 		saved_lr = frame->tf_usr_lr;
1326 		frame->tf_usr_lr = frame->tf_svc_lr;
1327 
1328 		/*
1329 		 * Note the trapframe does not have the SVC r13 so a fault
1330 		 * from an instruction with writeback to r13 in SVC mode is
1331 		 * not allowed. This should not happen as the kstack is
1332 		 * always valid.
1333 		 */
1334 	}
1335 
1336 	/* Get fault address and status from the CPU */
1337 
1338 	fault_pc = frame->tf_pc;
1339 	fault_instruction = *((volatile unsigned int *)fault_pc);
1340 
1341 	/* Decode the fault instruction and fix the registers as needed */
1342 
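	/*
	 * Worked example (illustrative): if "ldmia r4!, {r0-r2}" aborts,
	 * the W bit (21) is set, base = 4 and three registers are listed,
	 * so the base writeback added 3 * 4 = 12 to r4; with the U bit (23)
	 * set, the code below subtracts 12 to restore r4 before the
	 * instruction is restarted.
	 */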
1343 	if ((fault_instruction & 0x0e000000) == 0x08000000) {
1344 		int base;
1345 		int loop;
1346 		int count;
1347 		int *registers = &frame->tf_r0;
1348 
1349 		DFC_PRINTF(("LDM/STM\n"));
1350 		DFC_DISASSEMBLE(fault_pc);
1351 		if (fault_instruction & (1 << 21)) {
1352 			DFC_PRINTF(("This instruction must be corrected\n"));
1353 			base = (fault_instruction >> 16) & 0x0f;
1354 			if (base == 15)
1355 				return ABORT_FIXUP_FAILED;
1356 			/* Count registers transferred */
1357 			count = 0;
1358 			for (loop = 0; loop < 16; ++loop) {
1359 				if (fault_instruction & (1<<loop))
1360 					++count;
1361 			}
1362 			DFC_PRINTF(("%d registers used\n", count));
1363 			DFC_PRINTF(("Corrected r%d by %d bytes ",
1364 				       base, count * 4));
1365 			if (fault_instruction & (1 << 23)) {
1366 				DFC_PRINTF(("down\n"));
1367 				registers[base] -= count * 4;
1368 			} else {
1369 				DFC_PRINTF(("up\n"));
1370 				registers[base] += count * 4;
1371 			}
1372 		}
1373 	} else if ((fault_instruction & 0x0e000000) == 0x0c000000) {
1374 		int base;
1375 		int offset;
1376 		int *registers = &frame->tf_r0;
1377 
1378 		/* REGISTER CORRECTION IS REQUIRED FOR THESE INSTRUCTIONS */
1379 
1380 		DFC_DISASSEMBLE(fault_pc);
1381 
1382 		/* Only need to fix registers if write back is turned on */
1383 
1384 		if ((fault_instruction & (1 << 21)) != 0) {
1385 			base = (fault_instruction >> 16) & 0x0f;
1386 			if (base == 13 &&
1387 			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
1388 				return ABORT_FIXUP_FAILED;
1389 			if (base == 15)
1390 				return ABORT_FIXUP_FAILED;
1391 
1392 			offset = (fault_instruction & 0xff) << 2;
1393 			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
1394 			if ((fault_instruction & (1 << 23)) != 0)
1395 				offset = -offset;
1396 			registers[base] += offset;
1397 			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
1398 		}
1399 	} else if ((fault_instruction & 0x0e000000) == 0x0c000000)
1400 		return ABORT_FIXUP_FAILED;	/* XXX unreachable: duplicates the LDC/STC test above */
1401 
1402 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1403 
1404 		/* Ok an abort in SVC mode */
1405 
1406 		/*
1407 		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1408 		 * as the fault happened in svc mode but we need it in the
1409 		 * usr slot so we can treat the registers as an array of ints
1410 		 * during fixing.
1411 		 * NOTE: the PC (r15) also has a slot in that array, but
1412 		 * writeback to r15 is not allowed, so it is never modified.
1413 		 * Doing it like this is more efficient than trapping this
1414 		 * case in all possible locations in the prior fixup code.
1415 		 */
1416 
1417 		frame->tf_svc_lr = frame->tf_usr_lr;
1418 		frame->tf_usr_lr = saved_lr;
1419 
1420 		/*
1421 		 * Note the trapframe does not have the SVC r13 so a fault
1422 		 * from an instruction with writeback to r13 in SVC mode is
1423 		 * not allowed. This should not happen as the kstack is
1424 		 * always valid.
1425 		 */
1426 	}
1427 
1428 	return(ABORT_FIXUP_OK);
1429 }
1430 #endif	/* CPU_ARM2/250/3/6/7 */
1431 
1432 
1433 #if (defined(CPU_ARM6) && defined(ARM6_LATE_ABORT)) || defined(CPU_ARM7) || \
1434 	defined(CPU_ARM7TDMI)
1435 /*
1436  * "Late" (base updated) data abort fixup
1437  *
1438  * For ARM6 (in late-abort mode) and ARM7.
1439  *
1440  * In this model, all data-transfer instructions need fixing up.  We defer
1441  * LDM, STM, LDC and STC fixup to the early-abort handler.
1442  */
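/*
 * Worked example (illustrative): if "ldr r0, [r1], r2, lsl #2" aborts,
 * bit 24 is clear (post-indexed) and bit 25 is set (register offset),
 * so the fixup below recomputes the offset as r2 << 2 and, because the
 * U bit (23) is set for an add, subtracts it from r1 to undo the base
 * update before the instruction is restarted.
 */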
1443 int
1444 late_abort_fixup(arg)
1445 	void *arg;
1446 {
1447 	trapframe_t *frame = arg;
1448 	u_int fault_pc;
1449 	u_int fault_instruction;
1450 	int saved_lr = 0;
1451 
1452 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1453 
1454 		/* Ok an abort in SVC mode */
1455 
1456 		/*
1457 		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1458 		 * as the fault happened in svc mode but we need it in the
1459 		 * usr slot so we can treat the registers as an array of ints
1460 		 * during fixing.
1461 		 * NOTE: the PC (r15) also has a slot in that array, but
1462 		 * writeback to r15 is not allowed, so it is never modified.
1463 		 * Doing it like this is more efficient than trapping this
1464 		 * case in all possible locations in the following fixup code.
1465 		 */
1466 
1467 		saved_lr = frame->tf_usr_lr;
1468 		frame->tf_usr_lr = frame->tf_svc_lr;
1469 
1470 		/*
1471 		 * Note the trapframe does not have the SVC r13 so a fault
1472 		 * from an instruction with writeback to r13 in SVC mode is
1473 		 * not allowed. This should not happen as the kstack is
1474 		 * always valid.
1475 		 */
1476 	}
1477 
1478 	/* Get fault address and status from the CPU */
1479 
1480 	fault_pc = frame->tf_pc;
1481 	fault_instruction = *((volatile unsigned int *)fault_pc);
1482 
1483 	/* Decode the fault instruction and fix the registers as needed */
1484 
1485 	/* Was it a swap instruction? */
1486 
1487 	if ((fault_instruction & 0x0fb00ff0) == 0x01000090) {
1488 		DFC_DISASSEMBLE(fault_pc);
1489 	} else if ((fault_instruction & 0x0c000000) == 0x04000000) {
1490 
1491 		/* Was it an ldr/str instruction? */
1492 		/* This is for late abort only */
1493 
1494 		int base;
1495 		int offset;
1496 		int *registers = &frame->tf_r0;
1497 
1498 		DFC_DISASSEMBLE(fault_pc);
1499 
1502 		if ((fault_instruction & (1 << 24)) == 0
1503 		    || (fault_instruction & (1 << 21)) != 0) {
1504 			/* postindexed ldr/str with no writeback */
1505 
1506 			base = (fault_instruction >> 16) & 0x0f;
1507 			if (base == 13 &&
1508 			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
1509 				return ABORT_FIXUP_FAILED;
1510 			if (base == 15)
1511 				return ABORT_FIXUP_FAILED;
1512 			DFC_PRINTF(("late abt fix: r%d=%08x : ",
1513 				       base, registers[base]));
1514 			if ((fault_instruction & (1 << 25)) == 0) {
1515 				/* Immediate offset - easy */
1516 
1517 				offset = fault_instruction & 0xfff;
1518 				if ((fault_instruction & (1 << 23)))
1519 					offset = -offset;
1520 				registers[base] += offset;
1521 				DFC_PRINTF(("imm=%08x ", offset));
1522 			} else {
1523 				/* offset is a shifted register */
1524 				int shift;
1525 
1526 				offset = fault_instruction & 0x0f;
1527 				if (offset == base)
1528 					return ABORT_FIXUP_FAILED;
1529 
1530 				/*
1531 				 * Register offset - hard we have to
1532 				 * cope with shifts !
1533 				 */
1534 				offset = registers[offset];
1535 
1536 				if ((fault_instruction & (1 << 4)) == 0)
1537 					/* shift with amount */
1538 					shift = (fault_instruction >> 7) & 0x1f;
1539 				else {
1540 					/* shift with register */
1541 					if ((fault_instruction & (1 << 7)) != 0)
1542 						/* undefined for now so bail out */
1543 						return ABORT_FIXUP_FAILED;
1544 					shift = ((fault_instruction >> 8) & 0xf);
1545 					if (base == shift)
1546 						return ABORT_FIXUP_FAILED;
1547 					DFC_PRINTF(("shift reg=%d ", shift));
1548 					shift = registers[shift];
1549 				}
1550 				DFC_PRINTF(("shift=%08x ", shift));
1551 				switch (((fault_instruction >> 5) & 0x3)) {
1552 				case 0 : /* Logical left */
1553 					offset = (int)(((u_int)offset) << shift);
1554 					break;
1555 				case 1 : /* Logical Right */
1556 					if (shift == 0) shift = 32;
1557 					offset = (int)(((u_int)offset) >> shift);
1558 					break;
1559 				case 2 : /* Arithmetic Right */
1560 					if (shift == 0) shift = 32;
1561 					offset = (int)(((int)offset) >> shift);
1562 					break;
1563 				case 3 : /* Rotate right (ror or rrx) */
1564 					return ABORT_FIXUP_FAILED;
1565 					break;
1566 				}
1567 
1568 				DFC_PRINTF(("abt: fixed LDR/STR with "
1569 					       "register offset\n"));
1570 				if ((fault_instruction & (1 << 23)))
1571 					offset = -offset;
1572 				DFC_PRINTF(("offset=%08x ", offset));
1573 				registers[base] += offset;
1574 			}
1575 			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
1576 		}
1577 	}
1578 
1579 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1580 
1581 		/* Ok an abort in SVC mode */
1582 
1583 		/*
1584 		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1585 		 * as the fault happened in svc mode but we need it in the
1586 		 * usr slot so we can treat the registers as an array of ints
1587 		 * during fixing.
1588 		 * NOTE: the PC (r15) also has a slot in that array, but
1589 		 * writeback to r15 is not allowed, so it is never modified.
1590 		 * Doing it like this is more efficient than trapping this
1591 		 * case in all possible locations in the prior fixup code.
1592 		 */
1593 
1594 		frame->tf_svc_lr = frame->tf_usr_lr;
1595 		frame->tf_usr_lr = saved_lr;
1596 
1597 		/*
1598 		 * Note the trapframe does not have the SVC r13 so a fault
1599 		 * from an instruction with writeback to r13 in SVC mode is
1600 		 * not allowed. This should not happen as the kstack is
1601 		 * always valid.
1602 		 */
1603 	}
1604 
1605 	/*
1606 	 * Now let the early-abort fixup routine have a go, in case it
1607 	 * was an LDM, STM, LDC or STC that faulted.
1608 	 */
1609 
1610 	return early_abort_fixup(arg);
1611 }
1612 #endif	/* CPU_ARM6(LATE)/7/7TDMI */
1613 
1614 /*
1615  * CPU Setup code
1616  */
1617 
1618 #if defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) || \
1619 	defined(CPU_ARM8) || defined (CPU_ARM9) || defined(CPU_SA110) || \
1620 	defined(CPU_SA1100) || defined(CPU_SA1110) || \
1621 	defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
1622 	defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425) || \
1623 	defined(CPU_ARM10) || defined(CPU_ARM11)
1624 
1625 #define IGN	0
1626 #define OR	1
1627 #define BIC	2
1628 
1629 struct cpu_option {
1630 	const char *co_name;
1631 	int	co_falseop;
1632 	int	co_trueop;
1633 	int	co_value;
1634 };
1635 
1636 static u_int parse_cpu_options __P((char *, struct cpu_option *, u_int));
1637 
1638 static u_int
1639 parse_cpu_options(args, optlist, cpuctrl)
1640 	char *args;
1641 	struct cpu_option *optlist;
1642 	u_int cpuctrl;
1643 {
1644 	int integer;
1645 
1646 	if (args == NULL)
1647 		return(cpuctrl);
1648 
1649 	while (optlist->co_name) {
1650 		if (get_bootconf_option(args, optlist->co_name,
1651 		    BOOTOPT_TYPE_BOOLEAN, &integer)) {
1652 			if (integer) {
1653 				if (optlist->co_trueop == OR)
1654 					cpuctrl |= optlist->co_value;
1655 				else if (optlist->co_trueop == BIC)
1656 					cpuctrl &= ~optlist->co_value;
1657 			} else {
1658 				if (optlist->co_falseop == OR)
1659 					cpuctrl |= optlist->co_value;
1660 				else if (optlist->co_falseop == BIC)
1661 					cpuctrl &= ~optlist->co_value;
1662 			}
1663 		}
1664 		++optlist;
1665 	}
1666 	return(cpuctrl);
1667 }
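/*
 * Illustrative example (not from the original file): with boot args
 * containing "cpu.nocache=1" and the arm678_options table below,
 * get_bootconf_option() reports the boolean as true, co_trueop is BIC,
 * and CPU_CONTROL_IDC_ENABLE is cleared from cpuctrl; "cpu.nocache=0"
 * takes the co_falseop (OR) path and sets the bit instead.
 */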
1668 #endif /* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 || CPU_SA110 */
1669 
1670 #if defined (CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) \
1671 	|| defined(CPU_ARM8)
1672 struct cpu_option arm678_options[] = {
1673 #ifdef COMPAT_12
1674 	{ "nocache",		IGN, BIC, CPU_CONTROL_IDC_ENABLE },
1675 	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
1676 #endif	/* COMPAT_12 */
1677 	{ "cpu.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
1678 	{ "cpu.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
1679 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
1680 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
1681 	{ NULL,			IGN, IGN, 0 }
1682 };
1683 
1684 #endif	/* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 */
1685 
1686 #ifdef CPU_ARM6
1687 struct cpu_option arm6_options[] = {
1688 	{ "arm6.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
1689 	{ "arm6.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
1690 	{ "arm6.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
1691 	{ "arm6.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
1692 	{ NULL,			IGN, IGN, 0 }
1693 };
1694 
1695 void
1696 arm6_setup(args)
1697 	char *args;
1698 {
1699 	int cpuctrl, cpuctrlmask;
1700 
1701 	/* Set up default control registers bits */
1702 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1703 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1704 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
1705 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1706 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1707 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
1708 		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE
1709 		 | CPU_CONTROL_AFLT_ENABLE;
1710 
1711 #ifdef ARM6_LATE_ABORT
1712 	cpuctrl |= CPU_CONTROL_LABT_ENABLE;
1713 #endif	/* ARM6_LATE_ABORT */
1714 
1715 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1716 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1717 #endif
1718 
1719 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1720 	cpuctrl = parse_cpu_options(args, arm6_options, cpuctrl);
1721 
1722 #ifdef __ARMEB__
1723 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1724 #endif
1725 
1726 	/* Clear out the cache */
1727 	cpu_idcache_wbinv_all();
1728 
1729 	/* Set the control register */
1730 	curcpu()->ci_ctrl = cpuctrl;
1731 	cpu_control(0xffffffff, cpuctrl);
1732 }
1733 #endif	/* CPU_ARM6 */
1734 
1735 #ifdef CPU_ARM7
1736 struct cpu_option arm7_options[] = {
1737 	{ "arm7.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
1738 	{ "arm7.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
1739 	{ "arm7.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
1740 	{ "arm7.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
1741 #ifdef COMPAT_12
1742 	{ "fpaclk2",		BIC, OR,  CPU_CONTROL_CPCLK },
1743 #endif	/* COMPAT_12 */
1744 	{ "arm700.fpaclk",	BIC, OR,  CPU_CONTROL_CPCLK },
1745 	{ NULL,			IGN, IGN, 0 }
1746 };
1747 
1748 void
1749 arm7_setup(args)
1750 	char *args;
1751 {
1752 	int cpuctrl, cpuctrlmask;
1753 
1754 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1755 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1756 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
1757 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1758 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1759 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
1760 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_LABT_ENABLE
1761 		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE
1762 		 | CPU_CONTROL_AFLT_ENABLE;
1763 
1764 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1765 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1766 #endif
1767 
1768 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1769 	cpuctrl = parse_cpu_options(args, arm7_options, cpuctrl);
1770 
1771 #ifdef __ARMEB__
1772 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1773 #endif
1774 
1775 	/* Clear out the cache */
1776 	cpu_idcache_wbinv_all();
1777 
1778 	/* Set the control register */
1779 	curcpu()->ci_ctrl = cpuctrl;
1780 	cpu_control(0xffffffff, cpuctrl);
1781 }
1782 #endif	/* CPU_ARM7 */
1783 
1784 #ifdef CPU_ARM7TDMI
1785 struct cpu_option arm7tdmi_options[] = {
1786 	{ "arm7.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
1787 	{ "arm7.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
1788 	{ "arm7.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
1789 	{ "arm7.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
1790 #ifdef COMPAT_12
1791 	{ "fpaclk2",		BIC, OR,  CPU_CONTROL_CPCLK },
1792 #endif	/* COMPAT_12 */
1793 	{ "arm700.fpaclk",	BIC, OR,  CPU_CONTROL_CPCLK },
1794 	{ NULL,			IGN, IGN, 0 }
1795 };
1796 
1797 void
1798 arm7tdmi_setup(args)
1799 	char *args;
1800 {
1801 	int cpuctrl;
1802 
1803 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1804 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1805 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
1806 
1807 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1808 	cpuctrl = parse_cpu_options(args, arm7tdmi_options, cpuctrl);
1809 
1810 #ifdef __ARMEB__
1811 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1812 #endif
1813 
1814 	/* Clear out the cache */
1815 	cpu_idcache_wbinv_all();
1816 
1817 	/* Set the control register */
1818 	curcpu()->ci_ctrl = cpuctrl;
1819 	cpu_control(0xffffffff, cpuctrl);
1820 }
1821 #endif	/* CPU_ARM7TDMI */
1822 
1823 #ifdef CPU_ARM8
1824 struct cpu_option arm8_options[] = {
1825 	{ "arm8.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
1826 	{ "arm8.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
1827 	{ "arm8.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
1828 	{ "arm8.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
1829 #ifdef COMPAT_12
1830 	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
1831 #endif	/* COMPAT_12 */
1832 	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
1833 	{ "arm8.branchpredict",	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
1834 	{ NULL,			IGN, IGN, 0 }
1835 };
1836 
1837 void
1838 arm8_setup(args)
1839 	char *args;
1840 {
1841 	int integer;
1842 	int cpuctrl, cpuctrlmask;
1843 	int clocktest;
1844 	int setclock = 0;
1845 
1846 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1847 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1848 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
1849 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1850 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1851 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
1852 		 | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_ROM_ENABLE
1853 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE;
1854 
1855 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1856 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1857 #endif
1858 
1859 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1860 	cpuctrl = parse_cpu_options(args, arm8_options, cpuctrl);
1861 
1862 #ifdef __ARMEB__
1863 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1864 #endif
1865 
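	/*
	 * ARM8 clock/test register layout, as used below: bit 0 enables
	 * dynamic clock switching, bit 1 selects synchronous mode, bits
	 * 3:2 choose the fast clock and bits 7:5 are test bits.  A zero
	 * mask makes arm8_clock_config() a pure read.
	 */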
1866 	/* Get clock configuration */
1867 	clocktest = arm8_clock_config(0, 0) & 0x0f;
1868 
1869 	/* Special ARM8 clock and test configuration */
1870 	if (get_bootconf_option(args, "arm8.clock.reset", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1871 		clocktest = 0;
1872 		setclock = 1;
1873 	}
1874 	if (get_bootconf_option(args, "arm8.clock.dynamic", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1875 		if (integer)
1876 			clocktest |= 0x01;
1877 		else
1878 			clocktest &= ~(0x01);
1879 		setclock = 1;
1880 	}
1881 	if (get_bootconf_option(args, "arm8.clock.sync", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1882 		if (integer)
1883 			clocktest |= 0x02;
1884 		else
1885 			clocktest &= ~(0x02);
1886 		setclock = 1;
1887 	}
1888 	if (get_bootconf_option(args, "arm8.clock.fast", BOOTOPT_TYPE_BININT, &integer)) {
1889 		clocktest = (clocktest & ~0x0c) | (integer & 3) << 2;	/* fast-clock field is bits 3:2 */
1890 		setclock = 1;
1891 	}
1892 	if (get_bootconf_option(args, "arm8.test", BOOTOPT_TYPE_BININT, &integer)) {
1893 		clocktest |= (integer & 7) << 5;
1894 		setclock = 1;
1895 	}
1896 
1897 	/* Clear out the cache */
1898 	cpu_idcache_wbinv_all();
1899 
1900 	/* Set the control register */
1901 	curcpu()->ci_ctrl = cpuctrl;
1902 	cpu_control(0xffffffff, cpuctrl);
1903 
1904 	/* Set the clock/test register */
1905 	if (setclock)
1906 		arm8_clock_config(0x7f, clocktest);
1907 }
1908 #endif	/* CPU_ARM8 */
1909 
1910 #ifdef CPU_ARM9
1911 struct cpu_option arm9_options[] = {
1912 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1913 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1914 	{ "arm9.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1915 	{ "arm9.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
1916 	{ "arm9.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
1917 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
1918 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
1919 	{ "arm9.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
1920 	{ NULL,			IGN, IGN, 0 }
1921 };
1922 
1923 void
1924 arm9_setup(args)
1925 	char *args;
1926 {
1927 	int cpuctrl, cpuctrlmask;
1928 
1929 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1930 	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1931 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1932 	    | CPU_CONTROL_WBUF_ENABLE;
1933 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1934 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1935 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1936 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1937 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1938 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
1939 		 | CPU_CONTROL_ROUNDROBIN;
1940 
1941 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1942 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1943 #endif
1944 
1945 	cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);
1946 
1947 #ifdef __ARMEB__
1948 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1949 #endif
1950 
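	/*
	 * If the vector page lives at the high address, set the V bit
	 * so exceptions vector through 0xffff0000.
	 */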
1951 	if (vector_page == ARM_VECTORS_HIGH)
1952 		cpuctrl |= CPU_CONTROL_VECRELOC;
1953 
1954 	/* Clear out the cache */
1955 	cpu_idcache_wbinv_all();
1956 
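	/*
	 * Unlike the older setup routines, only the bits covered by
	 * cpuctrlmask are touched here; whatever else the firmware
	 * configured is left alone.
	 */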
1957 	/* Set the control register */
1958 	curcpu()->ci_ctrl = cpuctrl;
1959 	cpu_control(cpuctrlmask, cpuctrl);
1961 }
1962 #endif	/* CPU_ARM9 */
1963 
1964 #ifdef CPU_ARM10
1965 struct cpu_option arm10_options[] = {
1966 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1967 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1968 	{ "arm10.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1969 	{ "arm10.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
1970 	{ "arm10.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
1971 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
1972 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
1973 	{ "arm10.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
1974 	{ NULL,			IGN, IGN, 0 }
1975 };
1976 
1977 void
1978 arm10_setup(args)
1979 	char *args;
1980 {
1981 	int cpuctrl, cpuctrlmask;
1982 
1983 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1984 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1985 	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
1986 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1987 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1988 	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1989 	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1990 	    | CPU_CONTROL_BPRD_ENABLE
1991 	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
1992 
1993 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1994 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1995 #endif
1996 
1997 	cpuctrl = parse_cpu_options(args, arm10_options, cpuctrl);
1998 
1999 #ifdef __ARMEB__
2000 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2001 #endif
2002 
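	/*
	 * Write back and invalidate the caches, change the control
	 * register, then invalidate once more: toggling the cache
	 * enables may leave stale lines behind, so nothing is trusted
	 * across the transition.  The "c7, c7, 0" operation below
	 * invalidates both caches at once; the value in r0 is ignored.
	 */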
2003 	/* Clear out the cache */
2004 	cpu_idcache_wbinv_all();
2005 
2006 	/* Now really make sure they are clean: invalidate both caches directly. */
2007 	asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
2008 
2009 	/* Set the control register */
2010 	curcpu()->ci_ctrl = cpuctrl;
2011 	cpu_control(0xffffffff, cpuctrl);
2012 
2013 	/* And again. */
2014 	cpu_idcache_wbinv_all();
2015 }
2016 #endif	/* CPU_ARM10 */
2017 
2018 #ifdef CPU_ARM11
2019 struct cpu_option arm11_options[] = {
2020 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2021 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2022 	{ "arm11.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2023 	{ "arm11.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2024 	{ "arm11.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2025 	{ NULL,			IGN, IGN, 0 }
2026 };
2027 
2028 void
2029 arm11_setup(args)
2030 	char *args;
2031 {
2032 	int cpuctrl, cpuctrlmask;
2033 
2034 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2035 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2036 	    /* | CPU_CONTROL_BPRD_ENABLE */;
2037 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2038 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2039 	    | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BPRD_ENABLE
2040 	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2041 	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
2042 
2043 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2044 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2045 #endif
2046 
2047 	cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);
2048 
2049 #ifdef __ARMEB__
2050 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2051 #endif
2052 
2053 	/* Clear out the cache */
2054 	cpu_idcache_wbinv_all();
2055 
2056 	/* Now really make sure they are clean: invalidate both caches directly. */
2057 	asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
2058 
2059 	/* Set the control register */
2060 	curcpu()->ci_ctrl = cpuctrl;
2061 	cpu_control(0xffffffff, cpuctrl);
2062 
2063 	/* And again. */
2064 	cpu_idcache_wbinv_all();
2065 }
2066 #endif	/* CPU_ARM11 */
2067 
2068 #ifdef CPU_SA110
2069 struct cpu_option sa110_options[] = {
2070 #ifdef COMPAT_12
2071 	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2072 	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
2073 #endif	/* COMPAT_12 */
2074 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2075 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2076 	{ "sa110.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2077 	{ "sa110.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2078 	{ "sa110.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2079 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2080 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2081 	{ "sa110.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2082 	{ NULL,			IGN, IGN, 0 }
2083 };
2084 
2085 void
2086 sa110_setup(args)
2087 	char *args;
2088 {
2089 	int cpuctrl, cpuctrlmask;
2090 
2091 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2092 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2093 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2094 		 | CPU_CONTROL_WBUF_ENABLE;
2095 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2096 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2097 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2098 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2099 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2100 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
2101 		 | CPU_CONTROL_CPCLK;
2102 
2103 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2104 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2105 #endif
2106 
2107 	cpuctrl = parse_cpu_options(args, sa110_options, cpuctrl);
2108 
2109 #ifdef __ARMEB__
2110 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2111 #endif
2112 
2113 	/* Clear out the cache */
2114 	cpu_idcache_wbinv_all();
2115 
2116 	/* Set the control register */
2117 	curcpu()->ci_ctrl = cpuctrl;
2118 /*	cpu_control(cpuctrlmask, cpuctrl);*/
2119 	cpu_control(0xffffffff, cpuctrl);
2120 
2121 	/*
2122 	 * Enable clock switching.  Note that this doesn't read or write
2123 	 * r0; r0 is only there to make the instruction valid asm.
2124 	 */
2125 	__asm ("mcr p15, 0, r0, c15, c1, 2");
2126 }
2127 #endif	/* CPU_SA110 */
2128 
2129 #if defined(CPU_SA1100) || defined(CPU_SA1110)
2130 struct cpu_option sa11x0_options[] = {
2131 #ifdef COMPAT_12
2132 	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2133 	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
2134 #endif	/* COMPAT_12 */
2135 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2136 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2137 	{ "sa11x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2138 	{ "sa11x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2139 	{ "sa11x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2140 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2141 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2142 	{ "sa11x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2143 	{ NULL,			IGN, IGN, 0 }
2144 };
2145 
2146 void
2147 sa11x0_setup(args)
2148 	char *args;
2149 {
2150 	int cpuctrl, cpuctrlmask;
2151 
2152 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2153 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2154 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2155 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
2156 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2157 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2158 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2159 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2160 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2161 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
2162 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
2163 
2164 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2165 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2166 #endif
2167 
2168 	cpuctrl = parse_cpu_options(args, sa11x0_options, cpuctrl);
2169 
2170 #ifdef __ARMEB__
2171 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2172 #endif
2173 
2174 	if (vector_page == ARM_VECTORS_HIGH)
2175 		cpuctrl |= CPU_CONTROL_VECRELOC;
2176 
2177 	/* Clear out the cache */
2178 	cpu_idcache_wbinv_all();
2179 
2180 	/* Set the control register */
	curcpu()->ci_ctrl = cpuctrl;	/* record it, as the other setup routines do */
2181 	cpu_control(0xffffffff, cpuctrl);
2182 }
2183 #endif	/* CPU_SA1100 || CPU_SA1110 */
2184 
2185 #if defined(CPU_IXP12X0)
2186 struct cpu_option ixp12x0_options[] = {
2187 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2188 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2189 	{ "ixp12x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2190 	{ "ixp12x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2191 	{ "ixp12x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2192 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2193 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2194 	{ "ixp12x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2195 	{ NULL,			IGN, IGN, 0 }
2196 };
2197 
2198 void
2199 ixp12x0_setup(args)
2200 	char *args;
2201 {
2202 	int cpuctrl, cpuctrlmask;
2203 
2205 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE
2206 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_SYST_ENABLE
2207 		 | CPU_CONTROL_IC_ENABLE;
2208 
2209 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_AFLT_ENABLE
2210 		 | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE
2211 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_SYST_ENABLE
2212 		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_IC_ENABLE
2213 		 | CPU_CONTROL_VECRELOC;
2214 
2215 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2216 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2217 #endif
2218 
2219 	cpuctrl = parse_cpu_options(args, ixp12x0_options, cpuctrl);
2220 
2221 #ifdef __ARMEB__
2222 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2223 #endif
2224 
2225 	if (vector_page == ARM_VECTORS_HIGH)
2226 		cpuctrl |= CPU_CONTROL_VECRELOC;
2227 
2228 	/* Clear out the cache */
2229 	cpu_idcache_wbinv_all();
2230 
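	/*
	 * As in arm9_setup(), only the bits in cpuctrlmask are changed;
	 * the full-register write is left commented out below.
	 */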
2231 	/* Set the control register */
2232 	curcpu()->ci_ctrl = cpuctrl;
2233 	/* cpu_control(0xffffffff, cpuctrl); */
2234 	cpu_control(cpuctrlmask, cpuctrl);
2235 }
2236 #endif /* CPU_IXP12X0 */
2237 
2238 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
2239     defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425)
2240 struct cpu_option xscale_options[] = {
2241 #ifdef COMPAT_12
2242 	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2243 	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2244 #endif	/* COMPAT_12 */
2245 	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2246 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2247 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2248 	{ "xscale.branchpredict", BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2249 	{ "xscale.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2250 	{ "xscale.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2251 	{ "xscale.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2252 	{ NULL,			IGN, IGN, 0 }
2253 };
2254 
2255 void
2256 xscale_setup(args)
2257 	char *args;
2258 {
2259 	uint32_t auxctl;
2260 	int cpuctrl, cpuctrlmask;
2261 
2262 	/*
2263 	 * The XScale Write Buffer is always enabled.  Our option
2264 	 * is to enable/disable coalescing.  Note that bits 6:3
2265 	 * must always be enabled.
2266 	 */
2267 
2268 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2269 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2270 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2271 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
2272 		 | CPU_CONTROL_BPRD_ENABLE;
2273 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2274 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2275 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2276 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2277 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2278 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
2279 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
2280 
2281 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2282 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2283 #endif
2284 
2285 	cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl);
2286 
2287 #ifdef __ARMEB__
2288 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2289 #endif
2290 
2291 	if (vector_page == ARM_VECTORS_HIGH)
2292 		cpuctrl |= CPU_CONTROL_VECRELOC;
2293 
2294 	/* Clear out the cache */
2295 	cpu_idcache_wbinv_all();
2296 
2297 	/*
2298 	 * Set the control register.  Note that bits 6:3 must always
2299 	 * be set to 1.
2300 	 */
2301 	curcpu()->ci_ctrl = cpuctrl;
2302 /*	cpu_control(cpuctrlmask, cpuctrl);*/
2303 	cpu_control(0xffffffff, cpuctrl);
2304 
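	/*
	 * The auxiliary control register (cp15 register 1, opcode2 1)
	 * carries the XSCALE_AUXCTL_K bit; setting it disables
	 * write-buffer coalescing.
	 */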
2305 	/* Configure write coalescing (enabled unless XSCALE_NO_COALESCE_WRITES) */
2306 	__asm __volatile("mrc p15, 0, %0, c1, c0, 1"
2307 		: "=r" (auxctl));
2308 #ifdef XSCALE_NO_COALESCE_WRITES
2309 	auxctl |= XSCALE_AUXCTL_K;
2310 #else
2311 	auxctl &= ~XSCALE_AUXCTL_K;
2312 #endif
2313 	__asm __volatile("mcr p15, 0, %0, c1, c0, 1"
2314 		: : "r" (auxctl));
2315 }
2316 #endif	/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || __CPU_XSCALE_PXA2XX || CPU_XSCALE_IXP425 */
2317