1 /*	$NetBSD: cpufunc.c,v 1.71 2005/06/03 15:55:55 rearnsha Exp $	*/
2 
3 /*
4  * arm7tdmi support code Copyright (c) 2001 John Fremlin
5  * arm8 support code Copyright (c) 1997 ARM Limited
6  * arm8 support code Copyright (c) 1997 Causality Limited
7  * arm9 support code Copyright (C) 2001 ARM Ltd
8  * Copyright (c) 1997 Mark Brinicombe.
9  * Copyright (c) 1997 Causality Limited
10  * All rights reserved.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. All advertising materials mentioning features or use of this software
21  *    must display the following acknowledgement:
22  *	This product includes software developed by Causality Limited.
23  * 4. The name of Causality Limited may not be used to endorse or promote
24  *    products derived from this software without specific prior written
25  *    permission.
26  *
27  * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
28  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
29  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
30  * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
31  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
32  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
33  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37  * SUCH DAMAGE.
38  *
39  * RiscBSD kernel project
40  *
41  * cpufunc.c
42  *
43  * C functions for supporting CPU / MMU / TLB specific operations.
44  *
45  * Created      : 30/01/97
46  */
47 
48 #include <sys/cdefs.h>
49 __KERNEL_RCSID(0, "$NetBSD: cpufunc.c,v 1.71 2005/06/03 15:55:55 rearnsha Exp $");
50 
51 #include "opt_compat_netbsd.h"
52 #include "opt_cpuoptions.h"
53 #include "opt_perfctrs.h"
54 
55 #include <sys/types.h>
56 #include <sys/param.h>
57 #include <sys/pmc.h>
58 #include <sys/systm.h>
59 #include <machine/cpu.h>
60 #include <machine/bootconfig.h>
61 #include <arch/arm/arm/disassem.h>
62 
63 #include <uvm/uvm.h>
64 
65 #include <arm/cpuconf.h>
66 #include <arm/cpufunc.h>
67 
68 #ifdef CPU_XSCALE_80200
69 #include <arm/xscale/i80200reg.h>
70 #include <arm/xscale/i80200var.h>
71 #endif
72 
73 #ifdef CPU_XSCALE_80321
74 #include <arm/xscale/i80321reg.h>
75 #include <arm/xscale/i80321var.h>
76 #endif
77 
78 #ifdef CPU_XSCALE_IXP425
79 #include <arm/xscale/ixp425reg.h>
80 #include <arm/xscale/ixp425var.h>
81 #endif
82 
83 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321)
84 #include <arm/xscale/xscalereg.h>
85 #endif
86 
87 #if defined(PERFCTRS)
88 struct arm_pmc_funcs *arm_pmc;
89 #endif
90 
91 /* PRIMARY CACHE VARIABLES */
92 int	arm_picache_size;
93 int	arm_picache_line_size;
94 int	arm_picache_ways;
95 
96 int	arm_pdcache_size;	/* and unified */
97 int	arm_pdcache_line_size;
98 int	arm_pdcache_ways;
99 
100 int	arm_pcache_type;
101 int	arm_pcache_unified;
102 
103 int	arm_dcache_align;
104 int	arm_dcache_align_mask;
105 
106 /* 1 == use cpu_sleep(), 0 == don't */
107 int cpu_do_powersave;
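/*
 * Illustrative sketch (an assumption, not code from this file): the
 * platform idle path is expected to consult this flag before calling
 * the per-CPU sleep hook, along the lines of
 *
 *	if (cpu_do_powersave)
 *		cpu_sleep(0);		(dispatches to cpufuncs.cf_sleep)
 */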
108 
109 #ifdef CPU_ARM3
110 struct cpu_functions arm3_cpufuncs = {
111 	/* CPU functions */
112 
113 	cpufunc_id,			/* id			*/
114 	cpufunc_nullop,			/* cpwait		*/
115 
116 	/* MMU functions */
117 
118 	arm3_control,			/* control		*/
119 	NULL,				/* domain		*/
120 	NULL,				/* setttb		*/
121 	NULL,				/* faultstatus		*/
122 	NULL,				/* faultaddress		*/
123 
124 	/* TLB functions */
125 
126 	cpufunc_nullop,			/* tlb_flushID		*/
127 	(void *)cpufunc_nullop,		/* tlb_flushID_SE	*/
128 	cpufunc_nullop,			/* tlb_flushI		*/
129 	(void *)cpufunc_nullop,		/* tlb_flushI_SE	*/
130 	cpufunc_nullop,			/* tlb_flushD		*/
131 	(void *)cpufunc_nullop,		/* tlb_flushD_SE	*/
132 
133 	/* Cache operations */
134 
135 	cpufunc_nullop,			/* icache_sync_all	*/
136 	(void *)cpufunc_nullop,		/* icache_sync_range	*/
137 
138 	arm3_cache_flush,		/* dcache_wbinv_all	*/
139 	(void *)arm3_cache_flush,	/* dcache_wbinv_range	*/
140 	(void *)arm3_cache_flush,	/* dcache_inv_range	*/
141 	(void *)cpufunc_nullop,		/* dcache_wb_range	*/
142 
143 	arm3_cache_flush,		/* idcache_wbinv_all	*/
144 	(void *)arm3_cache_flush,	/* idcache_wbinv_range	*/
145 
146 	/* Other functions */
147 
148 	cpufunc_nullop,			/* flush_prefetchbuf	*/
149 	cpufunc_nullop,			/* drain_writebuf	*/
150 	cpufunc_nullop,			/* flush_brnchtgt_C	*/
151 	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/
152 
153 	(void *)cpufunc_nullop,		/* sleep		*/
154 
155 	/* Soft functions */
156 
157 	early_abort_fixup,		/* dataabt_fixup	*/
158 	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
159 
160 	NULL,				/* context_switch	*/
161 
162 	(void *)cpufunc_nullop		/* cpu setup		*/
163 
164 };
165 #endif	/* CPU_ARM3 */
166 
167 #ifdef CPU_ARM6
168 struct cpu_functions arm6_cpufuncs = {
169 	/* CPU functions */
170 
171 	cpufunc_id,			/* id			*/
172 	cpufunc_nullop,			/* cpwait		*/
173 
174 	/* MMU functions */
175 
176 	cpufunc_control,		/* control		*/
177 	cpufunc_domains,		/* domain		*/
178 	arm67_setttb,			/* setttb		*/
179 	cpufunc_faultstatus,		/* faultstatus		*/
180 	cpufunc_faultaddress,		/* faultaddress		*/
181 
182 	/* TLB functions */
183 
184 	arm67_tlb_flush,		/* tlb_flushID		*/
185 	arm67_tlb_purge,		/* tlb_flushID_SE	*/
186 	arm67_tlb_flush,		/* tlb_flushI		*/
187 	arm67_tlb_purge,		/* tlb_flushI_SE	*/
188 	arm67_tlb_flush,		/* tlb_flushD		*/
189 	arm67_tlb_purge,		/* tlb_flushD_SE	*/
190 
191 	/* Cache operations */
192 
193 	cpufunc_nullop,			/* icache_sync_all	*/
194 	(void *)cpufunc_nullop,		/* icache_sync_range	*/
195 
196 	arm67_cache_flush,		/* dcache_wbinv_all	*/
197 	(void *)arm67_cache_flush,	/* dcache_wbinv_range	*/
198 	(void *)arm67_cache_flush,	/* dcache_inv_range	*/
199 	(void *)cpufunc_nullop,		/* dcache_wb_range	*/
200 
201 	arm67_cache_flush,		/* idcache_wbinv_all	*/
202 	(void *)arm67_cache_flush,	/* idcache_wbinv_range	*/
203 
204 	/* Other functions */
205 
206 	cpufunc_nullop,			/* flush_prefetchbuf	*/
207 	cpufunc_nullop,			/* drain_writebuf	*/
208 	cpufunc_nullop,			/* flush_brnchtgt_C	*/
209 	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/
210 
211 	(void *)cpufunc_nullop,		/* sleep		*/
212 
213 	/* Soft functions */
214 
215 #ifdef ARM6_LATE_ABORT
216 	late_abort_fixup,		/* dataabt_fixup	*/
217 #else
218 	early_abort_fixup,		/* dataabt_fixup	*/
219 #endif
220 	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
221 
222 	arm67_context_switch,		/* context_switch	*/
223 
224 	arm6_setup			/* cpu setup		*/
225 
226 };
227 #endif	/* CPU_ARM6 */
228 
229 #ifdef CPU_ARM7
230 struct cpu_functions arm7_cpufuncs = {
231 	/* CPU functions */
232 
233 	cpufunc_id,			/* id			*/
234 	cpufunc_nullop,			/* cpwait		*/
235 
236 	/* MMU functions */
237 
238 	cpufunc_control,		/* control		*/
239 	cpufunc_domains,		/* domain		*/
240 	arm67_setttb,			/* setttb		*/
241 	cpufunc_faultstatus,		/* faultstatus		*/
242 	cpufunc_faultaddress,		/* faultaddress		*/
243 
244 	/* TLB functions */
245 
246 	arm67_tlb_flush,		/* tlb_flushID		*/
247 	arm67_tlb_purge,		/* tlb_flushID_SE	*/
248 	arm67_tlb_flush,		/* tlb_flushI		*/
249 	arm67_tlb_purge,		/* tlb_flushI_SE	*/
250 	arm67_tlb_flush,		/* tlb_flushD		*/
251 	arm67_tlb_purge,		/* tlb_flushD_SE	*/
252 
253 	/* Cache operations */
254 
255 	cpufunc_nullop,			/* icache_sync_all	*/
256 	(void *)cpufunc_nullop,		/* icache_sync_range	*/
257 
258 	arm67_cache_flush,		/* dcache_wbinv_all	*/
259 	(void *)arm67_cache_flush,	/* dcache_wbinv_range	*/
260 	(void *)arm67_cache_flush,	/* dcache_inv_range	*/
261 	(void *)cpufunc_nullop,		/* dcache_wb_range	*/
262 
263 	arm67_cache_flush,		/* idcache_wbinv_all	*/
264 	(void *)arm67_cache_flush,	/* idcache_wbinv_range	*/
265 
266 	/* Other functions */
267 
268 	cpufunc_nullop,			/* flush_prefetchbuf	*/
269 	cpufunc_nullop,			/* drain_writebuf	*/
270 	cpufunc_nullop,			/* flush_brnchtgt_C	*/
271 	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/
272 
273 	(void *)cpufunc_nullop,		/* sleep		*/
274 
275 	/* Soft functions */
276 
277 	late_abort_fixup,		/* dataabt_fixup	*/
278 	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
279 
280 	arm67_context_switch,		/* context_switch	*/
281 
282 	arm7_setup			/* cpu setup		*/
283 
284 };
285 #endif	/* CPU_ARM7 */
286 
287 #ifdef CPU_ARM7TDMI
288 struct cpu_functions arm7tdmi_cpufuncs = {
289 	/* CPU functions */
290 
291 	cpufunc_id,			/* id			*/
292 	cpufunc_nullop,			/* cpwait		*/
293 
294 	/* MMU functions */
295 
296 	cpufunc_control,		/* control		*/
297 	cpufunc_domains,		/* domain		*/
298 	arm7tdmi_setttb,		/* setttb		*/
299 	cpufunc_faultstatus,		/* faultstatus		*/
300 	cpufunc_faultaddress,		/* faultaddress		*/
301 
302 	/* TLB functions */
303 
304 	arm7tdmi_tlb_flushID,		/* tlb_flushID		*/
305 	arm7tdmi_tlb_flushID_SE,	/* tlb_flushID_SE	*/
306 	arm7tdmi_tlb_flushID,		/* tlb_flushI		*/
307 	arm7tdmi_tlb_flushID_SE,	/* tlb_flushI_SE	*/
308 	arm7tdmi_tlb_flushID,		/* tlb_flushD		*/
309 	arm7tdmi_tlb_flushID_SE,	/* tlb_flushD_SE	*/
310 
311 	/* Cache operations */
312 
313 	cpufunc_nullop,			/* icache_sync_all	*/
314 	(void *)cpufunc_nullop,		/* icache_sync_range	*/
315 
316 	arm7tdmi_cache_flushID,		/* dcache_wbinv_all	*/
317 	(void *)arm7tdmi_cache_flushID,	/* dcache_wbinv_range	*/
318 	(void *)arm7tdmi_cache_flushID,	/* dcache_inv_range	*/
319 	(void *)cpufunc_nullop,		/* dcache_wb_range	*/
320 
321 	arm7tdmi_cache_flushID,		/* idcache_wbinv_all	*/
322 	(void *)arm7tdmi_cache_flushID,	/* idcache_wbinv_range	*/
323 
324 	/* Other functions */
325 
326 	cpufunc_nullop,			/* flush_prefetchbuf	*/
327 	cpufunc_nullop,			/* drain_writebuf	*/
328 	cpufunc_nullop,			/* flush_brnchtgt_C	*/
329 	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/
330 
331 	(void *)cpufunc_nullop,		/* sleep		*/
332 
333 	/* Soft functions */
334 
335 	late_abort_fixup,		/* dataabt_fixup	*/
336 	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
337 
338 	arm7tdmi_context_switch,	/* context_switch	*/
339 
340 	arm7tdmi_setup			/* cpu setup		*/
341 
342 };
343 #endif	/* CPU_ARM7TDMI */
344 
345 #ifdef CPU_ARM8
346 struct cpu_functions arm8_cpufuncs = {
347 	/* CPU functions */
348 
349 	cpufunc_id,			/* id			*/
350 	cpufunc_nullop,			/* cpwait		*/
351 
352 	/* MMU functions */
353 
354 	cpufunc_control,		/* control		*/
355 	cpufunc_domains,		/* domain		*/
356 	arm8_setttb,			/* setttb		*/
357 	cpufunc_faultstatus,		/* faultstatus		*/
358 	cpufunc_faultaddress,		/* faultaddress		*/
359 
360 	/* TLB functions */
361 
362 	arm8_tlb_flushID,		/* tlb_flushID		*/
363 	arm8_tlb_flushID_SE,		/* tlb_flushID_SE	*/
364 	arm8_tlb_flushID,		/* tlb_flushI		*/
365 	arm8_tlb_flushID_SE,		/* tlb_flushI_SE	*/
366 	arm8_tlb_flushID,		/* tlb_flushD		*/
367 	arm8_tlb_flushID_SE,		/* tlb_flushD_SE	*/
368 
369 	/* Cache operations */
370 
371 	cpufunc_nullop,			/* icache_sync_all	*/
372 	(void *)cpufunc_nullop,		/* icache_sync_range	*/
373 
374 	arm8_cache_purgeID,		/* dcache_wbinv_all	*/
375 	(void *)arm8_cache_purgeID,	/* dcache_wbinv_range	*/
376 /*XXX*/	(void *)arm8_cache_purgeID,	/* dcache_inv_range	*/
377 	(void *)arm8_cache_cleanID,	/* dcache_wb_range	*/
378 
379 	arm8_cache_purgeID,		/* idcache_wbinv_all	*/
380 	(void *)arm8_cache_purgeID,	/* idcache_wbinv_range	*/
381 
382 	/* Other functions */
383 
384 	cpufunc_nullop,			/* flush_prefetchbuf	*/
385 	cpufunc_nullop,			/* drain_writebuf	*/
386 	cpufunc_nullop,			/* flush_brnchtgt_C	*/
387 	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/
388 
389 	(void *)cpufunc_nullop,		/* sleep		*/
390 
391 	/* Soft functions */
392 
393 	cpufunc_null_fixup,		/* dataabt_fixup	*/
394 	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
395 
396 	arm8_context_switch,		/* context_switch	*/
397 
398 	arm8_setup			/* cpu setup		*/
399 };
400 #endif	/* CPU_ARM8 */
401 
402 #ifdef CPU_ARM9
403 struct cpu_functions arm9_cpufuncs = {
404 	/* CPU functions */
405 
406 	cpufunc_id,			/* id			*/
407 	cpufunc_nullop,			/* cpwait		*/
408 
409 	/* MMU functions */
410 
411 	cpufunc_control,		/* control		*/
412 	cpufunc_domains,		/* domain		*/
413 	arm9_setttb,			/* setttb		*/
414 	cpufunc_faultstatus,		/* faultstatus		*/
415 	cpufunc_faultaddress,		/* faultaddress		*/
416 
417 	/* TLB functions */
418 
419 	armv4_tlb_flushID,		/* tlb_flushID		*/
420 	arm9_tlb_flushID_SE,		/* tlb_flushID_SE	*/
421 	armv4_tlb_flushI,		/* tlb_flushI		*/
422 	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
423 	armv4_tlb_flushD,		/* tlb_flushD		*/
424 	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/
425 
426 	/* Cache operations */
427 
428 	arm9_icache_sync_all,		/* icache_sync_all	*/
429 	arm9_icache_sync_range,		/* icache_sync_range	*/
430 
431 	arm9_dcache_wbinv_all,		/* dcache_wbinv_all	*/
432 	arm9_dcache_wbinv_range,	/* dcache_wbinv_range	*/
433 /*XXX*/	arm9_dcache_wbinv_range,	/* dcache_inv_range	*/
434 	arm9_dcache_wb_range,		/* dcache_wb_range	*/
435 
436 	arm9_idcache_wbinv_all,		/* idcache_wbinv_all	*/
437 	arm9_idcache_wbinv_range,	/* idcache_wbinv_range	*/
438 
439 	/* Other functions */
440 
441 	cpufunc_nullop,			/* flush_prefetchbuf	*/
442 	armv4_drain_writebuf,		/* drain_writebuf	*/
443 	cpufunc_nullop,			/* flush_brnchtgt_C	*/
444 	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/
445 
446 	(void *)cpufunc_nullop,		/* sleep		*/
447 
448 	/* Soft functions */
449 
450 	cpufunc_null_fixup,		/* dataabt_fixup	*/
451 	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
452 
453 	arm9_context_switch,		/* context_switch	*/
454 
455 	arm9_setup			/* cpu setup		*/
456 
457 };
458 #endif /* CPU_ARM9 */
459 
460 #ifdef CPU_ARM10
461 struct cpu_functions arm10_cpufuncs = {
462 	/* CPU functions */
463 
464 	cpufunc_id,			/* id			*/
465 	cpufunc_nullop,			/* cpwait		*/
466 
467 	/* MMU functions */
468 
469 	cpufunc_control,		/* control		*/
470 	cpufunc_domains,		/* domain		*/
471 	arm10_setttb,			/* setttb		*/
472 	cpufunc_faultstatus,		/* faultstatus		*/
473 	cpufunc_faultaddress,		/* faultaddress		*/
474 
475 	/* TLB functions */
476 
477 	armv4_tlb_flushID,		/* tlb_flushID		*/
478 	arm10_tlb_flushID_SE,		/* tlb_flushID_SE	*/
479 	armv4_tlb_flushI,		/* tlb_flushI		*/
480 	arm10_tlb_flushI_SE,		/* tlb_flushI_SE	*/
481 	armv4_tlb_flushD,		/* tlb_flushD		*/
482 	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/
483 
484 	/* Cache operations */
485 
486 	armv5_icache_sync_all,		/* icache_sync_all	*/
487 	armv5_icache_sync_range,	/* icache_sync_range	*/
488 
489 	armv5_dcache_wbinv_all,		/* dcache_wbinv_all	*/
490 	armv5_dcache_wbinv_range,	/* dcache_wbinv_range	*/
491 /*XXX*/	armv5_dcache_wbinv_range,	/* dcache_inv_range	*/
492 	armv5_dcache_wb_range,		/* dcache_wb_range	*/
493 
494 	armv5_idcache_wbinv_all,	/* idcache_wbinv_all	*/
495 	armv5_idcache_wbinv_range,	/* idcache_wbinv_range	*/
496 
497 	/* Other functions */
498 
499 	cpufunc_nullop,			/* flush_prefetchbuf	*/
500 	armv4_drain_writebuf,		/* drain_writebuf	*/
501 	cpufunc_nullop,			/* flush_brnchtgt_C	*/
502 	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/
503 
504 	(void *)cpufunc_nullop,		/* sleep		*/
505 
506 	/* Soft functions */
507 
508 	cpufunc_null_fixup,		/* dataabt_fixup	*/
509 	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
510 
511 	arm10_context_switch,		/* context_switch	*/
512 
513 	arm10_setup			/* cpu setup		*/
514 
515 };
516 #endif /* CPU_ARM10 */
517 
518 #ifdef CPU_ARM11
519 struct cpu_functions arm11_cpufuncs = {
520 	/* CPU functions */
521 
522 	cpufunc_id,			/* id			*/
523 	cpufunc_nullop,			/* cpwait		*/
524 
525 	/* MMU functions */
526 
527 	cpufunc_control,		/* control		*/
528 	cpufunc_domains,		/* domain		*/
529 	arm11_setttb,			/* setttb		*/
530 	cpufunc_faultstatus,		/* faultstatus		*/
531 	cpufunc_faultaddress,		/* faultaddress		*/
532 
533 	/* TLB functions */
534 
535 	arm11_tlb_flushID,		/* tlb_flushID		*/
536 	arm11_tlb_flushID_SE,		/* tlb_flushID_SE	*/
537 	arm11_tlb_flushI,		/* tlb_flushI		*/
538 	arm11_tlb_flushI_SE,		/* tlb_flushI_SE	*/
539 	arm11_tlb_flushD,		/* tlb_flushD		*/
540 	arm11_tlb_flushD_SE,		/* tlb_flushD_SE	*/
541 
542 	/* Cache operations */
543 
544 	armv5_icache_sync_all,		/* icache_sync_all	*/
545 	armv5_icache_sync_range,	/* icache_sync_range	*/
546 
547 	armv5_dcache_wbinv_all,		/* dcache_wbinv_all	*/
548 	armv5_dcache_wbinv_range,	/* dcache_wbinv_range	*/
549 /*XXX*/	armv5_dcache_wbinv_range,	/* dcache_inv_range	*/
550 	armv5_dcache_wb_range,		/* dcache_wb_range	*/
551 
552 	armv5_idcache_wbinv_all,	/* idcache_wbinv_all	*/
553 	armv5_idcache_wbinv_range,	/* idcache_wbinv_range	*/
554 
555 	/* Other functions */
556 
557 	cpufunc_nullop,			/* flush_prefetchbuf	*/
558 	arm11_drain_writebuf,		/* drain_writebuf	*/
559 	cpufunc_nullop,			/* flush_brnchtgt_C	*/
560 	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/
561 
562 	(void *)cpufunc_nullop,		/* sleep		*/
563 
564 	/* Soft functions */
565 
566 	cpufunc_null_fixup,		/* dataabt_fixup	*/
567 	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
568 
569 	arm11_context_switch,		/* context_switch	*/
570 
571 	arm11_setup			/* cpu setup		*/
572 
573 };
574 #endif /* CPU_ARM11 */
575 
576 #ifdef CPU_SA110
577 struct cpu_functions sa110_cpufuncs = {
578 	/* CPU functions */
579 
580 	cpufunc_id,			/* id			*/
581 	cpufunc_nullop,			/* cpwait		*/
582 
583 	/* MMU functions */
584 
585 	cpufunc_control,		/* control		*/
586 	cpufunc_domains,		/* domain		*/
587 	sa1_setttb,			/* setttb		*/
588 	cpufunc_faultstatus,		/* faultstatus		*/
589 	cpufunc_faultaddress,		/* faultaddress		*/
590 
591 	/* TLB functions */
592 
593 	armv4_tlb_flushID,		/* tlb_flushID		*/
594 	sa1_tlb_flushID_SE,		/* tlb_flushID_SE	*/
595 	armv4_tlb_flushI,		/* tlb_flushI		*/
596 	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
597 	armv4_tlb_flushD,		/* tlb_flushD		*/
598 	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/
599 
600 	/* Cache operations */
601 
602 	sa1_cache_syncI,		/* icache_sync_all	*/
603 	sa1_cache_syncI_rng,		/* icache_sync_range	*/
604 
605 	sa1_cache_purgeD,		/* dcache_wbinv_all	*/
606 	sa1_cache_purgeD_rng,		/* dcache_wbinv_range	*/
607 /*XXX*/	sa1_cache_purgeD_rng,		/* dcache_inv_range	*/
608 	sa1_cache_cleanD_rng,		/* dcache_wb_range	*/
609 
610 	sa1_cache_purgeID,		/* idcache_wbinv_all	*/
611 	sa1_cache_purgeID_rng,		/* idcache_wbinv_range	*/
612 
613 	/* Other functions */
614 
615 	cpufunc_nullop,			/* flush_prefetchbuf	*/
616 	armv4_drain_writebuf,		/* drain_writebuf	*/
617 	cpufunc_nullop,			/* flush_brnchtgt_C	*/
618 	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/
619 
620 	(void *)cpufunc_nullop,		/* sleep		*/
621 
622 	/* Soft functions */
623 
624 	cpufunc_null_fixup,		/* dataabt_fixup	*/
625 	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
626 
627 	sa110_context_switch,		/* context_switch	*/
628 
629 	sa110_setup			/* cpu setup		*/
630 };
631 #endif	/* CPU_SA110 */
632 
633 #if defined(CPU_SA1100) || defined(CPU_SA1110)
634 struct cpu_functions sa11x0_cpufuncs = {
635 	/* CPU functions */
636 
637 	cpufunc_id,			/* id			*/
638 	cpufunc_nullop,			/* cpwait		*/
639 
640 	/* MMU functions */
641 
642 	cpufunc_control,		/* control		*/
643 	cpufunc_domains,		/* domain		*/
644 	sa1_setttb,			/* setttb		*/
645 	cpufunc_faultstatus,		/* faultstatus		*/
646 	cpufunc_faultaddress,		/* faultaddress		*/
647 
648 	/* TLB functions */
649 
650 	armv4_tlb_flushID,		/* tlb_flushID		*/
651 	sa1_tlb_flushID_SE,		/* tlb_flushID_SE	*/
652 	armv4_tlb_flushI,		/* tlb_flushI		*/
653 	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
654 	armv4_tlb_flushD,		/* tlb_flushD		*/
655 	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/
656 
657 	/* Cache operations */
658 
659 	sa1_cache_syncI,		/* icache_sync_all	*/
660 	sa1_cache_syncI_rng,		/* icache_sync_range	*/
661 
662 	sa1_cache_purgeD,		/* dcache_wbinv_all	*/
663 	sa1_cache_purgeD_rng,		/* dcache_wbinv_range	*/
664 /*XXX*/	sa1_cache_purgeD_rng,		/* dcache_inv_range	*/
665 	sa1_cache_cleanD_rng,		/* dcache_wb_range	*/
666 
667 	sa1_cache_purgeID,		/* idcache_wbinv_all	*/
668 	sa1_cache_purgeID_rng,		/* idcache_wbinv_range	*/
669 
670 	/* Other functions */
671 
672 	sa11x0_drain_readbuf,		/* flush_prefetchbuf	*/
673 	armv4_drain_writebuf,		/* drain_writebuf	*/
674 	cpufunc_nullop,			/* flush_brnchtgt_C	*/
675 	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/
676 
677 	sa11x0_cpu_sleep,		/* sleep		*/
678 
679 	/* Soft functions */
680 
681 	cpufunc_null_fixup,		/* dataabt_fixup	*/
682 	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
683 
684 	sa11x0_context_switch,		/* context_switch	*/
685 
686 	sa11x0_setup			/* cpu setup		*/
687 };
688 #endif	/* CPU_SA1100 || CPU_SA1110 */
689 
690 #ifdef CPU_IXP12X0
691 struct cpu_functions ixp12x0_cpufuncs = {
692 	/* CPU functions */
693 
694 	cpufunc_id,			/* id			*/
695 	cpufunc_nullop,			/* cpwait		*/
696 
697 	/* MMU functions */
698 
699 	cpufunc_control,		/* control		*/
700 	cpufunc_domains,		/* domain		*/
701 	sa1_setttb,			/* setttb		*/
702 	cpufunc_faultstatus,		/* faultstatus		*/
703 	cpufunc_faultaddress,		/* faultaddress		*/
704 
705 	/* TLB functions */
706 
707 	armv4_tlb_flushID,		/* tlb_flushID		*/
708 	sa1_tlb_flushID_SE,		/* tlb_flushID_SE	*/
709 	armv4_tlb_flushI,		/* tlb_flushI		*/
710 	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
711 	armv4_tlb_flushD,		/* tlb_flushD		*/
712 	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/
713 
714 	/* Cache operations */
715 
716 	sa1_cache_syncI,		/* icache_sync_all	*/
717 	sa1_cache_syncI_rng,		/* icache_sync_range	*/
718 
719 	sa1_cache_purgeD,		/* dcache_wbinv_all	*/
720 	sa1_cache_purgeD_rng,		/* dcache_wbinv_range	*/
721 /*XXX*/	sa1_cache_purgeD_rng,		/* dcache_inv_range	*/
722 	sa1_cache_cleanD_rng,		/* dcache_wb_range	*/
723 
724 	sa1_cache_purgeID,		/* idcache_wbinv_all	*/
725 	sa1_cache_purgeID_rng,		/* idcache_wbinv_range	*/
726 
727 	/* Other functions */
728 
729 	ixp12x0_drain_readbuf,		/* flush_prefetchbuf	*/
730 	armv4_drain_writebuf,		/* drain_writebuf	*/
731 	cpufunc_nullop,			/* flush_brnchtgt_C	*/
732 	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/
733 
734 	(void *)cpufunc_nullop,		/* sleep		*/
735 
736 	/* Soft functions */
737 
738 	cpufunc_null_fixup,		/* dataabt_fixup	*/
739 	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
740 
741 	ixp12x0_context_switch,		/* context_switch	*/
742 
743 	ixp12x0_setup			/* cpu setup		*/
744 };
745 #endif	/* CPU_IXP12X0 */
746 
747 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
748     defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425)
749 struct cpu_functions xscale_cpufuncs = {
750 	/* CPU functions */
751 
752 	cpufunc_id,			/* id			*/
753 	xscale_cpwait,			/* cpwait		*/
754 
755 	/* MMU functions */
756 
757 	xscale_control,			/* control		*/
758 	cpufunc_domains,		/* domain		*/
759 	xscale_setttb,			/* setttb		*/
760 	cpufunc_faultstatus,		/* faultstatus		*/
761 	cpufunc_faultaddress,		/* faultaddress		*/
762 
763 	/* TLB functions */
764 
765 	armv4_tlb_flushID,		/* tlb_flushID		*/
766 	xscale_tlb_flushID_SE,		/* tlb_flushID_SE	*/
767 	armv4_tlb_flushI,		/* tlb_flushI		*/
768 	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
769 	armv4_tlb_flushD,		/* tlb_flushD		*/
770 	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/
771 
772 	/* Cache operations */
773 
774 	xscale_cache_syncI,		/* icache_sync_all	*/
775 	xscale_cache_syncI_rng,		/* icache_sync_range	*/
776 
777 	xscale_cache_purgeD,		/* dcache_wbinv_all	*/
778 	xscale_cache_purgeD_rng,	/* dcache_wbinv_range	*/
779 	xscale_cache_flushD_rng,	/* dcache_inv_range	*/
780 	xscale_cache_cleanD_rng,	/* dcache_wb_range	*/
781 
782 	xscale_cache_purgeID,		/* idcache_wbinv_all	*/
783 	xscale_cache_purgeID_rng,	/* idcache_wbinv_range	*/
784 
785 	/* Other functions */
786 
787 	cpufunc_nullop,			/* flush_prefetchbuf	*/
788 	armv4_drain_writebuf,		/* drain_writebuf	*/
789 	cpufunc_nullop,			/* flush_brnchtgt_C	*/
790 	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/
791 
792 	xscale_cpu_sleep,		/* sleep		*/
793 
794 	/* Soft functions */
795 
796 	cpufunc_null_fixup,		/* dataabt_fixup	*/
797 	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
798 
799 	xscale_context_switch,		/* context_switch	*/
800 
801 	xscale_setup			/* cpu setup		*/
802 };
803 #endif
804 /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 */
805 
806 /*
807  * Global constants also used by locore.s
808  */
809 
810 struct cpu_functions cpufuncs;
811 u_int cputype;
812 u_int cpu_reset_needs_v4_MMU_disable;	/* flag used in locore.s */
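/*
 * Hedged note: the setup code below sets this for ARMv4-and-later
 * cores ("V4 or higher", "SA needs it", "XScale needs it"), telling
 * cpu_reset() in locore.s that the MMU must be turned off before
 * branching to the reset vector.
 */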
813 
814 #if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) || \
815     defined (CPU_ARM10) || defined (CPU_ARM11) || \
816     defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
817     defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425)
818 static void get_cachetype_cp15 __P((void));
819 
820 /* Additional cache information local to this file.  Log2 of some of the
821    above numbers.  */
822 static int	arm_dcache_l2_nsets;
823 static int	arm_dcache_l2_assoc;
824 static int	arm_dcache_l2_linesize;
825 
826 static void
827 get_cachetype_cp15()
828 {
829 	u_int ctype, isize, dsize;
830 	u_int multiplier;
831 
832 	__asm __volatile("mrc p15, 0, %0, c0, c0, 1"
833 		: "=r" (ctype));
834 
835 	/*
836 	 * ...and thus spake the ARM ARM:
837 	 *
838 	 * If an <opcode2> value corresponding to an unimplemented or
839 	 * reserved ID register is encountered, the System Control
840 	 * processor returns the value of the main ID register.
841 	 */
842 	if (ctype == cpufunc_id())
843 		goto out;
844 
845 	if ((ctype & CPU_CT_S) == 0)
846 		arm_pcache_unified = 1;
847 
848 	/*
849 	 * If you want to know how this code works, go read the ARM ARM.
850 	 */
851 
852 	arm_pcache_type = CPU_CT_CTYPE(ctype);
853 
854 	if (arm_pcache_unified == 0) {
855 		isize = CPU_CT_ISIZE(ctype);
856 		multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
857 		arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
858 		if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
859 			if (isize & CPU_CT_xSIZE_M)
860 				arm_picache_line_size = 0; /* not present */
861 			else
862 				arm_picache_ways = 1;
863 		} else {
864 			arm_picache_ways = multiplier <<
865 			    (CPU_CT_xSIZE_ASSOC(isize) - 1);
866 		}
867 		arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
868 	}
869 
870 	dsize = CPU_CT_DSIZE(ctype);
871 	multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
872 	arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
873 	if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
874 		if (dsize & CPU_CT_xSIZE_M)
875 			arm_pdcache_line_size = 0; /* not present */
876 		else
877 			arm_pdcache_ways = 1;
878 	} else {
879 		arm_pdcache_ways = multiplier <<
880 		    (CPU_CT_xSIZE_ASSOC(dsize) - 1);
881 	}
882 	arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
883 
884 	arm_dcache_align = arm_pdcache_line_size;
885 
886 	arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
887 	arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
888 	arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
889 	    CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);
890 
891  out:
892 	arm_dcache_align_mask = arm_dcache_align - 1;
893 }
894 #endif /* ARM7TDMI || ARM8 || ARM9 || ARM10 || ARM11 || XSCALE */
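/*
 * Worked example (hedged, for a hypothetical 16KB, 64-way cache with
 * 32-byte lines): the cache type register fields would be SIZE = 5,
 * ASSOC = 6, LEN = 2 and M = 0, so the code above computes
 *
 *	multiplier = 2
 *	line size  = 1U << (2 + 3) =    32 bytes
 *	ways       =  2 << (6 - 1) =    64
 *	size       =  2 << (5 + 8) = 16384 bytes
 *
 * i.e. 16384 / (64 * 32) = 8 sets, matching
 * arm_dcache_l2_nsets = 6 + 5 - 6 - 2 = 3.
 */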
895 
896 #if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3) || \
897     defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_SA110) || \
898     defined(CPU_SA1100) || defined(CPU_SA1110) || defined(CPU_IXP12X0)
899 /* Cache information for CPUs without cache type registers. */
900 struct cachetab {
901 	u_int32_t ct_cpuid;
902 	int	ct_pcache_type;
903 	int	ct_pcache_unified;
904 	int	ct_pdcache_size;
905 	int	ct_pdcache_line_size;
906 	int	ct_pdcache_ways;
907 	int	ct_picache_size;
908 	int	ct_picache_line_size;
909 	int	ct_picache_ways;
910 };
911 
912 struct cachetab cachetab[] = {
913     /* cpuid,           cache type,       u,  dsiz, ls, wy,  isiz, ls, wy */
914     { CPU_ID_ARM2,      0,                1,     0,  0,  0,     0,  0,  0 },
915     { CPU_ID_ARM250,    0,                1,     0,  0,  0,     0,  0,  0 },
916     { CPU_ID_ARM3,      CPU_CT_CTYPE_WT,  1,  4096, 16, 64,     0,  0,  0 },
917     { CPU_ID_ARM610,	CPU_CT_CTYPE_WT,  1,  4096, 16, 64,     0,  0,  0 },
918     { CPU_ID_ARM710,    CPU_CT_CTYPE_WT,  1,  8192, 32,  4,     0,  0,  0 },
919     { CPU_ID_ARM7500,   CPU_CT_CTYPE_WT,  1,  4096, 16,  4,     0,  0,  0 },
920     { CPU_ID_ARM710A,   CPU_CT_CTYPE_WT,  1,  8192, 16,  4,     0,  0,  0 },
921     { CPU_ID_ARM7500FE, CPU_CT_CTYPE_WT,  1,  4096, 16,  4,     0,  0,  0 },
922     /* XXX is this type right for SA-1? */
923     { CPU_ID_SA110,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 },
924     { CPU_ID_SA1100,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
925     { CPU_ID_SA1110,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
926     { CPU_ID_IXP1200,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 }, /* XXX */
927     { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
928 };
929 
930 static void get_cachetype_table __P((void));
931 
932 static void
933 get_cachetype_table()
934 {
935 	int i;
936 	u_int32_t cpuid = cpufunc_id();
937 
938 	for (i = 0; cachetab[i].ct_cpuid != 0; i++) {
939 		if (cachetab[i].ct_cpuid == (cpuid & CPU_ID_CPU_MASK)) {
940 			arm_pcache_type = cachetab[i].ct_pcache_type;
941 			arm_pcache_unified = cachetab[i].ct_pcache_unified;
942 			arm_pdcache_size = cachetab[i].ct_pdcache_size;
943 			arm_pdcache_line_size =
944 			    cachetab[i].ct_pdcache_line_size;
945 			arm_pdcache_ways = cachetab[i].ct_pdcache_ways;
946 			arm_picache_size = cachetab[i].ct_picache_size;
947 			arm_picache_line_size =
948 			    cachetab[i].ct_picache_line_size;
949 			arm_picache_ways = cachetab[i].ct_picache_ways;
950 		}
951 	}
952 	arm_dcache_align = arm_pdcache_line_size;
953 
954 	arm_dcache_align_mask = arm_dcache_align - 1;
955 }
956 
957 #endif /* ARM2 || ARM250 || ARM3 || ARM6 || ARM7 || SA110 || SA1100 || SA1110 || IXP12X0 */
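/*
 * Example, read straight off the table above: an SA-110 match yields
 * separate 16KB I- and D-caches with 32-byte lines and 32 ways, so
 * arm_dcache_align becomes 32 and arm_dcache_align_mask 0x1f.
 */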
958 
959 /*
960  * Cannot panic here as we may not have a console yet ...
961  */
962 
963 int
964 set_cpufuncs()
965 {
966 	cputype = cpufunc_id();
967 	cputype &= CPU_ID_CPU_MASK;
968 
969 	/*
970 	 * NOTE: cpu_do_powersave defaults to off.  If we encounter a
971 	 * CPU type where we want to use it by default, then we set it.
972 	 */
973 
974 #ifdef CPU_ARM3
975 	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
976 	    (cputype & 0x00000f00) == 0x00000300) {
977 		cpufuncs = arm3_cpufuncs;
978 		cpu_reset_needs_v4_MMU_disable = 0;
979 		get_cachetype_table();
980 		return 0;
981 	}
982 #endif	/* CPU_ARM3 */
983 #ifdef CPU_ARM6
984 	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
985 	    (cputype & 0x00000f00) == 0x00000600) {
986 		cpufuncs = arm6_cpufuncs;
987 		cpu_reset_needs_v4_MMU_disable = 0;
988 		get_cachetype_table();
989 		pmap_pte_init_generic();
990 		return 0;
991 	}
992 #endif	/* CPU_ARM6 */
993 #ifdef CPU_ARM7
994 	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
995 	    CPU_ID_IS7(cputype) &&
996 	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V3) {
997 		cpufuncs = arm7_cpufuncs;
998 		cpu_reset_needs_v4_MMU_disable = 0;
999 		get_cachetype_table();
1000 		pmap_pte_init_generic();
1001 		return 0;
1002 	}
1003 #endif	/* CPU_ARM7 */
1004 #ifdef CPU_ARM7TDMI
1005 	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
1006 	    CPU_ID_IS7(cputype) &&
1007 	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V4T) {
1008 		cpufuncs = arm7tdmi_cpufuncs;
1009 		cpu_reset_needs_v4_MMU_disable = 0;
1010 		get_cachetype_cp15();
1011 		pmap_pte_init_generic();
1012 		return 0;
1013 	}
1014 #endif
1015 #ifdef CPU_ARM8
1016 	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
1017 	    (cputype & 0x0000f000) == 0x00008000) {
1018 		cpufuncs = arm8_cpufuncs;
1019 		cpu_reset_needs_v4_MMU_disable = 0;	/* XXX correct? */
1020 		get_cachetype_cp15();
1021 		pmap_pte_init_arm8();
1022 		return 0;
1023 	}
1024 #endif	/* CPU_ARM8 */
1025 #ifdef CPU_ARM9
1026 	if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
1027 	     (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
1028 	    (cputype & 0x0000f000) == 0x00009000) {
1029 		cpufuncs = arm9_cpufuncs;
1030 		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
1031 		get_cachetype_cp15();
1032 		arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
1033 		arm9_dcache_sets_max =
1034 		    (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) -
1035 		    arm9_dcache_sets_inc;
1036 		arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
1037 		arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
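		/*
		 * Worked example (hedged, assuming a 16KB, 64-way D-cache
		 * with 32-byte lines, as on an ARM920T-class core):
		 * l2_linesize = 5, l2_nsets = 3 and l2_assoc = 6, giving
		 * sets_inc = 32, sets_max = 256 - 32 = 224,
		 * index_inc = 1U << 26 and index_max = 0xfc000000; these
		 * bound the set/index sweeps in the arm9 cache routines.
		 */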
1038 #ifdef	ARM9_CACHE_WRITE_THROUGH
1039 		pmap_pte_init_arm9();
1040 #else
1041 		pmap_pte_init_generic();
1042 #endif
1043 		return 0;
1044 	}
1045 #endif /* CPU_ARM9 */
1046 #ifdef CPU_ARM10
1047 	if (/* cputype == CPU_ID_ARM1020T || */
1048 	    cputype == CPU_ID_ARM1020E ||
1049 	    cputype == CPU_ID_ARM1026EJS) {
1050 		/*
1051 		 * Select write-through caching (this isn't really an
1052 		 * option on ARM1020T).
1053 		 */
1054 		cpufuncs = arm10_cpufuncs;
1055 		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
1056 		get_cachetype_cp15();
1057 		armv5_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
1058 		armv5_dcache_sets_max =
1059 		    (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) -
1060 		    armv5_dcache_sets_inc;
1061 		armv5_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
1062 		armv5_dcache_index_max = 0U - armv5_dcache_index_inc;
1063 		pmap_pte_init_generic();
1064 		return 0;
1065 	}
1066 #endif /* CPU_ARM10 */
1067 #ifdef CPU_ARM11
1068 	if (cputype == CPU_ID_ARM1136JS ||
1069 	    cputype == CPU_ID_ARM1136JSR1) {
1070 		cpufuncs = arm11_cpufuncs;
1071 		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
1072 		get_cachetype_cp15();
1073 		armv5_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
1074 		armv5_dcache_sets_max =
1075 		    (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) -
1076 		    armv5_dcache_sets_inc;
1077 		armv5_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
1078 		armv5_dcache_index_max = 0U - armv5_dcache_index_inc;
1079 		pmap_pte_init_generic();
1080 		return 0;
1081 	}
1082 #endif /* CPU_ARM11 */
1083 #ifdef CPU_SA110
1084 	if (cputype == CPU_ID_SA110) {
1085 		cpufuncs = sa110_cpufuncs;
1086 		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it */
1087 		get_cachetype_table();
1088 		pmap_pte_init_sa1();
1089 		return 0;
1090 	}
1091 #endif	/* CPU_SA110 */
1092 #ifdef CPU_SA1100
1093 	if (cputype == CPU_ID_SA1100) {
1094 		cpufuncs = sa11x0_cpufuncs;
1095 		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it	*/
1096 		get_cachetype_table();
1097 		pmap_pte_init_sa1();
1098 
1099 		/* Use powersave on this CPU. */
1100 		cpu_do_powersave = 1;
1101 
1102 		return 0;
1103 	}
1104 #endif	/* CPU_SA1100 */
1105 #ifdef CPU_SA1110
1106 	if (cputype == CPU_ID_SA1110) {
1107 		cpufuncs = sa11x0_cpufuncs;
1108 		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it	*/
1109 		get_cachetype_table();
1110 		pmap_pte_init_sa1();
1111 
1112 		/* Use powersave on this CPU. */
1113 		cpu_do_powersave = 1;
1114 
1115 		return 0;
1116 	}
1117 #endif	/* CPU_SA1110 */
1118 #ifdef CPU_IXP12X0
1119 	if (cputype == CPU_ID_IXP1200) {
1120 		cpufuncs = ixp12x0_cpufuncs;
1121 		cpu_reset_needs_v4_MMU_disable = 1;
1122 		get_cachetype_table();
1123 		pmap_pte_init_sa1();
1124 		return 0;
1125 	}
1126 #endif  /* CPU_IXP12X0 */
1127 #ifdef CPU_XSCALE_80200
1128 	if (cputype == CPU_ID_80200) {
1129 		int rev = cpufunc_id() & CPU_ID_REVISION_MASK;
1130 
1131 		i80200_icu_init();
1132 
1133 		/*
1134 		 * Reset the Performance Monitoring Unit to a
1135 		 * pristine state:
1136 		 *	- CCNT, PMN0, PMN1 reset to 0
1137 		 *	- overflow indications cleared
1138 		 *	- all counters disabled
1139 		 */
1140 		__asm __volatile("mcr p14, 0, %0, c0, c0, 0"
1141 			:
1142 			: "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
1143 			       PMNC_CC_IF));
1144 
1145 #if defined(XSCALE_CCLKCFG)
1146 		/*
1147 		 * Crank CCLKCFG to maximum legal value.
1148 		 */
1149 		__asm __volatile ("mcr p14, 0, %0, c6, c0, 0"
1150 			:
1151 			: "r" (XSCALE_CCLKCFG));
1152 #endif
1153 
1154 		/*
1155 		 * XXX Disable ECC in the Bus Controller Unit; we
1156 		 * don't really support it, yet.  Clear any pending
1157 		 * error indications.
1158 		 */
1159 		__asm __volatile("mcr p13, 0, %0, c0, c1, 0"
1160 			:
1161 			: "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));
1162 
1163 		cpufuncs = xscale_cpufuncs;
1164 #if defined(PERFCTRS)
1165 		xscale_pmu_init();
1166 #endif
1167 
1168 		/*
1169 		 * i80200 errata: Step-A0 and A1 have a bug where
1170 		 * D$ dirty bits are not cleared on "invalidate by
1171 		 * address".
1172 		 *
1173 		 * Workaround: Clean cache line before invalidating.
1174 		 */
1175 		if (rev == 0 || rev == 1)
1176 			cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng;
1177 
1178 		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
1179 		get_cachetype_cp15();
1180 		pmap_pte_init_xscale();
1181 		return 0;
1182 	}
1183 #endif /* CPU_XSCALE_80200 */
1184 #ifdef CPU_XSCALE_80321
1185 	if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
1186 	    cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0) {
1187 		i80321_icu_init();
1188 
1189 		/*
1190 		 * Reset the Performance Monitoring Unit to a
1191 		 * pristine state:
1192 		 *	- CCNT, PMN0, PMN1 reset to 0
1193 		 *	- overflow indications cleared
1194 		 *	- all counters disabled
1195 		 */
1196 		__asm __volatile("mcr p14, 0, %0, c0, c0, 0"
1197 			:
1198 			: "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
1199 			       PMNC_CC_IF));
1200 
1201 		cpufuncs = xscale_cpufuncs;
1202 #if defined(PERFCTRS)
1203 		xscale_pmu_init();
1204 #endif
1205 
1206 		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
1207 		get_cachetype_cp15();
1208 		pmap_pte_init_xscale();
1209 		return 0;
1210 	}
1211 #endif /* CPU_XSCALE_80321 */
1212 #ifdef CPU_XSCALE_PXA2X0
1213 	/* ignore core revision to test PXA2xx CPUs */
1214 	if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
1215 	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {
1216 
1217 		cpufuncs = xscale_cpufuncs;
1218 #if defined(PERFCTRS)
1219 		xscale_pmu_init();
1220 #endif
1221 
1222 		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
1223 		get_cachetype_cp15();
1224 		pmap_pte_init_xscale();
1225 
1226 		/* Use powersave on this CPU. */
1227 		cpu_do_powersave = 1;
1228 
1229 		return 0;
1230 	}
1231 #endif /* CPU_XSCALE_PXA2X0 */
1232 #ifdef CPU_XSCALE_IXP425
1233 	if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
1234 	    cputype == CPU_ID_IXP425_266) {
1235 		ixp425_icu_init();
1236 
1237 		cpufuncs = xscale_cpufuncs;
1238 #if defined(PERFCTRS)
1239 		xscale_pmu_init();
1240 #endif
1241 
1242 		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
1243 		get_cachetype_cp15();
1244 		pmap_pte_init_xscale();
1245 
1246 		return 0;
1247 	}
1248 #endif /* CPU_XSCALE_IXP425 */
1249 	/*
1250 	 * Bzzzz. And the answer was ...
1251 	 */
1252 	panic("No support for this CPU type (%08x) in kernel", cputype);
1253 	return(ARCHITECTURE_NOT_PRESENT);
1254 }
1255 
1256 /*
1257  * Fixup routines for data and prefetch aborts.
1258  *
1259  * Several compile-time symbols are used:
1260  *
1261  * DEBUG_FAULT_CORRECTION - Print debugging information during the
1262  * correction of registers after a fault.
1263  * ARM6_LATE_ABORT - ARM6 supports both early and late aborts;
1264  * when defined, late aborts are used.
1265  */
1266 
1267 
1268 /*
1269  * Null abort fixup routine.
1270  * For use when no fixup is required.
1271  */
1272 int
1273 cpufunc_null_fixup(arg)
1274 	void *arg;
1275 {
1276 	return(ABORT_FIXUP_OK);
1277 }
1278 
1279 
1280 #if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3) || \
1281     defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI)
1282 
1283 #ifdef DEBUG_FAULT_CORRECTION
1284 #define DFC_PRINTF(x)		printf x
1285 #define DFC_DISASSEMBLE(x)	disassemble(x)
1286 #else
1287 #define DFC_PRINTF(x)		/* nothing */
1288 #define DFC_DISASSEMBLE(x)	/* nothing */
1289 #endif
1290 
1291 /*
1292  * "Early" data abort fixup.
1293  *
1294  * For ARM2, ARM2as, ARM3 and ARM6 (in early-abort mode).  Also used
1295  * indirectly by ARM6 (in late-abort mode) and ARM7[TDMI].
1296  *
1297  * In early aborts, we may have to fix up LDM, STM, LDC and STC.
1298  */
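/*
 * Worked example (hedged, not from the original source): suppose
 * "ldmia r4!, {r0-r3}" aborts.  Bit 21 (writeback) is set, the base
 * field is 4, four registers are in the transfer list and bit 23 (U)
 * selects increment, so r4 has already been advanced by 16; the code
 * below undoes that by subtracting count * 4 == 16 from registers[4].
 */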
1299 int
1300 early_abort_fixup(arg)
1301 	void *arg;
1302 {
1303 	trapframe_t *frame = arg;
1304 	u_int fault_pc;
1305 	u_int fault_instruction;
1306 	int saved_lr = 0;
1307 
1308 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1309 
1310 		/* Ok an abort in SVC mode */
1311 
1312 		/*
1313 		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1314 		 * as the fault happened in svc mode but we need it in the
1315 		 * usr slot so we can treat the registers as an array of ints
1316 		 * during fixing.
1317 		 * NOTE: The PC occupies its slot in the array, but writeback
1318 		 * to r15 is not allowed, so it never needs correcting.
1319 		 * Doing it like this is more efficient than trapping this
1320 		 * case in all possible locations in the following fixup code.
1321 		 */
1322 
1323 		saved_lr = frame->tf_usr_lr;
1324 		frame->tf_usr_lr = frame->tf_svc_lr;
1325 
1326 		/*
1327 		 * Note the trapframe does not have the SVC r13 so a fault
1328 		 * from an instruction with writeback to r13 in SVC mode is
1329 		 * not allowed. This should not happen as the kstack is
1330 		 * always valid.
1331 		 */
1332 	}
1333 
1334 	/* Get the fault PC and fetch the faulting instruction */
1335 
1336 	fault_pc = frame->tf_pc;
1337 	fault_instruction = *((volatile unsigned int *)fault_pc);
1338 
1339 	/* Decode the fault instruction and fix the registers as needed */
1340 
1341 	if ((fault_instruction & 0x0e000000) == 0x08000000) {
1342 		int base;
1343 		int loop;
1344 		int count;
1345 		int *registers = &frame->tf_r0;
1346 
1347 		DFC_PRINTF(("LDM/STM\n"));
1348 		DFC_DISASSEMBLE(fault_pc);
1349 		if (fault_instruction & (1 << 21)) {
1350 			DFC_PRINTF(("This instruction must be corrected\n"));
1351 			base = (fault_instruction >> 16) & 0x0f;
1352 			if (base == 15)
1353 				return ABORT_FIXUP_FAILED;
1354 			/* Count registers transferred */
1355 			count = 0;
1356 			for (loop = 0; loop < 16; ++loop) {
1357 				if (fault_instruction & (1<<loop))
1358 					++count;
1359 			}
1360 			DFC_PRINTF(("%d registers used\n", count));
1361 			DFC_PRINTF(("Corrected r%d by %d bytes ",
1362 				       base, count * 4));
1363 			if (fault_instruction & (1 << 23)) {
1364 				DFC_PRINTF(("down\n"));
1365 				registers[base] -= count * 4;
1366 			} else {
1367 				DFC_PRINTF(("up\n"));
1368 				registers[base] += count * 4;
1369 			}
1370 		}
1371 	} else if ((fault_instruction & 0x0e000000) == 0x0c000000) {
1372 		int base;
1373 		int offset;
1374 		int *registers = &frame->tf_r0;
1375 
1376 		/* REGISTER CORRECTION IS REQUIRED FOR THESE INSTRUCTIONS */
1377 
1378 		DFC_DISASSEMBLE(fault_pc);
1379 
1380 		/* Only need to fix registers if write back is turned on */
1381 
1382 		if ((fault_instruction & (1 << 21)) != 0) {
1383 			base = (fault_instruction >> 16) & 0x0f;
1384 			if (base == 13 &&
1385 			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
1386 				return ABORT_FIXUP_FAILED;
1387 			if (base == 15)
1388 				return ABORT_FIXUP_FAILED;
1389 
1390 			offset = (fault_instruction & 0xff) << 2;
1391 			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
1392 			if ((fault_instruction & (1 << 23)) != 0)
1393 				offset = -offset;
1394 			registers[base] += offset;
1395 			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
1396 		}
1397 	} else if ((fault_instruction & 0x0e000000) == 0x0c000000)
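		/* XXX unreachable: duplicates the LDC/STC test above */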
1398 		return ABORT_FIXUP_FAILED;
1399 
1400 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1401 
1402 		/* Ok an abort in SVC mode */
1403 
1404 		/*
1405 		 * Undo the entry fixup: the code above may have corrected
1406 		 * the usr r14 slot, which really held the SVC r14, so copy
1407 		 * it back into the SVC r14 and then restore the usr r14
1408 		 * that was saved on entry.
1409 		 * NOTE: The PC occupies its slot in the array, but writeback
1410 		 * to r15 is not allowed, so it never needs correcting.
1411 		 * Doing it like this is more efficient than trapping this
1412 		 * case in all possible locations in the prior fixup code.
1413 		 */
1414 
1415 		frame->tf_svc_lr = frame->tf_usr_lr;
1416 		frame->tf_usr_lr = saved_lr;
1417 
1418 		/*
1419 		 * Note the trapframe does not have the SVC r13 so a fault
1420 		 * from an instruction with writeback to r13 in SVC mode is
1421 		 * not allowed. This should not happen as the kstack is
1422 		 * always valid.
1423 		 */
1424 	}
1425 
1426 	return(ABORT_FIXUP_OK);
1427 }
1428 #endif	/* CPU_ARM2/250/3/6/7 */
1429 
1430 
1431 #if (defined(CPU_ARM6) && defined(ARM6_LATE_ABORT)) || defined(CPU_ARM7) || \
1432 	defined(CPU_ARM7TDMI)
1433 /*
1434  * "Late" (base updated) data abort fixup
1435  *
1436  * For ARM6 (in late-abort mode) and ARM7[TDMI].
1437  *
1438  * In this model, all data-transfer instructions need fixing up.  We defer
1439  * LDM, STM, LDC and STC fixup to the early-abort handler.
1440  */
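/*
 * Worked example (hedged, not from the original source): suppose
 * "ldr r1, [r0], #4" aborts.  Bit 24 (P) is clear, so the access is
 * post-indexed and r0 has already been advanced by 4; the immediate
 * path below computes offset = 4, sees bit 23 (U) set, negates the
 * offset and adds it back, restoring r0 for a restart.
 */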
1441 int
1442 late_abort_fixup(arg)
1443 	void *arg;
1444 {
1445 	trapframe_t *frame = arg;
1446 	u_int fault_pc;
1447 	u_int fault_instruction;
1448 	int saved_lr = 0;
1449 
1450 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1451 
1452 		/* Ok an abort in SVC mode */
1453 
1454 		/*
1455 		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1456 		 * as the fault happened in svc mode but we need it in the
1457 		 * usr slot so we can treat the registers as an array of ints
1458 		 * during fixing.
1459 		 * NOTE: The PC occupies its slot in the array, but writeback
1460 		 * to r15 is not allowed, so it never needs correcting.
1461 		 * Doing it like this is more efficient than trapping this
1462 		 * case in all possible locations in the following fixup code.
1463 		 */
1464 
1465 		saved_lr = frame->tf_usr_lr;
1466 		frame->tf_usr_lr = frame->tf_svc_lr;
1467 
1468 		/*
1469 		 * Note the trapframe does not have the SVC r13 so a fault
1470 		 * from an instruction with writeback to r13 in SVC mode is
1471 		 * not allowed. This should not happen as the kstack is
1472 		 * always valid.
1473 		 */
1474 	}
1475 
1476 	/* Get the fault PC and fetch the faulting instruction */
1477 
1478 	fault_pc = frame->tf_pc;
1479 	fault_instruction = *((volatile unsigned int *)fault_pc);
1480 
1481 	/* Decode the fault instruction and fix the registers as needed */
1482 
1483 	/* Was it a swap instruction? */
1484 
1485 	if ((fault_instruction & 0x0fb00ff0) == 0x01000090) {
1486 		DFC_DISASSEMBLE(fault_pc);
1487 	} else if ((fault_instruction & 0x0c000000) == 0x04000000) {
1488 
1489 		/* Was it an ldr/str instruction? */
1490 		/* This is for late abort only */
1491 
1492 		int base;
1493 		int offset;
1494 		int *registers = &frame->tf_r0;
1495 
1496 		DFC_DISASSEMBLE(fault_pc);
1497 
1498 		/* This is for late abort only */
1499 
1500 		if ((fault_instruction & (1 << 24)) == 0
1501 		    || (fault_instruction & (1 << 21)) != 0) {
1502 			/* post-indexed, or pre-indexed with writeback */
1503 
1504 			base = (fault_instruction >> 16) & 0x0f;
1505 			if (base == 13 &&
1506 			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
1507 				return ABORT_FIXUP_FAILED;
1508 			if (base == 15)
1509 				return ABORT_FIXUP_FAILED;
1510 			DFC_PRINTF(("late abt fix: r%d=%08x : ",
1511 				       base, registers[base]));
1512 			if ((fault_instruction & (1 << 25)) == 0) {
1513 				/* Immediate offset - easy */
1514 
1515 				offset = fault_instruction & 0xfff;
1516 				if ((fault_instruction & (1 << 23)))
1517 					offset = -offset;
1518 				registers[base] += offset;
1519 				DFC_PRINTF(("imm=%08x ", offset));
1520 			} else {
1521 				/* offset is a shifted register */
1522 				int shift;
1523 
1524 				offset = fault_instruction & 0x0f;
1525 				if (offset == base)
1526 					return ABORT_FIXUP_FAILED;
1527 
1528 				/*
1529 				 * Register offset - harder: we have to
1530 				 * cope with shifts!
1531 				 */
1532 				offset = registers[offset];
1533 
1534 				if ((fault_instruction & (1 << 4)) == 0)
1535 					/* shift with amount */
1536 					shift = (fault_instruction >> 7) & 0x1f;
1537 				else {
1538 					/* shift with register */
1539 					if ((fault_instruction & (1 << 7)) != 0)
1540 						/* undefined for now so bail out */
1541 						return ABORT_FIXUP_FAILED;
1542 					shift = ((fault_instruction >> 8) & 0xf);
1543 					if (base == shift)
1544 						return ABORT_FIXUP_FAILED;
1545 					DFC_PRINTF(("shift reg=%d ", shift));
1546 					shift = registers[shift];
1547 				}
1548 				DFC_PRINTF(("shift=%08x ", shift));
1549 				switch (((fault_instruction >> 5) & 0x3)) {
1550 				case 0 : /* Logical left */
1551 					offset = (int)(((u_int)offset) << shift);
1552 					break;
1553 				case 1 : /* Logical Right */
1554 					if (shift == 0) shift = 32;
1555 					offset = (int)(((u_int)offset) >> shift);
1556 					break;
1557 				case 2 : /* Arithmetic Right */
1558 					if (shift == 0) shift = 32;
1559 					offset = (int)(((int)offset) >> shift);
1560 					break;
1561 				case 3 : /* Rotate right (ror or rrx) */
1562 					return ABORT_FIXUP_FAILED;
1563 					break;
1564 				}
1565 
1566 				DFC_PRINTF(("abt: fixed LDR/STR with "
1567 					       "register offset\n"));
1568 				if ((fault_instruction & (1 << 23)))
1569 					offset = -offset;
1570 				DFC_PRINTF(("offset=%08x ", offset));
1571 				registers[base] += offset;
1572 			}
1573 			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
1574 		}
1575 	}
1576 
1577 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1578 
1579 		/* Ok an abort in SVC mode */
1580 
1581 		/*
1582 		 * Undo the entry fixup: the code above may have corrected
1583 		 * the usr r14 slot, which really held the SVC r14, so copy
1584 		 * it back into the SVC r14 and then restore the usr r14
1585 		 * that was saved on entry.
1586 		 * NOTE: The PC occupies its slot in the array, but writeback
1587 		 * to r15 is not allowed, so it never needs correcting.
1588 		 * Doing it like this is more efficient than trapping this
1589 		 * case in all possible locations in the prior fixup code.
1590 		 */
1591 
1592 		frame->tf_svc_lr = frame->tf_usr_lr;
1593 		frame->tf_usr_lr = saved_lr;
1594 
1595 		/*
1596 		 * Note the trapframe does not have the SVC r13 so a fault
1597 		 * from an instruction with writeback to r13 in SVC mode is
1598 		 * not allowed. This should not happen as the kstack is
1599 		 * always valid.
1600 		 */
1601 	}
1602 
1603 	/*
1604 	 * Now let the early-abort fixup routine have a go, in case it
1605 	 * was an LDM, STM, LDC or STC that faulted.
1606 	 */
1607 
1608 	return early_abort_fixup(arg);
1609 }
1610 #endif	/* CPU_ARM6(LATE)/7/7TDMI */
1611 
1612 /*
1613  * CPU Setup code
1614  */
1615 
1616 #if defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) || \
1617 	defined(CPU_ARM8) || defined (CPU_ARM9) || defined(CPU_SA110) || \
1618 	defined(CPU_SA1100) || defined(CPU_SA1110) || \
1619 	defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
1620 	defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
1621 	defined(CPU_ARM10) || defined(CPU_ARM11)
1622 
1623 #define IGN	0
1624 #define OR	1
1625 #define BIC	2
1626 
1627 struct cpu_option {
1628 	const char *co_name;
1629 	int	co_falseop;
1630 	int	co_trueop;
1631 	int	co_value;
1632 };
1633 
1634 static u_int parse_cpu_options __P((char *, struct cpu_option *, u_int));
1635 
1636 static u_int
1637 parse_cpu_options(args, optlist, cpuctrl)
1638 	char *args;
1639 	struct cpu_option *optlist;
1640 	u_int cpuctrl;
1641 {
1642 	int integer;
1643 
1644 	if (args == NULL)
1645 		return(cpuctrl);
1646 
1647 	while (optlist->co_name) {
1648 		if (get_bootconf_option(args, optlist->co_name,
1649 		    BOOTOPT_TYPE_BOOLEAN, &integer)) {
1650 			if (integer) {
1651 				if (optlist->co_trueop == OR)
1652 					cpuctrl |= optlist->co_value;
1653 				else if (optlist->co_trueop == BIC)
1654 					cpuctrl &= ~optlist->co_value;
1655 			} else {
1656 				if (optlist->co_falseop == OR)
1657 					cpuctrl |= optlist->co_value;
1658 				else if (optlist->co_falseop == BIC)
1659 					cpuctrl &= ~optlist->co_value;
1660 			}
1661 		}
1662 		++optlist;
1663 	}
1664 	return(cpuctrl);
1665 }
1666 #endif /* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 || CPU_ARM9 || CPU_ARM10 || CPU_ARM11 || CPU_SA110 || CPU_SA1100 || CPU_SA1110 || CPU_XSCALE */
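/*
 * Illustrative sketch (the boot string is an assumption, not from
 * this file): with args = "cpu.nocache=1", a call
 *
 *	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
 *
 * matches the "cpu.nocache" entry, whose co_trueop is BIC, and so
 * returns cpuctrl & ~CPU_CONTROL_IDC_ENABLE: the I/D cache enable
 * bit is cleared before the control register is written.
 */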
1667 
1668 #if defined (CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) \
1669 	|| defined(CPU_ARM8)
1670 struct cpu_option arm678_options[] = {
1671 #ifdef COMPAT_12
1672 	{ "nocache",		IGN, BIC, CPU_CONTROL_IDC_ENABLE },
1673 	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
1674 #endif	/* COMPAT_12 */
1675 	{ "cpu.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
1676 	{ "cpu.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
1677 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
1678 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
1679 	{ NULL,			IGN, IGN, 0 }
1680 };
1681 
1682 #endif	/* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 */
1683 
1684 #ifdef CPU_ARM6
1685 struct cpu_option arm6_options[] = {
1686 	{ "arm6.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
1687 	{ "arm6.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
1688 	{ "arm6.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
1689 	{ "arm6.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
1690 	{ NULL,			IGN, IGN, 0 }
1691 };
1692 
1693 void
1694 arm6_setup(args)
1695 	char *args;
1696 {
1697 	int cpuctrl, cpuctrlmask;
1698 
1699 	/* Set up default control register bits */
1700 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1701 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1702 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
1703 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1704 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1705 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
1706 		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE
1707 		 | CPU_CONTROL_AFLT_ENABLE;
1708 
1709 #ifdef ARM6_LATE_ABORT
1710 	cpuctrl |= CPU_CONTROL_LABT_ENABLE;
1711 #endif	/* ARM6_LATE_ABORT */
1712 
1713 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1714 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1715 #endif
1716 
1717 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1718 	cpuctrl = parse_cpu_options(args, arm6_options, cpuctrl);
1719 
1720 #ifdef __ARMEB__
1721 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1722 #endif
1723 
1724 	/* Clear out the cache */
1725 	cpu_idcache_wbinv_all();
1726 
1727 	/* Set the control register */
1728 	curcpu()->ci_ctrl = cpuctrl;
1729 	cpu_control(0xffffffff, cpuctrl);
1730 }
1731 #endif	/* CPU_ARM6 */
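/*
 * Hedged note on the pattern above (repeated in the other setup
 * routines): cpu_control(clear, set) rewrites the CP15 control
 * register from the given masks, so a clear mask of 0xffffffff
 * replaces the whole register with cpuctrl; the value is also cached
 * in curcpu()->ci_ctrl for later reference.
 */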
1732 
1733 #ifdef CPU_ARM7
1734 struct cpu_option arm7_options[] = {
1735 	{ "arm7.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
1736 	{ "arm7.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
1737 	{ "arm7.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
1738 	{ "arm7.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
1739 #ifdef COMPAT_12
1740 	{ "fpaclk2",		BIC, OR,  CPU_CONTROL_CPCLK },
1741 #endif	/* COMPAT_12 */
1742 	{ "arm700.fpaclk",	BIC, OR,  CPU_CONTROL_CPCLK },
1743 	{ NULL,			IGN, IGN, 0 }
1744 };
1745 
1746 void
1747 arm7_setup(args)
1748 	char *args;
1749 {
1750 	int cpuctrl, cpuctrlmask;
1751 
1752 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1753 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1754 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
1755 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1756 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1757 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
1758 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_LABT_ENABLE
1759 		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE
1760 		 | CPU_CONTROL_AFLT_ENABLE;
1761 
1762 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1763 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1764 #endif
1765 
1766 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1767 	cpuctrl = parse_cpu_options(args, arm7_options, cpuctrl);
1768 
1769 #ifdef __ARMEB__
1770 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1771 #endif
1772 
1773 	/* Clear out the cache */
1774 	cpu_idcache_wbinv_all();
1775 
1776 	/* Set the control register */
1777 	curcpu()->ci_ctrl = cpuctrl;
1778 	cpu_control(0xffffffff, cpuctrl);
1779 }
1780 #endif	/* CPU_ARM7 */
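
/*
 * Worked example (hypothetical boot arguments): if args contained
 * "arm7.nocache arm700.fpaclk", the arm7_options table above would make
 * arm7_setup() clear CPU_CONTROL_IDC_ENABLE and set CPU_CONTROL_CPCLK:
 *
 *	cpuctrl = parse_cpu_options("arm7.nocache arm700.fpaclk",
 *	    arm7_options, cpuctrl);
 */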
1781 
1782 #ifdef CPU_ARM7TDMI
1783 struct cpu_option arm7tdmi_options[] = {
1784 	{ "arm7.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
1785 	{ "arm7.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
1786 	{ "arm7.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
1787 	{ "arm7.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
1788 #ifdef COMPAT_12
1789 	{ "fpaclk2",		BIC, OR,  CPU_CONTROL_CPCLK },
1790 #endif	/* COMPAT_12 */
1791 	{ "arm700.fpaclk",	BIC, OR,  CPU_CONTROL_CPCLK },
1792 	{ NULL,			IGN, IGN, 0 }
1793 };
1794 
1795 void
arm7tdmi_setup(char *args)
1798 {
1799 	int cpuctrl;
1800 
1801 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1802 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1803 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
1804 
1805 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1806 	cpuctrl = parse_cpu_options(args, arm7tdmi_options, cpuctrl);
1807 
1808 #ifdef __ARMEB__
1809 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1810 #endif
1811 
1812 	/* Clear out the cache */
1813 	cpu_idcache_wbinv_all();
1814 
1815 	/* Set the control register */
1816 	curcpu()->ci_ctrl = cpuctrl;
1817 	cpu_control(0xffffffff, cpuctrl);
1818 }
1819 #endif	/* CPU_ARM7TDMI */
1820 
1821 #ifdef CPU_ARM8
1822 struct cpu_option arm8_options[] = {
1823 	{ "arm8.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
1824 	{ "arm8.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
1825 	{ "arm8.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
1826 	{ "arm8.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
1827 #ifdef COMPAT_12
1828 	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
1829 #endif	/* COMPAT_12 */
1830 	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
1831 	{ "arm8.branchpredict",	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
1832 	{ NULL,			IGN, IGN, 0 }
1833 };
1834 
1835 void
arm8_setup(char *args)
1838 {
1839 	int integer;
1840 	int cpuctrl, cpuctrlmask;
1841 	int clocktest;
1842 	int setclock = 0;
1843 
1844 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1845 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1846 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
1847 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1848 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1849 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
1850 		 | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_ROM_ENABLE
1851 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE;
1852 
1853 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1854 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1855 #endif
1856 
1857 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1858 	cpuctrl = parse_cpu_options(args, arm8_options, cpuctrl);
1859 
1860 #ifdef __ARMEB__
1861 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1862 #endif
1863 
1864 	/* Get clock configuration */
1865 	clocktest = arm8_clock_config(0, 0) & 0x0f;
1866 
1867 	/* Special ARM8 clock and test configuration */
1868 	if (get_bootconf_option(args, "arm8.clock.reset", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1869 		clocktest = 0;
1870 		setclock = 1;
1871 	}
1872 	if (get_bootconf_option(args, "arm8.clock.dynamic", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1873 		if (integer)
1874 			clocktest |= 0x01;
1875 		else
1876 			clocktest &= ~(0x01);
1877 		setclock = 1;
1878 	}
1879 	if (get_bootconf_option(args, "arm8.clock.sync", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1880 		if (integer)
1881 			clocktest |= 0x02;
1882 		else
1883 			clocktest &= ~(0x02);
1884 		setclock = 1;
1885 	}
1886 	if (get_bootconf_option(args, "arm8.clock.fast", BOOTOPT_TYPE_BININT, &integer)) {
		/* The fast-clock field is bits 3:2, so clear 0x0c, not 0xc0. */
		clocktest = (clocktest & ~0x0c) | (integer & 3) << 2;
1888 		setclock = 1;
1889 	}
1890 	if (get_bootconf_option(args, "arm8.test", BOOTOPT_TYPE_BININT, &integer)) {
1891 		clocktest |= (integer & 7) << 5;
1892 		setclock = 1;
1893 	}
1894 
1895 	/* Clear out the cache */
1896 	cpu_idcache_wbinv_all();
1897 
1898 	/* Set the control register */
1899 	curcpu()->ci_ctrl = cpuctrl;
1900 	cpu_control(0xffffffff, cpuctrl);
1901 
1902 	/* Set the clock/test register */
1903 	if (setclock)
1904 		arm8_clock_config(0x7f, clocktest);
1905 }
1906 #endif	/* CPU_ARM8 */
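
/*
 * Layout of the ARM8 clock/test value assembled above, as implied by the
 * shifts and masks in arm8_setup() (a reading of the code, not a datasheet
 * reference; the macro names below are hypothetical):
 */
#if 0	/* illustrative only */
#define ARM8_CT_DYNAMIC		0x01	/* bit 0: arm8.clock.dynamic */
#define ARM8_CT_SYNC		0x02	/* bit 1: arm8.clock.sync */
#define ARM8_CT_FAST_MASK	0x0c	/* bits 3:2: arm8.clock.fast */
#define ARM8_CT_TEST_MASK	0xe0	/* bits 7:5: arm8.test */
#endif
/*
 * If the first argument to arm8_clock_config() is a write mask (as the
 * cpu_control-style calls elsewhere suggest), note that 0x7f covers only
 * bits 6:0, so the top bit of the arm8.test field is never written back.
 */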
1907 
1908 #ifdef CPU_ARM9
1909 struct cpu_option arm9_options[] = {
1910 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1911 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm9.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1913 	{ "arm9.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
1914 	{ "arm9.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
1915 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
1916 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
1917 	{ "arm9.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
1918 	{ NULL,			IGN, IGN, 0 }
1919 };
1920 
1921 void
arm9_setup(char *args)
1924 {
1925 	int cpuctrl, cpuctrlmask;
1926 
1927 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1928 	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1929 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1930 	    | CPU_CONTROL_WBUF_ENABLE;
1931 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1932 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1933 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1934 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1935 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1936 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
1937 		 | CPU_CONTROL_ROUNDROBIN;
1938 
1939 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1940 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1941 #endif
1942 
1943 	cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);
1944 
1945 #ifdef __ARMEB__
1946 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1947 #endif
1948 
1949 	if (vector_page == ARM_VECTORS_HIGH)
1950 		cpuctrl |= CPU_CONTROL_VECRELOC;
1951 
1952 	/* Clear out the cache */
1953 	cpu_idcache_wbinv_all();
1954 
1955 	/* Set the control register */
1956 	curcpu()->ci_ctrl = cpuctrl;
1957 	cpu_control(cpuctrlmask, cpuctrl);
}
1960 #endif	/* CPU_ARM9 */
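
/*
 * Unlike the ARM6/7/8 routines, arm9_setup() passes cpuctrlmask rather
 * than 0xffffffff to cpu_control(), so control-register bits outside the
 * mask keep their current values.  Under the clear-then-merge behaviour
 * sketched after arm6_setup() (values here are hypothetical):
 *
 *	old = 0x00001078;			current cp15 control register
 *	new = (old & ~cpuctrlmask) ^ cpuctrl;	bits outside cpuctrlmask
 *						survive unchanged
 */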
1961 
1962 #ifdef CPU_ARM10
1963 struct cpu_option arm10_options[] = {
1964 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1965 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1966 	{ "arm10.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1967 	{ "arm10.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
1968 	{ "arm10.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
1969 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
1970 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
1971 	{ "arm10.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
1972 	{ NULL,			IGN, IGN, 0 }
1973 };
1974 
1975 void
arm10_setup(char *args)
1978 {
1979 	int cpuctrl, cpuctrlmask;
1980 
1981 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1982 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1983 	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
1984 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1985 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1986 	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1987 	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1988 	    | CPU_CONTROL_BPRD_ENABLE
1989 	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
1990 
1991 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1992 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1993 #endif
1994 
1995 	cpuctrl = parse_cpu_options(args, arm10_options, cpuctrl);
1996 
1997 #ifdef __ARMEB__
1998 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1999 #endif
2000 
2001 	/* Clear out the cache */
2002 	cpu_idcache_wbinv_all();
2003 
	/* Now really make sure they are clean: invalidate I+D caches. */
	asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
2006 
2007 	/* Set the control register */
2008 	curcpu()->ci_ctrl = cpuctrl;
2009 	cpu_control(0xffffffff, cpuctrl);
2010 
2011 	/* And again. */
2012 	cpu_idcache_wbinv_all();
2013 }
2014 #endif	/* CPU_ARM10 */
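
/*
 * The bare "mcr ... r0 ..." above transfers whatever value happens to be
 * in r0; the coprocessor ignores it for this invalidate operation.  A
 * tighter form (a sketch, not the kernel's API) lets the compiler choose
 * the register and declares the memory side effect:
 */
#if 0	/* illustrative only */
static inline void
example_idcache_inv_all(void)
{
	unsigned int scratch = 0;

	/* Invalidate both the I and D caches (cp15 c7, c7, 0). */
	__asm volatile("mcr p15, 0, %0, c7, c7, 0" :: "r" (scratch) : "memory");
}
#endif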
2015 
2016 #ifdef CPU_ARM11
2017 struct cpu_option arm11_options[] = {
2018 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2019 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2020 	{ "arm11.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2021 	{ "arm11.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2022 	{ "arm11.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2023 	{ NULL,			IGN, IGN, 0 }
2024 };
2025 
2026 void
arm11_setup(char *args)
2029 {
2030 	int cpuctrl, cpuctrlmask;
2031 
2032 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2033 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2034 	    /* | CPU_CONTROL_BPRD_ENABLE */;
2035 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2036 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2037 	    | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BPRD_ENABLE
2038 	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2039 	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
2040 
2041 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2042 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2043 #endif
2044 
2045 	cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);
2046 
2047 #ifdef __ARMEB__
2048 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2049 #endif
2050 
2051 	/* Clear out the cache */
2052 	cpu_idcache_wbinv_all();
2053 
	/* Now really make sure they are clean: invalidate I+D caches. */
	asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
2056 
2057 	/* Set the control register */
2058 	curcpu()->ci_ctrl = cpuctrl;
2059 	cpu_control(0xffffffff, cpuctrl);
2060 
2061 	/* And again. */
2062 	cpu_idcache_wbinv_all();
2063 }
2064 #endif	/* CPU_ARM11 */
2065 
2066 #ifdef CPU_SA110
2067 struct cpu_option sa110_options[] = {
2068 #ifdef COMPAT_12
2069 	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2070 	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
2071 #endif	/* COMPAT_12 */
2072 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2073 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2074 	{ "sa110.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2075 	{ "sa110.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2076 	{ "sa110.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2077 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2078 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2079 	{ "sa110.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2080 	{ NULL,			IGN, IGN, 0 }
2081 };
2082 
2083 void
sa110_setup(char *args)
2086 {
2087 	int cpuctrl, cpuctrlmask;
2088 
2089 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2090 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2091 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2092 		 | CPU_CONTROL_WBUF_ENABLE;
2093 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2094 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2095 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2096 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2097 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2098 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
2099 		 | CPU_CONTROL_CPCLK;
2100 
2101 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2102 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2103 #endif
2104 
2105 	cpuctrl = parse_cpu_options(args, sa110_options, cpuctrl);
2106 
2107 #ifdef __ARMEB__
2108 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2109 #endif
2110 
2111 	/* Clear out the cache */
2112 	cpu_idcache_wbinv_all();
2113 
2114 	/* Set the control register */
2115 	curcpu()->ci_ctrl = cpuctrl;
2116 /*	cpu_control(cpuctrlmask, cpuctrl);*/
2117 	cpu_control(0xffffffff, cpuctrl);
2118 
	/*
	 * Enable clock switching.  Note that this neither reads nor writes
	 * r0; r0 appears only to make the asm syntactically valid.
	 */
2123 	__asm ("mcr 15, 0, r0, c15, c1, 2");
2124 }
2125 #endif	/* CPU_SA110 */
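
/*
 * The same constraint technique applies to the SA-110 clock-switching
 * enable above; a sketch (function name hypothetical) that avoids the
 * unconstrained r0 reference:
 */
#if 0	/* illustrative only */
static inline void
example_sa110_enable_clockswitching(void)
{
	unsigned int scratch = 0;

	/* cp15 register 15 (c15, c1), opcode 2: enable clock switching. */
	__asm volatile("mcr p15, 0, %0, c15, c1, 2" :: "r" (scratch));
}
#endif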
2126 
2127 #if defined(CPU_SA1100) || defined(CPU_SA1110)
2128 struct cpu_option sa11x0_options[] = {
2129 #ifdef COMPAT_12
2130 	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2131 	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
2132 #endif	/* COMPAT_12 */
2133 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2134 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2135 	{ "sa11x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2136 	{ "sa11x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2137 	{ "sa11x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2138 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2139 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2140 	{ "sa11x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2141 	{ NULL,			IGN, IGN, 0 }
2142 };
2143 
2144 void
sa11x0_setup(char *args)
2147 {
2148 	int cpuctrl, cpuctrlmask;
2149 
2150 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2151 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2152 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2153 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
2154 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2155 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2156 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2157 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2158 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2159 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
2160 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
2161 
2162 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2163 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2164 #endif
2165 
2166 	cpuctrl = parse_cpu_options(args, sa11x0_options, cpuctrl);
2167 
2168 #ifdef __ARMEB__
2169 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2170 #endif
2171 
2172 	if (vector_page == ARM_VECTORS_HIGH)
2173 		cpuctrl |= CPU_CONTROL_VECRELOC;
2174 
2175 	/* Clear out the cache */
2176 	cpu_idcache_wbinv_all();
2177 
	/* Set the control register */
	curcpu()->ci_ctrl = cpuctrl;
	cpu_control(0xffffffff, cpuctrl);
2180 }
2181 #endif	/* CPU_SA1100 || CPU_SA1110 */
2182 
2183 #if defined(CPU_IXP12X0)
2184 struct cpu_option ixp12x0_options[] = {
2185 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2186 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2187 	{ "ixp12x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2188 	{ "ixp12x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2189 	{ "ixp12x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2190 	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2191 	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
2192 	{ "ixp12x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
2193 	{ NULL,			IGN, IGN, 0 }
2194 };
2195 
2196 void
ixp12x0_setup(char *args)
2199 {
2200 	int cpuctrl, cpuctrlmask;
2201 
2203 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE
2204 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_SYST_ENABLE
2205 		 | CPU_CONTROL_IC_ENABLE;
2206 
2207 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_AFLT_ENABLE
2208 		 | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE
2209 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_SYST_ENABLE
2210 		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_IC_ENABLE
2211 		 | CPU_CONTROL_VECRELOC;
2212 
2213 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2214 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2215 #endif
2216 
2217 	cpuctrl = parse_cpu_options(args, ixp12x0_options, cpuctrl);
2218 
2219 #ifdef __ARMEB__
2220 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2221 #endif
2222 
2223 	if (vector_page == ARM_VECTORS_HIGH)
2224 		cpuctrl |= CPU_CONTROL_VECRELOC;
2225 
2226 	/* Clear out the cache */
2227 	cpu_idcache_wbinv_all();
2228 
2229 	/* Set the control register */
2230 	curcpu()->ci_ctrl = cpuctrl;
2231 	/* cpu_control(0xffffffff, cpuctrl); */
2232 	cpu_control(cpuctrlmask, cpuctrl);
2233 }
2234 #endif /* CPU_IXP12X0 */
2235 
2236 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
2237     defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425)
2238 struct cpu_option xscale_options[] = {
2239 #ifdef COMPAT_12
2240 	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2241 	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2242 #endif	/* COMPAT_12 */
2243 	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2244 	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2245 	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2246 	{ "xscale.branchpredict", BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
2247 	{ "xscale.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2248 	{ "xscale.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
2249 	{ "xscale.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
2250 	{ NULL,			IGN, IGN, 0 }
2251 };
2252 
2253 void
xscale_setup(char *args)
2256 {
2257 	uint32_t auxctl;
2258 	int cpuctrl, cpuctrlmask;
2259 
	/*
	 * The XScale Write Buffer is always enabled.  Our only option
	 * is to enable/disable coalescing.  Note that bits 6:3 of the
	 * control register must always be set to 1.
	 */
2265 
2266 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2267 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2268 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2269 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
2270 		 | CPU_CONTROL_BPRD_ENABLE;
2271 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2272 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2273 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2274 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2275 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2276 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
2277 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
2278 
2279 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2280 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2281 #endif
2282 
2283 	cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl);
2284 
2285 #ifdef __ARMEB__
2286 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2287 #endif
2288 
2289 	if (vector_page == ARM_VECTORS_HIGH)
2290 		cpuctrl |= CPU_CONTROL_VECRELOC;
2291 
2292 	/* Clear out the cache */
2293 	cpu_idcache_wbinv_all();
2294 
2295 	/*
2296 	 * Set the control register.  Note that bits 6:3 must always
2297 	 * be set to 1.
2298 	 */
2299 	curcpu()->ci_ctrl = cpuctrl;
2300 /*	cpu_control(cpuctrlmask, cpuctrl);*/
2301 	cpu_control(0xffffffff, cpuctrl);
2302 
2303 	/* Make sure write coalescing is turned on */
2304 	__asm __volatile("mrc p15, 0, %0, c1, c0, 1"
2305 		: "=r" (auxctl));
2306 #ifdef XSCALE_NO_COALESCE_WRITES
2307 	auxctl |= XSCALE_AUXCTL_K;
2308 #else
2309 	auxctl &= ~XSCALE_AUXCTL_K;
2310 #endif
2311 	__asm __volatile("mcr p15, 0, %0, c1, c0, 1"
2312 		: : "r" (auxctl));
2313 }
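
/*
 * The read-modify-write of the XScale auxiliary control register above,
 * refactored as a sketch (function name hypothetical; XSCALE_AUXCTL_K is
 * the write-coalescing disable bit, per the code above):
 */
#if 0	/* illustrative only */
static inline void
example_xscale_set_coalescing(int disable)
{
	uint32_t auxctl;

	__asm __volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (auxctl));
	if (disable)
		auxctl |= XSCALE_AUXCTL_K;	/* inhibit write coalescing */
	else
		auxctl &= ~XSCALE_AUXCTL_K;	/* permit write coalescing */
	__asm __volatile("mcr p15, 0, %0, c1, c0, 1" :: "r" (auxctl));
}
#endif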
2314 #endif	/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 */
2315