xref: /netbsd-src/sys/arch/arm/arm/cpufunc.c (revision 220b5c059a84c51ea44107ea8951a57ffaecdc8c)
1 /*	$NetBSD: cpufunc.c,v 1.23 2001/12/08 21:30:04 chris Exp $	*/
2 
3 /*
4  * arm7tdmi support code Copyright (c) 2001 John Fremlin
5  * arm8 support code Copyright (c) 1997 ARM Limited
6  * arm8 support code Copyright (c) 1997 Causality Limited
7  * arm9 support code Copyright (C) 2001 ARM Ltd
8  * Copyright (c) 1997 Mark Brinicombe.
9  * Copyright (c) 1997 Causality Limited
10  * All rights reserved.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. All advertising materials mentioning features or use of this software
21  *    must display the following acknowledgement:
22  *	This product includes software developed by Causality Limited.
23  * 4. The name of Causality Limited may not be used to endorse or promote
24  *    products derived from this software without specific prior written
25  *    permission.
26  *
27  * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
28  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
29  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
30  * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
31  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
32  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
33  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37  * SUCH DAMAGE.
38  *
39  * RiscBSD kernel project
40  *
41  * cpufuncs.c
42  *
43  * C functions for supporting CPU / MMU / TLB specific operations.
44  *
45  * Created      : 30/01/97
46  */
47 
48 #include "opt_compat_netbsd.h"
49 #include "opt_cputypes.h"
50 #include "opt_pmap_debug.h"
51 
52 #include <sys/types.h>
53 #include <sys/param.h>
54 #include <sys/systm.h>
55 #include <machine/cpu.h>
56 #include <machine/bootconfig.h>
57 #include <arch/arm/arm/disassem.h>
58 
59 #include <arm/cpufunc.h>
60 
61 #ifdef CPU_XSCALE
62 #include <arm/xscale/i80200reg.h>
63 #endif
64 
/* PRIMARY CACHE VARIABLES */
/*
 * Filled in by get_cachetype() from the CP15 cache-type register;
 * left at zero when the CPU does not report cache geometry.
 */
int	arm_picache_size;	/* I-cache total size, bytes */
int	arm_picache_line_size;	/* I-cache line size, bytes */
int	arm_picache_ways;	/* I-cache associativity */

int	arm_pdcache_size;	/* and unified */
int	arm_pdcache_line_size;	/* D/unified cache line size, bytes */
int	arm_pdcache_ways;	/* D/unified cache associativity */

int	arm_pcache_type;	/* CPU_CT_CTYPE() of cache-type register */
int	arm_pcache_unified;	/* non-zero if I and D caches are unified */

int	arm_dcache_align;	/* D-cache line size; alignment for cache ops */
int	arm_dcache_align_mask;	/* arm_dcache_align - 1 */
79 
#ifdef CPU_ARM3
/*
 * ARM3 operations vector.  The MMU entries are NULL and the TLB
 * entries are no-ops (no MMU operations are provided for this CPU);
 * every cache operation that does anything maps onto a whole-cache
 * flush via arm3_cache_flush().
 */
struct cpu_functions arm3_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	arm3_control,			/* control		*/
	NULL,				/* domain		*/
	NULL,				/* setttb		*/
	NULL,				/* faultstatus		*/
	NULL,				/* faultaddress		*/

	/* TLB functions */

	cpufunc_nullop,			/* tlb_flushID		*/
	(void *)cpufunc_nullop,		/* tlb_flushID_SE	*/
	cpufunc_nullop,			/* tlb_flushI		*/
	(void *)cpufunc_nullop,		/* tlb_flushI_SE	*/
	cpufunc_nullop,			/* tlb_flushD		*/
	(void *)cpufunc_nullop,		/* tlb_flushD_SE	*/

	/* Cache functions */

	arm3_cache_flush,		/* cache_flushID	*/
	(void *)arm3_cache_flush,	/* cache_flushID_SE	*/
	arm3_cache_flush,		/* cache_flushI		*/
	(void *)arm3_cache_flush,	/* cache_flushI_SE	*/
	arm3_cache_flush,		/* cache_flushD		*/
	(void *)arm3_cache_flush,	/* cache_flushD_SE	*/

	cpufunc_nullop,			/* cache_cleanID	*/
	(void *)cpufunc_nullop,		/* cache_cleanID_E	*/
	cpufunc_nullop,			/* cache_cleanD		*/
	(void *)cpufunc_nullop,		/* cache_cleanD_E	*/

	arm3_cache_flush,		/* cache_purgeID	*/
	(void *)arm3_cache_flush,	/* cache_purgeID_E	*/
	arm3_cache_flush,		/* cache_purgeD		*/
	(void *)arm3_cache_flush,	/* cache_purgeD_E	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	cpufunc_nullop,			/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_nullop,			/* cache_syncI		*/
	(void *)cpufunc_nullop,		/* cache_cleanID_rng	*/
	(void *)cpufunc_nullop,		/* cache_cleanD_rng	*/
	(void *)arm3_cache_flush,	/* cache_purgeID_rng	*/
	(void *)arm3_cache_flush,	/* cache_purgeD_rng	*/
	(void *)cpufunc_nullop,		/* cache_syncI_rng	*/

	early_abort_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	NULL,				/* context_switch	*/

	(void *)cpufunc_nullop		/* cpu setup		*/

};
#endif	/* CPU_ARM3 */
150 
#ifdef CPU_ARM6
/*
 * ARM6 operations vector.  Shares the arm67_* MMU/TLB/cache routines
 * with the ARM7; the data-abort fixup is chosen at compile time by
 * the ARM6_LATE_ABORT option.
 */
struct cpu_functions arm6_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	arm67_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	arm67_tlb_flush,		/* tlb_flushID		*/
	arm67_tlb_purge,		/* tlb_flushID_SE	*/
	arm67_tlb_flush,		/* tlb_flushI		*/
	arm67_tlb_purge,		/* tlb_flushI_SE	*/
	arm67_tlb_flush,		/* tlb_flushD		*/
	arm67_tlb_purge,		/* tlb_flushD_SE	*/

	/* Cache functions */

	arm67_cache_flush,		/* cache_flushID	*/
	(void *)arm67_cache_flush,	/* cache_flushID_SE	*/
	arm67_cache_flush,		/* cache_flushI		*/
	(void *)arm67_cache_flush,	/* cache_flushI_SE	*/
	arm67_cache_flush,		/* cache_flushD		*/
	(void *)arm67_cache_flush,	/* cache_flushD_SE	*/

	cpufunc_nullop,			/* cache_cleanID	*/
	(void *)cpufunc_nullop,		/* cache_cleanID_E	*/
	cpufunc_nullop,			/* cache_cleanD		*/
	(void *)cpufunc_nullop,		/* cache_cleanD_E	*/

	arm67_cache_flush,		/* cache_purgeID	*/
	(void *)arm67_cache_flush,	/* cache_purgeID_E	*/
	arm67_cache_flush,		/* cache_purgeD		*/
	(void *)arm67_cache_flush,	/* cache_purgeD_E	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	cpufunc_nullop,			/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_nullop,			/* cache_syncI		*/
	(void *)cpufunc_nullop,		/* cache_cleanID_rng	*/
	(void *)cpufunc_nullop,		/* cache_cleanD_rng	*/
	(void *)arm67_cache_flush,	/* cache_purgeID_rng	*/
	(void *)arm67_cache_flush,	/* cache_purgeD_rng	*/
	(void *)cpufunc_nullop,		/* cache_syncI_rng	*/

#ifdef ARM6_LATE_ABORT
	late_abort_fixup,		/* dataabt_fixup	*/
#else
	early_abort_fixup,		/* dataabt_fixup	*/
#endif
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm67_context_switch,		/* context_switch	*/

	arm6_setup			/* cpu setup		*/

};
#endif	/* CPU_ARM6 */
225 
#ifdef CPU_ARM7
/*
 * ARM7 operations vector.  Identical to the ARM6 vector except that
 * the data-abort fixup is always the late-abort variant and cpu setup
 * is arm7_setup().
 */
struct cpu_functions arm7_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	arm67_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	arm67_tlb_flush,		/* tlb_flushID		*/
	arm67_tlb_purge,		/* tlb_flushID_SE	*/
	arm67_tlb_flush,		/* tlb_flushI		*/
	arm67_tlb_purge,		/* tlb_flushI_SE	*/
	arm67_tlb_flush,		/* tlb_flushD		*/
	arm67_tlb_purge,		/* tlb_flushD_SE	*/

	/* Cache functions */

	arm67_cache_flush,		/* cache_flushID	*/
	(void *)arm67_cache_flush,	/* cache_flushID_SE	*/
	arm67_cache_flush,		/* cache_flushI		*/
	(void *)arm67_cache_flush,	/* cache_flushI_SE	*/
	arm67_cache_flush,		/* cache_flushD		*/
	(void *)arm67_cache_flush,	/* cache_flushD_SE	*/

	cpufunc_nullop,			/* cache_cleanID	*/
	(void *)cpufunc_nullop,		/* cache_cleanID_E	*/
	cpufunc_nullop,			/* cache_cleanD		*/
	(void *)cpufunc_nullop,		/* cache_cleanD_E	*/

	arm67_cache_flush,		/* cache_purgeID	*/
	(void *)arm67_cache_flush,	/* cache_purgeID_E	*/
	arm67_cache_flush,		/* cache_purgeD		*/
	(void *)arm67_cache_flush,	/* cache_purgeD_E	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	cpufunc_nullop,			/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_nullop,			/* cache_syncI		*/
	(void *)cpufunc_nullop,		/* cache_cleanID_rng	*/
	(void *)cpufunc_nullop,		/* cache_cleanD_rng	*/
	(void *)arm67_cache_flush,	/* cache_purgeID_rng	*/
	(void *)arm67_cache_flush,	/* cache_purgeD_rng	*/
	(void *)cpufunc_nullop,		/* cache_syncI_rng	*/

	late_abort_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm67_context_switch,		/* context_switch	*/

	arm7_setup			/* cpu setup		*/

};
#endif	/* CPU_ARM7 */
296 
#ifdef CPU_ARM7TDMI
/*
 * ARM7TDMI operations vector.  Uses dedicated arm7tdmi_* TLB and
 * cache routines; all flavours of cache flush/purge map onto the
 * combined-cache flush arm7tdmi_cache_flushID().
 */
struct cpu_functions arm7tdmi_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	arm7tdmi_setttb,		/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	arm7tdmi_tlb_flushID,		/* tlb_flushID		*/
	arm7tdmi_tlb_flushID_SE,	/* tlb_flushID_SE	*/
	arm7tdmi_tlb_flushID,		/* tlb_flushI		*/
	arm7tdmi_tlb_flushID_SE,	/* tlb_flushI_SE	*/
	arm7tdmi_tlb_flushID,		/* tlb_flushD		*/
	arm7tdmi_tlb_flushID_SE,	/* tlb_flushD_SE	*/

	/* Cache functions */

	arm7tdmi_cache_flushID,		/* cache_flushID	*/
	(void *)arm7tdmi_cache_flushID,	/* cache_flushID_SE	*/
	arm7tdmi_cache_flushID,		/* cache_flushI		*/
	(void *)arm7tdmi_cache_flushID,	/* cache_flushI_SE	*/
	arm7tdmi_cache_flushID,		/* cache_flushD		*/
	(void *)arm7tdmi_cache_flushID,	/* cache_flushD_SE	*/

	cpufunc_nullop,			/* cache_cleanID	*/
	(void *)cpufunc_nullop,		/* cache_cleanID_E	*/
	cpufunc_nullop,			/* cache_cleanD		*/
	(void *)cpufunc_nullop,		/* cache_cleanD_E	*/

	arm7tdmi_cache_flushID,		/* cache_purgeID	*/
	(void *)arm7tdmi_cache_flushID,	/* cache_purgeID_E	*/
	arm7tdmi_cache_flushID,		/* cache_purgeD		*/
	(void *)arm7tdmi_cache_flushID,	/* cache_purgeD_E	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	cpufunc_nullop,			/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_nullop,			/* cache_syncI		*/
	(void *)cpufunc_nullop,		/* cache_cleanID_rng	*/
	(void *)cpufunc_nullop,		/* cache_cleanD_rng	*/
	(void *)arm7tdmi_cache_flushID,	/* cache_purgeID_rng	*/
	(void *)arm7tdmi_cache_flushID,	/* cache_purgeD_rng	*/
	(void *)cpufunc_nullop,		/* cache_syncI_rng	*/

	late_abort_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm7tdmi_context_switch,	/* context_switch	*/

	arm7tdmi_setup			/* cpu setup		*/

};
#endif	/* CPU_ARM7TDMI */
367 
#ifdef CPU_ARM8
/*
 * ARM8 operations vector.  First vector in this file with real
 * single-entry cache clean/purge operations (arm8_cache_*_E);
 * aborts need no fixup, so both fixup hooks are the null routine.
 */
struct cpu_functions arm8_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	arm8_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	arm8_tlb_flushID,		/* tlb_flushID		*/
	arm8_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	arm8_tlb_flushID,		/* tlb_flushI		*/
	arm8_tlb_flushID_SE,		/* tlb_flushI_SE	*/
	arm8_tlb_flushID,		/* tlb_flushD		*/
	arm8_tlb_flushID_SE,		/* tlb_flushD_SE	*/

	/* Cache functions */

	arm8_cache_flushID,		/* cache_flushID	*/
	arm8_cache_flushID_E,		/* cache_flushID_SE	*/
	arm8_cache_flushID,		/* cache_flushI		*/
	arm8_cache_flushID_E,		/* cache_flushI_SE	*/
	arm8_cache_flushID,		/* cache_flushD		*/
	arm8_cache_flushID_E,		/* cache_flushD_SE	*/

	arm8_cache_cleanID,		/* cache_cleanID	*/
	arm8_cache_cleanID_E,		/* cache_cleanID_E	*/
	arm8_cache_cleanID,		/* cache_cleanD		*/
	arm8_cache_cleanID_E,		/* cache_cleanD_E	*/

	arm8_cache_purgeID,		/* cache_purgeID	*/
	arm8_cache_purgeID_E,		/* cache_purgeID_E	*/
	arm8_cache_purgeID,		/* cache_purgeD		*/
	arm8_cache_purgeID_E,		/* cache_purgeD_E	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	cpufunc_nullop,			/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	(void *)cpufunc_nullop,		/* cache_syncI		*/
	(void *)arm8_cache_cleanID,	/* cache_cleanID_rng	*/
	(void *)arm8_cache_cleanID,	/* cache_cleanD_rng	*/
	(void *)arm8_cache_purgeID,	/* cache_purgeID_rng	*/
	(void *)arm8_cache_purgeID,	/* cache_purgeD_rng	*/
	(void *)cpufunc_nullop,		/* cache_syncI_rng	*/

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm8_context_switch,		/* context_switch	*/

	arm8_setup			/* cpu setup		*/
};
#endif	/* CPU_ARM8 */
437 
#ifdef CPU_ARM9
/*
 * ARM9 operations vector.  Uses the generic armv4 TLB and write-buffer
 * routines plus arm9-specific cache handling.  The cache is run in
 * write-through mode (set_cpufuncs() selects PT_C for ARM920T), so
 * the purge entries can simply be the flush routines.
 */
struct cpu_functions arm9_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	arm9_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm9_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache functions */

	arm9_cache_flushID,		/* cache_flushID	*/
	arm9_cache_flushID_SE,		/* cache_flushID_SE	*/
	arm9_cache_flushI,		/* cache_flushI		*/
	arm9_cache_flushI_SE,		/* cache_flushI_SE	*/
	arm9_cache_flushD,		/* cache_flushD		*/
	arm9_cache_flushD_SE,		/* cache_flushD_SE	*/

	/* ... let's use the cache in write-through mode.  */
	arm9_cache_cleanID,		/* cache_cleanID	*/
	(void *)arm9_cache_cleanID,	/* cache_cleanID_SE	*/
	arm9_cache_cleanID,		/* cache_cleanD		*/
	(void *)arm9_cache_cleanID,	/* cache_cleanD_SE	*/

	arm9_cache_flushID,		/* cache_purgeID	*/
	arm9_cache_flushID_SE,		/* cache_purgeID_SE	*/
	arm9_cache_flushD,		/* cache_purgeD		*/
	arm9_cache_flushD_SE,		/* cache_purgeD_SE	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */
	arm9_cache_syncI,		/* cache_syncI		*/
	(void *)arm9_cache_cleanID,	/* cache_cleanID_rng	*/
	(void *)arm9_cache_cleanID,	/* cache_cleanD_rng	*/
	arm9_cache_flushID_rng,		/* cache_purgeID_rng	*/
	arm9_cache_flushD_rng,		/* cache_purgeD_rng	*/
	arm9_cache_syncI_rng,		/* cache_syncI_rng	*/

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm9_context_switch,		/* context_switch	*/

	arm9_setup			/* cpu setup		*/

};
#endif /* CPU_ARM9 */
508 
#ifdef CPU_SA110
/*
 * StrongARM (SA-110/SA-1100/SA-1110) operations vector.  Uses the
 * generic armv4 TLB and write-buffer routines; full clean, purge and
 * range operations are provided by the sa110_* routines.
 */
struct cpu_functions sa110_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	sa110_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	sa110_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache functions */

	sa110_cache_flushID,		/* cache_flushID	*/
	(void *)sa110_cache_flushID,	/* cache_flushID_SE	*/
	sa110_cache_flushI,		/* cache_flushI		*/
	(void *)sa110_cache_flushI,	/* cache_flushI_SE	*/
	sa110_cache_flushD,		/* cache_flushD		*/
	sa110_cache_flushD_SE,		/* cache_flushD_SE	*/

	sa110_cache_cleanID,		/* cache_cleanID	*/
	/*
	 * NOTE(review): the D-line clean routine is installed in the
	 * ID-entry slot below, presumably because StrongARM cleans by
	 * D-cache line only -- confirm against sa110_* implementations.
	 */
	sa110_cache_cleanD_E,		/* cache_cleanID_E	*/
	sa110_cache_cleanD,		/* cache_cleanD		*/
	sa110_cache_cleanD_E,		/* cache_cleanD_E	*/

	sa110_cache_purgeID,		/* cache_purgeID	*/
	sa110_cache_purgeID_E,		/* cache_purgeID_E	*/
	sa110_cache_purgeD,		/* cache_purgeD		*/
	sa110_cache_purgeD_E,		/* cache_purgeD_E	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	sa110_cache_syncI,		/* cache_syncI		*/
	sa110_cache_cleanID_rng,	/* cache_cleanID_rng	*/
	sa110_cache_cleanD_rng,		/* cache_cleanD_rng	*/
	sa110_cache_purgeID_rng,	/* cache_purgeID_rng	*/
	sa110_cache_purgeD_rng,		/* cache_purgeD_rng	*/
	sa110_cache_syncI_rng,		/* cache_syncI_rng	*/

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	sa110_context_switch,		/* context_switch	*/

	sa110_setup			/* cpu setup		*/
};
#endif	/* CPU_SA110 */
578 
#ifdef CPU_XSCALE
/*
 * XScale operations vector (write-back configuration).  Differs from
 * the StrongARM vector mainly in the xscale_cpwait coprocessor-wait
 * hook and the xscale_* control/cache routines.
 */
struct cpu_functions xscale_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	xscale_cpwait,			/* cpwait		*/

	/* MMU functions */

	xscale_control,			/* control		*/
	cpufunc_domains,		/* domain		*/
	xscale_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	xscale_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache functions */

	xscale_cache_flushID,		/* cache_flushID	*/
	(void *)xscale_cache_flushID,	/* cache_flushID_SE	*/
	xscale_cache_flushI,		/* cache_flushI		*/
	(void *)xscale_cache_flushI,	/* cache_flushI_SE	*/
	xscale_cache_flushD,		/* cache_flushD		*/
	xscale_cache_flushD_SE,		/* cache_flushD_SE	*/

	xscale_cache_cleanID,		/* cache_cleanID	*/
	/* NOTE(review): D-line clean used for the ID-entry slot, as on SA-110. */
	xscale_cache_cleanD_E,		/* cache_cleanID_E	*/
	xscale_cache_cleanD,		/* cache_cleanD		*/
	xscale_cache_cleanD_E,		/* cache_cleanD_E	*/

	xscale_cache_purgeID,		/* cache_purgeID	*/
	xscale_cache_purgeID_E,		/* cache_purgeID_E	*/
	xscale_cache_purgeD,		/* cache_purgeD		*/
	xscale_cache_purgeD_E,		/* cache_purgeD_E	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	xscale_cache_syncI,		/* cache_syncI		*/
	xscale_cache_cleanID_rng,	/* cache_cleanID_rng	*/
	xscale_cache_cleanD_rng,	/* cache_cleanD_rng	*/
	xscale_cache_purgeID_rng,	/* cache_purgeID_rng	*/
	xscale_cache_purgeD_rng,	/* cache_purgeD_rng	*/
	xscale_cache_syncI_rng,		/* cache_syncI_rng	*/

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	xscale_context_switch,		/* context_switch	*/

	xscale_setup			/* cpu setup		*/
};
647 
/*
 * XScale operations vector for write-through cache operation (the
 * configuration actually installed by set_cpufuncs() for the i80200).
 * With a write-through cache no clean operations are needed, so the
 * clean entries are no-ops and purge reduces to flush.
 */
struct cpu_functions xscale_writethrough_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	xscale_cpwait,			/* cpwait		*/

	/* MMU functions */

	xscale_control,			/* control		*/
	cpufunc_domains,		/* domain		*/
	xscale_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	xscale_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache functions */

	xscale_cache_flushID,		/* cache_flushID	*/
	(void *)xscale_cache_flushID,	/* cache_flushID_SE	*/
	xscale_cache_flushI,		/* cache_flushI		*/
	(void *)xscale_cache_flushI,	/* cache_flushI_SE	*/
	xscale_cache_flushD,		/* cache_flushD		*/
	xscale_cache_flushD_SE,		/* cache_flushD_SE	*/

	cpufunc_nullop,			/* cache_cleanID	*/
	(void *)cpufunc_nullop,		/* cache_cleanID_E	*/
	cpufunc_nullop,			/* cache_cleanD		*/
	(void *)cpufunc_nullop,		/* cache_cleanD_E	*/

	xscale_cache_flushID,		/* cache_purgeID	*/
	(void *)xscale_cache_flushID,	/* cache_purgeID_E	*/
	xscale_cache_flushD,		/* cache_purgeD		*/
	xscale_cache_flushD_SE,		/* cache_purgeD_E	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	xscale_cache_flushI,		/* cache_syncI		*/
	(void *)cpufunc_nullop,		/* cache_cleanID_rng	*/
	(void *)cpufunc_nullop,		/* cache_cleanD_rng	*/
	xscale_cache_flushID_rng,	/* cache_purgeID_rng	*/
	xscale_cache_flushD_rng,	/* cache_purgeD_rng	*/
	xscale_cache_flushI_rng,	/* cache_syncI_rng	*/

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	xscale_context_switch,		/* context_switch	*/

	xscale_setup			/* cpu setup		*/
};
#endif /* CPU_XSCALE */
716 
717 /*
718  * Global constants also used by locore.s
719  */
720 
721 struct cpu_functions cpufuncs;
722 u_int cputype;
723 u_int cpu_reset_needs_v4_MMU_disable;	/* flag used in locore.s */
724 
#if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) || \
    defined(CPU_SA110) || defined(CPU_XSCALE)
/*
 * Read the CP15 cache-type register (c0, opcode2 1) and decode it
 * into the arm_p[id]cache_* globals.  If the CPU does not implement
 * the register the globals are left at their defaults and only
 * arm_dcache_align_mask is (re)derived.
 */
static void
get_cachetype(void)
{
	u_int ctype, isize, dsize;
	u_int multiplier;

	__asm __volatile("mrc p15, 0, %0, c0, c0, 1"
		: "=r" (ctype));

	/*
	 * ...and thus spake the ARM ARM:
	 *
	 * If an <opcode2> value corresponding to an unimplemented or
	 * reserved ID register is encountered, the System Control
	 * processor returns the value of the main ID register.
	 */
	if (ctype == cpufunc_id())
		goto out;

	/* S bit clear => unified I/D cache. */
	if ((ctype & CPU_CT_S) == 0)
		arm_pcache_unified = 1;

	/*
	 * If you want to know how this code works, go read the ARM ARM.
	 */

	arm_pcache_type = CPU_CT_CTYPE(ctype);

	if (arm_pcache_unified == 0) {
		isize = CPU_CT_ISIZE(ctype);
		multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
		arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
		if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
			if (isize & CPU_CT_xSIZE_M)
				arm_picache_line_size = 0; /* not present */
			else
				arm_picache_ways = 1; /* direct-mapped */
		} else {
			arm_picache_ways = multiplier <<
			    (CPU_CT_xSIZE_ASSOC(isize) - 1);
		}
		arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
	}

	dsize = CPU_CT_DSIZE(ctype);
	multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
	arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
	if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
		if (dsize & CPU_CT_xSIZE_M)
			arm_pdcache_line_size = 0; /* not present */
		else
			/*
			 * Direct-mapped cache: one way (was 0, which
			 * disagreed with the I-cache decode above).
			 */
			arm_pdcache_ways = 1;
	} else {
		arm_pdcache_ways = multiplier <<
		    (CPU_CT_xSIZE_ASSOC(dsize) - 1);
	}
	arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);

	arm_dcache_align = arm_pdcache_line_size;

 out:
	arm_dcache_align_mask = arm_dcache_align - 1;
}
#endif /* ARM7TDMI || ARM8 || ARM9 || SA110 || XSCALE */
791 
792 /*
793  * Cannot panic here as we may not have a console yet ...
794  */
795 
796 int
797 set_cpufuncs()
798 {
799 	cputype = cpufunc_id();
800 	cputype &= CPU_ID_CPU_MASK;
801 
802 
803 #ifdef CPU_ARM3
804 	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
805 	    (cputype & 0x00000f00) == 0x00000300) {
806 		cpufuncs = arm3_cpufuncs;
807 		cpu_reset_needs_v4_MMU_disable = 0;
808 		/* XXX Cache info? */
809 		arm_dcache_align_mask = -1;
810 		return 0;
811 	}
812 #endif	/* CPU_ARM3 */
813 #ifdef CPU_ARM6
814 	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
815 	    (cputype & 0x00000f00) == 0x00000600) {
816 		cpufuncs = arm6_cpufuncs;
817 		cpu_reset_needs_v4_MMU_disable = 0;
818 		/* XXX Cache info? */
819 		arm_dcache_align_mask = -1;
820 		return 0;
821 	}
822 #endif	/* CPU_ARM6 */
823 #ifdef CPU_ARM7
824 	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
825 	    CPU_ID_IS7(cputype) &&
826 	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V3) {
827 		cpufuncs = arm7_cpufuncs;
828 		cpu_reset_needs_v4_MMU_disable = 0;
829 		/* XXX Cache info? */
830 		arm_dcache_align_mask = -1;
831 		return 0;
832 	}
833 #endif	/* CPU_ARM7 */
834 #ifdef CPU_ARM7TDMI
835 	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
836 	    CPU_ID_IS7(cputype) &&
837 	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V4T) {
838 		cpufuncs = arm7tdmi_cpufuncs;
839 		cpu_reset_needs_v4_MMU_disable = 0;
840 		get_cachetype();
841 		return 0;
842 	}
843 #endif
844 #ifdef CPU_ARM8
845 	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
846 	    (cputype & 0x0000f000) == 0x00008000) {
847 		cpufuncs = arm8_cpufuncs;
848 		cpu_reset_needs_v4_MMU_disable = 0;	/* XXX correct? */
849 		get_cachetype();
850 		return 0;
851 	}
852 #endif	/* CPU_ARM8 */
853 #ifdef CPU_ARM9
854 	if (cputype == CPU_ID_ARM920T) {
855 		pte_cache_mode = PT_C;	/* Select write-through cacheing. */
856 		cpufuncs = arm9_cpufuncs;
857 		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
858 		get_cachetype();
859 		return 0;
860 	}
861 #endif /* CPU_ARM9 */
862 #ifdef CPU_SA110
863 	if (cputype == CPU_ID_SA110 || cputype == CPU_ID_SA1100 ||
864 	    cputype == CPU_ID_SA1110) {
865 		cpufuncs = sa110_cpufuncs;
866 		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it */
867 		get_cachetype();
868 		return 0;
869 	}
870 #endif	/* CPU_SA110 */
871 #ifdef CPU_XSCALE
872 	if (cputype == CPU_ID_I80200) {
873 		/*
874 		 * Reset the Interrupt Controller Unit to a pristine
875 		 * state:
876 		 *	- all interrupt sources disabled
877 		 *	- PMU/BCU sterred to IRQ
878 		 */
879 		__asm __volatile("mcr p13, 0, %0, c0, c0, 0"
880 			:
881 			: "r" (0));
882 		__asm __volatile("mcr p13, 0, %0, c2, c0, 0"
883 			:
884 			: "r" (0));
885 
886 		/*
887 		 * Reset the Performance Monitoring Unit to a
888 		 * pristine state:
889 		 *	- CCNT, PMN0, PMN1 reset to 0
890 		 *	- overflow indications cleared
891 		 *	- all counters disabled
892 		 */
893 		__asm __volatile("mcr p14, 0, %0, c0, c0, 0"
894 			:
895 			: "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
896 			       PMNC_CC_IF));
897 
898 		/*
899 		 * XXX Disable ECC in the Bus Controller Unit; we
900 		 * don't really support it, yet.  Clear any pending
901 		 * error indications.
902 		 */
903 		__asm __volatile("mcr p13, 0, %0, c0, c1, 0"
904 			:
905 			: "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));
906 
907 		pte_cache_mode = PT_C;	/* Select write-through cacheing. */
908 		cpufuncs = xscale_writethrough_cpufuncs;
909 		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
910 		get_cachetype();
911 		return 0;
912 	}
913 #endif /* CPU_XSCALE */
914 	/*
915 	 * Bzzzz. And the answer was ...
916 	 */
917 /*	panic("No support for this CPU type (%08x) in kernel", cputype);*/
918 	return(ARCHITECTURE_NOT_PRESENT);
919 }
920 
921 /*
922  * Fixup routines for data and prefetch aborts.
923  *
924  * Several compile time symbols are used
925  *
926  * DEBUG_FAULT_CORRECTION - Print debugging information during the
927  * correction of registers after a fault.
928  * ARM6_LATE_ABORT - ARM6 supports both early and late aborts
929  * when defined should use late aborts
930  */
931 
932 #if defined(DEBUG_FAULT_CORRECTION) && !defined(PMAP_DEBUG)
933 #error PMAP_DEBUG must be defined to use DEBUG_FAULT_CORRECTION
934 #endif
935 
936 /*
937  * Null abort fixup routine.
938  * For use when no fixup is required.
939  */
940 int
941 cpufunc_null_fixup(arg)
942 	void *arg;
943 {
944 	return(ABORT_FIXUP_OK);
945 }
946 
947 #if defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI)
948 #ifdef DEBUG_FAULT_CORRECTION
949 extern int pmap_debug_level;
950 #endif
951 #endif
952 
953 #if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3) || \
954     defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI)
955 /*
956  * "Early" data abort fixup.
957  *
958  * For ARM2, ARM2as, ARM3 and ARM6 (in early-abort mode).  Also used
959  * indirectly by ARM6 (in late-abort mode) and ARM7[TDMI].
960  *
961  * In early aborts, we may have to fix up LDM, STM, LDC and STC.
962  */
int
early_abort_fixup(arg)
	void *arg;
{
	trapframe_t *frame = arg;
	u_int fault_pc;
	u_int fault_instruction;
	int saved_lr = 0;

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
		 * as the fault happened in svc mode but we need it in the
		 * usr slot so we can treat the registers as an array of ints
		 * during fixing.
		 * NOTE: This PC is in the position but writeback is not
		 * allowed on r15.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the following fixup code.
		 */

		saved_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = frame->tf_svc_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed. This should not happen as the kstack is
		 * always valid.
		 */
	}

	/* Get fault address and status from the CPU */

	fault_pc = frame->tf_pc;
	fault_instruction = *((volatile unsigned int *)fault_pc);

	/* Decode the fault instruction and fix the registers as needed */

	/* Block transfer (LDM/STM, instruction bits 27:25 == 100)? */
	if ((fault_instruction & 0x0e000000) == 0x08000000) {
		int base;
		int loop;
		int count;
		int *registers = &frame->tf_r0;

#ifdef DEBUG_FAULT_CORRECTION
		if (pmap_debug_level >= 0) {
			printf("LDM/STM\n");
			disassemble(fault_pc);
		}
#endif	/* DEBUG_FAULT_CORRECTION */
		/* Bit 21 is the W (writeback) bit: the base was updated. */
		if (fault_instruction & (1 << 21)) {
#ifdef DEBUG_FAULT_CORRECTION
			if (pmap_debug_level >= 0)
				printf("This instruction must be corrected\n");
#endif	/* DEBUG_FAULT_CORRECTION */
			base = (fault_instruction >> 16) & 0x0f;
			if (base == 15)
				return ABORT_FIXUP_FAILED;
			/* Count registers transferred */
			count = 0;
			for (loop = 0; loop < 16; ++loop) {
				if (fault_instruction & (1<<loop))
					++count;
			}
#ifdef DEBUG_FAULT_CORRECTION
			if (pmap_debug_level >= 0) {
				printf("%d registers used\n", count);
				printf("Corrected r%d by %d bytes ", base, count * 4);
			}
#endif	/* DEBUG_FAULT_CORRECTION */
			/*
			 * Bit 23 is the U (up) bit: set means the base was
			 * incremented by 4 bytes per register, so undo by
			 * subtracting; clear means it was decremented.
			 */
			if (fault_instruction & (1 << 23)) {
#ifdef DEBUG_FAULT_CORRECTION
				if (pmap_debug_level >= 0)
					printf("down\n");
#endif	/* DEBUG_FAULT_CORRECTION */
				registers[base] -= count * 4;
			} else {
#ifdef DEBUG_FAULT_CORRECTION
				if (pmap_debug_level >= 0)
					printf("up\n");
#endif	/* DEBUG_FAULT_CORRECTION */
				registers[base] += count * 4;
			}
		}
	} else if ((fault_instruction & 0x0e000000) == 0x0c000000) {
		/* Coprocessor transfer (LDC/STC, bits 27:25 == 110). */
		int base;
		int offset;
		int *registers = &frame->tf_r0;

/* REGISTER CORRECTION IS REQUIRED FOR THESE INSTRUCTIONS */

#ifdef DEBUG_FAULT_CORRECTION
		if (pmap_debug_level >= 0)
			disassemble(fault_pc);
#endif	/* DEBUG_FAULT_CORRECTION */

/* Only need to fix registers if write back is turned on */

		if ((fault_instruction & (1 << 21)) != 0) {
			base = (fault_instruction >> 16) & 0x0f;
			if (base == 13 && (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
				return ABORT_FIXUP_FAILED;
			if (base == 15)
				return ABORT_FIXUP_FAILED;

			/* LDC/STC offsets are 8-bit word counts, hence << 2. */
			offset = (fault_instruction & 0xff) << 2;
#ifdef DEBUG_FAULT_CORRECTION
			if (pmap_debug_level >= 0)
				printf("r%d=%08x\n", base, registers[base]);
#endif	/* DEBUG_FAULT_CORRECTION */
			if ((fault_instruction & (1 << 23)) != 0)
				offset = -offset;
			registers[base] += offset;
#ifdef DEBUG_FAULT_CORRECTION
			if (pmap_debug_level >= 0)
				printf("r%d=%08x\n", base, registers[base]);
#endif	/* DEBUG_FAULT_CORRECTION */
		}
		/*
		 * NOTE(review): the condition below is identical to the
		 * LDC/STC test above, so this branch is unreachable dead
		 * code.  Presumably a different instruction class was
		 * intended - confirm against the ARM instruction-space
		 * decode before removing or altering it.
		 */
	} else if ((fault_instruction & 0x0e000000) == 0x0c000000)
		return ABORT_FIXUP_FAILED;

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
		 * as the fault happened in svc mode but we need it in the
		 * usr slot so we can treat the registers as an array of ints
		 * during fixing.
		 * NOTE: This PC is in the position but writeback is not
		 * allowed on r15.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the prior fixup code.
		 */

		frame->tf_svc_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = saved_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed. This should not happen as the kstack is
		 * always valid.
		 */
	}

	return(ABORT_FIXUP_OK);
}
1116 #endif	/* CPU_ARM2/250/3/6/7 */
1117 
1118 #if (defined(CPU_ARM6) && defined(ARM6_LATE_ABORT)) || defined(CPU_ARM7) || \
1119 	defined(CPU_ARM7TDMI)
1120 /*
1121  * "Late" (base updated) data abort fixup
1122  *
1123  * For ARM6 (in late-abort mode) and ARM7.
1124  *
1125  * In this model, all data-transfer instructions need fixing up.  We defer
1126  * LDM, STM, LDC and STC fixup to the early-abort handler.
1127  */
1128 int
1129 late_abort_fixup(arg)
1130 	void *arg;
1131 {
1132 	trapframe_t *frame = arg;
1133 	u_int fault_pc;
1134 	u_int fault_instruction;
1135 	int saved_lr = 0;
1136 
1137 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1138 
1139 		/* Ok an abort in SVC mode */
1140 
1141 		/*
1142 		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1143 		 * as the fault happened in svc mode but we need it in the
1144 		 * usr slot so we can treat the registers as an array of ints
1145 		 * during fixing.
1146 		 * NOTE: This PC is in the position but writeback is not
1147 		 * allowed on r15.
1148 		 * Doing it like this is more efficient than trapping this
1149 		 * case in all possible locations in the following fixup code.
1150 		 */
1151 
1152 		saved_lr = frame->tf_usr_lr;
1153 		frame->tf_usr_lr = frame->tf_svc_lr;
1154 
1155 		/*
1156 		 * Note the trapframe does not have the SVC r13 so a fault
1157 		 * from an instruction with writeback to r13 in SVC mode is
1158 		 * not allowed. This should not happen as the kstack is
1159 		 * always valid.
1160 		 */
1161 	}
1162 
1163 	/* Get fault address and status from the CPU */
1164 
1165 	fault_pc = frame->tf_pc;
1166 	fault_instruction = *((volatile unsigned int *)fault_pc);
1167 
1168 	/* Decode the fault instruction and fix the registers as needed */
1169 
1170 	/* Was is a swap instruction ? */
1171 
1172 	if ((fault_instruction & 0x0fb00ff0) == 0x01000090) {
1173 #ifdef DEBUG_FAULT_CORRECTION
1174 		if (pmap_debug_level >= 0)
1175 			disassemble(fault_pc);
1176 #endif	/* DEBUG_FAULT_CORRECTION */
1177 	} else if ((fault_instruction & 0x0c000000) == 0x04000000) {
1178 
1179 		/* Was is a ldr/str instruction */
1180 		/* This is for late abort only */
1181 
1182 		int base;
1183 		int offset;
1184 		int *registers = &frame->tf_r0;
1185 
1186 #ifdef DEBUG_FAULT_CORRECTION
1187 		if (pmap_debug_level >= 0)
1188 			disassemble(fault_pc);
1189 #endif	/* DEBUG_FAULT_CORRECTION */
1190 
1191 		/* This is for late abort only */
1192 
1193 		if ((fault_instruction & (1 << 24)) == 0
1194 		    || (fault_instruction & (1 << 21)) != 0) {
1195 			base = (fault_instruction >> 16) & 0x0f;
1196 			if (base == 13 && (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
1197 				return ABORT_FIXUP_FAILED;
1198 			if (base == 15)
1199 				return ABORT_FIXUP_FAILED;
1200 #ifdef DEBUG_FAULT_CORRECTION
1201 			if (pmap_debug_level >=0)
1202 				printf("late abt fix: r%d=%08x ", base, registers[base]);
1203 #endif	/* DEBUG_FAULT_CORRECTION */
1204 			if ((fault_instruction & (1 << 25)) == 0) {
1205 				/* Immediate offset - easy */
1206 				offset = fault_instruction & 0xfff;
1207 				if ((fault_instruction & (1 << 23)))
1208 					offset = -offset;
1209 				registers[base] += offset;
1210 #ifdef DEBUG_FAULT_CORRECTION
1211 				if (pmap_debug_level >=0)
1212 					printf("imm=%08x ", offset);
1213 #endif	/* DEBUG_FAULT_CORRECTION */
1214 			} else {
1215 				int shift;
1216 
1217 				offset = fault_instruction & 0x0f;
1218 				if (offset == base)
1219 					return ABORT_FIXUP_FAILED;
1220 
1221 /* Register offset - hard we have to cope with shifts ! */
1222 				offset = registers[offset];
1223 
1224 				if ((fault_instruction & (1 << 4)) == 0)
1225 					shift = (fault_instruction >> 7) & 0x1f;
1226 				else {
1227 					if ((fault_instruction & (1 << 7)) != 0)
1228 						return ABORT_FIXUP_FAILED;
1229 					shift = ((fault_instruction >> 8) & 0xf);
1230 					if (base == shift)
1231 						return ABORT_FIXUP_FAILED;
1232 #ifdef DEBUG_FAULT_CORRECTION
1233 					if (pmap_debug_level >=0)
1234 						printf("shift reg=%d ", shift);
1235 #endif	/* DEBUG_FAULT_CORRECTION */
1236 					shift = registers[shift];
1237 				}
1238 #ifdef DEBUG_FAULT_CORRECTION
1239 				if (pmap_debug_level >=0)
1240 					printf("shift=%08x ", shift);
1241 #endif	/* DEBUG_FAULT_CORRECTION */
1242 				switch (((fault_instruction >> 5) & 0x3)) {
1243 				case 0 : /* Logical left */
1244 					offset = (int)(((u_int)offset) << shift);
1245 					break;
1246 				case 1 : /* Logical Right */
1247 					if (shift == 0) shift = 32;
1248 					offset = (int)(((u_int)offset) >> shift);
1249 					break;
1250 				case 2 : /* Arithmetic Right */
1251 					if (shift == 0) shift = 32;
1252 					offset = (int)(((int)offset) >> shift);
1253 					break;
1254 				case 3 : /* Rotate right */
1255 					return ABORT_FIXUP_FAILED;
1256 				}
1257 
1258 #ifdef DEBUG_FAULT_CORRECTION
1259 				if (pmap_debug_level >=0)
1260 					printf("abt: fixed LDR/STR with register offset\n");
1261 #endif	/* DEBUG_FAULT_CORRECTION */
1262 				if ((fault_instruction & (1 << 23)))
1263 					offset = -offset;
1264 #ifdef DEBUG_FAULT_CORRECTION
1265 				if (pmap_debug_level >=0)
1266 					printf("offset=%08x ", offset);
1267 #endif	/* DEBUG_FAULT_CORRECTION */
1268 				registers[base] += offset;
1269 			}
1270 #ifdef DEBUG_FAULT_CORRECTION
1271 			if (pmap_debug_level >=0)
1272 				printf("r%d=%08x\n", base, registers[base]);
1273 #endif	/* DEBUG_FAULT_CORRECTION */
1274 		}
1275 	}
1276 
1277 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1278 
1279 		/* Ok an abort in SVC mode */
1280 
1281 		/*
1282 		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1283 		 * as the fault happened in svc mode but we need it in the
1284 		 * usr slot so we can treat the registers as an array of ints
1285 		 * during fixing.
1286 		 * NOTE: This PC is in the position but writeback is not
1287 		 * allowed on r15.
1288 		 * Doing it like this is more efficient than trapping this
1289 		 * case in all possible locations in the prior fixup code.
1290 		 */
1291 
1292 		frame->tf_svc_lr = frame->tf_usr_lr;
1293 		frame->tf_usr_lr = saved_lr;
1294 
1295 		/*
1296 		 * Note the trapframe does not have the SVC r13 so a fault
1297 		 * from an instruction with writeback to r13 in SVC mode is
1298 		 * not allowed. This should not happen as the kstack is
1299 		 * always valid.
1300 		 */
1301 	}
1302 
1303 	/*
1304 	 * Now let the early-abort fixup routine have a go, in case it
1305 	 * was an LDM, STM, LDC or STC that faulted.
1306 	 */
1307 
1308 	return early_abort_fixup(arg);
1309 }
1310 #endif	/* CPU_ARM6(LATE)/7/7TDMI */
1311 
1312 /*
1313  * CPU Setup code
1314  */
1315 
1316 #if defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) || \
1317 	defined(CPU_ARM8) || defined (CPU_ARM9) || defined(CPU_SA110) || \
1318 	defined(CPU_XSCALE)
/* Accumulated value destined for the CPU control (CP15 c1) register. */
int cpuctrl;

/* Operations applied to cpuctrl by parse_cpu_options(). */
#define IGN	0	/* ignore the option */
#define OR	1	/* set the bits in co_value */
#define BIC	2	/* clear the bits in co_value */

/*
 * One boot-argument toggle: when the boolean option co_name is present,
 * co_trueop (option true) or co_falseop (option false) is applied to
 * the control-register value using the bit mask co_value.
 */
struct cpu_option {
	char	*co_name;
	int	co_falseop;
	int	co_trueop;
	int	co_value;
};
1331 
1332 static u_int
1333 parse_cpu_options(args, optlist, cpuctrl)
1334 	char *args;
1335 	struct cpu_option *optlist;
1336 	u_int cpuctrl;
1337 {
1338 	int integer;
1339 
1340 	while (optlist->co_name) {
1341 		if (get_bootconf_option(args, optlist->co_name,
1342 		    BOOTOPT_TYPE_BOOLEAN, &integer)) {
1343 			if (integer) {
1344 				if (optlist->co_trueop == OR)
1345 					cpuctrl |= optlist->co_value;
1346 				else if (optlist->co_trueop == BIC)
1347 					cpuctrl &= ~optlist->co_value;
1348 			} else {
1349 				if (optlist->co_falseop == OR)
1350 					cpuctrl |= optlist->co_value;
1351 				else if (optlist->co_falseop == BIC)
1352 					cpuctrl &= ~optlist->co_value;
1353 			}
1354 		}
1355 		++optlist;
1356 	}
1357 	return(cpuctrl);
1358 }
#endif /* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 || CPU_ARM9 || CPU_SA110 || CPU_XSCALE */
1360 
1361 #if defined (CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) \
1362 	|| defined(CPU_ARM8)
/*
 * Boot options common to the ARM6/7/8 family (unified I/D cache and
 * write buffer controls), applied before the per-CPU option tables.
 */
struct cpu_option arm678_options[] = {
#ifdef COMPAT_12
	/* Pre-1.2 option names kept for compatibility. */
	{ "nocache",		IGN, BIC, CPU_CONTROL_IDC_ENABLE },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "cpu.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1374 
1375 #endif	/* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 */
1376 
1377 #ifdef CPU_ARM6
/* ARM6-specific boot options, parsed after arm678_options. */
struct cpu_option arm6_options[] = {
	{ "arm6.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm6.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm6.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm6.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1385 
/*
 * Configure the ARM6 control register from boot arguments.
 * args is the boot option string handed to the kernel.
 */
void
arm6_setup(args)
	char *args;
{
	int cpuctrlmask;

	/* Set up default control registers bits */
	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
	/*
	 * NOTE(review): cpuctrlmask is computed but unused; the full mask
	 * (0xffffffff) is written below instead, as elsewhere in this file.
	 */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE
		 | CPU_CONTROL_AFLT_ENABLE;

#ifdef ARM6_LATE_ABORT
	/* Run the ARM6 in late-abort mode (see late_abort_fixup). */
	cpuctrl |= CPU_CONTROL_LABT_ENABLE;
#endif	/* ARM6_LATE_ABORT */

	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
	cpuctrl = parse_cpu_options(args, arm6_options, cpuctrl);

	/* Clear out the cache before the control register is changed. */
	cpu_cache_purgeID();

	/* Set the control register */
	cpu_control(0xffffffff, cpuctrl);
}
1415 #endif	/* CPU_ARM6 */
1416 
1417 #ifdef CPU_ARM7
/* ARM7-specific boot options, parsed after arm678_options. */
struct cpu_option arm7_options[] = {
	{ "arm7.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm7.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm7.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm7.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
#ifdef COMPAT_12
	/* Pre-1.2 name for the FPA coprocessor clock option. */
	{ "fpaclk2",		BIC, OR,  CPU_CONTROL_CPCLK },
#endif	/* COMPAT_12 */
	{ "arm700.fpaclk",	BIC, OR,  CPU_CONTROL_CPCLK },
	{ NULL,			IGN, IGN, 0 }
};
1429 
/*
 * Configure the ARM7 control register from boot arguments.
 * args is the boot option string handed to the kernel.
 */
void
arm7_setup(args)
	char *args;
{
	int cpuctrlmask;

	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
	/*
	 * NOTE(review): cpuctrlmask is computed but unused; the full mask
	 * (0xffffffff) is written below instead, as elsewhere in this file.
	 */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
		 | CPU_CONTROL_CPCLK | CPU_CONTROL_LABT_ENABLE
		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE
		 | CPU_CONTROL_AFLT_ENABLE;

	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
	cpuctrl = parse_cpu_options(args, arm7_options, cpuctrl);

	/* Clear out the cache before the control register is changed. */
	cpu_cache_purgeID();

	/* Set the control register */
	cpu_control(0xffffffff, cpuctrl);
}
1455 #endif	/* CPU_ARM7 */
1456 
1457 #ifdef CPU_ARM7TDMI
/*
 * ARM7TDMI-specific boot options.  NOTE(review): these reuse the
 * "arm7." / "arm700." option names - presumably deliberate, since the
 * ARM7TDMI is an ARM7-family core; confirm before renaming.
 */
struct cpu_option arm7tdmi_options[] = {
	{ "arm7.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm7.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm7.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm7.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
#ifdef COMPAT_12
	/* Pre-1.2 name for the FPA coprocessor clock option. */
	{ "fpaclk2",		BIC, OR,  CPU_CONTROL_CPCLK },
#endif	/* COMPAT_12 */
	{ "arm700.fpaclk",	BIC, OR,  CPU_CONTROL_CPCLK },
	{ NULL,			IGN, IGN, 0 }
};
1469 
/*
 * Configure the ARM7TDMI control register from boot arguments.
 * args is the boot option string handed to the kernel.
 */
void
arm7tdmi_setup(args)
	char *args;
{
	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;

	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
	cpuctrl = parse_cpu_options(args, arm7tdmi_options, cpuctrl);

	/* Clear out the cache before the control register is changed. */
	cpu_cache_purgeID();

	/* Set the control register */
	cpu_control(0xffffffff, cpuctrl);
}
1487 #endif	/* CPU_ARM7TDMI */
1488 
1489 #ifdef CPU_ARM8
/* ARM8-specific boot options, including branch prediction control. */
struct cpu_option arm8_options[] = {
	{ "arm8.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm8.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm8.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm8.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
#ifdef COMPAT_12
	/* Pre-1.2 option name kept for compatibility. */
	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "arm8.branchpredict",	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1502 
1503 void
1504 arm8_setup(args)
1505 	char *args;
1506 {
1507 	int integer;
1508 	int cpuctrlmask;
1509 	int clocktest;
1510 	int setclock = 0;
1511 
1512 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1513 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1514 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
1515 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1516 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1517 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
1518 		 | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_ROM_ENABLE
1519 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE;
1520 
1521 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1522 	cpuctrl = parse_cpu_options(args, arm8_options, cpuctrl);
1523 
1524 	/* Get clock configuration */
1525 	clocktest = arm8_clock_config(0, 0) & 0x0f;
1526 
1527 	/* Special ARM8 clock and test configuration */
1528 	if (get_bootconf_option(args, "arm8.clock.reset", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1529 		clocktest = 0;
1530 		setclock = 1;
1531 	}
1532 	if (get_bootconf_option(args, "arm8.clock.dynamic", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1533 		if (integer)
1534 			clocktest |= 0x01;
1535 		else
1536 			clocktest &= ~(0x01);
1537 		setclock = 1;
1538 	}
1539 	if (get_bootconf_option(args, "arm8.clock.sync", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1540 		if (integer)
1541 			clocktest |= 0x02;
1542 		else
1543 			clocktest &= ~(0x02);
1544 		setclock = 1;
1545 	}
1546 	if (get_bootconf_option(args, "arm8.clock.fast", BOOTOPT_TYPE_BININT, &integer)) {
1547 		clocktest = (clocktest & ~0xc0) | (integer & 3) << 2;
1548 		setclock = 1;
1549 	}
1550 	if (get_bootconf_option(args, "arm8.test", BOOTOPT_TYPE_BININT, &integer)) {
1551 		clocktest |= (integer & 7) << 5;
1552 		setclock = 1;
1553 	}
1554 
1555 	/* Clear out the cache */
1556 	cpu_cache_purgeID();
1557 
1558 	/* Set the control register */
1559 	cpu_control(0xffffffff, cpuctrl);
1560 
1561 	/* Set the clock/test register */
1562 	if (setclock)
1563 		arm8_clock_config(0x7f, clocktest);
1564 }
1565 #endif	/* CPU_ARM8 */
1566 
1567 #ifdef CPU_ARM9
/*
 * ARM9 boot options.  The ARM9 has separate I and D caches, so the
 * cache options use IC/DC bits rather than the unified IDC bit.
 */
struct cpu_option arm9_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm9.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm9.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "arm9.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "arm9.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1579 
/*
 * Configure the ARM9 control register from boot arguments.
 * args is the boot option string handed to the kernel.
 */
void
arm9_setup(args)
	char *args;
{
	int cpuctrlmask;

	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE;
	/*
	 * NOTE(review): cpuctrlmask is computed but unused; the full mask
	 * (0xffffffff) is written below instead, as elsewhere in this file.
	 */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
		 | CPU_CONTROL_CPCLK;

	cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);

	/* Clear out the cache before the control register is changed. */
	cpu_cache_purgeID();

	/* Set the control register */
	cpu_control(0xffffffff, cpuctrl);

}
1607 #endif	/* CPU_ARM9 */
1608 
1609 #ifdef CPU_SA110
/*
 * StrongARM SA-110 boot options (separate I/D caches, write buffer).
 */
struct cpu_option sa110_options[] = {
#ifdef COMPAT_12
	/* Pre-1.2 option names kept for compatibility. */
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa110.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa110.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "sa110.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "sa110.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1625 
/*
 * Configure the SA-110 control register from boot arguments and
 * enable CPU clock switching.
 * args is the boot option string handed to the kernel.
 */
void
sa110_setup(args)
	char *args;
{
	int cpuctrlmask;

	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE;
	/*
	 * NOTE(review): cpuctrlmask is computed but unused; the commented
	 * call below shows it was once intended as the cpu_control() mask.
	 */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
		 | CPU_CONTROL_CPCLK;

	cpuctrl = parse_cpu_options(args, sa110_options, cpuctrl);

	/* Clear out the cache before the control register is changed. */
	cpu_cache_purgeID();

	/* Set the control register */
/*	cpu_control(cpuctrlmask, cpuctrl);*/
	cpu_control(0xffffffff, cpuctrl);

	/*
	 * enable clockswitching, note that this doesn't read or write to r0,
	 * r0 is just to make it valid asm
	 */
	__asm ("mcr 15, 0, r0, c15, c1, 2");
}
1659 #endif	/* CPU_SA110 */
1660 
1661 #ifdef CPU_XSCALE
/*
 * XScale boot options (separate I/D caches, branch prediction).
 */
struct cpu_option xscale_options[] = {
#ifdef COMPAT_12
	/* Pre-1.2 option names kept for compatibility. */
	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
#endif	/* COMPAT_12 */
	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "xscale.branchpredict", BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "xscale.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "xscale.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "xscale.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1676 
/*
 * Configure the XScale control register from boot arguments.
 * args is the boot option string handed to the kernel.
 */
void
xscale_setup(args)
	char *args;
{
	int cpuctrlmask;

	/*
	 * The XScale Write Buffer is always enabled.  Our option
	 * is to enable/disable coalescing.  Note that bits 6:3
	 * must always be enabled.
	 */

	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
	/*
	 * NOTE(review): cpuctrlmask is computed but unused; the commented
	 * call below shows it was once intended as the cpu_control() mask.
	 */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
		 | CPU_CONTROL_CPCLK;

	cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl);

	/* Clear out the cache before the control register is changed. */
	cpu_cache_purgeID();

	/*
	 * Set the control register.  Note that bits 6:3 must always
	 * be set to 1.
	 */
/*	cpu_control(cpuctrlmask, cpuctrl);*/
	cpu_control(0xffffffff, cpuctrl);

#if 0
	/*
	 * XXX FIXME
	 * Disable write buffer coalescing, PT ECC, and set
	 * the mini-cache to write-back/read-allocate.
	 */
	__asm ("mcr p15, 0, %0, c1, c0, 1" :: "r" (0));
#endif
}
1722 #endif	/* CPU_XSCALE */
1723