xref: /netbsd-src/sys/arch/arm/arm/cpufunc.c (revision 06be8101a16cc95f40783b3cb7afd12112103a9a)
1 /*	$NetBSD: cpufunc.c,v 1.15 2001/11/14 01:00:05 thorpej Exp $	*/
2 
3 /*
4  * arm7tdmi support code Copyright (c) 2001 John Fremlin
5  * arm8 support code Copyright (c) 1997 ARM Limited
6  * arm8 support code Copyright (c) 1997 Causality Limited
7  * arm9 support code Copyright (C) 2001 ARM Ltd
8  * Copyright (c) 1997 Mark Brinicombe.
9  * Copyright (c) 1997 Causality Limited
10  * All rights reserved.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. All advertising materials mentioning features or use of this software
21  *    must display the following acknowledgement:
22  *	This product includes software developed by Causality Limited.
23  * 4. The name of Causality Limited may not be used to endorse or promote
24  *    products derived from this software without specific prior written
25  *    permission.
26  *
27  * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
28  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
29  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
30  * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
31  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
32  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
33  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37  * SUCH DAMAGE.
38  *
39  * RiscBSD kernel project
40  *
 * cpufunc.c
42  *
43  * C functions for supporting CPU / MMU / TLB specific operations.
44  *
45  * Created      : 30/01/97
46  */
47 
48 #include "opt_compat_netbsd.h"
49 #include "opt_cputypes.h"
50 #include "opt_pmap_debug.h"
51 
52 #include <sys/types.h>
53 #include <sys/param.h>
54 #include <sys/systm.h>
55 #include <machine/cpu.h>
56 #include <machine/cpufunc.h>
57 #include <machine/bootconfig.h>
58 #include <arch/arm/arm/disassem.h>
59 
#ifdef CPU_ARM3
/*
 * ARM3 function vector.  The MMU entry points (domain, setttb,
 * faultstatus, faultaddress) are NULL and all TLB operations are
 * no-ops; every cache operation funnels through arm3_cache_flush.
 * Entries are positional — they must match struct cpu_functions
 * exactly (see machine/cpufunc.h).
 */
struct cpu_functions arm3_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/

	/* MMU functions */

	arm3_control,			/* control		*/
	NULL,				/* domain		*/
	NULL,				/* setttb		*/
	NULL,				/* faultstatus		*/
	NULL,				/* faultaddress		*/

	/* TLB functions */

	cpufunc_nullop,			/* tlb_flushID		*/
	(void *)cpufunc_nullop,		/* tlb_flushID_SE	*/
	cpufunc_nullop,			/* tlb_flushI		*/
	(void *)cpufunc_nullop,		/* tlb_flushI_SE	*/
	cpufunc_nullop,			/* tlb_flushD		*/
	(void *)cpufunc_nullop,		/* tlb_flushD_SE	*/

	/* Cache functions */

	arm3_cache_flush,		/* cache_flushID	*/
	(void *)arm3_cache_flush,	/* cache_flushID_SE	*/
	arm3_cache_flush,		/* cache_flushI		*/
	(void *)arm3_cache_flush,	/* cache_flushI_SE	*/
	arm3_cache_flush,		/* cache_flushD		*/
	(void *)arm3_cache_flush,	/* cache_flushD_SE	*/

	cpufunc_nullop,			/* cache_cleanID	*/
	(void *)cpufunc_nullop,		/* cache_cleanID_E	*/
	cpufunc_nullop,			/* cache_cleanD		*/
	(void *)cpufunc_nullop,		/* cache_cleanD_E	*/

	arm3_cache_flush,		/* cache_purgeID	*/
	(void *)arm3_cache_flush,	/* cache_purgeID_E	*/
	arm3_cache_flush,		/* cache_purgeD		*/
	(void *)arm3_cache_flush,	/* cache_purgeD_E	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	cpufunc_nullop,			/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_nullop,			/* cache_syncI		*/
	(void *)cpufunc_nullop,		/* cache_cleanID_rng	*/
	(void *)cpufunc_nullop,		/* cache_cleanD_rng	*/
	(void *)arm3_cache_flush,	/* cache_purgeID_rng	*/
	(void *)arm3_cache_flush,	/* cache_purgeD_rng	*/
	(void *)cpufunc_nullop,		/* cache_syncI_rng	*/

	early_abort_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	NULL,				/* context_switch	*/

	(void *)cpufunc_nullop		/* cpu setup		*/

};
#endif	/* CPU_ARM3 */
129 
#ifdef CPU_ARM6
/*
 * ARM6 function vector.  Shares the arm67_* TLB/cache/context-switch
 * primitives with the ARM7 vector below; cache clean operations are
 * no-ops here.  The data abort handler is selected at compile time:
 * late_abort_fixup when ARM6_LATE_ABORT is defined, otherwise
 * early_abort_fixup.
 */
struct cpu_functions arm6_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	arm67_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	arm67_tlb_flush,		/* tlb_flushID		*/
	arm67_tlb_purge,		/* tlb_flushID_SE	*/
	arm67_tlb_flush,		/* tlb_flushI		*/
	arm67_tlb_purge,		/* tlb_flushI_SE	*/
	arm67_tlb_flush,		/* tlb_flushD		*/
	arm67_tlb_purge,		/* tlb_flushD_SE	*/

	/* Cache functions */

	arm67_cache_flush,		/* cache_flushID	*/
	(void *)arm67_cache_flush,	/* cache_flushID_SE	*/
	arm67_cache_flush,		/* cache_flushI		*/
	(void *)arm67_cache_flush,	/* cache_flushI_SE	*/
	arm67_cache_flush,		/* cache_flushD		*/
	(void *)arm67_cache_flush,	/* cache_flushD_SE	*/

	cpufunc_nullop,			/* cache_cleanID	*/
	(void *)cpufunc_nullop,		/* cache_cleanID_E	*/
	cpufunc_nullop,			/* cache_cleanD		*/
	(void *)cpufunc_nullop,		/* cache_cleanD_E	*/

	arm67_cache_flush,		/* cache_purgeID	*/
	(void *)arm67_cache_flush,	/* cache_purgeID_E	*/
	arm67_cache_flush,		/* cache_purgeD		*/
	(void *)arm67_cache_flush,	/* cache_purgeD_E	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	cpufunc_nullop,			/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_nullop,			/* cache_syncI		*/
	(void *)cpufunc_nullop,		/* cache_cleanID_rng	*/
	(void *)cpufunc_nullop,		/* cache_cleanD_rng	*/
	(void *)arm67_cache_flush,	/* cache_purgeID_rng	*/
	(void *)arm67_cache_flush,	/* cache_purgeD_rng	*/
	(void *)cpufunc_nullop,		/* cache_syncI_rng	*/

#ifdef ARM6_LATE_ABORT
	late_abort_fixup,		/* dataabt_fixup	*/
#else
	early_abort_fixup,		/* dataabt_fixup	*/
#endif
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm67_context_switch,		/* context_switch	*/

	arm6_setup			/* cpu setup		*/

};
#endif	/* CPU_ARM6 */
203 
#ifdef CPU_ARM7
/*
 * ARM7 function vector.  Identical to the ARM6 vector except that the
 * data abort handler is unconditionally late_abort_fixup and cpu setup
 * is arm7_setup.
 */
struct cpu_functions arm7_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	arm67_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	arm67_tlb_flush,		/* tlb_flushID		*/
	arm67_tlb_purge,		/* tlb_flushID_SE	*/
	arm67_tlb_flush,		/* tlb_flushI		*/
	arm67_tlb_purge,		/* tlb_flushI_SE	*/
	arm67_tlb_flush,		/* tlb_flushD		*/
	arm67_tlb_purge,		/* tlb_flushD_SE	*/

	/* Cache functions */

	arm67_cache_flush,		/* cache_flushID	*/
	(void *)arm67_cache_flush,	/* cache_flushID_SE	*/
	arm67_cache_flush,		/* cache_flushI		*/
	(void *)arm67_cache_flush,	/* cache_flushI_SE	*/
	arm67_cache_flush,		/* cache_flushD		*/
	(void *)arm67_cache_flush,	/* cache_flushD_SE	*/

	cpufunc_nullop,			/* cache_cleanID	*/
	(void *)cpufunc_nullop,		/* cache_cleanID_E	*/
	cpufunc_nullop,			/* cache_cleanD		*/
	(void *)cpufunc_nullop,		/* cache_cleanD_E	*/

	arm67_cache_flush,		/* cache_purgeID	*/
	(void *)arm67_cache_flush,	/* cache_purgeID_E	*/
	arm67_cache_flush,		/* cache_purgeD		*/
	(void *)arm67_cache_flush,	/* cache_purgeD_E	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	cpufunc_nullop,			/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_nullop,			/* cache_syncI		*/
	(void *)cpufunc_nullop,		/* cache_cleanID_rng	*/
	(void *)cpufunc_nullop,		/* cache_cleanD_rng	*/
	(void *)arm67_cache_flush,	/* cache_purgeID_rng	*/
	(void *)arm67_cache_flush,	/* cache_purgeD_rng	*/
	(void *)cpufunc_nullop,		/* cache_syncI_rng	*/

	late_abort_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm67_context_switch,		/* context_switch	*/

	arm7_setup			/* cpu setup		*/

};
#endif	/* CPU_ARM7 */
273 
#ifdef CPU_ARM7TDMI
/*
 * ARM7TDMI function vector.  Uses dedicated arm7tdmi_* TLB, cache and
 * context-switch primitives; all flush and purge slots resolve to
 * arm7tdmi_cache_flushID, and cache clean operations are no-ops.
 */
struct cpu_functions arm7tdmi_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	arm7tdmi_setttb,		/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	arm7tdmi_tlb_flushID,		/* tlb_flushID		*/
	arm7tdmi_tlb_flushID_SE,	/* tlb_flushID_SE	*/
	arm7tdmi_tlb_flushID,		/* tlb_flushI		*/
	arm7tdmi_tlb_flushID_SE,	/* tlb_flushI_SE	*/
	arm7tdmi_tlb_flushID,		/* tlb_flushD		*/
	arm7tdmi_tlb_flushID_SE,	/* tlb_flushD_SE	*/

	/* Cache functions */

	arm7tdmi_cache_flushID,		/* cache_flushID	*/
	(void *)arm7tdmi_cache_flushID,	/* cache_flushID_SE	*/
	arm7tdmi_cache_flushID,		/* cache_flushI		*/
	(void *)arm7tdmi_cache_flushID,	/* cache_flushI_SE	*/
	arm7tdmi_cache_flushID,		/* cache_flushD		*/
	(void *)arm7tdmi_cache_flushID,	/* cache_flushD_SE	*/

	cpufunc_nullop,			/* cache_cleanID	*/
	(void *)cpufunc_nullop,		/* cache_cleanID_E	*/
	cpufunc_nullop,			/* cache_cleanD		*/
	(void *)cpufunc_nullop,		/* cache_cleanD_E	*/

	arm7tdmi_cache_flushID,		/* cache_purgeID	*/
	(void *)arm7tdmi_cache_flushID,	/* cache_purgeID_E	*/
	arm7tdmi_cache_flushID,		/* cache_purgeD		*/
	(void *)arm7tdmi_cache_flushID,	/* cache_purgeD_E	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	cpufunc_nullop,			/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_nullop,			/* cache_syncI		*/
	(void *)cpufunc_nullop,		/* cache_cleanID_rng	*/
	(void *)cpufunc_nullop,		/* cache_cleanD_rng	*/
	(void *)arm7tdmi_cache_flushID,	/* cache_purgeID_rng	*/
	(void *)arm7tdmi_cache_flushID,	/* cache_purgeD_rng	*/
	(void *)cpufunc_nullop,		/* cache_syncI_rng	*/

	late_abort_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm7tdmi_context_switch,	/* context_switch	*/

	arm7tdmi_setup			/* cpu setup		*/

};
#endif	/* CPU_ARM7TDMI */
343 
#ifdef CPU_ARM8
/*
 * ARM8 function vector.  First vector in this file with real cache
 * clean/purge primitives (arm8_cache_cleanID*, arm8_cache_purgeID*);
 * both abort fixup slots are null since no register repair is needed.
 */
struct cpu_functions arm8_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	arm8_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	arm8_tlb_flushID,		/* tlb_flushID		*/
	arm8_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	arm8_tlb_flushID,		/* tlb_flushI		*/
	arm8_tlb_flushID_SE,		/* tlb_flushI_SE	*/
	arm8_tlb_flushID,		/* tlb_flushD		*/
	arm8_tlb_flushID_SE,		/* tlb_flushD_SE	*/

	/* Cache functions */

	arm8_cache_flushID,		/* cache_flushID	*/
	arm8_cache_flushID_E,		/* cache_flushID_SE	*/
	arm8_cache_flushID,		/* cache_flushI		*/
	arm8_cache_flushID_E,		/* cache_flushI_SE	*/
	arm8_cache_flushID,		/* cache_flushD		*/
	arm8_cache_flushID_E,		/* cache_flushD_SE	*/

	arm8_cache_cleanID,		/* cache_cleanID	*/
	arm8_cache_cleanID_E,		/* cache_cleanID_E	*/
	arm8_cache_cleanID,		/* cache_cleanD		*/
	arm8_cache_cleanID_E,		/* cache_cleanD_E	*/

	arm8_cache_purgeID,		/* cache_purgeID	*/
	arm8_cache_purgeID_E,		/* cache_purgeID_E	*/
	arm8_cache_purgeID,		/* cache_purgeD		*/
	arm8_cache_purgeID_E,		/* cache_purgeD_E	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	cpufunc_nullop,			/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	(void *)cpufunc_nullop,		/* cache_syncI		*/
	(void *)arm8_cache_cleanID,	/* cache_cleanID_rng	*/
	(void *)arm8_cache_cleanID,	/* cache_cleanD_rng	*/
	(void *)arm8_cache_purgeID,	/* cache_purgeID_rng	*/
	(void *)arm8_cache_purgeID,	/* cache_purgeD_rng	*/
	(void *)cpufunc_nullop,		/* cache_syncI_rng	*/

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm8_context_switch,		/* context_switch	*/

	arm8_setup			/* cpu setup		*/
};
#endif	/* CPU_ARM8 */
412 
#ifdef CPU_ARM9
/*
 * ARM9 function vector.  Mixes generic armv4_* TLB/write-buffer
 * primitives with arm9-specific cache routines.  set_cpufuncs()
 * selects write-through caching for this CPU (pte_cache_mode = PT_C),
 * so the purge slots can simply flush via the arm9_cache_flush*
 * routines.
 */
struct cpu_functions arm9_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* Domain		*/
	arm9_setttb,			/* Setttb		*/
	cpufunc_faultstatus,		/* Faultstatus		*/
	cpufunc_faultaddress,		/* Faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm9_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache functions */

	arm9_cache_flushID,		/* cache_flushID	*/
	arm9_cache_flushID_SE,		/* cache_flushID_SE	*/
	arm9_cache_flushI,		/* cache_flushI		*/
	arm9_cache_flushI_SE,		/* cache_flushI_SE	*/
	arm9_cache_flushD,		/* cache_flushD		*/
	arm9_cache_flushD_SE,		/* cache_flushD_SE	*/

	/* ... lets use the cache in write-through mode.  */
	arm9_cache_cleanID,		/* cache_cleanID	*/
	(void *)arm9_cache_cleanID,	/* cache_cleanID_SE	*/
	arm9_cache_cleanID,		/* cache_cleanD		*/
	(void *)arm9_cache_cleanID,	/* cache_cleanD_SE	*/

	arm9_cache_flushID,		/* cache_purgeID	*/
	arm9_cache_flushID_SE,		/* cache_purgeID_SE	*/
	arm9_cache_flushD,		/* cache_purgeD		*/
	arm9_cache_flushD_SE,		/* cache_purgeD_SE	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */
	arm9_cache_syncI,		/* cache_syncI		*/
	(void *)arm9_cache_cleanID,	/* cache_cleanID_rng	*/
	(void *)arm9_cache_cleanID,	/* cache_cleanD_rng	*/
	arm9_cache_flushID_rng,		/* cache_purgeID_rng	*/
	arm9_cache_flushD_rng,		/* cache_purgeD_rng	*/
	arm9_cache_syncI_rng,		/* cache_syncI_rng	*/

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm9_context_switch,		/* context_switch	*/

	arm9_setup			/* cpu setup		*/

};
#endif /* CPU_ARM9 */
482 
#ifdef CPU_SA110
/*
 * StrongARM SA-110/SA-1100/SA-1110 function vector.  Full set of
 * dedicated clean/purge/range primitives; abort fixups are null.
 */
struct cpu_functions sa110_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	sa110_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	sa110_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache functions */

	sa110_cache_flushID,		/* cache_flushID	*/
	(void *)sa110_cache_flushID,	/* cache_flushID_SE	*/
	sa110_cache_flushI,		/* cache_flushI		*/
	(void *)sa110_cache_flushI,	/* cache_flushI_SE	*/
	sa110_cache_flushD,		/* cache_flushD		*/
	sa110_cache_flushD_SE,		/* cache_flushD_SE	*/

	sa110_cache_cleanID,		/* cache_cleanID	*/
	/*
	 * NOTE(review): the ID-entry clean slot is filled with the
	 * D-cache entry clean routine — presumably because cleaning
	 * only writes back dirty data; confirm against cpufunc.h.
	 */
	sa110_cache_cleanD_E,		/* cache_cleanID_E	*/
	sa110_cache_cleanD,		/* cache_cleanD		*/
	sa110_cache_cleanD_E,		/* cache_cleanD_E	*/

	sa110_cache_purgeID,		/* cache_purgeID	*/
	sa110_cache_purgeID_E,		/* cache_purgeID_E	*/
	sa110_cache_purgeD,		/* cache_purgeD		*/
	sa110_cache_purgeD_E,		/* cache_purgeD_E	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	sa110_cache_syncI,		/* cache_syncI		*/
	sa110_cache_cleanID_rng,	/* cache_cleanID_rng	*/
	sa110_cache_cleanD_rng,		/* cache_cleanD_rng	*/
	sa110_cache_purgeID_rng,	/* cache_purgeID_rng	*/
	sa110_cache_purgeD_rng,		/* cache_purgeD_rng	*/
	sa110_cache_syncI_rng,		/* cache_syncI_rng	*/

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	sa110_context_switch,		/* context_switch	*/

	sa110_setup			/* cpu setup		*/
};
#endif	/* CPU_SA110 */
551 
#ifdef CPU_XSCALE
/*
 * Intel XScale (i80200) function vector.  Structurally parallel to the
 * SA-110 vector: armv4_* TLB primitives plus a full set of
 * XScale-specific cache routines; abort fixups are null.
 */
struct cpu_functions xscale_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/

	/* MMU functions */

	xscale_control,			/* control		*/
	cpufunc_domains,		/* domain		*/
	xscale_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	xscale_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache functions */

	xscale_cache_flushID,		/* cache_flushID	*/
	(void *)xscale_cache_flushID,	/* cache_flushID_SE	*/
	xscale_cache_flushI,		/* cache_flushI		*/
	(void *)xscale_cache_flushI,	/* cache_flushI_SE	*/
	xscale_cache_flushD,		/* cache_flushD		*/
	xscale_cache_flushD_SE,		/* cache_flushD_SE	*/

	xscale_cache_cleanID,		/* cache_cleanID	*/
	/*
	 * NOTE(review): as in the SA-110 vector, the ID-entry clean
	 * slot uses the D-cache entry clean routine — confirm intent
	 * against cpufunc.h.
	 */
	xscale_cache_cleanD_E,		/* cache_cleanID_E	*/
	xscale_cache_cleanD,		/* cache_cleanD		*/
	xscale_cache_cleanD_E,		/* cache_cleanD_E	*/

	xscale_cache_purgeID,		/* cache_purgeID	*/
	xscale_cache_purgeID_E,		/* cache_purgeID_E	*/
	xscale_cache_purgeD,		/* cache_purgeD		*/
	xscale_cache_purgeD_E,		/* cache_purgeD_E	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	xscale_cache_syncI,		/* cache_syncI		*/
	xscale_cache_cleanID_rng,	/* cache_cleanID_rng	*/
	xscale_cache_cleanD_rng,	/* cache_cleanD_rng	*/
	xscale_cache_purgeID_rng,	/* cache_purgeID_rng	*/
	xscale_cache_purgeD_rng,	/* cache_purgeD_rng	*/
	xscale_cache_syncI_rng,		/* cache_syncI_rng	*/

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	xscale_context_switch,		/* context_switch	*/

	xscale_setup			/* cpu setup		*/
};
#endif /* CPU_XSCALE */
620 
/*
 * Global state shared with locore.s
 */

struct cpu_functions cpufuncs;	/* active vector, copied in by set_cpufuncs() */
u_int cputype;			/* masked CPU id, set by set_cpufuncs() */
u_int cpu_reset_needs_v4_MMU_disable;	/* flag used in locore.s */
628 
/*
 * Identify the running CPU and install the matching function vector.
 *
 * Reads the CPU id register, masks it with CPU_ID_CPU_MASK into the
 * global cputype, then walks the compiled-in CPU options in order,
 * copying the first matching vector into the global cpufuncs and
 * setting cpu_reset_needs_v4_MMU_disable for locore.s.
 *
 * Returns 0 on success, ARCHITECTURE_NOT_PRESENT if no compiled-in
 * vector matches.  Cannot panic here as we may not have a console yet.
 */

int
set_cpufuncs()
{
	cputype = cpufunc_id();
	cputype &= CPU_ID_CPU_MASK;


#ifdef CPU_ARM3
	/* ARM Ltd part with 0x3 in the part-number nibble */
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    (cputype & 0x00000f00) == 0x00000300) {
		cpufuncs = arm3_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;
		return 0;
	}
#endif	/* CPU_ARM3 */
#ifdef CPU_ARM6
	/* ARM Ltd part with 0x6 in the part-number nibble */
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    (cputype & 0x00000f00) == 0x00000600) {
		cpufuncs = arm6_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;
		return 0;
	}
#endif	/* CPU_ARM6 */
#ifdef CPU_ARM7
	/* ARM7 with the V3 architecture encoding */
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    CPU_ID_IS7(cputype) &&
	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V3) {
		cpufuncs = arm7_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;
		return 0;
	}
#endif	/* CPU_ARM7 */
#ifdef CPU_ARM7TDMI
	/* ARM7 with the V4T architecture encoding */
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    CPU_ID_IS7(cputype) &&
	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V4T) {
		cpufuncs = arm7tdmi_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;
		return 0;
	}
#endif
#ifdef CPU_ARM8
	/* ARM Ltd part with 0x8 in the high part-number nibble */
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    (cputype & 0x0000f000) == 0x00008000) {
		cpufuncs = arm8_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;	/* XXX correct? */
		return 0;
	}
#endif	/* CPU_ARM8 */
#ifdef CPU_ARM9
	if (cputype == CPU_ID_ARM920T) {
		pte_cache_mode = PT_C;	/* Select write-through cacheing. */
		cpufuncs = arm9_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
		return 0;
	}
#endif /* CPU_ARM9 */
#ifdef CPU_SA110
	if (cputype == CPU_ID_SA110 || cputype == CPU_ID_SA1100 ||
	    cputype == CPU_ID_SA1110) {
		cpufuncs = sa110_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it */
		return 0;
	}
#endif	/* CPU_SA110 */
#ifdef CPU_XSCALE
	if (cputype == CPU_ID_I80200) {
		cpufuncs = xscale_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		return 0;
	}
#endif /* CPU_XSCALE */
	/*
	 * Bzzzz. And the answer was ... no compiled-in support for
	 * this CPU.
	 */
/*	panic("No support for this CPU type (%08x) in kernel", cputype);*/
	return(ARCHITECTURE_NOT_PRESENT);
}
711 
712 /*
713  * Fixup routines for data and prefetch aborts.
714  *
715  * Several compile time symbols are used
716  *
717  * DEBUG_FAULT_CORRECTION - Print debugging information during the
718  * correction of registers after a fault.
719  * ARM6_LATE_ABORT - ARM6 supports both early and late aborts
720  * when defined should use late aborts
721  */
722 
723 #if defined(DEBUG_FAULT_CORRECTION) && !defined(PMAP_DEBUG)
724 #error PMAP_DEBUG must be defined to use DEBUG_FAULT_CORRECTION
725 #endif
726 
727 /*
728  * Null abort fixup routine.
729  * For use when no fixup is required.
730  */
731 int
732 cpufunc_null_fixup(arg)
733 	void *arg;
734 {
735 	return(ABORT_FIXUP_OK);
736 }
737 
738 #if defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI)
739 #ifdef DEBUG_FAULT_CORRECTION
740 extern int pmap_debug_level;
741 #endif
742 #endif
743 
#if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3) || \
    defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI)
/*
 * "Early" data abort fixup.
 *
 * For ARM2, ARM2as, ARM3 and ARM6 (in early-abort mode).  Also used
 * indirectly by ARM6 (in late-abort mode) and ARM7[TDMI].
 *
 * In early aborts, we may have to fix up LDM, STM, LDC and STC: when
 * such an instruction with base-register writeback aborts, the base
 * has already been updated and must be rewound so the instruction can
 * be restarted.
 *
 * arg points at the trapframe of the aborted context.  Returns
 * ABORT_FIXUP_OK when the registers are repaired (or no repair was
 * needed), ABORT_FIXUP_FAILED when the instruction cannot be fixed
 * (e.g. writeback to r15, or to r13 in SVC mode).
 */
int
early_abort_fixup(arg)
	void *arg;
{
	trapframe_t *frame = arg;
	u_int fault_pc;
	u_int fault_instruction;
	int saved_lr = 0;

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
		 * as the fault happened in svc mode but we need it in the
		 * usr slot so we can treat the registers as an array of ints
		 * during fixing.
		 * NOTE: This PC is in the position but writeback is not
		 * allowed on r15.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the following fixup code.
		 */

		saved_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = frame->tf_svc_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed. This should not happen as the kstack is
		 * always valid.
		 */
	}

	/* Get fault address and status from the CPU */

	fault_pc = frame->tf_pc;
	fault_instruction = *((volatile unsigned int *)fault_pc);

	/* Decode the fault instruction and fix the registers as needed */

	if ((fault_instruction & 0x0e000000) == 0x08000000) {
		/* LDM/STM (block data transfer) */
		int base;
		int loop;
		int count;
		int *registers = &frame->tf_r0;

#ifdef DEBUG_FAULT_CORRECTION
		if (pmap_debug_level >= 0) {
			printf("LDM/STM\n");
			disassemble(fault_pc);
		}
#endif	/* DEBUG_FAULT_CORRECTION */
		/* Writeback (W) bit set: the base register was modified */
		if (fault_instruction & (1 << 21)) {
#ifdef DEBUG_FAULT_CORRECTION
			if (pmap_debug_level >= 0)
				printf("This instruction must be corrected\n");
#endif	/* DEBUG_FAULT_CORRECTION */
			base = (fault_instruction >> 16) & 0x0f;
			if (base == 15)
				return ABORT_FIXUP_FAILED;
			/* Count registers transferred */
			count = 0;
			for (loop = 0; loop < 16; ++loop) {
				if (fault_instruction & (1<<loop))
					++count;
			}
#ifdef DEBUG_FAULT_CORRECTION
			if (pmap_debug_level >= 0) {
				printf("%d registers used\n", count);
				printf("Corrected r%d by %d bytes ", base, count * 4);
			}
#endif	/* DEBUG_FAULT_CORRECTION */
			/* U bit selects the direction the base moved */
			if (fault_instruction & (1 << 23)) {
#ifdef DEBUG_FAULT_CORRECTION
				if (pmap_debug_level >= 0)
					printf("down\n");
#endif	/* DEBUG_FAULT_CORRECTION */
				registers[base] -= count * 4;
			} else {
#ifdef DEBUG_FAULT_CORRECTION
				if (pmap_debug_level >= 0)
					printf("up\n");
#endif	/* DEBUG_FAULT_CORRECTION */
				registers[base] += count * 4;
			}
		}
	} else if ((fault_instruction & 0x0e000000) == 0x0c000000) {
		/* LDC/STC (coprocessor data transfer) */
		int base;
		int offset;
		int *registers = &frame->tf_r0;

/* REGISTER CORRECTION IS REQUIRED FOR THESE INSTRUCTIONS */

#ifdef DEBUG_FAULT_CORRECTION
		if (pmap_debug_level >= 0)
			disassemble(fault_pc);
#endif	/* DEBUG_FAULT_CORRECTION */

/* Only need to fix registers if write back is turned on */

		if ((fault_instruction & (1 << 21)) != 0) {
			base = (fault_instruction >> 16) & 0x0f;
			if (base == 13 && (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
				return ABORT_FIXUP_FAILED;
			if (base == 15)
				return ABORT_FIXUP_FAILED;

			/* 8-bit immediate offset, scaled by 4 */
			offset = (fault_instruction & 0xff) << 2;
#ifdef DEBUG_FAULT_CORRECTION
			if (pmap_debug_level >= 0)
				printf("r%d=%08x\n", base, registers[base]);
#endif	/* DEBUG_FAULT_CORRECTION */
			if ((fault_instruction & (1 << 23)) != 0)
				offset = -offset;
			registers[base] += offset;
#ifdef DEBUG_FAULT_CORRECTION
			if (pmap_debug_level >= 0)
				printf("r%d=%08x\n", base, registers[base]);
#endif	/* DEBUG_FAULT_CORRECTION */
		}
	}
	/*
	 * NOTE: a second "else if ((fault_instruction & 0x0e000000) ==
	 * 0x0c000000) return ABORT_FIXUP_FAILED;" used to follow here.
	 * Its condition duplicated the branch above, so it was
	 * unreachable dead code and has been removed.
	 */

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
		 * as the fault happened in svc mode but we need it in the
		 * usr slot so we can treat the registers as an array of ints
		 * during fixing.
		 * NOTE: This PC is in the position but writeback is not
		 * allowed on r15.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the prior fixup code.
		 */

		frame->tf_svc_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = saved_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed. This should not happen as the kstack is
		 * always valid.
		 */
	}

	return(ABORT_FIXUP_OK);
}
#endif	/* CPU_ARM2/250/3/6/7 */
908 
#if (defined(CPU_ARM6) && defined(ARM6_LATE_ABORT)) || defined(CPU_ARM7) || \
	defined(CPU_ARM7TDMI)
/*
 * "Late" (base updated) data abort fixup
 *
 * For ARM6 (in late-abort mode) and ARM7.
 *
 * In this abort model the base register has already been written back by
 * the time the abort is taken, so all single data-transfer instructions
 * need fixing up (the base must be restored before the instruction can be
 * restarted).  We defer LDM, STM, LDC and STC fixup to the early-abort
 * handler.
 *
 * arg is really a trapframe_t *.  Returns ABORT_FIXUP_FAILED for an
 * instruction we cannot (or must not) fix up; otherwise falls through to
 * early_abort_fixup() and returns its result.
 */
int
late_abort_fixup(arg)
	void *arg;
{
	trapframe_t *frame = arg;
	u_int fault_pc;			/* address of the faulting instruction */
	u_int fault_instruction;	/* the instruction itself */
	int saved_lr = 0;		/* usr r14, preserved across the fixup */

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
		 * as the fault happened in svc mode but we need it in the
		 * usr slot so we can treat the registers as an array of ints
		 * during fixing.
		 * NOTE: the PC occupies the r15 slot in this array, but
		 * writeback to r15 is not allowed (rejected below).
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the following fixup code.
		 */

		saved_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = frame->tf_svc_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed. This should not happen as the kstack is
		 * always valid.
		 */
	}

	/* Get fault address and status from the CPU */

	fault_pc = frame->tf_pc;
	fault_instruction = *((volatile unsigned int *)fault_pc);

	/* Decode the fault instruction and fix the registers as needed */

	/* Was it a swap (SWP/SWPB) instruction?  No base update to undo. */

	if ((fault_instruction & 0x0fb00ff0) == 0x01000090) {
#ifdef DEBUG_FAULT_CORRECTION
		if (pmap_debug_level >= 0)
			disassemble(fault_pc);
#endif	/* DEBUG_FAULT_CORRECTION */
	} else if ((fault_instruction & 0x0c000000) == 0x04000000) {

		/* Was it a single data-transfer (ldr/str) instruction? */
		/* This is for late abort only */

		int base;	/* base register number (bits 19:16) */
		int offset;	/* amount the base was adjusted by */
		int *registers = &frame->tf_r0;

#ifdef DEBUG_FAULT_CORRECTION
		if (pmap_debug_level >= 0)
			disassemble(fault_pc);
#endif	/* DEBUG_FAULT_CORRECTION */

		/* This is for late abort only */

		/*
		 * Only post-indexed (P, bit 24 == 0) or writeback
		 * (W, bit 21 == 1) forms updated the base register;
		 * plain pre-indexed accesses need no fixup here.
		 */
		if ((fault_instruction & (1 << 24)) == 0
		    || (fault_instruction & (1 << 21)) != 0) {
			base = (fault_instruction >> 16) & 0x0f;
			/* SVC r13 is not in the trapframe (see above). */
			if (base == 13 && (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
				return ABORT_FIXUP_FAILED;
			/* Writeback to the PC is not allowed. */
			if (base == 15)
				return ABORT_FIXUP_FAILED;
#ifdef DEBUG_FAULT_CORRECTION
			if (pmap_debug_level >=0)
				printf("late abt fix: r%d=%08x ", base, registers[base]);
#endif	/* DEBUG_FAULT_CORRECTION */
			if ((fault_instruction & (1 << 25)) == 0) {
				/* Immediate offset - easy */
				offset = fault_instruction & 0xfff;
				/* U bit (23) set: offset was added, so undo
				 * the update by subtracting it. */
				if ((fault_instruction & (1 << 23)))
					offset = -offset;
				registers[base] += offset;
#ifdef DEBUG_FAULT_CORRECTION
				if (pmap_debug_level >=0)
					printf("imm=%08x ", offset);
#endif	/* DEBUG_FAULT_CORRECTION */
			} else {
				int shift;

				offset = fault_instruction & 0x0f;
				/* Offset register == base: the original base
				 * value is unrecoverable. */
				if (offset == base)
					return ABORT_FIXUP_FAILED;

				/* Register offset - hard, we have to cope
				 * with shifts! */
				offset = registers[offset];

				if ((fault_instruction & (1 << 4)) == 0)
					/* Shift amount is a 5-bit immediate
					 * in bits 11:7. */
					shift = (fault_instruction >> 7) & 0x1f;
				else {
					/* Bit 4 set with bit 7 set is not a
					 * valid shift encoding. */
					if ((fault_instruction & (1 << 7)) != 0)
						return ABORT_FIXUP_FAILED;
					/* Shift amount comes from the
					 * register in bits 11:8. */
					shift = ((fault_instruction >> 8) & 0xf);
					if (base == shift)
						return ABORT_FIXUP_FAILED;
#ifdef DEBUG_FAULT_CORRECTION
					if (pmap_debug_level >=0)
						printf("shift reg=%d ", shift);
#endif	/* DEBUG_FAULT_CORRECTION */
					shift = registers[shift];
				}
#ifdef DEBUG_FAULT_CORRECTION
				if (pmap_debug_level >=0)
					printf("shift=%08x ", shift);
#endif	/* DEBUG_FAULT_CORRECTION */
				switch (((fault_instruction >> 5) & 0x3)) {
				case 0 : /* Logical left */
					offset = (int)(((u_int)offset) << shift);
					break;
				case 1 : /* Logical Right */
					if (shift == 0) shift = 32;
					offset = (int)(((u_int)offset) >> shift);
					break;
				case 2 : /* Arithmetic Right */
					if (shift == 0) shift = 32;
					offset = (int)(((int)offset) >> shift);
					break;
				case 3 : /* Rotate right - not handled
					  * (shift == 0 would mean RRX). */
					return ABORT_FIXUP_FAILED;
				}

#ifdef DEBUG_FAULT_CORRECTION
				if (pmap_debug_level >=0)
					printf("abt: fixed LDR/STR with register offset\n");
#endif	/* DEBUG_FAULT_CORRECTION */
				/* As above: U bit set means the offset was
				 * added; negate to undo. */
				if ((fault_instruction & (1 << 23)))
					offset = -offset;
#ifdef DEBUG_FAULT_CORRECTION
				if (pmap_debug_level >=0)
					printf("offset=%08x ", offset);
#endif	/* DEBUG_FAULT_CORRECTION */
				registers[base] += offset;
			}
#ifdef DEBUG_FAULT_CORRECTION
			if (pmap_debug_level >=0)
				printf("r%d=%08x\n", base, registers[base]);
#endif	/* DEBUG_FAULT_CORRECTION */
		}
	}

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Copy the (possibly fixed-up) usr r14 back into the SVC
		 * r14 slot and restore the saved usr r14 - the inverse of
		 * the shuffle performed before the fixup above.
		 * NOTE: the PC occupies the r15 slot in this array, but
		 * writeback to r15 is not allowed (rejected above).
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the prior fixup code.
		 */

		frame->tf_svc_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = saved_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed. This should not happen as the kstack is
		 * always valid.
		 */
	}

	/*
	 * Now let the early-abort fixup routine have a go, in case it
	 * was an LDM, STM, LDC or STC that faulted.
	 */

	return early_abort_fixup(arg);
}
#endif	/* CPU_ARM6(LATE)/7/7TDMI */
1102 
1103 /*
1104  * CPU Setup code
1105  */
1106 
#if defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) || \
	defined(CPU_ARM8) || defined (CPU_ARM9) || defined(CPU_SA110) || \
	defined(CPU_XSCALE)
/* Control-register value being built up by the *_setup() routines below. */
int cpuctrl;

/* Actions applied when a boot option matches: ignore, OR in, or BIC out. */
#define IGN	0
#define OR	1
#define BIC	2

/*
 * Maps a boot-option name to the control-register bits (co_value) that
 * are ORed in or BICed out when the option is false (co_falseop) or
 * true (co_trueop).  Tables are terminated by a NULL co_name.
 */
struct cpu_option {
	char	*co_name;
	int	co_falseop;
	int	co_trueop;
	int	co_value;
};
1122 
1123 static u_int
1124 parse_cpu_options(args, optlist, cpuctrl)
1125 	char *args;
1126 	struct cpu_option *optlist;
1127 	u_int cpuctrl;
1128 {
1129 	int integer;
1130 
1131 	while (optlist->co_name) {
1132 		if (get_bootconf_option(args, optlist->co_name,
1133 		    BOOTOPT_TYPE_BOOLEAN, &integer)) {
1134 			if (integer) {
1135 				if (optlist->co_trueop == OR)
1136 					cpuctrl |= optlist->co_value;
1137 				else if (optlist->co_trueop == BIC)
1138 					cpuctrl &= ~optlist->co_value;
1139 			} else {
1140 				if (optlist->co_falseop == OR)
1141 					cpuctrl |= optlist->co_value;
1142 				else if (optlist->co_falseop == BIC)
1143 					cpuctrl &= ~optlist->co_value;
1144 			}
1145 		}
1146 		++optlist;
1147 	}
1148 	return(cpuctrl);
1149 }
#endif /* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 || CPU_ARM9 || CPU_SA110 || CPU_XSCALE */
1151 
#if defined (CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) \
	|| defined(CPU_ARM8)
/*
 * Boot options shared by the ARM6/7/8 family: enable/disable the
 * combined I/D cache and the write buffer.
 */
struct cpu_option arm678_options[] = {
#ifdef COMPAT_12
	/* Old option names, kept for NetBSD 1.2 compatibility. */
	{ "nocache",		IGN, BIC, CPU_CONTROL_IDC_ENABLE },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "cpu.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }	/* terminator */
};

#endif	/* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 */
1167 
#ifdef CPU_ARM6
/* ARM6-specific cache/write-buffer boot options. */
struct cpu_option arm6_options[] = {
	{ "arm6.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm6.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm6.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm6.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }	/* terminator */
};
1176 
1177 void
1178 arm6_setup(args)
1179 	char *args;
1180 {
1181 	int cpuctrlmask;
1182 
1183 	/* Set up default control registers bits */
1184 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1185 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1186 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
1187 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1188 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1189 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
1190 		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE
1191 		 | CPU_CONTROL_AFLT_ENABLE;
1192 
1193 #ifdef ARM6_LATE_ABORT
1194 	cpuctrl |= CPU_CONTROL_LABT_ENABLE;
1195 #endif	/* ARM6_LATE_ABORT */
1196 
1197 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1198 	cpuctrl = parse_cpu_options(args, arm6_options, cpuctrl);
1199 
1200 	/* Clear out the cache */
1201 	cpu_cache_purgeID();
1202 
1203 	/* Set the control register */
1204 	cpu_control(0xffffffff, cpuctrl);
1205 }
1206 #endif	/* CPU_ARM6 */
1207 
1208 #ifdef CPU_ARM7
1209 struct cpu_option arm7_options[] = {
1210 	{ "arm7.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
1211 	{ "arm7.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
1212 	{ "arm7.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
1213 	{ "arm7.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
1214 #ifdef COMPAT_12
1215 	{ "fpaclk2",		BIC, OR,  CPU_CONTROL_CPCLK },
1216 #endif	/* COMPAT_12 */
1217 	{ "arm700.fpaclk",	BIC, OR,  CPU_CONTROL_CPCLK },
1218 	{ NULL,			IGN, IGN, 0 }
1219 };
1220 
1221 void
1222 arm7_setup(args)
1223 	char *args;
1224 {
1225 	int cpuctrlmask;
1226 
1227 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1228 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1229 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
1230 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1231 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1232 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
1233 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_LABT_ENABLE
1234 		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE
1235 		 | CPU_CONTROL_AFLT_ENABLE;
1236 
1237 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1238 	cpuctrl = parse_cpu_options(args, arm7_options, cpuctrl);
1239 
1240 	/* Clear out the cache */
1241 	cpu_cache_purgeID();
1242 
1243 	/* Set the control register */
1244 	cpu_control(0xffffffff, cpuctrl);
1245 }
1246 #endif	/* CPU_ARM7 */
1247 
#ifdef CPU_ARM7TDMI
/*
 * ARM7TDMI boot options.  Note the option names deliberately reuse the
 * "arm7."/"arm700." prefixes rather than introducing new ones.
 */
struct cpu_option arm7tdmi_options[] = {
	{ "arm7.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm7.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm7.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm7.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
#ifdef COMPAT_12
	/* Old option name, kept for NetBSD 1.2 compatibility. */
	{ "fpaclk2",		BIC, OR,  CPU_CONTROL_CPCLK },
#endif	/* COMPAT_12 */
	{ "arm700.fpaclk",	BIC, OR,  CPU_CONTROL_CPCLK },
	{ NULL,			IGN, IGN, 0 }	/* terminator */
};
1260 
1261 void
1262 arm7tdmi_setup(args)
1263 	char *args;
1264 {
1265 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1266 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1267 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
1268 
1269 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1270 	cpuctrl = parse_cpu_options(args, arm7tdmi_options, cpuctrl);
1271 
1272 	/* Clear out the cache */
1273 	cpu_cache_purgeID();
1274 
1275 	/* Set the control register */
1276 	cpu_control(0xffffffff, cpuctrl);
1277 }
1278 #endif	/* CPU_ARM7TDMI */
1279 
#ifdef CPU_ARM8
/* ARM8-specific boot options: cache, write buffer and branch prediction. */
struct cpu_option arm8_options[] = {
	{ "arm8.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm8.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm8.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm8.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
#ifdef COMPAT_12
	/* Old option name, kept for NetBSD 1.2 compatibility. */
	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "arm8.branchpredict",	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ NULL,			IGN, IGN, 0 }	/* terminator */
};
1293 
1294 void
1295 arm8_setup(args)
1296 	char *args;
1297 {
1298 	int integer;
1299 	int cpuctrlmask;
1300 	int clocktest;
1301 	int setclock = 0;
1302 
1303 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1304 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1305 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
1306 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1307 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1308 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
1309 		 | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_ROM_ENABLE
1310 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE;
1311 
1312 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1313 	cpuctrl = parse_cpu_options(args, arm8_options, cpuctrl);
1314 
1315 	/* Get clock configuration */
1316 	clocktest = arm8_clock_config(0, 0) & 0x0f;
1317 
1318 	/* Special ARM8 clock and test configuration */
1319 	if (get_bootconf_option(args, "arm8.clock.reset", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1320 		clocktest = 0;
1321 		setclock = 1;
1322 	}
1323 	if (get_bootconf_option(args, "arm8.clock.dynamic", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1324 		if (integer)
1325 			clocktest |= 0x01;
1326 		else
1327 			clocktest &= ~(0x01);
1328 		setclock = 1;
1329 	}
1330 	if (get_bootconf_option(args, "arm8.clock.sync", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1331 		if (integer)
1332 			clocktest |= 0x02;
1333 		else
1334 			clocktest &= ~(0x02);
1335 		setclock = 1;
1336 	}
1337 	if (get_bootconf_option(args, "arm8.clock.fast", BOOTOPT_TYPE_BININT, &integer)) {
1338 		clocktest = (clocktest & ~0xc0) | (integer & 3) << 2;
1339 		setclock = 1;
1340 	}
1341 	if (get_bootconf_option(args, "arm8.test", BOOTOPT_TYPE_BININT, &integer)) {
1342 		clocktest |= (integer & 7) << 5;
1343 		setclock = 1;
1344 	}
1345 
1346 	/* Clear out the cache */
1347 	cpu_cache_purgeID();
1348 
1349 	/* Set the control register */
1350 	cpu_control(0xffffffff, cpuctrl);
1351 
1352 	/* Set the clock/test register */
1353 	if (setclock)
1354 		arm8_clock_config(0x7f, clocktest);
1355 }
1356 #endif	/* CPU_ARM8 */
1357 
#ifdef CPU_ARM9
/*
 * ARM9 boot options: separate I and D caches (unlike the combined IDC
 * of ARM6/7/8) and the write buffer.
 */
struct cpu_option arm9_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm9.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm9.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "arm9.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "arm9.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }	/* terminator */
};
1370 
1371 void
1372 arm9_setup(args)
1373 	char *args;
1374 {
1375 	int cpuctrlmask;
1376 
1377 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1378 	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1379 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1380 	    | CPU_CONTROL_WBUF_ENABLE;
1381 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1382 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1383 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1384 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1385 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1386 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1387 		 | CPU_CONTROL_CPCLK;
1388 
1389 	cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);
1390 
1391 	/* Clear out the cache */
1392 	cpu_cache_purgeID();
1393 
1394 	/* Set the control register */
1395 	cpu_control(0xffffffff, cpuctrl);
1396 
1397 }
1398 #endif	/* CPU_ARM9 */
1399 
#ifdef CPU_SA110
/* SA-110 boot options: separate I and D caches and the write buffer. */
struct cpu_option sa110_options[] = {
#ifdef COMPAT_12
	/* Old option names, kept for NetBSD 1.2 compatibility. */
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa110.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa110.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "sa110.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "sa110.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }	/* terminator */
};
1416 
1417 void
1418 sa110_setup(args)
1419 	char *args;
1420 {
1421 	int cpuctrlmask;
1422 
1423 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1424 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1425 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1426 		 | CPU_CONTROL_WBUF_ENABLE;
1427 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1428 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1429 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1430 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1431 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1432 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1433 		 | CPU_CONTROL_CPCLK;
1434 
1435 	cpuctrl = parse_cpu_options(args, sa110_options, cpuctrl);
1436 
1437 	/* Clear out the cache */
1438 	cpu_cache_purgeID();
1439 
1440 	/* Set the control register */
1441 /*	cpu_control(cpuctrlmask, cpuctrl);*/
1442 	cpu_control(0xffffffff, cpuctrl);
1443 
1444 	/* enable clockswitching */
1445 	__asm ("mcr 15, 0, r0, c15, c1, 2");
1446 }
1447 #endif	/* CPU_SA110 */
1448 
#ifdef CPU_XSCALE
/*
 * XScale boot options: separate I and D caches and branch prediction.
 * (No write-buffer option: the XScale write buffer is always enabled;
 * see the comment in xscale_setup().)
 */
struct cpu_option xscale_options[] = {
#ifdef COMPAT_12
	/* Old option names, kept for NetBSD 1.2 compatibility. */
	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
#endif	/* COMPAT_12 */
	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "xscale.branchpredict", BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "xscale.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "xscale.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "xscale.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ NULL,			IGN, IGN, 0 }	/* terminator */
};
1464 
1465 void
1466 xscale_setup(args)
1467 	char *args;
1468 {
1469 	int cpuctrlmask;
1470 
1471 	/*
1472 	 * The XScale Write Buffer is always enabled.  Our option
1473 	 * is to enable/disable coalescing.  Note that bits 6:3
1474 	 * must always be enabled.
1475 	 */
1476 
1477 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1478 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1479 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1480 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
1481 		 | CPU_CONTROL_BPRD_ENABLE;
1482 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1483 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1484 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1485 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1486 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1487 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1488 		 | CPU_CONTROL_CPCLK;
1489 
1490 	cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl);
1491 
1492 	/* Clear out the cache */
1493 	cpu_cache_purgeID();
1494 
1495 	/*
1496 	 * Set the control register.  Note that bits 6:3 must always
1497 	 * be set to 1.
1498 	 */
1499 /*	cpu_control(cpuctrlmask, cpuctrl);*/
1500 	cpu_control(0xffffffff, cpuctrl);
1501 
1502 #if 0
1503 	/*
1504 	 * XXX FIXME
1505 	 * Disable write buffer coalescing, PT ECC, and set
1506 	 * the mini-cache to write-back/read-allocate.
1507 	 */
1508 	__asm ("mcr p15, 0, %0, c1, c0, 1" :: "r" (0));
1509 #endif
1510 }
1511 #endif	/* CPU_XSCALE */
1512