/*
 * tegra 2 SoC machine assist
 * dual arm cortex-a9 processors
 *
 * ARM v7 arch. ref. man. §B1.3.3 says that we don't need barriers
 * around writes to CPSR.
 *
 * LDREX/STREX use an exclusive monitor, which is part of the data cache unit
 * for the L1 cache, so they won't work right if the L1 cache is disabled.
 */

#include "arm.s"

#define	LDREX(fp,t)   WORD $(0xe<<28|0x01900f9f | (fp)<<16 | (t)<<12)
/* `The order of operands is from left to right in dataflow order' - asm man */
#define	STREX(f,tp,r) WORD $(0xe<<28|0x01800f90 | (tp)<<16 | (r)<<12 | (f)<<0)
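/*
 * the WORDs above hand-assemble ldrex and strex, which this assembler
 * seems to lack mnemonics for: the AL condition code and the v6
 * encodings, with the register numbers or'd into the Rn/Rd/Rm fields.
 */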

#define MAXMB	(KiB-1)			/* last MB has vectors */
#define TMPSTACK (DRAMSIZE - 64*MiB)	/* used only during cpu startup */
/* tas/cas strex debugging limits; started at 10000 */
#define MAXSC 100000

GLOBL	testmem(SB), $4

/*
 * Entered here from Das U-Boot or another Plan 9 kernel with MMU disabled.
 * Until the MMU is enabled it is OK to call functions provided
 * they are within ±32MiB relative and do not require any
 * local variables or more than one argument (i.e. there is
 * no stack).
 */
TEXT _start(SB), 1, $-4
	CPSMODE(PsrMsvc)
	CPSID					/* interrupts off */
	CPSAE
	SETEND(0)				/* little-endian */
	BARRIERS
	CLREX
	SETZSB

	MOVW	CPSR, R0
	ORR	$PsrDfiq, R0
	MOVW	R0, CPSR

	/* invalidate i-cache and branch-target cache */
	MTCP	CpSC, 0, PC, C(CpCACHE), C(CpCACHEinvi), CpCACHEall
	BARRIERS

	/* put cpus other than 0 to sleep until cpu 0 is ready */
	CPUID(R1)
	BEQ	cpuinit

	/* not cpu 0 */
PUTC('Z')
PUTC('Z')
	BARRIERS
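	/*
	 * wait here until cpu 0 sets cpus_proceed, then start this cpu
	 * properly.  cpureset should not return; if it somehow does,
	 * go back to sleep.
	 */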
dowfi:
	WFI
	MOVW	cpus_proceed(SB), R1
	CMP	$0, R1
	BEQ	dowfi
	BL	cpureset(SB)
	B	dowfi

cpuinit:
	DELAY(printloopret, 1)
PUTC('\r')
	DELAY(printloopnl, 1)
PUTC('\n')

	DELAY(printloops, 1)
PUTC('P')
	/* disable the PL310 L2 cache on cpu0 */
	MOVW	$(PHYSL2BAG+0x100), R1
	MOVW	$0, R2
	MOVW	R2, (R1)
	BARRIERS
	/* invalidate it */
	MOVW	$((1<<16)-1), R2
	MOVW	R2, 0x77c(R1)
	BARRIERS

	/*
	 * disable my MMU & caches
	 */
	MFCP	CpSC, 0, R1, C(CpCONTROL), C(0), CpMainctl
	ORR	$CpCsbo, R1
	BIC	$(CpCsbz|CpCmmu|CpCdcache|CpCicache|CpCpredict), R1
	MTCP	CpSC, 0, R1, C(CpCONTROL), C(0), CpMainctl
	BARRIERS

	/* cortex-a9 model-specific initial configuration */
	MOVW	$0, R1
	MTCP	CpSC, 0, R1, C(CpCONTROL), C(0), CpAuxctl
	BARRIERS

PUTC('l')
	DELAY(printloop3, 1)

	MOVW	$testmem-KZERO(SB), R0
	BL	memdiag(SB)

PUTC('a')
	/* clear Mach for cpu 0 */
	MOVW	$PADDR(MACHADDR), R4		/* address of Mach for cpu 0 */
	MOVW	$0, R0
_machZ:
	MOVW	R0, (R4)
	ADD	$4, R4
	CMP.S	$PADDR(L1+L1X(0)), R4	/* end at top-level page table */
	BNE	_machZ

	/*
	 * set up the MMU page table for cpu 0
	 */

PUTC('n')
	/* clear all PTEs first, to provide a default */
//	MOVW	$PADDR(L1+L1X(0)), R4		/* address of PTE for 0 */
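	/* R4 already points at PADDR(L1+L1X(0)); the _machZ loop above ended there */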
_ptenv0:
	ZEROPTE()
	CMP.S	$PADDR(L1+16*KiB), R4
	BNE	_ptenv0

	DELAY(printloop4, 2)
PUTC(' ')
	/*
	 * set up double map of PHYSDRAM, KZERO to PHYSDRAM for first few MBs,
	 * but only if KZERO and PHYSDRAM differ.
	 */
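	/*
	 * the double map keeps the PC and SP, which still hold physical
	 * addresses, valid while the MMU is being switched on;
	 * _r15warp below then moves them into the KZERO map.
	 */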
	MOVW	$PTEDRAM, R2			/* PTE bits */
	MOVW	$PHYSDRAM, R3			/* pa */
	CMP	$KZERO, R3
	BEQ	no2map
	MOVW	$PADDR(L1+L1X(PHYSDRAM)), R4  /* address of PTE for PHYSDRAM */
	MOVW	$DOUBLEMAPMBS, R5
_ptdbl:
	FILLPTE()
	SUB.S	$1, R5
	BNE	_ptdbl
no2map:

	/*
	 * back up and fill in PTEs for memory at KZERO.
	 * trimslice has 1 bank of 1GB at PHYSDRAM.
	 * Map the maximum.
	 */
PUTC('9')
	MOVW	$PTEDRAM, R2			/* PTE bits */
	MOVW	$PHYSDRAM, R3
	MOVW	$PADDR(L1+L1X(KZERO)), R4	/* start with PTE for KZERO */
	MOVW	$MAXMB, R5			/* inner loop count (MBs) */
_ptekrw:					/* set PTEs */
	FILLPTE()
	SUB.S	$1, R5				/* decrement inner loop count */
	BNE	_ptekrw

	/*
	 * back up and fill in PTEs for MMIO
	 */
PUTC(' ')
	MOVW	$PTEIO, R2			/* PTE bits */
	MOVW	$PHYSIO, R3
	MOVW	$PADDR(L1+L1X(VIRTIO)), R4	/* start with PTE for VIRTIO */
_ptenv2:
	FILLPTE()
	CMP.S	$PADDR(L1+L1X(PHYSIOEND)), R4
	BNE	_ptenv2

	/* mmu.c sets up the trap vectors later */

	MOVW	$(PHYSDRAM | TMPSTACK), SP

	/*
	 * learn l1 cache characteristics (on cpu 0 only).
	 */

	MOVW	$(1-1), R0			/* l1 */
	SLL	$1, R0				/* R0 = (cache - 1) << 1 */
	MTCP	CpSC, CpIDcssel, R0, C(CpID), C(CpIDid), 0 /* select l1 cache */
	BARRIERS
	MFCP	CpSC, CpIDcsize, R0, C(CpID), C(CpIDid), 0 /* get sets & ways */
	MOVW	$CACHECONF, R8

	/* get log2linelen into l1setsh */
	MOVW	R0, R1
	AND	$3, R1
	ADD	$4, R1
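	/* ccsidr bits 2:0 hold log2(words/line) - 2; +4 converts to log2(bytes/line) */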
	/* l1 & l2 must have same cache line size, thus same set shift */
	MOVW	R1, 4(R8)		/*  +4 = l1setsh */
	MOVW	R1, 12(R8)		/* +12 = l2setsh */

	/* get nways in R1 */
	SRA	$3, R0, R1
	AND	$((1<<10)-1), R1
	ADD	$1, R1
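	/* (ccsidr bits 12:3 hold nways - 1) */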

	/* get log2(nways) in R2 (assume nways is 2^n) */
	MOVW	$(BI2BY*BY2WD - 1), R2
	CLZ(1, 1)
	SUB.S	R1, R2			/* R2 = 31 - clz(nways) */
	ADD.EQ	$1, R2
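	/*
	 * for nways a power of 2, 31 - clz(nways) is log2(nways).
	 * the ADD.EQ bumps a 1-way (direct-mapped) result from 0 to 1,
	 * presumably to avoid a degenerate way shift of 32 below.
	 */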
//	MOVW	R2, R3			/* print log2(nways): 2 */

	MOVW	$32, R1
	SUB	R2, R1			/* R1 = 32 - log2(nways) */
	MOVW	R1, 0(R8)		/* +0 = l1waysh */
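	/*
	 * set/way cache operations take the way number in the topmost
	 * log2(nways) bits, hence waysh = 32 - log2(nways).
	 */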

	BARRIERS

	MOVW	$testmem-KZERO(SB), R0
	BL	memdiag(SB)

	/*
	 * the mpcore manual says invalidate d-cache, scu, pl310 in that order,
	 * but says nothing about when to disable them.
	 *
	 * invalidate my caches before enabling
	 */
	BL	cachedinv(SB)
	MTCP	CpSC, 0, PC, C(CpCACHE), C(CpCACHEinvi), CpCACHEall
	BARRIERS

PUTC('f')
	/*
	 * the mpcore manual says enable scu, d-cache, pl310, smp mode
	 * in that order.  we have to reverse the last two; see main().
	 */
	BL	scuon(SB)

	/*
	 * turn my L1 cache on; need it for tas below.
	 */
	MFCP	CpSC, 0, R1, C(CpCONTROL), C(0), CpMainctl
	ORR	$(CpCdcache|CpCicache|CpCalign|CpCpredict), R1
	MTCP	CpSC, 0, R1, C(CpCONTROL), C(0), CpMainctl
	BARRIERS

	/* cortex-a9 model-specific configuration */
	MOVW	$CpACl1pref, R1
	MTCP	CpSC, 0, R1, C(CpCONTROL), C(0), CpAuxctl
	BARRIERS

	/* we're supposed to wait until l1 & l2 are on before calling smpon */

PUTC('r')
	/* set the domain access control */
	MOVW	$Client, R0
	BL	dacput(SB)

	DELAY(printloop5, 2)
PUTC('o')
	BL	mmuinvalidate(SB)

	MOVW	$0, R0
	BL	pidput(SB)

	/* set the translation table base */
	MOVW	$PADDR(L1), R0
	BL	ttbput(SB)

PUTC('m')
	/*
	 * the little dance to turn the MMU on
	 */
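	/*
	 * i.e. flush everything while we still run from physical
	 * addresses, purge stale TLB entries, then enable translation.
	 */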
	BL	cacheuwbinv(SB)
	BL	mmuinvalidate(SB)
	BL	mmuenable(SB)

PUTC(' ')
	/* warp the PC into the virtual map */
	MOVW	$KZERO, R0
	BL	_r15warp(SB)
	/*
	 * cpu 0 is now running at KZERO+something!
	 */

	BARRIERS
	MOVW	$setR12(SB), R12		/* reload kernel SB */
	MOVW	$(KZERO | TMPSTACK), SP

	BL	cacheuwbinv(SB)

PUTC('B')
	MOVW	$PHYSDRAM, R3			/* pa */
	CMP	$KZERO, R3
	BEQ	no2unmap
	/* undo double map of PHYSDRAM, KZERO & first few MBs */
	MOVW	$(L1+L1X(PHYSDRAM)), R4		/* addr. of PTE for PHYSDRAM */
	MOVW	$0, R0
	MOVW	$DOUBLEMAPMBS, R5
_ptudbl:
	ZEROPTE()
	SUB.S	$1, R5
	BNE	_ptudbl
no2unmap:

	BL	cachedwb(SB)
	BL	mmuinvalidate(SB)

	/*
	 * call main in C
	 * pass Mach to main and set up the stack in it
	 */
	MOVW	$MACHADDR, R0			/* cpu 0 Mach */
	MOVW	R0, R(MACH)			/* m = MACHADDR */
	ADD	$(MACHSIZE-4), R0, SP		/* leave space for link register */
PUTC('e')
	BL	main(SB)			/* main(m) */
limbo:
	BL	idlehands(SB)
	B	limbo

	BL	_div(SB)			/* hack to load _div, etc. */


/*
 * called on cpu(s) other than 0, to start them, from _vrst
 * (reset vector) in lexception.s, with interrupts disabled
 * and in SVC mode, running in the zero segment (pc is in lower 256MB).
 * SB is set for the zero segment.
 */
TEXT cpureset(SB), 1, $-4
	CLREX
	MOVW	CPSR, R0
	ORR	$PsrDfiq, R0
	MOVW	R0, CPSR

	MOVW	$(PHYSDRAM | TMPSTACK), SP	/* stack for cache ops */

	/* paranoia: turn my mmu and caches off. */
	MFCP	CpSC, 0, R0, C(CpCONTROL), C(0), CpMainctl
	ORR	$CpCsbo, R0
	BIC	$(CpCsbz|CpCmmu|CpCdcache|CpCicache|CpCpredict), R0
	MTCP	CpSC, 0, R0, C(CpCONTROL), C(0), CpMainctl
	BARRIERS

	/* cortex-a9 model-specific initial configuration */
	MOVW	$0, R1
	MTCP	CpSC, 0, R1, C(CpCONTROL), C(0), CpAuxctl
	ISB

	/* invalidate my caches before enabling */
	BL	cachedinv(SB)
	MTCP	CpSC, 0, PC, C(CpCACHE), C(CpCACHEinvi), CpCACHEall
	BARRIERS

	/*
	 * turn my L1 cache on; need it (and mmu) for tas below.
	 * need branch prediction to make delay() timing right.
	 */
	MFCP	CpSC, 0, R0, C(CpCONTROL), C(0), CpMainctl
	ORR	$(CpCdcache|CpCicache|CpCalign|CpCpredict), R0
	MTCP	CpSC, 0, R0, C(CpCONTROL), C(0), CpMainctl
	BARRIERS
	/* enable l1 cache coherency, needed at minimum for ldrex/strex. */
	BL	smpon(SB)
	BARRIERS

	/*
	 * we used to write to PHYSEVP here; now we do it in C, which offers
	 * more assurance that we're up and won't go off the rails.
	 */

	/* set the domain access control */
	MOVW	$Client, R0
	BL	dacput(SB)

	BL	setmach(SB)

	/*
	 * redo double map of PHYSDRAM, KZERO in this cpu's ptes.
	 * mmuinit will undo this later.
	 */

	MOVW	$PHYSDRAM, R3
	CMP	$KZERO, R3
	BEQ	noun2map

	/* launchinit set m->mmul1 to a copy of cpu0's l1 page table */
	MOVW	12(R(MACH)), R0		/* m->mmul1 (virtual addr) */
	BL	k2paddr(SB)		/* R0 = PADDR(m->mmul1) */
	ADD	$L1X(PHYSDRAM), R0, R4	/* R4 = address of PHYSDRAM's PTE */

	MOVW	$PTEDRAM, R2		/* PTE bits */
	MOVW	$DOUBLEMAPMBS, R5
_ptrdbl:
	ORR	R3, R2, R1		/* first identity-map 0 to 0, etc. */
	MOVW	R1, (R4)
	ADD	$4, R4			/* bump PTE address */
	ADD	$MiB, R3		/* bump pa */
	SUB.S	$1, R5
	BNE	_ptrdbl
noun2map:

	MOVW	$0, R0
	BL	pidput(SB)

	/* set the translation table base to PADDR(m->mmul1) */
	MOVW	12(R(MACH)), R0		/* m->mmul1 */
	BL	k2paddr(SB)		/* R0 = PADDR(m->mmul1) */
	BL	ttbput(SB)

	/*
	 * the little dance to turn the MMU on
	 */
	BL	cacheuwbinv(SB)
	BL	mmuinvalidate(SB)
	BL	mmuenable(SB)

	/*
	 * mmu is now on, with l1 pt at m->mmul1.
	 */

	/* warp the PC into the virtual map */
	MOVW	$KZERO, R0
	BL	_r15warp(SB)

	/*
	 * now running at KZERO+something!
	 */

	BARRIERS
	MOVW	$setR12(SB), R12	/* reload kernel's SB */
	MOVW	$(KZERO | TMPSTACK), SP	/* stack for cache ops */
	BL	setmach(SB)
	ADD	$(MACHSIZE-4), R(MACH), SP /* leave space for link register */
	BL	cpustart(SB)


/*
 * converts virtual address in R0 to a physical address.
 */
TEXT k2paddr(SB), 1, $-4
	BIC	$KSEGM, R0
	ADD	$PHYSDRAM, R0
	RET

/*
 * converts physical address in R0 to a virtual address.
 */
TEXT p2kaddr(SB), 1, $-4
	BIC	$KSEGM, R0
	ORR	$KZERO, R0
	RET

/*
 * converts address in R0 to the current segment, as defined by the PC.
 * clobbers R1.
 */
TEXT addr2pcseg(SB), 1, $-4
	BIC	$KSEGM, R0
	MOVW	PC, R1
	AND	$KSEGM, R1		/* segment PC is in */
	ORR	R1, R0
	RET

/* sets R(MACH), preserves other registers */
TEXT setmach(SB), 1, $-4
	MOVM.DB.W [R14], (R13)
	MOVM.DB.W [R0-R2], (R13)

	CPUID(R2)
	SLL	$2, R2			/* convert to word index */

	MOVW	$machaddr(SB), R0
	BL	addr2pcseg(SB)
	ADD	R2, R0			/* R0 = &machaddr[cpuid] */
	MOVW	(R0), R0		/* R0 = machaddr[cpuid] */
	CMP	$0, R0
	MOVW.EQ	$MACHADDR, R0		/* paranoia: use MACHADDR if 0 */
	BL	addr2pcseg(SB)
	MOVW	R0, R(MACH)		/* m = machaddr[cpuid] */

	MOVM.IA.W (R13), [R0-R2]
	MOVM.IA.W (R13), [R14]
	RET


/*
 * memory diagnostic
 * tests word at (R0); modifies R7 and R8
 */
TEXT memdiag(SB), 1, $-4
	MOVW	$0xabcdef89, R7
	MOVW	R7, (R0)
	MOVW	(R0), R8
	CMP	R7, R8
	BNE	mbuggery		/* broken memory */

	BARRIERS
	MOVW	(R0), R8
	CMP	R7, R8
	BNE	mbuggery		/* broken memory */

	MOVW	$0, R7
	MOVW	R7, (R0)
	BARRIERS
	RET

/* modifies R0, R3-R6 */
TEXT printhex(SB), 1, $-4
	MOVW	R0, R3
	PUTC('0')
	PUTC('x')
	MOVW	$(32-4), R5	/* bits to shift right */
nextdig:
	SRA	R5, R3, R4
	AND	$0xf, R4
	ADD	$'0', R4
	CMP.S	$'9', R4
	BLE	nothex		/* if R4 <= 9, jump */
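	/* digit is 10-15: adjust from ':'..'?' to 'a'..'f' */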
	ADD	$('a'-('9'+1)), R4
nothex:
	PUTC(R4)
	SUB.S	$4, R5
	BGE	nextdig

	PUTC('\r')
	PUTC('\n')
	DELAY(proct, 50)
	RET

mbuggery:
	PUTC('?')
	PUTC('m')
mtopanic:
	MOVW	$membmsg(SB), R0
	MOVW	R14, R1		/* get R14's segment ... */
	AND	$KSEGM, R1
	BIC	$KSEGM,	R0	/* strip segment from address */
	ORR	R1, R0		/* combine them */
	BL	panic(SB)
mbugloop:
	WFI
	B	mbugloop

	DATA	membmsg+0(SB)/8,$"memory b"
	DATA	membmsg+8(SB)/6,$"roken\z"
	GLOBL	membmsg(SB), $14

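/*
 * rewrite the return address and stack pointer to lie in the segment
 * given in R0; the RET then resumes execution in that segment.
 */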
TEXT _r15warp(SB), 1, $-4
	BIC	$KSEGM, R14			/* link reg, will become PC */
	ORR	R0, R14
	BIC	$KSEGM, SP
	ORR	R0, SP
	RET

/*
 * `single-element' cache operations.
 * in arm arch v7, they operate on all architected cache levels, so separate
 * l2 functions are usually unnecessary.
 */

TEXT cachedwbse(SB), $-4			/* D writeback SE */
	MOVW	R0, R2

	MOVW	CPSR, R3
	CPSID					/* splhi */

	BARRIERS			/* force outstanding stores to cache */
	MOVW	R2, R0
	MOVW	4(FP), R1
	ADD	R0, R1				/* R1 is end address */
	BIC	$(CACHELINESZ-1), R0		/* cache line start */
_dwbse:
	MTCP	CpSC, 0, R0, C(CpCACHE), C(CpCACHEwb), CpCACHEse
	ADD	$CACHELINESZ, R0
	CMP.S	R0, R1
	BGT	_dwbse
	B	_wait

TEXT cachedwbinvse(SB), $-4			/* D writeback+invalidate SE */
	MOVW	R0, R2

	MOVW	CPSR, R3
	CPSID					/* splhi */

	BARRIERS			/* force outstanding stores to cache */
	MOVW	R2, R0
	MOVW	4(FP), R1
	ADD	R0, R1				/* R1 is end address */
	BIC	$(CACHELINESZ-1), R0		/* cache line start */
_dwbinvse:
	MTCP	CpSC, 0, R0, C(CpCACHE), C(CpCACHEwbi), CpCACHEse
	ADD	$CACHELINESZ, R0
	CMP.S	R0, R1
	BGT	_dwbinvse
_wait:						/* drain write buffer */
	BARRIERS

	MOVW	R3, CPSR			/* splx */
	RET

TEXT cachedinvse(SB), $-4			/* D invalidate SE */
	MOVW	R0, R2

	MOVW	CPSR, R3
	CPSID					/* splhi */

	BARRIERS			/* force outstanding stores to cache */
	MOVW	R2, R0
	MOVW	4(FP), R1
	ADD	R0, R1				/* R1 is end address */

	/*
	 * if start & end addresses are not on cache-line boundaries,
	 * flush first & last cache lines before invalidating.
	 */
	AND.S	$(CACHELINESZ-1), R0, R4
	BEQ	stok
	BIC	$(CACHELINESZ-1), R0, R4	/* cache line start */
	MTCP	CpSC, 0, R4, C(CpCACHE), C(CpCACHEwb), CpCACHEse
stok:
	AND.S	$(CACHELINESZ-1), R1, R4
	BEQ	endok
	BIC	$(CACHELINESZ-1), R1, R4	/* cache line start */
	MTCP	CpSC, 0, R4, C(CpCACHE), C(CpCACHEwb), CpCACHEse
endok:
	BIC	$(CACHELINESZ-1), R0		/* cache line start */
_dinvse:
	MTCP	CpSC, 0, R0, C(CpCACHE), C(CpCACHEinvd), CpCACHEse
	ADD	$CACHELINESZ, R0
	CMP.S	R0, R1
	BGT	_dinvse
	B	_wait

/*
 *  enable mmu
 */
TEXT mmuenable(SB), 1, $-4
	MFCP	CpSC, 0, R0, C(CpCONTROL), C(0), CpMainctl
	ORR	$CpCmmu, R0
	MTCP	CpSC, 0, R0, C(CpCONTROL), C(0), CpMainctl
	BARRIERS
	RET

TEXT mmudisable(SB), 1, $-4
	MFCP	CpSC, 0, R0, C(CpCONTROL), C(0), CpMainctl
	BIC	$CpCmmu, R0
	MTCP	CpSC, 0, R0, C(CpCONTROL), C(0), CpMainctl
	BARRIERS
	RET

/*
 * If one of these MCR instructions crashes or hangs the machine,
 * check your Level 1 page table (at TTB) closely.
 */
TEXT mmuinvalidate(SB), $-4			/* invalidate all */
	MOVW	CPSR, R2
	CPSID					/* interrupts off */
	BARRIERS
	MTCP	CpSC, 0, PC, C(CpTLB), C(CpTLBinvu), CpTLBinv
	BARRIERS
	MOVW	R2, CPSR			/* interrupts restored */
	RET

TEXT mmuinvalidateaddr(SB), $-4			/* invalidate single entry */
	MTCP	CpSC, 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinvse
	BARRIERS
	RET

TEXT cpidget(SB), 1, $-4			/* main ID */
	MFCP	CpSC, 0, R0, C(CpID), C(CpIDidct), CpIDid
	RET

TEXT cpctget(SB), 1, $-4			/* cache type */
	MFCP	CpSC, 0, R0, C(CpID), C(CpIDidct), CpIDct
	RET

TEXT controlget(SB), 1, $-4			/* system control (sctlr) */
	MFCP	CpSC, 0, R0, C(CpCONTROL), C(0), CpMainctl
	RET

TEXT ttbget(SB), 1, $-4				/* translation table base */
	MFCP	CpSC, 0, R0, C(CpTTB), C(0), CpTTB0
	RET

TEXT ttbput(SB), 1, $-4				/* translation table base */
	MOVW	CPSR, R2
	CPSID
	MOVW	R0, R1
	BARRIERS		/* finish prior accesses before changing ttb */
	MTCP	CpSC, 0, R1, C(CpTTB), C(0), CpTTB0
	MTCP	CpSC, 0, R1, C(CpTTB), C(0), CpTTB1	/* non-secure too */
	MOVW	$0, R0
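	/* ttbcr 0: use ttb0 for the entire address space */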
	MTCP	CpSC, 0, R0, C(CpTTB), C(0), CpTTBctl
	BARRIERS
	MOVW	R2, CPSR
	RET

TEXT dacget(SB), 1, $-4				/* domain access control */
	MFCP	CpSC, 0, R0, C(CpDAC), C(0)
	RET

TEXT dacput(SB), 1, $-4				/* domain access control */
	MOVW	R0, R1
	BARRIERS
	MTCP	CpSC, 0, R1, C(CpDAC), C(0)
	ISB
	RET

TEXT fsrget(SB), 1, $-4				/* fault status */
	MFCP	CpSC, 0, R0, C(CpFSR), C(0), CpDFSR
	RET

TEXT farget(SB), 1, $-4				/* fault address */
	MFCP	CpSC, 0, R0, C(CpFAR), C(0), CpDFAR
	RET

TEXT getpsr(SB), 1, $-4
	MOVW	CPSR, R0
	RET

TEXT getscr(SB), 1, $-4				/* secure configuration */
	MFCP	CpSC, 0, R0, C(CpCONTROL), C(CpCONTROLscr), CpSCRscr
	RET

TEXT pidget(SB), 1, $-4				/* address translation pid */
	MFCP	CpSC, 0, R0, C(CpPID), C(0x0)
	RET

TEXT pidput(SB), 1, $-4				/* address translation pid */
	MTCP	CpSC, 0, R0, C(CpPID), C(0), 0	/* pid, v7a deprecated */
	MTCP	CpSC, 0, R0, C(CpPID), C(0), 1	/* context id, errata 754322 */
	ISB
	RET

/*
 * access to yet more coprocessor registers
 */

TEXT getauxctl(SB), 1, $-4		/* get cortex-a9 aux. ctl. */
	MFCP	CpSC, 0, R0, C(CpCONTROL), C(0), CpAuxctl
	RET

TEXT putauxctl(SB), 1, $-4		/* put cortex-a9 aux. ctl. */
	BARRIERS
	MTCP	CpSC, 0, R0, C(CpCONTROL), C(0), CpAuxctl
	BARRIERS
	RET

TEXT getclvlid(SB), 1, $-4
	MFCP	CpSC, CpIDcsize, R0, C(CpID), C(CpIDidct), CpIDclvlid
	RET

TEXT getcyc(SB), 1, $-4
	MFCP	CpSC, 0, R0, C(CpCLD), C(CpCLDcyc), 0
	RET

TEXT getdebug(SB), 1, $-4		/* get cortex-a9 debug enable register */
	MFCP	CpSC, 0, R0, C(1), C(1), 1
	RET

TEXT getpc(SB), 1, $-4
	MOVW	PC, R0
	RET

TEXT getsb(SB), 1, $-4
	MOVW	R12, R0
	RET

TEXT setsp(SB), 1, $-4
	MOVW	R0, SP
	RET


TEXT splhi(SB), 1, $-4
	MOVW	CPSR, R0			/* return old CPSR */
	CPSID					/* turn off interrupts */
	CMP.S	$0, R(MACH)
	MOVW.NE	R14, 4(R(MACH))			/* save caller pc in m->splpc */
	RET

TEXT spllo(SB), 1, $-4			/* start marker for devkprof.c */
	MOVW	CPSR, R0			/* return old CPSR */
	MOVW	$0, R1
	CMP.S	R1, R(MACH)
	MOVW.NE	R1, 4(R(MACH))			/* clear m->splpc */
	CPSIE
	RET

TEXT splx(SB), 1, $-4
	MOVW	CPSR, R3			/* must return old CPSR */
	CPSID

	CMP.S	$0, R(MACH)
	MOVW.NE	R14, 4(R(MACH))			/* save caller pc in m->splpc */
	MOVW	R0, CPSR			/* reset interrupt level */
	MOVW	R3, R0				/* must return old CPSR */
	RET

TEXT spldone(SB), 1, $0				/* end marker for devkprof.c */
	RET

TEXT islo(SB), 1, $-4
	MOVW	CPSR, R0
	AND	$(PsrDirq), R0
	EOR	$(PsrDirq), R0
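	/* nonzero iff the irq-disable bit was clear, i.e. interrupts enabled */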
	RET

TEXT clz(SB), $-4
	CLZ(0, 0)			/* 0 is R0 */
	RET

TEXT setlabel(SB), 1, $-4
	MOVW	SP, 0(R0)
	MOVW	R14, 4(R0)		/* pc */
	MOVW	$0, R0
	RET

TEXT gotolabel(SB), 1, $-4
	MOVW	0(R0), SP
	MOVW	4(R0), R14		/* pc */
	MOVW	$1, R0
	RET

TEXT getcallerpc(SB), 1, $-4
	MOVW	0(SP), R0
	RET

TEXT wfi(SB), $-4
	MOVW	CPSR, R1
	/*
	 * an interrupt should break us out of wfi.  masking interrupts
	 * slows interrupt response slightly but prevents recursion.
	 */
//	CPSIE
	CPSID

	BARRIERS
	WFI

	MOVW	R1, CPSR
	RET

TEXT coherence(SB), $-4
	BARRIERS
	RET

GLOBL cpus_proceed+0(SB), $4

#include "cache.v7.s"

TEXT	tas(SB), $-4			/* _tas(ulong *) */
	/* returns old (R0) after modifying (R0) */
	MOVW	R0,R5
	DMB

	MOVW	$1,R2		/* new value of (R0) */
	MOVW	$MAXSC, R8
tas1:
	LDREX(5,7)		/* LDREX 0(R5),R7 */
	CMP.S	$0, R7		/* old value non-zero (lock taken)? */
	BNE	lockbusy	/* we lose */
	SUB.S	$1, R8
	BEQ	lockloop2
	STREX(2,5,4)		/* STREX R2,(R5),R4 */
	CMP.S	$0, R4
	BNE	tas1		/* strex failed? try again */
	DMB
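	/* keep the critical section's accesses after the lock acquisition */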
	B	tas0
lockloop2:
	PUTC('?')
	PUTC('l')
	PUTC('t')
	BL	abort(SB)
lockbusy:
	CLREX
tas0:
	MOVW	R7, R0		/* return old value */
	RET
874