/*
 * omap3530 machine assist, definitions
 * cortex-a8 processor
 *
 * loader uses R11 as scratch.
 */

#include "mem.h"
#include "arm.h"

#undef B			/* B is for 'botch' */

#define KADDR(pa)	(KZERO | ((pa) & ~KSEGM))
#define PADDR(va)	(PHYSDRAM | ((va) & ~KSEGM))

#define L1X(va)		(((((va))>>20) & 0x0fff)<<2)

#define MACHADDR	(L1-MACHSIZE)

#define PTEDRAM		(Dom0|L1AP(Krw)|Section|Cached|Buffered)
#define PTEIO		(Dom0|L1AP(Krw)|Section)

#define DOUBLEMAPMBS	256	/* megabytes of low dram to double-map */

/* steps on R0 */
#define DELAY(label, mloops) \
	MOVW	$((mloops)*1000000), R0; \
label: \
	SUB.S	$1, R0; \
	BNE	label

/* wave at the user; clobbers R0, R1 & R6; needs R12 (SB) set */
#define PUTC(c) \
	BARRIERS; \
	MOVW	$(c), R1; \
	MOVW	$PHYSCONS, R6; \
	MOVW	R1, (R6); \
	BARRIERS

/*
 * new instructions
 */

#define SMC	WORD	$0xe1600070	/* low 4-bits are call # (trustzone) */
/* flush branch-target cache; zeroes R0 (cortex) */
#define FLBTC	\
	MOVW	$0, R0; \
	MCR	CpSC, 0, R0, C(CpCACHE), C(CpCACHEinvi), CpCACHEflushbtc
/* flush one entry of the branch-target cache, va in R0 (cortex) */
#define FLBTSE	\
	MCR	CpSC, 0, R0, C(CpCACHE), C(CpCACHEinvi), CpCACHEflushbtse

/* arm v7 arch defines these */
#define WFI	WORD	$0xe320f003	/* wait for interrupt */
#define DMB	WORD	$0xf57ff05f	/* data mem. barrier; last f = SY */
#define DSB	WORD	$0xf57ff04f	/* data synch. barrier; last f = SY */
#define ISB	WORD	$0xf57ff06f	/* instr. sync. barrier; last f = SY */
#define NOOP	WORD	$0xe320f000
#define CLZ(s, d) WORD	$(0xe16f0f10 | (d) << 12 | (s))	/* count leading 0s */
#define CPSIE	WORD	$0xf1080080	/* intr enable: zeroes I bit */
#define CPSID	WORD	$0xf10c0080	/* intr disable: sets I bit */

/* floating point */
#define VMRS(fp, cpu) WORD $(0xeef00a10 | (fp)<<16 | (cpu)<<12) /* FP → arm */
#define VMSR(cpu, fp) WORD $(0xeee00a10 | (fp)<<16 | (cpu)<<12) /* arm → FP */

/*
 * a popular code sequence used to write a pte for va is:
 *
 *	MOVW	R(n), TTB[LnX(va)]
 *	// clean the cache line
 *	DSB
 *	// invalidate tlb entry for va
 *	FLBTC
 *	DSB
 *	PFF (now ISB)
 */
/* zeroes R0 */
#define BARRIERS	FLBTC; DSB; ISB

/*
 * invoked with PTE bits in R2, pa in R3, PTE pointed to by R4.
 * fill PTE pointed to by R4 and increment R4 past it.
 * increment R3 by a MB.  clobbers R1.
 */
#define FILLPTE() \
	ORR	R3, R2, R1;		/* pte bits in R2, pa in R3 */ \
	MOVW	R1, (R4); \
	ADD	$4, R4;			/* bump PTE address */ \
	ADD	$MiB, R3;		/* bump pa */ \

/* zero PTE pointed to by R4 and increment R4 past it.  assumes R0 is 0. */
#define ZEROPTE() \
	MOVW	R0, (R4); \
	ADD	$4, R4;			/* bump PTE address */
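
/*
 * illustrative sketch, not part of the original file: boot code could
 * use FILLPTE in a loop like the one below to double-map the first
 * DOUBLEMAPMBS MB of DRAM with section PTEs.  the register roles
 * (R2 pte bits, R3 pa, R4 pte pointer) follow the FILLPTE convention
 * above; the counter register R5 and the label dbl are hypothetical,
 * and L1 is assumed to be the level-1 page table base from mem.h.
 *
 *	MOVW	$PTEDRAM, R2			// PTE bits
 *	MOVW	$PHYSDRAM, R3			// pa of first MB
 *	MOVW	$(L1+L1X(PHYSDRAM)), R4		// address of its L1 PTE
 *	MOVW	$DOUBLEMAPMBS, R5		// MBs to map
 * dbl:
 *	FILLPTE()
 *	SUB.S	$1, R5
 *	BNE	dbl
 *	BARRIERS				// make the new PTEs visible
 */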