xref: /llvm-project/clang/test/CodeGen/AArch64/inline-asm.c (revision 207e5ccceec8d3cc3f32723e78f2a142bc61b07d)
1 // RUN: %clang_cc1 -triple arm64-none-linux-gnu -emit-llvm -o - %s | FileCheck %s
2 
3 // The only part clang really deals with is the lvalue/rvalue
4 // distinction on constraints. It's sufficient to emit llvm and make
5 // sure that's sane.
6 
7 long var;
8 
9 void test_generic_constraints(int var32, long var64) {
10     asm("add %0, %1, %1" : "=r"(var32) : "0"(var32));
11 // CHECK: [[R32_ARG:%[a-zA-Z0-9]+]] = load i32, ptr
12 // CHECK: call i32 asm "add $0, $1, $1", "=r,0"(i32 [[R32_ARG]])
13 
14     asm("add %0, %1, %1" : "=r"(var64) : "0"(var64));
15 // CHECK: [[R32_ARG:%[a-zA-Z0-9]+]] = load i64, ptr
16 // CHECK: call i64 asm "add $0, $1, $1", "=r,0"(i64 [[R32_ARG]])
17 
18     asm("ldr %0, %1" : "=r"(var32) : "m"(var));
19     asm("ldr %0, [%1]" : "=r"(var64) : "r"(&var));
20 // CHECK: call i32 asm "ldr $0, $1", "=r,*m"(ptr elementtype(i64) @var)
21 // CHECK: call i64 asm "ldr $0, [$1]", "=r,r"(ptr @var)
22 }
23 
24 float f;
25 double d;
26 void test_constraint_w(void) {
27     asm("fadd %s0, %s1, %s1" : "=w"(f) : "w"(f));
28 // CHECK: [[FLT_ARG:%[a-zA-Z_0-9]+]] = load float, ptr @f
29 // CHECK: call float asm "fadd ${0:s}, ${1:s}, ${1:s}", "=w,w"(float [[FLT_ARG]])
30 
31     asm("fadd %d0, %d1, %d1" : "=w"(d) : "w"(d));
32 // CHECK: [[DBL_ARG:%[a-zA-Z_0-9]+]] = load double, ptr @d
33 // CHECK: call double asm "fadd ${0:d}, ${1:d}, ${1:d}", "=w,w"(double [[DBL_ARG]])
34 }
35 
36 void test_constraints_immed(void) {
37     asm("add x0, x0, %0" : : "I"(4095) : "x0");
38     asm("and w0, w0, %0" : : "K"(0xaaaaaaaa) : "w0");
39     asm("and x0, x0, %0" : : "L"(0xaaaaaaaaaaaaaaaa) : "x0");
40 // CHECK: call void asm sideeffect "add x0, x0, $0", "I,~{x0}"(i32 4095)
41 // CHECK: call void asm sideeffect "and w0, w0, $0", "K,~{w0}"(i32 -1431655766)
42 // CHECK: call void asm sideeffect "and x0, x0, $0", "L,~{x0}"(i64 -6148914691236517206)
43 }
44 
45 void test_constraint_S(void) {
46     int *addr;
47     asm("adrp %0, %1\n\t"
48         "add %0, %0, :lo12:%1" : "=r"(addr) : "S"(&var));
49 // CHECK: call ptr asm "adrp $0, $1\0A\09add $0, $0, :lo12:$1", "=r,S"(ptr @var)
50 }
51 
52 void test_constraint_Q(void) {
53     int val;
54     asm("ldxr %0, %1" : "=r"(val) : "Q"(var));
55 // CHECK: call i32 asm "ldxr $0, $1", "=r,*Q"(ptr elementtype(i64) @var)
56 }
57 
58 void test_gcc_registers(void) {
59     register unsigned long reg0 asm("r0") = 0;
60     register unsigned long reg1 asm("r1") = 1;
61     register unsigned int  reg29 asm("r29") = 2;
62     register unsigned int  reg30 asm("r30") = 3;
63 
64     // Test remapping register names in register ... asm("rN") statments.
65     // rN register operands in these two inline assembly lines
66     // should get renamed to valid AArch64 registers.
67     asm volatile("hvc #0" : : "r" (reg0), "r" (reg1));
68     // CHECK: call void asm sideeffect "hvc #0", "{x0},{x1}"
69     asm volatile("hvc #0" : : "r" (reg29), "r" (reg30));
70     // CHECK: call void asm sideeffect "hvc #0", "{fp},{lr}"
71 
72     // rN registers when used without register ... asm("rN") syntax
73     // should not be remapped.
74     asm volatile("mov r0, r1\n");
75     // CHECK: call void asm sideeffect "mov r0, r1\0A", ""()
76 }
77 
78 void test_tied_earlyclobber(void) {
79   register int a asm("x1");
80   asm("" : "+&r"(a));
81   // CHECK: call i32 asm "", "=&{x1},0"(i32 %0)
82 }
83 
84 void test_reduced_gpr_constraints(int var32, long var64) {
85   asm("add w0, w0, %0" : : "Uci"(var32) : "w0");
86 // CHECK: [[ARG1:%.+]] = load i32, ptr
87 // CHECK: call void asm sideeffect "add w0, w0, $0", "@3Uci,~{w0}"(i32 [[ARG1]])
88   asm("add x0, x0, %0" : : "Uci"(var64) : "x0");
89 // CHECK: [[ARG1:%.+]] = load i64, ptr
90 // CHECK: call void asm sideeffect "add x0, x0, $0", "@3Uci,~{x0}"(i64 [[ARG1]])
91   asm("add w0, w0, %0" : : "Ucj"(var32) : "w0");
92 // CHECK: [[ARG2:%.+]] = load i32, ptr
93 // CHECK: call void asm sideeffect "add w0, w0, $0", "@3Ucj,~{w0}"(i32 [[ARG2]])
94   asm("add x0, x0, %0" : : "Ucj"(var64) : "x0");
95 // CHECK: [[ARG2:%.+]] = load i64, ptr
96 // CHECK: call void asm sideeffect "add x0, x0, $0", "@3Ucj,~{x0}"(i64 [[ARG2]])
97 }
98 
99 void test_sme_constraints(){
100   asm("movt zt0[3, mul vl], z0" : : : "za");
101 // CHECK: call void asm sideeffect "movt zt0[3, mul vl], z0", "~{za}"()
102 
103   asm("movt zt0[3, mul vl], z0" : : : "zt0");
104 // CHECK: call void asm sideeffect "movt zt0[3, mul vl], z0", "~{zt0}"()
105 }