; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck -check-prefix=CHECK %s

; Allow single registers that are too wide for the IR type:
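; The narrow value simply lives in the low bits of a full 32-bit VGPR/SGPR; as
; the checks below show, a mask is inserted before a use where one is needed
; (e.g. the 0xffff AND in the i16 use case).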

define i16 @inline_asm_i16_in_v_def() {
; CHECK-LABEL: inline_asm_i16_in_v_def:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT:    ;;#ASMSTART
; CHECK-NEXT:    ; def v8
; CHECK-NEXT:    ;;#ASMEND
; CHECK-NEXT:    ;;#ASMSTART
; CHECK-NEXT:    ; def v0
; CHECK-NEXT:    ;;#ASMEND
; CHECK-NEXT:    v_and_b32_e32 v0, v8, v0
; CHECK-NEXT:    s_setpc_b64 s[30:31]
  %phys = call i16 asm sideeffect "; def $0", "={v8}"()
  %virt = call i16 asm sideeffect "; def $0", "=v"()
  %r = and i16 %phys, %virt
  ret i16 %r
}

define void @inline_asm_i16_in_v_use(i16 %val) {
; CHECK-LABEL: inline_asm_i16_in_v_use:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT:    v_and_b32_e32 v8, 0xffff, v0
; CHECK-NEXT:    ;;#ASMSTART
; CHECK-NEXT:    ; use v8
; CHECK-NEXT:    ;;#ASMEND
; CHECK-NEXT:    ;;#ASMSTART
; CHECK-NEXT:    ; use v8
; CHECK-NEXT:    ;;#ASMEND
; CHECK-NEXT:    s_setpc_b64 s[30:31]
  call void asm sideeffect "; use $0", "{v8}"(i16 %val)
  call void asm sideeffect "; use $0", "v"(i16 %val)
  ret void
}

define i16 @inline_asm_i16_in_s_def() {
; CHECK-LABEL: inline_asm_i16_in_s_def:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT:    ;;#ASMSTART
; CHECK-NEXT:    ; def s8
; CHECK-NEXT:    ;;#ASMEND
; CHECK-NEXT:    ;;#ASMSTART
; CHECK-NEXT:    ; def s4
; CHECK-NEXT:    ;;#ASMEND
; CHECK-NEXT:    s_and_b32 s4, s8, s4
; CHECK-NEXT:    v_mov_b32_e32 v0, s4
; CHECK-NEXT:    s_setpc_b64 s[30:31]
  %phys = call i16 asm sideeffect "; def $0", "={s8}"()
  %virt = call i16 asm sideeffect "; def $0", "=s"()
  %r = and i16 %phys, %virt
  ret i16 %r
}

define i8 @inline_asm_i8_in_v_def() {
; CHECK-LABEL: inline_asm_i8_in_v_def:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT:    ;;#ASMSTART
; CHECK-NEXT:    ; def v8
; CHECK-NEXT:    ;;#ASMEND
; CHECK-NEXT:    v_mov_b32_e32 v0, v8
; CHECK-NEXT:    s_setpc_b64 s[30:31]
  %phys = call i8 asm sideeffect "; def $0", "={v8}"()
  ; %virt = call i8 asm sideeffect "; def $0", "=v"()  ; currently fails
  ; %r = and i8 %phys, %virt
  ; ret i8 %r
  ret i8 %phys
}

; currently broken, v8 should be set to v0 & 0xFF
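; (by analogy with the 0xffff mask in the i16 use case above, an AND such as
; "v_and_b32_e32 v8, 0xff, v0" would presumably be expected here)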
define void @inline_asm_i8_in_v_use(i8 %val) {
; CHECK-LABEL: inline_asm_i8_in_v_use:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT:    v_mov_b32_e32 v8, v0
; CHECK-NEXT:    ;;#ASMSTART
; CHECK-NEXT:    ; use v8
; CHECK-NEXT:    ;;#ASMEND
; CHECK-NEXT:    s_setpc_b64 s[30:31]
  call void asm sideeffect "; use $0", "{v8}"(i8 %val)
  ; call void asm sideeffect "; use $0", "v"(i8 %val)  ; currently fails
  ret void
}

define i8 @inline_asm_i8_in_sphys_def() {
; CHECK-LABEL: inline_asm_i8_in_sphys_def:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT:    ;;#ASMSTART
; CHECK-NEXT:    ; def s8
; CHECK-NEXT:    ;;#ASMEND
; CHECK-NEXT:    v_mov_b32_e32 v0, s8
; CHECK-NEXT:    s_setpc_b64 s[30:31]
  %phys = call i8 asm sideeffect "; def $0", "={s8}"()
  ; %virt = call i8 asm sideeffect "; def $0", "=s"()  ; currently fails
  ; %r = and i8 %phys, %virt
  ; ret i8 %r
  ret i8 %phys
}


; Single registers for vector types that fit are fine.
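; A <2 x i16> exactly fills the 32-bit register, so no masking is needed; a
; plain copy (or using the incoming register directly) suffices, as the checks
; below show.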

define void @inline_asm_2xi16_in_v_use(<2 x i16> %val) {
; CHECK-LABEL: inline_asm_2xi16_in_v_use:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT:    v_mov_b32_e32 v8, v0
; CHECK-NEXT:    ;;#ASMSTART
; CHECK-NEXT:    ; use v8
; CHECK-NEXT:    ;;#ASMEND
; CHECK-NEXT:    ;;#ASMSTART
; CHECK-NEXT:    ; use v0
; CHECK-NEXT:    ;;#ASMEND
; CHECK-NEXT:    s_setpc_b64 s[30:31]
  call void asm sideeffect "; use $0", "{v8}"(<2 x i16> %val)
  call void asm sideeffect "; use $0", "v"(<2 x i16> %val)
  ret void
}

define <2 x i16> @inline_asm_2xi16_in_v_def() {
; CHECK-LABEL: inline_asm_2xi16_in_v_def:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT:    ;;#ASMSTART
; CHECK-NEXT:    ; def v8
; CHECK-NEXT:    ;;#ASMEND
; CHECK-NEXT:    ;;#ASMSTART
; CHECK-NEXT:    ; def v0
; CHECK-NEXT:    ;;#ASMEND
; CHECK-NEXT:    v_and_b32_e32 v0, v8, v0
; CHECK-NEXT:    s_setpc_b64 s[30:31]
  %phys = call <2 x i16> asm sideeffect "; def $0", "={v8}"()
  %virt = call <2 x i16> asm sideeffect "; def $0", "=v"()
  %r = and <2 x i16> %phys, %virt
  ret <2 x i16> %r
}

define <2 x i16> @inline_asm_2xi16_in_s_def() {
; CHECK-LABEL: inline_asm_2xi16_in_s_def:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT:    ;;#ASMSTART
; CHECK-NEXT:    ; def s8
; CHECK-NEXT:    ;;#ASMEND
; CHECK-NEXT:    ;;#ASMSTART
; CHECK-NEXT:    ; def s4
; CHECK-NEXT:    ;;#ASMEND
; CHECK-NEXT:    s_and_b32 s4, s8, s4
; CHECK-NEXT:    v_mov_b32_e32 v0, s4
; CHECK-NEXT:    s_setpc_b64 s[30:31]
  %phys = call <2 x i16> asm sideeffect "; def $0", "={s8}"()
  %virt = call <2 x i16> asm sideeffect "; def $0", "=s"()
  %r = and <2 x i16> %phys, %virt
  ret <2 x i16> %r
}