// RUN: mlir-opt %s | mlir-opt | FileCheck %s


// CHECK-LABEL: func @baz
// something to call
llvm.func @baz()

// CHECK-LABEL: func @ops
// CHECK-SAME: (%[[I32:.*]]: i32, %[[FLOAT:.*]]: f32, %[[PTR1:.*]]: !llvm.ptr, %[[PTR2:.*]]: !llvm.ptr, %[[BOOL:.*]]: i1, %[[VPTR1:.*]]: !llvm.vec<2 x ptr>)
func.func @ops(%arg0: i32, %arg1: f32,
               %arg2: !llvm.ptr, %arg3: !llvm.ptr,
               %arg4: i1, %arg5 : !llvm.vec<2x!llvm.ptr>) {
// Integer arithmetic binary operations.
//
// CHECK: {{.*}} = llvm.add %[[I32]], %[[I32]] : i32
// CHECK: {{.*}} = llvm.sub %[[I32]], %[[I32]] : i32
// CHECK: {{.*}} = llvm.mul %[[I32]], %[[I32]] : i32
// CHECK: {{.*}} = llvm.udiv %[[I32]], %[[I32]] : i32
// CHECK: {{.*}} = llvm.sdiv %[[I32]], %[[I32]] : i32
// CHECK: {{.*}} = llvm.urem %[[I32]], %[[I32]] : i32
// CHECK: {{.*}} = llvm.srem %[[I32]], %[[I32]] : i32
// CHECK: %[[SCALAR_PRED0:.+]] = llvm.icmp "ne" %[[I32]], %[[I32]] : i32
// CHECK: {{.*}} = llvm.add %[[SCALAR_PRED0]], %[[SCALAR_PRED0]] : i1
// CHECK: %[[SCALAR_PRED1:.+]] = llvm.icmp "ne" %[[PTR1]], %[[PTR1]] : !llvm.ptr
// CHECK: {{.*}} = llvm.add %[[SCALAR_PRED1]], %[[SCALAR_PRED1]] : i1
// CHECK: %[[VEC_PRED:.+]] = llvm.icmp "ne" %[[VPTR1]], %[[VPTR1]] : !llvm.vec<2 x ptr>
// CHECK: {{.*}} = llvm.add %[[VEC_PRED]], %[[VEC_PRED]] : vector<2xi1>
  %0 = llvm.add %arg0, %arg0 : i32
  %1 = llvm.sub %arg0, %arg0 : i32
  %2 = llvm.mul %arg0, %arg0 : i32
  %3 = llvm.udiv %arg0, %arg0 : i32
  %4 = llvm.sdiv %arg0, %arg0 : i32
  %5 = llvm.urem %arg0, %arg0 : i32
  %6 = llvm.srem %arg0, %arg0 : i32
  %7 = llvm.icmp "ne" %arg0, %arg0 : i32
  %typecheck_7 = llvm.add %7, %7 : i1
  %ptrcmp = llvm.icmp "ne" %arg2, %arg2 : !llvm.ptr
  %typecheck_ptrcmp = llvm.add %ptrcmp, %ptrcmp : i1
  %vptrcmp = llvm.icmp "ne" %arg5, %arg5 : !llvm.vec<2 x ptr>
  %typecheck_vptrcmp = llvm.add %vptrcmp, %vptrcmp : vector<2 x i1>

// Integer overflow flags
// CHECK: {{.*}} = llvm.add %[[I32]], %[[I32]] overflow<nsw> : i32
// CHECK: {{.*}} = llvm.sub %[[I32]], %[[I32]] overflow<nuw> : i32
// CHECK: {{.*}} = llvm.mul %[[I32]], %[[I32]] overflow<nsw, nuw> : i32
// CHECK: {{.*}} = llvm.shl %[[I32]], %[[I32]] overflow<nsw, nuw> : i32
  %add_flag = llvm.add %arg0, %arg0 overflow<nsw> : i32
  %sub_flag = llvm.sub %arg0, %arg0 overflow<nuw> : i32
  %mul_flag = llvm.mul %arg0, %arg0 overflow<nsw, nuw> : i32
  %shl_flag = llvm.shl %arg0, %arg0 overflow<nuw, nsw> : i32

// Integer exact flag.
// CHECK: {{.*}} = llvm.sdiv exact %[[I32]], %[[I32]] : i32
// CHECK: {{.*}} = llvm.udiv exact %[[I32]], %[[I32]] : i32
// CHECK: {{.*}} = llvm.ashr exact %[[I32]], %[[I32]] : i32
// CHECK: {{.*}} = llvm.lshr exact %[[I32]], %[[I32]] : i32
  %sdiv_flag = llvm.sdiv exact %arg0, %arg0 : i32
  %udiv_flag = llvm.udiv exact %arg0, %arg0 : i32
  %ashr_flag = llvm.ashr exact %arg0, %arg0 : i32
  %lshr_flag = llvm.lshr exact %arg0, %arg0 : i32

// Integer disjoint flag.
// CHECK: {{.*}} = llvm.or disjoint %[[I32]], %[[I32]] : i32
  %or_flag = llvm.or disjoint %arg0, %arg0 : i32

// Floating point binary operations.
//
// CHECK: {{.*}} = llvm.fadd %[[FLOAT]], %[[FLOAT]] : f32
// CHECK: {{.*}} = llvm.fsub %[[FLOAT]], %[[FLOAT]] : f32
// CHECK: {{.*}} = llvm.fmul %[[FLOAT]], %[[FLOAT]] : f32
// CHECK: {{.*}} = llvm.fdiv %[[FLOAT]], %[[FLOAT]] : f32
// CHECK: {{.*}} = llvm.frem %[[FLOAT]], %[[FLOAT]] : f32
  %8 = llvm.fadd %arg1, %arg1 : f32
  %9 = llvm.fsub %arg1, %arg1 : f32
  %10 = llvm.fmul %arg1, %arg1 : f32
  %11 = llvm.fdiv %arg1, %arg1 : f32
  %12 = llvm.frem %arg1, %arg1 : f32

// Memory-related operations.
//
// CHECK-NEXT: %[[ALLOCA:.*]] = llvm.alloca %[[I32]] x f64 : (i32) -> !llvm.ptr
// CHECK-NEXT: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][%[[I32]]] : (!llvm.ptr, i32) -> !llvm.ptr, f64
// CHECK-NEXT: %[[VALUE:.*]] = llvm.load %[[GEP]] : !llvm.ptr -> f64
// CHECK-NEXT: llvm.store %[[VALUE]], %[[ALLOCA]] : f64, !llvm.ptr
  %13 = llvm.alloca %arg0 x f64 : (i32) -> !llvm.ptr
  %14 = llvm.getelementptr %13[%arg0] : (!llvm.ptr, i32) -> !llvm.ptr, f64
  %15 = llvm.load %14 : !llvm.ptr -> f64
  llvm.store %15, %13 : f64, !llvm.ptr

// Function call-related operations.
//
// CHECK: %[[STRUCT:.*]] = llvm.call @foo(%[[I32]]) : (i32) -> !llvm.struct<(i32, f64, i32)>
// CHECK: %[[VALUE:.*]] = llvm.extractvalue %[[STRUCT]][0] : !llvm.struct<(i32, f64, i32)>
// CHECK: %[[NEW_STRUCT:.*]] = llvm.insertvalue %[[VALUE]], %[[STRUCT]][2] : !llvm.struct<(i32, f64, i32)>
// CHECK: %[[FUNC:.*]] = llvm.mlir.addressof @foo : !llvm.ptr
// CHECK: %{{.*}} = llvm.call %[[FUNC]](%[[I32]]) : !llvm.ptr, (i32) -> !llvm.struct<(i32, f64, i32)>
  %17 = llvm.call @foo(%arg0) : (i32) -> !llvm.struct<(i32, f64, i32)>
  %18 = llvm.extractvalue %17[0] : !llvm.struct<(i32, f64, i32)>
  %19 = llvm.insertvalue %18, %17[2] : !llvm.struct<(i32, f64, i32)>
  %20 = llvm.mlir.addressof @foo : !llvm.ptr
  %21 = llvm.call %20(%arg0) : !llvm.ptr, (i32) -> !llvm.struct<(i32, f64, i32)>

// Variadic calls
// CHECK: llvm.call @vararg_func(%arg0, %arg0) vararg(!llvm.func<void (i32, ...)>) : (i32, i32) -> ()
// CHECK: llvm.call @vararg_func(%arg0, %arg0) vararg(!llvm.func<void (i32, ...)>) {fastmathFlags = #llvm.fastmath<fast>} : (i32, i32) -> ()
// CHECK: %[[VARIADIC_FUNC:.*]] = llvm.mlir.addressof @vararg_func : !llvm.ptr
// CHECK: llvm.call %[[VARIADIC_FUNC]](%[[I32]], %[[I32]]) vararg(!llvm.func<void (i32, ...)>) : !llvm.ptr, (i32, i32) -> ()
// CHECK: llvm.call %[[VARIADIC_FUNC]](%[[I32]], %[[I32]]) vararg(!llvm.func<void (i32, ...)>) {fastmathFlags = #llvm.fastmath<fast>} : !llvm.ptr, (i32, i32) -> ()
  llvm.call @vararg_func(%arg0, %arg0) vararg(!llvm.func<void (i32, ...)>) : (i32, i32) -> ()
  llvm.call @vararg_func(%arg0, %arg0) vararg(!llvm.func<void (i32, ...)>) {fastmathFlags = #llvm.fastmath<fast>} : (i32, i32) -> ()
  %variadic_func = llvm.mlir.addressof @vararg_func : !llvm.ptr
  llvm.call %variadic_func(%arg0, %arg0) vararg(!llvm.func<void (i32, ...)>) : !llvm.ptr, (i32, i32) -> ()
  llvm.call %variadic_func(%arg0, %arg0) vararg(!llvm.func<void (i32, ...)>) {fastmathFlags = #llvm.fastmath<fast>} : !llvm.ptr, (i32, i32) -> ()

// Function call attributes
// CHECK: llvm.call @baz() {convergent} : () -> ()
  llvm.call @baz() {convergent} : () -> ()

// CHECK: llvm.call @baz() {no_unwind} : () -> ()
  llvm.call @baz() {no_unwind} : () -> ()

// CHECK: llvm.call @baz() {will_return} : () -> ()
  llvm.call @baz() {will_return} : () -> ()

// CHECK: llvm.call @baz() {memory = #llvm.memory_effects<other = none, argMem = read, inaccessibleMem = write>} : () -> ()
  llvm.call @baz() {memory = #llvm.memory_effects<other = none, argMem = read, inaccessibleMem = write>} : () -> ()

// Terminator operations and their successors.
//
// CHECK: llvm.br ^[[BB1:.*]]
  llvm.br ^bb1

// CHECK: ^[[BB1]]
^bb1:
// CHECK: llvm.cond_br %7, ^[[BB2:.*]], ^[[BB3:.*]]
  llvm.cond_br %7, ^bb2, ^bb3

// CHECK: ^[[BB2]]
^bb2:
// CHECK: %{{.*}} = llvm.mlir.undef : !llvm.struct<(i32, f64, i32)>
// CHECK: %{{.*}} = llvm.mlir.constant(42 : i64) : i47
  %22 = llvm.mlir.undef : !llvm.struct<(i32, f64, i32)>
  %23 = llvm.mlir.constant(42) : i47
  // CHECK: llvm.switch %0 : i32, ^[[BB3]] [
  // CHECK-NEXT:   1: ^[[BB4:.*]],
  // CHECK-NEXT:   2: ^[[BB5:.*]],
  // CHECK-NEXT:   3: ^[[BB6:.*]]
  // CHECK-NEXT: ]
  llvm.switch %0 : i32, ^bb3 [
    1: ^bb4,
    2: ^bb5,
    3: ^bb6
  ]

// CHECK: ^[[BB3]]
^bb3:
// CHECK: llvm.switch %0 : i32, ^[[BB7:.*]] [
// CHECK-NEXT: ]
  llvm.switch %0 : i32, ^bb7 [
  ]

// CHECK: ^[[BB4]]
^bb4:
  llvm.switch %0 : i32, ^bb7 [
  ]

// CHECK: ^[[BB5]]
^bb5:
  llvm.switch %0 : i32, ^bb7 [
  ]

// CHECK: ^[[BB6]]
^bb6:
  llvm.switch %0 : i32, ^bb7 [
  ]

// CHECK: ^[[BB7]]
^bb7:
// Misc operations.
// CHECK: %{{.*}} = llvm.select %{{.*}}, %{{.*}}, %{{.*}} : i1, i32
  %24 = llvm.select %7, %0, %1 : i1, i32

// Integer to pointer and pointer to integer conversions.
//
// CHECK: %[[PTR:.*]] = llvm.inttoptr %[[I32]] : i32 to !llvm.ptr
// CHECK: %{{.*}} = llvm.ptrtoint %[[PTR]] : !llvm.ptr to i32
  %25 = llvm.inttoptr %arg0 : i32 to !llvm.ptr
  %26 = llvm.ptrtoint %25 : !llvm.ptr to i32

// Extended and Quad floating point
//
// CHECK: %{{.*}} = llvm.fpext %[[FLOAT]] : f32 to f80
// CHECK: %{{.*}} = llvm.fpext %[[FLOAT]] : f32 to f128
  %27 = llvm.fpext %arg1 : f32 to f80
  %28 = llvm.fpext %arg1 : f32 to f128

// CHECK: %{{.*}} = llvm.fneg %[[FLOAT]] : f32
  %29 = llvm.fneg %arg1 : f32

// CHECK: llvm.intr.sin(%[[FLOAT]]) : (f32) -> f32
  %30 = llvm.intr.sin(%arg1) : (f32) -> f32

// CHECK: llvm.intr.pow(%[[FLOAT]], %[[FLOAT]]) : (f32, f32) -> f32
  %31 = llvm.intr.pow(%arg1, %arg1) : (f32, f32) -> f32

// CHECK: llvm.intr.powi(%[[FLOAT]], %[[I32]]) : (f32, i32) -> f32
  %a31 = llvm.intr.powi(%arg1, %arg0) : (f32, i32) -> f32

// CHECK: llvm.intr.bitreverse(%{{.*}}) : (i32) -> i32
  %32 = llvm.intr.bitreverse(%arg0) : (i32) -> i32

// CHECK: llvm.intr.ctpop(%{{.*}}) : (i32) -> i32
  %33 = llvm.intr.ctpop(%arg0) : (i32) -> i32

// CHECK: llvm.intr.round(%[[FLOAT]]) : (f32) -> f32
  %34 = llvm.intr.round(%arg1) : (f32) -> f32

// CHECK: "llvm.intr.memcpy"(%{{.*}}, %{{.*}}, %{{.*}}) <{isVolatile = false}> : (!llvm.ptr, !llvm.ptr, i32) -> ()
  "llvm.intr.memcpy"(%arg2, %arg3, %arg0) <{isVolatile = false}> : (!llvm.ptr, !llvm.ptr, i32) -> ()

// CHECK: "llvm.intr.memcpy"(%{{.*}}, %{{.*}}, %{{.*}}) <{isVolatile = false}> : (!llvm.ptr, !llvm.ptr, i32) -> ()
  "llvm.intr.memcpy"(%arg2, %arg3, %arg0) <{isVolatile = false}> : (!llvm.ptr, !llvm.ptr, i32) -> ()

// CHECK: "llvm.intr.memcpy.inline"(%{{.*}}, %{{.*}}) <{isVolatile = false, len = 10 : i64}> : (!llvm.ptr, !llvm.ptr) -> ()
  "llvm.intr.memcpy.inline"(%arg2, %arg3) <{isVolatile = false, len = 10 : i64}> : (!llvm.ptr, !llvm.ptr) -> ()

// CHECK: llvm.return
  llvm.return
}

// CHECK-LABEL: @gep
llvm.func @gep(%ptr: !llvm.ptr, %idx: i64, %ptr2: !llvm.ptr) {
  // CHECK: llvm.getelementptr %{{.*}}[%{{.*}}, 1, 0] : (!llvm.ptr, i64) -> !llvm.ptr, !llvm.struct<(i32, struct<(i32, f32)>)>
  llvm.getelementptr %ptr[%idx, 1, 0] : (!llvm.ptr, i64) -> !llvm.ptr, !llvm.struct<(i32, struct<(i32, f32)>)>
  // CHECK: llvm.getelementptr inbounds %{{.*}}[%{{.*}}, 0, %{{.*}}] : (!llvm.ptr, i64, i64) -> !llvm.ptr, !llvm.struct<(array<10 x f32>)>
  llvm.getelementptr inbounds %ptr2[%idx, 0, %idx] : (!llvm.ptr, i64, i64) -> !llvm.ptr, !llvm.struct<(array<10 x f32>)>
  llvm.return
}

llvm.func @vararg_foo(i32, ...) -> !llvm.struct<(i32, f64, i32)>

// A larger self-contained function.
// CHECK-LABEL: llvm.func @foo(%{{.*}}: i32) -> !llvm.struct<(i32, f64, i32)> {
llvm.func @foo(%arg0: i32) -> !llvm.struct<(i32, f64, i32)> {
// CHECK: %[[V0:.*]] = llvm.mlir.constant(3 : i64) : i32
// CHECK: %[[V1:.*]] = llvm.mlir.constant(3 : i64) : i32
// CHECK: %[[V2:.*]] = llvm.mlir.constant(4.200000e+01 : f64) : f64
// CHECK: %[[V3:.*]] = llvm.mlir.constant(4.200000e+01 : f64) : f64
// CHECK: %[[V4:.*]] = llvm.add %[[V0]], %[[V1]] : i32
// CHECK: %[[V5:.*]] = llvm.mul %[[V4]], %[[V1]] : i32
// CHECK: %[[V6:.*]] = llvm.fadd %[[V2]], %[[V3]] : f64
// CHECK: %[[V7:.*]] = llvm.fsub %[[V3]], %[[V6]] : f64
// CHECK: %[[V8:.*]] = llvm.mlir.constant(1 : i64) : i1
// CHECK: llvm.cond_br %[[V8]], ^[[BB1:.*]](%[[V4]] : i32), ^[[BB2:.*]](%[[V4]] : i32)
  %0 = llvm.mlir.constant(3) : i32
  %1 = llvm.mlir.constant(3) : i32
  %2 = llvm.mlir.constant(4.200000e+01) : f64
  %3 = llvm.mlir.constant(4.200000e+01) : f64
  %4 = llvm.add %0, %1 : i32
  %5 = llvm.mul %4, %1 : i32
  %6 = llvm.fadd %2, %3 : f64
  %7 = llvm.fsub %3, %6 : f64
  %8 = llvm.mlir.constant(1) : i1
  llvm.cond_br %8, ^bb1(%4 : i32), ^bb2(%4 : i32)

// CHECK:^[[BB1]](%[[V9:.*]]: i32):
// CHECK: %[[V10:.*]] = llvm.call @foo(%[[V9]]) : (i32) -> !llvm.struct<(i32, f64, i32)>
// CHECK: %[[V11:.*]] = llvm.extractvalue %[[V10]][0] : !llvm.struct<(i32, f64, i32)>
// CHECK: %[[V12:.*]] = llvm.extractvalue %[[V10]][1] : !llvm.struct<(i32, f64, i32)>
// CHECK: %[[V13:.*]] = llvm.extractvalue %[[V10]][2] : !llvm.struct<(i32, f64, i32)>
// CHECK: %[[V14:.*]] = llvm.mlir.undef : !llvm.struct<(i32, f64, i32)>
// CHECK: %[[V15:.*]] = llvm.insertvalue %[[V5]], %[[V14]][0] : !llvm.struct<(i32, f64, i32)>
// CHECK: %[[V16:.*]] = llvm.insertvalue %[[V7]], %[[V15]][1] : !llvm.struct<(i32, f64, i32)>
// CHECK: %[[V17:.*]] = llvm.insertvalue %[[V11]], %[[V16]][2] : !llvm.struct<(i32, f64, i32)>
// CHECK: llvm.return %[[V17]] : !llvm.struct<(i32, f64, i32)>
^bb1(%9: i32):
  %10 = llvm.call @foo(%9) : (i32) -> !llvm.struct<(i32, f64, i32)>
  %11 = llvm.extractvalue %10[0] : !llvm.struct<(i32, f64, i32)>
  %12 = llvm.extractvalue %10[1] : !llvm.struct<(i32, f64, i32)>
  %13 = llvm.extractvalue %10[2] : !llvm.struct<(i32, f64, i32)>
  %14 = llvm.mlir.undef : !llvm.struct<(i32, f64, i32)>
  %15 = llvm.insertvalue %5, %14[0] : !llvm.struct<(i32, f64, i32)>
  %16 = llvm.insertvalue %7, %15[1] : !llvm.struct<(i32, f64, i32)>
  %17 = llvm.insertvalue %11, %16[2] : !llvm.struct<(i32, f64, i32)>
  llvm.return %17 : !llvm.struct<(i32, f64, i32)>

// CHECK:^[[BB2]](%[[V18:.*]]: i32):
// CHECK: %[[V19:.*]] = llvm.mlir.undef : !llvm.struct<(i32, f64, i32)>
// CHECK: %[[V20:.*]] = llvm.insertvalue %[[V18]], %[[V19]][0] : !llvm.struct<(i32, f64, i32)>
// CHECK: %[[V21:.*]] = llvm.insertvalue %[[V7]], %[[V20]][1] : !llvm.struct<(i32, f64, i32)>
// CHECK: %[[V22:.*]] = llvm.insertvalue %[[V5]], %[[V21]][2] : !llvm.struct<(i32, f64, i32)>
// CHECK: llvm.return %[[V22]] : !llvm.struct<(i32, f64, i32)>
^bb2(%18: i32):
  %19 = llvm.mlir.undef : !llvm.struct<(i32, f64, i32)>
  %20 = llvm.insertvalue %18, %19[0] : !llvm.struct<(i32, f64, i32)>
  %21 = llvm.insertvalue %7, %20[1] : !llvm.struct<(i32, f64, i32)>
  %22 = llvm.insertvalue %5, %21[2] : !llvm.struct<(i32, f64, i32)>
  llvm.return %22 : !llvm.struct<(i32, f64, i32)>
}

// CHECK-LABEL: @casts
// CHECK-SAME: (%[[I32:.*]]: i32, %[[I64:.*]]: i64, %[[V4I32:.*]]: vector<4xi32>, %[[V4I64:.*]]: vector<4xi64>, %[[PTR:.*]]: !llvm.ptr)
func.func @casts(%arg0: i32, %arg1: i64, %arg2: vector<4xi32>,
                 %arg3: vector<4xi64>, %arg4: !llvm.ptr) {
// CHECK: = llvm.sext %[[I32]] : i32 to i56
  %0 = llvm.sext %arg0 : i32 to i56
// CHECK: = llvm.zext %[[I32]] : i32 to i64
  %1 = llvm.zext %arg0 : i32 to i64
// CHECK: = llvm.trunc %[[I64]] : i64 to i56
  %2 = llvm.trunc %arg1 : i64 to i56
// CHECK: = llvm.sext %[[V4I32]] : vector<4xi32> to vector<4xi56>
  %3 = llvm.sext %arg2 : vector<4xi32> to vector<4xi56>
// CHECK: = llvm.zext %[[V4I32]] : vector<4xi32> to vector<4xi64>
  %4 = llvm.zext %arg2 : vector<4xi32> to vector<4xi64>
// CHECK: = llvm.trunc %[[V4I64]] : vector<4xi64> to vector<4xi56>
  %5 = llvm.trunc %arg3 : vector<4xi64> to vector<4xi56>
// CHECK: = llvm.sitofp %[[I32]] : i32 to f32
  %6 = llvm.sitofp %arg0 : i32 to f32
// CHECK: %[[FLOAT:.*]] = llvm.uitofp %[[I32]] : i32 to f32
  %7 = llvm.uitofp %arg0 : i32 to f32
// CHECK: = llvm.fptosi %[[FLOAT]] : f32 to i32
  %8 = llvm.fptosi %7 : f32 to i32
// CHECK: = llvm.fptoui %[[FLOAT]] : f32 to i32
  %9 = llvm.fptoui %7 : f32 to i32
// CHECK: = llvm.addrspacecast %[[PTR]] : !llvm.ptr to !llvm.ptr<2>
  %10 = llvm.addrspacecast %arg4 : !llvm.ptr to !llvm.ptr<2>
// CHECK: = llvm.bitcast %[[I64]] : i64 to f64
  %11 = llvm.bitcast %arg1 : i64 to f64
  llvm.return
}

// CHECK-LABEL: @nneg_casts
// CHECK-SAME: (%[[I32:.*]]: i32, %[[I64:.*]]: i64, %[[V4I32:.*]]: vector<4xi32>, %[[V4I64:.*]]: vector<4xi64>, %[[PTR:.*]]: !llvm.ptr)
func.func @nneg_casts(%arg0: i32, %arg1: i64, %arg2: vector<4xi32>,
                      %arg3: vector<4xi64>, %arg4: !llvm.ptr) {
// CHECK: = llvm.zext nneg %[[I32]] : i32 to i64
  %0 = llvm.zext nneg %arg0 : i32 to i64
// CHECK: = llvm.zext nneg %[[V4I32]] : vector<4xi32> to vector<4xi64>
  %4 = llvm.zext nneg %arg2 : vector<4xi32> to vector<4xi64>
// CHECK: = llvm.uitofp nneg %[[I32]] : i32 to f32
  %7 = llvm.uitofp nneg %arg0 : i32 to f32
  llvm.return
}

// CHECK-LABEL: @casts_overflow
// CHECK-SAME: (%[[I32:.*]]: i32, %[[I64:.*]]: i64, %[[V4I32:.*]]: vector<4xi32>, %[[V4I64:.*]]: vector<4xi64>, %[[PTR:.*]]: !llvm.ptr)
func.func @casts_overflow(%arg0: i32, %arg1: i64, %arg2: vector<4xi32>,
                          %arg3: vector<4xi64>, %arg4: !llvm.ptr) {
// CHECK: = llvm.trunc %[[I64]] overflow<nsw> : i64 to i56
  %0 = llvm.trunc %arg1 overflow<nsw> : i64 to i56
// CHECK: = llvm.trunc %[[I64]] overflow<nuw> : i64 to i56
  %1 = llvm.trunc %arg1 overflow<nuw> : i64 to i56
// CHECK: = llvm.trunc %[[I64]] overflow<nsw, nuw> : i64 to i56
  %2 = llvm.trunc %arg1 overflow<nsw, nuw> : i64 to i56
// CHECK: = llvm.trunc %[[I64]] overflow<nsw, nuw> : i64 to i56
  %3 = llvm.trunc %arg1 overflow<nuw, nsw> : i64 to i56
// CHECK: = llvm.trunc %[[V4I64]] overflow<nsw> : vector<4xi64> to vector<4xi56>
  %4 = llvm.trunc %arg3 overflow<nsw> : vector<4xi64> to vector<4xi56>
  llvm.return
}

// CHECK-LABEL: @vect
func.func @vect(%arg0: vector<4xf32>, %arg1: i32, %arg2: f32, %arg3: !llvm.vec<2 x ptr>) {
// CHECK: = llvm.extractelement {{.*}} : vector<4xf32>
  %0 = llvm.extractelement %arg0[%arg1 : i32] : vector<4xf32>
// CHECK: = llvm.insertelement {{.*}} : vector<4xf32>
  %1 = llvm.insertelement %arg2, %arg0[%arg1 : i32] : vector<4xf32>
// CHECK: = llvm.shufflevector {{.*}} [0, 0, 0, 0, 7] : vector<4xf32>
  %2 = llvm.shufflevector %arg0, %arg0 [0, 0, 0, 0, 7] : vector<4xf32>
// CHECK: = llvm.shufflevector %{{.+}}, %{{.+}} [1, 0] : !llvm.vec<2 x ptr>
  %3 = llvm.shufflevector %arg3, %arg3 [1, 0] : !llvm.vec<2 x ptr>
// CHECK: = llvm.mlir.constant(dense<1.000000e+00> : vector<4xf32>) : vector<4xf32>
  %4 = llvm.mlir.constant(dense<1.0> : vector<4xf32>) : vector<4xf32>
  return
}

// CHECK-LABEL: @scalable_vect
func.func @scalable_vect(%arg0: vector<[4]xf32>, %arg1: i32, %arg2: f32) {
// CHECK: = llvm.extractelement {{.*}} : vector<[4]xf32>
  %0 = llvm.extractelement %arg0[%arg1 : i32] : vector<[4]xf32>
// CHECK: = llvm.insertelement {{.*}} : vector<[4]xf32>
  %1 = llvm.insertelement %arg2, %arg0[%arg1 : i32] : vector<[4]xf32>
// CHECK: = llvm.shufflevector {{.*}} [0, 0, 0, 0] : vector<[4]xf32>
  %2 = llvm.shufflevector %arg0, %arg0 [0, 0, 0, 0] : vector<[4]xf32>
// CHECK: = llvm.mlir.constant(dense<1.000000e+00> : vector<[4]xf32>) : vector<[4]xf32>
  %3 = llvm.mlir.constant(dense<1.0> : vector<[4]xf32>) : vector<[4]xf32>
  return
}

// CHECK-LABEL: @mixed_vect
func.func @mixed_vect(%arg0: vector<8xf32>, %arg1: vector<4xf32>, %arg2: vector<[4]xf32>) {
  // CHECK: = llvm.intr.vector.insert {{.*}} : vector<8xf32> into vector<[4]xf32>
  %0 = llvm.intr.vector.insert %arg0, %arg2[0] : vector<8xf32> into vector<[4]xf32>
  // CHECK: = llvm.intr.vector.insert {{.*}} : vector<4xf32> into vector<[4]xf32>
  %1 = llvm.intr.vector.insert %arg1, %arg2[0] : vector<4xf32> into vector<[4]xf32>
  // CHECK: = llvm.intr.vector.insert {{.*}} : vector<4xf32> into vector<[4]xf32>
  %2 = llvm.intr.vector.insert %arg1, %1[4] : vector<4xf32> into vector<[4]xf32>
  // CHECK: = llvm.intr.vector.insert {{.*}} : vector<4xf32> into vector<8xf32>
  %3 = llvm.intr.vector.insert %arg1, %arg0[4] : vector<4xf32> into vector<8xf32>
  // CHECK: = llvm.intr.vector.extract {{.*}} : vector<8xf32> from vector<[4]xf32>
  %4 = llvm.intr.vector.extract %2[0] : vector<8xf32> from vector<[4]xf32>
  // CHECK: = llvm.intr.vector.extract {{.*}} : vector<2xf32> from vector<8xf32>
  %5 = llvm.intr.vector.extract %arg0[6] : vector<2xf32> from vector<8xf32>
  return
}

// CHECK-LABEL: @vector_interleave2
func.func @vector_interleave2(%vec1: vector<[4]xf16>, %vec2 : vector<[4]xf16>) {
  // CHECK: = "llvm.intr.vector.interleave2"({{.*}}) : (vector<[4]xf16>, vector<[4]xf16>) -> vector<[8]xf16>
  %0 = "llvm.intr.vector.interleave2"(%vec1, %vec2) : (vector<[4]xf16>, vector<[4]xf16>) -> vector<[8]xf16>
  return
}

// CHECK-LABEL: @vector_deinterleave2
func.func @vector_deinterleave2(%vec: vector<[8]xf16>) {
  // CHECK: = "llvm.intr.vector.deinterleave2"({{.*}}) : (vector<[8]xf16>) -> !llvm.struct<(vector<[4]xf16>, vector<[4]xf16>)>
  %0 = "llvm.intr.vector.deinterleave2"(%vec) : (vector<[8]xf16>) -> !llvm.struct<(vector<[4]xf16>, vector<[4]xf16>)>
  return
}

// CHECK-LABEL: @alloca
func.func @alloca(%size : i64) {
  // CHECK: llvm.alloca %{{.*}} x i32 : (i64) -> !llvm.ptr
  llvm.alloca %size x i32 {alignment = 0} : (i64) -> (!llvm.ptr)
  // CHECK: llvm.alloca inalloca %{{.*}} x i32 {alignment = 8 : i64} : (i64) -> !llvm.ptr
  llvm.alloca inalloca %size x i32 {alignment = 8} : (i64) -> (!llvm.ptr)
  llvm.return
}

// CHECK-LABEL: @null
func.func @null() {
  // CHECK: llvm.mlir.zero : !llvm.ptr
  %0 = llvm.mlir.zero : !llvm.ptr
  llvm.return
}

// CHECK-LABEL: @zero
func.func @zero() {
  // CHECK: llvm.mlir.zero : i8
  %0 = llvm.mlir.zero : i8
  llvm.return
}

// CHECK-LABEL: @atomic_load
func.func @atomic_load(%ptr : !llvm.ptr) {
  // CHECK: llvm.load %{{.*}} atomic monotonic {alignment = 4 : i64} : !llvm.ptr -> f32
  %0 = llvm.load %ptr atomic monotonic {alignment = 4 : i64} : !llvm.ptr -> f32
  // CHECK: llvm.load volatile %{{.*}} atomic syncscope("singlethread") monotonic {alignment = 16 : i64} : !llvm.ptr -> f32
  %1 = llvm.load volatile %ptr atomic syncscope("singlethread") monotonic {alignment = 16 : i64} : !llvm.ptr -> f32
  // CHECK: llvm.load %{{.*}} atomic monotonic {alignment = 4 : i64} : !llvm.ptr -> i128
  %2 = llvm.load %ptr atomic monotonic {alignment = 4 : i64} : !llvm.ptr -> i128
  llvm.return
}

// CHECK-LABEL: @atomic_store
func.func @atomic_store(%val : f32, %large_val : i256, %ptr : !llvm.ptr) {
  // CHECK: llvm.store %{{.*}}, %{{.*}} atomic monotonic {alignment = 4 : i64} : f32, !llvm.ptr
  llvm.store %val, %ptr atomic monotonic {alignment = 4 : i64} : f32, !llvm.ptr
  // CHECK: llvm.store volatile %{{.*}}, %{{.*}} atomic syncscope("singlethread") monotonic {alignment = 16 : i64} : f32, !llvm.ptr
  llvm.store volatile %val, %ptr atomic syncscope("singlethread") monotonic {alignment = 16 : i64} : f32, !llvm.ptr
  // CHECK: llvm.store %{{.*}}, %{{.*}} atomic monotonic {alignment = 4 : i64} : i256, !llvm.ptr
  llvm.store %large_val, %ptr atomic monotonic {alignment = 4 : i64} : i256, !llvm.ptr
  llvm.return
}

// CHECK-LABEL: @atomicrmw
func.func @atomicrmw(%ptr : !llvm.ptr, %f32 : f32, %f16_vec : vector<2xf16>) {
  // CHECK: llvm.atomicrmw fadd %{{.*}}, %{{.*}} monotonic : !llvm.ptr, f32
  %0 = llvm.atomicrmw fadd %ptr, %f32 monotonic : !llvm.ptr, f32
  // CHECK: llvm.atomicrmw volatile fsub %{{.*}}, %{{.*}} syncscope("singlethread") monotonic {alignment = 16 : i64} : !llvm.ptr, f32
  %1 = llvm.atomicrmw volatile fsub %ptr, %f32 syncscope("singlethread") monotonic {alignment = 16 : i64} : !llvm.ptr, f32
  // CHECK: llvm.atomicrmw fmin %{{.*}}, %{{.*}} monotonic : !llvm.ptr, vector<2xf16>
  %2 = llvm.atomicrmw fmin %ptr, %f16_vec monotonic : !llvm.ptr, vector<2xf16>
  llvm.return
}

// CHECK-LABEL: @cmpxchg
func.func @cmpxchg(%ptr : !llvm.ptr, %cmp : i32, %new : i32) {
  // CHECK: llvm.cmpxchg %{{.*}}, %{{.*}}, %{{.*}} acq_rel monotonic : !llvm.ptr, i32
  %0 = llvm.cmpxchg %ptr, %cmp, %new acq_rel monotonic : !llvm.ptr, i32
  // CHECK: llvm.cmpxchg weak volatile %{{.*}}, %{{.*}}, %{{.*}} syncscope("singlethread") acq_rel monotonic {alignment = 16 : i64} : !llvm.ptr, i32
  %1 = llvm.cmpxchg weak volatile %ptr, %cmp, %new syncscope("singlethread") acq_rel monotonic {alignment = 16 : i64} : !llvm.ptr, i32
  llvm.return
}

// CHECK-LABEL: @invariant_load
func.func @invariant_load(%ptr : !llvm.ptr) -> i32 {
  // CHECK: llvm.load %{{.+}} invariant {alignment = 4 : i64} : !llvm.ptr -> i32
  %0 = llvm.load %ptr invariant {alignment = 4 : i64} : !llvm.ptr -> i32
  func.return %0 : i32
}

// CHECK-LABEL: @invariant_group_load
func.func @invariant_group_load(%ptr : !llvm.ptr) -> i32 {
  // CHECK: llvm.load %{{.+}} invariant_group {alignment = 4 : i64} : !llvm.ptr -> i32
  %0 = llvm.load %ptr invariant_group {alignment = 4 : i64} : !llvm.ptr -> i32
  func.return %0 : i32
}

// CHECK-LABEL: @invariant_group_store
func.func @invariant_group_store(%val: i32, %ptr : !llvm.ptr) {
  // CHECK: llvm.store %{{.+}}, %{{.+}} invariant_group : i32, !llvm.ptr
  llvm.store %val, %ptr invariant_group : i32, !llvm.ptr
  func.return
}

llvm.mlir.global external constant @_ZTIi() : !llvm.ptr
llvm.func @bar(!llvm.ptr, !llvm.ptr, !llvm.ptr)
llvm.func @__gxx_personality_v0(...) -> i32

// CHECK-LABEL: @invokeLandingpad
llvm.func @invokeLandingpad() -> i32 attributes { personality = @__gxx_personality_v0 } {
// CHECK: %[[V0:.*]] = llvm.mlir.constant(0 : i32) : i32
// CHECK: %{{.*}} = llvm.mlir.constant(3 : i32) : i32
// CHECK: %[[V1:.*]] = llvm.mlir.constant("\01") : !llvm.array<1 x i8>
// CHECK: %[[V2:.*]] = llvm.mlir.zero : !llvm.ptr
// CHECK: %[[V3:.*]] = llvm.mlir.addressof @_ZTIi : !llvm.ptr
// CHECK: %[[V4:.*]] = llvm.mlir.constant(1 : i32) : i32
// CHECK: %[[V5:.*]] = llvm.alloca %[[V4]] x i8 : (i32) -> !llvm.ptr
// CHECK: %{{.*}} = llvm.invoke @foo(%[[V4]]) to ^[[BB2:.*]] unwind ^[[BB1:.*]] : (i32) -> !llvm.struct<(i32, f64, i32)>
  %0 = llvm.mlir.constant(0 : i32) : i32
  %1 = llvm.mlir.constant(3 : i32) : i32
  %2 = llvm.mlir.constant("\01") : !llvm.array<1 x i8>
  %3 = llvm.mlir.zero : !llvm.ptr
  %4 = llvm.mlir.addressof @_ZTIi : !llvm.ptr
  %5 = llvm.mlir.constant(1 : i32) : i32
  %6 = llvm.alloca %5 x i8 : (i32) -> !llvm.ptr
  %7 = llvm.invoke @foo(%5) to ^bb2 unwind ^bb1 : (i32) -> !llvm.struct<(i32, f64, i32)>

// CHECK: ^[[BB1]]:
// CHECK: %[[lp:.*]] = llvm.landingpad cleanup (catch %[[V2]] : !llvm.ptr) (catch %[[V3]] : !llvm.ptr) (filter %[[V1]] : !llvm.array<1 x i8>) : !llvm.struct<(ptr, i32)>
// CHECK: %{{.*}} = llvm.intr.eh.typeid.for %[[V3]] : (!llvm.ptr) -> i32
// CHECK: llvm.resume %[[lp]] : !llvm.struct<(ptr, i32)>
^bb1:
  %10 = llvm.landingpad cleanup (catch %3 : !llvm.ptr) (catch %4 : !llvm.ptr) (filter %2 : !llvm.array<1 x i8>) : !llvm.struct<(ptr, i32)>
  %11 = llvm.intr.eh.typeid.for %4 : (!llvm.ptr) -> i32
  llvm.resume %10 : !llvm.struct<(ptr, i32)>

// CHECK: ^[[BB2]]:
// CHECK: llvm.return %[[V4]] : i32
^bb2:
  llvm.return %5 : i32

// CHECK: ^[[BB3:.*]]:
// CHECK: llvm.invoke @bar(%[[V5]], %[[V3]], %[[V2]]) to ^[[BB2]] unwind ^[[BB1]] : (!llvm.ptr, !llvm.ptr, !llvm.ptr) -> ()
^bb3:
  llvm.invoke @bar(%6, %4, %3) to ^bb2 unwind ^bb1 : (!llvm.ptr, !llvm.ptr, !llvm.ptr) -> ()

// CHECK: ^[[BB4:.*]]:
// CHECK: %[[FUNC:.*]] = llvm.mlir.addressof @foo : !llvm.ptr
// CHECK: %{{.*}} = llvm.invoke %[[FUNC]]{{.*}}: !llvm.ptr,
^bb4:
  %12 = llvm.mlir.addressof @foo : !llvm.ptr
  %13 = llvm.invoke %12(%5) to ^bb2 unwind ^bb1 : !llvm.ptr, (i32) -> !llvm.struct<(i32, f64, i32)>

// CHECK: ^[[BB5:.*]]:
// CHECK: %{{.*}} = llvm.invoke @{{.*}} vararg(!llvm.func<struct<(i32, f64, i32)> (i32, ...)>) : (i32, i32) -> !llvm.struct<(i32, f64, i32)>

^bb5:
  %14 = llvm.invoke @vararg_foo(%5, %5) to ^bb2 unwind ^bb1 vararg(!llvm.func<struct<(i32, f64, i32)> (i32, ...)>) : (i32, i32) -> !llvm.struct<(i32, f64, i32)>

// CHECK: ^[[BB6:.*]]:
// CHECK: %[[FUNC:.*]] = llvm.mlir.addressof @vararg_foo : !llvm.ptr
// CHECK: %{{.*}} = llvm.invoke %[[FUNC]]{{.*}} vararg(!llvm.func<struct<(i32, f64, i32)> (i32, ...)>) : !llvm.ptr, (i32, i32) -> !llvm.struct<(i32, f64, i32)>
^bb6:
  %15 = llvm.mlir.addressof @vararg_foo : !llvm.ptr
  %16 = llvm.invoke %15(%5, %5) to ^bb2 unwind ^bb1 vararg(!llvm.func<!llvm.struct<(i32, f64, i32)> (i32, ...)>) : !llvm.ptr, (i32, i32) -> !llvm.struct<(i32, f64, i32)>

// CHECK: ^[[BB7:.*]]:
// CHECK: llvm.return %[[V0]] : i32
^bb7:
  llvm.return %0 : i32
}

// CHECK-LABEL: @useFreezeOp
func.func @useFreezeOp(%arg0: i32) {
  // CHECK: = llvm.freeze %[[ARG0:.*]] : i32
  %0 = llvm.freeze %arg0 : i32
  // CHECK: %[[UNDEF:.*]] = llvm.mlir.undef : i8
  %1 = llvm.mlir.undef : i8
  // CHECK: = llvm.freeze %[[UNDEF]] : i8
  %2 = llvm.freeze %1 : i8
  // CHECK: %[[POISON:.*]] = llvm.mlir.poison : i8
  %3 = llvm.mlir.poison : i8
  // CHECK: = llvm.freeze %[[POISON]] : i8
  %4 = llvm.freeze %3 : i8
  return
}

// CHECK-LABEL: @useFenceInst
func.func @useFenceInst() {
  // CHECK: syncscope("agent") seq_cst
  llvm.fence syncscope("agent") seq_cst
  // CHECK: seq_cst
  llvm.fence syncscope("") seq_cst
  // CHECK: release
  llvm.fence release
  return
}

// CHECK-LABEL: @useInlineAsm
llvm.func @useInlineAsm(%arg0: i32) {
  // CHECK: llvm.inline_asm {{.*}} (i32) -> i8
  %0 = llvm.inline_asm "bswap $0", "=r,r" %arg0 : (i32) -> i8

  // CHECK-NEXT: llvm.inline_asm {{.*}} (i32, i32) -> i8
  %1 = llvm.inline_asm "foo", "bar" %arg0, %arg0 : (i32, i32) -> i8

  // CHECK-NEXT: llvm.inline_asm has_side_effects {{.*}} (i32, i32) -> i8
  %2 = llvm.inline_asm has_side_effects "foo", "bar" %arg0, %arg0 : (i32, i32) -> i8

  // CHECK-NEXT: llvm.inline_asm is_align_stack {{.*}} (i32, i32) -> i8
  %3 = llvm.inline_asm is_align_stack "foo", "bar" %arg0, %arg0 : (i32, i32) -> i8

  // CHECK-NEXT: llvm.inline_asm "foo", "=r,=r,r" {{.*}} : (i32) -> !llvm.struct<(i8, i8)>
  %5 = llvm.inline_asm "foo", "=r,=r,r" %arg0 : (i32) -> !llvm.struct<(i8, i8)>

  llvm.return
}

// CHECK-LABEL: @fastmathFlags
func.func @fastmathFlags(%arg0: f32, %arg1: f32, %arg2: i32, %arg3: vector<2 x f32>, %arg4: vector<2 x f32>) {
// CHECK: {{.*}} = llvm.fadd %arg0, %arg1 {fastmathFlags = #llvm.fastmath<fast>} : f32
// CHECK: {{.*}} = llvm.fsub %arg0, %arg1 {fastmathFlags = #llvm.fastmath<fast>} : f32
// CHECK: {{.*}} = llvm.fmul %arg0, %arg1 {fastmathFlags = #llvm.fastmath<fast>} : f32
// CHECK: {{.*}} = llvm.fdiv %arg0, %arg1 {fastmathFlags = #llvm.fastmath<fast>} : f32
// CHECK: {{.*}} = llvm.frem %arg0, %arg1 {fastmathFlags = #llvm.fastmath<fast>} : f32
  %0 = llvm.fadd %arg0, %arg1 {fastmathFlags = #llvm.fastmath<fast>} : f32
  %1 = llvm.fsub %arg0, %arg1 {fastmathFlags = #llvm.fastmath<fast>} : f32
  %2 = llvm.fmul %arg0, %arg1 {fastmathFlags = #llvm.fastmath<fast>} : f32
  %3 = llvm.fdiv %arg0, %arg1 {fastmathFlags = #llvm.fastmath<fast>} : f32
  %4 = llvm.frem %arg0, %arg1 {fastmathFlags = #llvm.fastmath<fast>} : f32

// CHECK: %[[SCALAR_PRED0:.+]] = llvm.fcmp "oeq" %arg0, %arg1 {fastmathFlags = #llvm.fastmath<fast>} : f32
  %5 = llvm.fcmp "oeq" %arg0, %arg1 {fastmathFlags = #llvm.fastmath<fast>} : f32
// CHECK: %{{.*}} = llvm.add %[[SCALAR_PRED0]], %[[SCALAR_PRED0]] : i1
  %typecheck_5 = llvm.add %5, %5 : i1
// CHECK: %[[VEC_PRED0:.+]] = llvm.fcmp "oeq" %arg3, %arg4 {fastmathFlags = #llvm.fastmath<fast>} : vector<2xf32>
  %vcmp = llvm.fcmp "oeq" %arg3, %arg4 {fastmathFlags = #llvm.fastmath<fast>} : vector<2xf32>
// CHECK: %{{.*}} = llvm.add %[[VEC_PRED0]], %[[VEC_PRED0]] : vector<2xi1>
  %typecheck_vcmp = llvm.add %vcmp, %vcmp : vector<2xi1>

// CHECK: {{.*}} = llvm.fneg %arg0 {fastmathFlags = #llvm.fastmath<fast>} : f32
  %6 = llvm.fneg %arg0 {fastmathFlags = #llvm.fastmath<fast>} : f32

// CHECK: {{.*}} = llvm.call @foo(%arg2) {fastmathFlags = #llvm.fastmath<fast>} : (i32) -> !llvm.struct<(i32, f64, i32)>
  %7 = llvm.call @foo(%arg2) {fastmathFlags = #llvm.fastmath<fast>} : (i32) -> !llvm.struct<(i32, f64, i32)>

// CHECK: {{.*}} = llvm.fadd %arg0, %arg1 : f32
  %8 = llvm.fadd %arg0, %arg1 {fastmathFlags = #llvm.fastmath<none>} : f32
// CHECK: {{.*}} = llvm.fadd %arg0, %arg1 {fastmathFlags = #llvm.fastmath<nnan, ninf>} : f32
  %9 = llvm.fadd %arg0, %arg1 {fastmathFlags = #llvm.fastmath<nnan,ninf>} : f32

// CHECK: {{.*}} = llvm.fneg %arg0 : f32
  %10 = llvm.fneg %arg0 {fastmathFlags = #llvm.fastmath<none>} : f32

// CHECK: {{.*}} = llvm.intr.sin(%arg0) {fastmathFlags = #llvm.fastmath<fast>} : (f32) -> f32
  %11 = llvm.intr.sin(%arg0) {fastmathFlags = #llvm.fastmath<fast>} : (f32) -> f32
// CHECK: {{.*}} = llvm.intr.sin(%arg0) {fastmathFlags = #llvm.fastmath<afn>} : (f32) -> f32
  %12 = llvm.intr.sin(%arg0) {fastmathFlags = #llvm.fastmath<afn>} : (f32) -> f32

// CHECK: {{.*}} = llvm.intr.vector.reduce.fmin(%arg3) {fastmathFlags = #llvm.fastmath<nnan>} : (vector<2xf32>) -> f32
  %13 = llvm.intr.vector.reduce.fmin(%arg3) {fastmathFlags = #llvm.fastmath<nnan>} : (vector<2xf32>) -> f32
// CHECK: {{.*}} = llvm.intr.vector.reduce.fmax(%arg3) {fastmathFlags = #llvm.fastmath<nnan>} : (vector<2xf32>) -> f32
  %14 = llvm.intr.vector.reduce.fmax(%arg3) {fastmathFlags = #llvm.fastmath<nnan>} : (vector<2xf32>) -> f32
// CHECK: {{.*}} = llvm.intr.vector.reduce.fminimum(%arg3) {fastmathFlags = #llvm.fastmath<nnan>} : (vector<2xf32>) -> f32
  %15 = llvm.intr.vector.reduce.fminimum(%arg3) {fastmathFlags = #llvm.fastmath<nnan>} : (vector<2xf32>) -> f32
// CHECK: {{.*}} = llvm.intr.vector.reduce.fmaximum(%arg3) {fastmathFlags = #llvm.fastmath<nnan>} : (vector<2xf32>) -> f32
  %16 = llvm.intr.vector.reduce.fmaximum(%arg3) {fastmathFlags = #llvm.fastmath<nnan>} : (vector<2xf32>) -> f32
  return
}

// CHECK-LABEL: @lifetime
// CHECK-SAME: %[[P:.*]]: !llvm.ptr
llvm.func @lifetime(%p: !llvm.ptr) {
  // CHECK: llvm.intr.lifetime.start 16, %[[P]]
  llvm.intr.lifetime.start 16, %p : !llvm.ptr
  // CHECK: llvm.intr.lifetime.end 16, %[[P]]
  llvm.intr.lifetime.end 16, %p : !llvm.ptr
  llvm.return
}

// CHECK-LABEL: @invariant
// CHECK-SAME: %[[P:.*]]: !llvm.ptr
llvm.func @invariant(%p: !llvm.ptr) {
  // CHECK: %[[START:.*]] = llvm.intr.invariant.start 1, %[[P]] : !llvm.ptr
  %1 = llvm.intr.invariant.start 1, %p : !llvm.ptr
  // CHECK: llvm.intr.invariant.end %[[START]], 1, %[[P]] : !llvm.ptr
  llvm.intr.invariant.end %1, 1, %p : !llvm.ptr
  llvm.return
}

// CHECK-LABEL: @invariant_group_intrinsics
// CHECK-SAME: %[[P:.+]]: !llvm.ptr
llvm.func @invariant_group_intrinsics(%p: !llvm.ptr) {
  // CHECK: %{{.+}} = llvm.intr.launder.invariant.group %[[P]] : !llvm.ptr
  %1 = llvm.intr.launder.invariant.group %p : !llvm.ptr
  // CHECK: %{{.+}} = llvm.intr.strip.invariant.group %[[P]] : !llvm.ptr
  %2 = llvm.intr.strip.invariant.group %p : !llvm.ptr
  llvm.return
}

// CHECK-LABEL: @vararg_func
llvm.func @vararg_func(%arg0: i32,
...) { 705 // CHECK: %[[C:.*]] = llvm.mlir.constant(1 : i32) 706 // CHECK: %[[LIST:.*]] = llvm.alloca 707 // CHECK: llvm.intr.vastart %[[LIST]] : !llvm.ptr{{$}} 708 %1 = llvm.mlir.constant(1 : i32) : i32 709 %list = llvm.alloca %1 x !llvm.struct<"struct.va_list_opaque", (ptr)> : (i32) -> !llvm.ptr 710 llvm.intr.vastart %list : !llvm.ptr 711 712 // CHECK: %[[LIST2:.*]] = llvm.alloca 713 // CHECK: llvm.intr.vacopy %[[LIST]] to %[[LIST2]] : !llvm.ptr, !llvm.ptr{{$}} 714 %list2 = llvm.alloca %1 x !llvm.struct<"struct.va_list_opaque", (ptr)> : (i32) -> !llvm.ptr 715 llvm.intr.vacopy %list to %list2 : !llvm.ptr, !llvm.ptr 716 717 // CHECK: %[[RET:.+]] = llvm.va_arg %[[LIST2]] : (!llvm.ptr) -> i32 718 %ret = llvm.va_arg %list2 : (!llvm.ptr) -> i32 719 720 // CHECK: llvm.intr.vaend %[[LIST]] : !llvm.ptr{{$}} 721 // CHECK: llvm.intr.vaend %[[LIST2]] : !llvm.ptr{{$}} 722 llvm.intr.vaend %list : !llvm.ptr 723 llvm.intr.vaend %list2 : !llvm.ptr 724 llvm.return 725} 726 727// CHECK-LABEL: @eh_typeid 728// CHECK-SAME: %[[ARG0:.*]]: !llvm.ptr 729llvm.func @eh_typeid(%arg0: !llvm.ptr) -> i32 { 730 // CHECK: llvm.intr.eh.typeid.for %[[ARG0]] : (!llvm.ptr) -> i32 731 %0 = llvm.intr.eh.typeid.for %arg0 : (!llvm.ptr) -> i32 732 llvm.return %0 : i32 733} 734 735// CHECK-LABEL: @stackrestore 736// CHECK-SAME: %[[ARG0:.*]]: !llvm.ptr 737llvm.func @stackrestore(%arg0: !llvm.ptr) { 738 // CHECK: llvm.intr.stackrestore %[[ARG0]] : !llvm.ptr 739 llvm.intr.stackrestore %arg0 : !llvm.ptr 740 llvm.return 741} 742 743#alias_scope_domain = #llvm.alias_scope_domain<id = distinct[0]<>, description = "The domain"> 744#alias_scope = #llvm.alias_scope<id = distinct[0]<>, domain = #alias_scope_domain, description = "The domain"> 745 746// CHECK-LABEL: @experimental_noalias_scope_decl 747llvm.func @experimental_noalias_scope_decl() { 748 // CHECK: llvm.intr.experimental.noalias.scope.decl #{{.*}} 749 llvm.intr.experimental.noalias.scope.decl #alias_scope 750 llvm.return 751} 752 753#alias_scope_domain2 
= #llvm.alias_scope_domain<id = "domainid", description = "The domain"> 754#alias_scope2 = #llvm.alias_scope<id = "stringid", domain = #alias_scope_domain2, description = "The domain"> 755 756// CHECK-LABEL: @experimental_noalias_scope_with_string_id 757llvm.func @experimental_noalias_scope_with_string_id() { 758 // CHECK: llvm.intr.experimental.noalias.scope.decl #{{.*}} 759 llvm.intr.experimental.noalias.scope.decl #alias_scope2 760 llvm.return 761} 762 763// CHECK-LABEL: @experimental_constrained_fptrunc 764llvm.func @experimental_constrained_fptrunc(%in: f64) { 765 // CHECK: llvm.intr.experimental.constrained.fptrunc %{{.*}} towardzero ignore : f64 to f32 766 %0 = llvm.intr.experimental.constrained.fptrunc %in towardzero ignore : f64 to f32 767 // CHECK: llvm.intr.experimental.constrained.fptrunc %{{.*}} tonearest maytrap : f64 to f32 768 %1 = llvm.intr.experimental.constrained.fptrunc %in tonearest maytrap : f64 to f32 769 // CHECK: llvm.intr.experimental.constrained.fptrunc %{{.*}} upward strict : f64 to f32 770 %2 = llvm.intr.experimental.constrained.fptrunc %in upward strict : f64 to f32 771 // CHECK: llvm.intr.experimental.constrained.fptrunc %{{.*}} downward ignore : f64 to f32 772 %3 = llvm.intr.experimental.constrained.fptrunc %in downward ignore : f64 to f32 773 // CHECK: llvm.intr.experimental.constrained.fptrunc %{{.*}} tonearestaway ignore : f64 to f32 774 %4 = llvm.intr.experimental.constrained.fptrunc %in tonearestaway ignore : f64 to f32 775 llvm.return 776} 777 778// CHECK: llvm.func @tail_call_target() -> i32 779llvm.func @tail_call_target() -> i32 780 781// CHECK-LABEL: @test_none 782llvm.func @test_none() -> i32 { 783 // CHECK-NEXT: llvm.call @tail_call_target() : () -> i32 784 %0 = llvm.call none @tail_call_target() : () -> i32 785 llvm.return %0 : i32 786} 787 788// CHECK-LABEL: @test_default 789llvm.func @test_default() -> i32 { 790 // CHECK-NEXT: llvm.call @tail_call_target() : () -> i32 791 %0 = llvm.call @tail_call_target() : () -> i32 
792 llvm.return %0 : i32 793} 794 795// CHECK-LABEL: @test_musttail 796llvm.func @test_musttail() -> i32 { 797 // CHECK-NEXT: llvm.call musttail @tail_call_target() : () -> i32 798 %0 = llvm.call musttail @tail_call_target() : () -> i32 799 llvm.return %0 : i32 800} 801 802// CHECK-LABEL: @test_tail 803llvm.func @test_tail() -> i32 { 804 // CHECK-NEXT: llvm.call tail @tail_call_target() : () -> i32 805 %0 = llvm.call tail @tail_call_target() : () -> i32 806 llvm.return %0 : i32 807} 808 809// CHECK-LABEL: @test_notail 810llvm.func @test_notail() -> i32 { 811 // CHECK-NEXT: llvm.call notail @tail_call_target() : () -> i32 812 %0 = llvm.call notail @tail_call_target() : () -> i32 813 llvm.return %0 : i32 814} 815 816// CHECK-LABEL: @vector_predication_intrinsics 817// CHECK-SAME: (%[[ARG0:.*]]: vector<8xi32>, %[[ARG1:.*]]: vector<8xi32>, %[[ARG2:.*]]: vector<8xi1>, %[[ARG3:.*]]: i32) 818llvm.func @vector_predication_intrinsics(%A: vector<8xi32>, %B: vector<8xi32>, 819 %mask: vector<8xi1>, %evl: i32) { 820 // CHECK-NEXT: "llvm.intr.vp.smax"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]]) 821 "llvm.intr.vp.smax" (%A, %B, %mask, %evl) : 822 (vector<8xi32>, vector<8xi32>, vector<8xi1>, i32) -> vector<8xi32> 823 // CHECK-NEXT: "llvm.intr.vp.smin"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]]) 824 "llvm.intr.vp.smin" (%A, %B, %mask, %evl) : 825 (vector<8xi32>, vector<8xi32>, vector<8xi1>, i32) -> vector<8xi32> 826 // CHECK-NEXT: "llvm.intr.vp.umax"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]]) 827 "llvm.intr.vp.umax" (%A, %B, %mask, %evl) : 828 (vector<8xi32>, vector<8xi32>, vector<8xi1>, i32) -> vector<8xi32> 829 // CHECK-NEXT: "llvm.intr.vp.umin"(%[[ARG0]], %[[ARG1]], %[[ARG2]], %[[ARG3]]) 830 "llvm.intr.vp.umin" (%A, %B, %mask, %evl) : 831 (vector<8xi32>, vector<8xi32>, vector<8xi1>, i32) -> vector<8xi32> 832 llvm.return 833} 834 835llvm.func @op_bundle_target() 836 837// CHECK-LABEL: @test_call_with_empty_opbundle 838llvm.func @test_call_with_empty_opbundle() { 839 // CHECK: 
llvm.call @op_bundle_target() : () -> () 840 llvm.call @op_bundle_target() [] : () -> () 841 llvm.return 842} 843 844// CHECK-LABEL: @test_call_with_empty_opbundle_operands 845llvm.func @test_call_with_empty_opbundle_operands() { 846 // CHECK: llvm.call @op_bundle_target() ["tag"()] : () -> () 847 llvm.call @op_bundle_target() ["tag"()] : () -> () 848 llvm.return 849} 850 851// CHECK-LABEL: @test_call_with_opbundle 852llvm.func @test_call_with_opbundle() { 853 %0 = llvm.mlir.constant(0 : i32) : i32 854 %1 = llvm.mlir.constant(1 : i32) : i32 855 %2 = llvm.mlir.constant(2 : i32) : i32 856 // CHECK: llvm.call @op_bundle_target() ["tag1"(%{{.+}}, %{{.+}} : i32, i32), "tag2"(%{{.+}} : i32)] : () -> () 857 llvm.call @op_bundle_target() ["tag1"(%0, %1 : i32, i32), "tag2"(%2 : i32)] : () -> () 858 llvm.return 859} 860 861// CHECK-LABEL: @test_invoke_with_empty_opbundle 862llvm.func @test_invoke_with_empty_opbundle() attributes { personality = @__gxx_personality_v0 } { 863 %0 = llvm.mlir.constant(1 : i32) : i32 864 %1 = llvm.mlir.constant(2 : i32) : i32 865 %2 = llvm.mlir.constant(3 : i32) : i32 866 // CHECK: llvm.invoke @op_bundle_target() to ^{{.+}} unwind ^{{.+}} : () -> () 867 llvm.invoke @op_bundle_target() to ^bb2 unwind ^bb1 [] : () -> () 868 869^bb1: 870 %3 = llvm.landingpad cleanup : !llvm.struct<(ptr, i32)> 871 llvm.return 872 873^bb2: 874 llvm.return 875} 876 877// CHECK-LABEL: @test_invoke_with_empty_opbundle_operands 878llvm.func @test_invoke_with_empty_opbundle_operands() attributes { personality = @__gxx_personality_v0 } { 879 %0 = llvm.mlir.constant(1 : i32) : i32 880 %1 = llvm.mlir.constant(2 : i32) : i32 881 %2 = llvm.mlir.constant(3 : i32) : i32 882 // CHECK: llvm.invoke @op_bundle_target() to ^{{.+}} unwind ^{{.+}} ["tag"()] : () -> () 883 llvm.invoke @op_bundle_target() to ^bb2 unwind ^bb1 ["tag"()] : () -> () 884 885^bb1: 886 %3 = llvm.landingpad cleanup : !llvm.struct<(ptr, i32)> 887 llvm.return 888 889^bb2: 890 llvm.return 891} 892 893// CHECK-LABEL: 
@test_invoke_with_opbundle 894llvm.func @test_invoke_with_opbundle() attributes { personality = @__gxx_personality_v0 } { 895 %0 = llvm.mlir.constant(1 : i32) : i32 896 %1 = llvm.mlir.constant(2 : i32) : i32 897 %2 = llvm.mlir.constant(3 : i32) : i32 898 // CHECK: llvm.invoke @op_bundle_target() to ^{{.+}} unwind ^{{.+}} ["tag1"(%{{.+}}, %{{.+}} : i32, i32), "tag2"(%{{.+}} : i32)] : () -> () 899 llvm.invoke @op_bundle_target() to ^bb2 unwind ^bb1 ["tag1"(%0, %1 : i32, i32), "tag2"(%2 : i32)] : () -> () 900 901^bb1: 902 %3 = llvm.landingpad cleanup : !llvm.struct<(ptr, i32)> 903 llvm.return 904 905^bb2: 906 llvm.return 907} 908 909// CHECK-LABEL: @test_call_intrin_with_opbundle 910llvm.func @test_call_intrin_with_opbundle(%arg0 : !llvm.ptr) { 911 %0 = llvm.mlir.constant(1 : i1) : i1 912 %1 = llvm.mlir.constant(16 : i32) : i32 913 // CHECK: llvm.call_intrinsic "llvm.assume"(%{{.+}}) ["align"(%{{.+}}, %{{.+}} : !llvm.ptr, i32)] : (i1) -> () 914 llvm.call_intrinsic "llvm.assume"(%0) ["align"(%arg0, %1 : !llvm.ptr, i32)] : (i1) -> () 915 llvm.return 916} 917 918// CHECK-LABEL: @test_assume_intr_no_opbundle 919llvm.func @test_assume_intr_no_opbundle(%arg0 : !llvm.ptr) { 920 %0 = llvm.mlir.constant(1 : i1) : i1 921 // CHECK: llvm.intr.assume %0 : i1 922 llvm.intr.assume %0 : i1 923 llvm.return 924} 925 926// CHECK-LABEL: @test_assume_intr_empty_opbundle 927llvm.func @test_assume_intr_empty_opbundle(%arg0 : !llvm.ptr) { 928 %0 = llvm.mlir.constant(1 : i1) : i1 929 // CHECK: llvm.intr.assume %0 : i1 930 llvm.intr.assume %0 [] : i1 931 llvm.return 932} 933 934// CHECK-LABEL: @test_assume_intr_with_opbundles 935llvm.func @test_assume_intr_with_opbundles(%arg0 : !llvm.ptr) { 936 %0 = llvm.mlir.constant(1 : i1) : i1 937 %1 = llvm.mlir.constant(2 : i32) : i32 938 %2 = llvm.mlir.constant(3 : i32) : i32 939 %3 = llvm.mlir.constant(4 : i32) : i32 940 // CHECK: llvm.intr.assume %0 ["tag1"(%1, %2 : i32, i32), "tag2"(%3 : i32)] : i1 941 llvm.intr.assume %0 ["tag1"(%1, %2 : i32, i32), 
"tag2"(%3 : i32)] : i1 942 llvm.return 943} 944