; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=instcombine -S < %s | FileCheck %s

declare double @llvm.powi.f64.i32(double, i32)
declare float @llvm.powi.f32.i32(float, i32)
declare double @llvm.powi.f64.i64(double, i64)
declare double @llvm.fabs.f64(double)
declare double @llvm.copysign.f64(double, double)
declare void @use(double)

define double @powi_fneg_even_int(double %x) {
; CHECK-LABEL: @powi_fneg_even_int(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[R:%.*]] = tail call double @llvm.powi.f64.i32(double [[X:%.*]], i32 4)
; CHECK-NEXT:    ret double [[R]]
;
entry:
  %fneg = fneg double %x
  %r = tail call double @llvm.powi.f64.i32(double %fneg, i32 4)
  ret double %r
}

define double @powi_fabs_even_int(double %x) {
; CHECK-LABEL: @powi_fabs_even_int(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[R:%.*]] = tail call double @llvm.powi.f64.i32(double [[X:%.*]], i32 4)
; CHECK-NEXT:    ret double [[R]]
;
entry:
  %f = tail call double @llvm.fabs.f64(double %x)
  %r = tail call double @llvm.powi.f64.i32(double %f, i32 4)
  ret double %r
}

define double @powi_copysign_even_int(double %x, double %y) {
; CHECK-LABEL: @powi_copysign_even_int(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[R:%.*]] = tail call double @llvm.powi.f64.i32(double [[X:%.*]], i32 4)
; CHECK-NEXT:    ret double [[R]]
;
entry:
  %cs = tail call double @llvm.copysign.f64(double %x, double %y)
  %r = tail call double @llvm.powi.f64.i32(double %cs, i32 4)
  ret double %r
}

define double @powi_fneg_odd_int(double %x) {
; CHECK-LABEL: @powi_fneg_odd_int(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[FNEG:%.*]] = fneg double [[X:%.*]]
; CHECK-NEXT:    [[R:%.*]] = tail call double @llvm.powi.f64.i32(double [[FNEG]], i32 5)
; CHECK-NEXT:    ret double [[R]]
;
entry:
  %fneg = fneg double %x
  %r = tail call double @llvm.powi.f64.i32(double %fneg, i32 5)
  ret double %r
}

define double @powi_fabs_odd_int(double %x) {
; CHECK-LABEL: @powi_fabs_odd_int(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[F:%.*]] = tail call double @llvm.fabs.f64(double [[X:%.*]])
; CHECK-NEXT:    [[R:%.*]] = tail call double @llvm.powi.f64.i32(double [[F]], i32 5)
; CHECK-NEXT:    ret double [[R]]
;
entry:
  %f = tail call double @llvm.fabs.f64(double %x)
  %r = tail call double @llvm.powi.f64.i32(double %f, i32 5)
  ret double %r
}

define double @powi_copysign_odd_int(double %x, double %y) {
; CHECK-LABEL: @powi_copysign_odd_int(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CS:%.*]] = tail call double @llvm.copysign.f64(double [[X:%.*]], double [[Y:%.*]])
; CHECK-NEXT:    [[R:%.*]] = tail call double @llvm.powi.f64.i32(double [[CS]], i32 5)
; CHECK-NEXT:    ret double [[R]]
;
entry:
  %cs = tail call double @llvm.copysign.f64(double %x, double %y)
  %r = tail call double @llvm.powi.f64.i32(double %cs, i32 5)
  ret double %r
}

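; Negative test: Missing reassoc flag on the fmul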
define double @powi_fmul_arg0_no_reassoc(double %x, i32 %i) {
; CHECK-LABEL: @powi_fmul_arg0_no_reassoc(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[POW:%.*]] = tail call double @llvm.powi.f64.i32(double [[X:%.*]], i32 [[I:%.*]])
; CHECK-NEXT:    [[MUL:%.*]] = fmul double [[POW]], [[X]]
; CHECK-NEXT:    ret double [[MUL]]
;
entry:
  %pow = tail call double @llvm.powi.f64.i32(double %x, i32 %i)
  %mul = fmul double %pow, %x
  ret double %mul
}


define double @powi_fmul_arg0(double %x, i32 %i) {
; CHECK-LABEL: @powi_fmul_arg0(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[POW:%.*]] = tail call double @llvm.powi.f64.i32(double [[X:%.*]], i32 [[I:%.*]])
; CHECK-NEXT:    [[MUL:%.*]] = fmul reassoc double [[POW]], [[X]]
; CHECK-NEXT:    ret double [[MUL]]
;
entry:
  %pow = tail call double @llvm.powi.f64.i32(double %x, i32 %i)
  %mul = fmul reassoc double %pow, %x
  ret double %mul
}

define double @powi_fmul_arg0_use(double %x, i32 %i) {
; CHECK-LABEL: @powi_fmul_arg0_use(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[POW:%.*]] = tail call double @llvm.powi.f64.i32(double [[X:%.*]], i32 [[I:%.*]])
; CHECK-NEXT:    tail call void @use(double [[POW]])
; CHECK-NEXT:    [[MUL:%.*]] = fmul reassoc double [[POW]], [[X]]
; CHECK-NEXT:    ret double [[MUL]]
;
entry:
  %pow = tail call double @llvm.powi.f64.i32(double %x, i32 %i)
  tail call void @use(double %pow)
  %mul = fmul reassoc double %pow, %x
  ret double %mul
}

; Negative test: Missing reassoc flag on fmul
define double @powi_fmul_powi_no_reassoc1(double %x, i32 %y, i32 %z) {
; CHECK-LABEL: @powi_fmul_powi_no_reassoc1(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[P1:%.*]] = tail call reassoc double @llvm.powi.f64.i32(double [[X:%.*]], i32 [[Y:%.*]])
; CHECK-NEXT:    [[P2:%.*]] = tail call reassoc double @llvm.powi.f64.i32(double [[X]], i32 [[Z:%.*]])
; CHECK-NEXT:    [[MUL:%.*]] = fmul double [[P2]], [[P1]]
; CHECK-NEXT:    ret double [[MUL]]
;
entry:
  %p1 = tail call reassoc double @llvm.powi.f64.i32(double %x, i32 %y)
  %p2 = tail call reassoc double @llvm.powi.f64.i32(double %x, i32 %z)
  %mul = fmul double %p2, %p1
  ret double %mul
}

; Negative test: Missing reassoc flag on 2nd operand
define double @powi_fmul_powi_no_reassoc2(double %x, i32 %y, i32 %z) {
; CHECK-LABEL: @powi_fmul_powi_no_reassoc2(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[P1:%.*]] = tail call reassoc double @llvm.powi.f64.i32(double [[X:%.*]], i32 [[Y:%.*]])
; CHECK-NEXT:    [[P2:%.*]] = tail call double @llvm.powi.f64.i32(double [[X]], i32 [[Z:%.*]])
; CHECK-NEXT:    [[MUL:%.*]] = fmul reassoc double [[P2]], [[P1]]
; CHECK-NEXT:    ret double [[MUL]]
;
entry:
  %p1 = tail call reassoc double @llvm.powi.f64.i32(double %x, i32 %y)
  %p2 = tail call double @llvm.powi.f64.i32(double %x, i32 %z)
  %mul = fmul reassoc double %p2, %p1
  ret double %mul
}

; Negative test: Missing reassoc flag on 1st operand
define double @powi_fmul_powi_no_reassoc3(double %x, i32 %y, i32 %z) {
; CHECK-LABEL: @powi_fmul_powi_no_reassoc3(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[P1:%.*]] = tail call double @llvm.powi.f64.i32(double [[X:%.*]], i32 [[Y:%.*]])
; CHECK-NEXT:    [[P2:%.*]] = tail call reassoc double @llvm.powi.f64.i32(double [[X]], i32 [[Z:%.*]])
; CHECK-NEXT:    [[MUL:%.*]] = fmul reassoc double [[P2]], [[P1]]
; CHECK-NEXT:    ret double [[MUL]]
;
entry:
  %p1 = tail call double @llvm.powi.f64.i32(double %x, i32 %y)
  %p2 = tail call reassoc double @llvm.powi.f64.i32(double %x, i32 %z)
  %mul = fmul reassoc double %p2, %p1
  ret double %mul
}

; All of the fmul and its operands should have the reassoc flags
define double @powi_fmul_powi(double %x, i32 %y, i32 %z) {
; CHECK-LABEL: @powi_fmul_powi(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = add i32 [[Z:%.*]], [[Y:%.*]]
; CHECK-NEXT:    [[MUL:%.*]] = call reassoc double @llvm.powi.f64.i32(double [[X:%.*]], i32 [[TMP0]])
; CHECK-NEXT:    ret double [[MUL]]
;
entry:
  %p1 = tail call reassoc double @llvm.powi.f64.i32(double %x, i32 %y)
  %p2 = tail call reassoc double @llvm.powi.f64.i32(double %x, i32 %z)
  %mul = fmul reassoc double %p2, %p1
  ret double %mul
}

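; The fast flag implies reassoc, so the fold still applies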
define double @powi_fmul_powi_fast_on_fmul(double %x, i32 %y, i32 %z) {
; CHECK-LABEL: @powi_fmul_powi_fast_on_fmul(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = add i32 [[Z:%.*]], [[Y:%.*]]
; CHECK-NEXT:    [[MUL:%.*]] = call fast double @llvm.powi.f64.i32(double [[X:%.*]], i32 [[TMP0]])
; CHECK-NEXT:    ret double [[MUL]]
;
entry:
  %p1 = tail call fast double @llvm.powi.f64.i32(double %x, i32 %y)
  %p2 = tail call fast double @llvm.powi.f64.i32(double %x, i32 %z)
  %mul = fmul fast double %p2, %p1
  ret double %mul
}

define double @powi_fmul_powi_fast_on_powi(double %x, i32 %y, i32 %z) {
; CHECK-LABEL: @powi_fmul_powi_fast_on_powi(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[P1:%.*]] = tail call fast double @llvm.powi.f64.i32(double [[X:%.*]], i32 [[Y:%.*]])
; CHECK-NEXT:    [[P2:%.*]] = tail call fast double @llvm.powi.f64.i32(double [[X]], i32 [[Z:%.*]])
; CHECK-NEXT:    [[MUL:%.*]] = fmul double [[P2]], [[P1]]
; CHECK-NEXT:    ret double [[MUL]]
;
entry:
  %p1 = tail call fast double @llvm.powi.f64.i32(double %x, i32 %y)
  %p2 = tail call fast double @llvm.powi.f64.i32(double %x, i32 %z)
  %mul = fmul double %p2, %p1
  ret double %mul
}

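; powi(X, Y) * powi(X, Y) --> powi(X, Y + Y)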
define double @powi_fmul_powi_same_power(double %x, i32 %y, i32 %z) {
; CHECK-LABEL: @powi_fmul_powi_same_power(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = shl i32 [[Y:%.*]], 1
; CHECK-NEXT:    [[MUL:%.*]] = call reassoc double @llvm.powi.f64.i32(double [[X:%.*]], i32 [[TMP0]])
; CHECK-NEXT:    ret double [[MUL]]
;
entry:
  %p1 = tail call reassoc double @llvm.powi.f64.i32(double %x, i32 %y)
  %p2 = tail call reassoc double @llvm.powi.f64.i32(double %x, i32 %y)
  %mul = fmul reassoc double %p2, %p1
  ret double %mul
}

define double @powi_fmul_powi_different_integer_types(double %x, i32 %y, i16 %z) {
; CHECK-LABEL: @powi_fmul_powi_different_integer_types(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[P1:%.*]] = tail call reassoc double @llvm.powi.f64.i32(double [[X:%.*]], i32 [[Y:%.*]])
; CHECK-NEXT:    [[P2:%.*]] = tail call reassoc double @llvm.powi.f64.i16(double [[X]], i16 [[Z:%.*]])
; CHECK-NEXT:    [[MUL:%.*]] = fmul reassoc double [[P2]], [[P1]]
; CHECK-NEXT:    ret double [[MUL]]
;
entry:
  %p1 = tail call reassoc double @llvm.powi.f64.i32(double %x, i32 %y)
  %p2 = tail call reassoc double @llvm.powi.f64.i16(double %x, i16 %z)
  %mul = fmul reassoc double %p2, %p1
  ret double %mul
}

define double @powi_fmul_powi_use_first(double %x, i32 %y, i32 %z) {
; CHECK-LABEL: @powi_fmul_powi_use_first(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[P1:%.*]] = tail call reassoc double @llvm.powi.f64.i32(double [[X:%.*]], i32 [[Y:%.*]])
; CHECK-NEXT:    tail call void @use(double [[P1]])
; CHECK-NEXT:    [[TMP0:%.*]] = add i32 [[Y]], [[Z:%.*]]
; CHECK-NEXT:    [[MUL:%.*]] = call reassoc double @llvm.powi.f64.i32(double [[X]], i32 [[TMP0]])
; CHECK-NEXT:    ret double [[MUL]]
;
entry:
  %p1 = tail call reassoc double @llvm.powi.f64.i32(double %x, i32 %y)
  tail call void @use(double %p1)
  %p2 = tail call reassoc double @llvm.powi.f64.i32(double %x, i32 %z)
  %mul = fmul reassoc double %p1, %p2
  ret double %mul
}

define double @powi_fmul_powi_use_second(double %x, i32 %y, i32 %z) {
; CHECK-LABEL: @powi_fmul_powi_use_second(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[P1:%.*]] = tail call reassoc double @llvm.powi.f64.i32(double [[X:%.*]], i32 [[Z:%.*]])
; CHECK-NEXT:    tail call void @use(double [[P1]])
; CHECK-NEXT:    [[TMP0:%.*]] = add i32 [[Y:%.*]], [[Z]]
; CHECK-NEXT:    [[MUL:%.*]] = call reassoc double @llvm.powi.f64.i32(double [[X]], i32 [[TMP0]])
; CHECK-NEXT:    ret double [[MUL]]
;
entry:
  %p1 = tail call reassoc double @llvm.powi.f64.i32(double %x, i32 %z)
  tail call void @use(double %p1)
  %p2 = tail call reassoc double @llvm.powi.f64.i32(double %x, i32 %y)
  %mul = fmul reassoc double %p2, %p1
  ret double %mul
}

define double @powi_fmul_different_base(double %x, double %m, i32 %y, i32 %z) {
; CHECK-LABEL: @powi_fmul_different_base(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[P1:%.*]] = tail call double @llvm.powi.f64.i32(double [[X:%.*]], i32 [[Y:%.*]])
; CHECK-NEXT:    [[P2:%.*]] = tail call double @llvm.powi.f64.i32(double [[M:%.*]], i32 [[Z:%.*]])
; CHECK-NEXT:    [[MUL:%.*]] = fmul reassoc double [[P2]], [[P1]]
; CHECK-NEXT:    ret double [[MUL]]
;
entry:
  %p1 = tail call double @llvm.powi.f64.i32(double %x, i32 %y)
  %p2 = tail call double @llvm.powi.f64.i32(double %m, i32 %z)
  %mul = fmul reassoc double %p2, %p1
  ret double %mul
}

define double @different_types_powi(double %x, i32 %y, i64 %z) {
; CHECK-LABEL: @different_types_powi(
; CHECK-NEXT:    [[P1:%.*]] = tail call double @llvm.powi.f64.i32(double [[X:%.*]], i32 [[Y:%.*]])
; CHECK-NEXT:    [[P2:%.*]] = tail call double @llvm.powi.f64.i64(double [[X]], i64 [[Z:%.*]])
; CHECK-NEXT:    [[MUL:%.*]] = fmul reassoc double [[P2]], [[P1]]
; CHECK-NEXT:    ret double [[MUL]]
;
  %p1 = tail call double @llvm.powi.f64.i32(double %x, i32 %y)
  %p2 = tail call double @llvm.powi.f64.i64(double %x, i64 %z)
  %mul = fmul reassoc double %p2, %p1
  ret double %mul
}

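; powi(X, C1) / X --> powi(X, C1 - 1)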
define double @fdiv_pow_powi(double %x) {
; CHECK-LABEL: @fdiv_pow_powi(
; CHECK-NEXT:    [[DIV:%.*]] = fmul reassoc nnan double [[X:%.*]], [[X]]
; CHECK-NEXT:    ret double [[DIV]]
;
  %p1 = call reassoc double @llvm.powi.f64.i32(double %x, i32 3)
  %div = fdiv reassoc nnan double %p1, %x
  ret double %div
}

define float @fdiv_powf_powi(float %x) {
; CHECK-LABEL: @fdiv_powf_powi(
; CHECK-NEXT:    [[DIV:%.*]] = call reassoc nnan float @llvm.powi.f32.i32(float [[X:%.*]], i32 99)
; CHECK-NEXT:    ret float [[DIV]]
;
  %p1 = call reassoc float @llvm.powi.f32.i32(float %x, i32 100)
  %div = fdiv reassoc nnan float %p1, %x
  ret float %div
}

; TODO: For the multi-use case it may also be better to create powi(x, y - 1) and then
; replace the original powi(x, y) with (fmul powi(x, y - 1), x).
define double @fdiv_pow_powi_multi_use(double %x) {
; CHECK-LABEL: @fdiv_pow_powi_multi_use(
; CHECK-NEXT:    [[P1:%.*]] = call double @llvm.powi.f64.i32(double [[X:%.*]], i32 3)
; CHECK-NEXT:    [[DIV:%.*]] = fdiv reassoc nnan double [[P1]], [[X]]
; CHECK-NEXT:    tail call void @use(double [[P1]])
; CHECK-NEXT:    ret double [[DIV]]
;
  %p1 = call double @llvm.powi.f64.i32(double %x, i32 3)
  %div = fdiv reassoc nnan double %p1, %x
  tail call void @use(double %p1)
  ret double %div
}

; Negative tests: the powi call or the fdiv is missing part of the required fast-math flags
define float @fdiv_powf_powi_missing_reassoc(float %x) {
; CHECK-LABEL: @fdiv_powf_powi_missing_reassoc(
; CHECK-NEXT:    [[P1:%.*]] = call float @llvm.powi.f32.i32(float [[X:%.*]], i32 100)
; CHECK-NEXT:    [[DIV:%.*]] = fdiv reassoc nnan float [[P1]], [[X]]
; CHECK-NEXT:    ret float [[DIV]]
;
  %p1 = call float @llvm.powi.f32.i32(float %x, i32 100)
  %div = fdiv reassoc nnan float %p1, %x
  ret float %div
}

define float @fdiv_powf_powi_missing_reassoc1(float %x) {
; CHECK-LABEL: @fdiv_powf_powi_missing_reassoc1(
; CHECK-NEXT:    [[P1:%.*]] = call reassoc float @llvm.powi.f32.i32(float [[X:%.*]], i32 100)
; CHECK-NEXT:    [[DIV:%.*]] = fdiv nnan float [[P1]], [[X]]
; CHECK-NEXT:    ret float [[DIV]]
;
  %p1 = call reassoc float @llvm.powi.f32.i32(float %x, i32 100)
  %div = fdiv nnan float %p1, %x
  ret float %div
}

define float @fdiv_powf_powi_missing_nnan(float %x) {
; CHECK-LABEL: @fdiv_powf_powi_missing_nnan(
; CHECK-NEXT:    [[P1:%.*]] = call float @llvm.powi.f32.i32(float [[X:%.*]], i32 100)
; CHECK-NEXT:    [[DIV:%.*]] = fdiv reassoc float [[P1]], [[X]]
; CHECK-NEXT:    ret float [[DIV]]
;
  %p1 = call float @llvm.powi.f32.i32(float %x, i32 100)
  %div = fdiv reassoc float %p1, %x
  ret float %div
}

; Negative test: Illegal because (C1 - 1) would wrap around
define double @fdiv_pow_powi_negative(double %x) {
; CHECK-LABEL: @fdiv_pow_powi_negative(
; CHECK-NEXT:    [[P1:%.*]] = call double @llvm.powi.f64.i32(double [[X:%.*]], i32 -2147483648)
; CHECK-NEXT:    [[DIV:%.*]] = fdiv reassoc nnan double [[P1]], [[X]]
; CHECK-NEXT:    ret double [[DIV]]
;
  %p1 = call double @llvm.powi.f64.i32(double %x, i32 -2147483648) ; INT_MIN
  %div = fdiv reassoc nnan double %p1, %x
  ret double %div
}

; Negative test: The 2nd powi argument is a variable
define double @fdiv_pow_powi_negative_variable(double %x, i32 %y) {
; CHECK-LABEL: @fdiv_pow_powi_negative_variable(
; CHECK-NEXT:    [[P1:%.*]] = call reassoc double @llvm.powi.f64.i32(double [[X:%.*]], i32 [[Y:%.*]])
; CHECK-NEXT:    [[DIV:%.*]] = fdiv reassoc nnan double [[P1]], [[X]]
; CHECK-NEXT:    ret double [[DIV]]
;
  %p1 = call reassoc double @llvm.powi.f64.i32(double %x, i32 %y)
  %div = fdiv reassoc nnan double %p1, %x
  ret double %div
}

; powi(X, C1) / (X * Z) --> powi(X, C1 - 1) / Z
define double @fdiv_fmul_powi(double %a, double %z) {
; CHECK-LABEL: @fdiv_fmul_powi(
; CHECK-NEXT:    [[TMP1:%.*]] = call reassoc nnan double @llvm.powi.f64.i32(double [[A:%.*]], i32 4)
; CHECK-NEXT:    [[DIV:%.*]] = fdiv reassoc nnan double [[TMP1]], [[Z:%.*]]
; CHECK-NEXT:    ret double [[DIV]]
;
  %pow = call reassoc double @llvm.powi.f64.i32(double %a, i32 5)
  %square = fmul reassoc double %z, %a
  %div = fdiv reassoc nnan double %pow, %square
  ret double %div
}

; powi(X, 5) / (X * X) --> powi(X, 4) / X --> powi(X, 3)
define double @fdiv_fmul_powi_2(double %a) {
; CHECK-LABEL: @fdiv_fmul_powi_2(
; CHECK-NEXT:    [[DIV:%.*]] = call reassoc nnan double @llvm.powi.f64.i32(double [[A:%.*]], i32 3)
; CHECK-NEXT:    ret double [[DIV]]
;
  %pow = call reassoc double @llvm.powi.f64.i32(double %a, i32 5)
  %square = fmul reassoc double %a, %a
  %div = fdiv reassoc nnan double %pow, %square
  ret double %div
}

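; The same fold applies to vector types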
define <2 x float> @fdiv_fmul_powi_vector(<2 x float> %a) {
; CHECK-LABEL: @fdiv_fmul_powi_vector(
; CHECK-NEXT:    [[DIV:%.*]] = call reassoc nnan <2 x float> @llvm.powi.v2f32.i32(<2 x float> [[A:%.*]], i32 3)
; CHECK-NEXT:    ret <2 x float> [[DIV]]
;
  %pow = call reassoc <2 x float> @llvm.powi.v2f32.i32(<2 x float> %a, i32 5)
  %square = fmul reassoc <2 x float> %a, %a
  %div = fdiv reassoc nnan <2 x float> %pow, %square
  ret <2 x float> %div
}

; Negative test
define double @fdiv_fmul_powi_missing_reassoc1(double %a) {
; CHECK-LABEL: @fdiv_fmul_powi_missing_reassoc1(
; CHECK-NEXT:    [[POW:%.*]] = call reassoc double @llvm.powi.f64.i32(double [[A:%.*]], i32 5)
; CHECK-NEXT:    [[SQUARE:%.*]] = fmul reassoc double [[A]], [[A]]
; CHECK-NEXT:    [[DIV:%.*]] = fdiv nnan double [[POW]], [[SQUARE]]
; CHECK-NEXT:    ret double [[DIV]]
;
  %pow = call reassoc double @llvm.powi.f64.i32(double %a, i32 5)
  %square = fmul reassoc double %a, %a
  %div = fdiv nnan double %pow, %square
  ret double %div
}

define double @fdiv_fmul_powi_missing_reassoc2(double %a) {
; CHECK-LABEL: @fdiv_fmul_powi_missing_reassoc2(
; CHECK-NEXT:    [[POW:%.*]] = call reassoc double @llvm.powi.f64.i32(double [[A:%.*]], i32 5)
; CHECK-NEXT:    [[SQUARE:%.*]] = fmul double [[A]], [[A]]
; CHECK-NEXT:    [[DIV:%.*]] = fdiv reassoc nnan double [[POW]], [[SQUARE]]
; CHECK-NEXT:    ret double [[DIV]]
;
  %pow = call reassoc double @llvm.powi.f64.i32(double %a, i32 5)
  %square = fmul double %a, %a
  %div = fdiv reassoc nnan double %pow, %square
  ret double %div
}

define double @fdiv_fmul_powi_missing_reassoc3(double %a) {
; CHECK-LABEL: @fdiv_fmul_powi_missing_reassoc3(
; CHECK-NEXT:    [[POW:%.*]] = call double @llvm.powi.f64.i32(double [[A:%.*]], i32 5)
; CHECK-NEXT:    [[SQUARE:%.*]] = fmul reassoc double [[A]], [[A]]
; CHECK-NEXT:    [[DIV:%.*]] = fdiv reassoc nnan double [[POW]], [[SQUARE]]
; CHECK-NEXT:    ret double [[DIV]]
;
  %pow = call double @llvm.powi.f64.i32(double %a, i32 5)
  %square = fmul reassoc double %a, %a
  %div = fdiv reassoc nnan double %pow, %square
  ret double %div
}

define double @fdiv_fmul_powi_missing_nnan(double %a) {
; CHECK-LABEL: @fdiv_fmul_powi_missing_nnan(
; CHECK-NEXT:    [[POW:%.*]] = call reassoc double @llvm.powi.f64.i32(double [[A:%.*]], i32 5)
; CHECK-NEXT:    [[SQUARE:%.*]] = fmul reassoc double [[A]], [[A]]
; CHECK-NEXT:    [[DIV:%.*]] = fdiv reassoc double [[POW]], [[SQUARE]]
; CHECK-NEXT:    ret double [[DIV]]
;
  %pow = call reassoc double @llvm.powi.f64.i32(double %a, i32 5)
  %square = fmul reassoc double %a, %a
  %div = fdiv reassoc double %pow, %square
  ret double %div
}

define double @fdiv_fmul_powi_negative_wrap(double noundef %x) {
; CHECK-LABEL: @fdiv_fmul_powi_negative_wrap(
; CHECK-NEXT:    [[P1:%.*]] = tail call double @llvm.powi.f64.i32(double [[X:%.*]], i32 -2147483648)
; CHECK-NEXT:    [[MUL:%.*]] = fmul reassoc double [[P1]], [[X]]
; CHECK-NEXT:    ret double [[MUL]]
;
  %p1 = tail call double @llvm.powi.f64.i32(double %x, i32 -2147483648) ; INT_MIN
  %mul = fmul reassoc double %p1, %x
  ret double %mul
}

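; Negative test: Multi-use of the powi result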
define double @fdiv_fmul_powi_multi_use(double %a) {
; CHECK-LABEL: @fdiv_fmul_powi_multi_use(
; CHECK-NEXT:    [[POW:%.*]] = call reassoc double @llvm.powi.f64.i32(double [[A:%.*]], i32 5)
; CHECK-NEXT:    tail call void @use(double [[POW]])
; CHECK-NEXT:    [[SQUARE:%.*]] = fmul reassoc double [[A]], [[A]]
; CHECK-NEXT:    [[DIV:%.*]] = fdiv reassoc nnan double [[POW]], [[SQUARE]]
; CHECK-NEXT:    ret double [[DIV]]
;
  %pow = call reassoc double @llvm.powi.f64.i32(double %a, i32 5)
  tail call void @use(double %pow)
  %square = fmul reassoc double %a, %a
  %div = fdiv reassoc nnan double %pow, %square
  ret double %div
}

; powi(X, Y) * X --> powi(X, Y+1)
define double @powi_fmul_powi_x(double noundef %x) {
; CHECK-LABEL: @powi_fmul_powi_x(
; CHECK-NEXT:    [[MUL:%.*]] = call reassoc double @llvm.powi.f64.i32(double [[X:%.*]], i32 4)
; CHECK-NEXT:    ret double [[MUL]]
;
  %p1 = tail call reassoc double @llvm.powi.f64.i32(double %x, i32 3)
  %mul = fmul reassoc double %p1, %x
  ret double %mul
}

; Negative test: Multi-use
define double @powi_fmul_powi_x_multi_use(double noundef %x) {
; CHECK-LABEL: @powi_fmul_powi_x_multi_use(
; CHECK-NEXT:    [[P1:%.*]] = tail call double @llvm.powi.f64.i32(double [[X:%.*]], i32 3)
; CHECK-NEXT:    tail call void @use(double [[P1]])
; CHECK-NEXT:    [[MUL:%.*]] = fmul reassoc double [[P1]], [[X]]
; CHECK-NEXT:    ret double [[MUL]]
;
  %p1 = tail call double @llvm.powi.f64.i32(double %x, i32 3)
  tail call void @use(double %p1)
  %mul = fmul reassoc double %p1, %x
  ret double %mul
}

; Negative test: Missing reassoc flag on the fmul
define double @powi_fmul_powi_x_missing_reassoc(double noundef %x) {
; CHECK-LABEL: @powi_fmul_powi_x_missing_reassoc(
; CHECK-NEXT:    [[P1:%.*]] = tail call double @llvm.powi.f64.i32(double [[X:%.*]], i32 3)
; CHECK-NEXT:    [[MUL:%.*]] = fmul double [[P1]], [[X]]
; CHECK-NEXT:    ret double [[MUL]]
;
  %p1 = tail call double @llvm.powi.f64.i32(double %x, i32 3)
  %mul = fmul double %p1, %x
  ret double %mul
}

; Negative test: Y + 1 would overflow
define double @powi_fmul_powi_x_overflow(double noundef %x) {
; CHECK-LABEL: @powi_fmul_powi_x_overflow(
; CHECK-NEXT:    [[P1:%.*]] = tail call double @llvm.powi.f64.i32(double [[X:%.*]], i32 2147483647)
; CHECK-NEXT:    [[MUL:%.*]] = fmul reassoc double [[P1]], [[X]]
; CHECK-NEXT:    ret double [[MUL]]
;
  %p1 = tail call double @llvm.powi.f64.i32(double %x, i32 2147483647) ; INT_MAX
  %mul = fmul reassoc double %p1, %x
  ret double %mul
}