// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -emit-llvm -o - %s | FileCheck %s

// CHECK-LABEL: @add1(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca half, align 2
// CHECK-NEXT: [[B_ADDR:%.*]] = alloca half, align 2
// CHECK-NEXT: store half [[A:%.*]], ptr [[A_ADDR]], align 2
// CHECK-NEXT: store half [[B:%.*]], ptr [[B_ADDR]], align 2
// CHECK-NEXT: [[TMP0:%.*]] = load half, ptr [[A_ADDR]], align 2
// CHECK-NEXT: [[EXT:%.*]] = fpext half [[TMP0]] to float
// CHECK-NEXT: [[TMP1:%.*]] = load half, ptr [[B_ADDR]], align 2
// CHECK-NEXT: [[EXT1:%.*]] = fpext half [[TMP1]] to float
// CHECK-NEXT: [[ADD:%.*]] = fadd float [[EXT]], [[EXT1]]
// CHECK-NEXT: [[UNPROMOTION:%.*]] = fptrunc float [[ADD]] to half
// CHECK-NEXT: ret half [[UNPROMOTION]]
//
_Float16 add1(_Float16 a, _Float16 b) {
  return a + b;
}

// CHECK-LABEL: @add2(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca half, align 2
// CHECK-NEXT: [[B_ADDR:%.*]] = alloca half, align 2
// CHECK-NEXT: [[C_ADDR:%.*]] = alloca half, align 2
// CHECK-NEXT: store half [[A:%.*]], ptr [[A_ADDR]], align 2
// CHECK-NEXT: store half [[B:%.*]], ptr [[B_ADDR]], align 2
// CHECK-NEXT: store half [[C:%.*]], ptr [[C_ADDR]], align 2
// CHECK-NEXT: [[TMP0:%.*]] = load half, ptr [[A_ADDR]], align 2
// CHECK-NEXT: [[EXT:%.*]] = fpext half [[TMP0]] to float
// CHECK-NEXT: [[TMP1:%.*]] = load half, ptr [[B_ADDR]], align 2
// CHECK-NEXT: [[EXT1:%.*]] = fpext half [[TMP1]] to float
// CHECK-NEXT: [[ADD:%.*]] = fadd float [[EXT]], [[EXT1]]
// CHECK-NEXT: [[TMP2:%.*]] = load half, ptr [[C_ADDR]], align 2
// CHECK-NEXT: [[EXT2:%.*]] = fpext half [[TMP2]] to float
// CHECK-NEXT: [[ADD3:%.*]] = fadd float [[ADD]], [[EXT2]]
// CHECK-NEXT: [[UNPROMOTION:%.*]] = fptrunc float [[ADD3]] to half
// CHECK-NEXT: ret half [[UNPROMOTION]]
//
_Float16 add2(_Float16 a, _Float16 b, _Float16 c) {
  return a + b + c;
}

// CHECK-LABEL: @div(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca half, align 2
// CHECK-NEXT: [[B_ADDR:%.*]] = alloca half, align 2
// CHECK-NEXT: store half [[A:%.*]], ptr [[A_ADDR]], align 2
// CHECK-NEXT: store half [[B:%.*]], ptr [[B_ADDR]], align 2
// CHECK-NEXT: [[TMP0:%.*]] = load half, ptr [[A_ADDR]], align 2
// CHECK-NEXT: [[EXT:%.*]] = fpext half [[TMP0]] to float
// CHECK-NEXT: [[TMP1:%.*]] = load half, ptr [[B_ADDR]], align 2
// CHECK-NEXT: [[EXT1:%.*]] = fpext half [[TMP1]] to float
// CHECK-NEXT: [[DIV:%.*]] = fdiv float [[EXT]], [[EXT1]]
// CHECK-NEXT: [[UNPROMOTION:%.*]] = fptrunc float [[DIV]] to half
// CHECK-NEXT: ret half [[UNPROMOTION]]
//
_Float16 div(_Float16 a, _Float16 b) {
  return a / b;
}

// CHECK-LABEL: @mul(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca half, align 2
// CHECK-NEXT: [[B_ADDR:%.*]] = alloca half, align 2
// CHECK-NEXT: store half [[A:%.*]], ptr [[A_ADDR]], align 2
// CHECK-NEXT: store half [[B:%.*]], ptr [[B_ADDR]], align 2
// CHECK-NEXT: [[TMP0:%.*]] = load half, ptr [[A_ADDR]], align 2
// CHECK-NEXT: [[EXT:%.*]] = fpext half [[TMP0]] to float
// CHECK-NEXT: [[TMP1:%.*]] = load half, ptr [[B_ADDR]], align 2
// CHECK-NEXT: [[EXT1:%.*]] = fpext half [[TMP1]] to float
// CHECK-NEXT: [[MUL:%.*]] = fmul float [[EXT]], [[EXT1]]
// CHECK-NEXT: [[UNPROMOTION:%.*]] = fptrunc float [[MUL]] to half
// CHECK-NEXT: ret half [[UNPROMOTION]]
//
_Float16 mul(_Float16 a, _Float16 b) {
  return a * b;
}

// CHECK-LABEL: @add_and_mul1(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca half, align 2
// CHECK-NEXT: [[B_ADDR:%.*]] = alloca half, align 2
// CHECK-NEXT: [[C_ADDR:%.*]] = alloca half, align 2
// CHECK-NEXT: [[D_ADDR:%.*]] = alloca half, align 2
// CHECK-NEXT: store half [[A:%.*]], ptr [[A_ADDR]], align 2
// CHECK-NEXT: store half [[B:%.*]], ptr [[B_ADDR]], align 2
// CHECK-NEXT: store half [[C:%.*]], ptr [[C_ADDR]], align 2
// CHECK-NEXT: store half [[D:%.*]], ptr [[D_ADDR]], align 2
// CHECK-NEXT: [[TMP0:%.*]] = load half, ptr [[A_ADDR]], align 2
// CHECK-NEXT: [[EXT:%.*]] = fpext half [[TMP0]] to float
// CHECK-NEXT: [[TMP1:%.*]] = load half, ptr [[B_ADDR]], align 2
// CHECK-NEXT: [[EXT1:%.*]] = fpext half [[TMP1]] to float
// CHECK-NEXT: [[MUL:%.*]] = fmul float [[EXT]], [[EXT1]]
// CHECK-NEXT: [[TMP2:%.*]] = load half, ptr [[C_ADDR]], align 2
// CHECK-NEXT: [[EXT2:%.*]] = fpext half [[TMP2]] to float
// CHECK-NEXT: [[TMP3:%.*]] = load half, ptr [[D_ADDR]], align 2
// CHECK-NEXT: [[EXT3:%.*]] = fpext half [[TMP3]] to float
// CHECK-NEXT: [[MUL4:%.*]] = fmul float [[EXT2]], [[EXT3]]
// CHECK-NEXT: [[ADD:%.*]] = fadd float [[MUL]], [[MUL4]]
// CHECK-NEXT: [[UNPROMOTION:%.*]] = fptrunc float [[ADD]] to half
// CHECK-NEXT: ret half [[UNPROMOTION]]
//
_Float16 add_and_mul1(_Float16 a, _Float16 b, _Float16 c, _Float16 d) {
  return a * b + c * d;
}

// CHECK-LABEL: @add_and_mul2(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca half, align 2
// CHECK-NEXT: [[B_ADDR:%.*]] = alloca half, align 2
// CHECK-NEXT: [[C_ADDR:%.*]] = alloca half, align 2
// CHECK-NEXT: store half [[A:%.*]], ptr [[A_ADDR]], align 2
// CHECK-NEXT: store half [[B:%.*]], ptr [[B_ADDR]], align 2
// CHECK-NEXT: store half [[C:%.*]], ptr [[C_ADDR]], align 2
// CHECK-NEXT: [[TMP0:%.*]] = load half, ptr [[A_ADDR]], align 2
// CHECK-NEXT: [[EXT:%.*]] = fpext half [[TMP0]] to float
// CHECK-NEXT: [[TMP1:%.*]] = load half, ptr [[B_ADDR]], align 2
// CHECK-NEXT: [[EXT1:%.*]] = fpext half [[TMP1]] to float
// CHECK-NEXT: [[MUL:%.*]] = fmul float 6.000000e+00, [[EXT1]]
// CHECK-NEXT: [[SUB:%.*]] = fsub float [[EXT]], [[MUL]]
// CHECK-NEXT: [[TMP2:%.*]] = load half, ptr [[C_ADDR]], align 2
// CHECK-NEXT: [[EXT2:%.*]] = fpext half [[TMP2]] to float
// CHECK-NEXT: [[ADD:%.*]] = fadd float [[SUB]], [[EXT2]]
// CHECK-NEXT: [[UNPROMOTION:%.*]] = fptrunc float [[ADD]] to half
// CHECK-NEXT: ret half [[UNPROMOTION]]
//
_Float16 add_and_mul2(_Float16 a, _Float16 b, _Float16 c) {
  return (a - 6 * b) + c;
}

// CHECK-LABEL: @addcompound(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca half, align 2
// CHECK-NEXT: [[C_ADDR:%.*]] = alloca half, align 2
// CHECK-NEXT: store half [[A:%.*]], ptr [[A_ADDR]], align 2
// CHECK-NEXT: store half [[C:%.*]], ptr [[C_ADDR]], align 2
// CHECK-NEXT: [[TMP0:%.*]] = load half, ptr [[A_ADDR]], align 2
// CHECK-NEXT: [[EXT:%.*]] = fpext half [[TMP0]] to float
// CHECK-NEXT: [[TMP1:%.*]] = load half, ptr [[C_ADDR]], align 2
// CHECK-NEXT: [[CONV:%.*]] = fpext half [[TMP1]] to float
// CHECK-NEXT: [[ADD:%.*]] = fadd float [[CONV]], [[EXT]]
// CHECK-NEXT: [[CONV1:%.*]] = fptrunc float [[ADD]] to half
// CHECK-NEXT: store half [[CONV1]], ptr [[C_ADDR]], align 2
// CHECK-NEXT: [[TMP2:%.*]] = load half, ptr [[C_ADDR]], align 2
// CHECK-NEXT: ret half [[TMP2]]
//
_Float16 addcompound(_Float16 a, _Float16 c) {
  c += a;
  return c;
}

// CHECK-LABEL: @mulcompound_int_float16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[C_ADDR:%.*]] = alloca half, align 2
// CHECK-NEXT: store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
// CHECK-NEXT: store half [[C:%.*]], ptr [[C_ADDR]], align 2
// CHECK-NEXT: [[TMP0:%.*]] = load half, ptr [[C_ADDR]], align 2
// CHECK-NEXT: [[EXT:%.*]] = fpext half [[TMP0]] to float
// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP1]] to float
// CHECK-NEXT: [[MUL:%.*]] = fmul float [[CONV]], [[EXT]]
// CHECK-NEXT: [[CONV1:%.*]] = fptosi float [[MUL]] to i32
// CHECK-NEXT: store i32 [[CONV1]], ptr [[A_ADDR]], align 4
// CHECK-NEXT: [[TMP2:%.*]] = load half, ptr [[C_ADDR]], align 2
// CHECK-NEXT: ret half [[TMP2]]
//
_Float16 mulcompound_int_float16(int a, _Float16 c) {
  a *= c;
  return c;
}

// CHECK-LABEL: @mulcompound_float_float16c(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[C:%.*]] = alloca { half, half }, align 2
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca float, align 4
// CHECK-NEXT: store <2 x half> [[C_COERCE:%.*]], ptr [[C]], align 2
// CHECK-NEXT: store float [[A:%.*]], ptr [[A_ADDR]], align 4
// CHECK-NEXT: [[C_REALP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[C]], i32 0, i32 0
// CHECK-NEXT: [[C_REAL:%.*]] = load half, ptr [[C_REALP]], align 2
// CHECK-NEXT: [[C_IMAGP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[C]], i32 0, i32 1
// CHECK-NEXT: [[C_IMAG:%.*]] = load half, ptr [[C_IMAGP]], align 2
// CHECK-NEXT: [[CONV:%.*]] = fpext half [[C_REAL]] to float
// CHECK-NEXT: [[CONV1:%.*]] = fpext half [[C_IMAG]] to float
// CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[A_ADDR]], align 4
// CHECK-NEXT: [[MUL_RL:%.*]] = fmul float [[TMP0]], [[CONV]]
// CHECK-NEXT: [[MUL_IR:%.*]] = fmul float [[TMP0]], [[CONV1]]
// CHECK-NEXT: store float [[MUL_RL]], ptr [[A_ADDR]], align 4
// CHECK-NEXT: [[C_REALP2:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[C]], i32 0, i32 0
// CHECK-NEXT: [[C_REAL3:%.*]] = load half, ptr [[C_REALP2]], align 2
// CHECK-NEXT: ret half [[C_REAL3]]
//
_Float16 mulcompound_float_float16c(float a, _Float16 _Complex c) {
  a *= c;
  return c;
}

// CHECK-LABEL: @RealOp(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca half, align 2
// CHECK-NEXT: store half [[A:%.*]], ptr [[A_ADDR]], align 2
// CHECK-NEXT: [[TMP0:%.*]] = load half, ptr [[A_ADDR]], align 2
// CHECK-NEXT: [[EXT:%.*]] = fpext half [[TMP0]] to float
// CHECK-NEXT: [[UNPROMOTION:%.*]] = fptrunc float [[EXT]] to half
// CHECK-NEXT: ret half [[UNPROMOTION]]
//
_Float16 RealOp(_Float16 a) {
  return __real a;
}

// CHECK-LABEL: @RealOp_c(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = alloca { half, half }, align 2
// CHECK-NEXT: store <2 x half> [[A_COERCE:%.*]], ptr [[A]], align 2
// CHECK-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[A]], i32 0, i32 0
// CHECK-NEXT: [[A_REAL:%.*]] = load half, ptr [[A_REALP]], align 2
// CHECK-NEXT: [[EXT:%.*]] = fpext half [[A_REAL]] to float
// CHECK-NEXT: [[UNPROMOTION:%.*]] = fptrunc float [[EXT]] to half
// CHECK-NEXT: ret half [[UNPROMOTION]]
//
_Float16 RealOp_c(_Float16 _Complex a) {
  return __real a;
}

// CHECK-LABEL: @ImagOp(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca half, align 2
// CHECK-NEXT: store half [[A:%.*]], ptr [[A_ADDR]], align 2
// CHECK-NEXT: [[TMP0:%.*]] = load half, ptr [[A_ADDR]], align 2
// CHECK-NEXT: [[EXT:%.*]] = fpext half [[TMP0]] to float
// CHECK-NEXT: ret half 0xH0000
//
_Float16 ImagOp(_Float16 a) {
  return __imag a;
}

// CHECK-LABEL: @ImagOp_c(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = alloca { half, half }, align 2
// CHECK-NEXT: store <2 x half> [[A_COERCE:%.*]], ptr [[A]], align 2
// CHECK-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[A]], i32 0, i32 1
// CHECK-NEXT: [[A_IMAG:%.*]] = load half, ptr [[A_IMAGP]], align 2
// CHECK-NEXT: [[EXT:%.*]] = fpext half [[A_IMAG]] to float
// CHECK-NEXT: [[UNPROMOTION:%.*]] = fptrunc float [[EXT]] to half
// CHECK-NEXT: ret half [[UNPROMOTION]]
//
_Float16 ImagOp_c(_Float16 _Complex a) {
  return __imag a;
}

// CHECK-LABEL: @MinusOp_r(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca half, align 2
// CHECK-NEXT: store half [[A:%.*]], ptr [[A_ADDR]], align 2
// CHECK-NEXT: [[TMP0:%.*]] = load half, ptr [[A_ADDR]], align 2
// CHECK-NEXT: [[EXT:%.*]] = fpext half [[TMP0]] to float
// CHECK-NEXT: [[FNEG:%.*]] = fneg float [[EXT]]
// CHECK-NEXT: [[UNPROMOTION:%.*]] = fptrunc float [[FNEG]] to half
// CHECK-NEXT: ret half [[UNPROMOTION]]
//
_Float16 MinusOp_r(_Float16 a) {
  return -a;
}

// CHECK-LABEL: @MinusOp_c(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = alloca { half, half }, align 2
// CHECK-NEXT: store <2 x half> [[A_COERCE:%.*]], ptr [[A]], align 2
// CHECK-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[A]], i32 0, i32 0
// CHECK-NEXT: [[A_REAL:%.*]] = load half, ptr [[A_REALP]], align 2
// CHECK-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[A]], i32 0, i32 1
// CHECK-NEXT: [[A_IMAG:%.*]] = load half, ptr [[A_IMAGP]], align 2
// CHECK-NEXT: [[EXT:%.*]] = fpext half [[A_REAL]] to float
// CHECK-NEXT: [[EXT1:%.*]] = fpext half [[A_IMAG]] to float
// CHECK-NEXT: [[NEG_R:%.*]] = fneg float [[EXT]]
// CHECK-NEXT: [[NEG_I:%.*]] = fneg float [[EXT1]]
// CHECK-NEXT: [[UNPROMOTION:%.*]] = fptrunc float [[NEG_R]] to half
// CHECK-NEXT: [[UNPROMOTION2:%.*]] = fptrunc float [[NEG_I]] to half
// CHECK-NEXT: ret half [[UNPROMOTION]]
//
_Float16 MinusOp_c(_Float16 _Complex a) {
  return -a;
}

// CHECK-LABEL: @PlusOp_r(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca half, align 2
// CHECK-NEXT: store half [[A:%.*]], ptr [[A_ADDR]], align 2
// CHECK-NEXT: [[TMP0:%.*]] = load half, ptr [[A_ADDR]], align 2
// CHECK-NEXT: [[EXT:%.*]] = fpext half [[TMP0]] to float
// CHECK-NEXT: [[UNPROMOTION:%.*]] = fptrunc float [[EXT]] to half
// CHECK-NEXT: ret half [[UNPROMOTION]]
//
_Float16 PlusOp_r(_Float16 a) {
  return +a;
}

// CHECK-LABEL: @PlusOp_c(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = alloca { half, half }, align 2
// CHECK-NEXT: store <2 x half> [[A_COERCE:%.*]], ptr [[A]], align 2
// CHECK-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[A]], i32 0, i32 0
// CHECK-NEXT: [[A_REAL:%.*]] = load half, ptr [[A_REALP]], align 2
// CHECK-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[A]], i32 0, i32 1
// CHECK-NEXT: [[A_IMAG:%.*]] = load half, ptr [[A_IMAGP]], align 2
// CHECK-NEXT: [[EXT:%.*]] = fpext half [[A_REAL]] to float
// CHECK-NEXT: [[EXT1:%.*]] = fpext half [[A_IMAG]] to float
// CHECK-NEXT: [[UNPROMOTION:%.*]] = fptrunc float [[EXT]] to half
// CHECK-NEXT: [[UNPROMOTION2:%.*]] = fptrunc float [[EXT1]] to half
// CHECK-NEXT: ret half [[UNPROMOTION]]
//
_Float16 PlusOp_c(_Float16 _Complex a) {
  return +a;
}

// CHECK-LABEL: @MinusOp_r_r(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca half, align 2
// CHECK-NEXT: [[C_ADDR:%.*]] = alloca half, align 2
// CHECK-NEXT: store half [[A:%.*]], ptr [[A_ADDR]], align 2
// CHECK-NEXT: store half [[C:%.*]], ptr [[C_ADDR]], align 2
// CHECK-NEXT: [[TMP0:%.*]] = load half, ptr [[A_ADDR]], align 2
// CHECK-NEXT: [[EXT:%.*]] = fpext half [[TMP0]] to float
// CHECK-NEXT: [[TMP1:%.*]] = load half, ptr [[C_ADDR]], align 2
// CHECK-NEXT: [[EXT1:%.*]] = fpext half [[TMP1]] to float
// CHECK-NEXT: [[FNEG:%.*]] = fneg float [[EXT1]]
// CHECK-NEXT: [[ADD:%.*]] = fadd float [[EXT]], [[FNEG]]
// CHECK-NEXT: [[UNPROMOTION:%.*]] = fptrunc float [[ADD]] to half
// CHECK-NEXT: ret half [[UNPROMOTION]]
//
_Float16 MinusOp_r_r(_Float16 a, _Float16 c) {
  return a + -c;
}

// CHECK-LABEL: @MinusOp_c_r(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = alloca { half, half }, align 2
// CHECK-NEXT: [[C_ADDR:%.*]] = alloca half, align 2
// CHECK-NEXT: store <2 x half> [[A_COERCE:%.*]], ptr [[A]], align 2
// CHECK-NEXT: store half [[C:%.*]], ptr [[C_ADDR]], align 2
// CHECK-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[A]], i32 0, i32 0
// CHECK-NEXT: [[A_REAL:%.*]] = load half, ptr [[A_REALP]], align 2
// CHECK-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[A]], i32 0, i32 1
// CHECK-NEXT: [[A_IMAG:%.*]] = load half, ptr [[A_IMAGP]], align 2
// CHECK-NEXT: [[EXT:%.*]] = fpext half [[A_REAL]] to float
// CHECK-NEXT: [[EXT1:%.*]] = fpext half [[A_IMAG]] to float
// CHECK-NEXT: [[TMP0:%.*]] = load half, ptr [[C_ADDR]], align 2
// CHECK-NEXT: [[EXT2:%.*]] = fpext half [[TMP0]] to float
// CHECK-NEXT: [[FNEG:%.*]] = fneg float [[EXT2]]
// CHECK-NEXT: [[ADD_R:%.*]] = fadd float [[EXT]], [[FNEG]]
// CHECK-NEXT: [[UNPROMOTION:%.*]] = fptrunc float [[ADD_R]] to half
// CHECK-NEXT: [[UNPROMOTION3:%.*]] = fptrunc float [[EXT1]] to half
// CHECK-NEXT: ret half [[UNPROMOTION]]
//
_Float16 MinusOp_c_r(_Float16 _Complex a, _Float16 c) {
  return a + -c;
}

// CHECK-LABEL: @MinusOp_r_c(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[C:%.*]] = alloca { half, half }, align 2
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca half, align 2
// CHECK-NEXT: store <2 x half> [[C_COERCE:%.*]], ptr [[C]], align 2
// CHECK-NEXT: store half [[A:%.*]], ptr [[A_ADDR]], align 2
// CHECK-NEXT: [[TMP0:%.*]] = load half, ptr [[A_ADDR]], align 2
// CHECK-NEXT: [[EXT:%.*]] = fpext half [[TMP0]] to float
// CHECK-NEXT: [[C_REALP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[C]], i32 0, i32 0
// CHECK-NEXT: [[C_REAL:%.*]] = load half, ptr [[C_REALP]], align 2
// CHECK-NEXT: [[C_IMAGP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[C]], i32 0, i32 1
// CHECK-NEXT: [[C_IMAG:%.*]] = load half, ptr [[C_IMAGP]], align 2
// CHECK-NEXT: [[EXT1:%.*]] = fpext half [[C_REAL]] to float
// CHECK-NEXT: [[EXT2:%.*]] = fpext half [[C_IMAG]] to float
// CHECK-NEXT: [[NEG_R:%.*]] = fneg float [[EXT1]]
// CHECK-NEXT: [[NEG_I:%.*]] = fneg float [[EXT2]]
// CHECK-NEXT: [[ADD_R:%.*]] = fadd float [[EXT]], [[NEG_R]]
// CHECK-NEXT: [[UNPROMOTION:%.*]] = fptrunc float [[ADD_R]] to half
// CHECK-NEXT: [[UNPROMOTION3:%.*]] = fptrunc float [[NEG_I]] to half
// CHECK-NEXT: ret half [[UNPROMOTION]]
//
_Float16 MinusOp_r_c(_Float16 a, _Float16 _Complex c) {
  return a + -c;
}

// CHECK-LABEL: @MinusOp_c_c(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = alloca { half, half }, align 2
// CHECK-NEXT: [[C:%.*]] = alloca { half, half }, align 2
// CHECK-NEXT: store <2 x half> [[A_COERCE:%.*]], ptr [[A]], align 2
// CHECK-NEXT: store <2 x half> [[C_COERCE:%.*]], ptr [[C]], align 2
// CHECK-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[A]], i32 0, i32 0
// CHECK-NEXT: [[A_REAL:%.*]] = load half, ptr [[A_REALP]], align 2
// CHECK-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[A]], i32 0, i32 1
// CHECK-NEXT: [[A_IMAG:%.*]] = load half, ptr [[A_IMAGP]], align 2
// CHECK-NEXT: [[EXT:%.*]] = fpext half [[A_REAL]] to float
// CHECK-NEXT: [[EXT1:%.*]] = fpext half [[A_IMAG]] to float
// CHECK-NEXT: [[C_REALP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[C]], i32 0, i32 0
// CHECK-NEXT: [[C_REAL:%.*]] = load half, ptr [[C_REALP]], align 2
// CHECK-NEXT: [[C_IMAGP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[C]], i32 0, i32 1
// CHECK-NEXT: [[C_IMAG:%.*]] = load half, ptr [[C_IMAGP]], align 2
// CHECK-NEXT: [[EXT2:%.*]] = fpext half [[C_REAL]] to float
// CHECK-NEXT: [[EXT3:%.*]] = fpext half [[C_IMAG]] to float
// CHECK-NEXT: [[NEG_R:%.*]] = fneg float [[EXT2]]
// CHECK-NEXT: [[NEG_I:%.*]] = fneg float [[EXT3]]
// CHECK-NEXT: [[ADD_R:%.*]] = fadd float [[EXT]], [[NEG_R]]
// CHECK-NEXT: [[ADD_I:%.*]] = fadd float [[EXT1]], [[NEG_I]]
// CHECK-NEXT: [[UNPROMOTION:%.*]] = fptrunc float [[ADD_R]] to half
// CHECK-NEXT: [[UNPROMOTION4:%.*]] = fptrunc float [[ADD_I]] to half
// CHECK-NEXT: ret half [[UNPROMOTION]]
//
_Float16 MinusOp_c_c(_Float16 _Complex a, _Float16 _Complex c) {
  return a + -c;
}

// CHECK-LABEL: @PlusOp_r_r(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca half, align 2
// CHECK-NEXT: [[C_ADDR:%.*]] = alloca half, align 2
// CHECK-NEXT: store half [[A:%.*]], ptr [[A_ADDR]], align 2
// CHECK-NEXT: store half [[C:%.*]], ptr [[C_ADDR]], align 2
// CHECK-NEXT: [[TMP0:%.*]] = load half, ptr [[A_ADDR]], align 2
// CHECK-NEXT: [[EXT:%.*]] = fpext half [[TMP0]] to float
// CHECK-NEXT: [[TMP1:%.*]] = load half, ptr [[C_ADDR]], align 2
// CHECK-NEXT: [[EXT1:%.*]] = fpext half [[TMP1]] to float
// CHECK-NEXT: [[SUB:%.*]] = fsub float [[EXT]], [[EXT1]]
// CHECK-NEXT: [[UNPROMOTION:%.*]] = fptrunc float [[SUB]] to half
// CHECK-NEXT: ret half [[UNPROMOTION]]
//
_Float16 PlusOp_r_r(_Float16 a, _Float16 c) {
  return a - +c;
}

// CHECK-LABEL: @PlusOp_c_r(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = alloca { half, half }, align 2
// CHECK-NEXT: [[C_ADDR:%.*]] = alloca half, align 2
// CHECK-NEXT: store <2 x half> [[A_COERCE:%.*]], ptr [[A]], align 2
// CHECK-NEXT: store half [[C:%.*]], ptr [[C_ADDR]], align 2
// CHECK-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[A]], i32 0, i32 0
// CHECK-NEXT: [[A_REAL:%.*]] = load half, ptr [[A_REALP]], align 2
// CHECK-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[A]], i32 0, i32 1
// CHECK-NEXT: [[A_IMAG:%.*]] = load half, ptr [[A_IMAGP]], align 2
// CHECK-NEXT: [[EXT:%.*]] = fpext half [[A_REAL]] to float
// CHECK-NEXT: [[EXT1:%.*]] = fpext half [[A_IMAG]] to float
// CHECK-NEXT: [[TMP0:%.*]] = load half, ptr [[C_ADDR]], align 2
// CHECK-NEXT: [[EXT2:%.*]] = fpext half [[TMP0]] to float
// CHECK-NEXT: [[SUB_R:%.*]] = fsub float [[EXT]], [[EXT2]]
// CHECK-NEXT: [[UNPROMOTION:%.*]] = fptrunc float [[SUB_R]] to half
// CHECK-NEXT: [[UNPROMOTION3:%.*]] = fptrunc float [[EXT1]] to half
// CHECK-NEXT: ret half [[UNPROMOTION]]
//
_Float16 PlusOp_c_r(_Float16 _Complex a, _Float16 c) {
  return a - +c;
}

// CHECK-LABEL: @PlusOp_r_c(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[C:%.*]] = alloca { half, half }, align 2
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca half, align 2
// CHECK-NEXT: store <2 x half> [[C_COERCE:%.*]], ptr [[C]], align 2
// CHECK-NEXT: store half [[A:%.*]], ptr [[A_ADDR]], align 2
// CHECK-NEXT: [[TMP0:%.*]] = load half, ptr [[A_ADDR]], align 2
// CHECK-NEXT: [[EXT:%.*]] = fpext half [[TMP0]] to float
// CHECK-NEXT: [[C_REALP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[C]], i32 0, i32 0
// CHECK-NEXT: [[C_REAL:%.*]] = load half, ptr [[C_REALP]], align 2
// CHECK-NEXT: [[C_IMAGP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[C]], i32 0, i32 1
// CHECK-NEXT: [[C_IMAG:%.*]] = load half, ptr [[C_IMAGP]], align 2
// CHECK-NEXT: [[EXT1:%.*]] = fpext half [[C_REAL]] to float
// CHECK-NEXT: [[EXT2:%.*]] = fpext half [[C_IMAG]] to float
// CHECK-NEXT: [[SUB_R:%.*]] = fsub float [[EXT]], [[EXT1]]
// CHECK-NEXT: [[SUB_I:%.*]] = fneg float [[EXT2]]
// CHECK-NEXT: [[UNPROMOTION:%.*]] = fptrunc float [[SUB_R]] to half
// CHECK-NEXT: [[UNPROMOTION3:%.*]] = fptrunc float [[SUB_I]] to half
// CHECK-NEXT: ret half [[UNPROMOTION]]
//
_Float16 PlusOp_r_c(_Float16 a, _Float16 _Complex c) {
  return a - +c;
}

// CHECK-LABEL: @PlusOp_c_c(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = alloca { half, half }, align 2
// CHECK-NEXT: [[C:%.*]] = alloca { half, half }, align 2
// CHECK-NEXT: store <2 x half> [[A_COERCE:%.*]], ptr [[A]], align 2
// CHECK-NEXT: store <2 x half> [[C_COERCE:%.*]], ptr [[C]], align 2
// CHECK-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[A]], i32 0, i32 0
// CHECK-NEXT: [[A_REAL:%.*]] = load half, ptr [[A_REALP]], align 2
// CHECK-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[A]], i32 0, i32 1
// CHECK-NEXT: [[A_IMAG:%.*]] = load half, ptr [[A_IMAGP]], align 2
// CHECK-NEXT: [[EXT:%.*]] = fpext half [[A_REAL]] to float
// CHECK-NEXT: [[EXT1:%.*]] = fpext half [[A_IMAG]] to float
// CHECK-NEXT: [[C_REALP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[C]], i32 0, i32 0
// CHECK-NEXT: [[C_REAL:%.*]] = load half, ptr [[C_REALP]], align 2
// CHECK-NEXT: [[C_IMAGP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[C]], i32 0, i32 1
// CHECK-NEXT: [[C_IMAG:%.*]] = load half, ptr [[C_IMAGP]], align 2
// CHECK-NEXT: [[EXT2:%.*]] = fpext half [[C_REAL]] to float
// CHECK-NEXT: [[EXT3:%.*]] = fpext half [[C_IMAG]] to float
// CHECK-NEXT: [[SUB_R:%.*]] = fsub float [[EXT]], [[EXT2]]
// CHECK-NEXT: [[SUB_I:%.*]] = fsub float [[EXT1]], [[EXT3]]
// CHECK-NEXT: [[UNPROMOTION:%.*]] = fptrunc float [[SUB_R]] to half
// CHECK-NEXT: [[UNPROMOTION4:%.*]] = fptrunc float [[SUB_I]] to half
// CHECK-NEXT: ret half [[UNPROMOTION]]
//
_Float16 PlusOp_c_c(_Float16 _Complex a, _Float16 _Complex c) {
  return a - +c;
}