;; Expander definitions for vector support between altivec & vsx.  No
;; instructions are in this file; it provides the generic vector expanders,
;; and the actual vector instructions are in altivec.md and vsx.md.

;; Copyright (C) 2009, 2010, 2011
;; Free Software Foundation, Inc.
;; Contributed by Michael Meissner <meissner@linux.vnet.ibm.com>

;; This file is part of GCC.

;; GCC is free software; you can redistribute it and/or modify it
;; under the terms of the GNU General Public License as published
;; by the Free Software Foundation; either version 3, or (at your
;; option) any later version.

;; GCC is distributed in the hope that it will be useful, but WITHOUT
;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
;; or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
;; License for more details.

;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING3.  If not see
;; <http://www.gnu.org/licenses/>.


;; Vector int modes
(define_mode_iterator VEC_I [V16QI V8HI V4SI])

;; Vector float modes
(define_mode_iterator VEC_F [V4SF V2DF])

;; Vector arithmetic modes
(define_mode_iterator VEC_A [V16QI V8HI V4SI V4SF V2DF])

;; Vector modes that need alignment via permutes
(define_mode_iterator VEC_K [V16QI V8HI V4SI V4SF])

;; Vector logical modes
(define_mode_iterator VEC_L [V16QI V8HI V4SI V2DI V4SF V2DF TI])

;; Vector modes for moves.  Don't do TImode here.
(define_mode_iterator VEC_M [V16QI V8HI V4SI V2DI V4SF V2DF])

;; Vector modes for types that don't need a realignment under VSX
(define_mode_iterator VEC_N [V4SI V4SF V2DI V2DF])

;; Vector comparison modes
(define_mode_iterator VEC_C [V16QI V8HI V4SI V4SF V2DF])

;; Vector init/extract modes
(define_mode_iterator VEC_E [V16QI V8HI V4SI V2DI V4SF V2DF])

;; Vector modes for 64-bit base types
(define_mode_iterator VEC_64 [V2DI V2DF])

;; Vector reload iterator
(define_mode_iterator VEC_R [V16QI V8HI V4SI V2DI V4SF V2DF DF TI])

;; Base type from vector mode
(define_mode_attr VEC_base [(V16QI "QI")
                            (V8HI  "HI")
                            (V4SI  "SI")
                            (V2DI  "DI")
                            (V4SF  "SF")
                            (V2DF  "DF")
                            (TI    "TI")])

;; Same size integer type for floating point data
(define_mode_attr VEC_int [(V4SF "v4si")
                           (V2DF "v2di")])

(define_mode_attr VEC_INT [(V4SF "V4SI")
                           (V2DF "V2DI")])

;; constants for unspec
(define_constants
  [(UNSPEC_PREDICATE 400)])


;; Vector move instructions.
(define_expand "mov<mode>"
  [(set (match_operand:VEC_M 0 "nonimmediate_operand" "")
        (match_operand:VEC_M 1 "any_operand" ""))]
  "VECTOR_MEM_ALTIVEC_OR_VSX_P (<MODE>mode)"
{
  if (can_create_pseudo_p ())
    {
      if (CONSTANT_P (operands[1])
          && !easy_vector_constant (operands[1], <MODE>mode))
        operands[1] = force_const_mem (<MODE>mode, operands[1]);

      else if (!vlogical_operand (operands[0], <MODE>mode)
               && !vlogical_operand (operands[1], <MODE>mode))
        operands[1] = force_reg (<MODE>mode, operands[1]);
    }
})
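;; The mov<mode> expander only massages the operands: vector constants that
;; are not "easy" (cheap to synthesize in registers) are forced into the
;; constant pool, and memory-to-memory moves are broken up by forcing the
;; source into a register.  The actual move insns are in altivec.md and
;; vsx.md.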

;; Generic vector floating point load/store instructions.  These will match
;; insns defined in vsx.md or altivec.md depending on the switches.
(define_expand "vector_load_<mode>"
  [(set (match_operand:VEC_M 0 "vfloat_operand" "")
        (match_operand:VEC_M 1 "memory_operand" ""))]
  "VECTOR_MEM_ALTIVEC_OR_VSX_P (<MODE>mode)"
  "")

(define_expand "vector_store_<mode>"
  [(set (match_operand:VEC_M 0 "memory_operand" "")
        (match_operand:VEC_M 1 "vfloat_operand" ""))]
  "VECTOR_MEM_ALTIVEC_OR_VSX_P (<MODE>mode)"
  "")

;; Splits if a GPR register was chosen for the move
(define_split
  [(set (match_operand:VEC_L 0 "nonimmediate_operand" "")
        (match_operand:VEC_L 1 "input_operand" ""))]
  "VECTOR_MEM_ALTIVEC_OR_VSX_P (<MODE>mode)
   && reload_completed
   && gpr_or_gpr_p (operands[0], operands[1])"
  [(pc)]
{
  rs6000_split_multireg_move (operands[0], operands[1]);
  DONE;
})

;; Vector floating point load/store instructions that use the Altivec
;; instructions even if we are compiling for VSX, since the Altivec
;; instructions silently ignore the bottom 3 bits of the address, and VSX does
;; not.
(define_expand "vector_altivec_load_<mode>"
  [(set (match_operand:VEC_M 0 "vfloat_operand" "")
        (match_operand:VEC_M 1 "memory_operand" ""))]
  "VECTOR_MEM_ALTIVEC_OR_VSX_P (<MODE>mode)"
  "
{
  gcc_assert (VECTOR_MEM_ALTIVEC_OR_VSX_P (<MODE>mode));

  if (VECTOR_MEM_VSX_P (<MODE>mode))
    {
      operands[1] = rs6000_address_for_altivec (operands[1]);
      emit_insn (gen_altivec_lvx_<mode> (operands[0], operands[1]));
      DONE;
    }
}")

(define_expand "vector_altivec_store_<mode>"
  [(set (match_operand:VEC_M 0 "memory_operand" "")
        (match_operand:VEC_M 1 "vfloat_operand" ""))]
  "VECTOR_MEM_ALTIVEC_OR_VSX_P (<MODE>mode)"
  "
{
  gcc_assert (VECTOR_MEM_ALTIVEC_OR_VSX_P (<MODE>mode));

  if (VECTOR_MEM_VSX_P (<MODE>mode))
    {
      operands[0] = rs6000_address_for_altivec (operands[0]);
      emit_insn (gen_altivec_stvx_<mode> (operands[0], operands[1]));
      DONE;
    }
}")



;; Reload patterns for vector operations.  We may need an additional base
;; register to convert the reg+offset addressing to reg+reg for vector
;; registers and reg+reg or (reg+reg)&(-16) addressing to just an index
;; register for gpr registers.
(define_expand "reload_<VEC_R:mode>_<P:mptrsize>_store"
  [(parallel [(match_operand:VEC_R 0 "memory_operand" "m")
              (match_operand:VEC_R 1 "gpc_reg_operand" "r")
              (match_operand:P 2 "register_operand" "=&b")])]
  "<P:tptrsize>"
{
  rs6000_secondary_reload_inner (operands[1], operands[0], operands[2], true);
  DONE;
})

(define_expand "reload_<VEC_R:mode>_<P:mptrsize>_load"
  [(parallel [(match_operand:VEC_R 0 "gpc_reg_operand" "=&r")
              (match_operand:VEC_R 1 "memory_operand" "m")
              (match_operand:P 2 "register_operand" "=&b")])]
  "<P:tptrsize>"
{
  rs6000_secondary_reload_inner (operands[0], operands[1], operands[2], false);
  DONE;
})
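;; In both reload patterns operand 2 is the scratch base register that reload
;; supplies (constraint "=&b"); rs6000_secondary_reload_inner rewrites the
;; memory address into a form the vector (or gpr) load/store can actually use
;; before emitting the move.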

;; Reload sometimes tries to move the address to a GPR, and can generate
;; invalid RTL for addresses involving AND -16.  Allow addresses involving
;; reg+reg, reg+small constant, or just reg, all wrapped in an AND -16.

(define_insn_and_split "*vec_reload_and_plus_<mptrsize>"
  [(set (match_operand:P 0 "gpc_reg_operand" "=b")
        (and:P (plus:P (match_operand:P 1 "gpc_reg_operand" "r")
                       (match_operand:P 2 "reg_or_cint_operand" "rI"))
               (const_int -16)))]
  "(TARGET_ALTIVEC || TARGET_VSX) && (reload_in_progress || reload_completed)"
  "#"
  "&& reload_completed"
  [(set (match_dup 0)
        (plus:P (match_dup 1)
                (match_dup 2)))
   (parallel [(set (match_dup 0)
                   (and:P (match_dup 0)
                          (const_int -16)))
              (clobber (scratch:CC))])])

;; The normal ANDSI3/ANDDI3 won't match if reload decides to move an AND -16
;; address to a register because there is no clobber of a (scratch), so we add
;; it here.
(define_insn_and_split "*vec_reload_and_reg_<mptrsize>"
  [(set (match_operand:P 0 "gpc_reg_operand" "=b")
        (and:P (match_operand:P 1 "gpc_reg_operand" "r")
               (const_int -16)))]
  "(TARGET_ALTIVEC || TARGET_VSX) && (reload_in_progress || reload_completed)"
  "#"
  "&& reload_completed"
  [(parallel [(set (match_dup 0)
                   (and:P (match_dup 1)
                          (const_int -16)))
              (clobber (scratch:CC))])])

;; Generic floating point vector arithmetic support
(define_expand "add<mode>3"
  [(set (match_operand:VEC_F 0 "vfloat_operand" "")
        (plus:VEC_F (match_operand:VEC_F 1 "vfloat_operand" "")
                    (match_operand:VEC_F 2 "vfloat_operand" "")))]
  "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)"
  "")

(define_expand "sub<mode>3"
  [(set (match_operand:VEC_F 0 "vfloat_operand" "")
        (minus:VEC_F (match_operand:VEC_F 1 "vfloat_operand" "")
                     (match_operand:VEC_F 2 "vfloat_operand" "")))]
  "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)"
  "")

(define_expand "mul<mode>3"
  [(set (match_operand:VEC_F 0 "vfloat_operand" "")
        (mult:VEC_F (match_operand:VEC_F 1 "vfloat_operand" "")
                    (match_operand:VEC_F 2 "vfloat_operand" "")))]
  "(VECTOR_UNIT_VSX_P (<MODE>mode)
    || (VECTOR_UNIT_ALTIVEC_P (<MODE>mode) && TARGET_FUSED_MADD))"
  "
{
  if (<MODE>mode == V4SFmode && VECTOR_UNIT_ALTIVEC_P (<MODE>mode))
    {
      emit_insn (gen_altivec_mulv4sf3 (operands[0], operands[1], operands[2]));
      DONE;
    }
}")

(define_expand "div<mode>3"
  [(set (match_operand:VEC_F 0 "vfloat_operand" "")
        (div:VEC_F (match_operand:VEC_F 1 "vfloat_operand" "")
                   (match_operand:VEC_F 2 "vfloat_operand" "")))]
  "VECTOR_UNIT_VSX_P (<MODE>mode)"
  "")

(define_expand "neg<mode>2"
  [(set (match_operand:VEC_F 0 "vfloat_operand" "")
        (neg:VEC_F (match_operand:VEC_F 1 "vfloat_operand" "")))]
  "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)"
  "
{
  if (<MODE>mode == V4SFmode && VECTOR_UNIT_ALTIVEC_P (<MODE>mode))
    {
      emit_insn (gen_altivec_negv4sf2 (operands[0], operands[1]));
      DONE;
    }
}")

(define_expand "abs<mode>2"
  [(set (match_operand:VEC_F 0 "vfloat_operand" "")
        (abs:VEC_F (match_operand:VEC_F 1 "vfloat_operand" "")))]
  "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)"
  "
{
  if (<MODE>mode == V4SFmode && VECTOR_UNIT_ALTIVEC_P (<MODE>mode))
    {
      emit_insn (gen_altivec_absv4sf2 (operands[0], operands[1]));
      DONE;
    }
}")

(define_expand "smin<mode>3"
  [(set (match_operand:VEC_F 0 "register_operand" "")
        (smin:VEC_F (match_operand:VEC_F 1 "register_operand" "")
                    (match_operand:VEC_F 2 "register_operand" "")))]
  "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)"
  "")

(define_expand "smax<mode>3"
  [(set (match_operand:VEC_F 0 "register_operand" "")
        (smax:VEC_F (match_operand:VEC_F 1 "register_operand" "")
                    (match_operand:VEC_F 2 "register_operand" "")))]
  "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)"
  "")


(define_expand "sqrt<mode>2"
  [(set (match_operand:VEC_F 0 "vfloat_operand" "")
        (sqrt:VEC_F (match_operand:VEC_F 1 "vfloat_operand" "")))]
  "VECTOR_UNIT_VSX_P (<MODE>mode)"
  "")

(define_expand "ftrunc<mode>2"
  [(set (match_operand:VEC_F 0 "vfloat_operand" "")
        (fix:VEC_F (match_operand:VEC_F 1 "vfloat_operand" "")))]
  "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)"
  "")

(define_expand "vector_ceil<mode>2"
  [(set (match_operand:VEC_F 0 "vfloat_operand" "")
        (unspec:VEC_F [(match_operand:VEC_F 1 "vfloat_operand" "")]
                      UNSPEC_FRIP))]
  "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)"
  "")

(define_expand "vector_floor<mode>2"
  [(set (match_operand:VEC_F 0 "vfloat_operand" "")
        (unspec:VEC_F [(match_operand:VEC_F 1 "vfloat_operand" "")]
                      UNSPEC_FRIM))]
  "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)"
  "")

(define_expand "vector_btrunc<mode>2"
  [(set (match_operand:VEC_F 0 "vfloat_operand" "")
        (fix:VEC_F (match_operand:VEC_F 1 "vfloat_operand" "")))]
  "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)"
  "")

(define_expand "vector_copysign<mode>3"
  [(set (match_operand:VEC_F 0 "vfloat_operand" "")
        (if_then_else:VEC_F
         (ge:VEC_F (match_operand:VEC_F 2 "vfloat_operand" "")
                   (match_dup 3))
         (abs:VEC_F (match_operand:VEC_F 1 "vfloat_operand" ""))
         (neg:VEC_F (abs:VEC_F (match_dup 1)))))]
  "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)"
  "
{
  if (<MODE>mode == V4SFmode && VECTOR_UNIT_ALTIVEC_P (<MODE>mode))
    {
      emit_insn (gen_altivec_copysign_v4sf3 (operands[0], operands[1],
                                             operands[2]));
      DONE;
    }

  operands[3] = CONST0_RTX (<MODE>mode);
}")


;; Vector comparisons
(define_expand "vcond<mode>"
  [(set (match_operand:VEC_F 0 "vfloat_operand" "")
        (if_then_else:VEC_F
         (match_operator 3 "comparison_operator"
                         [(match_operand:VEC_F 4 "vfloat_operand" "")
                          (match_operand:VEC_F 5 "vfloat_operand" "")])
         (match_operand:VEC_F 1 "vfloat_operand" "")
         (match_operand:VEC_F 2 "vfloat_operand" "")))]
  "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)"
  "
{
  if (rs6000_emit_vector_cond_expr (operands[0], operands[1], operands[2],
                                    operands[3], operands[4], operands[5]))
    DONE;
  else
    FAIL;
}")

(define_expand "vcond<mode>"
  [(set (match_operand:VEC_I 0 "vint_operand" "")
        (if_then_else:VEC_I
         (match_operator 3 "comparison_operator"
                         [(match_operand:VEC_I 4 "vint_operand" "")
                          (match_operand:VEC_I 5 "vint_operand" "")])
         (match_operand:VEC_I 1 "vint_operand" "")
         (match_operand:VEC_I 2 "vint_operand" "")))]
  "VECTOR_UNIT_ALTIVEC_P (<MODE>mode)"
  "
{
  if (rs6000_emit_vector_cond_expr (operands[0], operands[1], operands[2],
                                    operands[3], operands[4], operands[5]))
    DONE;
  else
    FAIL;
}")

(define_expand "vcondu<mode>"
  [(set (match_operand:VEC_I 0 "vint_operand" "")
        (if_then_else:VEC_I
         (match_operator 3 "comparison_operator"
                         [(match_operand:VEC_I 4 "vint_operand" "")
                          (match_operand:VEC_I 5 "vint_operand" "")])
         (match_operand:VEC_I 1 "vint_operand" "")
         (match_operand:VEC_I 2 "vint_operand" "")))]
  "VECTOR_UNIT_ALTIVEC_P (<MODE>mode)"
  "
{
  if (rs6000_emit_vector_cond_expr (operands[0], operands[1], operands[2],
                                    operands[3], operands[4], operands[5]))
    DONE;
  else
    FAIL;
}")

(define_expand "vector_eq<mode>"
  [(set (match_operand:VEC_C 0 "vlogical_operand" "")
        (eq:VEC_C (match_operand:VEC_C 1 "vlogical_operand" "")
                  (match_operand:VEC_C 2 "vlogical_operand" "")))]
  "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)"
  "")

(define_expand "vector_gt<mode>"
  [(set (match_operand:VEC_C 0 "vlogical_operand" "")
        (gt:VEC_C (match_operand:VEC_C 1 "vlogical_operand" "")
                  (match_operand:VEC_C 2 "vlogical_operand" "")))]
  "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)"
  "")

(define_expand "vector_ge<mode>"
  [(set (match_operand:VEC_C 0 "vlogical_operand" "")
        (ge:VEC_C (match_operand:VEC_C 1 "vlogical_operand" "")
                  (match_operand:VEC_C 2 "vlogical_operand" "")))]
  "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)"
  "")

(define_expand "vector_gtu<mode>"
  [(set (match_operand:VEC_I 0 "vint_operand" "")
        (gtu:VEC_I (match_operand:VEC_I 1 "vint_operand" "")
                   (match_operand:VEC_I 2 "vint_operand" "")))]
  "VECTOR_UNIT_ALTIVEC_P (<MODE>mode)"
  "")

(define_expand "vector_geu<mode>"
  [(set (match_operand:VEC_I 0 "vint_operand" "")
        (geu:VEC_I (match_operand:VEC_I 1 "vint_operand" "")
                   (match_operand:VEC_I 2 "vint_operand" "")))]
  "VECTOR_UNIT_ALTIVEC_P (<MODE>mode)"
  "")

(define_insn_and_split "*vector_uneq<mode>"
  [(set (match_operand:VEC_F 0 "vfloat_operand" "")
        (uneq:VEC_F (match_operand:VEC_F 1 "vfloat_operand" "")
                    (match_operand:VEC_F 2 "vfloat_operand" "")))]
  "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)"
  "#"
  ""
  [(set (match_dup 3)
        (gt:VEC_F (match_dup 1)
                  (match_dup 2)))
   (set (match_dup 4)
        (gt:VEC_F (match_dup 2)
                  (match_dup 1)))
   (set (match_dup 0)
        (not:VEC_F (ior:VEC_F (match_dup 3)
                              (match_dup 4))))]
  "
{
  operands[3] = gen_reg_rtx (<MODE>mode);
  operands[4] = gen_reg_rtx (<MODE>mode);
}")

(define_insn_and_split "*vector_ltgt<mode>"
  [(set (match_operand:VEC_F 0 "vfloat_operand" "")
        (ltgt:VEC_F (match_operand:VEC_F 1 "vfloat_operand" "")
                    (match_operand:VEC_F 2 "vfloat_operand" "")))]
  "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)"
  "#"
  ""
  [(set (match_dup 3)
        (gt:VEC_F (match_dup 1)
                  (match_dup 2)))
   (set (match_dup 4)
        (gt:VEC_F (match_dup 2)
                  (match_dup 1)))
   (set (match_dup 0)
        (ior:VEC_F (match_dup 3)
                   (match_dup 4)))]
  "
{
  operands[3] = gen_reg_rtx (<MODE>mode);
  operands[4] = gen_reg_rtx (<MODE>mode);
}")

(define_insn_and_split "*vector_ordered<mode>"
  [(set (match_operand:VEC_F 0 "vfloat_operand" "")
        (ordered:VEC_F (match_operand:VEC_F 1 "vfloat_operand" "")
                       (match_operand:VEC_F 2 "vfloat_operand" "")))]
  "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)"
  "#"
  ""
  [(set (match_dup 3)
        (ge:VEC_F (match_dup 1)
                  (match_dup 2)))
   (set (match_dup 4)
        (ge:VEC_F (match_dup 2)
                  (match_dup 1)))
   (set (match_dup 0)
        (ior:VEC_F (match_dup 3)
                   (match_dup 4)))]
  "
{
  operands[3] = gen_reg_rtx (<MODE>mode);
  operands[4] = gen_reg_rtx (<MODE>mode);
}")

(define_insn_and_split "*vector_unordered<mode>"
  [(set (match_operand:VEC_F 0 "vfloat_operand" "")
        (unordered:VEC_F (match_operand:VEC_F 1 "vfloat_operand" "")
                         (match_operand:VEC_F 2 "vfloat_operand" "")))]
  "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)"
  "#"
  ""
  [(set (match_dup 3)
        (ge:VEC_F (match_dup 1)
                  (match_dup 2)))
   (set (match_dup 4)
        (ge:VEC_F (match_dup 2)
                  (match_dup 1)))
   (set (match_dup 0)
        (not:VEC_F (ior:VEC_F (match_dup 3)
                              (match_dup 4))))]
  "
{
  operands[3] = gen_reg_rtx (<MODE>mode);
  operands[4] = gen_reg_rtx (<MODE>mode);
}")

;; Note the arguments for __builtin_altivec_vsel are op2, op1, mask
;; which is in the reverse order that we want
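;; vsel is a bit-wise select: where a mask bit is set the result bit is taken
;; from the second data input, otherwise from the first.  The expanders below
;; therefore model it as (if_then_else (ne op3 0) op2 op1), with operand 3
;; playing the role of the mask.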
(define_expand "vector_select_<mode>"
  [(set (match_operand:VEC_L 0 "vlogical_operand" "")
        (if_then_else:VEC_L
         (ne:CC (match_operand:VEC_L 3 "vlogical_operand" "")
                (match_dup 4))
         (match_operand:VEC_L 2 "vlogical_operand" "")
         (match_operand:VEC_L 1 "vlogical_operand" "")))]
  "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)"
  "operands[4] = CONST0_RTX (<MODE>mode);")

(define_expand "vector_select_<mode>_uns"
  [(set (match_operand:VEC_L 0 "vlogical_operand" "")
        (if_then_else:VEC_L
         (ne:CCUNS (match_operand:VEC_L 3 "vlogical_operand" "")
                   (match_dup 4))
         (match_operand:VEC_L 2 "vlogical_operand" "")
         (match_operand:VEC_L 1 "vlogical_operand" "")))]
  "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)"
  "operands[4] = CONST0_RTX (<MODE>mode);")

;; Expansions that compare vectors producing a vector result and a predicate,
;; setting CR6 to indicate a combined status
(define_expand "vector_eq_<mode>_p"
  [(parallel
    [(set (reg:CC 74)
          (unspec:CC [(eq:CC (match_operand:VEC_A 1 "vlogical_operand" "")
                             (match_operand:VEC_A 2 "vlogical_operand" ""))]
                     UNSPEC_PREDICATE))
     (set (match_operand:VEC_A 0 "vlogical_operand" "")
          (eq:VEC_A (match_dup 1)
                    (match_dup 2)))])]
  "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)"
  "")

(define_expand "vector_gt_<mode>_p"
  [(parallel
    [(set (reg:CC 74)
          (unspec:CC [(gt:CC (match_operand:VEC_A 1 "vlogical_operand" "")
                             (match_operand:VEC_A 2 "vlogical_operand" ""))]
                     UNSPEC_PREDICATE))
     (set (match_operand:VEC_A 0 "vlogical_operand" "")
          (gt:VEC_A (match_dup 1)
                    (match_dup 2)))])]
  "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)"
  "")

(define_expand "vector_ge_<mode>_p"
  [(parallel
    [(set (reg:CC 74)
          (unspec:CC [(ge:CC (match_operand:VEC_F 1 "vfloat_operand" "")
                             (match_operand:VEC_F 2 "vfloat_operand" ""))]
                     UNSPEC_PREDICATE))
     (set (match_operand:VEC_F 0 "vfloat_operand" "")
          (ge:VEC_F (match_dup 1)
                    (match_dup 2)))])]
  "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)"
  "")

(define_expand "vector_gtu_<mode>_p"
  [(parallel
    [(set (reg:CC 74)
          (unspec:CC [(gtu:CC (match_operand:VEC_I 1 "vint_operand" "")
                              (match_operand:VEC_I 2 "vint_operand" ""))]
                     UNSPEC_PREDICATE))
     (set (match_operand:VEC_I 0 "vlogical_operand" "")
          (gtu:VEC_I (match_dup 1)
                     (match_dup 2)))])]
  "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)"
  "")

;; AltiVec/VSX predicates.
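;; (reg:CC 74) is condition register field CR6, which the vector compare
;; "record" forms set.  The expanders below read it back into a GPR; together
;; with the *_p patterns above they implement the vec_all_* / vec_any_*
;; predicate builtins.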
(define_expand "cr6_test_for_zero"
  [(set (match_operand:SI 0 "register_operand" "=r")
        (eq:SI (reg:CC 74)
               (const_int 0)))]
  "TARGET_ALTIVEC || TARGET_VSX"
  "")

(define_expand "cr6_test_for_zero_reverse"
  [(set (match_operand:SI 0 "register_operand" "=r")
        (eq:SI (reg:CC 74)
               (const_int 0)))
   (set (match_dup 0) (minus:SI (const_int 1) (match_dup 0)))]
  "TARGET_ALTIVEC || TARGET_VSX"
  "")

(define_expand "cr6_test_for_lt"
  [(set (match_operand:SI 0 "register_operand" "=r")
        (lt:SI (reg:CC 74)
               (const_int 0)))]
  "TARGET_ALTIVEC || TARGET_VSX"
  "")

(define_expand "cr6_test_for_lt_reverse"
  [(set (match_operand:SI 0 "register_operand" "=r")
        (lt:SI (reg:CC 74)
               (const_int 0)))
   (set (match_dup 0) (minus:SI (const_int 1) (match_dup 0)))]
  "TARGET_ALTIVEC || TARGET_VSX"
  "")


;; Vector logical instructions
(define_expand "xor<mode>3"
  [(set (match_operand:VEC_L 0 "vlogical_operand" "")
        (xor:VEC_L (match_operand:VEC_L 1 "vlogical_operand" "")
                   (match_operand:VEC_L 2 "vlogical_operand" "")))]
  "VECTOR_MEM_ALTIVEC_OR_VSX_P (<MODE>mode)"
  "")

(define_expand "ior<mode>3"
  [(set (match_operand:VEC_L 0 "vlogical_operand" "")
        (ior:VEC_L (match_operand:VEC_L 1 "vlogical_operand" "")
                   (match_operand:VEC_L 2 "vlogical_operand" "")))]
  "VECTOR_MEM_ALTIVEC_OR_VSX_P (<MODE>mode)"
  "")

(define_expand "and<mode>3"
  [(set (match_operand:VEC_L 0 "vlogical_operand" "")
        (and:VEC_L (match_operand:VEC_L 1 "vlogical_operand" "")
                   (match_operand:VEC_L 2 "vlogical_operand" "")))]
  "VECTOR_MEM_ALTIVEC_OR_VSX_P (<MODE>mode)"
  "")

(define_expand "one_cmpl<mode>2"
  [(set (match_operand:VEC_L 0 "vlogical_operand" "")
        (not:VEC_L (match_operand:VEC_L 1 "vlogical_operand" "")))]
  "VECTOR_MEM_ALTIVEC_OR_VSX_P (<MODE>mode)"
  "")

(define_expand "nor<mode>3"
  [(set (match_operand:VEC_L 0 "vlogical_operand" "")
        (not:VEC_L (ior:VEC_L (match_operand:VEC_L 1 "vlogical_operand" "")
                              (match_operand:VEC_L 2 "vlogical_operand" ""))))]
  "VECTOR_MEM_ALTIVEC_OR_VSX_P (<MODE>mode)"
  "")

(define_expand "andc<mode>3"
  [(set (match_operand:VEC_L 0 "vlogical_operand" "")
        (and:VEC_L (not:VEC_L (match_operand:VEC_L 2 "vlogical_operand" ""))
                   (match_operand:VEC_L 1 "vlogical_operand" "")))]
  "VECTOR_MEM_ALTIVEC_OR_VSX_P (<MODE>mode)"
  "")

;; Same size conversions
(define_expand "float<VEC_int><mode>2"
  [(set (match_operand:VEC_F 0 "vfloat_operand" "")
        (float:VEC_F (match_operand:<VEC_INT> 1 "vint_operand" "")))]
  "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)"
  "
{
  if (<MODE>mode == V4SFmode && VECTOR_UNIT_ALTIVEC_P (<MODE>mode))
    {
      emit_insn (gen_altivec_vcfsx (operands[0], operands[1], const0_rtx));
      DONE;
    }
}")

(define_expand "unsigned_float<VEC_int><mode>2"
  [(set (match_operand:VEC_F 0 "vfloat_operand" "")
        (unsigned_float:VEC_F (match_operand:<VEC_INT> 1 "vint_operand" "")))]
  "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)"
  "
{
  if (<MODE>mode == V4SFmode && VECTOR_UNIT_ALTIVEC_P (<MODE>mode))
    {
      emit_insn (gen_altivec_vcfux (operands[0], operands[1], const0_rtx));
      DONE;
    }
}")

(define_expand "fix_trunc<mode><VEC_int>2"
  [(set (match_operand:<VEC_INT> 0 "vint_operand" "")
        (fix:<VEC_INT> (match_operand:VEC_F 1 "vfloat_operand" "")))]
  "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)"
  "
{
  if (<MODE>mode == V4SFmode && VECTOR_UNIT_ALTIVEC_P (<MODE>mode))
    {
      emit_insn (gen_altivec_vctsxs (operands[0], operands[1], const0_rtx));
      DONE;
    }
}")

(define_expand "fixuns_trunc<mode><VEC_int>2"
  [(set (match_operand:<VEC_INT> 0 "vint_operand" "")
        (unsigned_fix:<VEC_INT> (match_operand:VEC_F 1 "vfloat_operand" "")))]
  "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)"
  "
{
  if (<MODE>mode == V4SFmode && VECTOR_UNIT_ALTIVEC_P (<MODE>mode))
    {
      emit_insn (gen_altivec_vctuxs (operands[0], operands[1], const0_rtx));
      DONE;
    }
}")


;; Vector initialization, set, extract
(define_expand "vec_init<mode>"
  [(match_operand:VEC_E 0 "vlogical_operand" "")
   (match_operand:VEC_E 1 "" "")]
  "VECTOR_MEM_ALTIVEC_OR_VSX_P (<MODE>mode)"
{
  rs6000_expand_vector_init (operands[0], operands[1]);
  DONE;
})

(define_expand "vec_set<mode>"
  [(match_operand:VEC_E 0 "vlogical_operand" "")
   (match_operand:<VEC_base> 1 "register_operand" "")
   (match_operand 2 "const_int_operand" "")]
  "VECTOR_MEM_ALTIVEC_OR_VSX_P (<MODE>mode)"
{
  rs6000_expand_vector_set (operands[0], operands[1], INTVAL (operands[2]));
  DONE;
})

(define_expand "vec_extract<mode>"
  [(match_operand:<VEC_base> 0 "register_operand" "")
   (match_operand:VEC_E 1 "vlogical_operand" "")
   (match_operand 2 "const_int_operand" "")]
  "VECTOR_MEM_ALTIVEC_OR_VSX_P (<MODE>mode)"
{
  rs6000_expand_vector_extract (operands[0], operands[1],
                                INTVAL (operands[2]));
  DONE;
})

;; Interleave patterns
(define_expand "vec_interleave_highv4sf"
  [(set (match_operand:V4SF 0 "vfloat_operand" "")
        (vec_merge:V4SF
         (vec_select:V4SF (match_operand:V4SF 1 "vfloat_operand" "")
                          (parallel [(const_int 0)
                                     (const_int 2)
                                     (const_int 1)
                                     (const_int 3)]))
         (vec_select:V4SF (match_operand:V4SF 2 "vfloat_operand" "")
                          (parallel [(const_int 2)
                                     (const_int 0)
                                     (const_int 3)
                                     (const_int 1)]))
         (const_int 5)))]
  "VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)"
  "")

(define_expand "vec_interleave_lowv4sf"
  [(set (match_operand:V4SF 0 "vfloat_operand" "")
        (vec_merge:V4SF
         (vec_select:V4SF (match_operand:V4SF 1 "vfloat_operand" "")
                          (parallel [(const_int 2)
                                     (const_int 0)
                                     (const_int 3)
                                     (const_int 1)]))
         (vec_select:V4SF (match_operand:V4SF 2 "vfloat_operand" "")
                          (parallel [(const_int 0)
                                     (const_int 2)
                                     (const_int 1)
                                     (const_int 3)]))
         (const_int 5)))]
  "VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)"
  "")

(define_expand "vec_interleave_high<mode>"
  [(set (match_operand:VEC_64 0 "vfloat_operand" "")
        (vec_concat:VEC_64
         (vec_select:<VEC_base> (match_operand:VEC_64 1 "vfloat_operand" "")
                                (parallel [(const_int 0)]))
         (vec_select:<VEC_base> (match_operand:VEC_64 2 "vfloat_operand" "")
                                (parallel [(const_int 0)]))))]
  "VECTOR_UNIT_VSX_P (<MODE>mode)"
  "")

(define_expand "vec_interleave_low<mode>"
  [(set (match_operand:VEC_64 0 "vfloat_operand" "")
        (vec_concat:VEC_64
         (vec_select:<VEC_base> (match_operand:VEC_64 1 "vfloat_operand" "")
                                (parallel [(const_int 1)]))
         (vec_select:<VEC_base> (match_operand:VEC_64 2 "vfloat_operand" "")
                                (parallel [(const_int 1)]))))]
  "VECTOR_UNIT_VSX_P (<MODE>mode)"
  "")
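;; In the V4SF patterns above, the vec_select/vec_merge pair (merge mask 5 =
;; 0b0101) produces {a0, b0, a1, b1} for the high interleave and
;; {a2, b2, a3, b3} for the low interleave of inputs {a0..a3} and {b0..b3}.
;; The VEC_64 forms simply concatenate element 0 (high) or element 1 (low)
;; of each input.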

;; Convert double word types to single word types
(define_expand "vec_pack_trunc_v2df"
  [(match_operand:V4SF 0 "vfloat_operand" "")
   (match_operand:V2DF 1 "vfloat_operand" "")
   (match_operand:V2DF 2 "vfloat_operand" "")]
  "VECTOR_UNIT_VSX_P (V2DFmode) && TARGET_ALTIVEC"
{
  rtx r1 = gen_reg_rtx (V4SFmode);
  rtx r2 = gen_reg_rtx (V4SFmode);

  emit_insn (gen_vsx_xvcvdpsp (r1, operands[1]));
  emit_insn (gen_vsx_xvcvdpsp (r2, operands[2]));
  emit_insn (gen_vec_extract_evenv4sf (operands[0], r1, r2));
  DONE;
})

(define_expand "vec_pack_sfix_trunc_v2df"
  [(match_operand:V4SI 0 "vint_operand" "")
   (match_operand:V2DF 1 "vfloat_operand" "")
   (match_operand:V2DF 2 "vfloat_operand" "")]
  "VECTOR_UNIT_VSX_P (V2DFmode) && TARGET_ALTIVEC"
{
  rtx r1 = gen_reg_rtx (V4SImode);
  rtx r2 = gen_reg_rtx (V4SImode);

  emit_insn (gen_vsx_xvcvdpsxws (r1, operands[1]));
  emit_insn (gen_vsx_xvcvdpsxws (r2, operands[2]));
  emit_insn (gen_vec_extract_evenv4si (operands[0], r1, r2));
  DONE;
})

(define_expand "vec_pack_ufix_trunc_v2df"
  [(match_operand:V4SI 0 "vint_operand" "")
   (match_operand:V2DF 1 "vfloat_operand" "")
   (match_operand:V2DF 2 "vfloat_operand" "")]
  "VECTOR_UNIT_VSX_P (V2DFmode) && TARGET_ALTIVEC"
{
  rtx r1 = gen_reg_rtx (V4SImode);
  rtx r2 = gen_reg_rtx (V4SImode);

  emit_insn (gen_vsx_xvcvdpuxws (r1, operands[1]));
  emit_insn (gen_vsx_xvcvdpuxws (r2, operands[2]));
  emit_insn (gen_vec_extract_evenv4si (operands[0], r1, r2));
  DONE;
})

;; Convert single word types to double word
(define_expand "vec_unpacks_hi_v4sf"
  [(match_operand:V2DF 0 "vfloat_operand" "")
   (match_operand:V4SF 1 "vfloat_operand" "")]
  "VECTOR_UNIT_VSX_P (V2DFmode) && VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)"
{
  rtx reg = gen_reg_rtx (V4SFmode);

  emit_insn (gen_vec_interleave_highv4sf (reg, operands[1], operands[1]));
  emit_insn (gen_vsx_xvcvspdp (operands[0], reg));
  DONE;
})

(define_expand "vec_unpacks_lo_v4sf"
  [(match_operand:V2DF 0 "vfloat_operand" "")
   (match_operand:V4SF 1 "vfloat_operand" "")]
  "VECTOR_UNIT_VSX_P (V2DFmode) && VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)"
{
  rtx reg = gen_reg_rtx (V4SFmode);

  emit_insn (gen_vec_interleave_lowv4sf (reg, operands[1], operands[1]));
  emit_insn (gen_vsx_xvcvspdp (operands[0], reg));
  DONE;
})

(define_expand "vec_unpacks_float_hi_v4si"
  [(match_operand:V2DF 0 "vfloat_operand" "")
   (match_operand:V4SI 1 "vint_operand" "")]
  "VECTOR_UNIT_VSX_P (V2DFmode) && VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SImode)"
{
  rtx reg = gen_reg_rtx (V4SImode);

  emit_insn (gen_vec_interleave_highv4si (reg, operands[1], operands[1]));
  emit_insn (gen_vsx_xvcvsxwdp (operands[0], reg));
  DONE;
})

(define_expand "vec_unpacks_float_lo_v4si"
  [(match_operand:V2DF 0 "vfloat_operand" "")
   (match_operand:V4SI 1 "vint_operand" "")]
  "VECTOR_UNIT_VSX_P (V2DFmode) && VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SImode)"
{
  rtx reg = gen_reg_rtx (V4SImode);

  emit_insn (gen_vec_interleave_lowv4si (reg, operands[1], operands[1]));
  emit_insn (gen_vsx_xvcvsxwdp (operands[0], reg));
  DONE;
})

(define_expand "vec_unpacku_float_hi_v4si"
  [(match_operand:V2DF 0 "vfloat_operand" "")
   (match_operand:V4SI 1 "vint_operand" "")]
  "VECTOR_UNIT_VSX_P (V2DFmode) && VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SImode)"
{
  rtx reg = gen_reg_rtx (V4SImode);

  emit_insn (gen_vec_interleave_highv4si (reg, operands[1], operands[1]));
  emit_insn (gen_vsx_xvcvuxwdp (operands[0], reg));
  DONE;
})

(define_expand "vec_unpacku_float_lo_v4si"
  [(match_operand:V2DF 0 "vfloat_operand" "")
   (match_operand:V4SI 1 "vint_operand" "")]
  "VECTOR_UNIT_VSX_P (V2DFmode) && VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SImode)"
{
  rtx reg = gen_reg_rtx (V4SImode);

  emit_insn (gen_vec_interleave_lowv4si (reg, operands[1], operands[1]));
  emit_insn (gen_vsx_xvcvuxwdp (operands[0], reg));
  DONE;
})


;; Align vector loads with a permute.
(define_expand "vec_realign_load_<mode>"
  [(match_operand:VEC_K 0 "vlogical_operand" "")
   (match_operand:VEC_K 1 "vlogical_operand" "")
   (match_operand:VEC_K 2 "vlogical_operand" "")
   (match_operand:V16QI 3 "vlogical_operand" "")]
  "VECTOR_MEM_ALTIVEC_OR_VSX_P (<MODE>mode)"
{
  emit_insn (gen_altivec_vperm_<mode> (operands[0], operands[1], operands[2],
                                       operands[3]));
  DONE;
})

;; Under VSX, vectors with 4- or 8-byte alignment do not need to be realigned
;; since the load already handles it.
(define_expand "movmisalign<mode>"
  [(set (match_operand:VEC_N 0 "vfloat_operand" "")
        (match_operand:VEC_N 1 "vfloat_operand" ""))]
  "VECTOR_MEM_VSX_P (<MODE>mode) && TARGET_ALLOW_MOVMISALIGN"
  "")


;; Vector shift left in bits.  Currently supported only for shift
;; amounts that can be expressed as byte shifts (divisible by 8).
;; General shift amounts can be supported using vslo + vsl.  We're
;; not expecting to see these yet (the vectorizer currently
;; generates only shifts divisible by byte_size).
(define_expand "vec_shl_<mode>"
  [(match_operand:VEC_L 0 "vlogical_operand" "")
   (match_operand:VEC_L 1 "vlogical_operand" "")
   (match_operand:QI 2 "reg_or_short_operand" "")]
  "TARGET_ALTIVEC"
  "
{
  rtx bitshift = operands[2];
  rtx shift;
  rtx insn;
  HOST_WIDE_INT bitshift_val;
  HOST_WIDE_INT byteshift_val;

  if (! CONSTANT_P (bitshift))
    FAIL;
  bitshift_val = INTVAL (bitshift);
  if (bitshift_val & 0x7)
    FAIL;
  byteshift_val = bitshift_val >> 3;
  if (TARGET_VSX && (byteshift_val & 0x3) == 0)
    {
      shift = gen_rtx_CONST_INT (QImode, byteshift_val >> 2);
      insn = gen_vsx_xxsldwi_<mode> (operands[0], operands[1], operands[1],
                                     shift);
    }
  else
    {
      shift = gen_rtx_CONST_INT (QImode, byteshift_val);
      insn = gen_altivec_vsldoi_<mode> (operands[0], operands[1], operands[1],
                                        shift);
    }

  emit_insn (insn);
  DONE;
}")

;; Vector shift right in bits.  Currently supported only for shift
;; amounts that can be expressed as byte shifts (divisible by 8).
;; General shift amounts can be supported using vsro + vsr.  We're
;; not expecting to see these yet (the vectorizer currently
;; generates only shifts divisible by byte_size).
(define_expand "vec_shr_<mode>"
  [(match_operand:VEC_L 0 "vlogical_operand" "")
   (match_operand:VEC_L 1 "vlogical_operand" "")
   (match_operand:QI 2 "reg_or_short_operand" "")]
  "TARGET_ALTIVEC"
  "
{
  rtx bitshift = operands[2];
  rtx shift;
  rtx insn;
  HOST_WIDE_INT bitshift_val;
  HOST_WIDE_INT byteshift_val;

  if (! CONSTANT_P (bitshift))
    FAIL;
  bitshift_val = INTVAL (bitshift);
  if (bitshift_val & 0x7)
    FAIL;
  byteshift_val = 16 - (bitshift_val >> 3);
  if (TARGET_VSX && (byteshift_val & 0x3) == 0)
    {
      shift = gen_rtx_CONST_INT (QImode, byteshift_val >> 2);
      insn = gen_vsx_xxsldwi_<mode> (operands[0], operands[1], operands[1],
                                     shift);
    }
  else
    {
      shift = gen_rtx_CONST_INT (QImode, byteshift_val);
      insn = gen_altivec_vsldoi_<mode> (operands[0], operands[1], operands[1],
                                        shift);
    }

  emit_insn (insn);
  DONE;
}")

;; Expanders for rotating each element in a vector
(define_expand "vrotl<mode>3"
  [(set (match_operand:VEC_I 0 "vint_operand" "")
        (rotate:VEC_I (match_operand:VEC_I 1 "vint_operand" "")
                      (match_operand:VEC_I 2 "vint_operand" "")))]
  "TARGET_ALTIVEC"
  "")

;; Expanders for arithmetic shift left on each vector element
(define_expand "vashl<mode>3"
  [(set (match_operand:VEC_I 0 "vint_operand" "")
        (ashift:VEC_I (match_operand:VEC_I 1 "vint_operand" "")
                      (match_operand:VEC_I 2 "vint_operand" "")))]
  "TARGET_ALTIVEC"
  "")

;; Expanders for logical shift right on each vector element
(define_expand "vlshr<mode>3"
  [(set (match_operand:VEC_I 0 "vint_operand" "")
        (lshiftrt:VEC_I (match_operand:VEC_I 1 "vint_operand" "")
                        (match_operand:VEC_I 2 "vint_operand" "")))]
  "TARGET_ALTIVEC"
  "")

;; Expanders for arithmetic shift right on each vector element
(define_expand "vashr<mode>3"
  [(set (match_operand:VEC_I 0 "vint_operand" "")
        (ashiftrt:VEC_I (match_operand:VEC_I 1 "vint_operand" "")
                        (match_operand:VEC_I 2 "vint_operand" "")))]
  "TARGET_ALTIVEC"
  "")

;;; Expanders for vector insn patterns shared between the SPE and TARGET_PAIRED systems.

(define_expand "absv2sf2"
  [(set (match_operand:V2SF 0 "gpc_reg_operand" "")
        (abs:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "")))]
  "TARGET_PAIRED_FLOAT || TARGET_SPE"
  "")

(define_expand "negv2sf2"
  [(set (match_operand:V2SF 0 "gpc_reg_operand" "")
        (neg:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "")))]
  "TARGET_PAIRED_FLOAT || TARGET_SPE"
  "")

(define_expand "addv2sf3"
  [(set (match_operand:V2SF 0 "gpc_reg_operand" "")
        (plus:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "")
                   (match_operand:V2SF 2 "gpc_reg_operand" "")))]
  "TARGET_PAIRED_FLOAT || TARGET_SPE"
  "
{
  if (TARGET_SPE)
    {
      /* We need to make a note that we clobber SPEFSCR.  */
      rtx par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (2));

      XVECEXP (par, 0, 0) = gen_rtx_SET (VOIDmode, operands[0],
                                         gen_rtx_PLUS (V2SFmode, operands[1], operands[2]));
      XVECEXP (par, 0, 1) = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, SPEFSCR_REGNO));
      emit_insn (par);
      DONE;
    }
}")

(define_expand "subv2sf3"
  [(set (match_operand:V2SF 0 "gpc_reg_operand" "")
        (minus:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "")
                    (match_operand:V2SF 2 "gpc_reg_operand" "")))]
  "TARGET_PAIRED_FLOAT || TARGET_SPE"
  "
{
  if (TARGET_SPE)
    {
      /* We need to make a note that we clobber SPEFSCR.  */
      rtx par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (2));

      XVECEXP (par, 0, 0) = gen_rtx_SET (VOIDmode, operands[0],
                                         gen_rtx_MINUS (V2SFmode, operands[1], operands[2]));
      XVECEXP (par, 0, 1) = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, SPEFSCR_REGNO));
      emit_insn (par);
      DONE;
    }
}")

(define_expand "mulv2sf3"
  [(set (match_operand:V2SF 0 "gpc_reg_operand" "")
        (mult:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "")
                   (match_operand:V2SF 2 "gpc_reg_operand" "")))]
  "TARGET_PAIRED_FLOAT || TARGET_SPE"
  "
{
  if (TARGET_SPE)
    {
      /* We need to make a note that we clobber SPEFSCR.  */
      rtx par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (2));

      XVECEXP (par, 0, 0) = gen_rtx_SET (VOIDmode, operands[0],
                                         gen_rtx_MULT (V2SFmode, operands[1], operands[2]));
      XVECEXP (par, 0, 1) = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, SPEFSCR_REGNO));
      emit_insn (par);
      DONE;
    }
}")

(define_expand "divv2sf3"
  [(set (match_operand:V2SF 0 "gpc_reg_operand" "")
        (div:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "")
                  (match_operand:V2SF 2 "gpc_reg_operand" "")))]
  "TARGET_PAIRED_FLOAT || TARGET_SPE"
  "
{
  if (TARGET_SPE)
    {
      /* We need to make a note that we clobber SPEFSCR.  */
      rtx par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (2));

      XVECEXP (par, 0, 0) = gen_rtx_SET (VOIDmode, operands[0],
                                         gen_rtx_DIV (V2SFmode, operands[1], operands[2]));
      XVECEXP (par, 0, 1) = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, SPEFSCR_REGNO));
      emit_insn (par);
      DONE;
    }
}")