;; Machine description for the Motorola MCore
;; Copyright (C) 1993-2020 Free Software Foundation, Inc.
;; Contributed by Motorola.

;; This file is part of GCC.

;; GCC is free software; you can redistribute it and/or modify
;; it under the terms of the GNU General Public License as published by
;; the Free Software Foundation; either version 3, or (at your option)
;; any later version.

;; GCC is distributed in the hope that it will be useful,
;; but WITHOUT ANY WARRANTY; without even the implied warranty of
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
;; GNU General Public License for more details.

;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING3.  If not see
;; <http://www.gnu.org/licenses/>.

;;- See file "rtl.def" for documentation on define_insn, match_*, et. al.



;; -------------------------------------------------------------------------
;; Attributes
;; -------------------------------------------------------------------------

; Target CPU.

;; Instruction classification; "alu" is the default for any pattern that
;; does not set its own "type".
(define_attr "type" "brcond,branch,jmp,load,store,move,alu,shift"
  (const_string "alu"))

;; If a branch destination is within -2048..2047 bytes away from the
;; instruction it can be 2 bytes long.  All other conditional branches
;; are 10 bytes long, and all other unconditional branches are 8 bytes.
;;
;; the assembler handles the long-branch span case for us if we use
;; the "jb*" mnemonics for jumps/branches.  This pushes the span
;; calculations and the literal table placement into the assembler,
;; where their interactions can be managed in a single place.

;; All MCORE instructions are two bytes long.

(define_attr "length" "" (const_int 2))

;; Scheduling.  We only model a simple load latency.
;; Single-issue model: everything takes 1 cycle except loads, which take 2.
(define_insn_reservation "any_insn" 1
			 (eq_attr "type" "!load")
			 "nothing")
(define_insn_reservation "memory" 2
			 (eq_attr "type" "load")
			 "nothing")

(include "predicates.md")
(include "constraints.md")

;; -------------------------------------------------------------------------
;; Test and bit test
;; -------------------------------------------------------------------------

;; NOTE: hard register 17 is the condition-code register (C bit)
;; throughout this file.

;; Extract a single bit into the condition register (sign_extract form).
(define_insn ""
  [(set (reg:SI 17)
	(sign_extract:SI (match_operand:SI 0 "mcore_arith_reg_operand" "r")
			 (const_int 1)
			 (match_operand:SI 1 "mcore_literal_K_operand" "K")))]
  ""
  "btsti %0,%1"
  [(set_attr "type" "shift")])

;; Same as above, but the zero_extract form of the bit test.
(define_insn ""
  [(set (reg:SI 17)
	(zero_extract:SI (match_operand:SI 0 "mcore_arith_reg_operand" "r")
			 (const_int 1)
			 (match_operand:SI 1 "mcore_literal_K_operand" "K")))]
  ""
  "btsti %0,%1"
  [(set_attr "type" "shift")])

;;; This is created by combine.
(define_insn ""
  [(set (reg:CC 17)
	(ne:CC (zero_extract:SI (match_operand:SI 0 "mcore_arith_reg_operand" "r")
				(const_int 1)
				(match_operand:SI 1 "mcore_literal_K_operand" "K"))
	       (const_int 0)))]
  ""
  "btsti %0,%1"
  [(set_attr "type" "shift")])


;; Created by combine from conditional patterns below (see sextb/btsti rx,31)

;; Testing the top bit of a QImode subreg: a logical shift right by 7
;; leaves only bit 7, so "(x >> 7) != 0" is just a test of bit 7.
(define_insn ""
  [(set (reg:CC 17)
	(ne:CC (lshiftrt:SI (match_operand:SI 0 "mcore_arith_reg_operand" "r")
			    (const_int 7))
	       (const_int 0)))]
  "GET_CODE(operands[0]) == SUBREG &&
      GET_MODE(SUBREG_REG(operands[0])) == QImode"
  "btsti %0,7"
  [(set_attr "type" "shift")])

;; Same idea for the top bit of a HImode subreg (bit 15).
(define_insn ""
  [(set (reg:CC 17)
	(ne:CC (lshiftrt:SI (match_operand:SI 0 "mcore_arith_reg_operand" "r")
			    (const_int 15))
	       (const_int 0)))]
  "GET_CODE(operands[0]) == SUBREG &&
      GET_MODE(SUBREG_REG(operands[0])) == HImode"
  "btsti %0,15"
  [(set_attr "type" "shift")])

;; Split a branch on "(bit != 0) == 0" (i.e. branch if bit clear) into a
;; bit test followed by a branch-on-false.
(define_split
  [(set (pc)
	(if_then_else (ne (eq:CC (zero_extract:SI
				  (match_operand:SI 0 "mcore_arith_reg_operand" "")
				  (const_int 1)
				  (match_operand:SI 1 "mcore_literal_K_operand" ""))
				 (const_int 0))
			  (const_int 0))
		      (label_ref (match_operand 2 "" ""))
		      (pc)))]
  ""
  [(set (reg:CC 17)
	(zero_extract:SI (match_dup 0) (const_int 1) (match_dup 1)))
   (set (pc) (if_then_else (eq (reg:CC 17) (const_int 0))
			   (label_ref (match_dup 2))
			   (pc)))]
  "")

;; The dual of the split above: branch on "(bit == 0) != 0".
(define_split
  [(set (pc)
	(if_then_else (eq (ne:CC (zero_extract:SI
				  (match_operand:SI 0 "mcore_arith_reg_operand" "")
				  (const_int 1)
				  (match_operand:SI 1 "mcore_literal_K_operand" ""))
				 (const_int 0))
			  (const_int 0))
		      (label_ref (match_operand 2 "" ""))
		      (pc)))]
  ""
  [(set (reg:CC 17)
	(zero_extract:SI (match_dup 0) (const_int 1) (match_dup 1)))
   (set (pc) (if_then_else (eq (reg:CC 17) (const_int 0))
			   (label_ref (match_dup 2))
			   (pc)))]
  "")

;; XXX - disabled by nickc because it fails on libiberty/fnmatch.c
;;
;; ; Experimental - relax immediates for and, andn, or, and tst to allow
;; ; any immediate value (or an immediate at all -- or, andn, & tst).
;; ; This is done to allow bit field masks to fold together in combine.
;; ; The reload phase will force the immediate into a register at the
;; ; very end.  This helps in some cases, but hurts in others: we'd
;; ; really like to cse these immediates.  However, there is a phase
;; ; ordering problem here.  cse picks up individual masks and cse's
;; ; those, but not folded masks (cse happens before combine).  It's
;; ; not clear what the best solution is because we really want cse
;; ; before combine (leaving the bit field masks alone).  To pick up
;; ; relaxed immediates use -mrelax-immediates.  It might take some
;; ; experimenting to see which does better (i.e. regular imms vs.
;; ; arbitrary imms) for a particular code.  BRC
;;
;; (define_insn ""
;;   [(set (reg:CC 17)
;; 	(ne:CC (and:SI (match_operand:SI 0 "mcore_arith_reg_operand" "r")
;; 		       (match_operand:SI 1 "mcore_arith_any_imm_operand" "rI"))
;; 	       (const_int 0)))]
;;   "TARGET_RELAX_IMM"
;;   "tst	%0,%1")
;;
;; (define_insn ""
;;   [(set (reg:CC 17)
;; 	(ne:CC (and:SI (match_operand:SI 0 "mcore_arith_reg_operand" "r")
;; 		       (match_operand:SI 1 "mcore_arith_M_operand" "r"))
;; 	       (const_int 0)))]
;;   "!TARGET_RELAX_IMM"
;;   "tst	%0,%1")

;; Set C if (op0 & op1) is non-zero.
(define_insn ""
  [(set (reg:CC 17)
	(ne:CC (and:SI (match_operand:SI 0 "mcore_arith_reg_operand" "r")
		       (match_operand:SI 1 "mcore_arith_M_operand" "r"))
	       (const_int 0)))]
  ""
  "tst %0,%1")


;; Break a doubly-nested unsigned comparison into two simple
;; condition-setting insns; the final C value comes from the leu.
(define_split
  [(parallel[
     (set (reg:CC 17)
	  (ne:CC (ne:SI (leu:CC (match_operand:SI 0 "mcore_arith_reg_operand" "")
				(match_operand:SI 1 "mcore_arith_reg_operand" ""))
			(const_int 0))
		 (const_int 0)))
     (clobber (match_operand:CC 2 "mcore_arith_reg_operand" ""))])]
  ""
  [(set (reg:CC 17) (ne:SI (match_dup 0) (const_int 0)))
   (set (reg:CC 17) (leu:CC (match_dup 0) (match_dup 1)))])

;; -------------------------------------------------------------------------
;; SImode signed integer comparisons
;; -------------------------------------------------------------------------

;; Decrement and set C if the decremented value is non-zero
;; (single "decne" instruction).
(define_insn "decne_t"
  [(set (reg:CC 17) (ne:CC (plus:SI (match_operand:SI 0 "mcore_arith_reg_operand" "+r")
				    (const_int -1))
			   (const_int 0)))
   (set (match_dup 0)
	(plus:SI (match_dup 0)
		 (const_int -1)))]
  ""
  "decne %0")

;; The combiner seems to prefer the following to the former.
;;
;; Equivalent to decne_t in the form combine prefers:
;; "(x != 1)" before the decrement equals "(x - 1 != 0)" after it.
(define_insn ""
  [(set (reg:CC 17) (ne:CC (match_operand:SI 0 "mcore_arith_reg_operand" "+r")
			   (const_int 1)))
   (set (match_dup 0)
	(plus:SI (match_dup 0)
		 (const_int -1)))]
  ""
  "decne %0")

(define_insn "cmpnesi_t"
  [(set (reg:CC 17) (ne:CC (match_operand:SI 0 "mcore_arith_reg_operand" "r")
			   (match_operand:SI 1 "mcore_arith_reg_operand" "r")))]
  ""
  "cmpne %0,%1")

(define_insn "cmpneisi_t"
  [(set (reg:CC 17) (ne:CC (match_operand:SI 0 "mcore_arith_reg_operand" "r")
			   (match_operand:SI 1 "mcore_arith_K_operand" "K")))]
  ""
  "cmpnei %0,%1")

;; There is no cmpgt instruction; swap the operands of cmplt instead.
(define_insn "cmpgtsi_t"
  [(set (reg:CC 17) (gt:CC (match_operand:SI 0 "mcore_arith_reg_operand" "r")
			   (match_operand:SI 1 "mcore_arith_reg_operand" "r")))]
  ""
  "cmplt %1,%0")

;; Decrement and set C if the decremented value is > 0.
(define_insn ""
  [(set (reg:CC 17) (gt:CC (plus:SI
			    (match_operand:SI 0 "mcore_arith_reg_operand" "+r")
			    (const_int -1))
			   (const_int 0)))
   (set (match_dup 0) (plus:SI (match_dup 0) (const_int -1)))]
  ""
  "decgt %0")

(define_insn "cmpltsi_t"
  [(set (reg:CC 17) (lt:CC (match_operand:SI 0 "mcore_arith_reg_operand" "r")
			   (match_operand:SI 1 "mcore_arith_reg_operand" "r")))]
  ""
  "cmplt %0,%1")

; cmplti is 1-32
(define_insn "cmpltisi_t"
  [(set (reg:CC 17) (lt:CC (match_operand:SI 0 "mcore_arith_reg_operand" "r")
			   (match_operand:SI 1 "mcore_arith_J_operand" "J")))]
  ""
  "cmplti %0,%1")

; covers cmplti x,0
;; "x < 0" for signed x is just its sign bit, so test bit 31.
(define_insn ""
  [(set (reg:CC 17) (lt:CC (match_operand:SI 0 "mcore_arith_reg_operand" "r")
			   (const_int 0)))]
  ""
  "btsti %0,31")

;; Decrement and set C if the decremented value is < 0.
(define_insn ""
  [(set (reg:CC 17) (lt:CC (plus:SI
			    (match_operand:SI 0 "mcore_arith_reg_operand" "+r")
			    (const_int -1))
			   (const_int 0)))
   (set (match_dup 0) (plus:SI (match_dup 0) (const_int -1)))]
  ""
  "declt %0")

;; -------------------------------------------------------------------------
;; SImode unsigned integer comparisons
;; -------------------------------------------------------------------------

(define_insn "cmpgeusi_t"
  [(set (reg:CC 17) (geu:CC (match_operand:SI 0 "mcore_arith_reg_operand" "r")
			    (match_operand:SI 1 "mcore_arith_reg_operand" "r")))]
  ""
  "cmphs %0,%1")

;; NOTE(review): "x >=u 0" is always true, yet this emits cmpnei which
;; sets C to (x != 0).  Presumably callers only reach this pattern in
;; contexts where that is the desired result -- confirm before relying
;; on it for a plain geu-with-zero comparison.
(define_insn "cmpgeusi_0"
  [(set (reg:CC 17) (geu:CC (match_operand:SI 0 "mcore_arith_reg_operand" "r")
			    (const_int 0)))]
  ""
  "cmpnei %0, 0")

;; No cmpls instruction; swap the operands of cmphs instead.
(define_insn "cmpleusi_t"
  [(set (reg:CC 17) (leu:CC (match_operand:SI 0 "mcore_arith_reg_operand" "r")
			    (match_operand:SI 1 "mcore_arith_reg_operand" "r")))]
  ""
  "cmphs %1,%0")

;; -------------------------------------------------------------------------
;; Logical operations
;; -------------------------------------------------------------------------

;; Logical AND clearing a single bit.  andsi3 knows that we have this
;; pattern and allows the constant literal pass through.
;;

;; RBE 2/97: don't need this pattern any longer...
;; RBE: I don't think we need both "S" and exact_log2() clauses.
;;(define_insn ""
;;  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
;;	(and:SI (match_operand:SI 1 "mcore_arith_reg_operand" "%0")
;;		(match_operand:SI 2 "const_int_operand" "S")))]
;;  "mcore_arith_S_operand (operands[2])"
;;  "bclri %0,%Q2")
;;

;; AND-NOT: op0 = ~op1 & op2 (op2 tied to the destination).
(define_insn "andnsi3"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
	(and:SI (not:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r"))
		(match_operand:SI 2 "mcore_arith_reg_operand" "0")))]
  ""
  "andn %0,%1")

;; AND expander: negative masks whose complement is a cheap constant are
;; rewritten as andn; anything else not matching K/S gets forced to a reg.
(define_expand "andsi3"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
	(and:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
		(match_operand:SI 2 "nonmemory_operand" "")))]
  ""
  "
{
  if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) < 0
      && ! mcore_arith_S_operand (operands[2]))
    {
      HOST_WIDE_INT not_value = ~ INTVAL (operands[2]);

      if ( CONST_OK_FOR_I (not_value)
	  || CONST_OK_FOR_M (not_value)
	  || CONST_OK_FOR_N (not_value))
	{
	  operands[2] = copy_to_mode_reg (SImode, GEN_INT (not_value));
	  emit_insn (gen_andnsi3 (operands[0], operands[2], operands[1]));
	  DONE;
	}
    }

  if (! mcore_arith_K_S_operand (operands[2], SImode))
    operands[2] = copy_to_mode_reg (SImode, operands[2]);
}")

;; AND insn when relaxed immediates are enabled.
(define_insn ""
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
	(and:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0,0,r,0")
		(match_operand:SI 2 "mcore_arith_any_imm_operand" "r,K,0,S")))]
  "TARGET_RELAX_IMM"
  "*
{
   switch (which_alternative)
     {
     case 0: return \"and	%0,%2\";
     case 1: return \"andi	%0,%2\";
     case 2: return \"and	%0,%1\";
     /* case -1: return \"bclri	%0,%Q2\";	 will not happen */
     case 3: return mcore_output_bclri (operands[0], INTVAL (operands[2]));
     default: gcc_unreachable ();
     }
}")

;; This was the old "S" which was "!(2^n)" */
;; case -1: return \"bclri	%0,%Q2\";	 will not happen */

;; AND insn for the default (non-relaxed) immediate set.
(define_insn ""
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
	(and:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0,0,r,0")
		(match_operand:SI 2 "mcore_arith_K_S_operand" "r,K,0,S")))]
  "!TARGET_RELAX_IMM"
  "*
{
   switch (which_alternative)
     {
     case 0: return \"and	%0,%2\";
     case 1: return \"andi	%0,%2\";
     case 2: return \"and	%0,%1\";
     case 3: return mcore_output_bclri (operands[0], INTVAL (operands[2]));
     default: gcc_unreachable ();
     }
}")

;(define_insn "iorsi3"
;  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
;	(ior:SI (match_operand:SI 1 "mcore_arith_reg_operand" "%0")
;		(match_operand:SI 2 "mcore_arith_reg_operand" "r")))]
;  ""
;  "or	%0,%2")

; need an expand to resolve ambiguity betw. the two iors below.
(define_expand "iorsi3"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
	(ior:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
		(match_operand:SI 2 "nonmemory_operand" "")))]
  ""
  "
{
   if (! mcore_arith_M_operand (operands[2], SImode))
      operands[2] = copy_to_mode_reg (SImode, operands[2]);
}")

;; IOR insn when relaxed immediates are enabled.
(define_insn ""
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r")
	(ior:SI (match_operand:SI 1 "mcore_arith_reg_operand" "%0,0,0")
		(match_operand:SI 2 "mcore_arith_any_imm_operand" "r,M,T")))]
  "TARGET_RELAX_IMM"
  "*
{
   switch (which_alternative)
     {
     case 0: return \"or	%0,%2\";
     case 1: return \"bseti	%0,%P2\";
     case 2: return mcore_output_bseti (operands[0], INTVAL (operands[2]));
     default: gcc_unreachable ();
     }
}")

;; IOR insn for the default (non-relaxed) immediate set.
(define_insn ""
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r")
	(ior:SI (match_operand:SI 1 "mcore_arith_reg_operand" "%0,0,0")
		(match_operand:SI 2 "mcore_arith_M_operand" "r,M,T")))]
  "!TARGET_RELAX_IMM"
  "*
{
   switch (which_alternative)
     {
     case 0: return \"or	%0,%2\";
     case 1: return \"bseti	%0,%P2\";
     case 2: return mcore_output_bseti (operands[0], INTVAL (operands[2]));
     default: gcc_unreachable ();
     }
}")

;(define_insn ""
;  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
;	(ior:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0")
;		(match_operand:SI 2 "const_int_operand" "M")))]
;  "exact_log2 (INTVAL (operands[2])) >= 0"
;  "bseti	%0,%P2")

;(define_insn ""
;  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
;	(ior:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0")
;		(match_operand:SI 2 "const_int_operand" "i")))]
;  "mcore_num_ones (INTVAL (operands[2])) < 3"
;  "* return mcore_output_bseti (operands[0], INTVAL (operands[2]));")

(define_insn "xorsi3"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
	(xor:SI (match_operand:SI 1 "mcore_arith_reg_operand" "%0")
		(match_operand:SI 2 "mcore_arith_reg_operand" "r")))]
  ""
  "xor %0,%2")

;; -------------------------------------------------------------------------
;; Shifts and rotates
;; -------------------------------------------------------------------------

;; Only allow these if the shift count is a convenient constant.
(define_expand "rotlsi3"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
	(rotate:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
		   (match_operand:SI 2 "nonmemory_operand" "")))]
  ""
  "if (! mcore_literal_K_operand (operands[2], SImode))
     FAIL;
  ")

;; We can only do constant rotates, which is what this pattern provides.
;; The combiner will put it together for us when we do:
;;	(x << N) | (x >> (32 - N))
(define_insn ""
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
	(rotate:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0")
		   (match_operand:SI 2 "mcore_literal_K_operand" "K")))]
  ""
  "rotli %0,%2"
  [(set_attr "type" "shift")])

(define_insn "ashlsi3"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r")
	(ashift:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0,0")
		   (match_operand:SI 2 "mcore_arith_K_operand_not_0" "r,K")))]
  ""
  "@
	lsl	%0,%2
	lsli	%0,%2"
  [(set_attr "type" "shift")])

;; Generate a single-bit mask: op0 = 1 << op1, in one bgenr instruction.
(define_insn ""
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
	(ashift:SI (const_int 1)
		   (match_operand:SI 1 "mcore_arith_reg_operand" "r")))]
  ""
  "bgenr	%0,%1"
  [(set_attr "type" "shift")])

(define_insn "ashrsi3"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r")
	(ashiftrt:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0,0")
		     (match_operand:SI 2 "mcore_arith_K_operand_not_0" "r,K")))]
  ""
  "@
	asr	%0,%2
	asri	%0,%2"
  [(set_attr "type" "shift")])

(define_insn "lshrsi3"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r")
	(lshiftrt:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0,0")
		     (match_operand:SI 2 "mcore_arith_K_operand_not_0" "r,K")))]
  ""
  "@
	lsr	%0,%2
	lsri	%0,%2"
  [(set_attr "type" "shift")])

;(define_expand "ashldi3"
;  [(parallel[(set (match_operand:DI 0 "mcore_arith_reg_operand" "")
;		  (ashift:DI (match_operand:DI 1 "mcore_arith_reg_operand" "")
;			     (match_operand:DI 2 "immediate_operand" "")))
;
;	     (clobber (reg:CC 17))])]
;
;  ""
;  "
;{
;  if (GET_CODE (operands[2]) != CONST_INT
;      || INTVAL (operands[2]) != 1)
;    FAIL;
;}")
;
;(define_insn ""
;  [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=r")
;	(ashift:DI (match_operand:DI 1 "mcore_arith_reg_operand" "0")
;		   (const_int 1)))
;   (clobber (reg:CC 17))]
;  ""
;  "lsli	%R0,0\;rotli	%0,0"
;  [(set_attr "length" "4") (set_attr "type" "shift")])

;; -------------------------------------------------------------------------
;; Index instructions
;; -------------------------------------------------------------------------
;; The second of each set of patterns is borrowed from the alpha.md file.
;; These variants of the above insns can occur if the second operand
;; is the frame pointer.  This is a kludge, but there doesn't
;; seem to be a way around it.  Only recognize them while reloading.

;; We must use reload_operand for some operands in case frame pointer
;; elimination put a MEM with invalid address there.  Otherwise,
;; the result of the substitution will not match this pattern, and reload
;; will not be able to correctly fix the result.

;; indexing longlongs or doubles (8 bytes)

;; op0 = op1 * 8 + op2, built from two scale-by-4 (ixw) steps.  When the
;; source and accumulator are the same register the ixw/ixw sequence would
;; scale the wrong value, so fall back to two scale-by-2 (ixh) steps --
;; see the historical note from dac below.
(define_insn "indexdi_t"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
	(plus:SI (mult:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r")
			  (const_int 8))
		 (match_operand:SI 2 "mcore_arith_reg_operand" "0")))]
  ""
  "*
    if (! mcore_is_same_reg (operands[1], operands[2]))
      {
        output_asm_insn (\"ixw\\t%0,%1\", operands);
        output_asm_insn (\"ixw\\t%0,%1\", operands);
      }
    else
      {
        output_asm_insn (\"ixh\\t%0,%1\", operands);
        output_asm_insn (\"ixh\\t%0,%1\", operands);
      }
    return \"\";
  "
;; if operands[1] == operands[2], the first option above is wrong! -- dac
;; was this... -- dac
;; ixw	%0,%1\;ixw	%0,%1"

  [(set_attr "length" "4")])

;; Reload-only variant of indexdi_t with an extra add of a register,
;; small positive (addi) or small negative (subi) constant.
(define_insn ""
  [(set (match_operand:SI 0 "mcore_reload_operand" "=r,r,r")
	(plus:SI (plus:SI (mult:SI (match_operand:SI 1 "mcore_reload_operand" "r,r,r")
				   (const_int 8))
			  (match_operand:SI 2 "mcore_arith_reg_operand" "0,0,0"))
		 (match_operand:SI 3 "mcore_addsub_operand" "r,J,L")))]
  "reload_in_progress"
  "@
	ixw	%0,%1\;ixw	%0,%1\;addu	%0,%3
	ixw	%0,%1\;ixw	%0,%1\;addi	%0,%3
	ixw	%0,%1\;ixw	%0,%1\;subi	%0,%M3"
  [(set_attr "length" "6")])

;; indexing longs (4 bytes)

(define_insn "indexsi_t"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
	(plus:SI (mult:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r")
			  (const_int 4))
		 (match_operand:SI 2 "mcore_arith_reg_operand" "0")))]
  ""
  "ixw	%0,%1")

;; Reload-only variant of indexsi_t with an extra add (see above).
(define_insn ""
  [(set (match_operand:SI 0 "mcore_reload_operand" "=r,r,r")
	(plus:SI (plus:SI (mult:SI (match_operand:SI 1 "mcore_reload_operand" "r,r,r")
				   (const_int 4))
			  (match_operand:SI 2 "mcore_arith_reg_operand" "0,0,0"))
		 (match_operand:SI 3 "mcore_addsub_operand" "r,J,L")))]
  "reload_in_progress"
  "@
	ixw	%0,%1\;addu	%0,%3
	ixw	%0,%1\;addi	%0,%3
	ixw	%0,%1\;subi	%0,%M3"
  [(set_attr "length" "4")])

;; indexing shorts (2 bytes)

(define_insn "indexhi_t"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
	(plus:SI (mult:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r")
			  (const_int 2))
		 (match_operand:SI 2 "mcore_arith_reg_operand" "0")))]
  ""
  "ixh	%0,%1")

;; Reload-only variant of indexhi_t with an extra add (see above).
(define_insn ""
  [(set (match_operand:SI 0 "mcore_reload_operand" "=r,r,r")
	(plus:SI (plus:SI (mult:SI (match_operand:SI 1 "mcore_reload_operand" "r,r,r")
				   (const_int 2))
			  (match_operand:SI 2 "mcore_arith_reg_operand" "0,0,0"))
		 (match_operand:SI 3 "mcore_addsub_operand" "r,J,L")))]
  "reload_in_progress"
  "@
	ixh	%0,%1\;addu	%0,%3
	ixh	%0,%1\;addi	%0,%3
	ixh	%0,%1\;subi	%0,%M3"
  [(set_attr "length" "4")])

;;
;; Other sizes may be handy for indexing.
;; the tradeoffs to consider when adding these are
;;	code size, execution time [vs. mul it is easy to win],
;;	and register pressure -- these patterns don't use an extra
;;	register to build the offset from the base
;;	and whether the compiler will not come up with some other idiom.
;;

;; -------------------------------------------------------------------------
;; Addition, Subtraction instructions
;; -------------------------------------------------------------------------

(define_expand "addsi3"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
	(plus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
		 (match_operand:SI 2 "nonmemory_operand" "")))]
  ""
  "
{
  /* If this is an add to the frame pointer, then accept it as is so
     that we can later fold in the fp/sp offset from frame pointer
     elimination.  */
  if (flag_omit_frame_pointer
      && GET_CODE (operands[1]) == REG
      && (REGNO (operands[1]) == VIRTUAL_STACK_VARS_REGNUM
	  || REGNO (operands[1]) == FRAME_POINTER_REGNUM))
    {
      emit_insn (gen_addsi3_fp (operands[0], operands[1], operands[2]));
      DONE;
    }

  /* Convert adds to subtracts if this makes loading the constant cheaper.
     But only if we are allowed to generate new pseudos.  */
  if (! (reload_in_progress || reload_completed)
      && GET_CODE (operands[2]) == CONST_INT
      && INTVAL (operands[2]) < -32)
    {
      HOST_WIDE_INT neg_value = - INTVAL (operands[2]);

      if ( CONST_OK_FOR_I (neg_value)
	  || CONST_OK_FOR_M (neg_value)
	  || CONST_OK_FOR_N (neg_value))
	{
	  operands[2] = copy_to_mode_reg (SImode, GEN_INT (neg_value));
	  emit_insn (gen_subsi3 (operands[0], operands[1], operands[2]));
	  DONE;
	}
    }

  if (! mcore_addsub_operand (operands[2], SImode))
    operands[2] = copy_to_mode_reg (SImode, operands[2]);
}")

;; RBE: for some constants which are not in the range which allows
;; us to do a single operation, we will try a paired addi/addi instead
;; of a movi/addi. This relieves some register pressure at the expense
;; of giving away some potential constant reuse.
;;
;; RBE 6/17/97: this didn't buy us anything, but I keep the pattern
;; for later reference
;;
;; (define_insn "addsi3_i2"
;;   [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
;; 	(plus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "%0")
;; 		 (match_operand:SI 2 "const_int_operand" "g")))]
;;   "GET_CODE(operands[2]) == CONST_INT
;;    && ((INTVAL (operands[2]) > 32 && INTVAL(operands[2]) <= 64)
;;        || (INTVAL (operands[2]) < -32 && INTVAL(operands[2]) >= -64))"
;;   "*
;; {
;;    HOST_WIDE_INT n = INTVAL(operands[2]);
;;    if (n > 0)
;;      {
;;        operands[2] = GEN_INT(n - 32);
;;        return \"addi\\t%0,32\;addi\\t%0,%2\";
;;      }
;;    else
;;      {
;;        n = (-n);
;;        operands[2] = GEN_INT(n - 32);
;;        return \"subi\\t%0,32\;subi\\t%0,%2\";
;;      }
;; }"
;;  [(set_attr "length" "4")])

;; Three-alternative add: register (addu), small positive immediate
;; (addi), or small negative immediate expressed as subi of the negation.
(define_insn "addsi3_i"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r")
	(plus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "%0,0,0")
		 (match_operand:SI 2 "mcore_addsub_operand" "r,J,L")))]
  ""
  "@
	addu	%0,%2
	addi	%0,%2
	subi	%0,%M2")

;; This exists so that address computations based on the frame pointer
;; can be folded in when frame pointer elimination occurs.  Ordinarily
;; this would be bad because it allows insns which would require reloading,
;; but without it, we get multiple adds where one would do.

(define_insn "addsi3_fp"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r")
	(plus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "%0,0,0")
		 (match_operand:SI 2 "immediate_operand" "r,J,L")))]
  "flag_omit_frame_pointer
   && (reload_in_progress || reload_completed || REGNO (operands[1]) == FRAME_POINTER_REGNUM)"
  "@
	addu	%0,%2
	addi	%0,%2
	subi	%0,%M2")

;; RBE: for some constants which are not in the range which allows
;; us to do a single operation, we will try a paired addi/addi instead
;; of a movi/addi. This relieves some register pressure at the expense
;; of giving away some potential constant reuse.
;;
;; RBE 6/17/97: this didn't buy us anything, but I keep the pattern
;; for later reference
;;
;; (define_insn "subsi3_i2"
;;   [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
;; 	(plus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "%0")
;; 		 (match_operand:SI 2 "const_int_operand" "g")))]
;;   "TARGET_RBETEST && GET_CODE(operands[2]) == CONST_INT
;;    && ((INTVAL (operands[2]) > 32 && INTVAL(operands[2]) <= 64)
;;        || (INTVAL (operands[2]) < -32 && INTVAL(operands[2]) >= -64))"
;;   "*
;; {
;;    HOST_WIDE_INT n = INTVAL(operands[2]);
;;    if ( n > 0)
;;      {
;;        operands[2] = GEN_INT( n - 32);
;;        return \"subi\\t%0,32\;subi\\t%0,%2\";
;;      }
;;    else
;;      {
;;        n = (-n);
;;        operands[2] = GEN_INT(n - 32);
;;        return \"addi\\t%0,32\;addi\\t%0,%2\";
;;      }
;; }"
;;  [(set_attr "length" "4")])

;(define_insn "subsi3"
;  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
;	(minus:SI (match_operand:SI 1 "mcore_arith_K_operand" "0,0,r,K")
;		  (match_operand:SI 2 "mcore_arith_J_operand" "r,J,0,0")))]
;  ""
;  "@
;	sub	%0,%2
;	subi	%0,%2
;	rsub	%0,%1
;	rsubi	%0,%1")

;; Subtract: register (subu), small immediate (subi), or reverse
;; subtract when the destination is tied to the subtrahend (rsub).
(define_insn "subsi3"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r")
	(minus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0,0,r")
		  (match_operand:SI 2 "mcore_arith_J_operand" "r,J,0")))]
  ""
  "@
	subu	%0,%2
	subi	%0,%2
	rsub	%0,%1")

;; Reverse subtract with a literal minuend: op0 = K - op0.
(define_insn ""
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
	(minus:SI (match_operand:SI 1 "mcore_literal_K_operand" "K")
		  (match_operand:SI 2 "mcore_arith_reg_operand" "0")))]
  ""
  "rsubi	%0,%1")

;; 64-bit add: clear C (cmplt rx,rx), then add low and high words with
;; carry.  The word order depends on endianness; clobbers C.
(define_insn "adddi3"
  [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=&r")
	(plus:DI (match_operand:DI 1 "mcore_arith_reg_operand" "%0")
		 (match_operand:DI 2 "mcore_arith_reg_operand" "r")))
   (clobber (reg:CC 17))]
  ""
  "*
  {
    if (TARGET_LITTLE_END)
      return \"cmplt	%0,%0\;addc	%0,%2\;addc	%R0,%R2\";
    return \"cmplt	%R0,%R0\;addc	%R0,%R2\;addc	%0,%2\";
  }"
  [(set_attr "length" "6")])

;; special case for "longlong += 1"
(define_insn ""
  [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=&r")
	(plus:DI (match_operand:DI 1 "mcore_arith_reg_operand" "0")
		 (const_int 1)))
   (clobber (reg:CC 17))]
  ""
  "*
  {
   if (TARGET_LITTLE_END)
     return \"addi	%0,1\;cmpnei %0,0\;incf	%R0\";
   return \"addi	%R0,1\;cmpnei %R0,0\;incf	%0\";
  }"
  [(set_attr "length" "6")])

;; special case for "longlong -= 1"
(define_insn ""
  [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=&r")
	(plus:DI (match_operand:DI 1 "mcore_arith_reg_operand" "0")
		 (const_int -1)))
   (clobber (reg:CC 17))]
  ""
  "*
  {
    if (TARGET_LITTLE_END)
       return \"cmpnei %0,0\;decf	%R0\;subi	%0,1\";
    return \"cmpnei %R0,0\;decf	%0\;subi	%R0,1\";
  }"
  [(set_attr "length" "6")])

;; special case for "longlong += const_int"
;; we have to use a register for the const_int because we don't
;; have an unsigned compare immediate... only +/- 1 get to
;; play the no-extra register game because they compare with 0.
;; This winds up working out for any literal that is synthesized
;; with a single instruction.  The more complicated ones look
;; like the get broken into subreg's to get initialized too soon
;; for us to catch here.  -- RBE 4/25/96
;; only allow for-sure positive values.

;; DI += positive SImode constant (held in a register): add the low word,
;; detect unsigned-overflow carry via cmphs, and bump the high word.
(define_insn ""
  [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=&r")
	(plus:DI (match_operand:DI 1 "mcore_arith_reg_operand" "0")
		 (match_operand:SI 2 "const_int_operand" "r")))
   (clobber (reg:CC 17))]
  "GET_CODE (operands[2]) == CONST_INT
   && INTVAL (operands[2]) > 0 && ! (INTVAL (operands[2]) & 0x80000000)"
  "*
{
  gcc_assert (GET_MODE (operands[2]) == SImode);
  if (TARGET_LITTLE_END)
    return \"addu	%0,%2\;cmphs	%0,%2\;incf	%R0\";
  return \"addu	%R0,%2\;cmphs	%R0,%2\;incf	%0\";
}"
  [(set_attr "length" "6")])

;; optimize "long long" + "unsigned long"
;; won't trigger because of how the extension is expanded upstream.
;; (define_insn ""
;;   [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=&r")
;; 	(plus:DI (match_operand:DI 1 "mcore_arith_reg_operand" "%0")
;; 		 (zero_extend:DI (match_operand:SI 2 "mcore_arith_reg_operand" "r"))))
;;    (clobber (reg:CC 17))]
;;   "0"
;;   "cmplt	%R0,%R0\;addc	%R0,%2\;inct	%0"
;;   [(set_attr "length" "6")])

;; optimize "long long" + "signed long"
;; won't trigger because of how the extension is expanded upstream.
908;; (define_insn "" 909;; [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=&r") 910;; (plus:DI (match_operand:DI 1 "mcore_arith_reg_operand" "%0") 911;; (sign_extend:DI (match_operand:SI 2 "mcore_arith_reg_operand" "r")))) 912;; (clobber (reg:CC 17))] 913;; "0" 914;; "cmplt %R0,%R0\;addc %R0,%2\;inct %0\;btsti %2,31\;dect %0" 915;; [(set_attr "length" "6")]) 916 917(define_insn "subdi3" 918 [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=&r") 919 (minus:DI (match_operand:DI 1 "mcore_arith_reg_operand" "0") 920 (match_operand:DI 2 "mcore_arith_reg_operand" "r"))) 921 (clobber (reg:CC 17))] 922 "" 923 "* 924 { 925 if (TARGET_LITTLE_END) 926 return \"cmphs %0,%0\;subc %0,%2\;subc %R0,%R2\"; 927 return \"cmphs %R0,%R0\;subc %R0,%R2\;subc %0,%2\"; 928 }" 929 [(set_attr "length" "6")]) 930 931;; ------------------------------------------------------------------------- 932;; Multiplication instructions 933;; ------------------------------------------------------------------------- 934 935(define_insn "mulsi3" 936 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") 937 (mult:SI (match_operand:SI 1 "mcore_arith_reg_operand" "%0") 938 (match_operand:SI 2 "mcore_arith_reg_operand" "r")))] 939 "" 940 "mult %0,%2") 941 942;; 943;; 32/32 signed division -- added to the MCORE instruction set spring 1997 944;; 945;; Different constraints based on the architecture revision... 946;; 947(define_expand "divsi3" 948 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "") 949 (div:SI (match_operand:SI 1 "mcore_arith_reg_operand" "") 950 (match_operand:SI 2 "mcore_arith_reg_operand" "")))] 951 "TARGET_DIV" 952 "") 953 954;; MCORE Revision 1.50: restricts the divisor to be in r1. 
;; (6/97)
;;
;; Signed divide; the "b" constraint forces the divisor into r1 per the
;; Rev 1.50 restriction noted above.
(define_insn ""
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
	(div:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0")
		(match_operand:SI 2 "mcore_arith_reg_operand" "b")))]
  "TARGET_DIV"
  "divs %0,%2")

;;
;; 32/32 unsigned division -- added to the MCORE instruction set spring 1997
;;
;; Different constraints based on the architecture revision...
;;
(define_expand "udivsi3"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
	(udiv:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
		 (match_operand:SI 2 "mcore_arith_reg_operand" "")))]
  "TARGET_DIV"
  "")

;; MCORE Revision 1.50: restricts the divisor to be in r1.  (6/97)
(define_insn ""
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
	(udiv:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0")
		 (match_operand:SI 2 "mcore_arith_reg_operand" "b")))]
  "TARGET_DIV"
  "divu %0,%2")

;; -------------------------------------------------------------------------
;; Unary arithmetic
;; -------------------------------------------------------------------------

;; Negate via reverse-subtract from zero.
(define_insn "negsi2"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
	(neg:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0")))]
  ""
  "*
{
  return \"rsubi %0,0\";
}")


(define_insn "abssi2"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
	(abs:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0")))]
  ""
  "abs %0")

;; DImode negate: negate the low word, complement the high word, and use
;; the C bit from cmpnei to add the borrow (two's complement across words).
(define_insn "negdi2"
  [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=&r")
	(neg:DI (match_operand:DI 1 "mcore_arith_reg_operand" "0")))
   (clobber (reg:CC 17))]
  ""
  "*
{
  if (TARGET_LITTLE_END)
    return \"cmpnei %0,0\\n\\trsubi %0,0\\n\\tnot %R0\\n\\tincf %R0\";
  return \"cmpnei %R0,0\\n\\trsubi %R0,0\\n\\tnot %0\\n\\tincf %0\";
}"
  [(set_attr "length" "8")])

(define_insn "one_cmplsi2"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
	(not:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0")))]
  ""
  "not %0")

;; -------------------------------------------------------------------------
;; Zero extension instructions
;; -------------------------------------------------------------------------

(define_expand "zero_extendhisi2"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
	(zero_extend:SI (match_operand:HI 1 "mcore_arith_reg_operand" "")))]
  ""
  "")

;; Register form uses zexth; a memory source folds the extension into ld.h.
(define_insn ""
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r")
	(zero_extend:SI (match_operand:HI 1 "general_operand" "0,m")))]
  ""
  "@
   zexth %0
   ld.h %0,%1"
  [(set_attr "type" "shift,load")])

;; ldh gives us a free zero-extension.  The combiner picks up on this.
(define_insn ""
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
	(zero_extend:SI (mem:HI (match_operand:SI 1 "mcore_arith_reg_operand" "r"))))]
  ""
  "ld.h %0,(%1)"
  [(set_attr "type" "load")])

;; Base + small even displacement form of the above (ld.h offsets are
;; halfword-scaled, hence the even, 0..31 restriction).
(define_insn ""
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
	(zero_extend:SI (mem:HI (plus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r")
					 (match_operand:SI 2 "const_int_operand" "")))))]
  "(INTVAL (operands[2]) >= 0) &&
   (INTVAL (operands[2]) < 32) &&
   ((INTVAL (operands[2])&1) == 0)"
  "ld.h %0,(%1,%2)"
  [(set_attr "type" "load")])

(define_expand "zero_extendqisi2"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
	(zero_extend:SI (match_operand:QI 1 "general_operand" "")))]
  ""
  "")

;; RBE: XXX: we don't recognize that the xtrb3 kills the CC register.
;; QImode -> SImode zero-extend: zextb in place, xtrb3 for a different
;; source register, or a folded ld.b from memory.
(define_insn ""
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,b,r")
	(zero_extend:SI (match_operand:QI 1 "general_operand" "0,r,m")))]
  ""
  "@
   zextb %0
   xtrb3 %0,%1
   ld.b %0,%1"
  [(set_attr "type" "shift,shift,load")])

;; ldb gives us a free zero-extension.  The combiner picks up on this.
(define_insn ""
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
	(zero_extend:SI (mem:QI (match_operand:SI 1 "mcore_arith_reg_operand" "r"))))]
  ""
  "ld.b %0,(%1)"
  [(set_attr "type" "load")])

;; Base + small displacement form (ld.b takes byte offsets 0..15).
(define_insn ""
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
	(zero_extend:SI (mem:QI (plus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r")
					 (match_operand:SI 2 "const_int_operand" "")))))]
  "(INTVAL (operands[2]) >= 0) &&
   (INTVAL (operands[2]) < 16)"
  "ld.b %0,(%1,%2)"
  [(set_attr "type" "load")])

(define_expand "zero_extendqihi2"
  [(set (match_operand:HI 0 "mcore_arith_reg_operand" "")
	(zero_extend:HI (match_operand:QI 1 "general_operand" "")))]
  ""
  "")

;; RBE: XXX: we don't recognize that the xtrb3 kills the CC register.
(define_insn ""
  [(set (match_operand:HI 0 "mcore_arith_reg_operand" "=r,b,r")
	(zero_extend:HI (match_operand:QI 1 "general_operand" "0,r,m")))]
  ""
  "@
   zextb %0
   xtrb3 %0,%1
   ld.b %0,%1"
  [(set_attr "type" "shift,shift,load")])

;; ldb gives us a free zero-extension.  The combiner picks up on this.
;; this doesn't catch references that are into a structure.
;; note that normally the compiler uses the above insn, unless it turns
;; out that we're dealing with a volatile...
(define_insn ""
  [(set (match_operand:HI 0 "mcore_arith_reg_operand" "=r")
	(zero_extend:HI (mem:QI (match_operand:SI 1 "mcore_arith_reg_operand" "r"))))]
  ""
  "ld.b %0,(%1)"
  [(set_attr "type" "load")])

(define_insn ""
  [(set (match_operand:HI 0 "mcore_arith_reg_operand" "=r")
	(zero_extend:HI (mem:QI (plus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r")
					 (match_operand:SI 2 "const_int_operand" "")))))]
  "(INTVAL (operands[2]) >= 0) &&
   (INTVAL (operands[2]) < 16)"
  "ld.b %0,(%1,%2)"
  [(set_attr "type" "load")])


;; -------------------------------------------------------------------------
;; Sign extension instructions
;; -------------------------------------------------------------------------

;; SImode -> DImode: copy the value into the low word, then arithmetic
;; shift right by 31 to replicate the sign into the high word.  The
;; low/high subreg byte offsets depend on endianness.
(define_expand "extendsidi2"
  [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=r")
	(match_operand:SI 1 "mcore_arith_reg_operand" "r"))]
  ""
  "
  {
    int low, high;

    if (TARGET_LITTLE_END)
      low = 0, high = 4;
    else
      low = 4, high = 0;

    emit_insn (gen_rtx_SET (gen_rtx_SUBREG (SImode, operands[0], low),
			    operands[1]));
    emit_insn (gen_rtx_SET (gen_rtx_SUBREG (SImode, operands[0], high),
			    gen_rtx_ASHIFTRT (SImode,
					      gen_rtx_SUBREG (SImode, operands[0], low),
					      GEN_INT (31))));
    DONE;
  }"
)

(define_insn "extendhisi2"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
	(sign_extend:SI (match_operand:HI 1 "mcore_arith_reg_operand" "0")))]
  ""
  "sexth %0")

(define_insn "extendqisi2"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
	(sign_extend:SI (match_operand:QI 1 "mcore_arith_reg_operand" "0")))]
  ""
  "sextb %0")

(define_insn "extendqihi2"
  [(set (match_operand:HI 0 "mcore_arith_reg_operand" "=r")
	(sign_extend:HI (match_operand:QI 1 "mcore_arith_reg_operand" "0")))]
  ""
  "sextb %0")

;;
;; -------------------------------------------------------------------------
;; Move instructions
;; -------------------------------------------------------------------------

;; SImode

;; Stores require the source in a register (no mem-to-mem moves).
(define_expand "movsi"
  [(set (match_operand:SI 0 "general_operand" "")
	(match_operand:SI 1 "general_operand" ""))]
  ""
  "
{
  if (GET_CODE (operands[0]) == MEM)
    operands[1] = force_reg (SImode, operands[1]);
}")

(define_insn ""
  [(set (match_operand:SI 0 "mcore_general_movdst_operand" "=r,r,a,r,a,r,m")
	(match_operand:SI 1 "mcore_general_movsrc_operand" "r,P,i,c,R,m,r"))]
  "(register_operand (operands[0], SImode)
    || register_operand (operands[1], SImode))"
  "* return mcore_output_move (insn, operands, SImode);"
  [(set_attr "type" "move,move,move,move,load,load,store")])

;;
;; HImode
;;

;; Constants that cannot be synthesized directly in HImode (not valid for
;; the I/M/N constraints) are first materialized in an SImode scratch and
;; then narrowed with gen_lowpart.
(define_expand "movhi"
  [(set (match_operand:HI 0 "general_operand" "")
	(match_operand:HI 1 "general_operand" ""))]
  ""
  "
{
  if (GET_CODE (operands[0]) == MEM)
    operands[1] = force_reg (HImode, operands[1]);
  else if (CONSTANT_P (operands[1])
	   && (GET_CODE (operands[1]) != CONST_INT
	       || (! CONST_OK_FOR_I (INTVAL (operands[1]))
		   && ! CONST_OK_FOR_M (INTVAL (operands[1]))
		   && ! CONST_OK_FOR_N (INTVAL (operands[1]))))
	   && ! reload_completed && ! reload_in_progress)
    {
      rtx reg = gen_reg_rtx (SImode);
      emit_insn (gen_movsi (reg, operands[1]));
      operands[1] = gen_lowpart (HImode, reg);
    }
}")

(define_insn ""
  [(set (match_operand:HI 0 "mcore_general_movdst_operand" "=r,r,a,r,r,m")
	(match_operand:HI 1 "mcore_general_movsrc_operand" "r,P,i,c,m,r"))]
  "(register_operand (operands[0], HImode)
    || register_operand (operands[1], HImode))"
  "* return mcore_output_move (insn, operands, HImode);"
  [(set_attr "type" "move,move,move,move,load,store")])

;;
;; QImode
;;

;; Same hard-constant strategy as movhi, narrowing from SImode.
(define_expand "movqi"
  [(set (match_operand:QI 0 "general_operand" "")
	(match_operand:QI 1 "general_operand" ""))]
  ""
  "
{
  if (GET_CODE (operands[0]) == MEM)
    operands[1] = force_reg (QImode, operands[1]);
  else if (CONSTANT_P (operands[1])
	   && (GET_CODE (operands[1]) != CONST_INT
	       || (! CONST_OK_FOR_I (INTVAL (operands[1]))
		   && ! CONST_OK_FOR_M (INTVAL (operands[1]))
		   && ! CONST_OK_FOR_N (INTVAL (operands[1]))))
	   && ! reload_completed && ! reload_in_progress)
    {
      rtx reg = gen_reg_rtx (SImode);
      emit_insn (gen_movsi (reg, operands[1]));
      operands[1] = gen_lowpart (QImode, reg);
    }
}")

(define_insn ""
  [(set (match_operand:QI 0 "mcore_general_movdst_operand" "=r,r,a,r,r,m")
	(match_operand:QI 1 "mcore_general_movsrc_operand" "r,P,i,c,m,r"))]
  "(register_operand (operands[0], QImode)
    || register_operand (operands[1], QImode))"
  "* return mcore_output_move (insn, operands, QImode);"
  [(set_attr "type" "move,move,move,move,load,store")])


;; DImode

;; Hard DImode constants are split into two SImode word moves.
(define_expand "movdi"
  [(set (match_operand:DI 0 "general_operand" "")
	(match_operand:DI 1 "general_operand" ""))]
  ""
  "
{
  if (GET_CODE (operands[0]) == MEM)
    operands[1] = force_reg (DImode, operands[1]);
  else if (GET_CODE (operands[1]) == CONST_INT
	   && ! CONST_OK_FOR_I (INTVAL (operands[1]))
	   && ! CONST_OK_FOR_M (INTVAL (operands[1]))
	   && ! CONST_OK_FOR_N (INTVAL (operands[1])))
    {
      int i;
      for (i = 0; i < UNITS_PER_WORD * 2; i += UNITS_PER_WORD)
	emit_move_insn (simplify_gen_subreg (SImode, operands[0], DImode, i),
			simplify_gen_subreg (SImode, operands[1], DImode, i));
      DONE;
    }
}")

(define_insn "movdi_i"
  [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r,r,a,r,m")
	(match_operand:DI 1 "mcore_general_movsrc_operand" "I,M,N,r,R,m,r"))]
  ""
  "* return mcore_output_movedouble (operands, DImode);"
  [(set_attr "length" "4") (set_attr "type" "move,move,move,move,load,load,store")])

;; SFmode

(define_expand "movsf"
  [(set (match_operand:SF 0 "general_operand" "")
	(match_operand:SF 1 "general_operand" ""))]
  ""
  "
{
  if (GET_CODE (operands[0]) == MEM)
    operands[1] = force_reg (SFmode, operands[1]);
}")

(define_insn "movsf_i"
  [(set (match_operand:SF 0 "nonimmediate_operand" "=r,r,m")
	(match_operand:SF 1 "general_operand" "r,m,r"))]
  ""
  "@
   mov %0,%1
   ld.w %0,%1
   st.w %1,%0"
  [(set_attr "type" "move,load,store")])

;; DFmode

(define_expand "movdf"
  [(set (match_operand:DF 0 "general_operand" "")
	(match_operand:DF 1 "general_operand" ""))]
  ""
  "
{
  if (GET_CODE (operands[0]) == MEM)
    operands[1] = force_reg (DFmode, operands[1]);
}")

(define_insn "movdf_k"
  [(set (match_operand:DF 0 "nonimmediate_operand" "=r,r,m")
	(match_operand:DF 1 "general_operand" "r,m,r"))]
  ""
  "* return mcore_output_movedouble (operands, DFmode);"
  [(set_attr "length" "4") (set_attr "type" "move,load,store")])


;; Load/store multiple

;; ??? This is not currently used.
(define_insn "ldm"
  [(set (match_operand:TI 0 "mcore_arith_reg_operand" "=r")
	(mem:TI (match_operand:SI 1 "mcore_arith_reg_operand" "r")))]
  ""
  "ldq %U0,(%1)")

;; ??? This is not currently used.
(define_insn "stm"
  [(set (mem:TI (match_operand:SI 0 "mcore_arith_reg_operand" "r"))
	(match_operand:TI 1 "mcore_arith_reg_operand" "r"))]
  ""
  "stq %U1,(%0)")

;; Build a PARALLEL of word loads from the stack pointer; the ldm insn
;; below matches it.  Only rN..r15 runs loaded from (sp) are supported.
(define_expand "load_multiple"
  [(match_par_dup 3 [(set (match_operand:SI 0 "" "")
			  (match_operand:SI 1 "" ""))
		     (use (match_operand:SI 2 "" ""))])]
  ""
  "
{
  int regno, count, i;

  /* Support only loading a constant number of registers from memory and
     only if at least two registers.  The last register must be r15.  */
  if (GET_CODE (operands[2]) != CONST_INT
      || INTVAL (operands[2]) < 2
      || GET_CODE (operands[1]) != MEM
      || XEXP (operands[1], 0) != stack_pointer_rtx
      || GET_CODE (operands[0]) != REG
      || REGNO (operands[0]) + INTVAL (operands[2]) != 16)
    FAIL;

  count = INTVAL (operands[2]);
  regno = REGNO (operands[0]);

  operands[3] = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));

  for (i = 0; i < count; i++)
    XVECEXP (operands[3], 0, i)
      = gen_rtx_SET (gen_rtx_REG (SImode, regno + i),
		     gen_rtx_MEM (SImode, plus_constant (Pmode, stack_pointer_rtx,
							 i * 4)));
}")

(define_insn ""
  [(match_parallel 0 "mcore_load_multiple_operation"
		   [(set (match_operand:SI 1 "mcore_arith_reg_operand" "=r")
			 (mem:SI (match_operand:SI 2 "register_operand" "r")))])]
  "GET_CODE (operands[2]) == REG && REGNO (operands[2]) == STACK_POINTER_REGNUM"
  "ldm %1-r15,(%2)")

;; Mirror of load_multiple for stores to the stack pointer.
(define_expand "store_multiple"
  [(match_par_dup 3 [(set (match_operand:SI 0 "" "")
			  (match_operand:SI 1 "" ""))
		     (use (match_operand:SI 2 "" ""))])]
  ""
  "
{
  int regno, count, i;

  /* Support only storing a constant number of registers to memory and
     only if at least two registers.  The last register must be r15.  */
  if (GET_CODE (operands[2]) != CONST_INT
      || INTVAL (operands[2]) < 2
      || GET_CODE (operands[0]) != MEM
      || XEXP (operands[0], 0) != stack_pointer_rtx
      || GET_CODE (operands[1]) != REG
      || REGNO (operands[1]) + INTVAL (operands[2]) != 16)
    FAIL;

  count = INTVAL (operands[2]);
  regno = REGNO (operands[1]);

  operands[3] = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));

  for (i = 0; i < count; i++)
    XVECEXP (operands[3], 0, i)
      = gen_rtx_SET (
		     gen_rtx_MEM (SImode, plus_constant (Pmode, stack_pointer_rtx,
							 i * 4)),
		     gen_rtx_REG (SImode, regno + i));
}")

(define_insn ""
  [(match_parallel 0 "mcore_store_multiple_operation"
		   [(set (mem:SI (match_operand:SI 2 "register_operand" "r"))
			 (match_operand:SI 1 "mcore_arith_reg_operand" "r"))])]
  "GET_CODE (operands[2]) == REG && REGNO (operands[2]) == STACK_POINTER_REGNUM"
  "stm %1-r15,(%2)")

;; ------------------------------------------------------------------------
;; Define the real conditional branch instructions.
;; ------------------------------------------------------------------------

;; At top-level, condition test are eq/ne, because we
;; are comparing against the condition register (which
;; has the result of the true relational test

;; Branch taken when the C bit (reg 17) is set.
(define_insn "branch_true"
  [(set (pc) (if_then_else (ne (reg:CC 17) (const_int 0))
			   (label_ref (match_operand 0 "" ""))
			   (pc)))]
  ""
  "jbt %l0"
  [(set_attr "type" "brcond")])

;; Branch taken when the C bit is clear.
(define_insn "branch_false"
  [(set (pc) (if_then_else (eq (reg:CC 17) (const_int 0))
			   (label_ref (match_operand 0 "" ""))
			   (pc)))]
  ""
  "jbf %l0"
  [(set_attr "type" "brcond")])

;; Fall-through/target swapped forms: the branch is taken on the
;; *opposite* arm, so the emitted mnemonic is inverted.
(define_insn "inverse_branch_true"
  [(set (pc) (if_then_else (ne (reg:CC 17) (const_int 0))
			   (pc)
			   (label_ref (match_operand 0 "" ""))))]
  ""
  "jbf %l0"
  [(set_attr "type" "brcond")])

(define_insn "inverse_branch_false"
  [(set (pc) (if_then_else (eq (reg:CC 17) (const_int 0))
			   (pc)
			   (label_ref (match_operand 0 "" ""))))]
  ""
  "jbt %l0"
  [(set_attr "type" "brcond")])

;; Conditional branch insns

;; Expand compare+branch: mcore_gen_compare emits the compare into the C
;; bit and tells us whether the sense was inverted.
(define_expand "cbranchsi4"
  [(set (pc)
	(if_then_else (match_operator 0 "ordered_comparison_operator"
		       [(match_operand:SI 1 "mcore_compare_operand")
			(match_operand:SI 2 "nonmemory_operand")])
		      (label_ref (match_operand 3 ""))
		      (pc)))]
  ""
  "
{
  bool invert;
  invert = mcore_gen_compare (GET_CODE (operands[0]),
			      operands[1], operands[2]);

  if (invert)
    emit_jump_insn (gen_branch_false (operands[3]));
  else
    emit_jump_insn (gen_branch_true (operands[3]));
  DONE;
}")



;; ------------------------------------------------------------------------
;; Jump and linkage insns
;; ------------------------------------------------------------------------

(define_insn "jump_real"
  [(set (pc)
	(label_ref (match_operand 0 "" "")))]
  ""
  "jbr %l0"
  [(set_attr "type" "branch")])

(define_expand "jump"
  [(set (pc) (label_ref (match_operand 0 "" "")))]
  ""
  "
{
  emit_jump_insn (gen_jump_real (operand0));
  DONE;
}
")

(define_insn "indirect_jump"
  [(set (pc)
	(match_operand:SI 0 "mcore_arith_reg_operand" "r"))]
  ""
  "jmp %0"
  [(set_attr "type" "jmp")])

;; Calls clobber r15 (the link register).  Addresses that are neither a
;; register nor symbolic are forced into a register.
(define_expand "call"
  [(parallel[(call (match_operand:SI 0 "" "")
		   (match_operand 1 "" ""))
	     (clobber (reg:SI 15))])]
  ""
  "
{
  if (GET_CODE (operands[0]) == MEM
      && ! register_operand (XEXP (operands[0], 0), SImode)
      && ! mcore_symbolic_address_p (XEXP (operands[0], 0)))
    operands[0] = gen_rtx_MEM (GET_MODE (operands[0]),
			       force_reg (Pmode, XEXP (operands[0], 0)));
}")

(define_insn "call_internal"
  [(call (mem:SI (match_operand:SI 0 "mcore_call_address_operand" "riR"))
	 (match_operand 1 "" ""))
   (clobber (reg:SI 15))]
  ""
  "* return mcore_output_call (operands, 0);")

;; Same as "call" but with a return value.  Here the callee address is
;; operand 1 (operand 0 is the return-value register), so the address
;; legitimization must examine operands[1].  The previous code tested
;; operands[0] -- a copy-paste from the plain call expander -- which can
;; never be the MEM being rewritten, leaving illegitimate call addresses
;; unfixed.
(define_expand "call_value"
  [(parallel[(set (match_operand 0 "register_operand" "")
		  (call (match_operand:SI 1 "" "")
			(match_operand 2 "" "")))
	     (clobber (reg:SI 15))])]
  ""
  "
{
  if (GET_CODE (operands[1]) == MEM
      && ! register_operand (XEXP (operands[1], 0), SImode)
      && ! mcore_symbolic_address_p (XEXP (operands[1], 0)))
    operands[1] = gen_rtx_MEM (GET_MODE (operands[1]),
			       force_reg (Pmode, XEXP (operands[1], 0)));
}")

(define_insn "call_value_internal"
  [(set (match_operand 0 "register_operand" "=r")
	(call (mem:SI (match_operand:SI 1 "mcore_call_address_operand" "riR"))
	      (match_operand 2 "" "")))
   (clobber (reg:SI 15))]
  ""
  "* return mcore_output_call (operands, 1);")

;; Call returning a multi-register aggregate described by a PARALLEL of
;; (register, offset) pairs.
(define_insn "call_value_struct"
  [(parallel [(set (match_parallel 0 ""
		     [(expr_list (match_operand 3 "register_operand" "") (match_operand 4 "immediate_operand" ""))
		      (expr_list (match_operand 5 "register_operand" "") (match_operand 6 "immediate_operand" ""))])
		   (call (match_operand:SI 1 "" "")
			 (match_operand 2 "" "")))
	      (clobber (reg:SI 15))])]
  ""
  "* return mcore_output_call (operands, 1);"
)


;; ------------------------------------------------------------------------
;; Misc insns
;; ------------------------------------------------------------------------

(define_insn "nop"
  [(const_int 0)]
  ""
  "or r0,r0")

(define_insn "tablejump"
  [(set (pc)
	(match_operand:SI 0 "mcore_arith_reg_operand" "r"))
   (use (label_ref (match_operand 1 "" "")))]
  ""
  "jmp %0"
  [(set_attr "type" "jmp")])

;; Return via the link register; naked functions emit no epilogue at all.
(define_insn "*return"
  [(return)]
  "reload_completed && ! mcore_naked_function_p ()"
  "jmp r15"
  [(set_attr "type" "jmp")])

(define_insn "*no_return"
  [(return)]
  "reload_completed && mcore_naked_function_p ()"
  ""
  [(set_attr "length" "0")]
)

(define_expand "prologue"
  [(const_int 0)]
  ""
  "mcore_expand_prolog (); DONE;")

(define_expand "epilogue"
  [(return)]
  ""
  "mcore_expand_epilog ();")

;; ------------------------------------------------------------------------
;; Scc instructions
;; ------------------------------------------------------------------------

;; Copy the C bit (or its complement) into a general register.
(define_insn "mvc"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
	(ne:SI (reg:CC 17) (const_int 0)))]
  ""
  "mvc %0"
  [(set_attr "type" "move")])

(define_insn "mvcv"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
	(eq:SI (reg:CC 17) (const_int 0)))]
  ""
  "mvcv %0"
  [(set_attr "type" "move")])

; in 0.97 use (LE 0) with (LT 1) and complement c.
BRC 1640(define_split 1641 [(parallel[ 1642 (set (match_operand:SI 0 "mcore_arith_reg_operand" "") 1643 (ne:SI (gt:CC (match_operand:SI 1 "mcore_arith_reg_operand" "") 1644 (const_int 0)) 1645 (const_int 0))) 1646 (clobber (reg:SI 17))])] 1647 "" 1648 [(set (reg:CC 17) 1649 (lt:CC (match_dup 1) (const_int 1))) 1650 (set (match_dup 0) (eq:SI (reg:CC 17) (const_int 0)))]) 1651 1652 1653(define_expand "cstoresi4" 1654 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "") 1655 (match_operator:SI 1 "ordered_comparison_operator" 1656 [(match_operand:SI 2 "mcore_compare_operand" "") 1657 (match_operand:SI 3 "nonmemory_operand" "")]))] 1658 "" 1659 " 1660{ 1661 bool invert; 1662 invert = mcore_gen_compare (GET_CODE (operands[1]), 1663 operands[2], operands[3]); 1664 1665 if (invert) 1666 emit_insn (gen_mvcv (operands[0])); 1667 else 1668 emit_insn (gen_mvc (operands[0])); 1669 DONE; 1670}") 1671 1672(define_insn "incscc" 1673 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") 1674 (plus:SI (ne (reg:CC 17) (const_int 0)) 1675 (match_operand:SI 1 "mcore_arith_reg_operand" "0")))] 1676 "" 1677 "inct %0") 1678 1679(define_insn "incscc_false" 1680 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") 1681 (plus:SI (eq (reg:CC 17) (const_int 0)) 1682 (match_operand:SI 1 "mcore_arith_reg_operand" "0")))] 1683 "" 1684 "incf %0") 1685 1686(define_insn "decscc" 1687 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") 1688 (minus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0") 1689 (ne (reg:CC 17) (const_int 0))))] 1690 "" 1691 "dect %0") 1692 1693(define_insn "decscc_false" 1694 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") 1695 (minus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0") 1696 (eq (reg:CC 17) (const_int 0))))] 1697 "" 1698 "decf %0") 1699 1700;; ------------------------------------------------------------------------ 1701;; Conditional move patterns. 
;; ------------------------------------------------------------------------

;; smax via compare + conditional move: if !(op1 < op2) pick op1 else op2.
(define_expand "smaxsi3"
  [(set (reg:CC 17)
	(lt:CC (match_operand:SI 1 "mcore_arith_reg_operand" "")
	       (match_operand:SI 2 "mcore_arith_reg_operand" "")))
   (set (match_operand:SI 0 "mcore_arith_reg_operand" "")
	(if_then_else:SI (eq (reg:CC 17) (const_int 0))
			 (match_dup 1) (match_dup 2)))]
  ""
  "")

;; NOTE(review): this split writes (lt:SI ...) into reg:CC 17 where the
;; expander above uses lt:CC -- long-standing mode inconsistency; verify
;; before changing.
(define_split
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
	(smax:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
		 (match_operand:SI 2 "mcore_arith_reg_operand" "")))]
  ""
  [(set (reg:CC 17)
	(lt:SI (match_dup 1) (match_dup 2)))
   (set (match_dup 0)
	(if_then_else:SI (eq (reg:CC 17) (const_int 0))
			 (match_dup 1) (match_dup 2)))]
  "")

; no tstgt in 0.97, so just use cmplti (btsti x,31) and reverse move
; condition BRC
(define_split
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
	(smax:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
		 (const_int 0)))]
  ""
  [(set (reg:CC 17)
	(lt:CC (match_dup 1) (const_int 0)))
   (set (match_dup 0)
	(if_then_else:SI (eq (reg:CC 17) (const_int 0))
			 (match_dup 1) (const_int 0)))]
  "")

;; smin: same compare, opposite move condition.
(define_expand "sminsi3"
  [(set (reg:CC 17)
	(lt:CC (match_operand:SI 1 "mcore_arith_reg_operand" "")
	       (match_operand:SI 2 "mcore_arith_reg_operand" "")))
   (set (match_operand:SI 0 "mcore_arith_reg_operand" "")
	(if_then_else:SI (ne (reg:CC 17) (const_int 0))
			 (match_dup 1) (match_dup 2)))]
  ""
  "")

(define_split
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
	(smin:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
		 (match_operand:SI 2 "mcore_arith_reg_operand" "")))]
  ""
  [(set (reg:CC 17)
	(lt:SI (match_dup 1) (match_dup 2)))
   (set (match_dup 0)
	(if_then_else:SI (ne (reg:CC 17) (const_int 0))
			 (match_dup 1) (match_dup 2)))]
  "")

;(define_split
;  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
;	(smin:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
;		 (const_int 0)))]
;  ""
;  [(set (reg:CC 17)
;	(gt:CC (match_dup 1) (const_int 0)))
;   (set (match_dup 0)
;	(if_then_else:SI (eq (reg:CC 17) (const_int 0))
;			 (match_dup 1) (const_int 0)))]
;  "")

; changed these unsigned patterns to use geu instead of ltu.  it appears
; that the c-torture & ssrl test suites didn't catch these!  only showed
; up in friedman's clib work.   BRC 7/7/95

;; umax/umin use geu (see note above); the move arms are swapped relative
;; to the signed versions to compensate.
(define_expand "umaxsi3"
  [(set (reg:CC 17)
	(geu:CC (match_operand:SI 1 "mcore_arith_reg_operand" "")
		(match_operand:SI 2 "mcore_arith_reg_operand" "")))
   (set (match_operand:SI 0 "mcore_arith_reg_operand" "")
	(if_then_else:SI (eq (reg:CC 17) (const_int 0))
			 (match_dup 2) (match_dup 1)))]
  ""
  "")

(define_split
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
	(umax:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
		 (match_operand:SI 2 "mcore_arith_reg_operand" "")))]
  ""
  [(set (reg:CC 17)
	(geu:SI (match_dup 1) (match_dup 2)))
   (set (match_dup 0)
	(if_then_else:SI (eq (reg:CC 17) (const_int 0))
			 (match_dup 2) (match_dup 1)))]
  "")

(define_expand "uminsi3"
  [(set (reg:CC 17)
	(geu:CC (match_operand:SI 1 "mcore_arith_reg_operand" "")
		(match_operand:SI 2 "mcore_arith_reg_operand" "")))
   (set (match_operand:SI 0 "mcore_arith_reg_operand" "")
	(if_then_else:SI (ne (reg:CC 17) (const_int 0))
			 (match_dup 2) (match_dup 1)))]
  ""
  "")

(define_split
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
	(umin:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
		 (match_operand:SI 2 "mcore_arith_reg_operand" "")))]
  ""
  [(set (reg:CC 17)
	(geu:SI (match_dup 1) (match_dup 2)))
   (set (match_dup 0)
	(if_then_else:SI (ne (reg:CC 17) (const_int 0))
			 (match_dup 2) (match_dup 1)))]
  "")

;; ------------------------------------------------------------------------
;; conditional move patterns really start here
;; ------------------------------------------------------------------------

;; the "movtK" patterns are experimental.  they are intended to account for
;; gcc's mucking on code such as:
;;
;;            free_ent = ((block_compress) ? 257 : 256 );
;;
;; these patterns help to get a tstne/bgeni/inct (or equivalent) sequence
;; when both arms have constants that are +/- 1 of each other.
;;
;; note in the following patterns that the "movtK" ones should be the first
;; one defined in each sequence.  this is because the general pattern also
;; matches, so use ordering to determine priority (it's easier this way than
;; adding conditions to the general patterns).   BRC
;;
;; the U and Q constraints are necessary to ensure that reload does the
;; 'right thing'.  U constrains the operand to 0 and Q to 1 for use in the
;; clrt & clrf and clrt/inct & clrf/incf patterns.    BRC 6/26
;;
;; ??? there appears to be some problems with these movtK patterns for ops
;; other than eq & ne.  need to fix.
6/30 BRC 1845 1846;; ------------------------------------------------------------------------ 1847;; ne 1848;; ------------------------------------------------------------------------ 1849 1850; experimental conditional move with two constants +/- 1 BRC 1851 1852(define_insn "movtK_1" 1853 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") 1854 (if_then_else:SI 1855 (ne (reg:CC 17) (const_int 0)) 1856 (match_operand:SI 1 "mcore_arith_O_operand" "O") 1857 (match_operand:SI 2 "mcore_arith_O_operand" "O")))] 1858 " GET_CODE (operands[1]) == CONST_INT 1859 && GET_CODE (operands[2]) == CONST_INT 1860 && ( (INTVAL (operands[1]) - INTVAL (operands[2]) == 1) 1861 || (INTVAL (operands[2]) - INTVAL (operands[1]) == 1))" 1862 "* return mcore_output_cmov (operands, 1, NULL);" 1863 [(set_attr "length" "4")]) 1864 1865(define_insn "movt0" 1866 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r") 1867 (if_then_else:SI 1868 (ne (reg:CC 17) (const_int 0)) 1869 (match_operand:SI 1 "mcore_arith_imm_operand" "r,0,U,0") 1870 (match_operand:SI 2 "mcore_arith_imm_operand" "0,r,0,U")))] 1871 "" 1872 "@ 1873 movt %0,%1 1874 movf %0,%2 1875 clrt %0 1876 clrf %0") 1877 1878;; ------------------------------------------------------------------------ 1879;; eq 1880;; ------------------------------------------------------------------------ 1881 1882; experimental conditional move with two constants +/- 1 BRC 1883(define_insn "movtK_2" 1884 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") 1885 (if_then_else:SI 1886 (eq (reg:CC 17) (const_int 0)) 1887 (match_operand:SI 1 "mcore_arith_O_operand" "O") 1888 (match_operand:SI 2 "mcore_arith_O_operand" "O")))] 1889 " GET_CODE (operands[1]) == CONST_INT 1890 && GET_CODE (operands[2]) == CONST_INT 1891 && ( (INTVAL (operands[1]) - INTVAL (operands[2]) == 1) 1892 || (INTVAL (operands[2]) - INTVAL (operands[1]) == 1))" 1893 "* return mcore_output_cmov (operands, 0, NULL);" 1894 [(set_attr "length" "4")]) 1895 1896(define_insn 
"movf0" 1897 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r") 1898 (if_then_else:SI 1899 (eq (reg:CC 17) (const_int 0)) 1900 (match_operand:SI 1 "mcore_arith_imm_operand" "r,0,U,0") 1901 (match_operand:SI 2 "mcore_arith_imm_operand" "0,r,0,U")))] 1902 "" 1903 "@ 1904 movf %0,%1 1905 movt %0,%2 1906 clrf %0 1907 clrt %0") 1908 1909; turns lsli rx,imm/btsti rx,31 into btsti rx,imm. not done by a peephole 1910; because the instructions are not adjacent (peepholes are related by posn - 1911; not by dataflow). BRC 1912 1913(define_insn "" 1914 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r") 1915 (if_then_else:SI (eq (zero_extract:SI 1916 (match_operand:SI 1 "mcore_arith_reg_operand" "r,r,r,r") 1917 (const_int 1) 1918 (match_operand:SI 2 "mcore_literal_K_operand" "K,K,K,K")) 1919 (const_int 0)) 1920 (match_operand:SI 3 "mcore_arith_imm_operand" "r,0,U,0") 1921 (match_operand:SI 4 "mcore_arith_imm_operand" "0,r,0,U")))] 1922 "" 1923 "@ 1924 btsti %1,%2\;movf %0,%3 1925 btsti %1,%2\;movt %0,%4 1926 btsti %1,%2\;clrf %0 1927 btsti %1,%2\;clrt %0" 1928 [(set_attr "length" "4")]) 1929 1930; turns sextb rx/btsti rx,31 into btsti rx,7. must be QImode to be safe. 
BRC 1931 1932(define_insn "" 1933 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r") 1934 (if_then_else:SI (eq (lshiftrt:SI 1935 (match_operand:SI 1 "mcore_arith_reg_operand" "r,r,r,r") 1936 (const_int 7)) 1937 (const_int 0)) 1938 (match_operand:SI 2 "mcore_arith_imm_operand" "r,0,U,0") 1939 (match_operand:SI 3 "mcore_arith_imm_operand" "0,r,0,U")))] 1940 "GET_CODE (operands[1]) == SUBREG && 1941 GET_MODE (SUBREG_REG (operands[1])) == QImode" 1942 "@ 1943 btsti %1,7\;movf %0,%2 1944 btsti %1,7\;movt %0,%3 1945 btsti %1,7\;clrf %0 1946 btsti %1,7\;clrt %0" 1947 [(set_attr "length" "4")]) 1948 1949 1950;; ------------------------------------------------------------------------ 1951;; ne 1952;; ------------------------------------------------------------------------ 1953 1954;; Combine creates this from an andn instruction in a scc sequence. 1955;; We must recognize it to get conditional moves generated. 1956 1957; experimental conditional move with two constants +/- 1 BRC 1958(define_insn "movtK_3" 1959 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") 1960 (if_then_else:SI 1961 (ne (match_operand:SI 1 "mcore_arith_reg_operand" "r") 1962 (const_int 0)) 1963 (match_operand:SI 2 "mcore_arith_O_operand" "O") 1964 (match_operand:SI 3 "mcore_arith_O_operand" "O")))] 1965 " GET_CODE (operands[2]) == CONST_INT 1966 && GET_CODE (operands[3]) == CONST_INT 1967 && ( (INTVAL (operands[2]) - INTVAL (operands[3]) == 1) 1968 || (INTVAL (operands[3]) - INTVAL (operands[2]) == 1))" 1969 "* 1970{ 1971 rtx out_operands[4]; 1972 out_operands[0] = operands[0]; 1973 out_operands[1] = operands[2]; 1974 out_operands[2] = operands[3]; 1975 out_operands[3] = operands[1]; 1976 1977 return mcore_output_cmov (out_operands, 1, \"cmpnei %3,0\"); 1978 1979}" 1980 [(set_attr "length" "6")]) 1981 1982(define_insn "movt2" 1983 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r") 1984 (if_then_else:SI (ne (match_operand:SI 1 "mcore_arith_reg_operand" "r,r,r,r") 1985 
(const_int 0)) 1986 (match_operand:SI 2 "mcore_arith_imm_operand" "r,0,U,0") 1987 (match_operand:SI 3 "mcore_arith_imm_operand" "0,r,0,U")))] 1988 "" 1989 "@ 1990 cmpnei %1,0\;movt %0,%2 1991 cmpnei %1,0\;movf %0,%3 1992 cmpnei %1,0\;clrt %0 1993 cmpnei %1,0\;clrf %0" 1994 [(set_attr "length" "4")]) 1995 1996; turns lsli rx,imm/btsti rx,31 into btsti rx,imm. not done by a peephole 1997; because the instructions are not adjacent (peepholes are related by posn - 1998; not by dataflow). BRC 1999 2000(define_insn "" 2001 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r") 2002 (if_then_else:SI (ne (zero_extract:SI 2003 (match_operand:SI 1 "mcore_arith_reg_operand" "r,r,r,r") 2004 (const_int 1) 2005 (match_operand:SI 2 "mcore_literal_K_operand" "K,K,K,K")) 2006 (const_int 0)) 2007 (match_operand:SI 3 "mcore_arith_imm_operand" "r,0,U,0") 2008 (match_operand:SI 4 "mcore_arith_imm_operand" "0,r,0,U")))] 2009 "" 2010 "@ 2011 btsti %1,%2\;movt %0,%3 2012 btsti %1,%2\;movf %0,%4 2013 btsti %1,%2\;clrt %0 2014 btsti %1,%2\;clrf %0" 2015 [(set_attr "length" "4")]) 2016 2017; turns sextb rx/btsti rx,31 into btsti rx,7. must be QImode to be safe. 
BRC 2018 2019(define_insn "" 2020 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r") 2021 (if_then_else:SI (ne (lshiftrt:SI 2022 (match_operand:SI 1 "mcore_arith_reg_operand" "r,r,r,r") 2023 (const_int 7)) 2024 (const_int 0)) 2025 (match_operand:SI 2 "mcore_arith_imm_operand" "r,0,U,0") 2026 (match_operand:SI 3 "mcore_arith_imm_operand" "0,r,0,U")))] 2027 "GET_CODE (operands[1]) == SUBREG && 2028 GET_MODE (SUBREG_REG (operands[1])) == QImode" 2029 "@ 2030 btsti %1,7\;movt %0,%2 2031 btsti %1,7\;movf %0,%3 2032 btsti %1,7\;clrt %0 2033 btsti %1,7\;clrf %0" 2034 [(set_attr "length" "4")]) 2035 2036;; ------------------------------------------------------------------------ 2037;; eq/eq 2038;; ------------------------------------------------------------------------ 2039 2040; experimental conditional move with two constants +/- 1 BRC 2041(define_insn "movtK_4" 2042 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") 2043 (if_then_else:SI 2044 (eq (eq:SI (reg:CC 17) (const_int 0)) (const_int 0)) 2045 (match_operand:SI 1 "mcore_arith_O_operand" "O") 2046 (match_operand:SI 2 "mcore_arith_O_operand" "O")))] 2047 "GET_CODE (operands[1]) == CONST_INT && 2048 GET_CODE (operands[2]) == CONST_INT && 2049 ((INTVAL (operands[1]) - INTVAL (operands[2]) == 1) || 2050 (INTVAL (operands[2]) - INTVAL (operands[1]) == 1))" 2051 "* return mcore_output_cmov(operands, 1, NULL);" 2052 [(set_attr "length" "4")]) 2053 2054(define_insn "movt3" 2055 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r") 2056 (if_then_else:SI 2057 (eq (eq:SI (reg:CC 17) (const_int 0)) (const_int 0)) 2058 (match_operand:SI 1 "mcore_arith_imm_operand" "r,0,U,0") 2059 (match_operand:SI 2 "mcore_arith_imm_operand" "0,r,0,U")))] 2060 "" 2061 "@ 2062 movt %0,%1 2063 movf %0,%2 2064 clrt %0 2065 clrf %0") 2066 2067;; ------------------------------------------------------------------------ 2068;; eq/ne 2069;; ------------------------------------------------------------------------ 2070 2071; 
experimental conditional move with two constants +/- 1 BRC 2072(define_insn "movtK_5" 2073 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") 2074 (if_then_else:SI 2075 (eq (ne:SI (reg:CC 17) (const_int 0)) (const_int 0)) 2076 (match_operand:SI 1 "mcore_arith_O_operand" "O") 2077 (match_operand:SI 2 "mcore_arith_O_operand" "O")))] 2078 "GET_CODE (operands[1]) == CONST_INT && 2079 GET_CODE (operands[2]) == CONST_INT && 2080 ((INTVAL (operands[1]) - INTVAL (operands[2]) == 1) || 2081 (INTVAL (operands[2]) - INTVAL (operands[1]) == 1))" 2082 "* return mcore_output_cmov (operands, 0, NULL);" 2083 [(set_attr "length" "4")]) 2084 2085(define_insn "movf1" 2086 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r") 2087 (if_then_else:SI 2088 (eq (ne:SI (reg:CC 17) (const_int 0)) (const_int 0)) 2089 (match_operand:SI 1 "mcore_arith_imm_operand" "r,0,U,0") 2090 (match_operand:SI 2 "mcore_arith_imm_operand" "0,r,0,U")))] 2091 "" 2092 "@ 2093 movf %0,%1 2094 movt %0,%2 2095 clrf %0 2096 clrt %0") 2097 2098;; ------------------------------------------------------------------------ 2099;; eq 2100;; ------------------------------------------------------------------------ 2101 2102;; Combine creates this from an andn instruction in a scc sequence. 2103;; We must recognize it to get conditional moves generated. 

; experimental conditional move with two constants +/- 1  BRC

;; Conditional move on (EQ rx 0) between two constants that differ by
;; exactly one.  The tested register needs an explicit "cmpnei %3,0" to
;; set the C bit first, hence the 6-byte length (cmpnei + 4-byte cmov
;; sequence emitted by mcore_output_cmov).
;;
;; NOTE(review): the condition used to test GET_CODE (operands[1]) and
;; operands[2] for CONST_INT, but operand 1 is matched by
;; mcore_arith_reg_operand (always a REG), so the pattern could never
;; match.  The constants are operands 2 and 3 — fixed to match the
;; sibling patterns movtK_3, movtK_9 and movtK_10.
(define_insn "movtK_6"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
	(if_then_else:SI
	 (eq (match_operand:SI 1 "mcore_arith_reg_operand" "r")
	     (const_int 0))
	 (match_operand:SI 2 "mcore_arith_O_operand" "O")
	 (match_operand:SI 3 "mcore_arith_O_operand" "O")))]
  "GET_CODE (operands[2]) == CONST_INT &&
   GET_CODE (operands[3]) == CONST_INT &&
   ((INTVAL (operands[2]) - INTVAL (operands[3]) == 1) ||
    (INTVAL (operands[3]) - INTVAL (operands[2]) == 1))"
  "*
{
  rtx out_operands[4];
  out_operands[0] = operands[0];
  out_operands[1] = operands[2];
  out_operands[2] = operands[3];
  out_operands[3] = operands[1];

  return mcore_output_cmov (out_operands, 0, \"cmpnei %3,0\");
}"
  [(set_attr "length" "6")])

;; General (EQ rx 0) conditional move: compare against zero, then use the
;; movf/movt/clrf/clrt family depending on which input is already in the
;; destination.
(define_insn "movf3"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
	(if_then_else:SI (eq (match_operand:SI 1 "mcore_arith_reg_operand" "r,r,r,r")
			     (const_int 0))
			 (match_operand:SI 2 "mcore_arith_imm_operand" "r,0,U,0")
			 (match_operand:SI 3 "mcore_arith_imm_operand" "0,r,0,U")))]
  ""
  "@
	cmpnei %1,0\;movf %0,%2
	cmpnei %1,0\;movt %0,%3
	cmpnei %1,0\;clrf %0
	cmpnei %1,0\;clrt %0"
  [(set_attr "length" "4")])

;; ------------------------------------------------------------------------
;; ne/eq
;; ------------------------------------------------------------------------

; experimental conditional move with two constants +/- 1  BRC
;; (NE (EQ cc 0) 0) is just the inverted sense of the C bit, so emit the
;; false-sense cmov (second argument 0) with no extra compare.
(define_insn "movtK_7"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
	(if_then_else:SI
	 (ne (eq:SI (reg:CC 17) (const_int 0)) (const_int 0))
	 (match_operand:SI 1 "mcore_arith_O_operand" "O")
	 (match_operand:SI 2 "mcore_arith_O_operand" "O")))]
  "GET_CODE (operands[1]) == CONST_INT &&
   GET_CODE (operands[2]) == CONST_INT &&
   ((INTVAL (operands[1]) - INTVAL (operands[2]) == 1) ||
    (INTVAL (operands[2]) - INTVAL (operands[1]) == 1))"
  "* return mcore_output_cmov (operands, 0, NULL);"
  [(set_attr "length" "4")])

(define_insn "movf4"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
	(if_then_else:SI
	 (ne (eq:SI (reg:CC 17) (const_int 0)) (const_int 0))
	 (match_operand:SI 1 "mcore_arith_imm_operand" "r,0,U,0")
	 (match_operand:SI 2 "mcore_arith_imm_operand" "0,r,0,U")))]
  ""
  "@
	movf %0,%1
	movt %0,%2
	clrf %0
	clrt %0")

;; ------------------------------------------------------------------------
;; ne/ne
;; ------------------------------------------------------------------------

; experimental conditional move with two constants +/- 1  BRC
;; (NE (NE cc 0) 0) preserves the sense of the C bit (true-sense cmov).
(define_insn "movtK_8"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
	(if_then_else:SI
	 (ne (ne:SI (reg:CC 17) (const_int 0)) (const_int 0))
	 (match_operand:SI 1 "mcore_arith_O_operand" "O")
	 (match_operand:SI 2 "mcore_arith_O_operand" "O")))]
  "GET_CODE (operands[1]) == CONST_INT &&
   GET_CODE (operands[2]) == CONST_INT &&
   ((INTVAL (operands[1]) - INTVAL (operands[2]) == 1) ||
    (INTVAL (operands[2]) - INTVAL (operands[1]) == 1))"
  "* return mcore_output_cmov (operands, 1, NULL);"
  [(set_attr "length" "4")])

(define_insn "movt4"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
	(if_then_else:SI
	 (ne (ne:SI (reg:CC 17) (const_int 0)) (const_int 0))
	 (match_operand:SI 1 "mcore_arith_imm_operand" "r,0,U,0")
	 (match_operand:SI 2 "mcore_arith_imm_operand" "0,r,0,U")))]
  ""
  "@
	movt %0,%1
	movf %0,%2
	clrt %0
	clrf %0")

;; Also need patterns to recognize lt/ge, since otherwise the compiler will
;; try to output not/asri/tstne/movf.

;; ------------------------------------------------------------------------
;; lt
;; ------------------------------------------------------------------------

; experimental conditional move with two constants +/- 1  BRC
;; (LT rx 0) is just the sign bit, so a "btsti %3,31" sets the C bit
;; before the cmov sequence (6 bytes total).
(define_insn "movtK_9"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
	(if_then_else:SI
	 (lt (match_operand:SI 1 "mcore_arith_reg_operand" "r")
	     (const_int 0))
	 (match_operand:SI 2 "mcore_arith_O_operand" "O")
	 (match_operand:SI 3 "mcore_arith_O_operand" "O")))]
  "GET_CODE (operands[2]) == CONST_INT &&
   GET_CODE (operands[3]) == CONST_INT &&
   ((INTVAL (operands[2]) - INTVAL (operands[3]) == 1) ||
    (INTVAL (operands[3]) - INTVAL (operands[2]) == 1))"
  "*
{
  rtx out_operands[4];
  out_operands[0] = operands[0];
  out_operands[1] = operands[2];
  out_operands[2] = operands[3];
  out_operands[3] = operands[1];

  return mcore_output_cmov (out_operands, 1, \"btsti %3,31\");
}"
  [(set_attr "length" "6")])

(define_insn "movt5"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
	(if_then_else:SI (lt (match_operand:SI 1 "mcore_arith_reg_operand" "r,r,r,r")
			     (const_int 0))
			 (match_operand:SI 2 "mcore_arith_imm_operand" "r,0,U,0")
			 (match_operand:SI 3 "mcore_arith_imm_operand" "0,r,0,U")))]
  ""
  "@
	btsti %1,31\;movt %0,%2
	btsti %1,31\;movf %0,%3
	btsti %1,31\;clrt %0
	btsti %1,31\;clrf %0"
  [(set_attr "length" "4")])


;; ------------------------------------------------------------------------
;; ge
;; ------------------------------------------------------------------------

; experimental conditional move with two constants +/- 1  BRC
;; (GE rx 0): same sign-bit test as movtK_9, but with the inverted
;; (false) cmov sense.
(define_insn "movtK_10"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
	(if_then_else:SI
	 (ge (match_operand:SI 1 "mcore_arith_reg_operand" "r")
	     (const_int 0))
	 (match_operand:SI 2 "mcore_arith_O_operand" "O")
	 (match_operand:SI 3 "mcore_arith_O_operand" "O")))]
  "GET_CODE (operands[2]) == CONST_INT &&
   GET_CODE (operands[3]) == CONST_INT &&
   ((INTVAL (operands[2]) - INTVAL (operands[3]) == 1) ||
    (INTVAL (operands[3]) - INTVAL (operands[2]) == 1))"
  "*
{
  rtx out_operands[4];
  out_operands[0] = operands[0];
  out_operands[1] = operands[2];
  out_operands[2] = operands[3];
  out_operands[3] = operands[1];

  return mcore_output_cmov (out_operands, 0, \"btsti %3,31\");
}"
  [(set_attr "length" "6")])

(define_insn "movf5"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
	(if_then_else:SI (ge (match_operand:SI 1 "mcore_arith_reg_operand" "r,r,r,r")
			     (const_int 0))
			 (match_operand:SI 2 "mcore_arith_imm_operand" "r,0,U,0")
			 (match_operand:SI 3 "mcore_arith_imm_operand" "0,r,0,U")))]
  ""
  "@
	btsti %1,31\;movf %0,%2
	btsti %1,31\;movt %0,%3
	btsti %1,31\;clrf %0
	btsti %1,31\;clrt %0"
  [(set_attr "length" "4")])

;; ------------------------------------------------------------------------
;; Bitfield extract (xtrbN)
;; ------------------------------------------------------------------------

; sometimes we're better off using QI/HI mode and letting the machine indep.
; part expand insv and extv.
;
; e.g., sequences like:  [an insertion]
;
;      ldw r8,(r6)
;      movi r7,0x00ffffff
;      and  r8,r7          r7 dead
;      stw  r8,(r6)        r8 dead
;
; become:
;
;      movi r8,0
;      stb  r8,(r6)        r8 dead
;
; it looks like always using SI mode is a win except in this type of code
; (when adjacent bit fields collapse on a byte or halfword boundary).  when
; expanding with SI mode, non-adjacent bit field masks fold, but with QI/HI
; mode, they do not.  one thought is to add some peepholes to cover cases
; like the above, but this is not a general solution.
;
; -mword-bitfields expands/inserts using SI mode.  otherwise, do it with
; the smallest mode possible (using the machine indep. expansions).  BRC

;; NOTE(review): superseded earlier version of the "extv" expander kept
;; for reference; the live version follows below.
;(define_expand "extv"
;  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
;	(sign_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
;			 (match_operand:SI 2 "const_int_operand" "")
;			 (match_operand:SI 3 "const_int_operand" "")))
;   (clobber (reg:CC 17))]
;  ""
;  "
;{
;  if (INTVAL (operands[1]) != 8 || INTVAL (operands[2]) % 8 != 0)
;    {
;     if (TARGET_W_FIELD)
;       {
;        rtx lshft = GEN_INT (32 - (INTVAL (operands[2]) + INTVAL (operands[3])));
;        rtx rshft = GEN_INT (32 - INTVAL (operands[2]));
;
;        emit_insn (gen_rtx_SET (operands[0], operands[1]));
;        emit_insn (gen_rtx_SET (operands[0],
;                   gen_rtx_ASHIFT (SImode, operands[0], lshft)));
;        emit_insn (gen_rtx_SET (operands[0],
;                   gen_rtx_ASHIFTRT (SImode, operands[0], rshft)));
;        DONE;
;       }
;     else
;       FAIL;
;    }
;}")

;; Sign-extracting extv: operand 2 is the field width, operand 3 the bit
;; position.  Byte-aligned 8-bit fields fall through to the xtrbN insns;
;; -mwide-bitfields (TARGET_W_FIELD) handles arbitrary placement with an
;; ashift/ashiftrt pair; anything else FAILs back to the generic expander.
(define_expand "extv"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
	(sign_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
			 (match_operand:SI 2 "const_int_operand" "")
			 (match_operand:SI 3 "const_int_operand" "")))
   (clobber (reg:CC 17))]
  ""
  "
{
  if (INTVAL (operands[2]) == 8 && INTVAL (operands[3]) % 8 == 0)
    {
      /* 8-bit field, aligned properly, use the xtrb[0123]+sext sequence.  */
      /* not DONE, not FAIL, but let the RTL get generated....  */
    }
  else if (TARGET_W_FIELD)
    {
      /* Arbitrary placement; note that the tree->rtl generator will make
	 something close to this if we return FAIL  */
      rtx lshft = GEN_INT (32 - (INTVAL (operands[2]) + INTVAL (operands[3])));
      rtx rshft = GEN_INT (32 - INTVAL (operands[2]));
      rtx tmp1 = gen_reg_rtx (SImode);
      rtx tmp2 = gen_reg_rtx (SImode);

      emit_insn (gen_rtx_SET (tmp1, operands[1]));
      emit_insn (gen_rtx_SET (tmp2,
		 gen_rtx_ASHIFT (SImode, tmp1, lshft)));
      emit_insn (gen_rtx_SET (operands[0],
		 gen_rtx_ASHIFTRT (SImode, tmp2, rshft)));
      DONE;
    }
  else
    {
      /* Let the caller choose an alternate sequence.  */
      FAIL;
    }
}")

;; Zero-extracting extzv: like extv, plus a third strategy — when the
;; field mask fits constraint K (an immediate usable by andi), shift the
;; field down and mask it.
(define_expand "extzv"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
	(zero_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
			 (match_operand:SI 2 "const_int_operand" "")
			 (match_operand:SI 3 "const_int_operand" "")))
   (clobber (reg:CC 17))]
  ""
  "
{
  if (INTVAL (operands[2]) == 8 && INTVAL (operands[3]) % 8 == 0)
    {
      /* 8-bit field, aligned properly, use the xtrb[0123] sequence.  */
      /* Let the template generate some RTL....  */
    }
  else if (CONST_OK_FOR_K ((1 << INTVAL (operands[2])) - 1))
    {
      /* A narrow bit-field (<=5 bits) means we can do a shift to put
	 it in place and then use an andi to extract it.
	 This is as good as a shiftleft/shiftright.  */

      rtx shifted;
      rtx mask = GEN_INT ((1 << INTVAL (operands[2])) - 1);

      if (INTVAL (operands[3]) == 0)
	{
	  shifted = operands[1];
	}
      else
	{
	  rtx rshft = GEN_INT (INTVAL (operands[3]));
	  shifted = gen_reg_rtx (SImode);
	  emit_insn (gen_rtx_SET (shifted,
		     gen_rtx_LSHIFTRT (SImode, operands[1], rshft)));
	}
      emit_insn (gen_rtx_SET (operands[0],
		 gen_rtx_AND (SImode, shifted, mask)));
      DONE;
    }
  else if (TARGET_W_FIELD)
    {
      /* Arbitrary pattern; play shift/shift games to get it.
       * this is pretty much what the caller will do if we say FAIL */
      rtx lshft = GEN_INT (32 - (INTVAL (operands[2]) + INTVAL (operands[3])));
      rtx rshft = GEN_INT (32 - INTVAL (operands[2]));
      rtx tmp1 = gen_reg_rtx (SImode);
      rtx tmp2 = gen_reg_rtx (SImode);

      emit_insn (gen_rtx_SET (tmp1, operands[1]));
      emit_insn (gen_rtx_SET (tmp2,
		 gen_rtx_ASHIFT (SImode, tmp1, lshft)));
      emit_insn (gen_rtx_SET (operands[0],
		 gen_rtx_LSHIFTRT (SImode, tmp2, rshft)));
      DONE;
    }
  else
    {
      /* Make the compiler figure out some alternative mechanism.  */
      FAIL;
    }

  /* Emit the RTL pattern; something will match it later.  */
}")

;; Bit-field insertion — all of the work is done in mcore_expand_insv
;; (see mcore.c); FAIL lets the generic expander take over.
(define_expand "insv"
  [(set (zero_extract:SI (match_operand:SI 0 "mcore_arith_reg_operand" "")
			 (match_operand:SI 1 "const_int_operand" "")
			 (match_operand:SI 2 "const_int_operand" ""))
	(match_operand:SI 3 "general_operand" ""))
   (clobber (reg:CC 17))]
  ""
  "
{
  if (mcore_expand_insv (operands))
    {
      DONE;
    }
  else
    {
      FAIL;
    }
}")

;;
;; the xtrb[0123] instructions handily get at 8-bit fields on nice boundaries.
;; but then, they do force you through r1.
;;
;; the combiner will build such patterns for us, so we'll make them available
;; for its use.
;;
;; Note that we have both SIGNED and UNSIGNED versions of these...
;;

;;
;; These no longer worry about the clobbering of CC bit; not sure this is
;; good...
;;
;; the SIGNED versions of these
;;
;; Each xtrbN extracts byte N (numbered from the most significant end)
;; into the destination; the "b" constraint routes the result through the
;; dedicated register, and sextb/zextb finishes the sign/zero extension.
;; Byte 3 (bit position 24) can instead be done in place with a plain
;; arithmetic shift when source and destination match.
(define_insn ""
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,b")
	(sign_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0,r") (const_int 8) (const_int 24)))]
  ""
  "@
	asri %0,24
	xtrb0 %0,%1\;sextb %0"
  [(set_attr "type" "shift")])

(define_insn ""
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=b")
	(sign_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r") (const_int 8) (const_int 16)))]
  ""
  "xtrb1 %0,%1\;sextb %0"
  [(set_attr "type" "shift")])

(define_insn ""
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=b")
	(sign_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r") (const_int 8) (const_int 8)))]
  ""
  "xtrb2 %0,%1\;sextb %0"
  [(set_attr "type" "shift")])

;; Low byte: sign-extend in place, no extract needed.
(define_insn ""
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
	(sign_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0") (const_int 8) (const_int 0)))]
  ""
  "sextb %0"
  [(set_attr "type" "shift")])

;; the UNSIGNED uses of xtrb[0123]
;;
(define_insn ""
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,b")
	(zero_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0,r") (const_int 8) (const_int 24)))]
  ""
  "@
	lsri %0,24
	xtrb0 %0,%1"
  [(set_attr "type" "shift")])

(define_insn ""
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=b")
	(zero_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r") (const_int 8) (const_int 16)))]
  ""
  "xtrb1 %0,%1"
  [(set_attr "type" "shift")])

(define_insn ""
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=b")
	(zero_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r") (const_int 8) (const_int 8)))]
  ""
  "xtrb2 %0,%1"
  [(set_attr "type" "shift")])

;; This can be peepholed if it follows a ldb ...
(define_insn ""
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,b")
	(zero_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0,r") (const_int 8) (const_int 0)))]
  ""
  "@
	zextb %0
	xtrb3 %0,%1\;zextb %0"
  [(set_attr "type" "shift")])


;; ------------------------------------------------------------------------
;; Block move - adapted from m88k.md
;; ------------------------------------------------------------------------

;; Entire expansion lives in mcore_expand_block_move (mcore.c); operand 2
;; is the byte count and operand 3 the alignment, per the cpymemsi
;; standard-name convention.
(define_expand "cpymemsi"
  [(parallel [(set (mem:BLK (match_operand:BLK 0 "" ""))
		   (mem:BLK (match_operand:BLK 1 "" "")))
	      (use (match_operand:SI 2 "general_operand" ""))
	      (use (match_operand:SI 3 "immediate_operand" ""))])]
  ""
  "
{
  if (mcore_expand_block_move (operands))
    DONE;
  else
    FAIL;
}")

;; ;;; ??? These patterns are meant to be generated from expand_block_move,
;; ;;; but they currently are not.
2571;; 2572;; (define_insn "" 2573;; [(set (match_operand:QI 0 "mcore_arith_reg_operand" "=r") 2574;; (match_operand:BLK 1 "mcore_general_movsrc_operand" "m"))] 2575;; "" 2576;; "ld.b %0,%1" 2577;; [(set_attr "type" "load")]) 2578;; 2579;; (define_insn "" 2580;; [(set (match_operand:HI 0 "mcore_arith_reg_operand" "=r") 2581;; (match_operand:BLK 1 "mcore_general_movsrc_operand" "m"))] 2582;; "" 2583;; "ld.h %0,%1" 2584;; [(set_attr "type" "load")]) 2585;; 2586;; (define_insn "" 2587;; [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") 2588;; (match_operand:BLK 1 "mcore_general_movsrc_operand" "m"))] 2589;; "" 2590;; "ld.w %0,%1" 2591;; [(set_attr "type" "load")]) 2592;; 2593;; (define_insn "" 2594;; [(set (match_operand:BLK 0 "mcore_general_movdst_operand" "=m") 2595;; (match_operand:QI 1 "mcore_arith_reg_operand" "r"))] 2596;; "" 2597;; "st.b %1,%0" 2598;; [(set_attr "type" "store")]) 2599;; 2600;; (define_insn "" 2601;; [(set (match_operand:BLK 0 "mcore_general_movdst_operand" "=m") 2602;; (match_operand:HI 1 "mcore_arith_reg_operand" "r"))] 2603;; "" 2604;; "st.h %1,%0" 2605;; [(set_attr "type" "store")]) 2606;; 2607;; (define_insn "" 2608;; [(set (match_operand:BLK 0 "mcore_general_movdst_operand" "=m") 2609;; (match_operand:SI 1 "mcore_arith_reg_operand" "r"))] 2610;; "" 2611;; "st.w %1,%0" 2612;; [(set_attr "type" "store")]) 2613 2614;; ------------------------------------------------------------------------ 2615;; Misc Optimizing quirks 2616;; ------------------------------------------------------------------------ 2617 2618;; pair to catch constructs like: (int *)((p+=4)-4) which happen 2619;; in stdarg/varargs traversal. This changes a 3 insn sequence to a 2 2620;; insn sequence. 
-- RBE 11/30/95 2621(define_insn "" 2622 [(parallel[ 2623 (set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") 2624 (match_operand:SI 1 "mcore_arith_reg_operand" "+r")) 2625 (set (match_dup 1) (plus:SI (match_dup 1) (match_operand 2 "mcore_arith_any_imm_operand" "")))])] 2626 "GET_CODE(operands[2]) == CONST_INT" 2627 "#" 2628 [(set_attr "length" "4")]) 2629 2630(define_split 2631 [(parallel[ 2632 (set (match_operand:SI 0 "mcore_arith_reg_operand" "") 2633 (match_operand:SI 1 "mcore_arith_reg_operand" "")) 2634 (set (match_dup 1) (plus:SI (match_dup 1) (match_operand 2 "mcore_arith_any_imm_operand" "")))])] 2635 "GET_CODE(operands[2]) == CONST_INT && 2636 operands[0] != operands[1]" 2637 [(set (match_dup 0) (match_dup 1)) 2638 (set (match_dup 1) (plus:SI (match_dup 1) (match_dup 2)))]) 2639 2640 2641;;; Peepholes 2642 2643; note: in the following patterns, use mcore_is_dead() to ensure that the 2644; reg we may be trashing really is dead. reload doesn't always mark 2645; deaths, so mcore_is_dead() (see mcore.c) scans forward to find its death. BRC 2646 2647;;; A peephole to convert the 3 instruction sequence generated by reload 2648;;; to load a FP-offset address into a 2 instruction sequence. 2649;;; ??? This probably never matches anymore. 2650(define_peephole 2651 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "r") 2652 (match_operand:SI 1 "const_int_operand" "J")) 2653 (set (match_dup 0) (neg:SI (match_dup 0))) 2654 (set (match_dup 0) 2655 (plus:SI (match_dup 0) 2656 (match_operand:SI 2 "mcore_arith_reg_operand" "r")))] 2657 "CONST_OK_FOR_J (INTVAL (operands[1]))" 2658 "error\;mov %0,%2\;subi %0,%1") 2659 2660;; Moves of inlinable constants are done late, so when a 'not' is generated 2661;; it is never combined with the following 'and' to generate an 'andn' b/c 2662;; the combiner never sees it. 
use a peephole to pick up this case (happens 2663;; mostly with bitfields) BRC 2664 2665(define_peephole 2666 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "r") 2667 (match_operand:SI 1 "const_int_operand" "i")) 2668 (set (match_operand:SI 2 "mcore_arith_reg_operand" "r") 2669 (and:SI (match_dup 2) (match_dup 0)))] 2670 "mcore_const_trick_uses_not (INTVAL (operands[1])) && 2671 operands[0] != operands[2] && 2672 mcore_is_dead (insn, operands[0])" 2673 "* return mcore_output_andn (insn, operands);") 2674 2675; when setting or clearing just two bits, it's cheapest to use two bseti's 2676; or bclri's. only happens when relaxing immediates. BRC 2677 2678(define_peephole 2679 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "") 2680 (match_operand:SI 1 "const_int_operand" "")) 2681 (set (match_operand:SI 2 "mcore_arith_reg_operand" "") 2682 (ior:SI (match_dup 2) (match_dup 0)))] 2683 "TARGET_HARDLIT 2684 && mcore_num_ones (INTVAL (operands[1])) == 2 2685 && mcore_is_dead (insn, operands[0])" 2686 "* return mcore_output_bseti (operands[2], INTVAL (operands[1]));") 2687 2688(define_peephole 2689 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "") 2690 (match_operand:SI 1 "const_int_operand" "")) 2691 (set (match_operand:SI 2 "mcore_arith_reg_operand" "") 2692 (and:SI (match_dup 2) (match_dup 0)))] 2693 "TARGET_HARDLIT && mcore_num_zeros (INTVAL (operands[1])) == 2 && 2694 mcore_is_dead (insn, operands[0])" 2695 "* return mcore_output_bclri (operands[2], INTVAL (operands[1]));") 2696 2697; change an and with a mask that has a single cleared bit into a bclri. this 2698; handles QI and HI mode values using the knowledge that the most significant 2699; bits don't matter. 
2700 2701(define_peephole 2702 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "") 2703 (match_operand:SI 1 "const_int_operand" "")) 2704 (set (match_operand:SI 2 "mcore_arith_reg_operand" "") 2705 (and:SI (match_operand:SI 3 "mcore_arith_reg_operand" "") 2706 (match_dup 0)))] 2707 "GET_CODE (operands[3]) == SUBREG && 2708 GET_MODE (SUBREG_REG (operands[3])) == QImode && 2709 mcore_num_zeros (INTVAL (operands[1]) | 0xffffff00) == 1 && 2710 mcore_is_dead (insn, operands[0])" 2711"* 2712 if (! mcore_is_same_reg (operands[2], operands[3])) 2713 output_asm_insn (\"mov\\t%2,%3\", operands); 2714 return mcore_output_bclri (operands[2], INTVAL (operands[1]) | 0xffffff00);") 2715 2716/* Do not fold these together -- mode is lost at final output phase. */ 2717 2718(define_peephole 2719 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "") 2720 (match_operand:SI 1 "const_int_operand" "")) 2721 (set (match_operand:SI 2 "mcore_arith_reg_operand" "") 2722 (and:SI (match_operand:SI 3 "mcore_arith_reg_operand" "") 2723 (match_dup 0)))] 2724 "GET_CODE (operands[3]) == SUBREG && 2725 GET_MODE (SUBREG_REG (operands[3])) == HImode && 2726 mcore_num_zeros (INTVAL (operands[1]) | 0xffff0000) == 1 && 2727 operands[2] == operands[3] && 2728 mcore_is_dead (insn, operands[0])" 2729"* 2730 if (! mcore_is_same_reg (operands[2], operands[3])) 2731 output_asm_insn (\"mov\\t%2,%3\", operands); 2732 return mcore_output_bclri (operands[2], INTVAL (operands[1]) | 0xffff0000);") 2733 2734; This peephole helps when using -mwide-bitfields to widen fields so they 2735; collapse. This, however, has the effect that a narrower mode is not used 2736; when desirable. 2737; 2738; e.g., sequences like: 2739; 2740; ldw r8,(r6) 2741; movi r7,0x00ffffff 2742; and r8,r7 r7 dead 2743; stw r8,(r6) r8 dead 2744; 2745; get peepholed to become: 2746; 2747; movi r8,0 2748; stb r8,(r6) r8 dead 2749; 2750; Do only easy addresses that have no offset. This peephole is also applied 2751; to halfwords. 
We need to check that the load is non-volatile before we get 2752; rid of it. 2753 2754(define_peephole 2755 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "") 2756 (match_operand:SI 1 "memory_operand" "")) 2757 (set (match_operand:SI 2 "mcore_arith_reg_operand" "") 2758 (match_operand:SI 3 "const_int_operand" "")) 2759 (set (match_dup 0) (and:SI (match_dup 0) (match_dup 2))) 2760 (set (match_operand:SI 4 "memory_operand" "") (match_dup 0))] 2761 "mcore_is_dead (insn, operands[0]) && 2762 ! MEM_VOLATILE_P (operands[1]) && 2763 mcore_is_dead (insn, operands[2]) && 2764 (mcore_byte_offset (INTVAL (operands[3])) > -1 || 2765 mcore_halfword_offset (INTVAL (operands[3])) > -1) && 2766 ! MEM_VOLATILE_P (operands[4]) && 2767 GET_CODE (XEXP (operands[4], 0)) == REG" 2768"* 2769{ 2770 int ofs; 2771 machine_mode mode; 2772 rtx base_reg = XEXP (operands[4], 0); 2773 2774 if ((ofs = mcore_byte_offset (INTVAL (operands[3]))) > -1) 2775 mode = QImode; 2776 else if ((ofs = mcore_halfword_offset (INTVAL (operands[3]))) > -1) 2777 mode = HImode; 2778 else 2779 gcc_unreachable (); 2780 2781 if (ofs > 0) 2782 operands[4] = gen_rtx_MEM (mode, 2783 gen_rtx_PLUS (SImode, base_reg, GEN_INT(ofs))); 2784 else 2785 operands[4] = gen_rtx_MEM (mode, base_reg); 2786 2787 if (mode == QImode) 2788 return \"movi %0,0\\n\\tst.b %0,%4\"; 2789 2790 return \"movi %0,0\\n\\tst.h %0,%4\"; 2791}") 2792 2793; from sop11. 
; get btsti's for (LT A 0) where A is a QI or HI value

;; A sign-extend from QI/HI followed by a "< 0" test of the result is
;; just a test of the source operand's sign bit.  When the extended
;; register is dead after the compare (mcore_is_dead), replace the pair
;; with a single btsti of bit 7 (QImode) or bit 15 (HImode).

(define_peephole
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "r")
	(sign_extend:SI (match_operand:QI 1 "mcore_arith_reg_operand" "0")))
   (set (reg:CC 17)
	(lt:CC (match_dup 0)
	       (const_int 0)))]
  "mcore_is_dead (insn, operands[0])"
  "btsti %0,7")

(define_peephole
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "r")
	(sign_extend:SI (match_operand:HI 1 "mcore_arith_reg_operand" "0")))
   (set (reg:CC 17)
	(lt:CC (match_dup 0)
	       (const_int 0)))]
  "mcore_is_dead (insn, operands[0])"
  "btsti %0,15")

; Pick up a tst.  This combination happens because the immediate is not
; allowed to fold into one of the operands of the tst.  Does not happen
; when relaxing immediates.  BRC

(define_peephole
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
	(match_operand:SI 1 "mcore_arith_reg_operand" ""))
   (set (match_dup 0)
	(and:SI (match_dup 0)
		(match_operand:SI 2 "mcore_literal_K_operand" "")))
   (set (reg:CC 17) (ne:CC (match_dup 0) (const_int 0)))]
  "mcore_is_dead (insn, operands[0])"
  "movi %0,%2\;tst %1,%0")

;; Fuse a one-bit conditional select (if_then_else on a zero_extract)
;; with a following "!= 0" test of its result: emit a btsti of the
;; selected bit plus a single conditional clear/move.  The exact output
;; form depends on which of operands 3/4 is a register aliasing the
;; destination and which is the constant zero; any other operand
;; combination is rejected with gcc_unreachable.

(define_peephole
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
	(if_then_else:SI (ne (zero_extract:SI
			      (match_operand:SI 1 "mcore_arith_reg_operand" "")
			      (const_int 1)
			      (match_operand:SI 2 "mcore_literal_K_operand" ""))
			     (const_int 0))
	   (match_operand:SI 3 "mcore_arith_imm_operand" "")
	   (match_operand:SI 4 "mcore_arith_imm_operand" "")))
   (set (reg:CC 17) (ne:CC (match_dup 0) (const_int 0)))]
  ""
"*
{
  unsigned int op0 = REGNO (operands[0]);

  if (GET_CODE (operands[3]) == REG)
    {
      if (REGNO (operands[3]) == op0 && GET_CODE (operands[4]) == CONST_INT
	  && INTVAL (operands[4]) == 0)
	return \"btsti %1,%2\\n\\tclrf %0\";
      else if (GET_CODE (operands[4]) == REG)
	{
	  if (REGNO (operands[4]) == op0)
	    return \"btsti %1,%2\\n\\tmovf %0,%3\";
	  else if (REGNO (operands[3]) == op0)
	    return \"btsti %1,%2\\n\\tmovt %0,%4\";
	}

      gcc_unreachable ();
    }
  else if (GET_CODE (operands[3]) == CONST_INT
	   && INTVAL (operands[3]) == 0
	   && GET_CODE (operands[4]) == REG)
    return \"btsti %1,%2\\n\\tclrt %0\";

  gcc_unreachable ();
}")

; experimental - do the constant folding ourselves.  note that this isn't
; re-applied like we'd really want.  i.e., four ands collapse into two
; instead of one.  this is because peepholes are applied as a sliding
; window.  the peephole does not generate new rtl's, but instead slides
; across the rtl's generating machine instructions.  it would be nice
; if the peephole optimizer is changed to re-apply patterns and to gen
; new rtl's.  this is more flexible.  the pattern below helps when we're
; not using relaxed immediates.  BRC

;(define_peephole
;  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
;	(match_operand:SI 1 "const_int_operand" ""))
;   (set (match_operand:SI 2 "mcore_arith_reg_operand" "")
;	(and:SI (match_dup 2) (match_dup 0)))
;   (set (match_dup 0)
;	(match_operand:SI 3 "const_int_operand" ""))
;   (set (match_dup 2)
;	(and:SI (match_dup 2) (match_dup 0)))]
;  "!TARGET_RELAX_IMM && mcore_is_dead (insn, operands[0]) &&
;   mcore_const_ok_for_inline (INTVAL (operands[1]) & INTVAL (operands[3]))"
;  "*
;{
;  rtx out_operands[2];
;  out_operands[0] = operands[0];
;  out_operands[1] = GEN_INT (INTVAL (operands[1]) & INTVAL (operands[3]));
;
;  output_inline_const (SImode, out_operands);
;
;  output_asm_insn (\"and %2,%0\", operands);
;
;  return \"\";
;}")

; BRC: for inlining get rid of extra test - experimental
;(define_peephole
;  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "r")
;	(ne:SI (reg:CC 17) (const_int 0)))
;   (set (reg:CC 17) (ne:CC (match_dup 0) (const_int 0)))
;   (set (pc)
;	(if_then_else (eq (reg:CC 17) (const_int 0))
;	    (label_ref (match_operand 1 "" ""))
;	    (pc)))]
;  ""
;  "*
;{
;  if (get_attr_length (insn) == 10)
;    {
;      output_asm_insn (\"bt 2f\\n\\tjmpi [1f]\", operands);
;      output_asm_insn (\".align 2\\n1:\", operands);
;      output_asm_insn (\".long %1\\n2:\", operands);
;      return \"\";
;    }
;  return \"bf %l1\";
;}")


;;; Special patterns for dealing with the constant pool.

;;; 4 byte integer in line.

(define_insn "consttable_4"
 [(unspec_volatile [(match_operand:SI 0 "general_operand" "=g")] 0)]
 ""
 "*
{
  assemble_integer (operands[0], 4, BITS_PER_WORD, 1);
  return \"\";
}"
 [(set_attr "length" "4")])

;;; align to a four byte boundary.

(define_insn "align_4"
 [(unspec_volatile [(const_int 0)] 1)]
 ""
 ".align 2")

;;; Handle extra constant pool entries created during final pass.

(define_insn "consttable_end"
  [(unspec_volatile [(const_int 0)] 2)]
  ""
  "* return mcore_output_jump_label_table ();")

;;
;; Stack allocation -- in particular, for alloca().
;; this is *not* what we use for entry into functions.
;;
;; This is how we allocate stack space.  If we are allocating a
;; constant amount of space and we know it is less than 4096
;; bytes, we need do nothing.
;;
;; If it is more than 4096 bytes, we need to probe the stack
;; periodically.
;;
;; operands[1], the distance is a POSITIVE number indicating that we
;; are allocating stack space
;;
;; NOTE: every arm of the C body below ends in DONE, so the RTL
;; template (including the otherwise-unbound (match_dup 2)) is never
;; actually emitted; it exists only to declare the operands.
(define_expand "allocate_stack"
  [(set (reg:SI 0)
	(plus:SI (reg:SI 0)
		 (match_operand:SI 1 "general_operand" "")))
   (set (match_operand:SI 0 "register_operand" "=r")
	(match_dup 2))]
  ""
  "
{
  /* If he wants no probing, just do it for him.  */
  if (mcore_stack_increment == 0)
    {
      emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,operands[1]));
;;      emit_move_insn (operands[0], virtual_stack_dynamic_rtx);
      DONE;
    }

  /* For small constant growth, we unroll the code.  */
  if (GET_CODE (operands[1]) == CONST_INT
      && INTVAL (operands[1]) < 8 * STACK_UNITS_MAXSTEP)
    {
      HOST_WIDE_INT left = INTVAL(operands[1]);

      /* If it's a long way, get close enough for a last shot.  */
      if (left >= STACK_UNITS_MAXSTEP)
	{
	  rtx tmp = gen_reg_rtx (Pmode);
	  emit_insn (gen_movsi (tmp, GEN_INT (STACK_UNITS_MAXSTEP)));
	  do
	    {
	      rtx memref = gen_rtx_MEM (SImode, stack_pointer_rtx);

	      /* Volatile store probes the new stack page.  */
	      MEM_VOLATILE_P (memref) = 1;
	      emit_insn (gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, tmp));
	      emit_insn (gen_movsi (memref, stack_pointer_rtx));
	      left -= STACK_UNITS_MAXSTEP;
	    }
	  while (left > STACK_UNITS_MAXSTEP);
	}
      /* Perform the final adjustment.  */
      emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, GEN_INT (-left)));
;;      emit_move_insn (operands[0], virtual_stack_dynamic_rtx);
      DONE;
    }
  else
    {
      rtx_code_label *out_label = 0;
      rtx_code_label *loop_label = gen_label_rtx ();
      rtx step = gen_reg_rtx (Pmode);
      rtx tmp = gen_reg_rtx (Pmode);
      rtx test, memref;

#if 1
      emit_insn (gen_movsi (tmp, operands[1]));
      emit_insn (gen_movsi (step, GEN_INT (STACK_UNITS_MAXSTEP)));

      /* Variable size: may already be smaller than one step, so branch
	 past the probe loop up front.  */
      if (GET_CODE (operands[1]) != CONST_INT)
	{
	  out_label = gen_label_rtx ();
	  test = gen_rtx_GEU (VOIDmode, step, tmp);		/* quick out */
	  emit_jump_insn (gen_cbranchsi4 (test, step, tmp, out_label));
	}

      /* Run a loop that steps it incrementally.  */
      emit_label (loop_label);

      /* Extend a step, probe, and adjust remaining count.  */
      emit_insn(gen_subsi3(stack_pointer_rtx, stack_pointer_rtx, step));
      memref = gen_rtx_MEM (SImode, stack_pointer_rtx);
      MEM_VOLATILE_P (memref) = 1;
      emit_insn(gen_movsi(memref, stack_pointer_rtx));
      emit_insn(gen_subsi3(tmp, tmp, step));

      /* Loop condition -- going back up.  */
      test = gen_rtx_LTU (VOIDmode, step, tmp);
      emit_jump_insn (gen_cbranchsi4 (test, step, tmp, loop_label));

      if (out_label)
	emit_label (out_label);

      /* Bump the residual.  */
      emit_insn (gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, tmp));
;;      emit_move_insn (operands[0], virtual_stack_dynamic_rtx);
      DONE;
#else
      /* simple one-shot -- ensure register and do a subtract.
       * This does NOT comply with the ABI.  */
      emit_insn (gen_movsi (tmp, operands[1]));
      emit_insn (gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, tmp));
;;      emit_move_insn (operands[0], virtual_stack_dynamic_rtx);
      DONE;
#endif
    }
}")