/* Match-and-simplify patterns for shared GENERIC and GIMPLE folding.
   This file is consumed by genmatch which produces gimple-match.c
   and generic-match.c from it.

   Copyright (C) 2014-2018 Free Software Foundation, Inc.
   Contributed by Richard Biener <rguenther@suse.de>
   and Prathamesh Kulkarni <bilbotheelffriend@gmail.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */


/* Generic tree predicates we inherit.  */
(define_predicates
   integer_onep integer_zerop integer_all_onesp integer_minus_onep
   integer_each_onep integer_truep integer_nonzerop
   real_zerop real_onep real_minus_onep
   zerop
   CONSTANT_CLASS_P
   tree_expr_nonnegative_p
   tree_expr_nonzero_p
   integer_valued_real_p
   integer_pow2p
   HONOR_NANS)

/* Operator lists.  */
(define_operator_list tcc_comparison
  lt le eq ne ge gt unordered ordered unlt unle ungt unge uneq ltgt)
(define_operator_list inverted_tcc_comparison
  ge gt ne eq lt le ordered unordered ge gt le lt ltgt uneq)
(define_operator_list inverted_tcc_comparison_with_nans
  unge ungt ne eq unlt unle ordered unordered ge gt le lt ltgt uneq)
(define_operator_list swapped_tcc_comparison
  gt ge eq ne le lt unordered ordered ungt unge unlt unle uneq ltgt)
(define_operator_list simple_comparison lt le eq ne ge gt)
(define_operator_list swapped_simple_comparison gt ge eq ne le lt)

#include "cfn-operators.pd"

/* Define operand lists for math rounding functions {,i,l,ll}FN,
   where the versions prefixed with "i" return an int, those prefixed with
   "l" return a long and those prefixed with "ll" return a long long.

   Also define operand lists:

     X<FN>F for all float functions, in the order i, l, ll
     X<FN> for all double functions, in the same order
     X<FN>L for all long double functions, in the same order.  */
#define DEFINE_INT_AND_FLOAT_ROUND_FN(FN) \
  (define_operator_list X##FN##F BUILT_IN_I##FN##F \
				 BUILT_IN_L##FN##F \
				 BUILT_IN_LL##FN##F) \
  (define_operator_list X##FN BUILT_IN_I##FN \
			      BUILT_IN_L##FN \
			      BUILT_IN_LL##FN) \
  (define_operator_list X##FN##L BUILT_IN_I##FN##L \
				 BUILT_IN_L##FN##L \
				 BUILT_IN_LL##FN##L)

DEFINE_INT_AND_FLOAT_ROUND_FN (FLOOR)
DEFINE_INT_AND_FLOAT_ROUND_FN (CEIL)
DEFINE_INT_AND_FLOAT_ROUND_FN (ROUND)
DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)

/* As opposed to convert?, this still creates a single pattern, so
   it is not a suitable replacement for convert? in all cases.  */
(match (nop_convert @0)
 (convert @0)
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))))
(match (nop_convert @0)
 (view_convert @0)
 (if (VECTOR_TYPE_P (type) && VECTOR_TYPE_P (TREE_TYPE (@0))
      && known_eq (TYPE_VECTOR_SUBPARTS (type),
		   TYPE_VECTOR_SUBPARTS (TREE_TYPE (@0)))
      && tree_nop_conversion_p (TREE_TYPE (type), TREE_TYPE (TREE_TYPE (@0))))))
/* This one has to be last, or it shadows the others.  */
(match (nop_convert @0)
 @0)

/* Simplifications of operations with one constant operand and
   simplifications to constants or single values.  */

(for op (plus pointer_plus minus bit_ior bit_xor)
  (simplify
    (op @0 integer_zerop)
    (non_lvalue @0)))

/* 0 +p index -> (type)index */
(simplify
 (pointer_plus integer_zerop @1)
 (non_lvalue (convert @1)))

/* ptr - 0 -> (type)ptr */
(simplify
 (pointer_diff @0 integer_zerop)
 (convert @0))

/* See if ARG1 is zero and X + ARG1 reduces to X.
   Likewise if the operands are reversed.  */
(simplify
 (plus:c @0 real_zerop@1)
 (if (fold_real_zero_addition_p (type, @1, 0))
  (non_lvalue @0)))

/* See if ARG1 is zero and X - ARG1 reduces to X.  */
(simplify
 (minus @0 real_zerop@1)
 (if (fold_real_zero_addition_p (type, @1, 1))
  (non_lvalue @0)))

/* Simplify x - x.
   This is unsafe for certain floats even in non-IEEE formats.
   In IEEE, it is unsafe because it does wrong for NaNs.
   Also note that operand_equal_p is always false if an operand
   is volatile.  */
(simplify
 (minus @0 @0)
 (if (!FLOAT_TYPE_P (type) || !HONOR_NANS (type))
  { build_zero_cst (type); }))
(simplify
 (pointer_diff @@0 @0)
 { build_zero_cst (type); })

(simplify
 (mult @0 integer_zerop@1)
 @1)

/* Maybe fold x * 0 to 0.  The expressions aren't the same
   when x is NaN, since x * 0 is also NaN.  Nor are they the
   same in modes with signed zeros, since multiplying a
   negative value by 0 gives -0, not +0.  */
(simplify
 (mult @0 real_zerop@1)
 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
  @1))

/* In IEEE floating point, x*1 is not equivalent to x for snans.
   Likewise for complex arithmetic with signed zeros.  */
(simplify
 (mult @0 real_onep)
 (if (!HONOR_SNANS (type)
      && (!HONOR_SIGNED_ZEROS (type)
	  || !COMPLEX_FLOAT_TYPE_P (type)))
  (non_lvalue @0)))

/* Transform x * -1.0 into -x.  */
(simplify
 (mult @0 real_minus_onep)
  (if (!HONOR_SNANS (type)
       && (!HONOR_SIGNED_ZEROS (type)
	   || !COMPLEX_FLOAT_TYPE_P (type)))
   (negate @0)))

(for cmp (gt ge lt le)
     outp (convert convert negate negate)
     outn (negate negate convert convert)
 /* Transform (X > 0.0 ? 1.0 : -1.0) into copysign(1, X). */
 /* Transform (X >= 0.0 ? 1.0 : -1.0) into copysign(1, X). */
 /* Transform (X < 0.0 ? 1.0 : -1.0) into copysign(1,-X). */
 /* Transform (X <= 0.0 ? 1.0 : -1.0) into copysign(1,-X). */
 (simplify
  (cond (cmp @0 real_zerop) real_onep@1 real_minus_onep)
  (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type)
       && types_match (type, TREE_TYPE (@0)))
   (switch
    (if (types_match (type, float_type_node))
     (BUILT_IN_COPYSIGNF @1 (outp @0)))
    (if (types_match (type, double_type_node))
     (BUILT_IN_COPYSIGN @1 (outp @0)))
    (if (types_match (type, long_double_type_node))
     (BUILT_IN_COPYSIGNL @1 (outp @0))))))
 /* Transform (X > 0.0 ? -1.0 : 1.0) into copysign(1,-X). */
 /* Transform (X >= 0.0 ? -1.0 : 1.0) into copysign(1,-X). */
 /* Transform (X < 0.0 ? -1.0 : 1.0) into copysign(1,X). */
 /* Transform (X <= 0.0 ? -1.0 : 1.0) into copysign(1,X). */
 (simplify
  (cond (cmp @0 real_zerop) real_minus_onep real_onep@1)
  (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type)
       && types_match (type, TREE_TYPE (@0)))
   (switch
    (if (types_match (type, float_type_node))
     (BUILT_IN_COPYSIGNF @1 (outn @0)))
    (if (types_match (type, double_type_node))
     (BUILT_IN_COPYSIGN @1 (outn @0)))
    (if (types_match (type, long_double_type_node))
     (BUILT_IN_COPYSIGNL @1 (outn @0)))))))

/* Transform X * copysign (1.0, X) into abs(X). */
(simplify
 (mult:c @0 (COPYSIGN_ALL real_onep @0))
 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
  (abs @0)))

/* Transform X * copysign (1.0, -X) into -abs(X). */
(simplify
 (mult:c @0 (COPYSIGN_ALL real_onep (negate @0)))
 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
  (negate (abs @0))))

/* Transform copysign (CST, X) into copysign (ABS(CST), X). */
(simplify
 (COPYSIGN_ALL REAL_CST@0 @1)
 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@0)))
  (COPYSIGN_ALL (negate @0) @1)))

/* X * 1, X / 1 -> X.  */
(for op (mult trunc_div ceil_div floor_div round_div exact_div)
  (simplify
    (op @0 integer_onep)
    (non_lvalue @0)))

/* (A / (1 << B)) -> (A >> B).
   Only for unsigned A.  For signed A, this would not preserve rounding
   toward zero.
   For example: (-1 / ( 1 << B)) !=  -1 >> B.  */
(simplify
 (trunc_div @0 (lshift integer_onep@1 @2))
 (if ((TYPE_UNSIGNED (type) || tree_expr_nonnegative_p (@0))
      && (!VECTOR_TYPE_P (type)
	  || target_supports_op_p (type, RSHIFT_EXPR, optab_vector)
	  || target_supports_op_p (type, RSHIFT_EXPR, optab_scalar)))
  (rshift @0 @2)))

/* Preserve explicit divisions by 0: the C++ front-end wants to detect
   undefined behavior in constexpr evaluation, and assuming that the division
   traps enables better optimizations than these anyway.  */
(for div (trunc_div ceil_div floor_div round_div exact_div)
 /* 0 / X is always zero.  */
 (simplify
  (div integer_zerop@0 @1)
  /* But not for 0 / 0 so that we can get the proper warnings and errors.  */
  (if (!integer_zerop (@1))
   @0))
 /* X / -1 is -X.  */
 (simplify
   (div @0 integer_minus_onep@1)
   (if (!TYPE_UNSIGNED (type))
    (negate @0)))
 /* X / X is one.  */
 (simplify
  (div @0 @0)
  /* But not for 0 / 0 so that we can get the proper warnings and errors.
     And not for _Fract types where we can't build 1.  */
  (if (!integer_zerop (@0) && !ALL_FRACT_MODE_P (TYPE_MODE (type)))
   { build_one_cst (type); }))
 /* X / abs (X) is X < 0 ? -1 : 1.  */
 (simplify
   (div:C @0 (abs @0))
   (if (INTEGRAL_TYPE_P (type)
	&& TYPE_OVERFLOW_UNDEFINED (type))
    (cond (lt @0 { build_zero_cst (type); })
	  { build_minus_one_cst (type); } { build_one_cst (type); })))
 /* X / -X is -1.  */
 (simplify
   (div:C @0 (negate @0))
   (if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
	&& TYPE_OVERFLOW_UNDEFINED (type))
    { build_minus_one_cst (type); })))

/* For unsigned integral types, FLOOR_DIV_EXPR is the same as
   TRUNC_DIV_EXPR.  Rewrite into the latter in this case.  */
(simplify
 (floor_div @0 @1)
 (if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
      && TYPE_UNSIGNED (type))
  (trunc_div @0 @1)))

/* Combine two successive divisions.  Note that combining ceil_div
   and floor_div is trickier and combining round_div even more so.  */
(for div (trunc_div exact_div)
 (simplify
  (div (div @0 INTEGER_CST@1) INTEGER_CST@2)
  (with {
    bool overflow_p;
    wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
			    TYPE_SIGN (type), &overflow_p);
   }
   (if (!overflow_p)
    (div @0 { wide_int_to_tree (type, mul); })
    (if (TYPE_UNSIGNED (type)
	 || mul != wi::min_value (TYPE_PRECISION (type), SIGNED))
     { build_zero_cst (type); })))))

/* Combine successive multiplications.  Similar to above, but handling
   overflow is different.  */
(simplify
 (mult (mult @0 INTEGER_CST@1) INTEGER_CST@2)
 (with {
   bool overflow_p;
   wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
			   TYPE_SIGN (type), &overflow_p);
  }
  /* Skip folding on overflow: the only special case is @1 * @2 == -INT_MIN,
     otherwise undefined overflow implies that @0 must be zero.  */
  (if (!overflow_p || TYPE_OVERFLOW_WRAPS (type))
   (mult @0 { wide_int_to_tree (type, mul); }))))

/* Optimize A / A to 1.0 if we don't care about
   NaNs or Infinities.  */
(simplify
 (rdiv @0 @0)
 (if (FLOAT_TYPE_P (type)
      && ! HONOR_NANS (type)
      && ! HONOR_INFINITIES (type))
  { build_one_cst (type); }))

/* Optimize -A / A to -1.0 if we don't care about
   NaNs or Infinities.  */
(simplify
 (rdiv:C @0 (negate @0))
 (if (FLOAT_TYPE_P (type)
      && ! HONOR_NANS (type)
      && ! HONOR_INFINITIES (type))
  { build_minus_one_cst (type); }))

/* PR71078: x / abs(x) -> copysign (1.0, x) */
(simplify
 (rdiv:C (convert? @0) (convert? (abs @0)))
  (if (SCALAR_FLOAT_TYPE_P (type)
       && ! HONOR_NANS (type)
       && ! HONOR_INFINITIES (type))
   (switch
    (if (types_match (type, float_type_node))
     (BUILT_IN_COPYSIGNF { build_one_cst (type); } (convert @0)))
    (if (types_match (type, double_type_node))
     (BUILT_IN_COPYSIGN { build_one_cst (type); } (convert @0)))
    (if (types_match (type, long_double_type_node))
     (BUILT_IN_COPYSIGNL { build_one_cst (type); } (convert @0))))))

/* In IEEE floating point, x/1 is not equivalent to x for snans.  */
(simplify
 (rdiv @0 real_onep)
 (if (!HONOR_SNANS (type))
  (non_lvalue @0)))

/* In IEEE floating point, x/-1 is not equivalent to -x for snans.  */
(simplify
 (rdiv @0 real_minus_onep)
 (if (!HONOR_SNANS (type))
  (negate @0)))

(if (flag_reciprocal_math)
 /* Convert (A/B)/C to A/(B*C). */
 (simplify
  (rdiv (rdiv:s @0 @1) @2)
  (rdiv @0 (mult @1 @2)))

 /* Canonicalize x / (C1 * y) to (x * C2) / y.  */
 (simplify
  (rdiv @0 (mult:s @1 REAL_CST@2))
  (with
   { tree tem = const_binop (RDIV_EXPR, type, build_one_cst (type), @2); }
   (if (tem)
    (rdiv (mult @0 { tem; } ) @1))))

 /* Convert A/(B/C) to (A/B)*C  */
 (simplify
  (rdiv @0 (rdiv:s @1 @2))
   (mult (rdiv @0 @1) @2)))

/* Simplify x / (- y) to -x / y.  */
(simplify
 (rdiv @0 (negate @1))
 (rdiv (negate @0) @1))

/* Optimize (X & (-A)) / A where A is a power of 2, to X >> log2(A) */
(for div (trunc_div ceil_div floor_div round_div exact_div)
 (simplify
  (div (convert? (bit_and @0 INTEGER_CST@1)) INTEGER_CST@2)
  (if (integer_pow2p (@2)
       && tree_int_cst_sgn (@2) > 0
       && tree_nop_conversion_p (type, TREE_TYPE (@0))
       && wi::to_wide (@2) + wi::to_wide (@1) == 0)
   (rshift (convert @0)
	   { build_int_cst (integer_type_node,
			    wi::exact_log2 (wi::to_wide (@2))); }))))

/* If ARG1 is a constant, we can convert this to a multiply by the
   reciprocal.  This does not have the same rounding properties,
   so only do this if -freciprocal-math.  We can actually
   always safely do it if ARG1 is a power of two, but it's hard to
   tell if it is or not in a portable manner.  */
(for cst (REAL_CST COMPLEX_CST VECTOR_CST)
 (simplify
  (rdiv @0 cst@1)
  (if (optimize)
   (if (flag_reciprocal_math
	&& !real_zerop (@1))
    (with
     { tree tem = const_binop (RDIV_EXPR, type, build_one_cst (type), @1); }
     (if (tem)
      (mult @0 { tem; } )))
    (if (cst != COMPLEX_CST)
     (with { tree inverse = exact_inverse (type, @1); }
      (if (inverse)
       (mult @0 { inverse; } ))))))))

(for mod (ceil_mod floor_mod round_mod trunc_mod)
 /* 0 % X is always zero.  */
 (simplify
  (mod integer_zerop@0 @1)
  /* But not for 0 % 0 so that we can get the proper warnings and errors.  */
  (if (!integer_zerop (@1))
   @0))
 /* X % 1 is always zero.  */
 (simplify
  (mod @0 integer_onep)
  { build_zero_cst (type); })
 /* X % -1 is zero.  */
 (simplify
  (mod @0 integer_minus_onep@1)
  (if (!TYPE_UNSIGNED (type))
   { build_zero_cst (type); }))
 /* X % X is zero.  */
 (simplify
  (mod @0 @0)
  /* But not for 0 % 0 so that we can get the proper warnings and errors.  */
  (if (!integer_zerop (@0))
   { build_zero_cst (type); }))
 /* (X % Y) % Y is just X % Y.  */
 (simplify
  (mod (mod@2 @0 @1) @1)
  @2)
 /* From extract_muldiv_1: (X * C1) % C2 is zero if C1 is a multiple of C2.  */
 (simplify
  (mod (mult @0 INTEGER_CST@1) INTEGER_CST@2)
  (if (ANY_INTEGRAL_TYPE_P (type)
       && TYPE_OVERFLOW_UNDEFINED (type)
       && wi::multiple_of_p (wi::to_wide (@1), wi::to_wide (@2),
			     TYPE_SIGN (type)))
   { build_zero_cst (type); })))

/* X % -C is the same as X % C.  */
(simplify
 (trunc_mod @0 INTEGER_CST@1)
  (if (TYPE_SIGN (type) == SIGNED
       && !TREE_OVERFLOW (@1)
       && wi::neg_p (wi::to_wide (@1))
       && !TYPE_OVERFLOW_TRAPS (type)
       /* Avoid this transformation if C is INT_MIN, i.e. C == -C.  */
       && !sign_bit_p (@1, @1))
   (trunc_mod @0 (negate @1))))

/* X % -Y is the same as X % Y.  */
(simplify
 (trunc_mod @0 (convert? (negate @1)))
 (if (INTEGRAL_TYPE_P (type)
      && !TYPE_UNSIGNED (type)
      && !TYPE_OVERFLOW_TRAPS (type)
      && tree_nop_conversion_p (type, TREE_TYPE (@1))
      /* Avoid this transformation if X might be INT_MIN or
	 Y might be -1, because we would then change valid
	 INT_MIN % -(-1) into invalid INT_MIN % -1.  */
      && (expr_not_equal_to (@0, wi::to_wide (TYPE_MIN_VALUE (type)))
	  || expr_not_equal_to (@1, wi::minus_one (TYPE_PRECISION
							(TREE_TYPE (@1))))))
  (trunc_mod @0 (convert @1))))

/* X - (X / Y) * Y is the same as X % Y.  */
(simplify
 (minus (convert1? @0) (convert2? (mult:c (trunc_div @@0 @@1) @1)))
 (if (INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
  (convert (trunc_mod @0 @1))))

/* Optimize TRUNC_MOD_EXPR by a power of two into a BIT_AND_EXPR,
   i.e. "X % C" into "X & (C - 1)", if X and C are positive.
   Also optimize A % (C << N)  where C is a power of 2,
   to A & ((C << N) - 1).  */
(match (power_of_two_cand @1)
 INTEGER_CST@1)
(match (power_of_two_cand @1)
 (lshift INTEGER_CST@1 @2))
(for mod (trunc_mod floor_mod)
 (simplify
  (mod @0 (convert?@3 (power_of_two_cand@1 @2)))
  (if ((TYPE_UNSIGNED (type)
	|| tree_expr_nonnegative_p (@0))
	&& tree_nop_conversion_p (type, TREE_TYPE (@3))
	&& integer_pow2p (@2) && tree_int_cst_sgn (@2) > 0)
   (bit_and @0 (convert (minus @1 { build_int_cst (TREE_TYPE (@1), 1); }))))))

/* Simplify (unsigned t * 2)/2 -> unsigned t & 0x7FFFFFFF.  */
(simplify
 (trunc_div (mult @0 integer_pow2p@1) @1)
 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
  (bit_and @0 { wide_int_to_tree
		(type, wi::mask (TYPE_PRECISION (type)
				 - wi::exact_log2 (wi::to_wide (@1)),
				 false, TYPE_PRECISION (type))); })))

/* Simplify (unsigned t / 2) * 2 -> unsigned t & ~1.  */
(simplify
 (mult (trunc_div @0 integer_pow2p@1) @1)
 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
  (bit_and @0 (negate @1))))

/* Simplify (t * 2) / 2) -> t.  */
(for div (trunc_div ceil_div floor_div round_div exact_div)
 (simplify
  (div (mult:c @0 @1) @1)
  (if (ANY_INTEGRAL_TYPE_P (type)
       && TYPE_OVERFLOW_UNDEFINED (type))
   @0)))

(for op (negate abs)
 /* Simplify cos(-x) and cos(|x|) -> cos(x).  Similarly for cosh.  */
 (for coss (COS COSH)
  (simplify
   (coss (op @0))
    (coss @0)))
 /* Simplify pow(-x, y) and pow(|x|,y) -> pow(x,y) if y is an even integer.  */
 (for pows (POW)
  (simplify
   (pows (op @0) REAL_CST@1)
   (with { HOST_WIDE_INT n; }
    (if (real_isinteger (&TREE_REAL_CST (@1), &n) && (n & 1) == 0)
     (pows @0 @1)))))
 /* Likewise for powi.  */
 (for pows (POWI)
  (simplify
   (pows (op @0) INTEGER_CST@1)
   (if ((wi::to_wide (@1) & 1) == 0)
    (pows @0 @1))))
 /* Strip negate and abs from both operands of hypot.  */
 (for hypots (HYPOT)
  (simplify
   (hypots (op @0) @1)
   (hypots @0 @1))
  (simplify
   (hypots @0 (op @1))
   (hypots @0 @1)))
 /* copysign(-x, y) and copysign(abs(x), y) -> copysign(x, y).  */
 (for copysigns (COPYSIGN_ALL)
  (simplify
   (copysigns (op @0) @1)
   (copysigns @0 @1))))

/* abs(x)*abs(x) -> x*x.  Should be valid for all types.  */
(simplify
 (mult (abs@1 @0) @1)
 (mult @0 @0))

/* cos(copysign(x, y)) -> cos(x).  Similarly for cosh.  */
(for coss (COS COSH)
     copysigns (COPYSIGN)
 (simplify
  (coss (copysigns @0 @1))
   (coss @0)))

/* pow(copysign(x, y), z) -> pow(x, z) if z is an even integer.  */
(for pows (POW)
     copysigns (COPYSIGN)
 (simplify
  (pows (copysigns @0 @2) REAL_CST@1)
  (with { HOST_WIDE_INT n; }
   (if (real_isinteger (&TREE_REAL_CST (@1), &n) && (n & 1) == 0)
    (pows @0 @1)))))
/* Likewise for powi.  */
(for pows (POWI)
     copysigns (COPYSIGN)
 (simplify
  (pows (copysigns @0 @2) INTEGER_CST@1)
  (if ((wi::to_wide (@1) & 1) == 0)
   (pows @0 @1))))

(for hypots (HYPOT)
     copysigns (COPYSIGN)
 /* hypot(copysign(x, y), z) -> hypot(x, z).  */
 (simplify
  (hypots (copysigns @0 @1) @2)
  (hypots @0 @2))
 /* hypot(x, copysign(y, z)) -> hypot(x, y).  */
 (simplify
  (hypots @0 (copysigns @1 @2))
  (hypots @0 @1)))

/* copysign(x, CST) -> [-]abs (x).  */
(for copysigns (COPYSIGN_ALL)
 (simplify
  (copysigns @0 REAL_CST@1)
  (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
   (negate (abs @0))
   (abs @0))))

/* copysign(copysign(x, y), z) -> copysign(x, z).  */
(for copysigns (COPYSIGN_ALL)
 (simplify
  (copysigns (copysigns @0 @1) @2)
  (copysigns @0 @2)))

/* copysign(x,y)*copysign(x,y) -> x*x.  */
(for copysigns (COPYSIGN_ALL)
 (simplify
  (mult (copysigns@2 @0 @1) @2)
  (mult @0 @0)))

/* ccos(-x) -> ccos(x).  Similarly for ccosh.  */
(for ccoss (CCOS CCOSH)
 (simplify
  (ccoss (negate @0))
   (ccoss @0)))

/* cabs(-x) and cos(conj(x)) -> cabs(x).  */
(for ops (conj negate)
 (for cabss (CABS)
  (simplify
   (cabss (ops @0))
   (cabss @0))))

/* Fold (a * (1 << b)) into (a << b)  */
(simplify
 (mult:c @0 (convert? (lshift integer_onep@1 @2)))
  (if (! FLOAT_TYPE_P (type)
       && tree_nop_conversion_p (type, TREE_TYPE (@1)))
   (lshift @0 @2)))

/* Fold (1 << (C - x)) where C = precision(type) - 1
   into ((1 << C) >> x). */
(simplify
 (lshift integer_onep@0 (minus@1 INTEGER_CST@2 @3))
  (if (INTEGRAL_TYPE_P (type)
       && wi::eq_p (wi::to_wide (@2), TYPE_PRECISION (type) - 1)
       && single_use (@1))
   (if (TYPE_UNSIGNED (type))
     (rshift (lshift @0 @2) @3)
   (with
    { tree utype = unsigned_type_for (type); }
    (convert (rshift (lshift (convert:utype @0) @2) @3))))))

/* Fold (C1/X)*C2 into (C1*C2)/X.  */
(simplify
 (mult (rdiv@3 REAL_CST@0 @1) REAL_CST@2)
  (if (flag_associative_math
       && single_use (@3))
   (with
    { tree tem = const_binop (MULT_EXPR, type, @0, @2); }
    (if (tem)
     (rdiv { tem; } @1)))))

/* Simplify ~X & X as zero.  */
(simplify
 (bit_and:c (convert? @0) (convert? (bit_not @0)))
  { build_zero_cst (type); })

/* PR71636: Transform x & ((1U << b) - 1) -> x & ~(~0U << b);  */
(simplify
  (bit_and:c @0 (plus:s (lshift:s integer_onep @1) integer_minus_onep))
  (if (TYPE_UNSIGNED (type))
    (bit_and @0 (bit_not (lshift { build_all_ones_cst (type); } @1)))))

(for bitop (bit_and bit_ior)
     cmp (eq ne)
 /* PR35691: Transform
    (x == 0 & y == 0) -> (x | typeof(x)(y)) == 0.
    (x != 0 | y != 0) -> (x | typeof(x)(y)) != 0.  */
 (simplify
  (bitop (cmp @0 integer_zerop@2) (cmp @1 integer_zerop))
   (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
	&& INTEGRAL_TYPE_P (TREE_TYPE (@1))
	&& TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
    (cmp (bit_ior @0 (convert @1)) @2)))
 /* Transform:
    (x == -1 & y == -1) -> (x & typeof(x)(y)) == -1.
    (x != -1 | y != -1) -> (x & typeof(x)(y)) != -1.  */
 (simplify
  (bitop (cmp @0 integer_all_onesp@2) (cmp @1 integer_all_onesp))
   (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
	&& INTEGRAL_TYPE_P (TREE_TYPE (@1))
	&& TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
    (cmp (bit_and @0 (convert @1)) @2))))

/* Fold (A & ~B) - (A & B) into (A ^ B) - B.  */
(simplify
 (minus (bit_and:cs @0 (bit_not @1)) (bit_and:cs @0 @1))
  (minus (bit_xor @0 @1) @1))
(simplify
 (minus (bit_and:s @0 INTEGER_CST@2) (bit_and:s @0 INTEGER_CST@1))
 (if (~wi::to_wide (@2) == wi::to_wide (@1))
  (minus (bit_xor @0 @1) @1)))

/* Fold (A & B) - (A & ~B) into B - (A ^ B).  */
(simplify
 (minus (bit_and:cs @0 @1) (bit_and:cs @0 (bit_not @1)))
  (minus @1 (bit_xor @0 @1)))

/* Simplify (X & ~Y) |^+ (~X & Y) -> X ^ Y.  */
(for op (bit_ior bit_xor plus)
 (simplify
  (op (bit_and:c @0 (bit_not @1)) (bit_and:c (bit_not @0) @1))
   (bit_xor @0 @1))
 (simplify
  (op:c (bit_and @0 INTEGER_CST@2) (bit_and (bit_not @0) INTEGER_CST@1))
  (if (~wi::to_wide (@2) == wi::to_wide (@1))
   (bit_xor @0 @1))))

/* PR53979: Transform ((a ^ b) | a) -> (a | b) */
(simplify
  (bit_ior:c (bit_xor:c @0 @1) @0)
  (bit_ior @0 @1))

/* (a & ~b) | (a ^ b)  -->  a ^ b  */
(simplify
 (bit_ior:c (bit_and:c @0 (bit_not @1)) (bit_xor:c@2 @0 @1))
 @2)

/* (a & ~b) ^ ~a  -->  ~(a & b)  */
(simplify
 (bit_xor:c (bit_and:cs @0 (bit_not @1)) (bit_not @0))
 (bit_not (bit_and @0 @1)))

/* (a | b) & ~(a ^ b)  -->  a & b  */
(simplify
 (bit_and:c (bit_ior @0 @1) (bit_not (bit_xor:c @0 @1)))
 (bit_and @0 @1))

/* a | ~(a ^ b)  -->  a | ~b  */
(simplify
 (bit_ior:c @0 (bit_not:s (bit_xor:c @0 @1)))
 (bit_ior @0 (bit_not @1)))

/* (a | b) | (a &^ b)  -->  a | b  */
(for op (bit_and bit_xor)
 (simplify
  (bit_ior:c (bit_ior@2 @0 @1) (op:c @0 @1))
  @2))

/* (a & b) | ~(a ^ b)  -->  ~(a ^ b)  */
(simplify
 (bit_ior:c (bit_and:c @0 @1) (bit_not@2 (bit_xor @0 @1)))
 @2)

/* ~(~a & b)  -->  a | ~b  */
(simplify
 (bit_not (bit_and:cs (bit_not @0) @1))
 (bit_ior @0 (bit_not @1)))

/* Simplify (~X & Y) to X ^ Y if we know that (X & ~Y) is 0.  */
#if GIMPLE
(simplify
 (bit_and (bit_not SSA_NAME@0) INTEGER_CST@1)
 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
      && wi::bit_and_not (get_nonzero_bits (@0), wi::to_wide (@1)) == 0)
  (bit_xor @0 @1)))
#endif

/* X % Y is smaller than Y.  */
(for cmp (lt ge)
 (simplify
  (cmp (trunc_mod @0 @1) @1)
  (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
   { constant_boolean_node (cmp == LT_EXPR, type); })))
(for cmp (gt le)
 (simplify
  (cmp @1 (trunc_mod @0 @1))
  (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
   { constant_boolean_node (cmp == GT_EXPR, type); })))

/* x | ~0 -> ~0  */
(simplify
 (bit_ior @0 integer_all_onesp@1)
 @1)

/* x | 0 -> x  */
(simplify
 (bit_ior @0 integer_zerop)
 @0)

/* x & 0 -> 0  */
(simplify
 (bit_and @0 integer_zerop@1)
 @1)

/* ~x | x -> -1 */
/* ~x ^ x -> -1 */
/* ~x + x -> -1 */
(for op (bit_ior bit_xor plus)
 (simplify
  (op:c (convert? @0) (convert? (bit_not @0)))
  (convert { build_all_ones_cst (TREE_TYPE (@0)); })))

/* x ^ x -> 0 */
(simplify
  (bit_xor @0 @0)
  { build_zero_cst (type); })

/* Canonicalize X ^ ~0 to ~X.  */
(simplify
  (bit_xor @0 integer_all_onesp@1)
  (bit_not @0))

/* x & ~0 -> x  */
(simplify
 (bit_and @0 integer_all_onesp)
  (non_lvalue @0))

/* x & x -> x,  x | x -> x */
(for bitop (bit_and bit_ior)
 (simplify
  (bitop @0 @0)
  (non_lvalue @0)))

/* x & C -> x if we know that x & ~C == 0.  */
*/ 81638fd1498Szrj#if GIMPLE 81738fd1498Szrj(simplify 81838fd1498Szrj (bit_and SSA_NAME@0 INTEGER_CST@1) 81938fd1498Szrj (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) 82038fd1498Szrj && wi::bit_and_not (get_nonzero_bits (@0), wi::to_wide (@1)) == 0) 82138fd1498Szrj @0)) 82238fd1498Szrj#endif 82338fd1498Szrj 82438fd1498Szrj/* x + (x & 1) -> (x + 1) & ~1 */ 82538fd1498Szrj(simplify 82638fd1498Szrj (plus:c @0 (bit_and:s @0 integer_onep@1)) 82738fd1498Szrj (bit_and (plus @0 @1) (bit_not @1))) 82838fd1498Szrj 82938fd1498Szrj/* x & ~(x & y) -> x & ~y */ 83038fd1498Szrj/* x | ~(x | y) -> x | ~y */ 83138fd1498Szrj(for bitop (bit_and bit_ior) 83238fd1498Szrj (simplify 83338fd1498Szrj (bitop:c @0 (bit_not (bitop:cs @0 @1))) 83438fd1498Szrj (bitop @0 (bit_not @1)))) 83538fd1498Szrj 83638fd1498Szrj/* (x | y) & ~x -> y & ~x */ 83738fd1498Szrj/* (x & y) | ~x -> y | ~x */ 83838fd1498Szrj(for bitop (bit_and bit_ior) 83938fd1498Szrj rbitop (bit_ior bit_and) 84038fd1498Szrj (simplify 84138fd1498Szrj (bitop:c (rbitop:c @0 @1) (bit_not@2 @0)) 84238fd1498Szrj (bitop @1 @2))) 84338fd1498Szrj 84438fd1498Szrj/* (x & y) ^ (x | y) -> x ^ y */ 84538fd1498Szrj(simplify 84638fd1498Szrj (bit_xor:c (bit_and @0 @1) (bit_ior @0 @1)) 84738fd1498Szrj (bit_xor @0 @1)) 84838fd1498Szrj 84938fd1498Szrj/* (x ^ y) ^ (x | y) -> x & y */ 85038fd1498Szrj(simplify 85138fd1498Szrj (bit_xor:c (bit_xor @0 @1) (bit_ior @0 @1)) 85238fd1498Szrj (bit_and @0 @1)) 85338fd1498Szrj 85438fd1498Szrj/* (x & y) + (x ^ y) -> x | y */ 85538fd1498Szrj/* (x & y) | (x ^ y) -> x | y */ 85638fd1498Szrj/* (x & y) ^ (x ^ y) -> x | y */ 85738fd1498Szrj(for op (plus bit_ior bit_xor) 85838fd1498Szrj (simplify 85938fd1498Szrj (op:c (bit_and @0 @1) (bit_xor @0 @1)) 86038fd1498Szrj (bit_ior @0 @1))) 86138fd1498Szrj 86238fd1498Szrj/* (x & y) + (x | y) -> x + y */ 86338fd1498Szrj(simplify 86438fd1498Szrj (plus:c (bit_and @0 @1) (bit_ior @0 @1)) 86538fd1498Szrj (plus @0 @1)) 86638fd1498Szrj 86738fd1498Szrj/* (x + y) - (x | y) -> x & y */ 
86838fd1498Szrj(simplify 86938fd1498Szrj (minus (plus @0 @1) (bit_ior @0 @1)) 87038fd1498Szrj (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type) 87138fd1498Szrj && !TYPE_SATURATING (type)) 87238fd1498Szrj (bit_and @0 @1))) 87338fd1498Szrj 87438fd1498Szrj/* (x + y) - (x & y) -> x | y */ 87538fd1498Szrj(simplify 87638fd1498Szrj (minus (plus @0 @1) (bit_and @0 @1)) 87738fd1498Szrj (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type) 87838fd1498Szrj && !TYPE_SATURATING (type)) 87938fd1498Szrj (bit_ior @0 @1))) 88038fd1498Szrj 88138fd1498Szrj/* (x | y) - (x ^ y) -> x & y */ 88238fd1498Szrj(simplify 88338fd1498Szrj (minus (bit_ior @0 @1) (bit_xor @0 @1)) 88438fd1498Szrj (bit_and @0 @1)) 88538fd1498Szrj 88638fd1498Szrj/* (x | y) - (x & y) -> x ^ y */ 88738fd1498Szrj(simplify 88838fd1498Szrj (minus (bit_ior @0 @1) (bit_and @0 @1)) 88938fd1498Szrj (bit_xor @0 @1)) 89038fd1498Szrj 89138fd1498Szrj/* (x | y) & ~(x & y) -> x ^ y */ 89238fd1498Szrj(simplify 89338fd1498Szrj (bit_and:c (bit_ior @0 @1) (bit_not (bit_and @0 @1))) 89438fd1498Szrj (bit_xor @0 @1)) 89538fd1498Szrj 89638fd1498Szrj/* (x | y) & (~x ^ y) -> x & y */ 89738fd1498Szrj(simplify 89838fd1498Szrj (bit_and:c (bit_ior:c @0 @1) (bit_xor:c @1 (bit_not @0))) 89938fd1498Szrj (bit_and @0 @1)) 90038fd1498Szrj 90138fd1498Szrj/* ~x & ~y -> ~(x | y) 90238fd1498Szrj ~x | ~y -> ~(x & y) */ 90338fd1498Szrj(for op (bit_and bit_ior) 90438fd1498Szrj rop (bit_ior bit_and) 90538fd1498Szrj (simplify 90638fd1498Szrj (op (convert1? (bit_not @0)) (convert2? 
(bit_not @1))) 90738fd1498Szrj (if (element_precision (type) <= element_precision (TREE_TYPE (@0)) 90838fd1498Szrj && element_precision (type) <= element_precision (TREE_TYPE (@1))) 90938fd1498Szrj (bit_not (rop (convert @0) (convert @1)))))) 91038fd1498Szrj 91138fd1498Szrj/* If we are XORing or adding two BIT_AND_EXPR's, both of which are and'ing 91238fd1498Szrj with a constant, and the two constants have no bits in common, 91338fd1498Szrj we should treat this as a BIT_IOR_EXPR since this may produce more 91438fd1498Szrj simplifications. */ 91538fd1498Szrj(for op (bit_xor plus) 91638fd1498Szrj (simplify 91738fd1498Szrj (op (convert1? (bit_and@4 @0 INTEGER_CST@1)) 91838fd1498Szrj (convert2? (bit_and@5 @2 INTEGER_CST@3))) 91938fd1498Szrj (if (tree_nop_conversion_p (type, TREE_TYPE (@0)) 92038fd1498Szrj && tree_nop_conversion_p (type, TREE_TYPE (@2)) 92138fd1498Szrj && (wi::to_wide (@1) & wi::to_wide (@3)) == 0) 92238fd1498Szrj (bit_ior (convert @4) (convert @5))))) 92338fd1498Szrj 92438fd1498Szrj/* (X | Y) ^ X -> Y & ~ X*/ 92538fd1498Szrj(simplify 92638fd1498Szrj (bit_xor:c (convert1? (bit_ior:c @@0 @1)) (convert2? @0)) 92738fd1498Szrj (if (tree_nop_conversion_p (type, TREE_TYPE (@0))) 92838fd1498Szrj (convert (bit_and @1 (bit_not @0))))) 92938fd1498Szrj 93038fd1498Szrj/* Convert ~X ^ ~Y to X ^ Y. */ 93138fd1498Szrj(simplify 93238fd1498Szrj (bit_xor (convert1? (bit_not @0)) (convert2? (bit_not @1))) 93338fd1498Szrj (if (element_precision (type) <= element_precision (TREE_TYPE (@0)) 93438fd1498Szrj && element_precision (type) <= element_precision (TREE_TYPE (@1))) 93538fd1498Szrj (bit_xor (convert @0) (convert @1)))) 93638fd1498Szrj 93738fd1498Szrj/* Convert ~X ^ C to X ^ ~C. */ 93838fd1498Szrj(simplify 93938fd1498Szrj (bit_xor (convert? (bit_not @0)) INTEGER_CST@1) 94038fd1498Szrj (if (tree_nop_conversion_p (type, TREE_TYPE (@0))) 94138fd1498Szrj (bit_xor (convert @0) (bit_not @1)))) 94238fd1498Szrj 94338fd1498Szrj/* Fold (X & Y) ^ Y and (X ^ Y) & Y as ~X & Y. 
*/ 94438fd1498Szrj(for opo (bit_and bit_xor) 94538fd1498Szrj opi (bit_xor bit_and) 94638fd1498Szrj (simplify 94738fd1498Szrj (opo:c (opi:c @0 @1) @1) 94838fd1498Szrj (bit_and (bit_not @0) @1))) 94938fd1498Szrj 95038fd1498Szrj/* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both 95138fd1498Szrj operands are another bit-wise operation with a common input. If so, 95238fd1498Szrj distribute the bit operations to save an operation and possibly two if 95338fd1498Szrj constants are involved. For example, convert 95438fd1498Szrj (A | B) & (A | C) into A | (B & C) 95538fd1498Szrj Further simplification will occur if B and C are constants. */ 95638fd1498Szrj(for op (bit_and bit_ior bit_xor) 95738fd1498Szrj rop (bit_ior bit_and bit_and) 95838fd1498Szrj (simplify 95938fd1498Szrj (op (convert? (rop:c @@0 @1)) (convert? (rop:c @0 @2))) 96038fd1498Szrj (if (tree_nop_conversion_p (type, TREE_TYPE (@1)) 96138fd1498Szrj && tree_nop_conversion_p (type, TREE_TYPE (@2))) 96238fd1498Szrj (rop (convert @0) (op (convert @1) (convert @2)))))) 96338fd1498Szrj 96438fd1498Szrj/* Some simple reassociation for bit operations, also handled in reassoc. */ 96538fd1498Szrj/* (X & Y) & Y -> X & Y 96638fd1498Szrj (X | Y) | Y -> X | Y */ 96738fd1498Szrj(for op (bit_and bit_ior) 96838fd1498Szrj (simplify 96938fd1498Szrj (op:c (convert1?@2 (op:c @0 @@1)) (convert2? @1)) 97038fd1498Szrj @2)) 97138fd1498Szrj/* (X ^ Y) ^ Y -> X */ 97238fd1498Szrj(simplify 97338fd1498Szrj (bit_xor:c (convert1? (bit_xor:c @0 @@1)) (convert2? 
@1)) 97438fd1498Szrj (convert @0)) 97538fd1498Szrj/* (X & Y) & (X & Z) -> (X & Y) & Z 97638fd1498Szrj (X | Y) | (X | Z) -> (X | Y) | Z */ 97738fd1498Szrj(for op (bit_and bit_ior) 97838fd1498Szrj (simplify 97938fd1498Szrj (op (convert1?@3 (op:c@4 @0 @1)) (convert2?@5 (op:c@6 @0 @2))) 98038fd1498Szrj (if (tree_nop_conversion_p (type, TREE_TYPE (@1)) 98138fd1498Szrj && tree_nop_conversion_p (type, TREE_TYPE (@2))) 98238fd1498Szrj (if (single_use (@5) && single_use (@6)) 98338fd1498Szrj (op @3 (convert @2)) 98438fd1498Szrj (if (single_use (@3) && single_use (@4)) 98538fd1498Szrj (op (convert @1) @5)))))) 98638fd1498Szrj/* (X ^ Y) ^ (X ^ Z) -> Y ^ Z */ 98738fd1498Szrj(simplify 98838fd1498Szrj (bit_xor (convert1? (bit_xor:c @0 @1)) (convert2? (bit_xor:c @0 @2))) 98938fd1498Szrj (if (tree_nop_conversion_p (type, TREE_TYPE (@1)) 99038fd1498Szrj && tree_nop_conversion_p (type, TREE_TYPE (@2))) 99138fd1498Szrj (bit_xor (convert @1) (convert @2)))) 99238fd1498Szrj 99338fd1498Szrj(simplify 99438fd1498Szrj (abs (abs@1 @0)) 99538fd1498Szrj @1) 99638fd1498Szrj(simplify 99738fd1498Szrj (abs (negate @0)) 99838fd1498Szrj (abs @0)) 99938fd1498Szrj(simplify 100038fd1498Szrj (abs tree_expr_nonnegative_p@0) 100138fd1498Szrj @0) 100238fd1498Szrj 100338fd1498Szrj/* A few cases of fold-const.c negate_expr_p predicate. 
*/ 100438fd1498Szrj(match negate_expr_p 100538fd1498Szrj INTEGER_CST 100638fd1498Szrj (if ((INTEGRAL_TYPE_P (type) 100738fd1498Szrj && TYPE_UNSIGNED (type)) 100838fd1498Szrj || (!TYPE_OVERFLOW_SANITIZED (type) 100938fd1498Szrj && may_negate_without_overflow_p (t))))) 101038fd1498Szrj(match negate_expr_p 101138fd1498Szrj FIXED_CST) 101238fd1498Szrj(match negate_expr_p 101338fd1498Szrj (negate @0) 101438fd1498Szrj (if (!TYPE_OVERFLOW_SANITIZED (type)))) 101538fd1498Szrj(match negate_expr_p 101638fd1498Szrj REAL_CST 101738fd1498Szrj (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (t))))) 101838fd1498Szrj/* VECTOR_CST handling of non-wrapping types would recurse in unsupported 101938fd1498Szrj ways. */ 102038fd1498Szrj(match negate_expr_p 102138fd1498Szrj VECTOR_CST 102238fd1498Szrj (if (FLOAT_TYPE_P (TREE_TYPE (type)) || TYPE_OVERFLOW_WRAPS (type)))) 102338fd1498Szrj(match negate_expr_p 102438fd1498Szrj (minus @0 @1) 102538fd1498Szrj (if ((ANY_INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_WRAPS (type)) 102638fd1498Szrj || (FLOAT_TYPE_P (type) 102738fd1498Szrj && !HONOR_SIGN_DEPENDENT_ROUNDING (type) 102838fd1498Szrj && !HONOR_SIGNED_ZEROS (type))))) 102938fd1498Szrj 103038fd1498Szrj/* (-A) * (-B) -> A * B */ 103138fd1498Szrj(simplify 103238fd1498Szrj (mult:c (convert1? (negate @0)) (convert2? negate_expr_p@1)) 103338fd1498Szrj (if (tree_nop_conversion_p (type, TREE_TYPE (@0)) 103438fd1498Szrj && tree_nop_conversion_p (type, TREE_TYPE (@1))) 103538fd1498Szrj (mult (convert @0) (convert (negate @1))))) 103638fd1498Szrj 103738fd1498Szrj/* -(A + B) -> (-B) - A. */ 103838fd1498Szrj(simplify 103938fd1498Szrj (negate (plus:c @0 negate_expr_p@1)) 104038fd1498Szrj (if (!HONOR_SIGN_DEPENDENT_ROUNDING (element_mode (type)) 104138fd1498Szrj && !HONOR_SIGNED_ZEROS (element_mode (type))) 104238fd1498Szrj (minus (negate @1) @0))) 104338fd1498Szrj 104438fd1498Szrj/* -(A - B) -> B - A. 
*/ 104538fd1498Szrj(simplify 104638fd1498Szrj (negate (minus @0 @1)) 104738fd1498Szrj (if ((ANY_INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_SANITIZED (type)) 104838fd1498Szrj || (FLOAT_TYPE_P (type) 104938fd1498Szrj && !HONOR_SIGN_DEPENDENT_ROUNDING (type) 105038fd1498Szrj && !HONOR_SIGNED_ZEROS (type))) 105138fd1498Szrj (minus @1 @0))) 105238fd1498Szrj(simplify 105338fd1498Szrj (negate (pointer_diff @0 @1)) 105438fd1498Szrj (if (TYPE_OVERFLOW_UNDEFINED (type)) 105538fd1498Szrj (pointer_diff @1 @0))) 105638fd1498Szrj 105738fd1498Szrj/* A - B -> A + (-B) if B is easily negatable. */ 105838fd1498Szrj(simplify 105938fd1498Szrj (minus @0 negate_expr_p@1) 106038fd1498Szrj (if (!FIXED_POINT_TYPE_P (type)) 106138fd1498Szrj (plus @0 (negate @1)))) 106238fd1498Szrj 106338fd1498Szrj/* Try to fold (type) X op CST -> (type) (X op ((type-x) CST)) 106438fd1498Szrj when profitable. 106538fd1498Szrj For bitwise binary operations apply operand conversions to the 106638fd1498Szrj binary operation result instead of to the operands. This allows 106738fd1498Szrj to combine successive conversions and bitwise binary operations. 106838fd1498Szrj We combine the above two cases by using a conditional convert. */ 106938fd1498Szrj(for bitop (bit_and bit_ior bit_xor) 107038fd1498Szrj (simplify 107138fd1498Szrj (bitop (convert @0) (convert? @1)) 107238fd1498Szrj (if (((TREE_CODE (@1) == INTEGER_CST 107338fd1498Szrj && INTEGRAL_TYPE_P (TREE_TYPE (@0)) 107438fd1498Szrj && int_fits_type_p (@1, TREE_TYPE (@0))) 107538fd1498Szrj || types_match (@0, @1)) 107638fd1498Szrj /* ??? This transform conflicts with fold-const.c doing 107738fd1498Szrj Convert (T)(x & c) into (T)x & (T)c, if c is an integer 107838fd1498Szrj constants (if x has signed type, the sign bit cannot be set 107938fd1498Szrj in c). This folds extension into the BIT_AND_EXPR. 108038fd1498Szrj Restrict it to GIMPLE to avoid endless recursions. 
*/ 108138fd1498Szrj && (bitop != BIT_AND_EXPR || GIMPLE) 108238fd1498Szrj && (/* That's a good idea if the conversion widens the operand, thus 108338fd1498Szrj after hoisting the conversion the operation will be narrower. */ 108438fd1498Szrj TYPE_PRECISION (TREE_TYPE (@0)) < TYPE_PRECISION (type) 108538fd1498Szrj /* It's also a good idea if the conversion is to a non-integer 108638fd1498Szrj mode. */ 108738fd1498Szrj || GET_MODE_CLASS (TYPE_MODE (type)) != MODE_INT 108838fd1498Szrj /* Or if the precision of TO is not the same as the precision 108938fd1498Szrj of its mode. */ 109038fd1498Szrj || !type_has_mode_precision_p (type))) 109138fd1498Szrj (convert (bitop @0 (convert @1)))))) 109238fd1498Szrj 109338fd1498Szrj(for bitop (bit_and bit_ior) 109438fd1498Szrj rbitop (bit_ior bit_and) 109538fd1498Szrj /* (x | y) & x -> x */ 109638fd1498Szrj /* (x & y) | x -> x */ 109738fd1498Szrj (simplify 109838fd1498Szrj (bitop:c (rbitop:c @0 @1) @0) 109938fd1498Szrj @0) 110038fd1498Szrj /* (~x | y) & x -> x & y */ 110138fd1498Szrj /* (~x & y) | x -> x | y */ 110238fd1498Szrj (simplify 110338fd1498Szrj (bitop:c (rbitop:c (bit_not @0) @1) @0) 110438fd1498Szrj (bitop @0 @1))) 110538fd1498Szrj 110638fd1498Szrj/* (x | CST1) & CST2 -> (x & CST2) | (CST1 & CST2) */ 110738fd1498Szrj(simplify 110838fd1498Szrj (bit_and (bit_ior @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2) 110938fd1498Szrj (bit_ior (bit_and @0 @2) (bit_and @1 @2))) 111038fd1498Szrj 111138fd1498Szrj/* Combine successive equal operations with constants. */ 111238fd1498Szrj(for bitop (bit_and bit_ior bit_xor) 111338fd1498Szrj (simplify 111438fd1498Szrj (bitop (bitop @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2) 111538fd1498Szrj (if (!CONSTANT_CLASS_P (@0)) 111638fd1498Szrj /* This is the canonical form regardless of whether (bitop @1 @2) can be 111738fd1498Szrj folded to a constant. 
*/ 111838fd1498Szrj (bitop @0 (bitop @1 @2)) 111938fd1498Szrj /* In this case we have three constants and (bitop @0 @1) doesn't fold 112038fd1498Szrj to a constant. This can happen if @0 or @1 is a POLY_INT_CST and if 112138fd1498Szrj the values involved are such that the operation can't be decided at 112238fd1498Szrj compile time. Try folding one of @0 or @1 with @2 to see whether 112338fd1498Szrj that combination can be decided at compile time. 112438fd1498Szrj 112538fd1498Szrj Keep the existing form if both folds fail, to avoid endless 112638fd1498Szrj oscillation. */ 112738fd1498Szrj (with { tree cst1 = const_binop (bitop, type, @0, @2); } 112838fd1498Szrj (if (cst1) 112938fd1498Szrj (bitop @1 { cst1; }) 113038fd1498Szrj (with { tree cst2 = const_binop (bitop, type, @1, @2); } 113138fd1498Szrj (if (cst2) 113238fd1498Szrj (bitop @0 { cst2; })))))))) 113338fd1498Szrj 113438fd1498Szrj/* Try simple folding for X op !X, and X op X with the help 113538fd1498Szrj of the truth_valued_p and logical_inverted_value predicates. */ 113638fd1498Szrj(match truth_valued_p 113738fd1498Szrj @0 113838fd1498Szrj (if (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1))) 113938fd1498Szrj(for op (tcc_comparison truth_and truth_andif truth_or truth_orif truth_xor) 114038fd1498Szrj (match truth_valued_p 114138fd1498Szrj (op @0 @1))) 114238fd1498Szrj(match truth_valued_p 114338fd1498Szrj (truth_not @0)) 114438fd1498Szrj 114538fd1498Szrj(match (logical_inverted_value @0) 114638fd1498Szrj (truth_not @0)) 114738fd1498Szrj(match (logical_inverted_value @0) 114838fd1498Szrj (bit_not truth_valued_p@0)) 114938fd1498Szrj(match (logical_inverted_value @0) 115038fd1498Szrj (eq @0 integer_zerop)) 115138fd1498Szrj(match (logical_inverted_value @0) 115238fd1498Szrj (ne truth_valued_p@0 integer_truep)) 115338fd1498Szrj(match (logical_inverted_value @0) 115438fd1498Szrj (bit_xor truth_valued_p@0 integer_truep)) 115538fd1498Szrj 115638fd1498Szrj/* X & !X -> 0. 
*/ 115738fd1498Szrj(simplify 115838fd1498Szrj (bit_and:c @0 (logical_inverted_value @0)) 115938fd1498Szrj { build_zero_cst (type); }) 116038fd1498Szrj/* X | !X and X ^ !X -> 1, , if X is truth-valued. */ 116138fd1498Szrj(for op (bit_ior bit_xor) 116238fd1498Szrj (simplify 116338fd1498Szrj (op:c truth_valued_p@0 (logical_inverted_value @0)) 116438fd1498Szrj { constant_boolean_node (true, type); })) 116538fd1498Szrj/* X ==/!= !X is false/true. */ 116638fd1498Szrj(for op (eq ne) 116738fd1498Szrj (simplify 116838fd1498Szrj (op:c truth_valued_p@0 (logical_inverted_value @0)) 116938fd1498Szrj { constant_boolean_node (op == NE_EXPR ? true : false, type); })) 117038fd1498Szrj 117138fd1498Szrj/* ~~x -> x */ 117238fd1498Szrj(simplify 117338fd1498Szrj (bit_not (bit_not @0)) 117438fd1498Szrj @0) 117538fd1498Szrj 117638fd1498Szrj/* Convert ~ (-A) to A - 1. */ 117738fd1498Szrj(simplify 117838fd1498Szrj (bit_not (convert? (negate @0))) 117938fd1498Szrj (if (element_precision (type) <= element_precision (TREE_TYPE (@0)) 118038fd1498Szrj || !TYPE_UNSIGNED (TREE_TYPE (@0))) 118138fd1498Szrj (convert (minus @0 { build_each_one_cst (TREE_TYPE (@0)); })))) 118238fd1498Szrj 118338fd1498Szrj/* Convert - (~A) to A + 1. */ 118438fd1498Szrj(simplify 118538fd1498Szrj (negate (nop_convert (bit_not @0))) 118638fd1498Szrj (plus (view_convert @0) { build_each_one_cst (type); })) 118738fd1498Szrj 118838fd1498Szrj/* Convert ~ (A - 1) or ~ (A + -1) to -A. */ 118938fd1498Szrj(simplify 119038fd1498Szrj (bit_not (convert? (minus @0 integer_each_onep))) 119138fd1498Szrj (if (element_precision (type) <= element_precision (TREE_TYPE (@0)) 119238fd1498Szrj || !TYPE_UNSIGNED (TREE_TYPE (@0))) 119338fd1498Szrj (convert (negate @0)))) 119438fd1498Szrj(simplify 119538fd1498Szrj (bit_not (convert? 
(plus @0 integer_all_onesp))) 119638fd1498Szrj (if (element_precision (type) <= element_precision (TREE_TYPE (@0)) 119738fd1498Szrj || !TYPE_UNSIGNED (TREE_TYPE (@0))) 119838fd1498Szrj (convert (negate @0)))) 119938fd1498Szrj 120038fd1498Szrj/* Part of convert ~(X ^ Y) to ~X ^ Y or X ^ ~Y if ~X or ~Y simplify. */ 120138fd1498Szrj(simplify 120238fd1498Szrj (bit_not (convert? (bit_xor @0 INTEGER_CST@1))) 120338fd1498Szrj (if (tree_nop_conversion_p (type, TREE_TYPE (@0))) 120438fd1498Szrj (convert (bit_xor @0 (bit_not @1))))) 120538fd1498Szrj(simplify 120638fd1498Szrj (bit_not (convert? (bit_xor:c (bit_not @0) @1))) 120738fd1498Szrj (if (tree_nop_conversion_p (type, TREE_TYPE (@0))) 120838fd1498Szrj (convert (bit_xor @0 @1)))) 120938fd1498Szrj 121038fd1498Szrj/* Otherwise prefer ~(X ^ Y) to ~X ^ Y as more canonical. */ 121138fd1498Szrj(simplify 121238fd1498Szrj (bit_xor:c (nop_convert:s (bit_not:s @0)) @1) 121338fd1498Szrj (if (tree_nop_conversion_p (type, TREE_TYPE (@0))) 121438fd1498Szrj (bit_not (bit_xor (view_convert @0) @1)))) 121538fd1498Szrj 121638fd1498Szrj/* (x & ~m) | (y & m) -> ((x ^ y) & m) ^ x */ 121738fd1498Szrj(simplify 121838fd1498Szrj (bit_ior:c (bit_and:cs @0 (bit_not @2)) (bit_and:cs @1 @2)) 121938fd1498Szrj (bit_xor (bit_and (bit_xor @0 @1) @2) @0)) 122038fd1498Szrj 122138fd1498Szrj/* Fold A - (A & B) into ~B & A. */ 122238fd1498Szrj(simplify 122338fd1498Szrj (minus (convert1? @0) (convert2?:s (bit_and:cs @@0 @1))) 122438fd1498Szrj (if (tree_nop_conversion_p (type, TREE_TYPE (@0)) 122538fd1498Szrj && tree_nop_conversion_p (type, TREE_TYPE (@1))) 122638fd1498Szrj (convert (bit_and (bit_not @1) @0)))) 122738fd1498Szrj 122838fd1498Szrj/* (m1 CMP m2) * d -> (m1 CMP m2) ? 
d : 0 */ 122938fd1498Szrj(for cmp (gt lt ge le) 123038fd1498Szrj(simplify 123138fd1498Szrj (mult (convert (cmp @0 @1)) @2) 123238fd1498Szrj (cond (cmp @0 @1) @2 { build_zero_cst (type); }))) 123338fd1498Szrj 123438fd1498Szrj/* For integral types with undefined overflow and C != 0 fold 123538fd1498Szrj x * C EQ/NE y * C into x EQ/NE y. */ 123638fd1498Szrj(for cmp (eq ne) 123738fd1498Szrj (simplify 123838fd1498Szrj (cmp (mult:c @0 @1) (mult:c @2 @1)) 123938fd1498Szrj (if (INTEGRAL_TYPE_P (TREE_TYPE (@1)) 124038fd1498Szrj && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)) 124138fd1498Szrj && tree_expr_nonzero_p (@1)) 124238fd1498Szrj (cmp @0 @2)))) 124338fd1498Szrj 124438fd1498Szrj/* For integral types with wrapping overflow and C odd fold 124538fd1498Szrj x * C EQ/NE y * C into x EQ/NE y. */ 124638fd1498Szrj(for cmp (eq ne) 124738fd1498Szrj (simplify 124838fd1498Szrj (cmp (mult @0 INTEGER_CST@1) (mult @2 @1)) 124938fd1498Szrj (if (INTEGRAL_TYPE_P (TREE_TYPE (@1)) 125038fd1498Szrj && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)) 125138fd1498Szrj && (TREE_INT_CST_LOW (@1) & 1) != 0) 125238fd1498Szrj (cmp @0 @2)))) 125338fd1498Szrj 125438fd1498Szrj/* For integral types with undefined overflow and C != 0 fold 125538fd1498Szrj x * C RELOP y * C into: 125638fd1498Szrj 125738fd1498Szrj x RELOP y for nonnegative C 125838fd1498Szrj y RELOP x for negative C */ 125938fd1498Szrj(for cmp (lt gt le ge) 126038fd1498Szrj (simplify 126138fd1498Szrj (cmp (mult:c @0 @1) (mult:c @2 @1)) 126238fd1498Szrj (if (INTEGRAL_TYPE_P (TREE_TYPE (@1)) 126338fd1498Szrj && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))) 126438fd1498Szrj (if (tree_expr_nonnegative_p (@1) && tree_expr_nonzero_p (@1)) 126538fd1498Szrj (cmp @0 @2) 126638fd1498Szrj (if (TREE_CODE (@1) == INTEGER_CST 126738fd1498Szrj && wi::neg_p (wi::to_wide (@1), TYPE_SIGN (TREE_TYPE (@1)))) 126838fd1498Szrj (cmp @2 @0)))))) 126938fd1498Szrj 127038fd1498Szrj/* (X - 1U) <= INT_MAX-1U into (int) X > 0. 
*/ 127138fd1498Szrj(for cmp (le gt) 127238fd1498Szrj icmp (gt le) 127338fd1498Szrj (simplify 127438fd1498Szrj (cmp (plus @0 integer_minus_onep@1) INTEGER_CST@2) 127538fd1498Szrj (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) 127638fd1498Szrj && TYPE_UNSIGNED (TREE_TYPE (@0)) 127738fd1498Szrj && TYPE_PRECISION (TREE_TYPE (@0)) > 1 127838fd1498Szrj && (wi::to_wide (@2) 127938fd1498Szrj == wi::max_value (TYPE_PRECISION (TREE_TYPE (@0)), SIGNED) - 1)) 128038fd1498Szrj (with { tree stype = signed_type_for (TREE_TYPE (@0)); } 128138fd1498Szrj (icmp (convert:stype @0) { build_int_cst (stype, 0); }))))) 128238fd1498Szrj 128338fd1498Szrj/* X / 4 < Y / 4 iff X < Y when the division is known to be exact. */ 128438fd1498Szrj(for cmp (simple_comparison) 128538fd1498Szrj (simplify 128638fd1498Szrj (cmp (exact_div @0 INTEGER_CST@2) (exact_div @1 @2)) 128738fd1498Szrj (if (wi::gt_p (wi::to_wide (@2), 0, TYPE_SIGN (TREE_TYPE (@2)))) 128838fd1498Szrj (cmp @0 @1)))) 128938fd1498Szrj 129038fd1498Szrj/* X / C1 op C2 into a simple range test. 
*/ 129138fd1498Szrj(for cmp (simple_comparison) 129238fd1498Szrj (simplify 129338fd1498Szrj (cmp (trunc_div:s @0 INTEGER_CST@1) INTEGER_CST@2) 129438fd1498Szrj (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) 129538fd1498Szrj && integer_nonzerop (@1) 129638fd1498Szrj && !TREE_OVERFLOW (@1) 129738fd1498Szrj && !TREE_OVERFLOW (@2)) 129838fd1498Szrj (with { tree lo, hi; bool neg_overflow; 129938fd1498Szrj enum tree_code code = fold_div_compare (cmp, @1, @2, &lo, &hi, 130038fd1498Szrj &neg_overflow); } 130138fd1498Szrj (switch 130238fd1498Szrj (if (code == LT_EXPR || code == GE_EXPR) 130338fd1498Szrj (if (TREE_OVERFLOW (lo)) 130438fd1498Szrj { build_int_cst (type, (code == LT_EXPR) ^ neg_overflow); } 130538fd1498Szrj (if (code == LT_EXPR) 130638fd1498Szrj (lt @0 { lo; }) 130738fd1498Szrj (ge @0 { lo; })))) 130838fd1498Szrj (if (code == LE_EXPR || code == GT_EXPR) 130938fd1498Szrj (if (TREE_OVERFLOW (hi)) 131038fd1498Szrj { build_int_cst (type, (code == LE_EXPR) ^ neg_overflow); } 131138fd1498Szrj (if (code == LE_EXPR) 131238fd1498Szrj (le @0 { hi; }) 131338fd1498Szrj (gt @0 { hi; })))) 131438fd1498Szrj (if (!lo && !hi) 131538fd1498Szrj { build_int_cst (type, code == NE_EXPR); }) 131638fd1498Szrj (if (code == EQ_EXPR && !hi) 131738fd1498Szrj (ge @0 { lo; })) 131838fd1498Szrj (if (code == EQ_EXPR && !lo) 131938fd1498Szrj (le @0 { hi; })) 132038fd1498Szrj (if (code == NE_EXPR && !hi) 132138fd1498Szrj (lt @0 { lo; })) 132238fd1498Szrj (if (code == NE_EXPR && !lo) 132338fd1498Szrj (gt @0 { hi; })) 132438fd1498Szrj (if (GENERIC) 132538fd1498Szrj { build_range_check (UNKNOWN_LOCATION, type, @0, code == EQ_EXPR, 132638fd1498Szrj lo, hi); }) 132738fd1498Szrj (with 132838fd1498Szrj { 132938fd1498Szrj tree etype = range_check_type (TREE_TYPE (@0)); 133038fd1498Szrj if (etype) 133138fd1498Szrj { 133238fd1498Szrj if (! 
TYPE_UNSIGNED (etype)) 133338fd1498Szrj etype = unsigned_type_for (etype); 133438fd1498Szrj hi = fold_convert (etype, hi); 133538fd1498Szrj lo = fold_convert (etype, lo); 133638fd1498Szrj hi = const_binop (MINUS_EXPR, etype, hi, lo); 133738fd1498Szrj } 133838fd1498Szrj } 133938fd1498Szrj (if (etype && hi && !TREE_OVERFLOW (hi)) 134038fd1498Szrj (if (code == EQ_EXPR) 134138fd1498Szrj (le (minus (convert:etype @0) { lo; }) { hi; }) 134238fd1498Szrj (gt (minus (convert:etype @0) { lo; }) { hi; }))))))))) 134338fd1498Szrj 134438fd1498Szrj/* X + Z < Y + Z is the same as X < Y when there is no overflow. */ 134538fd1498Szrj(for op (lt le ge gt) 134638fd1498Szrj (simplify 134738fd1498Szrj (op (plus:c @0 @2) (plus:c @1 @2)) 134838fd1498Szrj (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) 134938fd1498Szrj && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))) 135038fd1498Szrj (op @0 @1)))) 135138fd1498Szrj/* For equality and subtraction, this is also true with wrapping overflow. */ 135238fd1498Szrj(for op (eq ne minus) 135338fd1498Szrj (simplify 135438fd1498Szrj (op (plus:c @0 @2) (plus:c @1 @2)) 135538fd1498Szrj (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) 135638fd1498Szrj && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)) 135738fd1498Szrj || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))) 135838fd1498Szrj (op @0 @1)))) 135938fd1498Szrj 136038fd1498Szrj/* X - Z < Y - Z is the same as X < Y when there is no overflow. */ 136138fd1498Szrj(for op (lt le ge gt) 136238fd1498Szrj (simplify 136338fd1498Szrj (op (minus @0 @2) (minus @1 @2)) 136438fd1498Szrj (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) 136538fd1498Szrj && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))) 136638fd1498Szrj (op @0 @1)))) 136738fd1498Szrj/* For equality and subtraction, this is also true with wrapping overflow. 
*/
(for op (eq ne minus)
 (simplify
  (op (minus @0 @2) (minus @1 @2))
  (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
           || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
   (op @0 @1))))
/* And for pointers...  */
(for op (simple_comparison)
 (simplify
  (op (pointer_diff@3 @0 @2) (pointer_diff @1 @2))
  (if (!TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
   (op @0 @1))))
(simplify
 (minus (pointer_diff@3 @0 @2) (pointer_diff @1 @2))
 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@3))
      && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
  (pointer_diff @0 @1)))

/* Z - X < Z - Y is the same as Y < X when there is no overflow.  */
(for op (lt le ge gt)
 (simplify
  (op (minus @2 @0) (minus @2 @1))
  (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
   (op @1 @0))))
/* For equality and subtraction, this is also true with wrapping overflow.  */
(for op (eq ne minus)
 (simplify
  (op (minus @2 @0) (minus @2 @1))
  (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
           || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
   (op @1 @0))))
/* And for pointers...  */
(for op (simple_comparison)
 (simplify
  (op (pointer_diff@3 @2 @0) (pointer_diff @2 @1))
  (if (!TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
   (op @1 @0))))
(simplify
 (minus (pointer_diff@3 @2 @0) (pointer_diff @2 @1))
 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@3))
      && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
  (pointer_diff @1 @0)))

/* X + Y < Y is the same as X < 0 when there is no overflow.  */
(for op (lt le gt ge)
 (simplify
  (op:c (plus:c@2 @0 @1) @1)
  (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
       && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0))
       && (CONSTANT_CLASS_P (@0) || single_use (@2)))
   (op @0 { build_zero_cst (TREE_TYPE (@0)); }))))
/* For equality, this is also true with wrapping overflow.  */
(for op (eq ne)
 (simplify
  (op:c (nop_convert@3 (plus:c@2 @0 (convert1? @1))) (convert2? @1))
  (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
           || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
       && (CONSTANT_CLASS_P (@0) || (single_use (@2) && single_use (@3)))
       && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@2))
       && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@1)))
   (op @0 { build_zero_cst (TREE_TYPE (@0)); })))
 (simplify
  (op:c (nop_convert@3 (pointer_plus@2 (convert1? @0) @1)) (convert2? @0))
  (if (tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0))
       && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0))
       && (CONSTANT_CLASS_P (@1) || (single_use (@2) && single_use (@3))))
   (op @1 { build_zero_cst (TREE_TYPE (@1)); }))))

/* X - Y < X is the same as Y > 0 when there is no overflow.
   For equality, this is also true with wrapping overflow.  */
(for op (simple_comparison)
 (simplify
  (op:c @0 (minus@2 @0 @1))
  (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
           || ((op == EQ_EXPR || op == NE_EXPR)
               && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
       && (CONSTANT_CLASS_P (@1) || single_use (@2)))
   (op @1 { build_zero_cst (TREE_TYPE (@1)); }))))

/* Transform:
   (X / Y) == 0 -> X < Y if X, Y are unsigned.
   (X / Y) != 0 -> X >= Y, if X, Y are unsigned.  */
(for cmp (eq ne)
     ocmp (lt ge)
 (simplify
  (cmp (trunc_div @0 @1) integer_zerop)
  (if (TYPE_UNSIGNED (TREE_TYPE (@0))
       /* Complex ==/!= is allowed, but not </>=.  */
       && TREE_CODE (TREE_TYPE (@0)) != COMPLEX_TYPE
       && (VECTOR_TYPE_P (type) || !VECTOR_TYPE_P (TREE_TYPE (@0))))
   (ocmp @0 @1))))

/* X == C - X can never be true if C is odd.  */
(for cmp (eq ne)
 (simplify
  (cmp:c (convert? @0) (convert1? (minus INTEGER_CST@1 (convert2?
@0)))) 147038fd1498Szrj (if (TREE_INT_CST_LOW (@1) & 1) 147138fd1498Szrj { constant_boolean_node (cmp == NE_EXPR, type); }))) 147238fd1498Szrj 147338fd1498Szrj/* Arguments on which one can call get_nonzero_bits to get the bits 147438fd1498Szrj possibly set. */ 147538fd1498Szrj(match with_possible_nonzero_bits 147638fd1498Szrj INTEGER_CST@0) 147738fd1498Szrj(match with_possible_nonzero_bits 147838fd1498Szrj SSA_NAME@0 147938fd1498Szrj (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0))))) 148038fd1498Szrj/* Slightly extended version, do not make it recursive to keep it cheap. */ 148138fd1498Szrj(match (with_possible_nonzero_bits2 @0) 148238fd1498Szrj with_possible_nonzero_bits@0) 148338fd1498Szrj(match (with_possible_nonzero_bits2 @0) 148438fd1498Szrj (bit_and:c with_possible_nonzero_bits@0 @2)) 148538fd1498Szrj 148638fd1498Szrj/* Same for bits that are known to be set, but we do not have 148738fd1498Szrj an equivalent to get_nonzero_bits yet. */ 148838fd1498Szrj(match (with_certain_nonzero_bits2 @0) 148938fd1498Szrj INTEGER_CST@0) 149038fd1498Szrj(match (with_certain_nonzero_bits2 @0) 149138fd1498Szrj (bit_ior @1 INTEGER_CST@0)) 149238fd1498Szrj 149338fd1498Szrj/* X == C (or X & Z == Y | C) is impossible if ~nonzero(X) & C != 0. */ 149438fd1498Szrj(for cmp (eq ne) 149538fd1498Szrj (simplify 149638fd1498Szrj (cmp:c (with_possible_nonzero_bits2 @0) (with_certain_nonzero_bits2 @1)) 149738fd1498Szrj (if (wi::bit_and_not (wi::to_wide (@1), get_nonzero_bits (@0)) != 0) 149838fd1498Szrj { constant_boolean_node (cmp == NE_EXPR, type); }))) 149938fd1498Szrj 150038fd1498Szrj/* ((X inner_op C0) outer_op C1) 150138fd1498Szrj With X being a tree where value_range has reasoned certain bits to always be 150238fd1498Szrj zero throughout its computed value range, 150338fd1498Szrj inner_op = {|,^}, outer_op = {|,^} and inner_op != outer_op 150438fd1498Szrj where zero_mask has 1's for all bits that are sure to be 0 in 150538fd1498Szrj and 0's otherwise. 
150638fd1498Szrj if (inner_op == '^') C0 &= ~C1; 150738fd1498Szrj if ((C0 & ~zero_mask) == 0) then emit (X outer_op (C0 outer_op C1) 150838fd1498Szrj if ((C1 & ~zero_mask) == 0) then emit (X inner_op (C0 outer_op C1) 150938fd1498Szrj*/ 151038fd1498Szrj(for inner_op (bit_ior bit_xor) 151138fd1498Szrj outer_op (bit_xor bit_ior) 151238fd1498Szrj(simplify 151338fd1498Szrj (outer_op 151438fd1498Szrj (inner_op:s @2 INTEGER_CST@0) INTEGER_CST@1) 151538fd1498Szrj (with 151638fd1498Szrj { 151738fd1498Szrj bool fail = false; 151838fd1498Szrj wide_int zero_mask_not; 151938fd1498Szrj wide_int C0; 152038fd1498Szrj wide_int cst_emit; 152138fd1498Szrj 152238fd1498Szrj if (TREE_CODE (@2) == SSA_NAME) 152338fd1498Szrj zero_mask_not = get_nonzero_bits (@2); 152438fd1498Szrj else 152538fd1498Szrj fail = true; 152638fd1498Szrj 152738fd1498Szrj if (inner_op == BIT_XOR_EXPR) 152838fd1498Szrj { 152938fd1498Szrj C0 = wi::bit_and_not (wi::to_wide (@0), wi::to_wide (@1)); 153038fd1498Szrj cst_emit = C0 | wi::to_wide (@1); 153138fd1498Szrj } 153238fd1498Szrj else 153338fd1498Szrj { 153438fd1498Szrj C0 = wi::to_wide (@0); 153538fd1498Szrj cst_emit = C0 ^ wi::to_wide (@1); 153638fd1498Szrj } 153738fd1498Szrj } 153838fd1498Szrj (if (!fail && (C0 & zero_mask_not) == 0) 153938fd1498Szrj (outer_op @2 { wide_int_to_tree (type, cst_emit); }) 154038fd1498Szrj (if (!fail && (wi::to_wide (@1) & zero_mask_not) == 0) 154138fd1498Szrj (inner_op @2 { wide_int_to_tree (type, cst_emit); })))))) 154238fd1498Szrj 154338fd1498Szrj/* Associate (p +p off1) +p off2 as (p +p (off1 + off2)). 
*/
(simplify
 (pointer_plus (pointer_plus:s @0 @1) @3)
 (pointer_plus @0 (plus @1 @3)))

/* Pattern match
     tem1 = (long) ptr1;
     tem2 = (long) ptr2;
     tem3 = tem2 - tem1;
     tem4 = (unsigned long) tem3;
     tem5 = ptr1 + tem4;
   and produce
     tem5 = ptr2;  */
(simplify
 (pointer_plus @0 (convert?@2 (minus@3 (convert @1) (convert @0))))
 /* Conditionally look through a sign-changing conversion.  */
 (if (TYPE_PRECISION (TREE_TYPE (@2)) == TYPE_PRECISION (TREE_TYPE (@3))
      && ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@1)))
          || (GENERIC && type == TREE_TYPE (@1))))
  @1))
(simplify
 (pointer_plus @0 (convert?@2 (pointer_diff@3 @1 @@0)))
 (if (TYPE_PRECISION (TREE_TYPE (@2)) >= TYPE_PRECISION (TREE_TYPE (@3)))
  (convert @1)))

/* Pattern match
     tem = (sizetype) ptr;
     tem = tem & algn;
     tem = -tem;
     ... = ptr p+ tem;
   and produce the simpler and easier to analyze with respect to alignment
     ... = ptr & ~algn;  */
(simplify
 (pointer_plus @0 (negate (bit_and (convert @0) INTEGER_CST@1)))
 (with { tree algn = wide_int_to_tree (TREE_TYPE (@0), ~wi::to_wide (@1)); }
  (bit_and @0 { algn; })))

/* Try folding difference of addresses.  */
(simplify
 (minus (convert ADDR_EXPR@0) (convert @1))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (with { poly_int64 diff; }
   (if (ptr_difference_const (@0, @1, &diff))
    { build_int_cst_type (type, diff); }))))
(simplify
 (minus (convert @0) (convert ADDR_EXPR@1))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (with { poly_int64 diff; }
   (if (ptr_difference_const (@0, @1, &diff))
    { build_int_cst_type (type, diff); }))))
(simplify
 (pointer_diff (convert?@2 ADDR_EXPR@0) (convert1?@3 @1))
 (if (tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0))
      && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@1)))
  (with { poly_int64 diff; }
   (if (ptr_difference_const (@0, @1, &diff))
    { build_int_cst_type (type, diff); }))))
(simplify
 (pointer_diff (convert?@2 @0) (convert1?@3 ADDR_EXPR@1))
 (if (tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0))
      && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@1)))
  (with { poly_int64 diff; }
   (if (ptr_difference_const (@0, @1, &diff))
    { build_int_cst_type (type, diff); }))))

/* If arg0 is derived from the address of an object or function, we may
   be able to fold this expression using the object or function's
   alignment.  */
(simplify
 (bit_and (convert?
@0) INTEGER_CST@1) 161338fd1498Szrj (if (POINTER_TYPE_P (TREE_TYPE (@0)) 161438fd1498Szrj && tree_nop_conversion_p (type, TREE_TYPE (@0))) 161538fd1498Szrj (with 161638fd1498Szrj { 161738fd1498Szrj unsigned int align; 161838fd1498Szrj unsigned HOST_WIDE_INT bitpos; 161938fd1498Szrj get_pointer_alignment_1 (@0, &align, &bitpos); 162038fd1498Szrj } 162138fd1498Szrj (if (wi::ltu_p (wi::to_wide (@1), align / BITS_PER_UNIT)) 162238fd1498Szrj { wide_int_to_tree (type, (wi::to_wide (@1) 162338fd1498Szrj & (bitpos / BITS_PER_UNIT))); })))) 162438fd1498Szrj 162538fd1498Szrj 162638fd1498Szrj/* We can't reassociate at all for saturating types. */ 162738fd1498Szrj(if (!TYPE_SATURATING (type)) 162838fd1498Szrj 162938fd1498Szrj /* Contract negates. */ 163038fd1498Szrj /* A + (-B) -> A - B */ 163138fd1498Szrj (simplify 163238fd1498Szrj (plus:c @0 (convert? (negate @1))) 163338fd1498Szrj /* Apply STRIP_NOPS on the negate. */ 163438fd1498Szrj (if (tree_nop_conversion_p (type, TREE_TYPE (@1)) 163538fd1498Szrj && !TYPE_OVERFLOW_SANITIZED (type)) 163638fd1498Szrj (with 163738fd1498Szrj { 163838fd1498Szrj tree t1 = type; 163938fd1498Szrj if (INTEGRAL_TYPE_P (type) 164038fd1498Szrj && TYPE_OVERFLOW_WRAPS (type) != TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1))) 164138fd1498Szrj t1 = TYPE_OVERFLOW_WRAPS (type) ? type : TREE_TYPE (@1); 164238fd1498Szrj } 164338fd1498Szrj (convert (minus (convert:t1 @0) (convert:t1 @1)))))) 164438fd1498Szrj /* A - (-B) -> A + B */ 164538fd1498Szrj (simplify 164638fd1498Szrj (minus @0 (convert? (negate @1))) 164738fd1498Szrj (if (tree_nop_conversion_p (type, TREE_TYPE (@1)) 164838fd1498Szrj && !TYPE_OVERFLOW_SANITIZED (type)) 164938fd1498Szrj (with 165038fd1498Szrj { 165138fd1498Szrj tree t1 = type; 165238fd1498Szrj if (INTEGRAL_TYPE_P (type) 165338fd1498Szrj && TYPE_OVERFLOW_WRAPS (type) != TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1))) 165438fd1498Szrj t1 = TYPE_OVERFLOW_WRAPS (type) ? 
type : TREE_TYPE (@1); 165538fd1498Szrj } 165638fd1498Szrj (convert (plus (convert:t1 @0) (convert:t1 @1)))))) 165738fd1498Szrj /* -(T)(-A) -> (T)A 165838fd1498Szrj Sign-extension is ok except for INT_MIN, which thankfully cannot 165938fd1498Szrj happen without overflow. */ 166038fd1498Szrj (simplify 166138fd1498Szrj (negate (convert (negate @1))) 166238fd1498Szrj (if (INTEGRAL_TYPE_P (type) 166338fd1498Szrj && (TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@1)) 166438fd1498Szrj || (!TYPE_UNSIGNED (TREE_TYPE (@1)) 166538fd1498Szrj && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1)))) 166638fd1498Szrj && !TYPE_OVERFLOW_SANITIZED (type) 166738fd1498Szrj && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@1))) 166838fd1498Szrj (convert @1))) 166938fd1498Szrj (simplify 167038fd1498Szrj (negate (convert negate_expr_p@1)) 167138fd1498Szrj (if (SCALAR_FLOAT_TYPE_P (type) 167238fd1498Szrj && ((DECIMAL_FLOAT_TYPE_P (type) 167338fd1498Szrj == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1)) 167438fd1498Szrj && TYPE_PRECISION (type) >= TYPE_PRECISION (TREE_TYPE (@1))) 167538fd1498Szrj || !HONOR_SIGN_DEPENDENT_ROUNDING (type))) 167638fd1498Szrj (convert (negate @1)))) 167738fd1498Szrj (simplify 167838fd1498Szrj (negate (nop_convert (negate @1))) 167938fd1498Szrj (if (!TYPE_OVERFLOW_SANITIZED (type) 168038fd1498Szrj && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@1))) 168138fd1498Szrj (view_convert @1))) 168238fd1498Szrj 168338fd1498Szrj /* We can't reassociate floating-point unless -fassociative-math 168438fd1498Szrj or fixed-point plus or minus because of saturation to +-Inf. */ 168538fd1498Szrj (if ((!FLOAT_TYPE_P (type) || flag_associative_math) 168638fd1498Szrj && !FIXED_POINT_TYPE_P (type)) 168738fd1498Szrj 168838fd1498Szrj /* Match patterns that allow contracting a plus-minus pair 168938fd1498Szrj irrespective of overflow issues. 
*/
  /* (A +- B) - A       ->  +- B */
  /* (A +- B) -+ B      ->  A */
  /* A - (A +- B)       -> -+ B */
  /* A +- (B -+ A)      ->  +- B */
  (simplify
   (minus (plus:c @0 @1) @0)
   @1)
  (simplify
   (minus (minus @0 @1) @0)
   (negate @1))
  (simplify
   (plus:c (minus @0 @1) @1)
   @0)
  (simplify
   (minus @0 (plus:c @0 @1))
   (negate @1))
  (simplify
   (minus @0 (minus @0 @1))
   @1)
  /* (A +- B) + (C - A)   -> C +- B */
  /* (A + B) - (A - C)    -> B + C */
  /* More cases are handled with comparisons.  */
  (simplify
   (plus:c (plus:c @0 @1) (minus @2 @0))
   (plus @2 @1))
  (simplify
   (plus:c (minus @0 @1) (minus @2 @0))
   (minus @2 @1))
  (simplify
   (plus:c (pointer_diff @0 @1) (pointer_diff @2 @0))
   (if (TYPE_OVERFLOW_UNDEFINED (type)
        && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0)))
    (pointer_diff @2 @1)))
  (simplify
   (minus (plus:c @0 @1) (minus @0 @2))
   (plus @1 @2))

  /* (A +- CST1) +- CST2 -> A + CST3
     Use view_convert because it is safe for vectors and equivalent for
     scalars.  */
  (for outer_op (plus minus)
   (for inner_op (plus minus)
        neg_inner_op (minus plus)
    (simplify
     (outer_op (nop_convert (inner_op @0 CONSTANT_CLASS_P@1))
               CONSTANT_CLASS_P@2)
     /* If one of the types wraps, use that one.  */
     (if (!ANY_INTEGRAL_TYPE_P (type) || TYPE_OVERFLOW_WRAPS (type))
      /* If all 3 captures are CONSTANT_CLASS_P, punt, as we might recurse
         forever if something doesn't simplify into a constant.  */
      (if (!CONSTANT_CLASS_P (@0))
       (if (outer_op == PLUS_EXPR)
        (plus (view_convert @0) (inner_op @2 (view_convert @1)))
        (minus (view_convert @0) (neg_inner_op @2 (view_convert @1)))))
      (if (!ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
           || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
       (if (outer_op == PLUS_EXPR)
        (view_convert (plus @0 (inner_op (view_convert @2) @1)))
        (view_convert (minus @0 (neg_inner_op (view_convert @2) @1))))
       /* If the constant operation overflows we cannot do the transform
          directly as we would introduce undefined overflow, for example
          with (a - 1) + INT_MIN.  */
       (if (types_match (type, @0))
        (with { tree cst = const_binop (outer_op == inner_op
                                        ? PLUS_EXPR : MINUS_EXPR,
                                        type, @1, @2); }
         (if (cst && !TREE_OVERFLOW (cst))
          (inner_op @0 { cst; } )
          /* X+INT_MAX+1 is X-INT_MIN.  */
          (if (INTEGRAL_TYPE_P (type) && cst
               && wi::to_wide (cst) == wi::min_value (type))
           (neg_inner_op @0 { wide_int_to_tree (type, wi::to_wide (cst)); })
           /* Last resort, use some unsigned type.  */
           (with { tree utype = unsigned_type_for (type); }
            (if (utype)
             (view_convert (inner_op
                            (view_convert:utype @0)
                            (view_convert:utype
                             { drop_tree_overflow (cst); })))))))))))))))

  /* (CST1 - A) +- CST2 -> CST3 - A  */
  (for outer_op (plus minus)
   (simplify
    (outer_op (minus CONSTANT_CLASS_P@1 @0) CONSTANT_CLASS_P@2)
    (with { tree cst = const_binop (outer_op, type, @1, @2); }
     (if (cst && !TREE_OVERFLOW (cst))
      (minus { cst; } @0)))))

  /* CST1 - (CST2 - A) -> CST3 + A  */
  (simplify
   (minus CONSTANT_CLASS_P@1 (minus CONSTANT_CLASS_P@2 @0))
   (with { tree cst = const_binop (MINUS_EXPR, type, @1, @2); }
    (if (cst && !TREE_OVERFLOW (cst))
     (plus { cst; } @0))))

  /* ~A + A -> -1 */
  (simplify
   (plus:c (bit_not @0) @0)
   (if (!TYPE_OVERFLOW_TRAPS (type))
    { build_all_ones_cst (type); }))

  /* ~A + 1 -> -A */
  (simplify
   (plus (convert? (bit_not @0)) integer_each_onep)
   (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
    (negate (convert @0))))

  /* -A - 1 -> ~A */
  (simplify
   (minus (convert?
(negate @0)) integer_each_onep) 180038fd1498Szrj (if (!TYPE_OVERFLOW_TRAPS (type) 180138fd1498Szrj && tree_nop_conversion_p (type, TREE_TYPE (@0))) 180238fd1498Szrj (bit_not (convert @0)))) 180338fd1498Szrj 180438fd1498Szrj /* -1 - A -> ~A */ 180538fd1498Szrj (simplify 180638fd1498Szrj (minus integer_all_onesp @0) 180738fd1498Szrj (bit_not @0)) 180838fd1498Szrj 180938fd1498Szrj /* (T)(P + A) - (T)P -> (T) A */ 181038fd1498Szrj (simplify 181138fd1498Szrj (minus (convert (plus:c @@0 @1)) 181238fd1498Szrj (convert? @0)) 181338fd1498Szrj (if (element_precision (type) <= element_precision (TREE_TYPE (@1)) 181438fd1498Szrj /* For integer types, if A has a smaller type 181538fd1498Szrj than T the result depends on the possible 181638fd1498Szrj overflow in P + A. 181738fd1498Szrj E.g. T=size_t, A=(unsigned)429497295, P>0. 181838fd1498Szrj However, if an overflow in P + A would cause 181938fd1498Szrj undefined behavior, we can assume that there 182038fd1498Szrj is no overflow. */ 182138fd1498Szrj || (INTEGRAL_TYPE_P (TREE_TYPE (@1)) 182238fd1498Szrj && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1)))) 182338fd1498Szrj (convert @1))) 182438fd1498Szrj (simplify 182538fd1498Szrj (minus (convert (pointer_plus @@0 @1)) 182638fd1498Szrj (convert @0)) 182738fd1498Szrj (if (element_precision (type) <= element_precision (TREE_TYPE (@1)) 182838fd1498Szrj /* For pointer types, if the conversion of A to the 182938fd1498Szrj final type requires a sign- or zero-extension, 183038fd1498Szrj then we have to punt - it is not defined which 183138fd1498Szrj one is correct. */ 183238fd1498Szrj || (POINTER_TYPE_P (TREE_TYPE (@0)) 183338fd1498Szrj && TREE_CODE (@1) == INTEGER_CST 183438fd1498Szrj && tree_int_cst_sign_bit (@1) == 0)) 183538fd1498Szrj (convert @1))) 183638fd1498Szrj (simplify 183738fd1498Szrj (pointer_diff (pointer_plus @@0 @1) @0) 183838fd1498Szrj /* The second argument of pointer_plus must be interpreted as signed, and 183938fd1498Szrj thus sign-extended if necessary. 
*/ 184038fd1498Szrj (with { tree stype = signed_type_for (TREE_TYPE (@1)); } 184138fd1498Szrj /* Use view_convert instead of convert here, as POINTER_PLUS_EXPR 184238fd1498Szrj second arg is unsigned even when we need to consider it as signed, 184338fd1498Szrj we don't want to diagnose overflow here. */ 184438fd1498Szrj (convert (view_convert:stype @1)))) 184538fd1498Szrj 184638fd1498Szrj /* (T)P - (T)(P + A) -> -(T) A */ 184738fd1498Szrj (simplify 184838fd1498Szrj (minus (convert? @0) 184938fd1498Szrj (convert (plus:c @@0 @1))) 185038fd1498Szrj (if (INTEGRAL_TYPE_P (type) 185138fd1498Szrj && TYPE_OVERFLOW_UNDEFINED (type) 185238fd1498Szrj && element_precision (type) <= element_precision (TREE_TYPE (@1))) 185338fd1498Szrj (with { tree utype = unsigned_type_for (type); } 185438fd1498Szrj (convert (negate (convert:utype @1)))) 185538fd1498Szrj (if (element_precision (type) <= element_precision (TREE_TYPE (@1)) 185638fd1498Szrj /* For integer types, if A has a smaller type 185738fd1498Szrj than T the result depends on the possible 185838fd1498Szrj overflow in P + A. 185938fd1498Szrj E.g. T=size_t, A=(unsigned)429497295, P>0. 186038fd1498Szrj However, if an overflow in P + A would cause 186138fd1498Szrj undefined behavior, we can assume that there 186238fd1498Szrj is no overflow. 
*/ 186338fd1498Szrj || (INTEGRAL_TYPE_P (TREE_TYPE (@1)) 186438fd1498Szrj && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1)))) 186538fd1498Szrj (negate (convert @1))))) 186638fd1498Szrj (simplify 186738fd1498Szrj (minus (convert @0) 186838fd1498Szrj (convert (pointer_plus @@0 @1))) 186938fd1498Szrj (if (INTEGRAL_TYPE_P (type) 187038fd1498Szrj && TYPE_OVERFLOW_UNDEFINED (type) 187138fd1498Szrj && element_precision (type) <= element_precision (TREE_TYPE (@1))) 187238fd1498Szrj (with { tree utype = unsigned_type_for (type); } 187338fd1498Szrj (convert (negate (convert:utype @1)))) 187438fd1498Szrj (if (element_precision (type) <= element_precision (TREE_TYPE (@1)) 187538fd1498Szrj /* For pointer types, if the conversion of A to the 187638fd1498Szrj final type requires a sign- or zero-extension, 187738fd1498Szrj then we have to punt - it is not defined which 187838fd1498Szrj one is correct. */ 187938fd1498Szrj || (POINTER_TYPE_P (TREE_TYPE (@0)) 188038fd1498Szrj && TREE_CODE (@1) == INTEGER_CST 188138fd1498Szrj && tree_int_cst_sign_bit (@1) == 0)) 188238fd1498Szrj (negate (convert @1))))) 188338fd1498Szrj (simplify 188438fd1498Szrj (pointer_diff @0 (pointer_plus @@0 @1)) 188538fd1498Szrj /* The second argument of pointer_plus must be interpreted as signed, and 188638fd1498Szrj thus sign-extended if necessary. */ 188738fd1498Szrj (with { tree stype = signed_type_for (TREE_TYPE (@1)); } 188838fd1498Szrj /* Use view_convert instead of convert here, as POINTER_PLUS_EXPR 188938fd1498Szrj second arg is unsigned even when we need to consider it as signed, 189038fd1498Szrj we don't want to diagnose overflow here. 
*/ 189138fd1498Szrj (negate (convert (view_convert:stype @1))))) 189238fd1498Szrj 189338fd1498Szrj /* (T)(P + A) - (T)(P + B) -> (T)A - (T)B */ 189438fd1498Szrj (simplify 189538fd1498Szrj (minus (convert (plus:c @@0 @1)) 189638fd1498Szrj (convert (plus:c @0 @2))) 189738fd1498Szrj (if (INTEGRAL_TYPE_P (type) 189838fd1498Szrj && TYPE_OVERFLOW_UNDEFINED (type) 189938fd1498Szrj && element_precision (type) <= element_precision (TREE_TYPE (@1)) 190038fd1498Szrj && element_precision (type) <= element_precision (TREE_TYPE (@2))) 190138fd1498Szrj (with { tree utype = unsigned_type_for (type); } 190238fd1498Szrj (convert (minus (convert:utype @1) (convert:utype @2)))) 190338fd1498Szrj (if (((element_precision (type) <= element_precision (TREE_TYPE (@1))) 190438fd1498Szrj == (element_precision (type) <= element_precision (TREE_TYPE (@2)))) 190538fd1498Szrj && (element_precision (type) <= element_precision (TREE_TYPE (@1)) 190638fd1498Szrj /* For integer types, if A has a smaller type 190738fd1498Szrj than T the result depends on the possible 190838fd1498Szrj overflow in P + A. 190938fd1498Szrj E.g. T=size_t, A=(unsigned)429497295, P>0. 191038fd1498Szrj However, if an overflow in P + A would cause 191138fd1498Szrj undefined behavior, we can assume that there 191238fd1498Szrj is no overflow. 
*/ 191338fd1498Szrj || (INTEGRAL_TYPE_P (TREE_TYPE (@1)) 191438fd1498Szrj && INTEGRAL_TYPE_P (TREE_TYPE (@2)) 191538fd1498Szrj && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1)) 191638fd1498Szrj && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@2))))) 191738fd1498Szrj (minus (convert @1) (convert @2))))) 191838fd1498Szrj (simplify 191938fd1498Szrj (minus (convert (pointer_plus @@0 @1)) 192038fd1498Szrj (convert (pointer_plus @0 @2))) 192138fd1498Szrj (if (INTEGRAL_TYPE_P (type) 192238fd1498Szrj && TYPE_OVERFLOW_UNDEFINED (type) 192338fd1498Szrj && element_precision (type) <= element_precision (TREE_TYPE (@1))) 192438fd1498Szrj (with { tree utype = unsigned_type_for (type); } 192538fd1498Szrj (convert (minus (convert:utype @1) (convert:utype @2)))) 192638fd1498Szrj (if (element_precision (type) <= element_precision (TREE_TYPE (@1)) 192738fd1498Szrj /* For pointer types, if the conversion of A to the 192838fd1498Szrj final type requires a sign- or zero-extension, 192938fd1498Szrj then we have to punt - it is not defined which 193038fd1498Szrj one is correct. */ 193138fd1498Szrj || (POINTER_TYPE_P (TREE_TYPE (@0)) 193238fd1498Szrj && TREE_CODE (@1) == INTEGER_CST 193338fd1498Szrj && tree_int_cst_sign_bit (@1) == 0 193438fd1498Szrj && TREE_CODE (@2) == INTEGER_CST 193538fd1498Szrj && tree_int_cst_sign_bit (@2) == 0)) 193638fd1498Szrj (minus (convert @1) (convert @2))))) 193738fd1498Szrj (simplify 193838fd1498Szrj (pointer_diff (pointer_plus @@0 @1) (pointer_plus @0 @2)) 193938fd1498Szrj /* The second argument of pointer_plus must be interpreted as signed, and 194038fd1498Szrj thus sign-extended if necessary. */ 194138fd1498Szrj (with { tree stype = signed_type_for (TREE_TYPE (@1)); } 194238fd1498Szrj /* Use view_convert instead of convert here, as POINTER_PLUS_EXPR 194338fd1498Szrj second arg is unsigned even when we need to consider it as signed, 194438fd1498Szrj we don't want to diagnose overflow here. 
*/
    (minus (convert (view_convert:stype @1))
           (convert (view_convert:stype @2)))))))

/* (A * C) +- (B * C) -> (A+-B) * C and (A * C) +- A -> A * (C+-1).
    Modeled after fold_plusminus_mult_expr.  */
(if (!TYPE_SATURATING (type)
     && (!FLOAT_TYPE_P (type) || flag_associative_math))
 (for plusminus (plus minus)
  (simplify
   (plusminus (mult:cs@3 @0 @1) (mult:cs@4 @0 @2))
   (if ((!ANY_INTEGRAL_TYPE_P (type)
         || TYPE_OVERFLOW_WRAPS (type)
         || (INTEGRAL_TYPE_P (type)
             && tree_expr_nonzero_p (@0)
             && expr_not_equal_to (@0, wi::minus_one (TYPE_PRECISION (type)))))
        /* If @1 +- @2 is constant require a hard single-use on either
           original operand (but not on both).  */
        && (single_use (@3) || single_use (@4)))
    (mult (plusminus @1 @2) @0)))
  /* We cannot generate constant 1 for fract.  */
  (if (!ALL_FRACT_MODE_P (TYPE_MODE (type)))
   (simplify
    (plusminus @0 (mult:c@3 @0 @2))
    (if ((!ANY_INTEGRAL_TYPE_P (type)
          || TYPE_OVERFLOW_WRAPS (type)
          || (INTEGRAL_TYPE_P (type)
              && tree_expr_nonzero_p (@0)
              && expr_not_equal_to (@0, wi::minus_one (TYPE_PRECISION (type)))))
         && single_use (@3))
     (mult (plusminus { build_one_cst (type); } @2) @0)))
   (simplify
    (plusminus (mult:c@3 @0 @2) @0)
    (if ((!ANY_INTEGRAL_TYPE_P (type)
          || TYPE_OVERFLOW_WRAPS (type)
          || (INTEGRAL_TYPE_P (type)
              && tree_expr_nonzero_p (@0)
              && expr_not_equal_to (@0, wi::minus_one (TYPE_PRECISION (type)))))
         && single_use (@3))
     (mult (plusminus @2 { build_one_cst (type); }) @0))))))

/* Simplifications of MIN_EXPR, MAX_EXPR, fmin() and fmax().  */

(for minmax (min max FMIN_ALL FMAX_ALL)
 (simplify
  (minmax @0 @0)
  @0))
/* min(max(x,y),y) -> y.  */
(simplify
 (min:c (max:c @0 @1) @1)
 @1)
/* max(min(x,y),y) -> y.  */
(simplify
 (max:c (min:c @0 @1) @1)
 @1)
/* max(a,-a) -> abs(a).  */
(simplify
 (max:c @0 (negate @0))
 (if (TREE_CODE (type) != COMPLEX_TYPE
      && (! ANY_INTEGRAL_TYPE_P (type)
          || TYPE_OVERFLOW_UNDEFINED (type)))
  (abs @0)))
/* min(a,-a) -> -abs(a).  */
(simplify
 (min:c @0 (negate @0))
 (if (TREE_CODE (type) != COMPLEX_TYPE
      && (!
ANY_INTEGRAL_TYPE_P (type)
          || TYPE_OVERFLOW_UNDEFINED (type)))
  (negate (abs @0))))
(simplify
 (min @0 @1)
 (switch
  (if (INTEGRAL_TYPE_P (type)
       && TYPE_MIN_VALUE (type)
       && operand_equal_p (@1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST))
   @1)
  (if (INTEGRAL_TYPE_P (type)
       && TYPE_MAX_VALUE (type)
       && operand_equal_p (@1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST))
   @0)))
(simplify
 (max @0 @1)
 (switch
  (if (INTEGRAL_TYPE_P (type)
       && TYPE_MAX_VALUE (type)
       && operand_equal_p (@1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST))
   @1)
  (if (INTEGRAL_TYPE_P (type)
       && TYPE_MIN_VALUE (type)
       && operand_equal_p (@1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST))
   @0)))

/* max (a, a + CST) -> a + CST where CST is positive.  */
/* max (a, a + CST) -> a where CST is negative.  */
(simplify
 (max:c @0 (plus@2 @0 INTEGER_CST@1))
  (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
   (if (tree_int_cst_sgn (@1) > 0)
    @2
    @0)))

/* min (a, a + CST) -> a where CST is positive.  */
/* min (a, a + CST) -> a + CST where CST is negative.  */
(simplify
 (min:c @0 (plus@2 @0 INTEGER_CST@1))
  (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
   (if (tree_int_cst_sgn (@1) > 0)
    @0
    @2)))

/* (convert (minmax ((convert (x) c)))) -> minmax (x c) if x is promoted
   and the outer convert demotes the expression back to x's type.  */
(for minmax (min max)
 (simplify
  (convert (minmax@0 (convert @1) INTEGER_CST@2))
  (if (INTEGRAL_TYPE_P (type)
       && types_match (@1, type) && int_fits_type_p (@2, type)
       && TYPE_SIGN (TREE_TYPE (@0)) == TYPE_SIGN (type)
       && TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (type))
   (minmax @1 (convert @2)))))

(for minmax (FMIN_ALL FMAX_ALL)
 /* If either argument is NaN, return the other one.  Avoid the
    transformation if we get (and honor) a signalling NaN.  */
 (simplify
  (minmax:c @0 REAL_CST@1)
  (if (real_isnan (TREE_REAL_CST_PTR (@1))
       && (!HONOR_SNANS (@1) || !TREE_REAL_CST (@1).signalling))
   @0)))
/* Convert fmin/fmax to MIN_EXPR/MAX_EXPR.  C99 requires these
   functions to return the numeric arg if the other one is NaN.
   MIN and MAX don't honor that, so only transform if -ffinite-math-only
   is set.  C99 doesn't require -0.0 to be handled, so we don't have to
   worry about it either.  */
(if (flag_finite_math_only)
 (simplify
  (FMIN_ALL @0 @1)
  (min @0 @1))
 (simplify
  (FMAX_ALL @0 @1)
  (max @0 @1)))
/* min (-A, -B) -> -max (A, B)  */
(for minmax (min max FMIN_ALL FMAX_ALL)
     maxmin (max min FMAX_ALL FMIN_ALL)
 (simplify
  (minmax (negate:s@2 @0) (negate:s@3 @1))
  (if (FLOAT_TYPE_P (TREE_TYPE (@0))
       || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
           && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
   (negate (maxmin @0 @1)))))
/* MIN (~X, ~Y) -> ~MAX (X, Y)
   MAX (~X, ~Y) -> ~MIN (X, Y)  */
(for minmax (min max)
     maxmin (max min)
 (simplify
  (minmax (bit_not:s@2 @0) (bit_not:s@3 @1))
  (bit_not (maxmin @0 @1))))

/* MIN (X, Y) == X -> X <= Y  */
(for minmax (min min max max )
     cmp    (eq  ne  eq  ne  )
     out    (le  gt  ge  lt  )
 (simplify
  (cmp:c (minmax:c @0 @1) @0)
  (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)))
   (out @0 @1))))
/* MIN (X, 5) == 0 -> X == 0
   MIN (X, 5) == 7 -> false  */
(for cmp (eq ne)
 (simplify
  (cmp (min @0 INTEGER_CST@1) INTEGER_CST@2)
  (if (wi::lt_p (wi::to_wide (@1), wi::to_wide (@2),
                 TYPE_SIGN (TREE_TYPE (@0))))
   { constant_boolean_node (cmp == NE_EXPR, type); }
   (if (wi::gt_p (wi::to_wide (@1), wi::to_wide (@2),
                  TYPE_SIGN (TREE_TYPE (@0))))
    (cmp @0 @2)))))
(for cmp (eq ne)
 (simplify
  (cmp (max @0 INTEGER_CST@1) INTEGER_CST@2)
  (if (wi::gt_p (wi::to_wide (@1), wi::to_wide (@2),
                 TYPE_SIGN (TREE_TYPE (@0))))
   /* C1 > C2: max (X, C1) can never equal C2.  */
   { constant_boolean_node (cmp == NE_EXPR, type); }
   (if (wi::lt_p (wi::to_wide (@1), wi::to_wide (@2),
                  TYPE_SIGN (TREE_TYPE (@0))))
    /* C1 < C2: the max never clamps at C2, compare X directly.  */
    (cmp @0 @2)))))
/* MIN (X, C1) < C2 -> X < C2 || C1 < C2
   The bit_ior rows cover lt/le of min and gt/ge of max, the bit_and
   rows the converse combinations where both operands must pass.  */
(for minmax (min     min     max     max     min     min     max     max    )
 cmp    (lt      le      gt      ge      gt      ge      lt      le     )
 comb   (bit_ior bit_ior bit_ior bit_ior bit_and bit_and bit_and bit_and)
 (simplify
  (cmp (minmax @0 INTEGER_CST@1) INTEGER_CST@2)
  (comb (cmp @0 @2) (cmp @1 @2))))

/* Simplifications of shift and rotates.  */

/* Rotating an all-ones value by any amount still yields all ones.  */
(for rotate (lrotate rrotate)
 (simplify
  (rotate integer_all_onesp@0 @1)
  @0))

/* Optimize -1 >> x for arithmetic right shifts.  */
(simplify
 (rshift integer_all_onesp@0 @1)
 (if (!TYPE_UNSIGNED (type)
      && tree_expr_nonnegative_p (@1))
  @0))

/* Optimize (x >> c) << c into x & (-1<<c).
   The guard keeps c below the element precision so the shift by c is
   well defined.  */
(simplify
 (lshift (rshift @0 INTEGER_CST@1) @1)
 (if (wi::ltu_p (wi::to_wide (@1), element_precision (type)))
  (bit_and @0 (lshift { build_minus_one_cst (type); } @1))))

/* Optimize (x << c) >> c into x & ((unsigned)-1 >> c) for unsigned
   types.  */
(simplify
 (rshift (lshift @0 INTEGER_CST@1) @1)
 (if (TYPE_UNSIGNED (type)
      && (wi::ltu_p (wi::to_wide (@1), element_precision (type))))
  (bit_and @0 (rshift { build_minus_one_cst (type); } @1))))

(for shiftrotate (lrotate rrotate lshift rshift)
 /* Shifting or rotating by zero is a no-op.  */
 (simplify
  (shiftrotate @0 integer_zerop)
  (non_lvalue @0))
 /* Shifting or rotating zero yields zero.  */
 (simplify
  (shiftrotate integer_zerop@0 @1)
  @0)
 /* Prefer vector1 << scalar to vector1 << vector2
    if vector2 is uniform.  */
 (for vec (VECTOR_CST CONSTRUCTOR)
  (simplify
   (shiftrotate @0 vec@1)
   (with { tree tem = uniform_vector_p (@1); }
    (if (tem)
     (shiftrotate @0 { tem; }))))))

/* Simplify X << Y where Y's low width bits are 0 to X, as only valid
   Y is 0.  Similarly for X >> Y.  */
#if GIMPLE
(for shift (lshift rshift)
 (simplify
  (shift @0 SSA_NAME@1)
   (if (INTEGRAL_TYPE_P (TREE_TYPE (@1)))
    (with {
      /* width = number of bits needed to represent any valid shift
         count for @0's element precision.  */
      int width = ceil_log2 (element_precision (TREE_TYPE (@0)));
      int prec = TYPE_PRECISION (TREE_TYPE (@1));
     }
     (if ((get_nonzero_bits (@1) & wi::mask (width, false, prec)) == 0)
      @0)))))
#endif

/* Rewrite an LROTATE_EXPR by a constant into an
   RROTATE_EXPR by a new constant.
 */
/* Canonicalize: a left rotate by C becomes a right rotate by
   precision - C.  */
(simplify
 (lrotate @0 INTEGER_CST@1)
 (rrotate @0 { const_binop (MINUS_EXPR, TREE_TYPE (@1),
                            build_int_cst (TREE_TYPE (@1),
                                           element_precision (type)), @1); }))

/* Turn (a OP c1) OP c2 into a OP (c1+c2).  */
(for op (lrotate rrotate rshift lshift)
 (simplify
  (op (op @0 INTEGER_CST@1) INTEGER_CST@2)
  (with { unsigned int prec = element_precision (type); }
   /* Both counts must be in [0, prec) individually for the inputs to be
      well defined; their sum is handled below.  */
   (if (wi::ge_p (wi::to_wide (@1), 0, TYPE_SIGN (TREE_TYPE (@1)))
        && wi::lt_p (wi::to_wide (@1), prec, TYPE_SIGN (TREE_TYPE (@1)))
        && wi::ge_p (wi::to_wide (@2), 0, TYPE_SIGN (TREE_TYPE (@2)))
        && wi::lt_p (wi::to_wide (@2), prec, TYPE_SIGN (TREE_TYPE (@2))))
    (with { unsigned int low = (tree_to_uhwi (@1)
                                + tree_to_uhwi (@2)); }
     /* Deal with a OP (c1 + c2) being undefined but (a OP c1) OP c2
        being well defined.  */
     (if (low >= prec)
      (if (op == LROTATE_EXPR || op == RROTATE_EXPR)
       /* Rotates simply wrap around.  */
       (op @0 { build_int_cst (TREE_TYPE (@1), low % prec); })
       (if (TYPE_UNSIGNED (type) || op == LSHIFT_EXPR)
        /* Logical shifts past the width produce zero.  */
        { build_zero_cst (type); }
        /* Arithmetic right shift saturates at prec - 1.  */
        (op @0 { build_int_cst (TREE_TYPE (@1), prec - 1); })))
      (op @0 { build_int_cst (TREE_TYPE (@1), low); })))))))


/* ((1 << A) & 1) != 0 -> A == 0
   ((1 << A) & 1) == 0 -> A != 0 */
(for cmp (ne eq)
 icmp (eq ne)
 (simplify
  (cmp (bit_and (lshift integer_onep @0) integer_onep) integer_zerop)
  (icmp @0 { build_zero_cst (TREE_TYPE (@0)); })))

/* (CST1 << A) == CST2 -> A == ctz (CST2) - ctz (CST1)
   (CST1 << A) != CST2 -> A != ctz (CST2) - ctz (CST1)
   if CST2 != 0.  */
(for cmp (ne eq)
 (simplify
  (cmp (lshift INTEGER_CST@0 @1) INTEGER_CST@2)
  /* cand is the only shift count that could make the equality hold.  */
  (with { int cand = wi::ctz (wi::to_wide (@2)) - wi::ctz (wi::to_wide (@0)); }
   (if (cand < 0
        || (!integer_zerop (@2)
            && wi::lshift (wi::to_wide (@0), cand) != wi::to_wide (@2)))
    { constant_boolean_node (cmp == NE_EXPR, type); }
    (if (!integer_zerop (@2)
         && wi::lshift (wi::to_wide (@0), cand) == wi::to_wide (@2))
     (cmp @1 { build_int_cst (TREE_TYPE (@1), cand); }))))))

/* Fold (X << C1) & C2 into (X << C1) & (C2 | ((1 << C1) - 1))
   (X >> C1) & C2 into (X >> C1) & (C2 | ~((type) -1 >> C1))
   if the new mask might be further optimized.
*/ 225338fd1498Szrj(for shift (lshift rshift) 225438fd1498Szrj (simplify 225538fd1498Szrj (bit_and (convert?:s@4 (shift:s@5 (convert1?@3 @0) INTEGER_CST@1)) 225638fd1498Szrj INTEGER_CST@2) 225738fd1498Szrj (if (tree_nop_conversion_p (TREE_TYPE (@4), TREE_TYPE (@5)) 225838fd1498Szrj && TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT 225938fd1498Szrj && tree_fits_uhwi_p (@1) 226038fd1498Szrj && tree_to_uhwi (@1) > 0 226138fd1498Szrj && tree_to_uhwi (@1) < TYPE_PRECISION (type)) 226238fd1498Szrj (with 226338fd1498Szrj { 226438fd1498Szrj unsigned int shiftc = tree_to_uhwi (@1); 226538fd1498Szrj unsigned HOST_WIDE_INT mask = TREE_INT_CST_LOW (@2); 226638fd1498Szrj unsigned HOST_WIDE_INT newmask, zerobits = 0; 226738fd1498Szrj tree shift_type = TREE_TYPE (@3); 226838fd1498Szrj unsigned int prec; 226938fd1498Szrj 227038fd1498Szrj if (shift == LSHIFT_EXPR) 227138fd1498Szrj zerobits = ((HOST_WIDE_INT_1U << shiftc) - 1); 227238fd1498Szrj else if (shift == RSHIFT_EXPR 227338fd1498Szrj && type_has_mode_precision_p (shift_type)) 227438fd1498Szrj { 227538fd1498Szrj prec = TYPE_PRECISION (TREE_TYPE (@3)); 227638fd1498Szrj tree arg00 = @0; 227738fd1498Szrj /* See if more bits can be proven as zero because of 227838fd1498Szrj zero extension. */ 227938fd1498Szrj if (@3 != @0 228038fd1498Szrj && TYPE_UNSIGNED (TREE_TYPE (@0))) 228138fd1498Szrj { 228238fd1498Szrj tree inner_type = TREE_TYPE (@0); 228338fd1498Szrj if (type_has_mode_precision_p (inner_type) 228438fd1498Szrj && TYPE_PRECISION (inner_type) < prec) 228538fd1498Szrj { 228638fd1498Szrj prec = TYPE_PRECISION (inner_type); 228738fd1498Szrj /* See if we can shorten the right shift. */ 228838fd1498Szrj if (shiftc < prec) 228938fd1498Szrj shift_type = inner_type; 229038fd1498Szrj /* Otherwise X >> C1 is all zeros, so we'll optimize 229138fd1498Szrj it into (X, 0) later on by making sure zerobits 229238fd1498Szrj is all ones. 
*/ 229338fd1498Szrj } 229438fd1498Szrj } 229538fd1498Szrj zerobits = HOST_WIDE_INT_M1U; 229638fd1498Szrj if (shiftc < prec) 229738fd1498Szrj { 229838fd1498Szrj zerobits >>= HOST_BITS_PER_WIDE_INT - shiftc; 229938fd1498Szrj zerobits <<= prec - shiftc; 230038fd1498Szrj } 230138fd1498Szrj /* For arithmetic shift if sign bit could be set, zerobits 230238fd1498Szrj can contain actually sign bits, so no transformation is 230338fd1498Szrj possible, unless MASK masks them all away. In that 230438fd1498Szrj case the shift needs to be converted into logical shift. */ 230538fd1498Szrj if (!TYPE_UNSIGNED (TREE_TYPE (@3)) 230638fd1498Szrj && prec == TYPE_PRECISION (TREE_TYPE (@3))) 230738fd1498Szrj { 230838fd1498Szrj if ((mask & zerobits) == 0) 230938fd1498Szrj shift_type = unsigned_type_for (TREE_TYPE (@3)); 231038fd1498Szrj else 231138fd1498Szrj zerobits = 0; 231238fd1498Szrj } 231338fd1498Szrj } 231438fd1498Szrj } 231538fd1498Szrj /* ((X << 16) & 0xff00) is (X, 0). */ 231638fd1498Szrj (if ((mask & zerobits) == mask) 231738fd1498Szrj { build_int_cst (type, 0); } 231838fd1498Szrj (with { newmask = mask | zerobits; } 231938fd1498Szrj (if (newmask != mask && (newmask & (newmask + 1)) == 0) 232038fd1498Szrj (with 232138fd1498Szrj { 232238fd1498Szrj /* Only do the transformation if NEWMASK is some integer 232338fd1498Szrj mode's mask. 
*/ 232438fd1498Szrj for (prec = BITS_PER_UNIT; 232538fd1498Szrj prec < HOST_BITS_PER_WIDE_INT; prec <<= 1) 232638fd1498Szrj if (newmask == (HOST_WIDE_INT_1U << prec) - 1) 232738fd1498Szrj break; 232838fd1498Szrj } 232938fd1498Szrj (if (prec < HOST_BITS_PER_WIDE_INT 233038fd1498Szrj || newmask == HOST_WIDE_INT_M1U) 233138fd1498Szrj (with 233238fd1498Szrj { tree newmaskt = build_int_cst_type (TREE_TYPE (@2), newmask); } 233338fd1498Szrj (if (!tree_int_cst_equal (newmaskt, @2)) 233438fd1498Szrj (if (shift_type != TREE_TYPE (@3)) 233538fd1498Szrj (bit_and (convert (shift:shift_type (convert @3) @1)) { newmaskt; }) 233638fd1498Szrj (bit_and @4 { newmaskt; }))))))))))))) 233738fd1498Szrj 233838fd1498Szrj/* Fold (X {&,^,|} C2) << C1 into (X << C1) {&,^,|} (C2 << C1) 233938fd1498Szrj (X {&,^,|} C2) >> C1 into (X >> C1) & (C2 >> C1). */ 234038fd1498Szrj(for shift (lshift rshift) 234138fd1498Szrj (for bit_op (bit_and bit_xor bit_ior) 234238fd1498Szrj (simplify 234338fd1498Szrj (shift (convert?:s (bit_op:s @0 INTEGER_CST@2)) INTEGER_CST@1) 234438fd1498Szrj (if (tree_nop_conversion_p (type, TREE_TYPE (@0))) 234538fd1498Szrj (with { tree mask = int_const_binop (shift, fold_convert (type, @2), @1); } 234638fd1498Szrj (bit_op (shift (convert @0) @1) { mask; })))))) 234738fd1498Szrj 234838fd1498Szrj/* ~(~X >> Y) -> X >> Y (for arithmetic shift). 
 */
/* convert1?/convert2? permit optional intervening conversions; the
   precision/sign guards ensure the sign bits survive them.  */
(simplify
 (bit_not (convert1?:s (rshift:s (convert2?@0 (bit_not @1)) @2)))
 (if (!TYPE_UNSIGNED (TREE_TYPE (@0))
      && (element_precision (TREE_TYPE (@0))
          <= element_precision (TREE_TYPE (@1))
          || !TYPE_UNSIGNED (TREE_TYPE (@1))))
  (with
   { tree shift_type = TREE_TYPE (@0); }
   (convert (rshift (convert:shift_type @1) @2)))))

/* ~(~X >>r Y) -> X >>r Y
   ~(~X <<r Y) -> X <<r Y */
(for rotate (lrotate rrotate)
 (simplify
  (bit_not (convert1?:s (rotate:s (convert2?@0 (bit_not @1)) @2)))
  (if ((element_precision (TREE_TYPE (@0))
        <= element_precision (TREE_TYPE (@1))
        || !TYPE_UNSIGNED (TREE_TYPE (@1)))
       && (element_precision (type) <= element_precision (TREE_TYPE (@0))
           || !TYPE_UNSIGNED (TREE_TYPE (@0))))
   (with
    { tree rotate_type = TREE_TYPE (@0); }
    (convert (rotate (convert:rotate_type @1) @2))))))

/* Simplifications of conversions.  */

/* Basic strip-useless-type-conversions / strip_nops.  */
(for cvt (convert view_convert float fix_trunc)
 (simplify
  (cvt @0)
  (if ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@0)))
       || (GENERIC && type == TREE_TYPE (@0)))
   @0)))

/* Contract view-conversions.  */
(simplify
  (view_convert (view_convert @0))
  (view_convert @0))

/* For integral conversions with the same precision or pointer
   conversions use a NOP_EXPR instead.  */
(simplify
  (view_convert @0)
  (if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
       && (INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
       && TYPE_PRECISION (type) == TYPE_PRECISION (TREE_TYPE (@0)))
   (convert @0)))

/* Strip inner integral conversions that do not change precision or size, or
   zero-extend while keeping the same size (for bool-to-char).  */
(simplify
  (view_convert (convert@0 @1))
  (if ((INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
       && (INTEGRAL_TYPE_P (TREE_TYPE (@1)) || POINTER_TYPE_P (TREE_TYPE (@1)))
       && TYPE_SIZE (TREE_TYPE (@0)) == TYPE_SIZE (TREE_TYPE (@1))
       && (TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1))
           || (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (TREE_TYPE (@1))
               && TYPE_UNSIGNED (TREE_TYPE (@1)))))
   (view_convert @1)))

/* Re-association barriers around constants and other re-association
   barriers can be removed.  */
(simplify
  (paren CONSTANT_CLASS_P@0)
  @0)
(simplify
  (paren (paren@1 @0))
  @1)

/* Handle cases of two conversions in a row.
 */
/* Decompose both conversion steps into type-class / precision / sign
   flags once, then pick the first applicable case in the switch.  */
(for ocvt (convert float fix_trunc)
 (for icvt (convert float)
  (simplify
   (ocvt (icvt@1 @0))
   (with
    {
      tree inside_type = TREE_TYPE (@0);
      tree inter_type = TREE_TYPE (@1);
      int inside_int = INTEGRAL_TYPE_P (inside_type);
      int inside_ptr = POINTER_TYPE_P (inside_type);
      int inside_float = FLOAT_TYPE_P (inside_type);
      int inside_vec = VECTOR_TYPE_P (inside_type);
      unsigned int inside_prec = TYPE_PRECISION (inside_type);
      int inside_unsignedp = TYPE_UNSIGNED (inside_type);
      int inter_int = INTEGRAL_TYPE_P (inter_type);
      int inter_ptr = POINTER_TYPE_P (inter_type);
      int inter_float = FLOAT_TYPE_P (inter_type);
      int inter_vec = VECTOR_TYPE_P (inter_type);
      unsigned int inter_prec = TYPE_PRECISION (inter_type);
      int inter_unsignedp = TYPE_UNSIGNED (inter_type);
      int final_int = INTEGRAL_TYPE_P (type);
      int final_ptr = POINTER_TYPE_P (type);
      int final_float = FLOAT_TYPE_P (type);
      int final_vec = VECTOR_TYPE_P (type);
      unsigned int final_prec = TYPE_PRECISION (type);
      int final_unsignedp = TYPE_UNSIGNED (type);
    }
   (switch
    /* In addition to the cases of two conversions in a row
       handled below, if we are converting something to its own
       type via an object of identical or wider precision, neither
       conversion is needed.  */
    (if (((GIMPLE && useless_type_conversion_p (type, inside_type))
          || (GENERIC
              && TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (inside_type)))
         && (((inter_int || inter_ptr) && final_int)
             || (inter_float && final_float))
         && inter_prec >= final_prec)
     (ocvt @0))

    /* Likewise, if the intermediate and initial types are either both
       float or both integer, we don't need the middle conversion if the
       former is wider than the latter and doesn't change the signedness
       (for integers).  Avoid this if the final type is a pointer since
       then we sometimes need the middle conversion.  */
    (if (((inter_int && inside_int) || (inter_float && inside_float))
         && (final_int || final_float)
         && inter_prec >= inside_prec
         && (inter_float || inter_unsignedp == inside_unsignedp))
     (ocvt @0))

    /* If we have a sign-extension of a zero-extended value, we can
       replace that by a single zero-extension.  Likewise if the
       final conversion does not change precision we can drop the
       intermediate conversion.  */
    (if (inside_int && inter_int && final_int
         && ((inside_prec < inter_prec && inter_prec < final_prec
              && inside_unsignedp && !inter_unsignedp)
             || final_prec == inter_prec))
     (ocvt @0))

    /* Two conversions in a row are not needed unless:
	- some conversion is floating-point (overstrict for now), or
	- some conversion is a vector (overstrict for now), or
	- the intermediate type is narrower than both initial and
	  final, or
	- the intermediate type and innermost type differ in signedness,
	  and the outermost type is wider than the intermediate, or
	- the initial type is a pointer type and the precisions of the
	  intermediate and final types differ, or
	- the final type is a pointer type and the precisions of the
	  initial and intermediate types differ.  */
    (if (! inside_float && ! inter_float && ! final_float
         && ! inside_vec && ! inter_vec && ! final_vec
         && (inter_prec >= inside_prec || inter_prec >= final_prec)
         && ! (inside_int && inter_int
               && inter_unsignedp != inside_unsignedp
               && inter_prec < final_prec)
         && ((inter_unsignedp && inter_prec > inside_prec)
             == (final_unsignedp && final_prec > inter_prec))
         && ! (inside_ptr && inter_prec != final_prec)
         && ! (final_ptr && inside_prec != inter_prec))
     (ocvt @0))

    /* A truncation to an unsigned type (a zero-extension) should be
       canonicalized as bitwise and of a mask.  */
    (if (GIMPLE /* PR70366: doing this in GENERIC breaks -Wconversion.  */
         && final_int && inter_int && inside_int
         && final_prec == inside_prec
         && final_prec > inter_prec
         && inter_unsignedp)
     (convert (bit_and @0 { wide_int_to_tree
                              (inside_type,
                               wi::mask (inter_prec, false,
                                         TYPE_PRECISION (inside_type))); })))

    /* If we are converting an integer to a floating-point that can
       represent it exactly and back to an integer, we can skip the
       floating-point conversion.  */
    (if (GIMPLE /* PR66211 */
         && inside_int && inter_float && final_int &&
         (unsigned) significand_size (TYPE_MODE (inter_type))
         >= inside_prec - !inside_unsignedp)
     (convert @0)))))))

/* If we have a narrowing conversion to an integral type that is fed by a
   BIT_AND_EXPR, we might be able to remove the BIT_AND_EXPR if it merely
   masks off bits outside the final type (and nothing else).  */
(simplify
  (convert (bit_and @0 INTEGER_CST@1))
  (if (INTEGRAL_TYPE_P (type)
       && INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@0))
       && operand_equal_p (@1, build_low_bits_mask (TREE_TYPE (@1),
                                                    TYPE_PRECISION (type)), 0))
   (convert @0)))


/* (X /[ex] A) * A -> X.  */
(simplify
  (mult (convert1? (exact_div @0 @@1)) (convert2? @1))
  (convert @0))

/* Canonicalization of binary operations.  */

/* Convert X + -C into X - C.
 */
(simplify
 (plus @0 REAL_CST@1)
 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
  (with { tree tem = const_unop (NEGATE_EXPR, type, @1); }
   /* Suppress the fold if negating @1 overflowed and -ftrapping-math
      is in effect.  */
   (if (!TREE_OVERFLOW (tem) || !flag_trapping_math)
    (minus @0 { tem; })))))

/* Convert x+x into x*2.  */
(simplify
 (plus @0 @0)
 (if (SCALAR_FLOAT_TYPE_P (type))
  (mult @0 { build_real (type, dconst2); })
  (if (INTEGRAL_TYPE_P (type))
   (mult @0 { build_int_cst (type, 2); }))))

/* 0 - X -> -X.  */
(simplify
 (minus integer_zerop @1)
 (negate @1))
(simplify
 (pointer_diff integer_zerop @1)
 (negate (convert @1)))

/* (ARG0 - ARG1) is the same as (-ARG1 + ARG0).  So check whether
   ARG0 is zero and X + ARG0 reduces to X, since that would mean
   (-ARG1 + ARG0) reduces to -ARG1.  */
(simplify
 (minus real_zerop@0 @1)
 (if (fold_real_zero_addition_p (type, @0, 0))
  (negate @1)))

/* Transform x * -1 into -x.  */
(simplify
 (mult @0 integer_minus_onep)
 (negate @0))

/* Reassociate (X * CST) * Y to (X * Y) * CST.  This does not introduce
   signed overflow for CST != 0 && CST != -1.  */
(simplify
 (mult:c (mult:s@3 @0 INTEGER_CST@1) @2)
 (if (TREE_CODE (@2) != INTEGER_CST
      && single_use (@3)
      && !integer_zerop (@1) && !integer_minus_onep (@1))
  (mult (mult @0 @2) @1)))

/* True if we can easily extract the real and imaginary parts of a complex
   number.  */
(match compositional_complex
 (convert? (complex @0 @1)))

/* COMPLEX_EXPR and REALPART/IMAGPART_EXPR cancellations.  */
(simplify
 (complex (realpart @0) (imagpart @0))
 @0)
(simplify
 (realpart (complex @0 @1))
 @0)
(simplify
 (imagpart (complex @0 @1))
 @1)

/* Sometimes we only care about half of a complex expression.  */
(simplify
 (realpart (convert?:s (conj:s @0)))
 (convert (realpart @0)))
(simplify
 (imagpart (convert?:s (conj:s @0)))
 (convert (negate (imagpart @0))))
(for part (realpart imagpart)
 (for op (plus minus)
  (simplify
   (part (convert?:s@2 (op:s @0 @1)))
   (convert (op (part @0) (part @1))))))
(simplify
 (realpart (convert?:s (CEXPI:s @0)))
 (convert (COS @0)))
(simplify
 (imagpart (convert?:s (CEXPI:s @0)))
 (convert (SIN @0)))

/* conj(conj(x)) -> x  */
(simplify
 (conj (convert? (conj @0)))
 (if (tree_nop_conversion_p (TREE_TYPE (@0), type))
  (convert @0)))

/* conj({x,y}) -> {x,-y}  */
(simplify
 (conj (convert?:s (complex:s @0 @1)))
 (with { tree itype = TREE_TYPE (type); }
  (complex (convert:itype @0) (negate (convert:itype @1)))))

/* BSWAP simplifications, transforms checked by gcc.dg/builtin-bswap-8.c.  */
(for bswap (BUILT_IN_BSWAP16 BUILT_IN_BSWAP32 BUILT_IN_BSWAP64)
 (simplify
  (bswap (bswap @0))
  @0)
 (simplify
  (bswap (bit_not (bswap @0)))
  (bit_not @0))
 /* bswap commutes with bitwise operations applied to all bytes.  */
 (for bitop (bit_xor bit_ior bit_and)
  (simplify
   (bswap (bitop:c (bswap @0) @1))
   (bitop @0 (bswap @1)))))


/* Combine COND_EXPRs and VEC_COND_EXPRs.  */

/* Simplify constant conditions.
   Only optimize constant conditions when the selected branch
   has the same type as the COND_EXPR.  This avoids optimizing
   away "c ? x : throw", where the throw has a void type.
   Note that we cannot throw away the fold-const.c variant nor
   this one as we depend on doing this transform before possibly
   A ? B : B -> B triggers and the fold-const.c one can optimize
   0 ? A : B to B even if A has side-effects.  Something
   genmatch cannot handle.
 */
(simplify
 (cond INTEGER_CST@0 @1 @2)
 /* Select the live arm, but only when its type matches (see above).  */
 (if (integer_zerop (@0))
  (if (!VOID_TYPE_P (TREE_TYPE (@2)) || VOID_TYPE_P (type))
   @2)
  (if (!VOID_TYPE_P (TREE_TYPE (@1)) || VOID_TYPE_P (type))
   @1)))
(simplify
 (vec_cond VECTOR_CST@0 @1 @2)
 (if (integer_all_onesp (@0))
  @1
  (if (integer_zerop (@0))
   @2)))

/* Simplification moved from fold_cond_expr_with_comparison.  It may also
   be extended.  */
/* This pattern implements two kinds simplification:

   Case 1)
   (cond (cmp (convert1? x) c1) (convert2? x) c2) -> (minmax (x c)) if:
     1) Conversions are type widening from smaller type.
     2) Const c1 equals to c2 after canonicalizing comparison.
     3) Comparison has tree code LT, LE, GT or GE.
   This specific pattern is needed when (cmp (convert x) c) may not
   be simplified by comparison patterns because of multiple uses of
   x.  It also makes sense here because simplifying across multiple
   referred var is always benefitial for complicated cases.

   Case 2)
   (cond (eq (convert1? x) c1) (convert2? x) c2) -> (cond (eq x c1) c1 c2).  */
(for cmp (lt le gt ge eq)
 (simplify
  (cond (cmp (convert1? @1) INTEGER_CST@3) (convert2? @1) INTEGER_CST@2)
  (with
   {
     tree from_type = TREE_TYPE (@1);
     tree c1_type = TREE_TYPE (@3), c2_type = TREE_TYPE (@2);
     /* code is the resulting tree code: MIN/MAX_EXPR, EQ_EXPR, or
        ERROR_MARK when no simplification applies.  */
     enum tree_code code = ERROR_MARK;

     /* Both constants must be representable in x's type, and any
        widening conversion must preserve value and sign.  */
     if (INTEGRAL_TYPE_P (from_type)
         && int_fits_type_p (@2, from_type)
         && (types_match (c1_type, from_type)
             || (TYPE_PRECISION (c1_type) > TYPE_PRECISION (from_type)
                 && (TYPE_UNSIGNED (from_type)
                     || TYPE_SIGN (c1_type) == TYPE_SIGN (from_type))))
         && (types_match (c2_type, from_type)
             || (TYPE_PRECISION (c2_type) > TYPE_PRECISION (from_type)
                 && (TYPE_UNSIGNED (from_type)
                     || TYPE_SIGN (c2_type) == TYPE_SIGN (from_type)))))
       {
         if (cmp != EQ_EXPR)
           {
             /* Canonicalize the comparison so c1 and c2 can match.  */
             if (wi::to_widest (@3) == (wi::to_widest (@2) - 1))
               {
                 /* X <= Y - 1 equals to X < Y.  */
                 if (cmp == LE_EXPR)
                   code = LT_EXPR;
                 /* X > Y - 1 equals to X >= Y.  */
                 if (cmp == GT_EXPR)
                   code = GE_EXPR;
               }
             if (wi::to_widest (@3) == (wi::to_widest (@2) + 1))
               {
                 /* X < Y + 1 equals to X <= Y.  */
                 if (cmp == LT_EXPR)
                   code = LE_EXPR;
                 /* X >= Y + 1 equals to X > Y.  */
                 if (cmp == GE_EXPR)
                   code = GT_EXPR;
               }
             if (code != ERROR_MARK
                 || wi::to_widest (@2) == wi::to_widest (@3))
               {
                 if (cmp == LT_EXPR || cmp == LE_EXPR)
                   code = MIN_EXPR;
                 if (cmp == GT_EXPR || cmp == GE_EXPR)
                   code = MAX_EXPR;
               }
           }
         /* Can do A == C1 ?
A : C2 -> A == C1 ? C1 : C2? */ 274238fd1498Szrj else if (int_fits_type_p (@3, from_type)) 274338fd1498Szrj code = EQ_EXPR; 274438fd1498Szrj } 274538fd1498Szrj } 274638fd1498Szrj (if (code == MAX_EXPR) 274738fd1498Szrj (convert (max @1 (convert @2))) 274838fd1498Szrj (if (code == MIN_EXPR) 274938fd1498Szrj (convert (min @1 (convert @2))) 275038fd1498Szrj (if (code == EQ_EXPR) 275138fd1498Szrj (convert (cond (eq @1 (convert @3)) 275238fd1498Szrj (convert:from_type @3) (convert:from_type @2))))))))) 275338fd1498Szrj 275438fd1498Szrj/* (cond (cmp (convert? x) c1) (op x c2) c3) -> (op (minmax x c1) c2) if: 275538fd1498Szrj 275638fd1498Szrj 1) OP is PLUS or MINUS. 275738fd1498Szrj 2) CMP is LT, LE, GT or GE. 275838fd1498Szrj 3) C3 == (C1 op C2), and computation doesn't have undefined behavior. 275938fd1498Szrj 276038fd1498Szrj This pattern also handles special cases like: 276138fd1498Szrj 276238fd1498Szrj A) Operand x is a unsigned to signed type conversion and c1 is 276338fd1498Szrj integer zero. In this case, 276438fd1498Szrj (signed type)x < 0 <=> x > MAX_VAL(signed type) 276538fd1498Szrj (signed type)x >= 0 <=> x <= MAX_VAL(signed type) 276638fd1498Szrj B) Const c1 may not equal to (C3 op' C2). In this case we also 276738fd1498Szrj check equality for (c1+1) and (c1-1) by adjusting comparison 276838fd1498Szrj code. 276938fd1498Szrj 277038fd1498Szrj TODO: Though signed type is handled by this pattern, it cannot be 277138fd1498Szrj simplified at the moment because C standard requires additional 277238fd1498Szrj type promotion. In order to match&simplify it here, the IR needs 277338fd1498Szrj to be cleaned up by other optimizers, i.e, VRP. */ 277438fd1498Szrj(for op (plus minus) 277538fd1498Szrj (for cmp (lt le gt ge) 277638fd1498Szrj (simplify 277738fd1498Szrj (cond (cmp (convert? 
@X) INTEGER_CST@1) (op @X INTEGER_CST@2) INTEGER_CST@3) 277838fd1498Szrj (with { tree from_type = TREE_TYPE (@X), to_type = TREE_TYPE (@1); } 277938fd1498Szrj (if (types_match (from_type, to_type) 278038fd1498Szrj /* Check if it is special case A). */ 278138fd1498Szrj || (TYPE_UNSIGNED (from_type) 278238fd1498Szrj && !TYPE_UNSIGNED (to_type) 278338fd1498Szrj && TYPE_PRECISION (from_type) == TYPE_PRECISION (to_type) 278438fd1498Szrj && integer_zerop (@1) 278538fd1498Szrj && (cmp == LT_EXPR || cmp == GE_EXPR))) 278638fd1498Szrj (with 278738fd1498Szrj { 278838fd1498Szrj bool overflow = false; 278938fd1498Szrj enum tree_code code, cmp_code = cmp; 279038fd1498Szrj wide_int real_c1; 279138fd1498Szrj wide_int c1 = wi::to_wide (@1); 279238fd1498Szrj wide_int c2 = wi::to_wide (@2); 279338fd1498Szrj wide_int c3 = wi::to_wide (@3); 279438fd1498Szrj signop sgn = TYPE_SIGN (from_type); 279538fd1498Szrj 279638fd1498Szrj /* Handle special case A), given x of unsigned type: 279738fd1498Szrj ((signed type)x < 0) <=> (x > MAX_VAL(signed type)) 279838fd1498Szrj ((signed type)x >= 0) <=> (x <= MAX_VAL(signed type)) */ 279938fd1498Szrj if (!types_match (from_type, to_type)) 280038fd1498Szrj { 280138fd1498Szrj if (cmp_code == LT_EXPR) 280238fd1498Szrj cmp_code = GT_EXPR; 280338fd1498Szrj if (cmp_code == GE_EXPR) 280438fd1498Szrj cmp_code = LE_EXPR; 280538fd1498Szrj c1 = wi::max_value (to_type); 280638fd1498Szrj } 280738fd1498Szrj /* To simplify this pattern, we require c3 = (c1 op c2). Here we 280838fd1498Szrj compute (c3 op' c2) and check if it equals to c1 with op' being 280938fd1498Szrj the inverted operator of op. Make sure overflow doesn't happen 281038fd1498Szrj if it is undefined. 
*/ 281138fd1498Szrj if (op == PLUS_EXPR) 281238fd1498Szrj real_c1 = wi::sub (c3, c2, sgn, &overflow); 281338fd1498Szrj else 281438fd1498Szrj real_c1 = wi::add (c3, c2, sgn, &overflow); 281538fd1498Szrj 281638fd1498Szrj code = cmp_code; 281738fd1498Szrj if (!overflow || !TYPE_OVERFLOW_UNDEFINED (from_type)) 281838fd1498Szrj { 281938fd1498Szrj /* Check if c1 equals to real_c1. Boundary condition is handled 282038fd1498Szrj by adjusting comparison operation if necessary. */ 282138fd1498Szrj if (!wi::cmp (wi::sub (real_c1, 1, sgn, &overflow), c1, sgn) 282238fd1498Szrj && !overflow) 282338fd1498Szrj { 282438fd1498Szrj /* X <= Y - 1 equals to X < Y. */ 282538fd1498Szrj if (cmp_code == LE_EXPR) 282638fd1498Szrj code = LT_EXPR; 282738fd1498Szrj /* X > Y - 1 equals to X >= Y. */ 282838fd1498Szrj if (cmp_code == GT_EXPR) 282938fd1498Szrj code = GE_EXPR; 283038fd1498Szrj } 283138fd1498Szrj if (!wi::cmp (wi::add (real_c1, 1, sgn, &overflow), c1, sgn) 283238fd1498Szrj && !overflow) 283338fd1498Szrj { 283438fd1498Szrj /* X < Y + 1 equals to X <= Y. */ 283538fd1498Szrj if (cmp_code == LT_EXPR) 283638fd1498Szrj code = LE_EXPR; 283738fd1498Szrj /* X >= Y + 1 equals to X > Y. 
*/ 283838fd1498Szrj if (cmp_code == GE_EXPR) 283938fd1498Szrj code = GT_EXPR; 284038fd1498Szrj } 284138fd1498Szrj if (code != cmp_code || !wi::cmp (real_c1, c1, sgn)) 284238fd1498Szrj { 284338fd1498Szrj if (cmp_code == LT_EXPR || cmp_code == LE_EXPR) 284438fd1498Szrj code = MIN_EXPR; 284538fd1498Szrj if (cmp_code == GT_EXPR || cmp_code == GE_EXPR) 284638fd1498Szrj code = MAX_EXPR; 284738fd1498Szrj } 284838fd1498Szrj } 284938fd1498Szrj } 285038fd1498Szrj (if (code == MAX_EXPR) 285138fd1498Szrj (op (max @X { wide_int_to_tree (from_type, real_c1); }) 285238fd1498Szrj { wide_int_to_tree (from_type, c2); }) 285338fd1498Szrj (if (code == MIN_EXPR) 285438fd1498Szrj (op (min @X { wide_int_to_tree (from_type, real_c1); }) 285538fd1498Szrj { wide_int_to_tree (from_type, c2); }))))))))) 285638fd1498Szrj 285738fd1498Szrj(for cnd (cond vec_cond) 285838fd1498Szrj /* A ? B : (A ? X : C) -> A ? B : C. */ 285938fd1498Szrj (simplify 286038fd1498Szrj (cnd @0 (cnd @0 @1 @2) @3) 286138fd1498Szrj (cnd @0 @1 @3)) 286238fd1498Szrj (simplify 286338fd1498Szrj (cnd @0 @1 (cnd @0 @2 @3)) 286438fd1498Szrj (cnd @0 @1 @3)) 286538fd1498Szrj /* A ? B : (!A ? C : X) -> A ? B : C. */ 286638fd1498Szrj /* ??? This matches embedded conditions open-coded because genmatch 286738fd1498Szrj would generate matching code for conditions in separate stmts only. 286838fd1498Szrj The following is still important to merge then and else arm cases 286938fd1498Szrj from if-conversion. 
*/ 287038fd1498Szrj (simplify 287138fd1498Szrj (cnd @0 @1 (cnd @2 @3 @4)) 287238fd1498Szrj (if (COMPARISON_CLASS_P (@0) 287338fd1498Szrj && COMPARISON_CLASS_P (@2) 287438fd1498Szrj && invert_tree_comparison 287538fd1498Szrj (TREE_CODE (@0), HONOR_NANS (TREE_OPERAND (@0, 0))) == TREE_CODE (@2) 287638fd1498Szrj && operand_equal_p (TREE_OPERAND (@0, 0), TREE_OPERAND (@2, 0), 0) 287738fd1498Szrj && operand_equal_p (TREE_OPERAND (@0, 1), TREE_OPERAND (@2, 1), 0)) 287838fd1498Szrj (cnd @0 @1 @3))) 287938fd1498Szrj (simplify 288038fd1498Szrj (cnd @0 (cnd @1 @2 @3) @4) 288138fd1498Szrj (if (COMPARISON_CLASS_P (@0) 288238fd1498Szrj && COMPARISON_CLASS_P (@1) 288338fd1498Szrj && invert_tree_comparison 288438fd1498Szrj (TREE_CODE (@0), HONOR_NANS (TREE_OPERAND (@0, 0))) == TREE_CODE (@1) 288538fd1498Szrj && operand_equal_p (TREE_OPERAND (@0, 0), TREE_OPERAND (@1, 0), 0) 288638fd1498Szrj && operand_equal_p (TREE_OPERAND (@0, 1), TREE_OPERAND (@1, 1), 0)) 288738fd1498Szrj (cnd @0 @3 @4))) 288838fd1498Szrj 288938fd1498Szrj /* A ? B : B -> B. */ 289038fd1498Szrj (simplify 289138fd1498Szrj (cnd @0 @1 @1) 289238fd1498Szrj @1) 289338fd1498Szrj 289438fd1498Szrj /* !A ? B : C -> A ? C : B. */ 289538fd1498Szrj (simplify 289638fd1498Szrj (cnd (logical_inverted_value truth_valued_p@0) @1 @2) 289738fd1498Szrj (cnd @0 @2 @1))) 289838fd1498Szrj 289938fd1498Szrj/* A + (B vcmp C ? 1 : 0) -> A - (B vcmp C ? -1 : 0), since vector comparisons 290038fd1498Szrj return all -1 or all 0 results. */ 290138fd1498Szrj/* ??? We could instead convert all instances of the vec_cond to negate, 290238fd1498Szrj but that isn't necessarily a win on its own. */ 290338fd1498Szrj(simplify 290438fd1498Szrj (plus:c @3 (view_convert? 
(vec_cond:s @0 integer_each_onep@1 integer_zerop@2))) 290538fd1498Szrj (if (VECTOR_TYPE_P (type) 290638fd1498Szrj && known_eq (TYPE_VECTOR_SUBPARTS (type), 290738fd1498Szrj TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1))) 290838fd1498Szrj && (TYPE_MODE (TREE_TYPE (type)) 290938fd1498Szrj == TYPE_MODE (TREE_TYPE (TREE_TYPE (@1))))) 291038fd1498Szrj (minus @3 (view_convert (vec_cond @0 (negate @1) @2))))) 291138fd1498Szrj 291238fd1498Szrj/* ... likewise A - (B vcmp C ? 1 : 0) -> A + (B vcmp C ? -1 : 0). */ 291338fd1498Szrj(simplify 291438fd1498Szrj (minus @3 (view_convert? (vec_cond:s @0 integer_each_onep@1 integer_zerop@2))) 291538fd1498Szrj (if (VECTOR_TYPE_P (type) 291638fd1498Szrj && known_eq (TYPE_VECTOR_SUBPARTS (type), 291738fd1498Szrj TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1))) 291838fd1498Szrj && (TYPE_MODE (TREE_TYPE (type)) 291938fd1498Szrj == TYPE_MODE (TREE_TYPE (TREE_TYPE (@1))))) 292038fd1498Szrj (plus @3 (view_convert (vec_cond @0 (negate @1) @2))))) 292138fd1498Szrj 292238fd1498Szrj 292338fd1498Szrj/* Simplifications of comparisons. */ 292438fd1498Szrj 292538fd1498Szrj/* See if we can reduce the magnitude of a constant involved in a 292638fd1498Szrj comparison by changing the comparison code. This is a canonicalization 292738fd1498Szrj formerly done by maybe_canonicalize_comparison_1. 
*/ 292838fd1498Szrj(for cmp (le gt) 292938fd1498Szrj acmp (lt ge) 293038fd1498Szrj (simplify 293138fd1498Szrj (cmp @0 INTEGER_CST@1) 293238fd1498Szrj (if (tree_int_cst_sgn (@1) == -1) 293338fd1498Szrj (acmp @0 { wide_int_to_tree (TREE_TYPE (@1), wi::to_wide (@1) + 1); })))) 293438fd1498Szrj(for cmp (ge lt) 293538fd1498Szrj acmp (gt le) 293638fd1498Szrj (simplify 293738fd1498Szrj (cmp @0 INTEGER_CST@1) 293838fd1498Szrj (if (tree_int_cst_sgn (@1) == 1) 293938fd1498Szrj (acmp @0 { wide_int_to_tree (TREE_TYPE (@1), wi::to_wide (@1) - 1); })))) 294038fd1498Szrj 294138fd1498Szrj 294238fd1498Szrj/* We can simplify a logical negation of a comparison to the 294338fd1498Szrj inverted comparison. As we cannot compute an expression 294438fd1498Szrj operator using invert_tree_comparison we have to simulate 294538fd1498Szrj that with expression code iteration. */ 294638fd1498Szrj(for cmp (tcc_comparison) 294738fd1498Szrj icmp (inverted_tcc_comparison) 294838fd1498Szrj ncmp (inverted_tcc_comparison_with_nans) 294938fd1498Szrj /* Ideally we'd like to combine the following two patterns 295038fd1498Szrj and handle some more cases by using 295138fd1498Szrj (logical_inverted_value (cmp @0 @1)) 295238fd1498Szrj here but for that genmatch would need to "inline" that. 295338fd1498Szrj For now implement what forward_propagate_comparison did. */ 295438fd1498Szrj (simplify 295538fd1498Szrj (bit_not (cmp @0 @1)) 295638fd1498Szrj (if (VECTOR_TYPE_P (type) 295738fd1498Szrj || (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1)) 295838fd1498Szrj /* Comparison inversion may be impossible for trapping math, 295938fd1498Szrj invert_tree_comparison will tell us. But we can't use 296038fd1498Szrj a computed operator in the replacement tree thus we have 296138fd1498Szrj to play the trick below. 
*/ 296238fd1498Szrj (with { enum tree_code ic = invert_tree_comparison 296338fd1498Szrj (cmp, HONOR_NANS (@0)); } 296438fd1498Szrj (if (ic == icmp) 296538fd1498Szrj (icmp @0 @1) 296638fd1498Szrj (if (ic == ncmp) 296738fd1498Szrj (ncmp @0 @1)))))) 296838fd1498Szrj (simplify 296938fd1498Szrj (bit_xor (cmp @0 @1) integer_truep) 297038fd1498Szrj (with { enum tree_code ic = invert_tree_comparison 297138fd1498Szrj (cmp, HONOR_NANS (@0)); } 297238fd1498Szrj (if (ic == icmp) 297338fd1498Szrj (icmp @0 @1) 297438fd1498Szrj (if (ic == ncmp) 297538fd1498Szrj (ncmp @0 @1)))))) 297638fd1498Szrj 297738fd1498Szrj/* Transform comparisons of the form X - Y CMP 0 to X CMP Y. 297838fd1498Szrj ??? The transformation is valid for the other operators if overflow 297938fd1498Szrj is undefined for the type, but performing it here badly interacts 298038fd1498Szrj with the transformation in fold_cond_expr_with_comparison which 298138fd1498Szrj attempts to synthetize ABS_EXPR. */ 298238fd1498Szrj(for cmp (eq ne) 298338fd1498Szrj (for sub (minus pointer_diff) 298438fd1498Szrj (simplify 298538fd1498Szrj (cmp (sub@2 @0 @1) integer_zerop) 298638fd1498Szrj (if (single_use (@2)) 298738fd1498Szrj (cmp @0 @1))))) 298838fd1498Szrj 298938fd1498Szrj/* Transform comparisons of the form X * C1 CMP 0 to X CMP 0 in the 299038fd1498Szrj signed arithmetic case. That form is created by the compiler 299138fd1498Szrj often enough for folding it to be of value. One example is in 299238fd1498Szrj computing loop trip counts after Operator Strength Reduction. */ 299338fd1498Szrj(for cmp (simple_comparison) 299438fd1498Szrj scmp (swapped_simple_comparison) 299538fd1498Szrj (simplify 299638fd1498Szrj (cmp (mult@3 @0 INTEGER_CST@1) integer_zerop@2) 299738fd1498Szrj /* Handle unfolded multiplication by zero. 
*/ 299838fd1498Szrj (if (integer_zerop (@1)) 299938fd1498Szrj (cmp @1 @2) 300038fd1498Szrj (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) 300138fd1498Szrj && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)) 300238fd1498Szrj && single_use (@3)) 300338fd1498Szrj /* If @1 is negative we swap the sense of the comparison. */ 300438fd1498Szrj (if (tree_int_cst_sgn (@1) < 0) 300538fd1498Szrj (scmp @0 @2) 300638fd1498Szrj (cmp @0 @2)))))) 300738fd1498Szrj 300838fd1498Szrj/* Simplify comparison of something with itself. For IEEE 300938fd1498Szrj floating-point, we can only do some of these simplifications. */ 301038fd1498Szrj(for cmp (eq ge le) 301138fd1498Szrj (simplify 301238fd1498Szrj (cmp @0 @0) 301338fd1498Szrj (if (! FLOAT_TYPE_P (TREE_TYPE (@0)) 301438fd1498Szrj || ! HONOR_NANS (@0)) 301538fd1498Szrj { constant_boolean_node (true, type); } 301638fd1498Szrj (if (cmp != EQ_EXPR) 301738fd1498Szrj (eq @0 @0))))) 301838fd1498Szrj(for cmp (ne gt lt) 301938fd1498Szrj (simplify 302038fd1498Szrj (cmp @0 @0) 302138fd1498Szrj (if (cmp != NE_EXPR 302238fd1498Szrj || ! FLOAT_TYPE_P (TREE_TYPE (@0)) 302338fd1498Szrj || ! HONOR_NANS (@0)) 302438fd1498Szrj { constant_boolean_node (false, type); }))) 302538fd1498Szrj(for cmp (unle unge uneq) 302638fd1498Szrj (simplify 302738fd1498Szrj (cmp @0 @0) 302838fd1498Szrj { constant_boolean_node (true, type); })) 302938fd1498Szrj(for cmp (unlt ungt) 303038fd1498Szrj (simplify 303138fd1498Szrj (cmp @0 @0) 303238fd1498Szrj (unordered @0 @0))) 303338fd1498Szrj(simplify 303438fd1498Szrj (ltgt @0 @0) 303538fd1498Szrj (if (!flag_trapping_math) 303638fd1498Szrj { constant_boolean_node (false, type); })) 303738fd1498Szrj 303838fd1498Szrj/* Fold ~X op ~Y as Y op X. 
*/ 303938fd1498Szrj(for cmp (simple_comparison) 304038fd1498Szrj (simplify 304138fd1498Szrj (cmp (bit_not@2 @0) (bit_not@3 @1)) 304238fd1498Szrj (if (single_use (@2) && single_use (@3)) 304338fd1498Szrj (cmp @1 @0)))) 304438fd1498Szrj 304538fd1498Szrj/* Fold ~X op C as X op' ~C, where op' is the swapped comparison. */ 304638fd1498Szrj(for cmp (simple_comparison) 304738fd1498Szrj scmp (swapped_simple_comparison) 304838fd1498Szrj (simplify 304938fd1498Szrj (cmp (bit_not@2 @0) CONSTANT_CLASS_P@1) 305038fd1498Szrj (if (single_use (@2) 305138fd1498Szrj && (TREE_CODE (@1) == INTEGER_CST || TREE_CODE (@1) == VECTOR_CST)) 305238fd1498Szrj (scmp @0 (bit_not @1))))) 305338fd1498Szrj 305438fd1498Szrj(for cmp (simple_comparison) 305538fd1498Szrj /* Fold (double)float1 CMP (double)float2 into float1 CMP float2. */ 305638fd1498Szrj (simplify 305738fd1498Szrj (cmp (convert@2 @0) (convert? @1)) 305838fd1498Szrj (if (FLOAT_TYPE_P (TREE_TYPE (@0)) 305938fd1498Szrj && (DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@2)) 306038fd1498Szrj == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@0))) 306138fd1498Szrj && (DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@2)) 306238fd1498Szrj == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1)))) 306338fd1498Szrj (with 306438fd1498Szrj { 306538fd1498Szrj tree type1 = TREE_TYPE (@1); 306638fd1498Szrj if (TREE_CODE (@1) == REAL_CST && !DECIMAL_FLOAT_TYPE_P (type1)) 306738fd1498Szrj { 306838fd1498Szrj REAL_VALUE_TYPE orig = TREE_REAL_CST (@1); 306938fd1498Szrj if (TYPE_PRECISION (type1) > TYPE_PRECISION (float_type_node) 307038fd1498Szrj && exact_real_truncate (TYPE_MODE (float_type_node), &orig)) 307138fd1498Szrj type1 = float_type_node; 307238fd1498Szrj if (TYPE_PRECISION (type1) > TYPE_PRECISION (double_type_node) 307338fd1498Szrj && exact_real_truncate (TYPE_MODE (double_type_node), &orig)) 307438fd1498Szrj type1 = double_type_node; 307538fd1498Szrj } 307638fd1498Szrj tree newtype 307738fd1498Szrj = (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (type1) 307838fd1498Szrj ? 
TREE_TYPE (@0) : type1); 307938fd1498Szrj } 308038fd1498Szrj (if (TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (newtype)) 308138fd1498Szrj (cmp (convert:newtype @0) (convert:newtype @1)))))) 308238fd1498Szrj 308338fd1498Szrj (simplify 308438fd1498Szrj (cmp @0 REAL_CST@1) 308538fd1498Szrj /* IEEE doesn't distinguish +0 and -0 in comparisons. */ 308638fd1498Szrj (switch 308738fd1498Szrj /* a CMP (-0) -> a CMP 0 */ 308838fd1498Szrj (if (REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (@1))) 308938fd1498Szrj (cmp @0 { build_real (TREE_TYPE (@1), dconst0); })) 309038fd1498Szrj /* x != NaN is always true, other ops are always false. */ 309138fd1498Szrj (if (REAL_VALUE_ISNAN (TREE_REAL_CST (@1)) 309238fd1498Szrj && ! HONOR_SNANS (@1)) 309338fd1498Szrj { constant_boolean_node (cmp == NE_EXPR, type); }) 309438fd1498Szrj /* Fold comparisons against infinity. */ 309538fd1498Szrj (if (REAL_VALUE_ISINF (TREE_REAL_CST (@1)) 309638fd1498Szrj && MODE_HAS_INFINITIES (TYPE_MODE (TREE_TYPE (@1)))) 309738fd1498Szrj (with 309838fd1498Szrj { 309938fd1498Szrj REAL_VALUE_TYPE max; 310038fd1498Szrj enum tree_code code = cmp; 310138fd1498Szrj bool neg = REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)); 310238fd1498Szrj if (neg) 310338fd1498Szrj code = swap_tree_comparison (code); 310438fd1498Szrj } 310538fd1498Szrj (switch 310638fd1498Szrj /* x > +Inf is always false, if we ignore NaNs or exceptions. */ 310738fd1498Szrj (if (code == GT_EXPR 310838fd1498Szrj && !(HONOR_NANS (@0) && flag_trapping_math)) 310938fd1498Szrj { constant_boolean_node (false, type); }) 311038fd1498Szrj (if (code == LE_EXPR) 311138fd1498Szrj /* x <= +Inf is always true, if we don't care about NaNs. */ 311238fd1498Szrj (if (! HONOR_NANS (@0)) 311338fd1498Szrj { constant_boolean_node (true, type); } 311438fd1498Szrj /* x <= +Inf is the same as x == x, i.e. !isnan(x), but this loses 311538fd1498Szrj an "invalid" exception. 
*/ 311638fd1498Szrj (if (!flag_trapping_math) 311738fd1498Szrj (eq @0 @0)))) 311838fd1498Szrj /* x == +Inf and x >= +Inf are always equal to x > DBL_MAX, but 311938fd1498Szrj for == this introduces an exception for x a NaN. */ 312038fd1498Szrj (if ((code == EQ_EXPR && !(HONOR_NANS (@0) && flag_trapping_math)) 312138fd1498Szrj || code == GE_EXPR) 312238fd1498Szrj (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); } 312338fd1498Szrj (if (neg) 312438fd1498Szrj (lt @0 { build_real (TREE_TYPE (@0), max); }) 312538fd1498Szrj (gt @0 { build_real (TREE_TYPE (@0), max); })))) 312638fd1498Szrj /* x < +Inf is always equal to x <= DBL_MAX. */ 312738fd1498Szrj (if (code == LT_EXPR) 312838fd1498Szrj (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); } 312938fd1498Szrj (if (neg) 313038fd1498Szrj (ge @0 { build_real (TREE_TYPE (@0), max); }) 313138fd1498Szrj (le @0 { build_real (TREE_TYPE (@0), max); })))) 313238fd1498Szrj /* x != +Inf is always equal to !(x > DBL_MAX), but this introduces 313338fd1498Szrj an exception for x a NaN so use an unordered comparison. */ 313438fd1498Szrj (if (code == NE_EXPR) 313538fd1498Szrj (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); } 313638fd1498Szrj (if (! HONOR_NANS (@0)) 313738fd1498Szrj (if (neg) 313838fd1498Szrj (ge @0 { build_real (TREE_TYPE (@0), max); }) 313938fd1498Szrj (le @0 { build_real (TREE_TYPE (@0), max); })) 314038fd1498Szrj (if (neg) 314138fd1498Szrj (unge @0 { build_real (TREE_TYPE (@0), max); }) 314238fd1498Szrj (unle @0 { build_real (TREE_TYPE (@0), max); })))))))))) 314338fd1498Szrj 314438fd1498Szrj /* If this is a comparison of a real constant with a PLUS_EXPR 314538fd1498Szrj or a MINUS_EXPR of a real constant, we can convert it into a 314638fd1498Szrj comparison with a revised real constant as long as no overflow 314738fd1498Szrj occurs when unsafe_math_optimizations are enabled. 
*/ 314838fd1498Szrj (if (flag_unsafe_math_optimizations) 314938fd1498Szrj (for op (plus minus) 315038fd1498Szrj (simplify 315138fd1498Szrj (cmp (op @0 REAL_CST@1) REAL_CST@2) 315238fd1498Szrj (with 315338fd1498Szrj { 315438fd1498Szrj tree tem = const_binop (op == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR, 315538fd1498Szrj TREE_TYPE (@1), @2, @1); 315638fd1498Szrj } 315738fd1498Szrj (if (tem && !TREE_OVERFLOW (tem)) 315838fd1498Szrj (cmp @0 { tem; })))))) 315938fd1498Szrj 316038fd1498Szrj /* Likewise, we can simplify a comparison of a real constant with 316138fd1498Szrj a MINUS_EXPR whose first operand is also a real constant, i.e. 316238fd1498Szrj (c1 - x) < c2 becomes x > c1-c2. Reordering is allowed on 316338fd1498Szrj floating-point types only if -fassociative-math is set. */ 316438fd1498Szrj (if (flag_associative_math) 316538fd1498Szrj (simplify 316638fd1498Szrj (cmp (minus REAL_CST@0 @1) REAL_CST@2) 316738fd1498Szrj (with { tree tem = const_binop (MINUS_EXPR, TREE_TYPE (@1), @0, @2); } 316838fd1498Szrj (if (tem && !TREE_OVERFLOW (tem)) 316938fd1498Szrj (cmp { tem; } @1))))) 317038fd1498Szrj 317138fd1498Szrj /* Fold comparisons against built-in math functions. */ 317238fd1498Szrj (if (flag_unsafe_math_optimizations 317338fd1498Szrj && ! flag_errno_math) 317438fd1498Szrj (for sq (SQRT) 317538fd1498Szrj (simplify 317638fd1498Szrj (cmp (sq @0) REAL_CST@1) 317738fd1498Szrj (switch 317838fd1498Szrj (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1))) 317938fd1498Szrj (switch 318038fd1498Szrj /* sqrt(x) < y is always false, if y is negative. */ 318138fd1498Szrj (if (cmp == EQ_EXPR || cmp == LT_EXPR || cmp == LE_EXPR) 318238fd1498Szrj { constant_boolean_node (false, type); }) 318338fd1498Szrj /* sqrt(x) > y is always true, if y is negative and we 318438fd1498Szrj don't care about NaNs, i.e. negative values of x. 
*/ 318538fd1498Szrj (if (cmp == NE_EXPR || !HONOR_NANS (@0)) 318638fd1498Szrj { constant_boolean_node (true, type); }) 318738fd1498Szrj /* sqrt(x) > y is the same as x >= 0, if y is negative. */ 318838fd1498Szrj (ge @0 { build_real (TREE_TYPE (@0), dconst0); }))) 318938fd1498Szrj (if (real_equal (TREE_REAL_CST_PTR (@1), &dconst0)) 319038fd1498Szrj (switch 319138fd1498Szrj /* sqrt(x) < 0 is always false. */ 319238fd1498Szrj (if (cmp == LT_EXPR) 319338fd1498Szrj { constant_boolean_node (false, type); }) 319438fd1498Szrj /* sqrt(x) >= 0 is always true if we don't care about NaNs. */ 319538fd1498Szrj (if (cmp == GE_EXPR && !HONOR_NANS (@0)) 319638fd1498Szrj { constant_boolean_node (true, type); }) 319738fd1498Szrj /* sqrt(x) <= 0 -> x == 0. */ 319838fd1498Szrj (if (cmp == LE_EXPR) 319938fd1498Szrj (eq @0 @1)) 320038fd1498Szrj /* Otherwise sqrt(x) cmp 0 -> x cmp 0. Here cmp can be >=, >, 320138fd1498Szrj == or !=. In the last case: 320238fd1498Szrj 320338fd1498Szrj (sqrt(x) != 0) == (NaN != 0) == true == (x != 0) 320438fd1498Szrj 320538fd1498Szrj if x is negative or NaN. Due to -funsafe-math-optimizations, 320638fd1498Szrj the results for other x follow from natural arithmetic. */ 320738fd1498Szrj (cmp @0 @1))) 320838fd1498Szrj (if (cmp == GT_EXPR || cmp == GE_EXPR) 320938fd1498Szrj (with 321038fd1498Szrj { 321138fd1498Szrj REAL_VALUE_TYPE c2; 321238fd1498Szrj real_arithmetic (&c2, MULT_EXPR, 321338fd1498Szrj &TREE_REAL_CST (@1), &TREE_REAL_CST (@1)); 321438fd1498Szrj real_convert (&c2, TYPE_MODE (TREE_TYPE (@0)), &c2); 321538fd1498Szrj } 321638fd1498Szrj (if (REAL_VALUE_ISINF (c2)) 321738fd1498Szrj /* sqrt(x) > y is x == +Inf, when y is very large. */ 321838fd1498Szrj (if (HONOR_INFINITIES (@0)) 321938fd1498Szrj (eq @0 { build_real (TREE_TYPE (@0), c2); }) 322038fd1498Szrj { constant_boolean_node (false, type); }) 322138fd1498Szrj /* sqrt(x) > c is the same as x > c*c. 
*/ 322238fd1498Szrj (cmp @0 { build_real (TREE_TYPE (@0), c2); })))) 322338fd1498Szrj (if (cmp == LT_EXPR || cmp == LE_EXPR) 322438fd1498Szrj (with 322538fd1498Szrj { 322638fd1498Szrj REAL_VALUE_TYPE c2; 322738fd1498Szrj real_arithmetic (&c2, MULT_EXPR, 322838fd1498Szrj &TREE_REAL_CST (@1), &TREE_REAL_CST (@1)); 322938fd1498Szrj real_convert (&c2, TYPE_MODE (TREE_TYPE (@0)), &c2); 323038fd1498Szrj } 323138fd1498Szrj (if (REAL_VALUE_ISINF (c2)) 323238fd1498Szrj (switch 323338fd1498Szrj /* sqrt(x) < y is always true, when y is a very large 323438fd1498Szrj value and we don't care about NaNs or Infinities. */ 323538fd1498Szrj (if (! HONOR_NANS (@0) && ! HONOR_INFINITIES (@0)) 323638fd1498Szrj { constant_boolean_node (true, type); }) 323738fd1498Szrj /* sqrt(x) < y is x != +Inf when y is very large and we 323838fd1498Szrj don't care about NaNs. */ 323938fd1498Szrj (if (! HONOR_NANS (@0)) 324038fd1498Szrj (ne @0 { build_real (TREE_TYPE (@0), c2); })) 324138fd1498Szrj /* sqrt(x) < y is x >= 0 when y is very large and we 324238fd1498Szrj don't care about Infinities. */ 324338fd1498Szrj (if (! HONOR_INFINITIES (@0)) 324438fd1498Szrj (ge @0 { build_real (TREE_TYPE (@0), dconst0); })) 324538fd1498Szrj /* sqrt(x) < y is x >= 0 && x != +Inf, when y is large. */ 324638fd1498Szrj (if (GENERIC) 324738fd1498Szrj (truth_andif 324838fd1498Szrj (ge @0 { build_real (TREE_TYPE (@0), dconst0); }) 324938fd1498Szrj (ne @0 { build_real (TREE_TYPE (@0), c2); })))) 325038fd1498Szrj /* sqrt(x) < c is the same as x < c*c, if we ignore NaNs. */ 325138fd1498Szrj (if (! HONOR_NANS (@0)) 325238fd1498Szrj (cmp @0 { build_real (TREE_TYPE (@0), c2); }) 325338fd1498Szrj /* sqrt(x) < c is the same as x >= 0 && x < c*c. */ 325438fd1498Szrj (if (GENERIC) 325538fd1498Szrj (truth_andif 325638fd1498Szrj (ge @0 { build_real (TREE_TYPE (@0), dconst0); }) 325738fd1498Szrj (cmp @0 { build_real (TREE_TYPE (@0), c2); }))))))))) 325838fd1498Szrj /* Transform sqrt(x) cmp sqrt(y) -> x cmp y. 
*/ 325938fd1498Szrj (simplify 326038fd1498Szrj (cmp (sq @0) (sq @1)) 326138fd1498Szrj (if (! HONOR_NANS (@0)) 326238fd1498Szrj (cmp @0 @1)))))) 326338fd1498Szrj 326438fd1498Szrj/* Optimize various special cases of (FTYPE) N CMP CST. */ 326538fd1498Szrj(for cmp (lt le eq ne ge gt) 326638fd1498Szrj icmp (le le eq ne ge ge) 326738fd1498Szrj (simplify 326838fd1498Szrj (cmp (float @0) REAL_CST@1) 326938fd1498Szrj (if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (@1)) 327038fd1498Szrj && ! DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1))) 327138fd1498Szrj (with 327238fd1498Szrj { 327338fd1498Szrj tree itype = TREE_TYPE (@0); 327438fd1498Szrj signop isign = TYPE_SIGN (itype); 327538fd1498Szrj format_helper fmt (REAL_MODE_FORMAT (TYPE_MODE (TREE_TYPE (@1)))); 327638fd1498Szrj const REAL_VALUE_TYPE *cst = TREE_REAL_CST_PTR (@1); 327738fd1498Szrj /* Be careful to preserve any potential exceptions due to 327838fd1498Szrj NaNs. qNaNs are ok in == or != context. 327938fd1498Szrj TODO: relax under -fno-trapping-math or 328038fd1498Szrj -fno-signaling-nans. */ 328138fd1498Szrj bool exception_p 328238fd1498Szrj = real_isnan (cst) && (cst->signalling 328338fd1498Szrj || (cmp != EQ_EXPR && cmp != NE_EXPR)); 328438fd1498Szrj /* INT?_MIN is power-of-two so it takes 328538fd1498Szrj only one mantissa bit. */ 328638fd1498Szrj bool signed_p = isign == SIGNED; 328738fd1498Szrj bool itype_fits_ftype_p 328838fd1498Szrj = TYPE_PRECISION (itype) - signed_p <= significand_size (fmt); 328938fd1498Szrj } 329038fd1498Szrj /* TODO: allow non-fitting itype and SNaNs when 329138fd1498Szrj -fno-trapping-math. */ 329238fd1498Szrj (if (itype_fits_ftype_p && ! 
exception_p) 329338fd1498Szrj (with 329438fd1498Szrj { 329538fd1498Szrj REAL_VALUE_TYPE imin, imax; 329638fd1498Szrj real_from_integer (&imin, fmt, wi::min_value (itype), isign); 329738fd1498Szrj real_from_integer (&imax, fmt, wi::max_value (itype), isign); 329838fd1498Szrj 329938fd1498Szrj REAL_VALUE_TYPE icst; 330038fd1498Szrj if (cmp == GT_EXPR || cmp == GE_EXPR) 330138fd1498Szrj real_ceil (&icst, fmt, cst); 330238fd1498Szrj else if (cmp == LT_EXPR || cmp == LE_EXPR) 330338fd1498Szrj real_floor (&icst, fmt, cst); 330438fd1498Szrj else 330538fd1498Szrj real_trunc (&icst, fmt, cst); 330638fd1498Szrj 330738fd1498Szrj bool cst_int_p = !real_isnan (cst) && real_identical (&icst, cst); 330838fd1498Szrj 330938fd1498Szrj bool overflow_p = false; 331038fd1498Szrj wide_int icst_val 331138fd1498Szrj = real_to_integer (&icst, &overflow_p, TYPE_PRECISION (itype)); 331238fd1498Szrj } 331338fd1498Szrj (switch 331438fd1498Szrj /* Optimize cases when CST is outside of ITYPE's range. */ 331538fd1498Szrj (if (real_compare (LT_EXPR, cst, &imin)) 331638fd1498Szrj { constant_boolean_node (cmp == GT_EXPR || cmp == GE_EXPR || cmp == NE_EXPR, 331738fd1498Szrj type); }) 331838fd1498Szrj (if (real_compare (GT_EXPR, cst, &imax)) 331938fd1498Szrj { constant_boolean_node (cmp == LT_EXPR || cmp == LE_EXPR || cmp == NE_EXPR, 332038fd1498Szrj type); }) 332138fd1498Szrj /* Remove cast if CST is an integer representable by ITYPE. */ 332238fd1498Szrj (if (cst_int_p) 332338fd1498Szrj (cmp @0 { gcc_assert (!overflow_p); 332438fd1498Szrj wide_int_to_tree (itype, icst_val); }) 332538fd1498Szrj ) 332638fd1498Szrj /* When CST is fractional, optimize 332738fd1498Szrj (FTYPE) N == CST -> 0 332838fd1498Szrj (FTYPE) N != CST -> 1. */ 332938fd1498Szrj (if (cmp == EQ_EXPR || cmp == NE_EXPR) 333038fd1498Szrj { constant_boolean_node (cmp == NE_EXPR, type); }) 333138fd1498Szrj /* Otherwise replace with sensible integer constant. 
*/ 333238fd1498Szrj (with 333338fd1498Szrj { 333438fd1498Szrj gcc_checking_assert (!overflow_p); 333538fd1498Szrj } 333638fd1498Szrj (icmp @0 { wide_int_to_tree (itype, icst_val); }))))))))) 333738fd1498Szrj 333838fd1498Szrj/* Fold A /[ex] B CMP C to A CMP B * C. */ 333938fd1498Szrj(for cmp (eq ne) 334038fd1498Szrj (simplify 334138fd1498Szrj (cmp (exact_div @0 @1) INTEGER_CST@2) 334238fd1498Szrj (if (!integer_zerop (@1)) 334338fd1498Szrj (if (wi::to_wide (@2) == 0) 334438fd1498Szrj (cmp @0 @2) 334538fd1498Szrj (if (TREE_CODE (@1) == INTEGER_CST) 334638fd1498Szrj (with 334738fd1498Szrj { 334838fd1498Szrj bool ovf; 334938fd1498Szrj wide_int prod = wi::mul (wi::to_wide (@2), wi::to_wide (@1), 335038fd1498Szrj TYPE_SIGN (TREE_TYPE (@1)), &ovf); 335138fd1498Szrj } 335238fd1498Szrj (if (ovf) 335338fd1498Szrj { constant_boolean_node (cmp == NE_EXPR, type); } 335438fd1498Szrj (cmp @0 { wide_int_to_tree (TREE_TYPE (@0), prod); })))))))) 335538fd1498Szrj(for cmp (lt le gt ge) 335638fd1498Szrj (simplify 335738fd1498Szrj (cmp (exact_div @0 INTEGER_CST@1) INTEGER_CST@2) 335838fd1498Szrj (if (wi::gt_p (wi::to_wide (@1), 0, TYPE_SIGN (TREE_TYPE (@1)))) 335938fd1498Szrj (with 336038fd1498Szrj { 336138fd1498Szrj bool ovf; 336238fd1498Szrj wide_int prod = wi::mul (wi::to_wide (@2), wi::to_wide (@1), 336338fd1498Szrj TYPE_SIGN (TREE_TYPE (@1)), &ovf); 336438fd1498Szrj } 336538fd1498Szrj (if (ovf) 336638fd1498Szrj { constant_boolean_node (wi::lt_p (wi::to_wide (@2), 0, 336738fd1498Szrj TYPE_SIGN (TREE_TYPE (@2))) 336838fd1498Szrj != (cmp == LT_EXPR || cmp == LE_EXPR), type); } 336938fd1498Szrj (cmp @0 { wide_int_to_tree (TREE_TYPE (@0), prod); })))))) 337038fd1498Szrj 337138fd1498Szrj/* Unordered tests if either argument is a NaN. 
*/ 337238fd1498Szrj(simplify 337338fd1498Szrj (bit_ior (unordered @0 @0) (unordered @1 @1)) 337438fd1498Szrj (if (types_match (@0, @1)) 337538fd1498Szrj (unordered @0 @1))) 337638fd1498Szrj(simplify 337738fd1498Szrj (bit_and (ordered @0 @0) (ordered @1 @1)) 337838fd1498Szrj (if (types_match (@0, @1)) 337938fd1498Szrj (ordered @0 @1))) 338038fd1498Szrj(simplify 338138fd1498Szrj (bit_ior:c (unordered @0 @0) (unordered:c@2 @0 @1)) 338238fd1498Szrj @2) 338338fd1498Szrj(simplify 338438fd1498Szrj (bit_and:c (ordered @0 @0) (ordered:c@2 @0 @1)) 338538fd1498Szrj @2) 338638fd1498Szrj 338738fd1498Szrj/* Simple range test simplifications. */ 338838fd1498Szrj/* A < B || A >= B -> true. */ 338938fd1498Szrj(for test1 (lt le le le ne ge) 339038fd1498Szrj test2 (ge gt ge ne eq ne) 339138fd1498Szrj (simplify 339238fd1498Szrj (bit_ior:c (test1 @0 @1) (test2 @0 @1)) 339338fd1498Szrj (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) 339438fd1498Szrj || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@0))) 339538fd1498Szrj { constant_boolean_node (true, type); }))) 339638fd1498Szrj/* A < B && A >= B -> false. */ 339738fd1498Szrj(for test1 (lt lt lt le ne eq) 339838fd1498Szrj test2 (ge gt eq gt eq gt) 339938fd1498Szrj (simplify 340038fd1498Szrj (bit_and:c (test1 @0 @1) (test2 @0 @1)) 340138fd1498Szrj (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) 340238fd1498Szrj || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@0))) 340338fd1498Szrj { constant_boolean_node (false, type); }))) 340438fd1498Szrj 340538fd1498Szrj/* A & (2**N - 1) <= 2**K - 1 -> A & (2**N - 2**K) == 0 340638fd1498Szrj A & (2**N - 1) > 2**K - 1 -> A & (2**N - 2**K) != 0 340738fd1498Szrj 340838fd1498Szrj Note that comparisons 340938fd1498Szrj A & (2**N - 1) < 2**K -> A & (2**N - 2**K) == 0 341038fd1498Szrj A & (2**N - 1) >= 2**K -> A & (2**N - 2**K) != 0 341138fd1498Szrj will be canonicalized to above so there's no need to 341238fd1498Szrj consider them here. 
341338fd1498Szrj */ 341438fd1498Szrj 341538fd1498Szrj(for cmp (le gt) 341638fd1498Szrj eqcmp (eq ne) 341738fd1498Szrj (simplify 341838fd1498Szrj (cmp (bit_and@0 @1 INTEGER_CST@2) INTEGER_CST@3) 341938fd1498Szrj (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))) 342038fd1498Szrj (with 342138fd1498Szrj { 342238fd1498Szrj tree ty = TREE_TYPE (@0); 342338fd1498Szrj unsigned prec = TYPE_PRECISION (ty); 342438fd1498Szrj wide_int mask = wi::to_wide (@2, prec); 342538fd1498Szrj wide_int rhs = wi::to_wide (@3, prec); 342638fd1498Szrj signop sgn = TYPE_SIGN (ty); 342738fd1498Szrj } 342838fd1498Szrj (if ((mask & (mask + 1)) == 0 && wi::gt_p (rhs, 0, sgn) 342938fd1498Szrj && (rhs & (rhs + 1)) == 0 && wi::ge_p (mask, rhs, sgn)) 343038fd1498Szrj (eqcmp (bit_and @1 { wide_int_to_tree (ty, mask - rhs); }) 343138fd1498Szrj { build_zero_cst (ty); })))))) 343238fd1498Szrj 343338fd1498Szrj/* -A CMP -B -> B CMP A. */ 343438fd1498Szrj(for cmp (tcc_comparison) 343538fd1498Szrj scmp (swapped_tcc_comparison) 343638fd1498Szrj (simplify 343738fd1498Szrj (cmp (negate @0) (negate @1)) 343838fd1498Szrj (if (FLOAT_TYPE_P (TREE_TYPE (@0)) 343938fd1498Szrj || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) 344038fd1498Szrj && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))) 344138fd1498Szrj (scmp @0 @1))) 344238fd1498Szrj (simplify 344338fd1498Szrj (cmp (negate @0) CONSTANT_CLASS_P@1) 344438fd1498Szrj (if (FLOAT_TYPE_P (TREE_TYPE (@0)) 344538fd1498Szrj || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) 344638fd1498Szrj && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))) 344738fd1498Szrj (with { tree tem = const_unop (NEGATE_EXPR, TREE_TYPE (@0), @1); } 344838fd1498Szrj (if (tem && !TREE_OVERFLOW (tem)) 344938fd1498Szrj (scmp @0 { tem; })))))) 345038fd1498Szrj 345138fd1498Szrj/* Convert ABS_EXPR<x> == 0 or ABS_EXPR<x> != 0 to x == 0 or x != 0. 
*/ 345238fd1498Szrj(for op (eq ne) 345338fd1498Szrj (simplify 345438fd1498Szrj (op (abs @0) zerop@1) 345538fd1498Szrj (op @0 @1))) 345638fd1498Szrj 345738fd1498Szrj/* From fold_sign_changed_comparison and fold_widened_comparison. 345838fd1498Szrj FIXME: the lack of symmetry is disturbing. */ 345938fd1498Szrj(for cmp (simple_comparison) 346038fd1498Szrj (simplify 346138fd1498Szrj (cmp (convert@0 @00) (convert?@1 @10)) 346238fd1498Szrj (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) 346338fd1498Szrj /* Disable this optimization if we're casting a function pointer 346438fd1498Szrj type on targets that require function pointer canonicalization. */ 346538fd1498Szrj && !(targetm.have_canonicalize_funcptr_for_compare () 3466*58e805e6Szrj && POINTER_TYPE_P (TREE_TYPE (@00)) 3467*58e805e6Szrj && FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@00)))) 346838fd1498Szrj && single_use (@0)) 346938fd1498Szrj (if (TYPE_PRECISION (TREE_TYPE (@00)) == TYPE_PRECISION (TREE_TYPE (@0)) 347038fd1498Szrj && (TREE_CODE (@10) == INTEGER_CST 347138fd1498Szrj || @1 != @10) 347238fd1498Szrj && (TYPE_UNSIGNED (TREE_TYPE (@00)) == TYPE_UNSIGNED (TREE_TYPE (@0)) 347338fd1498Szrj || cmp == NE_EXPR 347438fd1498Szrj || cmp == EQ_EXPR) 347538fd1498Szrj && !POINTER_TYPE_P (TREE_TYPE (@00))) 347638fd1498Szrj /* ??? The special-casing of INTEGER_CST conversion was in the original 347738fd1498Szrj code and here to avoid a spurious overflow flag on the resulting 347838fd1498Szrj constant which fold_convert produces. */ 347938fd1498Szrj (if (TREE_CODE (@1) == INTEGER_CST) 348038fd1498Szrj (cmp @00 { force_fit_type (TREE_TYPE (@00), wi::to_widest (@1), 0, 348138fd1498Szrj TREE_OVERFLOW (@1)); }) 348238fd1498Szrj (cmp @00 (convert @1))) 348338fd1498Szrj 348438fd1498Szrj (if (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (TREE_TYPE (@00))) 348538fd1498Szrj /* If possible, express the comparison in the shorter mode. 
*/ 348638fd1498Szrj (if ((cmp == EQ_EXPR || cmp == NE_EXPR 348738fd1498Szrj || TYPE_UNSIGNED (TREE_TYPE (@0)) == TYPE_UNSIGNED (TREE_TYPE (@00)) 348838fd1498Szrj || (!TYPE_UNSIGNED (TREE_TYPE (@0)) 348938fd1498Szrj && TYPE_UNSIGNED (TREE_TYPE (@00)))) 349038fd1498Szrj && (types_match (TREE_TYPE (@10), TREE_TYPE (@00)) 349138fd1498Szrj || ((TYPE_PRECISION (TREE_TYPE (@00)) 349238fd1498Szrj >= TYPE_PRECISION (TREE_TYPE (@10))) 349338fd1498Szrj && (TYPE_UNSIGNED (TREE_TYPE (@00)) 349438fd1498Szrj == TYPE_UNSIGNED (TREE_TYPE (@10)))) 349538fd1498Szrj || (TREE_CODE (@10) == INTEGER_CST 349638fd1498Szrj && INTEGRAL_TYPE_P (TREE_TYPE (@00)) 349738fd1498Szrj && int_fits_type_p (@10, TREE_TYPE (@00))))) 349838fd1498Szrj (cmp @00 (convert @10)) 349938fd1498Szrj (if (TREE_CODE (@10) == INTEGER_CST 350038fd1498Szrj && INTEGRAL_TYPE_P (TREE_TYPE (@00)) 350138fd1498Szrj && !int_fits_type_p (@10, TREE_TYPE (@00))) 350238fd1498Szrj (with 350338fd1498Szrj { 350438fd1498Szrj tree min = lower_bound_in_type (TREE_TYPE (@10), TREE_TYPE (@00)); 350538fd1498Szrj tree max = upper_bound_in_type (TREE_TYPE (@10), TREE_TYPE (@00)); 350638fd1498Szrj bool above = integer_nonzerop (const_binop (LT_EXPR, type, max, @10)); 350738fd1498Szrj bool below = integer_nonzerop (const_binop (LT_EXPR, type, @10, min)); 350838fd1498Szrj } 350938fd1498Szrj (if (above || below) 351038fd1498Szrj (if (cmp == EQ_EXPR || cmp == NE_EXPR) 351138fd1498Szrj { constant_boolean_node (cmp == EQ_EXPR ? false : true, type); } 351238fd1498Szrj (if (cmp == LT_EXPR || cmp == LE_EXPR) 351338fd1498Szrj { constant_boolean_node (above ? true : false, type); } 351438fd1498Szrj (if (cmp == GT_EXPR || cmp == GE_EXPR) 351538fd1498Szrj { constant_boolean_node (above ? false : true, type); })))))))))))) 351638fd1498Szrj 351738fd1498Szrj(for cmp (eq ne) 351838fd1498Szrj /* A local variable can never be pointed to by 351938fd1498Szrj the default SSA name of an incoming parameter. 
352038fd1498Szrj SSA names are canonicalized to 2nd place. */ 352138fd1498Szrj (simplify 352238fd1498Szrj (cmp addr@0 SSA_NAME@1) 352338fd1498Szrj (if (SSA_NAME_IS_DEFAULT_DEF (@1) 352438fd1498Szrj && TREE_CODE (SSA_NAME_VAR (@1)) == PARM_DECL) 352538fd1498Szrj (with { tree base = get_base_address (TREE_OPERAND (@0, 0)); } 352638fd1498Szrj (if (TREE_CODE (base) == VAR_DECL 352738fd1498Szrj && auto_var_in_fn_p (base, current_function_decl)) 352838fd1498Szrj (if (cmp == NE_EXPR) 352938fd1498Szrj { constant_boolean_node (true, type); } 353038fd1498Szrj { constant_boolean_node (false, type); })))))) 353138fd1498Szrj 353238fd1498Szrj/* Equality compare simplifications from fold_binary */ 353338fd1498Szrj(for cmp (eq ne) 353438fd1498Szrj 353538fd1498Szrj /* If we have (A | C) == D where C & ~D != 0, convert this into 0. 353638fd1498Szrj Similarly for NE_EXPR. */ 353738fd1498Szrj (simplify 353838fd1498Szrj (cmp (convert?@3 (bit_ior @0 INTEGER_CST@1)) INTEGER_CST@2) 353938fd1498Szrj (if (tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0)) 354038fd1498Szrj && wi::bit_and_not (wi::to_wide (@1), wi::to_wide (@2)) != 0) 354138fd1498Szrj { constant_boolean_node (cmp == NE_EXPR, type); })) 354238fd1498Szrj 354338fd1498Szrj /* (X ^ Y) == 0 becomes X == Y, and (X ^ Y) != 0 becomes X != Y. */ 354438fd1498Szrj (simplify 354538fd1498Szrj (cmp (bit_xor @0 @1) integer_zerop) 354638fd1498Szrj (cmp @0 @1)) 354738fd1498Szrj 354838fd1498Szrj /* (X ^ Y) == Y becomes X == 0. 354938fd1498Szrj Likewise (X ^ Y) == X becomes Y == 0. */ 355038fd1498Szrj (simplify 355138fd1498Szrj (cmp:c (bit_xor:c @0 @1) @0) 355238fd1498Szrj (cmp @1 { build_zero_cst (TREE_TYPE (@1)); })) 355338fd1498Szrj 355438fd1498Szrj /* (X ^ C1) op C2 can be rewritten as X op (C1 ^ C2). 
*/ 355538fd1498Szrj (simplify 355638fd1498Szrj (cmp (convert?@3 (bit_xor @0 INTEGER_CST@1)) INTEGER_CST@2) 355738fd1498Szrj (if (tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0))) 355838fd1498Szrj (cmp @0 (bit_xor @1 (convert @2))))) 355938fd1498Szrj 356038fd1498Szrj (simplify 356138fd1498Szrj (cmp (convert? addr@0) integer_zerop) 356238fd1498Szrj (if (tree_single_nonzero_warnv_p (@0, NULL)) 356338fd1498Szrj { constant_boolean_node (cmp == NE_EXPR, type); }))) 356438fd1498Szrj 356538fd1498Szrj/* If we have (A & C) == C where C is a power of 2, convert this into 356638fd1498Szrj (A & C) != 0. Similarly for NE_EXPR. */ 356738fd1498Szrj(for cmp (eq ne) 356838fd1498Szrj icmp (ne eq) 356938fd1498Szrj (simplify 357038fd1498Szrj (cmp (bit_and@2 @0 integer_pow2p@1) @1) 357138fd1498Szrj (icmp @2 { build_zero_cst (TREE_TYPE (@0)); }))) 357238fd1498Szrj 357338fd1498Szrj/* If we have (A & C) != 0 ? D : 0 where C and D are powers of 2, 357438fd1498Szrj convert this into a shift followed by ANDing with D. */ 357538fd1498Szrj(simplify 357638fd1498Szrj (cond 357738fd1498Szrj (ne (bit_and @0 integer_pow2p@1) integer_zerop) 357838fd1498Szrj INTEGER_CST@2 integer_zerop) 357938fd1498Szrj (if (integer_pow2p (@2)) 358038fd1498Szrj (with { 358138fd1498Szrj int shift = (wi::exact_log2 (wi::to_wide (@2)) 358238fd1498Szrj - wi::exact_log2 (wi::to_wide (@1))); 358338fd1498Szrj } 358438fd1498Szrj (if (shift > 0) 358538fd1498Szrj (bit_and 358638fd1498Szrj (lshift (convert @0) { build_int_cst (integer_type_node, shift); }) @2) 358738fd1498Szrj (bit_and 358838fd1498Szrj (convert (rshift @0 { build_int_cst (integer_type_node, -shift); })) 358938fd1498Szrj @2))))) 359038fd1498Szrj 359138fd1498Szrj/* If we have (A & C) != 0 where C is the sign bit of A, convert 359238fd1498Szrj this into A < 0. Similarly for (A & C) == 0 into A >= 0. 
*/ 359338fd1498Szrj(for cmp (eq ne) 359438fd1498Szrj ncmp (ge lt) 359538fd1498Szrj (simplify 359638fd1498Szrj (cmp (bit_and (convert?@2 @0) integer_pow2p@1) integer_zerop) 359738fd1498Szrj (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) 359838fd1498Szrj && type_has_mode_precision_p (TREE_TYPE (@0)) 359938fd1498Szrj && element_precision (@2) >= element_precision (@0) 360038fd1498Szrj && wi::only_sign_bit_p (wi::to_wide (@1), element_precision (@0))) 360138fd1498Szrj (with { tree stype = signed_type_for (TREE_TYPE (@0)); } 360238fd1498Szrj (ncmp (convert:stype @0) { build_zero_cst (stype); }))))) 360338fd1498Szrj 360438fd1498Szrj/* If we have A < 0 ? C : 0 where C is a power of 2, convert 360538fd1498Szrj this into a right shift or sign extension followed by ANDing with C. */ 360638fd1498Szrj(simplify 360738fd1498Szrj (cond 360838fd1498Szrj (lt @0 integer_zerop) 360938fd1498Szrj INTEGER_CST@1 integer_zerop) 361038fd1498Szrj (if (integer_pow2p (@1) 361138fd1498Szrj && !TYPE_UNSIGNED (TREE_TYPE (@0))) 361238fd1498Szrj (with { 361338fd1498Szrj int shift = element_precision (@0) - wi::exact_log2 (wi::to_wide (@1)) - 1; 361438fd1498Szrj } 361538fd1498Szrj (if (shift >= 0) 361638fd1498Szrj (bit_and 361738fd1498Szrj (convert (rshift @0 { build_int_cst (integer_type_node, shift); })) 361838fd1498Szrj @1) 361938fd1498Szrj /* Otherwise ctype must be wider than TREE_TYPE (@0) and pure 362038fd1498Szrj sign extension followed by AND with C will achieve the effect. */ 362138fd1498Szrj (bit_and (convert @0) @1))))) 362238fd1498Szrj 362338fd1498Szrj/* When the addresses are not directly of decls compare base and offset. 362438fd1498Szrj This implements some remaining parts of fold_comparison address 362538fd1498Szrj comparisons but still no complete part of it. Still it is good 362638fd1498Szrj enough to make fold_stmt not regress when not dispatching to fold_binary. 
*/ 362738fd1498Szrj(for cmp (simple_comparison) 362838fd1498Szrj (simplify 362938fd1498Szrj (cmp (convert1?@2 addr@0) (convert2? addr@1)) 363038fd1498Szrj (with 363138fd1498Szrj { 363238fd1498Szrj poly_int64 off0, off1; 363338fd1498Szrj tree base0 = get_addr_base_and_unit_offset (TREE_OPERAND (@0, 0), &off0); 363438fd1498Szrj tree base1 = get_addr_base_and_unit_offset (TREE_OPERAND (@1, 0), &off1); 363538fd1498Szrj if (base0 && TREE_CODE (base0) == MEM_REF) 363638fd1498Szrj { 363738fd1498Szrj off0 += mem_ref_offset (base0).force_shwi (); 363838fd1498Szrj base0 = TREE_OPERAND (base0, 0); 363938fd1498Szrj } 364038fd1498Szrj if (base1 && TREE_CODE (base1) == MEM_REF) 364138fd1498Szrj { 364238fd1498Szrj off1 += mem_ref_offset (base1).force_shwi (); 364338fd1498Szrj base1 = TREE_OPERAND (base1, 0); 364438fd1498Szrj } 364538fd1498Szrj } 364638fd1498Szrj (if (base0 && base1) 364738fd1498Szrj (with 364838fd1498Szrj { 364938fd1498Szrj int equal = 2; 365038fd1498Szrj /* Punt in GENERIC on variables with value expressions; 365138fd1498Szrj the value expressions might point to fields/elements 365238fd1498Szrj of other vars etc. 
*/ 365338fd1498Szrj if (GENERIC 365438fd1498Szrj && ((VAR_P (base0) && DECL_HAS_VALUE_EXPR_P (base0)) 365538fd1498Szrj || (VAR_P (base1) && DECL_HAS_VALUE_EXPR_P (base1)))) 365638fd1498Szrj ; 365738fd1498Szrj else if (decl_in_symtab_p (base0) 365838fd1498Szrj && decl_in_symtab_p (base1)) 365938fd1498Szrj equal = symtab_node::get_create (base0) 366038fd1498Szrj ->equal_address_to (symtab_node::get_create (base1)); 366138fd1498Szrj else if ((DECL_P (base0) 366238fd1498Szrj || TREE_CODE (base0) == SSA_NAME 366338fd1498Szrj || TREE_CODE (base0) == STRING_CST) 366438fd1498Szrj && (DECL_P (base1) 366538fd1498Szrj || TREE_CODE (base1) == SSA_NAME 366638fd1498Szrj || TREE_CODE (base1) == STRING_CST)) 366738fd1498Szrj equal = (base0 == base1); 366838fd1498Szrj } 366938fd1498Szrj (if (equal == 1 367038fd1498Szrj && (cmp == EQ_EXPR || cmp == NE_EXPR 367138fd1498Szrj /* If the offsets are equal we can ignore overflow. */ 367238fd1498Szrj || known_eq (off0, off1) 367338fd1498Szrj || TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)) 367438fd1498Szrj /* Or if we compare using pointers to decls or strings. 
*/ 367538fd1498Szrj || (POINTER_TYPE_P (TREE_TYPE (@2)) 367638fd1498Szrj && (DECL_P (base0) || TREE_CODE (base0) == STRING_CST)))) 367738fd1498Szrj (switch 367838fd1498Szrj (if (cmp == EQ_EXPR && (known_eq (off0, off1) || known_ne (off0, off1))) 367938fd1498Szrj { constant_boolean_node (known_eq (off0, off1), type); }) 368038fd1498Szrj (if (cmp == NE_EXPR && (known_eq (off0, off1) || known_ne (off0, off1))) 368138fd1498Szrj { constant_boolean_node (known_ne (off0, off1), type); }) 368238fd1498Szrj (if (cmp == LT_EXPR && (known_lt (off0, off1) || known_ge (off0, off1))) 368338fd1498Szrj { constant_boolean_node (known_lt (off0, off1), type); }) 368438fd1498Szrj (if (cmp == LE_EXPR && (known_le (off0, off1) || known_gt (off0, off1))) 368538fd1498Szrj { constant_boolean_node (known_le (off0, off1), type); }) 368638fd1498Szrj (if (cmp == GE_EXPR && (known_ge (off0, off1) || known_lt (off0, off1))) 368738fd1498Szrj { constant_boolean_node (known_ge (off0, off1), type); }) 368838fd1498Szrj (if (cmp == GT_EXPR && (known_gt (off0, off1) || known_le (off0, off1))) 368938fd1498Szrj { constant_boolean_node (known_gt (off0, off1), type); })) 369038fd1498Szrj (if (equal == 0 369138fd1498Szrj && DECL_P (base0) && DECL_P (base1) 369238fd1498Szrj /* If we compare this as integers require equal offset. */ 369338fd1498Szrj && (!INTEGRAL_TYPE_P (TREE_TYPE (@2)) 369438fd1498Szrj || known_eq (off0, off1))) 369538fd1498Szrj (switch 369638fd1498Szrj (if (cmp == EQ_EXPR) 369738fd1498Szrj { constant_boolean_node (false, type); }) 369838fd1498Szrj (if (cmp == NE_EXPR) 369938fd1498Szrj { constant_boolean_node (true, type); }))))))))) 370038fd1498Szrj 370138fd1498Szrj/* Simplify pointer equality compares using PTA. 
*/ 370238fd1498Szrj(for neeq (ne eq) 370338fd1498Szrj (simplify 370438fd1498Szrj (neeq @0 @1) 370538fd1498Szrj (if (POINTER_TYPE_P (TREE_TYPE (@0)) 370638fd1498Szrj && ptrs_compare_unequal (@0, @1)) 370738fd1498Szrj { constant_boolean_node (neeq != EQ_EXPR, type); }))) 370838fd1498Szrj 370938fd1498Szrj/* PR70920: Transform (intptr_t)x eq/ne CST to x eq/ne (typeof x) CST. 371038fd1498Szrj and (typeof ptr_cst) x eq/ne ptr_cst to x eq/ne (typeof x) CST. 371138fd1498Szrj Disable the transform if either operand is pointer to function. 371238fd1498Szrj This broke pr22051-2.c for arm where function pointer 371338fd1498Szrj canonicalizaion is not wanted. */ 371438fd1498Szrj 371538fd1498Szrj(for cmp (ne eq) 371638fd1498Szrj (simplify 371738fd1498Szrj (cmp (convert @0) INTEGER_CST@1) 371838fd1498Szrj (if (((POINTER_TYPE_P (TREE_TYPE (@0)) 371938fd1498Szrj && !FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@0))) 372038fd1498Szrj && INTEGRAL_TYPE_P (TREE_TYPE (@1))) 372138fd1498Szrj || (INTEGRAL_TYPE_P (TREE_TYPE (@0)) 372238fd1498Szrj && POINTER_TYPE_P (TREE_TYPE (@1)) 372338fd1498Szrj && !FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@1))))) 372438fd1498Szrj && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1))) 372538fd1498Szrj (cmp @0 (convert @1))))) 372638fd1498Szrj 372738fd1498Szrj/* Non-equality compare simplifications from fold_binary */ 372838fd1498Szrj(for cmp (lt gt le ge) 372938fd1498Szrj /* Comparisons with the highest or lowest possible integer of 373038fd1498Szrj the specified precision will have known values. 
*/ 373138fd1498Szrj (simplify 373238fd1498Szrj (cmp (convert?@2 @0) INTEGER_CST@1) 373338fd1498Szrj (if ((INTEGRAL_TYPE_P (TREE_TYPE (@1)) || POINTER_TYPE_P (TREE_TYPE (@1))) 373438fd1498Szrj && tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0))) 373538fd1498Szrj (with 373638fd1498Szrj { 373738fd1498Szrj tree arg1_type = TREE_TYPE (@1); 373838fd1498Szrj unsigned int prec = TYPE_PRECISION (arg1_type); 373938fd1498Szrj wide_int max = wi::max_value (arg1_type); 374038fd1498Szrj wide_int signed_max = wi::max_value (prec, SIGNED); 374138fd1498Szrj wide_int min = wi::min_value (arg1_type); 374238fd1498Szrj } 374338fd1498Szrj (switch 374438fd1498Szrj (if (wi::to_wide (@1) == max) 374538fd1498Szrj (switch 374638fd1498Szrj (if (cmp == GT_EXPR) 374738fd1498Szrj { constant_boolean_node (false, type); }) 374838fd1498Szrj (if (cmp == GE_EXPR) 374938fd1498Szrj (eq @2 @1)) 375038fd1498Szrj (if (cmp == LE_EXPR) 375138fd1498Szrj { constant_boolean_node (true, type); }) 375238fd1498Szrj (if (cmp == LT_EXPR) 375338fd1498Szrj (ne @2 @1)))) 375438fd1498Szrj (if (wi::to_wide (@1) == min) 375538fd1498Szrj (switch 375638fd1498Szrj (if (cmp == LT_EXPR) 375738fd1498Szrj { constant_boolean_node (false, type); }) 375838fd1498Szrj (if (cmp == LE_EXPR) 375938fd1498Szrj (eq @2 @1)) 376038fd1498Szrj (if (cmp == GE_EXPR) 376138fd1498Szrj { constant_boolean_node (true, type); }) 376238fd1498Szrj (if (cmp == GT_EXPR) 376338fd1498Szrj (ne @2 @1)))) 376438fd1498Szrj (if (wi::to_wide (@1) == max - 1) 376538fd1498Szrj (switch 376638fd1498Szrj (if (cmp == GT_EXPR) 376738fd1498Szrj (eq @2 { wide_int_to_tree (TREE_TYPE (@1), wi::to_wide (@1) + 1); })) 376838fd1498Szrj (if (cmp == LE_EXPR) 376938fd1498Szrj (ne @2 { wide_int_to_tree (TREE_TYPE (@1), wi::to_wide (@1) + 1); })))) 377038fd1498Szrj (if (wi::to_wide (@1) == min + 1) 377138fd1498Szrj (switch 377238fd1498Szrj (if (cmp == GE_EXPR) 377338fd1498Szrj (ne @2 { wide_int_to_tree (TREE_TYPE (@1), wi::to_wide (@1) - 1); })) 377438fd1498Szrj (if (cmp == 
LT_EXPR) 377538fd1498Szrj (eq @2 { wide_int_to_tree (TREE_TYPE (@1), wi::to_wide (@1) - 1); })))) 377638fd1498Szrj (if (wi::to_wide (@1) == signed_max 377738fd1498Szrj && TYPE_UNSIGNED (arg1_type) 377838fd1498Szrj /* We will flip the signedness of the comparison operator 377938fd1498Szrj associated with the mode of @1, so the sign bit is 378038fd1498Szrj specified by this mode. Check that @1 is the signed 378138fd1498Szrj max associated with this sign bit. */ 378238fd1498Szrj && prec == GET_MODE_PRECISION (SCALAR_INT_TYPE_MODE (arg1_type)) 378338fd1498Szrj /* signed_type does not work on pointer types. */ 378438fd1498Szrj && INTEGRAL_TYPE_P (arg1_type)) 378538fd1498Szrj /* The following case also applies to X < signed_max+1 378638fd1498Szrj and X >= signed_max+1 because previous transformations. */ 378738fd1498Szrj (if (cmp == LE_EXPR || cmp == GT_EXPR) 378838fd1498Szrj (with { tree st = signed_type_for (arg1_type); } 378938fd1498Szrj (if (cmp == LE_EXPR) 379038fd1498Szrj (ge (convert:st @0) { build_zero_cst (st); }) 379138fd1498Szrj (lt (convert:st @0) { build_zero_cst (st); })))))))))) 379238fd1498Szrj 379338fd1498Szrj(for cmp (unordered ordered unlt unle ungt unge uneq ltgt) 379438fd1498Szrj /* If the second operand is NaN, the result is constant. */ 379538fd1498Szrj (simplify 379638fd1498Szrj (cmp @0 REAL_CST@1) 379738fd1498Szrj (if (REAL_VALUE_ISNAN (TREE_REAL_CST (@1)) 379838fd1498Szrj && (cmp != LTGT_EXPR || ! flag_trapping_math)) 379938fd1498Szrj { constant_boolean_node (cmp == ORDERED_EXPR || cmp == LTGT_EXPR 380038fd1498Szrj ? false : true, type); }))) 380138fd1498Szrj 380238fd1498Szrj/* bool_var != 0 becomes bool_var. */ 380338fd1498Szrj(simplify 380438fd1498Szrj (ne @0 integer_zerop) 380538fd1498Szrj (if (TREE_CODE (TREE_TYPE (@0)) == BOOLEAN_TYPE 380638fd1498Szrj && types_match (type, TREE_TYPE (@0))) 380738fd1498Szrj (non_lvalue @0))) 380838fd1498Szrj/* bool_var == 1 becomes bool_var. 
*/ 380938fd1498Szrj(simplify 381038fd1498Szrj (eq @0 integer_onep) 381138fd1498Szrj (if (TREE_CODE (TREE_TYPE (@0)) == BOOLEAN_TYPE 381238fd1498Szrj && types_match (type, TREE_TYPE (@0))) 381338fd1498Szrj (non_lvalue @0))) 381438fd1498Szrj/* Do not handle 381538fd1498Szrj bool_var == 0 becomes !bool_var or 381638fd1498Szrj bool_var != 1 becomes !bool_var 381738fd1498Szrj here because that only is good in assignment context as long 381838fd1498Szrj as we require a tcc_comparison in GIMPLE_CONDs where we'd 381938fd1498Szrj replace if (x == 0) with tem = ~x; if (tem != 0) which is 382038fd1498Szrj clearly less optimal and which we'll transform again in forwprop. */ 382138fd1498Szrj 382238fd1498Szrj/* When one argument is a constant, overflow detection can be simplified. 382338fd1498Szrj Currently restricted to single use so as not to interfere too much with 382438fd1498Szrj ADD_OVERFLOW detection in tree-ssa-math-opts.c. 382538fd1498Szrj A + CST CMP A -> A CMP' CST' */ 382638fd1498Szrj(for cmp (lt le ge gt) 382738fd1498Szrj out (gt gt le le) 382838fd1498Szrj (simplify 382938fd1498Szrj (cmp:c (plus@2 @0 INTEGER_CST@1) @0) 383038fd1498Szrj (if (TYPE_UNSIGNED (TREE_TYPE (@0)) 383138fd1498Szrj && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)) 383238fd1498Szrj && wi::to_wide (@1) != 0 383338fd1498Szrj && single_use (@2)) 383438fd1498Szrj (with { unsigned int prec = TYPE_PRECISION (TREE_TYPE (@0)); } 383538fd1498Szrj (out @0 { wide_int_to_tree (TREE_TYPE (@0), 383638fd1498Szrj wi::max_value (prec, UNSIGNED) 383738fd1498Szrj - wi::to_wide (@1)); }))))) 383838fd1498Szrj 383938fd1498Szrj/* To detect overflow in unsigned A - B, A < B is simpler than A - B > A. 384038fd1498Szrj However, the detection logic for SUB_OVERFLOW in tree-ssa-math-opts.c 384138fd1498Szrj expects the long form, so we restrict the transformation for now. 
*/ 384238fd1498Szrj(for cmp (gt le) 384338fd1498Szrj (simplify 384438fd1498Szrj (cmp:c (minus@2 @0 @1) @0) 384538fd1498Szrj (if (single_use (@2) 384638fd1498Szrj && ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) 384738fd1498Szrj && TYPE_UNSIGNED (TREE_TYPE (@0)) 384838fd1498Szrj && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))) 384938fd1498Szrj (cmp @1 @0)))) 385038fd1498Szrj 385138fd1498Szrj/* Testing for overflow is unnecessary if we already know the result. */ 385238fd1498Szrj/* A - B > A */ 385338fd1498Szrj(for cmp (gt le) 385438fd1498Szrj out (ne eq) 385538fd1498Szrj (simplify 385638fd1498Szrj (cmp:c (realpart (IFN_SUB_OVERFLOW@2 @0 @1)) @0) 385738fd1498Szrj (if (TYPE_UNSIGNED (TREE_TYPE (@0)) 385838fd1498Szrj && types_match (TREE_TYPE (@0), TREE_TYPE (@1))) 385938fd1498Szrj (out (imagpart @2) { build_zero_cst (TREE_TYPE (@0)); })))) 386038fd1498Szrj/* A + B < A */ 386138fd1498Szrj(for cmp (lt ge) 386238fd1498Szrj out (ne eq) 386338fd1498Szrj (simplify 386438fd1498Szrj (cmp:c (realpart (IFN_ADD_OVERFLOW:c@2 @0 @1)) @0) 386538fd1498Szrj (if (TYPE_UNSIGNED (TREE_TYPE (@0)) 386638fd1498Szrj && types_match (TREE_TYPE (@0), TREE_TYPE (@1))) 386738fd1498Szrj (out (imagpart @2) { build_zero_cst (TREE_TYPE (@0)); })))) 386838fd1498Szrj 386938fd1498Szrj/* For unsigned operands, -1 / B < A checks whether A * B would overflow. 387038fd1498Szrj Simplify it to __builtin_mul_overflow (A, B, <unused>). */ 387138fd1498Szrj(for cmp (lt ge) 387238fd1498Szrj out (ne eq) 387338fd1498Szrj (simplify 387438fd1498Szrj (cmp:c (trunc_div:s integer_all_onesp @1) @0) 387538fd1498Szrj (if (TYPE_UNSIGNED (TREE_TYPE (@0)) && !VECTOR_TYPE_P (TREE_TYPE (@0))) 387638fd1498Szrj (with { tree t = TREE_TYPE (@0), cpx = build_complex_type (t); } 387738fd1498Szrj (out (imagpart (IFN_MUL_OVERFLOW:cpx @0 @1)) { build_zero_cst (t); }))))) 387838fd1498Szrj 387938fd1498Szrj/* Simplification of math builtins. These rules must all be optimizations 388038fd1498Szrj as well as IL simplifications. 
If there is a possibility that the new 388138fd1498Szrj form could be a pessimization, the rule should go in the canonicalization 388238fd1498Szrj section that follows this one. 388338fd1498Szrj 388438fd1498Szrj Rules can generally go in this section if they satisfy one of 388538fd1498Szrj the following: 388638fd1498Szrj 388738fd1498Szrj - the rule describes an identity 388838fd1498Szrj 388938fd1498Szrj - the rule replaces calls with something as simple as addition or 389038fd1498Szrj multiplication 389138fd1498Szrj 389238fd1498Szrj - the rule contains unary calls only and simplifies the surrounding 389338fd1498Szrj arithmetic. (The idea here is to exclude non-unary calls in which 389438fd1498Szrj one operand is constant and in which the call is known to be cheap 389538fd1498Szrj when the operand has that value.) */ 389638fd1498Szrj 389738fd1498Szrj(if (flag_unsafe_math_optimizations) 389838fd1498Szrj /* Simplify sqrt(x) * sqrt(x) -> x. */ 389938fd1498Szrj (simplify 390038fd1498Szrj (mult (SQRT_ALL@1 @0) @1) 390138fd1498Szrj (if (!HONOR_SNANS (type)) 390238fd1498Szrj @0)) 390338fd1498Szrj 390438fd1498Szrj (for op (plus minus) 390538fd1498Szrj /* Simplify (A / C) +- (B / C) -> (A +- B) / C. */ 390638fd1498Szrj (simplify 390738fd1498Szrj (op (rdiv @0 @1) 390838fd1498Szrj (rdiv @2 @1)) 390938fd1498Szrj (rdiv (op @0 @2) @1))) 391038fd1498Szrj 391138fd1498Szrj /* Simplify sqrt(x) * sqrt(y) -> sqrt(x*y). */ 391238fd1498Szrj (for root (SQRT CBRT) 391338fd1498Szrj (simplify 391438fd1498Szrj (mult (root:s @0) (root:s @1)) 391538fd1498Szrj (root (mult @0 @1)))) 391638fd1498Szrj 391738fd1498Szrj /* Simplify expN(x) * expN(y) -> expN(x+y). */ 391838fd1498Szrj (for exps (EXP EXP2 EXP10 POW10) 391938fd1498Szrj (simplify 392038fd1498Szrj (mult (exps:s @0) (exps:s @1)) 392138fd1498Szrj (exps (plus @0 @1)))) 392238fd1498Szrj 392338fd1498Szrj /* Simplify a/root(b/c) into a*root(c/b). 
*/ 392438fd1498Szrj (for root (SQRT CBRT) 392538fd1498Szrj (simplify 392638fd1498Szrj (rdiv @0 (root:s (rdiv:s @1 @2))) 392738fd1498Szrj (mult @0 (root (rdiv @2 @1))))) 392838fd1498Szrj 392938fd1498Szrj /* Simplify x/expN(y) into x*expN(-y). */ 393038fd1498Szrj (for exps (EXP EXP2 EXP10 POW10) 393138fd1498Szrj (simplify 393238fd1498Szrj (rdiv @0 (exps:s @1)) 393338fd1498Szrj (mult @0 (exps (negate @1))))) 393438fd1498Szrj 393538fd1498Szrj (for logs (LOG LOG2 LOG10 LOG10) 393638fd1498Szrj exps (EXP EXP2 EXP10 POW10) 393738fd1498Szrj /* logN(expN(x)) -> x. */ 393838fd1498Szrj (simplify 393938fd1498Szrj (logs (exps @0)) 394038fd1498Szrj @0) 394138fd1498Szrj /* expN(logN(x)) -> x. */ 394238fd1498Szrj (simplify 394338fd1498Szrj (exps (logs @0)) 394438fd1498Szrj @0)) 394538fd1498Szrj 394638fd1498Szrj /* Optimize logN(func()) for various exponential functions. We 394738fd1498Szrj want to determine the value "x" and the power "exponent" in 394838fd1498Szrj order to transform logN(x**exponent) into exponent*logN(x). */ 394938fd1498Szrj (for logs (LOG LOG LOG LOG2 LOG2 LOG2 LOG10 LOG10) 395038fd1498Szrj exps (EXP2 EXP10 POW10 EXP EXP10 POW10 EXP EXP2) 395138fd1498Szrj (simplify 395238fd1498Szrj (logs (exps @0)) 395338fd1498Szrj (if (SCALAR_FLOAT_TYPE_P (type)) 395438fd1498Szrj (with { 395538fd1498Szrj tree x; 395638fd1498Szrj switch (exps) 395738fd1498Szrj { 395838fd1498Szrj CASE_CFN_EXP: 395938fd1498Szrj /* Prepare to do logN(exp(exponent)) -> exponent*logN(e). */ 396038fd1498Szrj x = build_real_truncate (type, dconst_e ()); 396138fd1498Szrj break; 396238fd1498Szrj CASE_CFN_EXP2: 396338fd1498Szrj /* Prepare to do logN(exp2(exponent)) -> exponent*logN(2). */ 396438fd1498Szrj x = build_real (type, dconst2); 396538fd1498Szrj break; 396638fd1498Szrj CASE_CFN_EXP10: 396738fd1498Szrj CASE_CFN_POW10: 396838fd1498Szrj /* Prepare to do logN(exp10(exponent)) -> exponent*logN(10). 
*/ 396938fd1498Szrj { 397038fd1498Szrj REAL_VALUE_TYPE dconst10; 397138fd1498Szrj real_from_integer (&dconst10, VOIDmode, 10, SIGNED); 397238fd1498Szrj x = build_real (type, dconst10); 397338fd1498Szrj } 397438fd1498Szrj break; 397538fd1498Szrj default: 397638fd1498Szrj gcc_unreachable (); 397738fd1498Szrj } 397838fd1498Szrj } 397938fd1498Szrj (mult (logs { x; }) @0))))) 398038fd1498Szrj 398138fd1498Szrj (for logs (LOG LOG 398238fd1498Szrj LOG2 LOG2 398338fd1498Szrj LOG10 LOG10) 398438fd1498Szrj exps (SQRT CBRT) 398538fd1498Szrj (simplify 398638fd1498Szrj (logs (exps @0)) 398738fd1498Szrj (if (SCALAR_FLOAT_TYPE_P (type)) 398838fd1498Szrj (with { 398938fd1498Szrj tree x; 399038fd1498Szrj switch (exps) 399138fd1498Szrj { 399238fd1498Szrj CASE_CFN_SQRT: 399338fd1498Szrj /* Prepare to do logN(sqrt(x)) -> 0.5*logN(x). */ 399438fd1498Szrj x = build_real (type, dconsthalf); 399538fd1498Szrj break; 399638fd1498Szrj CASE_CFN_CBRT: 399738fd1498Szrj /* Prepare to do logN(cbrt(x)) -> (1/3)*logN(x). */ 399838fd1498Szrj x = build_real_truncate (type, dconst_third ()); 399938fd1498Szrj break; 400038fd1498Szrj default: 400138fd1498Szrj gcc_unreachable (); 400238fd1498Szrj } 400338fd1498Szrj } 400438fd1498Szrj (mult { x; } (logs @0)))))) 400538fd1498Szrj 400638fd1498Szrj /* logN(pow(x,exponent)) -> exponent*logN(x). */ 400738fd1498Szrj (for logs (LOG LOG2 LOG10) 400838fd1498Szrj pows (POW) 400938fd1498Szrj (simplify 401038fd1498Szrj (logs (pows @0 @1)) 401138fd1498Szrj (mult @1 (logs @0)))) 401238fd1498Szrj 401338fd1498Szrj /* pow(C,x) -> exp(log(C)*x) if C > 0, 401438fd1498Szrj or if C is a positive power of 2, 401538fd1498Szrj pow(C,x) -> exp2(log2(C)*x). 
*/ 401638fd1498Szrj#if GIMPLE 401738fd1498Szrj (for pows (POW) 401838fd1498Szrj exps (EXP) 401938fd1498Szrj logs (LOG) 402038fd1498Szrj exp2s (EXP2) 402138fd1498Szrj log2s (LOG2) 402238fd1498Szrj (simplify 402338fd1498Szrj (pows REAL_CST@0 @1) 402438fd1498Szrj (if (real_compare (GT_EXPR, TREE_REAL_CST_PTR (@0), &dconst0) 402538fd1498Szrj && real_isfinite (TREE_REAL_CST_PTR (@0)) 402638fd1498Szrj /* As libmvec doesn't have a vectorized exp2, defer optimizing 402738fd1498Szrj the use_exp2 case until after vectorization. It seems actually 402838fd1498Szrj beneficial for all constants to postpone this until later, 402938fd1498Szrj because exp(log(C)*x), while faster, will have worse precision 403038fd1498Szrj and if x folds into a constant too, that is unnecessary 403138fd1498Szrj pessimization. */ 403238fd1498Szrj && canonicalize_math_after_vectorization_p ()) 403338fd1498Szrj (with { 403438fd1498Szrj const REAL_VALUE_TYPE *const value = TREE_REAL_CST_PTR (@0); 403538fd1498Szrj bool use_exp2 = false; 403638fd1498Szrj if (targetm.libc_has_function (function_c99_misc) 403738fd1498Szrj && value->cl == rvc_normal) 403838fd1498Szrj { 403938fd1498Szrj REAL_VALUE_TYPE frac_rvt = *value; 404038fd1498Szrj SET_REAL_EXP (&frac_rvt, 1); 404138fd1498Szrj if (real_equal (&frac_rvt, &dconst1)) 404238fd1498Szrj use_exp2 = true; 404338fd1498Szrj } 404438fd1498Szrj } 404538fd1498Szrj (if (!use_exp2) 404638fd1498Szrj (if (optimize_pow_to_exp (@0, @1)) 404738fd1498Szrj (exps (mult (logs @0) @1))) 404838fd1498Szrj (exp2s (mult (log2s @0) @1))))))) 404938fd1498Szrj#endif 405038fd1498Szrj 405138fd1498Szrj /* pow(C,x)*expN(y) -> expN(logN(C)*x+y) if C > 0. 
*/ 405238fd1498Szrj (for pows (POW) 405338fd1498Szrj exps (EXP EXP2 EXP10 POW10) 405438fd1498Szrj logs (LOG LOG2 LOG10 LOG10) 405538fd1498Szrj (simplify 405638fd1498Szrj (mult:c (pows:s REAL_CST@0 @1) (exps:s @2)) 405738fd1498Szrj (if (real_compare (GT_EXPR, TREE_REAL_CST_PTR (@0), &dconst0) 405838fd1498Szrj && real_isfinite (TREE_REAL_CST_PTR (@0))) 405938fd1498Szrj (exps (plus (mult (logs @0) @1) @2))))) 406038fd1498Szrj 406138fd1498Szrj (for sqrts (SQRT) 406238fd1498Szrj cbrts (CBRT) 406338fd1498Szrj pows (POW) 406438fd1498Szrj exps (EXP EXP2 EXP10 POW10) 406538fd1498Szrj /* sqrt(expN(x)) -> expN(x*0.5). */ 406638fd1498Szrj (simplify 406738fd1498Szrj (sqrts (exps @0)) 406838fd1498Szrj (exps (mult @0 { build_real (type, dconsthalf); }))) 406938fd1498Szrj /* cbrt(expN(x)) -> expN(x/3). */ 407038fd1498Szrj (simplify 407138fd1498Szrj (cbrts (exps @0)) 407238fd1498Szrj (exps (mult @0 { build_real_truncate (type, dconst_third ()); }))) 407338fd1498Szrj /* pow(expN(x), y) -> expN(x*y). */ 407438fd1498Szrj (simplify 407538fd1498Szrj (pows (exps @0) @1) 407638fd1498Szrj (exps (mult @0 @1)))) 407738fd1498Szrj 407838fd1498Szrj /* tan(atan(x)) -> x. */ 407938fd1498Szrj (for tans (TAN) 408038fd1498Szrj atans (ATAN) 408138fd1498Szrj (simplify 408238fd1498Szrj (tans (atans @0)) 408338fd1498Szrj @0))) 408438fd1498Szrj 408538fd1498Szrj/* cabs(x+0i) or cabs(0+xi) -> abs(x). */ 408638fd1498Szrj(simplify 408738fd1498Szrj (CABS (complex:C @0 real_zerop@1)) 408838fd1498Szrj (abs @0)) 408938fd1498Szrj 409038fd1498Szrj/* trunc(trunc(x)) -> trunc(x), etc. */ 409138fd1498Szrj(for fns (TRUNC_ALL FLOOR_ALL CEIL_ALL ROUND_ALL NEARBYINT_ALL RINT_ALL) 409238fd1498Szrj (simplify 409338fd1498Szrj (fns (fns @0)) 409438fd1498Szrj (fns @0))) 409538fd1498Szrj/* f(x) -> x if x is integer valued and f does nothing for such values. 
*/ 409638fd1498Szrj(for fns (TRUNC_ALL FLOOR_ALL CEIL_ALL ROUND_ALL NEARBYINT_ALL RINT_ALL) 409738fd1498Szrj (simplify 409838fd1498Szrj (fns integer_valued_real_p@0) 409938fd1498Szrj @0)) 410038fd1498Szrj 410138fd1498Szrj/* hypot(x,0) and hypot(0,x) -> abs(x). */ 410238fd1498Szrj(simplify 410338fd1498Szrj (HYPOT:c @0 real_zerop@1) 410438fd1498Szrj (abs @0)) 410538fd1498Szrj 410638fd1498Szrj/* pow(1,x) -> 1. */ 410738fd1498Szrj(simplify 410838fd1498Szrj (POW real_onep@0 @1) 410938fd1498Szrj @0) 411038fd1498Szrj 411138fd1498Szrj(simplify 411238fd1498Szrj /* copysign(x,x) -> x. */ 411338fd1498Szrj (COPYSIGN_ALL @0 @0) 411438fd1498Szrj @0) 411538fd1498Szrj 411638fd1498Szrj(simplify 411738fd1498Szrj /* copysign(x,y) -> fabs(x) if y is nonnegative. */ 411838fd1498Szrj (COPYSIGN_ALL @0 tree_expr_nonnegative_p@1) 411938fd1498Szrj (abs @0)) 412038fd1498Szrj 412138fd1498Szrj(for scale (LDEXP SCALBN SCALBLN) 412238fd1498Szrj /* ldexp(0, x) -> 0. */ 412338fd1498Szrj (simplify 412438fd1498Szrj (scale real_zerop@0 @1) 412538fd1498Szrj @0) 412638fd1498Szrj /* ldexp(x, 0) -> x. */ 412738fd1498Szrj (simplify 412838fd1498Szrj (scale @0 integer_zerop@1) 412938fd1498Szrj @0) 413038fd1498Szrj /* ldexp(x, y) -> x if x is +-Inf or NaN. */ 413138fd1498Szrj (simplify 413238fd1498Szrj (scale REAL_CST@0 @1) 413338fd1498Szrj (if (!real_isfinite (TREE_REAL_CST_PTR (@0))) 413438fd1498Szrj @0))) 413538fd1498Szrj 413638fd1498Szrj/* Canonicalization of sequences of math builtins. These rules represent 413738fd1498Szrj IL simplifications but are not necessarily optimizations. 413838fd1498Szrj 413938fd1498Szrj The sincos pass is responsible for picking "optimal" implementations 414038fd1498Szrj of math builtins, which may be more complicated and can sometimes go 414138fd1498Szrj the other way, e.g. converting pow into a sequence of sqrts. 414238fd1498Szrj We only want to do these canonicalizations before the pass has run. 
*/ 414338fd1498Szrj 414438fd1498Szrj(if (flag_unsafe_math_optimizations && canonicalize_math_p ()) 414538fd1498Szrj /* Simplify tan(x) * cos(x) -> sin(x). */ 414638fd1498Szrj (simplify 414738fd1498Szrj (mult:c (TAN:s @0) (COS:s @0)) 414838fd1498Szrj (SIN @0)) 414938fd1498Szrj 415038fd1498Szrj /* Simplify x * pow(x,c) -> pow(x,c+1). */ 415138fd1498Szrj (simplify 415238fd1498Szrj (mult:c @0 (POW:s @0 REAL_CST@1)) 415338fd1498Szrj (if (!TREE_OVERFLOW (@1)) 415438fd1498Szrj (POW @0 (plus @1 { build_one_cst (type); })))) 415538fd1498Szrj 415638fd1498Szrj /* Simplify sin(x) / cos(x) -> tan(x). */ 415738fd1498Szrj (simplify 415838fd1498Szrj (rdiv (SIN:s @0) (COS:s @0)) 415938fd1498Szrj (TAN @0)) 416038fd1498Szrj 416138fd1498Szrj /* Simplify cos(x) / sin(x) -> 1 / tan(x). */ 416238fd1498Szrj (simplify 416338fd1498Szrj (rdiv (COS:s @0) (SIN:s @0)) 416438fd1498Szrj (rdiv { build_one_cst (type); } (TAN @0))) 416538fd1498Szrj 416638fd1498Szrj /* Simplify sin(x) / tan(x) -> cos(x). */ 416738fd1498Szrj (simplify 416838fd1498Szrj (rdiv (SIN:s @0) (TAN:s @0)) 416938fd1498Szrj (if (! HONOR_NANS (@0) 417038fd1498Szrj && ! HONOR_INFINITIES (@0)) 417138fd1498Szrj (COS @0))) 417238fd1498Szrj 417338fd1498Szrj /* Simplify tan(x) / sin(x) -> 1.0 / cos(x). */ 417438fd1498Szrj (simplify 417538fd1498Szrj (rdiv (TAN:s @0) (SIN:s @0)) 417638fd1498Szrj (if (! HONOR_NANS (@0) 417738fd1498Szrj && ! HONOR_INFINITIES (@0)) 417838fd1498Szrj (rdiv { build_one_cst (type); } (COS @0)))) 417938fd1498Szrj 418038fd1498Szrj /* Simplify pow(x,y) * pow(x,z) -> pow(x,y+z). */ 418138fd1498Szrj (simplify 418238fd1498Szrj (mult (POW:s @0 @1) (POW:s @0 @2)) 418338fd1498Szrj (POW @0 (plus @1 @2))) 418438fd1498Szrj 418538fd1498Szrj /* Simplify pow(x,y) * pow(z,y) -> pow(x*z,y). */ 418638fd1498Szrj (simplify 418738fd1498Szrj (mult (POW:s @0 @1) (POW:s @2 @1)) 418838fd1498Szrj (POW (mult @0 @2) @1)) 418938fd1498Szrj 419038fd1498Szrj /* Simplify powi(x,y) * powi(z,y) -> powi(x*z,y). 
*/ 419138fd1498Szrj (simplify 419238fd1498Szrj (mult (POWI:s @0 @1) (POWI:s @2 @1)) 419338fd1498Szrj (POWI (mult @0 @2) @1)) 419438fd1498Szrj 419538fd1498Szrj /* Simplify pow(x,c) / x -> pow(x,c-1). */ 419638fd1498Szrj (simplify 419738fd1498Szrj (rdiv (POW:s @0 REAL_CST@1) @0) 419838fd1498Szrj (if (!TREE_OVERFLOW (@1)) 419938fd1498Szrj (POW @0 (minus @1 { build_one_cst (type); })))) 420038fd1498Szrj 420138fd1498Szrj /* Simplify x / pow (y,z) -> x * pow(y,-z). */ 420238fd1498Szrj (simplify 420338fd1498Szrj (rdiv @0 (POW:s @1 @2)) 420438fd1498Szrj (mult @0 (POW @1 (negate @2)))) 420538fd1498Szrj 420638fd1498Szrj (for sqrts (SQRT) 420738fd1498Szrj cbrts (CBRT) 420838fd1498Szrj pows (POW) 420938fd1498Szrj /* sqrt(sqrt(x)) -> pow(x,1/4). */ 421038fd1498Szrj (simplify 421138fd1498Szrj (sqrts (sqrts @0)) 421238fd1498Szrj (pows @0 { build_real (type, dconst_quarter ()); })) 421338fd1498Szrj /* sqrt(cbrt(x)) -> pow(x,1/6). */ 421438fd1498Szrj (simplify 421538fd1498Szrj (sqrts (cbrts @0)) 421638fd1498Szrj (pows @0 { build_real_truncate (type, dconst_sixth ()); })) 421738fd1498Szrj /* cbrt(sqrt(x)) -> pow(x,1/6). */ 421838fd1498Szrj (simplify 421938fd1498Szrj (cbrts (sqrts @0)) 422038fd1498Szrj (pows @0 { build_real_truncate (type, dconst_sixth ()); })) 422138fd1498Szrj /* cbrt(cbrt(x)) -> pow(x,1/9), iff x is nonnegative. */ 422238fd1498Szrj (simplify 422338fd1498Szrj (cbrts (cbrts tree_expr_nonnegative_p@0)) 422438fd1498Szrj (pows @0 { build_real_truncate (type, dconst_ninth ()); })) 422538fd1498Szrj /* sqrt(pow(x,y)) -> pow(|x|,y*0.5). */ 422638fd1498Szrj (simplify 422738fd1498Szrj (sqrts (pows @0 @1)) 422838fd1498Szrj (pows (abs @0) (mult @1 { build_real (type, dconsthalf); }))) 422938fd1498Szrj /* cbrt(pow(x,y)) -> pow(x,y/3), iff x is nonnegative. 
*/ 423038fd1498Szrj (simplify 423138fd1498Szrj (cbrts (pows tree_expr_nonnegative_p@0 @1)) 423238fd1498Szrj (pows @0 (mult @1 { build_real_truncate (type, dconst_third ()); }))) 423338fd1498Szrj /* pow(sqrt(x),y) -> pow(x,y*0.5). */ 423438fd1498Szrj (simplify 423538fd1498Szrj (pows (sqrts @0) @1) 423638fd1498Szrj (pows @0 (mult @1 { build_real (type, dconsthalf); }))) 423738fd1498Szrj /* pow(cbrt(x),y) -> pow(x,y/3) iff x is nonnegative. */ 423838fd1498Szrj (simplify 423938fd1498Szrj (pows (cbrts tree_expr_nonnegative_p@0) @1) 424038fd1498Szrj (pows @0 (mult @1 { build_real_truncate (type, dconst_third ()); }))) 424138fd1498Szrj /* pow(pow(x,y),z) -> pow(x,y*z) iff x is nonnegative. */ 424238fd1498Szrj (simplify 424338fd1498Szrj (pows (pows tree_expr_nonnegative_p@0 @1) @2) 424438fd1498Szrj (pows @0 (mult @1 @2)))) 424538fd1498Szrj 424638fd1498Szrj /* cabs(x+xi) -> fabs(x)*sqrt(2). */ 424738fd1498Szrj (simplify 424838fd1498Szrj (CABS (complex @0 @0)) 424938fd1498Szrj (mult (abs @0) { build_real_truncate (type, dconst_sqrt2 ()); })) 425038fd1498Szrj 425138fd1498Szrj /* hypot(x,x) -> fabs(x)*sqrt(2). */ 425238fd1498Szrj (simplify 425338fd1498Szrj (HYPOT @0 @0) 425438fd1498Szrj (mult (abs @0) { build_real_truncate (type, dconst_sqrt2 ()); })) 425538fd1498Szrj 425638fd1498Szrj /* cexp(x+yi) -> exp(x)*cexpi(y). */ 425738fd1498Szrj (for cexps (CEXP) 425838fd1498Szrj exps (EXP) 425938fd1498Szrj cexpis (CEXPI) 426038fd1498Szrj (simplify 426138fd1498Szrj (cexps compositional_complex@0) 426238fd1498Szrj (if (targetm.libc_has_function (function_c99_math_complex)) 426338fd1498Szrj (complex 426438fd1498Szrj (mult (exps@1 (realpart @0)) (realpart (cexpis:type@2 (imagpart @0)))) 426538fd1498Szrj (mult @1 (imagpart @2))))))) 426638fd1498Szrj 426738fd1498Szrj(if (canonicalize_math_p ()) 426838fd1498Szrj /* floor(x) -> trunc(x) if x is nonnegative. 
*/ 426938fd1498Szrj (for floors (FLOOR_ALL) 427038fd1498Szrj truncs (TRUNC_ALL) 427138fd1498Szrj (simplify 427238fd1498Szrj (floors tree_expr_nonnegative_p@0) 427338fd1498Szrj (truncs @0)))) 427438fd1498Szrj 427538fd1498Szrj(match double_value_p 427638fd1498Szrj @0 427738fd1498Szrj (if (TYPE_MAIN_VARIANT (TREE_TYPE (@0)) == double_type_node))) 427838fd1498Szrj(for froms (BUILT_IN_TRUNCL 427938fd1498Szrj BUILT_IN_FLOORL 428038fd1498Szrj BUILT_IN_CEILL 428138fd1498Szrj BUILT_IN_ROUNDL 428238fd1498Szrj BUILT_IN_NEARBYINTL 428338fd1498Szrj BUILT_IN_RINTL) 428438fd1498Szrj tos (BUILT_IN_TRUNC 428538fd1498Szrj BUILT_IN_FLOOR 428638fd1498Szrj BUILT_IN_CEIL 428738fd1498Szrj BUILT_IN_ROUND 428838fd1498Szrj BUILT_IN_NEARBYINT 428938fd1498Szrj BUILT_IN_RINT) 429038fd1498Szrj /* truncl(extend(x)) -> extend(trunc(x)), etc., if x is a double. */ 429138fd1498Szrj (if (optimize && canonicalize_math_p ()) 429238fd1498Szrj (simplify 429338fd1498Szrj (froms (convert double_value_p@0)) 429438fd1498Szrj (convert (tos @0))))) 429538fd1498Szrj 429638fd1498Szrj(match float_value_p 429738fd1498Szrj @0 429838fd1498Szrj (if (TYPE_MAIN_VARIANT (TREE_TYPE (@0)) == float_type_node))) 429938fd1498Szrj(for froms (BUILT_IN_TRUNCL BUILT_IN_TRUNC 430038fd1498Szrj BUILT_IN_FLOORL BUILT_IN_FLOOR 430138fd1498Szrj BUILT_IN_CEILL BUILT_IN_CEIL 430238fd1498Szrj BUILT_IN_ROUNDL BUILT_IN_ROUND 430338fd1498Szrj BUILT_IN_NEARBYINTL BUILT_IN_NEARBYINT 430438fd1498Szrj BUILT_IN_RINTL BUILT_IN_RINT) 430538fd1498Szrj tos (BUILT_IN_TRUNCF BUILT_IN_TRUNCF 430638fd1498Szrj BUILT_IN_FLOORF BUILT_IN_FLOORF 430738fd1498Szrj BUILT_IN_CEILF BUILT_IN_CEILF 430838fd1498Szrj BUILT_IN_ROUNDF BUILT_IN_ROUNDF 430938fd1498Szrj BUILT_IN_NEARBYINTF BUILT_IN_NEARBYINTF 431038fd1498Szrj BUILT_IN_RINTF BUILT_IN_RINTF) 431138fd1498Szrj /* truncl(extend(x)) and trunc(extend(x)) -> extend(truncf(x)), etc., 431238fd1498Szrj if x is a float. 
*/ 431338fd1498Szrj (if (optimize && canonicalize_math_p () 431438fd1498Szrj && targetm.libc_has_function (function_c99_misc)) 431538fd1498Szrj (simplify 431638fd1498Szrj (froms (convert float_value_p@0)) 431738fd1498Szrj (convert (tos @0))))) 431838fd1498Szrj 431938fd1498Szrj(for froms (XFLOORL XCEILL XROUNDL XRINTL) 432038fd1498Szrj tos (XFLOOR XCEIL XROUND XRINT) 432138fd1498Szrj /* llfloorl(extend(x)) -> llfloor(x), etc., if x is a double. */ 432238fd1498Szrj (if (optimize && canonicalize_math_p ()) 432338fd1498Szrj (simplify 432438fd1498Szrj (froms (convert double_value_p@0)) 432538fd1498Szrj (tos @0)))) 432638fd1498Szrj 432738fd1498Szrj(for froms (XFLOORL XCEILL XROUNDL XRINTL 432838fd1498Szrj XFLOOR XCEIL XROUND XRINT) 432938fd1498Szrj tos (XFLOORF XCEILF XROUNDF XRINTF) 433038fd1498Szrj /* llfloorl(extend(x)) and llfloor(extend(x)) -> llfloorf(x), etc., 433138fd1498Szrj if x is a float. */ 433238fd1498Szrj (if (optimize && canonicalize_math_p ()) 433338fd1498Szrj (simplify 433438fd1498Szrj (froms (convert float_value_p@0)) 433538fd1498Szrj (tos @0)))) 433638fd1498Szrj 433738fd1498Szrj(if (canonicalize_math_p ()) 433838fd1498Szrj /* xfloor(x) -> fix_trunc(x) if x is nonnegative. */ 433938fd1498Szrj (for floors (IFLOOR LFLOOR LLFLOOR) 434038fd1498Szrj (simplify 434138fd1498Szrj (floors tree_expr_nonnegative_p@0) 434238fd1498Szrj (fix_trunc @0)))) 434338fd1498Szrj 434438fd1498Szrj(if (canonicalize_math_p ()) 434538fd1498Szrj /* xfloor(x) -> fix_trunc(x), etc., if x is integer valued. */ 434638fd1498Szrj (for fns (IFLOOR LFLOOR LLFLOOR 434738fd1498Szrj ICEIL LCEIL LLCEIL 434838fd1498Szrj IROUND LROUND LLROUND) 434938fd1498Szrj (simplify 435038fd1498Szrj (fns integer_valued_real_p@0) 435138fd1498Szrj (fix_trunc @0))) 435238fd1498Szrj (if (!flag_errno_math) 435338fd1498Szrj /* xrint(x) -> fix_trunc(x), etc., if x is integer valued. 
*/ 435438fd1498Szrj (for rints (IRINT LRINT LLRINT) 435538fd1498Szrj (simplify 435638fd1498Szrj (rints integer_valued_real_p@0) 435738fd1498Szrj (fix_trunc @0))))) 435838fd1498Szrj 435938fd1498Szrj(if (canonicalize_math_p ()) 436038fd1498Szrj (for ifn (IFLOOR ICEIL IROUND IRINT) 436138fd1498Szrj lfn (LFLOOR LCEIL LROUND LRINT) 436238fd1498Szrj llfn (LLFLOOR LLCEIL LLROUND LLRINT) 436338fd1498Szrj /* Canonicalize iround (x) to lround (x) on ILP32 targets where 436438fd1498Szrj sizeof (int) == sizeof (long). */ 436538fd1498Szrj (if (TYPE_PRECISION (integer_type_node) 436638fd1498Szrj == TYPE_PRECISION (long_integer_type_node)) 436738fd1498Szrj (simplify 436838fd1498Szrj (ifn @0) 436938fd1498Szrj (lfn:long_integer_type_node @0))) 437038fd1498Szrj /* Canonicalize llround (x) to lround (x) on LP64 targets where 437138fd1498Szrj sizeof (long long) == sizeof (long). */ 437238fd1498Szrj (if (TYPE_PRECISION (long_long_integer_type_node) 437338fd1498Szrj == TYPE_PRECISION (long_integer_type_node)) 437438fd1498Szrj (simplify 437538fd1498Szrj (llfn @0) 437638fd1498Szrj (lfn:long_integer_type_node @0))))) 437738fd1498Szrj 437838fd1498Szrj/* cproj(x) -> x if we're ignoring infinities. */ 437938fd1498Szrj(simplify 438038fd1498Szrj (CPROJ @0) 438138fd1498Szrj (if (!HONOR_INFINITIES (type)) 438238fd1498Szrj @0)) 438338fd1498Szrj 438438fd1498Szrj/* If the real part is inf and the imag part is known to be 438538fd1498Szrj nonnegative, return (inf + 0i). */ 438638fd1498Szrj(simplify 438738fd1498Szrj (CPROJ (complex REAL_CST@0 tree_expr_nonnegative_p@1)) 438838fd1498Szrj (if (real_isinf (TREE_REAL_CST_PTR (@0))) 438938fd1498Szrj { build_complex_inf (type, false); })) 439038fd1498Szrj 439138fd1498Szrj/* If the imag part is inf, return (inf+I*copysign(0,imag)). 
*/ 439238fd1498Szrj(simplify 439338fd1498Szrj (CPROJ (complex @0 REAL_CST@1)) 439438fd1498Szrj (if (real_isinf (TREE_REAL_CST_PTR (@1))) 439538fd1498Szrj { build_complex_inf (type, TREE_REAL_CST_PTR (@1)->sign); })) 439638fd1498Szrj 439738fd1498Szrj(for pows (POW) 439838fd1498Szrj sqrts (SQRT) 439938fd1498Szrj cbrts (CBRT) 440038fd1498Szrj (simplify 440138fd1498Szrj (pows @0 REAL_CST@1) 440238fd1498Szrj (with { 440338fd1498Szrj const REAL_VALUE_TYPE *value = TREE_REAL_CST_PTR (@1); 440438fd1498Szrj REAL_VALUE_TYPE tmp; 440538fd1498Szrj } 440638fd1498Szrj (switch 440738fd1498Szrj /* pow(x,0) -> 1. */ 440838fd1498Szrj (if (real_equal (value, &dconst0)) 440938fd1498Szrj { build_real (type, dconst1); }) 441038fd1498Szrj /* pow(x,1) -> x. */ 441138fd1498Szrj (if (real_equal (value, &dconst1)) 441238fd1498Szrj @0) 441338fd1498Szrj /* pow(x,-1) -> 1/x. */ 441438fd1498Szrj (if (real_equal (value, &dconstm1)) 441538fd1498Szrj (rdiv { build_real (type, dconst1); } @0)) 441638fd1498Szrj /* pow(x,0.5) -> sqrt(x). */ 441738fd1498Szrj (if (flag_unsafe_math_optimizations 441838fd1498Szrj && canonicalize_math_p () 441938fd1498Szrj && real_equal (value, &dconsthalf)) 442038fd1498Szrj (sqrts @0)) 442138fd1498Szrj /* pow(x,1/3) -> cbrt(x). */ 442238fd1498Szrj (if (flag_unsafe_math_optimizations 442338fd1498Szrj && canonicalize_math_p () 442438fd1498Szrj && (tmp = real_value_truncate (TYPE_MODE (type), dconst_third ()), 442538fd1498Szrj real_equal (value, &tmp))) 442638fd1498Szrj (cbrts @0)))))) 442738fd1498Szrj 442838fd1498Szrj/* powi(1,x) -> 1. */ 442938fd1498Szrj(simplify 443038fd1498Szrj (POWI real_onep@0 @1) 443138fd1498Szrj @0) 443238fd1498Szrj 443338fd1498Szrj(simplify 443438fd1498Szrj (POWI @0 INTEGER_CST@1) 443538fd1498Szrj (switch 443638fd1498Szrj /* powi(x,0) -> 1. */ 443738fd1498Szrj (if (wi::to_wide (@1) == 0) 443838fd1498Szrj { build_real (type, dconst1); }) 443938fd1498Szrj /* powi(x,1) -> x. 
*/ 444038fd1498Szrj (if (wi::to_wide (@1) == 1) 444138fd1498Szrj @0) 444238fd1498Szrj /* powi(x,-1) -> 1/x. */ 444338fd1498Szrj (if (wi::to_wide (@1) == -1) 444438fd1498Szrj (rdiv { build_real (type, dconst1); } @0)))) 444538fd1498Szrj 444638fd1498Szrj/* Narrowing of arithmetic and logical operations. 444738fd1498Szrj 444838fd1498Szrj These are conceptually similar to the transformations performed for 444938fd1498Szrj the C/C++ front-ends by shorten_binary_op and shorten_compare. Long 445038fd1498Szrj term we want to move all that code out of the front-ends into here. */ 445138fd1498Szrj 445238fd1498Szrj/* If we have a narrowing conversion of an arithmetic operation where 445338fd1498Szrj both operands are widening conversions from the same type as the outer 445438fd1498Szrj narrowing conversion. Then convert the innermost operands to a suitable 445538fd1498Szrj unsigned type (to avoid introducing undefined behavior), perform the 445638fd1498Szrj operation and convert the result to the desired type. */ 445738fd1498Szrj(for op (plus minus) 445838fd1498Szrj (simplify 445938fd1498Szrj (convert (op:s (convert@2 @0) (convert?@3 @1))) 446038fd1498Szrj (if (INTEGRAL_TYPE_P (type) 446138fd1498Szrj /* We check for type compatibility between @0 and @1 below, 446238fd1498Szrj so there's no need to check that @1/@3 are integral types. */ 446338fd1498Szrj && INTEGRAL_TYPE_P (TREE_TYPE (@0)) 446438fd1498Szrj && INTEGRAL_TYPE_P (TREE_TYPE (@2)) 446538fd1498Szrj /* The precision of the type of each operand must match the 446638fd1498Szrj precision of the mode of each operand, similarly for the 446738fd1498Szrj result. */ 446838fd1498Szrj && type_has_mode_precision_p (TREE_TYPE (@0)) 446938fd1498Szrj && type_has_mode_precision_p (TREE_TYPE (@1)) 447038fd1498Szrj && type_has_mode_precision_p (type) 447138fd1498Szrj /* The inner conversion must be a widening conversion. 
*/ 447238fd1498Szrj && TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (TREE_TYPE (@0)) 447338fd1498Szrj && types_match (@0, type) 447438fd1498Szrj && (types_match (@0, @1) 447538fd1498Szrj /* Or the second operand is const integer or converted const 447638fd1498Szrj integer from valueize. */ 447738fd1498Szrj || TREE_CODE (@1) == INTEGER_CST)) 447838fd1498Szrj (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))) 447938fd1498Szrj (op @0 (convert @1)) 448038fd1498Szrj (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); } 448138fd1498Szrj (convert (op (convert:utype @0) 448238fd1498Szrj (convert:utype @1)))))))) 448338fd1498Szrj 448438fd1498Szrj/* This is another case of narrowing, specifically when there's an outer 448538fd1498Szrj BIT_AND_EXPR which masks off bits outside the type of the innermost 448638fd1498Szrj operands. Like the previous case we have to convert the operands 448738fd1498Szrj to unsigned types to avoid introducing undefined behavior for the 448838fd1498Szrj arithmetic operation. */ 448938fd1498Szrj(for op (minus plus) 449038fd1498Szrj (simplify 449138fd1498Szrj (bit_and (op:s (convert@2 @0) (convert@3 @1)) INTEGER_CST@4) 449238fd1498Szrj (if (INTEGRAL_TYPE_P (type) 449338fd1498Szrj /* We check for type compatibility between @0 and @1 below, 449438fd1498Szrj so there's no need to check that @1/@3 are integral types. */ 449538fd1498Szrj && INTEGRAL_TYPE_P (TREE_TYPE (@0)) 449638fd1498Szrj && INTEGRAL_TYPE_P (TREE_TYPE (@2)) 449738fd1498Szrj /* The precision of the type of each operand must match the 449838fd1498Szrj precision of the mode of each operand, similarly for the 449938fd1498Szrj result. */ 450038fd1498Szrj && type_has_mode_precision_p (TREE_TYPE (@0)) 450138fd1498Szrj && type_has_mode_precision_p (TREE_TYPE (@1)) 450238fd1498Szrj && type_has_mode_precision_p (type) 450338fd1498Szrj /* The inner conversion must be a widening conversion. 
*/ 450438fd1498Szrj && TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (TREE_TYPE (@0)) 450538fd1498Szrj && types_match (@0, @1) 450638fd1498Szrj && (tree_int_cst_min_precision (@4, TYPE_SIGN (TREE_TYPE (@0))) 450738fd1498Szrj <= TYPE_PRECISION (TREE_TYPE (@0))) 450838fd1498Szrj && (wi::to_wide (@4) 450938fd1498Szrj & wi::mask (TYPE_PRECISION (TREE_TYPE (@0)), 451038fd1498Szrj true, TYPE_PRECISION (type))) == 0) 451138fd1498Szrj (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))) 451238fd1498Szrj (with { tree ntype = TREE_TYPE (@0); } 451338fd1498Szrj (convert (bit_and (op @0 @1) (convert:ntype @4)))) 451438fd1498Szrj (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); } 451538fd1498Szrj (convert (bit_and (op (convert:utype @0) (convert:utype @1)) 451638fd1498Szrj (convert:utype @4)))))))) 451738fd1498Szrj 451838fd1498Szrj/* Transform (@0 < @1 and @0 < @2) to use min, 451938fd1498Szrj (@0 > @1 and @0 > @2) to use max */ 452038fd1498Szrj(for op (lt le gt ge) 452138fd1498Szrj ext (min min max max) 452238fd1498Szrj (simplify 452338fd1498Szrj (bit_and (op:cs @0 @1) (op:cs @0 @2)) 452438fd1498Szrj (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) 452538fd1498Szrj && TREE_CODE (@0) != INTEGER_CST) 452638fd1498Szrj (op @0 (ext @1 @2))))) 452738fd1498Szrj 452838fd1498Szrj(simplify 452938fd1498Szrj /* signbit(x) -> 0 if x is nonnegative. */ 453038fd1498Szrj (SIGNBIT tree_expr_nonnegative_p@0) 453138fd1498Szrj { integer_zero_node; }) 453238fd1498Szrj 453338fd1498Szrj(simplify 453438fd1498Szrj /* signbit(x) -> x<0 if x doesn't have signed zeros. */ 453538fd1498Szrj (SIGNBIT @0) 453638fd1498Szrj (if (!HONOR_SIGNED_ZEROS (@0)) 453738fd1498Szrj (convert (lt @0 { build_real (TREE_TYPE (@0), dconst0); })))) 453838fd1498Szrj 453938fd1498Szrj/* Transform comparisons of the form X +- C1 CMP C2 to X CMP C2 -+ C1. 
*/ 454038fd1498Szrj(for cmp (eq ne) 454138fd1498Szrj (for op (plus minus) 454238fd1498Szrj rop (minus plus) 454338fd1498Szrj (simplify 454438fd1498Szrj (cmp (op@3 @0 INTEGER_CST@1) INTEGER_CST@2) 454538fd1498Szrj (if (!TREE_OVERFLOW (@1) && !TREE_OVERFLOW (@2) 454638fd1498Szrj && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0)) 454738fd1498Szrj && !TYPE_OVERFLOW_TRAPS (TREE_TYPE (@0)) 454838fd1498Szrj && !TYPE_SATURATING (TREE_TYPE (@0))) 454938fd1498Szrj (with { tree res = int_const_binop (rop, @2, @1); } 455038fd1498Szrj (if (TREE_OVERFLOW (res) 455138fd1498Szrj && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))) 455238fd1498Szrj { constant_boolean_node (cmp == NE_EXPR, type); } 455338fd1498Szrj (if (single_use (@3)) 455438fd1498Szrj (cmp @0 { TREE_OVERFLOW (res) 455538fd1498Szrj ? drop_tree_overflow (res) : res; })))))))) 455638fd1498Szrj(for cmp (lt le gt ge) 455738fd1498Szrj (for op (plus minus) 455838fd1498Szrj rop (minus plus) 455938fd1498Szrj (simplify 456038fd1498Szrj (cmp (op@3 @0 INTEGER_CST@1) INTEGER_CST@2) 456138fd1498Szrj (if (!TREE_OVERFLOW (@1) && !TREE_OVERFLOW (@2) 456238fd1498Szrj && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))) 456338fd1498Szrj (with { tree res = int_const_binop (rop, @2, @1); } 456438fd1498Szrj (if (TREE_OVERFLOW (res)) 456538fd1498Szrj { 456638fd1498Szrj fold_overflow_warning (("assuming signed overflow does not occur " 456738fd1498Szrj "when simplifying conditional to constant"), 456838fd1498Szrj WARN_STRICT_OVERFLOW_CONDITIONAL); 456938fd1498Szrj bool less = cmp == LE_EXPR || cmp == LT_EXPR; 457038fd1498Szrj /* wi::ges_p (@2, 0) should be sufficient for a signed type. 
*/ 457138fd1498Szrj bool ovf_high = wi::lt_p (wi::to_wide (@1), 0, 457238fd1498Szrj TYPE_SIGN (TREE_TYPE (@1))) 457338fd1498Szrj != (op == MINUS_EXPR); 457438fd1498Szrj constant_boolean_node (less == ovf_high, type); 457538fd1498Szrj } 457638fd1498Szrj (if (single_use (@3)) 457738fd1498Szrj (with 457838fd1498Szrj { 457938fd1498Szrj fold_overflow_warning (("assuming signed overflow does not occur " 458038fd1498Szrj "when changing X +- C1 cmp C2 to " 458138fd1498Szrj "X cmp C2 -+ C1"), 458238fd1498Szrj WARN_STRICT_OVERFLOW_COMPARISON); 458338fd1498Szrj } 458438fd1498Szrj (cmp @0 { res; }))))))))) 458538fd1498Szrj 458638fd1498Szrj/* Canonicalizations of BIT_FIELD_REFs. */ 458738fd1498Szrj 458838fd1498Szrj(simplify 458938fd1498Szrj (BIT_FIELD_REF @0 @1 @2) 459038fd1498Szrj (switch 459138fd1498Szrj (if (TREE_CODE (TREE_TYPE (@0)) == COMPLEX_TYPE 459238fd1498Szrj && tree_int_cst_equal (@1, TYPE_SIZE (TREE_TYPE (TREE_TYPE (@0))))) 459338fd1498Szrj (switch 459438fd1498Szrj (if (integer_zerop (@2)) 459538fd1498Szrj (view_convert (realpart @0))) 459638fd1498Szrj (if (tree_int_cst_equal (@2, TYPE_SIZE (TREE_TYPE (TREE_TYPE (@0))))) 459738fd1498Szrj (view_convert (imagpart @0))))) 459838fd1498Szrj (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) 459938fd1498Szrj && INTEGRAL_TYPE_P (type) 460038fd1498Szrj /* On GIMPLE this should only apply to register arguments. */ 460138fd1498Szrj && (! GIMPLE || is_gimple_reg (@0)) 460238fd1498Szrj /* A bit-field-ref that referenced the full argument can be stripped. */ 460338fd1498Szrj && ((compare_tree_int (@1, TYPE_PRECISION (TREE_TYPE (@0))) == 0 460438fd1498Szrj && integer_zerop (@2)) 460538fd1498Szrj /* Low-parts can be reduced to integral conversions. 460638fd1498Szrj ??? The following doesn't work for PDP endian. */ 460738fd1498Szrj || (BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN 460838fd1498Szrj /* Don't even think about BITS_BIG_ENDIAN. 
*/ 460938fd1498Szrj && TYPE_PRECISION (TREE_TYPE (@0)) % BITS_PER_UNIT == 0 461038fd1498Szrj && TYPE_PRECISION (type) % BITS_PER_UNIT == 0 461138fd1498Szrj && compare_tree_int (@2, (BYTES_BIG_ENDIAN 461238fd1498Szrj ? (TYPE_PRECISION (TREE_TYPE (@0)) 461338fd1498Szrj - TYPE_PRECISION (type)) 461438fd1498Szrj : 0)) == 0))) 461538fd1498Szrj (convert @0)))) 461638fd1498Szrj 461738fd1498Szrj/* Simplify vector extracts. */ 461838fd1498Szrj 461938fd1498Szrj(simplify 462038fd1498Szrj (BIT_FIELD_REF CONSTRUCTOR@0 @1 @2) 462138fd1498Szrj (if (VECTOR_TYPE_P (TREE_TYPE (@0)) 462238fd1498Szrj && (types_match (type, TREE_TYPE (TREE_TYPE (@0))) 462338fd1498Szrj || (VECTOR_TYPE_P (type) 462438fd1498Szrj && types_match (TREE_TYPE (type), TREE_TYPE (TREE_TYPE (@0)))))) 462538fd1498Szrj (with 462638fd1498Szrj { 462738fd1498Szrj tree ctor = (TREE_CODE (@0) == SSA_NAME 462838fd1498Szrj ? gimple_assign_rhs1 (SSA_NAME_DEF_STMT (@0)) : @0); 462938fd1498Szrj tree eltype = TREE_TYPE (TREE_TYPE (ctor)); 463038fd1498Szrj unsigned HOST_WIDE_INT width = tree_to_uhwi (TYPE_SIZE (eltype)); 463138fd1498Szrj unsigned HOST_WIDE_INT n = tree_to_uhwi (@1); 463238fd1498Szrj unsigned HOST_WIDE_INT idx = tree_to_uhwi (@2); 463338fd1498Szrj } 463438fd1498Szrj (if (n != 0 463538fd1498Szrj && (idx % width) == 0 463638fd1498Szrj && (n % width) == 0 463738fd1498Szrj && known_le ((idx + n) / width, 463838fd1498Szrj TYPE_VECTOR_SUBPARTS (TREE_TYPE (ctor)))) 463938fd1498Szrj (with 464038fd1498Szrj { 464138fd1498Szrj idx = idx / width; 464238fd1498Szrj n = n / width; 464338fd1498Szrj /* Constructor elements can be subvectors. 
*/ 464438fd1498Szrj poly_uint64 k = 1; 464538fd1498Szrj if (CONSTRUCTOR_NELTS (ctor) != 0) 464638fd1498Szrj { 464738fd1498Szrj tree cons_elem = TREE_TYPE (CONSTRUCTOR_ELT (ctor, 0)->value); 464838fd1498Szrj if (TREE_CODE (cons_elem) == VECTOR_TYPE) 464938fd1498Szrj k = TYPE_VECTOR_SUBPARTS (cons_elem); 465038fd1498Szrj } 465138fd1498Szrj unsigned HOST_WIDE_INT elt, count, const_k; 465238fd1498Szrj } 465338fd1498Szrj (switch 465438fd1498Szrj /* We keep an exact subset of the constructor elements. */ 465538fd1498Szrj (if (multiple_p (idx, k, &elt) && multiple_p (n, k, &count)) 465638fd1498Szrj (if (CONSTRUCTOR_NELTS (ctor) == 0) 465738fd1498Szrj { build_constructor (type, NULL); } 465838fd1498Szrj (if (count == 1) 465938fd1498Szrj (if (elt < CONSTRUCTOR_NELTS (ctor)) 466038fd1498Szrj (view_convert { CONSTRUCTOR_ELT (ctor, elt)->value; }) 466138fd1498Szrj { build_zero_cst (type); }) 466238fd1498Szrj { 466338fd1498Szrj vec<constructor_elt, va_gc> *vals; 466438fd1498Szrj vec_alloc (vals, count); 466538fd1498Szrj for (unsigned i = 0; 466638fd1498Szrj i < count && elt + i < CONSTRUCTOR_NELTS (ctor); ++i) 466738fd1498Szrj CONSTRUCTOR_APPEND_ELT (vals, NULL_TREE, 466838fd1498Szrj CONSTRUCTOR_ELT (ctor, elt + i)->value); 466938fd1498Szrj build_constructor (type, vals); 467038fd1498Szrj }))) 467138fd1498Szrj /* The bitfield references a single constructor element. 
*/ 467238fd1498Szrj (if (k.is_constant (&const_k) 467338fd1498Szrj && idx + n <= (idx / const_k + 1) * const_k) 467438fd1498Szrj (switch 467538fd1498Szrj (if (CONSTRUCTOR_NELTS (ctor) <= idx / const_k) 467638fd1498Szrj { build_zero_cst (type); }) 467738fd1498Szrj (if (n == const_k) 467838fd1498Szrj (view_convert { CONSTRUCTOR_ELT (ctor, idx / const_k)->value; })) 467938fd1498Szrj (BIT_FIELD_REF { CONSTRUCTOR_ELT (ctor, idx / const_k)->value; } 468038fd1498Szrj @1 { bitsize_int ((idx % const_k) * width); }))))))))) 468138fd1498Szrj 468238fd1498Szrj/* Simplify a bit extraction from a bit insertion for the cases with 468338fd1498Szrj the inserted element fully covering the extraction or the insertion 468438fd1498Szrj not touching the extraction. */ 468538fd1498Szrj(simplify 468638fd1498Szrj (BIT_FIELD_REF (bit_insert @0 @1 @ipos) @rsize @rpos) 468738fd1498Szrj (with 468838fd1498Szrj { 468938fd1498Szrj unsigned HOST_WIDE_INT isize; 469038fd1498Szrj if (INTEGRAL_TYPE_P (TREE_TYPE (@1))) 469138fd1498Szrj isize = TYPE_PRECISION (TREE_TYPE (@1)); 469238fd1498Szrj else 469338fd1498Szrj isize = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (@1))); 469438fd1498Szrj } 469538fd1498Szrj (switch 469638fd1498Szrj (if (wi::leu_p (wi::to_wide (@ipos), wi::to_wide (@rpos)) 469738fd1498Szrj && wi::leu_p (wi::to_wide (@rpos) + wi::to_wide (@rsize), 469838fd1498Szrj wi::to_wide (@ipos) + isize)) 469938fd1498Szrj (BIT_FIELD_REF @1 @rsize { wide_int_to_tree (bitsizetype, 470038fd1498Szrj wi::to_wide (@rpos) 470138fd1498Szrj - wi::to_wide (@ipos)); })) 470238fd1498Szrj (if (wi::geu_p (wi::to_wide (@ipos), 470338fd1498Szrj wi::to_wide (@rpos) + wi::to_wide (@rsize)) 470438fd1498Szrj || wi::geu_p (wi::to_wide (@rpos), 470538fd1498Szrj wi::to_wide (@ipos) + isize)) 470638fd1498Szrj (BIT_FIELD_REF @0 @rsize @rpos))))) 4707