xref: /netbsd-src/external/gpl3/gcc.old/dist/gcc/tree-ssa-math-opts.c (revision d90047b5d07facf36e6c01dcc0bded8997ce9cc2)
1 /* Global, SSA-based optimizations using mathematical identities.
2    Copyright (C) 2005-2017 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
10 
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
14 for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 /* Currently, the only mini-pass in this file tries to CSE reciprocal
21    operations.  These are common in sequences such as this one:
22 
23 	modulus = sqrt(x*x + y*y + z*z);
24 	x = x / modulus;
25 	y = y / modulus;
26 	z = z / modulus;
27 
28    that can be optimized to
29 
30 	modulus = sqrt(x*x + y*y + z*z);
31         rmodulus = 1.0 / modulus;
32 	x = x * rmodulus;
33 	y = y * rmodulus;
34 	z = z * rmodulus;
35 
36    We do this for loop invariant divisors, and with this pass whenever
37    we notice that a division has the same divisor multiple times.
38 
39    Of course, like in PRE, we don't insert a division if a dominator
40    already has one.  However, this cannot be done as an extension of
41    PRE for several reasons.
42 
43    First of all, experiments showed that the
44    transformation is not always useful if there are only two divisions
45    by the same divisor.  This is probably because modern processors
46    can pipeline the divisions; on older, in-order processors it should
47    still be effective to optimize two divisions by the same number.
48    We make this a parameter (the min_divisions_for_recip_mul target hook),
49    and it shall be called N in the remainder of this comment.
50 
51    Second, if trapping math is active, we have less freedom on where
52    to insert divisions: we can only do so in basic blocks that already
53    contain one.  (If divisions don't trap, we can instead insert the
54    division elsewhere, in blocks that are common dominators of those
55    that already contain one.)
56 
57    We really don't want to compute the reciprocal unless a division will
58    be found.  To do this, we won't insert the division in a basic block
59    that has fewer than N divisions *post-dominating* it.
60 
61    The algorithm constructs a subset of the dominator tree, holding the
62    blocks containing the divisions and their common dominators,
63    and walks it twice.  The first walk is in post-order, and it annotates
64    each block with the number of divisions that post-dominate it: this
65    gives information on where divisions can be inserted profitably.
66    The second walk is in pre-order, and it inserts divisions as explained
67    above, and replaces divisions by multiplications.
68 
69    In the best case, the cost of the pass is O(n_statements).  In the
70    worst case, the cost is due to creating the dominator tree subset,
71    with a cost of O(n_basic_blocks ^ 2); however this can only happen
72    for n_statements / n_basic_blocks statements.  So, the amortized cost
73    of creating the dominator tree subset is O(n_basic_blocks) and the
74    worst-case cost of the pass is O(n_statements * n_basic_blocks).
75 
76    More practically, the cost will be small because there are few
77    divisions, and they tend to be in the same basic block, so insert_bb
78    is called very few times.
79 
80    If we did this using domwalk.c, an efficient implementation would have
81    to work on all the variables in a single pass, because we could not
82    work on just a subset of the dominator tree, as we do now, and the
83    cost would also be something like O(n_statements * n_basic_blocks).
84    The data structures would be more complex in order to work on all the
85    variables in a single pass.  */
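
/* As a concrete sketch of the machinery below: in the MODULUS example
   above all three divisions share one basic block, so register_division_in
   records a single "struct occurrence" with num_divisions == 3.  Provided
   the target's threshold N is at most 3, insert_reciprocals emits
   reciptmp = 1.0 / modulus immediately before the first division (its
   "case 1"), and replace_reciprocal (for blocks optimized for speed) then
   rewrites each division into a multiplication by reciptmp.  */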
86 
87 #include "config.h"
88 #include "system.h"
89 #include "coretypes.h"
90 #include "backend.h"
91 #include "target.h"
92 #include "rtl.h"
93 #include "tree.h"
94 #include "gimple.h"
95 #include "predict.h"
96 #include "alloc-pool.h"
97 #include "tree-pass.h"
98 #include "ssa.h"
99 #include "optabs-tree.h"
100 #include "gimple-pretty-print.h"
101 #include "alias.h"
102 #include "fold-const.h"
103 #include "gimple-fold.h"
104 #include "gimple-iterator.h"
105 #include "gimplify.h"
106 #include "gimplify-me.h"
107 #include "stor-layout.h"
108 #include "tree-cfg.h"
109 #include "tree-dfa.h"
110 #include "tree-ssa.h"
111 #include "builtins.h"
112 #include "params.h"
113 #include "internal-fn.h"
114 #include "case-cfn-macros.h"
115 #include "optabs-libfuncs.h"
116 #include "tree-eh.h"
117 #include "targhooks.h"
118 
119 /* This structure represents one basic block that either computes a
120    division, or is a common dominator for basic blocks that compute a
121    division.  */
122 struct occurrence {
123   /* The basic block represented by this structure.  */
124   basic_block bb;
125 
126   /* If non-NULL, the SSA_NAME holding the definition for a reciprocal
127      inserted in BB.  */
128   tree recip_def;
129 
130   /* If non-NULL, the GIMPLE_ASSIGN for a reciprocal computation that
131      was inserted in BB.  */
132   gimple *recip_def_stmt;
133 
134   /* Pointer to a list of "struct occurrence"s for blocks dominated
135      by BB.  */
136   struct occurrence *children;
137 
138   /* Pointer to the next "struct occurrence" in the list of blocks
139      sharing a common dominator.  */
140   struct occurrence *next;
141 
142   /* The number of divisions that are in BB before compute_merit.  The
143      number of divisions that are in BB or post-dominate it after
144      compute_merit.  */
145   int num_divisions;
146 
147   /* True if the basic block has a division, false if it is a common
148      dominator for basic blocks that do.  If it is false and trapping
149      math is active, BB is not a candidate for inserting a reciprocal.  */
150   bool bb_has_division;
151 };
152 
153 static struct
154 {
155   /* Number of 1.0/X ops inserted.  */
156   int rdivs_inserted;
157 
158   /* Number of 1.0/FUNC ops inserted.  */
159   int rfuncs_inserted;
160 } reciprocal_stats;
161 
162 static struct
163 {
164   /* Number of cexpi calls inserted.  */
165   int inserted;
166 } sincos_stats;
167 
168 static struct
169 {
170   /* Number of hand-written 16-bit nop / bswaps found.  */
171   int found_16bit;
172 
173   /* Number of hand-written 32-bit nop / bswaps found.  */
174   int found_32bit;
175 
176   /* Number of hand-written 64-bit nop / bswaps found.  */
177   int found_64bit;
178 } nop_stats, bswap_stats;
179 
180 static struct
181 {
182   /* Number of widening multiplication ops inserted.  */
183   int widen_mults_inserted;
184 
185   /* Number of integer multiply-and-accumulate ops inserted.  */
186   int maccs_inserted;
187 
188   /* Number of fp fused multiply-add ops inserted.  */
189   int fmas_inserted;
190 
191   /* Number of divmod calls inserted.  */
192   int divmod_calls_inserted;
193 } widen_mul_stats;
194 
195 /* The instance of "struct occurrence" representing the highest
196    interesting block in the dominator tree.  */
197 static struct occurrence *occ_head;
198 
199 /* Allocation pool for getting instances of "struct occurrence".  */
200 static object_allocator<occurrence> *occ_pool;
201 
202 
203 
204 /* Allocate and return a new struct occurrence for basic block BB, whose
205    children list is headed by CHILDREN.  */
206 static struct occurrence *
207 occ_new (basic_block bb, struct occurrence *children)
208 {
209   struct occurrence *occ;
210 
211   bb->aux = occ = occ_pool->allocate ();
212   memset (occ, 0, sizeof (struct occurrence));
213 
214   occ->bb = bb;
215   occ->children = children;
216   return occ;
217 }
218 
219 
220 /* Insert NEW_OCC into our subset of the dominator tree.  P_HEAD points to a
221    list of "struct occurrence"s, one per basic block, having IDOM as
222    their common dominator.
223 
224    We try to insert NEW_OCC as deep as possible in the tree, and we also
225    insert any other block that is a common dominator for BB and one
226    block already in the tree.  */
227 
228 static void
229 insert_bb (struct occurrence *new_occ, basic_block idom,
230 	   struct occurrence **p_head)
231 {
232   struct occurrence *occ, **p_occ;
233 
234   for (p_occ = p_head; (occ = *p_occ) != NULL; )
235     {
236       basic_block bb = new_occ->bb, occ_bb = occ->bb;
237       basic_block dom = nearest_common_dominator (CDI_DOMINATORS, occ_bb, bb);
238       if (dom == bb)
239 	{
240 	  /* BB dominates OCC_BB.  OCC becomes NEW_OCC's child: remove OCC
241 	     from its list.  */
242 	  *p_occ = occ->next;
243 	  occ->next = new_occ->children;
244 	  new_occ->children = occ;
245 
246 	  /* Try the next block (it may also be dominated by BB).  */
247 	}
248 
249       else if (dom == occ_bb)
250 	{
251 	  /* OCC_BB dominates BB.  Tail recurse to look deeper.  */
252 	  insert_bb (new_occ, dom, &occ->children);
253 	  return;
254 	}
255 
256       else if (dom != idom)
257 	{
258 	  gcc_assert (!dom->aux);
259 
260 	  /* There is a dominator between IDOM and BB, add it and make
261 	     two children out of NEW_OCC and OCC.  First, remove OCC from
262 	     its list.	*/
263 	  *p_occ = occ->next;
264 	  new_occ->next = occ;
265 	  occ->next = NULL;
266 
267 	  /* None of the previous blocks has DOM as a dominator: if we tail
268 	     recursed, we would reexamine them uselessly. Just switch BB with
269 	     DOM, and go on looking for blocks dominated by DOM.  */
270           new_occ = occ_new (dom, new_occ);
271 	}
272 
273       else
274 	{
275 	  /* Nothing special, go on with the next element.  */
276 	  p_occ = &occ->next;
277 	}
278     }
279 
280   /* No deeper place was found for BB.  Make NEW_OCC another child of IDOM.  */
281   new_occ->next = *p_head;
282   *p_head = new_occ;
283 }
284 
285 /* Register that we found a division in BB.  */
286 
287 static inline void
288 register_division_in (basic_block bb)
289 {
290   struct occurrence *occ;
291 
292   occ = (struct occurrence *) bb->aux;
293   if (!occ)
294     {
295       occ = occ_new (bb, NULL);
296       insert_bb (occ, ENTRY_BLOCK_PTR_FOR_FN (cfun), &occ_head);
297     }
298 
299   occ->bb_has_division = true;
300   occ->num_divisions++;
301 }
302 
303 
304 /* Compute the number of divisions that postdominate each block in OCC and
305    its children.  */
306 
307 static void
308 compute_merit (struct occurrence *occ)
309 {
310   struct occurrence *occ_child;
311   basic_block dom = occ->bb;
312 
313   for (occ_child = occ->children; occ_child; occ_child = occ_child->next)
314     {
315       basic_block bb;
316       if (occ_child->children)
317         compute_merit (occ_child);
318 
319       if (flag_exceptions)
320 	bb = single_noncomplex_succ (dom);
321       else
322 	bb = dom;
323 
324       if (dominated_by_p (CDI_POST_DOMINATORS, bb, occ_child->bb))
325         occ->num_divisions += occ_child->num_divisions;
326     }
327 }
328 
329 
330 /* Return whether USE_STMT is a floating-point division by DEF.  */
331 static inline bool
332 is_division_by (gimple *use_stmt, tree def)
333 {
334   return is_gimple_assign (use_stmt)
335 	 && gimple_assign_rhs_code (use_stmt) == RDIV_EXPR
336 	 && gimple_assign_rhs2 (use_stmt) == def
337 	 /* Do not recognize x / x as valid division, as we are getting
338 	    confused later by replacing all immediate uses x in such
339 	    confused later by replacing all immediate uses of x in such
340 	 && gimple_assign_rhs1 (use_stmt) != def;
341 }
342 
343 /* Walk the subset of the dominator tree rooted at OCC, setting the
344    RECIP_DEF field to a definition of 1.0 / DEF that can be used in
345    the given basic block.  The field may be left NULL, of course,
346    if it is not possible or profitable to do the optimization.
347 
348    DEF_GSI is an iterator pointing at the statement defining DEF.
349    If RECIP_DEF is set, a dominator already has a computation that can
350    be used.  */
351 
352 static void
353 insert_reciprocals (gimple_stmt_iterator *def_gsi, struct occurrence *occ,
354 		    tree def, tree recip_def, int threshold)
355 {
356   tree type;
357   gassign *new_stmt;
358   gimple_stmt_iterator gsi;
359   struct occurrence *occ_child;
360 
361   if (!recip_def
362       && (occ->bb_has_division || !flag_trapping_math)
363       && occ->num_divisions >= threshold)
364     {
365       /* Make a variable with the replacement and substitute it.  */
366       type = TREE_TYPE (def);
367       recip_def = create_tmp_reg (type, "reciptmp");
368       new_stmt = gimple_build_assign (recip_def, RDIV_EXPR,
369 				      build_one_cst (type), def);
370 
371       if (occ->bb_has_division)
372         {
373           /* Case 1: insert before an existing division.  */
374           gsi = gsi_after_labels (occ->bb);
375           while (!gsi_end_p (gsi) && !is_division_by (gsi_stmt (gsi), def))
376 	    gsi_next (&gsi);
377 
378           gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
379         }
380       else if (def_gsi && occ->bb == def_gsi->bb)
381         {
382           /* Case 2: insert right after the definition.  Note that this will
383 	     never happen if the definition statement can throw, because in
384 	     that case the sole successor of the statement's basic block will
385 	     dominate all the uses as well.  */
386           gsi_insert_after (def_gsi, new_stmt, GSI_NEW_STMT);
387         }
388       else
389         {
390           /* Case 3: insert in a basic block not containing defs/uses.  */
391           gsi = gsi_after_labels (occ->bb);
392           gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
393         }
394 
395       reciprocal_stats.rdivs_inserted++;
396 
397       occ->recip_def_stmt = new_stmt;
398     }
399 
400   occ->recip_def = recip_def;
401   for (occ_child = occ->children; occ_child; occ_child = occ_child->next)
402     insert_reciprocals (def_gsi, occ_child, def, recip_def, threshold);
403 }
404 
405 
406 /* Replace the division at USE_P with a multiplication by the reciprocal, if
407    possible.  */
408 
409 static inline void
410 replace_reciprocal (use_operand_p use_p)
411 {
412   gimple *use_stmt = USE_STMT (use_p);
413   basic_block bb = gimple_bb (use_stmt);
414   struct occurrence *occ = (struct occurrence *) bb->aux;
415 
416   if (optimize_bb_for_speed_p (bb)
417       && occ->recip_def && use_stmt != occ->recip_def_stmt)
418     {
419       gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
420       gimple_assign_set_rhs_code (use_stmt, MULT_EXPR);
421       SET_USE (use_p, occ->recip_def);
422       fold_stmt_inplace (&gsi);
423       update_stmt (use_stmt);
424     }
425 }
426 
427 
428 /* Free OCC and return one more "struct occurrence" to be freed.  */
429 
430 static struct occurrence *
431 free_bb (struct occurrence *occ)
432 {
433   struct occurrence *child, *next;
434 
435   /* First get the two pointers hanging off OCC.  */
436   next = occ->next;
437   child = occ->children;
438   occ->bb->aux = NULL;
439   occ_pool->remove (occ);
440 
441   /* Now ensure that we don't recurse unless it is necessary.  */
442   if (!child)
443     return next;
444   else
445     {
446       while (next)
447 	next = free_bb (next);
448 
449       return child;
450     }
451 }
452 
453 
454 /* Look for floating-point divisions among DEF's uses, and try to
455    replace them by multiplications with the reciprocal.  Add
456    as many statements computing the reciprocal as needed.
457 
458    DEF must be a GIMPLE register of a floating-point type.  */
459 
460 static void
461 execute_cse_reciprocals_1 (gimple_stmt_iterator *def_gsi, tree def)
462 {
463   use_operand_p use_p;
464   imm_use_iterator use_iter;
465   struct occurrence *occ;
466   int count = 0, threshold;
467 
468   gcc_assert (FLOAT_TYPE_P (TREE_TYPE (def)) && is_gimple_reg (def));
469 
470   FOR_EACH_IMM_USE_FAST (use_p, use_iter, def)
471     {
472       gimple *use_stmt = USE_STMT (use_p);
473       if (is_division_by (use_stmt, def))
474 	{
475 	  register_division_in (gimple_bb (use_stmt));
476 	  count++;
477 	}
478     }
479 
480   /* Do the expensive part only if we can hope to optimize something.  */
481   threshold = targetm.min_divisions_for_recip_mul (TYPE_MODE (TREE_TYPE (def)));
482   if (count >= threshold)
483     {
484       gimple *use_stmt;
485       for (occ = occ_head; occ; occ = occ->next)
486 	{
487 	  compute_merit (occ);
488 	  insert_reciprocals (def_gsi, occ, def, NULL, threshold);
489 	}
490 
491       FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, def)
492 	{
493 	  if (is_division_by (use_stmt, def))
494 	    {
495 	      FOR_EACH_IMM_USE_ON_STMT (use_p, use_iter)
496 		replace_reciprocal (use_p);
497 	    }
498 	}
499     }
500 
501   for (occ = occ_head; occ; )
502     occ = free_bb (occ);
503 
504   occ_head = NULL;
505 }
506 
507 /* Return an internal function that implements the reciprocal of CALL,
508    or IFN_LAST if there is no such function that the target supports.  */
509 
510 internal_fn
511 internal_fn_reciprocal (gcall *call)
512 {
513   internal_fn ifn;
514 
515   switch (gimple_call_combined_fn (call))
516     {
517     CASE_CFN_SQRT:
518       ifn = IFN_RSQRT;
519       break;
520 
521     default:
522       return IFN_LAST;
523     }
524 
525   tree_pair types = direct_internal_fn_types (ifn, call);
526   if (!direct_internal_fn_supported_p (ifn, types, OPTIMIZE_FOR_SPEED))
527     return IFN_LAST;
528 
529   return ifn;
530 }
531 
532 /* Go through all the floating-point SSA_NAMEs, and call
533    execute_cse_reciprocals_1 on each of them.  */
534 namespace {
535 
536 const pass_data pass_data_cse_reciprocals =
537 {
538   GIMPLE_PASS, /* type */
539   "recip", /* name */
540   OPTGROUP_NONE, /* optinfo_flags */
541   TV_NONE, /* tv_id */
542   PROP_ssa, /* properties_required */
543   0, /* properties_provided */
544   0, /* properties_destroyed */
545   0, /* todo_flags_start */
546   TODO_update_ssa, /* todo_flags_finish */
547 };
548 
549 class pass_cse_reciprocals : public gimple_opt_pass
550 {
551 public:
552   pass_cse_reciprocals (gcc::context *ctxt)
553     : gimple_opt_pass (pass_data_cse_reciprocals, ctxt)
554   {}
555 
556   /* opt_pass methods: */
557   virtual bool gate (function *) { return optimize && flag_reciprocal_math; }
558   virtual unsigned int execute (function *);
559 
560 }; // class pass_cse_reciprocals
561 
562 unsigned int
563 pass_cse_reciprocals::execute (function *fun)
564 {
565   basic_block bb;
566   tree arg;
567 
568   occ_pool = new object_allocator<occurrence> ("dominators for recip");
569 
570   memset (&reciprocal_stats, 0, sizeof (reciprocal_stats));
571   calculate_dominance_info (CDI_DOMINATORS);
572   calculate_dominance_info (CDI_POST_DOMINATORS);
573 
574   if (flag_checking)
575     FOR_EACH_BB_FN (bb, fun)
576       gcc_assert (!bb->aux);
577 
578   for (arg = DECL_ARGUMENTS (fun->decl); arg; arg = DECL_CHAIN (arg))
579     if (FLOAT_TYPE_P (TREE_TYPE (arg))
580 	&& is_gimple_reg (arg))
581       {
582 	tree name = ssa_default_def (fun, arg);
583 	if (name)
584 	  execute_cse_reciprocals_1 (NULL, name);
585       }
586 
587   FOR_EACH_BB_FN (bb, fun)
588     {
589       tree def;
590 
591       for (gphi_iterator gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
592 	   gsi_next (&gsi))
593 	{
594 	  gphi *phi = gsi.phi ();
595 	  def = PHI_RESULT (phi);
596 	  if (! virtual_operand_p (def)
597 	      && FLOAT_TYPE_P (TREE_TYPE (def)))
598 	    execute_cse_reciprocals_1 (NULL, def);
599 	}
600 
601       for (gimple_stmt_iterator gsi = gsi_after_labels (bb); !gsi_end_p (gsi);
602 	   gsi_next (&gsi))
603         {
604 	  gimple *stmt = gsi_stmt (gsi);
605 
606 	  if (gimple_has_lhs (stmt)
607 	      && (def = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_DEF)) != NULL
608 	      && FLOAT_TYPE_P (TREE_TYPE (def))
609 	      && TREE_CODE (def) == SSA_NAME)
610 	    execute_cse_reciprocals_1 (&gsi, def);
611 	}
612 
613       if (optimize_bb_for_size_p (bb))
614         continue;
615 
616       /* Scan for a/func(b) and convert it to reciprocal a*rfunc(b).  */
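      /* For example (a sketch, assuming the target provides a reciprocal
	 square root): t = sqrt (b); x = a / t; can become
	 t = rsqrt (b); x = a * t; as long as every non-debug use of T is
	 such a division, so that redefining T is safe.  */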
617       for (gimple_stmt_iterator gsi = gsi_after_labels (bb); !gsi_end_p (gsi);
618 	   gsi_next (&gsi))
619         {
620 	  gimple *stmt = gsi_stmt (gsi);
621 
622 	  if (is_gimple_assign (stmt)
623 	      && gimple_assign_rhs_code (stmt) == RDIV_EXPR)
624 	    {
625 	      tree arg1 = gimple_assign_rhs2 (stmt);
626 	      gimple *stmt1;
627 
628 	      if (TREE_CODE (arg1) != SSA_NAME)
629 		continue;
630 
631 	      stmt1 = SSA_NAME_DEF_STMT (arg1);
632 
633 	      if (is_gimple_call (stmt1)
634 		  && gimple_call_lhs (stmt1))
635 		{
636 		  bool fail;
637 		  imm_use_iterator ui;
638 		  use_operand_p use_p;
639 		  tree fndecl = NULL_TREE;
640 
641 		  gcall *call = as_a <gcall *> (stmt1);
642 		  internal_fn ifn = internal_fn_reciprocal (call);
643 		  if (ifn == IFN_LAST)
644 		    {
645 		      fndecl = gimple_call_fndecl (call);
646 		      if (!fndecl
647 			  || DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_MD)
648 			continue;
649 		      fndecl = targetm.builtin_reciprocal (fndecl);
650 		      if (!fndecl)
651 			continue;
652 		    }
653 
654 		  /* Check that all uses of the SSA name are divisions,
655 		     otherwise replacing the defining statement will do
656 		     the wrong thing.  */
657 		  fail = false;
658 		  FOR_EACH_IMM_USE_FAST (use_p, ui, arg1)
659 		    {
660 		      gimple *stmt2 = USE_STMT (use_p);
661 		      if (is_gimple_debug (stmt2))
662 			continue;
663 		      if (!is_gimple_assign (stmt2)
664 			  || gimple_assign_rhs_code (stmt2) != RDIV_EXPR
665 			  || gimple_assign_rhs1 (stmt2) == arg1
666 			  || gimple_assign_rhs2 (stmt2) != arg1)
667 			{
668 			  fail = true;
669 			  break;
670 			}
671 		    }
672 		  if (fail)
673 		    continue;
674 
675 		  gimple_replace_ssa_lhs (call, arg1);
676 		  if (gimple_call_internal_p (call) != (ifn != IFN_LAST))
677 		    {
678 		      auto_vec<tree, 4> args;
679 		      for (unsigned int i = 0;
680 			   i < gimple_call_num_args (call); i++)
681 			args.safe_push (gimple_call_arg (call, i));
682 		      gcall *stmt2;
683 		      if (ifn == IFN_LAST)
684 			stmt2 = gimple_build_call_vec (fndecl, args);
685 		      else
686 			stmt2 = gimple_build_call_internal_vec (ifn, args);
687 		      gimple_call_set_lhs (stmt2, arg1);
688 		      if (gimple_vdef (call))
689 			{
690 			  gimple_set_vdef (stmt2, gimple_vdef (call));
691 			  SSA_NAME_DEF_STMT (gimple_vdef (stmt2)) = stmt2;
692 			}
693 		      gimple_set_vuse (stmt2, gimple_vuse (call));
694 		      gimple_stmt_iterator gsi2 = gsi_for_stmt (call);
695 		      gsi_replace (&gsi2, stmt2, true);
696 		    }
697 		  else
698 		    {
699 		      if (ifn == IFN_LAST)
700 			gimple_call_set_fndecl (call, fndecl);
701 		      else
702 			gimple_call_set_internal_fn (call, ifn);
703 		      update_stmt (call);
704 		    }
705 		  reciprocal_stats.rfuncs_inserted++;
706 
707 		  FOR_EACH_IMM_USE_STMT (stmt, ui, arg1)
708 		    {
709 		      gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
710 		      gimple_assign_set_rhs_code (stmt, MULT_EXPR);
711 		      fold_stmt_inplace (&gsi);
712 		      update_stmt (stmt);
713 		    }
714 		}
715 	    }
716 	}
717     }
718 
719   statistics_counter_event (fun, "reciprocal divs inserted",
720 			    reciprocal_stats.rdivs_inserted);
721   statistics_counter_event (fun, "reciprocal functions inserted",
722 			    reciprocal_stats.rfuncs_inserted);
723 
724   free_dominance_info (CDI_DOMINATORS);
725   free_dominance_info (CDI_POST_DOMINATORS);
726   delete occ_pool;
727   return 0;
728 }
729 
730 } // anon namespace
731 
732 gimple_opt_pass *
733 make_pass_cse_reciprocals (gcc::context *ctxt)
734 {
735   return new pass_cse_reciprocals (ctxt);
736 }
737 
738 /* Records statement USE_STMT in the vector of statements STMTS if its
739    basic block is dominated by *TOP_BB, dominates *TOP_BB, or *TOP_BB is
740    not yet initialized.  Returns true if the statement was pushed on the
741    vector.  Adjusts *TOP_BB to be the basic block dominating all
742    statements in the vector.  */
743 
744 static bool
745 maybe_record_sincos (vec<gimple *> *stmts,
746 		     basic_block *top_bb, gimple *use_stmt)
747 {
748   basic_block use_bb = gimple_bb (use_stmt);
749   if (*top_bb
750       && (*top_bb == use_bb
751 	  || dominated_by_p (CDI_DOMINATORS, use_bb, *top_bb)))
752     stmts->safe_push (use_stmt);
753   else if (!*top_bb
754 	   || dominated_by_p (CDI_DOMINATORS, *top_bb, use_bb))
755     {
756       stmts->safe_push (use_stmt);
757       *top_bb = use_bb;
758     }
759   else
760     return false;
761 
762   return true;
763 }
764 
765 /* Look for sin, cos and cexpi calls with the same argument NAME and
766    create a single call to cexpi, CSEing the result in this case.
767    We first walk over all immediate uses of the argument, collecting in a
768    vector the statements that we can CSE; in a second pass we replace each
769    statement's rhs with a REALPART or IMAGPART expression on the result of
770    the cexpi call, which we insert before the use statement that dominates
771    all other candidates.  */
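/* For instance (a sketch of the intended transformation):

	s = sin (x);			sincostmp = cexpi (x);
	c = cos (x);		-->	s = IMAGPART_EXPR <sincostmp>;
					c = REALPART_EXPR <sincostmp>;

   since cexpi (x) computes cos (x) + i*sin (x) in a single call.  */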
772 
773 static bool
774 execute_cse_sincos_1 (tree name)
775 {
776   gimple_stmt_iterator gsi;
777   imm_use_iterator use_iter;
778   tree fndecl, res, type;
779   gimple *def_stmt, *use_stmt, *stmt;
780   int seen_cos = 0, seen_sin = 0, seen_cexpi = 0;
781   auto_vec<gimple *> stmts;
782   basic_block top_bb = NULL;
783   int i;
784   bool cfg_changed = false;
785 
786   type = TREE_TYPE (name);
787   FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, name)
788     {
789       if (gimple_code (use_stmt) != GIMPLE_CALL
790 	  || !gimple_call_lhs (use_stmt))
791 	continue;
792 
793       switch (gimple_call_combined_fn (use_stmt))
794 	{
795 	CASE_CFN_COS:
796 	  seen_cos |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
797 	  break;
798 
799 	CASE_CFN_SIN:
800 	  seen_sin |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
801 	  break;
802 
803 	CASE_CFN_CEXPI:
804 	  seen_cexpi |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
805 	  break;
806 
807 	default:;
808 	}
809     }
810 
811   if (seen_cos + seen_sin + seen_cexpi <= 1)
812     return false;
813 
814   /* Simply insert cexpi at the beginning of top_bb but not earlier than
815      the name def statement.  */
816   fndecl = mathfn_built_in (type, BUILT_IN_CEXPI);
817   if (!fndecl)
818     return false;
819   stmt = gimple_build_call (fndecl, 1, name);
820   res = make_temp_ssa_name (TREE_TYPE (TREE_TYPE (fndecl)), stmt, "sincostmp");
821   gimple_call_set_lhs (stmt, res);
822 
823   def_stmt = SSA_NAME_DEF_STMT (name);
824   if (!SSA_NAME_IS_DEFAULT_DEF (name)
825       && gimple_code (def_stmt) != GIMPLE_PHI
826       && gimple_bb (def_stmt) == top_bb)
827     {
828       gsi = gsi_for_stmt (def_stmt);
829       gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
830     }
831   else
832     {
833       gsi = gsi_after_labels (top_bb);
834       gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
835     }
836   sincos_stats.inserted++;
837 
838   /* And adjust the recorded old call sites.  */
839   for (i = 0; stmts.iterate (i, &use_stmt); ++i)
840     {
841       tree rhs = NULL;
842 
843       switch (gimple_call_combined_fn (use_stmt))
844 	{
845 	CASE_CFN_COS:
846 	  rhs = fold_build1 (REALPART_EXPR, type, res);
847 	  break;
848 
849 	CASE_CFN_SIN:
850 	  rhs = fold_build1 (IMAGPART_EXPR, type, res);
851 	  break;
852 
853 	CASE_CFN_CEXPI:
854 	  rhs = res;
855 	  break;
856 
857 	default:;
858 	  gcc_unreachable ();
859 	}
860 
861 	/* Replace call with a copy.  */
862 	stmt = gimple_build_assign (gimple_call_lhs (use_stmt), rhs);
863 
864 	gsi = gsi_for_stmt (use_stmt);
865 	gsi_replace (&gsi, stmt, true);
866 	if (gimple_purge_dead_eh_edges (gimple_bb (stmt)))
867 	  cfg_changed = true;
868     }
869 
870   return cfg_changed;
871 }
872 
873 /* To evaluate powi(x,n), the floating point value x raised to the
874    constant integer exponent n, we use a hybrid algorithm that
875    combines the "window method" with look-up tables.  For an
876    introduction to exponentiation algorithms and "addition chains",
877    see section 4.6.3, "Evaluation of Powers" of Donald E. Knuth,
878    "Seminumerical Algorithms", Vol. 2, "The Art of Computer Programming",
879    3rd Edition, 1998, and Daniel M. Gordon, "A Survey of Fast Exponentiation
880    Methods", Journal of Algorithms, Vol. 27, pp. 129-146, 1998.  */
881 
882 /* Provide a default value for POWI_MAX_MULTS, the maximum number of
883    multiplications to inline before calling the system library's pow
884    function.  powi(x,n) requires at worst 2*bits(n)-2 multiplications,
885    so this default never requires calling pow, powf or powl.  */
886 
887 #ifndef POWI_MAX_MULTS
888 #define POWI_MAX_MULTS  (2*HOST_BITS_PER_WIDE_INT-2)
889 #endif
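
/* For instance, with a 64-bit HOST_WIDE_INT the default above works out to
   2*64 - 2 == 126; the bound comes from binary exponentiation, which needs
   at most bits(n) - 1 squarings plus bits(n) - 1 further multiplications
   for the set bits of N.  */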
890 
891 /* The size of the "optimal power tree" lookup table.  All
892    exponents less than this value are simply looked up in the
893    powi_table below.  This threshold is also used to size the
894    cache of pseudo registers that hold intermediate results.  */
895 #define POWI_TABLE_SIZE 256
896 
897 /* The size, in bits, of the window used in the "window method"
898    exponentiation algorithm.  This is equivalent to a radix of
899    (1<<POWI_WINDOW_SIZE) in the corresponding "m-ary method".  */
900 #define POWI_WINDOW_SIZE 3
901 
902 /* The following table is an efficient representation of an
903    "optimal power tree".  For each value, i, the corresponding
904    value, j, in the table states that an optimal evaluation
905    sequence for calculating pow(x,i) can be found by evaluating
906    pow(x,j)*pow(x,i-j).  An optimal power tree for the first
907    100 integers is given in Knuth's "Seminumerical Algorithms".  */
908 
909 static const unsigned char powi_table[POWI_TABLE_SIZE] =
910   {
911       0,   1,   1,   2,   2,   3,   3,   4,  /*   0 -   7 */
912       4,   6,   5,   6,   6,  10,   7,   9,  /*   8 -  15 */
913       8,  16,   9,  16,  10,  12,  11,  13,  /*  16 -  23 */
914      12,  17,  13,  18,  14,  24,  15,  26,  /*  24 -  31 */
915      16,  17,  17,  19,  18,  33,  19,  26,  /*  32 -  39 */
916      20,  25,  21,  40,  22,  27,  23,  44,  /*  40 -  47 */
917      24,  32,  25,  34,  26,  29,  27,  44,  /*  48 -  55 */
918      28,  31,  29,  34,  30,  60,  31,  36,  /*  56 -  63 */
919      32,  64,  33,  34,  34,  46,  35,  37,  /*  64 -  71 */
920      36,  65,  37,  50,  38,  48,  39,  69,  /*  72 -  79 */
921      40,  49,  41,  43,  42,  51,  43,  58,  /*  80 -  87 */
922      44,  64,  45,  47,  46,  59,  47,  76,  /*  88 -  95 */
923      48,  65,  49,  66,  50,  67,  51,  66,  /*  96 - 103 */
924      52,  70,  53,  74,  54, 104,  55,  74,  /* 104 - 111 */
925      56,  64,  57,  69,  58,  78,  59,  68,  /* 112 - 119 */
926      60,  61,  61,  80,  62,  75,  63,  68,  /* 120 - 127 */
927      64,  65,  65, 128,  66, 129,  67,  90,  /* 128 - 135 */
928      68,  73,  69, 131,  70,  94,  71,  88,  /* 136 - 143 */
929      72, 128,  73,  98,  74, 132,  75, 121,  /* 144 - 151 */
930      76, 102,  77, 124,  78, 132,  79, 106,  /* 152 - 159 */
931      80,  97,  81, 160,  82,  99,  83, 134,  /* 160 - 167 */
932      84,  86,  85,  95,  86, 160,  87, 100,  /* 168 - 175 */
933      88, 113,  89,  98,  90, 107,  91, 122,  /* 176 - 183 */
934      92, 111,  93, 102,  94, 126,  95, 150,  /* 184 - 191 */
935      96, 128,  97, 130,  98, 133,  99, 195,  /* 192 - 199 */
936     100, 128, 101, 123, 102, 164, 103, 138,  /* 200 - 207 */
937     104, 145, 105, 146, 106, 109, 107, 149,  /* 208 - 215 */
938     108, 200, 109, 146, 110, 170, 111, 157,  /* 216 - 223 */
939     112, 128, 113, 130, 114, 182, 115, 132,  /* 224 - 231 */
940     116, 200, 117, 132, 118, 158, 119, 206,  /* 232 - 239 */
941     120, 240, 121, 162, 122, 147, 123, 152,  /* 240 - 247 */
942     124, 166, 125, 214, 126, 138, 127, 153,  /* 248 - 255 */
943   };
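
/* As a worked example of how this table drives powi_as_mults_1 below:
   powi_table[13] == 10, so x**13 is computed as x**10 * x**3; recursing,
   x**10 == x**5 * x**5, x**5 == x**3 * x**2, x**3 == x**2 * x and
   x**2 == x * x, for a total of five multiplications, each intermediate
   power being built only once thanks to the cache.  */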
944 
945 
946 /* Return the number of multiplications required to calculate
947    powi(x,n) where n is less than POWI_TABLE_SIZE.  This is a
948    subroutine of powi_cost.  CACHE is an array indicating
949    which exponents have already been calculated.  */
950 
951 static int
952 powi_lookup_cost (unsigned HOST_WIDE_INT n, bool *cache)
953 {
954   /* If we've already calculated this exponent, then this evaluation
955      doesn't require any additional multiplications.  */
956   if (cache[n])
957     return 0;
958 
959   cache[n] = true;
960   return powi_lookup_cost (n - powi_table[n], cache)
961 	 + powi_lookup_cost (powi_table[n], cache) + 1;
962 }
963 
964 /* Return the number of multiplications required to calculate
965    powi(x,n) for an arbitrary x, given the exponent N.  This
966    function needs to be kept in sync with powi_as_mults below.  */
967 
968 static int
969 powi_cost (HOST_WIDE_INT n)
970 {
971   bool cache[POWI_TABLE_SIZE];
972   unsigned HOST_WIDE_INT digit;
973   unsigned HOST_WIDE_INT val;
974   int result;
975 
976   if (n == 0)
977     return 0;
978 
979   /* Ignore the reciprocal when calculating the cost.  */
980   val = (n < 0) ? -n : n;
981 
982   /* Initialize the exponent cache.  */
983   memset (cache, 0, POWI_TABLE_SIZE * sizeof (bool));
984   cache[1] = true;
985 
986   result = 0;
987 
988   while (val >= POWI_TABLE_SIZE)
989     {
990       if (val & 1)
991 	{
992 	  digit = val & ((1 << POWI_WINDOW_SIZE) - 1);
993 	  result += powi_lookup_cost (digit, cache)
994 		    + POWI_WINDOW_SIZE + 1;
995 	  val >>= POWI_WINDOW_SIZE;
996 	}
997       else
998 	{
999 	  val >>= 1;
1000 	  result++;
1001 	}
1002     }
1003 
1004   return result + powi_lookup_cost (val, cache);
1005 }
1006 
1007 /* Recursive subroutine of powi_as_mults.  This function takes the
1008    array, CACHE, of already calculated exponents and an exponent N and
1009    returns a tree that corresponds to CACHE[1]**N, with type TYPE.  */
1010 
1011 static tree
1012 powi_as_mults_1 (gimple_stmt_iterator *gsi, location_t loc, tree type,
1013 		 HOST_WIDE_INT n, tree *cache)
1014 {
1015   tree op0, op1, ssa_target;
1016   unsigned HOST_WIDE_INT digit;
1017   gassign *mult_stmt;
1018 
1019   if (n < POWI_TABLE_SIZE && cache[n])
1020     return cache[n];
1021 
1022   ssa_target = make_temp_ssa_name (type, NULL, "powmult");
1023 
1024   if (n < POWI_TABLE_SIZE)
1025     {
1026       cache[n] = ssa_target;
1027       op0 = powi_as_mults_1 (gsi, loc, type, n - powi_table[n], cache);
1028       op1 = powi_as_mults_1 (gsi, loc, type, powi_table[n], cache);
1029     }
1030   else if (n & 1)
1031     {
1032       digit = n & ((1 << POWI_WINDOW_SIZE) - 1);
1033       op0 = powi_as_mults_1 (gsi, loc, type, n - digit, cache);
1034       op1 = powi_as_mults_1 (gsi, loc, type, digit, cache);
1035     }
1036   else
1037     {
1038       op0 = powi_as_mults_1 (gsi, loc, type, n >> 1, cache);
1039       op1 = op0;
1040     }
1041 
1042   mult_stmt = gimple_build_assign (ssa_target, MULT_EXPR, op0, op1);
1043   gimple_set_location (mult_stmt, loc);
1044   gsi_insert_before (gsi, mult_stmt, GSI_SAME_STMT);
1045 
1046   return ssa_target;
1047 }
1048 
1049 /* Convert ARG0**N to a tree of multiplications of ARG0 with itself.
1050    This function needs to be kept in sync with powi_cost above.  */
1051 
1052 static tree
1053 powi_as_mults (gimple_stmt_iterator *gsi, location_t loc,
1054 	       tree arg0, HOST_WIDE_INT n)
1055 {
1056   tree cache[POWI_TABLE_SIZE], result, type = TREE_TYPE (arg0);
1057   gassign *div_stmt;
1058   tree target;
1059 
1060   if (n == 0)
1061     return build_real (type, dconst1);
1062 
1063   memset (cache, 0,  sizeof (cache));
1064   cache[1] = arg0;
1065 
1066   result = powi_as_mults_1 (gsi, loc, type, (n < 0) ? -n : n, cache);
1067   if (n >= 0)
1068     return result;
1069 
1070   /* If the original exponent was negative, reciprocate the result.  */
1071   target = make_temp_ssa_name (type, NULL, "powmult");
1072   div_stmt = gimple_build_assign (target, RDIV_EXPR,
1073 				  build_real (type, dconst1), result);
1074   gimple_set_location (div_stmt, loc);
1075   gsi_insert_before (gsi, div_stmt, GSI_SAME_STMT);
1076 
1077   return target;
1078 }
1079 
1080 /* ARG0 and N are the two arguments to a powi builtin in GSI with
1081    location info LOC.  If the arguments are appropriate, create an
1082    equivalent sequence of statements prior to GSI using an optimal
1083    number of multiplications, and return an expression holding the
1084    result.  */
1085 
1086 static tree
1087 gimple_expand_builtin_powi (gimple_stmt_iterator *gsi, location_t loc,
1088 			    tree arg0, HOST_WIDE_INT n)
1089 {
1090   /* Avoid largest negative number.  */
1091   if (n != -n
1092       && ((n >= -1 && n <= 2)
1093 	  || (optimize_function_for_speed_p (cfun)
1094 	      && powi_cost (n) <= POWI_MAX_MULTS)))
1095     return powi_as_mults (gsi, loc, arg0, n);
1096 
1097   return NULL_TREE;
1098 }
1099 
1100 /* Build a gimple call statement that calls FN with argument ARG.
1101    Set the lhs of the call statement to a fresh SSA name.  Insert the
1102    statement prior to GSI's current position, and return the fresh
1103    SSA name.  */
1104 
1105 static tree
1106 build_and_insert_call (gimple_stmt_iterator *gsi, location_t loc,
1107 		       tree fn, tree arg)
1108 {
1109   gcall *call_stmt;
1110   tree ssa_target;
1111 
1112   call_stmt = gimple_build_call (fn, 1, arg);
1113   ssa_target = make_temp_ssa_name (TREE_TYPE (arg), NULL, "powroot");
1114   gimple_set_lhs (call_stmt, ssa_target);
1115   gimple_set_location (call_stmt, loc);
1116   gsi_insert_before (gsi, call_stmt, GSI_SAME_STMT);
1117 
1118   return ssa_target;
1119 }
1120 
1121 /* Build a gimple binary operation with the given CODE and arguments
1122    ARG0, ARG1, assigning the result to a new SSA name for variable
1123    TARGET.  Insert the statement prior to GSI's current position, and
1124    return the fresh SSA name.  */
1125 
1126 static tree
1127 build_and_insert_binop (gimple_stmt_iterator *gsi, location_t loc,
1128 			const char *name, enum tree_code code,
1129 			tree arg0, tree arg1)
1130 {
1131   tree result = make_temp_ssa_name (TREE_TYPE (arg0), NULL, name);
1132   gassign *stmt = gimple_build_assign (result, code, arg0, arg1);
1133   gimple_set_location (stmt, loc);
1134   gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
1135   return result;
1136 }
1137 
1138 /* Build a gimple reference operation with the given CODE and argument
1139    ARG0, assigning the result to a new SSA name of TYPE with NAME.
1140    Insert the statement prior to GSI's current position, and return
1141    the fresh SSA name.  */
1142 
1143 static inline tree
1144 build_and_insert_ref (gimple_stmt_iterator *gsi, location_t loc, tree type,
1145 		      const char *name, enum tree_code code, tree arg0)
1146 {
1147   tree result = make_temp_ssa_name (type, NULL, name);
1148   gimple *stmt = gimple_build_assign (result, build1 (code, type, arg0));
1149   gimple_set_location (stmt, loc);
1150   gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
1151   return result;
1152 }
1153 
1154 /* Build a gimple assignment to cast VAL to TYPE.  Insert the statement
1155    prior to GSI's current position, and return the fresh SSA name.  */
1156 
1157 static tree
1158 build_and_insert_cast (gimple_stmt_iterator *gsi, location_t loc,
1159 		       tree type, tree val)
1160 {
1161   tree result = make_ssa_name (type);
1162   gassign *stmt = gimple_build_assign (result, NOP_EXPR, val);
1163   gimple_set_location (stmt, loc);
1164   gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
1165   return result;
1166 }
1167 
1168 struct pow_synth_sqrt_info
1169 {
1170   bool *factors;
1171   unsigned int deepest;
1172   unsigned int num_mults;
1173 };
1174 
1175 /* Return true iff the real value C can be represented as a
1176    sum of powers of 0.5 up to N.  That is:
1177    C == SUM<i from 1..N> (a[i]*(0.5**i)) where a[i] is either 0 or 1.
1178    Record in INFO the various parameters of the synthesis algorithm such
1179    as the factors a[i], the maximum 0.5 power and the number of
1180    multiplications that will be required.  */
1181 
1182 bool
1183 representable_as_half_series_p (REAL_VALUE_TYPE c, unsigned n,
1184 				 struct pow_synth_sqrt_info *info)
1185 {
1186   REAL_VALUE_TYPE factor = dconsthalf;
1187   REAL_VALUE_TYPE remainder = c;
1188 
1189   info->deepest = 0;
1190   info->num_mults = 0;
1191   memset (info->factors, 0, n * sizeof (bool));
1192 
1193   for (unsigned i = 0; i < n; i++)
1194     {
1195       REAL_VALUE_TYPE res;
1196 
1197       /* If something inexact happened bail out now.  */
1198       if (real_arithmetic (&res, MINUS_EXPR, &remainder, &factor))
1199 	return false;
1200 
1201       /* We have hit zero.  The number is representable as a sum
1202          of powers of 0.5.  */
1203       if (real_equal (&res, &dconst0))
1204 	{
1205 	  info->factors[i] = true;
1206 	  info->deepest = i + 1;
1207 	  return true;
1208 	}
1209       else if (!REAL_VALUE_NEGATIVE (res))
1210 	{
1211 	  remainder = res;
1212 	  info->factors[i] = true;
1213 	  info->num_mults++;
1214 	}
1215       else
1216 	info->factors[i] = false;
1217 
1218       real_arithmetic (&factor, MULT_EXPR, &factor, &dconsthalf);
1219     }
1220   return false;
1221 }
1222 
1223 /* Return the tree corresponding to FN being applied
1224    to ARG N times at GSI and LOC.
1225    Look up previous results from CACHE if need be.
1226    cache[0] should contain just plain ARG i.e. FN applied to ARG 0 times.  */
1227 
1228 static tree
1229 get_fn_chain (tree arg, unsigned int n, gimple_stmt_iterator *gsi,
1230 	      tree fn, location_t loc, tree *cache)
1231 {
1232   tree res = cache[n];
1233   if (!res)
1234     {
1235       tree prev = get_fn_chain (arg, n - 1, gsi, fn, loc, cache);
1236       res = build_and_insert_call (gsi, loc, fn, prev);
1237       cache[n] = res;
1238     }
1239 
1240   return res;
1241 }
1242 
1243 /* Print to STREAM the repeated application of function FNAME to ARG
1244    N times.  So, for FNAME = "foo", ARG = "x", N = 2 it would print:
1245    "foo (foo (x))".  */
1246 
1247 static void
1248 print_nested_fn (FILE* stream, const char *fname, const char* arg,
1249 		 unsigned int n)
1250 {
1251   if (n == 0)
1252     fprintf (stream, "%s", arg);
1253   else
1254     {
1255       fprintf (stream, "%s (", fname);
1256       print_nested_fn (stream, fname, arg, n - 1);
1257       fprintf (stream, ")");
1258     }
1259 }
1260 
1261 /* Print to STREAM the fractional sequence of sqrt chains
1262    applied to ARG, described by INFO.  Used for the dump file.  */
1263 
1264 static void
1265 dump_fractional_sqrt_sequence (FILE *stream, const char *arg,
1266 			        struct pow_synth_sqrt_info *info)
1267 {
1268   for (unsigned int i = 0; i < info->deepest; i++)
1269     {
1270       bool is_set = info->factors[i];
1271       if (is_set)
1272 	{
1273 	  print_nested_fn (stream, "sqrt", arg, i + 1);
1274 	  if (i != info->deepest - 1)
1275 	    fprintf (stream, " * ");
1276 	}
1277     }
1278 }
1279 
1280 /* Print to STREAM a representation of raising ARG to an integer
1281    power N.  Used for the dump file.  */
1282 
1283 static void
1284 dump_integer_part (FILE *stream, const char* arg, HOST_WIDE_INT n)
1285 {
1286   if (n > 1)
1287     fprintf (stream, "powi (%s, " HOST_WIDE_INT_PRINT_DEC ")", arg, n);
1288   else if (n == 1)
1289     fprintf (stream, "%s", arg);
1290 }
1291 
1292 /* Attempt to synthesize a POW[F] (ARG0, ARG1) call using chains of
1293    square roots.  Place at GSI and LOC.  Limit the maximum depth
1294    of the sqrt chains to MAX_DEPTH.  Return the tree holding the
1295    result of the expanded sequence or NULL_TREE if the expansion failed.
1296 
1297    This routine assumes that ARG1 is a real number with a fractional part
1298    (the integer exponent case will have been handled earlier in
1299    gimple_expand_builtin_pow).
1300 
1301    For ARG1 > 0.0:
1302    * For ARG1 composed of a whole part WHOLE_PART and a fractional part
1303      FRAC_PART i.e. WHOLE_PART == floor (ARG1) and
1304                     FRAC_PART == ARG1 - WHOLE_PART:
1305      Produce POWI (ARG0, WHOLE_PART) * POW (ARG0, FRAC_PART) where
1306      POW (ARG0, FRAC_PART) is expanded as a product of square root chains
1307      if it can be expressed as such, that is if FRAC_PART satisfies:
1308      FRAC_PART == <SUM from i = 1 until MAX_DEPTH> (a[i] * (0.5**i))
1309      where integer a[i] is either 0 or 1.
1310 
1311      Example:
1312      POW (x, 3.625) == POWI (x, 3) * POW (x, 0.625)
1313        --> POWI (x, 3) * SQRT (x) * SQRT (SQRT (SQRT (x)))
1314 
1315    For ARG1 < 0.0 there are two approaches:
1316    * (A) Expand to 1.0 / POW (ARG0, -ARG1) where POW (ARG0, -ARG1)
1317          is calculated as above.
1318 
1319      Example:
1320      POW (x, -5.625) == 1.0 / POW (x, 5.625)
1321        -->  1.0 / (POWI (x, 5) * SQRT (x) * SQRT (SQRT (SQRT (x))))
1322 
1323    * (B) : WHOLE_PART := - ceil (abs (ARG1))
1324            FRAC_PART  := ARG1 - WHOLE_PART
1325      and expand to POW (x, FRAC_PART) / POWI (x, WHOLE_PART).
1326      Example:
1327      POW (x, -5.875) == POW (x, 0.125) / POWI (X, 6)
1328        --> SQRT (SQRT (SQRT (x))) / (POWI (x, 6))
1329 
1330    For ARG1 < 0.0 we choose between (A) and (B) depending on
1331    how many multiplications we'd have to do.
1332    So, for the example in (B): POW (x, -5.875), if we were to
1333    follow algorithm (A) we would produce:
1334    1.0 / POWI (X, 5) * SQRT (X) * SQRT (SQRT (X)) * SQRT (SQRT (SQRT (X)))
1335    which contains more multiplications than approach (B).
1336 
1337    Hopefully, this approach will eliminate potentially expensive POW library
1338    calls when unsafe floating point math is enabled and allow the compiler to
1339    further optimise the multiplies, square roots and divides produced by this
1340    function.  */
1341 
1342 static tree
1343 expand_pow_as_sqrts (gimple_stmt_iterator *gsi, location_t loc,
1344 		     tree arg0, tree arg1, HOST_WIDE_INT max_depth)
1345 {
1346   tree type = TREE_TYPE (arg0);
1347   machine_mode mode = TYPE_MODE (type);
1348   tree sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);
1349   bool one_over = true;
1350 
1351   if (!sqrtfn)
1352     return NULL_TREE;
1353 
1354   if (TREE_CODE (arg1) != REAL_CST)
1355     return NULL_TREE;
1356 
1357   REAL_VALUE_TYPE exp_init = TREE_REAL_CST (arg1);
1358 
1359   gcc_assert (max_depth > 0);
1360   tree *cache = XALLOCAVEC (tree, max_depth + 1);
1361 
1362   struct pow_synth_sqrt_info synth_info;
1363   synth_info.factors = XALLOCAVEC (bool, max_depth + 1);
1364   synth_info.deepest = 0;
1365   synth_info.num_mults = 0;
1366 
1367   bool neg_exp = REAL_VALUE_NEGATIVE (exp_init);
1368   REAL_VALUE_TYPE exp = real_value_abs (&exp_init);
1369 
1370   /* The whole and fractional parts of exp.  */
1371   REAL_VALUE_TYPE whole_part;
1372   REAL_VALUE_TYPE frac_part;
1373 
1374   real_floor (&whole_part, mode, &exp);
1375   real_arithmetic (&frac_part, MINUS_EXPR, &exp, &whole_part);
1376 
1377 
1378   REAL_VALUE_TYPE ceil_whole = dconst0;
1379   REAL_VALUE_TYPE ceil_fract = dconst0;
1380 
1381   if (neg_exp)
1382     {
1383       real_ceil (&ceil_whole, mode, &exp);
1384       real_arithmetic (&ceil_fract, MINUS_EXPR, &ceil_whole, &exp);
1385     }
1386 
1387   if (!representable_as_half_series_p (frac_part, max_depth, &synth_info))
1388     return NULL_TREE;
1389 
1390   /* Check whether it's more profitable to not use 1.0 / ...  */
1391   if (neg_exp)
1392     {
1393       struct pow_synth_sqrt_info alt_synth_info;
1394       alt_synth_info.factors = XALLOCAVEC (bool, max_depth + 1);
1395       alt_synth_info.deepest = 0;
1396       alt_synth_info.num_mults = 0;
1397 
1398       if (representable_as_half_series_p (ceil_fract, max_depth,
1399 					   &alt_synth_info)
1400 	  && alt_synth_info.deepest <= synth_info.deepest
1401 	  && alt_synth_info.num_mults < synth_info.num_mults)
1402 	{
1403 	  whole_part = ceil_whole;
1404 	  frac_part = ceil_fract;
1405 	  synth_info.deepest = alt_synth_info.deepest;
1406 	  synth_info.num_mults = alt_synth_info.num_mults;
1407 	  memcpy (synth_info.factors, alt_synth_info.factors,
1408 		  (max_depth + 1) * sizeof (bool));
1409 	  one_over = false;
1410 	}
1411     }
1412 
1413   HOST_WIDE_INT n = real_to_integer (&whole_part);
1414   REAL_VALUE_TYPE cint;
1415   real_from_integer (&cint, VOIDmode, n, SIGNED);
1416 
1417   if (!real_identical (&whole_part, &cint))
1418     return NULL_TREE;
1419 
1420   if (powi_cost (n) + synth_info.num_mults > POWI_MAX_MULTS)
1421     return NULL_TREE;
1422 
1423   memset (cache, 0, (max_depth + 1) * sizeof (tree));
1424 
1425   tree integer_res = n == 0 ? build_real (type, dconst1) : arg0;
1426 
1427   /* Calculate the integer part of the exponent.  */
1428   if (n > 1)
1429     {
1430       integer_res = gimple_expand_builtin_powi (gsi, loc, arg0, n);
1431       if (!integer_res)
1432 	return NULL_TREE;
1433     }
1434 
1435   if (dump_file)
1436     {
1437       char string[64];
1438 
1439       real_to_decimal (string, &exp_init, sizeof (string), 0, 1);
1440       fprintf (dump_file, "synthesizing pow (x, %s) as:\n", string);
1441 
1442       if (neg_exp)
1443 	{
1444 	  if (one_over)
1445 	    {
1446 	      fprintf (dump_file, "1.0 / (");
1447 	      dump_integer_part (dump_file, "x", n);
1448 	      if (n > 0)
1449 	        fprintf (dump_file, " * ");
1450 	      dump_fractional_sqrt_sequence (dump_file, "x", &synth_info);
1451 	      fprintf (dump_file, ")");
1452 	    }
1453 	  else
1454 	    {
1455 	      dump_fractional_sqrt_sequence (dump_file, "x", &synth_info);
1456 	      fprintf (dump_file, " / (");
1457 	      dump_integer_part (dump_file, "x", n);
1458 	      fprintf (dump_file, ")");
1459 	    }
1460 	}
1461       else
1462 	{
1463 	  dump_fractional_sqrt_sequence (dump_file, "x", &synth_info);
1464 	  if (n > 0)
1465 	    fprintf (dump_file, " * ");
1466 	  dump_integer_part (dump_file, "x", n);
1467 	}
1468 
1469       fprintf (dump_file, "\ndeepest sqrt chain: %d\n", synth_info.deepest);
1470     }
1471 
1472 
1473   tree fract_res = NULL_TREE;
1474   cache[0] = arg0;
1475 
1476   /* Calculate the fractional part of the exponent.  */
1477   for (unsigned i = 0; i < synth_info.deepest; i++)
1478     {
1479       if (synth_info.factors[i])
1480 	{
1481 	  tree sqrt_chain = get_fn_chain (arg0, i + 1, gsi, sqrtfn, loc, cache);
1482 
1483 	  if (!fract_res)
1484 	      fract_res = sqrt_chain;
1485 
1486 	  else
1487 	    fract_res = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
1488 					   fract_res, sqrt_chain);
1489 	}
1490     }
1491 
1492   tree res = NULL_TREE;
1493 
1494   if (neg_exp)
1495     {
1496       if (one_over)
1497 	{
1498 	  if (n > 0)
1499 	    res = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
1500 					   fract_res, integer_res);
1501 	  else
1502 	    res = fract_res;
1503 
1504 	  res = build_and_insert_binop (gsi, loc, "powrootrecip", RDIV_EXPR,
1505 					  build_real (type, dconst1), res);
1506 	}
1507       else
1508 	{
1509 	  res = build_and_insert_binop (gsi, loc, "powroot", RDIV_EXPR,
1510 					 fract_res, integer_res);
1511 	}
1512     }
1513   else
1514     res = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
1515 				   fract_res, integer_res);
1516   return res;
1517 }
1518 
1519 /* ARG0 and ARG1 are the two arguments to a pow builtin call in GSI
1520    with location info LOC.  If possible, create an equivalent and
1521    less expensive sequence of statements prior to GSI, and return an
1522    expression holding the result.  */
1523 
1524 static tree
1525 gimple_expand_builtin_pow (gimple_stmt_iterator *gsi, location_t loc,
1526 			   tree arg0, tree arg1)
1527 {
1528   REAL_VALUE_TYPE c, cint, dconst1_3, dconst1_4, dconst1_6;
1529   REAL_VALUE_TYPE c2, dconst3;
1530   HOST_WIDE_INT n;
1531   tree type, sqrtfn, cbrtfn, sqrt_arg0, result, cbrt_x, powi_cbrt_x;
1532   machine_mode mode;
1533   bool speed_p = optimize_bb_for_speed_p (gsi_bb (*gsi));
1534   bool hw_sqrt_exists, c_is_int, c2_is_int;
1535 
1536   dconst1_4 = dconst1;
1537   SET_REAL_EXP (&dconst1_4, REAL_EXP (&dconst1_4) - 2);
1538 
1539   /* If the exponent isn't a constant, there's nothing of interest
1540      to be done.  */
1541   if (TREE_CODE (arg1) != REAL_CST)
1542     return NULL_TREE;
1543 
1544   /* Don't perform the operation if flag_signaling_nans is on
1545      and the operand is a signaling NaN.  */
1546   if (HONOR_SNANS (TYPE_MODE (TREE_TYPE (arg1)))
1547       && ((TREE_CODE (arg0) == REAL_CST
1548 	   && REAL_VALUE_ISSIGNALING_NAN (TREE_REAL_CST (arg0)))
1549 	  || REAL_VALUE_ISSIGNALING_NAN (TREE_REAL_CST (arg1))))
1550     return NULL_TREE;
1551 
1552   /* If the exponent is equivalent to an integer, expand to an optimal
1553      multiplication sequence when profitable.  */
1554   c = TREE_REAL_CST (arg1);
1555   n = real_to_integer (&c);
1556   real_from_integer (&cint, VOIDmode, n, SIGNED);
1557   c_is_int = real_identical (&c, &cint);
1558 
1559   if (c_is_int
1560       && ((n >= -1 && n <= 2)
1561 	  || (flag_unsafe_math_optimizations
1562 	      && speed_p
1563 	      && powi_cost (n) <= POWI_MAX_MULTS)))
1564     return gimple_expand_builtin_powi (gsi, loc, arg0, n);
1565 
1566   /* Attempt various optimizations using sqrt and cbrt.  */
1567   type = TREE_TYPE (arg0);
1568   mode = TYPE_MODE (type);
1569   sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);
1570 
1571   /* Optimize pow(x,0.5) = sqrt(x).  This replacement is always safe
1572      unless signed zeros must be maintained.  pow(-0,0.5) = +0, while
1573      sqrt(-0) = -0.  */
1574   if (sqrtfn
1575       && real_equal (&c, &dconsthalf)
1576       && !HONOR_SIGNED_ZEROS (mode))
1577     return build_and_insert_call (gsi, loc, sqrtfn, arg0);
1578 
1579   hw_sqrt_exists = optab_handler (sqrt_optab, mode) != CODE_FOR_nothing;
1580 
1581   /* Optimize pow(x,1./3.) = cbrt(x).  This requires unsafe math
1582      optimizations since 1./3. is not exactly representable.  If x
1583      is negative and finite, the correct value of pow(x,1./3.) is
1584      a NaN with the "invalid" exception raised, because the value
1585      of 1./3. actually has an even denominator.  The correct value
1586      of cbrt(x) is a negative real value.  */
1587   cbrtfn = mathfn_built_in (type, BUILT_IN_CBRT);
1588   dconst1_3 = real_value_truncate (mode, dconst_third ());
1589 
1590   if (flag_unsafe_math_optimizations
1591       && cbrtfn
1592       && (!HONOR_NANS (mode) || tree_expr_nonnegative_p (arg0))
1593       && real_equal (&c, &dconst1_3))
1594     return build_and_insert_call (gsi, loc, cbrtfn, arg0);
1595 
1596   /* Optimize pow(x,1./6.) = cbrt(sqrt(x)).  Don't do this optimization
1597      if we don't have a hardware sqrt insn.  */
1598   dconst1_6 = dconst1_3;
1599   SET_REAL_EXP (&dconst1_6, REAL_EXP (&dconst1_6) - 1);
1600 
1601   if (flag_unsafe_math_optimizations
1602       && sqrtfn
1603       && cbrtfn
1604       && (!HONOR_NANS (mode) || tree_expr_nonnegative_p (arg0))
1605       && speed_p
1606       && hw_sqrt_exists
1607       && real_equal (&c, &dconst1_6))
1608     {
1609       /* sqrt(x)  */
1610       sqrt_arg0 = build_and_insert_call (gsi, loc, sqrtfn, arg0);
1611 
1612       /* cbrt(sqrt(x))  */
1613       return build_and_insert_call (gsi, loc, cbrtfn, sqrt_arg0);
1614     }
1615 
1616 
1617   /* Attempt to expand the POW as a product of square root chains.
1618      Expand the 0.25 case even when optimising for size.  */
1619   if (flag_unsafe_math_optimizations
1620       && sqrtfn
1621       && hw_sqrt_exists
1622       && (speed_p || real_equal (&c, &dconst1_4))
1623       && !HONOR_SIGNED_ZEROS (mode))
1624     {
1625       unsigned int max_depth = speed_p
1626 				? PARAM_VALUE (PARAM_MAX_POW_SQRT_DEPTH)
1627 				: 2;
1628 
1629       tree expand_with_sqrts
1630 	= expand_pow_as_sqrts (gsi, loc, arg0, arg1, max_depth);
1631 
1632       if (expand_with_sqrts)
1633 	return expand_with_sqrts;
1634     }
1635 
1636   real_arithmetic (&c2, MULT_EXPR, &c, &dconst2);
1637   n = real_to_integer (&c2);
1638   real_from_integer (&cint, VOIDmode, n, SIGNED);
1639   c2_is_int = real_identical (&c2, &cint);
1640 
1641   /* Optimize pow(x,c), where 3c = n for some nonzero integer n, into
1642 
1643      powi(x, n/3) * powi(cbrt(x), n%3),                    n > 0;
1644      1.0 / (powi(x, abs(n)/3) * powi(cbrt(x), abs(n)%3)),  n < 0.
1645 
1646      Do not calculate the first factor when n/3 = 0.  As cbrt(x) is
1647      different from pow(x, 1./3.) due to rounding and behavior with
1648      negative x, we need to constrain this transformation to unsafe
1649      math and positive x or finite math.  */
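  /* For example, pow (x, 5./3.) gives n = 5 and becomes
     powi (x, 1) * cbrt (x) * cbrt (x), while pow (x, -4./3.) gives n = -4
     and becomes 1.0 / (powi (x, 1) * cbrt (x)).  */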
1650   real_from_integer (&dconst3, VOIDmode, 3, SIGNED);
1651   real_arithmetic (&c2, MULT_EXPR, &c, &dconst3);
1652   real_round (&c2, mode, &c2);
1653   n = real_to_integer (&c2);
1654   real_from_integer (&cint, VOIDmode, n, SIGNED);
1655   real_arithmetic (&c2, RDIV_EXPR, &cint, &dconst3);
1656   real_convert (&c2, mode, &c2);
1657 
1658   if (flag_unsafe_math_optimizations
1659       && cbrtfn
1660       && (!HONOR_NANS (mode) || tree_expr_nonnegative_p (arg0))
1661       && real_identical (&c2, &c)
1662       && !c2_is_int
1663       && optimize_function_for_speed_p (cfun)
1664       && powi_cost (n / 3) <= POWI_MAX_MULTS)
1665     {
1666       tree powi_x_ndiv3 = NULL_TREE;
1667 
1668       /* Attempt to fold powi(arg0, abs(n/3)) into multiplies.  If not
1669          possible or profitable, give up.  Skip the degenerate case when
1670          abs(n) < 3, where the result is always 1.  */
1671       if (absu_hwi (n) >= 3)
1672 	{
1673 	  powi_x_ndiv3 = gimple_expand_builtin_powi (gsi, loc, arg0,
1674 						     abs_hwi (n / 3));
1675 	  if (!powi_x_ndiv3)
1676 	    return NULL_TREE;
1677 	}
1678 
1679       /* Calculate powi(cbrt(x), n%3).  Don't use gimple_expand_builtin_powi
1680          as that creates an unnecessary variable.  Instead, just produce
1681          either cbrt(x) or cbrt(x) * cbrt(x).  */
1682       cbrt_x = build_and_insert_call (gsi, loc, cbrtfn, arg0);
1683 
1684       if (absu_hwi (n) % 3 == 1)
1685 	powi_cbrt_x = cbrt_x;
1686       else
1687 	powi_cbrt_x = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
1688 					      cbrt_x, cbrt_x);
1689 
1690       /* Multiply the two subexpressions, unless powi(x,abs(n)/3) = 1.  */
1691       if (absu_hwi (n) < 3)
1692 	result = powi_cbrt_x;
1693       else
1694 	result = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
1695 					 powi_x_ndiv3, powi_cbrt_x);
1696 
1697       /* If n is negative, reciprocate the result.  */
1698       if (n < 0)
1699 	result = build_and_insert_binop (gsi, loc, "powroot", RDIV_EXPR,
1700 					 build_real (type, dconst1), result);
1701 
1702       return result;
1703     }
1704 
1705   /* No optimizations succeeded.  */
1706   return NULL_TREE;
1707 }
1708 
1709 /* ARG is the argument to a cabs builtin call in GSI with location info
1710    LOC.  Create a sequence of statements prior to GSI that calculates
1711    sqrt(R*R + I*I), where R and I are the real and imaginary components
1712    of ARG, respectively.  Return an expression holding the result.  */
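/* Schematically, for a call d = cabs (z) this emits (with illustrative
   SSA names):
     r   = REALPART_EXPR <z>;
     rr  = r * r;
     i   = IMAGPART_EXPR <z>;
     ii  = i * i;
     sum = rr + ii;
     d   = sqrt (sum);
   Note the flag_unsafe_math_optimizations guard below: the intermediate
   r*r + i*i may overflow or underflow even when the true result is
   representable.  */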
1713 
1714 static tree
1715 gimple_expand_builtin_cabs (gimple_stmt_iterator *gsi, location_t loc, tree arg)
1716 {
1717   tree real_part, imag_part, addend1, addend2, sum, result;
1718   tree type = TREE_TYPE (TREE_TYPE (arg));
1719   tree sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);
1720   machine_mode mode = TYPE_MODE (type);
1721 
1722   if (!flag_unsafe_math_optimizations
1723       || !optimize_bb_for_speed_p (gimple_bb (gsi_stmt (*gsi)))
1724       || !sqrtfn
1725       || optab_handler (sqrt_optab, mode) == CODE_FOR_nothing)
1726     return NULL_TREE;
1727 
1728   real_part = build_and_insert_ref (gsi, loc, type, "cabs",
1729 				    REALPART_EXPR, arg);
1730   addend1 = build_and_insert_binop (gsi, loc, "cabs", MULT_EXPR,
1731 				    real_part, real_part);
1732   imag_part = build_and_insert_ref (gsi, loc, type, "cabs",
1733 				    IMAGPART_EXPR, arg);
1734   addend2 = build_and_insert_binop (gsi, loc, "cabs", MULT_EXPR,
1735 				    imag_part, imag_part);
1736   sum = build_and_insert_binop (gsi, loc, "cabs", PLUS_EXPR, addend1, addend2);
1737   result = build_and_insert_call (gsi, loc, sqrtfn, sum);
1738 
1739   return result;
1740 }
1741 
1742 /* Go through all calls to sin, cos and cexpi and call execute_cse_sincos_1
1743    on the SSA_NAME argument of each of them.  Also expand powi(x,n) into
1744    an optimal number of multiplies, when n is a constant.  */
1745 
1746 namespace {
1747 
1748 const pass_data pass_data_cse_sincos =
1749 {
1750   GIMPLE_PASS, /* type */
1751   "sincos", /* name */
1752   OPTGROUP_NONE, /* optinfo_flags */
1753   TV_NONE, /* tv_id */
1754   PROP_ssa, /* properties_required */
1755   PROP_gimple_opt_math, /* properties_provided */
1756   0, /* properties_destroyed */
1757   0, /* todo_flags_start */
1758   TODO_update_ssa, /* todo_flags_finish */
1759 };
1760 
1761 class pass_cse_sincos : public gimple_opt_pass
1762 {
1763 public:
1764   pass_cse_sincos (gcc::context *ctxt)
1765     : gimple_opt_pass (pass_data_cse_sincos, ctxt)
1766   {}
1767 
1768   /* opt_pass methods: */
1769   virtual bool gate (function *)
1770     {
1771       /* We no longer require either sincos or cexp, since powi expansion
1772 	 piggybacks on this pass.  */
1773       return optimize;
1774     }
1775 
1776   virtual unsigned int execute (function *);
1777 
1778 }; // class pass_cse_sincos
1779 
1780 unsigned int
1781 pass_cse_sincos::execute (function *fun)
1782 {
1783   basic_block bb;
1784   bool cfg_changed = false;
1785 
1786   calculate_dominance_info (CDI_DOMINATORS);
1787   memset (&sincos_stats, 0, sizeof (sincos_stats));
1788 
1789   FOR_EACH_BB_FN (bb, fun)
1790     {
1791       gimple_stmt_iterator gsi;
1792       bool cleanup_eh = false;
1793 
1794       for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1795         {
1796 	  gimple *stmt = gsi_stmt (gsi);
1797 
1798 	  /* Only the last stmt in a bb could throw, no need to call
1799 	     gimple_purge_dead_eh_edges if we change something in the middle
1800 	     of a basic block.  */
1801 	  cleanup_eh = false;
1802 
1803 	  if (is_gimple_call (stmt)
1804 	      && gimple_call_lhs (stmt))
1805 	    {
1806 	      tree arg, arg0, arg1, result;
1807 	      HOST_WIDE_INT n;
1808 	      location_t loc;
1809 
1810 	      switch (gimple_call_combined_fn (stmt))
1811 		{
1812 		CASE_CFN_COS:
1813 		CASE_CFN_SIN:
1814 		CASE_CFN_CEXPI:
1815 		  /* Make sure we have either sincos or cexp.  */
1816 		  if (!targetm.libc_has_function (function_c99_math_complex)
1817 		      && !targetm.libc_has_function (function_sincos))
1818 		    break;
1819 
1820 		  arg = gimple_call_arg (stmt, 0);
1821 		  if (TREE_CODE (arg) == SSA_NAME)
1822 		    cfg_changed |= execute_cse_sincos_1 (arg);
1823 		  break;
1824 
1825 		CASE_CFN_POW:
1826 		  arg0 = gimple_call_arg (stmt, 0);
1827 		  arg1 = gimple_call_arg (stmt, 1);
1828 
1829 		  loc = gimple_location (stmt);
1830 		  result = gimple_expand_builtin_pow (&gsi, loc, arg0, arg1);
1831 
1832 		  if (result)
1833 		    {
1834 		      tree lhs = gimple_get_lhs (stmt);
1835 		      gassign *new_stmt = gimple_build_assign (lhs, result);
1836 		      gimple_set_location (new_stmt, loc);
1837 		      unlink_stmt_vdef (stmt);
1838 		      gsi_replace (&gsi, new_stmt, true);
1839 		      cleanup_eh = true;
1840 		      if (gimple_vdef (stmt))
1841 			release_ssa_name (gimple_vdef (stmt));
1842 		    }
1843 		  break;
1844 
1845 		CASE_CFN_POWI:
1846 		  arg0 = gimple_call_arg (stmt, 0);
1847 		  arg1 = gimple_call_arg (stmt, 1);
1848 		  loc = gimple_location (stmt);
1849 
1850 		  if (real_minus_onep (arg0))
1851 		    {
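		      /* powi (-1, n) is 1.0 for even n and -1.0 for odd n,
			 so test the low bit of n and select between the two
			 constants with a COND_EXPR.  */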
1852                       tree t0, t1, cond, one, minus_one;
1853 		      gassign *stmt;
1854 
1855 		      t0 = TREE_TYPE (arg0);
1856 		      t1 = TREE_TYPE (arg1);
1857 		      one = build_real (t0, dconst1);
1858 		      minus_one = build_real (t0, dconstm1);
1859 
1860 		      cond = make_temp_ssa_name (t1, NULL, "powi_cond");
1861 		      stmt = gimple_build_assign (cond, BIT_AND_EXPR,
1862 						  arg1, build_int_cst (t1, 1));
1863 		      gimple_set_location (stmt, loc);
1864 		      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
1865 
1866 		      result = make_temp_ssa_name (t0, NULL, "powi");
1867 		      stmt = gimple_build_assign (result, COND_EXPR, cond,
1868 						  minus_one, one);
1869 		      gimple_set_location (stmt, loc);
1870 		      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
1871 		    }
1872 		  else
1873 		    {
1874 		      if (!tree_fits_shwi_p (arg1))
1875 			break;
1876 
1877 		      n = tree_to_shwi (arg1);
1878 		      result = gimple_expand_builtin_powi (&gsi, loc, arg0, n);
1879 		    }
1880 
1881 		  if (result)
1882 		    {
1883 		      tree lhs = gimple_get_lhs (stmt);
1884 		      gassign *new_stmt = gimple_build_assign (lhs, result);
1885 		      gimple_set_location (new_stmt, loc);
1886 		      unlink_stmt_vdef (stmt);
1887 		      gsi_replace (&gsi, new_stmt, true);
1888 		      cleanup_eh = true;
1889 		      if (gimple_vdef (stmt))
1890 			release_ssa_name (gimple_vdef (stmt));
1891 		    }
1892 		  break;
1893 
1894 		CASE_CFN_CABS:
1895 		  arg0 = gimple_call_arg (stmt, 0);
1896 		  loc = gimple_location (stmt);
1897 		  result = gimple_expand_builtin_cabs (&gsi, loc, arg0);
1898 
1899 		  if (result)
1900 		    {
1901 		      tree lhs = gimple_get_lhs (stmt);
1902 		      gassign *new_stmt = gimple_build_assign (lhs, result);
1903 		      gimple_set_location (new_stmt, loc);
1904 		      unlink_stmt_vdef (stmt);
1905 		      gsi_replace (&gsi, new_stmt, true);
1906 		      cleanup_eh = true;
1907 		      if (gimple_vdef (stmt))
1908 			release_ssa_name (gimple_vdef (stmt));
1909 		    }
1910 		  break;
1911 
1912 		default:;
1913 		}
1914 	    }
1915 	}
1916       if (cleanup_eh)
1917 	cfg_changed |= gimple_purge_dead_eh_edges (bb);
1918     }
1919 
1920   statistics_counter_event (fun, "sincos statements inserted",
1921 			    sincos_stats.inserted);
1922 
1923   return cfg_changed ? TODO_cleanup_cfg : 0;
1924 }
1925 
1926 } // anon namespace
1927 
1928 gimple_opt_pass *
1929 make_pass_cse_sincos (gcc::context *ctxt)
1930 {
1931   return new pass_cse_sincos (ctxt);
1932 }
1933 
1934 /* A symbolic number structure is used to detect byte permutation and selection
1935    patterns of a source.  To achieve that, its field N contains an artificial
1936    number consisting of BITS_PER_MARKER sized markers tracking where each
1937    byte comes from in the source:
1938 
1939    0	   - target byte has the value 0
1940    FF	   - target byte has an unknown value (eg. due to sign extension)
1941    1..size - marker value is one plus the byte index in the source (marker 1 is the lsb).
1942 
1943    To detect permutations on memory sources (arrays and structures), a symbolic
1944    number is also associated with:
1945    - a base address BASE_ADDR and an OFFSET giving the address of the source;
1946    - a range which gives the difference between the highest and lowest accessed
1947      memory location to make such a symbolic number;
1948    - the address SRC of the source element of lowest address as a convenience
1949      to easily get BASE_ADDR + offset + lowest bytepos.
1950 
1951    Note 1: the range is different from size as size reflects the size of the
1952    type of the current expression.  For instance, for an array char a[],
1953    (short) a[0] | (short) a[3] would have a size of 2 but a range of 4 while
1954    (short) a[0] | ((short) a[0] << 1) would still have a size of 2 but this
1955    time a range of 1.
1956 
1957    Note 2: for non-memory sources, range holds the same value as size.
1958 
1959    Note 3: SRC points to the SSA_NAME in case of non-memory source.  */
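/* For example, for a 4-byte value the identity (no-swap) pattern is
   0x04030201: result byte I carries marker I + 1, i.e. it comes from source
   byte I, while a full byte swap corresponds to 0x01020304.  */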
1960 
1961 struct symbolic_number {
1962   uint64_t n;
1963   tree type;
1964   tree base_addr;
1965   tree offset;
1966   HOST_WIDE_INT bytepos;
1967   tree src;
1968   tree alias_set;
1969   tree vuse;
1970   unsigned HOST_WIDE_INT range;
1971 };
1972 
1973 #define BITS_PER_MARKER 8
1974 #define MARKER_MASK ((1 << BITS_PER_MARKER) - 1)
1975 #define MARKER_BYTE_UNKNOWN MARKER_MASK
1976 #define HEAD_MARKER(n, size) \
1977   ((n) & ((uint64_t) MARKER_MASK << (((size) - 1) * BITS_PER_MARKER)))
1978 
1979 /* The number which the find_bswap_or_nop_1 result should match in
1980    order to have a nop.  The number is masked according to the size of
1981    the symbolic number before using it.  */
1982 #define CMPNOP (sizeof (int64_t) < 8 ? 0 : \
1983   (uint64_t)0x08070605 << 32 | 0x04030201)
1984 
1985 /* The number which the find_bswap_or_nop_1 result should match in
1986    order to have a byte swap.  The number is masked according to the
1987    size of the symbolic number before using it.  */
1988 #define CMPXCHG (sizeof (int64_t) < 8 ? 0 : \
1989   (uint64_t)0x01020304 << 32 | 0x05060708)
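/* For instance, for a 2-byte symbolic number CMPNOP is masked down to 0x0201
   and CMPXCHG is shifted down to 0x0102 before the comparison in
   find_bswap_or_nop.  */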
1990 
1991 /* Perform a SHIFT or ROTATE operation by COUNT bits on symbolic
1992    number N.  Return false if the requested operation is not permitted
1993    on a symbolic number.  */
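/* For example, shifting the 4-byte symbolic number 0x04030201 left by 8 bits
   gives 0x03020100 (the low byte becomes a known-zero byte), while shifting
   it right by 8 bits on an unsigned type gives 0x00040302.  */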
1994 
1995 static inline bool
1996 do_shift_rotate (enum tree_code code,
1997 		 struct symbolic_number *n,
1998 		 int count)
1999 {
2000   int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
2001   unsigned head_marker;
2002 
2003   if (count % BITS_PER_UNIT != 0)
2004     return false;
2005   count = (count / BITS_PER_UNIT) * BITS_PER_MARKER;
2006 
2007   /* Zero out the extra bits of N in order to avoid them being shifted
2008      into the significant bits.  */
2009   if (size < 64 / BITS_PER_MARKER)
2010     n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
2011 
2012   switch (code)
2013     {
2014     case LSHIFT_EXPR:
2015       n->n <<= count;
2016       break;
2017     case RSHIFT_EXPR:
2018       head_marker = HEAD_MARKER (n->n, size);
2019       n->n >>= count;
2020       /* Arithmetic shift of signed type: result is dependent on the value.  */
2021       if (!TYPE_UNSIGNED (n->type) && head_marker)
2022 	for (i = 0; i < count / BITS_PER_MARKER; i++)
2023 	  n->n |= (uint64_t) MARKER_BYTE_UNKNOWN
2024 		  << ((size - 1 - i) * BITS_PER_MARKER);
2025       break;
2026     case LROTATE_EXPR:
2027       n->n = (n->n << count) | (n->n >> ((size * BITS_PER_MARKER) - count));
2028       break;
2029     case RROTATE_EXPR:
2030       n->n = (n->n >> count) | (n->n << ((size * BITS_PER_MARKER) - count));
2031       break;
2032     default:
2033       return false;
2034     }
2035   /* Zero unused bits for size.  */
2036   if (size < 64 / BITS_PER_MARKER)
2037     n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
2038   return true;
2039 }
2040 
2041 /* Perform sanity checking for the symbolic number N and the gimple
2042    statement STMT.  */
2043 
2044 static inline bool
2045 verify_symbolic_number_p (struct symbolic_number *n, gimple *stmt)
2046 {
2047   tree lhs_type;
2048 
2049   lhs_type = gimple_expr_type (stmt);
2050 
2051   if (TREE_CODE (lhs_type) != INTEGER_TYPE)
2052     return false;
2053 
2054   if (TYPE_PRECISION (lhs_type) != TYPE_PRECISION (n->type))
2055     return false;
2056 
2057   return true;
2058 }
2059 
2060 /* Initialize the symbolic number N for the bswap pass from the base element
2061    SRC manipulated by the bitwise OR expression.  */
2062 
2063 static bool
2064 init_symbolic_number (struct symbolic_number *n, tree src)
2065 {
2066   int size;
2067 
2068   if (! INTEGRAL_TYPE_P (TREE_TYPE (src)))
2069     return false;
2070 
2071   n->base_addr = n->offset = n->alias_set = n->vuse = NULL_TREE;
2072   n->src = src;
2073 
2074   /* Set up the symbolic number N by setting each byte to a value between 1
2075      and the byte size of SRC.  The highest order byte is set to that size and
2076      the lowest order byte to 1.  */
2077   n->type = TREE_TYPE (src);
2078   size = TYPE_PRECISION (n->type);
2079   if (size % BITS_PER_UNIT != 0)
2080     return false;
2081   size /= BITS_PER_UNIT;
2082   if (size > 64 / BITS_PER_MARKER)
2083     return false;
2084   n->range = size;
2085   n->n = CMPNOP;
2086 
2087   if (size < 64 / BITS_PER_MARKER)
2088     n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
2089 
2090   return true;
2091 }
2092 
2093 /* Check if STMT might be a byte swap or a nop from a memory source and return
2094    the answer.  If so, REF is that memory source, and the base of the memory area
2095    accessed and the offset of the access from that base are recorded in N.  */
2096 
2097 bool
2098 find_bswap_or_nop_load (gimple *stmt, tree ref, struct symbolic_number *n)
2099 {
2100   /* Leaf node is an array or component ref. Memorize its base and
2101      offset from base to compare to other such leaf nodes.  */
2102   HOST_WIDE_INT bitsize, bitpos;
2103   machine_mode mode;
2104   int unsignedp, reversep, volatilep;
2105   tree offset, base_addr;
2106 
2107   /* Not prepared to handle PDP endian.  */
2108   if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
2109     return false;
2110 
2111   if (!gimple_assign_load_p (stmt) || gimple_has_volatile_ops (stmt))
2112     return false;
2113 
2114   base_addr = get_inner_reference (ref, &bitsize, &bitpos, &offset, &mode,
2115 				   &unsignedp, &reversep, &volatilep);
2116 
2117   if (TREE_CODE (base_addr) == MEM_REF)
2118     {
2119       offset_int bit_offset = 0;
2120       tree off = TREE_OPERAND (base_addr, 1);
2121 
2122       if (!integer_zerop (off))
2123 	{
2124 	  offset_int boff, coff = mem_ref_offset (base_addr);
2125 	  boff = coff << LOG2_BITS_PER_UNIT;
2126 	  bit_offset += boff;
2127 	}
2128 
2129       base_addr = TREE_OPERAND (base_addr, 0);
2130 
2131       /* Avoid returning a negative bitpos as this may wreak havoc later.  */
2132       if (wi::neg_p (bit_offset))
2133 	{
2134 	  offset_int mask = wi::mask <offset_int> (LOG2_BITS_PER_UNIT, false);
2135 	  offset_int tem = bit_offset.and_not (mask);
2136 	  /* TEM is the bitpos rounded to BITS_PER_UNIT towards -Inf.
2137 	     Subtract it from BIT_OFFSET and add it (scaled) to OFFSET.  */
2138 	  bit_offset -= tem;
2139 	  tem >>= LOG2_BITS_PER_UNIT;
2140 	  if (offset)
2141 	    offset = size_binop (PLUS_EXPR, offset,
2142 				    wide_int_to_tree (sizetype, tem));
2143 	  else
2144 	    offset = wide_int_to_tree (sizetype, tem);
2145 	}
2146 
2147       bitpos += bit_offset.to_shwi ();
2148     }
2149 
2150   if (bitpos % BITS_PER_UNIT)
2151     return false;
2152   if (bitsize % BITS_PER_UNIT)
2153     return false;
2154   if (reversep)
2155     return false;
2156 
2157   if (!init_symbolic_number (n, ref))
2158     return false;
2159   n->base_addr = base_addr;
2160   n->offset = offset;
2161   n->bytepos = bitpos / BITS_PER_UNIT;
2162   n->alias_set = reference_alias_ptr_type (ref);
2163   n->vuse = gimple_vuse (stmt);
2164   return true;
2165 }
2166 
2167 /* Compute the symbolic number N representing the result of a bitwise OR on
2168    the two symbolic numbers N1 and N2 whose source statements are respectively
2169    SOURCE_STMT1 and SOURCE_STMT2.  */
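/* Schematically, on a little-endian target and assuming the usual pattern
   where char loads are first converted to a wider type, merging the symbolic
   number for a load of a[0] (markers 0x01, bytepos 0) with the one for
   a[1] << 8 (markers 0x0100, bytepos 1) produces markers 0x0201 with a range
   of 2, i.e. a 2-byte native-endian load of a[0..1].  */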
2170 
2171 static gimple *
2172 perform_symbolic_merge (gimple *source_stmt1, struct symbolic_number *n1,
2173 			gimple *source_stmt2, struct symbolic_number *n2,
2174 			struct symbolic_number *n)
2175 {
2176   int i, size;
2177   uint64_t mask;
2178   gimple *source_stmt;
2179   struct symbolic_number *n_start;
2180 
2181   tree rhs1 = gimple_assign_rhs1 (source_stmt1);
2182   if (TREE_CODE (rhs1) == BIT_FIELD_REF
2183       && TREE_CODE (TREE_OPERAND (rhs1, 0)) == SSA_NAME)
2184     rhs1 = TREE_OPERAND (rhs1, 0);
2185   tree rhs2 = gimple_assign_rhs1 (source_stmt2);
2186   if (TREE_CODE (rhs2) == BIT_FIELD_REF
2187       && TREE_CODE (TREE_OPERAND (rhs2, 0)) == SSA_NAME)
2188     rhs2 = TREE_OPERAND (rhs2, 0);
2189 
2190   /* The sources are different; cancel the bswap if they are not memory
2191      locations with the same base (array, structure, ...).  */
2192   if (rhs1 != rhs2)
2193     {
2194       uint64_t inc;
2195       HOST_WIDE_INT start_sub, end_sub, end1, end2, end;
2196       struct symbolic_number *toinc_n_ptr, *n_end;
2197       basic_block bb1, bb2;
2198 
2199       if (!n1->base_addr || !n2->base_addr
2200 	  || !operand_equal_p (n1->base_addr, n2->base_addr, 0))
2201 	return NULL;
2202 
2203       if (!n1->offset != !n2->offset
2204 	  || (n1->offset && !operand_equal_p (n1->offset, n2->offset, 0)))
2205 	return NULL;
2206 
2207       if (n1->bytepos < n2->bytepos)
2208 	{
2209 	  n_start = n1;
2210 	  start_sub = n2->bytepos - n1->bytepos;
2211 	}
2212       else
2213 	{
2214 	  n_start = n2;
2215 	  start_sub = n1->bytepos - n2->bytepos;
2216 	}
2217 
2218       bb1 = gimple_bb (source_stmt1);
2219       bb2 = gimple_bb (source_stmt2);
2220       if (dominated_by_p (CDI_DOMINATORS, bb1, bb2))
2221 	source_stmt = source_stmt1;
2222       else
2223 	source_stmt = source_stmt2;
2224 
2225       /* Find the highest address at which a load is performed and
2226 	 compute related info.  */
2227       end1 = n1->bytepos + (n1->range - 1);
2228       end2 = n2->bytepos + (n2->range - 1);
2229       if (end1 < end2)
2230 	{
2231 	  end = end2;
2232 	  end_sub = end2 - end1;
2233 	}
2234       else
2235 	{
2236 	  end = end1;
2237 	  end_sub = end1 - end2;
2238 	}
2239       n_end = (end2 > end1) ? n2 : n1;
2240 
2241       /* Find symbolic number whose lsb is the most significant.  */
2242       if (BYTES_BIG_ENDIAN)
2243 	toinc_n_ptr = (n_end == n1) ? n2 : n1;
2244       else
2245 	toinc_n_ptr = (n_start == n1) ? n2 : n1;
2246 
2247       n->range = end - n_start->bytepos + 1;
2248 
2249       /* Check that the range of memory covered can be represented by
2250 	 a symbolic number.  */
2251       if (n->range > 64 / BITS_PER_MARKER)
2252 	return NULL;
2253 
2254       /* Reinterpret byte marks in symbolic number holding the value of
2255 	 bigger weight according to target endianness.  */
2256       inc = BYTES_BIG_ENDIAN ? end_sub : start_sub;
2257       size = TYPE_PRECISION (n1->type) / BITS_PER_UNIT;
2258       for (i = 0; i < size; i++, inc <<= BITS_PER_MARKER)
2259 	{
2260 	  unsigned marker
2261 	    = (toinc_n_ptr->n >> (i * BITS_PER_MARKER)) & MARKER_MASK;
2262 	  if (marker && marker != MARKER_BYTE_UNKNOWN)
2263 	    toinc_n_ptr->n += inc;
2264 	}
2265     }
2266   else
2267     {
2268       n->range = n1->range;
2269       n_start = n1;
2270       source_stmt = source_stmt1;
2271     }
2272 
2273   if (!n1->alias_set
2274       || alias_ptr_types_compatible_p (n1->alias_set, n2->alias_set))
2275     n->alias_set = n1->alias_set;
2276   else
2277     n->alias_set = ptr_type_node;
2278   n->vuse = n_start->vuse;
2279   n->base_addr = n_start->base_addr;
2280   n->offset = n_start->offset;
2281   n->src = n_start->src;
2282   n->bytepos = n_start->bytepos;
2283   n->type = n_start->type;
2284   size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
2285 
2286   for (i = 0, mask = MARKER_MASK; i < size; i++, mask <<= BITS_PER_MARKER)
2287     {
2288       uint64_t masked1, masked2;
2289 
2290       masked1 = n1->n & mask;
2291       masked2 = n2->n & mask;
2292       if (masked1 && masked2 && masked1 != masked2)
2293 	return NULL;
2294     }
2295   n->n = n1->n | n2->n;
2296 
2297   return source_stmt;
2298 }
2299 
2300 /* find_bswap_or_nop_1 invokes itself recursively with N and tries to perform
2301    the operation given by the rhs of STMT on the result.  If the operation
2302    can be performed successfully, the function returns a gimple stmt whose
2303    rhs's first tree is the expression of the source operand, and NULL
2304    otherwise.  */
2305 
2306 static gimple *
2307 find_bswap_or_nop_1 (gimple *stmt, struct symbolic_number *n, int limit)
2308 {
2309   enum tree_code code;
2310   tree rhs1, rhs2 = NULL;
2311   gimple *rhs1_stmt, *rhs2_stmt, *source_stmt1;
2312   enum gimple_rhs_class rhs_class;
2313 
2314   if (!limit || !is_gimple_assign (stmt))
2315     return NULL;
2316 
2317   rhs1 = gimple_assign_rhs1 (stmt);
2318 
2319   if (find_bswap_or_nop_load (stmt, rhs1, n))
2320     return stmt;
2321 
2322   /* Handle BIT_FIELD_REF.  */
2323   if (TREE_CODE (rhs1) == BIT_FIELD_REF
2324       && TREE_CODE (TREE_OPERAND (rhs1, 0)) == SSA_NAME)
2325     {
2326       unsigned HOST_WIDE_INT bitsize = tree_to_uhwi (TREE_OPERAND (rhs1, 1));
2327       unsigned HOST_WIDE_INT bitpos = tree_to_uhwi (TREE_OPERAND (rhs1, 2));
2328       if (bitpos % BITS_PER_UNIT == 0
2329 	  && bitsize % BITS_PER_UNIT == 0
2330 	  && init_symbolic_number (n, TREE_OPERAND (rhs1, 0)))
2331 	{
2332 	  /* Handle big-endian bit numbering in BIT_FIELD_REF.  */
2333 	  if (BYTES_BIG_ENDIAN)
2334 	    bitpos = TYPE_PRECISION (n->type) - bitpos - bitsize;
2335 
2336 	  /* Shift.  */
2337 	  if (!do_shift_rotate (RSHIFT_EXPR, n, bitpos))
2338 	    return NULL;
2339 
2340 	  /* Mask.  */
2341 	  uint64_t mask = 0;
2342 	  uint64_t tmp = (1 << BITS_PER_UNIT) - 1;
2343 	  for (unsigned i = 0; i < bitsize / BITS_PER_UNIT;
2344 	       i++, tmp <<= BITS_PER_UNIT)
2345 	    mask |= (uint64_t) MARKER_MASK << (i * BITS_PER_MARKER);
2346 	  n->n &= mask;
2347 
2348 	  /* Convert.  */
2349 	  n->type = TREE_TYPE (rhs1);
2350 	  if (!n->base_addr)
2351 	    n->range = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
2352 
2353 	  return verify_symbolic_number_p (n, stmt) ? stmt : NULL;
2354 	}
2355 
2356       return NULL;
2357     }
2358 
2359   if (TREE_CODE (rhs1) != SSA_NAME)
2360     return NULL;
2361 
2362   code = gimple_assign_rhs_code (stmt);
2363   rhs_class = gimple_assign_rhs_class (stmt);
2364   rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
2365 
2366   if (rhs_class == GIMPLE_BINARY_RHS)
2367     rhs2 = gimple_assign_rhs2 (stmt);
2368 
2369   /* Handle unary rhs and binary rhs with integer constants as second
2370      operand.  */
2371 
2372   if (rhs_class == GIMPLE_UNARY_RHS
2373       || (rhs_class == GIMPLE_BINARY_RHS
2374 	  && TREE_CODE (rhs2) == INTEGER_CST))
2375     {
2376       if (code != BIT_AND_EXPR
2377 	  && code != LSHIFT_EXPR
2378 	  && code != RSHIFT_EXPR
2379 	  && code != LROTATE_EXPR
2380 	  && code != RROTATE_EXPR
2381 	  && !CONVERT_EXPR_CODE_P (code))
2382 	return NULL;
2383 
2384       source_stmt1 = find_bswap_or_nop_1 (rhs1_stmt, n, limit - 1);
2385 
2386       /* If find_bswap_or_nop_1 returned NULL, STMT is a leaf node and
2387 	 we have to initialize the symbolic number.  */
2388       if (!source_stmt1)
2389 	{
2390 	  if (gimple_assign_load_p (stmt)
2391 	      || !init_symbolic_number (n, rhs1))
2392 	    return NULL;
2393 	  source_stmt1 = stmt;
2394 	}
2395 
2396       switch (code)
2397 	{
2398 	case BIT_AND_EXPR:
2399 	  {
2400 	    int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
2401 	    uint64_t val = int_cst_value (rhs2), mask = 0;
2402 	    uint64_t tmp = (1 << BITS_PER_UNIT) - 1;
2403 
2404 	    /* Only constants masking full bytes are allowed.  */
2405 	    for (i = 0; i < size; i++, tmp <<= BITS_PER_UNIT)
2406 	      if ((val & tmp) != 0 && (val & tmp) != tmp)
2407 		return NULL;
2408 	      else if (val & tmp)
2409 		mask |= (uint64_t) MARKER_MASK << (i * BITS_PER_MARKER);
2410 
2411 	    n->n &= mask;
2412 	  }
2413 	  break;
2414 	case LSHIFT_EXPR:
2415 	case RSHIFT_EXPR:
2416 	case LROTATE_EXPR:
2417 	case RROTATE_EXPR:
2418 	  if (!do_shift_rotate (code, n, (int) TREE_INT_CST_LOW (rhs2)))
2419 	    return NULL;
2420 	  break;
2421 	CASE_CONVERT:
2422 	  {
2423 	    int i, type_size, old_type_size;
2424 	    tree type;
2425 
2426 	    type = gimple_expr_type (stmt);
2427 	    type_size = TYPE_PRECISION (type);
2428 	    if (type_size % BITS_PER_UNIT != 0)
2429 	      return NULL;
2430 	    type_size /= BITS_PER_UNIT;
2431 	    if (type_size > 64 / BITS_PER_MARKER)
2432 	      return NULL;
2433 
2434 	    /* Sign extension: result is dependent on the value.  */
2435 	    old_type_size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
2436 	    if (!TYPE_UNSIGNED (n->type) && type_size > old_type_size
2437 		&& HEAD_MARKER (n->n, old_type_size))
2438 	      for (i = 0; i < type_size - old_type_size; i++)
2439 		n->n |= (uint64_t) MARKER_BYTE_UNKNOWN
2440 			<< ((type_size - 1 - i) * BITS_PER_MARKER);
2441 
2442 	    if (type_size < 64 / BITS_PER_MARKER)
2443 	      {
2444 		/* If STMT casts to a smaller type mask out the bits not
2445 		   belonging to the target type.  */
2446 		n->n &= ((uint64_t) 1 << (type_size * BITS_PER_MARKER)) - 1;
2447 	      }
2448 	    n->type = type;
2449 	    if (!n->base_addr)
2450 	      n->range = type_size;
2451 	  }
2452 	  break;
2453 	default:
2454 	  return NULL;
2455 	};
2456       return verify_symbolic_number_p (n, stmt) ? source_stmt1 : NULL;
2457     }
2458 
2459   /* Handle binary rhs.  */
2460 
2461   if (rhs_class == GIMPLE_BINARY_RHS)
2462     {
2463       struct symbolic_number n1, n2;
2464       gimple *source_stmt, *source_stmt2;
2465 
2466       if (code != BIT_IOR_EXPR)
2467 	return NULL;
2468 
2469       if (TREE_CODE (rhs2) != SSA_NAME)
2470 	return NULL;
2471 
2472       rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
2473 
2474       switch (code)
2475 	{
2476 	case BIT_IOR_EXPR:
2477 	  source_stmt1 = find_bswap_or_nop_1 (rhs1_stmt, &n1, limit - 1);
2478 
2479 	  if (!source_stmt1)
2480 	    return NULL;
2481 
2482 	  source_stmt2 = find_bswap_or_nop_1 (rhs2_stmt, &n2, limit - 1);
2483 
2484 	  if (!source_stmt2)
2485 	    return NULL;
2486 
2487 	  if (TYPE_PRECISION (n1.type) != TYPE_PRECISION (n2.type))
2488 	    return NULL;
2489 
2490 	  if (!n1.vuse != !n2.vuse
2491 	      || (n1.vuse && !operand_equal_p (n1.vuse, n2.vuse, 0)))
2492 	    return NULL;
2493 
2494 	  source_stmt
2495 	    = perform_symbolic_merge (source_stmt1, &n1, source_stmt2, &n2, n);
2496 
2497 	  if (!source_stmt)
2498 	    return NULL;
2499 
2500 	  if (!verify_symbolic_number_p (n, stmt))
2501 	    return NULL;
2502 
2503 	  break;
2504 	default:
2505 	  return NULL;
2506 	}
2507       return source_stmt;
2508     }
2509   return NULL;
2510 }
2511 
2512 /* Check if STMT completes a bswap implementation or a read in a given
2513    endianness consisting of ORs, SHIFTs and ANDs and set *BSWAP
2514    accordingly.  It also sets N to represent the kind of operations
2515    performed: size of the resulting expression and whether it works on
2516    a memory source, and if so its alias set and vuse.  Finally, the
2517    function returns a stmt whose rhs's first tree is the source
2518    expression.  */
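/* For instance, on a little-endian target
     x = a[0] | (a[1] << 8) | (a[2] << 16) | (a[3] << 24)
   matches CMPNOP (a plain 32-bit load), whereas
     x = (a[0] << 24) | (a[1] << 16) | (a[2] << 8) | a[3]
   matches CMPXCHG and is recognized as a 32-bit load followed by a byte
   swap.  */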
2519 
2520 static gimple *
2521 find_bswap_or_nop (gimple *stmt, struct symbolic_number *n, bool *bswap)
2522 {
2523   unsigned rsize;
2524   uint64_t tmpn, mask;
2525   /* The number which the find_bswap_or_nop_1 result should match in order
2526      to have a full byte swap.  The number is shifted to the right
2527      according to the size of the symbolic number before using it.  */
2528   uint64_t cmpxchg = CMPXCHG;
2529   uint64_t cmpnop = CMPNOP;
2530 
2531   gimple *ins_stmt;
2532   int limit;
2533 
2534   /* The last parameter determines the search depth limit.  It usually
2535      correlates directly to the number n of bytes to be touched.  We
2536      increase that number by log2(n) + 1 here in order to also
2537      cover signed -> unsigned conversions of the src operand as can be seen
2538      in libgcc, and for the initial shift/and operation of the src operand.  */
2539   limit = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (gimple_expr_type (stmt)));
2540   limit += 1 + (int) ceil_log2 ((unsigned HOST_WIDE_INT) limit);
2541   ins_stmt = find_bswap_or_nop_1 (stmt, n, limit);
2542 
2543   if (!ins_stmt)
2544     return NULL;
2545 
2546   /* Find real size of result (highest non-zero byte).  */
2547   if (n->base_addr)
2548     for (tmpn = n->n, rsize = 0; tmpn; tmpn >>= BITS_PER_MARKER, rsize++);
2549   else
2550     rsize = n->range;
2551 
2552   /* Zero out the bits corresponding to untouched bytes in the original
2553      gimple expression.  */
2554   if (n->range < (int) sizeof (int64_t))
2555     {
2556       mask = ((uint64_t) 1 << (n->range * BITS_PER_MARKER)) - 1;
2557       cmpxchg >>= (64 / BITS_PER_MARKER - n->range) * BITS_PER_MARKER;
2558       cmpnop &= mask;
2559     }
2560 
2561   /* Zero out the bits corresponding to unused bytes in the result of the
2562      gimple expression.  */
2563   if (rsize < n->range)
2564     {
2565       if (BYTES_BIG_ENDIAN)
2566 	{
2567 	  mask = ((uint64_t) 1 << (rsize * BITS_PER_MARKER)) - 1;
2568 	  cmpxchg &= mask;
2569 	  cmpnop >>= (n->range - rsize) * BITS_PER_MARKER;
2570 	}
2571       else
2572 	{
2573 	  mask = ((uint64_t) 1 << (rsize * BITS_PER_MARKER)) - 1;
2574 	  cmpxchg >>= (n->range - rsize) * BITS_PER_MARKER;
2575 	  cmpnop &= mask;
2576 	}
2577       n->range = rsize;
2578     }
2579 
2580   /* A complete byte swap should make the symbolic number start with
2581      the largest digit in the highest order byte.  An unchanged symbolic
2582      number indicates a read with the same endianness as the target.  */
2583   if (n->n == cmpnop)
2584     *bswap = false;
2585   else if (n->n == cmpxchg)
2586     *bswap = true;
2587   else
2588     return NULL;
2589 
2590   /* Useless bit manipulation performed by code.  */
2591   if (!n->base_addr && n->n == cmpnop)
2592     return NULL;
2593 
2594   n->range *= BITS_PER_UNIT;
2595   return ins_stmt;
2596 }
2597 
2598 namespace {
2599 
2600 const pass_data pass_data_optimize_bswap =
2601 {
2602   GIMPLE_PASS, /* type */
2603   "bswap", /* name */
2604   OPTGROUP_NONE, /* optinfo_flags */
2605   TV_NONE, /* tv_id */
2606   PROP_ssa, /* properties_required */
2607   0, /* properties_provided */
2608   0, /* properties_destroyed */
2609   0, /* todo_flags_start */
2610   0, /* todo_flags_finish */
2611 };
2612 
2613 class pass_optimize_bswap : public gimple_opt_pass
2614 {
2615 public:
2616   pass_optimize_bswap (gcc::context *ctxt)
2617     : gimple_opt_pass (pass_data_optimize_bswap, ctxt)
2618   {}
2619 
2620   /* opt_pass methods: */
2621   virtual bool gate (function *)
2622     {
2623       return flag_expensive_optimizations && optimize;
2624     }
2625 
2626   virtual unsigned int execute (function *);
2627 
2628 }; // class pass_optimize_bswap
2629 
2630 /* Perform the bswap optimization: replace the expression computed in the rhs
2631    of CUR_STMT by an equivalent bswap, load or load + bswap expression.
2632    Which of these alternatives replaces the rhs is given by N->base_addr (non
2633    null if a load is needed) and BSWAP.  The type, VUSE and alias set of the
2634    load to perform are also given in N, while the builtin bswap to invoke is
2635    given by FNDECL.  Finally, if a load is involved, INS_STMT refers to one of
2636    the load statements used to construct the rhs in CUR_STMT, and N->range
2637    gives the size of the rhs expression for maintaining some statistics.
2638 
2639    Note that if the replacement involves a load, CUR_STMT is moved just after
2640    INS_STMT to do the load with the same VUSE, which can lead to CUR_STMT
2641    changing basic block.  */
2642 
2643 static bool
2644 bswap_replace (gimple *cur_stmt, gimple *ins_stmt, tree fndecl,
2645 	       tree bswap_type, tree load_type, struct symbolic_number *n,
2646 	       bool bswap)
2647 {
2648   gimple_stmt_iterator gsi;
2649   tree src, tmp, tgt;
2650   gimple *bswap_stmt;
2651 
2652   gsi = gsi_for_stmt (cur_stmt);
2653   src = n->src;
2654   tgt = gimple_assign_lhs (cur_stmt);
2655 
2656   /* Need to load the value from memory first.  */
2657   if (n->base_addr)
2658     {
2659       gimple_stmt_iterator gsi_ins = gsi_for_stmt (ins_stmt);
2660       tree addr_expr, addr_tmp, val_expr, val_tmp;
2661       tree load_offset_ptr, aligned_load_type;
2662       gimple *addr_stmt, *load_stmt;
2663       unsigned align;
2664       HOST_WIDE_INT load_offset = 0;
2665       basic_block ins_bb, cur_bb;
2666 
2667       ins_bb = gimple_bb (ins_stmt);
2668       cur_bb = gimple_bb (cur_stmt);
2669       if (!dominated_by_p (CDI_DOMINATORS, cur_bb, ins_bb))
2670 	return false;
2671 
2672       align = get_object_alignment (src);
2673 
2674       /* Move cur_stmt just before one of the loads of the original
2675 	 to ensure it has the same VUSE.  See PR61517 for what could
2676 	 go wrong.  */
2677       if (gimple_bb (cur_stmt) != gimple_bb (ins_stmt))
2678 	reset_flow_sensitive_info (gimple_assign_lhs (cur_stmt));
2679       gsi_move_before (&gsi, &gsi_ins);
2680       gsi = gsi_for_stmt (cur_stmt);
2681 
2682       /* Compute address to load from and cast according to the size
2683 	 of the load.  */
2684       addr_expr = build_fold_addr_expr (unshare_expr (src));
2685       if (is_gimple_mem_ref_addr (addr_expr))
2686 	addr_tmp = addr_expr;
2687       else
2688 	{
2689 	  addr_tmp = make_temp_ssa_name (TREE_TYPE (addr_expr), NULL,
2690 					 "load_src");
2691 	  addr_stmt = gimple_build_assign (addr_tmp, addr_expr);
2692 	  gsi_insert_before (&gsi, addr_stmt, GSI_SAME_STMT);
2693 	}
2694 
2695       /* Perform the load.  */
2696       aligned_load_type = load_type;
2697       if (align < TYPE_ALIGN (load_type))
2698 	aligned_load_type = build_aligned_type (load_type, align);
2699       load_offset_ptr = build_int_cst (n->alias_set, load_offset);
2700       val_expr = fold_build2 (MEM_REF, aligned_load_type, addr_tmp,
2701 			      load_offset_ptr);
2702 
2703       if (!bswap)
2704 	{
2705 	  if (n->range == 16)
2706 	    nop_stats.found_16bit++;
2707 	  else if (n->range == 32)
2708 	    nop_stats.found_32bit++;
2709 	  else
2710 	    {
2711 	      gcc_assert (n->range == 64);
2712 	      nop_stats.found_64bit++;
2713 	    }
2714 
2715 	  /* Convert the result of load if necessary.  */
2716 	  if (!useless_type_conversion_p (TREE_TYPE (tgt), load_type))
2717 	    {
2718 	      val_tmp = make_temp_ssa_name (aligned_load_type, NULL,
2719 					    "load_dst");
2720 	      load_stmt = gimple_build_assign (val_tmp, val_expr);
2721 	      gimple_set_vuse (load_stmt, n->vuse);
2722 	      gsi_insert_before (&gsi, load_stmt, GSI_SAME_STMT);
2723 	      gimple_assign_set_rhs_with_ops (&gsi, NOP_EXPR, val_tmp);
2724 	    }
2725 	  else
2726 	    {
2727 	      gimple_assign_set_rhs_with_ops (&gsi, MEM_REF, val_expr);
2728 	      gimple_set_vuse (cur_stmt, n->vuse);
2729 	    }
2730 	  update_stmt (cur_stmt);
2731 
2732 	  if (dump_file)
2733 	    {
2734 	      fprintf (dump_file,
2735 		       "%d bit load in target endianness found at: ",
2736 		       (int) n->range);
2737 	      print_gimple_stmt (dump_file, cur_stmt, 0, 0);
2738 	    }
2739 	  return true;
2740 	}
2741       else
2742 	{
2743 	  val_tmp = make_temp_ssa_name (aligned_load_type, NULL, "load_dst");
2744 	  load_stmt = gimple_build_assign (val_tmp, val_expr);
2745 	  gimple_set_vuse (load_stmt, n->vuse);
2746 	  gsi_insert_before (&gsi, load_stmt, GSI_SAME_STMT);
2747 	}
2748       src = val_tmp;
2749     }
2750   else if (TREE_CODE (src) == BIT_FIELD_REF)
2751     src = TREE_OPERAND (src, 0);
2752 
2753   if (n->range == 16)
2754     bswap_stats.found_16bit++;
2755   else if (n->range == 32)
2756     bswap_stats.found_32bit++;
2757   else
2758     {
2759       gcc_assert (n->range == 64);
2760       bswap_stats.found_64bit++;
2761     }
2762 
2763   tmp = src;
2764 
2765   /* Convert the src expression if necessary.  */
2766   if (!useless_type_conversion_p (TREE_TYPE (tmp), bswap_type))
2767     {
2768       gimple *convert_stmt;
2769 
2770       tmp = make_temp_ssa_name (bswap_type, NULL, "bswapsrc");
2771       convert_stmt = gimple_build_assign (tmp, NOP_EXPR, src);
2772       gsi_insert_before (&gsi, convert_stmt, GSI_SAME_STMT);
2773     }
2774 
2775   /* Canonical form for 16 bit bswap is a rotate expression.  Only 16-bit values
2776      are considered, as a rotation of 2N-bit values by N bits is generally not
2777      equivalent to a bswap.  Consider for instance 0x01020304 r>> 16 which
2778      gives 0x03040102 while a bswap for that value is 0x04030201.  */
2779   if (bswap && n->range == 16)
2780     {
2781       tree count = build_int_cst (NULL, BITS_PER_UNIT);
2782       src = fold_build2 (LROTATE_EXPR, bswap_type, tmp, count);
2783       bswap_stmt = gimple_build_assign (NULL, src);
2784     }
2785   else
2786     bswap_stmt = gimple_build_call (fndecl, 1, tmp);
2787 
2788   tmp = tgt;
2789 
2790   /* Convert the result if necessary.  */
2791   if (!useless_type_conversion_p (TREE_TYPE (tgt), bswap_type))
2792     {
2793       gimple *convert_stmt;
2794 
2795       tmp = make_temp_ssa_name (bswap_type, NULL, "bswapdst");
2796       convert_stmt = gimple_build_assign (tgt, NOP_EXPR, tmp);
2797       gsi_insert_after (&gsi, convert_stmt, GSI_SAME_STMT);
2798     }
2799 
2800   gimple_set_lhs (bswap_stmt, tmp);
2801 
2802   if (dump_file)
2803     {
2804       fprintf (dump_file, "%d bit bswap implementation found at: ",
2805 	       (int) n->range);
2806       print_gimple_stmt (dump_file, cur_stmt, 0, 0);
2807     }
2808 
2809   gsi_insert_after (&gsi, bswap_stmt, GSI_SAME_STMT);
2810   gsi_remove (&gsi, true);
2811   return true;
2812 }
2813 
2814 /* Find manual byte swap implementations as well as loads in a given
2815    endianness.  Byte swaps are turned into a bswap builtin invocation,
2816    while endian loads are converted to a bswap builtin invocation or a
2817    simple load according to the target endianness.  */
2818 
2819 unsigned int
2820 pass_optimize_bswap::execute (function *fun)
2821 {
2822   basic_block bb;
2823   bool bswap32_p, bswap64_p;
2824   bool changed = false;
2825   tree bswap32_type = NULL_TREE, bswap64_type = NULL_TREE;
2826 
2827   if (BITS_PER_UNIT != 8)
2828     return 0;
2829 
2830   bswap32_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP32)
2831 	       && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing);
2832   bswap64_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP64)
2833 	       && (optab_handler (bswap_optab, DImode) != CODE_FOR_nothing
2834 		   || (bswap32_p && word_mode == SImode)));
2835 
2836   /* Determine the argument type of the builtins.  The code later on
2837      assumes that the return and argument type are the same.  */
2838   if (bswap32_p)
2839     {
2840       tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
2841       bswap32_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
2842     }
2843 
2844   if (bswap64_p)
2845     {
2846       tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
2847       bswap64_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
2848     }
2849 
2850   memset (&nop_stats, 0, sizeof (nop_stats));
2851   memset (&bswap_stats, 0, sizeof (bswap_stats));
2852   calculate_dominance_info (CDI_DOMINATORS);
2853 
2854   FOR_EACH_BB_FN (bb, fun)
2855     {
2856       gimple_stmt_iterator gsi;
2857 
2858       /* We do a reverse scan for bswap patterns to make sure we get the
2859 	 widest match. As bswap pattern matching doesn't handle previously
2860 	 inserted smaller bswap replacements as sub-patterns, the wider
2861 	 variant wouldn't be detected.  */
2862       for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi);)
2863         {
2864 	  gimple *ins_stmt, *cur_stmt = gsi_stmt (gsi);
2865 	  tree fndecl = NULL_TREE, bswap_type = NULL_TREE, load_type;
2866 	  enum tree_code code;
2867 	  struct symbolic_number n;
2868 	  bool bswap;
2869 
2870 	  /* This gsi_prev (&gsi) is not part of the for loop because cur_stmt
2871 	     might be moved to a different basic block by bswap_replace and gsi
2872 	     must not point to it if that's the case.  Doing the gsi_prev
2873 	     here makes sure that gsi points to the statement previous to
2874 	     cur_stmt while still making sure that all statements in this
2875 	     basic block are considered.  */
2876 	  gsi_prev (&gsi);
2877 
2878 	  if (!is_gimple_assign (cur_stmt))
2879 	    continue;
2880 
2881 	  code = gimple_assign_rhs_code (cur_stmt);
2882 	  switch (code)
2883 	    {
2884 	    case LROTATE_EXPR:
2885 	    case RROTATE_EXPR:
2886 	      if (!tree_fits_uhwi_p (gimple_assign_rhs2 (cur_stmt))
2887 		  || tree_to_uhwi (gimple_assign_rhs2 (cur_stmt))
2888 		     % BITS_PER_UNIT)
2889 		continue;
2890 	      /* Fall through.  */
2891 	    case BIT_IOR_EXPR:
2892 	      break;
2893 	    default:
2894 	      continue;
2895 	    }
2896 
2897 	  ins_stmt = find_bswap_or_nop (cur_stmt, &n, &bswap);
2898 
2899 	  if (!ins_stmt)
2900 	    continue;
2901 
2902 	  switch (n.range)
2903 	    {
2904 	    case 16:
2905 	      /* Already in canonical form, nothing to do.  */
2906 	      if (code == LROTATE_EXPR || code == RROTATE_EXPR)
2907 		continue;
2908 	      load_type = bswap_type = uint16_type_node;
2909 	      break;
2910 	    case 32:
2911 	      load_type = uint32_type_node;
2912 	      if (bswap32_p)
2913 		{
2914 		  fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
2915 		  bswap_type = bswap32_type;
2916 		}
2917 	      break;
2918 	    case 64:
2919 	      load_type = uint64_type_node;
2920 	      if (bswap64_p)
2921 		{
2922 		  fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
2923 		  bswap_type = bswap64_type;
2924 		}
2925 	      break;
2926 	    default:
2927 	      continue;
2928 	    }
2929 
2930 	  if (bswap && !fndecl && n.range != 16)
2931 	    continue;
2932 
2933 	  if (bswap_replace (cur_stmt, ins_stmt, fndecl, bswap_type, load_type,
2934 			     &n, bswap))
2935 	    changed = true;
2936 	}
2937     }
2938 
2939   statistics_counter_event (fun, "16-bit nop implementations found",
2940 			    nop_stats.found_16bit);
2941   statistics_counter_event (fun, "32-bit nop implementations found",
2942 			    nop_stats.found_32bit);
2943   statistics_counter_event (fun, "64-bit nop implementations found",
2944 			    nop_stats.found_64bit);
2945   statistics_counter_event (fun, "16-bit bswap implementations found",
2946 			    bswap_stats.found_16bit);
2947   statistics_counter_event (fun, "32-bit bswap implementations found",
2948 			    bswap_stats.found_32bit);
2949   statistics_counter_event (fun, "64-bit bswap implementations found",
2950 			    bswap_stats.found_64bit);
2951 
2952   return (changed ? TODO_update_ssa : 0);
2953 }
2954 
2955 } // anon namespace
2956 
2957 gimple_opt_pass *
2958 make_pass_optimize_bswap (gcc::context *ctxt)
2959 {
2960   return new pass_optimize_bswap (ctxt);
2961 }
2962 
2963 /* Return true if STMT is a type conversion operation that can be stripped
2964    when used in a widening multiply operation.  */
2965 static bool
2966 widening_mult_conversion_strippable_p (tree result_type, gimple *stmt)
2967 {
2968   enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
2969 
2970   if (TREE_CODE (result_type) == INTEGER_TYPE)
2971     {
2972       tree op_type;
2973       tree inner_op_type;
2974 
2975       if (!CONVERT_EXPR_CODE_P (rhs_code))
2976 	return false;
2977 
2978       op_type = TREE_TYPE (gimple_assign_lhs (stmt));
2979 
2980       /* If the type of OP has the same precision as the result, then
2981 	 we can strip this conversion.  The multiply operation will be
2982 	 selected to create the correct extension as a by-product.  */
2983       if (TYPE_PRECISION (result_type) == TYPE_PRECISION (op_type))
2984 	return true;
2985 
2986       /* We can also strip a conversion if it preserves the signed-ness of
2987 	 the operation and doesn't narrow the range.  */
2988       inner_op_type = TREE_TYPE (gimple_assign_rhs1 (stmt));
2989 
2990       /* If the inner-most type is unsigned, then we can strip any
2991 	 intermediate widening operation.  If it's signed, then the
2992 	 intermediate widening operation must also be signed.  */
2993       if ((TYPE_UNSIGNED (inner_op_type)
2994 	   || TYPE_UNSIGNED (op_type) == TYPE_UNSIGNED (inner_op_type))
2995 	  && TYPE_PRECISION (op_type) > TYPE_PRECISION (inner_op_type))
2996 	return true;
2997 
2998       return false;
2999     }
3000 
3001   return rhs_code == FIXED_CONVERT_EXPR;
3002 }
3003 
3004 /* Return true if RHS is a suitable operand for a widening multiplication,
3005    assuming a target type of TYPE.
3006    There are two cases:
3007 
3008      - RHS makes some value at least twice as wide.  Store that value
3009        in *NEW_RHS_OUT if so, and store its type in *TYPE_OUT.
3010 
3011      - RHS is an integer constant.  Store that value in *NEW_RHS_OUT if so,
3012        but leave *TYPE_OUT untouched.  */
3013 
3014 static bool
3015 is_widening_mult_rhs_p (tree type, tree rhs, tree *type_out,
3016 			tree *new_rhs_out)
3017 {
3018   gimple *stmt;
3019   tree type1, rhs1;
3020 
3021   if (TREE_CODE (rhs) == SSA_NAME)
3022     {
3023       stmt = SSA_NAME_DEF_STMT (rhs);
3024       if (is_gimple_assign (stmt))
3025 	{
3026 	  if (! widening_mult_conversion_strippable_p (type, stmt))
3027 	    rhs1 = rhs;
3028 	  else
3029 	    {
3030 	      rhs1 = gimple_assign_rhs1 (stmt);
3031 
3032 	      if (TREE_CODE (rhs1) == INTEGER_CST)
3033 		{
3034 		  *new_rhs_out = rhs1;
3035 		  *type_out = NULL;
3036 		  return true;
3037 		}
3038 	    }
3039 	}
3040       else
3041 	rhs1 = rhs;
3042 
3043       type1 = TREE_TYPE (rhs1);
3044 
3045       if (TREE_CODE (type1) != TREE_CODE (type)
3046 	  || TYPE_PRECISION (type1) * 2 > TYPE_PRECISION (type))
3047 	return false;
3048 
3049       *new_rhs_out = rhs1;
3050       *type_out = type1;
3051       return true;
3052     }
3053 
3054   if (TREE_CODE (rhs) == INTEGER_CST)
3055     {
3056       *new_rhs_out = rhs;
3057       *type_out = NULL;
3058       return true;
3059     }
3060 
3061   return false;
3062 }
3063 
3064 /* Return true if STMT performs a widening multiplication, assuming the
3065    output type is TYPE.  If so, store the unwidened types of the operands
3066    in *TYPE1_OUT and *TYPE2_OUT respectively.  Also fill *RHS1_OUT and
3067    *RHS2_OUT such that converting those operands to types *TYPE1_OUT
3068    and *TYPE2_OUT would give the operands of the multiplication.  */
3069 
3070 static bool
3071 is_widening_mult_p (gimple *stmt,
3072 		    tree *type1_out, tree *rhs1_out,
3073 		    tree *type2_out, tree *rhs2_out)
3074 {
3075   tree type = TREE_TYPE (gimple_assign_lhs (stmt));
3076 
3077   if (TREE_CODE (type) == INTEGER_TYPE)
3078     {
3079       if (TYPE_OVERFLOW_TRAPS (type))
3080 	return false;
3081     }
3082   else if (TREE_CODE (type) != FIXED_POINT_TYPE)
3083     return false;
3084 
3085   if (!is_widening_mult_rhs_p (type, gimple_assign_rhs1 (stmt), type1_out,
3086 			       rhs1_out))
3087     return false;
3088 
3089   if (!is_widening_mult_rhs_p (type, gimple_assign_rhs2 (stmt), type2_out,
3090 			       rhs2_out))
3091     return false;
3092 
3093   if (*type1_out == NULL)
3094     {
3095       if (*type2_out == NULL || !int_fits_type_p (*rhs1_out, *type2_out))
3096 	return false;
3097       *type1_out = *type2_out;
3098     }
3099 
3100   if (*type2_out == NULL)
3101     {
3102       if (!int_fits_type_p (*rhs2_out, *type1_out))
3103 	return false;
3104       *type2_out = *type1_out;
3105     }
3106 
3107   /* Ensure that the larger of the two operands comes first. */
3108   if (TYPE_PRECISION (*type1_out) < TYPE_PRECISION (*type2_out))
3109     {
3110       std::swap (*type1_out, *type2_out);
3111       std::swap (*rhs1_out, *rhs2_out);
3112     }
3113 
3114   return true;
3115 }
3116 
3117 /* Process a single gimple statement STMT, which has a MULT_EXPR as
3118    its rhs, and try to convert it into a WIDEN_MULT_EXPR.  The return
3119    value is true iff we converted the statement.  */
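/* Schematically, with 32-bit int and 64-bit long long, the sequence
     _1 = (long long) a_2;
     _3 = (long long) b_4;
     c_5 = _1 * _3;
   is rewritten as c_5 = a_2 w* b_4 (WIDEN_MULT_EXPR), provided the target
   implements a suitable [us]mul_widen optab; the SSA names here are only
   illustrative.  */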
3120 
3121 static bool
3122 convert_mult_to_widen (gimple *stmt, gimple_stmt_iterator *gsi)
3123 {
3124   tree lhs, rhs1, rhs2, type, type1, type2;
3125   enum insn_code handler;
3126   machine_mode to_mode, from_mode, actual_mode;
3127   optab op;
3128   int actual_precision;
3129   location_t loc = gimple_location (stmt);
3130   bool from_unsigned1, from_unsigned2;
3131 
3132   lhs = gimple_assign_lhs (stmt);
3133   type = TREE_TYPE (lhs);
3134   if (TREE_CODE (type) != INTEGER_TYPE)
3135     return false;
3136 
3137   if (!is_widening_mult_p (stmt, &type1, &rhs1, &type2, &rhs2))
3138     return false;
3139 
3140   to_mode = TYPE_MODE (type);
3141   from_mode = TYPE_MODE (type1);
3142   from_unsigned1 = TYPE_UNSIGNED (type1);
3143   from_unsigned2 = TYPE_UNSIGNED (type2);
3144 
3145   if (from_unsigned1 && from_unsigned2)
3146     op = umul_widen_optab;
3147   else if (!from_unsigned1 && !from_unsigned2)
3148     op = smul_widen_optab;
3149   else
3150     op = usmul_widen_optab;
3151 
3152   handler = find_widening_optab_handler_and_mode (op, to_mode, from_mode,
3153 						  0, &actual_mode);
3154 
3155   if (handler == CODE_FOR_nothing)
3156     {
3157       if (op != smul_widen_optab)
3158 	{
3159 	  /* We can use a signed multiply with unsigned types as long as
3160 	     there is a wider mode to use, or it is the smaller of the two
3161 	     types that is unsigned.  Note that type1 >= type2, always.  */
3162 	  if ((TYPE_UNSIGNED (type1)
3163 	       && TYPE_PRECISION (type1) == GET_MODE_PRECISION (from_mode))
3164 	      || (TYPE_UNSIGNED (type2)
3165 		  && TYPE_PRECISION (type2) == GET_MODE_PRECISION (from_mode)))
3166 	    {
3167 	      from_mode = GET_MODE_WIDER_MODE (from_mode);
3168 	      if (GET_MODE_SIZE (to_mode) <= GET_MODE_SIZE (from_mode))
3169 		return false;
3170 	    }
3171 
3172 	  op = smul_widen_optab;
3173 	  handler = find_widening_optab_handler_and_mode (op, to_mode,
3174 							  from_mode, 0,
3175 							  &actual_mode);
3176 
3177 	  if (handler == CODE_FOR_nothing)
3178 	    return false;
3179 
3180 	  from_unsigned1 = from_unsigned2 = false;
3181 	}
3182       else
3183 	return false;
3184     }
3185 
3186   /* Ensure that the inputs to the handler are in the correct precision
3187      for the opcode.  This will be the full mode size.  */
3188   actual_precision = GET_MODE_PRECISION (actual_mode);
3189   if (2 * actual_precision > TYPE_PRECISION (type))
3190     return false;
3191   if (actual_precision != TYPE_PRECISION (type1)
3192       || from_unsigned1 != TYPE_UNSIGNED (type1))
3193     rhs1 = build_and_insert_cast (gsi, loc,
3194 				  build_nonstandard_integer_type
3195 				    (actual_precision, from_unsigned1), rhs1);
3196   if (actual_precision != TYPE_PRECISION (type2)
3197       || from_unsigned2 != TYPE_UNSIGNED (type2))
3198     rhs2 = build_and_insert_cast (gsi, loc,
3199 				  build_nonstandard_integer_type
3200 				    (actual_precision, from_unsigned2), rhs2);
3201 
3202   /* Handle constants.  */
3203   if (TREE_CODE (rhs1) == INTEGER_CST)
3204     rhs1 = fold_convert (type1, rhs1);
3205   if (TREE_CODE (rhs2) == INTEGER_CST)
3206     rhs2 = fold_convert (type2, rhs2);
3207 
3208   gimple_assign_set_rhs1 (stmt, rhs1);
3209   gimple_assign_set_rhs2 (stmt, rhs2);
3210   gimple_assign_set_rhs_code (stmt, WIDEN_MULT_EXPR);
3211   update_stmt (stmt);
3212   widen_mul_stats.widen_mults_inserted++;
3213   return true;
3214 }
3215 
3216 /* Process a single gimple statement STMT, which is found at the
3217    iterator GSI and has either a PLUS_EXPR or a MINUS_EXPR as its
3218    rhs (given by CODE), and try to convert it into a
3219    WIDEN_MULT_PLUS_EXPR or a WIDEN_MULT_MINUS_EXPR.  The return value
3220    is true iff we converted the statement.  */
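/* Schematically, this turns
     _1 = a_2 w* b_3;
     c_4 = _1 + acc_5;
   into c_4 = WIDEN_MULT_PLUS_EXPR <a_2, b_3, acc_5>, so that targets with a
   multiply-and-accumulate instruction can use it; again the SSA names are
   illustrative only.  */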
3221 
3222 static bool
3223 convert_plusminus_to_widen (gimple_stmt_iterator *gsi, gimple *stmt,
3224 			    enum tree_code code)
3225 {
3226   gimple *rhs1_stmt = NULL, *rhs2_stmt = NULL;
3227   gimple *conv1_stmt = NULL, *conv2_stmt = NULL, *conv_stmt;
3228   tree type, type1, type2, optype;
3229   tree lhs, rhs1, rhs2, mult_rhs1, mult_rhs2, add_rhs;
3230   enum tree_code rhs1_code = ERROR_MARK, rhs2_code = ERROR_MARK;
3231   optab this_optab;
3232   enum tree_code wmult_code;
3233   enum insn_code handler;
3234   machine_mode to_mode, from_mode, actual_mode;
3235   location_t loc = gimple_location (stmt);
3236   int actual_precision;
3237   bool from_unsigned1, from_unsigned2;
3238 
3239   lhs = gimple_assign_lhs (stmt);
3240   type = TREE_TYPE (lhs);
3241   if (TREE_CODE (type) != INTEGER_TYPE
3242       && TREE_CODE (type) != FIXED_POINT_TYPE)
3243     return false;
3244 
3245   if (code == MINUS_EXPR)
3246     wmult_code = WIDEN_MULT_MINUS_EXPR;
3247   else
3248     wmult_code = WIDEN_MULT_PLUS_EXPR;
3249 
3250   rhs1 = gimple_assign_rhs1 (stmt);
3251   rhs2 = gimple_assign_rhs2 (stmt);
3252 
3253   if (TREE_CODE (rhs1) == SSA_NAME)
3254     {
3255       rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
3256       if (is_gimple_assign (rhs1_stmt))
3257 	rhs1_code = gimple_assign_rhs_code (rhs1_stmt);
3258     }
3259 
3260   if (TREE_CODE (rhs2) == SSA_NAME)
3261     {
3262       rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
3263       if (is_gimple_assign (rhs2_stmt))
3264 	rhs2_code = gimple_assign_rhs_code (rhs2_stmt);
3265     }
3266 
3267   /* Allow for one conversion statement between the multiply
3268      and addition/subtraction statement.  If there is more than
3269      one conversion then we assume they would invalidate this
3270      transformation.  If that's not the case then they should have
3271      been folded before now.  */
3272   if (CONVERT_EXPR_CODE_P (rhs1_code))
3273     {
3274       conv1_stmt = rhs1_stmt;
3275       rhs1 = gimple_assign_rhs1 (rhs1_stmt);
3276       if (TREE_CODE (rhs1) == SSA_NAME)
3277 	{
3278 	  rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
3279 	  if (is_gimple_assign (rhs1_stmt))
3280 	    rhs1_code = gimple_assign_rhs_code (rhs1_stmt);
3281 	}
3282       else
3283 	return false;
3284     }
3285   if (CONVERT_EXPR_CODE_P (rhs2_code))
3286     {
3287       conv2_stmt = rhs2_stmt;
3288       rhs2 = gimple_assign_rhs1 (rhs2_stmt);
3289       if (TREE_CODE (rhs2) == SSA_NAME)
3290 	{
3291 	  rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
3292 	  if (is_gimple_assign (rhs2_stmt))
3293 	    rhs2_code = gimple_assign_rhs_code (rhs2_stmt);
3294 	}
3295       else
3296 	return false;
3297     }
3298 
3299   /* If code is WIDEN_MULT_EXPR then it would seem unnecessary to call
3300      is_widening_mult_p, but we still need the rhs operands it returns.
3301 
3302      It might also appear that it would be sufficient to use the existing
3303      operands of the widening multiply, but that would limit the choice of
3304      multiply-and-accumulate instructions.
3305 
3306      If the widened-multiplication result has more than one use, it is
3307      probably wiser not to do the conversion.  */
3308   if (code == PLUS_EXPR
3309       && (rhs1_code == MULT_EXPR || rhs1_code == WIDEN_MULT_EXPR))
3310     {
3311       if (!has_single_use (rhs1)
3312 	  || !is_widening_mult_p (rhs1_stmt, &type1, &mult_rhs1,
3313 				  &type2, &mult_rhs2))
3314 	return false;
3315       add_rhs = rhs2;
3316       conv_stmt = conv1_stmt;
3317     }
3318   else if (rhs2_code == MULT_EXPR || rhs2_code == WIDEN_MULT_EXPR)
3319     {
3320       if (!has_single_use (rhs2)
3321 	  || !is_widening_mult_p (rhs2_stmt, &type1, &mult_rhs1,
3322 				  &type2, &mult_rhs2))
3323 	return false;
3324       add_rhs = rhs1;
3325       conv_stmt = conv2_stmt;
3326     }
3327   else
3328     return false;
3329 
3330   to_mode = TYPE_MODE (type);
3331   from_mode = TYPE_MODE (type1);
3332   from_unsigned1 = TYPE_UNSIGNED (type1);
3333   from_unsigned2 = TYPE_UNSIGNED (type2);
3334   optype = type1;
3335 
3336   /* There's no such thing as a mixed sign madd yet, so use a wider mode.  */
3337   if (from_unsigned1 != from_unsigned2)
3338     {
3339       if (!INTEGRAL_TYPE_P (type))
3340 	return false;
3341       /* We can use a signed multiply with unsigned types as long as
3342 	 there is a wider mode to use, or it is the smaller of the two
3343 	 types that is unsigned.  Note that type1 >= type2, always.  */
3344       if ((from_unsigned1
3345 	   && TYPE_PRECISION (type1) == GET_MODE_PRECISION (from_mode))
3346 	  || (from_unsigned2
3347 	      && TYPE_PRECISION (type2) == GET_MODE_PRECISION (from_mode)))
3348 	{
3349 	  from_mode = GET_MODE_WIDER_MODE (from_mode);
3350 	  if (GET_MODE_SIZE (from_mode) >= GET_MODE_SIZE (to_mode))
3351 	    return false;
3352 	}
3353 
3354       from_unsigned1 = from_unsigned2 = false;
3355       optype = build_nonstandard_integer_type (GET_MODE_PRECISION (from_mode),
3356 					       false);
3357     }
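
  /* A worked case, for illustration only (not part of the original
     sources): with type1 = int, type2 = unsigned short and a DImode
     result, the unsigned operand is the narrower of the two, so a
     signed SImode multiply is safe and optype becomes a signed 32-bit
     type.  If the unsigned operand filled SImode completely, from_mode
     would first be bumped to the next wider mode, and the transform
     rejected if that mode were not narrower than to_mode.  */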
3358 
3359   /* If there was a conversion between the multiply and addition
3360      then we need to make sure it fits a multiply-and-accumulate.
3361      There should be a single mode change which does not change the
3362      value.  */
3363   if (conv_stmt)
3364     {
3365       /* We use the original, unmodified data types for this.  */
3366       tree from_type = TREE_TYPE (gimple_assign_rhs1 (conv_stmt));
3367       tree to_type = TREE_TYPE (gimple_assign_lhs (conv_stmt));
3368       int data_size = TYPE_PRECISION (type1) + TYPE_PRECISION (type2);
3369       bool is_unsigned = TYPE_UNSIGNED (type1) && TYPE_UNSIGNED (type2);
3370 
3371       if (TYPE_PRECISION (from_type) > TYPE_PRECISION (to_type))
3372 	{
3373 	  /* Conversion is a truncate.  */
3374 	  if (TYPE_PRECISION (to_type) < data_size)
3375 	    return false;
3376 	}
3377       else if (TYPE_PRECISION (from_type) < TYPE_PRECISION (to_type))
3378 	{
3379 	  /* Conversion is an extend.  Check it's the right sort.  */
3380 	  if (TYPE_UNSIGNED (from_type) != is_unsigned
3381 	      && !(is_unsigned && TYPE_PRECISION (from_type) > data_size))
3382 	    return false;
3383 	}
3384       /* else convert is a no-op for our purposes.  */
3385     }
3386 
3387   /* Verify that the machine can perform a widening multiply
3388      accumulate in this mode/signedness combination, otherwise
3389      this transformation is likely to pessimize code.  */
3390   this_optab = optab_for_tree_code (wmult_code, optype, optab_default);
3391   handler = find_widening_optab_handler_and_mode (this_optab, to_mode,
3392 						  from_mode, 0, &actual_mode);
3393 
3394   if (handler == CODE_FOR_nothing)
3395     return false;
3396 
3397   /* Ensure that the inputs to the handler are in the correct precision
3398      for the opcode.  This will be the full mode size.  */
3399   actual_precision = GET_MODE_PRECISION (actual_mode);
3400   if (actual_precision != TYPE_PRECISION (type1)
3401       || from_unsigned1 != TYPE_UNSIGNED (type1))
3402     mult_rhs1 = build_and_insert_cast (gsi, loc,
3403 				       build_nonstandard_integer_type
3404 				         (actual_precision, from_unsigned1),
3405 				       mult_rhs1);
3406   if (actual_precision != TYPE_PRECISION (type2)
3407       || from_unsigned2 != TYPE_UNSIGNED (type2))
3408     mult_rhs2 = build_and_insert_cast (gsi, loc,
3409 				       build_nonstandard_integer_type
3410 					 (actual_precision, from_unsigned2),
3411 				       mult_rhs2);
3412 
3413   if (!useless_type_conversion_p (type, TREE_TYPE (add_rhs)))
3414     add_rhs = build_and_insert_cast (gsi, loc, type, add_rhs);
3415 
3416   /* Handle constants.  */
3417   if (TREE_CODE (mult_rhs1) == INTEGER_CST)
3418     mult_rhs1 = fold_convert (type1, mult_rhs1);
3419   if (TREE_CODE (mult_rhs2) == INTEGER_CST)
3420     mult_rhs2 = fold_convert (type2, mult_rhs2);
3421 
3422   gimple_assign_set_rhs_with_ops (gsi, wmult_code, mult_rhs1, mult_rhs2,
3423 				  add_rhs);
3424   update_stmt (gsi_stmt (*gsi));
3425   widen_mul_stats.maccs_inserted++;
3426   return true;
3427 }
3428 
3429 /* Combine the multiplication at MUL_STMT with operands MULOP1 and MULOP2
3430    with uses in additions and subtractions to form fused multiply-add
3431    operations.  Returns true if successful and MUL_STMT should be removed.  */
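
/* For illustration only (an informal sketch, not part of the original
   sources): given t = a * b, with every use of t an addition or a
   subtraction in the same basic block,

	x = t + c;	becomes		x = FMA_EXPR <a, b, c>;
	y = t - c;	becomes		y = FMA_EXPR <a, b, -c>;
	z = c - t;	becomes		z = FMA_EXPR <-a, b, c>;

   (the negations are emitted as separate NEGATE_EXPR statements), after
   which the multiplication itself is dead and can be removed.  */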
3432 
3433 static bool
3434 convert_mult_to_fma (gimple *mul_stmt, tree op1, tree op2)
3435 {
3436   tree mul_result = gimple_get_lhs (mul_stmt);
3437   tree type = TREE_TYPE (mul_result);
3438   gimple *use_stmt, *neguse_stmt;
3439   gassign *fma_stmt;
3440   use_operand_p use_p;
3441   imm_use_iterator imm_iter;
3442 
3443   if (FLOAT_TYPE_P (type)
3444       && flag_fp_contract_mode == FP_CONTRACT_OFF)
3445     return false;
3446 
3447   /* We don't want to do bitfield reduction ops.  */
3448   if (INTEGRAL_TYPE_P (type)
3449       && (TYPE_PRECISION (type)
3450 	  != GET_MODE_PRECISION (TYPE_MODE (type))
3451 	  || TYPE_OVERFLOW_TRAPS (type)))
3452     return false;
3453 
3454   /* If the target doesn't support it, don't generate it.  We assume that
3455      if fma isn't available then fms, fnma or fnms are not either.  */
3456   if (optab_handler (fma_optab, TYPE_MODE (type)) == CODE_FOR_nothing)
3457     return false;
3458 
3459   /* If the multiplication has zero uses, it is kept around probably because
3460      of -fnon-call-exceptions.  Don't optimize it away in that case,
3461      it is DCE's job.  */
3462   if (has_zero_uses (mul_result))
3463     return false;
3464 
3465   /* Make sure that the multiplication statement becomes dead after
3466      the transformation, i.e. that all uses are transformed to FMAs.
3467      This means we assume that an FMA operation has the same cost
3468      as an addition.  */
3469   FOR_EACH_IMM_USE_FAST (use_p, imm_iter, mul_result)
3470     {
3471       enum tree_code use_code;
3472       tree result = mul_result;
3473       bool negate_p = false;
3474 
3475       use_stmt = USE_STMT (use_p);
3476 
3477       if (is_gimple_debug (use_stmt))
3478 	continue;
3479 
3480       /* For now restrict this operation to single basic blocks.  In theory
3481 	 we would want to support sinking the multiplication in
3482 	 m = a*b;
3483 	 if ()
3484 	   ma = m + c;
3485 	 else
3486 	   d = m;
3487 	 to form a fma in the then block and sink the multiplication to the
3488 	 else block.  */
3489       if (gimple_bb (use_stmt) != gimple_bb (mul_stmt))
3490 	return false;
3491 
3492       if (!is_gimple_assign (use_stmt))
3493 	return false;
3494 
3495       use_code = gimple_assign_rhs_code (use_stmt);
3496 
3497       /* A negate on the multiplication leads to FNMA.  */
3498       if (use_code == NEGATE_EXPR)
3499 	{
3500 	  ssa_op_iter iter;
3501 	  use_operand_p usep;
3502 
3503 	  result = gimple_assign_lhs (use_stmt);
3504 
3505 	  /* Make sure the negate statement becomes dead with this
3506 	     single transformation.  */
3507 	  if (!single_imm_use (gimple_assign_lhs (use_stmt),
3508 			       &use_p, &neguse_stmt))
3509 	    return false;
3510 
3511 	  /* Make sure the multiplication isn't also used on that stmt.  */
3512 	  FOR_EACH_PHI_OR_STMT_USE (usep, neguse_stmt, iter, SSA_OP_USE)
3513 	    if (USE_FROM_PTR (usep) == mul_result)
3514 	      return false;
3515 
3516 	  /* Re-validate.  */
3517 	  use_stmt = neguse_stmt;
3518 	  if (gimple_bb (use_stmt) != gimple_bb (mul_stmt))
3519 	    return false;
3520 	  if (!is_gimple_assign (use_stmt))
3521 	    return false;
3522 
3523 	  use_code = gimple_assign_rhs_code (use_stmt);
3524 	  negate_p = true;
3525 	}
3526 
3527       switch (use_code)
3528 	{
3529 	case MINUS_EXPR:
3530 	  if (gimple_assign_rhs2 (use_stmt) == result)
3531 	    negate_p = !negate_p;
3532 	  break;
3533 	case PLUS_EXPR:
3534 	  break;
3535 	default:
3536 	  /* FMA can only be formed from PLUS and MINUS.  */
3537 	  return false;
3538 	}
3539 
3540       /* If the subtrahend (gimple_assign_rhs2 (use_stmt)) is computed
3541 	 by a MULT_EXPR that we'll visit later, we might be able to
3542 	 get a more profitable match with fnma.
3543 	 OTOH, if we don't, a negate / fma pair likely has lower latency
3544 	 than a mult / subtract pair.  */
3545       if (use_code == MINUS_EXPR && !negate_p
3546 	  && gimple_assign_rhs1 (use_stmt) == result
3547 	  && optab_handler (fms_optab, TYPE_MODE (type)) == CODE_FOR_nothing
3548 	  && optab_handler (fnma_optab, TYPE_MODE (type)) != CODE_FOR_nothing)
3549 	{
3550 	  tree rhs2 = gimple_assign_rhs2 (use_stmt);
3551 
3552 	  if (TREE_CODE (rhs2) == SSA_NAME)
3553 	    {
3554 	      gimple *stmt2 = SSA_NAME_DEF_STMT (rhs2);
3555 	      if (has_single_use (rhs2)
3556 		  && is_gimple_assign (stmt2)
3557 		  && gimple_assign_rhs_code (stmt2) == MULT_EXPR)
3558 		return false;
3559 	    }
3560 	}
3561 
3562       /* We can't handle a * b + a * b.  */
3563       if (gimple_assign_rhs1 (use_stmt) == gimple_assign_rhs2 (use_stmt))
3564 	return false;
3565 
3566       /* While it is possible to validate whether or not the exact form
3567 	 that we've recognized is available in the backend, the assumption
3568 	 is that the transformation is never a loss.  For instance, suppose
3569 	 the target only has the plain FMA pattern available.  Consider
3570 	 a*b-c -> fma(a,b,-c): we've exchanged MUL+SUB for FMA+NEG, which
3571 	 is still two operations.  Consider -(a*b)-c -> fma(-a,b,-c): we
3572 	 still have 3 operations, but in the FMA form the two NEGs are
3573 	 independent and could be run in parallel.  */
3574     }
3575 
3576   FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, mul_result)
3577     {
3578       gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
3579       enum tree_code use_code;
3580       tree addop, mulop1 = op1, result = mul_result;
3581       bool negate_p = false;
3582 
3583       if (is_gimple_debug (use_stmt))
3584 	continue;
3585 
3586       use_code = gimple_assign_rhs_code (use_stmt);
3587       if (use_code == NEGATE_EXPR)
3588 	{
3589 	  result = gimple_assign_lhs (use_stmt);
3590 	  single_imm_use (gimple_assign_lhs (use_stmt), &use_p, &neguse_stmt);
3591 	  gsi_remove (&gsi, true);
3592 	  release_defs (use_stmt);
3593 
3594 	  use_stmt = neguse_stmt;
3595 	  gsi = gsi_for_stmt (use_stmt);
3596 	  use_code = gimple_assign_rhs_code (use_stmt);
3597 	  negate_p = true;
3598 	}
3599 
3600       if (gimple_assign_rhs1 (use_stmt) == result)
3601 	{
3602 	  addop = gimple_assign_rhs2 (use_stmt);
3603 	  /* a * b - c -> a * b + (-c)  */
3604 	  if (gimple_assign_rhs_code (use_stmt) == MINUS_EXPR)
3605 	    addop = force_gimple_operand_gsi (&gsi,
3606 					      build1 (NEGATE_EXPR,
3607 						      type, addop),
3608 					      true, NULL_TREE, true,
3609 					      GSI_SAME_STMT);
3610 	}
3611       else
3612 	{
3613 	  addop = gimple_assign_rhs1 (use_stmt);
3614 	  /* a - b * c -> (-b) * c + a */
3615 	  if (gimple_assign_rhs_code (use_stmt) == MINUS_EXPR)
3616 	    negate_p = !negate_p;
3617 	}
3618 
3619       if (negate_p)
3620 	mulop1 = force_gimple_operand_gsi (&gsi,
3621 					   build1 (NEGATE_EXPR,
3622 						   type, mulop1),
3623 					   true, NULL_TREE, true,
3624 					   GSI_SAME_STMT);
3625 
3626       fma_stmt = gimple_build_assign (gimple_assign_lhs (use_stmt),
3627 				      FMA_EXPR, mulop1, op2, addop);
3628       gsi_replace (&gsi, fma_stmt, true);
3629       widen_mul_stats.fmas_inserted++;
3630     }
3631 
3632   return true;
3633 }
3634 
3635 
3636 /* Helper function of match_uaddsub_overflow.  Return 1
3637    if USE_STMT is an unsigned overflow check ovf != 0 for
3638    STMT, -1 if USE_STMT is an unsigned overflow check ovf == 0
3639    and 0 otherwise.  */
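
/* For example (an informal sketch, not part of the original sources):
   given STMT "r = x + y" in an unsigned type,

	if (r < x)	is an overflow check, so we return 1;
	if (x <= r)	checks for the absence of overflow, so we return -1;

   uses that do not match one of the recognized forms yield 0.  */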
3640 
3641 static int
3642 uaddsub_overflow_check_p (gimple *stmt, gimple *use_stmt)
3643 {
3644   enum tree_code ccode = ERROR_MARK;
3645   tree crhs1 = NULL_TREE, crhs2 = NULL_TREE;
3646   if (gimple_code (use_stmt) == GIMPLE_COND)
3647     {
3648       ccode = gimple_cond_code (use_stmt);
3649       crhs1 = gimple_cond_lhs (use_stmt);
3650       crhs2 = gimple_cond_rhs (use_stmt);
3651     }
3652   else if (is_gimple_assign (use_stmt))
3653     {
3654       if (gimple_assign_rhs_class (use_stmt) == GIMPLE_BINARY_RHS)
3655 	{
3656 	  ccode = gimple_assign_rhs_code (use_stmt);
3657 	  crhs1 = gimple_assign_rhs1 (use_stmt);
3658 	  crhs2 = gimple_assign_rhs2 (use_stmt);
3659 	}
3660       else if (gimple_assign_rhs_code (use_stmt) == COND_EXPR)
3661 	{
3662 	  tree cond = gimple_assign_rhs1 (use_stmt);
3663 	  if (COMPARISON_CLASS_P (cond))
3664 	    {
3665 	      ccode = TREE_CODE (cond);
3666 	      crhs1 = TREE_OPERAND (cond, 0);
3667 	      crhs2 = TREE_OPERAND (cond, 1);
3668 	    }
3669 	  else
3670 	    return 0;
3671 	}
3672       else
3673 	return 0;
3674     }
3675   else
3676     return 0;
3677 
3678   if (TREE_CODE_CLASS (ccode) != tcc_comparison)
3679     return 0;
3680 
3681   enum tree_code code = gimple_assign_rhs_code (stmt);
3682   tree lhs = gimple_assign_lhs (stmt);
3683   tree rhs1 = gimple_assign_rhs1 (stmt);
3684   tree rhs2 = gimple_assign_rhs2 (stmt);
3685 
3686   switch (ccode)
3687     {
3688     case GT_EXPR:
3689     case LE_EXPR:
3690       /* r = a - b; r > a or r <= a
3691 	 r = a + b; a > r or a <= r or b > r or b <= r.  */
3692       if ((code == MINUS_EXPR && crhs1 == lhs && crhs2 == rhs1)
3693 	  || (code == PLUS_EXPR && (crhs1 == rhs1 || crhs1 == rhs2)
3694 	      && crhs2 == lhs))
3695 	return ccode == GT_EXPR ? 1 : -1;
3696       break;
3697     case LT_EXPR:
3698     case GE_EXPR:
3699       /* r = a - b; a < r or a >= r
3700 	 r = a + b; r < a or r >= a or r < b or r >= b.  */
3701       if ((code == MINUS_EXPR && crhs1 == rhs1 && crhs2 == lhs)
3702 	  || (code == PLUS_EXPR && crhs1 == lhs
3703 	      && (crhs2 == rhs1 || crhs2 == rhs2)))
3704 	return ccode == LT_EXPR ? 1 : -1;
3705       break;
3706     default:
3707       break;
3708     }
3709   return 0;
3710 }
3711 
3712 /* Recognize for unsigned x
3713    x = y - z;
3714    if (x > y)
3715    where there are other uses of x and replace it with
3716    _7 = SUB_OVERFLOW (y, z);
3717    x = REALPART_EXPR <_7>;
3718    _8 = IMAGPART_EXPR <_7>;
3719    if (_8)
3720    and similarly for addition.  */
3721 
3722 static bool
3723 match_uaddsub_overflow (gimple_stmt_iterator *gsi, gimple *stmt,
3724 			enum tree_code code)
3725 {
3726   tree lhs = gimple_assign_lhs (stmt);
3727   tree type = TREE_TYPE (lhs);
3728   use_operand_p use_p;
3729   imm_use_iterator iter;
3730   bool use_seen = false;
3731   bool ovf_use_seen = false;
3732   gimple *use_stmt;
3733 
3734   gcc_checking_assert (code == PLUS_EXPR || code == MINUS_EXPR);
3735   if (!INTEGRAL_TYPE_P (type)
3736       || !TYPE_UNSIGNED (type)
3737       || has_zero_uses (lhs)
3738       || has_single_use (lhs)
3739       || optab_handler (code == PLUS_EXPR ? uaddv4_optab : usubv4_optab,
3740 			TYPE_MODE (type)) == CODE_FOR_nothing)
3741     return false;
3742 
3743   FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
3744     {
3745       use_stmt = USE_STMT (use_p);
3746       if (is_gimple_debug (use_stmt))
3747 	continue;
3748 
3749       if (uaddsub_overflow_check_p (stmt, use_stmt))
3750 	ovf_use_seen = true;
3751       else
3752 	use_seen = true;
3753       if (ovf_use_seen && use_seen)
3754 	break;
3755     }
3756 
3757   if (!ovf_use_seen || !use_seen)
3758     return false;
3759 
3760   tree ctype = build_complex_type (type);
3761   tree rhs1 = gimple_assign_rhs1 (stmt);
3762   tree rhs2 = gimple_assign_rhs2 (stmt);
3763   gcall *g = gimple_build_call_internal (code == PLUS_EXPR
3764 					 ? IFN_ADD_OVERFLOW : IFN_SUB_OVERFLOW,
3765 					 2, rhs1, rhs2);
3766   tree ctmp = make_ssa_name (ctype);
3767   gimple_call_set_lhs (g, ctmp);
3768   gsi_insert_before (gsi, g, GSI_SAME_STMT);
3769   gassign *g2 = gimple_build_assign (lhs, REALPART_EXPR,
3770 				     build1 (REALPART_EXPR, type, ctmp));
3771   gsi_replace (gsi, g2, true);
3772   tree ovf = make_ssa_name (type);
3773   g2 = gimple_build_assign (ovf, IMAGPART_EXPR,
3774 			    build1 (IMAGPART_EXPR, type, ctmp));
3775   gsi_insert_after (gsi, g2, GSI_NEW_STMT);
3776 
3777   FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
3778     {
3779       if (is_gimple_debug (use_stmt))
3780 	continue;
3781 
3782       int ovf_use = uaddsub_overflow_check_p (stmt, use_stmt);
3783       if (ovf_use == 0)
3784 	continue;
3785       if (gimple_code (use_stmt) == GIMPLE_COND)
3786 	{
3787 	  gcond *cond_stmt = as_a <gcond *> (use_stmt);
3788 	  gimple_cond_set_lhs (cond_stmt, ovf);
3789 	  gimple_cond_set_rhs (cond_stmt, build_int_cst (type, 0));
3790 	  gimple_cond_set_code (cond_stmt, ovf_use == 1 ? NE_EXPR : EQ_EXPR);
3791 	}
3792       else
3793 	{
3794 	  gcc_checking_assert (is_gimple_assign (use_stmt));
3795 	  if (gimple_assign_rhs_class (use_stmt) == GIMPLE_BINARY_RHS)
3796 	    {
3797 	      gimple_assign_set_rhs1 (use_stmt, ovf);
3798 	      gimple_assign_set_rhs2 (use_stmt, build_int_cst (type, 0));
3799 	      gimple_assign_set_rhs_code (use_stmt,
3800 					  ovf_use == 1 ? NE_EXPR : EQ_EXPR);
3801 	    }
3802 	  else
3803 	    {
3804 	      gcc_checking_assert (gimple_assign_rhs_code (use_stmt)
3805 				   == COND_EXPR);
3806 	      tree cond = build2 (ovf_use == 1 ? NE_EXPR : EQ_EXPR,
3807 				  boolean_type_node, ovf,
3808 				  build_int_cst (type, 0));
3809 	      gimple_assign_set_rhs1 (use_stmt, cond);
3810 	    }
3811 	}
3812       update_stmt (use_stmt);
3813     }
3814   return true;
3815 }
3816 
3817 /* Return true if the target has support for divmod.  */
3818 
3819 static bool
3820 target_supports_divmod_p (optab divmod_optab, optab div_optab, machine_mode mode)
3821 {
3822   /* If target supports hardware divmod insn, use it for divmod.  */
3823   if (optab_handler (divmod_optab, mode) != CODE_FOR_nothing)
3824     return true;
3825 
3826   /* Check if libfunc for divmod is available.  */
3827   rtx libfunc = optab_libfunc (divmod_optab, mode);
3828   if (libfunc != NULL_RTX)
3829     {
3830       /* If optab_handler exists for div_optab, perhaps in a wider mode,
3831 	 we don't want to use the libfunc even if it exists for the given mode.  */
3832       for (machine_mode div_mode = mode;
3833 	   div_mode != VOIDmode;
3834 	   div_mode = GET_MODE_WIDER_MODE (div_mode))
3835 	if (optab_handler (div_optab, div_mode) != CODE_FOR_nothing)
3836 	  return false;
3837 
3838       return targetm.expand_divmod_libfunc != NULL;
3839     }
3840 
3841   return false;
3842 }
3843 
3844 /* Check if STMT is a candidate for the divmod transform.  */
3845 
3846 static bool
3847 divmod_candidate_p (gassign *stmt)
3848 {
3849   tree type = TREE_TYPE (gimple_assign_lhs (stmt));
3850   enum machine_mode mode = TYPE_MODE (type);
3851   optab divmod_optab, div_optab;
3852 
3853   if (TYPE_UNSIGNED (type))
3854     {
3855       divmod_optab = udivmod_optab;
3856       div_optab = udiv_optab;
3857     }
3858   else
3859     {
3860       divmod_optab = sdivmod_optab;
3861       div_optab = sdiv_optab;
3862     }
3863 
3864   tree op1 = gimple_assign_rhs1 (stmt);
3865   tree op2 = gimple_assign_rhs2 (stmt);
3866 
3867   /* Disable the transform if either is a constant, since division-by-constant
3868      may have specialized expansion.  */
3869   if (CONSTANT_CLASS_P (op1) || CONSTANT_CLASS_P (op2))
3870     return false;
3871 
3872   /* Exclude the case where TYPE_OVERFLOW_TRAPS (type) as that should
3873      expand using the [su]divv optabs.  */
3874   if (TYPE_OVERFLOW_TRAPS (type))
3875     return false;
3876 
3877   if (!target_supports_divmod_p (divmod_optab, div_optab, mode))
3878     return false;
3879 
3880   return true;
3881 }
3882 
3883 /* This function looks for:
3884    t1 = a TRUNC_DIV_EXPR b;
3885    t2 = a TRUNC_MOD_EXPR b;
3886    and transforms it to the following sequence:
3887    complex_tmp = DIVMOD (a, b);
3888    t1 = REALPART_EXPR(complex_tmp);
3889    t2 = IMAGPART_EXPR(complex_tmp);
3890    For conditions enabling the transform see divmod_candidate_p().
3891 
3892    The pass has three parts:
3893    1) Find top_stmt, which is a trunc_div or trunc_mod stmt that dominates all
3894       other trunc_div_expr and trunc_mod_expr stmts.
3895    2) Add top_stmt and all trunc_div and trunc_mod stmts dominated by top_stmt
3896       to stmts vector.
3897    3) Insert DIVMOD call just before top_stmt and update entries in
3898       stmts vector to use the return value of DIVMOD (REALPART_EXPR for div,
3899       IMAGPART_EXPR for mod).  */
3900 
3901 static bool
3902 convert_to_divmod (gassign *stmt)
3903 {
3904   if (stmt_can_throw_internal (stmt)
3905       || !divmod_candidate_p (stmt))
3906     return false;
3907 
3908   tree op1 = gimple_assign_rhs1 (stmt);
3909   tree op2 = gimple_assign_rhs2 (stmt);
3910 
3911   imm_use_iterator use_iter;
3912   gimple *use_stmt;
3913   auto_vec<gimple *> stmts;
3914 
3915   gimple *top_stmt = stmt;
3916   basic_block top_bb = gimple_bb (stmt);
3917 
3918   /* Part 1: Try to set top_stmt to "topmost" stmt that dominates
3919      at least stmt and possibly other trunc_div/trunc_mod stmts
3920      having the same operands as stmt.  */
3921 
3922   FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, op1)
3923     {
3924       if (is_gimple_assign (use_stmt)
3925 	  && (gimple_assign_rhs_code (use_stmt) == TRUNC_DIV_EXPR
3926 	      || gimple_assign_rhs_code (use_stmt) == TRUNC_MOD_EXPR)
3927 	  && operand_equal_p (op1, gimple_assign_rhs1 (use_stmt), 0)
3928 	  && operand_equal_p (op2, gimple_assign_rhs2 (use_stmt), 0))
3929 	{
3930 	  if (stmt_can_throw_internal (use_stmt))
3931 	    continue;
3932 
3933 	  basic_block bb = gimple_bb (use_stmt);
3934 
3935 	  if (bb == top_bb)
3936 	    {
3937 	      if (gimple_uid (use_stmt) < gimple_uid (top_stmt))
3938 		top_stmt = use_stmt;
3939 	    }
3940 	  else if (dominated_by_p (CDI_DOMINATORS, top_bb, bb))
3941 	    {
3942 	      top_bb = bb;
3943 	      top_stmt = use_stmt;
3944 	    }
3945 	}
3946     }
3947 
3948   tree top_op1 = gimple_assign_rhs1 (top_stmt);
3949   tree top_op2 = gimple_assign_rhs2 (top_stmt);
3950 
3951   stmts.safe_push (top_stmt);
3952   bool div_seen = (gimple_assign_rhs_code (top_stmt) == TRUNC_DIV_EXPR);
3953 
3954   /* Part 2: Add all trunc_div/trunc_mod statements dominated by top_bb
3955      to the stmts vector.  The 2nd loop will always add stmt to the stmts
3956      vector, since gimple_bb (top_stmt) dominates gimple_bb (stmt), so the
3957      2nd loop ends up adding at least a single trunc_mod_expr stmt.  */
3958 
3959   FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, top_op1)
3960     {
3961       if (is_gimple_assign (use_stmt)
3962 	  && (gimple_assign_rhs_code (use_stmt) == TRUNC_DIV_EXPR
3963 	      || gimple_assign_rhs_code (use_stmt) == TRUNC_MOD_EXPR)
3964 	  && operand_equal_p (top_op1, gimple_assign_rhs1 (use_stmt), 0)
3965 	  && operand_equal_p (top_op2, gimple_assign_rhs2 (use_stmt), 0))
3966 	{
3967 	  if (use_stmt == top_stmt
3968 	      || stmt_can_throw_internal (use_stmt)
3969 	      || !dominated_by_p (CDI_DOMINATORS, gimple_bb (use_stmt), top_bb))
3970 	    continue;
3971 
3972 	  stmts.safe_push (use_stmt);
3973 	  if (gimple_assign_rhs_code (use_stmt) == TRUNC_DIV_EXPR)
3974 	    div_seen = true;
3975 	}
3976     }
3977 
3978   if (!div_seen)
3979     return false;
3980 
3981   /* Part 3: Create libcall to internal fn DIVMOD:
3982      divmod_tmp = DIVMOD (op1, op2).  */
3983 
3984   gcall *call_stmt = gimple_build_call_internal (IFN_DIVMOD, 2, op1, op2);
3985   tree res = make_temp_ssa_name (build_complex_type (TREE_TYPE (op1)),
3986 				 call_stmt, "divmod_tmp");
3987   gimple_call_set_lhs (call_stmt, res);
3988 
3989   /* Insert the call before top_stmt.  */
3990   gimple_stmt_iterator top_stmt_gsi = gsi_for_stmt (top_stmt);
3991   gsi_insert_before (&top_stmt_gsi, call_stmt, GSI_SAME_STMT);
3992 
3993   widen_mul_stats.divmod_calls_inserted++;
3994 
3995   /* Update all statements in stmts vector:
3996      lhs = op1 TRUNC_DIV_EXPR op2 -> lhs = REALPART_EXPR<divmod_tmp>
3997      lhs = op1 TRUNC_MOD_EXPR op2 -> lhs = IMAGPART_EXPR<divmod_tmp>.  */
3998 
3999   for (unsigned i = 0; stmts.iterate (i, &use_stmt); ++i)
4000     {
4001       tree new_rhs;
4002 
4003       switch (gimple_assign_rhs_code (use_stmt))
4004 	{
4005 	  case TRUNC_DIV_EXPR:
4006 	    new_rhs = fold_build1 (REALPART_EXPR, TREE_TYPE (op1), res);
4007 	    break;
4008 
4009 	  case TRUNC_MOD_EXPR:
4010 	    new_rhs = fold_build1 (IMAGPART_EXPR, TREE_TYPE (op1), res);
4011 	    break;
4012 
4013 	  default:
4014 	    gcc_unreachable ();
4015 	}
4016 
4017       gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
4018       gimple_assign_set_rhs_from_tree (&gsi, new_rhs);
4019       update_stmt (use_stmt);
4020     }
4021 
4022   return true;
4023 }
4024 
4025 /* Find integer multiplications where the operands are extended from
4026    smaller types, and replace the MULT_EXPR with a WIDEN_MULT_EXPR
4027    where appropriate.  */
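
/* For illustration only (an informal sketch, not part of the original
   sources):

	short a, b;
	int c = (int) a * (int) b;

   is matched by convert_mult_to_widen and rewritten as

	c = a w* b;	<-- WIDEN_MULT_EXPR

   when the target has a widening multiply for that mode pair.  */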
4028 
4029 namespace {
4030 
4031 const pass_data pass_data_optimize_widening_mul =
4032 {
4033   GIMPLE_PASS, /* type */
4034   "widening_mul", /* name */
4035   OPTGROUP_NONE, /* optinfo_flags */
4036   TV_NONE, /* tv_id */
4037   PROP_ssa, /* properties_required */
4038   0, /* properties_provided */
4039   0, /* properties_destroyed */
4040   0, /* todo_flags_start */
4041   TODO_update_ssa, /* todo_flags_finish */
4042 };
4043 
4044 class pass_optimize_widening_mul : public gimple_opt_pass
4045 {
4046 public:
4047   pass_optimize_widening_mul (gcc::context *ctxt)
4048     : gimple_opt_pass (pass_data_optimize_widening_mul, ctxt)
4049   {}
4050 
4051   /* opt_pass methods: */
4052   virtual bool gate (function *)
4053     {
4054       return flag_expensive_optimizations && optimize;
4055     }
4056 
4057   virtual unsigned int execute (function *);
4058 
4059 }; // class pass_optimize_widening_mul
4060 
4061 unsigned int
4062 pass_optimize_widening_mul::execute (function *fun)
4063 {
4064   basic_block bb;
4065   bool cfg_changed = false;
4066 
4067   memset (&widen_mul_stats, 0, sizeof (widen_mul_stats));
4068   calculate_dominance_info (CDI_DOMINATORS);
4069   renumber_gimple_stmt_uids ();
4070 
4071   FOR_EACH_BB_FN (bb, fun)
4072     {
4073       gimple_stmt_iterator gsi;
4074 
4075       for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi);)
4076         {
4077 	  gimple *stmt = gsi_stmt (gsi);
4078 	  enum tree_code code;
4079 
4080 	  if (is_gimple_assign (stmt))
4081 	    {
4082 	      code = gimple_assign_rhs_code (stmt);
4083 	      switch (code)
4084 		{
4085 		case MULT_EXPR:
4086 		  if (!convert_mult_to_widen (stmt, &gsi)
4087 		      && convert_mult_to_fma (stmt,
4088 					      gimple_assign_rhs1 (stmt),
4089 					      gimple_assign_rhs2 (stmt)))
4090 		    {
4091 		      gsi_remove (&gsi, true);
4092 		      release_defs (stmt);
4093 		      continue;
4094 		    }
4095 		  break;
4096 
4097 		case PLUS_EXPR:
4098 		case MINUS_EXPR:
4099 		  if (!convert_plusminus_to_widen (&gsi, stmt, code))
4100 		    match_uaddsub_overflow (&gsi, stmt, code);
4101 		  break;
4102 
4103 		case TRUNC_MOD_EXPR:
4104 		  convert_to_divmod (as_a<gassign *> (stmt));
4105 		  break;
4106 
4107 		default:;
4108 		}
4109 	    }
4110 	  else if (is_gimple_call (stmt)
4111 		   && gimple_call_lhs (stmt))
4112 	    {
4113 	      tree fndecl = gimple_call_fndecl (stmt);
4114 	      if (fndecl
4115 		  && gimple_call_builtin_p (stmt, BUILT_IN_NORMAL))
4116 		{
4117 		  switch (DECL_FUNCTION_CODE (fndecl))
4118 		    {
4119 		      case BUILT_IN_POWF:
4120 		      case BUILT_IN_POW:
4121 		      case BUILT_IN_POWL:
4122 			if (TREE_CODE (gimple_call_arg (stmt, 1)) == REAL_CST
4123 			    && real_equal
4124 			         (&TREE_REAL_CST (gimple_call_arg (stmt, 1)),
4125 				  &dconst2)
4126 			    && convert_mult_to_fma (stmt,
4127 						    gimple_call_arg (stmt, 0),
4128 						    gimple_call_arg (stmt, 0)))
4129 			  {
4130 			    unlink_stmt_vdef (stmt);
4131 			    if (gsi_remove (&gsi, true)
4132 				&& gimple_purge_dead_eh_edges (bb))
4133 			      cfg_changed = true;
4134 			    release_defs (stmt);
4135 			    continue;
4136 			  }
4137 			  break;
4138 
4139 		      default:;
4140 		    }
4141 		}
4142 	    }
4143 	  gsi_next (&gsi);
4144 	}
4145     }
4146 
4147   statistics_counter_event (fun, "widening multiplications inserted",
4148 			    widen_mul_stats.widen_mults_inserted);
4149   statistics_counter_event (fun, "widening maccs inserted",
4150 			    widen_mul_stats.maccs_inserted);
4151   statistics_counter_event (fun, "fused multiply-adds inserted",
4152 			    widen_mul_stats.fmas_inserted);
4153   statistics_counter_event (fun, "divmod calls inserted",
4154 			    widen_mul_stats.divmod_calls_inserted);
4155 
4156   return cfg_changed ? TODO_cleanup_cfg : 0;
4157 }
4158 
4159 } // anon namespace
4160 
4161 gimple_opt_pass *
4162 make_pass_optimize_widening_mul (gcc::context *ctxt)
4163 {
4164   return new pass_optimize_widening_mul (ctxt);
4165 }
4166