1 /* Lower GIMPLE_SWITCH expressions to something more efficient than
2 a jump table.
3 Copyright (C) 2006-2020 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by the
9 Free Software Foundation; either version 3, or (at your option) any
10 later version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not, write to the Free
19 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
20 02110-1301, USA. */
21
22 /* This file handles the lowering of GIMPLE_SWITCH to an indexed
23 load, or a series of bit-test-and-branch expressions. */
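/* As a rough illustration (a made-up example, not taken from any particular
   testcase), a switch of the form

     switch (x)
       {
       case 0: r = 10; break;
       case 1: r = 13; break;
       case 2: r = 27; break;
       default: r = 0; break;
       }

   can be converted into a lookup in a static array plus a bounds check,
   roughly

     static const int CSWTCH[] = { 10, 13, 27 };
     r = ((unsigned) x <= 2) ? CSWTCH[x] : 0;

   which is what the switch_conversion class below performs on GIMPLE. */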
24
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "backend.h"
29 #include "insn-codes.h"
30 #include "rtl.h"
31 #include "tree.h"
32 #include "gimple.h"
33 #include "cfghooks.h"
34 #include "tree-pass.h"
35 #include "ssa.h"
36 #include "optabs-tree.h"
37 #include "cgraph.h"
38 #include "gimple-pretty-print.h"
39 #include "fold-const.h"
40 #include "varasm.h"
41 #include "stor-layout.h"
42 #include "cfganal.h"
43 #include "gimplify.h"
44 #include "gimple-iterator.h"
45 #include "gimplify-me.h"
46 #include "gimple-fold.h"
47 #include "tree-cfg.h"
48 #include "cfgloop.h"
49 #include "alloc-pool.h"
50 #include "target.h"
51 #include "tree-into-ssa.h"
52 #include "omp-general.h"
53
54 /* ??? For lang_hooks.types.type_for_mode, but is there a word_mode
55 type in the GIMPLE type system that is language-independent? */
56 #include "langhooks.h"
57
58 #include "tree-switch-conversion.h"
59
60 using namespace tree_switch_conversion;
61
62 /* Constructor. */
63
switch_conversion::switch_conversion (): m_final_bb (NULL),
65 m_constructors (NULL), m_default_values (NULL),
66 m_arr_ref_first (NULL), m_arr_ref_last (NULL),
67 m_reason (NULL), m_default_case_nonstandard (false), m_cfg_altered (false)
68 {
69 }
70
/* Collect information about the SWTCH statement. */
72
73 void
switch_conversion::collect (gswitch *swtch)
75 {
76 unsigned int branch_num = gimple_switch_num_labels (swtch);
77 tree min_case, max_case;
78 unsigned int i;
79 edge e, e_default, e_first;
80 edge_iterator ei;
81
82 m_switch = swtch;
83
84 /* The gimplifier has already sorted the cases by CASE_LOW and ensured there
85 is a default label which is the first in the vector.
86 Collect the bits we can deduce from the CFG. */
87 m_index_expr = gimple_switch_index (swtch);
88 m_switch_bb = gimple_bb (swtch);
89 e_default = gimple_switch_default_edge (cfun, swtch);
90 m_default_bb = e_default->dest;
91 m_default_prob = e_default->probability;
92
93 /* Get upper and lower bounds of case values, and the covered range. */
94 min_case = gimple_switch_label (swtch, 1);
95 max_case = gimple_switch_label (swtch, branch_num - 1);
96
97 m_range_min = CASE_LOW (min_case);
98 if (CASE_HIGH (max_case) != NULL_TREE)
99 m_range_max = CASE_HIGH (max_case);
100 else
101 m_range_max = CASE_LOW (max_case);
102
103 m_contiguous_range = true;
104 tree last = CASE_HIGH (min_case) ? CASE_HIGH (min_case) : m_range_min;
105 for (i = 2; i < branch_num; i++)
106 {
107 tree elt = gimple_switch_label (swtch, i);
108 if (wi::to_wide (last) + 1 != wi::to_wide (CASE_LOW (elt)))
109 {
110 m_contiguous_range = false;
111 break;
112 }
113 last = CASE_HIGH (elt) ? CASE_HIGH (elt) : CASE_LOW (elt);
114 }
115
116 if (m_contiguous_range)
117 e_first = gimple_switch_edge (cfun, swtch, 1);
118 else
119 e_first = e_default;
120
121 /* See if there is one common successor block for all branch
122 targets. If it exists, record it in FINAL_BB.
As an initial guess, start with the destination of the first non-default
case if the range is contiguous, and of the default case otherwise; or
with that block's single successor if it is a forwarder block. */
126 if (! single_pred_p (e_first->dest))
127 m_final_bb = e_first->dest;
128 else if (single_succ_p (e_first->dest)
129 && ! single_pred_p (single_succ (e_first->dest)))
130 m_final_bb = single_succ (e_first->dest);
131 /* Require that all switch destinations are either that common
132 FINAL_BB or a forwarder to it, except for the default
case if the range is contiguous. */
134 if (m_final_bb)
135 FOR_EACH_EDGE (e, ei, m_switch_bb->succs)
136 {
137 if (e->dest == m_final_bb)
138 continue;
139
140 if (single_pred_p (e->dest)
141 && single_succ_p (e->dest)
142 && single_succ (e->dest) == m_final_bb)
143 continue;
144
145 if (e == e_default && m_contiguous_range)
146 {
147 m_default_case_nonstandard = true;
148 continue;
149 }
150
151 m_final_bb = NULL;
152 break;
153 }
154
155 m_range_size
156 = int_const_binop (MINUS_EXPR, m_range_max, m_range_min);
157
158 /* Get a count of the number of case labels. Single-valued case labels
159 simply count as one, but a case range counts double, since it may
160 require two compares if it gets lowered as a branching tree. */
161 m_count = 0;
162 for (i = 1; i < branch_num; i++)
163 {
164 tree elt = gimple_switch_label (swtch, i);
165 m_count++;
166 if (CASE_HIGH (elt)
167 && ! tree_int_cst_equal (CASE_LOW (elt), CASE_HIGH (elt)))
168 m_count++;
169 }
170
171 /* Get the number of unique non-default targets out of the GIMPLE_SWITCH
172 block. Assume a CFG cleanup would have already removed degenerate
switch statements; this allows us to just use EDGE_COUNT. */
174 m_uniq = EDGE_COUNT (gimple_bb (swtch)->succs) - 1;
175 }
176
/* Checks whether the range given by the individual case statements of the
switch statement isn't too big and whether the number of branches actually
179 satisfies the size of the new array. */
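/* For instance (illustrative numbers only): case values 0, 3 and 9 give a
   range size of 9 and an m_count of 3, so the conversion is attempted only
   when 9 <= 3 * param_switch_conversion_branch_ratio. */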
180
181 bool
switch_conversion::check_range ()
183 {
184 gcc_assert (m_range_size);
185 if (!tree_fits_uhwi_p (m_range_size))
186 {
187 m_reason = "index range way too large or otherwise unusable";
188 return false;
189 }
190
191 if (tree_to_uhwi (m_range_size)
192 > ((unsigned) m_count * param_switch_conversion_branch_ratio))
193 {
194 m_reason = "the maximum range-branch ratio exceeded";
195 return false;
196 }
197
198 return true;
199 }
200
/* Checks whether all basic blocks other than the final BB are empty. */
202
203 bool
switch_conversion::check_all_empty_except_final ()
205 {
206 edge e, e_default = find_edge (m_switch_bb, m_default_bb);
207 edge_iterator ei;
208
209 FOR_EACH_EDGE (e, ei, m_switch_bb->succs)
210 {
211 if (e->dest == m_final_bb)
212 continue;
213
214 if (!empty_block_p (e->dest))
215 {
216 if (m_contiguous_range && e == e_default)
217 {
218 m_default_case_nonstandard = true;
219 continue;
220 }
221
222 m_reason = "bad case - a non-final BB not empty";
223 return false;
224 }
225 }
226
227 return true;
228 }
229
230 /* This function checks whether all required values in phi nodes in final_bb
231 are constants. Required values are those that correspond to a basic block
232 which is a part of the examined switch statement. It returns true if the
233 phi nodes are OK, otherwise false. */
234
235 bool
switch_conversion::check_final_bb ()
237 {
238 gphi_iterator gsi;
239
240 m_phi_count = 0;
241 for (gsi = gsi_start_phis (m_final_bb); !gsi_end_p (gsi); gsi_next (&gsi))
242 {
243 gphi *phi = gsi.phi ();
244 unsigned int i;
245
246 if (virtual_operand_p (gimple_phi_result (phi)))
247 continue;
248
249 m_phi_count++;
250
251 for (i = 0; i < gimple_phi_num_args (phi); i++)
252 {
253 basic_block bb = gimple_phi_arg_edge (phi, i)->src;
254
255 if (bb == m_switch_bb
256 || (single_pred_p (bb)
257 && single_pred (bb) == m_switch_bb
258 && (!m_default_case_nonstandard
259 || empty_block_p (bb))))
260 {
261 tree reloc, val;
262 const char *reason = NULL;
263
264 val = gimple_phi_arg_def (phi, i);
265 if (!is_gimple_ip_invariant (val))
266 reason = "non-invariant value from a case";
267 else
268 {
269 reloc = initializer_constant_valid_p (val, TREE_TYPE (val));
270 if ((flag_pic && reloc != null_pointer_node)
271 || (!flag_pic && reloc == NULL_TREE))
272 {
273 if (reloc)
274 reason
275 = "value from a case would need runtime relocations";
276 else
277 reason
278 = "value from a case is not a valid initializer";
279 }
280 }
281 if (reason)
282 {
/* For a contiguous range, we can allow a non-constant value
or one that needs relocation, as long as it is
only reachable from the default case. */
286 if (bb == m_switch_bb)
287 bb = m_final_bb;
288 if (!m_contiguous_range || bb != m_default_bb)
289 {
290 m_reason = reason;
291 return false;
292 }
293
294 unsigned int branch_num = gimple_switch_num_labels (m_switch);
295 for (unsigned int i = 1; i < branch_num; i++)
296 {
297 if (gimple_switch_label_bb (cfun, m_switch, i) == bb)
298 {
299 m_reason = reason;
300 return false;
301 }
302 }
303 m_default_case_nonstandard = true;
304 }
305 }
306 }
307 }
308
309 return true;
310 }
311
312 /* The following function allocates default_values, target_{in,out}_names and
313 constructors arrays. The last one is also populated with pointers to
314 vectors that will become constructors of new arrays. */
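/* Layout sketch (as implemented below): a single buffer of 3 * m_phi_count
   trees is allocated, where the first third holds the default values, the
   second third the inbound SSA names and the last third the outbound SSA
   names. */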
315
316 void
switch_conversion::create_temp_arrays ()
318 {
319 int i;
320
321 m_default_values = XCNEWVEC (tree, m_phi_count * 3);
/* ??? Macros do not support multi-argument templates in their
323 argument list. We create a typedef to work around that problem. */
324 typedef vec<constructor_elt, va_gc> *vec_constructor_elt_gc;
325 m_constructors = XCNEWVEC (vec_constructor_elt_gc, m_phi_count);
326 m_target_inbound_names = m_default_values + m_phi_count;
327 m_target_outbound_names = m_target_inbound_names + m_phi_count;
328 for (i = 0; i < m_phi_count; i++)
329 vec_alloc (m_constructors[i], tree_to_uhwi (m_range_size) + 1);
330 }
331
332 /* Populate the array of default values in the order of phi nodes.
333 DEFAULT_CASE is the CASE_LABEL_EXPR for the default switch branch
334 if the range is non-contiguous or the default case has standard
335 structure, otherwise it is the first non-default case instead. */
336
337 void
switch_conversion::gather_default_values (tree default_case)
339 {
340 gphi_iterator gsi;
341 basic_block bb = label_to_block (cfun, CASE_LABEL (default_case));
342 edge e;
343 int i = 0;
344
345 gcc_assert (CASE_LOW (default_case) == NULL_TREE
346 || m_default_case_nonstandard);
347
348 if (bb == m_final_bb)
349 e = find_edge (m_switch_bb, bb);
350 else
351 e = single_succ_edge (bb);
352
353 for (gsi = gsi_start_phis (m_final_bb); !gsi_end_p (gsi); gsi_next (&gsi))
354 {
355 gphi *phi = gsi.phi ();
356 if (virtual_operand_p (gimple_phi_result (phi)))
357 continue;
358 tree val = PHI_ARG_DEF_FROM_EDGE (phi, e);
359 gcc_assert (val);
360 m_default_values[i++] = val;
361 }
362 }
363
364 /* The following function populates the vectors in the constructors array with
365 future contents of the static arrays. The vectors are populated in the
366 order of phi nodes. */
367
368 void
switch_conversion::build_constructors ()
370 {
371 unsigned i, branch_num = gimple_switch_num_labels (m_switch);
372 tree pos = m_range_min;
373 tree pos_one = build_int_cst (TREE_TYPE (pos), 1);
374
375 for (i = 1; i < branch_num; i++)
376 {
377 tree cs = gimple_switch_label (m_switch, i);
378 basic_block bb = label_to_block (cfun, CASE_LABEL (cs));
379 edge e;
380 tree high;
381 gphi_iterator gsi;
382 int j;
383
384 if (bb == m_final_bb)
385 e = find_edge (m_switch_bb, bb);
386 else
387 e = single_succ_edge (bb);
388 gcc_assert (e);
389
390 while (tree_int_cst_lt (pos, CASE_LOW (cs)))
391 {
392 int k;
393 for (k = 0; k < m_phi_count; k++)
394 {
395 constructor_elt elt;
396
397 elt.index = int_const_binop (MINUS_EXPR, pos, m_range_min);
398 elt.value
399 = unshare_expr_without_location (m_default_values[k]);
400 m_constructors[k]->quick_push (elt);
401 }
402
403 pos = int_const_binop (PLUS_EXPR, pos, pos_one);
404 }
405 gcc_assert (tree_int_cst_equal (pos, CASE_LOW (cs)));
406
407 j = 0;
408 if (CASE_HIGH (cs))
409 high = CASE_HIGH (cs);
410 else
411 high = CASE_LOW (cs);
412 for (gsi = gsi_start_phis (m_final_bb);
413 !gsi_end_p (gsi); gsi_next (&gsi))
414 {
415 gphi *phi = gsi.phi ();
416 if (virtual_operand_p (gimple_phi_result (phi)))
417 continue;
418 tree val = PHI_ARG_DEF_FROM_EDGE (phi, e);
419 tree low = CASE_LOW (cs);
420 pos = CASE_LOW (cs);
421
422 do
423 {
424 constructor_elt elt;
425
426 elt.index = int_const_binop (MINUS_EXPR, pos, m_range_min);
427 elt.value = unshare_expr_without_location (val);
428 m_constructors[j]->quick_push (elt);
429
430 pos = int_const_binop (PLUS_EXPR, pos, pos_one);
431 } while (!tree_int_cst_lt (high, pos)
432 && tree_int_cst_lt (low, pos));
433 j++;
434 }
435 }
436 }
437
/* If all values in the constructor vector are values of a linear function
a * x + b, then return true. When true, COEFF_A and COEFF_B are the
coefficients of the linear function. Note that equal values are a special
case of a linear function with a and b equal to zero. */
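/* A made-up example: with m_range_min equal to 0 and constructor values
   { 10, 13, 16, 19 }, we get a = 13 - 10 = 3 and b = 13 - 3 * 1 = 10, and
   every element equals 3 * x + 10 for x = 0 .. 3, so the array load can be
   replaced by that linear expression. */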
442
443 bool
switch_conversion::contains_linear_function_p (vec<constructor_elt, va_gc> *vec,
445 wide_int *coeff_a,
446 wide_int *coeff_b)
447 {
448 unsigned int i;
449 constructor_elt *elt;
450
451 gcc_assert (vec->length () >= 2);
452
/* Let's try to find a linear function a * x + b that can apply to
454 given values. 'a' can be calculated as follows:
455
456 a = (y2 - y1) / (x2 - x1) where x2 - x1 = 1 (consecutive case indices)
457 a = y2 - y1
458
459 and
460
461 b = y2 - a * x2
462
463 */
464
465 tree elt0 = (*vec)[0].value;
466 tree elt1 = (*vec)[1].value;
467
468 if (TREE_CODE (elt0) != INTEGER_CST || TREE_CODE (elt1) != INTEGER_CST)
469 return false;
470
471 wide_int range_min
472 = wide_int::from (wi::to_wide (m_range_min),
473 TYPE_PRECISION (TREE_TYPE (elt0)),
474 TYPE_SIGN (TREE_TYPE (m_range_min)));
475 wide_int y1 = wi::to_wide (elt0);
476 wide_int y2 = wi::to_wide (elt1);
477 wide_int a = y2 - y1;
478 wide_int b = y2 - a * (range_min + 1);
479
480 /* Verify that all values fulfill the linear function. */
481 FOR_EACH_VEC_SAFE_ELT (vec, i, elt)
482 {
483 if (TREE_CODE (elt->value) != INTEGER_CST)
484 return false;
485
486 wide_int value = wi::to_wide (elt->value);
487 if (a * range_min + b != value)
488 return false;
489
490 ++range_min;
491 }
492
493 *coeff_a = a;
494 *coeff_b = b;
495
496 return true;
497 }
498
499 /* Return type which should be used for array elements, either TYPE's
500 main variant or, for integral types, some smaller integral type
501 that can still hold all the constants. */
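/* E.g. (illustrative): if every constructor value for array NUM fits into
   0 .. 255, the CSWTCH array can be emitted with "unsigned char" elements
   instead of "int", shrinking the table considerably. */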
502
503 tree
switch_conversion::array_value_type (tree type, int num)
505 {
506 unsigned int i, len = vec_safe_length (m_constructors[num]);
507 constructor_elt *elt;
508 int sign = 0;
509 tree smaller_type;
510
511 /* Types with alignments greater than their size can reach here, e.g. out of
512 SRA. We couldn't use these as an array component type so get back to the
513 main variant first, which, for our purposes, is fine for other types as
514 well. */
515
516 type = TYPE_MAIN_VARIANT (type);
517
518 if (!INTEGRAL_TYPE_P (type))
519 return type;
520
521 scalar_int_mode type_mode = SCALAR_INT_TYPE_MODE (type);
522 scalar_int_mode mode = get_narrowest_mode (type_mode);
523 if (GET_MODE_SIZE (type_mode) <= GET_MODE_SIZE (mode))
524 return type;
525
526 if (len < (optimize_bb_for_size_p (gimple_bb (m_switch)) ? 2 : 32))
527 return type;
528
529 FOR_EACH_VEC_SAFE_ELT (m_constructors[num], i, elt)
530 {
531 wide_int cst;
532
533 if (TREE_CODE (elt->value) != INTEGER_CST)
534 return type;
535
536 cst = wi::to_wide (elt->value);
537 while (1)
538 {
539 unsigned int prec = GET_MODE_BITSIZE (mode);
540 if (prec > HOST_BITS_PER_WIDE_INT)
541 return type;
542
543 if (sign >= 0 && cst == wi::zext (cst, prec))
544 {
545 if (sign == 0 && cst == wi::sext (cst, prec))
546 break;
547 sign = 1;
548 break;
549 }
550 if (sign <= 0 && cst == wi::sext (cst, prec))
551 {
552 sign = -1;
553 break;
554 }
555
556 if (sign == 1)
557 sign = 0;
558
559 if (!GET_MODE_WIDER_MODE (mode).exists (&mode)
560 || GET_MODE_SIZE (mode) >= GET_MODE_SIZE (type_mode))
561 return type;
562 }
563 }
564
565 if (sign == 0)
566 sign = TYPE_UNSIGNED (type) ? 1 : -1;
567 smaller_type = lang_hooks.types.type_for_mode (mode, sign >= 0);
568 if (GET_MODE_SIZE (type_mode)
569 <= GET_MODE_SIZE (SCALAR_INT_TYPE_MODE (smaller_type)))
570 return type;
571
572 return smaller_type;
573 }
574
575 /* Create an appropriate array type and declaration and assemble a static
576 array variable. Also create a load statement that initializes
577 the variable in question with a value from the static array. SWTCH is
578 the switch statement being converted, NUM is the index to
579 arrays of constructors, default values and target SSA names
580 for this particular array. ARR_INDEX_TYPE is the type of the index
581 of the new array, PHI is the phi node of the final BB that corresponds
582 to the value that will be loaded from the created array. TIDX
583 is an ssa name of a temporary variable holding the index for loads from the
584 new array. */
585
586 void
switch_conversion::build_one_array (int num, tree arr_index_type,
588 gphi *phi, tree tidx)
589 {
590 tree name;
591 gimple *load;
592 gimple_stmt_iterator gsi = gsi_for_stmt (m_switch);
593 location_t loc = gimple_location (m_switch);
594
595 gcc_assert (m_default_values[num]);
596
597 name = copy_ssa_name (PHI_RESULT (phi));
598 m_target_inbound_names[num] = name;
599
600 vec<constructor_elt, va_gc> *constructor = m_constructors[num];
601 wide_int coeff_a, coeff_b;
602 bool linear_p = contains_linear_function_p (constructor, &coeff_a, &coeff_b);
603 tree type;
604 if (linear_p
605 && (type = range_check_type (TREE_TYPE ((*constructor)[0].value))))
606 {
607 if (dump_file && coeff_a.to_uhwi () > 0)
608 fprintf (dump_file, "Linear transformation with A = %" PRId64
609 " and B = %" PRId64 "\n", coeff_a.to_shwi (),
610 coeff_b.to_shwi ());
611
/* We must use the type of the constructor values. */
613 gimple_seq seq = NULL;
614 tree tmp = gimple_convert (&seq, type, m_index_expr);
615 tree tmp2 = gimple_build (&seq, MULT_EXPR, type,
616 wide_int_to_tree (type, coeff_a), tmp);
617 tree tmp3 = gimple_build (&seq, PLUS_EXPR, type, tmp2,
618 wide_int_to_tree (type, coeff_b));
619 tree tmp4 = gimple_convert (&seq, TREE_TYPE (name), tmp3);
620 gsi_insert_seq_before (&gsi, seq, GSI_SAME_STMT);
621 load = gimple_build_assign (name, tmp4);
622 }
623 else
624 {
625 tree array_type, ctor, decl, value_type, fetch, default_type;
626
627 default_type = TREE_TYPE (m_default_values[num]);
628 value_type = array_value_type (default_type, num);
629 array_type = build_array_type (value_type, arr_index_type);
630 if (default_type != value_type)
631 {
632 unsigned int i;
633 constructor_elt *elt;
634
635 FOR_EACH_VEC_SAFE_ELT (constructor, i, elt)
636 elt->value = fold_convert (value_type, elt->value);
637 }
638 ctor = build_constructor (array_type, constructor);
639 TREE_CONSTANT (ctor) = true;
640 TREE_STATIC (ctor) = true;
641
642 decl = build_decl (loc, VAR_DECL, NULL_TREE, array_type);
643 TREE_STATIC (decl) = 1;
644 DECL_INITIAL (decl) = ctor;
645
646 DECL_NAME (decl) = create_tmp_var_name ("CSWTCH");
647 DECL_ARTIFICIAL (decl) = 1;
648 DECL_IGNORED_P (decl) = 1;
649 TREE_CONSTANT (decl) = 1;
650 TREE_READONLY (decl) = 1;
651 DECL_IGNORED_P (decl) = 1;
652 if (offloading_function_p (cfun->decl))
653 DECL_ATTRIBUTES (decl)
654 = tree_cons (get_identifier ("omp declare target"), NULL_TREE,
655 NULL_TREE);
656 varpool_node::finalize_decl (decl);
657
658 fetch = build4 (ARRAY_REF, value_type, decl, tidx, NULL_TREE,
659 NULL_TREE);
660 if (default_type != value_type)
661 {
662 fetch = fold_convert (default_type, fetch);
663 fetch = force_gimple_operand_gsi (&gsi, fetch, true, NULL_TREE,
664 true, GSI_SAME_STMT);
665 }
666 load = gimple_build_assign (name, fetch);
667 }
668
669 gsi_insert_before (&gsi, load, GSI_SAME_STMT);
670 update_stmt (load);
671 m_arr_ref_last = load;
672 }
673
674 /* Builds and initializes static arrays initialized with values gathered from
675 the switch statement. Also creates statements that load values from
676 them. */
677
678 void
switch_conversion::build_arrays ()
680 {
681 tree arr_index_type;
682 tree tidx, sub, utype;
683 gimple *stmt;
684 gimple_stmt_iterator gsi;
685 gphi_iterator gpi;
686 int i;
687 location_t loc = gimple_location (m_switch);
688
689 gsi = gsi_for_stmt (m_switch);
690
/* Make sure we do not generate arithmetic in a subrange. */
692 utype = TREE_TYPE (m_index_expr);
693 if (TREE_TYPE (utype))
694 utype = lang_hooks.types.type_for_mode (TYPE_MODE (TREE_TYPE (utype)), 1);
695 else
696 utype = lang_hooks.types.type_for_mode (TYPE_MODE (utype), 1);
697
698 arr_index_type = build_index_type (m_range_size);
699 tidx = make_ssa_name (utype);
700 sub = fold_build2_loc (loc, MINUS_EXPR, utype,
701 fold_convert_loc (loc, utype, m_index_expr),
702 fold_convert_loc (loc, utype, m_range_min));
703 sub = force_gimple_operand_gsi (&gsi, sub,
704 false, NULL, true, GSI_SAME_STMT);
705 stmt = gimple_build_assign (tidx, sub);
706
707 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
708 update_stmt (stmt);
709 m_arr_ref_first = stmt;
710
711 for (gpi = gsi_start_phis (m_final_bb), i = 0;
712 !gsi_end_p (gpi); gsi_next (&gpi))
713 {
714 gphi *phi = gpi.phi ();
715 if (!virtual_operand_p (gimple_phi_result (phi)))
716 build_one_array (i++, arr_index_type, phi, tidx);
717 else
718 {
719 edge e;
720 edge_iterator ei;
721 FOR_EACH_EDGE (e, ei, m_switch_bb->succs)
722 {
723 if (e->dest == m_final_bb)
724 break;
725 if (!m_default_case_nonstandard
726 || e->dest != m_default_bb)
727 {
728 e = single_succ_edge (e->dest);
729 break;
730 }
731 }
732 gcc_assert (e && e->dest == m_final_bb);
733 m_target_vop = PHI_ARG_DEF_FROM_EDGE (phi, e);
734 }
735 }
736 }
737
738 /* Generates and appropriately inserts loads of default values at the position
739 given by GSI. Returns the last inserted statement. */
740
741 gassign *
switch_conversion::gen_def_assigns (gimple_stmt_iterator *gsi)
743 {
744 int i;
745 gassign *assign = NULL;
746
747 for (i = 0; i < m_phi_count; i++)
748 {
749 tree name = copy_ssa_name (m_target_inbound_names[i]);
750 m_target_outbound_names[i] = name;
751 assign = gimple_build_assign (name, m_default_values[i]);
752 gsi_insert_before (gsi, assign, GSI_SAME_STMT);
753 update_stmt (assign);
754 }
755 return assign;
756 }
757
758 /* Deletes the unused bbs and edges that now contain the switch statement and
759 its empty branch bbs. BBD is the now dead BB containing
760 the original switch statement, FINAL is the last BB of the converted
761 switch statement (in terms of succession). */
762
763 void
switch_conversion::prune_bbs (basic_block bbd, basic_block final,
765 basic_block default_bb)
766 {
767 edge_iterator ei;
768 edge e;
769
770 for (ei = ei_start (bbd->succs); (e = ei_safe_edge (ei)); )
771 {
772 basic_block bb;
773 bb = e->dest;
774 remove_edge (e);
775 if (bb != final && bb != default_bb)
776 delete_basic_block (bb);
777 }
778 delete_basic_block (bbd);
779 }
780
781 /* Add values to phi nodes in final_bb for the two new edges. E1F is the edge
782 from the basic block loading values from an array and E2F from the basic
783 block loading default values. BBF is the last switch basic block (see the
784 bbf description in the comment below). */
785
786 void
switch_conversion::fix_phi_nodes (edge e1f, edge e2f, basic_block bbf)
788 {
789 gphi_iterator gsi;
790 int i;
791
792 for (gsi = gsi_start_phis (bbf), i = 0;
793 !gsi_end_p (gsi); gsi_next (&gsi))
794 {
795 gphi *phi = gsi.phi ();
796 tree inbound, outbound;
797 if (virtual_operand_p (gimple_phi_result (phi)))
798 inbound = outbound = m_target_vop;
799 else
800 {
801 inbound = m_target_inbound_names[i];
802 outbound = m_target_outbound_names[i++];
803 }
804 add_phi_arg (phi, inbound, e1f, UNKNOWN_LOCATION);
805 if (!m_default_case_nonstandard)
806 add_phi_arg (phi, outbound, e2f, UNKNOWN_LOCATION);
807 }
808 }
809
810 /* Creates a check whether the switch expression value actually falls into the
811 range given by all the cases. If it does not, the temporaries are loaded
812 with default values instead. */
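/* A sketch of the generated shape (not literal output):

     bb0:  tidx = (unsigned) x - minval;
           if (tidx <= range) goto bb1; else goto bb2;
     bb1:  v = CSWTCH[tidx]; goto bbF;
     bb2:  v = default value; goto bbF;
     bbF:  PHI <v(bb1), v(bb2)>

   When the default case is non-standard, bb2 is the original default
   block rather than a freshly created one. */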
813
814 void
switch_conversion::gen_inbound_check ()
816 {
817 tree label_decl1 = create_artificial_label (UNKNOWN_LOCATION);
818 tree label_decl2 = create_artificial_label (UNKNOWN_LOCATION);
819 tree label_decl3 = create_artificial_label (UNKNOWN_LOCATION);
820 glabel *label1, *label2, *label3;
821 tree utype, tidx;
822 tree bound;
823
824 gcond *cond_stmt;
825
826 gassign *last_assign = NULL;
827 gimple_stmt_iterator gsi;
828 basic_block bb0, bb1, bb2, bbf, bbd;
829 edge e01 = NULL, e02, e21, e1d, e1f, e2f;
830 location_t loc = gimple_location (m_switch);
831
832 gcc_assert (m_default_values);
833
834 bb0 = gimple_bb (m_switch);
835
836 tidx = gimple_assign_lhs (m_arr_ref_first);
837 utype = TREE_TYPE (tidx);
838
839 /* (end of) block 0 */
840 gsi = gsi_for_stmt (m_arr_ref_first);
841 gsi_next (&gsi);
842
843 bound = fold_convert_loc (loc, utype, m_range_size);
844 cond_stmt = gimple_build_cond (LE_EXPR, tidx, bound, NULL_TREE, NULL_TREE);
845 gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT);
846 update_stmt (cond_stmt);
847
848 /* block 2 */
849 if (!m_default_case_nonstandard)
850 {
851 label2 = gimple_build_label (label_decl2);
852 gsi_insert_before (&gsi, label2, GSI_SAME_STMT);
853 last_assign = gen_def_assigns (&gsi);
854 }
855
856 /* block 1 */
857 label1 = gimple_build_label (label_decl1);
858 gsi_insert_before (&gsi, label1, GSI_SAME_STMT);
859
860 /* block F */
861 gsi = gsi_start_bb (m_final_bb);
862 label3 = gimple_build_label (label_decl3);
863 gsi_insert_before (&gsi, label3, GSI_SAME_STMT);
864
865 /* cfg fix */
866 e02 = split_block (bb0, cond_stmt);
867 bb2 = e02->dest;
868
869 if (m_default_case_nonstandard)
870 {
871 bb1 = bb2;
872 bb2 = m_default_bb;
873 e01 = e02;
874 e01->flags = EDGE_TRUE_VALUE;
875 e02 = make_edge (bb0, bb2, EDGE_FALSE_VALUE);
876 edge e_default = find_edge (bb1, bb2);
877 for (gphi_iterator gsi = gsi_start_phis (bb2);
878 !gsi_end_p (gsi); gsi_next (&gsi))
879 {
880 gphi *phi = gsi.phi ();
881 tree arg = PHI_ARG_DEF_FROM_EDGE (phi, e_default);
882 add_phi_arg (phi, arg, e02,
883 gimple_phi_arg_location_from_edge (phi, e_default));
884 }
885 /* Partially fix the dominator tree, if it is available. */
886 if (dom_info_available_p (CDI_DOMINATORS))
887 redirect_immediate_dominators (CDI_DOMINATORS, bb1, bb0);
888 }
889 else
890 {
891 e21 = split_block (bb2, last_assign);
892 bb1 = e21->dest;
893 remove_edge (e21);
894 }
895
896 e1d = split_block (bb1, m_arr_ref_last);
897 bbd = e1d->dest;
898 remove_edge (e1d);
899
900 /* Flags and profiles of the edge for in-range values. */
901 if (!m_default_case_nonstandard)
902 e01 = make_edge (bb0, bb1, EDGE_TRUE_VALUE);
903 e01->probability = m_default_prob.invert ();
904
905 /* Flags and profiles of the edge taking care of out-of-range values. */
906 e02->flags &= ~EDGE_FALLTHRU;
907 e02->flags |= EDGE_FALSE_VALUE;
908 e02->probability = m_default_prob;
909
910 bbf = m_final_bb;
911
912 e1f = make_edge (bb1, bbf, EDGE_FALLTHRU);
913 e1f->probability = profile_probability::always ();
914
915 if (m_default_case_nonstandard)
916 e2f = NULL;
917 else
918 {
919 e2f = make_edge (bb2, bbf, EDGE_FALLTHRU);
920 e2f->probability = profile_probability::always ();
921 }
922
923 /* frequencies of the new BBs */
924 bb1->count = e01->count ();
925 bb2->count = e02->count ();
926 if (!m_default_case_nonstandard)
927 bbf->count = e1f->count () + e2f->count ();
928
929 /* Tidy blocks that have become unreachable. */
930 prune_bbs (bbd, m_final_bb,
931 m_default_case_nonstandard ? m_default_bb : NULL);
932
933 /* Fixup the PHI nodes in bbF. */
934 fix_phi_nodes (e1f, e2f, bbf);
935
936 /* Fix the dominator tree, if it is available. */
937 if (dom_info_available_p (CDI_DOMINATORS))
938 {
939 vec<basic_block> bbs_to_fix_dom;
940
941 set_immediate_dominator (CDI_DOMINATORS, bb1, bb0);
942 if (!m_default_case_nonstandard)
943 set_immediate_dominator (CDI_DOMINATORS, bb2, bb0);
944 if (! get_immediate_dominator (CDI_DOMINATORS, bbf))
945 /* If bbD was the immediate dominator ... */
946 set_immediate_dominator (CDI_DOMINATORS, bbf, bb0);
947
948 bbs_to_fix_dom.create (3 + (bb2 != bbf));
949 bbs_to_fix_dom.quick_push (bb0);
950 bbs_to_fix_dom.quick_push (bb1);
951 if (bb2 != bbf)
952 bbs_to_fix_dom.quick_push (bb2);
953 bbs_to_fix_dom.quick_push (bbf);
954
955 iterate_fix_dominators (CDI_DOMINATORS, bbs_to_fix_dom, true);
956 bbs_to_fix_dom.release ();
957 }
958 }
959
960 /* The following function is invoked on every switch statement (the current
961 one is given in SWTCH) and runs the individual phases of switch
962 conversion on it one after another until one fails or the conversion
is completed. On success, m_reason is NULL, otherwise it points
to a string with the reason why the conversion failed. */
965
966 void
switch_conversion::expand (gswitch *swtch)
968 {
969 /* Group case labels so that we get the right results from the heuristics
970 that decide on the code generation approach for this switch. */
971 m_cfg_altered |= group_case_labels_stmt (swtch);
972
973 /* If this switch is now a degenerate case with only a default label,
974 there is nothing left for us to do. */
975 if (gimple_switch_num_labels (swtch) < 2)
976 {
977 m_reason = "switch is a degenerate case";
978 return;
979 }
980
981 collect (swtch);
982
983 /* No error markers should reach here (they should be filtered out
984 during gimplification). */
985 gcc_checking_assert (TREE_TYPE (m_index_expr) != error_mark_node);
986
987 /* A switch on a constant should have been optimized in tree-cfg-cleanup. */
988 gcc_checking_assert (!TREE_CONSTANT (m_index_expr));
989
990 /* Prefer bit test if possible. */
991 if (tree_fits_uhwi_p (m_range_size)
992 && bit_test_cluster::can_be_handled (tree_to_uhwi (m_range_size), m_uniq)
993 && bit_test_cluster::is_beneficial (m_count, m_uniq))
994 {
995 m_reason = "expanding as bit test is preferable";
996 return;
997 }
998
999 if (m_uniq <= 2)
1000 {
/* This will be expanded as a decision tree. */
1002 m_reason = "expanding as jumps is preferable";
1003 return;
1004 }
1005
1006 /* If there is no common successor, we cannot do the transformation. */
1007 if (!m_final_bb)
1008 {
1009 m_reason = "no common successor to all case label target blocks found";
1010 return;
1011 }
1012
/* Check that the case label values are within a reasonable range. */
1014 if (!check_range ())
1015 {
1016 gcc_assert (m_reason);
1017 return;
1018 }
1019
/* For all the cases, see whether their blocks are empty, whether the
values they assign are constant, and so on... */
1022 if (!check_all_empty_except_final ())
1023 {
1024 gcc_assert (m_reason);
1025 return;
1026 }
1027 if (!check_final_bb ())
1028 {
1029 gcc_assert (m_reason);
1030 return;
1031 }
1032
1033 /* At this point all checks have passed and we can proceed with the
1034 transformation. */
1035
1036 create_temp_arrays ();
1037 gather_default_values (m_default_case_nonstandard
1038 ? gimple_switch_label (swtch, 1)
1039 : gimple_switch_default_label (swtch));
1040 build_constructors ();
1041
1042 build_arrays (); /* Build the static arrays and assignments. */
1043 gen_inbound_check (); /* Build the bounds check. */
1044
1045 m_cfg_altered = true;
1046 }
1047
1048 /* Destructor. */
1049
switch_conversion::~switch_conversion ()
1051 {
1052 XDELETEVEC (m_constructors);
1053 XDELETEVEC (m_default_values);
1054 }
1055
1056 /* Constructor. */
1057
group_cluster::group_cluster (vec<cluster *> &clusters,
1059 unsigned start, unsigned end)
1060 {
1061 gcc_checking_assert (end - start + 1 >= 1);
1062 m_prob = profile_probability::never ();
1063 m_cases.create (end - start + 1);
1064 for (unsigned i = start; i <= end; i++)
1065 {
1066 m_cases.quick_push (static_cast<simple_cluster *> (clusters[i]));
1067 m_prob += clusters[i]->m_prob;
1068 }
1069 m_subtree_prob = m_prob;
1070 }
1071
1072 /* Destructor. */
1073
group_cluster::~group_cluster ()
1075 {
1076 for (unsigned i = 0; i < m_cases.length (); i++)
1077 delete m_cases[i];
1078
1079 m_cases.release ();
1080 }
1081
1082 /* Dump content of a cluster. */
1083
1084 void
group_cluster::dump (FILE *f, bool details)
1086 {
1087 unsigned total_values = 0;
1088 for (unsigned i = 0; i < m_cases.length (); i++)
1089 total_values += m_cases[i]->get_range (m_cases[i]->get_low (),
1090 m_cases[i]->get_high ());
1091
1092 unsigned comparison_count = 0;
1093 for (unsigned i = 0; i < m_cases.length (); i++)
1094 {
1095 simple_cluster *sc = static_cast<simple_cluster *> (m_cases[i]);
1096 comparison_count += sc->m_range_p ? 2 : 1;
1097 }
1098
1099 unsigned HOST_WIDE_INT range = get_range (get_low (), get_high ());
1100 fprintf (f, "%s", get_type () == JUMP_TABLE ? "JT" : "BT");
1101
1102 if (details)
1103 fprintf (f, "(values:%d comparisons:%d range:" HOST_WIDE_INT_PRINT_DEC
1104 " density: %.2f%%)", total_values, comparison_count, range,
1105 100.0f * comparison_count / range);
1106
1107 fprintf (f, ":");
1108 PRINT_CASE (f, get_low ());
1109 fprintf (f, "-");
1110 PRINT_CASE (f, get_high ());
1111 fprintf (f, " ");
1112 }
1113
1114 /* Emit GIMPLE code to handle the cluster. */
1115
1116 void
jump_table_cluster::emit (tree index_expr, tree,
1118 tree default_label_expr, basic_block default_bb,
1119 location_t loc)
1120 {
1121 unsigned HOST_WIDE_INT range = get_range (get_low (), get_high ());
1122 unsigned HOST_WIDE_INT nondefault_range = 0;
1123
/* For a jump table we just emit a new gswitch statement that will
be later lowered to a jump table. */
1126 auto_vec <tree> labels;
1127 labels.create (m_cases.length ());
1128
1129 make_edge (m_case_bb, default_bb, 0);
1130 for (unsigned i = 0; i < m_cases.length (); i++)
1131 {
1132 labels.quick_push (unshare_expr (m_cases[i]->m_case_label_expr));
1133 make_edge (m_case_bb, m_cases[i]->m_case_bb, 0);
1134 }
1135
1136 gswitch *s = gimple_build_switch (index_expr,
1137 unshare_expr (default_label_expr), labels);
1138 gimple_set_location (s, loc);
1139 gimple_stmt_iterator gsi = gsi_start_bb (m_case_bb);
1140 gsi_insert_after (&gsi, s, GSI_NEW_STMT);
1141
/* Set up probabilities proportional to the number of values each case
covers (i.e. an even probability per value). */
1143 for (unsigned i = 0; i < m_cases.length (); i++)
1144 {
1145 simple_cluster *sc = static_cast<simple_cluster *> (m_cases[i]);
1146 edge case_edge = find_edge (m_case_bb, sc->m_case_bb);
1147 unsigned HOST_WIDE_INT case_range
1148 = sc->get_range (sc->get_low (), sc->get_high ());
1149 nondefault_range += case_range;
1150
/* case_edge->aux is the number of values in the jump table that are
covered by case_edge. */
1153 case_edge->aux = (void *) ((intptr_t) (case_edge->aux) + case_range);
1154 }
1155
1156 edge default_edge = gimple_switch_default_edge (cfun, s);
1157 default_edge->probability = profile_probability::never ();
1158
1159 for (unsigned i = 0; i < m_cases.length (); i++)
1160 {
1161 simple_cluster *sc = static_cast<simple_cluster *> (m_cases[i]);
1162 edge case_edge = find_edge (m_case_bb, sc->m_case_bb);
1163 case_edge->probability
1164 = profile_probability::always ().apply_scale ((intptr_t)case_edge->aux,
1165 range);
1166 }
1167
/* The probability of the default edge is what remains after the
non-default values' share of the range. */
1169 default_edge->probability
1170 += profile_probability::always ().apply_scale (nondefault_range,
1171 range).invert ();
1172
1173 switch_decision_tree::reset_out_edges_aux (s);
1174 }
1175
1176 /* Find jump tables of given CLUSTERS, where all members of the vector
1177 are of type simple_cluster. New clusters are returned. */
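/* The search below is a simple dynamic program: min[i] records the smallest
   number of clusters the first i simple clusters can be partitioned into,
   together with the start of the last partition, trying every split point j
   and accepting a jump table for [j, i) only when can_be_handled agrees. */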
1178
1179 vec<cluster *>
jump_table_cluster::find_jump_tables (vec<cluster *> &clusters)
1181 {
1182 if (!is_enabled ())
1183 return clusters.copy ();
1184
1185 unsigned l = clusters.length ();
1186 auto_vec<min_cluster_item> min;
1187 min.reserve (l + 1);
1188
1189 min.quick_push (min_cluster_item (0, 0, 0));
1190
1191 for (unsigned i = 1; i <= l; i++)
1192 {
1193 /* Set minimal # of clusters with i-th item to infinite. */
1194 min.quick_push (min_cluster_item (INT_MAX, INT_MAX, INT_MAX));
1195
1196 for (unsigned j = 0; j < i; j++)
1197 {
1198 unsigned HOST_WIDE_INT s = min[j].m_non_jt_cases;
1199 if (i - j < case_values_threshold ())
1200 s += i - j;
1201
/* Prefer fewer clusters; on a tie, prefer the split that leaves fewer
cases outside jump tables. */
1203 if ((min[j].m_count + 1 < min[i].m_count
1204 || (min[j].m_count + 1 == min[i].m_count
1205 && s < min[i].m_non_jt_cases))
1206 && can_be_handled (clusters, j, i - 1))
1207 min[i] = min_cluster_item (min[j].m_count + 1, j, s);
1208 }
1209
1210 gcc_checking_assert (min[i].m_count != INT_MAX);
1211 }
1212
1213 /* No result. */
1214 if (min[l].m_count == l)
1215 return clusters.copy ();
1216
1217 vec<cluster *> output;
1218 output.create (4);
1219
1220 /* Find and build the clusters. */
1221 for (unsigned int end = l;;)
1222 {
1223 int start = min[end].m_start;
1224
1225 /* Do not allow clusters with small number of cases. */
1226 if (is_beneficial (clusters, start, end - 1))
1227 output.safe_push (new jump_table_cluster (clusters, start, end - 1));
1228 else
1229 for (int i = end - 1; i >= start; i--)
1230 output.safe_push (clusters[i]);
1231
1232 end = start;
1233
1234 if (start <= 0)
1235 break;
1236 }
1237
1238 output.reverse ();
1239 return output;
1240 }
1241
1242 /* Return true when cluster starting at START and ending at END (inclusive)
1243 can build a jump-table. */
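/* As a hypothetical example: five single-valued cases spread over the values
   0 .. 40 give range = 41 and comparison_count = 5, so the jump table is
   accepted only when 100 * 41 <= max_ratio * 5, i.e. when the selected
   growth-ratio parameter is at least 820. */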
1244
1245 bool
jump_table_cluster::can_be_handled (const vec<cluster *> &clusters,
1247 unsigned start, unsigned end)
1248 {
1249 /* If the switch is relatively small such that the cost of one
indirect jump on the target is higher than the cost of a
1251 decision tree, go with the decision tree.
1252
1253 If range of values is much bigger than number of values,
1254 or if it is too large to represent in a HOST_WIDE_INT,
1255 make a sequence of conditional branches instead of a dispatch.
1256
1257 The definition of "much bigger" depends on whether we are
1258 optimizing for size or for speed.
1259
For algorithm correctness, a jump table for a single case must return
1261 true. We bail out in is_beneficial if it's called just for
1262 a single case. */
1263 if (start == end)
1264 return true;
1265
1266 unsigned HOST_WIDE_INT max_ratio
1267 = (optimize_insn_for_size_p ()
1268 ? param_jump_table_max_growth_ratio_for_size
1269 : param_jump_table_max_growth_ratio_for_speed);
1270 unsigned HOST_WIDE_INT range = get_range (clusters[start]->get_low (),
1271 clusters[end]->get_high ());
1272 /* Check overflow. */
1273 if (range == 0)
1274 return false;
1275
1276 if (range > HOST_WIDE_INT_M1U / 100)
1277 return false;
1278
1279 unsigned HOST_WIDE_INT lhs = 100 * range;
1280 if (lhs < range)
1281 return false;
1282
/* First make a quick guess, as each cluster
can add at most 2 to comparison_count. */
1285 if (lhs > 2 * max_ratio * (end - start + 1))
1286 return false;
1287
1288 unsigned HOST_WIDE_INT comparison_count = 0;
1289 for (unsigned i = start; i <= end; i++)
1290 {
1291 simple_cluster *sc = static_cast<simple_cluster *> (clusters[i]);
1292 comparison_count += sc->m_range_p ? 2 : 1;
1293 }
1294
1295 return lhs <= max_ratio * comparison_count;
1296 }
1297
1298 /* Return true if cluster starting at START and ending at END (inclusive)
is a profitable transformation. */
1300
1301 bool
jump_table_cluster::is_beneficial (const vec<cluster *> &,
1303 unsigned start, unsigned end)
1304 {
1305 /* Single case bail out. */
1306 if (start == end)
1307 return false;
1308
1309 return end - start + 1 >= case_values_threshold ();
1310 }
1311
1312 /* Find bit tests of given CLUSTERS, where all members of the vector
1313 are of type simple_cluster. New clusters are returned. */
1314
1315 vec<cluster *>
bit_test_cluster::find_bit_tests (vec<cluster *> &clusters)
1317 {
1318 unsigned l = clusters.length ();
1319 auto_vec<min_cluster_item> min;
1320 min.reserve (l + 1);
1321
1322 min.quick_push (min_cluster_item (0, 0, 0));
1323
1324 for (unsigned i = 1; i <= l; i++)
1325 {
1326 /* Set minimal # of clusters with i-th item to infinite. */
1327 min.quick_push (min_cluster_item (INT_MAX, INT_MAX, INT_MAX));
1328
1329 for (unsigned j = 0; j < i; j++)
1330 {
1331 if (min[j].m_count + 1 < min[i].m_count
1332 && can_be_handled (clusters, j, i - 1))
1333 min[i] = min_cluster_item (min[j].m_count + 1, j, INT_MAX);
1334 }
1335
1336 gcc_checking_assert (min[i].m_count != INT_MAX);
1337 }
1338
1339 /* No result. */
1340 if (min[l].m_count == l)
1341 return clusters.copy ();
1342
1343 vec<cluster *> output;
1344 output.create (4);
1345
1346 /* Find and build the clusters. */
1347 for (unsigned end = l;;)
1348 {
1349 int start = min[end].m_start;
1350
1351 if (is_beneficial (clusters, start, end - 1))
1352 {
1353 bool entire = start == 0 && end == clusters.length ();
1354 output.safe_push (new bit_test_cluster (clusters, start, end - 1,
1355 entire));
1356 }
1357 else
1358 for (int i = end - 1; i >= start; i--)
1359 output.safe_push (clusters[i]);
1360
1361 end = start;
1362
1363 if (start <= 0)
1364 break;
1365 }
1366
1367 output.reverse ();
1368 return output;
1369 }
1370
1371 /* Return true when RANGE of case values with UNIQ labels
1372 can build a bit test. */
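/* E.g. on a target with a 64-bit word_mode, a cluster spanning at most 63
   values and with no more than m_max_case_bit_tests distinct destinations
   qualifies. */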
1373
1374 bool
bit_test_cluster::can_be_handled (unsigned HOST_WIDE_INT range,
1376 unsigned int uniq)
1377 {
1378 /* Check overflow. */
1379 if (range == 0)
1380 return false;
1381
1382 if (range >= GET_MODE_BITSIZE (word_mode))
1383 return false;
1384
1385 return uniq <= m_max_case_bit_tests;
1386 }
1387
1388 /* Return true when cluster starting at START and ending at END (inclusive)
1389 can build a bit test. */
1390
1391 bool
bit_test_cluster::can_be_handled (const vec<cluster *> &clusters,
1393 unsigned start, unsigned end)
1394 {
1395 auto_vec<int, m_max_case_bit_tests> dest_bbs;
/* For algorithm correctness, a bit test for a single case must return
1397 true. We bail out in is_beneficial if it's called just for
1398 a single case. */
1399 if (start == end)
1400 return true;
1401
1402 unsigned HOST_WIDE_INT range = get_range (clusters[start]->get_low (),
1403 clusters[end]->get_high ());
1404
1405 /* Make a guess first. */
1406 if (!can_be_handled (range, m_max_case_bit_tests))
1407 return false;
1408
1409 for (unsigned i = start; i <= end; i++)
1410 {
1411 simple_cluster *sc = static_cast<simple_cluster *> (clusters[i]);
/* m_max_case_bit_tests is a very small integer, so this linear
search runs in constant time. */
1414 if (!dest_bbs.contains (sc->m_case_bb->index))
1415 {
1416 if (dest_bbs.length () >= m_max_case_bit_tests)
1417 return false;
1418 dest_bbs.quick_push (sc->m_case_bb->index);
1419 }
1420 }
1421
1422 return true;
1423 }
1424
1425 /* Return true when COUNT of cases of UNIQ labels is beneficial for bit test
1426 transformation. */
1427
1428 bool
bit_test_cluster::is_beneficial (unsigned count, unsigned uniq)
1430 {
1431 return (((uniq == 1 && count >= 3)
1432 || (uniq == 2 && count >= 5)
1433 || (uniq == 3 && count >= 6)));
1434 }
1435
1436 /* Return true if cluster starting at START and ending at END (inclusive)
is a profitable transformation. */
1438
1439 bool
bit_test_cluster::is_beneficial (const vec<cluster *> &clusters,
1441 unsigned start, unsigned end)
1442 {
1443 /* Single case bail out. */
1444 if (start == end)
1445 return false;
1446
1447 auto_bitmap dest_bbs;
1448
1449 for (unsigned i = start; i <= end; i++)
1450 {
1451 simple_cluster *sc = static_cast<simple_cluster *> (clusters[i]);
1452 bitmap_set_bit (dest_bbs, sc->m_case_bb->index);
1453 }
1454
1455 unsigned uniq = bitmap_count_bits (dest_bbs);
1456 unsigned count = end - start + 1;
1457 return is_beneficial (count, uniq);
1458 }
1459
/* Comparison function for qsort to order bit tests by decreasing
number of values handled, used as a proxy for execution probability. */
1462
1463 int
case_bit_test::cmp (const void *p1, const void *p2)
1465 {
1466 const case_bit_test *const d1 = (const case_bit_test *) p1;
1467 const case_bit_test *const d2 = (const case_bit_test *) p2;
1468
1469 if (d2->bits != d1->bits)
1470 return d2->bits - d1->bits;
1471
1472 /* Stabilize the sort. */
1473 return (LABEL_DECL_UID (CASE_LABEL (d2->label))
1474 - LABEL_DECL_UID (CASE_LABEL (d1->label)));
1475 }
1476
1477 /* Expand a switch statement by a short sequence of bit-wise
1478 comparisons. "switch(x)" is effectively converted into
1479 "if ((1 << (x-MINVAL)) & CST)" where CST and MINVAL are
1480 integer constants.
1481
1482 INDEX_EXPR is the value being switched on.
1483
MINVAL is the lowest case value in the case nodes,
1485 and RANGE is highest value minus MINVAL. MINVAL and RANGE
1486 are not guaranteed to be of the same type as INDEX_EXPR
1487 (the gimplifier doesn't change the type of case label values,
1488 and MINVAL and RANGE are derived from those values).
1489 MAXVAL is MINVAL + RANGE.
1490
There *MUST* be at most max_case_bit_tests unique case
node targets. */
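/* A small made-up example: cases 0, 2 and 4 all branching to the same label,
   with MINVAL 0, yield CST = 0b10101, so the emitted test is effectively
   "if ((1 << x) & 0x15) goto <label>;". */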
1493
1494 void
bit_test_cluster::emit (tree index_expr, tree index_type,
1496 tree, basic_block default_bb, location_t loc)
1497 {
1498 case_bit_test test[m_max_case_bit_tests] = { {} };
1499 unsigned int i, j, k;
1500 unsigned int count;
1501
1502 tree unsigned_index_type = range_check_type (index_type);
1503
1504 gimple_stmt_iterator gsi;
1505 gassign *shift_stmt;
1506
1507 tree idx, tmp, csui;
1508 tree word_type_node = lang_hooks.types.type_for_mode (word_mode, 1);
1509 tree word_mode_zero = fold_convert (word_type_node, integer_zero_node);
1510 tree word_mode_one = fold_convert (word_type_node, integer_one_node);
1511 int prec = TYPE_PRECISION (word_type_node);
1512 wide_int wone = wi::one (prec);
1513
1514 tree minval = get_low ();
1515 tree maxval = get_high ();
1516 tree range = int_const_binop (MINUS_EXPR, maxval, minval);
1517 unsigned HOST_WIDE_INT bt_range = get_range (minval, maxval);
1518
1519 /* Go through all case labels, and collect the case labels, profile
1520 counts, and other information we need to build the branch tests. */
1521 count = 0;
1522 for (i = 0; i < m_cases.length (); i++)
1523 {
1524 unsigned int lo, hi;
1525 simple_cluster *n = static_cast<simple_cluster *> (m_cases[i]);
1526 for (k = 0; k < count; k++)
1527 if (n->m_case_bb == test[k].target_bb)
1528 break;
1529
1530 if (k == count)
1531 {
1532 gcc_checking_assert (count < m_max_case_bit_tests);
1533 test[k].mask = wi::zero (prec);
1534 test[k].target_bb = n->m_case_bb;
1535 test[k].label = n->m_case_label_expr;
1536 test[k].bits = 0;
1537 count++;
1538 }
1539
1540 test[k].bits += n->get_range (n->get_low (), n->get_high ());
1541
1542 lo = tree_to_uhwi (int_const_binop (MINUS_EXPR, n->get_low (), minval));
1543 if (n->get_high () == NULL_TREE)
1544 hi = lo;
1545 else
1546 hi = tree_to_uhwi (int_const_binop (MINUS_EXPR, n->get_high (),
1547 minval));
1548
1549 for (j = lo; j <= hi; j++)
1550 test[k].mask |= wi::lshift (wone, j);
1551 }
1552
1553 qsort (test, count, sizeof (*test), case_bit_test::cmp);
1554
1555 /* If all values are in the 0 .. BITS_PER_WORD-1 range, we can get rid of
1556 the minval subtractions, but it might make the mask constants more
1557 expensive. So, compare the costs. */
1558 if (compare_tree_int (minval, 0) > 0
1559 && compare_tree_int (maxval, GET_MODE_BITSIZE (word_mode)) < 0)
1560 {
1561 int cost_diff;
1562 HOST_WIDE_INT m = tree_to_uhwi (minval);
1563 rtx reg = gen_raw_REG (word_mode, 10000);
1564 bool speed_p = optimize_insn_for_speed_p ();
1565 cost_diff = set_rtx_cost (gen_rtx_PLUS (word_mode, reg,
1566 GEN_INT (-m)), speed_p);
1567 for (i = 0; i < count; i++)
1568 {
1569 rtx r = immed_wide_int_const (test[i].mask, word_mode);
1570 cost_diff += set_src_cost (gen_rtx_AND (word_mode, reg, r),
1571 word_mode, speed_p);
1572 r = immed_wide_int_const (wi::lshift (test[i].mask, m), word_mode);
1573 cost_diff -= set_src_cost (gen_rtx_AND (word_mode, reg, r),
1574 word_mode, speed_p);
1575 }
1576 if (cost_diff > 0)
1577 {
1578 for (i = 0; i < count; i++)
1579 test[i].mask = wi::lshift (test[i].mask, m);
1580 minval = build_zero_cst (TREE_TYPE (minval));
1581 range = maxval;
1582 }
1583 }
1584
1585 /* Now build the test-and-branch code. */
1586
1587 gsi = gsi_last_bb (m_case_bb);
1588
1589 /* idx = (unsigned)x - minval. */
1590 idx = fold_convert_loc (loc, unsigned_index_type, index_expr);
1591 idx = fold_build2_loc (loc, MINUS_EXPR, unsigned_index_type, idx,
1592 fold_convert_loc (loc, unsigned_index_type, minval));
1593 idx = force_gimple_operand_gsi (&gsi, idx,
1594 /*simple=*/true, NULL_TREE,
1595 /*before=*/true, GSI_SAME_STMT);
1596
1597 if (m_handles_entire_switch)
1598 {
1599 /* if (idx > range) goto default */
1600 range
1601 = force_gimple_operand_gsi (&gsi,
1602 fold_convert (unsigned_index_type, range),
1603 /*simple=*/true, NULL_TREE,
1604 /*before=*/true, GSI_SAME_STMT);
1605 tmp = fold_build2_loc (loc, GT_EXPR, boolean_type_node, idx, range);
1606 basic_block new_bb
1607 = hoist_edge_and_branch_if_true (&gsi, tmp, default_bb,
1608 profile_probability::unlikely (), loc);
1609 gsi = gsi_last_bb (new_bb);
1610 }
1611
1612 /* csui = (1 << (word_mode) idx) */
1613 csui = make_ssa_name (word_type_node);
1614 tmp = fold_build2_loc (loc, LSHIFT_EXPR, word_type_node, word_mode_one,
1615 fold_convert_loc (loc, word_type_node, idx));
1616 tmp = force_gimple_operand_gsi (&gsi, tmp,
1617 /*simple=*/false, NULL_TREE,
1618 /*before=*/true, GSI_SAME_STMT);
1619 shift_stmt = gimple_build_assign (csui, tmp);
1620 gsi_insert_before (&gsi, shift_stmt, GSI_SAME_STMT);
1621 update_stmt (shift_stmt);
1622
1623 profile_probability prob = profile_probability::always ();
1624
1625 /* for each unique set of cases:
1626 if (const & csui) goto target */
1627 for (k = 0; k < count; k++)
1628 {
1629 prob = profile_probability::always ().apply_scale (test[k].bits,
1630 bt_range);
1631 bt_range -= test[k].bits;
1632 tmp = wide_int_to_tree (word_type_node, test[k].mask);
1633 tmp = fold_build2_loc (loc, BIT_AND_EXPR, word_type_node, csui, tmp);
1634 tmp = force_gimple_operand_gsi (&gsi, tmp,
1635 /*simple=*/true, NULL_TREE,
1636 /*before=*/true, GSI_SAME_STMT);
1637 tmp = fold_build2_loc (loc, NE_EXPR, boolean_type_node,
1638 tmp, word_mode_zero);
1639 basic_block new_bb
1640 = hoist_edge_and_branch_if_true (&gsi, tmp, test[k].target_bb,
1641 prob, loc);
1642 gsi = gsi_last_bb (new_bb);
1643 }
1644
1645 /* We should have removed all edges now. */
1646 gcc_assert (EDGE_COUNT (gsi_bb (gsi)->succs) == 0);
1647
1648 /* If nothing matched, go to the default label. */
1649 edge e = make_edge (gsi_bb (gsi), default_bb, EDGE_FALLTHRU);
1650 e->probability = profile_probability::always ();
1651 }
1652
/* Split the basic block at the statement pointed to by GSIP, and insert
a branch to CASE_BB conditional on tree expression COND.  The new edge
to CASE_BB is created with probability PROB, and the false edge falls
through to the newly created block.

The CFG is updated, but the dominator tree will not be valid after
this transformation.

Returns the newly created basic block. */
1667
1668 basic_block
bit_test_cluster::hoist_edge_and_branch_if_true (gimple_stmt_iterator *gsip,
1670 tree cond, basic_block case_bb,
1671 profile_probability prob,
1672 location_t loc)
1673 {
1674 tree tmp;
1675 gcond *cond_stmt;
1676 edge e_false;
1677 basic_block new_bb, split_bb = gsi_bb (*gsip);
1678
1679 edge e_true = make_edge (split_bb, case_bb, EDGE_TRUE_VALUE);
1680 e_true->probability = prob;
1681 gcc_assert (e_true->src == split_bb);
1682
1683 tmp = force_gimple_operand_gsi (gsip, cond, /*simple=*/true, NULL,
1684 /*before=*/true, GSI_SAME_STMT);
1685 cond_stmt = gimple_build_cond_from_tree (tmp, NULL_TREE, NULL_TREE);
1686 gimple_set_location (cond_stmt, loc);
1687 gsi_insert_before (gsip, cond_stmt, GSI_SAME_STMT);
1688
1689 e_false = split_block (split_bb, cond_stmt);
1690 new_bb = e_false->dest;
1691 redirect_edge_pred (e_true, split_bb);
1692
1693 e_false->flags &= ~EDGE_FALLTHRU;
1694 e_false->flags |= EDGE_FALSE_VALUE;
1695 e_false->probability = e_true->probability.invert ();
1696 new_bb->count = e_false->count ();
1697
1698 return new_bb;
1699 }
1700
1701 /* Compute the number of case labels that correspond to each outgoing edge of
the switch statement. Record this information in the aux field of the edge. */
1703
1704 void
switch_decision_tree::compute_cases_per_edge ()
1706 {
1707 reset_out_edges_aux (m_switch);
1708 int ncases = gimple_switch_num_labels (m_switch);
1709 for (int i = ncases - 1; i >= 1; --i)
1710 {
1711 edge case_edge = gimple_switch_edge (cfun, m_switch, i);
1712 case_edge->aux = (void *) ((intptr_t) (case_edge->aux) + 1);
1713 }
1714 }
1715
/* Analyze the switch statement and return true when the statement is expanded
as a decision tree. */
1718
1719 bool
switch_decision_tree::analyze_switch_statement ()
1721 {
1722 unsigned l = gimple_switch_num_labels (m_switch);
1723 basic_block bb = gimple_bb (m_switch);
1724 auto_vec<cluster *> clusters;
1725 clusters.create (l - 1);
1726
1727 basic_block default_bb = gimple_switch_default_bb (cfun, m_switch);
1728 m_case_bbs.reserve (l);
1729 m_case_bbs.quick_push (default_bb);
1730
1731 compute_cases_per_edge ();
1732
1733 for (unsigned i = 1; i < l; i++)
1734 {
1735 tree elt = gimple_switch_label (m_switch, i);
1736 tree lab = CASE_LABEL (elt);
1737 basic_block case_bb = label_to_block (cfun, lab);
1738 edge case_edge = find_edge (bb, case_bb);
1739 tree low = CASE_LOW (elt);
1740 tree high = CASE_HIGH (elt);
1741
1742 profile_probability p
1743 = case_edge->probability.apply_scale (1, (intptr_t) (case_edge->aux));
1744 clusters.quick_push (new simple_cluster (low, high, elt, case_edge->dest,
1745 p));
1746 m_case_bbs.quick_push (case_edge->dest);
1747 }
1748
1749 reset_out_edges_aux (m_switch);
1750
1751 /* Find jump table clusters. */
1752 vec<cluster *> output = jump_table_cluster::find_jump_tables (clusters);
1753
1754 /* Find bit test clusters. */
1755 vec<cluster *> output2;
1756 auto_vec<cluster *> tmp;
1757 output2.create (1);
1758 tmp.create (1);
1759
1760 for (unsigned i = 0; i < output.length (); i++)
1761 {
1762 cluster *c = output[i];
1763 if (c->get_type () != SIMPLE_CASE)
1764 {
1765 if (!tmp.is_empty ())
1766 {
1767 vec<cluster *> n = bit_test_cluster::find_bit_tests (tmp);
1768 output2.safe_splice (n);
1769 n.release ();
1770 tmp.truncate (0);
1771 }
1772 output2.safe_push (c);
1773 }
1774 else
1775 tmp.safe_push (c);
1776 }
1777
/* The temporary vector may still hold clusters to process. */
1779 if (!tmp.is_empty ())
1780 {
1781 vec<cluster *> n = bit_test_cluster::find_bit_tests (tmp);
1782 output2.safe_splice (n);
1783 n.release ();
1784 }
1785
1786 if (dump_file)
1787 {
1788 fprintf (dump_file, ";; GIMPLE switch case clusters: ");
1789 for (unsigned i = 0; i < output2.length (); i++)
1790 output2[i]->dump (dump_file, dump_flags & TDF_DETAILS);
1791 fprintf (dump_file, "\n");
1792 }
1793
1794 output.release ();
1795
1796 bool expanded = try_switch_expansion (output2);
1797
1798 for (unsigned i = 0; i < output2.length (); i++)
1799 delete output2[i];
1800
1801 output2.release ();
1802
1803 return expanded;
1804 }
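
/* Illustration (hypothetical label set; the actual grouping depends on the
   cost heuristics in find_jump_tables and find_bit_tests): for labels
   { 0, 1, 2, 3, 4, 100, 999, 1000, 1001 } the first scan might form one
   jump-table cluster covering 0-4 and leave 100 and 999-1001 as simple
   clusters; the loop above then re-runs the bit-test analysis only over
   maximal runs of remaining simple clusters, so 999-1001 could still be
   merged into a bit-test cluster while 100 stays a plain comparison.  */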
1805
1806 /* Attempt to expand CLUSTERS as a decision tree. Return true when
1807 expanded. */
1808
1809 bool
1810 switch_decision_tree::try_switch_expansion (vec<cluster *> &clusters)
1811 {
1812 tree index_expr = gimple_switch_index (m_switch);
1813 tree index_type = TREE_TYPE (index_expr);
1814 basic_block bb = gimple_bb (m_switch);
1815
1816 if (gimple_switch_num_labels (m_switch) == 1
1817 || range_check_type (index_type) == NULL_TREE)
1818 return false;
1819
1820 /* Find the default case target label. */
1821 edge default_edge = gimple_switch_default_edge (cfun, m_switch);
1822 m_default_bb = default_edge->dest;
1823
1824 /* Do the insertion of a case label into m_case_list. The labels are
1825 fed to us in descending order from the sorted vector of case labels used
1826 in the tree part of the middle end. So the list we construct is
1827 sorted in ascending order. */
1828
1829 for (int i = clusters.length () - 1; i >= 0; i--)
1830 {
1831 case_tree_node *r = m_case_list;
1832 m_case_list = m_case_node_pool.allocate ();
1833 m_case_list->m_right = r;
1834 m_case_list->m_c = clusters[i];
1835 }
1836
1837 record_phi_operand_mapping ();
1838
1839 /* Split basic block that contains the gswitch statement. */
1840 gimple_stmt_iterator gsi = gsi_last_bb (bb);
1841 edge e;
1842 if (gsi_end_p (gsi))
1843 e = split_block_after_labels (bb);
1844 else
1845 {
1846 gsi_prev (&gsi);
1847 e = split_block (bb, gsi_stmt (gsi));
1848 }
1849 bb = split_edge (e);
1850
1851 /* Create new basic blocks for non-case clusters where specific expansion
1852 needs to happen. */
1853 for (unsigned i = 0; i < clusters.length (); i++)
1854 if (clusters[i]->get_type () != SIMPLE_CASE)
1855 {
1856 clusters[i]->m_case_bb = create_empty_bb (bb);
1857 clusters[i]->m_case_bb->count = bb->count;
1858 clusters[i]->m_case_bb->loop_father = bb->loop_father;
1859 }
1860
1861 /* Do not do extra work for a single cluster. */
1862 if (clusters.length () == 1
1863 && clusters[0]->get_type () != SIMPLE_CASE)
1864 {
1865 cluster *c = clusters[0];
1866 c->emit (index_expr, index_type,
1867 gimple_switch_default_label (m_switch), m_default_bb,
1868 gimple_location (m_switch));
1869 redirect_edge_succ (single_succ_edge (bb), c->m_case_bb);
1870 }
1871 else
1872 {
1873 emit (bb, index_expr, default_edge->probability, index_type);
1874
1875 /* Emit cluster-specific switch handling. */
1876 for (unsigned i = 0; i < clusters.length (); i++)
1877 if (clusters[i]->get_type () != SIMPLE_CASE)
1878 clusters[i]->emit (index_expr, index_type,
1879 gimple_switch_default_label (m_switch),
1880 m_default_bb, gimple_location (m_switch));
1881 }
1882
1883 fix_phi_operands_for_edges ();
1884
1885 return true;
1886 }
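
/* Note on the CFG surgery above (expository): after the split, the statements
   that preceded the gswitch stay in the original block, the gswitch sits in a
   block of its own that is bypassed, and the empty block returned by
   split_edge is where the decision tree (or the lone non-simple cluster) is
   emitted.  The bypassed gswitch is removed explicitly in emit (), or, in the
   single-cluster case, by the CFG cleanup the pass requests.  */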
1887
1888 /* Before switch transformation, record all SSA_NAMEs defined in the switch BB
1889 and used in a label basic block. */
1890
1891 void
1892 switch_decision_tree::record_phi_operand_mapping ()
1893 {
1894 basic_block switch_bb = gimple_bb (m_switch);
1895 /* Record all PHI nodes that have to be fixed after conversion. */
1896 for (unsigned i = 0; i < m_case_bbs.length (); i++)
1897 {
1898 gphi_iterator gsi;
1899 basic_block bb = m_case_bbs[i];
1900 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1901 {
1902 gphi *phi = gsi.phi ();
1903
1904 for (unsigned i = 0; i < gimple_phi_num_args (phi); i++)
1905 {
1906 basic_block phi_src_bb = gimple_phi_arg_edge (phi, i)->src;
1907 if (phi_src_bb == switch_bb)
1908 {
1909 tree def = gimple_phi_arg_def (phi, i);
1910 tree result = gimple_phi_result (phi);
1911 m_phi_mapping.put (result, def);
1912 break;
1913 }
1914 }
1915 }
1916 }
1917 }
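
/* Illustration (hypothetical GIMPLE): if a case block starts with

     x_5 = PHI <x_3(switch_bb), x_7(other_bb)>

   the mapping x_5 -> x_3 is recorded here.  When the decision tree later adds
   brand-new edges into that block, the corresponding PHI arguments start out
   empty, and fix_phi_operands_for_edges below fills them in with the recorded
   x_3.  */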
1918
1919 /* Append new operands to PHI statements that were introduced due to
1920 addition of new edges to case labels. */
1921
1922 void
1923 switch_decision_tree::fix_phi_operands_for_edges ()
1924 {
1925 gphi_iterator gsi;
1926
1927 for (unsigned i = 0; i < m_case_bbs.length (); i++)
1928 {
1929 basic_block bb = m_case_bbs[i];
1930 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1931 {
1932 gphi *phi = gsi.phi ();
1933 for (unsigned j = 0; j < gimple_phi_num_args (phi); j++)
1934 {
1935 tree def = gimple_phi_arg_def (phi, j);
1936 if (def == NULL_TREE)
1937 {
1938 edge e = gimple_phi_arg_edge (phi, j);
1939 tree *definition
1940 = m_phi_mapping.get (gimple_phi_result (phi));
1941 gcc_assert (definition);
1942 add_phi_arg (phi, *definition, e, UNKNOWN_LOCATION);
1943 }
1944 }
1945 }
1946 }
1947 }
1948
1949 /* Generate a decision tree, switching on INDEX_EXPR and jumping to
1950 one of the labels in CASE_LIST or to the DEFAULT_LABEL.
1951
1952 We generate a binary decision tree to select the appropriate target
1953 code. */
1954
1955 void
1956 switch_decision_tree::emit (basic_block bb, tree index_expr,
1957 profile_probability default_prob, tree index_type)
1958 {
1959 balance_case_nodes (&m_case_list, NULL);
1960
1961 if (dump_file)
1962 dump_function_to_file (current_function_decl, dump_file, dump_flags);
1963 if (dump_file && (dump_flags & TDF_DETAILS))
1964 {
1965 int indent_step = ceil_log2 (TYPE_PRECISION (index_type)) + 2;
1966 fprintf (dump_file, ";; Expanding GIMPLE switch as decision tree:\n");
1967 gcc_assert (m_case_list != NULL);
1968 dump_case_nodes (dump_file, m_case_list, indent_step, 0);
1969 }
1970
1971 bb = emit_case_nodes (bb, index_expr, m_case_list, default_prob, index_type,
1972 gimple_location (m_switch));
1973
1974 if (bb)
1975 emit_jump (bb, m_default_bb);
1976
1977 /* The gswitch and all its outgoing edges are no longer needed; the decision tree emitted above already has an edge that reaches default_bb. */
1978 bb = gimple_bb (m_switch);
1979 gimple_stmt_iterator gsi = gsi_last_bb (bb);
1980 gsi_remove (&gsi, true);
1981
1982 delete_basic_block (bb);
1983 }
1984
1985 /* Take an ordered list of case nodes
1986 and transform them into a near optimal binary tree,
1987 on the assumption that any target code selection value is as
1988 likely as any other.
1989
1990 The transformation is performed by splitting the ordered
1991 list into two equal sections plus a pivot. The parts are
1992 then attached to the pivot as left and right branches. Each
1993 branch is then transformed recursively. */
1994
1995 void
1996 switch_decision_tree::balance_case_nodes (case_tree_node **head,
1997 case_tree_node *parent)
1998 {
1999 case_tree_node *np;
2000
2001 np = *head;
2002 if (np)
2003 {
2004 int i = 0;
2005 int ranges = 0;
2006 case_tree_node **npp;
2007 case_tree_node *left;
2008 profile_probability prob = profile_probability::never ();
2009
2010 /* Count the number of entries on this branch. Also count the ranges. */
2011
2012 while (np)
2013 {
2014 if (!tree_int_cst_equal (np->m_c->get_low (), np->m_c->get_high ()))
2015 ranges++;
2016
2017 i++;
2018 prob += np->m_c->m_prob;
2019 np = np->m_right;
2020 }
2021
2022 if (i > 2)
2023 {
2024 /* Split this list if it is long enough for that to help. */
2025 npp = head;
2026 left = *npp;
2027 profile_probability pivot_prob = prob.apply_scale (1, 2);
2028
2029 /* Find the place in the list that bisects the list's total cost,
2030 where ranges count as 2. */
2031 while (1)
2032 {
2033 /* Skip nodes while their probability does not reach
2034 that amount. */
2035 prob -= (*npp)->m_c->m_prob;
2036 if ((prob.initialized_p () && prob < pivot_prob)
2037 || ! (*npp)->m_right)
2038 break;
2039 npp = &(*npp)->m_right;
2040 }
2041
2042 np = *npp;
2043 *npp = 0;
2044 *head = np;
2045 np->m_parent = parent;
2046 np->m_left = left == np ? NULL : left;
2047
2048 /* Optimize each of the two split parts. */
2049 balance_case_nodes (&np->m_left, np);
2050 balance_case_nodes (&np->m_right, np);
2051 np->m_c->m_subtree_prob = np->m_c->m_prob;
2052 if (np->m_left)
2053 np->m_c->m_subtree_prob += np->m_left->m_c->m_subtree_prob;
2054 if (np->m_right)
2055 np->m_c->m_subtree_prob += np->m_right->m_c->m_subtree_prob;
2056 }
2057 else
2058 {
2059 /* Else leave this branch as one level,
2060 but fill in `parent' fields. */
2061 np = *head;
2062 np->m_parent = parent;
2063 np->m_c->m_subtree_prob = np->m_c->m_prob;
2064 for (; np->m_right; np = np->m_right)
2065 {
2066 np->m_right->m_parent = np;
2067 (*head)->m_c->m_subtree_prob += np->m_right->m_c->m_subtree_prob;
2068 }
2069 }
2070 }
2071 }
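
/* Worked example (illustrative, assuming five equally likely single-value
   cases): for the ordered list 1 -> 2 -> 3 -> 4 -> 5 the probability scan
   stops at 3, which becomes the pivot; 1 -> 2 stays as the left branch and
   4 -> 5 as the right branch, and each side is processed recursively
   (two-element sides simply remain parent-linked chains).  Longer lists keep
   halving the same way, so the depth of the resulting tree grows roughly
   logarithmically in the number of cases.  */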
2072
2073 /* Dump ROOT, a list or tree of case nodes, to file. */
2074
2075 void
2076 switch_decision_tree::dump_case_nodes (FILE *f, case_tree_node *root,
2077 int indent_step, int indent_level)
2078 {
2079 if (root == 0)
2080 return;
2081 indent_level++;
2082
2083 dump_case_nodes (f, root->m_left, indent_step, indent_level);
2084
2085 fputs (";; ", f);
2086 fprintf (f, "%*s", indent_step * indent_level, "");
2087 root->m_c->dump (f);
2088 root->m_c->m_prob.dump (f);
2089 fputs (" subtree: ", f);
2090 root->m_c->m_subtree_prob.dump (f);
2091 fputs (")\n", f);
2092
2093 dump_case_nodes (f, root->m_right, indent_step, indent_level);
2094 }
2095
2096
2097 /* Add an unconditional jump from basic block BB to CASE_BB. */
2098
2099 void
2100 switch_decision_tree::emit_jump (basic_block bb, basic_block case_bb)
2101 {
2102 edge e = single_succ_edge (bb);
2103 redirect_edge_succ (e, case_bb);
2104 }
2105
2106 /* Generate code to compare OP0 with OP1 so that the condition codes are
2107 set and to jump to LABEL_BB if the condition is true.
2108 COMPARISON is the GIMPLE comparison (EQ, NE, GT, etc.).
2109 PROB is the probability of jumping to LABEL_BB. */
2110
2111 basic_block
2112 switch_decision_tree::emit_cmp_and_jump_insns (basic_block bb, tree op0,
2113 tree op1, tree_code comparison,
2114 basic_block label_bb,
2115 profile_probability prob,
2116 location_t loc)
2117 {
2118 // TODO: in one place this is called with lhs != index.
2119 op1 = fold_convert (TREE_TYPE (op0), op1);
2120
2121 gcond *cond = gimple_build_cond (comparison, op0, op1, NULL_TREE, NULL_TREE);
2122 gimple_set_location (cond, loc);
2123 gimple_stmt_iterator gsi = gsi_last_bb (bb);
2124 gsi_insert_after (&gsi, cond, GSI_NEW_STMT);
2125
2126 gcc_assert (single_succ_p (bb));
2127
2128 /* Make a new basic block where the false branch will take place. */
2129 edge false_edge = split_block (bb, cond);
2130 false_edge->flags = EDGE_FALSE_VALUE;
2131 false_edge->probability = prob.invert ();
2132
2133 edge true_edge = make_edge (bb, label_bb, EDGE_TRUE_VALUE);
2134 true_edge->probability = prob;
2135
2136 return false_edge->dest;
2137 }
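
/* Expository note: after this helper, BB ends in the equivalent of

     if (op0 <comparison> op1) goto label_bb; else goto <returned block>;

   with the true edge carrying PROB and the false edge its inverse.  Callers
   keep threading further tests onto the returned fall-through block.
   do_jump_if_equal below is the same pattern specialised to EQ_EXPR.  */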
2138
2139 /* Generate code to jump to LABEL_BB if OP0 and OP1 are equal.
2140 PROB is the probability of jumping to LABEL_BB.
2141 BB is a basic block where the new condition will be placed. */
2142
2143 basic_block
2144 switch_decision_tree::do_jump_if_equal (basic_block bb, tree op0, tree op1,
2145 basic_block label_bb,
2146 profile_probability prob,
2147 location_t loc)
2148 {
2149 op1 = fold_convert (TREE_TYPE (op0), op1);
2150
2151 gcond *cond = gimple_build_cond (EQ_EXPR, op0, op1, NULL_TREE, NULL_TREE);
2152 gimple_set_location (cond, loc);
2153 gimple_stmt_iterator gsi = gsi_last_bb (bb);
2154 gsi_insert_before (&gsi, cond, GSI_SAME_STMT);
2155
2156 gcc_assert (single_succ_p (bb));
2157
2158 /* Make a new basic block where the false branch will take place. */
2159 edge false_edge = split_block (bb, cond);
2160 false_edge->flags = EDGE_FALSE_VALUE;
2161 false_edge->probability = prob.invert ();
2162
2163 edge true_edge = make_edge (bb, label_bb, EDGE_TRUE_VALUE);
2164 true_edge->probability = prob;
2165
2166 return false_edge->dest;
2167 }
2168
2169 /* Emit step-by-step code to select a case for the value of INDEX.
2170 The thus generated decision tree follows the form of the
2171 case-node binary tree NODE, whose nodes represent test conditions.
2172 DEFAULT_PROB is probability of cases leading to default BB.
2173 INDEX_TYPE is the type of the index of the switch. */
2174
2175 basic_block
2176 switch_decision_tree::emit_case_nodes (basic_block bb, tree index,
2177 case_tree_node *node,
2178 profile_probability default_prob,
2179 tree index_type, location_t loc)
2180 {
2181 profile_probability p;
2182
2183 /* If node is null, we are done. */
2184 if (node == NULL)
2185 return bb;
2186
2187 /* Single value case. */
2188 if (node->m_c->is_single_value_p ())
2189 {
2190 /* Node is single valued. First see if the index expression matches
2191 this node and then check our children, if any. */
2192 p = node->m_c->m_prob / (node->m_c->m_subtree_prob + default_prob);
2193 bb = do_jump_if_equal (bb, index, node->m_c->get_low (),
2194 node->m_c->m_case_bb, p, loc);
2195 /* Since this case is taken at this point, subtract its weight from the
2196 subtree probability. */
2197 node->m_c->m_subtree_prob -= p;
2198
2199 if (node->m_left != NULL && node->m_right != NULL)
2200 {
2201 /* 1) the node has both children
2202
2203 If both children are single-valued cases with no
2204 children, finish up all the work. This way, we can save
2205 one ordered comparison. */
2206
2207 if (!node->m_left->has_child ()
2208 && node->m_left->m_c->is_single_value_p ()
2209 && !node->m_right->has_child ()
2210 && node->m_right->m_c->is_single_value_p ())
2211 {
2212 p = (node->m_right->m_c->m_prob
2213 / (node->m_c->m_subtree_prob + default_prob));
2214 bb = do_jump_if_equal (bb, index, node->m_right->m_c->get_low (),
2215 node->m_right->m_c->m_case_bb, p, loc);
2216
2217 p = (node->m_left->m_c->m_prob
2218 / (node->m_c->m_subtree_prob + default_prob));
2219 bb = do_jump_if_equal (bb, index, node->m_left->m_c->get_low (),
2220 node->m_left->m_c->m_case_bb, p, loc);
2221 }
2222 else
2223 {
2224 /* Branch to a label where we will handle it later. */
2225 basic_block test_bb = split_edge (single_succ_edge (bb));
2226 redirect_edge_succ (single_pred_edge (test_bb),
2227 single_succ_edge (bb)->dest);
2228
2229 p = ((node->m_right->m_c->m_subtree_prob
2230 + default_prob.apply_scale (1, 2))
2231 / (node->m_c->m_subtree_prob + default_prob));
2232 bb = emit_cmp_and_jump_insns (bb, index, node->m_c->get_high (),
2233 GT_EXPR, test_bb, p, loc);
2234 default_prob = default_prob.apply_scale (1, 2);
2235
2236 /* Handle the left-hand subtree. */
2237 bb = emit_case_nodes (bb, index, node->m_left,
2238 default_prob, index_type, loc);
2239
2240 /* If the left-hand subtree fell through,
2241 don't let it fall into the right-hand subtree. */
2242 if (bb && m_default_bb)
2243 emit_jump (bb, m_default_bb);
2244
2245 bb = emit_case_nodes (test_bb, index, node->m_right,
2246 default_prob, index_type, loc);
2247 }
2248 }
2249 else if (node->m_left == NULL && node->m_right != NULL)
2250 {
2251 /* 2) the node has only right child. */
2252
2253 /* Here we have a right child but no left so we issue a conditional
2254 branch to default and process the right child.
2255
2256 Omit the conditional branch to default if the right child
2257 does not have any children and is single valued; it would
2258 cost too much space to save so little time. */
2259
2260 if (node->m_right->has_child ()
2261 || !node->m_right->m_c->is_single_value_p ())
2262 {
2263 p = (default_prob.apply_scale (1, 2)
2264 / (node->m_c->m_subtree_prob + default_prob));
2265 bb = emit_cmp_and_jump_insns (bb, index, node->m_c->get_low (),
2266 LT_EXPR, m_default_bb, p, loc);
2267 default_prob = default_prob.apply_scale (1, 2);
2268
2269 bb = emit_case_nodes (bb, index, node->m_right, default_prob,
2270 index_type, loc);
2271 }
2272 else
2273 {
2274 /* We cannot process node->right normally
2275 since we haven't ruled out the numbers less than
2276 this node's value. So handle node->right explicitly. */
2277 p = (node->m_right->m_c->m_subtree_prob
2278 / (node->m_c->m_subtree_prob + default_prob));
2279 bb = do_jump_if_equal (bb, index, node->m_right->m_c->get_low (),
2280 node->m_right->m_c->m_case_bb, p, loc);
2281 }
2282 }
2283 else if (node->m_left != NULL && node->m_right == NULL)
2284 {
2285 /* 3) just one subtree, on the left. Similar case as previous. */
2286
2287 if (node->m_left->has_child ()
2288 || !node->m_left->m_c->is_single_value_p ())
2289 {
2290 p = (default_prob.apply_scale (1, 2)
2291 / (node->m_c->m_subtree_prob + default_prob));
2292 bb = emit_cmp_and_jump_insns (bb, index, node->m_c->get_high (),
2293 GT_EXPR, m_default_bb, p, loc);
2294 default_prob = default_prob.apply_scale (1, 2);
2295
2296 bb = emit_case_nodes (bb, index, node->m_left, default_prob,
2297 index_type, loc);
2298 }
2299 else
2300 {
2301 /* We cannot process node->left normally
2302 since we haven't ruled out the numbers less than
2303 this node's value. So handle node->left explicitly. */
2304 p = (node->m_left->m_c->m_subtree_prob
2305 / (node->m_c->m_subtree_prob + default_prob));
2306 bb = do_jump_if_equal (bb, index, node->m_left->m_c->get_low (),
2307 node->m_left->m_c->m_case_bb, p, loc);
2308 }
2309 }
2310 }
2311 else
2312 {
2313 /* Node is a range. These cases are very similar to those for a single
2314 value, except that we do not start by testing whether this node
2315 is the one to branch to. */
2316 if (node->has_child () || node->m_c->get_type () != SIMPLE_CASE)
2317 {
2318 /* Branch to a label where we will handle it later. */
2319 basic_block test_bb = split_edge (single_succ_edge (bb));
2320 redirect_edge_succ (single_pred_edge (test_bb),
2321 single_succ_edge (bb)->dest);
2322
2323
2324 profile_probability right_prob = profile_probability::never ();
2325 if (node->m_right)
2326 right_prob = node->m_right->m_c->m_subtree_prob;
2327 p = ((right_prob + default_prob.apply_scale (1, 2))
2328 / (node->m_c->m_subtree_prob + default_prob));
2329
2330 bb = emit_cmp_and_jump_insns (bb, index, node->m_c->get_high (),
2331 GT_EXPR, test_bb, p, loc);
2332 default_prob = default_prob.apply_scale (1, 2);
2333
2334 /* Value belongs to this node or to the left-hand subtree. */
2335 p = node->m_c->m_prob / (node->m_c->m_subtree_prob + default_prob);
2336 bb = emit_cmp_and_jump_insns (bb, index, node->m_c->get_low (),
2337 GE_EXPR, node->m_c->m_case_bb, p, loc);
2338
2339 /* Handle the left-hand subtree. */
2340 bb = emit_case_nodes (bb, index, node->m_left,
2341 default_prob, index_type, loc);
2342
2343 /* If the left-hand subtree fell through,
2344 don't let it fall into the right-hand subtree. */
2345 if (bb && m_default_bb)
2346 emit_jump (bb, m_default_bb);
2347
2348 bb = emit_case_nodes (test_bb, index, node->m_right,
2349 default_prob, index_type, loc);
2350 }
2351 else
2352 {
2353 /* Node has no children so we check low and high bounds to remove
2354 redundant tests. Only one of the bounds can exist,
2355 since otherwise this node is bounded--a case tested already. */
2356 tree lhs, rhs;
2357 generate_range_test (bb, index, node->m_c->get_low (),
2358 node->m_c->get_high (), &lhs, &rhs);
2359 p = default_prob / (node->m_c->m_subtree_prob + default_prob);
2360
2361 bb = emit_cmp_and_jump_insns (bb, lhs, rhs, GT_EXPR,
2362 m_default_bb, p, loc);
2363
2364 emit_jump (bb, node->m_c->m_case_bb);
2365 return NULL;
2366 }
2367 }
2368
2369 return bb;
2370 }
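
/* Worked example (illustrative): for a balanced tree whose root is the single
   value 2 with single-valued leaf children 1 and 3, the "both children are
   single-valued cases with no children" shortcut in case 1) produces, starting
   from BB,

     if (x == 2) goto case_2;
     if (x == 3) goto case_3;
     if (x == 1) goto case_1;
     goto default_bb;   // final jump wired up by the caller, emit ()

   i.e. three equality tests and no ordered comparison, which is the saving
   that shortcut refers to.  */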
2371
2372 /* The main function of the pass scans statements for switches and invokes
2373 switch_conversion::expand on them. */
2374
2375 namespace {
2376
2377 const pass_data pass_data_convert_switch =
2378 {
2379 GIMPLE_PASS, /* type */
2380 "switchconv", /* name */
2381 OPTGROUP_NONE, /* optinfo_flags */
2382 TV_TREE_SWITCH_CONVERSION, /* tv_id */
2383 ( PROP_cfg | PROP_ssa ), /* properties_required */
2384 0, /* properties_provided */
2385 0, /* properties_destroyed */
2386 0, /* todo_flags_start */
2387 TODO_update_ssa, /* todo_flags_finish */
2388 };
2389
2390 class pass_convert_switch : public gimple_opt_pass
2391 {
2392 public:
2393 pass_convert_switch (gcc::context *ctxt)
2394 : gimple_opt_pass (pass_data_convert_switch, ctxt)
2395 {}
2396
2397 /* opt_pass methods: */
2398 virtual bool gate (function *) { return flag_tree_switch_conversion != 0; }
2399 virtual unsigned int execute (function *);
2400
2401 }; // class pass_convert_switch
2402
2403 unsigned int
2404 pass_convert_switch::execute (function *fun)
2405 {
2406 basic_block bb;
2407 bool cfg_altered = false;
2408
2409 FOR_EACH_BB_FN (bb, fun)
2410 {
2411 gimple *stmt = last_stmt (bb);
2412 if (stmt && gimple_code (stmt) == GIMPLE_SWITCH)
2413 {
2414 if (dump_file)
2415 {
2416 expanded_location loc = expand_location (gimple_location (stmt));
2417
2418 fprintf (dump_file, "beginning to process the following "
2419 "SWITCH statement (%s:%d) : ------- \n",
2420 loc.file, loc.line);
2421 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2422 putc ('\n', dump_file);
2423 }
2424
2425 switch_conversion sconv;
2426 sconv.expand (as_a <gswitch *> (stmt));
2427 cfg_altered |= sconv.m_cfg_altered;
2428 if (!sconv.m_reason)
2429 {
2430 if (dump_file)
2431 {
2432 fputs ("Switch converted\n", dump_file);
2433 fputs ("--------------------------------\n", dump_file);
2434 }
2435
2436 /* Make no effort to update the post-dominator tree.
2437 It is actually not that hard for the transformations
2438 we have performed, but it is not supported
2439 by iterate_fix_dominators. */
2440 free_dominance_info (CDI_POST_DOMINATORS);
2441 }
2442 else
2443 {
2444 if (dump_file)
2445 {
2446 fputs ("Bailing out - ", dump_file);
2447 fputs (sconv.m_reason, dump_file);
2448 fputs ("\n--------------------------------\n", dump_file);
2449 }
2450 }
2451 }
2452 }
2453
2454 return cfg_altered ? TODO_cleanup_cfg : 0;
2455 }
2456
2457 } // anon namespace
2458
2459 gimple_opt_pass *
2460 make_pass_convert_switch (gcc::context *ctxt)
2461 {
2462 return new pass_convert_switch (ctxt);
2463 }
2464
2465 /* The main function of the pass scans statements for switches and lowers
2466 them to a jump table, bit tests or a decision tree. */
2467
2468 namespace {
2469
2470 template <bool O0> class pass_lower_switch: public gimple_opt_pass
2471 {
2472 public:
2473 pass_lower_switch (gcc::context *ctxt) : gimple_opt_pass (data, ctxt) {}
2474
2475 static const pass_data data;
2476 opt_pass *
2477 clone ()
2478 {
2479 return new pass_lower_switch<O0> (m_ctxt);
2480 }
2481
2482 virtual bool
2483 gate (function *)
2484 {
2485 return !O0 || !optimize;
2486 }
2487
2488 virtual unsigned int execute (function *fun);
2489 }; // class pass_lower_switch
2490
2491 template <bool O0>
2492 const pass_data pass_lower_switch<O0>::data = {
2493 GIMPLE_PASS, /* type */
2494 O0 ? "switchlower_O0" : "switchlower", /* name */
2495 OPTGROUP_NONE, /* optinfo_flags */
2496 TV_TREE_SWITCH_LOWERING, /* tv_id */
2497 ( PROP_cfg | PROP_ssa ), /* properties_required */
2498 0, /* properties_provided */
2499 0, /* properties_destroyed */
2500 0, /* todo_flags_start */
2501 TODO_update_ssa | TODO_cleanup_cfg, /* todo_flags_finish */
2502 };
2503
2504 template <bool O0>
2505 unsigned int
2506 pass_lower_switch<O0>::execute (function *fun)
2507 {
2508 basic_block bb;
2509 bool expanded = false;
2510
2511 auto_vec<gimple *> switch_statements;
2512 switch_statements.create (1);
2513
2514 FOR_EACH_BB_FN (bb, fun)
2515 {
2516 gimple *stmt = last_stmt (bb);
2517 gswitch *swtch;
2518 if (stmt && (swtch = dyn_cast<gswitch *> (stmt)))
2519 {
2520 if (!O0)
2521 group_case_labels_stmt (swtch);
2522 switch_statements.safe_push (swtch);
2523 }
2524 }
2525
2526 for (unsigned i = 0; i < switch_statements.length (); i++)
2527 {
2528 gimple *stmt = switch_statements[i];
2529 if (dump_file)
2530 {
2531 expanded_location loc = expand_location (gimple_location (stmt));
2532
2533 fprintf (dump_file, "beginning to process the following "
2534 "SWITCH statement (%s:%d) : ------- \n",
2535 loc.file, loc.line);
2536 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2537 putc ('\n', dump_file);
2538 }
2539
2540 gswitch *swtch = dyn_cast<gswitch *> (stmt);
2541 if (swtch)
2542 {
2543 switch_decision_tree dt (swtch);
2544 expanded |= dt.analyze_switch_statement ();
2545 }
2546 }
2547
2548 if (expanded)
2549 {
2550 free_dominance_info (CDI_DOMINATORS);
2551 free_dominance_info (CDI_POST_DOMINATORS);
2552 mark_virtual_operands_for_renaming (cfun);
2553 }
2554
2555 return 0;
2556 }
2557
2558 } // anon namespace
2559
2560 gimple_opt_pass *
2561 make_pass_lower_switch_O0 (gcc::context *ctxt)
2562 {
2563 return new pass_lower_switch<true> (ctxt);
2564 }
2565 gimple_opt_pass *
2566 make_pass_lower_switch (gcc::context *ctxt)
2567 {
2568 return new pass_lower_switch<false> (ctxt);
2569 }
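
/* Usage sketch (illustrative source, not taken from any testcase): given

     int f (int x)
     {
       switch (x)
         {
         case 0: return 10;
         case 1: return 11;
         case 2: return 12;
         default: return -1;
         }
     }

   the "switchconv" pass above may replace the whole switch with a load from a
   static array, since every case merely selects a value; whatever survives is
   later handled by "switchlower" (or "switchlower_O0" when not optimizing),
   where switch_decision_tree picks between a jump table, bit tests and a
   balanced tree of comparisons.  */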
2570
2571
2572